hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a2d6bc49a1295bcbc496414817c40b451148a5a5 | 17,327 | py | Python | spider_test.py | orxg/newspider | 1b7d09394a8dba8149d874f5a6635e53f83e13a5 | [
"Apache-2.0"
] | null | null | null | spider_test.py | orxg/newspider | 1b7d09394a8dba8149d874f5a6635e53f83e13a5 | [
"Apache-2.0"
] | null | null | null | spider_test.py | orxg/newspider | 1b7d09394a8dba8149d874f5a6635e53f83e13a5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 09:41:51 2018
@author: ldh
"""
# spider_test.py
import urlparse
import re
import random
import datetime as dt
import time
import requests
from bs4 import BeautifulSoup,Tag
import urlparse
import pandas as pd
def ip_generator():
ip = [str(random.randint(0,255)) for i in range(4)]
ip = '.'.join(ip)
return ip
def header_generator():
header = {'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}
header['X-Forwarded-For'] = ip_generator()
return header
#%% 同花顺
def ths_news_time_convertor(ths_time):
'''
同花顺新闻时间转换成正常时间。
'''
year = dt.datetime.today().year
month = ths_time[:2]
day = ths_time[3:5]
hour = ths_time[7:9]
minute = ths_time[10:12]
return str(year) + '-' + month + '-' + day + ' ' + \
hour + ':' + minute
url_ths = 'http://news.10jqka.com.cn/cjzx_list/index_1.shtml'
last_news_time = None
# 标题页数据获取
response = requests.get(url_ths,headers = header_generator())
# 标题页数据解析
soup = BeautifulSoup(response.content,'html.parser')
content_titles = soup.find_all('span',class_ = 'arc-title')
titles = []
news_time = []
hrefs = []
for each in content_titles:
tmp = each.find('a')
titles.append(tmp.attrs['title'])
news_time.append(ths_news_time_convertor(each.find('span').text))
hrefs.append(tmp.attrs['href'])
news_df = pd.DataFrame([titles,news_time,hrefs],
index = ['title','news_time','href']).T
additions = news_df
# 增量爬取
for idx,row in additions.iterrows():
link = row['href']
link_response = requests.get(link,headers = header_generator())
tmp_soup = BeautifulSoup(link_response.content,'html.parser')
main_text = tmp_soup.find(class_ = 'main-text atc-content')
bottom = main_text.find(class_ = 'bottomSign')
bottom.clear()
js_script = main_text.find(type = 'text/javascript')
js_script.clear()
for a in main_text.find_all():
if a.has_attr('href'):
del a['href']
news_content = str(main_text).decode('utf8').replace(u' ',u' ')
p_list = main_text.find_all('p')
for p in p_list:
if p.has_attr('class'):
if p.attrs['class'] == 'bottomSign':
break
else:
news_content += str(p).decode('utf8')
additions.loc[idx,'content'] = news_content
#%% 中证网
zzw_url = 'http://www.cs.com.cn/xwzx/hg/'
response = requests.get(zzw_url,headers = header_generator())
# parse titles
content = response.content
soup = BeautifulSoup(content,'html.parser')
content_titles = soup.find('ul',class_ = 'list-lm pad10')
content_titles = content_titles.find_all('li')
news_time = [each.find('span').text for each in content_titles]
hrefs = [each.find('a').attrs['href'] for each in content_titles]
titles = [each.find('a').text for each in content_titles]
news_time = map(lambda x: '20' + x,news_time)
hrefs = map(lambda x: urlparse.urljoin(zzw_url,x),hrefs)
latest_flag = max(news_time)
news_df = pd.DataFrame([titles,news_time,hrefs],index = ['title','news_time','href']).T
additions = news_df
additions['content'] = None
additions['news_source'] = u'中证网'
for idx,row in additions.iterrows():
link = row['href']
link_response = requests.get(link,headers = header_generator())
encoding = requests.utils.get_encodings_from_content(link_response.text)
link_response.encoding = encoding[0]
# parse content
content = link_response.text
tmp_soup = BeautifulSoup(content,'html.parser')
article = tmp_soup.find('div',class_ = 'article-t hidden')
# 解析总页数
js_script = article.find(class_ = 'page').text
count_page_str = re.findall(r'var countPage = (\d+)',js_script)
count_page = int(count_page_str[0])
# 剔除javascript
pages_js = article.find(class_ = 'page')
pages_js.clear()
if count_page == 1:
additions.loc[idx,'content'] = str(article).decode('utf8')
else:
link_compoent_list = link.split('.')
last_file = link_compoent_list[-2]
pages_list = [last_file + '_' + str(i) for i in range(1,count_page)]
other_links = []
for i in range(0,count_page - 1):
link_compoent_list[-2] = pages_list[i]
other_links.append('.'.join(link_compoent_list))
content_all = str(article).decode('utf8')
for other_link in other_links:
link_response = requests.get(other_link,headers = header_generator())
encoding = requests.utils.get_encodings_from_content(link_response.text)
link_response.encoding = encoding[0]
content = link_response.text
tmp_soup = BeautifulSoup(content,'html.parser')
article = tmp_soup.find('div',class_ = 'article-t hidden')
pages_js = article.find(class_ = 'page')
pages_js.clear()
content_all += str(article).decode('utf8')
additions.loc[idx,'content'] = content_all
break
#%% 中国证券网
url = 'http://news.cnstock.com/news/sns_yw/index.html'
web_url = 'http://www.cnstock.com'
response = requests.get(url,headers = header_generator())
content = response.content
soup = BeautifulSoup(content,'html.parser')
content_titles = soup.find('ul',class_ = 'new-list article-mini')
news_time = [each.text[1:-1] for each in content_titles.find_all('span',class_ = 'time')]
hrefs = [each.attrs['href'] for each in content_titles.find_all('a')]
titles = [each.attrs['title'] for each in content_titles.find_all('a')]
news_df = pd.DataFrame([titles,news_time,hrefs],index = ['title',
'news_time','href']).T
additions = news_df
for idx,row in additions.iterrows():
break
link = row['href']
link_response = requests.get(link,headers = header_generator())
tmp_soup = BeautifulSoup(link_response.content,'html.parser')
content = tmp_soup.find('div',class_ = 'content')
imgs = content.find_all('img')
for img in imgs:
img.attrs['src'] = urlparse.urljoin(web_url,img.attrs['src'])
content = str(content).decode('utf8')
additions.loc[idx,'content'] = content
break
#%% 证券时报网
stcn_url = 'http://news.stcn.com/'
response = requests.get(stcn_url,headers = header_generator())
content = response.content
soup = BeautifulSoup(content)
content_titles = soup.find_all(class_ = 'tit')[:100]
news_time = soup.find_all(class_ = 'sj')[:100]
titles = [each.find('a').text for each in content_titles]
hrefs = [each.find('a').attrs['href'] for each in content_titles]
news_time = [each.text[:10] + ' ' + each.text[10:] for each in news_time]
news_df = pd.DataFrame([titles,news_time,hrefs],index = ['title',
'news_time','href']).T
additions = news_df
additions['news_source'] = ''
for idx,row in additions.iterrows():
break
link = row['href']
link_response = requests.get(link,headers = header_generator())
tmp_soup = BeautifulSoup(link_response.content,'html.parser')
content = tmp_soup.find(class_ = 'txt_con')
content = str(content).decode('utf8')
additions.loc[idx,'content'] = content
break
#%% 人民网
url = 'http://finance.people.com.cn/index1.html'
web_url = 'http://finance.people.com.cn/'
response = requests.get(url,headers = header_generator())
response.encoding = response.apparent_encoding
content = response.text
soup = BeautifulSoup(content,'html.parser')
related = soup.find_all('div',class_ = 'left w310')
for each in related:
if each.find('h2'):
if each.find('h2').text == u'宏观':
hrefs = each
break
hrefs = [each.attrs['href'] for each in hrefs.find_all('a')]
hrefs = [urlparse.urljoin(url,each) for each in hrefs]
news_df = pd.DataFrame([hrefs],index = ['href']).T
additions = news_df
additions['news_source'] = ''
for idx,row in additions.iterrows():
break
link = row['href']
response = requests.get(link,headers = header_generator())
response.encoding = response.apparent_encoding
tmp_soup = BeautifulSoup(response.text,'html.parser')
header = tmp_soup.find('div',class_ = 'clearfix w1000_320 text_title')
title = header.find('h1').text.replace(u'\xa0',' ')
news_time = header.find(class_ = 'fl').text[:16]
news_time = news_time.replace(u'年','-')
news_time = news_time.replace(u'月','-')
news_time = news_time.replace(u'日',' ')
content = tmp_soup.find(class_ = 'box_con')
pics = content.find_all('img')
for pic in pics:
pic['src'] = urlparse.urljoin(web_url,pic['src'])
content = str(content).decode('utf8').replace(u' ',u' ').replace(u'\xa0',u' ')
additions.loc[idx,'title'] = title
additions.loc[idx,'news_time'] = news_time
additions.loc[idx,'content'] = content
break
#%% 人民网热点新闻
url = 'http://finance.people.com.cn/'
response = requests.get(url,headers = header_generator())
response.encoding = response.apparent_encoding
content = response.text
soup = BeautifulSoup(content,'html.parser')
related = soup.find('div',class_ = 'title mt15')
related1 = related.find('a')
href = urlparse.urljoin(url,related1.attrs['href'])
title = related1.text
news_df = pd.DataFrame([[title,href]],columns = ['title','href'])
additions = news_df
additions['news_source'] = u'人民网'
additions['if_header'] = 1
for idx,row in additions.iterrows():
break
link = row['href']
response = requests.get(link,headers = header_generator())
response.encoding = response.apparent_encoding
tmp_soup = BeautifulSoup(response.text,'html.parser')
header = tmp_soup.find('div',class_ = 'clearfix w1000_320 text_title')
title = header.find('h1').text.replace(u'\xa0',' ')
news_time = header.find(class_ = 'fl').text[:16]
news_time = news_time.replace(u'年','-')
news_time = news_time.replace(u'月','-')
news_time = news_time.replace(u'日',' ')
content = tmp_soup.find(class_ = 'box_con')
pics = content.find_all('img')
for pic in pics:
pic['src'] = urlparse.urljoin(web_url,pic['src'])
content = str(content).decode('utf8').replace(u' ',u' ').replace(u'\xa0',u' ')
additions.loc[idx,'news_time'] = news_time
additions.loc[idx,'content'] = content
break
#%% 证券时报网热点
stcn_url = 'http://news.stcn.com/'
response = requests.get(stcn_url,headers = header_generator())
content = response.content
soup = BeautifulSoup(content,'html.parser')
hot_news = soup.find(class_ = 'hotNews')
href = hot_news.dt.a.attrs['href']
title = hot_news.dd.text
news_time = hot_news.find(class_ = 'sj')
news_time = news_time.text[:10] + ' ' + news_time.span.text
news_df = pd.DataFrame([[title,news_time,href]],columns = ['title',
'news_time','href'])
additions = news_df
additions['news_source'] = ''
for idx,row in additions.iterrows():
break
link = row['href']
link_response = requests.get(link,headers = header_generator())
tmp_soup = BeautifulSoup(link_response.content,'html.parser')
content = tmp_soup.find(class_ = 'txt_con')
content = str(content).decode('utf8')
additions.loc[idx,'content'] = content
break
#%% 中证网热点
zzw_url = 'http://www.cs.com.cn/xwzx/'
response = requests.get(zzw_url,headers = header_generator())
# parse titles
content = response.content
soup = BeautifulSoup(content,'html.parser')
top_news = soup.find(class_ = 'topnews hidden')
title = top_news.h1.text
href = urlparse.urljoin(zzw_url,top_news.h1.a.attrs['href'])
news_df = pd.DataFrame([[title,href]],columns = ['title','href'])
additions = news_df
additions['content'] = None
additions['news_source'] = u'中证网'
for idx,row in additions.iterrows():
break
link = row['href']
link_response = requests.get(link,headers = header_generator())
encoding = requests.utils.get_encodings_from_content(link_response.text)
link_response.encoding = encoding[0]
# parse content
content = link_response.text
tmp_soup = BeautifulSoup(content,'html.parser')
info = tmp_soup.find(class_ = 'info')
news_time = info.find('em').text
article = tmp_soup.find('div',class_ = 'article-t hidden')
# 解析总页数
js_script = article.find(class_ = 'page').text
count_page_str = re.findall(r'var countPage = (\d+)',js_script)
count_page = int(count_page_str[0])
# 剔除javascript
pages_js = article.find(class_ = 'page')
pages_js.clear()
if count_page == 1:
additions.loc[idx,'content'] = str(article).decode('utf8')
else:
link_compoent_list = link.split('.')
last_file = link_compoent_list[-2]
pages_list = [last_file + '_' + str(i) for i in range(1,count_page)]
other_links = []
for i in range(0,count_page - 1):
link_compoent_list[-2] = pages_list[i]
other_links.append('.'.join(link_compoent_list))
content_all = str(article).decode('utf8')
for other_link in other_links:
link_response = requests.get(other_link,headers = header_generator())
encoding = requests.utils.get_encodings_from_content(link_response.text)
link_response.encoding = encoding[0]
content = link_response.text
tmp_soup = BeautifulSoup(content,'html.parser')
article = tmp_soup.find('div',class_ = 'article-t hidden')
pages_js = article.find(class_ = 'page')
pages_js.clear()
content_all += str(article).decode('utf8')
additions.loc[idx,'content'] = content_all
break
#%% 中国证券网热点
url = 'http://www.cnstock.com/'
web_url = 'http://www.cnstock.com'
response = requests.get(url,headers = header_generator())
content = response.content
soup = BeautifulSoup(content,'htm
data_list = soup.find(id = 'data_list')
href = header_topic.h1.a.attrs['href']
title = header_topic.h1.a.text
news_df = pd.DataFrame([[title,href]],columns = ['title','href'])
additions = news_df
for idx,row in additions.iterrows():
break
link = row['href']
link_response = requests.get(link,headers = header_generator())
tmp_soup = BeautifulSoup(link_response.content,'html.parser')
content = tmp_soup.find('div',class_ = 'content')
imgs = content.find_all('img')
for img in imgs:
img.attrs['src'] = urlparse.urljoin(web_url,img.attrs['src'])
content = str(content).decode('utf8')
news_time = tmp_soup.find(class_ = 'timer').text
additions.loc[idx,'content'] = content
break
#%% 二十一世纪经济网
url = 'http://www.21jingji.com/channel/politics/'
web_url = 'http://www.21jingji.com/'
response = requests.get(url,headers = header_generator())
content = response.content
soup = BeautifulSoup(content,'html.parser')
data_list = soup.find(id = 'data_list')
news = data_list.find_all('a',class_ = 'listTit')
data = []
for each in news:
data.append([each.attrs['title'],each.attrs['href']])
news_df = pd.DataFrame(data,columns = ['title','href'])
additions = news_df
for idx,row in additions.iterrows():
link = row['href']
link_response = requests.get(link,headers = header_generator())
tmp_soup = BeautifulSoup(link_response.content,'html.parser')
# 内容
content = tmp_soup.find('div',class_ = 'detailCont')
last_tag = content.find('a',class_ = 'goindex')
last_tag.clear()
content = str(content).decode('utf8')
# 时间
news_date = tmp_soup.find('span',class_='').text
news_date = news_date.replace(u'年',u'-')
news_date = news_date.replace(u'月',u'-')
news_date = news_date.replace(u'日',u'')
hour_time = tmp_soup.find('span',class_='hour').text
news_time = news_date + ' ' + hour_time
additions.loc[idx,'content'] = content
additions.loc[idx,'news_time'] = news_time
break
#%% 二十一世纪经济网头条
url = 'http://www.21jingji.com/'
response = requests.get(url,headers = header_generator())
content = response.content
soup = BeautifulSoup(content,'html.parser')
data_ul = soup.find('ul',class_ = 'listUl')
data_list = data_ul.find_all('a')
data = []
for each in data_list:
if each.has_attr('title'):
data.append([each.attrs['title'],each.attrs['href']])
news_df = pd.DataFrame(data,columns = ['title','href'])
additions = news_df
for idx,row in additions.iterrows():
link = row['href']
link_response = requests.get(link,headers = header_generator())
tmp_soup = BeautifulSoup(link_response.content,'html.parser')
# 内容
content = tmp_soup.find('div',class_ = 'detailCont')
last_tag = content.find('a',class_ = 'goindex')
last_tag.clear()
content = str(content).decode('utf8')
# 时间
news_date = tmp_soup.find('span',class_='').text
news_date = news_date.replace(u'年',u'-')
news_date = news_date.replace(u'月',u'-')
news_date = news_date.replace(u'日',u'')
hour_time = tmp_soup.find('span',class_='hour').text
news_time = news_date + ' ' + hour_time
additions.loc[idx,'content'] = content
additions.loc[idx,'news_time'] = news_time
break | 30.721631 | 117 | 0.649853 | 2,327 | 17,327 | 4.651053 | 0.112591 | 0.039176 | 0.042133 | 0.03123 | 0.782131 | 0.77132 | 0.758847 | 0.741292 | 0.731313 | 0.731313 | 0 | 0.011202 | 0.196283 | 17,327 | 564 | 118 | 30.721631 | 0.765977 | 0 | 0 | 0.698701 | 0 | 0.002597 | 0.121269 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.023377 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a2e98ead0b66251b25258d930c344188ffd7164c | 68 | py | Python | sub_server/package/__init__.py | kankanpan/pyAutoControl | 056cc0d28a88aa796bbf689dc5617e1d0177ee01 | [
"MIT"
] | null | null | null | sub_server/package/__init__.py | kankanpan/pyAutoControl | 056cc0d28a88aa796bbf689dc5617e1d0177ee01 | [
"MIT"
] | null | null | null | sub_server/package/__init__.py | kankanpan/pyAutoControl | 056cc0d28a88aa796bbf689dc5617e1d0177ee01 | [
"MIT"
] | null | null | null | from .import findImg
from .import findWindow
from .import guiAction
| 17 | 23 | 0.823529 | 9 | 68 | 6.222222 | 0.555556 | 0.535714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.132353 | 68 | 3 | 24 | 22.666667 | 0.949153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0c452d24a34353c728dbbaae1947b401836a9a8f | 418 | py | Python | env/sawyer/__init__.py | clvrai/mopa-rl | a263b4b3f10573c2eca48193223368d032f96b09 | [
"MIT"
] | 43 | 2020-10-23T06:37:34.000Z | 2022-02-12T19:08:04.000Z | env/sawyer/__init__.py | clvrai/mopa-pd | ac55f568149d8e79c28326bcd9b63336ed065a61 | [
"MIT"
] | 1 | 2021-01-01T13:26:32.000Z | 2021-11-12T03:23:49.000Z | env/sawyer/__init__.py | clvrai/mopa-rl | a263b4b3f10573c2eca48193223368d032f96b09 | [
"MIT"
] | 7 | 2020-11-05T06:59:06.000Z | 2021-12-12T05:14:31.000Z | from env.sawyer.sawyer_push import SawyerPushEnv
from env.sawyer.sawyer_push_obstacle import SawyerPushObstacleEnv
from env.sawyer.sawyer_lift import SawyerLiftEnv
from env.sawyer.sawyer_assembly import SawyerAssemblyEnv
from env.sawyer.sawyer_assembly_obstacle import SawyerAssemblyObstacleEnv
from env.sawyer.sawyer_assembly import SawyerAssemblyEnv
from env.sawyer.sawyer_lift_obstacle import SawyerLiftObstacleEnv
| 52.25 | 73 | 0.899522 | 52 | 418 | 7.038462 | 0.269231 | 0.13388 | 0.248634 | 0.363388 | 0.598361 | 0.377049 | 0.377049 | 0.377049 | 0.377049 | 0.377049 | 0 | 0 | 0.066986 | 418 | 7 | 74 | 59.714286 | 0.938462 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ac1e661b88b73ec920a23ac4130d3eff611879e4 | 98 | py | Python | tests/conftest.py | jed-frey/bot_01 | c9afefa048824b85cf9473457e9c0e4849e2e60d | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | jed-frey/bot_01 | c9afefa048824b85cf9473457e9c0e4849e2e60d | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | jed-frey/bot_01 | c9afefa048824b85cf9473457e9c0e4849e2e60d | [
"BSD-3-Clause"
] | null | null | null | import pytest
from uuid import uuid4
@pytest.fixture(scope="module")
def uuid():
return uuid4()
| 14 | 31 | 0.744898 | 14 | 98 | 5.214286 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023529 | 0.132653 | 98 | 6 | 32 | 16.333333 | 0.835294 | 0 | 0 | 0 | 0 | 0 | 0.061224 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.4 | 0.2 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
ac35e85d20ad9e3c1ba20e046b551f575547ec7f | 6,619 | py | Python | polypy/tests/test_msd.py | symmy596/Polypy | 9e9ce03b3e2c287f6d7efb04b31a4e9be7dea396 | [
"MIT"
] | 8 | 2020-11-08T20:08:39.000Z | 2022-03-24T06:36:40.000Z | polypy/tests/test_msd.py | symmy596/Polypy | 9e9ce03b3e2c287f6d7efb04b31a4e9be7dea396 | [
"MIT"
] | 4 | 2018-09-02T05:08:45.000Z | 2020-09-09T12:58:30.000Z | polypy/tests/test_msd.py | symmy596/Polypy | 9e9ce03b3e2c287f6d7efb04b31a4e9be7dea396 | [
"MIT"
] | 2 | 2021-03-03T17:20:15.000Z | 2021-11-22T09:37:32.000Z | import numpy as np
import os
from polypy import read
from polypy import msd as msd
import unittest
from numpy.testing import assert_almost_equal
test_history = os.path.join(os.path.dirname(__file__), 'HISTORY')
test_config = os.path.join(os.path.dirname(__file__), 'CONFIG')
class testMSDContainer(unittest.TestCase):
def test_smooth_msd_data(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
msds = msd_data.msd()
x_data = np.array([1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
y_data = np.array([5, 4, 3, 2, 1, 5, 4, 3, 2, 1])
x, y = msds.smooth_msd_data(x_data, y_data)
predicted_x = np.array([1, 2, 3, 4, 5])
predicted_y = np.array([5, 4, 3, 2, 1])
assert_almost_equal(x, predicted_x)
assert_almost_equal(y, predicted_y)
def test_xyz_diffusion_coefficient(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
msds = msd_data.msd()
assert_almost_equal(msds.xyz_diffusion_coefficient(), 200.00000000)
def test_xy_diffusion_coefficient(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
msds = msd_data.msd()
assert_almost_equal(msds.xy_diffusion_coefficient(), 200.00000000)
def test_xz_diffusion_coefficient(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
msds = msd_data.msd()
assert_almost_equal(msds.xz_diffusion_coefficient(), 200.00000000)
def test_yz_diffusion_coefficient(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
msds = msd_data.msd()
assert_almost_equal(msds.yz_diffusion_coefficient(), 200.00000000)
def test_x_diffusion_coefficient(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
msds = msd_data.msd()
assert_almost_equal(msds.x_diffusion_coefficient(), 200.00000000)
def test_y_diffusion_coefficient(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
msds = msd_data.msd()
assert_almost_equal(msds.y_diffusion_coefficient(), 200.00000000)
def test_z_diffusion_coefficient(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
msds = msd_data.msd()
assert_almost_equal(msds.z_diffusion_coefficient(), 200.00000000)
class TestMSD(unittest.TestCase):
def msd_fail_1(self):
data = read.Config(test_history, ['CA'])
data.trajectory.timesteps = 1
with self.assertRaises(ValueError):
msd_data = msd.MSD(data.trajectory)
def msd_fail_2(self):
data = read.Config(test_history, ['CA'])
data.trajectory.atom_name.append('F')
with self.assertRaises(ValueError):
msd_data = msd.MSD(data.trajectory)
def test_msd(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
msds = msd_data.msd()
expected_msd = np.array([3, 12, 27, 48])
assert_almost_equal(msds.msd, expected_msd)
def test_calculate_distances(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
trajectories = np.split(data.trajectory.fractional_trajectory,
data.trajectory.timesteps)
x, y = msd_data.calculate_distances(trajectories, 1)
x = np.asarray(x)**2
msd_ = np.sum(x, axis=1)
expected_msd = np.array([3, 12, 27, 3, 12, 27, 48, 75, 108])
assert_almost_equal(msd_, expected_msd)
def test_squared_displacements(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
trajectories = np.split(data.trajectory.fractional_trajectory,
data.trajectory.timesteps)
x, y = msd_data.calculate_distances(trajectories, 1)
x = np.asarray(x)
msd_data.squared_displacements(x, 1)
expected_msd = np.array([3, 12, 27, 3, 12, 27, 48, 75, 108])
assert_almost_equal(msd_data.msd_information.msd, expected_msd)
def test_three_dimension_square_distance(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
trajectories = np.split(data.trajectory.fractional_trajectory,
data.trajectory.timesteps)
x, y = msd_data.calculate_distances(trajectories, 1)
x = np.asarray(x) ** 2
msd_data.three_dimension_square_distance(x, 1)
expected_msd = np.array([3, 12, 27, 3, 12, 27, 48, 75, 108])
assert_almost_equal(msd_data.msd_information.msd, expected_msd)
def test_two_dimension_square_distance(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
trajectories = np.split(data.trajectory.fractional_trajectory,
data.trajectory.timesteps)
x, y = msd_data.calculate_distances(trajectories, 1)
x = np.asarray(x) ** 2
msd_data.two_dimension_square_distance(x, 1)
expected_msd = np.array([2, 8, 18, 2, 8, 18, 32, 50, 72])
assert_almost_equal(msd_data.msd_information.xymsd, expected_msd)
def test_one_dimension_square_distance(self):
data = read.History(test_history, ['CA'])
msd_data = msd.MSD(data.trajectory)
trajectories = np.split(data.trajectory.fractional_trajectory,
data.trajectory.timesteps)
x, y = msd_data.calculate_distances(trajectories, 1)
x = np.asarray(x) ** 2
msd_data.one_dimension_square_distance(x, 1)
expected_msd = np.array([1, 4, 9, 1, 4, 9, 16, 25, 36])
assert_almost_equal(msd_data.msd_information.xmsd, expected_msd)
class TestRegionalMSD(unittest.TestCase):
def test_analyse_trajectory(self):
data = read.History(test_history, ['CA'])
msd_data = msd.RegionalMSD(data.trajectory, -5, 5, trajectory_length=1)
msds = msd_data.analyse_trajectory()
expected_msd = np.array([3, 12, 27, 48])
assert_almost_equal(msds.msd, expected_msd)
def test_initialise_new_trajectory(self):
data = read.History(test_history, ['CA'])
msd_data = msd.RegionalMSD(data.trajectory, -5, 5, trajectory_length=1)
new_traj = msd_data.initialise_new_trajectory()
assert new_traj.total_atoms == 1
| 41.36875 | 79 | 0.6504 | 881 | 6,619 | 4.628831 | 0.115778 | 0.102992 | 0.076018 | 0.074546 | 0.80873 | 0.801864 | 0.745954 | 0.702305 | 0.702305 | 0.653507 | 0 | 0.043718 | 0.232815 | 6,619 | 159 | 80 | 41.628931 | 0.759354 | 0 | 0 | 0.559701 | 0 | 0 | 0.007554 | 0 | 0 | 0 | 0 | 0 | 0.149254 | 1 | 0.134328 | false | 0 | 0.044776 | 0 | 0.201493 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ac452ee8bf888ce5320c3efe70093efe02fea464 | 3,154 | py | Python | Tests/test_parallel.py | joseroma/parallel-plotting | 79b06da804329e8755f3b576628949f97c7e8c31 | [
"MIT"
] | null | null | null | Tests/test_parallel.py | joseroma/parallel-plotting | 79b06da804329e8755f3b576628949f97c7e8c31 | [
"MIT"
] | 10 | 2020-03-24T16:15:57.000Z | 2022-03-11T23:31:33.000Z | Tests/test_parallel.py | joseroma/parallel-plotting | 79b06da804329e8755f3b576628949f97c7e8c31 | [
"MIT"
] | null | null | null | import unittest
from Code.parallelCoordinates import ParallelCoordinates
class TestMethods(unittest.TestCase):
    """Exception-handling tests for ParallelCoordinates plotting and saving."""

    def _expect_plot_error(self, **kwargs):
        # Build a plotter with the given arguments and assert plot() raises.
        self.parallel = ParallelCoordinates(**kwargs)
        with self.assertRaises(Exception):
            self.parallel.plot()

    def _expect_save_error(self, library):
        # plot() succeeds, but saving to the .ppp extension must raise.
        self.parallel = ParallelCoordinates(
            file='../data/FUN.BB11001.tsv', library=library,
            save_file_name="Results/results.ppp")
        res = self.parallel.plot()
        with self.assertRaises(Exception):
            self.parallel.save(res)

    # ----- missing data -----

    def test_empty_file_name_throws_exception(self):
        self._expect_plot_error(file='', library='pyplot')

    def test_try_to_call_save_without_passing_result_parameter_throws_exception(self):
        self.parallel = ParallelCoordinates(file='FUN.BB11002.tsv', library='pyplot')
        with self.assertRaises(Exception):
            self.parallel.save("jpeh.html")

    def test_empty_file_throws_exception_no_columns_parse_from_file(self):
        self._expect_plot_error(file='../Tests/testing_files/empty', library='pyplot')

    # ----- wrong data -----

    def test_wrong_library_name_throws_exception(self):
        self._expect_plot_error(file='FUN.BB11002.tsv', library='pypplot')

    def test_wrong_file_name_throws_exception(self):
        self._expect_plot_error(file='FUN.BB11003.tsv', library='pyplot')

    def test_try_to_save_on_a_extension_not_allowed_throws_exception_plotly(self):
        self.parallel = ParallelCoordinates(file='../data/FUN.BB11001.tsv', library='plotly')
        res = self.parallel.plot()
        with self.assertRaises(Exception):
            self.parallel.save(res, "results.ppp")

    def test_try_to_save_on_a_extension_not_allowed_throws_exception_pyplot(self):
        self._expect_save_error('pyplot')

    def test_try_to_save_on_a_extension_not_allowed_throws_exception_bokeh(self):
        self._expect_save_error('bokeh')

    def test_try_to_execute_data_frame_with_too_much_columns_throws_exception(self):
        self._expect_plot_error(file='../Tests/testing_files/tooBig.csv', library='plotly')

    def test_try_to_read_data_frame_with_invalid_extension_throws_exception(self):
        self._expect_plot_error(file='../Tests/testing_files/empty.ppp', library='plotly')

    def test_try_to_set_tags_and_length_tags_does_not_match_length_columns_throws_exception(self):
        self._expect_plot_error(file='iris.csv', library='plotly', tags=['Una'])

    def tearDown(self):
        print("Test passed correctly")
| 46.382353 | 131 | 0.712112 | 370 | 3,154 | 5.775676 | 0.208108 | 0.140384 | 0.082358 | 0.180159 | 0.773982 | 0.773982 | 0.773982 | 0.77211 | 0.717829 | 0.550772 | 0 | 0.011494 | 0.172479 | 3,154 | 67 | 132 | 47.074627 | 0.80728 | 0.007926 | 0 | 0.442308 | 0 | 0 | 0.117172 | 0.052292 | 0 | 0 | 0 | 0 | 0.211538 | 1 | 0.230769 | false | 0.038462 | 0.038462 | 0 | 0.288462 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ac579bbd35c5149b9debe04f2826642965f5e53c | 115 | py | Python | src/algorithms/classic/testing/dijkstra_testing.py | ed741/PathBench | 50fe138eb1f824f49fe1a862705e435a1c3ec3ae | [
"BSD-3-Clause"
] | 46 | 2020-12-25T04:09:15.000Z | 2022-03-25T12:32:42.000Z | src/algorithms/classic/testing/dijkstra_testing.py | ed741/PathBench | 50fe138eb1f824f49fe1a862705e435a1c3ec3ae | [
"BSD-3-Clause"
] | 36 | 2020-12-21T16:10:02.000Z | 2022-01-03T01:42:01.000Z | src/algorithms/classic/testing/dijkstra_testing.py | judicaelclair/PathBenchURO | 101e67674efdfa8e27e1cf7787dac9fdf99552fe | [
"BSD-3-Clause"
] | 11 | 2021-01-06T23:34:12.000Z | 2022-03-21T17:21:47.000Z | from algorithms.classic.testing.a_star_testing import AStarTesting
class DijkstraTesting(AStarTesting):
    """Testing harness for the Dijkstra algorithm.

    Adds no behaviour of its own: it reuses ``AStarTesting`` unchanged
    and exists to give Dijkstra runs their own testing type.
    """
    pass
| 19.166667 | 66 | 0.834783 | 13 | 115 | 7.230769 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.113043 | 115 | 5 | 67 | 23 | 0.921569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
ac73fdb925e62726811d3b77d174da4a95dfb18c | 46 | py | Python | kloppy/sportscode.py | benoitblanc/kloppy | 5c3f94ff8806f9e23f8bad095a948a403a06a54c | [
"BSD-3-Clause"
] | null | null | null | kloppy/sportscode.py | benoitblanc/kloppy | 5c3f94ff8806f9e23f8bad095a948a403a06a54c | [
"BSD-3-Clause"
] | null | null | null | kloppy/sportscode.py | benoitblanc/kloppy | 5c3f94ff8806f9e23f8bad095a948a403a06a54c | [
"BSD-3-Clause"
] | null | null | null | from ._providers.sportscode import load, save
| 23 | 45 | 0.826087 | 6 | 46 | 6.166667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108696 | 46 | 1 | 46 | 46 | 0.902439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
3ba30ad79bc5a323c87ee593a479ae1feea9daba | 36 | py | Python | __init__.py | joshwatson/emilator | 8f30b35957c9bb51ab90e7e40f0de09439a5b347 | [
"MIT"
] | 29 | 2017-03-14T11:05:40.000Z | 2021-06-27T02:35:52.000Z | __init__.py | joshwatson/emilator | 8f30b35957c9bb51ab90e7e40f0de09439a5b347 | [
"MIT"
] | 3 | 2018-08-16T09:06:35.000Z | 2020-12-11T23:41:47.000Z | __init__.py | joshwatson/emilator | 8f30b35957c9bb51ab90e7e40f0de09439a5b347 | [
"MIT"
] | 2 | 2017-07-28T17:34:54.000Z | 2020-05-01T02:30:40.000Z | from emilator import *
import errors | 18 | 22 | 0.833333 | 5 | 36 | 6 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.138889 | 36 | 2 | 23 | 18 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
3bafe9c18c658a3daf590c8ea12e8ea0aff47548 | 27,971 | py | Python | test/test_transformations.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 6 | 2020-08-28T22:44:07.000Z | 2022-01-24T20:53:00.000Z | test/test_transformations.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 1 | 2021-02-20T09:38:46.000Z | 2021-02-20T09:38:46.000Z | test/test_transformations.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 2 | 2021-10-04T09:25:06.000Z | 2022-02-09T09:09:09.000Z | import unittest
import sys
import pandas as pd
import numpy as np
import itertools
from ariadne.transformations import (
StandardScale,
MinMaxScale,
ToCylindrical,
Normalize,
DropShort,
DropSpinningTracks,
ToCartesian,
Compose,
ToBuckets,
ConstraintsNormalize,
BakeStationValues,
DropTracksWithHoles
)
path = '../data/200.csv'
path_radial = '../data/200_radial.csv'
import logging
logging.basicConfig(stream=sys.stderr)
logging.getLogger("TestLogger").setLevel(logging.DEBUG)
class StandardTestCase(unittest.TestCase):
    """Tests for the StandardScale transformer."""

    def setUp(self):
        self.data = pd.read_csv(path)
        self.radial_df = pd.read_csv(path_radial)

    def _init_scaler(self, drop_old=True):
        # (Re)create the scaler under test.
        self.scaler = StandardScale(drop_old=drop_old, with_mean=True, with_std=True)

    def test_init(self):
        # drop_old must be stored exactly as passed.
        for flag in (True, False):
            self._init_scaler(drop_old=flag)
            self.assertEqual(self.scaler.drop_old, flag)

    def test_transform(self):
        # Scaled coordinates of the first hit must match the reference file.
        self._init_scaler()
        reference = self.radial_df.loc[0, :]
        for axis in ('x', 'y', 'z'):
            self.assertAlmostEqual(self.scaler(self.data).loc[0, axis], reference[axis])

    def test_drop_false(self):
        # With drop_old=False the untouched values survive as '<name>_old'.
        self._init_scaler(drop_old=False)
        original_x = self.data.loc[0, 'x']
        self.assertEqual(self.scaler(self.data).loc[0, 'x_old'], original_x)
class MinMaxTestCase(unittest.TestCase):
    """Tests for the MinMaxScale transformer."""

    def setUp(self):
        self.data = pd.read_csv(path)
        self.radial_df = pd.read_csv(path_radial)

    def _init_scaler(self, drop_old=True, feature_range=(0, 1)):
        # FIX: parameter renamed from ``range`` so it no longer shadows
        # the builtin; the helper is private and all callers are below.
        self.scaler = MinMaxScale(drop_old=drop_old, feature_range=feature_range)

    def test_init(self):
        self._init_scaler()
        self.assertEqual(self.scaler.drop_old, True)
        self._init_scaler(drop_old=False)
        self.assertEqual(self.scaler.drop_old, False)

    def test_transform_zero_one(self):
        # Default range: x is squeezed into [0, 1].
        self._init_scaler(drop_old=True, feature_range=(0, 1))
        self.assertEqual(min(self.scaler(self.data).loc[:, 'x']), 0)
        self.assertEqual(max(self.scaler(self.data).loc[:, 'x']), 1)

    def test_transform_minus_one_one(self):
        # Symmetric range: x is squeezed into [-1, 1].
        self._init_scaler(drop_old=True, feature_range=(-1, 1))
        self.assertEqual(min(self.scaler(self.data).loc[:, 'x']), -1)
        self.assertEqual(max(self.scaler(self.data).loc[:, 'x']), 1)

    def test_drop_false(self):
        # With drop_old=False the untouched values survive as 'x_old'.
        self._init_scaler(False)
        temp = self.data.loc[0, 'x']
        self.assertEqual(self.scaler.drop_old, False)
        self.assertEqual(self.scaler(self.data).loc[0, 'x_old'], temp)
class NormalTestCase(unittest.TestCase):
    """Tests for the Normalize transformer."""

    def setUp(self):
        self.data = pd.read_csv(path)
        self.radial_df = pd.read_csv(path_radial)

    def _init_scaler(self, drop_old=True, norm='l2'):
        # (Re)create the scaler under test.
        self.scaler = Normalize(drop_old=drop_old, norm=norm)

    def test_init(self):
        # drop_old must be stored exactly as passed.
        for flag in (True, False):
            self._init_scaler(drop_old=flag)
            self.assertEqual(self.scaler.drop_old, flag)

    def test_transform_l2(self):
        # After l2 normalisation the x column spans [-1, 1] (2 d.p.).
        self._init_scaler(drop_old=True)
        lowest = round(min(self.scaler(self.data).loc[:, 'x']), 2)
        highest = round(max(self.scaler(self.data).loc[:, 'x']), 2)
        self.assertEqual(lowest, -1)
        self.assertEqual(highest, 1)

    def test_drop_false(self):
        # With drop_old=False the untouched values survive as 'x_old'.
        self._init_scaler(False)
        original_x = self.data.loc[0, 'x']
        self.assertEqual(self.scaler.drop_old, False)
        self.assertEqual(self.scaler(self.data).loc[0, 'x_old'], original_x)
class CylindricalTestCase(unittest.TestCase):
    """Tests for the ToCylindrical transformer."""

    def setUp(self):
        self.data = pd.read_csv(path)
        self.radial_df = pd.read_csv(path_radial)

    def _init_transformer(self, drop_old=True):
        self.transformer = ToCylindrical(drop_old=drop_old)

    def test_init(self):
        # drop_old must be stored exactly as passed.
        for flag in (True, False):
            self._init_transformer(drop_old=flag)
            self.assertEqual(self.transformer.drop_old, flag)

    def test_drop_false(self):
        # Keeping old columns leaves the cartesian 'x' values untouched.
        self._init_transformer(False)
        self.assertEqual(self.transformer.drop_old, False)
        self.assertEqual(self.transformer(self.data).loc[0, 'x'], self.data.loc[0, 'x'])
class CartesianTestCase(unittest.TestCase):
    """Tests for the ToCartesian transformer."""

    def setUp(self):
        self.data = pd.read_csv(path)
        self.radial_df = pd.read_csv(path_radial)

    def _init_transformer(self, drop_old=True):
        self.transformer = ToCartesian(drop_old=drop_old)

    def test_init(self):
        # drop_old must be stored exactly as passed.
        for flag in (True, False):
            self._init_transformer(drop_old=flag)
            self.assertEqual(self.transformer.drop_old, flag)

    def test_transform(self):
        # Converting the radial reference frame back must reproduce its
        # cartesian columns (only meaningful on pre-scaled data).
        self._init_transformer()
        for axis in ('z', 'y', 'x'):
            self.assertAlmostEqual(
                self.transformer(self.radial_df).loc[0, axis],
                self.radial_df.loc[0, axis],
            )

    def test_drop_false(self):
        # Keeping old columns leaves the polar 'phi' values untouched.
        self._init_transformer(False)
        self.assertEqual(
            self.transformer(self.radial_df).loc[0, 'phi'],
            self.radial_df.loc[0, 'phi'],
        )
class DropShortTestCase(unittest.TestCase):
    # Tests for DropShort: tracks with fewer than ``num_stations`` hits
    # are re-labelled as fakes (track == -1) or removed entirely.
    # NOTE(review): several assertions depend on exact row order after
    # filtering (positional ``iloc``), so the fixture must not change.

    def _init_transformer(self, num_stations=None, keep_filtered=True):
        # (Re)create the transformer under test with the given settings.
        self.transformer = DropShort(num_stations=num_stations, keep_filtered=keep_filtered)

    def setUp(self):
        # One event: track 1 has 3 hits, track 2 only 2, plus two
        # pre-existing fake hits (track == -1).
        self.data = pd.DataFrame({'r': [1., 0.5, 0.1, 0.2, 0.8, 0.6, 0.2],
                                  'phi': [3., 0.5, 2., 0.2, 1.1, -0.5, -0.1],
                                  'z': [0.1, 0.2, 0.33, 0.1, 0.2, 0.2, 0.1],
                                  'track': [1, 1, 1, 2, 2, -1, -1],
                                  'station': [1, 2, 3, 1, 2, 1, 3],
                                  'event': [0, 0, 0, 0, 0, 0, 0]})

    def test_init(self):
        self._init_transformer()
        self.assertEqual(len(self.data), 7)
        self.assertEqual(len(self.data.columns), 6)
        self.assertEqual(self.transformer.num_stations, None)
        self._init_transformer(num_stations=3)
        self.assertEqual(self.transformer.num_stations, 3)
        # Broken-track bookkeeping stays unset until the first call.
        self.assertEqual(self.transformer._broken_tracks, None)
        self.assertEqual(self.transformer._num_broken_tracks, None)

    def test_transform(self):
        # No station threshold set: nothing is dropped.
        self._init_transformer()
        self.assertEqual(len(self.transformer(self.data)), len(self.data))

    def test_transform_3(self):
        self._init_transformer(3)
        self.assertEqual(len(self.transformer(self.data)), len(self.data))
        # keep_filtered=True: track 2 (2 hits) is re-labelled fake but kept.
        self._init_transformer(num_stations=3, keep_filtered=True)
        result = self.transformer(self.data)
        self.assertEqual(len(result[result['track'] == -1]), 4)
        self.assertEqual(len(result), len(self.data))
        # keep_filtered=False: track 2's two hits are removed altogether.
        self._init_transformer(num_stations=3, keep_filtered=False)
        result = self.transformer(self.data)
        self.assertEqual(len(result[result['track'] == -1]), 2)
        self.assertEqual(len(result), len(self.data) - 2)

    def test_transform_no_keep_4(self):
        # Threshold above every track length: all hits end up fake...
        self._init_transformer(num_stations=4)
        result = self.transformer(self.data)
        self.assertEqual(len(result[result['track']==-1]), 7)
        self.assertEqual(len(result), len(self.data))
        # ...and without keeping, only the two original fakes survive.
        self._init_transformer(num_stations=4, keep_filtered=False)
        result = self.transformer(self.data)
        self.assertEqual(len(result[result['track'] == -1]), 2)
        self.assertEqual(len(result), 2)

    def test_transform_track(self):
        self._init_transformer(num_stations=3)
        result = self.transformer(self.data)
        result.reset_index(inplace=True, drop=True)
        self.assertEqual(len(result[result.track==-1]), 4)
        # Column 3 is 'track': track 1 keeps its id, track 2 is now fake.
        self.assertEqual(result.iloc[0, 3], 1)
        self.assertEqual(result.iloc[1, 3], 1)
        self.assertEqual(result.iloc[3, 3], -1)
        self.assertEqual(result.iloc[4, 3], -1)

    def test_get_broken(self):
        self._init_transformer(3)
        # Counter is None before any transform has run.
        self.assertIsNone(self.transformer.get_num_broken())
        self.transformer(self.data)
        self.assertEqual(1, self.transformer.get_num_broken())
        # Feeding only track 1's three hits: nothing is broken.
        self.transformer(self.data[:3])
        self.assertEqual(0, self.transformer.get_num_broken())
class DropWarpsTestCase(unittest.TestCase):
    """Tests for DropSpinningTracks: tracks that hit the same station
    twice ('spinning' tracks) are marked as fakes or removed."""

    def _init_transformer(self, keep_filtered=True):
        # (Re)create the transformer under test.
        self.transformer = DropSpinningTracks(keep_filtered=keep_filtered)

    def setUp(self):
        # Track 1 is clean; track 2 visits station 2 twice and must be
        # flagged; -1 is a pre-existing fake hit.
        self.data = pd.DataFrame({'r': [1., 0.5, 0.1, 0.2, 0.8, 0.6, 0.2],
                                  'phi': [3., 0.5, 2., 0.2, 1.1, -0.5, -0.1],
                                  'z': [0.1, 0.2, 0.33, 0.1, 0.2, 0.2, 0.1],
                                  'track': [1, 1, 1, 2, 2, 2, -1],
                                  'station': [1, 2, 3, 1, 2, 2, 3],
                                  'event': [0, 0, 0, 0, 0, 0, 0]})

    def test_init(self):
        self._init_transformer()
        self.assertEqual(len(self.data), 7)
        self.assertEqual(len(self.data.columns), 6)
        # Broken-track bookkeeping stays unset until the first call.
        self.assertEqual(self.transformer._broken_tracks, None)
        self.assertEqual(self.transformer._num_broken_tracks, None)

    def test_transform(self):
        self._init_transformer()
        result = self.transformer(self.data)
        self.assertEqual(len(result), len(self.data))
        # Track 2's 3 hits are re-labelled fake, joining the 1 original fake.
        self.assertEqual(len(result[result['track'] == -1]), 4)
        # Column 3 is 'track'.
        self.assertEqual(result.iloc[0, 3], 1)
        self.assertEqual(result.iloc[3, 3], -1)
        self.assertEqual(result.iloc[5, 3], -1)

    def test_transform_no_keep(self):
        # Without keeping, only track 1's hits and the fake survive.
        self._init_transformer(keep_filtered=False)
        self.assertEqual(len(self.transformer(self.data)), 4)
        self._init_transformer(keep_filtered=True)
        self.assertEqual(len(self.transformer(self.data)), 7)

    def test_get_broken(self):
        # FIX: was ``self._init_transformer(2)``, which bound 2 to
        # ``keep_filtered`` (copy-paste from DropShortTestCase); 2 is
        # truthy so the default call is equivalent and states the intent.
        self._init_transformer()
        self.assertEqual(self.transformer.get_num_broken(), None)
        self.transformer(self.data)
        self.assertEqual(self.transformer.get_num_broken(), 1)
class DropHolesTestCase(unittest.TestCase):
    """Tests for DropTracksWithHoles: tracks that skip a station in the
    middle (a 'hole') are marked as fakes or removed."""

    def _init_transformer(self, keep_filtered=True, min_station_num=1):
        # (Re)create the transformer under test.
        self.transformer = DropTracksWithHoles(keep_filtered=keep_filtered,
                                               min_station_num=min_station_num)

    def setUp(self):
        # Track 1 hits stations 1 and 3 (hole at station 2); track 2 is
        # complete; -1 is a pre-existing fake hit.
        self.data = pd.DataFrame({'r': [1., 0.1, 0.2, 0.8, 0.6, 0.2],
                                  'phi': [3., 2., 0.2, 1.1, -0.5, -0.1],
                                  'z': [0.1, 0.33, 0.1, 0.2, 0.2, 0.1],
                                  'track': [1, 1, 2, 2, 2, -1],
                                  'station': [1, 3, 1, 2, 3, 3],
                                  'event': [0, 0, 0, 0, 0, 0]})

    def test_init(self):
        self._init_transformer()
        self.assertEqual(len(self.data), 6)
        self.assertEqual(len(self.data.columns), 6)
        # Broken-track bookkeeping stays unset until the first call.
        self.assertEqual(self.transformer._broken_tracks, None)
        self.assertEqual(self.transformer._num_broken_tracks, None)

    def test_transform(self):
        # FIX: removed a leftover debug ``log.info(result)`` call.
        self._init_transformer()
        result = self.transformer(self.data)
        self.assertEqual(len(result), len(self.data))
        # Track 1's two hits join the one original fake.
        self.assertEqual(len(result[result['track'] == -1]), 3)

    def test_transform_no_keep(self):
        self._init_transformer(keep_filtered=False)
        self.assertEqual(len(self.transformer(self.data)), 4)
        self._init_transformer(keep_filtered=True)
        self.assertEqual(len(self.transformer(self.data)), 6)

    def test_transform_from_zero(self):
        # min_station_num=0: only one hit survives when filtered rows are
        # discarded (presumably no track reaches station 0 — see asserts).
        self._init_transformer(keep_filtered=False, min_station_num=0)
        self.assertEqual(len(self.transformer(self.data)), 1)
        self._init_transformer(keep_filtered=True, min_station_num=0)
        self.assertEqual(len(self.transformer(self.data)), 6)

    def test_get_broken(self):
        # FIX: was ``self._init_transformer(2)``, which bound 2 to
        # ``keep_filtered``; 2 is truthy so the default call is equivalent.
        self._init_transformer()
        self.assertEqual(self.transformer.get_num_broken(), None)
        self.transformer(self.data)
        self.assertEqual(self.transformer.get_num_broken(), 1)
class BakeColumnTestCase(unittest.TestCase):
    """Tests for BakeStationValues: overwrite each hit's z with a fixed
    per-station value."""

    def _init_transformer(self):
        # FIX: dropped the unused ``keep_filtered`` parameter (dead code;
        # the helper is private and every caller used the default).
        self.transformer = BakeStationValues(values={0: 0.1, 1: 0.3, 2: 0.5, 3: 0.7})

    def setUp(self):
        self.data = pd.DataFrame({'r': [1., 0.1, 0.2, 0.8, 0.6, 0.2],
                                  'phi': [3., 2., 0.2, 1.1, -0.5, -0.1],
                                  'z': [0.1, 0.33, 0.1, 0.2, 0.2, 0.1],
                                  'track': [1, 1, 2, 2, 2, -1],
                                  'station': [1, 3, 1, 2, 3, 3],
                                  'event': [0, 0, 0, 0, 0, 0]})

    def test_init(self):
        self._init_transformer()
        self.assertEqual(len(self.data), 6)
        self.assertEqual(len(self.data.columns), 6)

    def test_transform(self):
        self._init_transformer()
        result = self.transformer(self.data)
        self.assertEqual(len(result), len(self.data))
        # Stations 1/2/3 occur 2/1/3 times; every original z is replaced,
        # so no 0.1 (the station-0 value) remains.
        self.assertEqual(len(result[result['z'] == 0.1]), 0)
        self.assertEqual(len(result[result['z'] == 0.3]), 2)
        self.assertEqual(len(result[result['z'] == 0.5]), 1)
        self.assertEqual(len(result[result['z'] == 0.7]), 3)
class ComposeTestCase(unittest.TestCase):
    """Tests for chaining transformers with Compose."""

    def setUp(self):
        self.data = pd.read_csv(path)
        self.radial_df = pd.read_csv(path_radial)

    def _init_transformer(self):
        # Scale first, then convert to cylindrical coordinates.
        self.transformer = Compose([StandardScale(), ToCylindrical()])

    def test_init(self):
        self._init_transformer()

    def test_transform(self):
        # The chain must not add or drop any rows.
        self._init_transformer()
        self.assertEqual(len(self.transformer(self.data)), 100000)

    def test_coords(self):
        # Cartesian columns must match the radial reference file.
        self._init_transformer()
        transformed = self.transformer(self.data)
        for axis in ('z', 'y', 'x'):
            self.assertAlmostEqual(transformed.loc[0, axis], self.radial_df.loc[0, axis])

    def test_polar(self):
        # Polar columns must match the radial reference file.
        self._init_transformer()
        transformed = self.transformer(self.data)
        for column in ('phi', 'r'):
            self.assertAlmostEqual(transformed.loc[0, column], self.radial_df.loc[0, column])
class ToBucketsTestCase(unittest.TestCase):
    """Tests for ToBuckets: group tracks into 'buckets' by track length,
    optionally balancing buckets, capping bucket size and track length."""

    def _init_transformer(self, flat=True, shuffle=False, max_stations=None,
                          max_bucket_size=None, keep_fakes=True):
        # (Re)create the transformer under test.
        self.transformer = ToBuckets(flat=flat, shuffle=shuffle,
                                     max_stations=max_stations,
                                     max_bucket_size=max_bucket_size,
                                     keep_fakes=keep_fakes)

    @staticmethod
    def _build_event_data(num_tracks, track_lengths):
        """Build a one-event DataFrame with ``num_tracks[j]`` tracks of
        ``track_lengths[j]`` hits each; coordinates are random.

        Factored out of four near-identical copy-pasted loops; the dead
        ``if i == 1: continue`` guard in one copy was dropped (lengths
        start at 3, so it could never fire).
        """
        track_list = []
        station_list = []
        event_list = []
        for count, length in zip(num_tracks, track_lengths):
            for track in range(count):
                # Unique id per (length, track index) pair, e.g. 3000, 3001.
                track_list.append(np.full(length, length * 1000 + track))
                event_list.append(np.zeros(length))
                station_list.append(np.arange(1, length + 1))
        events = list(np.concatenate(event_list))
        return pd.DataFrame(
            {
                'track': list(np.concatenate(track_list)),
                'station': list(np.concatenate(station_list)),
                'event': events,
                'r': list(np.random.rand(len(events))),
                'phi': list(np.random.rand(len(events))),
                'z': list(np.random.rand(len(events))),
            })

    def setUp(self):
        # Five tracks of lengths 5, 3, 4, 3, 5 in a single event.
        self.data = pd.DataFrame(
            {'r': [1., 0.5, 0.1, 0.2, 0.1, 0.2, 0.8, 0.6, 0.2, 0.8,
                   0.6, 0.2, 0.2, 0.2, 0.1, 0.2, 0.1, 0.2, 0.2, 0.1],
             'phi': [3., 0.5, 2., 0.2, 1.1, -0.5, -0.1, 2., 0.2, 1.1,
                     -0.5, -0.1, 0.2, 0.2, 0.1, 0.2, 0.1, 0.2, 0.2, 0.1],
             'z': [0.1, 0.2, 0.33, 0.1, 0.2, 0.2, 0.1, 0.33, 0.1, 0.2,
                   0.2, 0.1, 0.2, 0.2, 0.1, 0.2, 0.1, 0.2, 0.2, 0.1],
             'track': [1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5],
             'station': [1, 2, 3, 4, 5, 1, 2, 3, 1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 4, 5],
             'event': [0] * 20})

    def test_init(self):
        self._init_transformer()
        self.assertEqual(len(self.data), 20)
        self.assertEqual(len(self.data.columns), 6)
        self.assertEqual(self.transformer.max_num_stations, None)
        self._init_transformer(shuffle=True, max_stations=3)
        self.assertEqual(self.transformer.max_num_stations, 3)

    def test_transform(self):
        # FIX: removed an unused logger local.
        self._init_transformer()
        result = self.transformer(self.data)
        self.assertEqual(len(result), 20)
        # Buckets: 2 tracks of length 5, 1 of length 4, 2 of length 3.
        self.assertEqual(len(result[result['bucket'] == 5]), 10)
        self.assertEqual(len(result[result['bucket'] == 4]), 4)
        self.assertEqual(len(result[result['bucket'] == 3]), 6)
        self.assertEqual(self.transformer.max_num_stations, 5)

    def test_transform_not_flat(self):
        # flat=False: one DataFrame per bucket, keyed by track length.
        self._init_transformer(flat=False)
        result = self.transformer(self.data)
        self.assertEqual(len(result[5]), 10)
        self.assertEqual(len(result[4]), 4)
        self.assertEqual(len(result[3]), 6)

    def test_transform_with_balancing(self):
        self._init_transformer(flat=False)
        self.data = self._build_event_data([2, 10], range(3, 5))
        result = self.transformer(self.data)
        # No hit may appear in two buckets.
        self.assertEqual(any(np.in1d(result[3]['index'], result[4]['index'])), False)
        self.assertEqual(len(result[4]), 10 * 4)
        self.assertEqual(len(result[3]), 2 * 3)

    def test_transform_with_balancing_and_bucket_size(self):
        self._init_transformer(flat=False, max_bucket_size=6)
        self.data = self._build_event_data([2, 10], range(3, 5))
        result = self.transformer(self.data)
        self.assertEqual(any(np.in1d(result[3]['index'], result[4]['index'])), False)
        # Both buckets are filled up to the 6-track cap.
        self.assertEqual(len(result[4]), 6 * 4)
        self.assertEqual(len(result[3]), 6 * 3)

    def test_transform_with_empty_bucket_and_bucket_size(self):
        # Length-4 bucket starts empty (0 tracks of length 4).
        self._init_transformer(flat=False, max_bucket_size=6)
        self.data = self._build_event_data([2, 0, 10], range(3, 6))
        result = self.transformer(self.data)
        self.assertEqual(any(np.in1d(result[3]['index'], result[5]['index'])), False)
        self.assertEqual(len(result[5]), 6 * 5)
        self.assertEqual(len(result[4]), 4 * 4)
        self.assertEqual(len(result[3]), 2 * 3)

    def test_transform_with_longer_bucket_and_bucket_size_and_maxlen(self):
        self._init_transformer(flat=False, max_bucket_size=6, max_stations=4)
        self.data = self._build_event_data([2, 3, 10], range(3, 6))
        result = self.transformer(self.data)
        self.assertEqual(any(np.in1d(result[3]['index'], result[4]['index'])), False)
        self.assertEqual(len(result[3]), 6 * 3)
        self.assertEqual(len(result[4]), 6 * 4)
        # max_stations=4: no bucket of length 5 may be produced.
        self.assertEqual(5 in result.keys(), False)

    def test_transform_with_longer_bucket_and_bucket_size_and_maxlen_flat(self):
        # Same scenario as above, but with the flat (single-frame) output.
        self._init_transformer(flat=True, max_bucket_size=6, max_stations=4)
        self.data = self._build_event_data([2, 3, 10], range(3, 6))
        result = self.transformer(self.data)
        self.assertEqual(
            any(np.in1d(result[result['bucket'] == 3]['index'],
                        result[result['bucket'] == 4]['index'])),
            False)
        self.assertEqual(len(result[result['bucket'] == 3]), 6 * 3)
        self.assertEqual(len(result[result['bucket'] == 4]), 6 * 4)
        self.assertEqual(5 in result['bucket'], False)

    def test_transform_bes(self):
        # Track 2 is only 2 stations long, so with keep_fakes=False a
        # single length-3 bucket remains.
        data = pd.DataFrame({'r': [1., 0.5, 0.1, 0.2, 0.8, 0.6],
                             'phi': [3., 0.5, 2., 0.2, 1.1, -0.5],
                             'z': [0.1, 0.2, 0.33, 0.1, 0.2, 0.2],
                             'track': [1, 1, 1, 2, 2, 2],
                             'station': [1, 2, 3, 1, 2, 2],
                             'event': [0, 0, 0, 0, 0, 0]})
        self._init_transformer(flat=False, keep_fakes=False)
        result = self.transformer(data)
        self.assertEqual(len(result), 1)
        self.assertEqual(len(result[3]), 2 * 3)
        self._init_transformer(flat=True)
        result = self.transformer(data)
        self.assertEqual(len(result), 6)
        self.assertEqual(all(result['bucket'] == 3), True)
'''
class ToBucketsTestCase(unittest.TestCase):
def _init_transformer(self, flat=True, keep_fakes=True):
self.transformer = ToBuckets(flat=flat, keep_fakes=keep_fakes)
def _init_data(self):
self.data = pd.read_csv(path)
self.radial_df = pd.read_csv(path_radial)
def test_init(self):
self._init_transformer()
self._init_data()
self.assertEqual(self.transformer.keep_fakes, True)
self.assertEqual(self.transformer.flat, True)
def test_misses(self):
self._init_data()
self._init_transformer(keep_fakes=False)
self.assertEqual(len(self.transformer(self.data)), 100000-76969)
self._init_transformer(keep_fakes=True)
self.assertEqual(len(self.transformer(self.data)), 100000)
def test_flat(self):
self._init_data()
self._init_transformer()
self.assertEqual(len(self.transformer(self.data).head()),5)
def test_no_flat(self):
self._init_data()
self._init_transformer(flat=False)
self.assertEqual(len(self.transformer(self.data)[3].head()), 5)
def test_bucket_lens(self):
self._init_data()
self._init_transformer(keep_fakes=False)
transformed = self.transformer(self.data)
self.assertEqual(23031, self.transformer.get_buckets_sizes()[3])
class ConstraintsTestCase(unittest.TestCase):
def _init_transformer(self, drop_old=True, columns=('x', 'y', 'z'), use_global_constraints=True):
self.transformer = ConstraintsNormalize(drop_old=drop_old, columns=columns, use_global_constraints=use_global_constraints)
def _init_data(self):
self.data = pd.read_csv(path)
self.radial_df = pd.read_csv(path_radial)
def test_init(self):
self._init_transformer()
self._init_data()
self.assertEqual(self.transformer.drop_old, True)
self.assertEqual(len(self.transformer.columns), 3)
self.assertEqual(self.transformer.margin, 1e-3)
def test_values_constraints(self):
self._init_data()
self._init_transformer()
transformed = self.transformer(self.data)
self.assertLessEqual(max(transformed['x']),1)
self.assertLessEqual(-1, min(transformed['x']))
self.assertLessEqual(max(transformed['y']), 1)
self.assertLessEqual(-1, min(transformed['y']))
self.assertLessEqual(max(transformed['z']), 1)
self.assertLessEqual(-1, min(transformed['z']))
def test_lens(self):
self._init_data()
self._init_transformer()
self.assertEqual( 100000, len(self.transformer(self.data)))
'''
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    unittest.main()
| 41.194404 | 147 | 0.600622 | 3,751 | 27,971 | 4.320981 | 0.049054 | 0.117535 | 0.069965 | 0.056762 | 0.868954 | 0.842423 | 0.793744 | 0.759255 | 0.706318 | 0.664425 | 0 | 0.039165 | 0.245075 | 27,971 | 678 | 148 | 41.255162 | 0.728405 | 0.046441 | 0 | 0.634298 | 0 | 0 | 0.02287 | 0.000916 | 0 | 0 | 0 | 0 | 0.260331 | 1 | 0.13843 | false | 0 | 0.014463 | 0 | 0.17562 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3bb88ade4fded6d9f80b1529075371cf9594bb1b | 12,381 | py | Python | tests/test_tasks_move_cube.py | compsciencelab/trifinger_simulation | ddd93c0b370072d706d85a6d1567f49a4de7d5c6 | [
"BSD-3-Clause"
] | 25 | 2020-08-15T12:11:10.000Z | 2022-03-18T12:43:49.000Z | tests/test_tasks_move_cube.py | compsciencelab/trifinger_simulation | ddd93c0b370072d706d85a6d1567f49a4de7d5c6 | [
"BSD-3-Clause"
] | 12 | 2020-08-14T09:39:05.000Z | 2021-12-15T16:26:53.000Z | tests/test_tasks_move_cube.py | compsciencelab/trifinger_simulation | ddd93c0b370072d706d85a6d1567f49a4de7d5c6 | [
"BSD-3-Clause"
] | 10 | 2020-08-17T12:13:29.000Z | 2022-02-01T18:28:05.000Z | #!/usr/bin/env python3
import pytest
import numpy as np
from scipy.spatial.transform import Rotation
from trifinger_simulation.tasks import move_cube
def test_get_cube_corner_positions():
    """Corner positions must follow the pose: identity, pure translation,
    pure rotation, and rotation + translation."""
    # cube half width
    chw = move_cube._CUBE_WIDTH / 2

    # The eight sign combinations of (+/-chw, +/-chw, +/-chw) in the
    # same nesting order (x outermost, z innermost) as the expected
    # tables of the original test.
    base_corners = np.array(
        [[sx, sy, sz]
         for sx in (-chw, chw)
         for sy in (-chw, chw)
         for sz in (-chw, chw)]
    )
    shift = np.array([1.0, 2.0, 3.0])

    # Identity pose: corners centred on the origin.
    origin_corners = move_cube.get_cube_corner_positions(move_cube.Pose())
    np.testing.assert_array_almost_equal(base_corners, origin_corners)

    # Pure translation by (1, 2, 3).
    translated = move_cube.get_cube_corner_positions(
        move_cube.Pose([1, 2, 3], [0, 0, 0, 1])
    )
    np.testing.assert_array_almost_equal(base_corners + shift, translated)

    # Pure rotation: 90 degrees about z maps (x, y, z) -> (-y, x, z).
    rot_z90 = Rotation.from_euler("z", 90, degrees=True).as_quat()
    rotated_corners = np.column_stack(
        (-base_corners[:, 1], base_corners[:, 0], base_corners[:, 2])
    )
    rotated = move_cube.get_cube_corner_positions(
        move_cube.Pose([0, 0, 0], rot_z90)
    )
    np.testing.assert_array_almost_equal(rotated_corners, rotated)

    # Rotation followed by translation.
    both = move_cube.get_cube_corner_positions(
        move_cube.Pose([1, 2, 3], rot_z90)
    )
    np.testing.assert_array_almost_equal(rotated_corners + shift, both)
def test_sample_goal_difficulty_1_no_initial_pose():
for i in range(1000):
goal = move_cube.sample_goal(difficulty=1)
# verify the goal is valid (i.e. within the allowed ranges)
try:
move_cube.validate_goal(goal)
except move_cube.InvalidGoalError as e:
pytest.fail(
msg="Invalid goal: {} pose is {}, {}".format(
e, e.position, e.orientation
),
)
# verify the goal satisfies conditions of difficulty 1
# always on ground
assert goal.position[2] == (move_cube._CUBE_WIDTH / 2)
# no orientation
np.testing.assert_array_equal(goal.orientation, [0, 0, 0, 1])
def test_sample_goal_difficulty_2_no_initial_pose():
for i in range(1000):
goal = move_cube.sample_goal(difficulty=2)
# verify the goal is valid (i.e. within the allowed ranges)
try:
move_cube.validate_goal(goal)
except move_cube.InvalidGoalError as e:
pytest.fail(
msg="Invalid goal: {} pose is {}, {}".format(
e, e.position, e.orientation
),
)
# verify the goal satisfies conditions of difficulty 2
assert goal.position[2] <= move_cube._max_height
assert goal.position[2] >= move_cube._min_height
# no orientation
np.testing.assert_array_equal(goal.orientation, [0, 0, 0, 1])
def test_sample_goal_difficulty_3_no_initial_pose():
for i in range(1000):
goal = move_cube.sample_goal(difficulty=3)
# verify the goal is valid (i.e. within the allowed ranges)
try:
move_cube.validate_goal(goal)
except move_cube.InvalidGoalError as e:
pytest.fail(
msg="Invalid goal: {} pose is {}, {}".format(
e, e.position, e.orientation
),
)
# verify the goal satisfies conditions of difficulty 2
assert goal.position[2] <= move_cube._max_height
assert goal.position[2] >= move_cube._min_height
# no orientation
np.testing.assert_array_equal(goal.orientation, [0, 0, 0, 1])
def test_sample_goal_difficulty_4_no_initial_pose():
for i in range(1000):
goal = move_cube.sample_goal(difficulty=4)
# verify the goal is valid (i.e. within the allowed ranges)
try:
move_cube.validate_goal(goal)
except move_cube.InvalidGoalError as e:
pytest.fail(
msg="Invalid goal: {} pose is {}, {}".format(
e, e.position, e.orientation
),
)
# verify the goal satisfies conditions of difficulty 2
assert goal.position[2] <= move_cube._max_height
assert goal.position[2] >= move_cube._min_height
def test_evaluate_state_difficulty_1():
difficulty = 1
pose_origin = move_cube.Pose()
pose_trans = move_cube.Pose(position=[1, 2, 3])
pose_rot = move_cube.Pose(
orientation=Rotation.from_euler("z", 0.42).as_quat()
)
pose_both = move_cube.Pose(
[1, 2, 3], Rotation.from_euler("z", 0.42).as_quat()
)
# needs to be zero for exact match
cost = move_cube.evaluate_state(pose_origin, pose_origin, difficulty)
assert cost == 0
# None-zero if there is translation, rotation is ignored
assert move_cube.evaluate_state(pose_origin, pose_trans, difficulty) != 0
assert move_cube.evaluate_state(pose_origin, pose_rot, difficulty) == 0
assert move_cube.evaluate_state(pose_origin, pose_both, difficulty) != 0
def test_evaluate_state_difficulty_2():
difficulty = 2
pose_origin = move_cube.Pose()
pose_trans = move_cube.Pose(position=[1, 2, 3])
pose_rot = move_cube.Pose(
orientation=Rotation.from_euler("z", 0.42).as_quat()
)
pose_both = move_cube.Pose(
[1, 2, 3], Rotation.from_euler("z", 0.42).as_quat()
)
# needs to be zero for exact match
cost = move_cube.evaluate_state(pose_origin, pose_origin, difficulty)
assert cost == 0
# None-zero if there is translation, rotation is ignored
assert move_cube.evaluate_state(pose_origin, pose_trans, difficulty) != 0
assert move_cube.evaluate_state(pose_origin, pose_rot, difficulty) == 0
assert move_cube.evaluate_state(pose_origin, pose_both, difficulty) != 0
def test_evaluate_state_difficulty_3():
difficulty = 3
pose_origin = move_cube.Pose()
pose_trans = move_cube.Pose(position=[1, 2, 3])
pose_rot = move_cube.Pose(
orientation=Rotation.from_euler("z", 0.42).as_quat()
)
pose_both = move_cube.Pose(
[1, 2, 3], Rotation.from_euler("z", 0.42).as_quat()
)
# needs to be zero for exact match
cost = move_cube.evaluate_state(pose_origin, pose_origin, difficulty)
assert cost == 0
# None-zero if there is translation, rotation is ignored
assert move_cube.evaluate_state(pose_origin, pose_trans, difficulty) != 0
assert move_cube.evaluate_state(pose_origin, pose_rot, difficulty) == 0
assert move_cube.evaluate_state(pose_origin, pose_both, difficulty) != 0
def test_evaluate_state_difficulty_4():
difficulty = 4
pose_origin = move_cube.Pose()
pose_trans = move_cube.Pose(position=[1, 2, 3])
pose_rot = move_cube.Pose(
orientation=Rotation.from_euler("z", 0.42).as_quat()
)
pose_both = move_cube.Pose(
[1, 2, 3], Rotation.from_euler("z", 0.42).as_quat()
)
# needs to be zero for exact match
cost = move_cube.evaluate_state(pose_origin, pose_origin, difficulty)
assert cost == 0
# None-zero if there is translation, rotation or both
assert move_cube.evaluate_state(pose_origin, pose_trans, difficulty) != 0
assert move_cube.evaluate_state(pose_origin, pose_rot, difficulty) != 0
assert move_cube.evaluate_state(pose_origin, pose_both, difficulty) != 0
def test_evaluate_state_dict():
"""Test evaluate state using a dict instead of Pose."""
difficulty = 4
pose_origin = move_cube.Pose()
dict_origin = {"position": [0, 0, 0], "orientation": [0, 0, 0, 1]}
pose_pose = move_cube.Pose(
[1, 2, 3], Rotation.from_euler("z", 0.42).as_quat()
)
dict_pose = {
"position": [1, 2, 3],
"orientation": Rotation.from_euler("z", 0.42).as_quat(),
}
# needs to be zero for exact match
pose_cost = move_cube.evaluate_state(pose_origin, pose_pose, difficulty)
dict_cost = move_cube.evaluate_state(dict_origin, dict_pose, difficulty)
assert pose_cost == dict_cost
def test_validate_goal():
half_width = move_cube._CUBE_WIDTH / 2
yaw_rotation = Rotation.from_euler("z", 0.42).as_quat()
full_rotation = Rotation.from_euler("zxz", [0.42, 0.1, -2.3]).as_quat()
# test some valid goals
try:
move_cube.validate_goal(
move_cube.Pose([0, 0, half_width], [0, 0, 0, 1])
)
except Exception as e:
pytest.fail("Valid goal was considered invalid because %s" % e)
try:
move_cube.validate_goal(
move_cube.Pose([0.05, -0.1, half_width], yaw_rotation)
)
except Exception as e:
pytest.fail("Valid goal was considered invalid because %s" % e)
try:
move_cube.validate_goal(
move_cube.Pose([-0.12, 0.0, 0.06], full_rotation)
)
except Exception as e:
pytest.fail("Valid goal was considered invalid because %s" % e)
# test some invalid goals
# invalid values
with pytest.raises(ValueError):
move_cube.validate_goal(move_cube.Pose([0, 0], [0, 0, 0, 1]))
with pytest.raises(ValueError):
move_cube.validate_goal(move_cube.Pose([0, 0, 0], [0, 0, 1]))
# invalid positions
with pytest.raises(move_cube.InvalidGoalError):
move_cube.validate_goal(
move_cube.Pose([0.3, 0, half_width], [0, 0, 0, 1])
)
with pytest.raises(move_cube.InvalidGoalError):
move_cube.validate_goal(
move_cube.Pose([0, -0.3, half_width], [0, 0, 0, 1])
)
with pytest.raises(move_cube.InvalidGoalError):
move_cube.validate_goal(move_cube.Pose([0, 0, 0.3], [0, 0, 0, 1]))
with pytest.raises(move_cube.InvalidGoalError):
move_cube.validate_goal(move_cube.Pose([0, 0, 0], [0, 0, 0, 1]))
with pytest.raises(move_cube.InvalidGoalError):
move_cube.validate_goal(move_cube.Pose([0, 0, -0.01], [0, 0, 0, 1]))
# valid CoM position but rotation makes it reach out of valid range
with pytest.raises(move_cube.InvalidGoalError):
move_cube.validate_goal(
move_cube.Pose([0, 0, half_width], full_rotation)
)
def test_validate_goal_dict():
"""Test validation of a goal that is passed as dict."""
half_width = move_cube._CUBE_WIDTH / 2
full_rotation = Rotation.from_euler("zxz", [0.42, 0.1, -2.3]).as_quat()
# a valid goal
try:
move_cube.validate_goal(
{"position": [-0.12, 0.0, 0.06], "orientation": full_rotation}
)
except Exception as e:
pytest.fail("Valid goal was considered invalid because %s" % e)
# some invalid goals
# invalid values
with pytest.raises(ValueError):
move_cube.validate_goal(
{"position": [0, 0], "orientation": [0, 0, 0, 1]}
)
# invalid positions
with pytest.raises(move_cube.InvalidGoalError):
move_cube.validate_goal(
{"position": [0.3, 0, half_width], "orientation": [0, 0, 0, 1]}
)
# invalid dict keys
with pytest.raises(KeyError):
move_cube.validate_goal(
{"wrong_key": [-0.12, 0.0, 0.06], "orientation": full_rotation}
)
| 34.107438 | 77 | 0.605282 | 1,678 | 12,381 | 4.243147 | 0.085816 | 0.11236 | 0.055618 | 0.070787 | 0.863624 | 0.847191 | 0.824157 | 0.794522 | 0.785253 | 0.757725 | 0 | 0.036545 | 0.270657 | 12,381 | 362 | 78 | 34.201657 | 0.751938 | 0.107584 | 0 | 0.59176 | 0 | 0 | 0.03972 | 0 | 0 | 0 | 0 | 0 | 0.116105 | 1 | 0.044944 | false | 0 | 0.014981 | 0 | 0.059925 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3bc85e0c158875d9ff13d3899d9e4613285b6189 | 271 | py | Python | tests/permutation/mutation/testecm.py | sglumac/pyislands | a5eaceb68a0f21bd8bc8586fdf8cf0d9b7a0134f | [
"MIT"
] | null | null | null | tests/permutation/mutation/testecm.py | sglumac/pyislands | a5eaceb68a0f21bd8bc8586fdf8cf0d9b7a0134f | [
"MIT"
] | null | null | null | tests/permutation/mutation/testecm.py | sglumac/pyislands | a5eaceb68a0f21bd8bc8586fdf8cf0d9b7a0134f | [
"MIT"
] | null | null | null | from pyislands.permutation.mutation.rsm import get_reversed_sequence_mutation
from pyislands.permutation.mutation.ecm import get_every_city_mutation
from tests.permutation.mutation import check_mutation
def test_ecm():
check_mutation(get_every_city_mutation(1.0))
| 30.111111 | 77 | 0.859779 | 38 | 271 | 5.815789 | 0.473684 | 0.257919 | 0.217195 | 0.289593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008032 | 0.081181 | 271 | 8 | 78 | 33.875 | 0.879518 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.6 | 0 | 0.8 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
3be0a14989d37a9b94f7a1a91068e423c0d3ddc5 | 208 | py | Python | src/decisionengine_modules/htcondor/sources/slots_source_proxy.py | hyunwoo18/decisionengine_modules | a67462628c2074e768d0825edee4ee5d570030e0 | [
"BSD-3-Clause"
] | null | null | null | src/decisionengine_modules/htcondor/sources/slots_source_proxy.py | hyunwoo18/decisionengine_modules | a67462628c2074e768d0825edee4ee5d570030e0 | [
"BSD-3-Clause"
] | null | null | null | src/decisionengine_modules/htcondor/sources/slots_source_proxy.py | hyunwoo18/decisionengine_modules | a67462628c2074e768d0825edee4ee5d570030e0 | [
"BSD-3-Clause"
] | null | null | null | from decisionengine.framework.modules import SourceProxy
from decisionengine.framework.modules import Source
StartdManifestsSourceProxy = SourceProxy.SourceProxy
Source.describe(StartdManifestsSourceProxy)
| 29.714286 | 56 | 0.889423 | 18 | 208 | 10.277778 | 0.5 | 0.194595 | 0.291892 | 0.367568 | 0.432432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067308 | 208 | 6 | 57 | 34.666667 | 0.953608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
ce1d0c286788552e37ac647d85caae3c43ed48f7 | 31 | py | Python | algorithms/__init__.py | HoliestCow/wind_daq | 6a8b30ba6b14b3162f2fa3144b52cdf7ed17ec3a | [
"MIT"
] | 1 | 2017-08-23T18:52:21.000Z | 2017-08-23T18:52:21.000Z | algorithms/__init__.py | HoliestCow/wind_daq | 6a8b30ba6b14b3162f2fa3144b52cdf7ed17ec3a | [
"MIT"
] | 10 | 2018-05-30T15:49:45.000Z | 2019-06-12T18:20:29.000Z | algorithms/__init__.py | HoliestCow/wind_daq | 6a8b30ba6b14b3162f2fa3144b52cdf7ed17ec3a | [
"MIT"
] | null | null | null |
from .visualTracking import *
| 10.333333 | 29 | 0.774194 | 3 | 31 | 8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16129 | 31 | 2 | 30 | 15.5 | 0.923077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0231ce9813b69dfaca9b388dde0e3974101037f8 | 197 | py | Python | website/app/util/__init__.py | yqh231/cloudmusic_api | d41b04b1b619b3690d0fe6b5a7b8297feef77c49 | [
"Apache-2.0"
] | 4 | 2017-07-17T09:58:08.000Z | 2019-04-19T10:07:41.000Z | website/app/util/__init__.py | yqh231/cloudmusic_api | d41b04b1b619b3690d0fe6b5a7b8297feef77c49 | [
"Apache-2.0"
] | null | null | null | website/app/util/__init__.py | yqh231/cloudmusic_api | d41b04b1b619b3690d0fe6b5a7b8297feef77c49 | [
"Apache-2.0"
] | null | null | null | from website.app.util.response import *
from website.app.util.param_check import *
from website.app.util.error_catch import *
__all__ = ['JsonError', 'JsonSuccess', 'Param', 'ParamCheck', 'error'] | 39.4 | 70 | 0.756345 | 26 | 197 | 5.5 | 0.538462 | 0.230769 | 0.293706 | 0.377622 | 0.335664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096447 | 197 | 5 | 70 | 39.4 | 0.803371 | 0 | 0 | 0 | 0 | 0 | 0.20202 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
5a1462aa401e2d8848df99112e3f34a53ea284fe | 5,937 | py | Python | src/parse_git_log/xmlRead.py | saheel1115/szz- | dbfbf3c2d007f61ee8c8793f83dbdc071ad02da3 | [
"MIT"
] | 9 | 2017-10-21T13:29:46.000Z | 2022-01-10T23:49:54.000Z | src/parse_git_log/xmlRead.py | saheel1115/szz- | dbfbf3c2d007f61ee8c8793f83dbdc071ad02da3 | [
"MIT"
] | 3 | 2018-01-09T11:28:55.000Z | 2019-01-20T08:45:18.000Z | src/parse_git_log/xmlRead.py | saheel1115/szz- | dbfbf3c2d007f61ee8c8793f83dbdc071ad02da3 | [
"MIT"
] | 1 | 2020-12-29T05:10:31.000Z | 2020-12-29T05:10:31.000Z | #import xml.etree.cElementTree as ET
#from lxml import etree as ET
from lxml import etree as ET
from commit import commit
import sys
import os
import nltk
import util
import classify
def dump_xml(commit_list,out_file):
xmlFile = xmlWrite(out_file)
for c in commit_list:
xmlFile.setCommitAll(c.sha, c.tag, c.project, c.committer, c.commit_date, c.author, c.author_date, c.subject, c.body, c.isbug, c.bug_type)
xmlFile.dump()
def parse_xml_by_bug_type(xml_doc, bug_type):
buggy_list = []
commit_list = parse_xml_all(xml_doc)
for co in commit_list:
if bug_type == co.getBugType():
buggy_list.append(co)
return buggy_list
def parse_xml(xml_doc, lang='java'):
print "---> parse_xml_all"
context = ET.iterparse(xml_doc, events=("start", "end"))
file_name = os.path.basename(xml_doc)
file_name = os.path.splitext(file_name)[0]
project = ""
commit_list = []
co = None
add = None
delete = None
file_name = None
count = 0
for event, element in context:
if event is "start":
if element.tag == 'commit':
if not co is None:
commit_list.append(co)
co = None
add = None
delete = None
file_name = None
count += 1
if element.tag == "sha":
co = commit(project, element.text)
#co.sha = element.text
elif element.tag == "tag": co.tag = element.text
elif element.tag == "project": co.project = element.text
elif element.tag == "committer": co.committer = element.text
elif element.tag == "commit_date": co.commit_date = element.text
elif element.tag == "author": co.author = element.text
elif element.tag == "author_date": co.author_date = element.text
elif element.tag == "bug": co.isbug = element.text
elif element.tag == "bug_type_root": co.bug_type_root = element.text
elif element.tag == "bug_type_impact": co.bug_type_impact = element.text
elif element.tag == "bug_type_component": co.bug_type_comp = element.text
elif element.tag == "subject": co.subject = element.text
elif element.tag == "body": co.body = element.text
elif element.tag == "add": add = element.text
elif element.tag == "delete": delete = element.text
elif element.tag == "file": file_name = element.text
elif element.tag == "language": language = element.text
if event is "end" and element.tag == "change":
co.addChange(add, delete, file_name,language)
commit_list.append(co)
return commit_list
'''
def parse_xml(xml_doc, lang):
if lang is None:
return parse_xml_all(xml_doc)
context = ET.iterparse(xml_doc, events=("start", "end"))
file_name = os.path.basename(xml_doc)
file_name = os.path.splitext(file_name)[0]
commit_list = []
co = None
add = None
delete = None
file_name = None
count = 0
for event, element in context:
if event is "start":
if element.tag == 'commit':
if not co is None:
if co.tag is lang:
commit_list.append(co)
co = None
add = None
delete = None
file_name = None
count += 1
if element.tag == "sha":
co = commit(project, element.text)
cp.sha = element.text
elif element.tag == "tag": co.tag = element.text
elif element.tag == "project": co.project = element.text
elif element.tag == "committer": co.committer = element.text
elif element.tag == "commit_date": co.commit_date = element.text
elif element.tag == "author": co.author = element.text
elif element.tag == "author_date": co.author_date = element.text
elif element.tag == "bug": co.isbug = element.text
elif element.tag == "bug_type_root": co.bug_type_root = element.text
elif element.tag == "bug_type_impact": co.bug_type_impact = element.text
elif element.tag == "bug_type_component": co.bug_type_comp = element.text
elif element.tag == "subject": co.subject = element.text
elif element.tag == "body": co.body = element.text
elif element.tag == "add": add = element.text
elif element.tag == "delete": delete = element.text
elif element.tag == "file": file_name = element.text
elif element.tag == "language": language = element.text
if event is "end" and element.tag == "change":
co.addChange(add, delete, file_name,language)
if co.tag is lang:
#print "------"
commit_list.append(co)
return commit_list
'''
if __name__ == "__main__":
print (sys.argv[1])
commit_list = parse_xml(sys.argv[1], None)
featuresets = []
for c in commit_list:
'''
print c.sha
print c.body
print c.subject
print c.bug_type
'''
bugRootType = c.getBugRootType()
bugImpactType = c.getBugImpactType()
logStr = c.getLog()
features = classify.dialogue_act_features(logStr)
featuresets.append((features, bugRootType))
features1 = classify.dialogue_act_features(logStr)
featuresets.append((features, bugImpactType))
for f in featuresets:
print f
| 33.167598 | 146 | 0.552131 | 711 | 5,937 | 4.461322 | 0.130802 | 0.119798 | 0.151324 | 0.221942 | 0.788777 | 0.759773 | 0.759773 | 0.723834 | 0.687264 | 0.67024 | 0 | 0.002311 | 0.344113 | 5,937 | 178 | 147 | 33.353933 | 0.812275 | 0.014149 | 0 | 0.155844 | 0 | 0 | 0.055769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.090909 | null | null | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5a75ffa2140e0c6aab6a3c72c844204943b89eb3 | 444 | py | Python | material/__init__.py | willist/django-material | 73e50eb0105a67dde1c3f6846f868f10bda1f4ea | [
"BSD-3-Clause"
] | 2,703 | 2015-02-05T00:55:14.000Z | 2022-03-16T19:58:23.000Z | material/__init__.py | willist/django-material | 73e50eb0105a67dde1c3f6846f868f10bda1f4ea | [
"BSD-3-Clause"
] | 495 | 2015-04-03T14:20:23.000Z | 2022-03-01T13:05:51.000Z | material/__init__.py | willist/django-material | 73e50eb0105a67dde1c3f6846f868f10bda1f4ea | [
"BSD-3-Clause"
] | 552 | 2015-04-04T12:09:36.000Z | 2022-03-04T13:59:19.000Z | """Django-material - material design for the django framework."""
from .base import (
Layout, Fieldset, Row, Column, Span, Field,
Span2, Span3, Span4, Span5, Span6, Span7,
Span8, Span9, Span10, Span11, Span12,
LayoutMixin)
__all__ = (
'Layout', 'Fieldset', 'Row', 'Column', 'Span', 'Field',
'Span2', 'Span3', 'Span4', 'Span5', 'Span6', 'Span7',
'Span8', 'Span9', 'Span10', 'Span11', 'Span12',
'LayoutMixin'
)
| 27.75 | 65 | 0.612613 | 48 | 444 | 5.583333 | 0.583333 | 0.104478 | 0.126866 | 0.171642 | 0.753731 | 0.753731 | 0.753731 | 0.753731 | 0.753731 | 0.753731 | 0 | 0.078431 | 0.195946 | 444 | 15 | 66 | 29.6 | 0.672269 | 0.132883 | 0 | 0 | 0 | 0 | 0.266491 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ce72e68343258b1356aade9d78371680b4492178 | 120 | py | Python | trempy/__init__.py | briqInstitute/trempy | 3bc71942ab2ae5e2e3d2526210d28c023259aa86 | [
"MIT"
] | 1 | 2019-10-17T11:58:08.000Z | 2019-10-17T11:58:08.000Z | trempy/__init__.py | OpenSourceEconomics/trempy | 3bc71942ab2ae5e2e3d2526210d28c023259aa86 | [
"MIT"
] | 4 | 2018-11-18T13:49:49.000Z | 2019-02-05T13:40:15.000Z | trempy/__init__.py | briqInstitute/trempy | 3bc71942ab2ae5e2e3d2526210d28c023259aa86 | [
"MIT"
] | 1 | 2020-05-12T13:30:25.000Z | 2020-05-12T13:30:25.000Z | from trempy.estimate.estimate import estimate # noqa: F401
from trempy.simulate.simulate import simulate # noqa: F401
| 40 | 59 | 0.8 | 16 | 120 | 6 | 0.4375 | 0.208333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057692 | 0.133333 | 120 | 2 | 60 | 60 | 0.865385 | 0.175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ce927178fc1efbb05b1a2113b4e321fc9cd2c756 | 122 | py | Python | midas_web_solution/midas_web_solution/settings/partials/__init__.py | jupiny/MIDASWebSolution | c6250bb7aeab815b3c759ae4f7b419da50c26b1c | [
"MIT"
] | null | null | null | midas_web_solution/midas_web_solution/settings/partials/__init__.py | jupiny/MIDASWebSolution | c6250bb7aeab815b3c759ae4f7b419da50c26b1c | [
"MIT"
] | null | null | null | midas_web_solution/midas_web_solution/settings/partials/__init__.py | jupiny/MIDASWebSolution | c6250bb7aeab815b3c759ae4f7b419da50c26b1c | [
"MIT"
] | null | null | null | from .base import *
from .auth import *
from .database import *
from .internationalization import *
from .static import *
| 20.333333 | 35 | 0.754098 | 15 | 122 | 6.133333 | 0.466667 | 0.434783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.163934 | 122 | 5 | 36 | 24.4 | 0.901961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0cbd869de506ee8107a10aad361faf43e451ed39 | 249 | py | Python | shell/encoders/shellcode/payloads.py | vasco2016/shellsploit-framework | 04eb4a0449acaba0b70c40a78c61a0d5e2527406 | [
"MIT"
] | 61 | 2017-06-13T13:48:38.000Z | 2022-03-02T17:43:45.000Z | shell/encoders/shellcode/payloads.py | T0mcat3r/shellsploit-framework | 04eb4a0449acaba0b70c40a78c61a0d5e2527406 | [
"MIT"
] | null | null | null | shell/encoders/shellcode/payloads.py | T0mcat3r/shellsploit-framework | 04eb4a0449acaba0b70c40a78c61a0d5e2527406 | [
"MIT"
] | 28 | 2017-08-15T05:38:27.000Z | 2020-12-31T03:39:38.000Z | class Encoders(object):
def __init__(self):
self.shellcode = [
"encoders/shellcode/intel/x86/xor86.py",
"encoders/shellcode/intel/x86/xor_b3m.py",
"encoders/shellcode/intel/x86/xor64.py",
]
def ret(self):
return len(self.shellcode) | 24.9 | 45 | 0.710843 | 34 | 249 | 5.058824 | 0.5 | 0.296512 | 0.383721 | 0.436047 | 0.313953 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050691 | 0.128514 | 249 | 10 | 46 | 24.9 | 0.741935 | 0 | 0 | 0 | 0 | 0 | 0.452 | 0.452 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0 | 0.111111 | 0.444444 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 6 |
0cf042e207fd15392632feb110390ec6dbc0c281 | 45 | py | Python | DotfilesLibrary/__init__.py | errose28/DotfilesLibrary | 6d0998abf0557b8963c4c9f908175c2a4b4b6c2f | [
"Apache-2.0"
] | null | null | null | DotfilesLibrary/__init__.py | errose28/DotfilesLibrary | 6d0998abf0557b8963c4c9f908175c2a4b4b6c2f | [
"Apache-2.0"
] | null | null | null | DotfilesLibrary/__init__.py | errose28/DotfilesLibrary | 6d0998abf0557b8963c4c9f908175c2a4b4b6c2f | [
"Apache-2.0"
] | null | null | null | from .DotfilesLibrary import DotfilesLibrary
| 22.5 | 44 | 0.888889 | 4 | 45 | 10 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088889 | 45 | 1 | 45 | 45 | 0.97561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
0bbcb5e0031a4303d15e8acf2b7a3ec13543646a | 24 | py | Python | app/__init__.py | xiao-ming-team/xiao-ming | c2b40ab5e040f9f8d4125ba3a273346464613660 | [
"MIT"
] | null | null | null | app/__init__.py | xiao-ming-team/xiao-ming | c2b40ab5e040f9f8d4125ba3a273346464613660 | [
"MIT"
] | null | null | null | app/__init__.py | xiao-ming-team/xiao-ming | c2b40ab5e040f9f8d4125ba3a273346464613660 | [
"MIT"
] | null | null | null | from server import app
| 8 | 22 | 0.791667 | 4 | 24 | 4.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.208333 | 24 | 2 | 23 | 12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f07dbf3593f6463ed5edb09ad22ef1705f800e5c | 41 | py | Python | structure/__init__.py | NLP-Discourse-SoochowU/TDDiscourseParser | 2f9c7cef85c564c47b368ee4935caf1fad7c598d | [
"Apache-2.0"
] | 9 | 2020-11-24T01:16:01.000Z | 2022-01-26T09:37:00.000Z | structure/__init__.py | NLP-Discourse-SoochowU/TDDiscourseParser | 2f9c7cef85c564c47b368ee4935caf1fad7c598d | [
"Apache-2.0"
] | 2 | 2020-11-29T17:49:49.000Z | 2021-05-20T02:53:25.000Z | structure/__init__.py | NLP-Discourse-SoochowU/TDDiscourseParser | 2f9c7cef85c564c47b368ee4935caf1fad7c598d | [
"Apache-2.0"
] | 1 | 2022-01-26T11:00:33.000Z | 2022-01-26T11:00:33.000Z | # coding: UTF-8
from .nodes import *
| 10.25 | 21 | 0.609756 | 6 | 41 | 4.166667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 0.268293 | 41 | 3 | 22 | 13.666667 | 0.8 | 0.317073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
b2b4feb6aedb4bb668dd1b25bed827e42fb5d120 | 21,550 | py | Python | certificate_validator/tests/test_resources.py | ludo/certificate-validator | ee22fbc1c429eeeabbc6feb56af661f24245691a | [
"MIT"
] | 1 | 2019-11-04T00:41:02.000Z | 2019-11-04T00:41:02.000Z | certificate_validator/tests/test_resources.py | NickolasHKraus/certificate-validator | 54b1b3c7dc908332a27146daa3b11d5b11d74075 | [
"MIT"
] | 8 | 2019-09-16T15:19:47.000Z | 2020-04-01T17:23:45.000Z | certificate_validator/tests/test_resources.py | NickolasHKraus/certificate-validator | 54b1b3c7dc908332a27146daa3b11d5b11d74075 | [
"MIT"
] | 4 | 2019-09-10T20:07:26.000Z | 2020-04-17T15:48:30.000Z | # -*- coding: utf-8 -*-
"""Tests for the `resources` module."""
from unittest.mock import call, patch
from botocore import exceptions
from certificate_validator import resources
from certificate_validator.resources import (
Action, Certificate, CertificateMixin, CertificateValidator
)
from .base import (
BaseTestCase, CertificateBaseTestCase, CertificateValidatorBaseTestCase
)
class ActionTestCase(BaseTestCase):
def setUp(self):
super(ActionTestCase, self).setUp()
def test_class(self):
self.assertEqual('CREATE', Action.CREATE.value)
self.assertEqual('UPSERT', Action.UPSERT.value)
self.assertEqual('DELETE', Action.DELETE.value)
class CertificateMixinTestCase(BaseTestCase):
def setUp(self):
super(CertificateMixinTestCase, self).setUp()
def test_is_valid_arn(self):
valid_arn = 'arn:aws:acm:us-east-1:123:certificate/1337'
self.assertTrue(CertificateMixin().is_valid_arn(valid_arn))
invalid_arn = 'invalid'
self.assertFalse(CertificateMixin().is_valid_arn(invalid_arn))
class CertificateTestCase(CertificateBaseTestCase):
def setUp(self):
super(CertificateTestCase, self).setUp()
def test_init(self):
c = Certificate(self.request, self.response)
self.assertEqual(self.request, c.request)
self.assertEqual(self.response, c.response)
def test_create_success(self):
c = Certificate(self.request, self.mock_response)
c.create()
self.mock_request_certificate.assert_called_with(
domain_name='certificate-validator.com',
subject_alternative_names=['www.certificate-validator.com'],
)
self.mock_response.set_status.assert_called_with(success=True)
self.mock_response.set_physical_resource_id.assert_called_with(
physical_resource_id='arn:aws:acm:us-east-1:123:certificate/1337'
)
self.mock_response.set_data.assert_called_with({
'CertificateArn': 'arn:aws:acm:us-east-1:123:certificate/1337'
})
def test_create_failed(self):
c = Certificate(self.request, self.mock_response)
self.mock_request_certificate.side_effect = exceptions.ClientError(
error_response={'Error': {
'Code': 'Code',
'Message': 'Message'
}},
operation_name='Operation'
)
c.create()
self.mock_request_certificate.assert_called_with(
domain_name='certificate-validator.com',
subject_alternative_names=['www.certificate-validator.com'],
)
self.mock_response.set_status.assert_called_with(success=False)
reason = \
'An error occurred (Code) when calling the Operation operation: ' \
'Message'
self.mock_response.set_reason.assert_called_with(reason=reason)
def test_update(self):
c = Certificate(self.request, self.response)
mock_create = patch.object(c, 'create').start()
c.update()
mock_create.assert_called_once()
def test_delete_success_certificate_does_not_exist(self):
self.mock_request.physical_resource_id = ''
c = Certificate(self.mock_request, self.mock_response)
c.delete()
self.mock_response.set_status.assert_called_with(success=True)
self.mock_response.set_reason.assert_called_with(
reason='Certificate does not exist.'
)
def test_delete_success_certificate_not_found(self):
self.mock_request.physical_resource_id = \
'arn:aws:acm:us-east-1:123:certificate/1337'
self.mock_delete_certificate.side_effect = exceptions.ClientError(
error_response={
'Error': {
'Code': 'ResourceNotFoundException',
'Message': 'Message'
}
},
operation_name='Operation'
)
c = Certificate(self.mock_request, self.mock_response)
c.delete()
self.mock_delete_certificate.assert_called_with(
certificate_arn='arn:aws:acm:us-east-1:123:certificate/1337'
)
self.mock_response.set_status.assert_called_with(success=True)
self.mock_response.set_reason.assert_called_with(
reason='Certificate not found.'
)
def test_delete_success(self):
self.mock_request.physical_resource_id = \
'arn:aws:acm:us-east-1:123:certificate/1337'
c = Certificate(self.mock_request, self.mock_response)
c.delete()
self.mock_delete_certificate.assert_called_with(
certificate_arn='arn:aws:acm:us-east-1:123:certificate/1337'
)
self.mock_response.set_status.assert_called_with(success=True)
def test_delete_failed_certificate_arn_is_invalid(self):
self.mock_request.physical_resource_id = 'invalid'
c = Certificate(self.mock_request, self.mock_response)
c.delete()
self.mock_response.set_status.assert_called_with(success=False)
self.mock_response.set_reason.assert_called_with(
reason='Certificate ARN is invalid.'
)
def test_delete_failed(self):
self.mock_request.physical_resource_id = \
'arn:aws:acm:us-east-1:123:certificate/1337'
c = Certificate(self.mock_request, self.mock_response)
self.mock_delete_certificate.side_effect = exceptions.ClientError(
error_response={'Error': {
'Code': 'Code',
'Message': 'Message'
}},
operation_name='Operation'
)
c.delete()
self.mock_delete_certificate.assert_called_with(
certificate_arn='arn:aws:acm:us-east-1:123:certificate/1337'
)
self.mock_response.set_status.assert_called_with(success=False)
reason = \
'An error occurred (Code) when calling the Operation operation: ' \
'Message'
self.mock_response.set_reason.assert_called_with(reason=reason)
class CertificateValidatorTestCase(CertificateValidatorBaseTestCase):
    """Tests for the CertificateValidator custom resource.

    AWS interactions (ACM, Route53) are patched out in the base class;
    the tests assert on the exact payloads passed to those mocks and on
    the status/reason written to the mocked CloudFormation response.
    """
    def setUp(self):
        super(CertificateValidatorTestCase, self).setUp()
    def test_init(self):
        """The constructor stores the request and response objects."""
        cv = CertificateValidator(self.request, self.response)
        self.assertEqual(self.request, cv.request)
        self.assertEqual(self.response, cv.response)
    def test_change_resource_record_sets_failed_certificate_arn_is_invalid(
        self
    ):
        """An unparseable certificate ARN fails with a clear reason."""
        cv = CertificateValidator(self.mock_request, self.mock_response)
        cv.change_resource_record_sets('invalid', Action.CREATE)
        self.mock_response.set_status.assert_called_with(success=False)
        self.mock_response.set_reason.assert_called_with(
            reason='Certificate ARN is invalid.'
        )
    def test_change_resource_record_sets_create_success(self):
        """CREATE resolves the zone, builds a change batch and succeeds."""
        self.mock_request.resource_properties = {
            'CertificateArn': self.certificate_arn
        }
        cv = CertificateValidator(self.mock_request, self.mock_response)
        cv.change_resource_record_sets(self.certificate_arn, Action.CREATE)
        self.mock_get_hosted_zone_id.assert_called_with(
            'certificate-validator.com'
        )
        self.mock_get_domain_validation_options.assert_called_with(
            'arn:aws:acm:us-east-1:123:certificate/1337'
        )
        self.mock_get_change_batch.assert_called_with(
            'CREATE', {
                'Name': '_x1.certificate-validator.com.',
                'Type': 'CNAME',
                'Value': '_x2.acm-validations.aws.'
            }
        )
        # NOTE(review): 'Changes' here is a dict, unlike the list produced
        # by get_change_batch (see test_get_change_batch); this mirrors the
        # mock's configured return value from the base class — confirm.
        self.mock_change_resource_record_sets.assert_called_with(
            hosted_zone_id='Z23ABC4XYZL05B',
            change_batch={
                'Changes': {
                    'ResourceRecordSet': {
                        'Name': '_x1.certificate-validator.com.',
                        'Type': 'CNAME',
                        'TTL': 300,
                        'ResourceRecords': [{
                            'Value': '_x2.acm-validations.aws.'
                        }]
                    }
                }
            }
        )
        self.mock_response.set_status.assert_called_with(success=True)
    def test_change_resource_record_sets_create_failed(self):
        """A generic ClientError during CREATE fails with its message."""
        self.mock_request.resource_properties = {
            'CertificateArn': self.certificate_arn
        }
        self.mock_get_domain_validation_options.side_effect = \
            exceptions.ClientError(
                error_response={'Error': {
                    'Code': 'Code',
                    'Message': 'Message'
                }},
                operation_name='Operation'
            )
        cv = CertificateValidator(self.mock_request, self.mock_response)
        cv.change_resource_record_sets(self.certificate_arn, Action.CREATE)
        self.mock_response.set_status.assert_called_with(success=False)
        reason = \
            'An error occurred (Code) when calling the Operation operation: ' \
            'Message'
        self.mock_response.set_reason.assert_called_with(reason=reason)
    def test_change_resource_record_sets_create_failed_invalid_cb(self):
        """An InvalidChangeBatch error surfaces as a failure.

        NOTE(review): despite the name, this exercises Action.DELETE —
        confirm whether CREATE was intended.
        """
        self.mock_request.resource_properties = {
            'CertificateArn': self.certificate_arn
        }
        self.mock_get_domain_validation_options.side_effect = \
            exceptions.ClientError(
                error_response={'Error': {
                    'Code': 'InvalidChangeBatch',
                    'Message': 'Message'
                }},
                operation_name='Operation'
            )
        cv = CertificateValidator(self.mock_request, self.mock_response)
        cv.change_resource_record_sets(self.certificate_arn, Action.DELETE)
        self.mock_response.set_status.assert_called_with(success=False)
        reason = \
            'An error occurred (InvalidChangeBatch) when calling the ' \
            'Operation operation: Message'
        self.mock_response.set_reason.assert_called_with(reason=reason)
    def test_change_resource_record_sets_upsert(self):
        """UPSERT follows the same flow as CREATE and succeeds."""
        self.mock_request.resource_properties = {
            'CertificateArn': self.certificate_arn
        }
        cv = CertificateValidator(self.mock_request, self.mock_response)
        cv.change_resource_record_sets(self.certificate_arn, Action.UPSERT)
        self.mock_get_hosted_zone_id.assert_called_with(
            'certificate-validator.com'
        )
        self.mock_get_domain_validation_options.assert_called_with(
            'arn:aws:acm:us-east-1:123:certificate/1337'
        )
        self.mock_get_change_batch.assert_called_with(
            'UPSERT', {
                'Name': '_x1.certificate-validator.com.',
                'Type': 'CNAME',
                'Value': '_x2.acm-validations.aws.'
            }
        )
        self.mock_change_resource_record_sets.assert_called_with(
            hosted_zone_id='Z23ABC4XYZL05B',
            change_batch={
                'Changes': {
                    'ResourceRecordSet': {
                        'Name': '_x1.certificate-validator.com.',
                        'Type': 'CNAME',
                        'TTL': 300,
                        'ResourceRecords': [{
                            'Value': '_x2.acm-validations.aws.'
                        }]
                    }
                }
            }
        )
        self.mock_response.set_status.assert_called_with(success=True)
    def test_change_resource_record_sets_delete_success(self):
        """DELETE removes the validation record and succeeds."""
        self.mock_request.resource_properties = {
            'CertificateArn': self.certificate_arn
        }
        cv = CertificateValidator(self.mock_request, self.mock_response)
        cv.change_resource_record_sets(self.certificate_arn, Action.DELETE)
        self.mock_get_hosted_zone_id.assert_called_with(
            'certificate-validator.com'
        )
        self.mock_get_domain_validation_options.assert_called_with(
            'arn:aws:acm:us-east-1:123:certificate/1337'
        )
        self.mock_get_change_batch.assert_called_with(
            'DELETE', {
                'Name': '_x1.certificate-validator.com.',
                'Type': 'CNAME',
                'Value': '_x2.acm-validations.aws.'
            }
        )
        self.mock_change_resource_record_sets.assert_called_with(
            hosted_zone_id='Z23ABC4XYZL05B',
            change_batch={
                'Changes': {
                    'ResourceRecordSet': {
                        'Name': '_x1.certificate-validator.com.',
                        'Type': 'CNAME',
                        'TTL': 300,
                        'ResourceRecords': [{
                            'Value': '_x2.acm-validations.aws.'
                        }]
                    }
                }
            }
        )
        self.mock_response.set_status.assert_called_with(success=True)
    def test_change_resource_record_sets_delete_failed(self):
        """A generic ClientError during DELETE fails with its message."""
        self.mock_request.resource_properties = {
            'CertificateArn': self.certificate_arn
        }
        self.mock_get_domain_validation_options.side_effect = \
            exceptions.ClientError(
                error_response={'Error': {
                    'Code': 'Code',
                    'Message': 'Message'
                }},
                operation_name='Operation'
            )
        cv = CertificateValidator(self.mock_request, self.mock_response)
        cv.change_resource_record_sets(self.certificate_arn, Action.DELETE)
        self.mock_response.set_status.assert_called_with(success=False)
        reason = \
            'An error occurred (Code) when calling the Operation operation: ' \
            'Message'
        self.mock_response.set_reason.assert_called_with(reason=reason)
    def test_change_resource_record_sets_delete_failed_cert_not_found(self):
        """DELETE of a record for a missing certificate still succeeds."""
        self.mock_request.resource_properties = {
            'CertificateArn': self.certificate_arn
        }
        self.mock_get_domain_validation_options.side_effect = \
            exceptions.ClientError(
                error_response={'Error': {
                    'Code': 'ResourceNotFoundException',
                    'Message': 'Message'
                }},
                operation_name='Operation'
            )
        cv = CertificateValidator(self.mock_request, self.mock_response)
        cv.change_resource_record_sets(self.certificate_arn, Action.DELETE)
        self.mock_response.set_status.assert_called_with(success=True)
        self.mock_response.set_reason.assert_called_with(
            reason='Certificate not found.'
        )
    def test_change_resource_record_sets_delete_failed_rrset_not_found(self):
        """DELETE of an already-absent record set is treated as success."""
        self.mock_request.resource_properties = {
            'CertificateArn': self.certificate_arn
        }
        # Route53 reports a missing record set via InvalidChangeBatch with
        # this specific message text, which the code special-cases.
        message = \
            'Tried to delete resource record set ' \
            '[name=\'_x1.certificate-validator.com.\', type=\'CNAME\'] but ' \
            'it was not found'
        self.mock_get_domain_validation_options.side_effect = \
            exceptions.ClientError(
                error_response={'Error': {
                    'Code': 'InvalidChangeBatch',
                    'Message': message
                }},
                operation_name='Operation'
            )
        cv = CertificateValidator(self.mock_request, self.mock_response)
        cv.change_resource_record_sets(self.certificate_arn, Action.DELETE)
        self.mock_response.set_status.assert_called_with(success=True)
        self.mock_response.set_reason.assert_called_with(
            reason='Resource Record Set not found.'
        )
    def test_create(self):
        """create() upserts the validation record and waits for issuance."""
        mock_wait = patch.object(resources.ACM, 'wait').start()
        mock_change_resource_record_sets = \
            patch.object(resources.CertificateValidator,
                         'change_resource_record_sets').start()
        cv = CertificateValidator(self.request, self.mock_response)
        cv.create()
        self.mock_response.set_physical_resource_id.assert_called_with('1337')
        mock_change_resource_record_sets.assert_called_with(
            'arn:aws:acm:us-east-1:123:certificate/1', Action.UPSERT
        )
        mock_wait.assert_called_once_with(
            'arn:aws:acm:us-east-1:123:certificate/1'
        )
    def test_update(self):
        """update() deletes the old record then upserts the new one."""
        mock_change_resource_record_sets = \
            patch.object(resources.CertificateValidator,
                         'change_resource_record_sets').start()
        cv = CertificateValidator(self.request, self.mock_response)
        cv.update()
        mock_change_resource_record_sets.assert_has_calls([
            call('arn:aws:acm:us-east-1:123:certificate/0', Action.DELETE),
            call('arn:aws:acm:us-east-1:123:certificate/1', Action.UPSERT)
        ])
    def test_delete(self):
        """delete() issues a DELETE for the validation record."""
        mock_change_resource_record_sets = \
            patch.object(resources.CertificateValidator,
                         'change_resource_record_sets').start()
        cv = CertificateValidator(self.request, self.mock_response)
        cv.delete()
        mock_change_resource_record_sets.assert_called_with(
            'arn:aws:acm:us-east-1:123:certificate/1', Action.DELETE
        )
    def test_get_domain_validation_options(self):
        """Validation options are extracted from DescribeCertificate."""
        # Base-class patches would shadow describe_certificate; start fresh.
        patch.stopall()
        mock_describe_certificate = patch.object(
            resources.ACM, 'describe_certificate'
        ).start()
        mock_describe_certificate.return_value = {
            'Certificate': {
                'DomainName': 'certificate-validator.com',
                'DomainValidationOptions': [{
                    'DomainName': 'certificate-validator.com',
                    'ResourceRecord': {
                        'Name': '_x1.certificate-validator.com.',
                        'Type': 'CNAME',
                        'Value': '_x2.acm-validations.aws.'
                    }
                }]
            }
        }
        cv = CertificateValidator(self.request, self.response)
        actual = cv.get_domain_validation_options(
            certificate_arn='arn:aws:acm:us-east-1:123:certificate/1337'
        )
        expected = [{
            'DomainName': 'certificate-validator.com',
            'ResourceRecord': {
                'Name': '_x1.certificate-validator.com.',
                'Type': 'CNAME',
                'Value': '_x2.acm-validations.aws.'
            }
        }]
        self.assertEqual(expected, actual)
    def test_get_domain_validation_options_poll(self):
        """Polling retries until the ResourceRecord field is populated."""
        patch.stopall()
        # Neutralize the retry delay so the test runs instantly.
        patch('time.sleep', return_value=None).start()
        mock_describe_certificate = patch.object(
            resources.ACM, 'describe_certificate'
        ).start()
        # First response lacks ResourceRecord; second includes it.
        mock_describe_certificate.side_effect = [{
            'Certificate': {
                'DomainName': 'certificate-validator.com',
                'DomainValidationOptions': [{
                    'DomainName': 'certificate-validator.com'
                }],
            }
        }, {
            'Certificate': {
                'DomainName': 'certificate-validator.com',
                'DomainValidationOptions': [{
                    'DomainName': 'certificate-validator.com',
                    'ResourceRecord': {
                        'Name': '_x1.certificate-validator.com.',
                        'Type': 'CNAME',
                        'Value': '_x2.acm-validations.aws.'
                    }
                }]
            }
        }]
        cv = CertificateValidator(self.request, self.response)
        actual = cv.get_domain_validation_options(
            certificate_arn='arn:aws:acm:us-east-1:123:certificate/1337'
        )
        expected = [{
            'DomainName': 'certificate-validator.com',
            'ResourceRecord': {
                'Name': '_x1.certificate-validator.com.',
                'Type': 'CNAME',
                'Value': '_x2.acm-validations.aws.'
            }
        }]
        self.assertEqual(expected, actual)
    def test_get_hosted_zone_id(self):
        """The zone id is the bare id stripped of the '/hostedzone/' prefix."""
        patch.stopall()
        mock_list_hosted_zones_by_name = patch.object(
            resources.Route53, 'list_hosted_zones_by_name'
        ).start()
        mock_list_hosted_zones_by_name.return_value = {
            'HostedZones': [{
                'Id': '/hostedzone/Z23ABC4XYZL05B',
                'Name': 'certificate-validator.com.',
            }]
        }
        cv = CertificateValidator(self.request, self.response)
        actual = cv.get_hosted_zone_id(domain_name='certificate-validator.com')
        expected = 'Z23ABC4XYZL05B'
        self.assertEqual(expected, actual)
    def test_get_change_batch(self):
        """A change batch wraps the record in the Route53 Changes schema."""
        patch.stopall()
        cv = CertificateValidator(self.request, self.response)
        resource_record = {
            'Name': '_x1.certificate-validator.com.',
            'Type': 'CNAME',
            'Value': '_x2.acm-validations.aws.'
        }
        actual = cv.get_change_batch(
            action='CREATE', resource_record=resource_record
        )
        expected = {
            'Changes': [{
                'Action': 'CREATE',
                'ResourceRecordSet': {
                    'Name': '_x1.certificate-validator.com.',
                    'Type': 'CNAME',
                    'TTL': 300,
                    'ResourceRecords': [{
                        'Value': '_x2.acm-validations.aws.'
                    }]
                }
            }]
        }
        self.assertEqual(expected, actual)
| 39.981447 | 79 | 0.598794 | 2,109 | 21,550 | 5.818397 | 0.072072 | 0.067802 | 0.06389 | 0.046451 | 0.855432 | 0.835221 | 0.803846 | 0.784614 | 0.764241 | 0.751772 | 0 | 0.013656 | 0.3 | 21,550 | 538 | 80 | 40.055762 | 0.799801 | 0.002599 | 0 | 0.625 | 0 | 0 | 0.181497 | 0.098474 | 0 | 0 | 0 | 0 | 0.131048 | 1 | 0.064516 | false | 0 | 0.010081 | 0 | 0.082661 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b2d1f3a550b2bf3f7a5b243f9c82aa544f31470b | 10,554 | py | Python | views.py | Nixo-SK/api_ipf | 2dec798e60f3fd32d798a052b8bfd2449fcdd07b | [
"BSD-3-Clause"
] | null | null | null | views.py | Nixo-SK/api_ipf | 2dec798e60f3fd32d798a052b8bfd2449fcdd07b | [
"BSD-3-Clause"
] | null | null | null | views.py | Nixo-SK/api_ipf | 2dec798e60f3fd32d798a052b8bfd2449fcdd07b | [
"BSD-3-Clause"
] | null | null | null | from os import remove
from django.views.decorators.csrf import csrf_exempt
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from rest_framework.decorators import api_view
from api_ipf.serializers import *
from api_ipf.helpers import *
@csrf_exempt
@api_view(['GET', 'POST'])
def config(request):
"""
An API view function that processes with a not specified file.
In case of GET request, returns list of all configuration files.
In case of POST request takes data from request, serialize them, checks
their correctness and stores them into a database.
:param request: client's request
:return: JSON response
"""
if request.method == 'GET':
conf_list = ConfigFile.objects.all()
serializer = AccessConfigFileSerializer(conf_list, many=True)
return JSONResponse(serializer.data, status=200)
elif request.method == 'POST':
# temporary file that is used to check correctness of uploaded file.
temp_title = '.temp.bck'
default_storage.save(''.join([BCK_DIR, temp_title]),
ContentFile(request.FILES['directory'].read()))
serializer = ConfigFileSerializer(data=request.FILES)
if serializer.is_valid():
response = config_addition(temp_title, str(request.FILES['form']))
# file was checked so it can be deleted
default_storage.delete(temp_title)
if response.status_code == 201:
serializer.save()
return response
else:
return JSONResponse(serializer.errors, status=400)
@csrf_exempt
@api_view(['GET', 'PUT', 'DELETE'])
def config_detail(request, title):
    """
    An API view function that processes request with a specified file.

    In case of GET request, returns configuration file's content.
    In case of PUT request get itself form and data from request,
    serialize them, checks their correctness and stores them into a database.
    In case of DELETE request delete configuration file from a disk and
    object from a database.

    :param request: client's request
    :param title: a unique configuration file's title
    :return: JSON response
    """
    try:
        config = ConfigFile.objects.get(title=title)
        path = ''.join([CONF_DIR, title])
    except ConfigFile.DoesNotExist:
        return JSONResponse('Error: No such file (db).', status=404)
    if request.method == 'GET':
        return file_content(path)
    elif request.method == 'PUT':
        # temporary file that is used to check correctness of uploaded file.
        temp_title = '.temp.bck'
        default_storage.save(''.join([BCK_DIR, temp_title]),
                             ContentFile(request.FILES['directory'].read()))
        try:
            # The form type cannot be changed by an update; reuse the stored
            # one so the serializer validates against it.
            request.FILES['form'] = config.get_form()
            serializer = ConfigFileSerializer(config, data=request.FILES)
            if serializer.is_valid():
                response = config_addition(temp_title,
                                           str(request.FILES['form']))
                if response.status_code == 201:
                    # Remove the previous on-disk file before saving the new
                    # database object.
                    remove(''.join([CONF_DIR, str(request.FILES['title'])]))
                    serializer.save()
                return response
            else:
                return JSONResponse(serializer.errors, status=400)
        finally:
            # Bug fix: previously the temporary file was only deleted on the
            # valid-serializer branch, leaking it whenever validation failed.
            default_storage.delete(temp_title)
    elif request.method == 'DELETE':
        return config_delete(config, path)
@csrf_exempt
@api_view(['GET'])
def config_activate(request, title):
    """Activate the configuration file identified by *title*.

    :param request: client's request
    :param title: a unique configuration file's title
    :return: JSON response
    """
    if request.method == 'GET':
        try:
            config = ConfigFile.objects.get(title=title)
        except ConfigFile.DoesNotExist:
            return JSONResponse('Error: No such file (db).', status=404)
        path = ''.join([CONF_DIR, title])
        return activate(config.get_form(), path)
@csrf_exempt
@api_view(['GET', 'POST'])
def log(request):
    """List all logs (GET) or create a new log (POST).

    On POST the submitted data are validated and stored, after which an
    ``ipmon`` process is started with its output redirected to the new
    log file.

    :param request: client's request
    :return: JSON response
    """
    if request.method == 'GET':
        serializer = LogFileSerializer(LogFile.objects.all(), many=True)
        return JSONResponse(serializer.data, status=200)
    elif request.method == 'POST':
        serializer = LogFileSerializer(data=request.DATA)
        if not serializer.is_valid():
            return JSONResponse(serializer.errors, status=400)
        path = serializer.save()
        sh.ipmon('-aD', path)
        return JSONResponse('Log created.', status=200)
@csrf_exempt
@api_view(['GET', 'DELETE'])
def log_detail(request, title):
    """Return (GET) or delete (DELETE) the log identified by *title*.

    :param request: client's request
    :param title: a unique log's title
    :return: JSON response
    """
    try:
        log = LogFile.objects.get(title=title)
    except LogFile.DoesNotExist:
        return JSONResponse('Error: No such file (db).', status=404)
    path = ''.join([LOG_DIR, title, '.log'])
    if request.method == 'GET':
        return file_content(path)
    elif request.method == 'DELETE':
        return log_delete(log, path)
@csrf_exempt
@api_view(['GET'])
def blacklist(request):
    """Refresh the IP blacklist on client's request.

    :param request: client's request
    :return: JSON response
    """
    if request.method == 'GET':
        # update_blacklist() returns an error message on failure and a
        # falsy value on success.
        error = update_blacklist()
        if error:
            return JSONResponse(error, status=400)
        return JSONResponse('Blacklist updated.', status=200)
@csrf_exempt
@api_view(['GET'])
def ipf(request, args):
    """Execute the ``ipf`` command with the given arguments.

    Returns the command output with 200 OK, or the error text with
    400 BAD_REQUEST if the execution fails.

    :param request: client's request
    :param args: ipf arguments
    :return: JSON response
    """
    if request.method == 'GET':
        try:
            output = sh.ipf(str(args))
        except Exception as e:
            return JSONResponse(str(e), status=400)
        return JSONResponse(str(output), status=200)
@csrf_exempt
@api_view(['GET'])
def ipnat(request, args):
    """Execute the ``ipnat`` command with the given arguments.

    Returns the command output with 200 OK, or the error text with
    400 BAD_REQUEST if the execution fails.

    :param request: client's request
    :param args: ipnat arguments
    :return: JSON response
    """
    if request.method == 'GET':
        try:
            output = sh.ipnat(str(args))
        except Exception as e:
            return JSONResponse(str(e), status=400)
        return JSONResponse(str(output), status=200)
@csrf_exempt
@api_view(['GET'])
def ippool(request, args):
    """Execute the ``ippool`` command with the given arguments.

    Returns the command output with 200 OK, or the error text with
    400 BAD_REQUEST if the execution fails.

    :param request: client's request
    :param args: ippool arguments
    :return: JSON response
    """
    if request.method == 'GET':
        try:
            output = sh.ippool(str(args))
        except Exception as e:
            return JSONResponse(str(e), status=400)
        return JSONResponse(str(output), status=200)
@csrf_exempt
@api_view(['GET'])
def ipfstat_base(request):
    """Run ``ipfstat`` without arguments and return formatted output.

    Each "label: value" line of the command output is reformatted into
    aligned columns.  Returns 200 OK with the formatted text, or 400
    BAD_REQUEST with the error text if execution fails.

    :param request: client's request
    :return: JSON response
    """
    if request.method == 'GET':
        try:
            msg = ''
            for line in sh.ipfstat().split('\n'):
                parts = line.replace('\t', ' ').replace('  ', ' ').split(':')
                # A line without a ':' separator (e.g. the trailing empty
                # line) ends the formatted section; this is where the old
                # implementation stopped via its IndexError handler.
                if len(parts) < 2:
                    break
                msg += parts[0].ljust(22) + parts[1] + '\n'
            # Bug fix: the old code returned the 200 response only from an
            # IndexError handler, so if every line contained a ':' the view
            # fell through and returned None.
            return JSONResponse(msg, status=200)
        except Exception as e:
            return JSONResponse(str(e), status=400)
@csrf_exempt
@api_view(['GET'])
def ipfstat(request, args):
    """Execute the ``ipfstat`` command with the given arguments.

    Returns the command output with 200 OK, or the error text with
    400 BAD_REQUEST if the execution fails.

    :param request: client's request
    :param args: ipfstat arguments
    :return: JSON response
    """
    if request.method == 'GET':
        try:
            output = sh.ipfstat(str(args))
        except Exception as e:
            return JSONResponse(str(e), status=400)
        return JSONResponse(str(output), status=200)
@csrf_exempt
@api_view(['GET'])
def ipmon(request, args):
    """Execute the ``ipmon`` command with the given arguments.

    Returns the command output with 200 OK, or the error text with
    400 BAD_REQUEST if the execution fails.

    :param request: client's request
    :param args: ipmon arguments
    :return: JSON response
    """
    if request.method == 'GET':
        try:
            output = sh.ipmon(str(args))
        except Exception as e:
            return JSONResponse(str(e), status=400)
        return JSONResponse(str(output), status=200)
@csrf_exempt
@api_view(['GET'])
def svcadm(request, args):
    """Run ``svcadm <args> ipfilter`` to administer the ipfilter service.

    Returns the command output with 200 OK, or the error text with
    400 BAD_REQUEST if the execution fails.

    :param request: client's request
    :param args: svcadm arguments
    :return: JSON response
    """
    if request.method == 'GET':
        try:
            output = sh.svcadm(str(args), 'ipfilter')
        except Exception as e:
            return JSONResponse(str(e), status=400)
        return JSONResponse(str(output), status=200)
| 31.317507 | 78 | 0.666098 | 1,365 | 10,554 | 5.092308 | 0.132601 | 0.02719 | 0.028197 | 0.031794 | 0.78852 | 0.769386 | 0.746367 | 0.73083 | 0.707236 | 0.697454 | 0 | 0.015741 | 0.235551 | 10,554 | 336 | 79 | 31.410714 | 0.845811 | 0.016202 | 0 | 0.639752 | 0 | 0 | 0.051896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.043478 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
b2ed9ee5fbe44761d1439d5292fe965a0871fee5 | 175 | py | Python | complex_number.py | fatihwin-yt/a-Python-Tutorial-of-2021 | 7d2110f80efdfa79437bf64f8edcd08ec3d61926 | [
"MIT"
] | 1 | 2021-03-29T02:29:58.000Z | 2021-03-29T02:29:58.000Z | complex_number.py | fatihwin-yt/a-Python-Tutorial-of-2021 | 7d2110f80efdfa79437bf64f8edcd08ec3d61926 | [
"MIT"
] | null | null | null | complex_number.py | fatihwin-yt/a-Python-Tutorial-of-2021 | 7d2110f80efdfa79437bf64f8edcd08ec3d61926 | [
"MIT"
] | 1 | 2021-03-27T15:00:06.000Z | 2021-03-27T15:00:06.000Z |
# Demonstrate Python integer literals in different bases.
# Bug fix: the prints referenced an undefined name ``float_base``, which
# raised NameError; they now print ``number_base``.
number_base = 0b100  # binary literal: 4
print("100 binary: ", number_base)
number_base = 0o100  # octal literal: 64
print("100 octal: ", number_base)
number_base = 0x100  # hexadecimal literal: 256
print("100 hexadecimal: ", number_base)
| 17.5 | 38 | 0.708571 | 24 | 175 | 4.916667 | 0.458333 | 0.254237 | 0.254237 | 0.322034 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.144828 | 0.171429 | 175 | 9 | 39 | 19.444444 | 0.668966 | 0 | 0 | 0 | 0 | 0 | 0.242424 | 0 | 0 | 0 | 0.030303 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
b2ee7bb9e1e534940d7b074131882e9a65dd526f | 7,315 | py | Python | tests/test_cache.py | Anagraph/tileserver | a8be93ea911ef5b6a9511e482af687971d18e6c6 | [
"MIT"
] | 69 | 2016-08-16T18:24:19.000Z | 2020-12-26T11:48:49.000Z | tests/test_cache.py | Anagraph/tileserver | a8be93ea911ef5b6a9511e482af687971d18e6c6 | [
"MIT"
] | 68 | 2016-06-27T17:51:05.000Z | 2020-10-05T15:40:35.000Z | tests/test_cache.py | Anagraph/tileserver | a8be93ea911ef5b6a9511e482af687971d18e6c6 | [
"MIT"
] | 29 | 2016-09-20T22:43:08.000Z | 2022-01-19T00:57:31.000Z | import unittest
class CacheHelperTests(unittest.TestCase):
    """Tests for the filesystem helper functions in tileserver.cache."""
    def test_clean_empty_parent_dirs(self):
        """Parent directories are pruned only once they become empty."""
        import os
        from tileserver.cache import clean_empty_parent_dirs, mkdir_p
        # The test creates and removes 'foo' in the current working
        # directory, so it must not exist beforehand.
        self.assertFalse(
            os.path.exists('foo'),
            'The test expects foo/ to not exist when it starts'
        )
        mkdir_p('foo/bar/baz')
        with open('foo/bar/baz/hello.txt', 'w') as f:
            f.write('hello world')
        # Non-empty ancestors must be left alone.
        clean_empty_parent_dirs('foo/bar/baz/hello.txt')
        self.assertTrue(
            os.path.exists('foo/bar/baz/hello.txt'),
            "The directory is not empty, so it should not have been deleted")
        os.remove('foo/bar/baz/hello.txt')
        # The second argument is the stop directory, which must survive.
        clean_empty_parent_dirs('foo/bar/baz', 'foo/bar')
        self.assertTrue(
            os.path.exists('foo/bar'),
            "Shouldn't have deleted the parent_dir")
        clean_empty_parent_dirs('foo/bar')
        self.assertFalse(
            os.path.exists('foo'),
            "Should have deleted everything")
class FileCacheTests(unittest.TestCase):
    """Tests for the filesystem-backed tile cache and its locking."""
    def test_obtain_lock(self):
        """A lock can be obtained and released without error."""
        from ModestMaps.Core import Coordinate
        from tileserver.cache import CacheKey, FileCache
        from tilequeue.format import lookup_format_by_extension
        coord = Coordinate(0, 0, 0)
        tile_size = 1
        layers = 'all'
        fmt = lookup_format_by_extension('mvt')
        cache_key = CacheKey(coord, tile_size, layers, fmt)
        c = FileCache('foo')
        try:
            c.obtain_lock(cache_key)
        finally:
            c.release_lock(cache_key)
    def test_obtain_lock_already_locked(self):
        """A second lock on the same key times out until released."""
        from ModestMaps.Core import Coordinate
        from tileserver.cache import CacheKey, FileCache, LockTimeout
        from tilequeue.format import lookup_format_by_extension
        coord = Coordinate(0, 0, 0)
        tile_size = 1
        layers = 'all'
        fmt = lookup_format_by_extension('mvt')
        cache_key = CacheKey(coord, tile_size, layers, fmt)
        c = FileCache('foo')
        try:
            # Obtain a lock from "client A"
            c.obtain_lock(cache_key)
            with self.assertRaises(LockTimeout):
                # Locking from "client B" should time out
                c.obtain_lock(cache_key, timeout=1)
        finally:
            c.release_lock(cache_key)
        # After releasing, obtaining the lock from "client A" should work
        c.obtain_lock(cache_key)
        c.release_lock(cache_key)
    def test_contextmanager_lock(self):
        """The lock() context manager acquires and times out correctly."""
        from ModestMaps.Core import Coordinate
        from tileserver.cache import CacheKey, FileCache, LockTimeout
        from tilequeue.format import lookup_format_by_extension
        coord = Coordinate(0, 0, 0)
        tile_size = 1
        layers = 'all'
        fmt = lookup_format_by_extension('mvt')
        cache_key = CacheKey(coord, tile_size, layers, fmt)
        c = FileCache('foo')
        # A plain 'ol lock should work without exception
        with c.lock(cache_key):
            pass
        with c.lock(cache_key):
            with self.assertRaises(LockTimeout):
                # A second lock on the same coord should time out
                with c.lock(cache_key, timeout=1):
                    pass
    def test_set_get(self):
        """Data stored with set() round-trips through get()."""
        import os
        from ModestMaps.Core import Coordinate
        from tileserver.cache import CacheKey, FileCache, \
            clean_empty_parent_dirs
        from tilequeue.format import lookup_format_by_extension
        coord = Coordinate(0, 0, 0)
        tile_size = 1
        layers = 'all'
        fmt = lookup_format_by_extension('mvt')
        cache_key = CacheKey(coord, tile_size, layers, fmt)
        tile_data = 'hello world'
        c = FileCache('foo')
        c.set(cache_key, tile_data)
        actual_data = c.get(cache_key)
        self.assertEquals(tile_data, actual_data)
        # Clean up the on-disk artifacts the cache created.
        key = c._generate_key('data', cache_key)
        os.remove(key)
        clean_empty_parent_dirs(os.path.dirname(key))
class MockRedis(object):
    """Minimal in-memory stand-in for a redis-py client.

    Implements only the subset of the client API used by RedisCache,
    backed by a plain dict.  Entries never expire.
    """
    def __init__(self):
        self._data = {}
    def set(self, key, data, ex=None):
        # ``ex`` (expiry in seconds) is accepted for API compatibility
        # but ignored by the mock.
        self._data[key] = data
    def get(self, key):
        # Like redis-py, a missing key yields None rather than KeyError.
        return self._data.get(key)
    def delete(self, key):
        del self._data[key]
    def setnx(self, key, data):
        # Bug fix: redis-py returns a boolean from SETNX, but the old
        # mock fell through and returned None (falsy) on a successful
        # set, contradicting the real client.
        if key in self._data:
            return False
        self.set(key, data)
        return True
    def getset(self, key, data):
        # Atomically (for the mock's purposes) replace the value and
        # return the previous one, or None if the key was absent.
        val = self._data.get(key)
        self._data[key] = data
        return val
class RedisCacheTests(unittest.TestCase):
    """Tests for the Redis-backed tile cache, using MockRedis."""
    def setUp(self):
        self.redis = MockRedis()
    def test_obtain_lock(self):
        """A lock can be obtained and released without error."""
        from ModestMaps.Core import Coordinate
        from tileserver.cache import CacheKey, RedisCache
        from tilequeue.format import lookup_format_by_extension
        coord = Coordinate(0, 0, 0)
        tile_size = 1
        layers = 'all'
        fmt = lookup_format_by_extension('mvt')
        cache_key = CacheKey(coord, tile_size, layers, fmt)
        c = RedisCache(self.redis)
        try:
            c.obtain_lock(cache_key)
        finally:
            c.release_lock(cache_key)
    def test_obtain_lock_already_locked(self):
        """A second lock on the same key times out until released."""
        from ModestMaps.Core import Coordinate
        from tileserver.cache import CacheKey, RedisCache, LockTimeout
        from tilequeue.format import lookup_format_by_extension
        coord = Coordinate(0, 0, 0)
        fmt = lookup_format_by_extension('mvt')
        tile_size = 1
        layers = 'all'
        cache_key = CacheKey(coord, tile_size, layers, fmt)
        c = RedisCache(self.redis)
        try:
            c.obtain_lock(cache_key)
            with self.assertRaises(LockTimeout):
                c.obtain_lock(cache_key, timeout=1)
        finally:
            c.release_lock(cache_key)
        # After releasing, the lock must be obtainable again.
        c.obtain_lock(cache_key)
    def test_contextmanager_lock(self):
        """The lock() context manager acquires and times out correctly."""
        from ModestMaps.Core import Coordinate
        from tileserver.cache import CacheKey, RedisCache, LockTimeout
        from tilequeue.format import lookup_format_by_extension
        coord = Coordinate(0, 0, 0)
        fmt = lookup_format_by_extension('mvt')
        tile_size = 1
        layers = 'all'
        cache_key = CacheKey(coord, tile_size, layers, fmt)
        c = RedisCache(self.redis)
        # A plain 'ol lock should work without exception
        with c.lock(cache_key):
            pass
        with c.lock(cache_key):
            with self.assertRaises(LockTimeout):
                # A second lock on the same coord should time out
                with c.lock(cache_key, timeout=1):
                    pass
    def test_set_get(self):
        """Data stored with set() round-trips through get()."""
        from ModestMaps.Core import Coordinate
        from tileserver.cache import CacheKey, RedisCache
        from tilequeue.format import lookup_format_by_extension
        coord = Coordinate(0, 0, 0)
        fmt = lookup_format_by_extension('mvt')
        tile_size = 1
        layers = 'all'
        cache_key = CacheKey(coord, tile_size, layers, fmt)
        tile_data = 'hello world'
        c = RedisCache(self.redis)
        c.set(cache_key, tile_data)
        actual_data = c.get(cache_key)
        self.assertEquals(tile_data, actual_data)
        # Remove the stored entry so the mock store is left clean.
        self.redis.delete(
            c._generate_key('data', cache_key))
| 31.12766 | 77 | 0.615174 | 907 | 7,315 | 4.762955 | 0.144432 | 0.061111 | 0.052778 | 0.085185 | 0.78287 | 0.764583 | 0.733565 | 0.703472 | 0.703472 | 0.684722 | 0 | 0.007029 | 0.299795 | 7,315 | 234 | 78 | 31.260684 | 0.836392 | 0.044156 | 0 | 0.740113 | 0 | 0 | 0.059135 | 0.012027 | 0 | 0 | 0 | 0 | 0.056497 | 1 | 0.090395 | false | 0.022599 | 0.158192 | 0.00565 | 0.288136 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
651f9572dafd9699ea991ce13877017ccc6e7728 | 159 | py | Python | apps/lightrfp/admin.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | apps/lightrfp/admin.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | apps/lightrfp/admin.py | ExpoAshique/ProveBanking__s | f0b45fffea74d00d14014be27aa50fe5f42f6903 | [
"MIT"
] | null | null | null | from django.contrib import admin
from vendors.models import RFP, Bid, Message
# Register the RFP workflow models so they appear in the Django admin.
for model in (RFP, Bid, Message):
    admin.site.register(model)
| 19.875 | 44 | 0.805031 | 24 | 159 | 5.333333 | 0.5 | 0.210938 | 0.398438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09434 | 159 | 7 | 45 | 22.714286 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
6533d6d260f2cb20d2e8eb1f586de91c458977e4 | 31 | py | Python | Game.py | MerhuBerahu/Python-RPG | 0f573efc051dce1448209d9701269df9e8058f22 | [
"MIT"
] | null | null | null | Game.py | MerhuBerahu/Python-RPG | 0f573efc051dce1448209d9701269df9e8058f22 | [
"MIT"
] | null | null | null | Game.py | MerhuBerahu/Python-RPG | 0f573efc051dce1448209d9701269df9e8058f22 | [
"MIT"
] | null | null | null | from characterCreation import * | 31 | 31 | 0.870968 | 3 | 31 | 9 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.096774 | 31 | 1 | 31 | 31 | 0.964286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6538f70a5b06efdd4eeac58218deab5303d882c8 | 137 | py | Python | xfel/metrology/__init__.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | xfel/metrology/__init__.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | xfel/metrology/__init__.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
import boost.python
ext = boost.python.import_ext("xfel_metrology_ext")
from xfel_metrology_ext import *
| 27.4 | 51 | 0.839416 | 20 | 137 | 5.3 | 0.45 | 0.207547 | 0.301887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.094891 | 137 | 4 | 52 | 34.25 | 0.854839 | 0 | 0 | 0 | 0 | 0 | 0.131387 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
654455b8cd5ada0238c866fedfafea358ce5fee8 | 47 | py | Python | src_files/models/swin/__init__.py | likelyzhao/ImageNet21K | cc92808db537836f3c085db9f22dec668d242c77 | [
"MIT"
] | null | null | null | src_files/models/swin/__init__.py | likelyzhao/ImageNet21K | cc92808db537836f3c085db9f22dec668d242c77 | [
"MIT"
] | null | null | null | src_files/models/swin/__init__.py | likelyzhao/ImageNet21K | cc92808db537836f3c085db9f22dec668d242c77 | [
"MIT"
] | null | null | null | from .build import build_swin_transformer_model | 47 | 47 | 0.914894 | 7 | 47 | 5.714286 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06383 | 47 | 1 | 47 | 47 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e8f7d4b6294efc58d2c4e5c6b4e321b28c06a82b | 32 | py | Python | easy_python/__init__.py | PinaYTTT/easy_python | e7ca0d08c808b493d7fd5a95967b7ad19160fd6e | [
"MIT"
] | null | null | null | easy_python/__init__.py | PinaYTTT/easy_python | e7ca0d08c808b493d7fd5a95967b7ad19160fd6e | [
"MIT"
] | null | null | null | easy_python/__init__.py | PinaYTTT/easy_python | e7ca0d08c808b493d7fd5a95967b7ad19160fd6e | [
"MIT"
] | null | null | null | from easy_python.easy import *
| 10.666667 | 30 | 0.78125 | 5 | 32 | 4.8 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15625 | 32 | 2 | 31 | 16 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
336d95374775c3363814edd03f0e7909b053eda3 | 394 | py | Python | discoveryservice/registry.py | pwgn/microtut | cd4e843806570affdfe11e0ce219269417bc8bde | [
"MIT"
] | null | null | null | discoveryservice/registry.py | pwgn/microtut | cd4e843806570affdfe11e0ce219269417bc8bde | [
"MIT"
] | null | null | null | discoveryservice/registry.py | pwgn/microtut | cd4e843806570affdfe11e0ce219269417bc8bde | [
"MIT"
] | null | null | null |
class Registry():
    """Simple in-memory registry mapping service ids to service records."""

    def __init__(self):
        # Backing store: service_id -> service payload.
        self.services = {}

    def get_service(self, service_id):
        """Return the service registered under *service_id* (KeyError if absent)."""
        return self.services[service_id]

    def list_services(self):
        """Return the full service_id -> service mapping."""
        return self.services

    def register_service(self, service_id, service):
        """Add *service* under *service_id*, replacing any existing entry."""
        self.services[service_id] = service

    def deregister_service(self, service_id):
        """Remove the entry for *service_id* (KeyError if absent)."""
        del self.services[service_id]
| 21.888889 | 52 | 0.667513 | 48 | 394 | 5.1875 | 0.291667 | 0.216867 | 0.216867 | 0.240964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.236041 | 394 | 17 | 53 | 23.176471 | 0.827243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.454545 | false | 0 | 0 | 0.181818 | 0.727273 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
68268feb1e632499967805aabe4ec394521c37d6 | 46 | py | Python | application/imagenet_example/PTQ/ptq/models/layer/__init__.py | www516717402/MQBench | 8519ae6db18c3d09ec5dcc50a7e76349738ee819 | [
"Apache-2.0"
] | 179 | 2021-09-22T08:44:51.000Z | 2022-03-31T08:09:43.000Z | application/imagenet_example/PTQ/ptq/models/layer/__init__.py | www516717402/MQBench | 8519ae6db18c3d09ec5dcc50a7e76349738ee819 | [
"Apache-2.0"
] | 46 | 2021-09-29T03:04:30.000Z | 2022-03-31T11:53:23.000Z | application/imagenet_example/PTQ/ptq/models/layer/__init__.py | www516717402/MQBench | 8519ae6db18c3d09ec5dcc50a7e76349738ee819 | [
"Apache-2.0"
] | 42 | 2021-09-24T16:08:26.000Z | 2022-03-30T10:21:34.000Z | from .drop_path import DropPath # noqa: F401
| 23 | 45 | 0.76087 | 7 | 46 | 4.857143 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078947 | 0.173913 | 46 | 1 | 46 | 46 | 0.815789 | 0.217391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6839f3233081471702662731c7c0eac8c1e85e3b | 79 | py | Python | fuel_additive/errors.py | skdong/fuel-additive | a0ce9516ee7510a1ed02264a775cb50b35b84b48 | [
"Apache-2.0"
] | null | null | null | fuel_additive/errors.py | skdong/fuel-additive | a0ce9516ee7510a1ed02264a775cb50b35b84b48 | [
"Apache-2.0"
] | null | null | null | fuel_additive/errors.py | skdong/fuel-additive | a0ce9516ee7510a1ed02264a775cb50b35b84b48 | [
"Apache-2.0"
] | null | null | null | from fuel_agent import errors
class ReopTypeError(errors.BaseError):
    # Project-specific error type built on fuel_agent's BaseError.
    # NOTE(review): the name looks like a typo for "RepoTypeError" — confirm
    # with callers before renaming, since the name is part of the public API.
    pass
6870ff1781737ed500e3267674788a96bd8730cd | 7,413 | py | Python | tests/preprocessing/test_bam.py | aristoteleo/dynast-release | b676c7075b2907ebce7833dd668f32bc1c60d08e | [
"MIT"
] | 7 | 2021-04-03T16:36:17.000Z | 2022-03-15T21:27:41.000Z | tests/preprocessing/test_bam.py | aristoteleo/dynast-release | b676c7075b2907ebce7833dd668f32bc1c60d08e | [
"MIT"
] | 5 | 2021-04-28T15:25:45.000Z | 2021-12-09T00:05:07.000Z | tests/preprocessing/test_bam.py | aristoteleo/dynast-release | b676c7075b2907ebce7833dd668f32bc1c60d08e | [
"MIT"
] | 3 | 2021-04-03T23:38:32.000Z | 2022-03-03T11:06:17.000Z | import os
from unittest import mock, TestCase
import pandas as pd
import dynast.preprocessing.bam as bam
import dynast.utils as utils
from .. import mixins
class TestBam(mixins.TestMixin, TestCase):
    """Tests for the BAM parsing helpers in dynast.preprocessing.bam.

    Fixture paths (umi_*, paired_*, nasc_*) and temp_dir are provided by
    mixins.TestMixin; parse_all_reads outputs are compared byte-for-byte
    against pre-generated fixture files.
    """

    @classmethod
    def setUpClass(cls):
        super(TestBam, cls).setUpClass()
        # Gene/transcript metadata consumed by bam.parse_all_reads.
        cls.gene_infos = utils.read_pickle(cls.genes_path)
        cls.transcript_infos = utils.read_pickle(cls.transcripts_path)

    def test_read_alignments(self):
        # Smoke test: reading the alignments fixture must not raise.
        bam.read_alignments(self.umi_alignments_path)

    def test_read_conversions(self):
        # Smoke test: reading the conversions fixture must not raise.
        bam.read_conversions(self.umi_conversions_path)

    def test_select_alignments(self):
        df = bam.read_alignments(self.umi_alignments_path)
        result = bam.select_alignments(df)
        df_indexed = df.set_index(['read_id', 'index'])
        df_deduplicated = df_indexed[df_indexed.index.isin(result)].reset_index()
        # Selected alignments must not all be duplicates on (barcode, umi, GX).
        self.assertFalse(df_deduplicated.duplicated(['barcode', 'umi', 'GX'], keep=False).all())

    def test_select_alignments_paired(self):
        df = bam.read_alignments(self.umi_alignments_path)
        result = bam.select_alignments(df)
        df_indexed = df.set_index(['read_id', 'index'])
        df_deduplicated = df_indexed[df_indexed.index.isin(result)].reset_index()
        # Selected alignments must not all be duplicates on read_id.
        self.assertFalse(df_deduplicated.duplicated('read_id', keep=False).all())

    def test_parse_all_reads(self):
        # Progress display is patched out so the parse runs silently.
        with mock.patch('dynast.preprocessing.bam.utils.display_progress_with_counter'):
            conversions_path = os.path.join(self.temp_dir, 'conversions.csv')
            index_path = os.path.join(self.temp_dir, 'conversions.idx')
            alignments_path = os.path.join(self.temp_dir, 'alignments.csv')
            self.assertEqual((conversions_path, alignments_path, index_path),
                             bam.parse_all_reads(
                                 self.umi_bam_path,
                                 conversions_path,
                                 alignments_path,
                                 index_path,
                                 self.gene_infos,
                                 self.transcript_infos,
                                 strand='forward',
                                 umi_tag='UB',
                                 barcode_tag='CB',
                                 gene_tag='GX',
                                 barcodes=None,
                                 n_threads=1,
                                 temp_dir=self.temp_dir,
                                 nasc=False,
                                 velocity=True
                             ))
            # Outputs must match the pre-generated fixtures exactly.
            self.assertTrue(mixins.files_equal(self.umi_conversions_path, conversions_path))
            self.assertEqual(utils.read_pickle(self.umi_conversions_index_path), utils.read_pickle(index_path))
            self.assertTrue(mixins.files_equal(self.umi_alignments_path, alignments_path))

    def test_parse_all_reads_no_velocity(self):
        with mock.patch('dynast.preprocessing.bam.utils.display_progress_with_counter'):
            conversions_path = os.path.join(self.temp_dir, 'conversions.csv')
            index_path = os.path.join(self.temp_dir, 'conversions.idx')
            alignments_path = os.path.join(self.temp_dir, 'alignments.csv')
            self.assertEqual((conversions_path, alignments_path, index_path),
                             bam.parse_all_reads(
                                 self.umi_bam_path,
                                 conversions_path,
                                 alignments_path,
                                 index_path,
                                 self.gene_infos,
                                 self.transcript_infos,
                                 strand='forward',
                                 umi_tag='UB',
                                 barcode_tag='CB',
                                 gene_tag='GX',
                                 barcodes=None,
                                 n_threads=1,
                                 temp_dir=self.temp_dir,
                                 nasc=False,
                                 velocity=False
                             ))
            # With velocity disabled, every alignment is left 'unassigned'.
            self.assertEqual(['unassigned'], list(pd.read_csv(alignments_path)['velocity'].unique()))

    def test_parse_all_reads_paired(self):
        with mock.patch('dynast.preprocessing.bam.utils.display_progress_with_counter'):
            conversions_path = os.path.join(self.temp_dir, 'conversions.csv')
            index_path = os.path.join(self.temp_dir, 'conversions.idx')
            alignments_path = os.path.join(self.temp_dir, 'alignments.csv')
            self.assertEqual((conversions_path, alignments_path, index_path),
                             bam.parse_all_reads(
                                 self.paired_bam_path,
                                 conversions_path,
                                 alignments_path,
                                 index_path,
                                 self.gene_infos,
                                 self.transcript_infos,
                                 strand='unstranded',
                                 umi_tag=None,
                                 barcode_tag='RG',
                                 gene_tag='GX',
                                 barcodes=None,
                                 n_threads=1,
                                 temp_dir=self.temp_dir,
                                 nasc=False,
                                 velocity=False
                             ))
            self.assertTrue(mixins.files_equal(self.paired_conversions_path, conversions_path))
            self.assertEqual(utils.read_pickle(self.paired_conversions_index_path), utils.read_pickle(index_path))
            self.assertTrue(mixins.files_equal(self.paired_alignments_path, alignments_path))

    def test_parse_all_reads_nasc(self):
        with mock.patch('dynast.preprocessing.bam.utils.display_progress_with_counter'):
            conversions_path = os.path.join(self.temp_dir, 'conversions.csv')
            index_path = os.path.join(self.temp_dir, 'conversions.idx')
            alignments_path = os.path.join(self.temp_dir, 'alignments.csv')
            self.assertEqual((conversions_path, alignments_path, index_path),
                             bam.parse_all_reads(
                                 self.nasc_bam_path,
                                 conversions_path,
                                 alignments_path,
                                 index_path,
                                 self.gene_infos,
                                 self.transcript_infos,
                                 strand='forward',
                                 umi_tag=None,
                                 barcode_tag='RG',
                                 gene_tag='GX',
                                 barcodes=None,
                                 n_threads=1,
                                 temp_dir=self.temp_dir,
                                 nasc=True,
                                 velocity=False
                             ))
            self.assertTrue(mixins.files_equal(self.nasc_conversions_path, conversions_path))
            self.assertEqual(utils.read_pickle(self.nasc_conversions_index_path), utils.read_pickle(index_path))
            self.assertTrue(mixins.files_equal(self.nasc_alignments_path, alignments_path))
| 48.769737 | 114 | 0.517065 | 702 | 7,413 | 5.155271 | 0.125356 | 0.085106 | 0.048632 | 0.046422 | 0.844432 | 0.812103 | 0.812103 | 0.785023 | 0.763747 | 0.73722 | 0 | 0.000902 | 0.401996 | 7,413 | 151 | 115 | 49.092715 | 0.815475 | 0 | 0 | 0.7 | 0 | 0 | 0.071226 | 0.032376 | 0 | 0 | 0 | 0 | 0.123077 | 1 | 0.069231 | false | 0 | 0.046154 | 0 | 0.123077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d7a577a6cf4b8d9cc92730e9f2fdba499e0ca5ef | 118 | py | Python | handleExcel/exportor/scripts/xlrd-1.0.0/tests/base.py | Jack301/toolkit | 2d1e37a36281af7722be93d4d5299c1ce8b7f365 | [
"MIT"
] | 95 | 2016-09-30T02:55:26.000Z | 2022-01-12T05:47:30.000Z | handleExcel/exportor/scripts/xlrd-1.0.0/tests/base.py | Jack301/toolkit | 2d1e37a36281af7722be93d4d5299c1ce8b7f365 | [
"MIT"
] | 5 | 2019-06-27T19:02:18.000Z | 2019-08-07T07:16:49.000Z | handleExcel/exportor/scripts/xlrd-1.0.0/tests/base.py | Jack301/toolkit | 2d1e37a36281af7722be93d4d5299c1ce8b7f365 | [
"MIT"
] | 32 | 2017-03-28T06:45:09.000Z | 2021-12-21T10:33:10.000Z | import os
def from_this_dir(filename):
    """Return *filename* resolved against this module's own directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, filename)
| 23.6 | 77 | 0.762712 | 19 | 118 | 4.421053 | 0.684211 | 0.214286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101695 | 118 | 4 | 78 | 29.5 | 0.792453 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 6 |
d7f3233648860c6fa3057bff030483c47d61281c | 10,509 | py | Python | misc/machinelearningbootcamp/day1/visplots.py | tmjnow/MoocX | 52c8450ff7ecc8450a8adc2457233d5777a3d5bb | [
"MIT"
] | 7 | 2017-06-13T05:24:15.000Z | 2022-01-09T01:10:28.000Z | misc/machinelearningbootcamp/day1/visplots.py | tmjnow/MoocX | 52c8450ff7ecc8450a8adc2457233d5777a3d5bb | [
"MIT"
] | 11 | 2017-05-08T23:30:50.000Z | 2017-06-24T21:57:42.000Z | misc/machinelearningbootcamp/day1/visplots.py | kinshuk4/MoocX | 52c8450ff7ecc8450a8adc2457233d5777a3d5bb | [
"MIT"
] | 4 | 2017-10-05T12:56:53.000Z | 2020-06-14T17:01:32.000Z | import numpy as np
import plotly.graph_objs as go
from plotly.graph_objs import *
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
def knnDecisionPlot(XTrain, yTrain, XTest, yTest, header, n_neighbors, weights = "uniform"):
    """Plot the 2D decision surface of a k-NN classifier with train/test points.

    Fits KNeighborsClassifier on the first two columns of XTrain, draws the
    predicted-class regions as a filled contour and overlays the points:
    class 0 = 'high quality' (blue circles), class 1 = 'low quality'
    (red symbol-4 markers), for both the test and training sets.

    Args:
        XTrain, yTrain: training features/labels (only XTrain[:, :2] is fit).
        XTest, yTest: test features/labels (first two columns are plotted).
        header: column names; header[0]/header[1] become the axis titles.
        n_neighbors: k passed to KNeighborsClassifier.
        weights: k-NN weighting scheme ('uniform' or 'distance').
    """
    Xtrain = XTrain[:, :2]
    h = .02  # step size in the mesh

    clf = KNeighborsClassifier(n_neighbors, weights)
    clf.fit(Xtrain, yTrain)

    # Evaluate the classifier over a dense grid padded 1 unit beyond the data.
    x_min, x_max = Xtrain[:, 0].min() - 1, Xtrain[:, 0].max() + 1
    y_min, y_max = Xtrain[:, 1].min() - 1, Xtrain[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    trace1 = go.Contour(
        x=np.arange(x_min, x_max, h),
        y=np.arange(y_min, y_max, h),
        z=Z.reshape(xx.shape),
        showscale=False,
        opacity=0.8,
        line=dict(width=1, color='black'),
        colorscale=[[0, '#AAAAFF'], [1, '#FFAAAA']],  # custom colorscale
    )

    def class_scatter(X, y, label, color, name, symbol=None):
        # Scatter trace for the points of a single class (first two columns).
        marker = dict(color=color, line=dict(width=0.9))
        if symbol is not None:
            marker['symbol'] = symbol
        return go.Scatter(x=X[y == label, 0], y=X[y == label, 1],
                          mode='markers', marker=Marker(**marker), name=name)

    trace2 = class_scatter(XTest, yTest, 0, '#0000FF', 'high quality (test)')
    trace3 = class_scatter(XTest, yTest, 1, '#FF0000', 'low quality (test)', symbol=4)
    # BUG FIX: the 'high quality (train)' trace previously selected yTrain == 1,
    # duplicating the low-quality points; class 0 is the high-quality class.
    trace4 = class_scatter(XTrain, yTrain, 0, '#0000FF', 'high quality (train)')
    trace5 = class_scatter(XTrain, yTrain, 1, '#FF0000', 'low quality (train)', symbol=4)

    layout = go.Layout(
        title="2-Class Classification (k = %i, weights = '%s')" % (n_neighbors, weights),
        xaxis=dict(title=header[0]),
        yaxis=dict(title=header[1]),
        showlegend=True,
        autosize=False,
        width=700,
        height=500,
        margin=Margin(l=50, r=50, b=100, t=50, pad=4),
    )

    data = [trace1, trace2, trace3, trace4, trace5]
    fig = dict(data=data, layout=layout)
    iplot(fig)
def dtDecisionPlot(XTrain, yTrain, XTest, yTest, header, max_depth=10):
    """Plot the 2D decision surface of a decision tree with train/test points.

    Fits DecisionTreeClassifier on the first two columns of XTrain, draws the
    predicted-class regions as a filled contour and overlays the points:
    class 0 = 'high quality' (blue circles), class 1 = 'low quality'
    (red symbol-4 markers), for both the test and training sets.

    Args:
        XTrain, yTrain: training features/labels (only XTrain[:, :2] is fit).
        XTest, yTest: test features/labels (first two columns are plotted).
        header: column names; header[0]/header[1] become the axis titles.
        max_depth: depth limit passed to DecisionTreeClassifier.
    """
    Xtrain = XTrain[:, :2]
    h = .02  # step size in the mesh

    clf = DecisionTreeClassifier(max_depth=max_depth)
    clf.fit(Xtrain, yTrain)

    # Evaluate the classifier over a dense grid padded 1 unit beyond the data.
    x_min, x_max = Xtrain[:, 0].min() - 1, Xtrain[:, 0].max() + 1
    y_min, y_max = Xtrain[:, 1].min() - 1, Xtrain[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    trace1 = go.Contour(
        x=np.arange(x_min, x_max, h),
        y=np.arange(y_min, y_max, h),
        z=Z.reshape(xx.shape),
        showscale=False,
        opacity=0.8,
        # NOTE(review): Contour.xaxis/yaxis normally expect axis ids ('x', 'x2'),
        # not column names; kept as in the original — confirm against plotly version.
        xaxis=header[0],
        yaxis=header[1],
        line=dict(width=1, color='black'),
        colorscale=[[0, '#AAAAFF'], [1, '#FFAAAA']],  # custom colorscale
    )

    def class_scatter(X, y, label, color, name, symbol=None):
        # Scatter trace for the points of a single class (first two columns).
        marker = dict(color=color, line=dict(width=0.9))
        if symbol is not None:
            marker['symbol'] = symbol
        return go.Scatter(x=X[y == label, 0], y=X[y == label, 1],
                          mode='markers', marker=Marker(**marker), name=name)

    trace2 = class_scatter(XTest, yTest, 0, '#0000FF', 'high quality (test)')
    trace3 = class_scatter(XTest, yTest, 1, '#FF0000', 'low quality (test)', symbol=4)
    # BUG FIX: the 'high quality (train)' trace previously selected yTrain == 1,
    # duplicating the low-quality points; class 0 is the high-quality class.
    trace4 = class_scatter(XTrain, yTrain, 0, '#0000FF', 'high quality (train)')
    trace5 = class_scatter(XTrain, yTrain, 1, '#FF0000', 'low quality (train)', symbol=4)

    layout = go.Layout(
        title="2-Class classification Decision Trees",
        xaxis=dict(title=header[0]),
        yaxis=dict(title=header[1]),
        showlegend=True,
        autosize=False,
        width=700,
        height=500,
        margin=Margin(l=50, r=50, b=100, t=50, pad=4),
    )

    data = [trace1, trace2, trace3, trace4, trace5]
    fig = dict(data=data, layout=layout)
    iplot(fig)
def rfDecisionPlot(XTrain, yTrain, XTest, yTest, header, n_estimators=10):
    """Plot the 2D decision surface of a random forest with train/test points.

    Fits RandomForestClassifier on the first two columns of XTrain, draws the
    predicted-class regions as a filled contour and overlays the points:
    class 0 = 'high quality' (blue circles), class 1 = 'low quality'
    (red symbol-4 markers), for both the test and training sets.

    Args:
        XTrain, yTrain: training features/labels (only XTrain[:, :2] is fit).
        XTest, yTest: test features/labels (first two columns are plotted).
        header: column names; header[0]/header[1] become the axis titles.
        n_estimators: number of trees (random_state fixed to 1 for repeatability).
    """
    Xtrain = XTrain[:, :2]
    h = .02  # step size in the mesh

    clf = RandomForestClassifier(n_estimators=n_estimators, random_state=1)
    clf.fit(Xtrain, yTrain)

    # Evaluate the classifier over a dense grid padded 1 unit beyond the data.
    x_min, x_max = Xtrain[:, 0].min() - 1, Xtrain[:, 0].max() + 1
    y_min, y_max = Xtrain[:, 1].min() - 1, Xtrain[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    trace1 = go.Contour(
        x=np.arange(x_min, x_max, h),
        y=np.arange(y_min, y_max, h),
        z=Z.reshape(xx.shape),
        showscale=False,
        opacity=0.8,
        # NOTE(review): Contour.xaxis/yaxis normally expect axis ids ('x', 'x2'),
        # not column names; kept as in the original — confirm against plotly version.
        xaxis=header[0],
        yaxis=header[1],
        line=dict(width=1, color='black'),
        colorscale=[[0, '#AAAAFF'], [1, '#FFAAAA']],  # custom colorscale
    )

    def class_scatter(X, y, label, color, name, symbol=None):
        # Scatter trace for the points of a single class (first two columns).
        marker = dict(color=color, line=dict(width=0.9))
        if symbol is not None:
            marker['symbol'] = symbol
        return go.Scatter(x=X[y == label, 0], y=X[y == label, 1],
                          mode='markers', marker=Marker(**marker), name=name)

    trace2 = class_scatter(XTest, yTest, 0, '#0000FF', 'high quality (test)')
    trace3 = class_scatter(XTest, yTest, 1, '#FF0000', 'low quality (test)', symbol=4)
    # BUG FIX: the 'high quality (train)' trace previously selected yTrain == 1,
    # duplicating the low-quality points; class 0 is the high-quality class.
    trace4 = class_scatter(XTrain, yTrain, 0, '#0000FF', 'high quality (train)')
    trace5 = class_scatter(XTrain, yTrain, 1, '#FF0000', 'low quality (train)', symbol=4)

    layout = go.Layout(
        title="2-Class classification Random Forests",
        xaxis=dict(title=header[0]),
        yaxis=dict(title=header[1]),
        showlegend=True,
        autosize=False,
        width=700,
        height=500,
        margin=Margin(l=50, r=50, b=100, t=100, pad=4),
    )

    data = [trace1, trace2, trace3, trace4, trace5]
    fig = dict(data=data, layout=layout)
    iplot(fig)
def rfAvgAcc(rfModel, XTest, yTest):
    """Plot majority-vote accuracy of a fitted random forest as trees accumulate.

    For each prefix of rfModel.estimators_ (first 1 tree, first 2 trees, ...)
    the per-sample majority vote is computed and scored against yTest, and the
    resulting accuracy curve is rendered with plotly.
    """
    preds = []
    avgPred = []
    df = []
    # Collect each tree's predictions; df becomes a (n_trees, n_samples) array.
    for i,tree in enumerate(rfModel.estimators_):
        predTree = tree.predict(XTest)
        accTree = round(metrics.accuracy_score(yTest, predTree),2)
        preds.append(accTree)  # per-tree accuracy (collected but not plotted)
        if i==0:
            df = predTree
        else:
            df = np.vstack((df,predTree))
    # For each ensemble size j (1..n_trees) take the per-sample majority vote.
    for j in np.arange(df.shape[0]):
        j=j+1  # shift so the slice df[:j] covers j trees, not j-1
        mv = []
        for i in np.arange(df.shape[1]):
            # Most frequent predicted label among the first j trees for sample i.
            (values,counts) = np.unique(df[:j,i],return_counts=True)
            ind=np.argmax(counts)
            mv.append(values[ind].astype(int))
        avgPred.append(metrics.accuracy_score(yTest, mv))
    trace = go.Scatter(
        y=avgPred,
        x=np.arange(df.shape[0]),
        mode='markers+lines',
        name = "Ensemble accuracy trend"
    )
    layout = go.Layout(
        title = "Ensemble accuracy over increasing number of trees",
        xaxis = dict(title = "Number of trees", nticks = 15),
        yaxis = dict(title = "Accuracy"),
        showlegend=False,
        autosize=False,
        width=1000,
        height=500,
        margin=Margin(
            l=70,
            r=50,
            b=100,
            t=50,
            pad=4
        ),
    )
    data = [trace]
    fig = dict(data=data, layout=layout)
    iplot(fig)
| 26.537879 | 92 | 0.473118 | 1,235 | 10,509 | 3.978947 | 0.138462 | 0.043956 | 0.039683 | 0.043956 | 0.771266 | 0.750712 | 0.738909 | 0.738909 | 0.731787 | 0.728124 | 0 | 0.059132 | 0.382053 | 10,509 | 395 | 93 | 26.605063 | 0.697567 | 0.07622 | 0 | 0.767584 | 0 | 0 | 0.071288 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012232 | false | 0 | 0.024465 | 0 | 0.036697 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
cc262d9686e78479c36b780e4aa5e3ad0f970cf5 | 187 | py | Python | gbbox/__init__.py | Luqqk/geojson_bbox | 9a3215b8044f8489f58fea066365b19c6f67f3c8 | [
"MIT"
] | 5 | 2018-11-15T09:39:32.000Z | 2022-02-13T14:10:39.000Z | gbbox/__init__.py | Luqqk/geojson_bbox | 9a3215b8044f8489f58fea066365b19c6f67f3c8 | [
"MIT"
] | null | null | null | gbbox/__init__.py | Luqqk/geojson_bbox | 9a3215b8044f8489f58fea066365b19c6f67f3c8 | [
"MIT"
] | null | null | null | from .objects import (
Point,
LineString,
MultiLineString,
Polygon,
GeometryCollection,
)
__all__ = [Point, LineString, MultiLineString, Polygon, GeometryCollection]
| 18.7 | 75 | 0.716578 | 14 | 187 | 9.285714 | 0.642857 | 0.230769 | 0.461538 | 0.569231 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.203209 | 187 | 9 | 76 | 20.777778 | 0.872483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
0408054b32c25e7b418f770c685f219abacc06f0 | 26 | py | Python | funniest/__init__.py | dmartinalbo/testing-travisci | 46253b7a980935da3d966e13d658913a719c65ce | [
"MIT"
] | null | null | null | funniest/__init__.py | dmartinalbo/testing-travisci | 46253b7a980935da3d966e13d658913a719c65ce | [
"MIT"
] | null | null | null | funniest/__init__.py | dmartinalbo/testing-travisci | 46253b7a980935da3d966e13d658913a719c65ce | [
"MIT"
] | 2 | 2020-05-18T09:25:06.000Z | 2020-05-18T09:26:28.000Z | from funniest import joke
| 13 | 25 | 0.846154 | 4 | 26 | 5.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 26 | 1 | 26 | 26 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f097d3fe4274893cd45b76b4e8b96b4da65698f6 | 29 | py | Python | python/submodules/foo/bar/__init__.py | robotlightsyou/test | 015f13943fc402d8ce86c5f6d2f5a7d032b3340a | [
"MIT"
] | 2 | 2019-05-26T15:09:34.000Z | 2021-09-12T08:01:23.000Z | python/submodules/foo/bar/__init__.py | robotlightsyou/test | 015f13943fc402d8ce86c5f6d2f5a7d032b3340a | [
"MIT"
] | null | null | null | python/submodules/foo/bar/__init__.py | robotlightsyou/test | 015f13943fc402d8ce86c5f6d2f5a7d032b3340a | [
"MIT"
] | 1 | 2021-04-11T20:28:21.000Z | 2021-04-11T20:28:21.000Z | print('foo/bar/__init__.py')
| 14.5 | 28 | 0.724138 | 5 | 29 | 3.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 29 | 1 | 29 | 29 | 0.607143 | 0 | 0 | 0 | 0 | 0 | 0.655172 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
f0a2f8287f665e877a416bdd9fe7a8e5755b5266 | 89,518 | py | Python | dev/Gems/CloudGemFramework/v1/AWS/lambda-code/ProjectResourceHandler/test/test_AccessControlResourceHandler.py | stickyparticles/lumberyard | dc523dd780f3cd1874251181b7cf6848b8db9959 | [
"AML"
] | 2 | 2018-03-29T10:56:36.000Z | 2020-12-12T15:28:14.000Z | dev/Gems/CloudGemFramework/v1/AWS/lambda-code/ProjectResourceHandler/test/test_AccessControlResourceHandler.py | JulianoCristian/Lumberyard-3 | dc523dd780f3cd1874251181b7cf6848b8db9959 | [
"AML"
] | null | null | null | dev/Gems/CloudGemFramework/v1/AWS/lambda-code/ProjectResourceHandler/test/test_AccessControlResourceHandler.py | JulianoCristian/Lumberyard-3 | dc523dd780f3cd1874251181b7cf6848b8db9959 | [
"AML"
] | 3 | 2019-05-13T09:41:33.000Z | 2021-04-09T12:12:38.000Z | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #3 $
import unittest
import mock
import json
from resource_manager_common.test import mock_aws
from test_case import ResourceHandlerTestCase
from resource_manager_common import stack_info
import AccessControlResourceHandler
def merge_dicts(*args):
    """Merge any number of mappings left-to-right into a new dict.

    Later arguments override earlier ones on key clashes; the inputs are
    not modified.
    """
    combined = {}
    for mapping in args:
        combined.update(mapping)
    return combined
class AccessControlResourceHandlerTestCase(ResourceHandlerTestCase):
    """Shared fixtures and helpers for the AccessControlResourceHandler tests."""

    def __init__(self, *args, **kwargs):
        super(AccessControlResourceHandlerTestCase, self).__init__(*args, **kwargs)

    def make_problem_reporting_side_effect(self, problems_caused, return_values = None):
        """Build a mock side_effect that appends one entry of *problems_caused*
        (and returns the matching *return_values* entry, if given) per call."""
        index = [0] # in list to avoid reference before assignment error in side effect function below
        def side_effect(*args):
            # assumes that the last argument is the "problems" list to which problems, if given, will be added
            if problems_caused and index[0] < len(problems_caused):
                problems = args[len(args)-1]
                problems.append(problems_caused[index[0]])
            if return_values:
                return_value = return_values[index[0]]
            else:
                return_value = None
            index[0] = index[0] + 1
            return return_value
        return side_effect

    # Matcher: equal to any AccessControlResourceHandler.ProblemList instance.
    ANY_PROBLEM_LIST = ResourceHandlerTestCase.AnyInstance(AccessControlResourceHandler.ProblemList)
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_handler(AccessControlResourceHandlerTestCase):
@mock.patch('custom_resource_response.succeed')
@mock.patch('resource_manager_common.stack_info.get_stack_info')
@mock.patch('AccessControlResourceHandler._apply_resource_group_access_control')
def __do_stack_type_resource_group_test(self,
request_type,
problems,
mock_apply_resource_group_access_control,
mock_get_stack_info,
mock_succeed):
event = self.make_event(request_type)
resource_group = mock_get_stack_info.return_value
resource_group.stack_type = resource_group.STACK_TYPE_RESOURCE_GROUP
mock_apply_resource_group_access_control.side_effect = self.make_problem_reporting_side_effect(problems)
if problems:
with self.assertRaises(RuntimeError):
AccessControlResourceHandler.handler(event, self.CONTEXT)
else:
AccessControlResourceHandler.handler(event, self.CONTEXT)
mock_get_stack_info.assert_called_with(self.STACK_ARN)
mock_apply_resource_group_access_control.assert_called_with(request_type, resource_group, self.ANY_PROBLEM_LIST)
if problems:
mock_succeed.assert_not_called()
else:
mock_succeed.assert_called_with(event, self.CONTEXT, {}, self.PHYSICAL_RESOURCE_ID)
def test_stack_type_resource_group_create_with_no_problems(self):
self.__do_stack_type_resource_group_test('Create', [])
def test_stack_type_resource_group_update_with_no_problems(self):
self.__do_stack_type_resource_group_test('Update', [])
def test_stack_type_resource_group_delete_with_no_problems(self):
self.__do_stack_type_resource_group_test('Delete', [])
def test_stack_type_resource_group_create_with_problems(self):
self.__do_stack_type_resource_group_test('Create', ['Problem'])
def test_stack_type_resource_group_update_with_problems(self):
self.__do_stack_type_resource_group_test('Update', ['Problem'])
def test_stack_type_resource_group_delete_with_problems(self):
self.__do_stack_type_resource_group_test('Delete', ['Problem'])
@mock.patch('custom_resource_response.succeed')
@mock.patch('resource_manager_common.stack_info.get_stack_info')
@mock.patch('AccessControlResourceHandler._apply_deployment_access_control')
def __do_stack_type_deployment_access_test(self,
request_type,
problems,
mock_apply_deployment_access_control,
mock_get_stack_info,
mock_succeed):
event = self.make_event(request_type)
deployment_access = mock_get_stack_info.return_value
deployment_access.stack_type = deployment_access.STACK_TYPE_DEPLOYMENT_ACCESS
mock_apply_deployment_access_control.side_effect = self.make_problem_reporting_side_effect(problems)
if problems:
with self.assertRaises(RuntimeError):
AccessControlResourceHandler.handler(event, self.CONTEXT)
else:
AccessControlResourceHandler.handler(event, self.CONTEXT)
mock_get_stack_info.assert_called_with(self.STACK_ARN)
mock_apply_deployment_access_control.assert_called_with(request_type, deployment_access, self.ANY_PROBLEM_LIST)
if problems:
mock_succeed.assert_not_called()
else:
mock_succeed.assert_called_with(event, self.CONTEXT, {}, self.PHYSICAL_RESOURCE_ID)
def test_stack_type_deployment_access_create_with_no_problems(self):
self.__do_stack_type_deployment_access_test('Create', [])
def test_stack_type_deployment_access_update_with_no_problems(self):
self.__do_stack_type_deployment_access_test('Update', [])
def test_stack_type_deployment_access_delete_with_no_problems(self):
self.__do_stack_type_deployment_access_test('Delete', [])
def test_stack_type_deployment_access_create_with_problems(self):
self.__do_stack_type_deployment_access_test('Create', ['Problem'])
def test_stack_type_deployment_access_update_with_problems(self):
self.__do_stack_type_deployment_access_test('Update', ['Problem'])
def test_stack_type_deployment_access_delete_with_problems(self):
self.__do_stack_type_deployment_access_test('Delete', ['Problem'])
@mock.patch('custom_resource_response.succeed')
@mock.patch('resource_manager_common.stack_info.get_stack_info')
@mock.patch('AccessControlResourceHandler._apply_project_access_control')
def __do_stack_type_project_test(self,
request_type,
problems,
mock_apply_project_access_control,
mock_get_stack_info,
mock_succeed):
event = self.make_event(request_type)
project = mock_get_stack_info.return_value
project.stack_type = project.STACK_TYPE_PROJECT
mock_apply_project_access_control.side_effect = self.make_problem_reporting_side_effect(problems)
if problems:
with self.assertRaises(RuntimeError):
AccessControlResourceHandler.handler(event, self.CONTEXT)
else:
AccessControlResourceHandler.handler(event, self.CONTEXT)
mock_get_stack_info.assert_called_with(self.STACK_ARN)
mock_apply_project_access_control.assert_called_with(request_type, project, self.ANY_PROBLEM_LIST)
if problems:
mock_succeed.assert_not_called()
else:
mock_succeed.assert_called_with(event, self.CONTEXT, {}, self.PHYSICAL_RESOURCE_ID)
def test_stack_type_project_create_with_no_problems(self):
self.__do_stack_type_project_test('Create', [])
def test_stack_type_project_update_with_no_problems(self):
self.__do_stack_type_project_test('Update', [])
def test_stack_type_project_delete_with_no_problems(self):
self.__do_stack_type_project_test('Delete', [])
def test_stack_type_project_create_with_problems(self):
    # 'Create' on a project stack when a problem is reported.
    self.__do_stack_type_project_test('Create', ['Problem'])
def test_stack_type_project_update_with_problems(self):
    # 'Update' on a project stack when a problem is reported.
    self.__do_stack_type_project_test('Update', ['Problem'])
def test_stack_type_project_delete_with_problems(self):
    # 'Delete' on a project stack when a problem is reported.
    self.__do_stack_type_project_test('Delete', ['Problem'])
@mock.patch('custom_resource_response.succeed')
@mock.patch('resource_manager_common.stack_info.get_stack_info')
def __do_stack_type_deployment_test(self,
                                    request_type,
                                    mock_get_stack_info,
                                    mock_succeed):
    """A deployment-type stack is not a valid target: the handler must raise.

    For every request type, handling an event whose stack resolves to
    STACK_TYPE_DEPLOYMENT raises RuntimeError and never calls succeed().
    """
    event = self.make_event(request_type)
    stack = mock_get_stack_info.return_value
    stack.stack_type = stack.STACK_TYPE_DEPLOYMENT

    with self.assertRaises(RuntimeError):
        AccessControlResourceHandler.handler(event, self.CONTEXT)

    mock_succeed.assert_not_called()
    mock_get_stack_info.assert_called_with(self.STACK_ARN)
def test_stack_type_deployment_create(self):
    # 'Create' on a deployment stack must be rejected.
    self.__do_stack_type_deployment_test('Create')
def test_stack_type_deployment_update(self):
    # 'Update' on a deployment stack must be rejected.
    self.__do_stack_type_deployment_test('Update')
def test_stack_type_deployment_delete(self):
    # 'Delete' on a deployment stack must be rejected.
    self.__do_stack_type_deployment_test('Delete')
def test_unexpected_request_type(self):
    """An unrecognized request type causes the handler to raise RuntimeError."""
    event = self.make_event('Unexpected')
    self.assertRaises(RuntimeError, AccessControlResourceHandler.handler, event, self.CONTEXT)
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_apply_resource_group_access_control(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._apply_resource_group_access_control."""

    # Shared sentinel objects; the tests only compare them by identity.
    RESOURCE_GROUP = mock.MagicMock()
    REQUEST_TYPE = mock.MagicMock()
    RESOURCE_GROUP_ROLE_MAPPINGS = mock.MagicMock()
    DEPLOYMENT_ACCESS_ROLE_MAPPINGS = mock.MagicMock()
    PROJECT_ROLE_MAPPINGS = mock.MagicMock()

    @mock.patch('AccessControlResourceHandler._get_resource_group_policy_name')
    @mock.patch('AccessControlResourceHandler._get_permissions')
    @mock.patch('AccessControlResourceHandler._get_explicit_role_mappings')
    @mock.patch('AccessControlResourceHandler._get_implicit_role_mappings')
    @mock.patch('AccessControlResourceHandler._update_roles', return_value=True)
    def test_with_no_problems(self,
                              mock_update_roles,
                              mock_get_implicit_role_mappings,
                              mock_get_explicit_role_mappings,
                              mock_get_permissions,
                              mock_get_resource_group_policy_name):
        """Roles are updated with resource-group, deployment-access and project mappings."""
        mock_get_explicit_role_mappings.side_effect = [self.DEPLOYMENT_ACCESS_ROLE_MAPPINGS, self.PROJECT_ROLE_MAPPINGS]
        mock_get_implicit_role_mappings.side_effect = [self.RESOURCE_GROUP_ROLE_MAPPINGS]
        policy_name = mock_get_resource_group_policy_name.return_value
        permissions = mock_get_permissions.return_value
        expected_problems = []
        problems = AccessControlResourceHandler.ProblemList()

        AccessControlResourceHandler._apply_resource_group_access_control(self.REQUEST_TYPE, self.RESOURCE_GROUP, problems)

        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(len(problems), len(expected_problems))
        mock_get_resource_group_policy_name.assert_has_calls([
            mock.call(self.RESOURCE_GROUP)])
        mock_get_permissions.assert_has_calls([
            mock.call(self.RESOURCE_GROUP, problems)])
        mock_get_explicit_role_mappings.assert_has_calls([
            mock.call(self.RESOURCE_GROUP.deployment.deployment_access, problems),
            mock.call(self.RESOURCE_GROUP.deployment.project, problems)])
        mock_get_implicit_role_mappings.assert_has_calls([
            mock.call(self.RESOURCE_GROUP, problems)])
        mock_update_roles.assert_has_calls([
            mock.call(self.REQUEST_TYPE, policy_name, permissions, self.RESOURCE_GROUP_ROLE_MAPPINGS),
            mock.call(self.REQUEST_TYPE, policy_name, permissions, self.DEPLOYMENT_ACCESS_ROLE_MAPPINGS),
            mock.call(self.REQUEST_TYPE, policy_name, permissions, self.PROJECT_ROLE_MAPPINGS)])

    @mock.patch('AccessControlResourceHandler._get_resource_group_policy_name')
    @mock.patch('AccessControlResourceHandler._get_permissions')
    @mock.patch('AccessControlResourceHandler._get_explicit_role_mappings')
    @mock.patch('AccessControlResourceHandler._get_implicit_role_mappings')
    @mock.patch('AccessControlResourceHandler._update_roles', return_value=True)
    def test_with_no_deployment_access_stack(self,
                                             mock_update_roles,
                                             mock_get_implicit_role_mappings,
                                             mock_get_explicit_role_mappings,
                                             mock_get_permissions,
                                             mock_get_resource_group_policy_name):
        """Without a deployment-access stack, only project explicit mappings are fetched."""
        mock_get_explicit_role_mappings.side_effect = [self.PROJECT_ROLE_MAPPINGS]
        mock_get_implicit_role_mappings.side_effect = [self.RESOURCE_GROUP_ROLE_MAPPINGS]
        policy_name = mock_get_resource_group_policy_name.return_value
        permissions = mock_get_permissions.return_value
        expected_problems = []
        problems = AccessControlResourceHandler.ProblemList()
        resource_group = mock.MagicMock()
        resource_group.deployment.deployment_access = None

        AccessControlResourceHandler._apply_resource_group_access_control(self.REQUEST_TYPE, resource_group, problems)

        self.assertEqual(len(problems), len(expected_problems))
        mock_get_resource_group_policy_name.assert_has_calls([
            mock.call(resource_group)])
        mock_get_permissions.assert_has_calls([
            mock.call(resource_group, problems)])
        mock_get_explicit_role_mappings.assert_has_calls([
            mock.call(resource_group.deployment.project, problems)])
        # Exactly one explicit-mapping lookup: the deployment-access stack is absent.
        self.assertEqual(mock_get_explicit_role_mappings.call_count, 1)
        mock_get_implicit_role_mappings.assert_has_calls([
            mock.call(resource_group, problems)])
        mock_update_roles.assert_has_calls([
            mock.call(self.REQUEST_TYPE, policy_name, permissions, self.RESOURCE_GROUP_ROLE_MAPPINGS),
            mock.call(self.REQUEST_TYPE, policy_name, permissions, self.PROJECT_ROLE_MAPPINGS)])
        self.assertEqual(mock_update_roles.call_count, 2)

    @mock.patch('AccessControlResourceHandler._get_resource_group_policy_name')
    @mock.patch('AccessControlResourceHandler._get_permissions')
    @mock.patch('AccessControlResourceHandler._get_explicit_role_mappings')
    @mock.patch('AccessControlResourceHandler._get_implicit_role_mappings')
    @mock.patch('AccessControlResourceHandler._update_roles')
    def test_with_problems(self,
                           mock_update_roles,
                           mock_get_implicit_role_mappings,
                           mock_get_explicit_role_mappings,
                           mock_get_permissions,
                           mock_get_resource_group_policy_name):
        """When helpers report problems, roles must not be updated.

        Renamed from test_with_with_problems (duplicated word) to match the
        naming used by the sibling test-case classes.
        """
        resource_group_implicit_role_mapping_problem = mock.MagicMock()
        deployment_access_role_mapping_problem = mock.MagicMock()
        project_role_mapping_problem = mock.MagicMock()
        mock_get_explicit_role_mappings_return_values = [self.DEPLOYMENT_ACCESS_ROLE_MAPPINGS, self.PROJECT_ROLE_MAPPINGS]
        mock_get_explicit_role_mappings_problems = [deployment_access_role_mapping_problem, project_role_mapping_problem]
        mock_get_explicit_role_mappings.side_effect = self.make_problem_reporting_side_effect(
            mock_get_explicit_role_mappings_problems,
            mock_get_explicit_role_mappings_return_values)
        mock_get_implicit_role_mappings.side_effect = self.make_problem_reporting_side_effect(
            [resource_group_implicit_role_mapping_problem],
            [self.RESOURCE_GROUP_ROLE_MAPPINGS])
        policy_name = mock_get_resource_group_policy_name.return_value
        permissions = mock.MagicMock()
        mock_get_permissions_problem = mock.MagicMock()
        mock_get_permissions.side_effect = self.make_problem_reporting_side_effect(
            [mock_get_permissions_problem],
            [permissions])
        expected_problems = [mock_get_permissions_problem, resource_group_implicit_role_mapping_problem]
        expected_problems.extend(mock_get_explicit_role_mappings_problems)
        problems = AccessControlResourceHandler.ProblemList()

        AccessControlResourceHandler._apply_resource_group_access_control(self.REQUEST_TYPE, self.RESOURCE_GROUP, problems)

        self.assertEqual(len(problems), len(expected_problems))
        mock_get_resource_group_policy_name.assert_has_calls([
            mock.call(self.RESOURCE_GROUP)])
        mock_get_permissions.assert_has_calls([
            mock.call(self.RESOURCE_GROUP, problems)])
        mock_get_explicit_role_mappings.assert_has_calls([
            mock.call(self.RESOURCE_GROUP.deployment.deployment_access, problems),
            mock.call(self.RESOURCE_GROUP.deployment.project, problems)])
        mock_get_implicit_role_mappings.assert_has_calls([
            mock.call(self.RESOURCE_GROUP, problems)])
        mock_update_roles.assert_not_called()
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_apply_deployment_access_control(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._apply_deployment_access_control."""

    @mock.patch('AccessControlResourceHandler._get_resource_group_policy_name')
    @mock.patch('AccessControlResourceHandler._get_permissions')
    @mock.patch('AccessControlResourceHandler._get_explicit_role_mappings')
    @mock.patch('AccessControlResourceHandler._update_roles')
    def test_with_no_problems(self,
                              mock_update_roles,
                              mock_get_explicit_role_mappings,
                              mock_get_permissions,
                              mock_get_resource_group_policy_name):
        """Each resource group's policy/permissions feed an _update_roles call."""
        request_type = mock.MagicMock()
        policy_name_1 = mock.MagicMock()
        policy_name_2 = mock.MagicMock()
        mock_get_resource_group_policy_name.side_effect = [policy_name_1, policy_name_2]
        permissions_1 = mock.MagicMock()
        permissions_2 = mock.MagicMock()
        mock_get_permissions.side_effect = [permissions_1, permissions_2]
        resource_group_1 = mock.MagicMock()
        resource_group_2 = mock.MagicMock()
        deployment_access = mock.MagicMock()
        deployment_access.deployment.resource_groups = [resource_group_1, resource_group_2]
        explicit_role_mappings = mock_get_explicit_role_mappings.return_value
        expected_problems = []
        problems = AccessControlResourceHandler.ProblemList()

        AccessControlResourceHandler._apply_deployment_access_control(request_type, deployment_access, problems)

        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(len(problems), len(expected_problems))
        mock_get_explicit_role_mappings.assert_called_with(deployment_access, problems)
        mock_get_resource_group_policy_name.assert_has_calls([
            mock.call(resource_group_1),
            mock.call(resource_group_2)])
        mock_get_permissions.assert_has_calls([
            mock.call(resource_group_1, problems),
            mock.call(resource_group_2, problems)])
        mock_update_roles.assert_has_calls([
            mock.call(request_type, policy_name_1, permissions_1, explicit_role_mappings),
            mock.call(request_type, policy_name_2, permissions_2, explicit_role_mappings)],
            any_order=True)

    @mock.patch('AccessControlResourceHandler._get_resource_group_policy_name')
    @mock.patch('AccessControlResourceHandler._get_permissions')
    @mock.patch('AccessControlResourceHandler._get_explicit_role_mappings')
    @mock.patch('AccessControlResourceHandler._update_roles')
    def test_with_problems(self,
                           mock_update_roles,
                           mock_get_explicit_role_mappings,
                           mock_get_permissions,
                           mock_get_resource_group_policy_name):
        """When helpers report problems, roles must not be updated."""
        request_type = mock.MagicMock()
        policy_name_1 = mock.MagicMock()
        policy_name_2 = mock.MagicMock()
        mock_get_resource_group_policy_name.side_effect = [policy_name_1, policy_name_2]
        permissions_1 = mock.MagicMock()
        permissions_2 = mock.MagicMock()
        mock_get_permissions_return_values = [permissions_1, permissions_2]
        mock_get_permissions_problem_1 = mock.MagicMock()
        mock_get_permissions_problem_2 = mock.MagicMock()
        mock_get_permissions_problems = [mock_get_permissions_problem_1, mock_get_permissions_problem_2]
        mock_get_permissions.side_effect = self.make_problem_reporting_side_effect(
            mock_get_permissions_problems,
            mock_get_permissions_return_values)
        resource_group_1 = mock.MagicMock()
        resource_group_2 = mock.MagicMock()
        deployment_access = mock.MagicMock()
        deployment_access.deployment.resource_groups = [resource_group_1, resource_group_2]
        get_explicit_role_mappings_problem = mock.MagicMock()
        role_mappings = mock.MagicMock()
        mock_get_explicit_role_mappings.side_effect = self.make_problem_reporting_side_effect(
            [get_explicit_role_mappings_problem],
            [role_mappings])
        expected_problems = [get_explicit_role_mappings_problem]
        expected_problems.extend(mock_get_permissions_problems)
        problems = AccessControlResourceHandler.ProblemList()

        AccessControlResourceHandler._apply_deployment_access_control(request_type, deployment_access, problems)

        self.assertEqual(len(problems), len(expected_problems))
        mock_get_explicit_role_mappings.assert_called_with(deployment_access, problems)
        mock_get_resource_group_policy_name.assert_has_calls([
            mock.call(resource_group_1),
            mock.call(resource_group_2)])
        mock_get_permissions.assert_has_calls([
            mock.call(resource_group_1, problems),
            mock.call(resource_group_2, problems)])
        mock_update_roles.assert_not_called()
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_apply_project_access_control(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._apply_project_access_control."""

    @mock.patch('AccessControlResourceHandler._get_resource_group_policy_name')
    @mock.patch('AccessControlResourceHandler._get_permissions')
    @mock.patch('AccessControlResourceHandler._get_explicit_role_mappings')
    @mock.patch('AccessControlResourceHandler._update_roles')
    def test_with_no_problems(self,
                              mock_update_roles,
                              mock_get_explicit_role_mappings,
                              mock_get_permissions,
                              mock_get_resource_group_policy_name):
        """Every resource group across all deployments triggers an _update_roles call."""
        request_type = mock.MagicMock()
        policy_name_a_1 = mock.MagicMock()
        policy_name_a_2 = mock.MagicMock()
        policy_name_b_1 = mock.MagicMock()
        policy_name_b_2 = mock.MagicMock()
        mock_get_resource_group_policy_name.side_effect = [policy_name_a_1, policy_name_a_2, policy_name_b_1, policy_name_b_2]
        permissions_a_1 = mock.MagicMock()
        permissions_a_2 = mock.MagicMock()
        permissions_b_1 = mock.MagicMock()
        permissions_b_2 = mock.MagicMock()
        permissions_c_1 = mock.MagicMock()
        # NOTE(review): five side-effect entries for four resource groups; the
        # fifth appears to be deliberate slack — confirm against the handler.
        mock_get_permissions.side_effect = [permissions_a_1, permissions_a_2, permissions_b_1, permissions_b_2, permissions_c_1]
        resource_group_a_1 = mock.MagicMock()
        resource_group_a_2 = mock.MagicMock()
        resource_group_b_1 = mock.MagicMock()
        resource_group_b_2 = mock.MagicMock()
        deployment_a = mock.MagicMock()
        deployment_a.resource_groups = [resource_group_a_1, resource_group_a_2]
        deployment_b = mock.MagicMock()
        deployment_b.resource_groups = [resource_group_b_1, resource_group_b_2]
        project = mock.MagicMock()
        project.deployments = [deployment_a, deployment_b]
        role_mappings = mock_get_explicit_role_mappings.return_value
        expected_problems = []
        problems = AccessControlResourceHandler.ProblemList()

        # Return value intentionally ignored; the previous unused binding was removed.
        AccessControlResourceHandler._apply_project_access_control(request_type, project, problems)

        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(len(problems), len(expected_problems))
        mock_get_explicit_role_mappings.assert_called_with(project, problems)
        mock_get_resource_group_policy_name.assert_has_calls([
            mock.call(resource_group_a_1),
            mock.call(resource_group_a_2),
            mock.call(resource_group_b_1),
            mock.call(resource_group_b_2)])
        mock_get_permissions.assert_has_calls([
            mock.call(resource_group_a_1, problems),
            mock.call(resource_group_a_2, problems),
            mock.call(resource_group_b_1, problems),
            mock.call(resource_group_b_2, problems)])
        mock_update_roles.assert_has_calls([
            mock.call(request_type, policy_name_a_1, permissions_a_1, role_mappings),
            mock.call(request_type, policy_name_a_2, permissions_a_2, role_mappings),
            mock.call(request_type, policy_name_b_1, permissions_b_1, role_mappings),
            mock.call(request_type, policy_name_b_2, permissions_b_2, role_mappings)],
            any_order=True)

    @mock.patch('AccessControlResourceHandler._get_resource_group_policy_name')
    @mock.patch('AccessControlResourceHandler._get_permissions')
    @mock.patch('AccessControlResourceHandler._get_explicit_role_mappings')
    @mock.patch('AccessControlResourceHandler._update_roles')
    def test_with_problems(self,
                           mock_update_roles,
                           mock_get_explicit_role_mappings,
                           mock_get_permissions,
                           mock_get_resource_group_policy_name):
        """When helpers report problems, roles must not be updated."""
        request_type = mock.MagicMock()
        policy_name_a_1 = mock.MagicMock()
        policy_name_a_2 = mock.MagicMock()
        policy_name_b_1 = mock.MagicMock()
        policy_name_b_2 = mock.MagicMock()
        mock_get_resource_group_policy_name.side_effect = [policy_name_a_1, policy_name_a_2, policy_name_b_1, policy_name_b_2]
        permissions_a_1 = mock.MagicMock()
        permissions_a_2 = mock.MagicMock()
        permissions_b_1 = mock.MagicMock()
        permissions_b_2 = mock.MagicMock()
        permissions_c_1 = mock.MagicMock()
        permissions = [permissions_a_1, permissions_a_2, permissions_b_1, permissions_b_2, permissions_c_1]
        mock_get_permissions_problem_a_1 = 'mock_get_permissions_problem_a_1'
        mock_get_permissions_problem_a_2 = 'mock_get_permissions_problem_a_2'
        mock_get_permissions_problem_b_1 = 'mock_get_permissions_problem_b_1'
        mock_get_permissions_problem_b_2 = 'mock_get_permissions_problem_b_2'
        mock_get_permissions_problem_c_1 = 'mock_get_permissions_problem_c_1'
        mock_get_permissions_problems = [mock_get_permissions_problem_a_1, mock_get_permissions_problem_a_2, mock_get_permissions_problem_b_1, mock_get_permissions_problem_b_2, mock_get_permissions_problem_c_1]
        mock_get_permissions.side_effect = self.make_problem_reporting_side_effect(
            mock_get_permissions_problems,
            permissions)
        resource_group_a_1 = mock.MagicMock()
        resource_group_a_2 = mock.MagicMock()
        resource_group_b_1 = mock.MagicMock()
        resource_group_b_2 = mock.MagicMock()
        deployment_a = mock.MagicMock()
        deployment_a.resource_groups = [resource_group_a_1, resource_group_a_2]
        deployment_b = mock.MagicMock()
        deployment_b.resource_groups = [resource_group_b_1, resource_group_b_2]
        project = mock.MagicMock()
        project.deployments = [deployment_a, deployment_b]
        explicit_role_mappings = mock.MagicMock()
        mock_get_explicit_role_mappings_problem = 'mock_get_explicit_role_mappings_problem'
        mock_get_explicit_role_mappings.side_effect = self.make_problem_reporting_side_effect(
            [mock_get_explicit_role_mappings_problem],
            [explicit_role_mappings])
        actual_problems = AccessControlResourceHandler.ProblemList()

        AccessControlResourceHandler._apply_project_access_control(request_type, project, actual_problems)

        expected_problems = [mock_get_explicit_role_mappings_problem]
        expected_problems.extend(mock_get_permissions_problems)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_explicit_role_mappings.assert_called_with(project, actual_problems)
        mock_get_resource_group_policy_name.assert_has_calls([
            mock.call(resource_group_a_1),
            mock.call(resource_group_a_2),
            mock.call(resource_group_b_1),
            mock.call(resource_group_b_2)])
        mock_get_permissions.assert_has_calls([
            mock.call(resource_group_a_1, actual_problems),
            mock.call(resource_group_a_2, actual_problems),
            mock.call(resource_group_b_1, actual_problems),
            mock.call(resource_group_b_2, actual_problems)])
        mock_update_roles.assert_not_called()
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_resource_group_policy_name(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_resource_group_policy_name."""

    def test_default(self):
        """Policy name is '<deployment>.<resource-group>-AccessControl'."""
        resource_group_name = 'test-resource-group'
        deployment_name = 'test-deployment'
        resource_group = mock.MagicMock()
        resource_group.resource_group_name = resource_group_name
        resource_group.deployment.deployment_name = deployment_name
        expected_policy_name = deployment_name + '.' + resource_group_name + '-AccessControl'
        actual_policy_name = AccessControlResourceHandler._get_resource_group_policy_name(resource_group)
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(actual_policy_name, expected_policy_name)
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_permissions(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_permissions."""

    def test_with_no_metadata(self):
        """Resources without 'Permissions' metadata yield an empty permissions map."""
        resource_a = mock.MagicMock()
        resource_a.get_cloud_canvas_metadata.return_value = None
        resource_b = mock.MagicMock()
        resource_b.get_cloud_canvas_metadata.return_value = None
        resource_group = mock.MagicMock()
        resource_group.resources = [resource_a, resource_b]
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_permissions = {}
        actual_permissions = AccessControlResourceHandler._get_permissions(resource_group, actual_problems)
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(actual_permissions, expected_permissions)
        self.assertEqual(len(actual_problems), len(expected_problems))
        resource_a.get_cloud_canvas_metadata.assert_called_with('Permissions')
        resource_b.get_cloud_canvas_metadata.assert_called_with('Permissions')

    @mock.patch('AccessControlResourceHandler._get_permission_list')
    def test_with_metadata(self,
                           mock_get_permission_list):
        """Each resource's metadata maps its ARN to its permission list."""
        resource_a = mock.MagicMock(name='resource_a')
        metadata_a = resource_a.get_cloud_canvas_metadata.return_value
        resource_b = mock.MagicMock(name='resource_b')
        metadata_b = resource_b.get_cloud_canvas_metadata.return_value
        resource_group = mock.MagicMock(name='resource-group')
        resource_group.resources = [resource_a, resource_b]
        permission_list_a = [mock.MagicMock(name='permission_list_a')]
        permission_list_b = [mock.MagicMock(name='permission_list_b')]
        mock_get_permission_list.side_effect = [permission_list_a, permission_list_b]
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_permissions = {
            resource_a.resource_arn: permission_list_a,
            resource_b.resource_arn: permission_list_b
        }
        actual_permissions = AccessControlResourceHandler._get_permissions(resource_group, actual_problems)
        self.assertEqual(actual_permissions, expected_permissions)
        self.assertEqual(len(actual_problems), len(expected_problems))
        resource_a.get_cloud_canvas_metadata.assert_called_with('Permissions')
        resource_b.get_cloud_canvas_metadata.assert_called_with('Permissions')
        mock_get_permission_list.assert_has_calls([
            mock.call(resource_group.permission_context_name, resource_a.logical_id, metadata_a, actual_problems),
            mock.call(resource_group.permission_context_name, resource_b.logical_id, metadata_b, actual_problems)])

    @mock.patch('AccessControlResourceHandler._get_permission_list')
    def test_with_get_permission_list_problem(self,
                                              mock_get_permission_list):
        """Problems reported by _get_permission_list propagate; map stays empty."""
        resource_a = mock.MagicMock(name='resource_a')
        metadata_a = resource_a.get_cloud_canvas_metadata.return_value
        resource_b = mock.MagicMock(name='resource_b')
        metadata_b = resource_b.get_cloud_canvas_metadata.return_value
        resource_group = mock.MagicMock(name='resource-group')
        resource_group.resources = [resource_a, resource_b]
        problem_a_1 = 'problem_a_1'
        problem_b_1 = 'problem_b_1'
        mock_get_permission_list.side_effect = self.make_problem_reporting_side_effect(
            [problem_a_1, problem_b_1],
            [[], []])
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [problem_a_1, problem_b_1]
        expected_permissions = {}
        actual_permissions = AccessControlResourceHandler._get_permissions(resource_group, actual_problems)
        self.assertEqual(actual_permissions, expected_permissions)
        self.assertEqual(len(actual_problems), len(expected_problems))
        resource_a.get_cloud_canvas_metadata.assert_called_with('Permissions')
        resource_b.get_cloud_canvas_metadata.assert_called_with('Permissions')
        mock_get_permission_list.assert_has_calls([
            mock.call(resource_group.permission_context_name, resource_a.logical_id, metadata_a, actual_problems),
            mock.call(resource_group.permission_context_name, resource_b.logical_id, metadata_b, actual_problems)])

    @mock.patch('AccessControlResourceHandler._get_permission_list')
    def test_with_unsupported_resource_type_arn(self,
                                                mock_get_permission_list):
        """A resource_arn access that raises is recorded as a problem per resource."""
        resource_a = mock.MagicMock(name='resource_a')
        # PropertyMock so that *reading* resource_arn raises RuntimeError.
        type(resource_a).resource_arn = mock.PropertyMock(side_effect=RuntimeError('message_a'))
        metadata_a = resource_a.get_cloud_canvas_metadata.return_value
        resource_b = mock.MagicMock(name='resource_b')
        type(resource_b).resource_arn = mock.PropertyMock(side_effect=RuntimeError('message_b'))
        metadata_b = resource_b.get_cloud_canvas_metadata.return_value
        resource_group = mock.MagicMock(name='resource-group')
        resource_group.resources = [resource_a, resource_b]
        permission_list_a = [mock.MagicMock(name='permission_list_a')]
        permission_list_b = [mock.MagicMock(name='permission_list_b')]
        mock_get_permission_list.side_effect = [permission_list_a, permission_list_b]
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [self.ANY_STRING, self.ANY_STRING]
        expected_permissions = {}
        actual_permissions = AccessControlResourceHandler._get_permissions(resource_group, actual_problems)
        self.assertEqual(actual_permissions, expected_permissions)
        self.assertEqual(len(actual_problems), len(expected_problems))
        resource_a.get_cloud_canvas_metadata.assert_called_with('Permissions')
        resource_b.get_cloud_canvas_metadata.assert_called_with('Permissions')
        mock_get_permission_list.assert_has_calls([
            mock.call(resource_group.permission_context_name, resource_a.logical_id, metadata_a, actual_problems),
            mock.call(resource_group.permission_context_name, resource_b.logical_id, metadata_b, actual_problems)])
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_permission_list(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_permission_list."""

    @mock.patch('AccessControlResourceHandler._get_permission')
    def test_with_metadata_object(self,
                                  mock_get_permission):
        """A single metadata object is treated as a one-element list."""
        permission = mock_get_permission.return_value
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_permission_list = [permission]
        resource_group_name = mock.MagicMock()
        resource_logical_id = mock.MagicMock()
        permission_metadata = mock.MagicMock()
        permission_metadata_list = permission_metadata
        actual_permission_list = AccessControlResourceHandler._get_permission_list(resource_group_name, resource_logical_id, permission_metadata_list, actual_problems)
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(actual_permission_list, expected_permission_list)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_permission.assert_has_calls([
            mock.call(resource_group_name, resource_logical_id, permission_metadata, actual_problems)])

    @mock.patch('AccessControlResourceHandler._get_permission')
    def test_with_metadata_list(self,
                                mock_get_permission):
        """A metadata list yields one permission per entry, in order."""
        permission_a = mock.MagicMock()
        permission_b = mock.MagicMock()
        mock_get_permission.side_effect = [permission_a, permission_b]
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_permission_list = [permission_a, permission_b]
        resource_group_name = mock.MagicMock()
        resource_logical_id = mock.MagicMock()
        permission_metadata_a = mock.MagicMock()
        permission_metadata_b = mock.MagicMock()
        permission_metadata_list = [permission_metadata_a, permission_metadata_b]
        actual_permission_list = AccessControlResourceHandler._get_permission_list(resource_group_name, resource_logical_id, permission_metadata_list, actual_problems)
        self.assertEqual(actual_permission_list, expected_permission_list)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_permission.assert_has_calls([
            mock.call(resource_group_name, resource_logical_id, permission_metadata_a, actual_problems),
            mock.call(resource_group_name, resource_logical_id, permission_metadata_b, actual_problems)])

    @mock.patch('AccessControlResourceHandler._get_permission')
    def test_with_get_permission_problem(self,
                                         mock_get_permission):
        """Problems from _get_permission propagate; the permission list stays empty."""
        problem_a = mock.MagicMock()
        problem_b = mock.MagicMock()
        mock_get_permission.side_effect = self.make_problem_reporting_side_effect(
            [problem_a, problem_b],
            [None, None])
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [problem_a, problem_b]
        expected_permission_list = []
        resource_group_name = mock.MagicMock()
        resource_logical_id = mock.MagicMock()
        permission_metadata_a = mock.MagicMock()
        permission_metadata_b = mock.MagicMock()
        permission_metadata_list = [permission_metadata_a, permission_metadata_b]
        actual_permission_list = AccessControlResourceHandler._get_permission_list(resource_group_name, resource_logical_id, permission_metadata_list, actual_problems)
        self.assertEqual(actual_permission_list, expected_permission_list)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_permission.assert_has_calls([
            mock.call(resource_group_name, resource_logical_id, permission_metadata_a, actual_problems),
            mock.call(resource_group_name, resource_logical_id, permission_metadata_b, actual_problems)])
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_permission(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_permission."""

    def test_with_invalid_metadata_type(self):
        """Non-dict metadata produces a problem and no permission."""
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [self.ANY_STRING]
        resource_group_name = mock.MagicMock()
        resource_logical_id = mock.MagicMock()
        permission_metadata = 'invalid'
        expected_permission = None
        actual_permission = AccessControlResourceHandler._get_permission(resource_group_name, resource_logical_id, permission_metadata, actual_problems)
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(actual_permission, expected_permission)
        self.assertEqual(len(actual_problems), len(expected_problems))

    @mock.patch('AccessControlResourceHandler._get_permission_abstract_role_list')
    @mock.patch('AccessControlResourceHandler._get_permission_allowed_action_list')
    @mock.patch('AccessControlResourceHandler._get_permission_resource_suffix_list')
    def test_with_valid_metadata_object(self,
                                        mock_get_permission_resource_suffix_list,
                                        mock_get_permission_allowed_action_list,
                                        mock_get_permission_abstract_role_list):
        """Valid metadata is normalized via the three helper functions."""
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        resource_name = 'test-resource-group'
        resource_logical_id = 'test-resource-logical-id'
        abstract_role = 'test-abstract-role'
        allowed_action = 'test-allowed-action'
        resource_suffix = 'test-resource-suffix'
        permission_metadata = {
            'AbstractRole': abstract_role,
            'Action': allowed_action,
            'ResourceSuffix': resource_suffix
        }
        expected_permission = {
            'AbstractRole': mock_get_permission_abstract_role_list.return_value,
            'Action': mock_get_permission_allowed_action_list.return_value,
            'ResourceSuffix': mock_get_permission_resource_suffix_list.return_value,
            'LogicalResourceId': resource_logical_id
        }
        actual_permission = AccessControlResourceHandler._get_permission(resource_name, resource_logical_id, permission_metadata, actual_problems)
        self.assertEqual(actual_permission, expected_permission)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_permission_resource_suffix_list.assert_has_calls([
            mock.call(resource_suffix, actual_problems)])
        mock_get_permission_allowed_action_list.assert_has_calls([
            mock.call(allowed_action, actual_problems)])
        mock_get_permission_abstract_role_list.assert_has_calls([
            mock.call(resource_name, abstract_role, actual_problems)])

    @mock.patch('AccessControlResourceHandler._get_permission_abstract_role_list')
    @mock.patch('AccessControlResourceHandler._get_permission_allowed_action_list')
    @mock.patch('AccessControlResourceHandler._get_permission_resource_suffix_list')
    def test_with_metadata_object_with_unsupported_property(self,
                                                            mock_get_permission_resource_suffix_list,
                                                            mock_get_permission_allowed_action_list,
                                                            mock_get_permission_abstract_role_list):
        """An unknown metadata key is reported as a problem but the permission is still built."""
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [self.ANY_STRING]
        resource_group_name = 'test-resource-group'
        resource_logical_id = 'test-resource-logical-id'
        abstract_role = 'test-abstract-role'
        allowed_action = 'test-allowed-action'
        resource_suffix = 'test-resource-suffix'
        permission_metadata = {
            'AbstractRole': abstract_role,
            'Action': allowed_action,
            'ResourceSuffix': resource_suffix,
            'Unsupported': 'unsupported-value'
        }
        expected_permission = {
            'AbstractRole': mock_get_permission_abstract_role_list.return_value,
            'Action': mock_get_permission_allowed_action_list.return_value,
            'ResourceSuffix': mock_get_permission_resource_suffix_list.return_value,
            'LogicalResourceId': resource_logical_id
        }
        actual_permission = AccessControlResourceHandler._get_permission(resource_group_name, resource_logical_id, permission_metadata, actual_problems)
        self.assertEqual(actual_permission, expected_permission)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_permission_resource_suffix_list.assert_has_calls([
            mock.call(resource_suffix, actual_problems)])
        mock_get_permission_allowed_action_list.assert_has_calls([
            mock.call(allowed_action, actual_problems)])
        mock_get_permission_abstract_role_list.assert_has_calls([
            mock.call(resource_group_name, abstract_role, actual_problems)])
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_permission_abstract_role_list(AccessControlResourceHandlerTestCase):
def test_with_none(self):
    """A None abstract role list is a problem and yields an empty list."""
    resource_group_name = mock.MagicMock()
    abstract_role_list = None
    actual_problems = AccessControlResourceHandler.ProblemList()
    expected_problems = [self.ANY_STRING]
    expected_abstract_role_list = []
    actual_abstract_role_list = AccessControlResourceHandler._get_permission_abstract_role_list(resource_group_name, abstract_role_list, actual_problems)
    # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(actual_abstract_role_list, expected_abstract_role_list)
    self.assertEqual(len(actual_problems), len(expected_problems))
def test_with_string(self):
resource_group_name = mock.MagicMock()
abstract_role = 'test-abstract-role'
abstract_role_list = abstract_role
actual_problems = AccessControlResourceHandler.ProblemList()
expected_problems = []
expected_abstract_role_list = [ [ resource_group_name, abstract_role ] ]
actual_abstract_role_list = AccessControlResourceHandler._get_permission_abstract_role_list(resource_group_name, abstract_role_list, actual_problems)
self.assertEquals(actual_abstract_role_list, expected_abstract_role_list)
self.assertEquals(len(actual_problems), len(expected_problems))
def test_with_list(self):
resource_group_name = mock.MagicMock()
abstract_role_1 = 'test-abstract-role-1'
abstract_role_2 = 'test-abstract-role-2'
abstract_role_list = [ abstract_role_1, abstract_role_2 ]
actual_problems = AccessControlResourceHandler.ProblemList()
expected_problems = []
expected_abstract_role_list = [
[ resource_group_name, abstract_role_1 ],
[ resource_group_name, abstract_role_2 ]
]
actual_abstract_role_list = AccessControlResourceHandler._get_permission_abstract_role_list(resource_group_name, abstract_role_list, actual_problems)
self.assertEquals(actual_abstract_role_list, expected_abstract_role_list)
self.assertEquals(len(actual_problems), len(expected_problems))
def test_with_object(self):
resource_group_name = mock.MagicMock()
abstract_role_list = {}
actual_problems = AccessControlResourceHandler.ProblemList()
expected_problems = [ self.ANY_STRING ]
expected_abstract_role_list = []
actual_abstract_role_list = AccessControlResourceHandler._get_permission_abstract_role_list(resource_group_name, abstract_role_list, actual_problems)
self.assertEquals(actual_abstract_role_list, expected_abstract_role_list)
self.assertEquals(len(actual_problems), len(expected_problems))
def test_with_string_with_dot(self):
resource_group_name = mock.MagicMock()
abstract_role = 'test-abstract-role.with-dot'
abstract_role_list = abstract_role
actual_problems = AccessControlResourceHandler.ProblemList()
expected_problems = [ self.ANY_STRING ]
expected_abstract_role_list = []
actual_abstract_role_list = AccessControlResourceHandler._get_permission_abstract_role_list(resource_group_name, abstract_role_list, actual_problems)
self.assertEquals(actual_abstract_role_list, expected_abstract_role_list)
self.assertEquals(len(actual_problems), len(expected_problems))
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_permission_resource_suffix_list(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_permission_resource_suffix_list.

    The function normalizes permission metadata's 'ResourceSuffix' value into
    a list of suffix strings; a missing/empty value defaults to [''].
    """

    def test_with_none(self):
        # None defaults to a single empty suffix with no problems reported.
        resource_suffix_list = None
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_resource_suffix_list = ['']
        actual_resource_suffix_list = AccessControlResourceHandler._get_permission_resource_suffix_list(resource_suffix_list, actual_problems)
        self.assertEqual(actual_resource_suffix_list, expected_resource_suffix_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_string(self):
        # A single suffix string is wrapped into a one-element list.
        resource_suffix = 'test-resource-suffix'
        resource_suffix_list = resource_suffix
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_resource_suffix_list = [ resource_suffix ]
        actual_resource_suffix_list = AccessControlResourceHandler._get_permission_resource_suffix_list(resource_suffix_list, actual_problems)
        self.assertEqual(actual_resource_suffix_list, expected_resource_suffix_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_list(self):
        # A list of suffixes is passed through unchanged.
        resource_suffix_1 = 'test-resource-suffix-1'
        resource_suffix_2 = 'test-resource-suffix-2'
        resource_suffix_list = [ resource_suffix_1, resource_suffix_2 ]
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_resource_suffix_list = resource_suffix_list
        actual_resource_suffix_list = AccessControlResourceHandler._get_permission_resource_suffix_list(resource_suffix_list, actual_problems)
        self.assertEqual(actual_resource_suffix_list, expected_resource_suffix_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_empty_list(self):
        # An empty list also defaults to a single empty suffix.
        resource_suffix_list = []
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_resource_suffix_list = ['']
        actual_resource_suffix_list = AccessControlResourceHandler._get_permission_resource_suffix_list(resource_suffix_list, actual_problems)
        self.assertEqual(actual_resource_suffix_list, expected_resource_suffix_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_object(self):
        # A dict is invalid: problem reported and an empty list returned.
        resource_suffix_list = {}
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        expected_resource_suffix_list = []
        actual_resource_suffix_list = AccessControlResourceHandler._get_permission_resource_suffix_list(resource_suffix_list, actual_problems)
        self.assertEqual(actual_resource_suffix_list, expected_resource_suffix_list)
        self.assertEqual(len(actual_problems), len(expected_problems))
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_permission_allowed_action_list(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_permission_allowed_action_list.

    The function normalizes permission metadata's 'Action' value into a list
    of action strings; a missing value is a problem (unlike ResourceSuffix).
    """

    def test_with_none(self):
        # None is invalid: one problem reported and an empty list returned.
        allowed_action_list = None
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        expected_allowed_action_list = []
        actual_allowed_action_list = AccessControlResourceHandler._get_permission_allowed_action_list(allowed_action_list, actual_problems)
        self.assertEqual(actual_allowed_action_list, expected_allowed_action_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_string(self):
        # A single action string is wrapped into a one-element list.
        allowed_action = 'test-allowed-action'
        allowed_action_list = allowed_action
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_allowed_action_list = [ allowed_action ]
        actual_allowed_action_list = AccessControlResourceHandler._get_permission_allowed_action_list(allowed_action_list, actual_problems)
        self.assertEqual(actual_allowed_action_list, expected_allowed_action_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_list(self):
        # A list of actions is passed through unchanged.
        allowed_action_1 = 'test-allowed-action-1'
        allowed_action_2 = 'test-allowed-action-2'
        allowed_action_list = [ allowed_action_1, allowed_action_2 ]
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_allowed_action_list = allowed_action_list
        actual_allowed_action_list = AccessControlResourceHandler._get_permission_allowed_action_list(allowed_action_list, actual_problems)
        self.assertEqual(actual_allowed_action_list, expected_allowed_action_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_object(self):
        # A dict is invalid: problem reported and an empty list returned.
        allowed_action_list = {}
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        expected_allowed_action_list = []
        actual_allowed_action_list = AccessControlResourceHandler._get_permission_allowed_action_list(allowed_action_list, actual_problems)
        self.assertEqual(actual_allowed_action_list, expected_allowed_action_list)
        self.assertEqual(len(actual_problems), len(expected_problems))
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_implicit_role_mappings(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_implicit_role_mappings.

    Implicit role mappings are extracted from the 'AbstractRoleMappings' JSON
    embedded after '::' in the physical id of Custom::* resources.
    """

    def test_default(self):
        # Two custom resources, each declaring one logical->physical role mapping.
        mock_physical_role_name_1 = 'physical-test-role-1'
        mock_logical_role_name_1 = 'logical-test-role-1'
        mock_resource_1_id_data = { 'AbstractRoleMappings': { mock_logical_role_name_1: mock_physical_role_name_1 } }
        mock_resource_1 = mock.MagicMock()
        mock_resource_1.type = 'Custom::Test'
        mock_resource_1.physical_id = 'resource-name::{}'.format(json.dumps(mock_resource_1_id_data))
        mock_physical_role_name_2 = 'physical-test-role-2'
        mock_logical_role_name_2 = 'logical-test-role-2'
        mock_resource_2_id_data = { 'AbstractRoleMappings': { mock_logical_role_name_2: mock_physical_role_name_2 } }
        mock_resource_2 = mock.MagicMock()
        mock_resource_2.type = 'Custom::Test'
        mock_resource_2.physical_id = 'resource-name::{}'.format(json.dumps(mock_resource_2_id_data))
        mock_resource_info_name = 'test-resource-group'
        mock_resource_info = mock.MagicMock()
        mock_resource_info.permission_context_name = mock_resource_info_name
        mock_resource_info.resources = [ mock_resource_1, mock_resource_2 ]
        problems = []
        # Logical role names come back utf8-encoded because they originate
        # from json.loads output in the handler.
        expected_mappings = {
            mock_physical_role_name_1: [{
                'Effect': 'Allow',
                'AbstractRole': [ [ mock_resource_info_name, mock_logical_role_name_1.encode('utf8') ] ]
            }],
            mock_physical_role_name_2: [{
                'Effect': 'Allow',
                'AbstractRole': [ [ mock_resource_info_name, mock_logical_role_name_2.encode('utf8') ] ]
            }]
        }
        actual_mappings = AccessControlResourceHandler._get_implicit_role_mappings(mock_resource_info, problems)
        self.assertEqual(actual_mappings, expected_mappings)
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_explicit_role_mappings(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_explicit_role_mappings.

    Explicit role mappings come from 'RoleMappings' Cloud Canvas metadata on
    the stack's AWS::IAM::Role resources.
    """

    def test_with_no_metadata(self):
        # Roles without RoleMappings metadata map to empty mapping lists.
        role_resource_a = mock.MagicMock()
        role_resource_a.get_cloud_canvas_metadata.return_value = None
        role_resource_b = mock.MagicMock()
        role_resource_b.get_cloud_canvas_metadata.return_value = None
        stack = mock.MagicMock()
        stack.resources.get_by_type.return_value = [ role_resource_a, role_resource_b ]
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_role_mappings = {
            role_resource_a.physical_id: [],
            role_resource_b.physical_id: []
        }
        actual_role_mappings = AccessControlResourceHandler._get_explicit_role_mappings(stack, actual_problems)
        self.assertEqual(actual_role_mappings, expected_role_mappings)
        self.assertEqual(len(actual_problems), len(expected_problems))
        role_resource_a.get_cloud_canvas_metadata.assert_called_with('RoleMappings')
        role_resource_b.get_cloud_canvas_metadata.assert_called_with('RoleMappings')
        stack.resources.get_by_type.assert_called_with('AWS::IAM::Role')

    @mock.patch('AccessControlResourceHandler._get_role_mapping_list')
    def test_with_metadata(self,
                           mock_get_role_mapping_list):
        # Each role's metadata is converted via _get_role_mapping_list.
        role_resource_a = mock.MagicMock(name='resource_a')
        metadata_a = role_resource_a.get_cloud_canvas_metadata.return_value
        role_resource_b = mock.MagicMock(name='resource_b')
        metadata_b = role_resource_b.get_cloud_canvas_metadata.return_value
        stack = mock.MagicMock(name='resource-group')
        stack.resources.get_by_type.return_value = [ role_resource_a, role_resource_b ]
        role_mapping_list_a = mock.MagicMock(name='role_mapping_list_a')
        role_mapping_list_b = mock.MagicMock(name='role_mapping_list_b')
        mock_get_role_mapping_list.side_effect = [ role_mapping_list_a, role_mapping_list_b ]
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_role_mappings = {
            role_resource_a.physical_id: role_mapping_list_a,
            role_resource_b.physical_id: role_mapping_list_b
        }
        actual_role_mappings = AccessControlResourceHandler._get_explicit_role_mappings(stack, actual_problems)
        self.assertEqual(actual_role_mappings, expected_role_mappings)
        self.assertEqual(len(actual_problems), len(expected_problems))
        role_resource_a.get_cloud_canvas_metadata.assert_called_with('RoleMappings')
        role_resource_b.get_cloud_canvas_metadata.assert_called_with('RoleMappings')
        stack.resources.get_by_type.assert_called_with('AWS::IAM::Role')
        mock_get_role_mapping_list.assert_has_calls([
            mock.call(metadata_a, actual_problems),
            mock.call(metadata_b, actual_problems)])

    @mock.patch('AccessControlResourceHandler._get_role_mapping_list')
    def test_with_get_role_mapping_list_problem(self,
                                                mock_get_role_mapping_list):
        # Problems reported by _get_role_mapping_list accumulate in the caller's list.
        role_resource_a = mock.MagicMock(name='resource_a')
        metadata_a = role_resource_a.get_cloud_canvas_metadata.return_value
        role_resource_b = mock.MagicMock(name='resource_b')
        metadata_b = role_resource_b.get_cloud_canvas_metadata.return_value
        stack = mock.MagicMock(name='resource-group')
        stack.resources.get_by_type.return_value = [ role_resource_a, role_resource_b ]
        problem_a_1 = mock.MagicMock(name='problem_a_1')
        problem_b_1 = mock.MagicMock(name='problem_b_1')
        mock_get_role_mapping_list.side_effect = self.make_problem_reporting_side_effect(
            [ problem_a_1, problem_b_1 ],
            [ [], [] ])
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ problem_a_1, problem_b_1 ]
        expected_role_mappings = {
            role_resource_a.physical_id: [],
            role_resource_b.physical_id: []
        }
        actual_role_mappings = AccessControlResourceHandler._get_explicit_role_mappings(stack, actual_problems)
        self.assertEqual(actual_role_mappings, expected_role_mappings)
        self.assertEqual(len(actual_problems), len(expected_problems))
        role_resource_a.get_cloud_canvas_metadata.assert_called_with('RoleMappings')
        role_resource_b.get_cloud_canvas_metadata.assert_called_with('RoleMappings')
        stack.resources.get_by_type.assert_called_with('AWS::IAM::Role')
        mock_get_role_mapping_list.assert_has_calls([
            mock.call(metadata_a, actual_problems),
            mock.call(metadata_b, actual_problems)])
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_role_mapping_list(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_role_mapping_list.

    The function accepts a single role-mapping metadata object or a list of
    them, delegating each to _get_role_mapping and collecting the results.
    """

    @mock.patch('AccessControlResourceHandler._get_role_mapping')
    def test_with_metadata_object(self,
                                  mock_get_role_mapping):
        # A single metadata object yields a one-element mapping list.
        role_mapping = mock_get_role_mapping.return_value
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_role_mapping_list = [ role_mapping ]
        role_mapping_metadata = mock.MagicMock()
        role_mapping_metadata_list = role_mapping_metadata
        actual_role_mapping_list = AccessControlResourceHandler._get_role_mapping_list(role_mapping_metadata_list, actual_problems)
        self.assertEqual(actual_role_mapping_list, expected_role_mapping_list)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_role_mapping.assert_has_calls([
            mock.call(role_mapping_metadata, actual_problems)])

    @mock.patch('AccessControlResourceHandler._get_role_mapping')
    def test_with_metadata_list(self,
                                mock_get_role_mapping):
        # A list of metadata objects is mapped element-wise.
        role_mapping_a = mock.MagicMock()
        role_mapping_b = mock.MagicMock()
        mock_get_role_mapping.side_effect = [ role_mapping_a, role_mapping_b ]
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_role_mapping_list = [ role_mapping_a, role_mapping_b ]
        role_mapping_metadata_a = mock.MagicMock()
        role_mapping_metadata_b = mock.MagicMock()
        role_mapping_metadata_list = [ role_mapping_metadata_a, role_mapping_metadata_b ]
        actual_role_mapping_list = AccessControlResourceHandler._get_role_mapping_list(role_mapping_metadata_list, actual_problems)
        self.assertEqual(actual_role_mapping_list, expected_role_mapping_list)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_role_mapping.assert_has_calls([
            mock.call(role_mapping_metadata_a, actual_problems),
            mock.call(role_mapping_metadata_b, actual_problems)])

    @mock.patch('AccessControlResourceHandler._get_role_mapping')
    def test_with_get_role_mapping_problem(self,
                                           mock_get_role_mapping):
        # When _get_role_mapping reports problems and returns None, the None
        # results are excluded and the problems accumulate.
        problem_a = mock.MagicMock()
        problem_b = mock.MagicMock()
        mock_get_role_mapping.side_effect = self.make_problem_reporting_side_effect(
            [ problem_a, problem_b ],
            [ None, None ])
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ problem_a, problem_b ]
        expected_role_mapping_list = []
        role_mapping_metadata_a = mock.MagicMock()
        role_mapping_metadata_b = mock.MagicMock()
        role_mapping_metadata_list = [ role_mapping_metadata_a, role_mapping_metadata_b ]
        actual_role_mapping_list = AccessControlResourceHandler._get_role_mapping_list(role_mapping_metadata_list, actual_problems)
        self.assertEqual(actual_role_mapping_list, expected_role_mapping_list)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_role_mapping.assert_has_calls([
            mock.call(role_mapping_metadata_a, actual_problems),
            mock.call(role_mapping_metadata_b, actual_problems)])
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_role_mapping(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_role_mapping.

    The function converts one role-mapping metadata dict into a mapping with
    'AbstractRole' and 'Effect' entries, reporting problems for invalid types
    and unsupported properties.
    """

    def test_with_invalid_metadata_type(self):
        # Non-dict metadata is invalid: problem reported, None returned.
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        role_mapping_metadata = 'invalid'
        expected_role_mapping = None
        actual_role_mapping = AccessControlResourceHandler._get_role_mapping(role_mapping_metadata, actual_problems)
        self.assertEqual(actual_role_mapping, expected_role_mapping)
        self.assertEqual(len(actual_problems), len(expected_problems))

    @mock.patch('AccessControlResourceHandler._get_role_mapping_abstract_role_list')
    @mock.patch('AccessControlResourceHandler._get_role_mapping_effect')
    def test_with_valid_metadata_object(self,
                                        mock_get_role_mapping_effect,
                                        mock_get_role_mapping_abstract_role_list):
        # AbstractRole and Effect are each delegated to their helper.
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        abstract_role = 'test-abstract-role'
        effect = 'test-effect'
        role_mapping_metadata = {
            'AbstractRole': abstract_role,
            'Effect': effect
        }
        expected_role_mapping = {
            'AbstractRole': mock_get_role_mapping_abstract_role_list.return_value,
            'Effect': mock_get_role_mapping_effect.return_value
        }
        actual_role_mapping = AccessControlResourceHandler._get_role_mapping(role_mapping_metadata, actual_problems)
        self.assertEqual(actual_role_mapping, expected_role_mapping)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_role_mapping_effect.assert_has_calls([
            mock.call(effect, actual_problems)])
        mock_get_role_mapping_abstract_role_list.assert_has_calls([
            mock.call(abstract_role, actual_problems)])

    @mock.patch('AccessControlResourceHandler._get_role_mapping_abstract_role_list')
    @mock.patch('AccessControlResourceHandler._get_role_mapping_effect')
    def test_with_metadata_object_with_unsupported_property(self,
                                                            mock_get_role_mapping_effect,
                                                            mock_get_role_mapping_abstract_role_list):
        # Unknown keys report a problem but the mapping is still produced.
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        abstract_role = 'test-abstract-role'
        effect = 'test-effect'
        role_mapping_metadata = {
            'AbstractRole': abstract_role,
            'Effect': effect,
            'Unsupported': 'unsupported-value'
        }
        expected_role_mapping = {
            'AbstractRole': mock_get_role_mapping_abstract_role_list.return_value,
            'Effect': mock_get_role_mapping_effect.return_value
        }
        actual_role_mapping = AccessControlResourceHandler._get_role_mapping(role_mapping_metadata, actual_problems)
        self.assertEqual(actual_role_mapping, expected_role_mapping)
        self.assertEqual(len(actual_problems), len(expected_problems))
        mock_get_role_mapping_effect.assert_has_calls([
            mock.call(effect, actual_problems)])
        mock_get_role_mapping_abstract_role_list.assert_has_calls([
            mock.call(abstract_role, actual_problems)])
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_role_mapping_abstract_role_list(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_role_mapping_abstract_role_list.

    Unlike the permission variant, role-mapping abstract roles are qualified
    strings of the form '<resource-group>.<role>' and are split on the dot.
    """

    def test_with_none(self):
        # None is invalid: problem reported, empty list returned.
        abstract_role_list = None
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        expected_abstract_role_list = []
        actual_abstract_role_list = AccessControlResourceHandler._get_role_mapping_abstract_role_list(abstract_role_list, actual_problems)
        self.assertEqual(actual_abstract_role_list, expected_abstract_role_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_string(self):
        # 'group.role' is split into a [group, role] pair.
        resource_group_name = 'test-resoruce-group'
        abstract_role_name = 'test-abstract-role'
        abstract_role = resource_group_name + '.' + abstract_role_name
        abstract_role_list = abstract_role
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_abstract_role_list = [ [ resource_group_name, abstract_role_name ] ]
        actual_abstract_role_list = AccessControlResourceHandler._get_role_mapping_abstract_role_list(abstract_role_list, actual_problems)
        self.assertEqual(actual_abstract_role_list, expected_abstract_role_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_list(self):
        # Each qualified role string in a list is split independently.
        resource_group_name_1 = 'test-resoruce-group-1'
        resource_group_name_2 = 'test-resoruce-group-2'
        abstract_role_name_1 = 'test-abstract-role-1'
        abstract_role_name_2 = 'test-abstract-role-2'
        abstract_role_1 = resource_group_name_1 + '.' + abstract_role_name_1
        abstract_role_2 = resource_group_name_2 + '.' + abstract_role_name_2
        abstract_role_list = [ abstract_role_1, abstract_role_2 ]
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_abstract_role_list = [
            [ resource_group_name_1, abstract_role_name_1 ],
            [ resource_group_name_2, abstract_role_name_2 ]
        ]
        actual_abstract_role_list = AccessControlResourceHandler._get_role_mapping_abstract_role_list(abstract_role_list, actual_problems)
        self.assertEqual(actual_abstract_role_list, expected_abstract_role_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_object(self):
        # A dict is invalid: problem reported, empty list returned.
        abstract_role_list = {}
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        expected_abstract_role_list = []
        actual_abstract_role_list = AccessControlResourceHandler._get_role_mapping_abstract_role_list(abstract_role_list, actual_problems)
        self.assertEqual(actual_abstract_role_list, expected_abstract_role_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_string_without_dot(self):
        # An unqualified role string (no dot) is invalid.
        abstract_role = 'invalid'
        abstract_role_list = abstract_role
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        expected_abstract_role_list = []
        actual_abstract_role_list = AccessControlResourceHandler._get_role_mapping_abstract_role_list(abstract_role_list, actual_problems)
        self.assertEqual(actual_abstract_role_list, expected_abstract_role_list)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_string_with_too_many_dots(self):
        # More than one dot is also invalid.
        abstract_role = 'invalid.with.dots'
        abstract_role_list = abstract_role
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        expected_abstract_role_list = []
        actual_abstract_role_list = AccessControlResourceHandler._get_role_mapping_abstract_role_list(abstract_role_list, actual_problems)
        self.assertEqual(actual_abstract_role_list, expected_abstract_role_list)
        self.assertEqual(len(actual_problems), len(expected_problems))
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_get_role_mapping_effect(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._get_role_mapping_effect.

    The function validates that the 'Effect' value is the string 'Allow' or
    'Deny'; the input value is returned unchanged either way, with a problem
    reported for invalid values.
    """

    def test_with_none(self):
        # None is invalid but still returned as-is.
        effect = None
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        expected_effect = effect
        actual_effect = AccessControlResourceHandler._get_role_mapping_effect(effect, actual_problems)
        self.assertEqual(actual_effect, expected_effect)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_Allow(self):
        # 'Allow' is valid: no problems.
        effect = 'Allow'
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_effect = effect
        actual_effect = AccessControlResourceHandler._get_role_mapping_effect(effect, actual_problems)
        self.assertEqual(actual_effect, expected_effect)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_Deny(self):
        # 'Deny' is valid: no problems.
        effect = 'Deny'
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = []
        expected_effect = effect
        actual_effect = AccessControlResourceHandler._get_role_mapping_effect(effect, actual_problems)
        self.assertEqual(actual_effect, expected_effect)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_invalid_type(self):
        # A non-string value is invalid but returned as-is.
        effect = []
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        expected_effect = effect
        actual_effect = AccessControlResourceHandler._get_role_mapping_effect(effect, actual_problems)
        self.assertEqual(actual_effect, expected_effect)
        self.assertEqual(len(actual_problems), len(expected_problems))

    def test_with_invalid_string(self):
        # Any string other than 'Allow'/'Deny' is invalid but returned as-is.
        effect = 'Invalid'
        actual_problems = AccessControlResourceHandler.ProblemList()
        expected_problems = [ self.ANY_STRING ]
        expected_effect = effect
        actual_effect = AccessControlResourceHandler._get_role_mapping_effect(effect, actual_problems)
        self.assertEqual(actual_effect, expected_effect)
        self.assertEqual(len(actual_problems), len(expected_problems))
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_update_roles(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._update_roles.

    Verifies that role policies are deleted on Delete requests (and when the
    generated policy has no statements) and put on Create requests when
    statements exist.
    """

    # Shared fixtures: two physical roles, each with its own mapping list.
    policy_name = 'TestPolicy'
    permissions = mock.MagicMock()
    mock_resource_group_name = 'test-resource-group'
    mock_physical_role_name_1 = 'physical-test-role-1'
    mock_physical_role_name_2 = 'physical-test-role-2'
    role_mapping_list_1 = mock.MagicMock()
    role_mapping_list_2 = mock.MagicMock()
    role_mappings = {
        mock_physical_role_name_1: role_mapping_list_1,
        mock_physical_role_name_2: role_mapping_list_2
    }

    @mock_aws.patch_client('iam', 'delete_role_policy', reload = AccessControlResourceHandler)
    def test_with_request_type_delete(self, mock_delete_role_policy):
        # Delete requests remove the policy from every mapped role.
        request_type = 'Delete'
        AccessControlResourceHandler._update_roles(request_type, self.policy_name, self.permissions, self.role_mappings)
        mock_delete_role_policy.assert_has_calls([
            mock.call(RoleName=self.mock_physical_role_name_1, PolicyName=self.policy_name),
            mock.call(RoleName=self.mock_physical_role_name_2, PolicyName=self.policy_name)],
            any_order = True)

    @mock_aws.patch_client('iam', 'delete_role_policy', reload = AccessControlResourceHandler)
    @mock.patch('AccessControlResourceHandler._create_role_policy')
    def test_with_no_statements(self, mock_create_role_policy, mock_delete_role_policy):
        # An empty Statement list deletes rather than puts the policy.
        mock_create_role_policy.return_value = { 'Statement': [] }
        request_type = 'Create'
        AccessControlResourceHandler._update_roles(request_type, self.policy_name, self.permissions, self.role_mappings)
        mock_create_role_policy.assert_has_calls([
            mock.call(self.permissions, self.role_mapping_list_1),
            mock.call(self.permissions, self.role_mapping_list_2)],
            any_order = True)
        mock_delete_role_policy.assert_has_calls([
            mock.call(RoleName=self.mock_physical_role_name_1, PolicyName=self.policy_name),
            mock.call(RoleName=self.mock_physical_role_name_2, PolicyName=self.policy_name)],
            any_order = True)

    @mock_aws.patch_client('iam', 'put_role_policy', reload = AccessControlResourceHandler)
    @mock.patch('AccessControlResourceHandler._create_role_policy')
    def test_with_statements(self, mock_create_role_policy, mock_put_role_policy):
        # Non-empty policies are serialized and put on their matching role.
        mock_policy_1 = { 'Statement': [ 'Policy-1' ] }
        mock_policy_2 = { 'Statement': [ 'Policy-2' ] }

        def mock_create_role_policy_side_effect(permissions, role_mapping_list):
            # Route each role's mapping list to its own policy document.
            if role_mapping_list is self.role_mapping_list_1:
                return mock_policy_1
            elif role_mapping_list is self.role_mapping_list_2:
                return mock_policy_2
            else:
                return None

        mock_create_role_policy.side_effect = mock_create_role_policy_side_effect
        request_type = 'Create'
        AccessControlResourceHandler._update_roles(request_type, self.policy_name, self.permissions, self.role_mappings)
        mock_create_role_policy.assert_has_calls([
            mock.call(self.permissions, self.role_mapping_list_1),
            mock.call(self.permissions, self.role_mapping_list_2)],
            any_order = True)
        mock_put_role_policy.assert_has_calls([
            mock.call(RoleName=self.mock_physical_role_name_1, PolicyName=self.policy_name, PolicyDocument=json.dumps(mock_policy_1)),
            mock.call(RoleName=self.mock_physical_role_name_2, PolicyName=self.policy_name, PolicyDocument=json.dumps(mock_policy_2))],
            any_order = True)
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_create_role_policy(AccessControlResourceHandlerTestCase):
PERMISSION_RESOURCE_ARN_1 = 'permission-resource-arn-1'
PERMISSION_RESOURCE_ARN_2 = 'permission-resource-arn-2'
PERMISSION_ABSTRACT_ROLE_LIST_1_A = [ 'permission-abstract-role-list-1-a' ]
PERMISSION_ABSTRACT_ROLE_LIST_1_B = [ 'permission-abstract-role-list-1-b' ]
PERMISSION_ABSTRACT_ROLE_LIST_2_A = [ 'permission-abstract-role-list-2-a' ]
PERMISSION_ABSTRACT_ROLE_LIST_2_B = [ 'permission-abstract-role-list-2-b' ]
PERMISSION_ACTION_LIST_1_A = [ 'permission-action-list-1-a' ]
PERMISSION_ACTION_LIST_1_B = [ 'permission-action-list-1-b' ]
PERMISSION_ACTION_LIST_2_A = [ 'permission-action-list-2-a' ]
PERMISSION_ACTION_LIST_2_B = [ 'permission-action-list-2-b' ]
PERMISSION_RESOURCE_SUFFIX_1_A_P = 'permission-resource-suffix-1-a-p'
PERMISSION_RESOURCE_SUFFIX_1_A_Q = 'permission-resource-suffix-1-a-q'
PERMISSION_RESOURCE_SUFFIX_1_B_P = 'permission-resource-suffix-1-b-p'
PERMISSION_RESOURCE_SUFFIX_1_B_Q = 'permission-resource-suffix-1-b-q'
PERMISSION_RESOURCE_SUFFIX_2_A_P = 'permission-resource-suffix-2-a-p'
PERMISSION_RESOURCE_SUFFIX_2_A_Q = 'permission-resource-suffix-2-a-q'
PERMISSION_RESOURCE_SUFFIX_2_B_P = 'permission-resource-suffix-2-b-p'
PERMISSION_RESOURCE_SUFFIX_2_B_Q = 'permission-resource-suffix-2-b-q'
PERMISSION_RESOURCE_SUFFIX_LIST_1_A = [ PERMISSION_RESOURCE_SUFFIX_1_A_P, PERMISSION_RESOURCE_SUFFIX_1_A_Q ]
PERMISSION_RESOURCE_SUFFIX_LIST_1_B = [ PERMISSION_RESOURCE_SUFFIX_1_B_P, PERMISSION_RESOURCE_SUFFIX_1_B_Q ]
PERMISSION_RESOURCE_SUFFIX_LIST_2_A = [ PERMISSION_RESOURCE_SUFFIX_2_A_P, PERMISSION_RESOURCE_SUFFIX_2_A_Q ]
PERMISSION_RESOURCE_SUFFIX_LIST_2_B = [ PERMISSION_RESOURCE_SUFFIX_2_B_P, PERMISSION_RESOURCE_SUFFIX_2_B_Q ]
PERMISSION_SID_1_A = 'permission-sid-1-a'
PERMISSION_SID_1_B = 'permission-sid-1-b'
PERMISSION_SID_2_A = 'permission-sid-2-a'
PERMISSION_SID_2_B = 'permission-sid-2-b'
PERMISSIONS = {
PERMISSION_RESOURCE_ARN_1: [
{
"AbstractRole": PERMISSION_ABSTRACT_ROLE_LIST_1_A,
"Action": PERMISSION_ACTION_LIST_1_A,
"ResourceSuffix": PERMISSION_RESOURCE_SUFFIX_LIST_1_A,
"LogicalResourceId": PERMISSION_SID_1_A
},
{
"AbstractRole": PERMISSION_ABSTRACT_ROLE_LIST_1_B,
"Action": PERMISSION_ACTION_LIST_1_B,
"ResourceSuffix": PERMISSION_RESOURCE_SUFFIX_LIST_1_B,
"LogicalResourceId": PERMISSION_SID_1_B
}
],
PERMISSION_RESOURCE_ARN_2: [
{
"AbstractRole": PERMISSION_ABSTRACT_ROLE_LIST_2_A,
"Action": PERMISSION_ACTION_LIST_2_A,
"ResourceSuffix": PERMISSION_RESOURCE_SUFFIX_LIST_2_A,
"LogicalResourceId": PERMISSION_SID_2_A
},
{
"AbstractRole": PERMISSION_ABSTRACT_ROLE_LIST_2_B,
"Action": PERMISSION_ACTION_LIST_2_B,
"ResourceSuffix": PERMISSION_RESOURCE_SUFFIX_LIST_2_B,
"LogicalResourceId": PERMISSION_SID_2_B
}
]
}
ROLE_MAPPING_EFFECT_1 = 'role-mapping-effect-1'
ROLE_MAPPING_EFFECT_2 = 'role-mapping-effect-2'
ROLE_MAPPING_ABSTRACT_ROLE_LIST_1 = 'role-mapping-abstract-role-list-1'
ROLE_MAPPING_ABSTRACT_ROLE_LIST_2 = 'role-mapping-abstract-role-list-2'
ROLE_MAPPING_LIST = [
{
"Effect": ROLE_MAPPING_EFFECT_1,
"AbstractRole": ROLE_MAPPING_ABSTRACT_ROLE_LIST_1
},
{
"Effect": ROLE_MAPPING_EFFECT_2,
"AbstractRole": ROLE_MAPPING_ABSTRACT_ROLE_LIST_2
}
]
@mock.patch('AccessControlResourceHandler._any_abstract_roles_match')
def test_with_no_matches(self, mock_any_abstract_roles_match):
mock_any_abstract_roles_match.return_value = False
expected_policy = {
'Version': '2012-10-17',
'Statement': []
}
actual_policy = AccessControlResourceHandler._create_role_policy(self.PERMISSIONS, self.ROLE_MAPPING_LIST)
self.assertEquals(actual_policy, expected_policy)
mock_any_abstract_roles_match.assert_has_calls([
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_1_A, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_1),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_1_A, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_2),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_1_B, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_1),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_1_B, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_2),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_2_A, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_1),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_2_A, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_2),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_2_B, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_1),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_2_B, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_2)],
any_order = True)
@mock.patch('AccessControlResourceHandler._any_abstract_roles_match')
def test_with_matches(self, mock_any_abstract_roles_match):
mock_any_abstract_roles_match.return_value = True
expected_policy = {
'Version': '2012-10-17',
'Statement': [
{
'Sid': self.PERMISSION_SID_1_A + '1',
'Effect': self.ROLE_MAPPING_EFFECT_1,
'Action': self.PERMISSION_ACTION_LIST_1_A,
'Resource': [ self.PERMISSION_RESOURCE_ARN_1 + self.PERMISSION_RESOURCE_SUFFIX_1_A_P, self.PERMISSION_RESOURCE_ARN_1 + self.PERMISSION_RESOURCE_SUFFIX_1_A_Q ]
},
{
'Sid': self.PERMISSION_SID_1_A + '2',
'Effect': self.ROLE_MAPPING_EFFECT_2,
'Action': self.PERMISSION_ACTION_LIST_1_A,
'Resource': [ self.PERMISSION_RESOURCE_ARN_1 + self.PERMISSION_RESOURCE_SUFFIX_1_A_P, self.PERMISSION_RESOURCE_ARN_1 + self.PERMISSION_RESOURCE_SUFFIX_1_A_Q ]
},
{
'Sid': self.PERMISSION_SID_1_B + '1',
'Effect': self.ROLE_MAPPING_EFFECT_1,
'Action': self.PERMISSION_ACTION_LIST_1_B,
'Resource': [ self.PERMISSION_RESOURCE_ARN_1 + self.PERMISSION_RESOURCE_SUFFIX_1_B_P, self.PERMISSION_RESOURCE_ARN_1 + self.PERMISSION_RESOURCE_SUFFIX_1_B_Q ]
},
{
'Sid': self.PERMISSION_SID_1_B + '2',
'Effect': self.ROLE_MAPPING_EFFECT_2,
'Action': self.PERMISSION_ACTION_LIST_1_B,
'Resource': [ self.PERMISSION_RESOURCE_ARN_1 + self.PERMISSION_RESOURCE_SUFFIX_1_B_P, self.PERMISSION_RESOURCE_ARN_1 + self.PERMISSION_RESOURCE_SUFFIX_1_B_Q ]
},
{
'Sid': self.PERMISSION_SID_2_A + '1',
'Effect': self.ROLE_MAPPING_EFFECT_1,
'Action': self.PERMISSION_ACTION_LIST_2_A,
'Resource': [ self.PERMISSION_RESOURCE_ARN_2 + self.PERMISSION_RESOURCE_SUFFIX_2_A_P, self.PERMISSION_RESOURCE_ARN_2 + self.PERMISSION_RESOURCE_SUFFIX_2_A_Q ]
},
{
'Sid': self.PERMISSION_SID_2_A + '2',
'Effect': self.ROLE_MAPPING_EFFECT_2,
'Action': self.PERMISSION_ACTION_LIST_2_A,
'Resource': [ self.PERMISSION_RESOURCE_ARN_2 + self.PERMISSION_RESOURCE_SUFFIX_2_A_P, self.PERMISSION_RESOURCE_ARN_2 + self.PERMISSION_RESOURCE_SUFFIX_2_A_Q ]
},
{
'Sid': self.PERMISSION_SID_2_B + '1',
'Effect': self.ROLE_MAPPING_EFFECT_1,
'Action': self.PERMISSION_ACTION_LIST_2_B,
'Resource': [ self.PERMISSION_RESOURCE_ARN_2 + self.PERMISSION_RESOURCE_SUFFIX_2_B_P, self.PERMISSION_RESOURCE_ARN_2 + self.PERMISSION_RESOURCE_SUFFIX_2_B_Q ]
},
{
'Sid': self.PERMISSION_SID_2_B + '2',
'Effect': self.ROLE_MAPPING_EFFECT_2,
'Action': self.PERMISSION_ACTION_LIST_2_B,
'Resource': [ self.PERMISSION_RESOURCE_ARN_2 + self.PERMISSION_RESOURCE_SUFFIX_2_B_P, self.PERMISSION_RESOURCE_ARN_2 + self.PERMISSION_RESOURCE_SUFFIX_2_B_Q ]
},
]
}
actual_policy = AccessControlResourceHandler._create_role_policy(self.PERMISSIONS, self.ROLE_MAPPING_LIST)
self.assertItemsEqual(actual_policy['Statement'], expected_policy['Statement'])
mock_any_abstract_roles_match.assert_has_calls([
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_1_A, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_1),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_1_A, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_2),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_1_B, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_1),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_1_B, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_2),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_2_A, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_1),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_2_A, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_2),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_2_B, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_1),
mock.call(self.PERMISSION_ABSTRACT_ROLE_LIST_2_B, self.ROLE_MAPPING_ABSTRACT_ROLE_LIST_2)],
any_order = True)
class UnitTest_CloudGemFramework_ProjectResourceHandler_AccessControlResourceHandler_any_abstract_roles_match(AccessControlResourceHandlerTestCase):
    """Tests for AccessControlResourceHandler._any_abstract_roles_match."""

    def _permission_abstract_roles(self):
        # Every (resource group, abstract role) pair granted by the permission:
        # groups 1 and 2, roles 'a' and 'b'.
        return [
            ['resource-group-%d' % group, u'abstract-role-%s' % role]
            for group in (1, 2)
            for role in ('a', 'b')]

    def test_with_no_match(self):
        mapping_abstract_role_list = [
            ['resource-group-1', 'abstract-role-x'],
            ['resource-group-x', 'abstract-role-a']]
        self.assertFalse(
            AccessControlResourceHandler._any_abstract_roles_match(
                self._permission_abstract_roles(), mapping_abstract_role_list))

    def test_with_exact_match(self):
        mapping_abstract_role_list = [['resource-group-2', 'abstract-role-a']]
        self.assertTrue(
            AccessControlResourceHandler._any_abstract_roles_match(
                self._permission_abstract_roles(), mapping_abstract_role_list))

    def test_with_wildcard_match(self):
        # '*' in the resource-group position matches any resource group.
        mapping_abstract_role_list = [['*', 'abstract-role-a']]
        self.assertTrue(
            AccessControlResourceHandler._any_abstract_roles_match(
                self._permission_abstract_roles(), mapping_abstract_role_list))
# Allow this test module to be executed directly.
if __name__ == '__main__':
    unittest.main()
| 42.668255 | 212 | 0.731663 | 10,044 | 89,518 | 5.993927 | 0.023994 | 0.045048 | 0.041726 | 0.015547 | 0.929322 | 0.895171 | 0.85308 | 0.819311 | 0.796007 | 0.761557 | 0 | 0.006529 | 0.195815 | 89,518 | 2,097 | 213 | 42.688603 | 0.829752 | 0.007775 | 0 | 0.650424 | 0 | 0 | 0.084509 | 0.050582 | 0 | 0 | 0 | 0 | 0.140537 | 1 | 0.064972 | false | 0 | 0.004944 | 0 | 0.121469 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f0cf43b1bfeb034c667c5eb3208966a2d0e977e6 | 13,060 | py | Python | src/Components/misc/dee/dee_mm.py | GEOS-ESM/AeroApps | 874dad6f34420c014d98eccbe81a061bdc0110cf | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-12-02T14:23:30.000Z | 2021-12-31T15:39:30.000Z | src/Components/misc/dee/dee_mm.py | GEOS-ESM/AeroApps | 874dad6f34420c014d98eccbe81a061bdc0110cf | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 9 | 2020-04-15T16:22:14.000Z | 2022-03-24T13:59:25.000Z | src/Components/misc/dee/dee_mm.py | GEOS-ESM/AeroApps | 874dad6f34420c014d98eccbe81a061bdc0110cf | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Implements direct emission estimates based on MODIS Deep Blue C6
retrievals.
"""
import os
import sys
from numpy import linspace, array, savez
from glob import glob
from datetime import datetime, timedelta
from string import Template
from grads import GrADS
from grads.gacore import gat2dt, dt2gat
from deep import mrange
Force = True
#....................................................................
def mflux_o(path,filename,prod,gatime):
"""
Given a time range in gatime, use lats4d to compute mass flux month
mean.
path top path for input control files, see below
filename output file name
"""
if not Force:
if os.path.exists(filename):
print '<> File exists, skipping <%s>'%filename
return
d = dict(path=path, prod=prod, filename=filename,
t1=gatime[0], t2=gatime[1])
tmpl = """
lats4d.sh -v -gzip 2 \
-i $path/opendap/du_cm \
-j $path/opendap/$prod \
-o $filename \
-vars dufluxu dufluxv \
-time $t1 $t1 \
-func 'ave(@*aod.2*(ducmass.1/duexttau.1),time=$t1,time=$t2)'
"""
cmd = Template(tmpl).substitute(d)
rc = os.system(cmd)
if rc:
raise RuntimeError, 'error on return from %s'%cmd
def mflux_m(path, filename, prod, gatime):
    """
    Given a time range in gatime, use lats4d to compute the model dust mass
    flux monthly mean, sampled where the satellite product has valid AOD
    (the "+0*aod.2" term masks the flux to valid-AOD points).

    path      top path for input control files, see below
    filename  output file name
    prod      satellite product control file name under $path/opendap
    gatime    (t1, t2) GrADS time range

    Raises RuntimeError if the lats4d command fails.
    """
    if not Force:
        if os.path.exists(filename):
            print('<> File exists, skipping <%s>' % filename)
            return

    d = dict(path=path, prod=prod, filename=filename,
             t1=gatime[0], t2=gatime[1])

    # BUG FIX: the template used to end with a stray backtick before the
    # closing quotes; a lone backtick starts command substitution in the
    # shell and breaks the generated command.  It has been removed.
    tmpl = """
        lats4d.sh -v -gzip 2 \
              -i $path/opendap/du_cm \
              -j $path/opendap/$prod \
              -o $filename \
              -vars dufluxu dufluxv \
              -time $t1 $t1 \
              -func 'ave(@+0*aod.2,time=$t1,time=$t2)'
        """

    cmd = Template(tmpl).substitute(d)
    if os.system(cmd):
        raise RuntimeError('error on return from %s' % cmd)
#....................................................................
def removal(path, filename, prod, gatime):
    """
    Given a time range in gatime, use lats4d to compute the data-constrained
    dust removal monthly mean (model removal scaled by observed-to-model AOD).

    path      top path for input control files, see below
    filename  output file name
    prod      satellite product control file name under $path/opendap
    gatime    (t1, t2) GrADS time range

    Raises RuntimeError if the lats4d command fails.
    """
    if not Force:
        if os.path.exists(filename):
            print('<> File exists, skipping <%s>' % filename)
            return

    d = dict(path=path, prod=prod, filename=filename,
             t1=gatime[0], t2=gatime[1])

    tmpl = """
        lats4d.sh -v -gzip 2 \
              -i $path/opendap/du_rm \
              -j $path/opendap/du_cm \
              -k $path/opendap/$prod \
              -o $filename \
              -vars durm \
              -time $t1 $t1 \
              -func 'ave(durm.1*aod.3/duexttau.2,time=$t1,time=$t2)'
        """

    cmd = Template(tmpl).substitute(d)
    rc = os.system(cmd)
    if rc:
        raise RuntimeError('error on return from %s' % cmd)
#....................................................................
def aod_o(path, filename, prod, gatime):
    """
    Given a time range in gatime, use lats4d to compute the observed AOD
    monthly mean.  For the combined 'MxD04' product the Terra (MOD04) and
    Aqua (MYD04) retrievals are averaged; otherwise a single product is used.

    path      top path for input control files, see below
    filename  output file name
    prod      'MxD04' or a single product control file name under $path/opendap
    gatime    (t1, t2) GrADS time range

    Raises RuntimeError if the lats4d command fails.
    """
    if not Force:
        if os.path.exists(filename):
            print('<> File exists, skipping <%s>' % filename)
            return

    d = dict(path=path, prod=prod, filename=filename,
             t1=gatime[0], t2=gatime[1])

    if prod == 'MxD04':
        # NOTE(review): the two-sensor average looks like it is missing
        # parentheses around the numerator -- as written only the second
        # const() term is divided by the sensor count.  Left unchanged
        # pending confirmation of the intended GrADS expression.
        tmpl = """
        lats4d.sh -v -gzip 2 \
              -i $path/opendap/MOD04 \
              -j $path/opendap/MYD04 \
              -o $filename \
              -vars aod \
              -time $t1 $t1 \
              -func 'ave(const(@.1,0,-u)+const(@.2,0,-u)/(if(@.1,==,-u,0,1)+if(@.2,==,-u,0,1)),time=$t1,time=$t2)'
        """
    else:
        tmpl = """
        lats4d.sh -v -gzip 2 \
              -i $path/opendap/$prod \
              -o $filename \
              -vars aod \
              -time $t1 $t1 \
              -func 'ave(aod,time=$t1,time=$t2)'
        """

    cmd = Template(tmpl).substitute(d)
    rc = os.system(cmd)
    if rc:
        raise RuntimeError('error on return from %s' % cmd)
def xxx_m(path, filename, prod, gatime, inFile, var):
    """
    Given a time range in gatime, use lats4d to compute the monthly mean of
    model variable(s) `var` from control file `inFile`, sampled where the
    satellite product has valid AOD (the "+0*aod.2" term masks to valid-AOD
    points).

    path      top path for input control files, see below
    filename  output file name
    prod      satellite product control file name under $path/opendap
    gatime    (t1, t2) GrADS time range
    inFile    model control file name under $path/opendap
    var       space-separated variable name(s) to average

    Raises RuntimeError if the lats4d command fails.
    """
    if not Force:
        if os.path.exists(filename):
            print('<> File exists, skipping <%s>' % filename)
            return

    d = dict(path=path, prod=prod, filename=filename,
             inFile=inFile, var=var,
             t1=gatime[0], t2=gatime[1])

    tmpl = """
        lats4d.sh -v -gzip 2 \
              -i $path/opendap/$inFile \
              -j $path/opendap/$prod \
              -o $filename \
              -vars $var \
              -time $t1 $t1 \
              -func 'ave(@+0*aod.2,time=$t1,time=$t2)'
        """

    cmd = Template(tmpl).substitute(d)

    print(30 * '-')
    print(cmd)
    print(30 * '-')

    if os.system(cmd):
        raise RuntimeError('error on return from %s' % cmd)
#....................................................................
def wx_modulation(path, filename, prod, gatime):
    """
    Given a time range in gatime, use lats4d to compute the emission weather
    modulation factor monthly means (ginoux5 user-defined extension applied
    to 10 m wind and soil wetness), sampled as the data.

    path      top path for input control files, see below
    filename  output file name
    prod      satellite product control file name under $path/opendap
    gatime    (t1, t2) GrADS time range

    Raises RuntimeError if the lats4d or ncrename command fails.
    """
    if not Force:
        if os.path.exists(filename):
            print('<> File exists, skipping <%s>' % filename)
            return

    d = dict(path=path, prod=prod, filename=filename,
             t1=gatime[0], t2=gatime[1])

    tmpl = """
        lats4d.sh -v -gzip 2 -udxt ginoux.udxt \
              -i $path/opendap/w10m \
              -j $path/opendap/gwet \
              -k $path/opendap/$prod \
              -o $filename \
              -vars w10m \
              -time $t1 $t1 \
              -func 'ave(ginoux5(w10m.1,gwettop.2)+0*aod.3,time=$t1,time=$t2)'
        """

    cmd = Template(tmpl).substitute(d)
    if os.system(cmd):
        raise RuntimeError('error on return from <%s>' % cmd)

    # Rename the output variable so it reflects its meaning.
    cmd = "ncrename %s -v w10m,duwx" % filename
    if os.system(cmd):
        raise RuntimeError('error on return from <%s>' % cmd)
#....................................................................
def nobs(path, filename, prod, gatime):
    """
    Given a time range in gatime, use lats4d to compute the monthly mean
    number of valid observations: the fraction of times with defined AOD
    (if(aod,==,-u,0,1) counts undefined points as 0 and valid points as 1).
    The output variable is renamed to "nobs".

    (Docstring corrected: this does not compute MERRA-2 dust emissions.)

    path      top path for input control files, see below
    filename  output file name
    prod      satellite product control file name under $path/opendap
    gatime    (t1, t2) GrADS time range

    Raises RuntimeError if the lats4d or ncrename command fails.
    """
    if not Force:
        if os.path.exists(filename):
            print('<> File exists, skipping <%s>' % filename)
            return

    d = dict(path=path, prod=prod, filename=filename,
             t1=gatime[0], t2=gatime[1])

    tmpl = """
        lats4d.sh -v -gzip 2 \
              -i $path/opendap/$prod \
              -o $filename \
              -vars aod \
              -time $t1 $t1 \
              -func 'ave(if(@,==,-u,0,1),time=$t1,time=$t2)'
        """

    cmd = Template(tmpl).substitute(d)
    if os.system(cmd):
        raise RuntimeError('error on return from <%s>' % cmd)

    cmd = "ncrename %s -v aod,nobs" % filename
    if os.system(cmd):
        raise RuntimeError('error on return from <%s>' % cmd)
#....................................................................
def foo(path, filename, prod, gatime):
    """
    Given a time range in gatime, use lats4d to compute the monthly
    frequency of occurrence of AOD > 0.2 (gridbox mean).  For the combined
    'MxD04' product the Terra (MOD04) and Aqua (MYD04) counts are summed.
    The output variable is renamed to "foo".

    (Docstring corrected: this does not compute MERRA-2 dust emissions.)

    path      top path for input control files, see below
    filename  output file name
    prod      'MxD04' or a single product control file name under $path/opendap
    gatime    (t1, t2) GrADS time range

    Raises RuntimeError if the lats4d or ncrename command fails.
    """
    if not Force:
        if os.path.exists(filename):
            print('<> File exists, skipping <%s>' % filename)
            return

    d = dict(path=path, prod=prod, filename=filename,
             t1=gatime[0], t2=gatime[1])

    if prod == 'MxD04':
        tmpl = """
        lats4d.sh -v -gzip 2 \
              -i $path/opendap/MOD04 \
              -j $path/opendap/MYD04 \
              -o $filename \
              -vars aod \
              -time $t1 $t1 \
              -func 'ave(if(const(@.1,0,-u),>,0.2,1,0)+if(const(@.2,0,-u),>,0.2,1,0),time=$t1,time=$t2)' """
    else:
        tmpl = """
        lats4d.sh -v -gzip 2 \
              -i $path/opendap/$prod \
              -o $filename \
              -vars aod \
              -time $t1 $t1 \
              -func 'ave(if(const(@,0,-u),>,0.2,1,0),time=$t1,time=$t2)'
        """

    cmd = Template(tmpl).substitute(d)
    if os.system(cmd):
        raise RuntimeError('error on return from <%s>' % cmd)

    cmd = "ncrename %s -v aod,foo" % filename
    if os.system(cmd):
        raise RuntimeError('error on return from <%s>' % cmd)
#....................................................................
if __name__ == "__main__":

    path = '/nobackup/5/GAAS/DEE/'

    # Parse command line: one or two year arguments.
    # ----------------------------------------------
    if len(sys.argv) == 3:
        year1 = sys.argv[1]
        year2 = sys.argv[2]
    elif len(sys.argv) == 2:
        year1 = sys.argv[1]
        year2 = year1
    else:
        # print() and parenthesized raise are valid Python 2 and Python 3.
        print(" Usage: %s year1 [year2]" % sys.argv[0])
        print("Examples: %s 2003 2005" % sys.argv[0])
        print("          %s 2003" % sys.argv[0])
        raise RuntimeError('not enough parameters')

    year1, year2 = int(year1), int(year2)

    # Loop over time and products
    # ---------------------------
    for year in range(year1, year2 + 1):
        for month in range(1, 13):
            #for prod in ('MOD04', 'MYD04'):
            for prod in ('MxD04',):
                gatime = mrange(year, month, gat=True)
                dirn = path + '/Level3/%s/Y%d/M%02d' % (prod, year, month)
                os.system('/bin/mkdir -p ' + dirn)

                # The block below is deliberately disabled (a bare string
                # literal is a no-op); kept for reference.
                """
                # Deep Blue Mass flux
                # -------------------
                filename = '%s/dee_%s.uqvq.%d%02d.nc4'%(dirn,prod,year,month)
                mflux_o(path,filename,prod,gatime)

                # Deep Blue Removal
                # -----------------
                filename = '%s/dee_%s.durm.%d%02d.nc4'%(dirn,prod,year,month)
                removal(path,filename,prod,gatime)

                # Frequency of Occurence (based on gridbox mean)
                # ----------------------------------------------
                filename = '%s/dee_%s.foo.%d%02d.nc4'%(dirn,prod,year,month)
                foo(path,filename,prod,gatime)

                # Number of "obs"
                # ---------------
                filename = '%s/dee_%s.nobs.%d%02d.nc4'%(dirn,prod,year,month)
                nobs(path,filename,prod,gatime)

                # MERRA-2 Mass flux
                # ------------------
                filename = '%s/dee_%s.uqvq_m.%d%02d.nc4'%(dirn,prod,year,month)
                inFile, var = 'du_cm', 'dufluxu dufluxv'
                xxx_m(path,filename,prod,gatime,inFile,var)

                # MERRA-2 AOD
                # -----------
                filename = '%s/dee_%s.aod_m.%d%02d.nc4'%(dirn,prod,year,month)
                inFile, var = 'du_cm', 'duexttau'
                xxx_m(path,filename,prod,gatime,inFile,var)

                # MERRA-2 CMASS
                # -------------
                filename = '%s/dee_%s.ducm_m.%d%02d.nc4'%(dirn,prod,year,month)
                inFile, var = 'du_cm', 'ducmass'
                xxx_m(path,filename,prod,gatime,inFile,var)

                # MERRA-2 Removal
                # ---------------
                filename = '%s/dee_%s.durm_m.%d%02d.nc4'%(dirn,prod,year,month)
                inFile, var = 'du_rm', 'durm'
                xxx_m(path,filename,prod,gatime,inFile,var)

                # MERRA-2 Emissions
                # -----------------
                filename = '%s/dee_%s.duem_m.%d%02d.nc4'%(dirn,prod,year,month)
                inFile, var = 'du_em', 'duem'
                xxx_m(path,filename,prod,gatime,inFile,var)

                # Weather Modulation
                # ------------------
                filename = '%s/dee_%s.duwx.%d%02d.nc4'%(dirn,prod,year,month)
                wx_modulation(path,filename,prod,gatime)
                """

                # Observed AOD
                # ------------
                filename = '%s/dee_%s.aod_o.%d%02d.nc4' % (dirn, prod, year, month)
                aod_o(path, filename, prod, gatime)
| 30.161663 | 124 | 0.474502 | 1,530 | 13,060 | 4.019608 | 0.122222 | 0.019512 | 0.049431 | 0.067967 | 0.800813 | 0.774634 | 0.741138 | 0.694634 | 0.683252 | 0.683252 | 0 | 0.030278 | 0.34755 | 13,060 | 432 | 125 | 30.231481 | 0.691468 | 0.048009 | 0 | 0.727273 | 0 | 0.031818 | 0.460432 | 0.071343 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.040909 | null | null | 0.063636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
502fbf0fa248310e0b8337f235ed60c86a9f7db9 | 2,815 | py | Python | nxstart/app.py | roedesh/nxstart | ad1470aeae865270267322e265669f38945e268a | [
"MIT"
] | 40 | 2018-06-19T02:28:00.000Z | 2021-11-14T20:00:34.000Z | nxstart/app.py | roedesh/nxstart | ad1470aeae865270267322e265669f38945e268a | [
"MIT"
] | 2 | 2018-06-19T09:49:36.000Z | 2018-12-02T13:14:26.000Z | nxstart/app.py | roedesh/nxstart | ad1470aeae865270267322e265669f38945e268a | [
"MIT"
] | 4 | 2018-06-19T09:19:29.000Z | 2020-03-31T14:35:12.000Z | # -*- coding: utf-8 -*-
"""Defines the logic for the commands."""
import click
from nxstart import filebuilder
from nxstart.utils.files import check_and_create_directory
from nxstart.utils.strings import generate_folder_name_and_path
def libnx(name, author, clion, cwd):
    """
    Function that holds the logic for the 'libnx' command.

    :param name: Name of the project
    :param author: Name of the author
    :param clion: Using CLion
    :param cwd: Current working directory
    """
    project_dir_name, project_dir_path = generate_folder_name_and_path(name, cwd)
    check_and_create_directory(project_dir_path)

    filebuilder.libnx.create_libnx_project(project_dir_path, name, author)
    filebuilder.generic.modify_readme_file(project_dir_path, name, author)

    # CLion users keep a CMakeLists.txt tailored to the project folder;
    # everyone else has the template CMakeLists.txt removed.
    if clion:
        filebuilder.generic.modify_cmake_lists_file(project_dir_path, project_dir_name)
    else:
        filebuilder.generic.remove_cmake_lists_file(project_dir_path)

    click.echo("Successfully created the libnx project!")
def libt(name, author, clion, cwd):
    """
    Function that holds the logic for the 'libt' command.

    :param name: Name of the project
    :param author: Name of the author
    :param clion: Using CLion
    :param cwd: Current working directory
    """
    project_dir_name, project_dir_path = generate_folder_name_and_path(name, cwd)
    check_and_create_directory(project_dir_path)

    filebuilder.libt.create_libt_project(project_dir_path, name, author)
    filebuilder.generic.modify_readme_file(project_dir_path, name, author)

    # CLion users keep a CMakeLists.txt tailored to the project folder;
    # everyone else has the template CMakeLists.txt removed.
    if clion:
        filebuilder.generic.modify_cmake_lists_file(project_dir_path, project_dir_name)
    else:
        filebuilder.generic.remove_cmake_lists_file(project_dir_path)

    click.echo("Successfully created the libtransistor project!")
def brewjs(name, author, cwd):
    """
    Function that holds the logic for the 'brewjs' command.

    :param name: Name of the project
    :param author: Name of the author
    :param cwd: Current working directory
    """
    # The folder name is only used by CMake handling in other commands;
    # here only the path is needed, so the name is discarded.
    _, folder_path = generate_folder_name_and_path(name, cwd)
    check_and_create_directory(folder_path)

    filebuilder.brewjs.create_brewjs_project(folder_path, name, author)
    filebuilder.generic.modify_readme_file(folder_path, name, author)

    click.echo("Successfully created the BrewJS project!")
def pynx(name, author, cwd):
    """
    Function that holds the logic for the 'pynx' command.

    :param name: Name of the project
    :param author: Name of the author
    :param cwd: Current working directory
    """
    # The folder name is only used by CMake handling in other commands;
    # here only the path is needed, so the name is discarded.
    _, folder_path = generate_folder_name_and_path(name, cwd)
    check_and_create_directory(folder_path)

    filebuilder.pynx.create_pynx_project(folder_path, name, author)
    filebuilder.generic.modify_readme_file(folder_path, name, author)

    click.echo("Successfully created the PyNX project!")
| 31.277778 | 77 | 0.741385 | 378 | 2,815 | 5.285714 | 0.145503 | 0.1001 | 0.036036 | 0.08008 | 0.837337 | 0.824825 | 0.824825 | 0.824825 | 0.824825 | 0.824825 | 0 | 0.000432 | 0.17762 | 2,815 | 89 | 78 | 31.629213 | 0.862635 | 0.267496 | 0 | 0.555556 | 0 | 0 | 0.084536 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
50422835b87e701a6948c725f969b9c9a71362b4 | 8,456 | py | Python | tests/locations/market_groups.py | cmutel/Ocelot | 20e9639570c43f84ae255750a6c402ebabe00981 | [
"BSD-3-Clause"
] | 21 | 2016-06-01T14:10:07.000Z | 2022-02-28T01:56:31.000Z | tests/locations/market_groups.py | cmutel/Ocelot | 20e9639570c43f84ae255750a6c402ebabe00981 | [
"BSD-3-Clause"
] | 152 | 2016-05-16T21:33:22.000Z | 2019-06-24T12:57:14.000Z | tests/locations/market_groups.py | cmutel/Ocelot | 20e9639570c43f84ae255750a6c402ebabe00981 | [
"BSD-3-Clause"
] | 12 | 2016-09-05T15:35:59.000Z | 2021-07-03T19:28:47.000Z | # -*- coding: utf-8 -*-
from ocelot.transformations.locations.market_groups import *
from ocelot.errors import MarketGroupError
import pytest
class FakeTopology:
    """Minimal stand-in for the topology object used by the market-group tests.

    Models a fixed hierarchy: G1 contains G2, M1 and M2; G2 contains M1.
    """

    # Parent location -> set of locations it contains.
    _CONTAINED = {
        'G1': frozenset({'G2', 'M1', 'M2'}),
        'G2': frozenset({'M1'}),
    }
    # Fixed resolution order for ordered_dependencies.
    _ORDER = ('G1', 'G2', 'M1', 'M2')

    def tree(self, data):
        # Nested-dict form of the same hierarchy.
        return {
            'G1': {
                'G2': {'M1': {}},
                'M2': {}
            }
        }

    def ordered_dependencies(self, datasets):
        present = {ds['location'] for ds in datasets}
        return [loc for loc in self._ORDER if loc in present]

    def contained(self, location, exclude_self=False, subtract=None,
                  resolved_row=None):
        # Return a fresh mutable set each call, as the original did.
        return set(self._CONTAINED.get(location, ()))

    def contains(self, parent, child, subtract=None, resolved_row=None):
        return child in self._CONTAINED.get(parent, ())

    def __call__(self, x):
        return set()
def reformat_suppliers(result):
    """Collapse datasets to ``{code: sorted supplier codes}``.

    Datasets whose supplier list ends up empty are omitted from the result.
    """
    collapsed = {}
    for ds in result:
        collapsed[ds['code']] = sorted(exc['code'] for exc in ds.get('suppliers', []))
    return {code: codes for code, codes in collapsed.items() if codes}
def test_inconsistent_names():
    # Two market groups share the reference product 'foo', but one is named
    # after a different product; linking must reject the inconsistency.
    datasets = [
        {
            'type': 'market group',
            'name': 'market group for bar',
            'reference product': 'foo',
        },
        {
            'type': 'market group',
            'name': 'market group for foo',
            'reference product': 'foo',
        },
    ]
    with pytest.raises(MarketGroupError):
        link_market_group_suppliers(datasets)
def test_real_locations_including_glo_but_excluding_row():
    # Markets: RoW, CA, FR, NO
    # Market groups: GLO, RER, WEU (Western Europe)
    def dataset(kind, location):
        # All datasets share the same product; code mirrors the location.
        return {
            'type': kind,
            'location': location,
            'code': location,
            'name': ('market for foo' if kind == 'market activity'
                     else 'market group for foo'),
            'reference product': 'foo',
            'exchanges': [{'type': 'reference product', 'name': 'foo'}],
        }

    given = (
        [dataset('market activity', loc) for loc in ('RoW', 'CA', 'FR', 'NO')] +
        [dataset('market group', loc) for loc in ('GLO', 'RER', 'WEU')]
    )
    expected = {
        "GLO": ['CA', 'RER', 'RoW'],
        "RER": ['NO', 'WEU'],
        "WEU": ['FR'],
    }
    assert reformat_suppliers(link_market_group_suppliers(given)) == expected
def test_real_locations_including_glo_and_row():
    # Markets: RoW, CA, FR, NO
    # Market groups: GLO, RER, WEU (Western Europe)
    # NOTE(review): the fixture is currently identical to
    # test_real_locations_including_glo_but_excluding_row; confirm whether a
    # RoW-free variant was intended.
    def dataset(kind, location):
        return {
            'type': kind,
            'location': location,
            'code': location,
            'name': ('market for foo' if kind == 'market activity'
                     else 'market group for foo'),
            'reference product': 'foo',
            'exchanges': [{'type': 'reference product', 'name': 'foo'}],
        }

    given = (
        [dataset('market activity', loc) for loc in ('RoW', 'CA', 'FR', 'NO')] +
        [dataset('market group', loc) for loc in ('GLO', 'RER', 'WEU')]
    )
    expected = {
        "GLO": ['CA', 'RER', 'RoW'],
        "RER": ['NO', 'WEU'],
        "WEU": ['FR'],
    }
    assert reformat_suppliers(link_market_group_suppliers(given)) == expected
def test_glo_includes_missing_activities():
    # GLO must pick up markets (CA) not covered by any other market group.
    def dataset(kind, location):
        return {
            'type': kind,
            'location': location,
            'code': location,
            'name': ('market for foo' if kind == 'market activity'
                     else 'market group for foo'),
            'reference product': 'foo',
            'exchanges': [{'type': 'reference product', 'name': 'foo'}],
        }

    given = (
        [dataset('market activity', loc) for loc in ('CA', 'FR')] +
        [dataset('market group', loc) for loc in ('GLO', 'RER')]
    )
    expected = {
        "GLO": ['CA', 'RER'],
        "RER": ['FR'],
    }
    assert reformat_suppliers(link_market_group_suppliers(given)) == expected
def test_same_location_market_group_market():
    # A market group sharing its location with a plain market must source
    # from that market.
    def dataset(kind, code):
        return {
            'type': kind,
            'location': 'CA',
            'code': code,
            'name': ('market for foo' if kind == 'market activity'
                     else 'market group for foo'),
            'reference product': 'foo',
            'exchanges': [{'type': 'reference product', 'name': 'foo'}],
        }

    given = [dataset('market activity', '2'), dataset('market group', '1')]
    expected = {
        "1": ['2'],
    }
    assert reformat_suppliers(link_market_group_suppliers(given)) == expected
def test_row_only_supply_no_market_group():
    # With no intermediate market group covering it, RoW supplies GLO directly.
    def dataset(kind, location):
        return {
            'type': kind,
            'location': location,
            'code': location,
            'name': ('market for foo' if kind == 'market activity'
                     else 'market group for foo'),
            'reference product': 'foo',
            'exchanges': [{'type': 'reference product', 'name': 'foo'}],
        }

    given = (
        [dataset('market activity', loc) for loc in ('RoW', 'RER')] +
        [dataset('market group', 'GLO')]
    )
    expected = {
        "GLO": ['RER', 'RoW'],
    }
    assert reformat_suppliers(link_market_group_suppliers(given)) == expected
def test_row_market_groups():
    # Market groups located in RoW are forbidden.
    row_group = [{'type': 'market group', 'location': 'RoW'}]
    with pytest.raises(MarketGroupError):
        check_no_row_market_groups(row_group)
| 30.861314 | 90 | 0.500946 | 825 | 8,456 | 5.044848 | 0.122424 | 0.184527 | 0.114128 | 0.126862 | 0.773667 | 0.773667 | 0.743633 | 0.722009 | 0.712398 | 0.712398 | 0 | 0.004264 | 0.306646 | 8,456 | 273 | 91 | 30.974359 | 0.705611 | 0.019276 | 0 | 0.8 | 0 | 0 | 0.346084 | 0 | 0 | 0 | 0 | 0 | 0.02 | 1 | 0.052 | false | 0 | 0.012 | 0.008 | 0.108 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
506331e243ba95a6d9e178d02dc8bd6d28189763 | 71 | py | Python | tests/compat.py | MountainChenCad/First | 9dd75efc9d989df17eba2b4865e837428cf033f4 | [
"CC0-1.0"
] | 827 | 2019-04-18T12:27:41.000Z | 2020-02-22T15:26:18.000Z | tests/compat.py | MountainChenCad/First | 9dd75efc9d989df17eba2b4865e837428cf033f4 | [
"CC0-1.0"
] | 3 | 2019-04-19T15:33:13.000Z | 2020-01-11T05:48:10.000Z | tests/compat.py | MountainChenCad/First | 9dd75efc9d989df17eba2b4865e837428cf033f4 | [
"CC0-1.0"
] | 34 | 2019-04-18T18:09:37.000Z | 2020-01-11T05:42:42.000Z | # -*- coding: utf-8 -*-
import io as StringIO
def u(s):
return s
| 10.142857 | 23 | 0.56338 | 12 | 71 | 3.333333 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019231 | 0.267606 | 71 | 6 | 24 | 11.833333 | 0.75 | 0.295775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
ac97dff11b6f54bb06028f8443698bce4efbf45d | 6,249 | py | Python | train_quantized_CIFAR10.py | BatyrM/QL-Net | b245aadeb106810d075064137f26d773b2dbd679 | [
"MIT"
] | null | null | null | train_quantized_CIFAR10.py | BatyrM/QL-Net | b245aadeb106810d075064137f26d773b2dbd679 | [
"MIT"
] | null | null | null | train_quantized_CIFAR10.py | BatyrM/QL-Net | b245aadeb106810d075064137f26d773b2dbd679 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from load_data import get_data
from vgg_quantized import VGG
from train_utils_quantized_CIFAR10 import train, test
from training_parameters_CIFAR10 import get_params
args = get_params()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
case_number = args.case_number
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
train_loader = get_data(args, dataset='cifar10', ifTrain=True)
test_loader = get_data(args, dataset='cifar10', ifTrain=False)
model = VGG()
model.load_state_dict(torch.load('cifar10_baseline.pth'))
model.to(device).eval()
if case_number == 1:
layer_id1 = 1
layer1 = 'layer' + str(layer_id1)
tree1 = torch.load('tree_' + layer1)
nodes1 = np.asarray([tree1[i].centroid for i in range (0, np.shape(tree1)[0])])
lookup_table1 = torch.FloatTensor(nodes1).to(device).unsqueeze(0)
print(lookup_table1.size())
args.out_name = 'cifar10_ql_layer'+str(layer_id1)+'.pth'
print("\n\nWith only first layer quantized:\n")
train(model, train_loader, test_loader, args, device, layer_id=1, tree=[lookup_table1])
elif case_number == 2:
layer_id2 = 2
layer2 = 'layer' + str(layer_id2)
tree2 = torch.load('tree_' + layer2)
nodes2 = np.asarray([tree2[i].centroid for i in range (0, np.shape(tree2)[0])])
lookup_table2 = torch.FloatTensor(nodes2).to(device).unsqueeze(0)
print(lookup_table2.size())
args.out_name = 'cifar10_ql_layer'+str(layer_id2)+'.pth'
print("\n\nWith only second layer quantized:\n")
train(model, train_loader, test_loader, args, device, layer_id=2, tree=[lookup_table2])
elif case_number == 3:
layer_id1 = 1
layer1 = 'layer' + str(layer_id1)
tree1 = torch.load('tree_' + layer1)
nodes1 = np.asarray([tree1[i].centroid for i in range (0, np.shape(tree1)[0])])
layer_id2 = 2
layer2 = 'layer' + str(layer_id2)
tree2 = torch.load('tree_' + layer2)
nodes2 = np.asarray([tree2[i].centroid for i in range (0, np.shape(tree2)[0])])
lookup_table1 = torch.FloatTensor(nodes1).to(device).unsqueeze(0)
lookup_table2 = torch.FloatTensor(nodes2).to(device).unsqueeze(0)
# lookup_table = torch.FloatTensor(nodes).to(device)
print(lookup_table1.size())
print(lookup_table2.size())
args.out_name = 'cifar10_ql_layer'+str(layer_id1)+'&layer'+str(layer_id2)+'.pth'
print("\n\nWith both first layer + second layer quantized:\n")
train(model, train_loader, test_loader, args, device, layer_id=3, tree=[[lookup_table1], [lookup_table2]])
elif case_number == 4:
layer_id0 = 0
layer0 = 'layer' + str(layer_id0)
tree0 = torch.load('tree_' + layer0)
nodes0 = np.asarray([tree0[i].centroid for i in range (0, np.shape(tree0)[0])])
lookup_table0 = torch.FloatTensor(nodes0).to(device).unsqueeze(0)
print(lookup_table0.size())
args.out_name = 'cifar10_ql_input_layer.pth'
print("\n\nWith only input layer quantized:\n")
train(model, train_loader, test_loader, args, device, layer_id=4, tree=[lookup_table0])
elif case_number == 5:
layer_id0 = 0
layer0 = 'layer' + str(layer_id0)
tree0 = torch.load('tree_' + layer0)
nodes0 = np.asarray([tree0[i].centroid for i in range (0, np.shape(tree0)[0])])
layer_id1 = 1
layer1 = 'layer' + str(layer_id1)
tree1 = torch.load('tree_' + layer1)
nodes1 = np.asarray([tree1[i].centroid for i in range (0, np.shape(tree1)[0])])
lookup_table0 = torch.FloatTensor(nodes0).to(device).unsqueeze(0)
lookup_table1 = torch.FloatTensor(nodes1).to(device).unsqueeze(0)
print(lookup_table0.size())
print(lookup_table1.size())
args.out_name = 'cifar10_ql_input_layer&layer'+str(layer_id1)+'.pth'
print("\n\nWith only first layer + input layer quantized:\n")
train(model, train_loader, test_loader, args, device, layer_id=5, tree=[[lookup_table0], [lookup_table1]])
elif case_number == 6:
layer_id0 = 0
layer0 = 'layer' + str(layer_id0)
tree0 = torch.load('tree_' + layer0)
nodes0 = np.asarray([tree0[i].centroid for i in range (0, np.shape(tree0)[0])])
layer_id2 = 2
layer2 = 'layer' + str(layer_id2)
tree2 = torch.load('tree_' + layer2)
nodes2 = np.asarray([tree2[i].centroid for i in range (0, np.shape(tree2)[0])])
lookup_table0 = torch.FloatTensor(nodes0).to(device).unsqueeze(0)
lookup_table2 = torch.FloatTensor(nodes2).to(device).unsqueeze(0)
print(lookup_table0.size())
print(lookup_table2.size())
args.out_name = 'cifar10_ql_input_layer&layer'+str(layer_id2)+'.pth'
print("\n\nWith only second layer + input layer quantized:\n")
train(model, train_loader, test_loader, args, device, layer_id=6, tree=[[lookup_table0], [lookup_table2]])
elif case_number == 7:
layer_id0 = 0
layer0 = 'layer' + str(layer_id0)
tree0 = torch.load('tree_' + layer0)
nodes0 = np.asarray([tree0[i].centroid for i in range (0, np.shape(tree0)[0])])
layer_id1 = 1
layer1 = 'layer' + str(layer_id1)
tree1 = torch.load('tree_' + layer1)
nodes1 = np.asarray([tree1[i].centroid for i in range (0, np.shape(tree1)[0])])
layer_id2 = 2
layer2 = 'layer' + str(layer_id2)
tree2 = torch.load('tree_' + layer2)
nodes2 = np.asarray([tree2[i].centroid for i in range (0, np.shape(tree2)[0])])
lookup_table0 = torch.FloatTensor(nodes0).to(device).unsqueeze(0)
lookup_table1 = torch.FloatTensor(nodes1).to(device).unsqueeze(0)
lookup_table2 = torch.FloatTensor(nodes2).to(device).unsqueeze(0)
# lookup_table = torch.FloatTensor(nodes).to(device)
print(lookup_table0.size())
print(lookup_table1.size())
print(lookup_table2.size())
args.out_name = 'cifar10_ql_input_layer&layer'+str(layer_id1)+'&layer'+str(layer_id2)+'.pth'
print("\n\nWith both layers + input layer quantized:\n")
train(model, train_loader, test_loader, args, device, layer_id=7, tree=[[lookup_table0], [lookup_table1], [lookup_table2]])
# test(model, test_loader, device)
| 39.802548 | 127 | 0.694511 | 928 | 6,249 | 4.498922 | 0.115302 | 0.038323 | 0.062275 | 0.037365 | 0.825629 | 0.779641 | 0.779641 | 0.761437 | 0.746826 | 0.743952 | 0 | 0.045377 | 0.157145 | 6,249 | 156 | 128 | 40.057692 | 0.747294 | 0.021443 | 0 | 0.585366 | 0 | 0 | 0.110784 | 0.018 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.089431 | 0 | 0.089431 | 0.162602 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
acca0a08c67b62b932dc8abd4819eb5c8d264fb6 | 7,801 | py | Python | causaleffect.py | WanyuGroup/CVPR2022-OrphicX | 98d8d8259439c45661573e575cf956331df16abc | [
"MIT"
] | null | null | null | causaleffect.py | WanyuGroup/CVPR2022-OrphicX | 98d8d8259439c45661573e575cf956331df16abc | [
"MIT"
] | null | null | null | causaleffect.py | WanyuGroup/CVPR2022-OrphicX | 98d8d8259439c45661573e575cf956331df16abc | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn.functional as F
"""
joint_uncond:
Sample-based estimate of "joint, unconditional" causal effect, -I(alpha; Yhat).
Inputs:
- params['Nalpha'] monte-carlo samples per causal factor
- params['Nbeta'] monte-carlo samples per noncausal factor
- params['K'] number of causal factors
- params['L'] number of noncausal factors
- params['M'] number of classes (dimensionality of classifier output)
- decoder
- classifier
- device
Outputs:
- negCausalEffect (sample-based estimate of -I(alpha; Yhat))
- info['xhat']
- info['yhat']
"""
def joint_uncond(params, decoder, classifier, adj, feat, node_idx=None, act=torch.sigmoid, mu=0, std=1, device=None):
    """Sample-based estimate of the "joint, unconditional" causal effect -I(alpha; Yhat).

    Draws params['Nalpha'] samples of the causal latents (alpha), each paired
    with params['Nbeta'] independent samples of the non-causal latents (beta).
    The concatenated latents are decoded, masked by ``adj`` and scored by the
    classifier; the mutual information between alpha and the predicted class
    distribution is then estimated from the per-alpha class averages.

    Parameters
    ----------
    params : dict
        Uses keys 'Nalpha', 'Nbeta', 'K' (causal latent count), 'L'
        (non-causal latent count) and 'M' (number of classes).
    decoder : callable
        Maps latent samples ``zs`` to pre-activation adjacency values.
    classifier : callable
        ``classifier(feat, xhat)`` whose first returned element are logits.
    adj, feat : torch.Tensor
        Adjacency matrix and node features; both are repeated over the
        Nalpha * Nbeta sample batch.
    node_idx : int, optional
        If given, only that node's logits are used.
    act : callable
        Activation applied to the decoder output (default ``torch.sigmoid``).
    mu, std : scalar or torch.Tensor
        Latent mean/std; tensors are split into alpha ([:K]) and beta ([K:]).
    device : torch.device, optional
        Device on which the latent samples are drawn.

    Returns
    -------
    tuple
        (negative mutual-information estimate as a scalar tensor, None).
    """
    eps = 1e-8
    n_samples = params['Nalpha'] * params['Nbeta']
    feat = feat.repeat(n_samples, 1, 1)
    adj = adj.repeat(n_samples, 1, 1)
    if torch.is_tensor(mu):
        # Split latent statistics into causal (alpha) and non-causal (beta) parts.
        alpha_mu = mu[:, :params['K']]
        beta_mu = mu[:, params['K']:]
        alpha_std = std[:, :params['K']]
        beta_std = std[:, params['K']:]
    else:
        alpha_mu = 0
        beta_mu = 0
        alpha_std = 1
        beta_std = 1
    # Each alpha sample is repeated Nbeta times so that every alpha is
    # paired with Nbeta fresh beta samples.
    alpha = torch.randn(
        (params['Nalpha'], adj.shape[-1], params['K']), device=device
    ).mul(alpha_std).add_(alpha_mu).repeat(1, params['Nbeta'], 1).view(
        n_samples, adj.shape[-1], params['K'])
    beta = torch.randn(
        (n_samples, adj.shape[-1], params['L']), device=device
    ).mul(beta_std).add_(beta_mu)
    zs = torch.cat([alpha, beta], dim=-1)
    xhat = act(decoder(zs)) * adj
    if node_idx is None:
        logits = classifier(feat, xhat)[0]
    else:
        logits = classifier(feat, xhat)[0][:, node_idx, :]
    yhat = F.softmax(logits, dim=1).view(
        params['Nalpha'], params['Nbeta'], params['M'])
    p = yhat.mean(1)  # p(y | alpha), averaged over the beta samples
    # I = E_alpha[sum_y p(y|alpha) log p(y|alpha)] - sum_y q(y) log q(y)
    info = torch.sum(torch.mul(p, torch.log(p + eps)), dim=1).mean()
    q = p.mean(0)  # marginal estimate of p(y)
    info = info - torch.sum(torch.mul(q, torch.log(q + eps)))
    return -info, None
def beta_info_flow(params, decoder, classifier, adj, feat, node_idx=None, act=torch.sigmoid, mu=0, std=1, device=None):
    """Sample-based estimate of the information flow from the non-causal
    latents to the classifier output, -I(beta; Yhat).

    Mirror image of :func:`joint_uncond`: here each of the params['Nalpha']
    beta samples is repeated params['Nbeta'] times and paired with fresh
    alpha samples, so the inner average marginalises over alpha instead of
    beta. Parameters and return value are as in :func:`joint_uncond`.
    """
    eps = 1e-8
    n_samples = params['Nalpha'] * params['Nbeta']
    feat = feat.repeat(n_samples, 1, 1)
    adj = adj.repeat(n_samples, 1, 1)
    if torch.is_tensor(mu):
        # Split latent statistics into causal (alpha) and non-causal (beta) parts.
        alpha_mu = mu[:, :params['K']]
        beta_mu = mu[:, params['K']:]
        alpha_std = std[:, :params['K']]
        beta_std = std[:, params['K']:]
    else:
        alpha_mu = 0
        beta_mu = 0
        alpha_std = 1
        beta_std = 1
    alpha = torch.randn(
        (n_samples, adj.shape[-1], params['K']), device=device
    ).mul(alpha_std).add_(alpha_mu)
    # Each beta sample is repeated Nbeta times (the grouping role alpha
    # plays in joint_uncond).
    beta = torch.randn(
        (params['Nalpha'], adj.shape[-1], params['L']), device=device
    ).mul(beta_std).add_(beta_mu).repeat(1, params['Nbeta'], 1).view(
        n_samples, adj.shape[-1], params['L'])
    zs = torch.cat([alpha, beta], dim=-1)
    xhat = act(decoder(zs)) * adj
    if node_idx is None:
        logits = classifier(feat, xhat)[0]
    else:
        logits = classifier(feat, xhat)[0][:, node_idx, :]
    yhat = F.softmax(logits, dim=1).view(
        params['Nalpha'], params['Nbeta'], params['M'])
    p = yhat.mean(1)  # p(y | beta), averaged over the alpha samples
    info = torch.sum(torch.mul(p, torch.log(p + eps)), dim=1).mean()
    q = p.mean(0)  # marginal estimate of p(y)
    info = info - torch.sum(torch.mul(q, torch.log(q + eps)))
    return -info, None
"""
joint_uncond_singledim:
Sample-based estimate of "joint, unconditional" causal effect
for single latent factor, -I(z_i; Yhat). Note the interpretation
of params['Nalpha'] and params['Nbeta'] here: Nalpha is the number
of samples of z_i, and Nbeta is the number of samples of the other
latent factors.
Inputs:
- params['Nalpha']
- params['Nbeta']
- params['K']
- params['L']
- params['M']
- decoder
- classifier
- device
- dim (i : compute -I(z_i; Yhat) **note: i is zero-indexed!**)
Outputs:
- negCausalEffect (sample-based estimate of -I(z_i; Yhat))
- info['xhat']
- info['yhat']
"""
def joint_uncond_singledim(params, decoder, classifier, adj, feat, dim, node_idx=None, act=torch.sigmoid, mu=0, std=1, device=None):
    """Sample-based estimate of the causal effect of one latent dimension,
    -I(z_dim; Yhat).

    Note the interpretation of the sample counts here: params['Nalpha'] is
    the number of samples of z_dim and params['Nbeta'] the number of samples
    of the remaining latent dimensions.

    Parameters largely mirror :func:`joint_uncond`; additionally:

    dim : int
        Zero-indexed latent dimension whose effect is estimated. Also
        requires params['z_dim'], the total latent dimensionality.
    """
    eps = 1e-8
    n_samples = params['Nalpha'] * params['Nbeta']
    feat = feat.repeat(n_samples, 1, 1)
    adj = adj.repeat(n_samples, 1, 1)
    if torch.is_tensor(mu):
        # NOTE(review): ``alpha_mu``/``alpha_std`` take the full tensors
        # while ``beta_mu``/``beta_std`` take column ``dim`` — this looks
        # swapped relative to their usage below; confirm against callers.
        alpha_mu = mu
        beta_mu = mu[:, dim]
        alpha_std = std
        beta_std = std[:, dim]
    else:
        alpha_mu = 0
        beta_mu = 0
        alpha_std = 1
        beta_std = 1
    # z_dim-th latent: Nalpha samples, each repeated Nbeta times.
    alpha = torch.randn(
        (params['Nalpha'], adj.shape[-1]), device=device
    ).mul(alpha_std).add_(alpha_mu).repeat(1, params['Nbeta']).view(
        n_samples, adj.shape[-1])
    # Resample every latent, then overwrite column ``dim`` with alpha.
    zs = torch.randn(
        (n_samples, adj.shape[-1], params['z_dim']), device=device
    ).mul(beta_std).add_(beta_mu)
    zs[:, :, dim] = alpha
    xhat = act(decoder(zs)) * adj
    if node_idx is None:
        logits = classifier(feat, xhat)[0]
    else:
        logits = classifier(feat, xhat)[0][:, node_idx, :]
    yhat = F.softmax(logits, dim=1).view(
        params['Nalpha'], params['Nbeta'], params['M'])
    p = yhat.mean(1)  # p(y | z_dim), averaged over the other latents
    info = torch.sum(torch.mul(p, torch.log(p + eps)), dim=1).mean()
    q = p.mean(0)  # marginal estimate of p(y)
    info = info - torch.sum(torch.mul(q, torch.log(q + eps)))
    return -info, None
acf1baa7c183794c174af5cb75f202ea0cf6a7b2 | 81 | py | Python | hackerRank/Problem Solving/Data Structures/Arrays/leftRotation.py | paritoshtripathi935/laughing-disco | 3ee690fba469b1e07846af5c85b1dd8675656520 | [
"MIT"
] | 4 | 2021-12-09T06:44:24.000Z | 2021-12-24T08:09:19.000Z | hackerRank/Problem Solving/Data Structures/Arrays/leftRotation.py | paritoshtripathi935/laughing-disco | 3ee690fba469b1e07846af5c85b1dd8675656520 | [
"MIT"
] | null | null | null | hackerRank/Problem Solving/Data Structures/Arrays/leftRotation.py | paritoshtripathi935/laughing-disco | 3ee690fba469b1e07846af5c85b1dd8675656520 | [
"MIT"
] | null | null | null | def rotateLeft(d, arr):
# Write your code here
return arr[d:] + arr[: d]
| 20.25 | 29 | 0.592593 | 13 | 81 | 3.692308 | 0.692308 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.259259 | 81 | 3 | 30 | 27 | 0.8 | 0.246914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
acfeb68ee745d3d7d7b1986e54c5c55737c943b0 | 9,787 | py | Python | autoarray/plot/imaging_plots.py | Sketos/PyAutoArray | 72dc7e8d1c38786915f82a7e7284239e5ce87624 | [
"MIT"
] | null | null | null | autoarray/plot/imaging_plots.py | Sketos/PyAutoArray | 72dc7e8d1c38786915f82a7e7284239e5ce87624 | [
"MIT"
] | null | null | null | autoarray/plot/imaging_plots.py | Sketos/PyAutoArray | 72dc7e8d1c38786915f82a7e7284239e5ce87624 | [
"MIT"
] | null | null | null | from autoarray.plot import plotters
@plotters.set_include_and_sub_plotter
@plotters.set_subplot_filename
def subplot_imaging(
    imaging, grid=None, mask=None, positions=None, include=None, sub_plotter=None
):
    """Plot the imaging dataset as one six-panel subplot figure.

    The panels are, in order: image, noise-map, PSF, signal-to-noise map,
    absolute signal-to-noise map and potential chi-squared map.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset (observed image, noise-map, PSF, etc.).
    grid : grid or None
        Optional (y, x) coordinate grid over-plotted on the image panel.
    mask : mask or None
        Optional mask over-plotted on the panels that accept one.
    positions : positions or None
        Optional (y, x) positions over-plotted on the panels.
    include : Include or None
        Settings for extra plot features (origin, border, ...); presumably
        supplied by the ``set_include_and_sub_plotter`` decorator — confirm.
    sub_plotter : SubPlotter or None
        Plotter that opens the figure, lays out the panels and writes the
        output; presumably supplied by the decorators — confirm.
    """
    number_subplots = 6
    sub_plotter.open_subplot_figure(number_subplots=number_subplots)
    sub_plotter.setup_subplot(number_subplots=number_subplots, subplot_index=1)
    image(
        imaging=imaging,
        grid=grid,
        mask=mask,
        positions=positions,
        include=include,
        plotter=sub_plotter,
    )
    sub_plotter.setup_subplot(number_subplots=number_subplots, subplot_index=2)
    noise_map(
        imaging=imaging,
        mask=mask,
        positions=positions,
        include=include,
        plotter=sub_plotter,
    )
    sub_plotter.setup_subplot(number_subplots=number_subplots, subplot_index=3)
    psf(imaging=imaging, positions=positions, include=include, plotter=sub_plotter)
    sub_plotter.setup_subplot(number_subplots=number_subplots, subplot_index=4)
    signal_to_noise_map(
        imaging=imaging,
        mask=mask,
        positions=positions,
        include=include,
        plotter=sub_plotter,
    )
    sub_plotter.setup_subplot(number_subplots=number_subplots, subplot_index=5)
    absolute_signal_to_noise_map(
        imaging=imaging,
        mask=mask,
        positions=positions,
        include=include,
        plotter=sub_plotter,
    )
    sub_plotter.setup_subplot(number_subplots=number_subplots, subplot_index=6)
    potential_chi_squared_map(
        imaging=imaging,
        mask=mask,
        positions=positions,
        include=include,
        plotter=sub_plotter,
    )
    sub_plotter.output.subplot_to_figure()
    sub_plotter.figure.close()
def individual(
    imaging,
    grid=None,
    mask=None,
    positions=None,
    plot_image=False,
    plot_noise_map=False,
    plot_psf=False,
    plot_signal_to_noise_map=False,
    plot_absolute_signal_to_noise_map=False,
    plot_potential_chi_squared_map=False,
    include=None,
    plotter=None,
):
    """Plot selected attributes of the imaging dataset as individual figures.

    Each ``plot_*`` flag toggles one figure (image, noise-map, PSF,
    signal-to-noise map, absolute signal-to-noise map, potential
    chi-squared map); all flags default to off.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset (observed image, noise-map, PSF, etc.).
    grid, mask, positions : optional
        Extra quantities over-plotted on the figures that support them
        (``grid`` is only passed to the image figure).
    plot_image, plot_noise_map, plot_psf, plot_signal_to_noise_map,
    plot_absolute_signal_to_noise_map, plot_potential_chi_squared_map : bool
        Whether to produce the corresponding figure.
    include : Include or None
        Settings for extra plot features (origin, border, ...).
    plotter : Plotter or None
        The plotter used for every figure.
    """
    if plot_image:
        image(
            imaging=imaging,
            grid=grid,
            mask=mask,
            positions=positions,
            include=include,
            plotter=plotter,
        )
    if plot_noise_map:
        noise_map(imaging=imaging, mask=mask, include=include, plotter=plotter)
    if plot_psf:
        psf(imaging=imaging, include=include, plotter=plotter)
    if plot_signal_to_noise_map:
        signal_to_noise_map(
            imaging=imaging, mask=mask, include=include, plotter=plotter
        )
    if plot_absolute_signal_to_noise_map:
        absolute_signal_to_noise_map(
            imaging=imaging, mask=mask, include=include, plotter=plotter
        )
    if plot_potential_chi_squared_map:
        potential_chi_squared_map(
            imaging=imaging, mask=mask, include=include, plotter=plotter
        )
@plotters.set_include_and_plotter
@plotters.set_labels
def image(imaging, grid=None, mask=None, positions=None, include=None, plotter=None):
    """Plot the observed image of the imaging dataset.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset whose ``image`` array is plotted.
    grid : grid or None
        Optional (y, x) coordinate grid over-plotted on the image.
    mask : mask or None
        Optional mask over-plotted on the image.
    positions : positions or None
        Optional (y, x) positions over-plotted on the image.
    include : Include or None
        Controls whether the origin and border are included in the plot.
    plotter : Plotter or None
        The plotter performing the plotting.
    """
    plotter.plot_array(
        array=imaging.image,
        include_origin=include.origin,
        include_border=include.border,
        grid=grid,
        mask=mask,
        positions=positions,
    )
@plotters.set_include_and_plotter
@plotters.set_labels
def noise_map(
    imaging, grid=None, mask=None, positions=None, include=None, plotter=None
):
    """Plot the noise-map of the imaging dataset.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset whose ``noise_map`` array is plotted.
    grid : grid or None
        Optional (y, x) coordinate grid over-plotted on the figure.
    mask : mask or None
        Optional mask over-plotted on the figure.
    positions : positions or None
        Optional (y, x) positions over-plotted on the figure.
    include : Include or None
        Controls whether the origin and border are included in the plot.
    plotter : Plotter or None
        The plotter performing the plotting.
    """
    plotter.plot_array(
        array=imaging.noise_map,
        include_origin=include.origin,
        include_border=include.border,
        grid=grid,
        mask=mask,
        positions=positions,
    )
@plotters.set_include_and_plotter
@plotters.set_labels
def psf(imaging, grid=None, positions=None, include=None, plotter=None):
    """Plot the PSF of the imaging dataset.

    Unlike the other figures, no mask or border is plotted here.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset whose ``psf`` array is plotted.
    grid : grid or None
        Optional (y, x) coordinate grid over-plotted on the figure.
    positions : positions or None
        Optional (y, x) positions over-plotted on the figure.
    include : Include or None
        Controls whether the origin is included in the plot.
    plotter : Plotter or None
        The plotter performing the plotting.
    """
    plotter.plot_array(
        array=imaging.psf, include_origin=include.origin, grid=grid, positions=positions
    )
@plotters.set_include_and_plotter
@plotters.set_labels
def signal_to_noise_map(
    imaging, grid=None, mask=None, positions=None, include=None, plotter=None
):
    """Plot the signal-to-noise map of the imaging dataset.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset whose ``signal_to_noise_map`` array is plotted.
    grid : grid or None
        Optional (y, x) coordinate grid over-plotted on the figure.
    mask : mask or None
        Optional mask over-plotted on the figure.
    positions : positions or None
        Optional (y, x) positions over-plotted on the figure.
    include : Include or None
        Controls whether the origin and border are included in the plot.
    plotter : Plotter or None
        The plotter performing the plotting.
    """
    plotter.plot_array(
        array=imaging.signal_to_noise_map,
        include_origin=include.origin,
        include_border=include.border,
        grid=grid,
        mask=mask,
        positions=positions,
    )
@plotters.set_include_and_plotter
@plotters.set_labels
def absolute_signal_to_noise_map(
    imaging, grid=None, mask=None, positions=None, include=None, plotter=None
):
    """Plot the absolute signal-to-noise map of the imaging dataset.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset whose ``absolute_signal_to_noise_map`` array is
        plotted.
    grid : grid or None
        Optional (y, x) coordinate grid over-plotted on the figure.
    mask : mask or None
        Optional mask over-plotted on the figure.
    positions : positions or None
        Optional (y, x) positions over-plotted on the figure.
    include : Include or None
        Controls whether the origin and border are included in the plot.
    plotter : Plotter or None
        The plotter performing the plotting.
    """
    plotter.plot_array(
        array=imaging.absolute_signal_to_noise_map,
        include_origin=include.origin,
        include_border=include.border,
        grid=grid,
        mask=mask,
        positions=positions,
    )
@plotters.set_include_and_plotter
@plotters.set_labels
def potential_chi_squared_map(
    imaging, grid=None, mask=None, positions=None, include=None, plotter=None
):
    """Plot the potential chi-squared map of the imaging dataset.

    (The original docstring was copy-pasted from ``signal_to_noise_map``;
    this figure plots ``imaging.potential_chi_squared_map``.)

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset whose ``potential_chi_squared_map`` array is
        plotted.
    grid : grid or None
        Optional (y, x) coordinate grid over-plotted on the figure.
    mask : mask or None
        Optional mask over-plotted on the figure.
    positions : positions or None
        Optional (y, x) positions over-plotted on the figure.
    include : Include or None
        Controls whether the origin and border are included in the plot.
    plotter : Plotter or None
        The plotter performing the plotting.
    """
    plotter.plot_array(
        array=imaging.potential_chi_squared_map,
        include_origin=include.origin,
        include_border=include.border,
        grid=grid,
        mask=mask,
        positions=positions,
    )
| 31.775974 | 134 | 0.697047 | 1,293 | 9,787 | 5.073473 | 0.089714 | 0.05122 | 0.049543 | 0.060976 | 0.902439 | 0.873933 | 0.853049 | 0.847561 | 0.841768 | 0.809756 | 0 | 0.000914 | 0.21784 | 9,787 | 307 | 135 | 31.879479 | 0.856042 | 0.442832 | 0 | 0.590062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049689 | false | 0 | 0.006211 | 0 | 0.055901 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4a11f01759ae0b785f550c0bd4335ba56b57d615 | 30,120 | py | Python | networking_oneview/tests/unit/ml2/drivers/oneview/test_oneview_mech_driver.py | HewlettPackard/networking-oneview | 21881596eeb39565a027022921d4ec4f71e3278b | [
"Apache-2.0"
] | 6 | 2018-02-02T17:37:15.000Z | 2018-09-20T14:06:16.000Z | networking_oneview/tests/unit/ml2/drivers/oneview/test_oneview_mech_driver.py | HewlettPackard/networking-oneview | 21881596eeb39565a027022921d4ec4f71e3278b | [
"Apache-2.0"
] | 18 | 2018-02-06T14:54:22.000Z | 2018-02-27T13:34:37.000Z | networking_oneview/tests/unit/ml2/drivers/oneview/test_oneview_mech_driver.py | HewlettPackard/networking-oneview | 21881596eeb39565a027022921d4ec4f71e3278b | [
"Apache-2.0"
] | 2 | 2020-04-28T14:36:12.000Z | 2020-07-22T13:09:44.000Z | # Copyright 2017 Hewlett Packard Enterprise Development LP.
# Copyright 2017 Universidade Federal de Campina Grande
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
from networking_oneview.ml2.drivers.oneview import common
from networking_oneview.ml2.drivers.oneview import database_manager
from networking_oneview.ml2.drivers.oneview import exceptions
from networking_oneview.ml2.drivers.oneview import mech_oneview
from networking_oneview.ml2.drivers.oneview import neutron_oneview_client
# --- Neutron network fixtures ------------------------------------------------
# Flat network whose physnet appears in FLAT_NET_MAPPINGS, i.e. it is
# pre-provisioned in OneView and the driver must not create it.
FAKE_FLAT_ONEVIEW_NETWORK = {
    'id': '1',
    'provider:physical_network': 'physnet-mapped',
    'provider:network_type': 'flat'
}
# Flat network on the uplinkset-mapped physnet (driver-managed).
FAKE_FLAT_NETWORK = {
    'id': '2',
    'provider:physical_network': 'physnet',
    'provider:network_type': 'flat'
}
# VLAN network on the uplinkset-mapped physnet (driver-managed).
FAKE_VLAN_NETWORK = {
    'id': '3',
    'provider:segmentation_id': '123',
    'provider:physical_network': 'physnet',
    'provider:network_type': 'vlan'
}
# Same as the VLAN fixture but with a VXLAN network type.
FAKE_VXLAN_NETWORK = {
    'id': '3',
    'provider:segmentation_id': '123',
    'provider:physical_network': 'physnet',
    'provider:network_type': 'vxlan'
}
# Network on a physnet present in neither mapping; the driver ignores it.
FAKE_NETWORK_NOT_MAPPED = {
    'id': '4',
    'provider:physical_network': 'not_mapped_phys',
    'provider:network_type': 'flat'
}
FAKE_NETWORK_SEGMENT = {
    'physical_network': 'physnet',
    'network_type': 'flat'
}
FAKE_NETWORK_SEGMENT_NOT_MAPPED = {
    'physical_network': 'not_mapped_phys',
    'network_type': 'flat'
}
# --- OneView uplink set / LIG fixtures ---------------------------------------
FAKE_UNTAGGED_UPLINKSET = {
    'name': 'uplinkset_flat',
    'ethernetNetworkType': 'untagged',
    'networkUris': ['fake_net_uri']
}
FAKE_TAGGED_UPLINKSET = {
    'name': 'uplinkset_vlan',
    'ethernetNetworkType': 'tagged',
    'networkUris': ['fake_net_uri2']
}
# physnet -> flattened (LIG id, uplinkset name) pairs: one pair for flat
# (untagged) and one for vlan (tagged) networks.
UPLINKSET_MAPPINGS = {
    'physnet': ['lig_123', 'uplinkset_flat', 'lig_123', 'uplinkset_vlan']
}
# physnet -> ids of OneView networks that already exist (not driver-managed).
FLAT_NET_MAPPINGS = {'physnet-mapped': ['112233AA']}
FAKE_LIG = {
    'uplinkSets': [FAKE_TAGGED_UPLINKSET, FAKE_UNTAGGED_UPLINKSET]
}
# --- Neutron port / OneView server fixtures ----------------------------------
FAKE_PORT = {
    'id': '1',
    'mac_address': 'aa:11:cc:33:ee:44',
    'network_id': '1',
    'binding:vnic_type': 'baremetal',
    'binding:profile': {
        'local_link_information': [{
            "switch_info": {
                "server_hardware_id": "1122AA",
                "bootable": "true"
            },
            "port_id": "",
            "switch_id": "aa:bb:cc:dd:ee:ff"
        }]
    }
}
FAKE_SERVER_PROFILE = {
    'uri': '/fake_sp_uri',
    'status': 'ok',
    'connections': [{
        'portId': '1234',
        'networkUri': '/fake_net_uri',
        'mac': 'aa:11:cc:33:ee:44',
        'boot': {'priority': 'Primary'}
    }]
}
# Server hardware matching FAKE_PORT's server_hardware_id and mac address.
FAKE_SERVER_HARDWARE = {
    'uuid': '1122AA',
    'powerState': 'On',
    'serverProfileUri': '/fake_sp_uri',
    'locationUri': '/fake_enclosure_uri',
    'powerLock': False,
    'portMap': {
        'deviceSlots': [{
            'slotNumber': '1',
            'location': 'Flb',
            'physicalPorts': [{
                'portNumber': '1',
                'virtualPorts': [{
                    'mac': 'aa:11:cc:33:ee:44',
                    'portFunction': 'a',
                }]
            }]
        }]
    }
}
# --- Expected OneView ethernet-network creation payloads ---------------------
FAKE_OV_FLAT_NETWORK = {
    'name': 'Neutron [%s]' % FAKE_FLAT_NETWORK.get('id'),
    'ethernetNetworkType': 'Untagged',
    'vlanId': None,
    'purpose': 'General',
    'smartLink': False,
    'privateNetwork': False,
}
FAKE_OV_VLAN_NETWORK = {
    'name': 'Neutron [%s]' % FAKE_VLAN_NETWORK.get('id'),
    'ethernetNetworkType': 'Tagged',
    'vlanId': '%s' % FAKE_VLAN_NETWORK.get('provider:segmentation_id'),
    'purpose': 'General',
    'smartLink': False,
    'privateNetwork': False,
}
class FakeContext(object):
    """Stub of a neutron ML2 network/port context used by the driver tests."""

    def __init__(self):
        # Plugin context exposing the fake DB session the driver reads.
        self._plugin_context = FakePlugin()
        # Tests assign the network dict under test to ``_network``.
        self._network = None
        # Deep copies so tests can mutate port data without leaking
        # changes into other tests via the shared FAKE_PORT constant.
        self._port = copy.deepcopy(FAKE_PORT)
        self.current = copy.deepcopy(FAKE_PORT)
        self.segments_to_bind = []
class FakePlugin(object):
    """Stub neutron plugin exposing only the ``_session`` attribute."""

    def __init__(self):
        self._session = 'fake_session'
class FakeNetwork(object):
    """Stub of a neutron<->OneView network mapping record for the tests."""

    def __init__(self):
        # Fixed ids and manageability flag matching what the driver expects
        # from a database row.
        self.manageable = True
        self.neutron_network_id = '54321'
        self.oneview_network_id = '12345'
class OneViewMechanismDriverTestCase(base.AgentMechanismBaseTestCase):
def setUp(self):
super(OneViewMechanismDriverTestCase, self).setUp()
common.get_oneview_client = mock.MagicMock()
oneview_client = common.get_oneview_client()
oneview_client.logical_interconnect_groups.get.return_value = FAKE_LIG
database_manager.get_neutron_oneview_network = mock.Mock(
return_value=False
)
self.driver = mech_oneview.OneViewDriver()
self.driver.oneview_client = oneview_client
self.driver.neutron_oneview_client = neutron_oneview_client.Client(
oneview_client, UPLINKSET_MAPPINGS, FLAT_NET_MAPPINGS
)
self.server_hardware = copy.deepcopy(FAKE_SERVER_HARDWARE)
self.server_profile = copy.deepcopy(FAKE_SERVER_PROFILE)
@mock.patch.object(database_manager, 'map_neutron_network_to_oneview')
def test_create_network_postcommit_flat_mapping(self, mock_map_net):
network_context = FakeContext()
network_context._network = FAKE_FLAT_ONEVIEW_NETWORK
client = self.driver.oneview_client
self.driver.create_network_postcommit(network_context)
self.assertFalse(client.ethernet_networks.create.called)
# NOTE(nicodemos) parameters: session, network_id, oneview_network_id,
# manageable, mapping
mock_map_net.assert_called_with(
network_context._plugin_context._session,
FAKE_FLAT_ONEVIEW_NETWORK.get('id'),
['112233AA'], False, [])
@mock.patch.object(database_manager, 'map_neutron_network_to_oneview')
def test_create_network_postcommit_flat(self, mock_map_net):
network_context = FakeContext()
network_context._network = FAKE_FLAT_NETWORK
client = self.driver.oneview_client
client.ethernet_networks.get_by.return_value = []
self.driver.create_network_postcommit(network_context)
client.ethernet_networks.create.assert_called_with(
FAKE_OV_FLAT_NETWORK
)
# NOTE(nicodemos) parameters: session, network_id, oneview_network_id,
# manageable, mapping
mock_map_net.assert_called_with(
network_context._plugin_context._session,
FAKE_FLAT_NETWORK.get('id'),
mock.ANY, True, ['lig_123', 'uplinkset_flat'])
@mock.patch.object(database_manager, 'map_neutron_network_to_oneview')
def test_create_already_existing_network_postcommit_flat(
self, mock_map_net
):
network_context = FakeContext()
network_context._network = FAKE_FLAT_NETWORK
client = self.driver.oneview_client
client.ethernet_networks.get_by.return_value = [FAKE_OV_FLAT_NETWORK]
self.driver.create_network_postcommit(network_context)
self.assertFalse(client.ethernet_networks.create.called)
# NOTE(gustavo) parameters: session, network_id, oneview_network_id,
# manageable, mapping
mock_map_net.assert_called_with(
network_context._plugin_context._session,
FAKE_FLAT_NETWORK.get('id'),
mock.ANY, True, ['lig_123', 'uplinkset_flat'])
    @mock.patch.object(database_manager, 'map_neutron_network_to_oneview')
    def test_create_network_postcommit_vlan(self, mock_map_net):
        """A new vlan network is created in OneView and mapped in the DB."""
        network_context = FakeContext()
        network_context._network = FAKE_VLAN_NETWORK
        client = self.driver.oneview_client
        # No pre-existing OneView network with this name
        client.ethernet_networks.get_by.return_value = []
        self.driver.create_network_postcommit(network_context)
        client.ethernet_networks.create.assert_called_with(
            FAKE_OV_VLAN_NETWORK
        )
        # NOTE(nicodemos) parameters: session, network_id, oneview_network_id,
        # manageable, mapping
        mock_map_net.assert_called_with(
            network_context._plugin_context._session,
            FAKE_VLAN_NETWORK.get('id'),
            mock.ANY, True, ['lig_123', 'uplinkset_vlan'])
    @mock.patch.object(database_manager, 'map_neutron_network_to_oneview')
    def test_create_already_existing_network_postcommit_vlan(
        self, mock_map_net
    ):
        """A vlan network already present in OneView is reused and mapped,
        not created a second time."""
        network_context = FakeContext()
        network_context._network = FAKE_VLAN_NETWORK
        client = self.driver.oneview_client
        # Simulate the network already existing on the OneView side
        client.ethernet_networks.get_by.return_value = [FAKE_OV_VLAN_NETWORK]
        self.driver.create_network_postcommit(network_context)
        self.assertFalse(client.ethernet_networks.create.called)
        # NOTE(gustavo) parameters: session, network_id, oneview_network_id,
        # manageable, mapping
        mock_map_net.assert_called_with(
            network_context._plugin_context._session,
            FAKE_VLAN_NETWORK.get('id'),
            mock.ANY, True, ['lig_123', 'uplinkset_vlan'])
@mock.patch.object(database_manager, 'map_neutron_network_to_oneview')
def test_create_network_postcommit_not_mapped(self, mock_map_net):
network_context = FakeContext()
network_context._network = FAKE_NETWORK_NOT_MAPPED
client = self.driver.oneview_client
self.driver.create_network_postcommit(network_context)
self.assertFalse(client.ethernet_networks.create.called)
self.assertFalse(mock_map_net.called)
    @mock.patch.object(neutron_oneview_client.Network, '_add_to_ligs')
    @mock.patch.object(database_manager, 'map_neutron_network_to_oneview')
    def test_create_network_postcommit_in_lig(self, mock_map_net, mock_add):
        """A failure while adding the network to LIGs rolls back the freshly
        created OneView network and raises NetworkCreationException."""
        network_context = FakeContext()
        network_context._network = FAKE_VLAN_NETWORK
        client = self.driver.oneview_client
        # Force the LIG update step to blow up after the network is created
        mock_add.side_effect = Exception("BOOM")
        vlan_network = {
            'name': 'Neutron [%s]' % FAKE_VLAN_NETWORK.get('id'),
            'ethernetNetworkType': 'Tagged',
            'vlanId': '%s' % FAKE_VLAN_NETWORK.get('provider:segmentation_id'),
            'purpose': 'General',
            'smartLink': False,
            'privateNetwork': False,
        }
        self.assertRaises(
            exceptions.NetworkCreationException,
            self.driver.create_network_postcommit,
            network_context
        )
        # The network was created, then deleted on rollback,
        # and never recorded in the database.
        client.ethernet_networks.create.assert_called_with(vlan_network)
        self.assertTrue(client.ethernet_networks.delete.called)
        self.assertFalse(mock_map_net.called)
    @mock.patch.object(database_manager, 'map_neutron_network_to_oneview')
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    def test_create_network_postcommit_net_created(
            self, mock_get_net, mock_map_net):
        """A network already tracked in the database is not created or
        mapped again."""
        network_context = FakeContext()
        network_context._network = FAKE_FLAT_NETWORK
        client = self.driver.oneview_client
        # Any truthy value means "network row already exists"
        mock_get_net.return_value = True
        self.driver.create_network_postcommit(network_context)
        self.assertFalse(client.ethernet_networks.create.called)
        self.assertFalse(mock_map_net.called)
    # NOTE(nicodemos): See bug when creating an unsupported network type
    @mock.patch.object(database_manager, 'map_neutron_network_to_oneview')
    def test_create_network_postcommit_unsupported_type(self, mock_map_net):
        """Unsupported network types (e.g. vxlan) are silently skipped."""
        network_context = FakeContext()
        network_context._network = FAKE_VXLAN_NETWORK
        client = self.driver.oneview_client
        self.driver.create_network_postcommit(network_context)
        self.assertFalse(client.ethernet_networks.create.called)
        self.assertFalse(mock_map_net.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'delete_neutron_oneview_network')
    @mock.patch.object(database_manager, 'delete_oneview_network_lig')
    def test_delete_network_postcommit(self, mock_del_lig,
                                       mock_del_net, mock_get_net):
        """Deleting a managed network removes it from OneView and deletes
        both the network row and its LIG rows from the database."""
        network_context = FakeContext()
        network_context._network = FAKE_FLAT_NETWORK
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        self.driver.delete_network_postcommit(network_context)
        client.ethernet_networks.delete.assert_called_with(
            fake_network_obj.oneview_network_id)
        mock_del_net.assert_called_with(
            network_context._plugin_context._session,
            neutron_network_id=FAKE_FLAT_NETWORK.get('id')
        )
        mock_del_lig.assert_called_with(
            network_context._plugin_context._session,
            oneview_network_id=fake_network_obj.oneview_network_id
        )
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'delete_neutron_oneview_network')
    @mock.patch.object(database_manager, 'delete_oneview_network_lig')
    def test_delete_network_postcommit_flat_mapping(
            self, mock_del_lig, mock_del_net, mock_get_net):
        """An unmanaged (pre-mapped) network is removed from the database
        only; the OneView network itself is left in place."""
        network_context = FakeContext()
        network_context._network = FAKE_FLAT_ONEVIEW_NETWORK
        fake_network_obj = FakeNetwork()
        # manageable=False marks a network owned by OneView admins
        fake_network_obj.manageable = False
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        self.driver.delete_network_postcommit(network_context)
        self.assertFalse(client.ethernet_networks.delete.called)
        mock_del_net.assert_called_with(
            network_context._plugin_context._session,
            neutron_network_id=FAKE_FLAT_ONEVIEW_NETWORK.get('id')
        )
        mock_del_lig.assert_called_with(
            network_context._plugin_context._session,
            oneview_network_id=fake_network_obj.oneview_network_id
        )
@mock.patch.object(database_manager, 'get_neutron_oneview_network')
@mock.patch.object(database_manager, 'delete_neutron_oneview_network')
@mock.patch.object(database_manager, 'delete_oneview_network_lig')
def test_delete_network_postcommit_no_network(
self, mock_del_lig, mock_del_net, mock_get_net):
network_context = FakeContext()
network_context._network = FAKE_FLAT_ONEVIEW_NETWORK
mock_get_net.return_value = None
client = self.driver.oneview_client
self.driver.delete_network_postcommit(network_context)
self.assertFalse(client.ethernet_networks.delete.called)
self.assertFalse(mock_del_net.called)
self.assertFalse(mock_del_lig.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port(self, mock_net_segment, mock_get_net):
        """Binding a port adds a connection to the server profile and pushes
        the updated profile back to OneView."""
        port_context = FakeContext()
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        client.server_hardware.get.return_value = self.server_hardware
        client.server_profiles.get.return_value = self.server_profile
        # Snapshot connections so the in-place update can be detected
        old_connections = copy.deepcopy(self.server_profile['connections'])
        self.driver.bind_port(port_context)
        new_connections = self.server_profile['connections']
        self.assertNotEqual(old_connections, new_connections)
        client.server_profiles.update.assert_called_with(
            id_or_uri=self.server_profile.get('uri'),
            resource={
                'uri': self.server_profile.get('uri'),
                'status': self.server_profile.get('status'),
                'connections': self.server_profile['connections']
            })
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_existing_conn(self, mock_net_segment, mock_get_net):
        """Binding a port whose physical port already has a connection reuses
        that connection, changing only its network URI."""
        port_context = FakeContext()
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        client.server_hardware.get.return_value = self.server_hardware
        client.server_profiles.get.return_value = self.server_profile
        # Pre-seed a connection on the same port the bind will target
        self.server_profile["connections"][0]["portId"] = "Flb 1:1-a"
        old_connections = copy.deepcopy(self.server_profile['connections'])
        self.driver.bind_port(port_context)
        new_connections = self.server_profile['connections']
        for old_conn in old_connections:
            for new_conn in new_connections:
                if old_conn.get('mac') == new_conn.get('mac'):
                    # Same physical port and boot settings, new network URI
                    self.assertEqual(old_conn.get('portId'),
                                     new_conn.get('portId'))
                    self.assertNotEqual(old_conn.get('networkUri'),
                                        new_conn.get('networkUri'))
                    self.assertEqual(old_conn.get('boot'),
                                     new_conn.get('boot'))
        # No connection was added or removed
        self.assertEqual(len(old_connections), len(new_connections))
        client.server_profiles.update.assert_called_with(
            id_or_uri=self.server_profile.get('uri'),
            resource={
                'uri': self.server_profile.get('uri'),
                'status': self.server_profile.get('status'),
                'connections': self.server_profile['connections']
            })
@mock.patch.object(database_manager, 'get_network_segment')
def test_create_port_net_not_mapped(self, mock_net_segment):
port_context = FakeContext()
mock_net_segment.return_value = FAKE_NETWORK_SEGMENT_NOT_MAPPED
client = self.driver.oneview_client
self.driver.bind_port(port_context)
self.assertFalse(client.server_hardware.get.called)
self.assertFalse(client.server_profiles.get.called)
self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_not_baremetal(self, mock_net_segment):
        """Only ports with vnic_type 'baremetal' are handled by the driver."""
        port_context = FakeContext()
        port_context._port['binding:vnic_type'] = 'not_baremetal'
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        client = self.driver.oneview_client
        self.driver.bind_port(port_context)
        self.assertFalse(client.server_hardware.get.called)
        self.assertFalse(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_not_in_database(self, mock_net_segment, mock_get_net):
        """A port on a network unknown to the database is ignored."""
        port_context = FakeContext()
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        # No network row in the database
        mock_get_net.return_value = None
        client = self.driver.oneview_client
        self.driver.bind_port(port_context)
        self.assertFalse(client.server_hardware.get.called)
        self.assertFalse(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_no_link_info(self, mock_net_segment, mock_get_net):
        """A port without local_link_information cannot be bound."""
        port_context = FakeContext()
        port_context._port['binding:profile']['local_link_information'] = None
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        self.driver.bind_port(port_context)
        self.assertFalse(client.server_hardware.get.called)
        self.assertFalse(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_more_link_info(self, mock_net_segment, mock_get_net):
        """More than one local_link_information entry aborts the bind."""
        port_context = FakeContext()
        # Append a second (bogus) link-info entry to the fixture port
        port_context._port['binding:profile']['local_link_information'].append(
            {'fake_local_link_info': True}
        )
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        self.driver.bind_port(port_context)
        self.assertFalse(client.server_hardware.get.called)
        self.assertFalse(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_no_switch_info(self, mock_net_segment, mock_get_net):
        """local_link_information without switch_info aborts the bind."""
        port_context = FakeContext()
        port_context._port[
            'binding:profile']['local_link_information'][0][
            'switch_info'] = None
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        self.driver.bind_port(port_context)
        self.assertFalse(client.server_hardware.get.called)
        self.assertFalse(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_not_bootable(self, mock_net_segment, mock_get_net):
        """A port flagged bootable=False gets a 'NotBootable' boot priority
        on the connection appended to the server profile."""
        port_context = FakeContext()
        switch_info = port_context._port[
            'binding:profile']['local_link_information'][0]['switch_info']
        switch_info['bootable'] = False
        port_context._port[
            'binding:profile']['local_link_information'][0][
            'switch_info'] = switch_info
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        client.server_hardware.get.return_value = self.server_hardware
        client.server_profiles.get.return_value = self.server_profile
        old_connections = copy.deepcopy(self.server_profile['connections'])
        self.driver.bind_port(port_context)
        new_connections = self.server_profile['connections']
        # The newly appended connection must not be flagged for PXE boot
        boot_info = new_connections[1].get('boot').get('priority')
        self.assertNotEqual(old_connections, new_connections)
        self.assertTrue(client.server_hardware.get.called)
        self.assertTrue(client.server_profiles.get.called)
        self.assertEqual(boot_info, 'NotBootable')
        client.server_profiles.update.assert_called_with(
            id_or_uri=self.server_profile.get('uri'),
            resource={
                'uri': self.server_profile.get('uri'),
                'status': self.server_profile.get('status'),
                'connections': self.server_profile['connections']
            })
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_no_hardware(self, mock_net_segment, mock_get_net):
        """Missing server_hardware_id in switch_info aborts the bind."""
        port_context = FakeContext()
        switch_info = port_context._port[
            'binding:profile']['local_link_information'][0]['switch_info']
        switch_info['server_hardware_id'] = None
        port_context._port[
            'binding:profile']['local_link_information'][0][
            'switch_info'] = switch_info
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        self.driver.bind_port(port_context)
        self.assertFalse(client.server_hardware.get.called)
        self.assertFalse(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_no_profile(self, mock_net_segment, mock_get_net):
        """Hardware without an applied server profile cannot be bound."""
        port_context = FakeContext()
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        # No server profile applied to the hardware
        self.server_hardware['serverProfileUri'] = None
        client.server_hardware.get.return_value = self.server_hardware
        self.driver.bind_port(port_context)
        self.assertTrue(client.server_hardware.get.called)
        self.assertFalse(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_rack_server(self, mock_net_segment, mock_get_net):
        """Rack servers (no enclosure locationUri) skip the profile update."""
        port_context = FakeContext()
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        # A rack server has no enclosure location
        self.server_hardware['locationUri'] = None
        client.server_hardware.get.return_value = self.server_hardware
        self.driver.bind_port(port_context)
        self.assertTrue(client.server_hardware.get.called)
        self.assertTrue(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_create_port_no_pxe_bootable_available(
            self, mock_net_segment, mock_get_net):
        """With every bootable priority already taken, the profile is left
        untouched (no update call)."""
        port_context = FakeContext()
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        client.server_hardware.get.return_value = self.server_hardware
        client.server_profiles.get.return_value = self.server_profile
        # Occupy the remaining bootable priority so none is left
        new_connection = {
            'portId': '231',
            'networkUri': '/fake_net_uri_2',
            'mac': 'aa:11:22:33:ee:44',
            'boot': {'priority': 'Secondary'}
        }
        self.server_profile['connections'].append(new_connection)
        self.driver.bind_port(port_context)
        self.assertTrue(client.server_hardware.get.called)
        self.assertTrue(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_delete_port_postcommit(self, mock_net_segment, mock_get_net):
        """Deleting a bound port pushes updated connections to OneView."""
        port_context = FakeContext()
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        client.server_hardware.get.return_value = self.server_hardware
        client.server_profiles.get.return_value = self.server_profile
        self.driver.delete_port_postcommit(port_context)
        client.server_profiles.update.assert_called_with(
            id_or_uri=self.server_profile.get('uri'),
            resource={
                'uri': self.server_profile.get('uri'),
                'status': self.server_profile.get('status'),
                'connections': self.server_profile['connections']
            })
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_delete_port_postcommit_not_valid(
            self, mock_net_segment, mock_get_net):
        """Deleting a port without link info leaves OneView untouched."""
        port_context = FakeContext()
        port_context._port['binding:profile']['local_link_information'] = None
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        self.driver.delete_port_postcommit(port_context)
        self.assertFalse(client.server_hardware.get.called)
        self.assertFalse(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
    @mock.patch.object(database_manager, 'get_neutron_oneview_network')
    @mock.patch.object(database_manager, 'get_network_segment')
    def test_delete_port_rack_server(self, mock_net_segment, mock_get_net):
        """Deleting a port on a rack server skips the profile lookup."""
        port_context = FakeContext()
        mock_net_segment.return_value = FAKE_NETWORK_SEGMENT
        fake_network_obj = FakeNetwork()
        mock_get_net.return_value = fake_network_obj
        client = self.driver.oneview_client
        # A rack server has no enclosure location
        self.server_hardware['locationUri'] = None
        client.server_hardware.get.return_value = self.server_hardware
        self.driver.delete_port_postcommit(port_context)
        self.assertTrue(client.server_hardware.get.called)
        self.assertFalse(client.server_profiles.get.called)
        self.assertFalse(client.server_profiles.update.called)
| 41.717452 | 79 | 0.7 | 3,511 | 30,120 | 5.615494 | 0.077186 | 0.029925 | 0.03804 | 0.057162 | 0.832217 | 0.805995 | 0.793467 | 0.767143 | 0.760854 | 0.757202 | 0 | 0.005213 | 0.203884 | 30,120 | 721 | 80 | 41.775312 | 0.817007 | 0.039475 | 0 | 0.643697 | 0 | 0 | 0.129013 | 0.050097 | 0 | 0 | 0 | 0 | 0.131092 | 1 | 0.053782 | false | 0 | 0.013445 | 0 | 0.07395 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4a140847022acc877c91c0d1da4231daad62b057 | 5,266 | py | Python | tests/report_tests/test_image_report.py | chainer/chainerui | 91c5c26d9154a008079dbb0bcbf69b5590d105f7 | [
"MIT"
] | 185 | 2017-12-15T09:24:07.000Z | 2022-01-20T11:20:13.000Z | tests/report_tests/test_image_report.py | chainer/chainerui | 91c5c26d9154a008079dbb0bcbf69b5590d105f7 | [
"MIT"
] | 191 | 2017-12-15T09:14:52.000Z | 2022-02-17T14:09:19.000Z | tests/report_tests/test_image_report.py | chainer/chainerui | 91c5c26d9154a008079dbb0bcbf69b5590d105f7 | [
"MIT"
] | 29 | 2017-12-15T09:40:45.000Z | 2022-03-13T11:21:11.000Z | import os
import unittest
import warnings
import numpy as np
import pytest
import six
# chainer may be missing or broken in this environment; the image-report
# feature additionally needs PIL, so probe both and record availability
# flags used by the skipUnless decorators below.
try:
    import chainer  # NOQA
    _chainer_installed = True
except (ImportError, TypeError):
    _chainer_installed = False
if _chainer_installed:
    from chainerui.report import image_report
    _image_report_available = image_report._available
else:
    _image_report_available = False
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_available():
    """check_available() reports True without emitting any warning."""
    with warnings.catch_warnings(record=True) as caught:
        assert image_report.check_available()
    assert not caught
@unittest.skipUnless(_chainer_installed, 'Chainer is not installed')
def test_available_not_installed():
    """check_available() warns and returns False when PIL is missing."""
    import sys
    is_installed = 'PIL' in sys.modules

    def check_available():
        with warnings.catch_warnings(record=True) as w:
            assert not image_report.check_available()
            assert len(w) == 1

    if is_installed:
        pil = sys.modules['PIL']
        try:
            # Replacing the module entry with a non-module makes the
            # reloaded image_report fail its PIL import.
            sys.modules['PIL'] = ImportError()
            six.moves.reload_module(image_report)
            check_available()
        finally:
            # Restore PIL and reload image_report into its working state
            sys.modules['PIL'] = pil
            six.moves.reload_module(image_report)
    else:
        check_available()
@unittest.skipUnless(_chainer_installed, 'Chainer is not installed')
def test_report_error(func_dir):
    """A non-batched 1-D array is rejected with a dimension error."""
    bad_img = np.zeros(10)
    with pytest.raises(ValueError) as excinfo:
        image_report.report(bad_img, func_dir, 'test', batched=False)
    assert 'must be 2 or 3' in str(excinfo.value)
@unittest.skipUnless(_chainer_installed, 'Chainer is not installed')
def test_report_error_batch(func_dir):
    """A batched 2-D array is rejected with a dimension error."""
    bad_batch = np.zeros(10).reshape(2, 5)
    with pytest.raises(ValueError) as excinfo:
        image_report.report(bad_batch, func_dir, 'test')
    assert 'must be 3 or 4' in str(excinfo.value)
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_bchw_row0(func_dir):
    """A batched (B, C, H, W) image with the default row layout is saved."""
    img = np.zeros(10*3*5*5, dtype=np.float32).reshape((10, 3, 5, 5))
    filename, created_at = image_report.report(img, func_dir, 'test')
    assert filename.startswith('test_')
    path = os.path.join(func_dir, filename)
    assert os.path.exists(path)
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_bchw_row2(func_dir):
    """A batched (B, C, H, W) image tiled with row=2 is saved."""
    img = np.zeros(10*3*5*5, dtype=np.float32).reshape((10, 3, 5, 5))
    filename, created_at = image_report.report(img, func_dir, 'test', row=2)
    assert filename.startswith('test_')
    path = os.path.join(func_dir, filename)
    assert os.path.exists(path)
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_bhwc_row0(func_dir):
    """A channels-last (B, H, W, C) batch is saved via ch_axis=-1."""
    img = np.zeros(10*5*5*3, dtype=np.float32).reshape((10, 5, 5, 3))
    filename, created_at = image_report.report(
        img, func_dir, 'test', ch_axis=-1)
    assert filename.startswith('test_')
    path = os.path.join(func_dir, filename)
    assert os.path.exists(path)
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_bhwc_row2(func_dir):
    """A channels-last (B, H, W, C) batch tiled with row=2 is saved."""
    img = np.zeros(10*5*5*3, dtype=np.float32).reshape((10, 5, 5, 3))
    filename, created_at = image_report.report(
        img, func_dir, 'test', ch_axis=-1, row=2)
    assert filename.startswith('test_')
    path = os.path.join(func_dir, filename)
    assert os.path.exists(path)
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_chw_chainer_variable(func_dir):
    """A single chainer.Variable image in (C, H, W) layout is saved."""
    img = np.zeros(3*5*5, dtype=np.float32).reshape((3, 5, 5))
    img = chainer.Variable(img)
    filename, created_at = image_report.report(
        img, func_dir, 'test', ch_axis=0, batched=False)
    assert filename.startswith('test_')
    path = os.path.join(func_dir, filename)
    assert os.path.exists(path)
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_hwc_hsv(func_dir):
    """A single (H, W, C) image is saved using the HSV color mode."""
    img = np.zeros(5*5*3, dtype=np.float32).reshape((5, 5, 3))
    filename, created_at = image_report.report(
        img, func_dir, 'test', ch_axis=-1, mode='HSV', batched=False)
    assert filename.startswith('test_')
    path = os.path.join(func_dir, filename)
    assert os.path.exists(path)
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_bhw_uint8(func_dir):
    """A batched single-channel uint8 (B, H, W) image is written to disk."""
    batch = np.zeros((8, 5, 10), dtype=np.uint8)
    filename, created_at = image_report.report(batch, func_dir, 'test')
    assert filename.startswith('test_')
    assert os.path.exists(os.path.join(func_dir, filename))
    assert created_at is not None
@unittest.skipUnless(_image_report_available, 'Image report is not available')
def test_report_hw(func_dir):
    """A single non-batched grayscale (H, W) image is written to disk."""
    img = np.zeros((5, 10), dtype=np.float32)
    filename, created_at = image_report.report(
        img, func_dir, 'test', batched=False)
    assert filename.startswith('test_')
    assert os.path.exists(os.path.join(func_dir, filename))
    assert created_at is not None
| 34.644737 | 78 | 0.707938 | 771 | 5,266 | 4.625162 | 0.12192 | 0.111049 | 0.067302 | 0.070107 | 0.843803 | 0.819686 | 0.775659 | 0.738082 | 0.736399 | 0.736399 | 0 | 0.024018 | 0.177744 | 5,266 | 151 | 79 | 34.874172 | 0.799538 | 0.00076 | 0 | 0.54918 | 0 | 0 | 0.086692 | 0 | 0 | 0 | 0 | 0 | 0.245902 | 1 | 0.106557 | false | 0 | 0.090164 | 0 | 0.196721 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4a218f52365e29e103e58f6de516129bd07d1003 | 22,705 | py | Python | ninjalooter/tests/test_message_handlers.py | rm-you/ninjalooter | 026b0e732964d62721b84c7ae64d418bfe1e2975 | [
"MIT"
] | 4 | 2020-08-30T12:57:03.000Z | 2022-03-18T15:11:13.000Z | ninjalooter/tests/test_message_handlers.py | rm-you/ninjalooter | 026b0e732964d62721b84c7ae64d418bfe1e2975 | [
"MIT"
] | 5 | 2022-01-07T03:17:32.000Z | 2022-03-27T21:20:12.000Z | ninjalooter/tests/test_message_handlers.py | rm-you/ninjalooter | 026b0e732964d62721b84c7ae64d418bfe1e2975 | [
"MIT"
] | 1 | 2021-12-28T02:18:04.000Z | 2021-12-28T02:18:04.000Z | import datetime
from unittest import mock
from ninjalooter import config
from ninjalooter import message_handlers
from ninjalooter import models
from ninjalooter.tests import base
from ninjalooter import utils
class TestMessageHandlers(base.NLTestBase):
    def setUp(self) -> None:
        """Build the Aho-Corasick trie and install sample alliance data."""
        utils.setup_aho()
        config.ALLIANCES = base.SAMPLE_ALLIANCES
        config.ALLIANCE_MAP = base.SAMPLE_ALLIANCE_MAP
    @mock.patch('ninjalooter.utils.store_state')
    @mock.patch('wx.PostEvent')
    def test_handle_start_who(self, mock_post_event, mock_store_state):
        """A new /who clears the list but historical affiliations survive."""
        # Empty List, full /who
        config.PLAYER_AFFILIATIONS = {}
        for line in base.SAMPLE_WHO_LOG.splitlines():
            match = config.MATCH_WHO.match(line)
            if match:
                message_handlers.handle_who(match, 'window')
        self.assertEqual(25, len(config.PLAYER_AFFILIATIONS))
        self.assertEqual(25, mock_post_event.call_count)
        mock_post_event.reset_mock()

        # Peter and Fred should be marked as guildless
        self.assertIsNone(config.PLAYER_AFFILIATIONS['Peter'])
        self.assertIsNone(config.PLAYER_AFFILIATIONS['Fred'])

        # Mark Peter and Fred as historically belonging to Kingdom
        config.HISTORICAL_AFFILIATIONS['Peter'] = 'Kingdom'
        config.HISTORICAL_AFFILIATIONS['Fred'] = 'Kingdom'

        # Trigger New Who
        message_handlers.handle_start_who(None, 'window')
        mock_post_event.assert_called_once_with(
            'window', models.ClearWhoEvent())
        mock_post_event.reset_mock()

        # Run the full who-list again
        for line in base.SAMPLE_WHO_LOG.splitlines():
            match = config.MATCH_WHO.match(line)
            if match:
                message_handlers.handle_who(match, 'window')
        self.assertEqual(25, len(config.PLAYER_AFFILIATIONS))

        # Peter should be marked as Kingdom, and Fred as guildless
        self.assertEqual('Kingdom', config.PLAYER_AFFILIATIONS['Peter'])
        self.assertIsNone(config.PLAYER_AFFILIATIONS['Fred'])
    @mock.patch('ninjalooter.utils.store_state')
    @mock.patch('wx.PostEvent')
    def test_handle_who(self, mock_post_event, mock_store_state):
        """Each matched /who line updates affiliations and posts a WhoEvent."""
        # Empty List, full /who
        config.PLAYER_AFFILIATIONS = {}
        for line in base.SAMPLE_WHO_LOG.splitlines():
            match = config.MATCH_WHO.match(line)
            if match:
                message_handlers.handle_who(match, 'window')
        self.assertEqual(25, len(config.PLAYER_AFFILIATIONS))
        self.assertEqual(25, mock_post_event.call_count)
        mock_post_event.reset_mock()

        # Member changed from ANONYMOUS/Unguilded to Guilded
        config.PLAYER_AFFILIATIONS = {'Jim': None}
        line = '[Sun Aug 16 22:46:32 2020] [ANONYMOUS] Jim (Gnome) <Guild>'
        match = config.MATCH_WHO.match(line)
        message_handlers.handle_who(match, 'window')
        self.assertEqual(1, len(config.PLAYER_AFFILIATIONS))
        self.assertEqual('Guild', config.PLAYER_AFFILIATIONS['Jim'])
        mock_post_event.assert_called_once_with(
            'window', models.WhoEvent('Jim', 'ANONYMOUS', '??', 'Guild'))
        mock_post_event.reset_mock()

        # Member changed guilds
        config.PLAYER_AFFILIATIONS = {'Jim': 'Guild'}
        line = '[Sun Aug 16 22:46:32 2020] [ANONYMOUS] Jim (Gnome) <Other>'
        match = config.MATCH_WHO.match(line)
        message_handlers.handle_who(match, 'window')
        self.assertEqual(1, len(config.PLAYER_AFFILIATIONS))
        self.assertEqual('Other', config.PLAYER_AFFILIATIONS['Jim'])
        mock_post_event.assert_called_once_with(
            'window', models.WhoEvent('Jim', 'ANONYMOUS', '??', 'Other'))
        mock_post_event.reset_mock()

        # Member left their guild
        config.PLAYER_AFFILIATIONS = {'Jim': 'Guild'}
        line = '[Sun Aug 16 22:46:32 2020] [50 Cleric] Jim (Gnome)'
        match = config.MATCH_WHO.match(line)
        message_handlers.handle_who(match, 'window')
        self.assertEqual(1, len(config.PLAYER_AFFILIATIONS))
        self.assertIsNone(config.PLAYER_AFFILIATIONS['Jim'])
        mock_post_event.assert_called_once_with(
            'window', models.WhoEvent('Jim', 'Cleric', '50', None))
        mock_post_event.reset_mock()
@mock.patch('ninjalooter.utils.store_state')
@mock.patch('wx.PostEvent')
def test_handle_drop(self, mock_post_event, mock_store_state):
config.PLAYER_AFFILIATIONS = {
'Jim': 'Venerate',
'James': 'Kingdom',
'Dan': 'Dial a Daniel',
}
config.PENDING_AUCTIONS = list()
# # FILTER OFF - Item linked by a non-federation guild member
# config.RESTRICT_BIDS = False
# line = ("[Sun Aug 16 22:47:31 2020] Dan says out of character, "
# "'Belt of Iniquity'")
# match = config.MATCH_DROP.match(line)
# items = message_handlers.handle_drop(match, 'window')
# self.assertEqual(1, len(items))
# self.assertEqual(1, len(config.PENDING_AUCTIONS))
# mock_post_event.assert_called_once_with(
# 'window', models.DropEvent())
# mock_post_event.reset_mock()
# config.PENDING_AUCTIONS = list()
# # FILTER ON - Item linked by a non-federation guild member
# config.RESTRICT_BIDS = True
# line = ("[Sun Aug 16 22:47:31 2020] Dan says out of character, "
# "'Belt of Iniquity'")
# match = config.MATCH_DROP.match(line)
# items = message_handlers.handle_drop(match, 'window')
# self.assertEqual(0, len(items))
# self.assertEqual(0, len(config.PENDING_AUCTIONS))
# mock_post_event.assert_not_called()
# Item linked by a federation guild member
# NODROP filter on, droppable item
config.NODROP_ONLY = True
line = ("[Sun Aug 16 22:47:31 2020] Jim says out of character, "
"'Copper Disc'")
jim_disc_1_uuid = "jim_disc_1_uuid"
jim_disc_1 = models.ItemDrop(
'Copper Disc', 'Jim', 'Sun Aug 16 22:47:31 2020',
uuid=jim_disc_1_uuid)
match = config.MATCH_DROP_OOC.match(line)
items = list(message_handlers.handle_drop(match, 'window'))
self.assertEqual(1, len(items))
self.assertIn('Copper Disc', items)
self.assertEqual(0, len(config.PENDING_AUCTIONS))
mock_post_event.assert_not_called()
mock_post_event.reset_mock()
# NODROP filter on, NODROP item
line = ("[Sun Aug 16 22:47:31 2020] Jim says, "
"'Belt of Iniquity'")
jim_belt_1_uuid = "jim_belt_1_uuid"
jim_belt_1 = models.ItemDrop(
'Belt of Iniquity', 'Jim', 'Sun Aug 16 22:47:31 2020',
uuid=jim_belt_1_uuid)
match = config.MATCH_DROP_SAY.match(line)
with mock.patch('uuid.uuid4') as mock_uuid4:
mock_uuid4.return_value = jim_belt_1_uuid
items = list(message_handlers.handle_drop(match, 'window'))
self.assertEqual(1, len(items))
self.assertIn('Belt of Iniquity', items)
self.assertEqual(1, len(config.PENDING_AUCTIONS))
self.assertListEqual(
[jim_belt_1],
config.PENDING_AUCTIONS)
mock_post_event.assert_called_once_with(
'window', models.DropEvent())
mock_post_event.reset_mock()
# NODROP filter off, droppable item
config.NODROP_ONLY = False
line = ("[Sun Aug 16 22:47:31 2020] Jim tells the guild, "
"'Copper Disc'")
match = config.MATCH_DROP_GU.match(line)
with mock.patch('uuid.uuid4') as mock_uuid4:
mock_uuid4.return_value = jim_disc_1_uuid
items = list(message_handlers.handle_drop(match, 'window'))
self.assertEqual(1, len(items))
self.assertIn('Copper Disc', items)
self.assertEqual(2, len(config.PENDING_AUCTIONS))
self.assertListEqual(
[jim_belt_1, jim_disc_1],
config.PENDING_AUCTIONS)
mock_post_event.assert_called_once_with(
'window', models.DropEvent())
mock_post_event.reset_mock()
# Two items linked by a federation guild member, plus chat
line = ("[Sun Aug 16 22:47:41 2020] James tells the guild, "
"'Platinum Disc and Golden Amber Earring woo'")
james_disc_uuid = "james_disc_uuid"
james_earring_uuid = "james_earring_uuid"
james_disc = models.ItemDrop(
'Platinum Disc', 'James', 'Sun Aug 16 22:47:41 2020',
uuid=james_disc_uuid)
james_earring = models.ItemDrop(
'Golden Amber Earring', 'James', 'Sun Aug 16 22:47:41 2020',
uuid=james_earring_uuid)
match = config.MATCH_DROP_GU.match(line)
with mock.patch('uuid.uuid4') as mock_uuid4:
mock_uuid4.side_effect = [james_disc_uuid, james_earring_uuid]
items = list(message_handlers.handle_drop(match, 'window'))
self.assertEqual(2, len(items))
self.assertListEqual(
['Platinum Disc', 'Golden Amber Earring'], items)
self.assertListEqual(
[jim_belt_1, jim_disc_1, james_disc, james_earring],
config.PENDING_AUCTIONS)
mock_post_event.assert_called_once_with(
'window', models.DropEvent())
mock_post_event.reset_mock()
# Random chatter by federation guild member
line = ("[Sun Aug 16 22:47:31 2020] Jim tells the guild, "
"'four score and seven years ago, we wanted pixels'")
match = config.MATCH_DROP_GU.match(line)
items = list(message_handlers.handle_drop(match, 'window'))
self.assertEqual(0, len(items))
self.assertListEqual(
[jim_belt_1, jim_disc_1, james_disc, james_earring],
config.PENDING_AUCTIONS)
mock_post_event.assert_not_called()
# Someone reports they looted an item
line = ("[Sun Aug 16 22:47:31 2020] Jim tells the guild, "
"'looted Belt of Iniquity'")
match = config.MATCH_DROP_GU.match(line)
items = list(message_handlers.handle_drop(match, 'window'))
self.assertEqual(0, len(items))
self.assertListEqual(
[jim_belt_1, jim_disc_1, james_disc, james_earring],
config.PENDING_AUCTIONS)
mock_post_event.assert_not_called()
# Bid message doesn't register as a drop
config.ACTIVE_AUCTIONS.clear()
jerkin_1 = models.ItemDrop(
'Shiverback-hide Jerkin', 'Jim', 'Sun Aug 16 22:47:31 2020')
config.PENDING_AUCTIONS.append(jerkin_1)
auction1 = utils.start_auction_dkp(jerkin_1, 'VCR')
self.assertEqual(
config.ACTIVE_AUCTIONS.get(auction1.item.uuid), auction1)
config.PENDING_AUCTIONS.clear()
line = ("[Sun Aug 16 22:47:31 2020] Jim tells the guild, "
"'Shiverback-hide Jerkin'")
match = config.MATCH_DROP_GU.match(line)
items = list(message_handlers.handle_drop(match, 'window'))
# One item should be found
self.assertListEqual(['Shiverback-hide Jerkin'], items)
self.assertListEqual([], config.PENDING_AUCTIONS)
mock_post_event.assert_not_called()
# A gratss message from another app should not register as a drop
bid_line = ("[Sun Aug 16 22:47:31 2020] Toald tells the guild, "
"'Shiverback-hide Jerkin 1 main'")
config.RESTRICT_BIDS = False
bid_match = config.MATCH_BID_GU.match(bid_line)
message_handlers.handle_bid(bid_match, 'window')
config.HISTORICAL_AUCTIONS[auction1.item.uuid] = (
config.ACTIVE_AUCTIONS.pop(auction1.item.uuid))
line = ("[Sun Aug 16 22:47:31 2020] Jim tells the guild, "
"'Gratss Toald on [Shiverback-hide Jerkin] (1 DKP)!'")
match = config.MATCH_DROP_GU.match(line)
items = list(message_handlers.handle_drop(match, 'window'))
self.assertListEqual([], items)
# Ignore items if a number is present, it's probably a bid
match = config.MATCH_DROP_GU.match(bid_line)
items = list(message_handlers.handle_drop(match, 'window'))
self.assertListEqual([], items)
# second same drop shouldn't record if it is within cooldown time
jerkin_2 = models.ItemDrop(
'Shiverback-hide Jerkin', 'Jim',
utils.datetime_to_eq_format(datetime.datetime.now()))
config.PENDING_AUCTIONS.append(jerkin_2)
line = ("[{}] Jim tells the guild, 'Shiverback-hide Jerkin'".format(
utils.datetime_to_eq_format(datetime.datetime.now())))
match = config.MATCH_DROP_GU.match(line)
self.assertEqual([jerkin_2], config.PENDING_AUCTIONS)
items = list(message_handlers.handle_drop(match, 'window'))
self.assertListEqual([jerkin_2.name], items)
self.assertEqual([jerkin_2], config.PENDING_AUCTIONS)
# second same drop should record if it is past cooldown time
jerkin_2.timestamp = utils.datetime_to_eq_format(
datetime.datetime.now() -
datetime.timedelta(seconds=config.DROP_COOLDOWN))
self.assertEqual(1, len(config.PENDING_AUCTIONS))
items = list(message_handlers.handle_drop(match, 'window'))
self.assertListEqual([jerkin_2.name], items)
self.assertEqual(2, len(config.PENDING_AUCTIONS))
@mock.patch('ninjalooter.utils.store_state')
@mock.patch('wx.PostEvent')
def test_handle_bid(self, mock_post_event, mock_store_state):
config.PLAYER_AFFILIATIONS = {
'Jim': 'Venerate',
'Pim': 'Castle',
'Tim': 'Kingdom',
'Dan': 'Dial a Daniel',
}
item_name = 'Copper Disc'
itemdrop = models.ItemDrop(item_name, "Jim", "timestamp")
disc_auction = models.DKPAuction(itemdrop, 'VCR')
config.ACTIVE_AUCTIONS = {
itemdrop.uuid: disc_auction
}
# FILTER ON - Someone in the alliance bids on an inactive item
config.RESTRICT_BIDS = True
line = ("[Sun Aug 16 22:47:31 2020] Jim auctions, "
"'Platinum Disc 10 DKP'")
match = config.MATCH_BID[0].match(line)
result = message_handlers.handle_bid(match, 'window')
self.assertFalse(result)
self.assertListEqual([], disc_auction.highest())
self.assertEqual(1, len(config.ACTIVE_AUCTIONS))
mock_post_event.assert_not_called()
# FILTER ON - Someone outside the alliance bids on an active item
line = ("[Sun Aug 16 22:47:31 2020] Dan auctions, "
"'Copper Disc 10 DKP'")
match = config.MATCH_BID[0].match(line)
result = message_handlers.handle_bid(match, 'window')
self.assertFalse(result)
self.assertEqual([], disc_auction.highest())
mock_post_event.assert_not_called()
# FILTER OFF - Someone in the alliance bids on an inactive item
config.RESTRICT_BIDS = False
line = ("[Sun Aug 16 22:47:31 2020] Jim auctions, "
"'Platinum Disc 10 DKP'")
match = config.MATCH_BID[0].match(line)
result = message_handlers.handle_bid(match, 'window')
self.assertFalse(result)
self.assertListEqual([], disc_auction.highest())
self.assertEqual(1, len(config.ACTIVE_AUCTIONS))
mock_post_event.assert_not_called()
# FILTER ON - Someone outside the alliance bids on an active item
config.RESTRICT_BIDS = True
line = ("[Sun Aug 16 22:47:31 2020] Dan auctions, "
"'Copper Disc 10 DKP'")
match = config.MATCH_BID[0].match(line)
result = message_handlers.handle_bid(match, 'window')
self.assertFalse(result)
self.assertEqual([], disc_auction.highest())
mock_post_event.assert_not_called()
# Someone we haven't seen bids on an active item
line = ("[Sun Aug 16 22:47:31 2020] Paul auctions, "
"'Copper Disc 10 DKP'")
match = config.MATCH_BID[0].match(line)
result = message_handlers.handle_bid(match, 'window')
self.assertFalse(result)
self.assertListEqual([], disc_auction.highest())
mock_post_event.assert_not_called()
# Someone in the alliance says random stuff with a number
line = ("[Sun Aug 16 22:47:31 2020] Tim auctions, "
"'I am 12 and what channel is this'")
match = config.MATCH_BID[0].match(line)
result = message_handlers.handle_bid(match, 'window')
self.assertFalse(result)
self.assertListEqual([], disc_auction.highest())
mock_post_event.assert_not_called()
# Someone in the alliance bids on two items at once
line = ("[Sun Aug 16 22:47:31 2020] Jim auctions, "
"'Copper Disc 10 DKP Platinum Disc'")
match = config.MATCH_BID[0].match(line)
result = message_handlers.handle_bid(match, 'window')
self.assertFalse(result)
self.assertListEqual([], disc_auction.highest())
mock_post_event.assert_not_called()
# Someone in the alliance bids on an active item
line = ("[Sun Aug 16 22:47:31 2020] Jim auctions, "
"'Copper Disc 10 DKP'")
match = config.MATCH_BID[0].match(line)
result = message_handlers.handle_bid(match, 'window')
self.assertTrue(result)
self.assertIn(('Jim', 10), disc_auction.highest())
mock_post_event.assert_called_once_with(
'window', models.BidEvent(disc_auction))
mock_post_event.reset_mock()
# Someone in the alliance bids on an active item with wrong case
line = ("[Sun Aug 16 22:47:31 2020] Pim auctions, "
"'copper DISC 11 DKP'")
match = config.MATCH_BID[0].match(line)
result = message_handlers.handle_bid(match, 'window')
self.assertTrue(result)
self.assertIn(('Pim', 11), disc_auction.highest())
mock_post_event.assert_called_once_with(
'window', models.BidEvent(disc_auction))
mock_post_event.reset_mock()
# Someone in the alliance bids on an active item for their 2nd main
# This would trigger a bug with "2nd" being read as "2 DKP"
line = ("[Sun Aug 16 22:47:31 2020] Jim auctions, "
"'Copper Disc 2nd main 12dkp'")
match = config.MATCH_BID[0].match(line)
result = message_handlers.handle_bid(match, 'window')
self.assertTrue(result)
self.assertIn(('Jim', 12), disc_auction.highest())
mock_post_event.assert_called_once_with(
'window', models.BidEvent(disc_auction))
mock_post_event.reset_mock()
config.ACTIVE_AUCTIONS = {}
@mock.patch('ninjalooter.utils.store_state')
@mock.patch('wx.PostEvent')
def test_handle_gratss(self, mock_post_event, mock_store_state):
config.PENDING_AUCTIONS.clear()
config.ACTIVE_AUCTIONS.clear()
# Set up a historical auction with bids
jerkin_1 = models.ItemDrop(
'Shiverback-hide Jerkin', 'Jim', 'Sun Aug 16 22:47:31 2020')
config.PENDING_AUCTIONS.append(jerkin_1)
auction1 = utils.start_auction_dkp(jerkin_1, 'VCR')
self.assertEqual(
config.ACTIVE_AUCTIONS.get(auction1.item.uuid), auction1)
bid_line = ("[Sun Aug 16 22:47:31 2020] Toald tells the guild, "
"'Shiverback-hide Jerkin 1 main'")
config.RESTRICT_BIDS = False
bid_match = config.MATCH_BID_GU.match(bid_line)
message_handlers.handle_bid(bid_match, 'window')
config.HISTORICAL_AUCTIONS[auction1.item.uuid] = (
config.ACTIVE_AUCTIONS.pop(auction1.item.uuid))
# Set up a historical auction without bids (rot)
disc_1 = models.ItemDrop(
'Copper Disc', 'Jim', 'Sun Aug 16 22:47:31 2020')
config.PENDING_AUCTIONS.append(disc_1)
auction2 = utils.start_auction_dkp(disc_1, 'VCR')
self.assertEqual(
config.ACTIVE_AUCTIONS.get(auction2.item.uuid), auction2)
config.HISTORICAL_AUCTIONS[auction2.item.uuid] = (
config.ACTIVE_AUCTIONS.pop(auction2.item.uuid))
# A gratss message from auction history should not register (bids)
line = ("[Sun Aug 16 22:47:31 2020] Jim tells the guild, "
"'Gratss Toald on [Shiverback-hide Jerkin] (1 DKP)!'")
match = config.MATCH_GRATSS.match(line)
self.assertFalse(message_handlers.handle_gratss(match, 'window'))
# A gratss message from auction history should not register (no bids)
line = ("[Sun Aug 16 22:47:31 2020] Jim tells the guild, "
"'Gratss ROT on [Copper Disc] (0 DKP)!'")
match = config.MATCH_GRATSS.match(line)
self.assertFalse(message_handlers.handle_gratss(match, 'window'))
# A gratss message that doesn't match auction history SHOULD register
line = ("[Sun Aug 16 22:47:31 2020] Jim tells the guild, "
"'Gratss Jim on [Bladestopper] (100 DKP)!'")
match = config.MATCH_GRATSS.match(line)
self.assertTrue(message_handlers.handle_gratss(match, 'window'))
# A gratss message direct to /tell should register (no tell windows)
line = ("[Sun Aug 16 22:47:31 2020] Jim tells you, "
"'Gratss Jim on [Bladestopper] (100 DKP)!'")
match = config.MATCH_GRATSS.match(line)
self.assertTrue(message_handlers.handle_gratss(match, 'window'))
# A gratss message direct to /tell should register (tell windows)
line = ("[Sun Aug 16 22:47:31 2020] Jim -> You, "
"'Gratss Jim on [Bladestopper] (100 DKP)!'")
match = config.MATCH_GRATSS.match(line)
self.assertTrue(message_handlers.handle_gratss(match, 'window'))
@mock.patch('ninjalooter.utils.store_state')
@mock.patch('wx.PostEvent')
def test_handle_creditt(self, mock_post_event, mock_store_state):
config.PLAYER_NAME = "PlayerName"
# A creditt message direct to /tell should register (no tell windows)
line = ("[Sun Aug 16 22:47:31 2020] Jim tells you, "
"'Creditt Bill'")
match = config.MATCH_CREDITT.match(line)
self.assertTrue(message_handlers.handle_creditt(match, 'window'))
# A creditt message direct to /tell should register (tell windows)
line = ("[Sun Aug 16 22:47:31 2020] Jim -> PlayerName: "
"Creditt Tony")
match = config.MATCH_CREDITT.match(line)
self.assertTrue(message_handlers.handle_creditt(match, 'window'))
config.PLAYER_NAME = ""
| 45.961538 | 77 | 0.638494 | 2,839 | 22,705 | 4.915463 | 0.096513 | 0.025797 | 0.04192 | 0.027947 | 0.82938 | 0.8043 | 0.780867 | 0.7555 | 0.732139 | 0.709065 | 0 | 0.036292 | 0.253645 | 22,705 | 493 | 78 | 46.054767 | 0.787206 | 0.131997 | 0 | 0.683377 | 0 | 0.005277 | 0.179217 | 0.008859 | 0 | 0 | 0 | 0 | 0.253298 | 1 | 0.01847 | false | 0 | 0.01847 | 0 | 0.039578 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c5877e0b37db69eb7dfe1ccefbbd4ca476b69f11 | 156 | py | Python | api/admin.py | dylankershaw/robin | 72cebe0e18c611384a19f8cba2b8980d39cc8bc9 | [
"MIT"
] | null | null | null | api/admin.py | dylankershaw/robin | 72cebe0e18c611384a19f8cba2b8980d39cc8bc9 | [
"MIT"
] | 2 | 2020-02-11T23:38:46.000Z | 2020-04-30T03:15:04.000Z | api/admin.py | dylankershaw/robin | 72cebe0e18c611384a19f8cba2b8980d39cc8bc9 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Intent, Phrase, User
admin.site.register(Intent)
admin.site.register(Phrase)
admin.site.register(User) | 26 | 40 | 0.814103 | 23 | 156 | 5.521739 | 0.478261 | 0.212598 | 0.401575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 156 | 6 | 41 | 26 | 0.888112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.4 | 0 | 0.4 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
c59cc51e35c9b46c67c574f9d7226c58c55494b7 | 68 | py | Python | roppy/misc/__init__.py | bee-san/roppy | 8c957fd4a49f8f4ffdcc539ced17a63e12a0dd10 | [
"MIT"
] | 1 | 2021-12-22T16:06:11.000Z | 2021-12-22T16:06:11.000Z | roppy/misc/__init__.py | bee-san/roppy | 8c957fd4a49f8f4ffdcc539ced17a63e12a0dd10 | [
"MIT"
] | null | null | null | roppy/misc/__init__.py | bee-san/roppy | 8c957fd4a49f8f4ffdcc539ced17a63e12a0dd10 | [
"MIT"
] | null | null | null | from .packing import *
from .utils import *
from .pattern import * | 22.666667 | 22 | 0.720588 | 9 | 68 | 5.444444 | 0.555556 | 0.408163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.191176 | 68 | 3 | 23 | 22.666667 | 0.890909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c5c046464f387a9126f0c6b864b89704c2cdb2fc | 93 | py | Python | scripts/build.py | digitalinteraction/appmovement | 570c1b36b351a859aa542073699b86c237f62641 | [
"BSD-3-Clause",
"MIT"
] | 3 | 2017-07-27T08:54:24.000Z | 2018-10-17T15:50:45.000Z | scripts/build.py | digitalinteraction/appmovement | 570c1b36b351a859aa542073699b86c237f62641 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | scripts/build.py | digitalinteraction/appmovement | 570c1b36b351a859aa542073699b86c237f62641 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | import os
import sys
os.system("sudo apidoc -i ../app/Controller/Templates/ -o ../apidoc/")
| 18.6 | 70 | 0.698925 | 14 | 93 | 4.642857 | 0.785714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11828 | 93 | 4 | 71 | 23.25 | 0.792683 | 0 | 0 | 0 | 0 | 0 | 0.612903 | 0.301075 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c5c066745e12a27195e1531b0e2e824e8ff5d23a | 4,185 | py | Python | release_planning/tests/when_plan_daily_work_test.py | bevzuk/CrossFunctionalTeams-Python | f8d798dade50eed3662de336800d4d6e92dd8920 | [
"MIT"
] | null | null | null | release_planning/tests/when_plan_daily_work_test.py | bevzuk/CrossFunctionalTeams-Python | f8d798dade50eed3662de336800d4d6e92dd8920 | [
"MIT"
] | null | null | null | release_planning/tests/when_plan_daily_work_test.py | bevzuk/CrossFunctionalTeams-Python | f8d798dade50eed3662de336800d4d6e92dd8920 | [
"MIT"
] | null | null | null | import unittest
from eventsourcing.domain.model.events import subscribe
from scrum_team import ScrumTeam
from tests.dsl.given import Given
class WhenPlanDailyWork(unittest.TestCase):
received_day_planned_events = []
def is_day_planned_event(self, events):
return all(isinstance(e, ScrumTeam.DayPlanned) for e in events)
def setUp(self) -> None:
self.received_day_planned_events = []
subscribe(lambda e: self.received_day_planned_events.extend(e), predicate=self.is_day_planned_event)
def test_plan_single_item(self):
scrum_team = Given.scrum_team().with_developer("Homer", "A").please()
product_backlog = Given.product_backlog().with_item("US1", "A").please()
scrum_team.plan_day(product_backlog)
plan = self.received_day_planned_events[0].plan
self.assertEqual({"Homer": [("US1", "A")]}, plan)
def test_choose_task_matching_skill(self):
scrum_team = Given \
.scrum_team() \
.with_developer("Homer", "B") \
.please()
product_backlog = Given \
.product_backlog() \
.with_item("US1", "A") \
.with_item("US2", "B") \
.please()
scrum_team.plan_day(product_backlog)
plan = self.received_day_planned_events[0].plan
self.assertEqual({"Homer": [("US2", "B")]}, plan)
def test_when_no_work_do_nothing(self):
scrum_team = Given \
.scrum_team() \
.with_developer("Homer", "A") \
.please()
product_backlog = Given \
.product_backlog() \
.with_item("US1", "B") \
.please()
scrum_team.plan_day(product_backlog)
plan = self.received_day_planned_events[0].plan
self.assertEqual({"Homer": []}, plan)
def test_one_task_for_two_developers(self):
scrum_team = Given \
.scrum_team() \
.with_developer("Homer", "A") \
.with_developer("Marge", "A") \
.please()
product_backlog = Given \
.product_backlog() \
.with_item("US1", "A") \
.please()
scrum_team.plan_day(product_backlog)
plan = self.received_day_planned_events[0].plan
self.assertEqual({
"Homer": [("US1", "A")],
"Marge": [],
}, plan)
def test_two_tasks_in_single_story_for_two_developers(self):
scrum_team = Given \
.scrum_team() \
.with_developer("Homer", "A") \
.with_developer("Marge", "B") \
.please()
product_backlog = Given \
.product_backlog() \
.with_item("US1", "A", "B") \
.please()
scrum_team.plan_day(product_backlog)
plan = self.received_day_planned_events[0].plan
self.assertEqual({
"Homer": [("US1", "A")],
"Marge": [("US1", "B")],
}, plan)
def test_two_tasks_in_two_stories_for_two_developers(self):
scrum_team = Given \
.scrum_team() \
.with_developer("Homer", "A") \
.with_developer("Marge", "B") \
.please()
product_backlog = Given \
.product_backlog() \
.with_item("US1", "B") \
.with_item("US2", "A") \
.please()
scrum_team.plan_day(product_backlog)
plan = self.received_day_planned_events[0].plan
self.assertEqual({
"Homer": [("US2", "A")],
"Marge": [("US1", "B")],
}, plan)
def test_tshape_developer(self):
scrum_team = Given \
.scrum_team() \
.with_developer("Homer", "A") \
.with_developer("Marge", "A", "B") \
.please()
product_backlog = Given \
.product_backlog() \
.with_item("US1", "A", "B") \
.please()
scrum_team.plan_day(product_backlog)
plan = self.received_day_planned_events[0].plan
self.assertEqual({
"Homer": [("US1", "A")],
"Marge": [("US1", "B")],
}, plan)
if __name__ == '__main__':
unittest.main()
| 30.107914 | 108 | 0.547431 | 446 | 4,185 | 4.802691 | 0.161435 | 0.092437 | 0.084034 | 0.112045 | 0.75957 | 0.733427 | 0.718954 | 0.705882 | 0.705882 | 0.684874 | 0 | 0.008639 | 0.308483 | 4,185 | 138 | 109 | 30.326087 | 0.731513 | 0 | 0 | 0.706422 | 0 | 0 | 0.048746 | 0 | 0 | 0 | 0 | 0 | 0.06422 | 1 | 0.082569 | false | 0 | 0.036697 | 0.009174 | 0.146789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c5df16e70e52395a77b88659ea32118f09957104 | 1,406 | py | Python | python/challenges/tests/test_multi_bracket_validation.py | kmangub/data-structures-and-algorithms | 44b42c0d892f39593997bccb793eacc4d7d98906 | [
"MIT"
] | null | null | null | python/challenges/tests/test_multi_bracket_validation.py | kmangub/data-structures-and-algorithms | 44b42c0d892f39593997bccb793eacc4d7d98906 | [
"MIT"
] | null | null | null | python/challenges/tests/test_multi_bracket_validation.py | kmangub/data-structures-and-algorithms | 44b42c0d892f39593997bccb793eacc4d7d98906 | [
"MIT"
] | null | null | null | import pytest
from challenges.multi_bracket_validation.multi_bracket_validation import multi_bracket_validation
def test_multi_bracket_validation_with_one_pair():
actual = multi_bracket_validation('{}')
expected = True
assert actual == expected
def test_multi_bracket_validation_with_one__of_each_pair():
actual = multi_bracket_validation('{}(){}')
expected = True
assert actual == expected
def test_multi_bracket_validation_with_extra_characters():
actual = multi_bracket_validation('()[[Extra Characters]]')
expected = True
assert actual == expected
def test_multi_bracket_validation_with_pair_inside_of_pair():
actual = multi_bracket_validation('(){}[[]]')
expected = True
assert actual == expected
def test_multi_bracket_validation_with_words_separated():
actual = multi_bracket_validation('{}{Code}[Fellows](())')
expected = True
assert actual == expected
def test_multi_bracket_validation_with_missing_closing():
actual = multi_bracket_validation('[({}]')
expected = False
assert actual == expected
def test_multi_bracket_validation_with_no_corresponding_opening():
actual = multi_bracket_validation('(](')
expected = False
assert actual == expected
def test_multi_bracket_validation_with_no_matching_pairs():
actual = multi_bracket_validation('{(})')
expected = False
assert actual == expected
| 31.954545 | 97 | 0.756757 | 160 | 1,406 | 6.16875 | 0.20625 | 0.231003 | 0.423506 | 0.154002 | 0.722391 | 0.722391 | 0.722391 | 0.682877 | 0.682877 | 0.621074 | 0 | 0 | 0.147937 | 1,406 | 43 | 98 | 32.697674 | 0.823873 | 0 | 0 | 0.470588 | 0 | 0 | 0.050534 | 0.014947 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0.235294 | false | 0 | 0.058824 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
6811136d2932d9e78d01a4c2b5dbe9f2a5da522f | 34 | py | Python | datadomain/__init__.py | itkovian/datadomain | 3d5096c06749034ff1b1b8f2743fab5cfc606be0 | [
"MIT"
] | null | null | null | datadomain/__init__.py | itkovian/datadomain | 3d5096c06749034ff1b1b8f2743fab5cfc606be0 | [
"MIT"
] | null | null | null | datadomain/__init__.py | itkovian/datadomain | 3d5096c06749034ff1b1b8f2743fab5cfc606be0 | [
"MIT"
] | null | null | null | from datadomain import DataDomain
| 17 | 33 | 0.882353 | 4 | 34 | 7.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 34 | 1 | 34 | 34 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a861f96621057651b9247dd60ae68cd1c48a83a2 | 20,221 | py | Python | analysis/rosetta/rosetta_kl_helper.py | willshi88/scrambler | fd77c05824fc99e6965d204c4f5baa1e3b0c4fb3 | [
"MIT"
] | 19 | 2021-04-30T04:12:58.000Z | 2022-03-07T19:09:32.000Z | analysis/rosetta/rosetta_kl_helper.py | willshi88/scrambler | fd77c05824fc99e6965d204c4f5baa1e3b0c4fb3 | [
"MIT"
] | 4 | 2021-07-02T15:07:27.000Z | 2021-08-01T12:41:28.000Z | analysis/rosetta/rosetta_kl_helper.py | willshi88/scrambler | fd77c05824fc99e6965d204c4f5baa1e3b0c4fb3 | [
"MIT"
] | 4 | 2021-06-28T09:41:01.000Z | 2022-02-28T09:13:29.000Z | import numpy as np
import keras
import tensorflow as tf
import keras.backend as K
# Define KL-divergence helper functions (numpy)
def _get_kl_divergence_numpy(p_dist, p_theta, p_phi, p_omega, t_dist, t_theta, t_phi, t_omega) :
kl_dist = np.mean(np.sum(t_dist * np.log(t_dist / p_dist), axis=-1))
kl_theta = np.mean(np.sum(t_theta * np.log(t_theta / p_theta), axis=-1))
kl_phi = np.mean(np.sum(t_phi * np.log(t_phi / p_phi), axis=-1))
kl_omega = np.mean(np.sum(t_omega * np.log(t_omega / p_omega), axis=-1))
return kl_dist, kl_theta, kl_phi, kl_omega
def _get_smooth_kl_divergence_numpy(p_dist, p_theta, p_phi, p_omega, t_dist, t_theta, t_phi, t_omega) :
td0, t1_dist = np.expand_dims(t_dist[..., 0], axis=-1), t_dist[..., 1:]
tt0, t1_theta = np.expand_dims(t_theta[..., 0], axis=-1), t_theta[..., 1:]
tp0, t1_phi = np.expand_dims(t_phi[..., 0], axis=-1), t_phi[..., 1:]
to0, t1_omega = np.expand_dims(t_omega[..., 0], axis=-1), t_omega[..., 1:]
pd0, p1_dist = np.expand_dims(p_dist[..., 0], axis=-1), p_dist[..., 1:]
pt0, p1_theta = np.expand_dims(p_theta[..., 0], axis=-1), p_theta[..., 1:]
pp0, p1_phi = np.expand_dims(p_phi[..., 0], axis=-1), p_phi[..., 1:]
po0, p1_omega = np.expand_dims(p_omega[..., 0], axis=-1), p_omega[..., 1:]
range_dist = np.linspace(0., 1., t1_dist.shape[3])
range_theta = np.linspace(0., 1., t1_theta.shape[3])
range_phi = np.linspace(0., 1., t1_phi.shape[3])
range_omega = np.linspace(0., 1., t1_omega.shape[3])
pd1 = np.expand_dims(np.sum(p1_dist * np.tile(np.reshape(range_dist, (1, 1, 1, p1_dist.shape[3])), (1, p1_dist.shape[1], p1_dist.shape[2], 1)), axis=-1), axis=-1)
pt1 = np.expand_dims(np.sum(p1_theta * np.tile(np.reshape(range_theta, (1, 1, 1, p1_theta.shape[3])), (1, p1_theta.shape[1], p1_theta.shape[2], 1)), axis=-1), axis=-1)
pp1 = np.expand_dims(np.sum(p1_phi * np.tile(np.reshape(range_phi, (1, 1, 1, p1_phi.shape[3])), (1, p1_phi.shape[1], p1_phi.shape[2], 1)), axis=-1), axis=-1)
po1 = np.expand_dims(np.sum(p1_omega * np.tile(np.reshape(range_omega, (1, 1, 1, p1_omega.shape[3])), (1, p1_omega.shape[1], p1_omega.shape[2], 1)), axis=-1), axis=-1)
td1 = np.expand_dims(np.sum(t1_dist * np.tile(np.reshape(range_dist, (1, 1, 1, t1_dist.shape[3])), (1, t1_dist.shape[1], t1_dist.shape[2], 1)), axis=-1), axis=-1)
tt1 = np.expand_dims(np.sum(t1_theta * np.tile(np.reshape(range_theta, (1, 1, 1, t1_theta.shape[3])), (1, t1_theta.shape[1], t1_theta.shape[2], 1)), axis=-1), axis=-1)
tp1 = np.expand_dims(np.sum(t1_phi * np.tile(np.reshape(range_phi, (1, 1, 1, t1_phi.shape[3])), (1, t1_phi.shape[1], t1_phi.shape[2], 1)), axis=-1), axis=-1)
to1 = np.expand_dims(np.sum(t1_omega * np.tile(np.reshape(range_omega, (1, 1, 1, t1_omega.shape[3])), (1, t1_omega.shape[1], t1_omega.shape[2], 1)), axis=-1), axis=-1)
pd_val = np.clip(np.concatenate([pd0, pd1, 1. - pd0 - pd1], axis=-1), 1e-7, 1. - 1e-7)
pt_val = np.clip(np.concatenate([pt0, pt1, 1. - pt0 - pt1], axis=-1), 1e-7, 1. - 1e-7)
pp_val = np.clip(np.concatenate([pp0, pp1, 1. - pp0 - pp1], axis=-1), 1e-7, 1. - 1e-7)
po_val = np.clip(np.concatenate([po0, po1, 1. - po0 - po1], axis=-1), 1e-7, 1. - 1e-7)
td_val = np.clip(np.concatenate([td0, td1, 1. - td0 - td1], axis=-1), 1e-7, 1. - 1e-7)
tt_val = np.clip(np.concatenate([tt0, tt1, 1. - tt0 - tt1], axis=-1), 1e-7, 1. - 1e-7)
tp_val = np.clip(np.concatenate([tp0, tp1, 1. - tp0 - tp1], axis=-1), 1e-7, 1. - 1e-7)
to_val = np.clip(np.concatenate([to0, to1, 1. - to0 - to1], axis=-1), 1e-7, 1. - 1e-7)
kl_dist = np.mean(np.sum(td_val * np.log(td_val / pd_val), axis=-1))
kl_theta = np.mean(np.sum(tt_val * np.log(tt_val / pt_val), axis=-1))
kl_phi = np.mean(np.sum(tp_val * np.log(tp_val / pp_val), axis=-1))
kl_omega = np.mean(np.sum(to_val * np.log(to_val / po_val), axis=-1))
return kl_dist, kl_theta, kl_phi, kl_omega
def _get_smooth_circular_kl_divergence_numpy(p_dist, p_theta, p_phi, p_omega, t_dist, t_theta, t_phi, t_omega) :
td0, t1_dist = np.expand_dims(t_dist[..., 0], axis=-1), t_dist[..., 1:]
tt0, t1_theta = np.expand_dims(t_theta[..., 0], axis=-1), t_theta[..., 1:]
tp0, t1_phi = np.expand_dims(t_phi[..., 0], axis=-1), t_phi[..., 1:]
to0, t1_omega = np.expand_dims(t_omega[..., 0], axis=-1), t_omega[..., 1:]
pd0, p1_dist = np.expand_dims(p_dist[..., 0], axis=-1), p_dist[..., 1:]
pt0, p1_theta = np.expand_dims(p_theta[..., 0], axis=-1), p_theta[..., 1:]
pp0, p1_phi = np.expand_dims(p_phi[..., 0], axis=-1), p_phi[..., 1:]
po0, p1_omega = np.expand_dims(p_omega[..., 0], axis=-1), p_omega[..., 1:]
range_dist = np.linspace(0., 1., t1_dist.shape[3])
range_sin_theta = (np.sin(np.linspace(-np.pi, np.pi, p1_theta.shape[3])) + 1.) / 2.
range_cos_theta = (np.cos(np.linspace(-np.pi, np.pi, p1_theta.shape[3])) + 1.) / 2.
range_phi = np.linspace(0., 1., t1_phi.shape[3])
range_sin_omega = (np.sin(np.linspace(-np.pi, np.pi, p1_omega.shape[3])) + 1.) / 2.
range_cos_omega = (np.cos(np.linspace(-np.pi, np.pi, p1_omega.shape[3])) + 1.) / 2.
pd1 = np.expand_dims(np.sum(p1_dist * np.tile(np.reshape(range_dist, (1, 1, 1, p1_dist.shape[3])), (1, p1_dist.shape[1], p1_dist.shape[2], 1)), axis=-1), axis=-1)
pt1_sin = np.expand_dims(np.sum(p1_theta * np.tile(np.reshape(range_sin_theta, (1, 1, 1, p1_theta.shape[3])), (1, p1_theta.shape[1], p1_theta.shape[2], 1)), axis=-1), axis=-1)
pt1_cos = np.expand_dims(np.sum(p1_theta * np.tile(np.reshape(range_cos_theta, (1, 1, 1, p1_theta.shape[3])), (1, p1_theta.shape[1], p1_theta.shape[2], 1)), axis=-1), axis=-1)
pp1 = np.expand_dims(np.sum(p1_phi * np.tile(np.reshape(range_phi, (1, 1, 1, p1_phi.shape[3])), (1, p1_phi.shape[1], p1_phi.shape[2], 1)), axis=-1), axis=-1)
po1_sin = np.expand_dims(np.sum(p1_omega * np.tile(np.reshape(range_sin_omega, (1, 1, 1, p1_omega.shape[3])), (1, p1_omega.shape[1], p1_omega.shape[2], 1)), axis=-1), axis=-1)
po1_cos = np.expand_dims(np.sum(p1_omega * np.tile(np.reshape(range_cos_omega, (1, 1, 1, p1_omega.shape[3])), (1, p1_omega.shape[1], p1_omega.shape[2], 1)), axis=-1), axis=-1)
td1 = np.expand_dims(np.sum(t1_dist * np.tile(np.reshape(range_dist, (1, 1, 1, t1_dist.shape[3])), (1, t1_dist.shape[1], t1_dist.shape[2], 1)), axis=-1), axis=-1)
tt1_sin = np.expand_dims(np.sum(t1_theta * np.tile(np.reshape(range_sin_theta, (1, 1, 1, t1_theta.shape[3])), (1, t1_theta.shape[1], t1_theta.shape[2], 1)), axis=-1), axis=-1)
tt1_cos = np.expand_dims(np.sum(t1_theta * np.tile(np.reshape(range_cos_theta, (1, 1, 1, t1_theta.shape[3])), (1, t1_theta.shape[1], t1_theta.shape[2], 1)), axis=-1), axis=-1)
tp1 = np.expand_dims(np.sum(t1_phi * np.tile(np.reshape(range_phi, (1, 1, 1, t1_phi.shape[3])), (1, t1_phi.shape[1], t1_phi.shape[2], 1)), axis=-1), axis=-1)
to1_sin = np.expand_dims(np.sum(t1_omega * np.tile(np.reshape(range_sin_omega, (1, 1, 1, t1_omega.shape[3])), (1, t1_omega.shape[1], t1_omega.shape[2], 1)), axis=-1), axis=-1)
to1_cos = np.expand_dims(np.sum(t1_omega * np.tile(np.reshape(range_cos_omega, (1, 1, 1, t1_omega.shape[3])), (1, t1_omega.shape[1], t1_omega.shape[2], 1)), axis=-1), axis=-1)
pd_val = np.clip(np.concatenate([pd0, pd1, 1. - pd0 - pd1], axis=-1), 1e-7, 1. - 1e-7)
pt_sin_val = np.clip(np.concatenate([pt0, pt1_sin, 1. - pt0 - pt1_sin], axis=-1), 1e-7, 1. - 1e-7)
pt_cos_val = np.clip(np.concatenate([pt0, pt1_cos, 1. - pt0 - pt1_cos], axis=-1), 1e-7, 1. - 1e-7)
pp_val = np.clip(np.concatenate([pp0, pp1, 1. - pp0 - pp1], axis=-1), 1e-7, 1. - 1e-7)
po_sin_val = np.clip(np.concatenate([po0, po1_sin, 1. - po0 - po1_sin], axis=-1), 1e-7, 1. - 1e-7)
po_cos_val = np.clip(np.concatenate([po0, po1_cos, 1. - po0 - po1_cos], axis=-1), 1e-7, 1. - 1e-7)
td_val = np.clip(np.concatenate([td0, td1, 1. - td0 - td1], axis=-1), 1e-7, 1. - 1e-7)
tt_sin_val = np.clip(np.concatenate([tt0, tt1_sin, 1. - tt0 - tt1_sin], axis=-1), 1e-7, 1. - 1e-7)
tt_cos_val = np.clip(np.concatenate([tt0, tt1_cos, 1. - tt0 - tt1_cos], axis=-1), 1e-7, 1. - 1e-7)
tp_val = np.clip(np.concatenate([tp0, tp1, 1. - tp0 - tp1], axis=-1), 1e-7, 1. - 1e-7)
to_sin_val = np.clip(np.concatenate([to0, to1_sin, 1. - to0 - to1_sin], axis=-1), 1e-7, 1. - 1e-7)
to_cos_val = np.clip(np.concatenate([to0, to1_cos, 1. - to0 - to1_cos], axis=-1), 1e-7, 1. - 1e-7)
kl_dist = np.mean(np.sum(td_val * np.log(td_val / pd_val), axis=-1))
kl_theta_sin = np.mean(np.sum(tt_sin_val * np.log(tt_sin_val / pt_sin_val), axis=-1)) * 0.5
kl_theta_cos = np.mean(np.sum(tt_cos_val * np.log(tt_cos_val / pt_cos_val), axis=-1)) * 0.5
kl_phi = np.mean(np.sum(tp_val * np.log(tp_val / pp_val), axis=-1))
kl_omega_sin = np.mean(np.sum(to_sin_val * np.log(to_sin_val / po_sin_val), axis=-1)) * 0.5
kl_omega_cos = np.mean(np.sum(to_cos_val * np.log(to_cos_val / po_cos_val), axis=-1)) * 0.5
return kl_dist, kl_theta_sin, kl_theta_cos, kl_phi, kl_omega_sin, kl_omega_cos
# Define KL divergence helper functions (Keras)
def _get_kl_divergence_keras(p_dist, p_theta, p_phi, p_omega, target_p_dist, target_p_theta, target_p_phi, target_p_omega):
    """Per-output KL divergence between fixed targets and predicted bin distributions.

    Targets are numpy arrays turned into clipped Keras constants; predictions
    are tensors.  The divergence is summed over the bin axis and averaged over
    the two pairwise position axes.

    Returns (kl_dist, kl_theta, kl_phi, kl_omega).
    """
    def _kl(target, predicted):
        # Clip the constant target away from {0, 1} so the log stays finite.
        t = K.clip(K.constant(target), K.epsilon(), 1. - K.epsilon())
        return K.mean(K.sum(t * K.log(t / predicted), axis=-1), axis=(-1, -2))

    return (_kl(target_p_dist, p_dist),
            _kl(target_p_theta, p_theta),
            _kl(target_p_phi, p_phi),
            _kl(target_p_omega, p_omega))
def _get_smooth_kl_divergence_keras(p_dist, p_theta, p_phi, p_omega, target_p_dist, target_p_theta, target_p_phi, target_p_omega):
    """Smoothed KL divergence on 3-bin summaries [p0, E, 1 - p0 - E].

    Each distribution's tail (bins 1:) is reduced to the expectation E of a
    linear support over [0, 1]; together with the head bin p0 this forms a
    3-bin summary.  KL(target || prediction) on the summary is summed over the
    bins and averaged over the two position axes.  Targets are numpy arrays
    (turned into constants); predictions are tensors.

    Returns (kl_dist, kl_theta, kl_phi, kl_omega).
    """
    eps = K.epsilon()

    def _summary(tensor, target_shape):
        # Split into head bin and tail, take the tail expectation against the
        # linear support, and rebuild the clipped 3-bin summary distribution.
        support = np.linspace(0., 1., target_shape[3] - 1)
        tiled = K.tile(K.reshape(K.constant(support), (1, 1, 1, target_shape[3] - 1)),
                       (1, target_shape[1], target_shape[2], 1))
        head = K.expand_dims(tensor[..., 0], axis=-1)
        mean = K.expand_dims(K.sum(tensor[..., 1:] * tiled, axis=-1), axis=-1)
        return K.clip(K.concatenate([head, mean, 1. - head - mean], axis=-1), eps, 1. - eps)

    def _kl(t, p):
        return K.mean(K.sum(t * K.log(t / p), axis=-1), axis=(-1, -2))

    results = []
    for predicted, target in ((p_dist, target_p_dist), (p_theta, target_p_theta),
                              (p_phi, target_p_phi), (p_omega, target_p_omega)):
        t = K.clip(K.constant(target), eps, 1. - eps)
        results.append(_kl(_summary(t, target.shape), _summary(predicted, target.shape)))
    return tuple(results)
def _get_smooth_circular_kl_divergence_keras(p_dist, p_theta, p_phi, p_omega, target_p_dist, target_p_theta, target_p_phi, target_p_omega) :
    """Keras version of the smoothed *circular* KL divergence.

    Every distribution (shape (batch, L, L, bins); bin 0 is the head bin) is
    collapsed to a 3-bin summary [p0, E, 1 - p0 - E], where E is the expected
    support value under the tail bins.  The circular angles (theta, omega) are
    summarized twice -- against sin and cos supports mapped into [0, 1] -- and
    each half is weighted by 0.5.  Targets are numpy arrays (turned into
    constants); predictions are tensors.

    Returns (kl_dist, kl_theta_sin, kl_theta_cos, kl_phi, kl_omega_sin, kl_omega_cos).
    """
    # Clip the constant targets away from {0, 1} so the logs stay finite.
    t_dist = K.clip(K.constant(target_p_dist), K.epsilon(), 1. - K.epsilon())
    t_theta = K.clip(K.constant(target_p_theta), K.epsilon(), 1. - K.epsilon())
    t_phi = K.clip(K.constant(target_p_phi), K.epsilon(), 1. - K.epsilon())
    t_omega = K.clip(K.constant(target_p_omega), K.epsilon(), 1. - K.epsilon())
    # Split each distribution into its head bin (index 0) and tail bins (1:).
    td0, t1_dist = K.expand_dims(t_dist[..., 0], axis=-1), t_dist[..., 1:]
    tt0, t1_theta = K.expand_dims(t_theta[..., 0], axis=-1), t_theta[..., 1:]
    tp0, t1_phi = K.expand_dims(t_phi[..., 0], axis=-1), t_phi[..., 1:]
    to0, t1_omega = K.expand_dims(t_omega[..., 0], axis=-1), t_omega[..., 1:]
    pd0, p1_dist = K.expand_dims(p_dist[..., 0], axis=-1), p_dist[..., 1:]
    pt0, p1_theta = K.expand_dims(p_theta[..., 0], axis=-1), p_theta[..., 1:]
    pp0, p1_phi = K.expand_dims(p_phi[..., 0], axis=-1), p_phi[..., 1:]
    po0, p1_omega = K.expand_dims(p_omega[..., 0], axis=-1), p_omega[..., 1:]
    # Supports over the tail bins; the angular supports run over [-pi, pi]
    # and are mapped through (sin + 1) / 2 and (cos + 1) / 2 into [0, 1].
    range_dist = np.linspace(0., 1., target_p_dist.shape[3] - 1)
    range_sin_theta = (np.sin(np.linspace(-np.pi, np.pi, target_p_theta.shape[3] - 1)) + 1.) / 2.
    range_cos_theta = (np.cos(np.linspace(-np.pi, np.pi, target_p_theta.shape[3] - 1)) + 1.) / 2.
    range_phi = np.linspace(0., 1., target_p_phi.shape[3] - 1)
    range_sin_omega = (np.sin(np.linspace(-np.pi, np.pi, target_p_omega.shape[3] - 1)) + 1.) / 2.
    range_cos_omega = (np.cos(np.linspace(-np.pi, np.pi, target_p_omega.shape[3] - 1)) + 1.) / 2.
    # Tail expectations: sum of tail probabilities times the tiled support,
    # kept as a trailing singleton axis so they concatenate as one bin.
    pd1 = K.expand_dims(K.sum(p1_dist * K.tile(K.reshape(K.constant(range_dist), (1, 1, 1, target_p_dist.shape[3] - 1)), (1, target_p_dist.shape[1], target_p_dist.shape[2], 1)), axis=-1), axis=-1)
    pt1_sin = K.expand_dims(K.sum(p1_theta * K.tile(K.reshape(K.constant(range_sin_theta), (1, 1, 1, target_p_theta.shape[3] - 1)), (1, target_p_theta.shape[1], target_p_theta.shape[2], 1)), axis=-1), axis=-1)
    pt1_cos = K.expand_dims(K.sum(p1_theta * K.tile(K.reshape(K.constant(range_cos_theta), (1, 1, 1, target_p_theta.shape[3] - 1)), (1, target_p_theta.shape[1], target_p_theta.shape[2], 1)), axis=-1), axis=-1)
    pp1 = K.expand_dims(K.sum(p1_phi * K.tile(K.reshape(K.constant(range_phi), (1, 1, 1, target_p_phi.shape[3] - 1)), (1, target_p_phi.shape[1], target_p_phi.shape[2], 1)), axis=-1), axis=-1)
    po1_sin = K.expand_dims(K.sum(p1_omega * K.tile(K.reshape(K.constant(range_sin_omega), (1, 1, 1, target_p_omega.shape[3] - 1)), (1, target_p_omega.shape[1], target_p_omega.shape[2], 1)), axis=-1), axis=-1)
    po1_cos = K.expand_dims(K.sum(p1_omega * K.tile(K.reshape(K.constant(range_cos_omega), (1, 1, 1, target_p_omega.shape[3] - 1)), (1, target_p_omega.shape[1], target_p_omega.shape[2], 1)), axis=-1), axis=-1)
    td1 = K.expand_dims(K.sum(t1_dist * K.tile(K.reshape(K.constant(range_dist), (1, 1, 1, target_p_dist.shape[3] - 1)), (1, target_p_dist.shape[1], target_p_dist.shape[2], 1)), axis=-1), axis=-1)
    tt1_sin = K.expand_dims(K.sum(t1_theta * K.tile(K.reshape(K.constant(range_sin_theta), (1, 1, 1, target_p_theta.shape[3] - 1)), (1, target_p_theta.shape[1], target_p_theta.shape[2], 1)), axis=-1), axis=-1)
    tt1_cos = K.expand_dims(K.sum(t1_theta * K.tile(K.reshape(K.constant(range_cos_theta), (1, 1, 1, target_p_theta.shape[3] - 1)), (1, target_p_theta.shape[1], target_p_theta.shape[2], 1)), axis=-1), axis=-1)
    tp1 = K.expand_dims(K.sum(t1_phi * K.tile(K.reshape(K.constant(range_phi), (1, 1, 1, target_p_phi.shape[3] - 1)), (1, target_p_phi.shape[1], target_p_phi.shape[2], 1)), axis=-1), axis=-1)
    to1_sin = K.expand_dims(K.sum(t1_omega * K.tile(K.reshape(K.constant(range_sin_omega), (1, 1, 1, target_p_omega.shape[3] - 1)), (1, target_p_omega.shape[1], target_p_omega.shape[2], 1)), axis=-1), axis=-1)
    to1_cos = K.expand_dims(K.sum(t1_omega * K.tile(K.reshape(K.constant(range_cos_omega), (1, 1, 1, target_p_omega.shape[3] - 1)), (1, target_p_omega.shape[1], target_p_omega.shape[2], 1)), axis=-1), axis=-1)
    # Clipped 3-bin summary distributions [head, expectation, remainder].
    pd_val = K.clip(K.concatenate([pd0, pd1, 1. - pd0 - pd1], axis=-1), K.epsilon(), 1. - K.epsilon())
    pt_sin_val = K.clip(K.concatenate([pt0, pt1_sin, 1. - pt0 - pt1_sin], axis=-1), K.epsilon(), 1. - K.epsilon())
    pt_cos_val = K.clip(K.concatenate([pt0, pt1_cos, 1. - pt0 - pt1_cos], axis=-1), K.epsilon(), 1. - K.epsilon())
    pp_val = K.clip(K.concatenate([pp0, pp1, 1. - pp0 - pp1], axis=-1), K.epsilon(), 1. - K.epsilon())
    po_sin_val = K.clip(K.concatenate([po0, po1_sin, 1. - po0 - po1_sin], axis=-1), K.epsilon(), 1. - K.epsilon())
    po_cos_val = K.clip(K.concatenate([po0, po1_cos, 1. - po0 - po1_cos], axis=-1), K.epsilon(), 1. - K.epsilon())
    td_val = K.clip(K.concatenate([td0, td1, 1. - td0 - td1], axis=-1), K.epsilon(), 1. - K.epsilon())
    tt_sin_val = K.clip(K.concatenate([tt0, tt1_sin, 1. - tt0 - tt1_sin], axis=-1), K.epsilon(), 1. - K.epsilon())
    tt_cos_val = K.clip(K.concatenate([tt0, tt1_cos, 1. - tt0 - tt1_cos], axis=-1), K.epsilon(), 1. - K.epsilon())
    tp_val = K.clip(K.concatenate([tp0, tp1, 1. - tp0 - tp1], axis=-1), K.epsilon(), 1. - K.epsilon())
    to_sin_val = K.clip(K.concatenate([to0, to1_sin, 1. - to0 - to1_sin], axis=-1), K.epsilon(), 1. - K.epsilon())
    to_cos_val = K.clip(K.concatenate([to0, to1_cos, 1. - to0 - to1_cos], axis=-1), K.epsilon(), 1. - K.epsilon())
    # KL(target || prediction): sum over bins, mean over position axes; the
    # sin/cos halves of each circular angle are weighted by 0.5.
    kl_dist = K.mean(K.sum(td_val * K.log(td_val / pd_val), axis=-1), axis=(-1, -2))
    kl_theta_sin = K.mean(K.sum(tt_sin_val * K.log(tt_sin_val / pt_sin_val), axis=-1), axis=(-1, -2)) * 0.5
    kl_theta_cos = K.mean(K.sum(tt_cos_val * K.log(tt_cos_val / pt_cos_val), axis=-1), axis=(-1, -2)) * 0.5
    kl_phi = K.mean(K.sum(tp_val * K.log(tp_val / pp_val), axis=-1), axis=(-1, -2))
    kl_omega_sin = K.mean(K.sum(to_sin_val * K.log(to_sin_val / po_sin_val), axis=-1), axis=(-1, -2)) * 0.5
    kl_omega_cos = K.mean(K.sum(to_cos_val * K.log(to_cos_val / po_cos_val), axis=-1), axis=(-1, -2)) * 0.5
    return kl_dist, kl_theta_sin, kl_theta_cos, kl_phi, kl_omega_sin, kl_omega_cos
| 81.53629 | 209 | 0.62801 | 3,991 | 20,221 | 2.953395 | 0.019544 | 0.082294 | 0.047849 | 0.045813 | 0.981929 | 0.960889 | 0.949606 | 0.911598 | 0.877832 | 0.862221 | 0 | 0.072074 | 0.146432 | 20,221 | 247 | 210 | 81.866397 | 0.610834 | 0.004451 | 0 | 0.521277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031915 | false | 0 | 0.021277 | 0 | 0.085106 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a882a611972e18d8b5e75bc67314be160a3e5664 | 27 | py | Python | __init__.py | Heriyadi235/jet_engine_model | e909f15a5f61420d104740b69cb86ced2aeff011 | [
"Unlicense"
] | null | null | null | __init__.py | Heriyadi235/jet_engine_model | e909f15a5f61420d104740b69cb86ced2aeff011 | [
"Unlicense"
] | null | null | null | __init__.py | Heriyadi235/jet_engine_model | e909f15a5f61420d104740b69cb86ced2aeff011 | [
"Unlicense"
] | null | null | null | """
2021-11-19
发动机部件类
"""
| 4.5 | 10 | 0.518519 | 4 | 27 | 3.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.363636 | 0.185185 | 27 | 5 | 11 | 5.4 | 0.272727 | 0.62963 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a886983633be7b6c3d9b10eb5ebcb8c5f9cd0a90 | 12,975 | py | Python | tests/venue/test_views.py | cjolowicz/muckr-service | 014017ab92bd1d2034cd398f2e98a6fdaf30f164 | [
"MIT"
] | null | null | null | tests/venue/test_views.py | cjolowicz/muckr-service | 014017ab92bd1d2034cd398f2e98a6fdaf30f164 | [
"MIT"
] | 12 | 2018-12-21T22:13:33.000Z | 2019-08-03T20:03:19.000Z | tests/venue/test_views.py | cjolowicz/muckr-service | 014017ab92bd1d2034cd398f2e98a6fdaf30f164 | [
"MIT"
] | null | null | null | """Test venue views."""
import json
import pytest
from muckr_api.venue.models import Venue
from muckr_api.venue.views import venue_schema, venues_schema
from tests.venue.factories import VenueFactory
from tests.utils import create_token_auth_header
class TestGetVenues:
    """GET /venues: listing and pagination."""

    def test_get_request_returns_list_of_venues(self, venue, client):
        headers = create_token_auth_header(venue.user.get_token())
        resp = client.get("/venues", headers=headers)
        assert resp.status == "200 OK"
        assert resp.get_json() == venues_schema.dump([venue])

    def test_get_request_returns_first_page_of_venues_by_default(
        self, client, user, database
    ):
        venues = VenueFactory.create_batch(25, user=user)
        database.session.commit()
        headers = create_token_auth_header(user.get_token())
        resp = client.get("/venues", headers=headers)
        assert resp.status == "200 OK"
        # Default page size is 10, so only the first ten venues come back.
        assert resp.get_json() == venues_schema.dump(venues[:10])

    @pytest.mark.parametrize("page", [1, 2, 3, 4])
    def test_get_request_returns_requested_page_of_venues(
        self, client, user, database, page
    ):
        venues = VenueFactory.create_batch(25, user=user)
        database.session.commit()
        resp = client.get(
            "/venues",
            query_string={"page": page},
            headers=create_token_auth_header(user.get_token()),
        )
        start = 10 * (page - 1)
        expected = venues[start:start + 10]
        assert resp.status == "200 OK"
        assert resp.get_json() == venues_schema.dump(expected)

    @pytest.mark.parametrize("page", [1, 2, 3, 4])
    @pytest.mark.parametrize("per_page", [1, 2, 5, 10, 20, 50])
    def test_get_request_returns_requested_number_of_venues(
        self, client, user, database, page, per_page
    ):
        venues = VenueFactory.create_batch(25, user=user)
        database.session.commit()
        resp = client.get(
            "/venues",
            query_string={"page": page, "per_page": per_page},
            headers=create_token_auth_header(user.get_token()),
        )
        start = per_page * (page - 1)
        expected = venues[start:start + per_page]
        assert resp.status == "200 OK"
        assert resp.get_json() == venues_schema.dump(expected)

    def test_get_request_for_venues_fails_without_authentication(self, client):
        assert client.get("/venues").status == "401 UNAUTHORIZED"
class TestGetVenue:
    """GET /venues/<id>: single-venue retrieval and authorization."""

    def test_get_request_returns_venue(self, venue, client):
        resp = client.get(
            f"/venues/{venue.id}",
            headers=create_token_auth_header(venue.user.get_token()),
        )
        assert resp.status == "200 OK"
        assert resp.get_json() == venue_schema.dump(venue)

    def test_get_request_fails_without_authentication(self, venue, client):
        assert client.get(f"/venues/{venue.id}").status == "401 UNAUTHORIZED"

    def test_get_request_returns_404_if_venue_not_found(self, venue, client):
        headers = create_token_auth_header(venue.user.get_token())
        resp = client.get("/venues/2", headers=headers)
        assert resp.status == "404 NOT FOUND"
        assert resp.get_json() == {"error": "Not Found"}

    def test_get_request_returns_404_for_venue_of_another_user(self, client, database):
        venue1, venue2 = VenueFactory.create_batch(2)
        database.session.commit()
        # Authenticate as venue2's owner but request venue1.
        resp = client.get(
            f"/venues/{venue1.id}",
            headers=create_token_auth_header(venue2.user.get_token()),
        )
        assert resp.status == "404 NOT FOUND"
        assert resp.get_json() == {"error": "Not Found"}

    def test_get_request_succeeds_for_venue_of_another_user_if_admin(
        self, client, venue, admin
    ):
        resp = client.get(
            f"/venues/{venue.id}",
            headers=create_token_auth_header(admin.get_token()),
        )
        assert resp.status == "200 OK"
        assert resp.get_json() == venue_schema.dump(venue)
class TestPostVenue:
    """POST /venues: creation, validation, and authentication."""

    def test_post_request_creates_venue(self, client, user):
        payload = venue_schema.dump(VenueFactory.build())
        del payload["id"]  # the server assigns the id
        resp = client.post(
            "/venues",
            data=json.dumps(payload),
            content_type="application/json",
            headers=create_token_auth_header(user.get_token()),
        )
        assert resp.status == "201 CREATED"
        body = resp.get_json()
        assert body is not None
        assert "id" in body
        created = Venue.query.get(body["id"])
        assert created is not None
        assert created.id == body["id"]
        assert created.name == body["name"]
        assert created.name == payload["name"]
        assert created.user.id == user.id

    def test_post_request_fails_without_authentication(self, venue, client):
        payload = venue_schema.dump(VenueFactory.build())
        resp = client.post(
            "/venues", data=json.dumps(payload), content_type="application/json"
        )
        assert resp.status == "401 UNAUTHORIZED"

    def test_post_request_fails_if_name_exists(self, venue, client):
        payload = venue_schema.dump(VenueFactory.build(name=venue.name))
        del payload["id"]
        resp = client.post(
            "/venues",
            data=json.dumps(payload),
            content_type="application/json",
            headers=create_token_auth_header(venue.user.get_token()),
        )
        assert resp.status == "400 BAD REQUEST"
        assert "name" in resp.get_json()["details"]

    def test_post_request_fails_if_name_is_invalid(self, user, client):
        payload = venue_schema.dump(VenueFactory.build(name=""))
        resp = client.post(
            "/venues",
            data=json.dumps(payload),
            content_type="application/json",
            headers=create_token_auth_header(user.get_token()),
        )
        assert resp.status == "422 UNPROCESSABLE ENTITY"
        assert "name" in resp.get_json()["details"]
class TestPutVenue:
    """PUT /venues/<id>: updates, validation, and authorization."""

    @staticmethod
    def _put(client, venue_id, payload, token=None):
        """Send a JSON PUT for the given venue, optionally authenticated."""
        headers = {} if token is None else create_token_auth_header(token)
        return client.put(
            f"/venues/{venue_id}",
            data=json.dumps(payload),
            content_type="application/json",
            headers=headers,
        )

    def test_put_request_modifies_name(self, client, venue):
        original = venue_schema.dump(venue)
        resp = self._put(client, venue.id, {"name": "john"}, venue.user.get_token())
        assert resp.status == "200 OK"
        assert venue.id == original["id"]
        assert venue.name == "john"

    def test_put_request_returns_404_for_venue_of_another_user(self, client, database):
        venue1, venue2 = VenueFactory.create_batch(2)
        database.session.commit()
        # Authenticate as venue2's owner but target venue1.
        resp = self._put(client, venue1.id, {"name": "john"}, venue2.user.get_token())
        assert resp.status == "404 NOT FOUND"
        assert resp.get_json() == {"error": "Not Found"}

    def test_put_request_succeeds_for_venue_of_another_user_if_admin(
        self, client, venue, admin
    ):
        resp = self._put(client, venue.id, {"name": "john"}, admin.get_token())
        assert resp.status == "200 OK"
        assert resp.get_json() == venue_schema.dump(venue)

    def test_put_request_fails_if_id_is_passed(self, client, venue):
        resp = self._put(client, venue.id, {"id": 123}, venue.user.get_token())
        assert resp.status == "422 UNPROCESSABLE ENTITY"
        assert "id" in resp.get_json()["details"]

    def test_put_request_returns_modified_venue(self, venue, client):
        original_id = venue.id
        resp = self._put(client, venue.id, {"name": "john"}, venue.user.get_token())
        body = resp.get_json()
        assert body["id"] == original_id
        assert body["name"] == Venue.query.get(body["id"]).name

    def test_put_request_fails_without_authentication(self, venue, client):
        resp = self._put(client, venue.id, {"name": "john"})
        assert resp.status == "401 UNAUTHORIZED"

    def test_put_request_fails_if_name_exists(self, client, user, database):
        venue1, venue2 = VenueFactory.create_batch(2, user=user)
        database.session.commit()
        resp = self._put(client, venue1.id, {"name": venue2.name}, user.get_token())
        assert resp.status == "400 BAD REQUEST"
        assert "name" in resp.get_json()["details"]

    def test_put_request_succeeds_if_name_exists_for_another_user(
        self, client, database
    ):
        venue1, venue2 = VenueFactory.create_batch(2)
        database.session.commit()
        resp = self._put(
            client, venue1.id, {"name": venue2.name}, venue1.user.get_token()
        )
        assert resp.status == "200 OK"
        assert Venue.query.get(venue1.id).name == venue2.name

    def test_put_request_succeeds_if_name_is_unchanged(self, venue, client):
        name = venue.name
        resp = self._put(client, venue.id, {"name": name}, venue.user.get_token())
        assert resp.status == "200 OK"
        assert Venue.query.get(venue.id).name == name

    def test_put_request_fails_if_name_is_invalid(self, venue, client):
        resp = self._put(client, venue.id, {"name": ""}, venue.user.get_token())
        assert resp.status == "422 UNPROCESSABLE ENTITY"
        assert "name" in resp.get_json()["details"]
class TestDeleteVenue:
    """DELETE /venues/<id>: removal and authorization."""

    def test_delete_request_removes_venue(self, venue, client):
        resp = client.delete(
            f"/venues/{venue.id}",
            headers=create_token_auth_header(venue.user.get_token()),
        )
        assert resp.status == "204 NO CONTENT"
        assert resp.data == b""
        assert Venue.query.get(venue.id) is None

    def test_delete_request_returns_404_if_venue_not_found(self, venue, client):
        headers = create_token_auth_header(venue.user.get_token())
        resp = client.delete("/venues/2", headers=headers)
        assert resp.status == "404 NOT FOUND"
        assert resp.get_json() == {"error": "Not Found"}

    def test_delete_request_returns_404_for_venue_of_another_user(
        self, client, database
    ):
        venue1, venue2 = VenueFactory.create_batch(2)
        database.session.commit()
        # Authenticate as venue2's owner but target venue1.
        resp = client.delete(
            f"/venues/{venue1.id}",
            headers=create_token_auth_header(venue2.user.get_token()),
        )
        assert resp.status == "404 NOT FOUND"
        assert resp.get_json() == {"error": "Not Found"}

    def test_delete_request_succeeds_for_venue_of_another_user_if_admin(
        self, client, venue, admin, database
    ):
        resp = client.delete(
            f"/venues/{venue.id}",
            headers=create_token_auth_header(admin.get_token()),
        )
        assert resp.status == "204 NO CONTENT"
        assert resp.data == b""
        assert Venue.query.get(venue.id) is None

    def test_delete_request_fails_without_authentication(self, venue, client):
        assert client.delete(f"/venues/{venue.id}").status == "401 UNAUTHORIZED"
| 36.652542 | 87 | 0.626127 | 1,541 | 12,975 | 5.033095 | 0.080467 | 0.075812 | 0.072202 | 0.06769 | 0.838319 | 0.814595 | 0.79229 | 0.742135 | 0.729371 | 0.697138 | 0 | 0.016605 | 0.252717 | 12,975 | 353 | 88 | 36.756374 | 0.783313 | 0.00131 | 0 | 0.60274 | 0 | 0 | 0.085855 | 0 | 0 | 0 | 0 | 0 | 0.212329 | 1 | 0.099315 | false | 0.003425 | 0.020548 | 0 | 0.136986 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a88cb1d580ef4a231a8f3d6da3a0be1442131f19 | 6,223 | py | Python | misc/encoder_QI.py | roma-ghewari/visDial.pytorch | 03fe6e679170d54a985b6402f07fea4a5fb4dd73 | [
"MIT"
] | 123 | 2017-10-05T00:29:34.000Z | 2022-03-28T14:16:06.000Z | misc/encoder_QI.py | roma-ghewari/visDial.pytorch | 03fe6e679170d54a985b6402f07fea4a5fb4dd73 | [
"MIT"
] | 13 | 2017-10-23T09:33:33.000Z | 2022-01-05T09:42:29.000Z | misc/encoder_QI.py | roma-ghewari/visDial.pytorch | 03fe6e679170d54a985b6402f07fea4a5fb4dd73 | [
"MIT"
] | 37 | 2017-10-06T09:47:05.000Z | 2020-06-27T21:06:01.000Z |
import torch
import torch.nn as nn
from torch.autograd import Variable
import pdb
import math
import numpy as np
import torch.nn.functional as F
# NOTE(review): dead code -- an earlier ``_netE`` variant without spatial
# attention (4096-d global image features, concat + two fc layers), disabled
# by wrapping it in a module-level string literal.  Kept for reference only;
# consider deleting once the attention-based encoder below is settled.
'''
class _netE(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""
    def __init__(self, rnn_type, ninp, nhid, nlayers, dropout):
        super(_netE, self).__init__()
        self.d = dropout
        self.rnn_type = rnn_type
        self.nhid = nhid
        self.nlayers = nlayers
        self.nhid = nhid
        self.ninp = ninp
        self.img_embed = nn.Linear(4096, 512)
        self.ques_rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
        self.fc1 = nn.Linear(self.nhid*2, self.nhid)
        self.fc2 = nn.Linear(self.nhid, self.ninp)
    def forward(self, ques_emb, img_raw, ques_hidden, rnd):
        img_emb = F.dropout(F.tanh(self.img_embed(img_raw)), self.d, training=self.training)
        ques_feat, ques_hidden = self.ques_rnn(ques_emb, ques_hidden)
        ques_feat = F.dropout(ques_feat[-1], self.d, training=self.training)
        concat_feat = torch.cat((ques_feat, img_emb),1)
        encoder_feat = F.dropout(F.tanh(self.fc1(concat_feat)), self.d, training=self.training)
        encoder_feat = F.dropout(self.fc2(encoder_feat), 0.3, training=self.training)
        return encoder_feat, ques_hidden
    def init_hidden(self, bsz):
        weight = next(self.parameters()).data
        if self.rnn_type == 'LSTM':
            return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
                    Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
        else:
            return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
'''
class _netE(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ninp, nhid, nlayers, dropout):
super(_netE, self).__init__()
self.d = dropout
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.nhid = nhid
self.ninp = ninp
self.img_embed = nn.Linear(512, 512)
self.ques_rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
self.Wq_2 = nn.Linear(self.nhid, self.nhid)
self.Wi_2 = nn.Linear(self.nhid, self.nhid)
self.Wa_2 = nn.Linear(self.nhid, 1)
self.fc1 = nn.Linear(self.nhid*2, self.ninp)
#self.fc2 = nn.Linear(self.nhid*2, self.ninp)
def forward(self, ques_emb, img_raw, ques_hidden, rnd):
img_emb = F.tanh(self.img_embed(img_raw))
ques_feat, ques_hidden = self.ques_rnn(ques_emb, ques_hidden)
#ques_feat = F.dropout(ques_feat[-1], self.d, training=self.training)
ques_feat = ques_feat[-1]
ques_emb_2 = self.Wq_2(ques_feat).view(-1, 1, self.nhid)
img_emb_2 = self.Wi_2(img_emb).view(-1, 49, self.nhid)
atten_emb_2 = F.tanh(img_emb_2 + ques_emb_2.expand_as(img_emb_2))
img_atten_weight = F.softmax(self.Wa_2(F.dropout(atten_emb_2, self.d, training=self.training
).view(-1, self.nhid)).view(-1, 49))
img_attn_feat = torch.bmm(img_atten_weight.view(-1, 1, 49),
img_emb.view(-1, 49, self.nhid))
concat_feat = F.dropout(torch.cat((img_attn_feat.view(-1, self.nhid), ques_feat), 1), self.d, training=self.training)
encoder_feat = F.tanh(self.fc1(concat_feat))
return encoder_feat, ques_hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
'''
class _netE(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ninp, nhid, nlayers, dropout):
super(_netE, self).__init__()
self.d = dropout
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.nhid = nhid
self.ninp = ninp
self.img_embed = nn.Linear(4096, 512)
self.ques_rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
#self.Wq_2 = nn.Linear(self.nhid, self.nhid)
#self.Wi_2 = nn.Linear(self.nhid, self.nhid)
#self.Wa_2 = nn.Linear(self.nhid, 1)
#self.fc1 = nn.Linear(self.nhid, self.nhid)
self.fc2 = nn.Linear(self.nhid*2, self.ninp)
def forward(self, ques_emb, img_raw, ques_hidden, rnd):
img_emb = F.dropout(F.tanh(self.img_embed(img_raw)), self.d, training=self.training)
ques_feat, ques_hidden = self.ques_rnn(ques_emb, ques_hidden)
ques_feat = F.dropout(ques_feat[-1], self.d, training=self.training)
#ques_feat = ques_feat[-1]
#ques_emb_2 = self.Wq_2(ques_feat).view(-1, 1, self.nhid)
#img_emb_2 = self.Wi_2(img_emb).view(-1, 49, self.nhid)
#atten_emb_2 = F.tanh(img_emb_2 + ques_emb_2.expand_as(img_emb_2))
#img_atten_weight = F.softmax(self.Wa_2(F.dropout(atten_emb_2, self.d, training=self.training
# ).view(-1, self.nhid)).view(-1, 49))
#img_attn_feat = torch.bmm(img_atten_weight.view(-1, 1, 49),
# img_emb.view(-1, 49, self.nhid))
#encoder_feat = F.dropout(F.tanh(self.fc1(img_attn_feat.view(-1, self.nhid))), self.d, training=self.training)
concat_feat = torch.cat((img_emb, ques_feat), 1)
encoder_feat = F.dropout(F.tanh(self.fc2(concat_feat)), self.d, training=self.training)
return encoder_feat, ques_hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
'''
| 37.487952 | 125 | 0.620119 | 913 | 6,223 | 4.015334 | 0.086528 | 0.093835 | 0.042553 | 0.052373 | 0.943808 | 0.943808 | 0.93808 | 0.911075 | 0.845881 | 0.829242 | 0 | 0.022737 | 0.243773 | 6,223 | 165 | 126 | 37.715152 | 0.756269 | 0.029086 | 0 | 0.046512 | 0 | 0 | 0.001872 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.162791 | 0 | 0.325581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
a892f75218dedfe24e5297830bf906e64c99785f | 216 | py | Python | src/hatch/env/plugin/hooks.py | daobook/hatch | 1cf39ad1a11ce90bc77fb7fdc4b9202433509179 | [
"MIT"
] | null | null | null | src/hatch/env/plugin/hooks.py | daobook/hatch | 1cf39ad1a11ce90bc77fb7fdc4b9202433509179 | [
"MIT"
] | null | null | null | src/hatch/env/plugin/hooks.py | daobook/hatch | 1cf39ad1a11ce90bc77fb7fdc4b9202433509179 | [
"MIT"
] | null | null | null | from hatchling.plugin import hookimpl
from ..system import SystemEnvironment
from ..virtual import VirtualEnvironment
@hookimpl
def hatch_register_environment():
return [SystemEnvironment, VirtualEnvironment]
| 21.6 | 50 | 0.828704 | 21 | 216 | 8.428571 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.115741 | 216 | 9 | 51 | 24 | 0.926702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | true | 0 | 0.5 | 0.166667 | 0.833333 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
a896a1c83dc3a8c9b54550f2b7cf3d2ac25b3ec3 | 51 | py | Python | mail_html/__init__.py | bsholy/learn_flask_email | a277421ad1e1a8ec637681c75da65dbca773dc1f | [
"Unlicense"
] | null | null | null | mail_html/__init__.py | bsholy/learn_flask_email | a277421ad1e1a8ec637681c75da65dbca773dc1f | [
"Unlicense"
] | null | null | null | mail_html/__init__.py | bsholy/learn_flask_email | a277421ad1e1a8ec637681c75da65dbca773dc1f | [
"Unlicense"
] | null | null | null | from mail_html.get_mail_message import get_mail_msg | 51 | 51 | 0.921569 | 10 | 51 | 4.2 | 0.7 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 51 | 1 | 51 | 51 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
a8a06172abe3ab9863b2f4b0b6dc6d548f2a7a7a | 46 | py | Python | ssl_framework/models/heads/__init__.py | ananyahjha93/libself | f43cf8d60db97bb47652eecf06efa1801e850b9f | [
"MIT"
] | 11 | 2020-02-14T21:29:45.000Z | 2021-07-30T18:49:47.000Z | ssl_framework/models/heads/__init__.py | ananyahjha93/libself | f43cf8d60db97bb47652eecf06efa1801e850b9f | [
"MIT"
] | null | null | null | ssl_framework/models/heads/__init__.py | ananyahjha93/libself | f43cf8d60db97bb47652eecf06efa1801e850b9f | [
"MIT"
] | 2 | 2020-10-21T08:11:12.000Z | 2020-11-20T11:57:43.000Z | from ssl_framework.models.heads.mlp import MLP | 46 | 46 | 0.869565 | 8 | 46 | 4.875 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065217 | 46 | 1 | 46 | 46 | 0.906977 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
765f307d9e0cdb24514f8550f529de7c240c65d2 | 85 | py | Python | amethyst/backend/__init__.py | medav/amethyst | c75314249454f41c9ea61a1a46b1b59ba81b37d3 | [
"MIT"
] | null | null | null | amethyst/backend/__init__.py | medav/amethyst | c75314249454f41c9ea61a1a46b1b59ba81b37d3 | [
"MIT"
] | null | null | null | amethyst/backend/__init__.py | medav/amethyst | c75314249454f41c9ea61a1a46b1b59ba81b37d3 | [
"MIT"
] | 1 | 2021-05-01T06:23:51.000Z | 2021-05-01T06:23:51.000Z | from . import decode
from . import execute
from . import mem
from . import writeback
| 17 | 23 | 0.764706 | 12 | 85 | 5.416667 | 0.5 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.188235 | 85 | 4 | 24 | 21.25 | 0.942029 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
766576061cf3a80d3b01b28631a35290271266a7 | 65 | py | Python | lectures/code/tuples_basics.py | naskoch/python_course | 84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3 | [
"MIT"
] | 4 | 2015-08-10T17:46:55.000Z | 2020-04-18T21:09:03.000Z | lectures/code/tuples_basics.py | naskoch/python_course | 84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3 | [
"MIT"
] | null | null | null | lectures/code/tuples_basics.py | naskoch/python_course | 84adfd3f8d48ca3ad5837f7acc59d2fa051e95d3 | [
"MIT"
] | 2 | 2019-04-24T03:31:02.000Z | 2019-05-13T07:36:06.000Z | >>> myTuple = (1, 2, 3)
>>> myTuple[1]
2
>>> myTuple[1:3]
(2, 3)
| 10.833333 | 23 | 0.461538 | 12 | 65 | 2.5 | 0.333333 | 0.8 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173077 | 0.2 | 65 | 5 | 24 | 13 | 0.403846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7668a71f67ddf65e1f07b8ad239bec751bb3b3e5 | 20,466 | py | Python | atst/domain/authnid/crl/util.py | philip-dds/atst | a227044ccf464dd0e3144dd74cecfafe8d6841b9 | [
"MIT"
] | null | null | null | atst/domain/authnid/crl/util.py | philip-dds/atst | a227044ccf464dd0e3144dd74cecfafe8d6841b9 | [
"MIT"
] | null | null | null | atst/domain/authnid/crl/util.py | philip-dds/atst | a227044ccf464dd0e3144dd74cecfafe8d6841b9 | [
"MIT"
] | null | null | null | import json
import os
import re
import pendulum
import requests
class CRLNotFoundError(Exception):
pass
class CRLParseError(Exception):
pass
MODIFIED_TIME_BUFFER = 15 * 60
CRL_LIST = [
(
"https://crl.gds.disa.mil/crl/DODROOTCA2.crl",
"305b310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311630140603550403130d446f4420526f6f742043412032", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODROOTCA3.crl",
"305b310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311630140603550403130d446f4420526f6f742043412033", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODROOTCA4.crl",
"305b310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311630140603550403130d446f4420526f6f742043412034", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODROOTCA5.crl",
"305b310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311630140603550403130d446f4420526f6f742043412035", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_33.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442049442043412d3333", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_34.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442049442043412d3334", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDSWCA_35.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f442049442053572043412d3335", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDSWCA_36.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f442049442053572043412d3336", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDSWCA_37.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f442049442053572043412d3337", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDSWCA_38.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f442049442053572043412d3338", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_39.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442049442043412d3339", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_40.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442049442043412d3430", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_41.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442049442043412d3431", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_42.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442049442043412d3432", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_43.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442049442043412d3433", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_44.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442049442043412d3434", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDSWCA_45.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f442049442053572043412d3435", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDSWCA_46.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f442049442053572043412d3436", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDSWCA_47.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f442049442053572043412d3437", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDSWCA_48.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f442049442053572043412d3438", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_49.crl",
"305a310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493115301306035504030c0c444f442049442043412d3439", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_50.crl",
"305a310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493115301306035504030c0c444f442049442043412d3530", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_51.crl",
"305a310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493115301306035504030c0c444f442049442043412d3531", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_52.crl",
"305a310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493115301306035504030c0c444f442049442043412d3532", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODIDCA_59.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442049442043412d3539", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODSWCA_53.crl",
"305a310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493115301306035504030c0c444f442053572043412d3533", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODSWCA_54.crl",
"305a310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493115301306035504030c0c444f442053572043412d3534", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODSWCA_55.crl",
"305a310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493115301306035504030c0c444f442053572043412d3535", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODSWCA_56.crl",
"305a310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493115301306035504030c0c444f442053572043412d3536", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODSWCA_57.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442053572043412d3537", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODSWCA_58.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442053572043412d3538", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODSWCA_60.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442053572043412d3630", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODSWCA_61.crl",
"305a310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311530130603550403130c444f442053572043412d3631", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_33.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f4420454d41494c2043412d3333", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_34.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f4420454d41494c2043412d3334", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_39.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f4420454d41494c2043412d3339", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_40.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f4420454d41494c2043412d3430", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_41.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f4420454d41494c2043412d3431", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_42.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f4420454d41494c2043412d3432", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_43.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f4420454d41494c2043412d3433", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_44.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f4420454d41494c2043412d3434", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_49.crl",
"305d310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493118301606035504030c0f444f4420454d41494c2043412d3439", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_50.crl",
"305d310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493118301606035504030c0f444f4420454d41494c2043412d3530", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_51.crl",
"305d310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493118301606035504030c0f444f4420454d41494c2043412d3531", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_52.crl",
"305d310b300906035504061302555331183016060355040a0c0f552e532e20476f7665726e6d656e74310c300a060355040b0c03446f44310c300a060355040b0c03504b493118301606035504030c0f444f4420454d41494c2043412d3532", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODEMAILCA_59.crl",
"305d310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311830160603550403130f444f4420454d41494c2043412d3539", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODINTEROPERABILITYROOTCA1.crl",
"306c310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49312730250603550403131e446f4420496e7465726f7065726162696c69747920526f6f742043412031", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODINTEROPERABILITYROOTCA2.crl",
"306c310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49312730250603550403131e446f4420496e7465726f7065726162696c69747920526f6f742043412032", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/USDODCCEBINTEROPERABILITYROOTCA1.crl",
"3074310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49312f302d06035504031326555320446f44204343454220496e7465726f7065726162696c69747920526f6f742043412031", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/USDODCCEBINTEROPERABILITYROOTCA2.crl",
"3074310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49312f302d06035504031326555320446f44204343454220496e7465726f7065726162696c69747920526f6f742043412032", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODNIPRINTERNALNPEROOTCA1.crl",
"3075310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f4431143012060355040b130b496e7465726e616c4e5045312830260603550403131f446f44204e49505220496e7465726e616c204e504520526f6f742043412031", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODNPEROOTCA1.crl",
"305f310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311a301806035504031311446f44204e504520526f6f742043412031", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DMDNSIGNINGCA_1.crl",
"305f310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f44310c300a060355040b1303504b49311a301806035504031311444d444e205369676e696e672043412d31", # pragma: allowlist secret
),
(
"https://crl.gds.disa.mil/crl/DODWCFROOTCA1.crl",
"3063310b300906035504061302555331183016060355040a130f552e532e20476f7665726e6d656e74310c300a060355040b1303446f443110300e060355040b130757434620504b49311a301806035504031311446f442057434620526f6f742043412031", # pragma: allowlist secret
),
]
JSON_CACHE = "crl_locations.json"
def _deserialize_cache_items(cache):
return {bytes.fromhex(der): data for (der, data) in cache.items()}
def load_crl_locations_cache(crl_dir):
json_location = "{}/{}".format(crl_dir, JSON_CACHE)
with open(json_location, "r") as json_file:
cache = json.load(json_file)
return _deserialize_cache_items(cache)
def serialize_crl_locations_cache(crl_dir, crl_list=CRL_LIST):
crl_cache = {}
for crl_uri, crl_issuer in crl_list:
crl_path = crl_local_path(crl_dir, crl_uri)
if os.path.isfile(crl_path):
crl_cache[crl_issuer] = crl_path
json_location = "{}/{}".format(crl_dir, JSON_CACHE)
with open(json_location, "w") as json_file:
json.dump(crl_cache, json_file)
return {bytes.fromhex(k): v for k, v in crl_cache.items()}
def crl_local_path(out_dir, crl_location):
name = re.split("/", crl_location)[-1]
crl = os.path.join(out_dir, name)
return crl
def existing_crl_modification_time(crl):
if os.path.exists(crl):
prev_time = os.path.getmtime(crl)
buffered = prev_time + MODIFIED_TIME_BUFFER
mod_time = prev_time if pendulum.now().timestamp() < buffered else buffered
dt = pendulum.from_timestamp(mod_time, tz="GMT")
return dt.format("ddd, DD MMM YYYY HH:mm:ss zz")
else:
return False
def write_crl(out_dir, target_dir, crl_location):
crl = crl_local_path(out_dir, crl_location)
existing = crl_local_path(target_dir, crl_location)
options = {"stream": True}
mod_time = existing_crl_modification_time(existing)
if mod_time:
options["headers"] = {"If-Modified-Since": mod_time}
with requests.get(crl_location, **options) as response:
if response.status_code > 399:
raise CRLNotFoundError()
if response.status_code == 304:
return (False, existing)
with open(crl, "wb") as crl_file:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
crl_file.write(chunk)
return (True, existing)
def remove_bad_crl(out_dir, crl_location):
crl = crl_local_path(out_dir, crl_location)
os.remove(crl)
def log_error(logger, crl_location):
if logger:
logger.error(
"Error downloading {}, removing file and continuing anyway".format(
crl_location
)
)
def refresh_crl(out_dir, target_dir, crl_uri, logger):
logger.info("updating CRL from {}".format(crl_uri))
try:
was_updated, crl_path = write_crl(out_dir, target_dir, crl_uri)
if was_updated:
logger.info("successfully synced CRL from {}".format(crl_uri))
else:
logger.info("no updates for CRL from {}".format(crl_uri))
return crl_path
except requests.exceptions.ChunkedEncodingError:
log_error(logger, crl_uri)
remove_bad_crl(out_dir, crl_uri)
except CRLNotFoundError:
log_error(logger, crl_uri)
def sync_crls(tmp_location, final_location):
crl_cache = {}
for crl_uri, crl_issuer in CRL_LIST:
crl_path = refresh_crl(tmp_location, final_location, crl_uri, logger)
crl_cache[crl_issuer] = crl_path
json_location = "{}/{}".format(final_location, JSON_CACHE)
with open(json_location, "w") as json_file:
json.dump(crl_cache, json_file)
if __name__ == "__main__":
import sys
import logging
logging.basicConfig(
level=logging.INFO, format="[%(asctime)s]:%(levelname)s: %(message)s"
)
logger = logging.getLogger()
logger.info("Updating CRLs")
try:
tmp_location = sys.argv[1]
final_location = sys.argv[2]
sync_crls(tmp_location, final_location)
except Exception as err:
logger.exception("Fatal error encountered, stopping")
sys.exit(1)
logger.info("Finished updating CRLs")
| 55.61413 | 277 | 0.792192 | 1,251 | 20,466 | 12.798561 | 0.198241 | 0.026981 | 0.037099 | 0.05059 | 0.213291 | 0.202673 | 0.195116 | 0.188745 | 0.188745 | 0.155768 | 0 | 0.519774 | 0.133929 | 20,466 | 367 | 278 | 55.765668 | 0.383526 | 0.065914 | 0 | 0.228395 | 0 | 0 | 0.690746 | 0.543804 | 0 | 1 | 0 | 0 | 0 | 1 | 0.030864 | false | 0.006173 | 0.021605 | 0.003086 | 0.08642 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7673dfd1a83cf261c2134ed671f44e2e94acafe8 | 429 | py | Python | terrascript/bitbucket/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | null | null | null | terrascript/bitbucket/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | null | null | null | terrascript/bitbucket/r.py | hugovk/python-terrascript | 08fe185904a70246822f5cfbdc9e64e9769ec494 | [
"BSD-2-Clause"
] | null | null | null | # terrascript/bitbucket/r.py
import terrascript
class bitbucket_hook(terrascript.Resource):
pass
class bitbucket_default_reviewers(terrascript.Resource):
pass
class bitbucket_repository(terrascript.Resource):
pass
class bitbucket_repository_variable(terrascript.Resource):
pass
class bitbucket_project(terrascript.Resource):
pass
class bitbucket_branch_restriction(terrascript.Resource):
pass
| 15.888889 | 58 | 0.799534 | 45 | 429 | 7.422222 | 0.355556 | 0.251497 | 0.413174 | 0.419162 | 0.613772 | 0.281437 | 0 | 0 | 0 | 0 | 0 | 0 | 0.135198 | 429 | 26 | 59 | 16.5 | 0.90027 | 0.060606 | 0 | 0.461538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.461538 | 0.076923 | 0 | 0.538462 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
7677333cbe8a008146f1ca86faa7113c72020467 | 47 | py | Python | holidaycollector/__init__.py | JonathanLoscalzo/holiday-collector | 9ae122ecc7bdb45e00a6d939b09b7350797d8ace | [
"MIT"
] | null | null | null | holidaycollector/__init__.py | JonathanLoscalzo/holiday-collector | 9ae122ecc7bdb45e00a6d939b09b7350797d8ace | [
"MIT"
] | null | null | null | holidaycollector/__init__.py | JonathanLoscalzo/holiday-collector | 9ae122ecc7bdb45e00a6d939b09b7350797d8ace | [
"MIT"
] | null | null | null | from .holidaycollector import main, __version__ | 47 | 47 | 0.87234 | 5 | 47 | 7.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085106 | 47 | 1 | 47 | 47 | 0.860465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
76a3dce92ab6a2ae813445ff5b9dddde60be6f3e | 29 | py | Python | types/user_longpoll/__init__.py | StepaTa/vkbottle | 3b04a5343380cbabe782151e7cb1c1645a9fa9ce | [
"MIT"
] | 1 | 2021-05-22T10:32:57.000Z | 2021-05-22T10:32:57.000Z | types/user_longpoll/__init__.py | StepaTa/vkbottle | 3b04a5343380cbabe782151e7cb1c1645a9fa9ce | [
"MIT"
] | 73 | 2020-10-05T21:00:48.000Z | 2020-11-16T23:29:41.000Z | types/user_longpoll/__init__.py | StepaTa/vkbottle | 3b04a5343380cbabe782151e7cb1c1645a9fa9ce | [
"MIT"
] | null | null | null | from .message import Message
| 14.5 | 28 | 0.827586 | 4 | 29 | 6 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 29 | 1 | 29 | 29 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
76ce03b467b9cc5a55cdb6a64803292e3dd2486f | 46,385 | py | Python | django_comments_ink/tests/test_models.py | comments-ink/django-comments-ink | 25255d2ca60d5e2dfb8116e3ba4702dcde4e0a69 | [
"BSD-2-Clause"
] | 3 | 2022-03-26T23:53:14.000Z | 2022-03-28T19:20:53.000Z | django_comments_ink/tests/test_models.py | comments-ink/django-comments-ink | 25255d2ca60d5e2dfb8116e3ba4702dcde4e0a69 | [
"BSD-2-Clause"
] | null | null | null | django_comments_ink/tests/test_models.py | comments-ink/django-comments-ink | 25255d2ca60d5e2dfb8116e3ba4702dcde4e0a69 | [
"BSD-2-Clause"
] | null | null | null | from datetime import datetime
from unittest.mock import patch
import pytest
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.db.models.signals import pre_save
from django.test import TestCase as DjangoTestCase
from django_comments_ink import get_form, get_model
from django_comments_ink.models import (
BlackListedDomain,
InkComment,
MaxThreadLevelExceededException,
publish_or_withhold_on_pre_save,
)
from django_comments_ink.moderation import SpamModerator, moderator
from django_comments_ink.tests.models import Article, Diary, MyComment
from django_comments_ink.tests.test_views import post_article_comment
class ArticleBaseTestCase(DjangoTestCase):
    """Shared fixture: creates the two Article rows used by the test cases.

    Subclasses rely on ``self.article_1`` and ``self.article_2`` existing
    after ``setUp`` runs.
    """

    def setUp(self):
        # (attribute suffix, title, slug, body) for each fixture article.
        fixtures = (
            (1, "September", "september", "During September..."),
            (2, "October", "october", "What I did on October..."),
        )
        for num, title, slug, body in fixtures:
            article = Article.objects.create(title=title, slug=slug, body=body)
            setattr(self, "article_%d" % num, article)
class InkCommentManagerTestCase(ArticleBaseTestCase):
    """Tests for InkComment.objects.for_app_models (count and site filter)."""

    def setUp(self):
        super(InkCommentManagerTestCase, self).setUp()
        self.article_ct = ContentType.objects.get(
            app_label="tests", model="article"
        )
        self.site1 = Site.objects.get(pk=1)
        self.site2 = Site.objects.create(domain="site2.com", name="site2.com")

    def _post_comment(self, article, site, text):
        # Shared helper: the four post_comment_N methods only differ in
        # target article, site and comment text.
        InkComment.objects.create(
            content_type=self.article_ct,
            object_pk=article.id,
            content_object=article,
            site=site,
            comment=text,
            submit_date=datetime.now(),
        )

    def post_comment_1(self):
        self._post_comment(self.article_1, self.site1, "just a testing comment")

    def post_comment_2(self):
        self._post_comment(self.article_2, self.site1, "yet another comment")

    def post_comment_3(self):
        self._post_comment(self.article_2, self.site1, "and another one")

    def post_comment_4(self):
        self._post_comment(
            self.article_1, self.site2, "just a testing comment in site2"
        )

    def test_for_app_models(self):
        # There is no comment posted yet to article_1 nor article_2.
        count = InkComment.objects.for_app_models("tests.article").count()
        self.assertEqual(count, 0)
        # Each newly posted comment must raise the count by exactly one.
        posters = [
            self.post_comment_1,
            self.post_comment_2,
            self.post_comment_3,
            self.post_comment_4,
        ]
        for expected, post in enumerate(posters, start=1):
            post()
            count = InkComment.objects.for_app_models("tests.article").count()
            self.assertEqual(count, expected)

    def test_multi_site_for_app_models(self):
        self.post_comment_1()  # To site1.
        self.post_comment_4()  # To site2.
        # The site kwarg restricts counting to comments posted to that site.
        count_site1 = InkComment.objects.for_app_models(
            "tests.article", site=self.site1
        ).count()
        self.assertEqual(count_site1, 1)
        count_site2 = InkComment.objects.for_app_models(
            "tests.article", site=self.site2
        ).count()
        self.assertEqual(count_site2, 1)
# In order to test 'save' and '_calculate_thread_data' methods, simulate the
# following threads, in order of arrival:
#
# testcase cmt.id parent level-0 level-1 level-2 level-3
# step1 1 - c1 <- c1
# step1 2 - c2 <- c2
# step2 3 1 -- c3 <- c3.c1
# step2 4 1 -- c4 <- c4.c1
# step3 5 2 -- c5 <- c5.c2
# step4 6 5 -- -- c6 <- c6.c5.c2
# step4 7 4 -- -- c7 <- c7.c4.c1
# step5 8 3 -- -- c8 <- c8.c3.c1
# step5 9 - c9 <- c9
# step6 10 7 c10 <- c10.c7.c4.c1
# step6 11 8 c11 <- c11.c8.c3.c1
def thread_test_step_1(article, model=None, **kwargs):
    """Post comments c1 and c2 (both at thread level 0) to `article`.

    `model` defaults to the active comment model; it is resolved lazily at
    call time (rather than once at import, as a `model=get_model()` default
    would) so that a patched COMMENTS_INK_MODEL setting is honoured.
    Extra `kwargs` (e.g. site, title) are forwarded to `objects.create`.
    """
    model = model or get_model()
    article_ct = ContentType.objects.get(app_label="tests", model="article")
    if "site" not in kwargs:
        kwargs["site"] = Site.objects.get(pk=1)
    # post Comment 1 with parent_id 0
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="c1",
        submit_date=datetime.now(),
        **kwargs,
    )
    # post Comment 2 with parent_id 0
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="c2",
        submit_date=datetime.now(),
        **kwargs,
    )
def thread_test_step_2(article, model=None, **kwargs):
    """Post comments c3 and c4 as replies to comment 1 (level 1).

    `model` is resolved lazily so a patched COMMENTS_INK_MODEL setting is
    honoured at call time. `parent_id` defaults to 1 but may be overridden
    via `kwargs`.
    """
    model = model or get_model()
    article_ct = ContentType.objects.get(app_label="tests", model="article")
    if "site" not in kwargs:
        kwargs["site"] = Site.objects.get(pk=1)
    if "parent_id" not in kwargs:
        kwargs["parent_id"] = 1
    # post Comment 3 to parent_id 1
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="c3.c1",
        submit_date=datetime.now(),
        **kwargs,
    )
    # post Comment 4 to parent_id 1
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="c4.c1",
        submit_date=datetime.now(),
        **kwargs,
    )
def thread_test_step_3(article, model=None, **kwargs):
    """Post comment 5 as a reply to comment 2 (level 1).

    `model` is resolved lazily so a patched COMMENTS_INK_MODEL setting is
    honoured at call time.
    """
    model = model or get_model()
    article_ct = ContentType.objects.get(app_label="tests", model="article")
    if "site" not in kwargs:
        kwargs["site"] = Site.objects.get(pk=1)
    # post Comment 5 to parent_id 2
    # NOTE(review): the content "5.c2" disagrees with the thread diagram in
    # this module, which names it "c5.c2" — looks like a typo, kept as-is
    # in case another test asserts the literal text; confirm before fixing.
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="5.c2",
        submit_date=datetime.now(),
        parent_id=2,
        **kwargs,
    )
def thread_test_step_4(article, model=None, **kwargs):
    """Post comment 6 (reply to 5) and comment 7 (reply to 4), both level 2.

    `model` is resolved lazily so a patched COMMENTS_INK_MODEL setting is
    honoured at call time.
    """
    model = model or get_model()
    article_ct = ContentType.objects.get(app_label="tests", model="article")
    if "site" not in kwargs:
        kwargs["site"] = Site.objects.get(pk=1)
    # post Comment 6 to parent_id 5
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="c6.c5.c2",
        submit_date=datetime.now(),
        parent_id=5,
        **kwargs,
    )
    # post Comment 7 to parent_id 4
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="c7.c4.c1",
        submit_date=datetime.now(),
        parent_id=4,
        **kwargs,
    )
def thread_test_step_5(article, model=None, **kwargs):
    """Post comment 8 (reply to 3, level 2) and comment 9 (new level-0 thread).

    `model` is resolved lazily so a patched COMMENTS_INK_MODEL setting is
    honoured at call time.
    """
    model = model or get_model()
    article_ct = ContentType.objects.get(app_label="tests", model="article")
    if "site" not in kwargs:
        kwargs["site"] = Site.objects.get(pk=1)
    # post Comment 8 to parent_id 3
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="c8.c3.c1",
        submit_date=datetime.now(),
        parent_id=3,
        **kwargs,
    )
    # post Comment 9 with parent_id 0
    # NOTE(review): the content "c3" disagrees with the thread diagram,
    # which names comment 9 "c9" — looks like a typo, kept as-is in case
    # another test asserts the literal text; confirm before fixing.
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="c3",
        submit_date=datetime.now(),
        **kwargs,
    )
def thread_test_step_6(article, model=None, **kwargs):
    """Post comment 10 (reply to 7) and comment 11 (reply to 8), both level 3.

    `model` is resolved lazily so a patched COMMENTS_INK_MODEL setting is
    honoured at call time.
    """
    model = model or get_model()
    article_ct = ContentType.objects.get(app_label="tests", model="article")
    if "site" not in kwargs:
        kwargs["site"] = Site.objects.get(pk=1)
    # post Comment 10 to parent_id 7
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="c10.c7.c4.c1",
        submit_date=datetime.now(),
        parent_id=7,
        **kwargs,
    )
    # post Comment 11 to parent_id 8
    model.objects.create(
        content_type=article_ct,
        object_pk=article.id,
        content_object=article,
        comment="c11.c8.c3.c1",
        submit_date=datetime.now(),
        parent_id=8,
        **kwargs,
    )
class BaseThreadStep1TestCase(ArticleBaseTestCase):
    """After step 1 there are two level-0 comments, each its own thread."""

    def setUp(self):
        super(BaseThreadStep1TestCase, self).setUp()
        thread_test_step_1(self.article_1)
        # content -> cmt.id thread_id parent_id level order nested
        #   c1    ->    1       1         1       0     1     0
        #   c2    ->    2       2         2       0     1     0
        self.c1, self.c2 = InkComment.objects.all()

    def test_threaded_comments_step_1_level_0(self):
        # Both comments are thread roots: parent and thread ids equal
        # their own pk, and neither has nested replies yet.
        for comment, pk in ((self.c1, 1), (self.c2, 2)):
            self.assertTrue(comment.parent_id == pk and comment.thread_id == pk)
            self.assertTrue(comment.level == 0 and comment.order == 1)
            self.assertEqual(comment.nested_count, 0)
class ThreadStep2TestCase(ArticleBaseTestCase):
    """After steps 1-2: c3 and c4 are level-1 replies to c1."""

    def setUp(self):
        super(ThreadStep2TestCase, self).setUp()
        thread_test_step_1(self.article_1)
        thread_test_step_2(self.article_1)
        # content -> cmt.id thread_id parent_id level order nested
        #   c1    ->    1       1         1       0     1     2
        #   c3    ->    3       1         1       1     2     0
        #   c4    ->    4       1         1       1     3     0
        #   c2    ->    2       2         2       0     1     0
        self.c1, self.c3, self.c4, self.c2 = InkComment.objects.all()

    def test_threaded_comments_step_2_level_0(self):
        # c1 now counts two nested replies (c3 and c4); c2 still has none.
        for comment, pk, nested in ((self.c1, 1, 2), (self.c2, 2, 0)):
            self.assertTrue(comment.parent_id == pk and comment.thread_id == pk)
            self.assertTrue(comment.level == 0 and comment.order == 1)
            self.assertEqual(comment.nested_count, nested)

    def test_threaded_comments_step_2_level_1(self):
        # c3 and c4 hang from c1 within thread 1, ordered by arrival.
        for comment, order in ((self.c3, 2), (self.c4, 3)):
            self.assertTrue(comment.parent_id == 1 and comment.thread_id == 1)
            self.assertTrue(comment.level == 1 and comment.order == order)
            self.assertEqual(comment.nested_count, 0)
class ThreadStep3TestCase(ArticleBaseTestCase):
    """After steps 1-3: c5 joins as a level-1 reply to c2."""

    def setUp(self):
        super(ThreadStep3TestCase, self).setUp()
        steps = (thread_test_step_1, thread_test_step_2, thread_test_step_3)
        for step in steps:
            step(self.article_1)
        # -> content: cmt.id thread_id parent_id level order nested
        #      c1        1       1         1       0     1     2
        #      c3        3       1         1       1     2     0
        #      c4        4       1         1       1     3     0
        #      c2        2       2         2       0     1     1
        #      c5        5       2         2       1     2     0
        (
            self.c1,
            self.c3,
            self.c4,
            self.c2,
            self.c5,
        ) = InkComment.objects.all()

    def test_threaded_comments_step_3_level_0(self):
        # c1 has two nested replies; c2 now has one (c5).
        for comment, pk, nested in ((self.c1, 1, 2), (self.c2, 2, 1)):
            self.assertTrue(comment.parent_id == pk and comment.thread_id == pk)
            self.assertTrue(comment.level == 0 and comment.order == 1)
            self.assertEqual(comment.nested_count, nested)

    def test_threaded_comments_step_3_level_1(self):
        # (comment, parent_id, thread_id, order) — all level 1, no children.
        rows = (
            (self.c3, 1, 1, 2),
            (self.c4, 1, 1, 3),
            (self.c5, 2, 2, 2),
        )
        for comment, parent, thread, order in rows:
            self.assertTrue(
                comment.parent_id == parent and comment.thread_id == thread
            )
            self.assertTrue(comment.level == 1 and comment.order == order)
            self.assertEqual(comment.nested_count, 0)
class ThreadStep4TestCase(ArticleBaseTestCase):
    """After steps 1-4: level-2 replies c6 (to c5) and c7 (to c4) exist."""

    def setUp(self):
        super(ThreadStep4TestCase, self).setUp()
        steps = (
            thread_test_step_1,
            thread_test_step_2,
            thread_test_step_3,
            thread_test_step_4,
        )
        for step in steps:
            step(self.article_1)
        # content -> cmt.id thread_id parent_id level order nested
        #   c1    ->    1       1         1       0     1     3
        #   c3    ->    3       1         1       1     2     0
        #   c4    ->    4       1         1       1     3     1
        #   c7    ->    7       1         4       2     4     0
        #   c2    ->    2       2         2       0     1     2
        #   c5    ->    5       2         2       1     2     1
        #   c6    ->    6       2         5       2     3     0
        (
            self.c1,
            self.c3,
            self.c4,
            self.c7,
            self.c2,
            self.c5,
            self.c6,
        ) = InkComment.objects.all()

    def _check(self, comment, parent, thread, level, order, nested):
        # Assert one row of the table in setUp.
        self.assertTrue(
            comment.parent_id == parent and comment.thread_id == thread
        )
        self.assertTrue(comment.level == level and comment.order == order)
        self.assertEqual(comment.nested_count, nested)

    def test_threaded_comments_step_4_level_0(self):
        self._check(self.c1, 1, 1, 0, 1, 3)
        self._check(self.c2, 2, 2, 0, 1, 2)

    def test_threaded_comments_step_4_level_1(self):
        self._check(self.c3, 1, 1, 1, 2, 0)
        self._check(self.c4, 1, 1, 1, 3, 1)
        self._check(self.c5, 2, 2, 1, 2, 1)

    def test_threaded_comments_step_4_level_2(self):
        self._check(self.c6, 5, 2, 2, 3, 0)
        self._check(self.c7, 4, 1, 2, 4, 0)
class ThreadStep5TestCase(ArticleBaseTestCase):
    """After steps 1-5: c8 (level-2 reply to c3) and c9 (new thread) exist."""

    def setUp(self):
        super(ThreadStep5TestCase, self).setUp()
        steps = (
            thread_test_step_1,
            thread_test_step_2,
            thread_test_step_3,
            thread_test_step_4,
            thread_test_step_5,
        )
        for step in steps:
            step(self.article_1)
        # content -> cmt.id thread_id parent_id level order nested
        #   c1    ->     1      1         1       0     1     4
        #   c3    ->  |- 3      1         1       1     2     1
        #   c8    ->  |- 8      1         3       2     3     0
        #   c4    ->  |- 4      1         1       1     4     1
        #   c7    ->  |- 7      1         4       2     5     0
        #   c2    ->     2      2         2       0     1     2
        #   c5    ->  |- 5      2         2       1     2     1
        #   c6    ->  |- 6      2         5       2     3     0
        #   c9    ->     9      9         9       0     1     0
        (
            self.c1,
            self.c3,
            self.c8,
            self.c4,
            self.c7,
            self.c2,
            self.c5,
            self.c6,
            self.c9,
        ) = InkComment.objects.all()

    def _check(self, comment, parent, thread, level, order, nested):
        # Assert one row of the table in setUp.
        self.assertTrue(
            comment.parent_id == parent and comment.thread_id == thread
        )
        self.assertTrue(comment.level == level and comment.order == order)
        self.assertEqual(comment.nested_count, nested)

    def test_threaded_comments_step_5_level_0(self):
        self._check(self.c1, 1, 1, 0, 1, 4)
        self._check(self.c2, 2, 2, 0, 1, 2)
        self._check(self.c9, 9, 9, 0, 1, 0)

    def test_threaded_comments_step_5_level_1(self):
        self._check(self.c3, 1, 1, 1, 2, 1)
        self._check(self.c4, 1, 1, 1, 4, 1)  # order changed by c8's arrival
        self._check(self.c5, 2, 2, 1, 2, 1)

    def test_threaded_comments_step_5_level_2(self):
        self._check(self.c6, 5, 2, 2, 3, 0)
        self._check(self.c7, 4, 1, 2, 5, 0)  # order changed by c8's arrival
        self._check(self.c8, 3, 1, 2, 3, 0)

    @patch.multiple(
        "django_comments_ink.conf.settings", COMMENTS_INK_MAX_THREAD_LEVEL=2
    )
    def test_exceed_max_thread_level_raises_exception(self):
        article_ct = ContentType.objects.get(app_label="tests", model="article")
        site = Site.objects.get(pk=1)
        # Comment 8 is already at level 2, the patched maximum, so replying
        # to it must raise.
        with self.assertRaises(MaxThreadLevelExceededException):
            InkComment.objects.create(
                content_type=article_ct,
                object_pk=self.article_1.id,
                content_object=self.article_1,
                site=site,
                comment="cmt 1 to cmt 2 to cmt 1",
                submit_date=datetime.now(),
                parent_id=8,
            )

    def test_removing_c4_withdraws_c7_and_updates_nested_count(self):
        comment_4 = InkComment.objects.get(pk=4)
        self.assertEqual(comment_4.nested_count, 1)
        comment_1 = InkComment.objects.get(pk=1)
        self.assertEqual(comment_1.nested_count, 4)
        # Remove comment 4, save, and check again: c7 gets withheld, so
        # c1's nested_count drops by one while c4's own count is untouched.
        comment_4.is_removed = True
        comment_4.save()
        comment_4 = InkComment.objects.get(pk=4)
        self.assertEqual(comment_4.nested_count, 1)
        comment_1 = InkComment.objects.get(pk=1)
        self.assertEqual(comment_1.nested_count, 3)
class ThreadStep6TestCase(ArticleBaseTestCase):
    """After steps 1-6: level-3 replies c10 (to c7) and c11 (to c8) exist."""

    def setUp(self):
        super(ThreadStep6TestCase, self).setUp()
        steps = (
            thread_test_step_1,
            thread_test_step_2,
            thread_test_step_3,
            thread_test_step_4,
            thread_test_step_5,
            thread_test_step_6,
        )
        for step in steps:
            step(self.article_1)
        # content -> cmt.id thread_id parent_id level order nested
        #   c1    ->    1       1         1       0     1     6
        #   c3    ->    3       1         1       1     2     2
        #   c8    ->    8       1         3       2     3     1
        #   c11   ->   11       1         8       3     4     0
        #   c4    ->    4       1         1       1     5     2
        #   c7    ->    7       1         4       2     6     1
        #   c10   ->   10       1         7       3     7     0
        #   c2    ->    2       2         2       0     1     2
        #   c5    ->    5       2         2       1     2     1
        #   c6    ->    6       2         5       2     3     0
        #   c9    ->    9       9         9       0     1     0
        (
            self.c1,
            self.c3,
            self.c8,
            self.c11,
            self.c4,
            self.c7,
            self.c10,
            self.c2,
            self.c5,
            self.c6,
            self.c9,
        ) = InkComment.objects.all()

    def _check(self, comment, parent, thread, level, order, nested):
        # Assert one row of the table in setUp.
        self.assertTrue(
            comment.parent_id == parent and comment.thread_id == thread
        )
        self.assertTrue(comment.level == level and comment.order == order)
        self.assertEqual(comment.nested_count, nested)

    def test_threaded_comments_step_6_level_0(self):
        self._check(self.c1, 1, 1, 0, 1, 6)
        self._check(self.c2, 2, 2, 0, 1, 2)
        self._check(self.c9, 9, 9, 0, 1, 0)

    def test_threaded_comments_step_6_level_1(self):
        self._check(self.c3, 1, 1, 1, 2, 2)
        self._check(self.c4, 1, 1, 1, 5, 2)
        self._check(self.c5, 2, 2, 1, 2, 1)

    def test_threaded_comments_step_6_level_2(self):
        self._check(self.c8, 3, 1, 2, 3, 1)
        self._check(self.c7, 4, 1, 2, 6, 1)
        self._check(self.c6, 5, 2, 2, 3, 0)

    def test_threaded_comments_step_6_level_3(self):
        self._check(self.c10, 7, 1, 3, 7, 0)
        self._check(self.c11, 8, 1, 3, 4, 0)
def add_comment_to_diary_entry(diary_entry):
    """Create one comment (on site 1) attached to the given Diary entry."""
    diary_ct = ContentType.objects.get(app_label="tests", model="diary")
    default_site = Site.objects.get(pk=1)
    get_model().objects.create(
        content_type=diary_ct,
        object_pk=diary_entry.id,
        content_object=diary_entry,
        site=default_site,
        comment="cmt to day in diary",
        submit_date=datetime.now(),
    )
class DiaryBaseTestCase(DjangoTestCase):
    """The Diary model allows no nested replies (max thread level 0)."""

    def setUp(self):
        self.day_in_diary = Diary.objects.create(body="About Today...")
        add_comment_to_diary_entry(self.day_in_diary)

    def test_max_thread_level_by_app_model(self):
        diary_ct = ContentType.objects.get(app_label="tests", model="diary")
        site = Site.objects.get(pk=1)
        # The comment added in setUp already sits at the maximum thread
        # level for the diary model, so replying to it must raise.
        with self.assertRaises(MaxThreadLevelExceededException):
            InkComment.objects.create(
                content_type=diary_ct,
                object_pk=self.day_in_diary.id,
                content_object=self.day_in_diary,
                site=site,
                comment="cmt to cmt to day in diary",
                submit_date=datetime.now(),
                parent_id=1,
            )
class PublishOrWithholdNestedComments_1_TestCase(ArticleBaseTestCase):
    """Removing a parent comment (c1) must withhold its children (c3, c4)."""

    def setUp(self):
        super(PublishOrWithholdNestedComments_1_TestCase, self).setUp()
        thread_test_step_1(self.article_1)
        thread_test_step_2(self.article_1)
        # The two calls above create the following comments:
        # content -> cmt.id thread_id parent_id level order nested
        #   cm1   ->    1       1         1       0     1     2
        #   cm3   ->    3       1         1       1     2     0
        #   cm4   ->    4       1         1       1     3     0
        #   cm2   ->    2       2         2       0     1     0

    def test_all_comments_are_public_and_have_not_been_removed(self):
        for comment in InkComment.objects.all():
            self.assertTrue(comment.is_public)
            self.assertFalse(comment.is_removed)

    def test_removing_c1_withholds_c3_and_c4(self):
        comment_1 = InkComment.objects.get(pk=1)
        self.assertEqual(comment_1.nested_count, 2)  # nested_count should be 2.
        comment_1.is_removed = True
        comment_1.save()
        comment_1 = InkComment.objects.get(pk=1)
        self.assertTrue(comment_1.is_public)
        self.assertTrue(comment_1.is_removed)
        # Is still public, so the nested_count doesn't change.
        self.assertEqual(comment_1.nested_count, 2)
        # The children become withheld (not public) but are NOT removed.
        for child_pk in (3, 4):
            child = InkComment.objects.get(pk=child_pk)
            self.assertFalse(child.is_public)
            self.assertFalse(child.is_removed)
_ink_model = "django_comments_ink.tests.models.MyComment"
class PublishOrWithholdNestedComments_2_TestCase(ArticleBaseTestCase):
    # Then mock the settings so that the project uses a customized
    # comment model (django_comments_ink.tests.models.MyComment), and repeat
    # the logic adding MyComment instances. Then remove c1 and be sure
    # that c3 gets withheld.
    def setUp(self):
        super(PublishOrWithholdNestedComments_2_TestCase, self).setUp()
        # MyComment is created explicitly (model=MyComment); a `title` is
        # passed through the steps' **kwargs to objects.create.
        thread_test_step_1(
            self.article_1, model=MyComment, title="Can't be empty 1"
        )
        thread_test_step_2(
            self.article_1, model=MyComment, title="Can't be empty 2"
        )
        #
        # These two lines create the following comments:
        #
        # ( # content -> cmt.id thread_id parent_id level order nested
        #     cm1, # ->    1       1         1       0     1     2
        #     cm3, # ->    3       1         1       1     2     0
        #     cm4, # ->    4       1         1       1     3     0
        #     cm2, # ->    2       2         2       0     1     0
        # ) = MyComment.objects.all()
    def test_all_comments_are_public_and_have_not_been_removed(self):
        for cm in MyComment.objects.all():
            self.assertTrue(cm.is_public)
            self.assertFalse(cm.is_removed)
    @patch.multiple(
        "django_comments_ink.conf.settings", COMMENTS_INK_MODEL=_ink_model
    )
    def test_removing_c1_withholds_c3_and_c4(self):
        # Register the receiver again. It was registered in apps.py, but we
        # have patched the COMMENTS_INK_MODEL, however we won't fake the
        # ready. It's easier to just register again the receiver, to test
        # only what depends on django-comments-ink.
        model_app_label = get_model()._meta.label
        pre_save.connect(
            publish_or_withhold_on_pre_save, sender=model_app_label
        )
        cm1 = MyComment.objects.get(pk=1)
        cm1.is_removed = True
        cm1.save()
        # The removed comment stays public; only its children are withheld.
        self.assertTrue(cm1.is_public)
        self.assertTrue(cm1.is_removed)
        cm3 = MyComment.objects.get(pk=3)
        self.assertFalse(cm3.is_public)
        self.assertFalse(cm3.is_removed)
        cm4 = MyComment.objects.get(pk=4)
        self.assertFalse(cm4.is_public)
        self.assertFalse(cm4.is_removed)
@pytest.mark.django_db
def test_get_reply_url(an_articles_comment):
    # The reply URL embeds the comment's primary key.
    expected = f"/comments/reply/{an_articles_comment.pk}/"
    assert an_articles_comment.get_reply_url() == expected
@pytest.mark.django_db
def test_get_queryset_returns_none():
    # With neither a content_type/object_pk pair nor a content_object
    # there is nothing to filter on, so get_queryset returns None.
    result = InkComment.get_queryset(
        content_type=None, object_pk=None, content_object=None
    )
    assert result is None
@pytest.mark.django_db
def test_get_queryset_with_content_object(an_article, an_articles_comment):
    # Passing only content_object is enough to find the article's comment.
    queryset = InkComment.get_queryset(content_object=an_article)
    assert queryset[0] == an_articles_comment
# ---------------------------------------------------------------------
# Test BlackListedDomain.
# Plain SpamModerator subclass; registering it for Article activates the
# moderation pipeline (including the BlackListedDomain check) in the tests
# below.
class ArticleCommentModerator(SpamModerator):
    pass
@pytest.mark.django_db
def test_blacklisted_domain_is_blocked(an_article, an_user):
    """Posting from an email whose domain is blacklisted must be rejected."""
    blacklisted = BlackListedDomain.objects.create(domain="example.com")
    moderator.register(Article, ArticleCommentModerator)
    form = get_form()(an_article)
    data = {
        "name": "Joe",
        # Use the explicit `domain` field instead of relying on the model
        # instance's __str__ representation.
        "email": f"joe@{blacklisted.domain}",
        "followup": True,
        "reply_to": 0,
        "level": 1,
        "order": 1,
        "comment": "Es war einmal eine kleine...",
    }
    data.update(form.initial)
    response = post_article_comment(data, an_article, auth_user=an_user)
    assert response.status_code == 400  # It would be 302 otherwise.
    moderator.unregister(Article)
    assert InkComment.objects.count() == 0
@pytest.mark.django_db
def test_non_blacklisted_domain_pass(an_article, an_user):
    """A comment from a non-blacklisted domain is accepted (redirect, saved)."""
    moderator.register(Article, ArticleCommentModerator)
    form = get_form()(an_article)
    data = {
        "name": "Joe",
        # Plain literal (the original used an f-string with no placeholders).
        "email": "joe@example.com",
        "followup": True,
        "reply_to": 0,
        "level": 1,
        "order": 1,
        "comment": "Es war einmal eine kleine...",
    }
    data.update(form.initial)
    response = post_article_comment(data, an_article, auth_user=an_user)
    assert response.status_code == 302
    moderator.unregister(Article)
    assert InkComment.objects.count() == 1
# ---------------------------------------------------------------------
# Test django.db.models.signals.post_delete signal.
@pytest.mark.django_db
def test_nested_count_after_deleting_comment_1(an_article):
    """Deleting root c1 cascades to its whole subtree."""
    # Build the full thread of steps 1-6:
    # content -> cmt.id thread_id parent_id level order nested
    #   c1    ->    1       1         1       0     1     6
    #   c3    ->    3       1         1       1     2     2
    #   c8    ->    8       1         3       2     3     1
    #   c11   ->   11       1         8       3     4     0
    #   c4    ->    4       1         1       1     5     2
    #   c7    ->    7       1         4       2     6     1
    #   c10   ->   10       1         7       3     7     0
    #   c2    ->    2       2         2       0     1     2
    #   c5    ->    5       2         2       1     2     1
    #   c6    ->    6       2         5       2     3     0
    #   c9    ->    9       9         9       0     1     0
    for step in (
        thread_test_step_1,
        thread_test_step_2,
        thread_test_step_3,
        thread_test_step_4,
        thread_test_step_5,
        thread_test_step_6,
    ):
        step(an_article)
    InkComment.norel_objects.get(pk=1).delete()
    # It removes comments 1, 3, 8, 11, 4, 7 and 10. As the deleted comment
    # was at level 0, there is no nested_count record to modify.
    for deleted_pk in (1, 3, 8, 11, 4, 7, 10):
        with pytest.raises(InkComment.DoesNotExist):
            InkComment.objects.get(pk=deleted_pk)
@pytest.mark.django_db
def test_nested_count_after_deleting_comment_2(an_article):
    """Deleting root c2 cascades to its whole subtree (c5, c6)."""
    # Build the full thread of steps 1-6 (see the table in the deleting_1
    # test): thread 2 contains c2 (root), c5 (reply to c2), c6 (reply to c5).
    for step in (
        thread_test_step_1,
        thread_test_step_2,
        thread_test_step_3,
        thread_test_step_4,
        thread_test_step_5,
        thread_test_step_6,
    ):
        step(an_article)
    InkComment.norel_objects.get(pk=2).delete()
    # It removes comments 2, 5 and 6. As the deleted comment was at
    # level 0, there is no nested_count record to modify.
    for deleted_pk in (2, 5, 6):
        with pytest.raises(InkComment.DoesNotExist):
            InkComment.objects.get(pk=deleted_pk)
@pytest.mark.django_db
def test_nested_count_after_deleting_comment_3(an_article):
    """Deleting c3 removes its subtree (c8, c11) and updates c1's count."""
    # Build the full thread of steps 1-6; before deletion c1 has
    # nested_count 6 and c3 (with children c8 and c11) has 2.
    for step in (
        thread_test_step_1,
        thread_test_step_2,
        thread_test_step_3,
        thread_test_step_4,
        thread_test_step_5,
        thread_test_step_6,
    ):
        step(an_article)
    InkComment.norel_objects.get(pk=3).delete()
    # It removes comments 3, 8 and 11; c1's nested_count drops 6 -> 3.
    for deleted_pk in (3, 8, 11):
        with pytest.raises(InkComment.DoesNotExist):
            InkComment.objects.get(pk=deleted_pk)
    root = InkComment.norel_objects.get(pk=1)
    assert root.nested_count == 3
@pytest.mark.django_db
def test_nested_count_after_deleting_comment_4(an_article):
    """Deleting c4 removes its subtree (c7, c10) and updates c1's count."""
    # Build the full thread of steps 1-6; before deletion c1 has
    # nested_count 6 and c4 (with children c7 and c10) has 2.
    for step in (
        thread_test_step_1,
        thread_test_step_2,
        thread_test_step_3,
        thread_test_step_4,
        thread_test_step_5,
        thread_test_step_6,
    ):
        step(an_article)
    InkComment.norel_objects.get(pk=4).delete()
    # It removes comments 4, 7 and 10; c1's nested_count drops 6 -> 3.
    for deleted_pk in (4, 7, 10):
        with pytest.raises(InkComment.DoesNotExist):
            InkComment.objects.get(pk=deleted_pk)
    root = InkComment.norel_objects.get(pk=1)
    assert root.nested_count == 3
@pytest.mark.django_db
def test_nested_count_after_deleting_comment_5(an_article):
    """Deleting c5 removes its subtree (c6) and updates c2's count."""
    # Build the full thread of steps 1-6; before deletion c2 has
    # nested_count 2 and c5 (with child c6) has 1.
    for step in (
        thread_test_step_1,
        thread_test_step_2,
        thread_test_step_3,
        thread_test_step_4,
        thread_test_step_5,
        thread_test_step_6,
    ):
        step(an_article)
    InkComment.norel_objects.get(pk=5).delete()
    # It removes comments 5 and 6; c2's nested_count drops 2 -> 0.
    for deleted_pk in (5, 6):
        with pytest.raises(InkComment.DoesNotExist):
            InkComment.objects.get(pk=deleted_pk)
    root = InkComment.norel_objects.get(pk=2)
    assert root.nested_count == 0
@pytest.mark.django_db
def test_nested_count_after_deleting_comment_6(an_article):
    """Deleting leaf c6 updates the counts of its ancestors c5 and c2."""
    # Build the full thread of steps 1-6; before deletion c2 has
    # nested_count 2 and c5 has 1 (its only child is c6).
    for step in (
        thread_test_step_1,
        thread_test_step_2,
        thread_test_step_3,
        thread_test_step_4,
        thread_test_step_5,
        thread_test_step_6,
    ):
        step(an_article)
    InkComment.norel_objects.get(pk=6).delete()
    # Only comment 6 goes away; c2 drops 2 -> 1 and c5 drops 1 -> 0.
    with pytest.raises(InkComment.DoesNotExist):
        InkComment.objects.get(pk=6)
    assert InkComment.norel_objects.get(pk=2).nested_count == 1
    assert InkComment.norel_objects.get(pk=5).nested_count == 0
@pytest.mark.django_db
def test_nested_count_after_deleting_comment_7(an_article):
    """Deleting c7 removes its subtree (c10) and updates c1 and c4."""
    # Build the full thread of steps 1-6; before deletion c1 has
    # nested_count 6, c4 has 2 and c7 (with child c10) has 1.
    for step in (
        thread_test_step_1,
        thread_test_step_2,
        thread_test_step_3,
        thread_test_step_4,
        thread_test_step_5,
        thread_test_step_6,
    ):
        step(an_article)
    InkComment.norel_objects.get(pk=7).delete()
    # It removes comments 7 and 10; c1 drops 6 -> 4 and c4 drops 2 -> 0.
    for deleted_pk in (7, 10):
        with pytest.raises(InkComment.DoesNotExist):
            InkComment.objects.get(pk=deleted_pk)
    assert InkComment.norel_objects.get(pk=1).nested_count == 4
    assert InkComment.norel_objects.get(pk=4).nested_count == 0
@pytest.mark.django_db
def test_nested_count_after_deleting_comment_8(an_article):
    """Deleting c8 removes its subtree (c11) and updates c1 and c3."""
    # Build the full thread of steps 1-6; before deletion c1 has
    # nested_count 6, c3 has 2 and c8 (with child c11) has 1.
    for step in (
        thread_test_step_1,
        thread_test_step_2,
        thread_test_step_3,
        thread_test_step_4,
        thread_test_step_5,
        thread_test_step_6,
    ):
        step(an_article)
    InkComment.norel_objects.get(pk=8).delete()
    # It removes comments 8 and 11; c1 drops 6 -> 4 and c3 drops 2 -> 0.
    for deleted_pk in (8, 11):
        with pytest.raises(InkComment.DoesNotExist):
            InkComment.objects.get(pk=deleted_pk)
    assert InkComment.norel_objects.get(pk=1).nested_count == 4
    assert InkComment.norel_objects.get(pk=3).nested_count == 0
@pytest.mark.django_db
def test_nested_count_after_deleting_comment_10(an_article):
    """Deleting leaf c10 updates the counts of its ancestors c1, c4, c7."""
    # Build the full thread of steps 1-6; before deletion c1 has
    # nested_count 6, c4 has 2 and c7 has 1 (its only child is c10).
    for step in (
        thread_test_step_1,
        thread_test_step_2,
        thread_test_step_3,
        thread_test_step_4,
        thread_test_step_5,
        thread_test_step_6,
    ):
        step(an_article)
    InkComment.norel_objects.get(pk=10).delete()
    # Only comment 10 goes away; c1 drops 6 -> 5, c4 drops 2 -> 1 and
    # c7 drops 1 -> 0. (The original comment here wrongly said "8 and 11".)
    with pytest.raises(InkComment.DoesNotExist):
        InkComment.objects.get(pk=10)
    assert InkComment.norel_objects.get(pk=1).nested_count == 5
    assert InkComment.norel_objects.get(pk=4).nested_count == 1
    assert InkComment.norel_objects.get(pk=7).nested_count == 0
| 40.299739 | 80 | 0.530473 | 6,046 | 46,385 | 3.884386 | 0.048462 | 0.009879 | 0.050671 | 0.043687 | 0.829168 | 0.782798 | 0.75759 | 0.730381 | 0.71369 | 0.682138 | 0 | 0.074457 | 0.372556 | 46,385 | 1,150 | 81 | 40.334783 | 0.732477 | 0.305659 | 0 | 0.630936 | 0 | 0 | 0.030446 | 0.004686 | 0 | 0 | 0 | 0 | 0.222524 | 1 | 0.081411 | false | 0.002714 | 0.016282 | 0 | 0.113976 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4f8d9e940c1298daaa6bca558dfb61086f42012e | 33 | py | Python | configs/__init__.py | dqshuai/NAE4PS | 476dee524bb40c1f89fc0a30f37c735a2c0ee082 | [
"MIT"
] | 84 | 2020-03-11T15:02:06.000Z | 2022-01-03T11:38:36.000Z | configs/__init__.py | dqshuai/NAE4PS | 476dee524bb40c1f89fc0a30f37c735a2c0ee082 | [
"MIT"
] | 20 | 2020-06-16T07:55:44.000Z | 2021-09-10T15:34:28.000Z | configs/__init__.py | dqshuai/NAE4PS | 476dee524bb40c1f89fc0a30f37c735a2c0ee082 | [
"MIT"
] | 17 | 2020-06-14T13:40:47.000Z | 2021-11-30T22:21:03.000Z | from .res50_faster_rcnn import *
| 16.5 | 32 | 0.818182 | 5 | 33 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068966 | 0.121212 | 33 | 1 | 33 | 33 | 0.793103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
4f9bde3fb99ee72dcb81cd39954e2f294b7e4945 | 17,550 | py | Python | model.py | BluePinetree/ViolenceDetection | 3b54cf5e53e74fd99bc11d709bc3c3894065de12 | [
"MIT"
] | null | null | null | model.py | BluePinetree/ViolenceDetection | 3b54cf5e53e74fd99bc11d709bc3c3894065de12 | [
"MIT"
] | null | null | null | model.py | BluePinetree/ViolenceDetection | 3b54cf5e53e74fd99bc11d709bc3c3894065de12 | [
"MIT"
] | 1 | 2021-01-10T13:20:21.000Z | 2021-01-10T13:20:21.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv3D, MaxPooling3D, Concatenate, BatchNormalization, AveragePooling3D, Dropout, ReLU, Softmax, Lambda
# Ordered names of the points at which `inceptionI3D` can be truncated via
# its `final_endpoint` argument. These are also the keys of the `end_points`
# dict the builder returns.
VALID_ENDPOINTS = (
    'Conv3d_1a_7x7',
    'MaxPool3d_2a_3x3',
    'Conv3d_2b_1x1',
    'Conv3d_2c_3x3',
    'MaxPool3d_3a_3x3',
    'Mixed_3b',
    'Mixed_3c',
    'MaxPool3d_4a_3x3',
    'Mixed_4b',
    'Mixed_4c',
    'Mixed_4d',
    'Mixed_4e',
    'Mixed_4f',
    'MaxPool3d_5a_2x2',
    'Mixed_5b',
    'Mixed_5c',
    'Logits',
    'Predictions',
)
def Unit3D(inputs,
           output_channels,
           kernel_shape=(1, 1, 1),
           strides=(1, 1, 1),
           activation_fn='relu',
           use_batch_norm=True,
           use_bias=False,
           is_training=True,
           name='unit_3d'):
    """Basic I3D unit: 3D convolution -> optional batch norm -> activation.

    Args:
        inputs: 5-D input tensor (batch, time, height, width, channels).
        output_channels: number of convolution filters.
        kernel_shape: 3-D convolution kernel size.
        strides: 3-D convolution strides.
        activation_fn: callable applied to the normalized output, ``None``
            for a linear unit, or the default sentinel ``'relu'`` which
            builds a fresh ``ReLU`` layer per call. (The previous default,
            ``ReLU()``, was evaluated once at import time, so every unit in
            the network silently shared a single layer instance.)
        use_batch_norm: whether to apply ``BatchNormalization``.
        use_bias: whether the convolution uses a bias term.
        is_training: passed to batch norm as the ``training`` flag.
        name: layer name for the convolution.

    Returns:
        The output tensor of the unit.
    """
    net = Conv3D(filters=output_channels, kernel_size=kernel_shape,
                 strides=strides, padding='same',
                 use_bias=use_bias, name=name)(inputs)
    if use_batch_norm:
        net = BatchNormalization()(net, training=is_training)
    if activation_fn == 'relu':
        # Fresh instance per call; avoids the shared mutable default.
        activation_fn = ReLU()
    if activation_fn is not None:
        net = activation_fn(net)
    return net
def _inception_module(net, end_point, channels, is_training):
    """Build one Inception "mixed" block and return the concatenated output.

    ``channels`` is ``(b0, (b1_reduce, b1_out), (b2_reduce, b2_out),
    b3_proj)``: output channel counts for the 1x1 branch, the 1x1->3x3
    branch, the second 1x1->3x3 branch, and the maxpool->1x1 projection
    branch. Branch outputs are concatenated on the channel axis (axis 4).
    """
    b0, (b1_reduce, b1_out), (b2_reduce, b2_out), b3_proj = channels
    with tf.compat.v1.variable_scope(end_point):
        with tf.compat.v1.variable_scope('Branch_0'):
            branch_0 = Unit3D(
                net, output_channels=b0, kernel_shape=(1, 1, 1),
                name=f'{end_point}_Conv3d_0a_1x1-0', is_training=is_training)
        with tf.compat.v1.variable_scope('Branch_1'):
            branch_1 = Unit3D(
                net, output_channels=b1_reduce, kernel_shape=(1, 1, 1),
                name=f'{end_point}_Conv3d_0a_1x1-1', is_training=is_training)
            branch_1 = Unit3D(
                branch_1, output_channels=b1_out, kernel_shape=(3, 3, 3),
                name=f'{end_point}_Conv3d_0b_3x3-1', is_training=is_training)
        with tf.compat.v1.variable_scope('Branch_2'):
            branch_2 = Unit3D(
                net, output_channels=b2_reduce, kernel_shape=(1, 1, 1),
                name=f'{end_point}_Conv3d_0a_1x1-2', is_training=is_training)
            branch_2 = Unit3D(
                branch_2, output_channels=b2_out, kernel_shape=(3, 3, 3),
                name=f'{end_point}_Conv3d_0b_3x3-2', is_training=is_training)
        with tf.compat.v1.variable_scope('Branch_3'):
            branch_3 = MaxPooling3D(
                pool_size=(3, 3, 3), strides=(1, 1, 1), padding='same',
                name=f'{end_point}_MaxPool3d_0a_3x3-3')(net)
            branch_3 = Unit3D(
                branch_3, output_channels=b3_proj, kernel_shape=(1, 1, 1),
                name=f'{end_point}_Conv3d_0b_1x1-3', is_training=is_training)
    return Concatenate(axis=4)([branch_0, branch_1, branch_2, branch_3])


# (end_point, layer kind, parameters) for the I3D backbone up to Mixed_5c.
# Channel tuples follow the `_inception_module` convention above.
_I3D_BACKBONE = (
    ('Conv3d_1a_7x7', 'conv', dict(output_channels=64,
                                   kernel_shape=(7, 7, 7),
                                   strides=(2, 2, 2))),
    ('MaxPool3d_2a_3x3', 'pool', dict(pool_size=(1, 3, 3), strides=(1, 2, 2))),
    ('Conv3d_2b_1x1', 'conv', dict(output_channels=64,
                                   kernel_shape=(1, 1, 1))),
    ('Conv3d_2c_3x3', 'conv', dict(output_channels=192,
                                   kernel_shape=(3, 3, 3))),
    ('MaxPool3d_3a_3x3', 'pool', dict(pool_size=(1, 3, 3), strides=(1, 2, 2))),
    ('Mixed_3b', 'mixed', (64, (96, 128), (16, 32), 32)),
    ('Mixed_3c', 'mixed', (128, (128, 192), (32, 96), 64)),
    ('MaxPool3d_4a_3x3', 'pool', dict(pool_size=(3, 3, 3), strides=(2, 2, 2))),
    ('Mixed_4b', 'mixed', (192, (96, 208), (16, 48), 64)),
    ('Mixed_4c', 'mixed', (160, (112, 224), (24, 64), 64)),
    ('Mixed_4d', 'mixed', (128, (128, 256), (24, 64), 64)),
    ('Mixed_4e', 'mixed', (112, (144, 288), (32, 64), 64)),
    ('Mixed_4f', 'mixed', (256, (160, 320), (32, 128), 128)),
    ('MaxPool3d_5a_2x2', 'pool', dict(pool_size=(2, 2, 2), strides=(2, 2, 2))),
    ('Mixed_5b', 'mixed', (256, (160, 320), (32, 128), 128)),
    ('Mixed_5c', 'mixed', (384, (192, 384), (48, 128), 128)),
)


def inceptionI3D(inputs, num_classes=2, spatial_squeeze=True, is_training=True,
                 dropout_keep_prob=1.0, final_endpoint='Logits', name='inception_i3d'):
    """Build the Inception-I3D network on ``inputs``.

    Args:
        inputs: 5-D video tensor (batch, time, height, width, channels).
        num_classes: number of output classes for the logits head.
        spatial_squeeze: squeeze the size-1 spatial dims of the logits.
        is_training: training flag forwarded to batch norm layers.
        dropout_keep_prob: probability of *keeping* an activation before the
            logits layer (I3D/Sonnet convention).
        final_endpoint: name in ``VALID_ENDPOINTS`` at which to stop.
        name: unused here, kept for interface compatibility.

    Returns:
        Tuple ``(output_tensor, end_points)`` where ``end_points`` maps every
        built endpoint name to its tensor.

    Raises:
        ValueError: if ``final_endpoint`` is not in ``VALID_ENDPOINTS``.
    """
    if final_endpoint not in VALID_ENDPOINTS:
        raise ValueError(f'Unknown final endpoint {final_endpoint}')

    net = inputs
    end_points = {}

    # Backbone: convolutions, max pools and Inception modules in order.
    for end_point, kind, params in _I3D_BACKBONE:
        if kind == 'conv':
            net = Unit3D(net, name=end_point, is_training=is_training, **params)
        elif kind == 'pool':
            net = MaxPooling3D(padding='same', name=end_point, **params)(net)
        else:  # 'mixed'
            net = _inception_module(net, end_point, params, is_training)
        end_points[end_point] = net
        if final_endpoint == end_point:
            return net, end_points

    end_point = 'Logits'
    with tf.compat.v1.variable_scope(end_point):
        net = AveragePooling3D(pool_size=(2, 7, 7), strides=(1, 1, 1),
                               padding='valid')(net)
        # BUG FIX: `dropout_keep_prob` is a *keep* probability, while Keras
        # `Dropout(rate=...)` takes the fraction to *drop*. The original
        # passed the keep probability straight through, so the default of
        # 1.0 dropped every activation during training.
        net = Dropout(rate=1 - dropout_keep_prob)(net)
        logits = Unit3D(net, output_channels=num_classes, kernel_shape=(1, 1, 1),
                        activation_fn=None, use_batch_norm=False, use_bias=True,
                        name=f'{end_point}_Conv3d_0c_1x1', is_training=is_training)
        if spatial_squeeze:
            # Remove the (now size-1) spatial dimensions.
            logits = Lambda(lambda x: tf.squeeze(x, axis=[2, 3]),
                            name='SpatialSqueeze')(logits)
    # Average the per-frame logits over the temporal dimension.
    averaged_logits = Lambda(lambda x: tf.reduce_mean(x, axis=1))(logits)
    end_points[end_point] = averaged_logits
    if final_endpoint == end_point:
        return averaged_logits, end_points

    end_point = 'Predictions'
    predictions = Softmax()(averaged_logits)
    end_points[end_point] = predictions
    return predictions, end_points
if __name__ == '__main__':
    # Smoke test: build the graph on variable-length 224x224 RGB clips and
    # print its layer summary.
    frames = Input(shape=[None, 224, 224, 3])
    outputs, _ = inceptionI3D(frames, dropout_keep_prob=0.5)
    Model(frames, outputs).summary()
| 63.586957 | 146 | 0.697151 | 2,785 | 17,550 | 4.052065 | 0.050987 | 0.095702 | 0.04537 | 0.073726 | 0.859991 | 0.84156 | 0.83642 | 0.826141 | 0.823039 | 0.820027 | 0 | 0.076639 | 0.167293 | 17,550 | 275 | 147 | 63.818182 | 0.695566 | 0.003248 | 0 | 0.538776 | 0 | 0 | 0.147104 | 0.100223 | 0 | 0 | 0 | 0 | 0 | 1 | 0.008163 | false | 0 | 0.028571 | 0 | 0.040816 | 0.004082 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
96d5656cc6df7babf80ce9ff01c22b4bef6f6c54 | 82 | py | Python | app/services/__init__.py | LuisMSoares/Sacsis-Api | c32ef44ddc9956e01f30276002059e898ba3985a | [
"Apache-2.0"
] | 4 | 2019-09-10T02:40:53.000Z | 2019-09-30T04:51:21.000Z | app/services/__init__.py | LuisMSoares/Sacsis-Api | c32ef44ddc9956e01f30276002059e898ba3985a | [
"Apache-2.0"
] | null | null | null | app/services/__init__.py | LuisMSoares/Sacsis-Api | c32ef44ddc9956e01f30276002059e898ba3985a | [
"Apache-2.0"
] | null | null | null | from app.services.token import Token
from app.services.send_email import SendEmail | 41 | 45 | 0.865854 | 13 | 82 | 5.384615 | 0.615385 | 0.2 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085366 | 82 | 2 | 45 | 41 | 0.933333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
96edeca93165982ac52595fee9701b8653a7263c | 35 | py | Python | sensoglove/__init__.py | TheRizzen/senso-glove | eb15d3a08350877174cd62b75beacec8f72e12af | [
"MIT"
] | null | null | null | sensoglove/__init__.py | TheRizzen/senso-glove | eb15d3a08350877174cd62b75beacec8f72e12af | [
"MIT"
] | 2 | 2018-07-02T15:54:07.000Z | 2018-08-15T19:42:21.000Z | sensoglove/__init__.py | TheRizzen/senso-glove | eb15d3a08350877174cd62b75beacec8f72e12af | [
"MIT"
] | null | null | null | from .sensoglove import SensoGlove
| 17.5 | 34 | 0.857143 | 4 | 35 | 7.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114286 | 35 | 1 | 35 | 35 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
96fa468813db6f1bb3d65ba8c36e70b5db2e5c50 | 194 | py | Python | trading_sim/event/event_tick.py | kyhorne/trading_sim | 6c2f17b7be0bfba6338ebe48ea69d2aed7e65c77 | [
"MIT"
] | 28 | 2019-03-20T16:13:10.000Z | 2022-03-12T01:30:25.000Z | trading_sim/event/event_tick.py | kyhorne/trading_sim | 6c2f17b7be0bfba6338ebe48ea69d2aed7e65c77 | [
"MIT"
] | 8 | 2019-03-17T01:22:42.000Z | 2020-09-04T15:22:37.000Z | trading_sim/event/event_tick.py | kyhorne/trading_sim | 6c2f17b7be0bfba6338ebe48ea69d2aed7e65c77 | [
"MIT"
] | 9 | 2019-03-20T22:00:46.000Z | 2020-07-02T01:30:58.000Z | import json
from .event_type import EventType
class EventTick:
    """Factory for the serialized tick event message."""

    @staticmethod
    def instantiate():
        # A tick carries no data; only the event type matters.
        message = {"eventType": EventType.TICK.value, "payload": {}}
        return json.dumps(message).encode()
| 19.4 | 86 | 0.690722 | 21 | 194 | 6.333333 | 0.809524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.180412 | 194 | 9 | 87 | 21.555556 | 0.836478 | 0 | 0 | 0 | 0 | 0 | 0.082474 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | true | 0 | 0.333333 | 0.166667 | 0.833333 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
96fa541e4fb274111c48f5a6d413fefd8a52c217 | 82 | py | Python | bitmovin/errors/missing_argument_error.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 44 | 2016-12-12T17:37:23.000Z | 2021-03-03T09:48:48.000Z | bitmovin/errors/missing_argument_error.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 38 | 2017-01-09T14:45:45.000Z | 2022-02-27T18:04:33.000Z | bitmovin/errors/missing_argument_error.py | camberbridge/bitmovin-python | 3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95 | [
"Unlicense"
] | 27 | 2017-02-02T22:49:31.000Z | 2019-11-21T07:04:57.000Z | from . import BitmovinError
class MissingArgumentError(BitmovinError):
    """Raised when a required argument was not provided."""
| 13.666667 | 42 | 0.792683 | 7 | 82 | 9.285714 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.158537 | 82 | 5 | 43 | 16.4 | 0.942029 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
96fba4163a6345fd4e281b71adee1daa8918a644 | 12,027 | py | Python | mayan/apps/cabinets/tests/test_api.py | boost-entropy-repos-org/Mayan-EDMS | 5769f26abc56f92f8edb9d311cabf659dc0535c1 | [
"Apache-2.0"
] | 1 | 2021-02-24T15:03:23.000Z | 2021-02-24T15:03:23.000Z | mayan/apps/cabinets/tests/test_api.py | ass-a2s/Mayan-EDMS | 5769f26abc56f92f8edb9d311cabf659dc0535c1 | [
"Apache-2.0"
] | null | null | null | mayan/apps/cabinets/tests/test_api.py | ass-a2s/Mayan-EDMS | 5769f26abc56f92f8edb9d311cabf659dc0535c1 | [
"Apache-2.0"
] | 1 | 2020-08-09T09:06:59.000Z | 2020-08-09T09:06:59.000Z | from django.utils.encoding import force_text
from rest_framework import status
from mayan.apps.documents.permissions import permission_document_view
from mayan.apps.documents.tests.mixins import DocumentTestMixin
from mayan.apps.rest_api.tests.base import BaseAPITestCase
from ..models import Cabinet
from ..permissions import (
permission_cabinet_add_document, permission_cabinet_create,
permission_cabinet_delete, permission_cabinet_edit,
permission_cabinet_remove_document, permission_cabinet_view
)
from .mixins import (
CabinetAPIViewTestMixin, CabinetTestMixin,
DocumentCabinetAPIViewTestMixin
)
class CabinetAPITestCase(
    CabinetAPIViewTestMixin, CabinetTestMixin, BaseAPITestCase
):
    """API tests for the cabinet create/delete/edit/list views.

    Each operation is exercised twice: once without the required
    permission or access (expecting a 403/404 and no state change) and
    once with it granted (expecting success).
    """
    def test_cabinet_create_api_view_no_permission(self):
        cabinet_count = Cabinet.objects.count()

        response = self._request_test_cabinet_create_api_view()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

        # No cabinet should have been created.
        self.assertEqual(cabinet_count, Cabinet.objects.count())

    def test_cabinet_create_api_view_with_permission(self):
        self.grant_permission(permission=permission_cabinet_create)

        response = self._request_test_cabinet_create_api_view()
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data['id'], self.test_cabinet.pk)
        self.assertEqual(response.data['label'], self.test_cabinet.label)

        self.assertEqual(Cabinet.objects.count(), 1)

    # Renamed from `test_cabinet_delete_api_view_no_permssions` (typo) for
    # consistency with the other `_no_permission` tests in this class.
    def test_cabinet_delete_api_view_no_permission(self):
        self._create_test_cabinet()

        cabinet_count = Cabinet.objects.count()

        response = self._request_test_cabinet_delete_api_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.assertEqual(Cabinet.objects.count(), cabinet_count)

    def test_cabinet_delete_api_view_with_access(self):
        self._create_test_cabinet()
        self.grant_access(
            obj=self.test_cabinet, permission=permission_cabinet_delete
        )

        cabinet_count = Cabinet.objects.count()

        response = self._request_test_cabinet_delete_api_view()
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

        self.assertEqual(Cabinet.objects.count(), cabinet_count - 1)

    # Renamed from `test_cabinet_edit_api_patch_view_no_pemission` (typo).
    def test_cabinet_edit_api_patch_view_no_permission(self):
        self._create_test_cabinet()

        cabinet_label = self.test_cabinet.label

        response = self._request_test_cabinet_edit_api_patch_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        # The label must be unchanged.
        self.test_cabinet.refresh_from_db()
        self.assertEqual(cabinet_label, self.test_cabinet.label)

    def test_cabinet_edit_api_patch_view_with_access(self):
        self._create_test_cabinet()
        self.grant_access(
            obj=self.test_cabinet, permission=permission_cabinet_edit
        )

        cabinet_label = self.test_cabinet.label

        response = self._request_test_cabinet_edit_api_patch_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        self.test_cabinet.refresh_from_db()
        self.assertNotEqual(cabinet_label, self.test_cabinet.label)

    # Renamed from `test_cabinet_edit_api_put_view_no_pemission` (typo).
    def test_cabinet_edit_api_put_view_no_permission(self):
        self._create_test_cabinet()

        cabinet_label = self.test_cabinet.label

        response = self._request_test_cabinet_edit_api_put_view()
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

        self.test_cabinet.refresh_from_db()
        self.assertEqual(cabinet_label, self.test_cabinet.label)

    def test_cabinet_edit_api_put_view_with_access(self):
        self._create_test_cabinet()
        self.grant_access(
            obj=self.test_cabinet, permission=permission_cabinet_edit
        )

        cabinet_label = self.test_cabinet.label

        response = self._request_test_cabinet_edit_api_put_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        self.test_cabinet.refresh_from_db()
        self.assertNotEqual(cabinet_label, self.test_cabinet.label)

    def test_cabinet_list_api_view_no_permission(self):
        self._create_test_cabinet()

        # The list view succeeds but filters out inaccessible cabinets.
        response = self._request_test_cabinet_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 0)

    def test_cabinet_list_api_view_with_access(self):
        self._create_test_cabinet()
        self.grant_access(
            obj=self.test_cabinet, permission=permission_cabinet_view
        )

        response = self._request_test_cabinet_list_api_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['results'][0]['label'], self.test_cabinet.label
        )
class CabinetDocumentAPITestCase(
    CabinetAPIViewTestMixin, CabinetTestMixin, DocumentTestMixin,
    BaseAPITestCase
):
    """API tests covering the cabinet <-> document relationship."""
    auto_upload_test_document = False

    def test_cabinet_create_with_single_document(self):
        self._create_test_document_stub()
        self.grant_permission(permission=permission_cabinet_create)

        extra_data = {
            'documents_pk_list': '{}'.format(self.test_document.pk)
        }
        response = self._request_test_cabinet_create_api_view(
            extra_data=extra_data
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        self.assertEqual(response.data['id'], self.test_cabinet.pk)
        self.assertEqual(response.data['label'], self.test_cabinet.label)
        # The document passed in the payload ends up in the new cabinet.
        self.assertQuerysetEqual(
            self.test_cabinet.documents.all(), (repr(self.test_document),)
        )

    def test_cabinet_create_with_multiple_documents(self):
        self._create_test_document_stub()
        self._create_test_document_stub()

        pk_list = ','.join(
            force_text(s=document.pk) for document in self.test_documents
        )

        self.grant_permission(permission=permission_cabinet_create)

        response = self._request_test_cabinet_create_api_view(
            extra_data={'documents_pk_list': pk_list}
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        self.assertEqual(response.data['id'], self.test_cabinet.pk)
        self.assertEqual(response.data['label'], self.test_cabinet.label)
        self.assertEqual(Cabinet.objects.count(), 1)
        # Every test document was added to the single new cabinet.
        self.assertQuerysetEqual(
            qs=self.test_cabinet.documents.all(),
            values=map(repr, self.test_documents)
        )

    def test_cabinet_document_remove_api_view(self):
        self._create_test_document_stub()
        self._create_test_cabinet()
        self.test_cabinet.documents.add(self.test_document)

        self.grant_permission(
            permission=permission_cabinet_remove_document
        )

        response = self._request_test_cabinet_document_remove_api_view()
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

        self.assertEqual(self.test_cabinet.documents.count(), 0)

    def test_cabinet_document_detail_api_view(self):
        self._create_test_document_stub()
        self._create_test_cabinet()
        self.test_cabinet.documents.add(self.test_document)

        # Both the cabinet and the document must be viewable.
        self.grant_permission(
            permission=permission_cabinet_view
        )
        self.grant_permission(
            permission=permission_document_view
        )

        response = self.get(
            viewname='rest_api:cabinet-document', kwargs={
                'pk': self.test_cabinet.pk,
                'document_pk': self.test_document.pk
            }
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['uuid'], force_text(s=self.test_document.uuid)
        )

    def test_cabinet_document_list_api_view(self):
        self._create_test_document_stub()
        self._create_test_cabinet()
        self.test_cabinet.documents.add(self.test_document)

        self.grant_permission(permission=permission_cabinet_view)
        self.grant_permission(permission=permission_document_view)

        response = self.get(
            viewname='rest_api:cabinet-document-list', kwargs={
                'pk': self.test_cabinet.pk
            }
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['results'][0]['uuid'],
            force_text(s=self.test_document.uuid)
        )

    def test_cabinet_add_document_api_view(self):
        self._create_test_document_stub()
        self._create_test_cabinet()

        # NOTE(review): unlike the multiple-documents test below, no
        # `permission_cabinet_add_document` is granted here, yet a 201 is
        # expected — confirm whether the view really allows this or the
        # grant is missing.
        response = self.post(
            data={
                'documents_pk_list': '{}'.format(self.test_document.pk)
            }, kwargs={
                'pk': self.test_cabinet.pk
            }, viewname='rest_api:cabinet-document-list'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertQuerysetEqual(
            self.test_cabinet.documents.all(), (repr(self.test_document),)
        )

    def test_cabinet_add_multiple_documents_api_view(self):
        self._create_test_document_stub()
        self._create_test_document_stub()

        pk_list = ','.join(
            force_text(s=document.pk) for document in self.test_documents
        )

        self._create_test_cabinet()

        self.grant_permission(permission=permission_cabinet_add_document)

        response = self.post(
            data={
                'documents_pk_list': pk_list
            }, kwargs={
                'pk': self.test_cabinet.pk
            }, viewname='rest_api:cabinet-document-list'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertQuerysetEqual(
            qs=self.test_cabinet.documents.all(),
            values=map(repr, self.test_documents)
        )
class DocumentCabinetAPITestCase(
    CabinetAPIViewTestMixin, CabinetTestMixin,
    DocumentCabinetAPIViewTestMixin, DocumentTestMixin, BaseAPITestCase
):
    """Tests for the per-document cabinet listing API view."""
    auto_upload_test_document = False

    def setUp(self):
        super(DocumentCabinetAPITestCase, self).setUp()
        # One stub document filed in one cabinet, shared by every test.
        self._create_test_document_stub()
        self._create_test_cabinet()
        self.test_cabinet.documents.add(self.test_document)

    def test_document_cabinet_list_view_no_permission(self):
        response = self._request_test_document_cabinet_list_view()

        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertNotIn('count', response.data)

    def test_document_cabinet_list_view_with_document_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_view,
        )

        # Document is visible, but its cabinet is filtered out.
        response = self._request_test_document_cabinet_list_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 0)

    def test_document_cabinet_list_view_with_cabinet_access(self):
        self.grant_access(
            obj=self.test_cabinet, permission=permission_cabinet_view,
        )

        # Cabinet access alone is not enough to view the document's list.
        response = self._request_test_document_cabinet_list_view()
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertNotIn('count', response.data)

    def test_document_cabinet_list_view_with_full_access(self):
        self.grant_access(
            obj=self.test_document, permission=permission_document_view,
        )
        self.grant_access(
            obj=self.test_cabinet, permission=permission_cabinet_view,
        )

        response = self._request_test_document_cabinet_list_view()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data['count'], Cabinet.objects.all().count()
        )
        self.assertEqual(
            response.data['results'][0]['id'], self.test_cabinet.pk
        )
| 34.264957 | 75 | 0.698512 | 1,375 | 12,027 | 5.697455 | 0.077091 | 0.116543 | 0.074675 | 0.077738 | 0.864437 | 0.841716 | 0.800613 | 0.760403 | 0.743298 | 0.737044 | 0 | 0.007645 | 0.216929 | 12,027 | 350 | 76 | 34.362857 | 0.824166 | 0 | 0 | 0.600775 | 0 | 0 | 0.024112 | 0.009562 | 0 | 0 | 0 | 0 | 0.193798 | 1 | 0.085271 | false | 0 | 0.031008 | 0 | 0.135659 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8c083f5c3a3dfc68c87f48abcd0f56bd6f9877f3 | 50,985 | py | Python | testing/test_add_data.py | mortazavilab/swan_vis | 7e5817b06d8a20792fee7af1bc59a1bf818bd3c4 | [
"MIT"
] | 34 | 2020-06-12T20:05:38.000Z | 2022-03-28T00:12:22.000Z | testing/test_add_data.py | mortazavilab/swan_vis | 7e5817b06d8a20792fee7af1bc59a1bf818bd3c4 | [
"MIT"
] | 14 | 2020-06-14T23:04:05.000Z | 2022-03-25T17:06:18.000Z | testing/test_add_data.py | mortazavilab/swan_vis | 7e5817b06d8a20792fee7af1bc59a1bf818bd3c4 | [
"MIT"
] | 5 | 2020-10-17T09:10:41.000Z | 2022-02-03T21:19:15.000Z | import pytest
import sys
import numpy as np
import swan_vis as swan
import networkx as nx
import math
import pandas as pd
###########################################################################
##################### Related to adding metadata ##########################
###########################################################################
class TestMetadata(object):
    """Tests for SwanGraph.add_metadata (attaching sample metadata
    to the AnnData object built from abundance data)."""

    # test add_metadata - one after the other with dupe cols
    # yes overwrite
    def test_add_metadata_4(self):
        """Adding a second metadata file with overwrite=True replaces
        values in duplicated columns."""
        sg = swan.SwanGraph()
        db = 'files/chr11_and_Tcf3_no_gname.db'
        sg.add_transcriptome(db)
        ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
        sg.add_abundance(ab)
        meta = 'files/chr11_and_Tcf3_metadata.tsv'
        sg.add_metadata(meta)
        meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
        sg.add_metadata(meta, overwrite=True)

        # the second (dupecols) file's cluster values should win
        assert {'3', '4'} == set(sg.adata.obs.cluster.tolist())

    # test add_metadata - one after the other with dupe cols
    # don't overwrite
    def test_add_metadata_3(self):
        """Adding a second metadata file with overwrite=False keeps the
        original values in duplicated columns."""
        sg = swan.SwanGraph()
        db = 'files/chr11_and_Tcf3_no_gname.db'
        sg.add_transcriptome(db)
        ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
        sg.add_abundance(ab)
        meta = 'files/chr11_and_Tcf3_metadata.tsv'
        sg.add_metadata(meta)
        meta = 'files/chr11_and_Tcf3_metadata_dupecols.tsv'
        sg.add_metadata(meta, overwrite=False)

        # the first file's cluster values should be retained
        assert {'2', '1'} == set(sg.adata.obs.cluster.tolist())

    # test add_metadata - one after the other
    def test_add_metadata_2(self):
        """Metadata from two files with disjoint columns is merged into
        one obs table."""
        # NOTE: a stray dead `pass` statement that preceded this body
        # was removed.
        sg = swan.SwanGraph()

        # just gencode vM21 chr 11 and tcf3
        gtf = 'files/chr11_and_Tcf3.gtf'
        sg.add_annotation(gtf, verbose=True)
        db = 'files/chr11_and_Tcf3_no_gname.db'
        sg.add_transcriptome(db)
        ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
        sg.add_abundance(ab)
        meta = 'files/chr11_and_Tcf3_metadata.tsv'
        sg.add_metadata(meta)
        meta = 'files/chr11_and_Tcf3_metadata_2.tsv'
        sg.add_metadata(meta)

        test = sg.adata.obs
        data = [['D12', '1', 'K562', 'G0'],
                ['PB65_B017', '2', 'GM12878', 'M'],
                ['PB65_B018', '2', 'GM12878', 'S']]
        cols = ['dataset', 'cluster', 'sample', 'cell_state']
        ctrl = pd.DataFrame(data=data, columns=cols)
        ctrl.index = ctrl.dataset
        ctrl = ctrl[test.columns]

        ctrl.sort_index(inplace=True)
        test.sort_index(inplace=True)

        print('test')
        print(test)
        print('control')
        print(ctrl)
        assert test.equals(ctrl)

    # test add_metadata - vanilla
    def test_add_metadata(self):
        """Basic metadata addition populates adata.obs with the file's
        columns, indexed by dataset."""
        sg = swan.SwanGraph()
        # annotation step deliberately skipped for this test
        db = 'files/chr11_and_Tcf3_no_gname.db'
        sg.add_transcriptome(db)
        ab = 'files/chr11_and_Tcf3_talon_abundance.tsv'
        sg.add_abundance(ab)
        meta = 'files/chr11_and_Tcf3_metadata.tsv'
        sg.add_metadata(meta)

        test = sg.adata.obs
        data = [['D12', '1', 'K562'],
                ['PB65_B017', '2', 'GM12878'],
                ['PB65_B018', '2', 'GM12878']]
        cols = ['dataset', 'cluster', 'sample']
        ctrl = pd.DataFrame(data=data, columns=cols)
        ctrl.index = ctrl.dataset
        ctrl = ctrl[test.columns]

        ctrl.sort_index(inplace=True)
        test.sort_index(inplace=True)

        print('test')
        print(test)
        print('control')
        print(ctrl)
        assert test.equals(ctrl)
###########################################################################
############### Related to high-level dataset addition ####################
###########################################################################
class TestDataset(object):
    """Tests for high-level dataset addition:
    add_dataset, add_transcriptome, add_annotation.
    """
    # TODO

    # tests add_transcriptome - added after adding an annotation
    def test_add_transcriptome_2(self):
        """A transcriptome added after an annotation flags only the novel
        transcripts/locations as annotation == False."""
        sg = swan.SwanGraph()
        sg.add_annotation('files/test_full_annotation.gtf')
        sg.add_transcriptome('files/test_full.gtf')

        # t_df
        sg.t_df = sg.t_df[['tid', 'annotation']]
        data = [['test1', True],
                ['test2', True],
                ['test3', False],
                ['test4', True],
                ['test5', True],
                ['test6', True]]
        cols = ['tid', 'annotation']
        ctrl_t_df = pd.DataFrame(data=data, columns=cols)
        ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
        ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')

        # first order to make them comparable
        # sort all values by their IDs
        sg.t_df.sort_index(inplace=True)
        ctrl_t_df.sort_index(inplace=True)

        # and order columns the same way
        ctrl_t_df = ctrl_t_df[sg.t_df.columns]

        print('test')
        print(sg.t_df)
        print('control')
        print(ctrl_t_df)
        assert (sg.t_df == ctrl_t_df).all(axis=0).all()

        # loc_df - new location at chr2, 65
        print('test')
        print(sg.loc_df)
        ind = (sg.loc_df.chrom == 'chr2') & (sg.loc_df.coord == 65)
        temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
        for i, entry in temp.iterrows():
            assert entry.annotation == False
        temp = sg.loc_df.loc[~ind]
        for i, entry in temp.iterrows():
            assert entry.annotation == True

    # tests add_transcriptome - vanilla
    def test_add_transcriptome_1(self):
        """Smoke test: adding a transcriptome to an empty graph works."""
        sg = swan.SwanGraph()
        sg.add_transcriptome('files/test_full.gtf')

    # tests add_annotation - transcriptome already in SG
    def test_add_annotation_2(self):
        """An annotation added after a transcriptome labels the overlap
        correctly (mirror of test_add_transcriptome_2)."""
        sg = swan.SwanGraph()
        sg.add_transcriptome('files/test_full.gtf')
        sg.add_annotation('files/test_full_annotation.gtf')

        # t_df
        sg.t_df = sg.t_df[['tid', 'annotation']]
        data = [['test1', True],
                ['test2', True],
                ['test3', False],
                ['test4', True],
                ['test5', True],
                ['test6', True]]
        cols = ['tid', 'annotation']
        ctrl_t_df = pd.DataFrame(data=data, columns=cols)
        ctrl_t_df = swan.create_dupe_index(ctrl_t_df, 'tid')
        ctrl_t_df = swan.set_dupe_index(ctrl_t_df, 'tid')

        # first order to make them comparable
        # sort all values by their IDs
        sg.t_df.sort_index(inplace=True)
        ctrl_t_df.sort_index(inplace=True)

        # and order columns the same way
        ctrl_t_df = ctrl_t_df[sg.t_df.columns]

        print('test')
        print(sg.t_df)
        print('control')
        print(ctrl_t_df)
        assert (sg.t_df == ctrl_t_df).all(axis=0).all()

        # loc_df - new location at chr2, 65
        print('test')
        print(sg.loc_df)
        ind = (sg.loc_df.chrom == 'chr2') & (sg.loc_df.coord == 65)
        temp = sg.loc_df.loc[ind, 'annotation'].to_frame()
        for i, entry in temp.iterrows():
            assert entry.annotation == False
        temp = sg.loc_df.loc[~ind]
        for i, entry in temp.iterrows():
            assert entry.annotation == True

    # tests add_annotation - vanilla
    def test_add_annotation_1(self):
        """Adding an annotation to an empty graph marks every transcript,
        edge, and location as annotated and Known."""
        sg = swan.SwanGraph()
        sg.add_annotation('files/test_full_annotation.gtf')
        # NOTE: a large block of commented-out expected-value scaffolding
        # for loc_df / edge_df / t_df was removed here; the assertions
        # below are the active checks.
        assert sg.annotation == True
        assert 'annotation' in sg.t_df.columns
        assert 'annotation' in sg.edge_df.columns
        assert 'annotation' in sg.loc_df.columns

        for ind, entry in sg.t_df.iterrows():
            assert entry.annotation == True
            assert entry.novelty == 'Known'
        for ind, entry in sg.edge_df.iterrows():
            assert entry.annotation == True
        for ind, entry in sg.loc_df.iterrows():
            assert entry.annotation == True

    # tests: label_annotated
    # label annotated transcripts
    def test_label_annotated(self):
        """label_annotated marks the given tids plus every edge and
        location on their paths as annotated."""
        sg = swan.SwanGraph()
        data = [[0, [0, 1]],
                [1, [2, 3]],
                [2, [4, 5]]]
        sg.t_df = pd.DataFrame(data=data, columns=['tid', 'path'])

        data = [[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 3, 4],
                [4, 4, 5], [5, 5, 6]]
        sg.edge_df = pd.DataFrame(data=data, columns=['edge_id', 'v1', 'v2'])

        data = [0, 1, 2, 3, 4, 5, 6]
        sg.loc_df = pd.DataFrame(data=data, columns=['vertex_id'])

        tids = [0, 1]
        sg.label_annotated(tids)

        ctrl_tids = [0, 1]
        tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
        assert set(ctrl_tids) == set(tids)

        ctrl_edges = [0, 1, 2, 3]
        edges = sg.edge_df.loc[sg.edge_df.annotation == True, 'edge_id'].tolist()
        assert set(ctrl_edges) == set(edges)

        ctrl_locs = [0, 1, 2, 3, 4]
        locs = sg.loc_df.loc[sg.loc_df.annotation == True, 'vertex_id'].tolist()
        assert set(ctrl_locs) == set(locs)

    # add to empty sg, don't add isms
    def test_add_transcriptome(self):
        """include_isms=False must exclude ISM transcripts from t_df."""
        sg = swan.SwanGraph()
        sg.add_transcriptome('files/test_novel_talon.gtf', include_isms=False)

        print(sg.t_df)
        assert "ISM" not in sg.t_df.novelty.unique()

    # tests if correct error is thrown when adding annotation to
    # sg that already has one
    def test_add_annotation_already(self):
        """Adding a second annotation raises an informative exception."""
        sg = swan.SwanGraph()
        sg.annotation = True
        with pytest.raises(Exception) as e:
            sg.add_annotation('files/Canx.gtf')
        assert 'Annotation already' in str(e.value)

    # add annotation to empty sg
    def test_add_annotation_empty_sg(self):
        """Annotation added first: everything is annotated and Known."""
        sg = swan.SwanGraph()
        sg.add_annotation('files/test_full.gtf')

        # check annotation columns
        assert all(sg.t_df.annotation.tolist())
        assert all(sg.edge_df.annotation.tolist())
        assert all(sg.loc_df.annotation.tolist())

        # check novelty column in t_df
        assert len(sg.t_df.loc[sg.t_df.novelty == 'Known']) == len(sg.t_df.index)

        # check annotation flag
        assert sg.annotation == True

    # add annotation to sg with data already in it
    def test_add_annotation_sg_data(self):
        """Annotation added on top of existing data labels only the
        overlapping transcripts/locations/edges as annotated."""
        sg = swan.SwanGraph()
        sg.add_transcriptome('files/test_novel.gtf')
        sg.add_annotation('files/test_known.gtf')

        # t_df
        annot_tids = ['test1', 'test2', 'test4']
        assert all(sg.t_df.loc[annot_tids, 'annotation'])
        ctrl_novel_tids = ['test3', 'test5']
        novel_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
        assert len(set(ctrl_novel_tids) - set(novel_tids)) == 0
        assert len(ctrl_novel_tids) == len(novel_tids)

        # make sure the novelty assignment worked
        annot_tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
        known_tids = sg.t_df.loc[sg.t_df.novelty == 'Known', 'tid'].tolist()
        assert set(annot_tids) == set(known_tids)

        annot_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
        known_tids = sg.t_df.loc[sg.t_df.novelty == 'Undefined', 'tid'].tolist()
        assert set(annot_tids) == set(known_tids)

        # loc_df
        ctrl_novel_locs = [('chr2', 65)]
        temp = sg.loc_df[sg.loc_df.annotation == False]
        chroms = temp.chrom.tolist()
        coords = temp.coord.tolist()
        novel_locs = [(chrom, coord) for chrom, coord in zip(chroms, coords)]
        print('control')
        print(ctrl_novel_locs)
        print('test')
        print(novel_locs)
        assert len(set(ctrl_novel_locs) - set(novel_locs)) == 0
        assert len(novel_locs) == len(ctrl_novel_locs)

        # edge_df
        edge_df = sg.add_edge_coords()
        edge_df = edge_df.loc[edge_df.annotation == False]
        ctrl_novel_edges = [('chr2', 75, 65, '-', 'exon'),
                            ('chr2', 65, 50, '-', 'intron'),
                            ('chr2', 80, 60, '-', 'intron'),
                            ('chr2', 60, 50, '-', 'exon')]
        chroms = edge_df.chrom.tolist()
        v1s = edge_df.v1_coord.tolist()
        v2s = edge_df.v2_coord.tolist()
        strands = edge_df.strand.tolist()
        etypes = edge_df.edge_type.tolist()
        novel_edges = [(chrom, v1, v2, strand, etype) for chrom, v1, v2, strand, etype
                       in zip(chroms, v1s, v2s, strands, etypes)]
        print('control')
        print(ctrl_novel_edges)
        print('test')
        print(novel_edges)
        assert len(set(ctrl_novel_edges) - set(novel_edges)) == 0
        assert len(ctrl_novel_edges) == len(novel_edges)

    # add annotation to sg with data where data contains dupe transcript
    def test_add_annotation_sg_data_dupe_tid(self):
        """Same as test_add_annotation_sg_data but the input data contains
        a duplicate transcript id."""
        sg = swan.SwanGraph()
        sg.add_transcriptome('files/test_novel_1.gtf')
        sg.add_annotation('files/test_known.gtf')

        # check with coord/chr because reindexing has not been
        # reimplemented yet
        # t_df
        annot_tids = ['test1', 'test2', 'test4']
        assert all(sg.t_df.loc[annot_tids, 'annotation'])
        ctrl_novel_tids = ['test3', 'test5']
        novel_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
        assert len(set(ctrl_novel_tids) - set(novel_tids)) == 0
        assert len(ctrl_novel_tids) == len(novel_tids)

        # make sure the novelty assignment worked
        annot_tids = sg.t_df.loc[sg.t_df.annotation == True, 'tid'].tolist()
        known_tids = sg.t_df.loc[sg.t_df.novelty == 'Known', 'tid'].tolist()
        assert set(annot_tids) == set(known_tids)

        annot_tids = sg.t_df.loc[sg.t_df.annotation == False, 'tid'].tolist()
        known_tids = sg.t_df.loc[sg.t_df.novelty == 'Undefined', 'tid'].tolist()
        assert set(annot_tids) == set(known_tids)

        # loc_df
        ctrl_novel_locs = [('chr2', 65)]
        temp = sg.loc_df[sg.loc_df.annotation == False]
        chroms = temp.chrom.tolist()
        coords = temp.coord.tolist()
        novel_locs = [(chrom, coord) for chrom, coord in zip(chroms, coords)]
        print('control')
        print(ctrl_novel_locs)
        print('test')
        print(novel_locs)
        assert len(set(ctrl_novel_locs) - set(novel_locs)) == 0
        assert len(novel_locs) == len(ctrl_novel_locs)

        # edge_df
        edge_df = sg.add_edge_coords()
        edge_df = edge_df.loc[edge_df.annotation == False]
        ctrl_novel_edges = [('chr2', 75, 65, '-', 'exon'),
                            ('chr2', 65, 50, '-', 'intron'),
                            ('chr2', 80, 60, '-', 'intron'),
                            ('chr2', 60, 50, '-', 'exon')]
        chroms = edge_df.chrom.tolist()
        v1s = edge_df.v1_coord.tolist()
        v2s = edge_df.v2_coord.tolist()
        strands = edge_df.strand.tolist()
        etypes = edge_df.edge_type.tolist()
        novel_edges = [(chrom, v1, v2, strand, etype) for chrom, v1, v2, strand, etype
                       in zip(chroms, v1s, v2s, strands, etypes)]
        print('control')
        print(ctrl_novel_edges)
        print('test')
        print(novel_edges)
        assert len(set(ctrl_novel_edges) - set(novel_edges)) == 0
        assert len(ctrl_novel_edges) == len(novel_edges)
###########################################################################
###################### Related to file parsing ############################
###########################################################################
class TestFiles(object):
    """Tests for GTF and TALON DB file parsing."""

    # tests GTF parsing
    def test_parse_gtf(self):
        """parse_gtf reproduces the reference transcript table."""
        gtf_file = 'files/Canx.gtf'
        t_df, exon_df, from_talon = swan.parse_gtf(gtf_file, True, False)

        t_df.index.name = 'tid_index'
        t_df = t_df.sort_values(by='tid_index')

        ctrl_t_df = pd.read_csv('files/Canx_transcript.tsv', sep='\t')
        ctrl_t_df.set_index('tid_index', inplace=True)
        ctrl_t_df = ctrl_t_df.sort_values(by='tid_index')

        # the reference stores exon ids as a comma-joined string
        ctrl_exons = ctrl_t_df.exons.tolist()
        ctrl_exons = [exons.split(',') for exons in ctrl_exons]
        ctrl_t_df['exons'] = ctrl_exons

        print(t_df == ctrl_t_df)
        assert (t_df == ctrl_t_df).all(axis=0).all()

    # tests TALON DB parsing - no pass_list
    def test_parse_db_1(self):
        """parse_db without a pass list keeps every transcript and edge."""
        db_file = 'files/test_full.db'
        # no pass list is given (removed an unused pass_list local that
        # misleadingly suggested one was in play)
        t_df, edge_df = swan.parse_db(db_file, None, False, True, False)

        ctrl_t_df, ctrl_e_df = get_test_transcript_exon_dicts()
        for key, item in ctrl_t_df.items():
            item['exons'] = swan.reorder_exons(item['exons'])

        ctrl_t_df = pd.DataFrame(ctrl_t_df).transpose()
        ctrl_e_df = pd.DataFrame(ctrl_e_df).transpose()

        # sort all values by their IDs
        edge_df.sort_index(inplace=True)
        t_df.sort_index(inplace=True)
        ctrl_e_df.sort_index(inplace=True)
        ctrl_t_df.sort_index(inplace=True)

        # and order columns the same way
        ctrl_e_df = ctrl_e_df[edge_df.columns]
        ctrl_t_df = ctrl_t_df[t_df.columns]

        assert 'novelty' in t_df.columns

        print('test')
        print(edge_df)
        print('control')
        print(ctrl_e_df)
        print(edge_df == ctrl_e_df)
        assert (edge_df == ctrl_e_df).all(axis=0).all()

        print('test')
        print(t_df)
        print(t_df.exons)
        print('control')
        print(ctrl_t_df)
        print(ctrl_t_df.exons)
        print(t_df == ctrl_t_df)
        assert (t_df == ctrl_t_df).all(axis=0).all()

    # tests TALON DB parsing - yes pass_list
    def test_parse_db_2(self):
        """parse_db with a pass list drops transcripts/edges not on it."""
        db_file = 'files/test_full.db'
        pass_list = 'files/test_full_pass_list.csv'
        t_df, edge_df = swan.parse_db(db_file, pass_list, False, True, False)

        ctrl_t_df, ctrl_e_df = get_test_transcript_exon_dicts()
        for key, item in ctrl_t_df.items():
            item['exons'] = swan.reorder_exons(item['exons'])

        # delete entries that weren't on pass list
        del ctrl_e_df['chr2_45_50_+_exon']
        del ctrl_t_df['test4']

        ctrl_t_df = pd.DataFrame(ctrl_t_df).transpose()
        ctrl_e_df = pd.DataFrame(ctrl_e_df).transpose()

        # sort all values by their IDs
        edge_df.sort_index(inplace=True)
        t_df.sort_index(inplace=True)
        ctrl_e_df.sort_index(inplace=True)
        ctrl_t_df.sort_index(inplace=True)

        # and order columns the same way
        ctrl_e_df = ctrl_e_df[edge_df.columns]
        ctrl_t_df = ctrl_t_df[t_df.columns]

        assert 'novelty' in t_df.columns

        print('test')
        print(edge_df)
        print('control')
        print(ctrl_e_df)
        print(edge_df == ctrl_e_df)
        assert (edge_df == ctrl_e_df).all(axis=0).all()

        print('test')
        print(t_df)
        print(t_df.exons)
        print('control')
        print(ctrl_t_df)
        print(ctrl_t_df.exons)
        print(t_df == ctrl_t_df)
        assert (t_df == ctrl_t_df).all(axis=0).all()
###########################################################################
####################### Related to DF creation ############################
###########################################################################
class TestCreateDFs(object):
# add_edge_coords, get_current_locs, get_current_edges,
# create_loc_dict, create_transcript_edge_dict create_dfs,
# tests add_edge_coords
def test_add_edge_coords(self):
    """add_edge_coords attaches v1_coord/v2_coord genomic coordinates to
    edge_df; compare against the reference TSV.

    (Removed two dead commented-out lines: an alternate add_transcriptome
    call and a debug print.)
    """
    sg = swan.SwanGraph()
    sg = add_transcriptome_no_reorder_gtf(sg, 'files/test_full.gtf')
    cols = ['edge_id', 'v1', 'v2', 'strand', 'edge_type',
            'v1_coord', 'v2_coord']

    edge_df = sg.add_edge_coords()
    print(edge_df.head())
    edge_df = edge_df[cols]

    ctrl_edge_df = pd.read_csv('files/test_add_edge_coords_result.tsv', sep='\t')
    ctrl_edge_df = ctrl_edge_df[cols]

    # first order to make them comparable
    # sort all values by their IDs
    edge_df.sort_values(by='edge_id', inplace=True)
    ctrl_edge_df.sort_values(by='edge_id', inplace=True)

    # and order columns the same way
    ctrl_edge_df = ctrl_edge_df[edge_df.columns]

    print('test')
    print(edge_df)
    print('control')
    print(ctrl_edge_df)
    assert (edge_df == ctrl_edge_df).all(axis=0).all()
# tests get_current_locs with an empty swangraph
def test_get_current_locs_empty_sg(self):
    """A fresh SwanGraph has no locations and a starting vertex id of -1."""
    graph = swan.SwanGraph()
    loc_map, last_vid = graph.get_current_locs()
    assert loc_map == {}
    assert last_vid == -1
# tests get_current_locs with a swangraph with data
def test_get_current_locs_sg_data(self):
    """get_current_locs returns the existing (chrom, coord) -> vertex_id
    mapping plus the highest vertex id currently in the graph."""
    graph = swan.SwanGraph()
    graph.loc_df = pd.DataFrame(
        data=[[0, 1, 2], [1, 1, 3], [2, 3, 50]],
        columns=['vertex_id', 'chrom', 'coord'])
    graph.t_df = pd.DataFrame(data=[0], columns=['tid'])

    loc_map, last_vid = graph.get_current_locs()

    expected = {(1, 2): 0, (1, 3): 1, (3, 50): 2}
    assert loc_map == expected
    assert last_vid == 2
# tests get_current_edges with an empty swangraph
def test_get_current_edges_empty_sg(self):
    """A fresh SwanGraph has no edges and a starting edge id of -1."""
    graph = swan.SwanGraph()
    edge_map, last_eid = graph.get_current_edges()
    assert edge_map == {}
    assert last_eid == -1
# tests get_current_edges in a sg with data
def test_get_current_edges_sg_data(self):
    """get_current_edges keys existing edges by
    (chrom, v1 coord, v2 coord, strand, edge_type) and reports the
    highest edge id."""
    graph = swan.SwanGraph()
    graph.loc_df = pd.DataFrame(
        data=[[0, 1, 2], [1, 1, 3], [2, 1, 50]],
        columns=['vertex_id', 'chrom', 'coord'])
    graph.edge_df = pd.DataFrame(
        data=[[0, 0, 1, '+', 'exon'],
              [1, 1, 2, '+', 'intron']],
        columns=['edge_id', 'v1', 'v2', 'strand', 'edge_type'])
    graph.t_df = pd.DataFrame(data=[0], columns=['tid'])

    edge_map, last_eid = graph.get_current_edges()

    expected = {
        (1, 2, 3, '+', 'exon'): {
            'edge_id': 0, 'edge_type': 'exon', 'v1': 0, 'v2': 1},
        (1, 3, 50, '+', 'intron'): {
            'edge_id': 1, 'edge_type': 'intron', 'v1': 1, 'v2': 2},
    }
    assert edge_map == expected
    assert last_eid == 1
# test create_loc_dict on an empty sg
# also checks to make sure exons that use the same loc
# don't result in dupe entries in loc_df
def test_create_loc_dict_empty_sg(self):
    """create_loc_dict on an empty graph assigns sequential vertex ids to
    unique exon coordinates; exons sharing a location must not create
    duplicate entries."""
    _, exons = get_test_transcript_exon_dicts()

    graph = swan.SwanGraph()
    loc_map = graph.create_loc_dict(exons)

    # expected first-seen order of unique (chrom, coord) pairs
    expected_order = [
        ('chr1', 1), ('chr1', 20), ('chr1', 25), ('chr1', 30),
        ('chr1', 35), ('chr1', 40), ('chr2', 100), ('chr2', 80),
        ('chr2', 75), ('chr2', 60), ('chr2', 50), ('chr2', 45),
        ('chr2', 65),
    ]
    expected = {loc: vid for vid, loc in enumerate(expected_order)}
    assert expected == loc_map
# tests create_loc_dict when locs already exist in sg
def test_create_loc_dict_sg_data(self):
    """Preexisting locations keep their vertex ids; new locations continue
    numbering after the current maximum."""
    _, exons = get_test_transcript_exon_dicts()

    # dummy preexisting data
    graph = swan.SwanGraph()
    graph.loc_df = pd.DataFrame(
        data=[[0, 'chr1', 1], [1, 'chr2', 80]],
        columns=['vertex_id', 'chrom', 'coord'])
    graph.t_df = pd.DataFrame(data=[0], columns=['tid'])

    loc_map = graph.create_loc_dict(exons)

    # the two preexisting locs come first, then new locs in first-seen order
    expected_order = [
        ('chr1', 1), ('chr2', 80), ('chr1', 20), ('chr1', 25),
        ('chr1', 30), ('chr1', 35), ('chr1', 40), ('chr2', 100),
        ('chr2', 75), ('chr2', 60), ('chr2', 50), ('chr2', 45),
        ('chr2', 65),
    ]
    expected = {loc: vid for vid, loc in enumerate(expected_order)}

    print('test')
    print(loc_map)
    print('control')
    print(expected)
    assert expected == loc_map
# tests create_transcript_edge_dict empty swangraph
# tests create_transcript_edge_dict empty swangraph
def test_create_transcript_edge_dict_emtpy_sg(self):
    """On an empty SwanGraph, create_transcript_edge_dicts numbers edges
    sequentially from 0 and records each transcript's path as the list of
    edge ids it traverses.

    NOTE(review): 'emtpy' in the method name looks like a typo for
    'empty'; kept as-is because the name is the test's identifier.
    """
    transcripts, exons = get_test_transcript_exon_dicts()

    sg = swan.SwanGraph()
    locs = sg.create_loc_dict(exons)
    transcripts, edges = sg.create_transcript_edge_dicts(transcripts, exons, locs)

    # just compare the paths for the transcripts, which is the only
    # part modified by this function
    transcripts = dict([(key, item['path']) for key, item in transcripts.items()])
    ctrl_transcript_paths = {
        'test1': [0,1,2,3,4],
        'test2': [5,6,7,8,9],
        'test3': [5,6,10,11,9],
        'test4': [12],
        'test5': [5,13,14]
    }
    assert(transcripts == ctrl_transcript_paths)

    # expected edges, keyed by (chrom, v1 coord, v2 coord, strand, type);
    # edge ids follow first-seen order across the transcripts above
    ctrl_edges = {
        ('chr1', 1, 20, '+', 'exon'): {
            'edge_id': 0,
            'edge_type': 'exon',
            'v1': 0,
            'v2': 1
        },
        ('chr1', 20, 25, '+', 'intron'): {
            'edge_id': 1,
            'edge_type': 'intron',
            'v1': 1,
            'v2': 2
        },
        ('chr1', 25, 30, '+', 'exon'): {
            'edge_id': 2,
            'edge_type': 'exon',
            'v1': 2,
            'v2': 3
        },
        ('chr1', 30, 35, '+', 'intron'): {
            'edge_id': 3,
            'edge_type': 'intron',
            'v1': 3,
            'v2': 4
        },
        ('chr1', 35, 40, '+', 'exon'): {
            'edge_id': 4,
            'edge_type': 'exon',
            'v1': 4,
            'v2': 5
        },
        ('chr2', 100, 80, '-', 'exon'): {
            'edge_id': 5,
            'edge_type': 'exon',
            'v1': 6,
            'v2': 7
        },
        ('chr2', 80, 75, '-', 'intron'): {
            'edge_id': 6,
            'edge_type': 'intron',
            'v1': 7,
            'v2': 8
        },
        ('chr2', 75, 60, '-', 'exon'): {
            'edge_id': 7,
            'edge_type': 'exon' ,
            'v1': 8,
            'v2': 9
        },
        ('chr2', 60, 50, '-', 'intron'): {
            'edge_id': 8,
            'edge_type': 'intron',
            'v1': 9,
            'v2': 10
        },
        ('chr2', 50, 45, '-', 'exon'): {
            'edge_id': 9,
            'edge_type': 'exon',
            'v1': 10,
            'v2': 11
        },
        ('chr2', 75, 65, '-', 'exon'): {
            'edge_id': 10,
            'edge_type': 'exon',
            'v1': 8,
            'v2': 12
        },
        ('chr2', 65, 50, '-', 'intron'): {
            'edge_id': 11,
            'edge_type': 'intron',
            'v1': 12,
            'v2': 10
        },
        ('chr2', 45, 50, '+', 'exon'): {
            'edge_id': 12,
            'edge_type': 'exon',
            'v1': 11,
            'v2': 10
        },
        ('chr2', 80, 60, '-', 'intron'): {
            'edge_id': 13,
            'edge_type': 'intron',
            'v1': 7,
            'v2': 9
        },
        ('chr2', 60, 50, '-', 'exon'): {
            'edge_id': 14,
            'edge_type': 'exon',
            'v1': 9,
            'v2': 10
        }
    }
    assert(edges == ctrl_edges)
# tests create_transcript_edge_dict with edges already in swangraph
# tests create_transcript_edge_dict with edges already in swangraph
def test_create_transcript_edge_dict_edge_sg(self):
    """With edges already present, create_transcript_edge_dicts reuses the
    existing edge ids/vertex ids and numbers new edges after the current
    maximum edge id."""
    transcripts, exons = get_test_transcript_exon_dicts()

    # add some dummy data
    sg = swan.SwanGraph()
    data = [[0, 'chr1', 1],
            [1, 'chr2', 20],
            [2, 'chr2', 100],
            [3, 'chr2', 80]]
    columns = ['vertex_id', 'chrom', 'coord']
    sg.loc_df = pd.DataFrame(data=data, columns=columns)

    cols = ['tid']
    data = [0]
    sg.t_df = pd.DataFrame(data=data, columns=cols)

    locs = sg.create_loc_dict(exons)

    # two preexisting edges: ids 0 and 1 must be reused below
    data = [[0, 0, 1, '+', 'exon'],
            [1, 2, 3, '-', 'exon']]
    columns = ['edge_id', 'v1', 'v2', 'strand', 'edge_type']
    sg.edge_df = pd.DataFrame(data=data, columns=columns)

    transcripts, edges = sg.create_transcript_edge_dicts(transcripts, exons, locs)

    # just compare the paths for the transcripts, which is the only
    # part modified by this function
    transcripts = dict([(key, item['path']) for key, item in transcripts.items()])
    ctrl_transcript_paths = {
        'test1': [0,2,3,4,5],
        'test2': [1,6,7,8,9],
        'test3': [1,6,10,11,9],
        'test4': [12],
        'test5': [1,13,14]
    }
    assert(transcripts == ctrl_transcript_paths)

    # expected edges; preexisting ids 0/1 kept, new ids start at 2
    ctrl_edges = {
        ('chr1', 1, 20, '+', 'exon'): {
            'edge_id': 0,
            'edge_type': 'exon',
            'v1': 0,
            'v2': 1
        },
        ('chr1', 20, 25, '+', 'intron'): {
            'edge_id': 2,
            'edge_type': 'intron',
            'v1': 4,
            'v2': 5
        },
        ('chr1', 25, 30, '+', 'exon'): {
            'edge_id': 3,
            'edge_type': 'exon',
            'v1': 5,
            'v2': 6
        },
        ('chr1', 30, 35, '+', 'intron'): {
            'edge_id': 4,
            'edge_type': 'intron',
            'v1': 6,
            'v2': 7
        },
        ('chr1', 35, 40, '+', 'exon'): {
            'edge_id': 5,
            'edge_type': 'exon',
            'v1': 7,
            'v2': 8
        },
        ('chr2', 100, 80, '-', 'exon'): {
            'edge_id': 1,
            'edge_type': 'exon',
            'v1': 2,
            'v2': 3
        },
        ('chr2', 80, 75, '-', 'intron'): {
            'edge_id': 6,
            'edge_type': 'intron',
            'v1': 3,
            'v2': 9
        },
        ('chr2', 75, 60, '-', 'exon'): {
            'edge_id': 7,
            'edge_type': 'exon' ,
            'v1': 9,
            'v2': 10
        },
        ('chr2', 60, 50, '-', 'intron'): {
            'edge_id': 8,
            'edge_type': 'intron',
            'v1': 10,
            'v2': 11
        },
        ('chr2', 50, 45, '-', 'exon'): {
            'edge_id': 9,
            'edge_type': 'exon',
            'v1': 11,
            'v2': 12
        },
        ('chr2', 75, 65, '-', 'exon'): {
            'edge_id': 10,
            'edge_type': 'exon',
            'v1': 9,
            'v2': 13
        },
        ('chr2', 65, 50, '-', 'intron'): {
            'edge_id': 11,
            'edge_type': 'intron',
            'v1': 13,
            'v2': 11
        },
        ('chr2', 45, 50, '+', 'exon'): {
            'edge_id': 12,
            'edge_type': 'exon',
            'v1': 12,
            'v2': 11
        },
        ('chr2', 80, 60, '-', 'intron'): {
            'edge_id': 13,
            'edge_type': 'intron',
            'v1': 3,
            'v2': 10
        },
        ('chr2', 60, 50, '-', 'exon'): {
            'edge_id': 14,
            'edge_type': 'exon',
            'v1': 10,
            'v2': 11
        }
    }
    assert(edges == ctrl_edges)
# # tests create_transcript_edge_dict where transcripts already
# # # exist in the swangraph
# # def test_create_transcript_edge_dict_edge_t_sg(self):
# # pass
# # # TODO
#
# tests create_dfs with an empty sg
# also ensures that empty dict -> df -> dict conversion doesn't screw up
def test_create_dfs_empty_sg(self):
    """create_dfs on an empty SwanGraph builds loc_df, edge_df, and t_df
    matching the reference TSVs; also ensures the empty
    dict -> df -> dict conversion doesn't screw up."""
    transcripts, exons = get_test_transcript_exon_dicts()
    sg = swan.SwanGraph()
    loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, False)

    # expected loc_df
    ctrl_loc_df = pd.read_csv('files/test_loc_df.tsv', sep='\t')
    ctrl_loc_df = ctrl_loc_df.set_index('vertex_id_index')
    ctrl_loc_df.index.name = 'vertex_id'

    # expected edge_df; drop the debugging-only coordinate columns
    ctrl_edge_df = pd.read_csv('files/test_edge_df.tsv', sep='\t')
    ctrl_edge_df = ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1)
    ctrl_edge_df = ctrl_edge_df.set_index('edge_id_index')
    ctrl_edge_df.index.name = 'edge_id'

    # expected t_df; drop/reformat the debugging-only columns
    ctrl_t_df = pd.read_csv('files/test_t_df.tsv', sep='\t')
    ctrl_t_df = ctrl_t_df.set_index('tid_index')
    ctrl_t_df.index.name = 'tid'
    ctrl_t_df = ctrl_t_df.drop(['loc_path', 'novelty'], axis=1)
    ctrl_t_df = ctrl_t_df.rename({'edge_path': 'path'}, axis=1)
    ctrl_t_df['path'] = ctrl_t_df.apply(
        lambda x: [int(n) for n in x.path.split(',')], axis=1)

    check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs when from_talon = True
def test_create_dfs_empty_sg_from_talon(self):
    """create_dfs with from_talon=True keeps the novelty column in t_df."""
    transcripts, exons = get_test_transcript_exon_dicts()
    sg = swan.SwanGraph()
    loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, True)

    # expected loc_df
    ctrl_loc_df = pd.read_csv('files/test_loc_df.tsv', sep='\t')
    ctrl_loc_df = ctrl_loc_df.set_index('vertex_id_index')
    ctrl_loc_df.index.name = 'vertex_id'

    # expected edge_df; drop the debugging-only coordinate columns
    ctrl_edge_df = pd.read_csv('files/test_edge_df.tsv', sep='\t')
    ctrl_edge_df = ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1)
    ctrl_edge_df = ctrl_edge_df.set_index('edge_id_index')
    ctrl_edge_df.index.name = 'edge_id'

    # expected t_df; drop/reformat the debugging-only columns
    # (novelty is kept here since from_talon=True)
    ctrl_t_df = pd.read_csv('files/test_t_df.tsv', sep='\t')
    ctrl_t_df = ctrl_t_df.set_index('tid_index')
    ctrl_t_df.index.name = 'tid'
    ctrl_t_df = ctrl_t_df.drop(['loc_path'], axis=1)
    ctrl_t_df = ctrl_t_df.rename({'edge_path': 'path'}, axis=1)
    ctrl_t_df['path'] = ctrl_t_df.apply(
        lambda x: [int(n) for n in x.path.split(',')], axis=1)
    ctrl_t_df = ctrl_t_df[t_df.columns]

    check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs in a swangraph with data
def test_create_dfs_data_sg(self):
    """create_dfs merges new transcripts into a SwanGraph that already
    holds data, preserving existing ids."""
    transcripts, exons = get_test_transcript_exon_dicts()
    # test2 is already in the preexisting graph; don't re-add it
    del transcripts['test2']
    sg = swan.SwanGraph()

    # add dummy data
    # loc_df - format
    loc_df = pd.read_csv('files/test_preexisting_loc_df.tsv', sep='\t')
    loc_df.set_index('vertex_id_index', inplace=True)
    loc_df.index.name = 'vertex_id'

    # edge_df - format and remove the columns that are there
    # just for debugging purposes
    edge_df = pd.read_csv('files/test_preexisting_edge_df.tsv', sep='\t')
    edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
    edge_df.set_index('edge_id_index', inplace=True)
    edge_df.index.name = 'edge_id'

    # t_df - remove and reformat columns that are there for debugging
    t_df = pd.read_csv('files/test_preexisting_t_df.tsv', sep='\t')
    t_df.set_index('tid_index', inplace=True)
    t_df.index.name = 'tid'
    t_df.drop(['loc_path'], axis=1, inplace=True)
    t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
    t_df['path'] = t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
    # (removed a no-op `t_df = t_df[t_df.columns]` reindex of a frame
    # by its own columns)

    sg.loc_df = loc_df
    sg.edge_df = edge_df
    sg.t_df = t_df

    loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, True)

    # control data
    # loc_df - format
    ctrl_loc_df = pd.read_csv('files/test_preexisting_result_loc_df.tsv', sep='\t')
    ctrl_loc_df.set_index('vertex_id_index', inplace=True)
    ctrl_loc_df.index.name = 'vertex_id'

    # edge_df - format and remove the columns that are there
    # just for debugging purposes
    ctrl_edge_df = pd.read_csv('files/test_preexisting_result_edge_df.tsv', sep='\t')
    ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
    ctrl_edge_df.set_index('edge_id_index', inplace=True)
    ctrl_edge_df.index.name = 'edge_id'

    # t_df - remove and reformat columns that are there for debugging
    ctrl_t_df = pd.read_csv('files/test_preexisting_result_t_df.tsv', sep='\t')
    ctrl_t_df.set_index('tid_index', inplace=True)
    ctrl_t_df.index.name = 'tid'
    ctrl_t_df.drop(['loc_path'], axis=1, inplace=True)
    ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
    ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
    ctrl_t_df = ctrl_t_df[t_df.columns]

    check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs in sg with data where existing data has novelty
# and added dataset does not
def test_create_dfs_data_sg_nov1(self):
    """create_dfs where the preexisting data carries novelty labels but
    the added dataset (from_talon=False) does not: new transcripts end up
    with novelty 'Undefined'."""
    transcripts, exons = get_test_transcript_exon_dicts()
    # to do - remove transcript that's already there
    sg = swan.SwanGraph()

    # add dummy data
    # loc_df - format
    loc_df = pd.read_csv('files/test_preexisting_loc_df.tsv', sep='\t')
    loc_df.set_index('vertex_id_index', inplace=True)
    loc_df.index.name = 'vertex_id'

    # edge_df - format and remove the columns that are there
    # just for debugging purposes
    edge_df = pd.read_csv('files/test_preexisting_edge_df.tsv', sep='\t')
    edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
    edge_df.set_index('edge_id_index', inplace=True)
    edge_df.index.name = 'edge_id'

    # t_df - remove and reformat columns that are there for debugging
    t_df = pd.read_csv('files/test_preexisting_t_df.tsv', sep='\t')
    t_df.set_index('tid_index', inplace=True)
    t_df.index.name = 'tid'
    t_df.drop(['loc_path'], axis=1, inplace=True)
    t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
    t_df['path'] = t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
    # (removed a no-op `t_df = t_df[t_df.columns]` reindex of a frame
    # by its own columns)

    sg.loc_df = loc_df
    sg.edge_df = edge_df
    sg.t_df = t_df

    loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, False)

    # control data
    # loc_df - format
    ctrl_loc_df = pd.read_csv('files/test_preexisting_result_loc_df.tsv', sep='\t')
    ctrl_loc_df.set_index('vertex_id_index', inplace=True)
    ctrl_loc_df.index.name = 'vertex_id'

    # edge_df - format and remove the columns that are there
    # just for debugging purposes
    ctrl_edge_df = pd.read_csv('files/test_preexisting_result_edge_df.tsv', sep='\t')
    ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
    ctrl_edge_df.set_index('edge_id_index', inplace=True)
    ctrl_edge_df.index.name = 'edge_id'

    # t_df - remove and reformat columns that are there for debugging
    ctrl_t_df = pd.read_csv('files/test_preexisting_result_t_df.tsv', sep='\t')
    ctrl_t_df.set_index('tid_index', inplace=True)
    ctrl_t_df.index.name = 'tid'
    ctrl_t_df.drop(['loc_path'], axis=1, inplace=True)
    ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
    ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
    ctrl_t_df = ctrl_t_df[t_df.columns]

    # remove novelty for entries that are new
    new_tids = ['test1', 'test3', 'test4', 'test5']
    ctrl_t_df.loc[ctrl_t_df.tid.isin(new_tids), 'novelty'] = 'Undefined'

    check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
# tests create_dfs with preexisting data and a duplicate transcript
# being added
# also tests that old data (novelty in this case) is not overwritten
def test_create_dfs_data_sg_dupe(self):
    """create_dfs() with preexisting graph data and a duplicate transcript added.

    Also checks that data already in the graph (the 'novelty' column here)
    is not overwritten by the incoming transcripts.
    """
    transcripts, exons = get_test_transcript_exon_dicts()
    sg = swan.SwanGraph()
    # add dummy preexisting data to the graph
    # loc_df - load and format (index on vertex id)
    loc_df = pd.read_csv('files/test_preexisting_loc_df.tsv', sep='\t')
    loc_df.set_index('vertex_id_index', inplace=True)
    loc_df.index.name = 'vertex_id'
    # edge_df - load, drop the coordinate columns that exist in the fixture
    # only for debugging purposes, and index on edge id
    edge_df = pd.read_csv('files/test_preexisting_edge_df.tsv', sep='\t')
    edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
    edge_df.set_index('edge_id_index', inplace=True)
    edge_df.index.name = 'edge_id'
    # t_df - drop/rename debugging columns and parse the edge path
    # (comma-separated string in the fixture) into a list of ints
    t_df = pd.read_csv('files/test_preexisting_t_df.tsv', sep='\t')
    t_df.set_index('tid_index', inplace=True)
    t_df.index.name = 'tid'
    t_df.drop(['loc_path'], axis=1, inplace=True)
    t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
    t_df['path'] = t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
    # NOTE(review): self-indexing is a no-op — presumably meant to pin a
    # column order mirroring the ctrl frames below; confirm intent
    t_df = t_df[t_df.columns]
    sg.loc_df = loc_df
    sg.edge_df = edge_df
    sg.t_df = t_df
    # run the code under test on top of the preexisting graph
    loc_df, edge_df, t_df = sg.create_dfs(transcripts, exons, False)
    # control data: expected post-merge frames, formatted the same way
    # loc_df - format
    ctrl_loc_df = pd.read_csv('files/test_preexisting_result_loc_df.tsv', sep='\t')
    ctrl_loc_df.set_index('vertex_id_index', inplace=True)
    ctrl_loc_df.index.name = 'vertex_id'
    # edge_df - format and remove the debugging-only coordinate columns
    ctrl_edge_df = pd.read_csv('files/test_preexisting_result_edge_df.tsv', sep='\t')
    ctrl_edge_df.drop(['v2_coord', 'v1_coord'], axis=1, inplace=True)
    ctrl_edge_df.set_index('edge_id_index', inplace=True)
    ctrl_edge_df.index.name = 'edge_id'
    # t_df - remove/reformat the debugging columns, as above
    ctrl_t_df = pd.read_csv('files/test_preexisting_result_t_df.tsv', sep='\t')
    ctrl_t_df.set_index('tid_index', inplace=True)
    ctrl_t_df.index.name = 'tid'
    ctrl_t_df.drop(['loc_path'], axis=1, inplace=True)
    ctrl_t_df.rename({'edge_path': 'path'}, axis=1, inplace=True)
    ctrl_t_df['path'] = ctrl_t_df.apply(lambda x: [int(n) for n in x.path.split(',')], axis=1)
    ctrl_t_df = ctrl_t_df[t_df.columns]
    # entries that were newly added get no annotated novelty
    new_tids = ['test1', 'test3', 'test4', 'test5']
    ctrl_t_df.loc[ctrl_t_df.tid.isin(new_tids), 'novelty'] = 'Undefined'
    check_dfs(loc_df, ctrl_loc_df, edge_df, ctrl_edge_df, t_df, ctrl_t_df)
def check_dfs(loc_df, ctrl_loc_df,
              edge_df, ctrl_edge_df,
              t_df, ctrl_t_df):
    """Assert that each produced dataframe equals its control counterpart.

    Rows are aligned by sorting every frame on its index (in place) and the
    control columns are reordered to match the test frame before comparing.
    """
    pairs = [(loc_df, ctrl_loc_df), (edge_df, ctrl_edge_df), (t_df, ctrl_t_df)]
    # first make them comparable: sort all frames by their IDs (the index)
    for test_df, ctrl_df in pairs:
        test_df.sort_index(inplace=True)
        ctrl_df.sort_index(inplace=True)
    for position, (test_df, ctrl_df) in enumerate(pairs):
        # order the control's columns the same way as the test frame's
        ctrl_df = ctrl_df[test_df.columns]
        print('test')
        print(test_df)
        print('control')
        print(ctrl_df)
        if position == 0:
            # the loc pair additionally prints the elementwise comparison
            print(ctrl_df == test_df)
        assert (test_df == ctrl_df).all(axis=0).all()
def add_transcriptome_no_reorder_gtf(sg, gtf):
    """Parse *gtf* and attach its dataframes to the SwanGraph *sg* without reordering."""
    parsed_t_df, parsed_exon_df, from_talon = swan.parse_gtf(gtf, False, False)
    sg.loc_df, sg.edge_df, sg.t_df = sg.create_dfs(parsed_t_df, parsed_exon_df, from_talon)
    return sg
def get_test_transcript_exon_dicts():
    """Build the transcript and exon fixture dictionaries used by the create_dfs tests.

    Returns:
        tuple: (transcripts, exons) — both dicts keyed by ID.

    Fixture features exercised:
    - exons that are "out of order" on both strands
    - several transcripts from the same gene
    - transcripts from the - strand and the + strand
    - locations shared across exons, and the same coordinates on both strands
    """
    transcripts = {
        'test1': {  # + strand
            'gid': 'test1_gid',
            'gname': 'test1_gname',
            'tid': 'test1',
            'tname': 'test1_tname',
            'strand': '+',
            'novelty': 'Known',
            'exons': ['chr1_1_20_+_exon',
                      'chr1_35_40_+_exon',  # out of order exon (+)
                      'chr1_25_30_+_exon']  # out of order exon (+)
        },
        'test2': {  # - strand
            'gid': 'test2_gid',
            'gname': 'test2_gname',
            'tid': 'test2',
            'tname': 'test2_tname',
            'strand': '-',
            'novelty': 'Known',
            'exons': ['chr2_100_80_-_exon',  # duplicated exon/locations
                      'chr2_75_60_-_exon',   # same edge but intron vs. exon
                      'chr2_50_45_-_exon']   # duplicated exon/locations, same exon different strand
        },
        'test3': {  # - strand, same gene as test2
            'gid': 'test2_gid',
            'gname': 'test2_gname',
            'tid': 'test3',
            'tname': 'test3_tname',
            'strand': '-',
            'novelty': 'NIC',
            'exons': ['chr2_100_80_-_exon',  # duplicated exon/locations
                      'chr2_50_45_-_exon',   # out of order exon (-), duplicated exon/locations
                      'chr2_75_65_-_exon']   # out of order exon (-)
        },
        'test4': {  # + strand
            'gid': 'test4_gid',
            'gname': 'test4_gname',
            'tid': 'test4',
            'tname': 'test4_tname',
            'strand': '+',
            'novelty': 'Known',
            'exons': ['chr2_45_50_+_exon']  # same exon different strand
        },
        'test5': {  # - strand, same gene as test2
            'gid': 'test2_gid',
            'gname': 'test2_gname',
            'tid': 'test5',
            'tname': 'test5_tname',
            'strand': '-',
            'novelty': 'ISM',
            'exons': ['chr2_100_80_-_exon',  # duplicated exon/locations
                      'chr2_60_50_-_exon']   # same edge but intron vs. exon
        },
    }
    # features: locations that are shared across exons.
    # Fix: the original literal defined 'chr2_100_80_-_exon' twice with
    # identical values; the second entry silently overwrote the first, so
    # dropping the duplicate leaves the returned dict unchanged.
    exons = {
        'chr1_1_20_+_exon': {
            'eid': 'chr1_1_20_+_exon',
            'chrom': 'chr1',
            'v1': 1,
            'v2': 20,
            'strand': '+',
            'edge_type': 'exon'
        },
        'chr1_25_30_+_exon': {
            'eid': 'chr1_25_30_+_exon',
            'chrom': 'chr1',
            'v1': 25,
            'v2': 30,
            'strand': '+',
            'edge_type': 'exon'
        },
        'chr1_35_40_+_exon': {
            'eid': 'chr1_35_40_+_exon',
            'chrom': 'chr1',
            'v1': 35,
            'v2': 40,
            'strand': '+',
            'edge_type': 'exon'
        },
        'chr2_100_80_-_exon': {
            'eid': 'chr2_100_80_-_exon',
            'chrom': 'chr2',
            'v1': 100,
            'v2': 80,
            'strand': '-',
            'edge_type': 'exon'
        },
        'chr2_75_60_-_exon': {
            'eid': 'chr2_75_60_-_exon',
            'chrom': 'chr2',
            'v1': 75,
            'v2': 60,
            'strand': '-',
            'edge_type': 'exon'
        },
        'chr2_50_45_-_exon': {
            'eid': 'chr2_50_45_-_exon',
            'chrom': 'chr2',
            'v1': 50,
            'v2': 45,
            'strand': '-',
            'edge_type': 'exon'
        },
        'chr2_75_65_-_exon': {
            'eid': 'chr2_75_65_-_exon',
            'chrom': 'chr2',
            'v1': 75,
            'v2': 65,
            'strand': '-',
            'edge_type': 'exon'
        },
        'chr2_45_50_+_exon': {
            'eid': 'chr2_45_50_+_exon',
            'chrom': 'chr2',
            'v1': 45,
            'v2': 50,
            'strand': '+',
            'edge_type': 'exon'
        },
        # NOTE(review): this entry has no 'edge_type' key in the original
        # fixture — kept as-is; presumably deliberate, confirm with the tests
        'chr2_60_50_-_exon': {
            'eid': 'chr2_60_50_-_exon',
            'chrom': 'chr2',
            'v1': 60,
            'v2': 50,
            'strand': '-'
        }
    }
    return transcripts, exons
| 35.628931 | 108 | 0.527685 | 6,635 | 50,985 | 3.787189 | 0.049435 | 0.030922 | 0.030365 | 0.011382 | 0.829035 | 0.789597 | 0.728669 | 0.714024 | 0.67968 | 0.659663 | 0 | 0.039351 | 0.316152 | 50,985 | 1,430 | 109 | 35.653846 | 0.681351 | 0.13757 | 0 | 0.700971 | 0 | 0 | 0.149985 | 0.036257 | 0 | 0 | 0 | 0.000699 | 0.068932 | 1 | 0.03301 | false | 0.003884 | 0.006796 | 0 | 0.045631 | 0.081553 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8c3163c5ef19b0a38f2d7eb603b2c68851d5e7ba | 2,938 | py | Python | kaon_production/tasks.py | lh7326/UA_model | 009418dd94d3b7f9289a09858ba38cb5bb9129c5 | [
"Unlicense"
] | null | null | null | kaon_production/tasks.py | lh7326/UA_model | 009418dd94d3b7f9289a09858ba38cb5bb9129c5 | [
"Unlicense"
] | null | null | null | kaon_production/tasks.py | lh7326/UA_model | 009418dd94d3b7f9289a09858ba38cb5bb9129c5 | [
"Unlicense"
] | null | null | null | from kaon_production.Task import Task
from kaon_production.utils import make_partial_cross_section_for_parameters
class TaskFullFit(Task):
    """Fit task in which every model parameter is free to vary."""

    def _set_up(self):
        """Release all parameters and rebuild the partial cross-section."""
        self.parameters.release_all_parameters()
        cross_section = make_partial_cross_section_for_parameters(
            self.k_meson_mass, self.alpha, self.hc_squared, self.parameters
        )
        self.partial_f = cross_section
class TaskFullFitOnlyCharged(Task):
    """Full fit restricted to the charged-kaon data points."""

    def _set_up(self):
        """Release all parameters, rebuild the cross-section, keep charged data only."""
        self.parameters.release_all_parameters()
        self.partial_f = make_partial_cross_section_for_parameters(
            self.k_meson_mass, self.alpha, self.hc_squared, self.parameters
        )
        # keep only the (t, cross-section, error) triples whose t is charged
        charged_triples = [
            triple
            for triple in zip(self.ts_fit, self.css_fit, self.errors_fit)
            if triple[0].is_charged
        ]
        self.ts_fit, self.css_fit, self.errors_fit = zip(*charged_triples)
class TaskFixedResonancesFit(Task):
    """Fit with the resonance parameters frozen; everything else varies."""

    def _set_up(self):
        """Release all parameters, refreeze resonances, rebuild the cross-section."""
        params = self.parameters
        params.release_all_parameters()
        params.fix_resonances()
        self.partial_f = make_partial_cross_section_for_parameters(
            self.k_meson_mass, self.alpha, self.hc_squared, params
        )
class TaskFixedResonancesFitOnlyCharged(Task):
    """Fixed-resonance fit restricted to the charged-kaon data points."""

    def _set_up(self):
        """Freeze resonances, rebuild the cross-section, keep charged data only."""
        params = self.parameters
        params.release_all_parameters()
        params.fix_resonances()
        self.partial_f = make_partial_cross_section_for_parameters(
            self.k_meson_mass, self.alpha, self.hc_squared, params
        )
        # keep only the (t, cross-section, error) triples whose t is charged
        charged_triples = [
            triple
            for triple in zip(self.ts_fit, self.css_fit, self.errors_fit)
            if triple[0].is_charged
        ]
        self.ts_fit, self.css_fit, self.errors_fit = zip(*charged_triples)
class TaskFixedCouplingConstants(Task):
    """Fit with every coupling-constant parameter (name prefix 'a_') frozen."""

    def _set_up(self):
        """Release everything, refreeze the 'a_*' couplings, rebuild the cross-section."""
        self.parameters.release_all_parameters()
        frozen_names = [p.name for p in self.parameters if p.name[:2] == 'a_']
        self.parameters.fix_parameters(frozen_names)
        self.partial_f = make_partial_cross_section_for_parameters(
            self.k_meson_mass, self.alpha, self.hc_squared, self.parameters
        )
class TaskFixedCouplingConstantsOnlyCharged(Task):
    """Fixed-coupling fit restricted to the charged-kaon data points."""

    def _set_up(self):
        """Freeze 'a_*' couplings, rebuild the cross-section, keep charged data only."""
        self.parameters.release_all_parameters()
        frozen_names = [p.name for p in self.parameters if p.name[:2] == 'a_']
        self.parameters.fix_parameters(frozen_names)
        self.partial_f = make_partial_cross_section_for_parameters(
            self.k_meson_mass, self.alpha, self.hc_squared, self.parameters
        )
        # keep only the (t, cross-section, error) triples whose t is charged
        charged_triples = [
            triple
            for triple in zip(self.ts_fit, self.css_fit, self.errors_fit)
            if triple[0].is_charged
        ]
        self.ts_fit, self.css_fit, self.errors_fit = zip(*charged_triples)
class TaskFixAccordingToParametersFit(Task):
    """Fit that leaves the parameters' fixed/free flags exactly as provided."""

    def _set_up(self):
        """Rebuild the partial cross-section without touching parameter freezing."""
        cross_section = make_partial_cross_section_for_parameters(
            self.k_meson_mass, self.alpha, self.hc_squared, self.parameters
        )
        self.partial_f = cross_section
| 33.386364 | 84 | 0.670184 | 365 | 2,938 | 5.046575 | 0.147945 | 0.144408 | 0.06949 | 0.099891 | 0.865364 | 0.865364 | 0.837134 | 0.837134 | 0.837134 | 0.837134 | 0 | 0.002244 | 0.241661 | 2,938 | 87 | 85 | 33.770115 | 0.824506 | 0 | 0 | 0.65625 | 0 | 0 | 0.001361 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109375 | false | 0 | 0.03125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
8b069ae2a6bd862b0d89bf558ecc010cc521fe7e | 45 | py | Python | pystiche/data/collections/__init__.py | dooglewoogle/pystiche | 14b61123ede2abdb00daaa5b4981de6d7edaf034 | [
"BSD-3-Clause"
] | 129 | 2019-10-04T00:23:54.000Z | 2021-04-24T06:41:37.000Z | pystiche/data/collections/__init__.py | dooglewoogle/pystiche | 14b61123ede2abdb00daaa5b4981de6d7edaf034 | [
"BSD-3-Clause"
] | 334 | 2019-10-01T08:10:44.000Z | 2021-04-25T19:39:09.000Z | pystiche/data/collections/__init__.py | dooglewoogle/pystiche | 14b61123ede2abdb00daaa5b4981de6d7edaf034 | [
"BSD-3-Clause"
] | 32 | 2021-05-05T05:06:18.000Z | 2022-03-17T09:14:47.000Z | from .download import *
from .local import *
| 15 | 23 | 0.733333 | 6 | 45 | 5.5 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.177778 | 45 | 2 | 24 | 22.5 | 0.891892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8b0d15d7825d544cd685085634cc6aca1202ce9c | 26,161 | py | Python | pyjap/json_api_serializer.py | fabian-lpz/py-jap | 1f7c144c55c284ee87ec696fa91abff39b0d93e0 | [
"Apache-2.0"
] | null | null | null | pyjap/json_api_serializer.py | fabian-lpz/py-jap | 1f7c144c55c284ee87ec696fa91abff39b0d93e0 | [
"Apache-2.0"
] | null | null | null | pyjap/json_api_serializer.py | fabian-lpz/py-jap | 1f7c144c55c284ee87ec696fa91abff39b0d93e0 | [
"Apache-2.0"
] | null | null | null | from json_api_serializer import json_api_error_document, json_api_data_document
from collections import defaultdict
import json
__author__ = "Fabian Lopez Verdugo"
__version__ = "0.1"
__maintainer__ = "Fabian Lopez"
__email__ = "fabian.lopez@enova.mx"
__status__ = "Development"
class JASerializer:
    """Serialize plain dict/list data sets into JSON:API documents.

    Attribute specs accepted throughout may be plain column names, dotted
    paths ("a.b" walks nested dicts), or dicts with 'column'/'alias' keys
    (plus optional 'group' and 'solved' entries).

    Fixes vs. the previous revision:
    - ``getMetaIncludesFromDataSet`` read ``j['type']`` in the dict-spec
      branch, where ``j`` is a stale loop variable (NameError on first use);
      it now uses the spec's own 'type', matching the parallel branch.
    - mutable default arguments replaced with ``None`` sentinels.
    - the byte-duplicated per-record branches were extracted into private
      helpers; textual differences between them were only dead stores.
    """

    def __init__(self):
        # Debug trace retained for backward compatibility with existing logs.
        print("__init__ JASerializer")

    def createDataDocument(self):
        """Start a fresh JSON:API data document on self.document."""
        self.document = json_api_data_document.JADataDocument()

    def createErrorDocument(self):
        """Start a fresh JSON:API error document on self.document."""
        self.document = json_api_error_document.JAErrorDocument()

    def serializeJAError(self, status, title, detail):
        """Build a JSON:API error document.

        :param status: The HTTP status of the error.
        :param title: The HTTP title of the error.
        :param detail: A more descriptive detail description of the error.
        :returns: JSON API error document as a JSON string.
        """
        self.createErrorDocument()
        errorElement = self.document.createErrorElement(status, title, detail)
        self.document.appendObjectInDocument(errorElement, "errors")
        return json.dumps(self.document.getJSONResponse(), default=str)

    def serializeJADataColumnSetID(self, dataType, data, attributes, idColumn="id",
                                   relationships=None, links=None, meta=None):
        """Build a JSON:API data document from *data*.

        :param dataType: The JSON:API resource type of the data document.
        :param data: A dict (one record) or list of dicts to serialize.
        :param attributes: Attribute specs (see class docstring).
        :param idColumn: Column (possibly dotted) holding each record's id.
        :param relationships: Relationship spec dict, or list of such dicts.
        :param links: Links object passed through to each data element.
        :param meta: Meta spec dict, or list of such dicts.
        :returns: JSON API document (or error document) as a JSON string.
        """
        # None sentinels instead of mutable defaults; explicit None now
        # behaves the same as omitting the argument.
        relationships = [] if relationships is None else relationships
        links = {} if links is None else links
        meta = {} if meta is None else meta
        self.createDataDocument()
        if isinstance(data, list):
            records = data
        elif isinstance(data, dict):
            records = [data]
        else:
            return self.serializeJAError(400, "Bad Request", "No se puede serializar la data ingresada porque no es un objeto dict o list.")
        for record in records:
            error = self._appendRecord(record, dataType, attributes, idColumn,
                                       relationships, links, meta)
            if error is not None:
                return error
        return json.dumps(self.document.getJSONResponse(), default=str)

    def _appendRecord(self, record, dataType, attributes, idColumn,
                      relationships, links, meta):
        """Serialize one record into self.document.

        Returns an error-document JSON string on a bad relationship/meta
        spec, or None on success.
        """
        dataAttributes = self.getAttributesFromDataSet(record, attributes)
        idValue = self.getIdFromDataSet(record, idColumn)
        if len(relationships) != 0:
            if isinstance(relationships, dict):
                relationshipsIncludes = self.getRelationshipsIncludesFromDataSet(record, relationships)
                if len(relationshipsIncludes) == 0:
                    return self.serializeJAError(400, "Bad Request", "La relación dada no cumple con las especificaciones de objeto de data")
            else:
                # NOTE: when several specs are given, only the LAST spec's
                # result is attached to the data element (preserved behavior).
                for rel in relationships:
                    relationshipsIncludes = self.getRelationshipsIncludesFromDataSet(record, rel)
                    if len(relationshipsIncludes) == 0:
                        return self.serializeJAError(400, "Bad Request", "La relación dada no cumple con las especificaciones de objeto de data")
        else:
            relationshipsIncludes = {"included": [], "relationship": []}
        if len(meta) != 0:
            if isinstance(meta, dict):
                metasIncludes = self.getMetaIncludesFromDataSet(record, meta)
                if len(metasIncludes) == 0:
                    return self.serializeJAError(400, "Bad Request", "La metadata dada no cumple con las especificaciones de objeto de metadata")
            else:
                for rel in meta:
                    metasIncludes = self.getMetaIncludesFromDataSet(record, rel)
                    if len(metasIncludes) == 0:
                        return self.serializeJAError(400, "Bad Request", "La metadata dada no cumple con las especificaciones de objeto de metadata")
        else:
            metasIncludes = {"included": [], "metas": []}
        dataElement = self.document.createDataElement(
            idValue, dataType, dataAttributes,
            relationshipsIncludes["relationship"], links, metasIncludes["metas"])
        self.document.appendObjectInDocument(dataElement, "data")
        if len(metasIncludes["included"]) > 0:
            self.document.appendObjectInDocument(metasIncludes["included"], "included")
        if len(relationshipsIncludes["included"]) > 0:
            self.document.appendObjectInDocument(relationshipsIncludes["included"], "included")
        return None

    def getAttributesFromDataSet(self, data, attributes):
        """Extract the requested attributes from one record.

        Returns a defaultdict(dict) mapping alias/column (or 'group') to value.
        """
        parsedAttributes = defaultdict(dict)
        for attr in attributes:
            if isinstance(attr, dict):
                if "." in attr["column"]:
                    # walk the dotted path; the last hop is the value
                    splitColumn = [x.strip() for x in attr["column"].split('.')]
                    loopData = data
                    cicleAttributes = {}
                    for c in splitColumn:
                        cicleAttributes = loopData[c]
                        loopData = loopData[c]
                    if "group" in attr:
                        parsedAttributes[attr["group"]][attr["alias"]] = self.solveAttribute(cicleAttributes, attr.get('solved', {}))
                    else:
                        parsedAttributes[attr["alias"]] = self.solveAttribute(cicleAttributes, attr.get('solved', {}))
                elif attr["column"] in data:
                    if "group" in attr:
                        parsedAttributes[attr["group"]][attr["alias"]] = self.solveAttribute(data[attr["column"]], attr.get('solved', {}))
                    else:
                        parsedAttributes[attr["alias"]] = self.solveAttribute(data[attr["column"]], attr.get('solved', {}))
            elif "." in attr:
                splitColumn = [x.strip() for x in attr.split('.')]
                loopData = data
                cicleAttributes = {}
                lastAttr = ""
                for c in splitColumn:
                    cicleAttributes = loopData[c]
                    loopData = loopData[c]
                    lastAttr = c
                parsedAttributes[lastAttr] = cicleAttributes
            else:
                if attr in data:
                    parsedAttributes[attr] = data[attr]
        return parsedAttributes

    def solveAttribute(self, pivotArray, solvedArray):
        """Translate values of *pivotArray* through the *solvedArray* lookup.

        Values already present in solvedArray are left as-is; others are
        replaced in place by ``solvedArray[int(value)]``.
        """
        if len(solvedArray) == 0:
            return pivotArray
        for i, k in enumerate(pivotArray):
            if k not in solvedArray:
                # NOTE(review): assumes the value is int-coercible and a
                # valid index/key into solvedArray — confirm with callers
                pivotArray[i] = solvedArray[int(k)]
        return pivotArray

    def getMetaAttributesFromDataSet(self, data, attributes):
        """Extract attributes as a list of {'attribute': name, 'value': value} dicts."""
        parsedAttributes = []
        for attr in attributes:
            if "." in attr:
                splitColumn = [x.strip() for x in attr.split('.')]
                loopData = data
                cicleAttributes = {}
                lastAttr = ""
                for c in splitColumn:
                    cicleAttributes = loopData[c]
                    loopData = loopData[c]
                    lastAttr = c
                parsedAttributes.append({"attribute": lastAttr, "value": cicleAttributes})
            else:
                if attr in data:
                    parsedAttributes.append({"attribute": attr, "value": data[attr]})
        return parsedAttributes

    def getIdFromDataSet(self, data, idColumn):
        """Resolve a record's id from *idColumn* (dotted paths supported).

        Falls back to 1 when a plain column is missing from the record.
        """
        if "." in idColumn:
            splitColumn = [x.strip() for x in idColumn.split('.')]
            loopData = data
            for c in splitColumn:
                parsedId = loopData[c]
                loopData = loopData[c]
        else:
            if idColumn in data:
                parsedId = data[idColumn]
            else:
                parsedId = 1
        return parsedId

    def _collectRelationship(self, relda, relationships, includeElement, relationshipElement):
        """Process one related record for getRelationshipsIncludesFromDataSet.

        Appends its relationship entry and included resources; returns the
        (possibly extended) includeElement and relationshipElement lists.
        """
        relrel = {}
        idValue = self.getIdFromDataSet(relda, relationships['id'])
        dataAttributes = self.getAttributesFromDataSet(relda, relationships['attributes'])
        nested = relationships.get('relationships', {})
        if len(nested) != 0:
            # a single dict spec is treated as a one-element list of specs
            specs = [nested] if isinstance(nested, dict) else nested
            for spec in specs:
                nestedIncludes = self.getRelationshipsIncludesFromDataSet(relda, spec)
                if len(nestedIncludes) != 0:
                    if len(relrel) != 0:
                        relrel.extend(nestedIncludes["relationship"])
                    else:
                        relrel = nestedIncludes["relationship"]
                    for included in nestedIncludes["included"]:
                        includeElement = self.appendInclude(includeElement, included)
        relationshipElement = self.appendInclude(
            relationshipElement,
            self.document.createDataElement(idValue, relationships['type'], [], relrel))
        includeElement = self.appendInclude(
            includeElement,
            self.document.createDataElement(idValue, relationships['type'], dataAttributes))
        return includeElement, relationshipElement

    def getRelationshipsIncludesFromDataSet(self, data, relationships):
        """Collect relationship and included elements for a relationship spec.

        Returns {} when a non-dotted spec fails validation, otherwise a dict
        with 'included' and 'relationship' lists.
        """
        includeElement = []
        relationshipElement = []
        if "." in relationships['column_relation']:
            splitColumn = [x.strip() for x in relationships['column_relation'].split('.')]
            for c in splitColumn:
                # NOTE(review): substring test against the serialized record,
                # not a key lookup — preserved from the original; confirm intent
                if c in json.dumps(data, default=str):
                    data = data[c]
            relationData = data
            for relda in relationData:
                includeElement, relationshipElement = self._collectRelationship(
                    relda, relationships, includeElement, relationshipElement)
        else:
            if not self.validateRelationship(relationships):
                return {}
            if relationships['column_relation'] in json.dumps(data, default=str):
                relationData = data[relationships['column_relation']]
                for relda in relationData:
                    includeElement, relationshipElement = self._collectRelationship(
                        relda, relationships, includeElement, relationshipElement)
        return {"included": includeElement, "relationship": relationshipElement}

    def _collectMeta(self, relda, meta, includeElement, metadataElement):
        """Process one related record for getMetaIncludesFromDataSet.

        Merges its meta element into metadataElement and appends included
        resources; returns the updated (includeElement, metadataElement).
        """
        # metadata portion
        relrel = []
        idValue = self.getIdFromDataSet(relda, meta['id'])
        dataAttributes = self.getAttributesFromDataSet(relda, meta['attributes'])
        metaDataAttributes = self.getMetaAttributesFromDataSet(relda, meta.get('meta_attributes', []))
        nestedMeta = meta.get('meta_relationships', {})
        if len(nestedMeta) != 0:
            specs = [nestedMeta] if isinstance(nestedMeta, dict) else nestedMeta
            for spec in specs:
                nestedIncludes = self.getMetaIncludesFromDataSet(relda, spec)
                if len(nestedIncludes) != 0:
                    if len(relrel) != 0:
                        relrel.extend(nestedIncludes["metas"])
                    else:
                        relrel = nestedIncludes["metas"]
                    for included in nestedIncludes["included"]:
                        includeElement = self.appendInclude(includeElement, included)
        metadataElement = self.insertObjectInDictAsAttributes(
            self.document.createMetaElement(idValue, metaDataAttributes, [], relrel),
            metadataElement)
        # relationships portion
        nestedRel = meta.get('relationships', {})
        relrel = []
        relation = {}
        if len(nestedRel) != 0:
            relType = ""
            specs = [nestedRel] if isinstance(nestedRel, dict) else nestedRel
            for spec in specs:
                nestedIncludes = self.getRelationshipsIncludesFromDataSet(relda, spec)
                # Fix: the dict-spec branch previously read j['type'] where
                # j was undefined/stale; the spec's own type is intended.
                relType = spec['type']
                if len(nestedIncludes) != 0:
                    if len(relrel) != 0:
                        relrel.extend(nestedIncludes["relationship"])
                    else:
                        relrel = nestedIncludes["relationship"]
                    for included in nestedIncludes["included"]:
                        includeElement = self.appendInclude(includeElement, included)
            relation = self.createRelationShipElement(relrel, relType)
        includeElement = self.appendInclude(
            includeElement,
            self.document.createDataElement(idValue, meta['type'], dataAttributes, relation))
        return includeElement, metadataElement

    def getMetaIncludesFromDataSet(self, data, meta):
        """Collect meta and included elements for a meta spec.

        Returns {} when a non-dotted spec fails validation, otherwise a dict
        with 'included' (list) and 'metas' (dict).
        """
        includeElement = []
        metadataElement = {}
        if "." in meta['column_relation']:
            splitColumn = [x.strip() for x in meta['column_relation'].split('.')]
            for c in splitColumn:
                if c in json.dumps(data, default=str):
                    data = data[c]
            relationData = data
            for relda in relationData:
                includeElement, metadataElement = self._collectMeta(
                    relda, meta, includeElement, metadataElement)
        else:
            if not self.validateRelationship(meta):
                return {}
            if meta['column_relation'] in json.dumps(data, default=str):
                relationData = data[meta['column_relation']]
                for relda in relationData:
                    includeElement, metadataElement = self._collectMeta(
                        relda, meta, includeElement, metadataElement)
        return {"included": includeElement, "metas": metadataElement}

    def validateRelationship(self, relationship):
        """Return True when the spec names all required keys.

        Checks are substring tests on the JSON-serialized spec (preserved
        behavior), so a key appearing anywhere in the spec satisfies them.
        """
        if 'column_relation' not in json.dumps(relationship, default=str):
            return False
        if 'id' not in json.dumps(relationship, default=str):
            return False
        if 'type' not in json.dumps(relationship, default=str):
            return False
        if 'attributes' not in json.dumps(relationship, default=str):
            return False
        if 'relationships' in json.dumps(relationship, default=str):
            # NOTE(review): the recursive results are discarded, and the
            # list branch iterates the spec's KEYS, not the nested specs —
            # preserved as-is since the return value never depends on them.
            if isinstance(relationship['relationships'], dict):
                self.validateRelationship(relationship['relationships'])
            else:
                for rela in relationship:
                    self.validateRelationship(rela)
        return True

    def appendInclude(self, includeArray, itemToInclude):
        """Append *itemToInclude* unless an element with the same id/type exists.

        An existing element is updated in place when the key counts differ.
        Returns the (mutated) includeArray.
        """
        for item in includeArray:
            if item['id'] == itemToInclude['id'] and item['type'] == itemToInclude['type']:
                if len(item.keys()) != len(itemToInclude.keys()):
                    item.update(itemToInclude)
                return includeArray
        includeArray.append(itemToInclude)
        return includeArray

    def insertObjectInDictAsAttributes(self, objectToInsert, dictionary):
        """Copy every key/value of *objectToInsert* into *dictionary* and return it."""
        for key in objectToInsert.keys():
            dictionary[key] = objectToInsert[key]
        return dictionary

    def createRelationShipElement(self, relation, relationType):
        """Wrap *relation* as {<pluralized type>: {"data": relation}}."""
        # naive pluralization: append 's' unless the type already ends in one
        if relationType[-1:] != 's':
            relationType = relationType + "s"
        relationship = {relationType: {"data": relation}}
        return relationship
8b21665c421a5f7d420cc820c141aac2e747b356 | 30 | py | Python | slim/support/asyncpg/__init__.py | 0xNone/slim | 7cc8bc6ac8773f63ef1ebcafe23c0b1ec52811c6 | [
"Zlib"
] | 1 | 2017-09-18T07:56:25.000Z | 2017-09-18T07:56:25.000Z | slim/support/asyncpg/__init__.py | 0xNone/slim | 7cc8bc6ac8773f63ef1ebcafe23c0b1ec52811c6 | [
"Zlib"
] | null | null | null | slim/support/asyncpg/__init__.py | 0xNone/slim | 7cc8bc6ac8773f63ef1ebcafe23c0b1ec52811c6 | [
"Zlib"
] | null | null | null | from .view import AsyncpgView
| 15 | 29 | 0.833333 | 4 | 30 | 6.25 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133333 | 30 | 1 | 30 | 30 | 0.961538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
8cd82c9ec7a9486aa14a2962dd861e6d99e500ee | 2,819 | py | Python | epytope/Data/pssms/smmpmbec/mat/A_02_02_11.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/Data/pssms/smmpmbec/mat/A_02_02_11.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/Data/pssms/smmpmbec/mat/A_02_02_11.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | A_02_02_11 = {0: {'A': -0.199, 'C': 0.016, 'E': -0.034, 'D': 0.127, 'G': -0.025, 'F': -0.087, 'I': -0.139, 'H': 0.05, 'K': -0.162, 'M': -0.225, 'L': -0.264, 'N': 0.275, 'Q': -0.102, 'P': 0.428, 'S': 0.063, 'R': 0.072, 'T': 0.074, 'W': 0.109, 'V': 0.076, 'Y': -0.052}, 1: {'A': 0.173, 'C': 0.018, 'E': 0.22, 'D': 0.285, 'G': 0.141, 'F': -0.392, 'I': -0.104, 'H': -0.143, 'K': -0.171, 'M': -0.387, 'L': -0.382, 'N': 0.059, 'Q': 0.294, 'P': 0.633, 'S': 0.296, 'R': -0.142, 'T': 0.225, 'W': -0.275, 'V': -0.011, 'Y': -0.338}, 2: {'A': -0.371, 'C': -0.05, 'E': 0.014, 'D': 0.013, 'G': 0.014, 'F': -0.48, 'I': -0.118, 'H': 0.185, 'K': 0.338, 'M': -0.097, 'L': -0.114, 'N': -0.064, 'Q': 0.353, 'P': -0.089, 'S': 0.103, 'R': 0.646, 'T': -0.004, 'W': -0.01, 'V': -0.249, 'Y': -0.021}, 3: {'A': -0.407, 'C': -0.116, 'E': -0.463, 'D': -0.226, 'G': 0.082, 'F': -0.059, 'I': 0.037, 'H': 0.154, 'K': 0.201, 'M': 0.051, 'L': -0.118, 'N': 0.132, 'Q': 0.037, 'P': -0.075, 'S': 0.041, 'R': 0.368, 'T': -0.008, 'W': 0.172, 'V': 0.053, 'Y': 0.145}, 4: {'A': 0.293, 'C': -0.055, 'E': -0.081, 'D': 0.152, 'G': -0.03, 'F': -0.139, 'I': 0.061, 'H': 0.066, 'K': 0.226, 'M': -0.117, 'L': -0.246, 'N': -0.15, 'Q': -0.18, 'P': 0.136, 'S': 0.131, 'R': 0.219, 'T': 0.078, 'W': -0.287, 'V': 0.048, 'Y': -0.127}, 5: {'A': -0.031, 'C': 0.033, 'E': 0.023, 'D': 0.01, 'G': -0.122, 'F': 0.307, 'I': 0.064, 'H': 0.12, 'K': 0.183, 'M': -0.003, 'L': 0.088, 'N': -0.35, 'Q': -0.236, 'P': 0.037, 'S': -0.235, 'R': 0.112, 'T': -0.182, 'W': 0.064, 'V': -0.021, 'Y': 0.14}, 6: {'A': -0.394, 'C': 0.003, 'E': 0.039, 'D': 0.024, 'G': 0.006, 'F': -0.016, 'I': -0.123, 'H': 0.084, 'K': -0.037, 'M': -0.029, 'L': -0.144, 'N': 0.185, 'Q': 0.181, 'P': 0.061, 'S': 0.069, 'R': 0.056, 'T': 0.09, 'W': 0.163, 'V': -0.327, 'Y': 0.112}, 7: {'A': 0.371, 'C': -0.016, 'E': -0.369, 'D': -0.164, 'G': 0.126, 'F': -0.004, 'I': -0.368, 'H': 0.133, 'K': 0.229, 'M': -0.125, 'L': -0.288, 'N': -0.089, 
'Q': -0.053, 'P': 0.309, 'S': 0.058, 'R': 0.416, 'T': 0.027, 'W': -0.104, 'V': -0.185, 'Y': 0.097}, 8: {'A': 0.315, 'C': 0.003, 'E': -0.049, 'D': -0.332, 'G': 0.143, 'F': 0.018, 'I': -0.126, 'H': 0.043, 'K': 0.193, 'M': -0.131, 'L': -0.072, 'N': -0.236, 'Q': -0.054, 'P': -0.404, 'S': 0.2, 'R': 0.21, 'T': 0.245, 'W': -0.077, 'V': 0.098, 'Y': 0.013}, 9: {'A': 0.324, 'C': -0.005, 'E': -0.201, 'D': -0.035, 'G': 0.275, 'F': -0.169, 'I': -0.008, 'H': 0.031, 'K': 0.142, 'M': 0.023, 'L': 0.035, 'N': 0.165, 'Q': -0.109, 'P': -0.411, 'S': 0.134, 'R': -0.117, 'T': 0.149, 'W': -0.143, 'V': 0.111, 'Y': -0.192}, 10: {'A': -0.527, 'C': 0.069, 'E': -0.119, 'D': 0.038, 'G': -0.023, 'F': -0.059, 'I': -0.521, 'H': 0.357, 'K': 0.099, 'M': -0.013, 'L': -0.32, 'N': 0.225, 'Q': 0.169, 'P': -0.163, 'S': 0.071, 'R': 0.368, 'T': -0.032, 'W': 0.556, 'V': -0.582, 'Y': 0.406}, -1: {'con': 3.96545}} | 2,819 | 2,819 | 0.39553 | 679 | 2,819 | 1.637703 | 0.290133 | 0.019784 | 0.008993 | 0.010791 | 0.061151 | 0 | 0 | 0 | 0 | 0 | 0 | 0.375635 | 0.161405 | 2,819 | 1 | 2,819 | 2,819 | 0.094755 | 0 | 0 | 0 | 0 | 0 | 0.079078 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ba06051bc7fea982bb86ea53c36655fdc2935929 | 107 | py | Python | dear_petition/petition/etl/__init__.py | nthall/wext | 7db1dda77436f85d905659d49604c311613ea7a3 | [
"MIT"
] | null | null | null | dear_petition/petition/etl/__init__.py | nthall/wext | 7db1dda77436f85d905659d49604c311613ea7a3 | [
"MIT"
] | null | null | null | dear_petition/petition/etl/__init__.py | nthall/wext | 7db1dda77436f85d905659d49604c311613ea7a3 | [
"MIT"
] | null | null | null | from .extract import transform_ciprs_document, parse_ciprs_document
from .load import import_ciprs_records
| 35.666667 | 67 | 0.88785 | 15 | 107 | 5.933333 | 0.6 | 0.292135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.084112 | 107 | 2 | 68 | 53.5 | 0.908163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
ba2c48e2c891d394c366270d5ca25dd0e20d570b | 93 | py | Python | settings.py | anuran-roy/Im2SQL-api | 7cbbc4772ccc7b06367bea51070798ed8f9f9ab4 | [
"MIT"
] | 1 | 2021-11-14T16:06:31.000Z | 2021-11-14T16:06:31.000Z | settings.py | anuran-roy/Im2SQL-api | 7cbbc4772ccc7b06367bea51070798ed8f9f9ab4 | [
"MIT"
] | 1 | 2022-03-12T01:03:03.000Z | 2022-03-12T01:03:03.000Z | settings.py | anuran-roy/Im2SQL-api | 7cbbc4772ccc7b06367bea51070798ed8f9f9ab4 | [
"MIT"
] | null | null | null | import os
BASE_DIR: str = os.getcwd()
MEDIA_DIR: str = os.path.join(BASE_DIR, 'media/temp/') | 23.25 | 54 | 0.709677 | 17 | 93 | 3.705882 | 0.588235 | 0.222222 | 0.253968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11828 | 93 | 4 | 54 | 23.25 | 0.768293 | 0 | 0 | 0 | 0 | 0 | 0.117021 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
e865566c2e7fb0a9d712c065d9152412a1c09664 | 14,843 | py | Python | tests/test_observer.py | gegenschall/djangochannelsrestframework | bb611a6c251517d0e014b028c4b808e6db1785f3 | [
"MIT"
] | 1 | 2021-07-28T09:17:48.000Z | 2021-07-28T09:17:48.000Z | tests/test_observer.py | gegenschall/djangochannelsrestframework | bb611a6c251517d0e014b028c4b808e6db1785f3 | [
"MIT"
] | null | null | null | tests/test_observer.py | gegenschall/djangochannelsrestframework | bb611a6c251517d0e014b028c4b808e6db1785f3 | [
"MIT"
] | null | null | null | import asyncio
import pytest
from asgiref.sync import async_to_sync
from channels import DEFAULT_CHANNEL_LAYER
from channels.db import database_sync_to_async
from channels.layers import channel_layers
from channels.testing import WebsocketCommunicator
from django.contrib.auth import user_logged_in, get_user_model
from django.db import transaction
from django.utils.text import slugify
from djangochannelsrestframework.consumers import AsyncAPIConsumer
from djangochannelsrestframework.observer import observer, model_observer
@pytest.mark.django_db(transaction=True)
@pytest.mark.asyncio
async def test_observer_wrapper(settings):
    """A signal observer created with ``@observer`` forwards ``user_logged_in``
    events to a consumer that has subscribed to it."""
    settings.CHANNEL_LAYERS = {
        "default": {
            "BACKEND": "channels.layers.InMemoryChannelLayer",
            "TEST_CONFIG": {"expiry": 100500},
        },
    }
    channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)

    class TestConsumer(AsyncAPIConsumer):
        async def accept(self):
            # Subscribe before completing the handshake so no event is missed.
            await self.handle_user_logged_in.subscribe()
            await super().accept()

        @observer(user_logged_in)
        async def handle_user_logged_in(self, message, observer=None, **kwargs):
            await self.send_json({"message": message, "observer": observer is not None})

    comm = WebsocketCommunicator(TestConsumer(), "/testws/")
    is_open, _ = await comm.connect()
    assert is_open

    new_user = await database_sync_to_async(get_user_model().objects.create)(
        username="test", email="test@example.com"
    )
    # Fire the login signal from a worker thread, as Django itself would.
    await database_sync_to_async(user_logged_in.send)(
        sender=new_user.__class__, request=None, user=new_user
    )

    assert await comm.receive_json_from() == {"message": {}, "observer": True}
    await comm.disconnect()
@pytest.mark.django_db(transaction=True)
@pytest.mark.asyncio
async def test_model_observer_wrapper(settings):
    """A ``@model_observer`` on the user model emits a single ``create`` event
    (typed after the observer method name) when a user row is inserted."""
    settings.CHANNEL_LAYERS = {
        "default": {
            "BACKEND": "channels.layers.InMemoryChannelLayer",
            "TEST_CONFIG": {"expiry": 100500},
        },
    }
    channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)

    class TestConsumer(AsyncAPIConsumer):
        async def accept(self, **kwargs):
            await self.user_change_observer_wrapper.subscribe()
            await super().accept()

        @model_observer(get_user_model())
        async def user_change_observer_wrapper(
            self, message, action, message_type, observer=None, **kwargs
        ):
            # Echo every observed model event back over the websocket.
            await self.send_json(
                {"body": message, "action": action, "type": message_type}
            )

    comm = WebsocketCommunicator(TestConsumer(), "/testws/")
    is_open, _ = await comm.connect()
    assert is_open

    created = await database_sync_to_async(get_user_model().objects.create)(
        username="test", email="test@example.com"
    )

    # The "type" value is derived from the observer method's name.
    assert await comm.receive_json_from() == {
        "action": "create",
        "body": {"pk": created.pk},
        "type": "user.change.observer.wrapper",
    }
    await comm.disconnect()
@pytest.mark.django_db(transaction=True)
@pytest.mark.asyncio
async def test_model_observer_wrapper_in_transaction(settings):
    """Model-observer events fired inside ``transaction.atomic`` must be
    deferred until the transaction commits, then delivered once.

    Bug fix: the original asserted ``async_to_sync(coro)`` — that merely wraps
    the un-awaited coroutine and the wrapper object is always truthy, so the
    "nothing received yet" checks were vacuous (and leaked a never-awaited
    coroutine).  ``async_to_sync`` must wrap the *callable* and the wrapper
    must then be invoked: ``async_to_sync(fn)(timeout=...)``.
    """
    settings.CHANNEL_LAYERS = {
        "default": {
            "BACKEND": "channels.layers.InMemoryChannelLayer",
            "TEST_CONFIG": {
                "expiry": 100500,
            },
        },
    }
    layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)

    class TestConsumer(AsyncAPIConsumer):
        async def accept(self, **kwargs):
            await TestConsumer.user_change_wrapper_in_transaction.subscribe(self)
            await super().accept()

        @model_observer(get_user_model())
        async def user_change_wrapper_in_transaction(
            self, message, action, message_type, observer=None, **kwargs
        ):
            await self.send_json(dict(body=message, action=action, type=message_type))

    communicator = WebsocketCommunicator(TestConsumer(), "/testws/")
    connected, _ = await communicator.connect()
    assert connected

    @database_sync_to_async
    def create_user_and_wait():
        # Runs in a worker thread, so the sync bridge back into the event
        # loop via async_to_sync is legal here.
        with transaction.atomic():
            user = get_user_model().objects.create(
                username="test", email="test@example.com"
            )
            # No event may be delivered while the transaction is still open.
            assert async_to_sync(communicator.receive_nothing)(timeout=0.1)
            user.username = "mike"
            user.save()
            assert async_to_sync(communicator.receive_nothing)(timeout=0.1)
            return user

    user = await create_user_and_wait()

    # After commit a single consolidated "create" event arrives.
    response = await communicator.receive_json_from()
    assert {
        "action": "create",
        "body": {"pk": user.pk},
        "type": "user.change.wrapper.in.transaction",
    } == response
    await communicator.disconnect()
@pytest.mark.django_db(transaction=True)
@pytest.mark.asyncio
async def test_model_observer_delete_wrapper(settings):
    # Verifies that a model observer reports both "create" and "delete"
    # actions for a user instance, with exactly one event per change.
    settings.CHANNEL_LAYERS = {
        "default": {
            "BACKEND": "channels.layers.InMemoryChannelLayer",
            "TEST_CONFIG": {
                "expiry": 100500,
            },
        },
    }
    layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
    class TestConsumerObserverDelete(AsyncAPIConsumer):
        async def accept(self, **kwargs):
            # Subscribe before completing the handshake so no event is missed.
            await self.user_change_observer_delete.subscribe()
            await super().accept()
        @model_observer(get_user_model())
        async def user_change_observer_delete(
            self, message, action, message_type, observer=None, **kwargs
        ):
            # Echo every observed model event back over the websocket.
            await self.send_json(dict(body=message, action=action, type=message_type))
    communicator = WebsocketCommunicator(TestConsumerObserverDelete(), "/testws/")
    connected, _ = await communicator.connect()
    assert connected
    await communicator.receive_nothing()
    # Instantiate without saving, then save explicitly to trigger "create".
    user = await database_sync_to_async(get_user_model())(
        username="test", email="test@example.com"
    )
    await database_sync_to_async(user.save)()
    response = await communicator.receive_json_from()
    # receive_nothing() confirms no duplicate event was queued.
    await communicator.receive_nothing()
    assert {
        "action": "create",
        "body": {"pk": user.pk},
        "type": "user.change.observer.delete",
    } == response
    # Capture the pk before deletion clears it on the instance.
    pk = user.pk
    await database_sync_to_async(user.delete)()
    response = await communicator.receive_json_from()
    await communicator.receive_nothing()
    assert {
        "action": "delete",
        "body": {"pk": pk},
        "type": "user.change.observer.delete",
    } == response
    await communicator.disconnect()
@pytest.mark.django_db(transaction=True)
@pytest.mark.asyncio
async def test_model_observer_many_connections_wrapper(settings):
    # Two websocket connections to the SAME consumer class must each receive
    # their own copy of a model event, independent of disconnect order.
    settings.CHANNEL_LAYERS = {
        "default": {
            "BACKEND": "channels.layers.InMemoryChannelLayer",
            "TEST_CONFIG": {
                "expiry": 100500,
            },
        },
    }
    layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
    class TestConsumer(AsyncAPIConsumer):
        async def accept(self, **kwargs):
            await self.user_change_many_connections_wrapper.subscribe()
            await super().accept()
        @model_observer(get_user_model())
        async def user_change_many_connections_wrapper(
            self, message, action, message_type, observer=None, **kwargs
        ):
            await self.send_json(dict(body=message, action=action, type=message_type))
    communicator1 = WebsocketCommunicator(TestConsumer(), "/testws/")
    connected, _ = await communicator1.connect()
    assert connected
    communicator2 = WebsocketCommunicator(TestConsumer(), "/testws/")
    connected, _ = await communicator2.connect()
    assert connected
    # A single create event fans out to both subscribed connections.
    user = await database_sync_to_async(get_user_model().objects.create)(
        username="test", email="test@example.com"
    )
    response = await communicator1.receive_json_from()
    assert {
        "action": "create",
        "body": {"pk": user.pk},
        "type": "user.change.many.connections.wrapper",
    } == response
    await communicator1.disconnect()
    # The second connection still gets its copy after the first disconnects.
    response = await communicator2.receive_json_from()
    assert {
        "action": "create",
        "body": {"pk": user.pk},
        "type": "user.change.many.connections.wrapper",
    } == response
    await communicator2.disconnect()
@pytest.mark.django_db(transaction=True)
@pytest.mark.asyncio
async def test_model_observer_many_consumers_wrapper(settings):
    # Two DIFFERENT consumer classes observing the same model must each
    # receive the event, with the message type reflecting their own observer
    # method name.
    settings.CHANNEL_LAYERS = {
        "default": {
            "BACKEND": "channels.layers.InMemoryChannelLayer",
            "TEST_CONFIG": {
                "expiry": 100500,
            },
        },
    }
    layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
    class TestConsumer(AsyncAPIConsumer):
        async def accept(self, **kwargs):
            await self.user_change_many_consumers_wrapper_1.subscribe()
            await super().accept()
        @model_observer(get_user_model())
        async def user_change_many_consumers_wrapper_1(
            self, message, action, message_type, observer=None, **kwargs
        ):
            await self.send_json(dict(body=message, action=action, type=message_type))
    class TestConsumer2(AsyncAPIConsumer):
        async def accept(self, **kwargs):
            await self.user_change_many_consumers_wrapper_2.subscribe()
            await super().accept()
        @model_observer(get_user_model())
        async def user_change_many_consumers_wrapper_2(
            self, message, action, message_type, observer=None, **kwargs
        ):
            await self.send_json(dict(body=message, action=action, type=message_type))
    communicator1 = WebsocketCommunicator(TestConsumer(), "/testws/")
    connected, _ = await communicator1.connect()
    assert connected
    communicator2 = WebsocketCommunicator(TestConsumer2(), "/testws/")
    connected, _ = await communicator2.connect()
    assert connected
    user = await database_sync_to_async(get_user_model().objects.create)(
        username="test", email="test@example.com"
    )
    response = await communicator1.receive_json_from()
    # Each consumer's event type is derived from its own method name.
    assert {
        "action": "create",
        "body": {"pk": user.pk},
        "type": "user.change.many.consumers.wrapper.1",
    } == response
    await communicator1.disconnect()
    response = await communicator2.receive_json_from()
    assert {
        "action": "create",
        "body": {"pk": user.pk},
        "type": "user.change.many.consumers.wrapper.2",
    } == response
    await communicator2.disconnect()
@pytest.mark.django_db(transaction=True)
@pytest.mark.asyncio
async def test_model_observer_custom_groups_wrapper(settings):
    # A single ``.groups`` callback serves both roles: it maps a consumer
    # subscription (keyword args) and a signal instance to group names, so
    # only events for username "test" reach this subscriber.
    settings.CHANNEL_LAYERS = {
        "default": {
            "BACKEND": "channels.layers.InMemoryChannelLayer",
            "TEST_CONFIG": {
                "expiry": 100500,
            },
        },
    }
    layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
    class TestConsumer(AsyncAPIConsumer):
        async def accept(self, **kwargs):
            await self.user_change_custom_groups_wrapper.subscribe(username="test")
            await super().accept()
        @model_observer(get_user_model())
        async def user_change_custom_groups_wrapper(
            self, message, action, message_type, observer=None, **kwargs
        ):
            await self.send_json(dict(body=message, action=action, type=message_type))
        @user_change_custom_groups_wrapper.groups
        def user_change_custom_groups_wrapper(
            self, instance=None, username=None, **kwargs
        ):
            # Called with ``username`` when subscribing and with ``instance``
            # when a signal fires; both must yield matching group names.
            if username:
                yield "-instance-username-{}".format(slugify(username))
            else:
                yield "-instance-username-{}".format(instance.username)
    communicator = WebsocketCommunicator(TestConsumer(), "/testws/")
    connected, _ = await communicator.connect()
    assert connected
    user = await database_sync_to_async(get_user_model().objects.create)(
        username="test", email="test@example.com"
    )
    response = await communicator.receive_json_from()
    assert {
        "action": "create",
        "body": {"pk": user.pk},
        "type": "user.change.custom.groups.wrapper",
    } == response
    await communicator.disconnect()
    user = await database_sync_to_async(get_user_model().objects.create)(
        username="test2", email="test@example.com"
    )
    # no event since this is only subscribed to 'test'
    # NOTE(review): the communicator is already disconnected here, so the
    # timeout would occur regardless of group filtering — verify intent.
    with pytest.raises(asyncio.TimeoutError):
        await communicator.receive_json_from()
@pytest.mark.django_db(transaction=True)
@pytest.mark.asyncio
async def test_model_observer_custom_groups_wrapper_with_split_function_api(settings):
    # Same behaviour as the combined ``.groups`` API, but using the split
    # ``groups_for_signal`` / ``groups_for_consumer`` registration functions.
    settings.CHANNEL_LAYERS = {
        "default": {
            "BACKEND": "channels.layers.InMemoryChannelLayer",
            "TEST_CONFIG": {
                "expiry": 100500,
            },
        },
    }
    layer = channel_layers.make_test_backend(DEFAULT_CHANNEL_LAYER)
    class TestConsumerObserverCustomGroups(AsyncAPIConsumer):
        async def accept(self, **kwargs):
            await self.user_change_custom_groups.subscribe(username="test")
            await super().accept()
        @model_observer(get_user_model())
        async def user_change_custom_groups(
            self, message, action, message_type, observer=None, **kwargs
        ):
            await self.send_json(dict(body=message, action=action, type=message_type))
        @user_change_custom_groups.groups_for_signal
        def user_change_custom_groups(self, instance=None, **kwargs):
            # Maps a saved instance to its group name.
            yield "-instance-username-{}".format(instance.username)
        @user_change_custom_groups.groups_for_consumer
        def user_change_custom_groups(self, username=None, **kwargs):
            # Maps a consumer subscription to the matching group name.
            yield "-instance-username-{}".format(slugify(username))
    communicator = WebsocketCommunicator(TestConsumerObserverCustomGroups(), "/testws/")
    connected, _ = await communicator.connect()
    assert connected
    user = await database_sync_to_async(get_user_model().objects.create)(
        username="test", email="test@example.com"
    )
    response = await communicator.receive_json_from()
    assert {
        "action": "create",
        "body": {"pk": user.pk},
        "type": "user.change.custom.groups",
    } == response
    await communicator.disconnect()
    user = await database_sync_to_async(get_user_model().objects.create)(
        username="test2", email="test@example.com"
    )
    # no event since this is only subscribed to 'test'
    # NOTE(review): the communicator is already disconnected here, so the
    # timeout would occur regardless of group filtering — verify intent.
    with pytest.raises(asyncio.TimeoutError):
        await communicator.receive_json_from()
| 30.667355 | 88 | 0.658896 | 1,540 | 14,843 | 6.107792 | 0.080519 | 0.034021 | 0.02424 | 0.02828 | 0.871359 | 0.858814 | 0.823092 | 0.800234 | 0.800234 | 0.795981 | 0 | 0.006831 | 0.230749 | 14,843 | 483 | 89 | 30.730849 | 0.816956 | 0.006535 | 0 | 0.676056 | 0 | 0 | 0.100522 | 0.046802 | 0 | 0 | 0 | 0 | 0.064789 | 1 | 0.011268 | false | 0 | 0.033803 | 0 | 0.073239 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e87b0223b168a2889ad03f1ccbb4f2d7ee2d6b74 | 648 | py | Python | user_agent/device.py | zeroday0619/user_agent | 03b0154d7019ddc8e4323a07f106a383cbd8af22 | [
"MIT"
] | null | null | null | user_agent/device.py | zeroday0619/user_agent | 03b0154d7019ddc8e4323a07f106a383cbd8af22 | [
"MIT"
] | null | null | null | user_agent/device.py | zeroday0619/user_agent | 03b0154d7019ddc8e4323a07f106a383cbd8af22 | [
"MIT"
] | null | null | null | import requests
def fetch_device_data(url: str):
    """Download a JSON device-data file and return the decoded payload.

    Improvements over the original: a non-2xx response now raises
    ``requests.HTTPError`` immediately (previously a 404 body surfaced later
    as an opaque JSON decode error), and a timeout prevents the request from
    hanging indefinitely.

    :param url: absolute URL of the JSON file to fetch
    :return: the decoded JSON payload
    :raises requests.HTTPError: on a non-success HTTP status
    :raises requests.Timeout: if the server does not respond in time
    """
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    return resp.json()
# Device lookup tables fetched from the project's GitHub CDN at import time.
# NOTE(review): network access on import means any fetch failure breaks
# importing this module — consider lazy loading or bundling the data files.
SMARTPHONE_DEV_IDS = fetch_device_data('https://cdn.statically.io/gh/zeroday0619/user_agent/master/user_agent/data/smartphone_dev_id.json')
SMARTPHONE_DEV_EXT = fetch_device_data('https://cdn.statically.io/gh/zeroday0619/user_agent/master/user_agent/data/smartphone_dev_ext.json')
TABLET_DEV_IDS = fetch_device_data('https://cdn.statically.io/gh/zeroday0619/user_agent/master/user_agent/data/tablet_dev_id.json')
TABLET_DEV_EXT = fetch_device_data('https://cdn.statically.io/gh/zeroday0619/user_agent/master/user_agent/data/tablet_dev_ext.json') | 64.8 | 140 | 0.824074 | 103 | 648 | 4.854369 | 0.262136 | 0.144 | 0.15 | 0.16 | 0.744 | 0.744 | 0.744 | 0.744 | 0.744 | 0.744 | 0 | 0.025932 | 0.04784 | 648 | 10 | 141 | 64.8 | 0.784441 | 0 | 0 | 0 | 0 | 0.5 | 0.588598 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
e87b1c45d8532cfc0cba1a8b6f8ad44f459d636e | 52,179 | py | Python | image_labelling_tool/test_imagelabels.py | monforte-dt/django-labeller | bf185e8cd00447f1a098059827a5c5435f34d0d4 | [
"MIT"
] | 94 | 2020-08-05T09:46:52.000Z | 2022-03-31T19:10:26.000Z | image_labelling_tool/test_imagelabels.py | monforte-dt/django-labeller | bf185e8cd00447f1a098059827a5c5435f34d0d4 | [
"MIT"
] | 15 | 2020-11-14T16:26:18.000Z | 2022-01-24T16:38:45.000Z | image_labelling_tool/test_imagelabels.py | monforte-dt/django-labeller | bf185e8cd00447f1a098059827a5c5435f34d0d4 | [
"MIT"
] | 20 | 2020-07-19T12:17:04.000Z | 2021-12-11T13:13:18.000Z | import math
import numpy as np
from PIL import Image, ImageDraw
from unittest import TestCase
from . import labelling_tool
class AbstractLabelTestCase(TestCase):
    """Unit tests for the base :class:`labelling_tool.AbstractLabel`."""

    def test_constructor(self):
        # Default construction leaves all metadata unset.
        blank = labelling_tool.AbstractLabel()
        full = labelling_tool.AbstractLabel(
            object_id='abc_123', classification='cls_a',
            source='manual', anno_data={'purpose': 'test'})
        self.assertIsNone(blank.object_id)
        self.assertIsNone(blank.classification)
        self.assertIsNone(blank.source)
        self.assertEqual(blank.anno_data, {})
        self.assertEqual(full.object_id, 'abc_123')
        self.assertEqual(full.classification, 'cls_a')
        self.assertEqual(full.source, 'manual')
        self.assertEqual(full.anno_data, {'purpose': 'test'})

    def test_dependencies(self):
        # A bare label depends on no other labels.
        self.assertEqual(labelling_tool.AbstractLabel().dependencies, [])

    def test_flatten(self):
        # Flattening a leaf label yields exactly that label.
        leaf = labelling_tool.AbstractLabel()
        self.assertEqual(list(leaf.flatten()), [leaf])

    def test_accumulate_label_class_histogram(self):
        unclassified = labelling_tool.AbstractLabel()
        classified = labelling_tool.AbstractLabel(
            object_id='abc_123', classification='cls_a',
            source='manual', anno_data={'purpose': 'test'})
        # An unclassified label counts under the None key.
        histogram = {}
        unclassified.accumulate_label_class_histogram(histogram)
        self.assertEqual(histogram, {None: 1})
        # A classified label counts under its class name, accumulating.
        histogram = {}
        classified.accumulate_label_class_histogram(histogram)
        self.assertEqual(histogram, {'cls_a': 1})
        classified.accumulate_label_class_histogram(histogram)
        self.assertEqual(histogram, {'cls_a': 2})
class PointLabelTestCase(TestCase):
    """Tests for ``labelling_tool.PointLabel``: construction, bounding box,
    warping, mask rendering and JSON (de)serialisation."""
    def test_constructor(self):
        a = labelling_tool.PointLabel(position_xy=np.array([-1.0, 1.0]))
        b = labelling_tool.PointLabel(position_xy=np.array([1.0, 2.0]), object_id='abc_123', classification='cls_a',
                                      source='manual', anno_data={'purpose': 'test'})
        c = labelling_tool.PointLabel(position_xy=np.array([2.0, 4.0]), classification='cls_b',
                                      source='auto:test', anno_data={'purpose': 'second_test'})
        self.assertTrue((a.position_xy == np.array([-1.0, 1.0])).all())
        self.assertTrue((b.position_xy == np.array([1.0, 2.0])).all())
        self.assertTrue((c.position_xy == np.array([2.0, 4.0])).all())
        self.assertIsNone(a.object_id)
        self.assertEqual(b.object_id, 'abc_123')
        self.assertIsNone(c.object_id)
        self.assertIsNone(a.classification)
        self.assertEqual(b.classification, 'cls_a')
        self.assertEqual(c.classification, 'cls_b')
        self.assertIsNone(a.source)
        self.assertEqual(b.source, 'manual')
        self.assertEqual(c.source, 'auto:test')
        self.assertEqual(a.anno_data, {})
        self.assertEqual(b.anno_data, {'purpose': 'test'})
        self.assertEqual(c.anno_data, {'purpose': 'second_test'})
    def test_bounding_box(self):
        # Without a context the box is degenerate (zero size); with a context
        # it is grown by the configured point radius in each direction.
        rad01 = labelling_tool.LabelContext(point_radius=0.1)
        a = labelling_tool.PointLabel(position_xy=np.array([-1.0, 1.0]))
        self.assertTrue((a.bounding_box()[0] == np.array([-1.0, 1.0])).all())
        self.assertTrue((a.bounding_box()[1] == np.array([-1.0, 1.0])).all())
        self.assertTrue((a.bounding_box(rad01)[0] == np.array([-1.1, 0.9])).all())
        self.assertTrue((a.bounding_box(rad01)[1] == np.array([-0.9, 1.1])).all())
    def test_warped(self):
        obj_tab = labelling_tool.ObjectTable('abc')
        a = labelling_tool.PointLabel(position_xy=np.array([-1.0, 1.0]))
        # warped() applies the transform to the position and registers the new
        # label, assigning an object id with a generated or explicit prefix.
        a_7 = a.warped(lambda p_xy: p_xy + 7.0)
        a_7b = a.warped(lambda p_xy: p_xy + 7.0, object_table=obj_tab)
        a_7c = a.warped(lambda p_xy: p_xy + 7.0, id_prefix='abc')
        # The original label is left untouched.
        self.assertTrue((a.position_xy == np.array([-1.0, 1.0])).all())
        self.assertTrue((a_7.position_xy == np.array([6.0, 8.0])).all())
        self.assertTrue((a_7b.position_xy == np.array([6.0, 8.0])).all())
        self.assertIsNone(a.object_id)
        self.assertTrue(a_7.object_id.endswith('__1'))
        self.assertEqual(a_7b.object_id, 'abc__1')
        self.assertEqual(a_7c.object_id, 'abc__1')
    def test_render_mask(self):
        # Expected masks are built with the same PIL primitives the renderer
        # is expected to use, so comparisons are exact.
        rad10 = labelling_tool.LabelContext(point_radius=10.0)
        a = labelling_tool.PointLabel(position_xy=np.array([25.0, 25.0]))
        # Point
        tgt_point = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_point).point((25, 25), fill=1)
        self.assertTrue((a.render_mask(50, 50, fill=False, dx=0.0, dy=0.0, ctx=None) ==
                         np.array(tgt_point)).all())
        self.assertTrue((a.render_mask(50, 50, fill=True, dx=0.0, dy=0.0, ctx=None) ==
                         np.array(tgt_point)).all())
        # Radius 10 outlined
        tgt_rad10_outline = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_rad10_outline).ellipse([15, 15, 35, 35], outline=1, fill=0)
        self.assertTrue((a.render_mask(50, 50, fill=False, dx=0.0, dy=0.0, ctx=rad10) ==
                         np.array(tgt_rad10_outline)).all())
        # Radius 10 filled
        tgt_rad10_filled = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_rad10_filled).ellipse([15, 15, 35, 35], outline=1, fill=1)
        self.assertTrue((a.render_mask(50, 50, fill=True, dx=0.0, dy=0.0, ctx=rad10) ==
                         np.array(tgt_rad10_filled)).all())
        # Point, offset
        tgt_point_dxy = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_point_dxy).point((20, 30), fill=1)
        self.assertTrue((a.render_mask(50, 50, fill=False, dx=-5.0, dy=5.0, ctx=None) ==
                         np.array(tgt_point_dxy)).all())
        # Radius 10 filled, offset
        tgt_rad10_filled_dxy = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_rad10_filled_dxy).ellipse([10, 20, 30, 40], outline=1, fill=1)
        self.assertTrue((a.render_mask(50, 50, fill=True, dx=-5.0, dy=5.0, ctx=rad10) ==
                         np.array(tgt_rad10_filled_dxy)).all())
    def test_to_json(self):
        b = labelling_tool.PointLabel(position_xy=np.array([1.0, 2.0]), object_id='abc_123', classification='cls_a',
                                      source='manual', anno_data={'purpose': 'test'})
        self.assertEqual(b.to_json(),
                         dict(label_type='point', position={'x': 1.0, 'y': 2.0}, object_id='abc_123',
                              label_class='cls_a', source='manual', anno_data={'purpose': 'test'}))
    def test_from_json(self):
        # from_json dispatches on label_type and must rebuild an equivalent label.
        obj_tab = labelling_tool.ObjectTable('abc')
        js = dict(label_type='point', position={'x': 1.0, 'y': 2.0}, object_id='abc_123',
                  label_class='cls_a', source='manual', anno_data={'purpose': 'test'})
        b = labelling_tool.AbstractLabel.from_json(js, obj_tab)
        self.assertTrue(isinstance(b, labelling_tool.PointLabel))
        self.assertTrue((b.position_xy == np.array([1.0, 2.0])).all())
        self.assertEqual(b.object_id, 'abc_123')
        self.assertEqual(b.classification, 'cls_a')
        self.assertEqual(b.source, 'manual')
        self.assertEqual(b.anno_data, {'purpose': 'test'})
    def test_flatten_json(self):
        # A point label has no nested children, so flattening yields itself.
        js = dict(label_type='point', position={'x': 1.0, 'y': 2.0}, object_id='abc_123',
                  label_class='cls_a', source='manual', anno_data={'purpose': 'test'})
        labs_js = list(labelling_tool.AbstractLabel.flatten_json(js))
        self.assertEqual(labs_js, [js])
class PolygonLabelTestCase(TestCase):
def are_polygons_cyclically_equal(self, a, b, both_directions=False):
if len(a) == len(b):
for i in range(len(a)):
if (a == np.append(b[i:], b[:i], axis=0)).all():
return True
if both_directions:
a = a[::-1]
for i in range(len(a)):
if (a == np.append(b[i:], b[:i], axis=0)).all():
return True
return False
def test_constructor(self):
outer_rect = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
a = labelling_tool.PolygonLabel(regions=[inner_rect], object_id='abc_123', classification='cls_a',
source='manual', anno_data={'purpose': 'test'})
b = labelling_tool.PolygonLabel(regions=[outer_rect, inner_rect], object_id='abc_123', classification='cls_a',
source='manual', anno_data={'purpose': 'test'})
self.assertEqual(len(a.regions), 1)
self.assertTrue((a.regions[0] == inner_rect).all())
self.assertEqual(len(b.regions), 2)
self.assertTrue((b.regions[0] == outer_rect).all())
self.assertTrue((b.regions[1] == inner_rect).all())
self.assertEqual(b.object_id, 'abc_123')
self.assertEqual(b.classification, 'cls_a')
self.assertEqual(b.source, 'manual')
self.assertEqual(b.anno_data, {'purpose': 'test'})
def test_bounding_box(self):
outer_rect = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
a = labelling_tool.PolygonLabel(regions=[inner_rect], object_id='abc_123', classification='cls_a',
source='manual', anno_data={'purpose': 'test'})
b = labelling_tool.PolygonLabel(regions=[outer_rect, inner_rect], object_id='abc_123', classification='cls_a',
source='manual', anno_data={'purpose': 'test'})
self.assertTrue((a.bounding_box()[0] == np.array([20.0, 20.0])).all())
self.assertTrue((a.bounding_box()[1] == np.array([30.0, 30.0])).all())
self.assertTrue((b.bounding_box()[0] == np.array([10.0, 10.0])).all())
self.assertTrue((b.bounding_box()[1] == np.array([40.0, 40.0])).all())
def test_warped(self):
outer_rect = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
a = labelling_tool.PolygonLabel(regions=[inner_rect], object_id='abc_123', classification='cls_a',
source='manual', anno_data={'purpose': 'test'})
b = labelling_tool.PolygonLabel(regions=[outer_rect, inner_rect], object_id='abc_123', classification='cls_a',
source='manual', anno_data={'purpose': 'test'})
a_7 = a.warped(lambda p_xy: p_xy + 7.0)
b_7 = b.warped(lambda p_xy: p_xy + 7.0)
self.assertEqual(len(a_7.regions), 1)
self.assertTrue((a_7.regions[0] == (inner_rect + 7)).all())
self.assertEqual(len(b_7.regions), 2)
self.assertTrue((b_7.regions[0] == (outer_rect + 7)).all())
self.assertTrue((b_7.regions[1] == (inner_rect + 7)).all())
# Rotation matrix
theta = np.radians(20.0)
c = np.cos(theta)
s = np.sin(theta)
r = np.array([[c, -s],
[s, c]])
a_r = a.warped(lambda p_xy: (r @ p_xy.T).T)
self.assertTrue(np.allclose(a_r.regions[0], (r @ inner_rect.T).T))
def test_render_mask(self):
outer_rect = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
a = labelling_tool.PolygonLabel(regions=[inner_rect], object_id='abc_123', classification='cls_a',
source='manual', anno_data={'purpose': 'test'})
b = labelling_tool.PolygonLabel(regions=[outer_rect, inner_rect], object_id='abc_123', classification='cls_a',
source='manual', anno_data={'purpose': 'test'})
# Outlined
tgt_a_outline = Image.new('L', (50, 50), 0)
ImageDraw.Draw(tgt_a_outline).polygon([tuple(v) for v in inner_rect], outline=1, fill=0)
self.assertTrue((a.render_mask(50, 50, fill=False, dx=0.0, dy=0.0, ctx=None) ==
np.array(tgt_a_outline)).all())
tgt_b_outline = Image.new('L', (50, 50), 0)
ImageDraw.Draw(tgt_b_outline).polygon([tuple(v) for v in outer_rect], outline=1, fill=0)
ImageDraw.Draw(tgt_b_outline).polygon([tuple(v) for v in inner_rect], outline=1, fill=0)
self.assertTrue((b.render_mask(50, 50, fill=False, dx=0.0, dy=0.0, ctx=None) ==
np.array(tgt_b_outline)).all())
# Outlined, offset
tgt_b_outline_dxy = Image.new('L', (50, 50), 0)
ImageDraw.Draw(tgt_b_outline_dxy).polygon([tuple(v) for v in outer_rect + np.array([5, -5])], outline=1, fill=0)
ImageDraw.Draw(tgt_b_outline_dxy).polygon([tuple(v) for v in inner_rect + np.array([5, -5])], outline=1, fill=0)
self.assertTrue((b.render_mask(50, 50, fill=False, dx=5.0, dy=-5.0, ctx=None) ==
np.array(tgt_b_outline_dxy)).all())
# Filled
tgt_a_filled = Image.new('L', (50, 50), 0)
ImageDraw.Draw(tgt_a_filled).polygon([tuple(v) for v in inner_rect], outline=1, fill=1)
self.assertTrue((a.render_mask(50, 50, fill=True, dx=0.0, dy=0.0, ctx=None) ==
np.array(tgt_a_filled)).all())
tgt_b_filled = Image.new('L', (50, 50), 0)
ImageDraw.Draw(tgt_b_filled).polygon([tuple(v) for v in outer_rect], outline=1, fill=1)
ImageDraw.Draw(tgt_b_filled).polygon([tuple(v) for v in inner_rect], outline=0, fill=0)
self.assertTrue((b.render_mask(50, 50, fill=True, dx=0.0, dy=0.0, ctx=None) ==
np.array(tgt_b_filled)).all())
# Filled, offset
tgt_b_filled_dxy = Image.new('L', (50, 50), 0)
ImageDraw.Draw(tgt_b_filled_dxy).polygon([tuple(v) for v in outer_rect + np.array([5, -5])], outline=1, fill=1)
ImageDraw.Draw(tgt_b_filled_dxy).polygon([tuple(v) for v in inner_rect + np.array([5, -5])], outline=0, fill=0)
self.assertTrue((b.render_mask(50, 50, fill=True, dx=5.0, dy=-5.0, ctx=None) ==
np.array(tgt_b_filled_dxy)).all())
def test_to_json(self):
outer_rect = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
a = labelling_tool.PolygonLabel(regions=[inner_rect], object_id='abc_123', classification='cls_a',
source='manual', anno_data={'purpose': 'test'})
b = labelling_tool.PolygonLabel(regions=[outer_rect, inner_rect], object_id='abc_123', classification='cls_a',
source='manual', anno_data={'purpose': 'test'})
outer_js = [dict(x=p[0], y=p[1]) for p in outer_rect]
inner_js = [dict(x=p[0], y=p[1]) for p in inner_rect]
self.assertEqual(a.to_json(),
dict(label_type='polygon', regions=[inner_js], object_id='abc_123',
label_class='cls_a', source='manual', anno_data={'purpose': 'test'}))
self.assertEqual(b.to_json(),
dict(label_type='polygon', regions=[outer_js, inner_js], object_id='abc_123',
label_class='cls_a', source='manual', anno_data={'purpose': 'test'}))
def test_from_json(self):
outer_rect = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
outer_js = [dict(x=p[0], y=p[1]) for p in outer_rect]
inner_js = [dict(x=p[0], y=p[1]) for p in inner_rect]
obj_tab = labelling_tool.ObjectTable('abc')
js_a = dict(label_type='polygon', regions=[inner_js], object_id='abc_123',
label_class='cls_a', source='manual', anno_data={'purpose': 'test'})
js_b = dict(label_type='polygon', regions=[outer_js, inner_js], object_id='abc_124',
label_class='cls_b', source='manual2', anno_data={'purpose': 'test2'})
a = labelling_tool.AbstractLabel.from_json(js_a, obj_tab)
b = labelling_tool.AbstractLabel.from_json(js_b, obj_tab)
self.assertTrue(isinstance(a, labelling_tool.PolygonLabel))
self.assertTrue(isinstance(b, labelling_tool.PolygonLabel))
self.assertEqual(len(a.regions), 1)
self.assertTrue((a.regions[0] == inner_rect).all())
self.assertEqual(len(b.regions), 2)
self.assertTrue((b.regions[0] == outer_rect).all())
self.assertTrue((b.regions[1] == inner_rect).all())
self.assertEqual(a.object_id, 'abc_123')
self.assertEqual(b.object_id, 'abc_124')
self.assertEqual(a.classification, 'cls_a')
self.assertEqual(b.classification, 'cls_b')
self.assertEqual(a.source, 'manual')
self.assertEqual(b.source, 'manual2')
self.assertEqual(a.anno_data, {'purpose': 'test'})
self.assertEqual(b.anno_data, {'purpose': 'test2'})
def test_mask_image_to_regions(self):
outer_rect = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
# skimage.measure.find_contours rounds corners
outer_rect_v2 = np.array([[10.0, 9.5], [40.0, 9.5], [40.5, 10.0], [40.5, 40.0],
[40.0, 40.5], [10.0, 40.5], [9.5, 40.0], [9.5, 10.0]])
inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
inner_rect_v2 = np.array([[20.0, 19.5], [30.0, 19.5], [30.5, 20.0], [30.5, 30.0],
[30.0, 30.5], [20.0, 30.5], [19.5, 30.0], [19.5, 20.0]])
tgt_a_filled = Image.new('L', (50, 50), 0)
ImageDraw.Draw(tgt_a_filled).polygon([tuple(v) for v in inner_rect], outline=1, fill=1)
reg_a = labelling_tool.PolygonLabel.mask_image_to_regions(np.array(tgt_a_filled) != 0)
self.assertEqual(len(reg_a), 1)
self.assertTrue(self.are_polygons_cyclically_equal(reg_a[0], inner_rect_v2, both_directions=True))
tgt_b_filled = Image.new('L', (50, 50), 0)
ImageDraw.Draw(tgt_b_filled).polygon([tuple(v) for v in outer_rect], outline=1, fill=1)
ImageDraw.Draw(tgt_b_filled).polygon([tuple(v) for v in inner_rect], outline=0, fill=0)
reg_b = labelling_tool.PolygonLabel.mask_image_to_regions(np.array(tgt_b_filled) != 0)
self.assertEqual(len(reg_b), 2)
self.assertTrue(self.are_polygons_cyclically_equal(reg_b[0], outer_rect_v2, both_directions=True))
self.assertTrue(self.are_polygons_cyclically_equal(reg_b[1], inner_rect_v2, both_directions=True))
def test_mask_image_to_regions_cv(self):
    """mask_image_to_regions_cv() should recover integer-valued polygonal regions
    of a boolean mask image via cv2.findContours."""
    def rasterise(*polys_and_values):
        # Draw each (polygon, pixel value) pair into a 50x50 image, then return it as a bool mask.
        canvas = Image.new('L', (50, 50), 0)
        drawer = ImageDraw.Draw(canvas)
        for poly, value in polys_and_values:
            drawer.polygon([tuple(v) for v in poly], outline=value, fill=value)
        return np.array(canvas) != 0

    square_outer = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
    square_inner = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
    # cv2.findContours generates a slightly rotated square for the inner (hole) region...
    expected_hole = np.array([[20.0, 19.0], [31.0, 20.0], [30.0, 31.0], [19.0, 30.0]])

    # A single filled square yields a single region with exact integer corners
    regions_single = labelling_tool.PolygonLabel.mask_image_to_regions_cv(
        rasterise((square_inner, 1)))
    self.assertEqual(len(regions_single), 1)
    self.assertTrue(self.are_polygons_cyclically_equal(
        regions_single[0], square_inner.astype(int), both_directions=True))

    # A filled square with a hole punched out yields two regions: outer boundary, then hole
    regions_holed = labelling_tool.PolygonLabel.mask_image_to_regions_cv(
        rasterise((square_outer, 1), (square_inner, 0)))
    self.assertEqual(len(regions_holed), 2)
    self.assertTrue(self.are_polygons_cyclically_equal(
        regions_holed[0], square_outer.astype(int), both_directions=True))
    self.assertTrue(self.are_polygons_cyclically_equal(
        regions_holed[1], expected_hole.astype(int), both_directions=True))
class BoxLabelTestCase(TestCase):
    """Tests for labelling_tool.BoxLabel: an axis-aligned box defined by a centre
    point and a size, both as (x, y) arrays.

    Uses np.testing assertion helpers rather than ``assertTrue((a == b).all())``
    so that a failure reports the mismatching arrays instead of 'False is not true'.
    """

    def test_constructor(self):
        """Constructor should store geometry and metadata unchanged."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]),
                                    object_id='abc_123', classification='cls_a',
                                    source='manual', anno_data={'purpose': 'test'})
        np.testing.assert_array_equal(a.centre_xy, np.array([15.0, 25.0]))
        np.testing.assert_array_equal(a.size_xy, np.array([8.0, 12.0]))
        self.assertEqual(a.object_id, 'abc_123')
        self.assertEqual(a.classification, 'cls_a')
        self.assertEqual(a.source, 'manual')
        self.assertEqual(a.anno_data, {'purpose': 'test'})

    def test_bounding_box(self):
        """bounding_box() should return (lower_xy, upper_xy) = centre -/+ size / 2."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]))
        np.testing.assert_array_equal(a.bounding_box()[0], np.array([11.0, 19.0]))
        np.testing.assert_array_equal(a.bounding_box()[1], np.array([19.0, 31.0]))

    def test_warped(self):
        """warped() should transform the centre; the size becomes the axis-aligned
        extent of the warped box."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]))
        # Pure translation: centre moves, size is unchanged
        a_7 = a.warped(lambda p_xy: p_xy + 7.0)
        np.testing.assert_array_equal(a_7.centre_xy, np.array([22.0, 32.0]))
        np.testing.assert_array_equal(a_7.size_xy, np.array([8.0, 12.0]))
        # Rotation matrix (20 degrees, anti-clockwise)
        theta = np.radians(20.0)
        c = np.cos(theta)
        s = np.sin(theta)
        r = np.array([[c, -s],
                      [s, c]])
        a_r = a.warped(lambda p_xy: (r @ p_xy.T).T)
        np.testing.assert_allclose(a_r.centre_xy, r @ np.array([15.0, 25.0]))
        # The new size is the bounding extent of the rotated box's corners
        np.testing.assert_allclose(a_r.size_xy, np.array([8.0 * c + 12.0 * s, 12.0 * c + 8.0 * s]))

    def test_render_mask(self):
        """render_mask() should rasterise the box, outlined or filled, with an
        optional (dx, dy) offset, matching PIL's own rectangle rasterisation."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]))
        # Outlined
        tgt_a_outline = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_a_outline).rectangle([(11.0, 19.0), (19.0, 31.0)], outline=1, fill=0)
        np.testing.assert_array_equal(a.render_mask(50, 50, fill=False, dx=0.0, dy=0.0, ctx=None),
                                      np.array(tgt_a_outline))
        # Outlined, offset
        tgt_b_outline_dxy = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_b_outline_dxy).rectangle([(16.0, 14.0), (24.0, 26.0)], outline=1, fill=0)
        np.testing.assert_array_equal(a.render_mask(50, 50, fill=False, dx=5.0, dy=-5.0, ctx=None),
                                      np.array(tgt_b_outline_dxy))
        # Filled
        tgt_a_filled = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_a_filled).rectangle([(11.0, 19.0), (19.0, 31.0)], outline=1, fill=1)
        np.testing.assert_array_equal(a.render_mask(50, 50, fill=True, dx=0.0, dy=0.0, ctx=None),
                                      np.array(tgt_a_filled))
        # Filled, offset
        tgt_b_filled_dxy = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_b_filled_dxy).rectangle([(16.0, 14.0), (24.0, 26.0)], outline=1, fill=1)
        np.testing.assert_array_equal(a.render_mask(50, 50, fill=True, dx=5.0, dy=-5.0, ctx=None),
                                      np.array(tgt_b_filled_dxy))

    def test_to_json(self):
        """to_json() should serialise geometry and metadata with label_type 'box'."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]),
                                    object_id='abc_123', classification='cls_a',
                                    source='manual', anno_data={'purpose': 'test'})
        self.assertEqual(a.to_json(),
                         dict(label_type='box', centre=dict(x=15.0, y=25.0), size=dict(x=8.0, y=12.0),
                              object_id='abc_123', label_class='cls_a', source='manual',
                              anno_data={'purpose': 'test'}))

    def test_from_json(self):
        """AbstractLabel.from_json() should build a BoxLabel from 'box' JSON."""
        obj_tab = labelling_tool.ObjectTable('abc')
        js_a = dict(label_type='box', centre=dict(x=15.0, y=25.0), size=dict(x=8.0, y=12.0),
                    object_id='abc_123', label_class='cls_a', source='manual', anno_data={'purpose': 'test'})
        a = labelling_tool.AbstractLabel.from_json(js_a, obj_tab)
        self.assertIsInstance(a, labelling_tool.BoxLabel)
        np.testing.assert_array_equal(a.centre_xy, np.array([15.0, 25.0]))
        np.testing.assert_array_equal(a.size_xy, np.array([8.0, 12.0]))
        self.assertEqual(a.object_id, 'abc_123')
        self.assertEqual(a.classification, 'cls_a')
        self.assertEqual(a.source, 'manual')
        self.assertEqual(a.anno_data, {'purpose': 'test'})
class OrientedEllipseLabelTestCase(TestCase):
    """Tests for labelling_tool.OrientedEllipseLabel: an ellipse defined by a
    centre point, two radii and an orientation angle in radians."""

    def test_constructor(self):
        """Constructor should store geometry and metadata unchanged."""
        a = labelling_tool.OrientedEllipseLabel(
            centre_xy=np.array([15.0, 25.0]), radius1=10.0, radius2=3.0, orientation_rad=math.radians(30.0),
            object_id='abc_123', classification='cls_a', source='manual', anno_data={'purpose': 'test'})
        self.assertTrue((a.centre_xy == np.array([15.0, 25.0])).all())
        self.assertEqual(a.radius1, 10.0)
        self.assertEqual(a.radius2, 3.0)
        self.assertEqual(a.orientation_rad, math.radians(30.0))
        self.assertEqual(a.object_id, 'abc_123')
        self.assertEqual(a.classification, 'cls_a')
        self.assertEqual(a.source, 'manual')
        self.assertEqual(a.anno_data, {'purpose': 'test'})

    def test_bounding_box(self):
        """bounding_box() should return the axis-aligned extent of the oriented
        ellipse, for a range of orientation angles (expected values hand-computed)."""
        # Ellipse with no orientation
        a = labelling_tool.OrientedEllipseLabel(
            centre_xy=np.array([15.0, 25.0]), radius1=4.0, radius2=6.0, orientation_rad=0.0)
        self.assertTrue(np.allclose(a.bounding_box()[0], np.array([11.0, 19.0])))
        self.assertTrue(np.allclose(a.bounding_box()[1], np.array([19.0, 31.0])))
        # 90 degrees orientation: the radii swap roles
        b = labelling_tool.OrientedEllipseLabel(
            centre_xy=np.array([15.0, 25.0]), radius1=4.0, radius2=6.0, orientation_rad=math.radians(90.0))
        self.assertTrue(np.allclose(b.bounding_box()[0], np.array([9.0, 21.0])))
        self.assertTrue(np.allclose(b.bounding_box()[1], np.array([21.0, 29.0])))
        # 45 degrees orientation
        c = labelling_tool.OrientedEllipseLabel(
            centre_xy=np.array([15.0, 25.0]), radius1=4.0, radius2=6.0, orientation_rad=math.radians(45.0))
        self.assertTrue(np.allclose(c.bounding_box()[0], np.array([9.900980486407214, 19.900980486407214])))
        self.assertTrue(np.allclose(c.bounding_box()[1], np.array([20.099019513592786, 30.099019513592786])))
        # 30 degrees orientation
        d = labelling_tool.OrientedEllipseLabel(
            centre_xy=np.array([15.0, 25.0]), radius1=4.0, radius2=6.0, orientation_rad=math.radians(30.0))
        self.assertTrue(np.allclose(d.bounding_box()[0], np.array([10.41742430504416, 19.432235637169978])))
        self.assertTrue(np.allclose(d.bounding_box()[1], np.array([19.58257569495584, 30.567764362830022])))

    def test_warped(self):
        """warped() should transform the centre; translation leaves radii and
        orientation unchanged, rotation adds to the orientation."""
        a = labelling_tool.OrientedEllipseLabel(
            centre_xy=np.array([15.0, 25.0]), radius1=4.0, radius2=6.0, orientation_rad=0.0)
        a_7 = a.warped(lambda p_xy: p_xy + 7.0)
        self.assertTrue(np.allclose(a_7.centre_xy, np.array([22.0, 32.0])))
        self.assertTrue(np.allclose(a_7.radius1, 4.0 ))
        self.assertTrue(np.allclose(a_7.radius2, 6.0 ))
        self.assertTrue(np.allclose(a_7.orientation_rad, 0.0))
        # Rotation matrix (20 degrees, anti-clockwise)
        theta = np.radians(20.0)
        c = np.cos(theta)
        s = np.sin(theta)
        r = np.array([[c, -s],
                      [s, c]])
        a_r = a.warped(lambda p_xy: (r @ p_xy.T).T)
        self.assertTrue(np.allclose(a_r.centre_xy, r @ np.array([15.0, 25.0])))
        self.assertTrue(np.allclose(a_r.radius1, 4.0))
        self.assertTrue(np.allclose(a_r.radius2, 6.0))
        self.assertTrue(np.allclose(a_r.orientation_rad, math.radians(20.0)))

    def test_render_mask(self):
        """render_mask() should rasterise the ellipse as a 38-vertex polygon,
        outlined or filled, offset by (dx, dy) and rotated by the orientation."""
        def draw_polygon_image(verts_xy, outline, fill, image_size):
            # Rasterise a single polygon into a fresh greyscale image
            img = Image.new('L', image_size, 0)
            xy = [(v[0], v[1]) for v in verts_xy]
            ImageDraw.Draw(img).polygon(xy, outline=outline, fill=fill)
            return img
        a = labelling_tool.OrientedEllipseLabel(
            centre_xy=np.array([15.0, 25.0]), radius1=4.0, radius2=6.0, orientation_rad=0.0)
        # 38 vertices — presumably matches the polygonal approximation used by
        # OrientedEllipseLabel.render_mask; confirm against the implementation
        n_verts = 38
        thetas = np.linspace(0.0, math.pi * 2, n_verts + 1)[:-1]
        # Axis aligned verts, centred on origin
        aa_verts_xy = np.stack([np.cos(thetas) * 4.0, np.sin(thetas) * 6.0], axis=1)
        # Outlined
        tgt_a_outline = draw_polygon_image(aa_verts_xy + np.array([15.0, 25.0]),
                                           outline=1, fill=0, image_size=(50,50))
        self.assertTrue((a.render_mask(50, 50, fill=False, dx=0.0, dy=0.0, ctx=None) ==
                         np.array(tgt_a_outline)).all())
        # Outlined, offset
        tgt_a_outline_dxy = draw_polygon_image(aa_verts_xy + np.array([20.0, 20.0]),
                                               outline=1, fill=0, image_size=(50,50))
        self.assertTrue((a.render_mask(50, 50, fill=False, dx=5.0, dy=-5.0, ctx=None) ==
                         np.array(tgt_a_outline_dxy)).all())
        # Filled
        tgt_a_filled = draw_polygon_image(aa_verts_xy + np.array([15.0, 25.0]),
                                          outline=1, fill=1, image_size=(50,50))
        self.assertTrue((a.render_mask(50, 50, fill=True, dx=0.0, dy=0.0, ctx=None) ==
                         np.array(tgt_a_filled)).all())
        # Filled, offset
        tgt_b_filled_dxy = draw_polygon_image(aa_verts_xy + np.array([20.0, 20.0]),
                                              outline=1, fill=1, image_size=(50,50))
        self.assertTrue((a.render_mask(50, 50, fill=True, dx=5.0, dy=-5.0, ctx=None) ==
                         np.array(tgt_b_filled_dxy)).all())
        # With orientation: expected vertices are the axis-aligned ones rotated by 25 degrees
        b = labelling_tool.OrientedEllipseLabel(
            centre_xy=np.array([15.0, 25.0]), radius1=4.0, radius2=6.0, orientation_rad=math.radians(25.0))
        rot_theta = np.radians(25.0)
        c = np.cos(rot_theta)
        s = np.sin(rot_theta)
        r = np.array([[c, -s],
                      [s, c]])
        # Outlined
        tgt_b_outline = draw_polygon_image((r @ aa_verts_xy.T).T + np.array([15.0, 25.0]),
                                           outline=1, fill=0, image_size=(50,50))
        self.assertTrue((b.render_mask(50, 50, fill=False, dx=0.0, dy=0.0, ctx=None) ==
                         np.array(tgt_b_outline)).all())
        # Outlined, offset
        tgt_b_outline_dxy = draw_polygon_image((r @ aa_verts_xy.T).T + np.array([20.0, 20.0]),
                                               outline=1, fill=0, image_size=(50,50))
        self.assertTrue((b.render_mask(50, 50, fill=False, dx=5.0, dy=-5.0, ctx=None) ==
                         np.array(tgt_b_outline_dxy)).all())
        # Filled
        tgt_b_filled = draw_polygon_image((r @ aa_verts_xy.T).T + np.array([15.0, 25.0]),
                                          outline=1, fill=1, image_size=(50,50))
        self.assertTrue((b.render_mask(50, 50, fill=True, dx=0.0, dy=0.0, ctx=None) ==
                         np.array(tgt_b_filled)).all())
        # Filled, offset
        tgt_b_filled_dxy = draw_polygon_image((r @ aa_verts_xy.T).T + np.array([20.0, 20.0]),
                                              outline=1, fill=1, image_size=(50,50))
        self.assertTrue((b.render_mask(50, 50, fill=True, dx=5.0, dy=-5.0, ctx=None) ==
                         np.array(tgt_b_filled_dxy)).all())

    def test_uv_points_to_params(self):
        """uv_points_to_params() should convert two axis end-points plus a minor
        axis length into (centre, radius1, radius2, orientation) parameters."""
        a_cen_xy, a_rad1, a_rad2, a_ori = labelling_tool.OrientedEllipseLabel.uv_points_to_params(
            np.array([[5.0, 5.0], [15.0, 15.0]]), np.array([12.0, 8.0]))
        self.assertTrue(np.allclose(a_cen_xy, np.array([10.0, 10.0])))
        self.assertTrue(np.allclose(a_rad1, 5.0 * math.sqrt(2.0)))
        self.assertTrue(np.allclose(a_rad2, 2.0 * math.sqrt(2.0)))
        self.assertTrue(np.allclose(a_ori, np.radians(45.0)))

    def test_to_json(self):
        """to_json() should serialise geometry and metadata with label_type
        'oriented_ellipse'."""
        a = labelling_tool.OrientedEllipseLabel(
            centre_xy=np.array([15.0, 25.0]), radius1=10.0, radius2=3.0, orientation_rad=math.radians(30.0),
            object_id='abc_123', classification='cls_a', source='manual', anno_data={'purpose': 'test'})
        self.assertEqual(a.to_json(),
                         dict(label_type='oriented_ellipse', centre=dict(x=15.0, y=25.0), radius1=10.0,
                              radius2=3.0, orientation_radians=math.radians(30.0),
                              object_id='abc_123', label_class='cls_a', source='manual',
                              anno_data={'purpose': 'test'}))

    def test_from_uv_points(self):
        """new_instance_from_uv_points() should build a label whose parameters
        match uv_points_to_params() and whose metadata is stored unchanged."""
        a = labelling_tool.OrientedEllipseLabel.new_instance_from_uv_points(
            np.array([[5.0, 5.0], [15.0, 15.0]]), np.array([12.0, 8.0]),
            object_id='abc_123', classification='cls_a', source='manual', anno_data={'purpose': 'test'})
        self.assertTrue(np.allclose(a.centre_xy, np.array([10.0, 10.0])))
        self.assertTrue(np.allclose(a.radius1, 5.0 * math.sqrt(2.0)))
        self.assertTrue(np.allclose(a.radius2, 2.0 * math.sqrt(2.0)))
        self.assertTrue(np.allclose(a.orientation_rad, np.radians(45.0)))
        self.assertEqual(a.object_id, 'abc_123')
        self.assertEqual(a.classification, 'cls_a')
        self.assertEqual(a.source, 'manual')
        self.assertEqual(a.anno_data, {'purpose': 'test'})

    def test_from_json(self):
        """AbstractLabel.from_json() should build an OrientedEllipseLabel from
        'oriented_ellipse' JSON."""
        obj_tab = labelling_tool.ObjectTable('abc')
        js_a = dict(label_type='oriented_ellipse', centre=dict(x=15.0, y=25.0), radius1=10.0,
                    radius2=3.0, orientation_radians=math.radians(30.0),
                    object_id='abc_123', label_class='cls_a', source='manual', anno_data={'purpose': 'test'})
        a = labelling_tool.AbstractLabel.from_json(js_a, obj_tab)
        self.assertTrue(isinstance(a, labelling_tool.OrientedEllipseLabel))
        self.assertTrue((a.centre_xy == np.array([15.0, 25.0])).all())
        self.assertEqual(a.radius1, 10.0)
        self.assertEqual(a.radius2, 3.0)
        self.assertEqual(a.orientation_rad, math.radians(30.0))
        self.assertEqual(a.object_id, 'abc_123')
        self.assertEqual(a.classification, 'cls_a')
        self.assertEqual(a.source, 'manual')
        self.assertEqual(a.anno_data, {'purpose': 'test'})
class GroupLabelTestCase(TestCase):
    """Tests for labelling_tool.GroupLabel: a composite label that groups several
    component labels and behaves as a sequence of them (len / indexing)."""

    def test_constructor(self):
        """Constructor should store the component labels in order plus the
        group's own metadata."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]),
                                    object_id='abc_123', classification='cls_a',
                                    source='manual', anno_data={'purpose': 'test'})
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        b = labelling_tool.PolygonLabel(regions=[inner_rect], object_id='abc_124', classification='cls_a',
                                        source='manual', anno_data={'purpose': 'test'})
        ab = labelling_tool.GroupLabel(component_labels=[a, b],
                                       object_id='grp_1', classification='cls_c',
                                       source='manual', anno_data={'purpose': 'all'})
        self.assertEqual(len(ab), 2)
        # Indexing the group yields the component labels themselves, not copies
        self.assertIs(ab[0], a)
        self.assertIs(ab[1], b)
        self.assertEqual(ab.classification, 'cls_c')
        self.assertEqual(ab.source, 'manual')
        self.assertEqual(ab.anno_data, {'purpose': 'all'})

    def test_flatten(self):
        """flatten() should yield the components depth-first, then the group itself."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]),
                                    object_id='abc_123', classification='cls_a',
                                    source='manual', anno_data={'purpose': 'test'})
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        b = labelling_tool.PolygonLabel(regions=[inner_rect], object_id='abc_124', classification='cls_a',
                                        source='manual', anno_data={'purpose': 'test'})
        ab = labelling_tool.GroupLabel(component_labels=[a, b],
                                       object_id='grp_1', classification='cls_c',
                                       source='manual', anno_data={'purpose': 'all'})
        self.assertEqual(list(ab.flatten()), [a, b, ab])

    def test_bounding_box(self):
        """The group's bounding box should be the union of the components' boxes."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 30.0]), size_xy=np.array([8.0, 12.0]),
                                    object_id='abc_123', classification='cls_a',
                                    source='manual', anno_data={'purpose': 'test'})
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        b = labelling_tool.PolygonLabel(regions=[inner_rect], object_id='abc_124', classification='cls_a',
                                        source='manual', anno_data={'purpose': 'test'})
        ab = labelling_tool.GroupLabel(component_labels=[a, b],
                                       object_id='grp_1', classification='cls_c',
                                       source='manual', anno_data={'purpose': 'all'})
        self.assertTrue((a.bounding_box()[0] == np.array([11.0, 24.0])).all())
        self.assertTrue((a.bounding_box()[1] == np.array([19.0, 36.0])).all())
        self.assertTrue((b.bounding_box()[0] == np.array([20.0, 20.0])).all())
        self.assertTrue((b.bounding_box()[1] == np.array([30.0, 30.0])).all())
        # Union of the two component boxes
        self.assertTrue((ab.bounding_box()[0] == np.array([11.0, 20.0])).all())
        self.assertTrue((ab.bounding_box()[1] == np.array([30.0, 36.0])).all())

    def test_warped(self):
        """warped() should warp every component label."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]))
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        b = labelling_tool.PolygonLabel(regions=[inner_rect])
        ab = labelling_tool.GroupLabel(component_labels=[a, b])
        # Pure translation
        ab_7 = ab.warped(lambda p_xy: p_xy + 7.0)
        self.assertTrue((ab_7[0].centre_xy == np.array([22.0, 32.0])).all())
        self.assertTrue((ab_7[0].size_xy == np.array([8.0, 12.0])).all())
        self.assertTrue((ab_7[1].regions[0] == (inner_rect + 7)).all())
        # Rotation matrix (20 degrees, anti-clockwise)
        theta = np.radians(20.0)
        c = np.cos(theta)
        s = np.sin(theta)
        r = np.array([[c, -s],
                      [s, c]])
        ab_r = ab.warped(lambda p_xy: (r @ p_xy.T).T)
        self.assertTrue(np.allclose(ab_r[0].centre_xy, r @ np.array([15.0, 25.0])))
        self.assertTrue(np.allclose(ab_r[0].size_xy, np.array([8.0 * c + 12.0 * s, 12.0 * c + 8.0 * s])))
        self.assertTrue(np.allclose(ab_r[1].regions, (r @ inner_rect.T).T))

    def test_render_mask(self):
        """render_mask() should composite the masks of all component labels."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]))
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        b = labelling_tool.PolygonLabel(regions=[inner_rect])
        ab = labelling_tool.GroupLabel(component_labels=[a, b])
        # Outlined
        tgt_ab_outline = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_ab_outline).rectangle([(11.0, 19.0), (19.0, 31.0)], outline=1, fill=0)
        ImageDraw.Draw(tgt_ab_outline).polygon([tuple(v) for v in inner_rect], outline=1, fill=0)
        self.assertTrue((ab.render_mask(50, 50, fill=False, dx=0.0, dy=0.0, ctx=None) ==
                         np.array(tgt_ab_outline)).all())
        # Outlined, offset
        tgt_ab_outline_dxy = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_ab_outline_dxy).rectangle([(16.0, 14.0), (24.0, 26.0)], outline=1, fill=0)
        ImageDraw.Draw(tgt_ab_outline_dxy).polygon([tuple(v) for v in (inner_rect + np.array([5, -5]))], outline=1, fill=0)
        self.assertTrue((ab.render_mask(50, 50, fill=False, dx=5.0, dy=-5.0, ctx=None) ==
                         np.array(tgt_ab_outline_dxy)).all())
        # Filled
        tgt_ab_filled = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_ab_filled).rectangle([(11.0, 19.0), (19.0, 31.0)], outline=1, fill=1)
        ImageDraw.Draw(tgt_ab_filled).polygon([tuple(v) for v in inner_rect], outline=1, fill=1)
        self.assertTrue((ab.render_mask(50, 50, fill=True, dx=0.0, dy=0.0, ctx=None) ==
                         np.array(tgt_ab_filled)).all())
        # Filled, offset
        tgt_ab_filled_dxy = Image.new('L', (50, 50), 0)
        ImageDraw.Draw(tgt_ab_filled_dxy).rectangle([(16.0, 14.0), (24.0, 26.0)], outline=1, fill=1)
        ImageDraw.Draw(tgt_ab_filled_dxy).polygon([tuple(v) for v in (inner_rect + np.array([5, -5]))],
                                                  outline=1, fill=1)
        self.assertTrue((ab.render_mask(50, 50, fill=True, dx=5.0, dy=-5.0, ctx=None) ==
                         np.array(tgt_ab_filled_dxy)).all())

    def test_to_json(self):
        """to_json() should nest the components' JSON under 'component_models'."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]),
                                    object_id='abc_123', classification='cls_a',
                                    source='manual', anno_data={'purpose': 'test'})
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        inner_js = [dict(x=p[0], y=p[1]) for p in inner_rect]
        b = labelling_tool.PolygonLabel(regions=[inner_rect], object_id='abc_124', classification='cls_b',
                                        source='manual2', anno_data={'purpose': 'test2'})
        ab = labelling_tool.GroupLabel(component_labels=[a, b],
                                       object_id='grp_1', classification='cls_c',
                                       source='manual3', anno_data={'purpose': 'test3'})
        js_a = dict(label_type='box', centre=dict(x=15.0, y=25.0), size=dict(x=8.0, y=12.0),
                    object_id='abc_123', label_class='cls_a', source='manual', anno_data={'purpose': 'test'})
        js_b = dict(label_type='polygon', regions=[inner_js], object_id='abc_124',
                    label_class='cls_b', source='manual2', anno_data={'purpose': 'test2'})
        self.assertEqual(ab.to_json()['component_models'][0], js_a)
        self.assertEqual(ab.to_json()['component_models'][1], js_b)
        self.assertEqual(ab.to_json(),
                         dict(label_type='group', component_models=[js_a, js_b], object_id='grp_1',
                              label_class='cls_c', source='manual3', anno_data={'purpose': 'test3'}))

    def test_from_json(self):
        """AbstractLabel.from_json() should rebuild the group and its components
        from 'group' JSON."""
        obj_tab = labelling_tool.ObjectTable('abc')
        js_a = dict(label_type='box', centre=dict(x=15.0, y=25.0), size=dict(x=8.0, y=12.0),
                    object_id='abc_123', label_class='cls_a', source='manual', anno_data={'purpose': 'test'})
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        inner_js = [dict(x=p[0], y=p[1]) for p in inner_rect]
        js_b = dict(label_type='polygon', regions=[inner_js], object_id='abc_124',
                    label_class='cls_b', source='manual2', anno_data={'purpose': 'test2'})
        js_ab = dict(label_type='group', component_models=[js_a, js_b], object_id='grp_1',
                     label_class='cls_c', source='manual3', anno_data={'purpose': 'test3'})
        ab = labelling_tool.AbstractLabel.from_json(js_ab, obj_tab)
        self.assertTrue(isinstance(ab, labelling_tool.GroupLabel))
        # First component: the box
        self.assertTrue((ab[0].centre_xy == np.array([15.0, 25.0])).all())
        self.assertTrue((ab[0].size_xy == np.array([8.0, 12.0])).all())
        self.assertEqual(ab[0].object_id, 'abc_123')
        self.assertEqual(ab[0].classification, 'cls_a')
        self.assertEqual(ab[0].source, 'manual')
        self.assertEqual(ab[0].anno_data, {'purpose': 'test'})
        # Second component: the polygon
        self.assertEqual(len(ab[1].regions), 1)
        self.assertTrue((ab[1].regions[0] == inner_rect).all())
        self.assertEqual(ab[1].object_id, 'abc_124')
        self.assertEqual(ab[1].classification, 'cls_b')
        self.assertEqual(ab[1].source, 'manual2')
        self.assertEqual(ab[1].anno_data, {'purpose': 'test2'})
        # The group itself
        self.assertEqual(len(ab), 2)
        self.assertEqual(ab.object_id, 'grp_1')
        self.assertEqual(ab.classification, 'cls_c')
        self.assertEqual(ab.source, 'manual3')
        self.assertEqual(ab.anno_data, {'purpose': 'test3'})

    def test_flatten_json(self):
        """flatten_json() should yield component JSON models first, then the
        group's JSON, mirroring GroupLabel.flatten()."""
        js_a = dict(label_type='box', centre=dict(x=15.0, y=25.0), size=dict(x=8.0, y=12.0),
                    object_id='abc_123', label_class='cls_a', source='manual', anno_data={'purpose': 'test'})
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        inner_js = [dict(x=p[0], y=p[1]) for p in inner_rect]
        js_b = dict(label_type='polygon', regions=[inner_js], object_id='abc_124',
                    label_class='cls_b', source='manual2', anno_data={'purpose': 'test2'})
        js_ab = dict(label_type='group', component_models=[js_a, js_b], object_id='grp_1',
                     label_class='cls_c', source='manual3', anno_data={'purpose': 'test3'})
        labs_js = list(labelling_tool.AbstractLabel.flatten_json(js_ab))
        self.assertEqual(labs_js, [js_a, js_b, js_ab])
class ImageLabelsTestCase(TestCase):
    """Tests for labelling_tool.ImageLabels: the collection of all labels attached
    to an image, indexable both by position and by object_id."""

    def test_constructor(self):
        """Labels should be retrievable by index and by their object_id,
        including labels nested inside groups."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]),
                                    object_id='abc_123', classification='cls_a',
                                    source='manual', anno_data={'purpose': 'test'})
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        b = labelling_tool.PolygonLabel(regions=[inner_rect], object_id='abc_124', classification='cls_a',
                                        source='manual', anno_data={'purpose': 'test'})
        ab = labelling_tool.GroupLabel(component_labels=[a, b],
                                       object_id='grp_1', classification='cls_c',
                                       source='manual', anno_data={'purpose': 'all'})
        labels = labelling_tool.ImageLabels([ab])
        self.assertEqual(len(labels), 1)
        self.assertIs(labels[0], ab)
        self.assertIs(labels[0][0], a)
        self.assertIs(labels[0][1], b)
        # Lookup by object_id reaches labels nested inside the group
        self.assertIs(labels['abc_123'], a)
        self.assertIs(labels['abc_124'], b)
        self.assertIs(labels['grp_1'], ab)

    def test_constructor_b(self):
        """Labels without an object_id should be assigned sequential ids built
        from the given id_prefix."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]),
                                    classification='cls_a',
                                    source='manual', anno_data={'purpose': 'test'})
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        b = labelling_tool.PolygonLabel(regions=[inner_rect], classification='cls_a',
                                        source='manual', anno_data={'purpose': 'test'})
        ab = labelling_tool.GroupLabel(component_labels=[a, b],
                                       classification='cls_c',
                                       source='manual', anno_data={'purpose': 'all'})
        labels = labelling_tool.ImageLabels([ab], id_prefix='abc')
        # Ids are assigned in flatten order: components first, then the group
        self.assertEqual(a.object_id, 'abc__1')
        self.assertEqual(b.object_id, 'abc__2')
        self.assertEqual(ab.object_id, 'abc__3')
        self.assertEqual(len(labels), 1)
        self.assertIs(labels[0], ab)
        self.assertIs(labels[0][0], a)
        self.assertIs(labels[0][1], b)
        self.assertIs(labels['abc__1'], a)
        self.assertIs(labels['abc__2'], b)
        self.assertIs(labels['abc__3'], ab)

    def test_flatten(self):
        """flatten() should yield nested labels depth-first, then their group."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]),
                                    classification='cls_a')
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        b = labelling_tool.PolygonLabel(regions=[inner_rect], classification='cls_a')
        ab = labelling_tool.GroupLabel(component_labels=[a, b], classification='cls_c')
        labels = labelling_tool.ImageLabels([ab], id_prefix='abc')
        self.assertEqual(list(labels.flatten()), [a, b, ab])

    def test_label_class_histogram(self):
        """label_class_histogram() should count classes of top-level labels only
        (a group counts once as its own class, not its components' classes)."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]),
                                    classification='cls_a')
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        b = labelling_tool.PolygonLabel(regions=[inner_rect], classification='cls_b')
        outer_rect = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
        c = labelling_tool.PolygonLabel(regions=[outer_rect, inner_rect], classification='cls_a')
        ab = labelling_tool.GroupLabel(component_labels=[a, b], classification='cls_c')
        self.assertEqual(labelling_tool.ImageLabels([ab]).label_class_histogram(),
                         {'cls_c': 1})
        self.assertEqual(labelling_tool.ImageLabels([a, b, c]).label_class_histogram(),
                         {'cls_a': 2, 'cls_b': 1})

    def test_replace_label_classes(self):
        """replace_label_classes() should rename classes in-place on all labels,
        including those nested inside groups, leaving unmapped classes alone."""
        a = labelling_tool.BoxLabel(centre_xy=np.array([15.0, 25.0]), size_xy=np.array([8.0, 12.0]),
                                    classification='cls_a')
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        b = labelling_tool.PolygonLabel(regions=[inner_rect], classification='cls_b')
        outer_rect = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
        c = labelling_tool.PolygonLabel(regions=[outer_rect, inner_rect], classification='cls_a')
        ab = labelling_tool.GroupLabel(component_labels=[a, b], classification='cls_c')
        labels = labelling_tool.ImageLabels([ab, c])
        # Sanity check the classes before replacement
        self.assertEqual(a.classification, 'cls_a')
        self.assertEqual(b.classification, 'cls_b')
        self.assertEqual(c.classification, 'cls_a')
        self.assertEqual(ab.classification, 'cls_c')
        labels.replace_label_classes({'cls_a': 'new_a', 'cls_c': 'new_c'})
        self.assertEqual(a.classification, 'new_a')
        self.assertEqual(b.classification, 'cls_b')
        self.assertEqual(c.classification, 'new_a')
        self.assertEqual(ab.classification, 'new_c')

    def test_replace_label_classes_json(self):
        """replace_label_classes_json() should rename 'label_class' values
        in-place in label JSON, recursing into group component models."""
        inner_rect = np.array([[20.0, 20.0], [30.0, 20.0], [30.0, 30.0], [20.0, 30.0]])
        outer_rect = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
        inner_js = [dict(x=p[0], y=p[1]) for p in inner_rect]
        outer_js = [dict(x=p[0], y=p[1]) for p in outer_rect]
        js_a = dict(label_type='box', centre=dict(x=15.0, y=25.0), size=dict(x=8.0, y=12.0),
                    object_id='abc_123', label_class='cls_a')
        js_b = dict(label_type='polygon', regions=[inner_js], object_id='abc_124',
                    label_class='cls_b')
        js_c = dict(label_type='polygon', regions=[outer_js, inner_js], object_id='abc_125',
                    label_class='cls_a')
        js_ab = dict(label_type='group', component_models=[js_a, js_b], object_id='grp_1',
                     label_class='cls_c')
        js_labels = [js_ab, js_c]
        # Sanity check the classes before replacement
        self.assertEqual(js_labels[0]['component_models'][0]['label_class'], 'cls_a')
        self.assertEqual(js_labels[0]['component_models'][1]['label_class'], 'cls_b')
        self.assertEqual(js_labels[1]['label_class'], 'cls_a')
        self.assertEqual(js_labels[0]['label_class'], 'cls_c')
        labelling_tool.ImageLabels.replace_label_classes_json(js_labels, {'cls_a': 'new_a', 'cls_c': 'new_c'})
        self.assertEqual(js_labels[0]['component_models'][0]['label_class'], 'new_a')
        self.assertEqual(js_labels[0]['component_models'][1]['label_class'], 'cls_b')
        self.assertEqual(js_labels[1]['label_class'], 'new_a')
        self.assertEqual(js_labels[0]['label_class'], 'new_c')
| 58.431131 | 123 | 0.583568 | 7,724 | 52,179 | 3.754272 | 0.032367 | 0.045865 | 0.012966 | 0.013863 | 0.915339 | 0.883682 | 0.851197 | 0.822505 | 0.795158 | 0.785158 | 0 | 0.075593 | 0.239675 | 52,179 | 892 | 124 | 58.496637 | 0.655332 | 0.012917 | 0 | 0.606906 | 0 | 0 | 0.054968 | 0 | 0 | 0 | 0 | 0 | 0.350598 | 1 | 0.065073 | false | 0 | 0.00664 | 0 | 0.086321 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
fa1be6e510bae80858c17146f1de2e85ef790740 | 208 | py | Python | bld/libs/builder/src/reporters/bufferedreporter.py | webbers/dongle.net | 784bc584bd74b28a1c580835bf776d76cf371c79 | [
"MIT"
] | 2 | 2017-07-31T15:57:01.000Z | 2020-02-21T09:31:11.000Z | bld/libs/builder/src/reporters/bufferedreporter.py | webbers/dongle.net | 784bc584bd74b28a1c580835bf776d76cf371c79 | [
"MIT"
] | null | null | null | bld/libs/builder/src/reporters/bufferedreporter.py | webbers/dongle.net | 784bc584bd74b28a1c580835bf776d76cf371c79 | [
"MIT"
class BufferedReporter:
    """Reporter that accumulates messages in an in-memory string buffer.

    Each reported message is appended preceded by a newline; getBuffer()
    returns the full accumulated text.
    """

    def __init__(self):
        # Accumulated report text; starts empty.
        self.__buffer = ""

    def message(self, message):
        """Append *message* to the buffer, preceded by a newline."""
        self.__buffer += "\n" + message

    def getBuffer(self):
        """Return the accumulated buffer text."""
        return self.__buffer
| 26 | 39 | 0.591346 | 21 | 208 | 5.380952 | 0.47619 | 0.265487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.293269 | 208 | 7 | 40 | 29.714286 | 0.768707 | 0 | 0 | 0 | 0 | 0 | 0.009615 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.428571 | false | 0 | 0 | 0.142857 | 0.714286 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
fa3bf7ad254ebf1c963aab5b71a178752ae20837 | 24 | py | Python | tests/__init__.py | cristealab/TRANSPIRE_JASMS2020 | 2a65a534f62c26f91d920e5781768bbf45f0e6e5 | [
"MIT"
] | null | null | null | tests/__init__.py | cristealab/TRANSPIRE_JASMS2020 | 2a65a534f62c26f91d920e5781768bbf45f0e6e5 | [
"MIT"
] | 1 | 2021-05-15T17:36:18.000Z | 2021-05-15T17:36:18.000Z | tests/__init__.py | cristealab/TRANSPIRE_JASMS2020 | 2a65a534f62c26f91d920e5781768bbf45f0e6e5 | [
"MIT"
] | 1 | 2020-05-10T01:49:26.000Z | 2020-05-10T01:49:26.000Z | from . import test_files | 24 | 24 | 0.833333 | 4 | 24 | 4.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 24 | 1 | 24 | 24 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fa4b4b68e326ad8f41e841ace36f84c920b91bbc | 21 | py | Python | kevtools_common/__init__.py | SaierLaboratory/deuterocol | 0a133bb4071a06bb8c4333f4b90a11fc65d46359 | [
"BSD-3-Clause"
] | null | null | null | kevtools_common/__init__.py | SaierLaboratory/deuterocol | 0a133bb4071a06bb8c4333f4b90a11fc65d46359 | [
"BSD-3-Clause"
] | null | null | null | kevtools_common/__init__.py | SaierLaboratory/deuterocol | 0a133bb4071a06bb8c4333f4b90a11fc65d46359 | [
"BSD-3-Clause"
] | null | null | null |
from . import types
| 7 | 19 | 0.714286 | 3 | 21 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.238095 | 21 | 2 | 20 | 10.5 | 0.9375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fa7179646e4cf4b641eadc19bd43d4f4cb43d307 | 981 | py | Python | PyTorch/Learning/pytorch_basics.py | ashirwadsangwan/Python | b4e570bb31783178d241b9f2a7145343d830b698 | [
"MIT"
] | null | null | null | PyTorch/Learning/pytorch_basics.py | ashirwadsangwan/Python | b4e570bb31783178d241b9f2a7145343d830b698 | [
"MIT"
] | null | null | null | PyTorch/Learning/pytorch_basics.py | ashirwadsangwan/Python | b4e570bb31783178d241b9f2a7145343d830b698 | [
"MIT"
] | 1 | 2022-02-22T16:08:43.000Z | 2022-02-22T16:08:43.000Z | import torch
import numpy as np
# Tensor: multidimensional matrix containing elements of a single datatype
arr = np.array([1, 2, 3, 4, 5])
# Converting a numpy array to a pytorch tensor.
tensor = torch.from_numpy(
    arr
)  # shares memory with the numpy array: changes made to `arr` are reflected in `tensor`
print(tensor)
print(type(tensor))
# we don't want any dependency on numpy so we can do the following
import torch
import numpy as np

# NOTE(review): the section below repeats the section above verbatim —
# presumably a copy/paste artefact; confirm before removing.
# Tensor: multidimensional matrix containing elements of a single datatype
arr = np.array([1, 2, 3, 4, 5])
# Converting a numpy array to a pytorch tensor.
tensor = torch.from_numpy(
    arr
)  # shares memory with the numpy array: changes made to `arr` are reflected in `tensor`
print(tensor)
print(type(tensor))
# torch.tensor() copies the data rather than sharing the numpy array's memory
tensor2 = torch.tensor(arr)
arr[0] = 23
print(tensor)
print(
    tensor2
)  # tensor2 does not pick up the change made to the numpy array
print(tensor2.dtype)
d75c83d9ffedcb9ec7ef67709e1daca25054ee14 | 10,916 | py | Python | model-optimizer/extensions/middle/ShufflenetReshape_test.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 2 | 2021-04-19T06:08:35.000Z | 2021-08-25T02:43:43.000Z | model-optimizer/extensions/middle/ShufflenetReshape_test.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 6 | 2022-01-11T18:56:22.000Z | 2022-02-21T13:20:20.000Z | model-optimizer/extensions/middle/ShufflenetReshape_test.py | apexxs/dldt | 17e66dc5a6631d630da454506902bd7c25d4170b | [
"Apache-2.0"
] | 3 | 2021-02-05T17:11:17.000Z | 2021-04-19T08:33:31.000Z | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from extensions.middle.ShufflenetReshape import FeatureShuffleReshape, ReshapeSoftmaxReshape
from mo.utils.unittest.graph import build_graph, compare_graphs
nodes_attributes = {
'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
# Reshape layers
'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape', 'dim': None},
'reshape_1_data': {'name': 'reshape_1_data', 'value': None, 'shape': None, 'kind': 'data'},
'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'reshape_2_data': {'name': 'reshape_2_data', 'value': None, 'shape': None, 'kind': 'data'},
'reshape_3': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
'reshape_3_data': {'name': 'reshape_3_data', 'value': None, 'shape': None, 'kind': 'data'},
# Transpose layer
'transpose_1': {'type': 'Permute', 'kind': 'op', 'op': 'Transpose'},
'transpose_1_data': {'value': None, 'shape': None, 'kind': 'data'},
# Softmax layer
'softmax_1': {'type': 'SoftMax', 'kind': 'op', 'op': 'SoftMax'},
'softmax_1_data': {'value': None, 'shape': None, 'kind': 'data'},
}
class FeatureShuffleReshapeTests(unittest.TestCase):
def test_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'transpose_1'),
('transpose_1', 'transpose_1_data'),
('transpose_1_data', 'reshape_2'),
('reshape_2', 'reshape_2_data')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])},
'reshape_1_data': {'shape': np.array([227, 227, 4, 28])},
'transpose_1': {'order': np.array([0, 1, 3, 2])},
'transpose_1_data': {'shape': np.array([227, 227, 28, 4])},
'reshape_2_data': {'shape': np.array([1, 227, 227, 112])},
})
graph.graph['layout'] = 'NHWC'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'transpose_1'),
('transpose_1', 'transpose_1_data'),
('transpose_1_data', 'reshape_2'),
('reshape_2', 'reshape_2_data')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])},
'reshape_1_data': {'shape': np.array([1, 4, 28, 227 * 227])},
'transpose_1': {'order': np.array([0, 2, 1, 3])},
'transpose_1_data': {'shape': np.array([1, 28, 4, 227 * 227])},
'reshape_2_data': {'shape': np.array([1, 227, 227, 112])},
'reshape_3_data': {'shape': np.array([1, 227, 227, 112])},
})
pattern = FeatureShuffleReshape()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'reshape_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_2(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'transpose_1'),
('transpose_1', 'transpose_1_data'),
('transpose_1_data', 'reshape_2'),
('reshape_2', 'reshape_2_data')
],
{'placeholder_1_data': {'shape': np.array([1, 112, 227, 227])},
'reshape_1_data': {'shape': np.array([1, 4, 28, 227, 227])},
'transpose_1': {'order': np.array([0, 2, 1, 3, 4])},
'transpose_1_data': {'shape': np.array([1, 28, 4, 227, 227])},
'reshape_2_data': {'shape': np.array([1, 112, 227, 227])},
})
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'transpose_1'),
('transpose_1', 'transpose_1_data'),
('transpose_1_data', 'reshape_2'),
('reshape_2', 'reshape_2_data')
],
{'placeholder_1_data': {'shape': np.array([1, 112, 227, 227])},
'reshape_1_data': {'shape': np.array([1, 4, 28, 227 * 227])},
'transpose_1': {'order': np.array([0, 2, 1, 3])},
'transpose_1_data': {'shape': np.array([1, 28, 4, 227 * 227])},
'reshape_2_data': {'shape': np.array([1, 112, 227, 227])},
})
pattern = FeatureShuffleReshape()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'reshape_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
class ReshapeSoftmaxReshapeTests(unittest.TestCase):
def test_1(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'softmax_1'),
('softmax_1', 'softmax_1_data'),
('softmax_1_data', 'reshape_2'),
('reshape_2', 'reshape_2_data')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 2])},
'reshape_1': {'dim': np.array([1, 227 * 227, 2])},
'reshape_1_data': {'shape': np.array([1 * 227 * 227, 2])},
'reshape_2_data': {'shape': np.array([1, 227, 227, 2])},
})
graph.graph['layout'] = 'NHWC'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'softmax_1'),
('softmax_1', 'softmax_1_data'),
('softmax_1_data', 'reshape_3'),
('reshape_3', 'reshape_3_data'),
('reshape_3_data', 'reshape_2'),
('reshape_2', 'reshape_2_data')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 2])},
'reshape_1_data': {'shape': np.array([1, 2, 227 * 227])},
'reshape_2_data': {'shape': np.array([1, 227, 227, 2])},
})
pattern = ReshapeSoftmaxReshape()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'reshape_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
def test_2(self):
graph = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'softmax_1'),
('softmax_1', 'softmax_1_data'),
('softmax_1_data', 'reshape_2'),
('reshape_2', 'reshape_2_data')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 2])},
'reshape_1_data': {'shape': np.array([1 * 227 * 227, 2])},
'reshape_2_data': {'shape': np.array([1, 227, 227, 2])},
})
graph.graph['layout'] = 'NCHW'
graph_ref = build_graph(nodes_attributes,
[('placeholder_1', 'placeholder_1_data'),
('placeholder_1_data', 'reshape_1'),
('reshape_1', 'reshape_1_data'),
('reshape_1_data', 'softmax_1'),
('softmax_1', 'softmax_1_data'),
('softmax_1_data', 'reshape_2'),
('reshape_2', 'reshape_2_data')
],
{'placeholder_1_data': {'shape': np.array([1, 227, 227, 2])},
'reshape_1_data': {'shape': np.array([1 * 227 * 227, 2])},
'reshape_2_data': {'shape': np.array([1, 227, 227, 2])},
})
pattern = ReshapeSoftmaxReshape()
pattern.find_and_replace_pattern(graph)
(flag, resp) = compare_graphs(graph, graph_ref, 'reshape_2_data', check_op_attrs=True)
self.assertTrue(flag, resp)
| 54.308458 | 96 | 0.457585 | 1,063 | 10,916 | 4.393227 | 0.117592 | 0.078158 | 0.068308 | 0.099358 | 0.783084 | 0.783084 | 0.770664 | 0.743897 | 0.702355 | 0.700857 | 0 | 0.071483 | 0.397673 | 10,916 | 200 | 97 | 54.58 | 0.638783 | 0.055698 | 0 | 0.8 | 0 | 0 | 0.264386 | 0 | 0 | 0 | 0 | 0 | 0.025 | 1 | 0.025 | false | 0 | 0.025 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d7655c4482e0317261258dc036704b87ddac7be8 | 1,468 | py | Python | main.py | VJPranay/getAllYoutubeChannelVideos | b98e8f4fe48c784883d5f2daef1e68e922b60d03 | [
"MIT"
] | 1 | 2020-01-22T08:02:57.000Z | 2020-01-22T08:02:57.000Z | main.py | VJPranay/getAllYoutubeChannelVideos | b98e8f4fe48c784883d5f2daef1e68e922b60d03 | [
"MIT"
] | null | null | null | main.py | VJPranay/getAllYoutubeChannelVideos | b98e8f4fe48c784883d5f2daef1e68e922b60d03 | [
"MIT"
] | null | null | null | import requests
import csv
r = requests.get('https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId=UUBhADBpNLBo8IP027XrxKAw&key={YOUR_API_KEY}')
d = r.json()
with open('videos.csv', 'w') as csvFile:
writer = csv.writer(csvFile)
fields = ["title","video"]
writer = csv.DictWriter(csvFile, fieldnames=fields)
writer.writeheader()
for video in d['items']:
data = {'video': 'https://www.youtube.com/watch?v='+ video['snippet']['resourceId']['videoId'],'title': video['snippet']['title']}
print(data)
writer.writerow(data)
r = requests.get('https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId=UUBhADBpNLBo8IP027XrxKAw&key={YOUR_API_KEY}&pageToken=CDIQAA')
d = r.json()
for video in d['items']:
data = {'video': 'https://www.youtube.com/watch?v='+ video['snippet']['resourceId']['videoId'],'title': video['snippet']['title']}
print(data)
writer.writerow(data)
r = requests.get('https://www.googleapis.com/youtube/v3/playlistItems?part=snippet&maxResults=50&playlistId=UUBhADBpNLBo8IP027XrxKAw&key={YOUR_API_KEY}&pageToken=CGQQAA')
d = r.json()
for video in d['items']:
data = {'video': 'https://www.youtube.com/watch?v='+ video['snippet']['resourceId']['videoId'],'title': video['snippet']['title']}
print(data)
writer.writerow(data)
csvFile.close()
print(d['nextPageToken']) | 40.777778 | 174 | 0.673025 | 182 | 1,468 | 5.395604 | 0.28022 | 0.04888 | 0.03666 | 0.051935 | 0.800407 | 0.800407 | 0.800407 | 0.800407 | 0.800407 | 0.800407 | 0 | 0.016562 | 0.13624 | 1,468 | 36 | 175 | 40.777778 | 0.757886 | 0 | 0 | 0.555556 | 0 | 0.111111 | 0.487406 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0.148148 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
ad50994258f92a94d54a387368439cf70f5453df | 90 | py | Python | mayan/apps/document_indexing/views/__init__.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/document_indexing/views/__init__.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/document_indexing/views/__init__.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | from .index_instance_views import * # NOQA
from .index_template_views import * # NOQA
| 30 | 44 | 0.755556 | 12 | 90 | 5.333333 | 0.583333 | 0.28125 | 0.46875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.177778 | 90 | 2 | 45 | 45 | 0.864865 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
d16367e87bb677855fba128c43d95924ddb6bd4b | 27,563 | py | Python | sdk/python/pulumi_kubernetes/authorization/v1beta1/outputs.py | polivbr/pulumi-kubernetes | 36a5fb34240a38a60b52a5f4e55e66e248d9305f | [
"Apache-2.0"
] | 277 | 2018-06-18T14:57:09.000Z | 2022-03-29T04:05:06.000Z | sdk/python/pulumi_kubernetes/authorization/v1beta1/outputs.py | polivbr/pulumi-kubernetes | 36a5fb34240a38a60b52a5f4e55e66e248d9305f | [
"Apache-2.0"
] | 1,447 | 2018-06-20T00:58:34.000Z | 2022-03-31T21:28:43.000Z | sdk/python/pulumi_kubernetes/authorization/v1beta1/outputs.py | polivbr/pulumi-kubernetes | 36a5fb34240a38a60b52a5f4e55e66e248d9305f | [
"Apache-2.0"
] | 95 | 2018-06-30T03:30:05.000Z | 2022-03-29T04:05:09.000Z | # coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'NonResourceAttributes',
'NonResourceRule',
'ResourceAttributes',
'ResourceRule',
'SelfSubjectAccessReviewSpec',
'SelfSubjectRulesReviewSpec',
'SubjectAccessReviewSpec',
'SubjectAccessReviewStatus',
'SubjectRulesReviewStatus',
]
@pulumi.output_type
class NonResourceAttributes(dict):
"""
NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
"""
def __init__(__self__, *,
path: Optional[str] = None,
verb: Optional[str] = None):
"""
NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
:param str path: Path is the URL path of the request
:param str verb: Verb is the standard HTTP verb
"""
if path is not None:
pulumi.set(__self__, "path", path)
if verb is not None:
pulumi.set(__self__, "verb", verb)
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
Path is the URL path of the request
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def verb(self) -> Optional[str]:
"""
Verb is the standard HTTP verb
"""
return pulumi.get(self, "verb")
@pulumi.output_type
class NonResourceRule(dict):
"""
NonResourceRule holds information that describes a rule for the non-resource
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "nonResourceURLs":
suggest = "non_resource_urls"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in NonResourceRule. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
NonResourceRule.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
NonResourceRule.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
verbs: Sequence[str],
non_resource_urls: Optional[Sequence[str]] = None):
"""
NonResourceRule holds information that describes a rule for the non-resource
:param Sequence[str] verbs: Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all.
:param Sequence[str] non_resource_urls: NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. "*" means all.
"""
pulumi.set(__self__, "verbs", verbs)
if non_resource_urls is not None:
pulumi.set(__self__, "non_resource_urls", non_resource_urls)
@property
@pulumi.getter
def verbs(self) -> Sequence[str]:
"""
Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all.
"""
return pulumi.get(self, "verbs")
@property
@pulumi.getter(name="nonResourceURLs")
def non_resource_urls(self) -> Optional[Sequence[str]]:
"""
NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. "*" means all.
"""
return pulumi.get(self, "non_resource_urls")
@pulumi.output_type
class ResourceAttributes(dict):
"""
ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
"""
def __init__(__self__, *,
group: Optional[str] = None,
name: Optional[str] = None,
namespace: Optional[str] = None,
resource: Optional[str] = None,
subresource: Optional[str] = None,
verb: Optional[str] = None,
version: Optional[str] = None):
"""
ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
:param str group: Group is the API Group of the Resource. "*" means all.
:param str name: Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
:param str namespace: Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
:param str resource: Resource is one of the existing resource types. "*" means all.
:param str subresource: Subresource is one of the existing resource types. "" means none.
:param str verb: Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.
:param str version: Version is the API Version of the Resource. "*" means all.
"""
if group is not None:
pulumi.set(__self__, "group", group)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if resource is not None:
pulumi.set(__self__, "resource", resource)
if subresource is not None:
pulumi.set(__self__, "subresource", subresource)
if verb is not None:
pulumi.set(__self__, "verb", verb)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def group(self) -> Optional[str]:
"""
Group is the API Group of the Resource. "*" means all.
"""
return pulumi.get(self, "group")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> Optional[str]:
"""
Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
"""
return pulumi.get(self, "namespace")
@property
@pulumi.getter
def resource(self) -> Optional[str]:
"""
Resource is one of the existing resource types. "*" means all.
"""
return pulumi.get(self, "resource")
@property
@pulumi.getter
def subresource(self) -> Optional[str]:
"""
Subresource is one of the existing resource types. "" means none.
"""
return pulumi.get(self, "subresource")
@property
@pulumi.getter
def verb(self) -> Optional[str]:
"""
Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.
"""
return pulumi.get(self, "verb")
@property
@pulumi.getter
def version(self) -> Optional[str]:
"""
Version is the API Version of the Resource. "*" means all.
"""
return pulumi.get(self, "version")
@pulumi.output_type
class ResourceRule(dict):
"""
ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "apiGroups":
suggest = "api_groups"
elif key == "resourceNames":
suggest = "resource_names"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResourceRule. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResourceRule.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResourceRule.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
verbs: Sequence[str],
api_groups: Optional[Sequence[str]] = None,
resource_names: Optional[Sequence[str]] = None,
resources: Optional[Sequence[str]] = None):
"""
ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
:param Sequence[str] verbs: Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all.
:param Sequence[str] api_groups: APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. "*" means all.
:param Sequence[str] resource_names: ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all.
:param Sequence[str] resources: Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups.
"*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
"""
pulumi.set(__self__, "verbs", verbs)
if api_groups is not None:
pulumi.set(__self__, "api_groups", api_groups)
if resource_names is not None:
pulumi.set(__self__, "resource_names", resource_names)
if resources is not None:
pulumi.set(__self__, "resources", resources)
@property
@pulumi.getter
def verbs(self) -> Sequence[str]:
"""
Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all.
"""
return pulumi.get(self, "verbs")
@property
@pulumi.getter(name="apiGroups")
def api_groups(self) -> Optional[Sequence[str]]:
"""
APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. "*" means all.
"""
return pulumi.get(self, "api_groups")
@property
@pulumi.getter(name="resourceNames")
def resource_names(self) -> Optional[Sequence[str]]:
"""
ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all.
"""
return pulumi.get(self, "resource_names")
@property
@pulumi.getter
def resources(self) -> Optional[Sequence[str]]:
"""
Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups.
"*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
"""
return pulumi.get(self, "resources")
@pulumi.output_type
class SelfSubjectAccessReviewSpec(dict):
"""
SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "nonResourceAttributes":
suggest = "non_resource_attributes"
elif key == "resourceAttributes":
suggest = "resource_attributes"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SelfSubjectAccessReviewSpec. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SelfSubjectAccessReviewSpec.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SelfSubjectAccessReviewSpec.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
non_resource_attributes: Optional['outputs.NonResourceAttributes'] = None,
resource_attributes: Optional['outputs.ResourceAttributes'] = None):
"""
SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set
:param 'NonResourceAttributesArgs' non_resource_attributes: NonResourceAttributes describes information for a non-resource access request
:param 'ResourceAttributesArgs' resource_attributes: ResourceAuthorizationAttributes describes information for a resource access request
"""
if non_resource_attributes is not None:
pulumi.set(__self__, "non_resource_attributes", non_resource_attributes)
if resource_attributes is not None:
pulumi.set(__self__, "resource_attributes", resource_attributes)
@property
@pulumi.getter(name="nonResourceAttributes")
def non_resource_attributes(self) -> Optional['outputs.NonResourceAttributes']:
"""
NonResourceAttributes describes information for a non-resource access request
"""
return pulumi.get(self, "non_resource_attributes")
@property
@pulumi.getter(name="resourceAttributes")
def resource_attributes(self) -> Optional['outputs.ResourceAttributes']:
"""
ResourceAuthorizationAttributes describes information for a resource access request
"""
return pulumi.get(self, "resource_attributes")
@pulumi.output_type
class SelfSubjectRulesReviewSpec(dict):
def __init__(__self__, *,
namespace: Optional[str] = None):
"""
:param str namespace: Namespace to evaluate rules for. Required.
"""
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
@property
@pulumi.getter
def namespace(self) -> Optional[str]:
"""
Namespace to evaluate rules for. Required.
"""
return pulumi.get(self, "namespace")
@pulumi.output_type
class SubjectAccessReviewSpec(dict):
"""
SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "nonResourceAttributes":
suggest = "non_resource_attributes"
elif key == "resourceAttributes":
suggest = "resource_attributes"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SubjectAccessReviewSpec. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SubjectAccessReviewSpec.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SubjectAccessReviewSpec.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
extra: Optional[Mapping[str, Sequence[str]]] = None,
group: Optional[Sequence[str]] = None,
non_resource_attributes: Optional['outputs.NonResourceAttributes'] = None,
resource_attributes: Optional['outputs.ResourceAttributes'] = None,
uid: Optional[str] = None,
user: Optional[str] = None):
"""
SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set
:param Mapping[str, Sequence[str]] extra: Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.
:param Sequence[str] group: Groups is the groups you're testing for.
:param 'NonResourceAttributesArgs' non_resource_attributes: NonResourceAttributes describes information for a non-resource access request
:param 'ResourceAttributesArgs' resource_attributes: ResourceAuthorizationAttributes describes information for a resource access request
:param str uid: UID information about the requesting user.
:param str user: User is the user you're testing for. If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups
"""
if extra is not None:
pulumi.set(__self__, "extra", extra)
if group is not None:
pulumi.set(__self__, "group", group)
if non_resource_attributes is not None:
pulumi.set(__self__, "non_resource_attributes", non_resource_attributes)
if resource_attributes is not None:
pulumi.set(__self__, "resource_attributes", resource_attributes)
if uid is not None:
pulumi.set(__self__, "uid", uid)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter
def extra(self) -> Optional[Mapping[str, Sequence[str]]]:
"""
Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.
"""
return pulumi.get(self, "extra")
@property
@pulumi.getter
def group(self) -> Optional[Sequence[str]]:
"""
Groups is the groups you're testing for.
"""
return pulumi.get(self, "group")
@property
@pulumi.getter(name="nonResourceAttributes")
def non_resource_attributes(self) -> Optional['outputs.NonResourceAttributes']:
"""
NonResourceAttributes describes information for a non-resource access request
"""
return pulumi.get(self, "non_resource_attributes")
@property
@pulumi.getter(name="resourceAttributes")
def resource_attributes(self) -> Optional['outputs.ResourceAttributes']:
"""
ResourceAuthorizationAttributes describes information for a resource access request
"""
return pulumi.get(self, "resource_attributes")
@property
@pulumi.getter
def uid(self) -> Optional[str]:
"""
UID information about the requesting user.
"""
return pulumi.get(self, "uid")
@property
@pulumi.getter
def user(self) -> Optional[str]:
"""
User is the user you're testing for. If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups
"""
return pulumi.get(self, "user")
@pulumi.output_type
class SubjectAccessReviewStatus(dict):
    """
    SubjectAccessReviewStatus
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase dict keys to the snake_case property that should
        # be used instead; warn when such a key is looked up directly.
        suggestions = {"evaluationError": "evaluation_error"}
        suggest = suggestions.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SubjectAccessReviewStatus. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SubjectAccessReviewStatus.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SubjectAccessReviewStatus.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 allowed: bool,
                 denied: Optional[bool] = None,
                 evaluation_error: Optional[str] = None,
                 reason: Optional[str] = None):
        """
        SubjectAccessReviewStatus
        :param bool allowed: Allowed is required. True if the action would be allowed, false otherwise.
        :param bool denied: Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.
        :param str evaluation_error: EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.
        :param str reason: Reason is optional. It indicates why a request was allowed or denied.
        """
        pulumi.set(__self__, "allowed", allowed)
        # Optional fields are stored only when the caller supplied them.
        for prop, value in (("denied", denied),
                            ("evaluation_error", evaluation_error),
                            ("reason", reason)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter
    def allowed(self) -> bool:
        """
        Allowed is required. True if the action would be allowed, false otherwise.
        """
        return pulumi.get(self, "allowed")

    @property
    @pulumi.getter
    def denied(self) -> Optional[bool]:
        """
        Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.
        """
        return pulumi.get(self, "denied")

    @property
    @pulumi.getter(name="evaluationError")
    def evaluation_error(self) -> Optional[str]:
        """
        EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.
        """
        return pulumi.get(self, "evaluation_error")

    @property
    @pulumi.getter
    def reason(self) -> Optional[str]:
        """
        Reason is optional. It indicates why a request was allowed or denied.
        """
        return pulumi.get(self, "reason")
@pulumi.output_type
class SubjectRulesReviewStatus(dict):
    """
    SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map camelCase dict keys to the snake_case property that should
        # be used instead; warn when such a key is looked up directly.
        suggestions = {
            "nonResourceRules": "non_resource_rules",
            "resourceRules": "resource_rules",
            "evaluationError": "evaluation_error",
        }
        suggest = suggestions.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SubjectRulesReviewStatus. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SubjectRulesReviewStatus.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SubjectRulesReviewStatus.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 incomplete: bool,
                 non_resource_rules: Sequence['outputs.NonResourceRule'],
                 resource_rules: Sequence['outputs.ResourceRule'],
                 evaluation_error: Optional[str] = None):
        """
        SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.
        :param bool incomplete: Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.
        :param Sequence['NonResourceRuleArgs'] non_resource_rules: NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
        :param Sequence['ResourceRuleArgs'] resource_rules: ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
        :param str evaluation_error: EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.
        """
        # Required fields are always stored.
        for prop, value in (("incomplete", incomplete),
                            ("non_resource_rules", non_resource_rules),
                            ("resource_rules", resource_rules)):
            pulumi.set(__self__, prop, value)
        if evaluation_error is not None:
            pulumi.set(__self__, "evaluation_error", evaluation_error)

    @property
    @pulumi.getter
    def incomplete(self) -> bool:
        """
        Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.
        """
        return pulumi.get(self, "incomplete")

    @property
    @pulumi.getter(name="nonResourceRules")
    def non_resource_rules(self) -> Sequence['outputs.NonResourceRule']:
        """
        NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
        """
        return pulumi.get(self, "non_resource_rules")

    @property
    @pulumi.getter(name="resourceRules")
    def resource_rules(self) -> Sequence['outputs.ResourceRule']:
        """
        ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
        """
        return pulumi.get(self, "resource_rules")

    @property
    @pulumi.getter(name="evaluationError")
    def evaluation_error(self) -> Optional[str]:
        """
        EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.
        """
        return pulumi.get(self, "evaluation_error")
| 45.111293 | 374 | 0.665131 | 3,236 | 27,563 | 5.53152 | 0.090853 | 0.025196 | 0.02324 | 0.033966 | 0.80933 | 0.779944 | 0.738827 | 0.70905 | 0.696872 | 0.687765 | 0 | 0.000048 | 0.24834 | 27,563 | 610 | 375 | 45.185246 | 0.863935 | 0.421616 | 0 | 0.58 | 1 | 0.017143 | 0.159206 | 0.050445 | 0 | 0 | 0 | 0 | 0 | 1 | 0.168571 | false | 0 | 0.017143 | 0 | 0.337143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
d1671857f789718ccaeac23d8e3969d13ff0d12b | 987 | py | Python | src/speech_features_kit/Volume/Volume.py | dhchenx/speech-feature-kit | e92e053fc756127c7679d58a5df22cbb73040dd6 | [
"MIT"
] | null | null | null | src/speech_features_kit/Volume/Volume.py | dhchenx/speech-feature-kit | e92e053fc756127c7679d58a5df22cbb73040dd6 | [
"MIT"
] | null | null | null | src/speech_features_kit/Volume/Volume.py | dhchenx/speech-feature-kit | e92e053fc756127c7679d58a5df22cbb73040dd6 | [
"MIT"
] | null | null | null | import math
import numpy as np
# method 1: absSum
def calVolume(waveData, frameSize, overLap):
    """
    Compute the short-time volume of a signal as the absolute-amplitude sum.

    The signal is split into frames of ``frameSize`` samples advancing by
    ``frameSize - overLap`` samples; each frame is zero-justified (its mean
    removed) before summing absolute values.

    :param waveData: 1-D numpy array of samples.
    :param frameSize: number of samples per frame.
    :param overLap: number of overlapping samples between adjacent frames.
    :return: (frameNum, 1) numpy array of per-frame absolute-amplitude sums.
    """
    n_samples = len(waveData)
    hop = frameSize - overLap
    n_frames = int(math.ceil(n_samples * 1.0 / hop))
    volume = np.zeros((n_frames, 1))
    for idx in range(n_frames):
        start = idx * hop
        frame = waveData[start:min(start + frameSize, n_samples)]
        frame = frame - np.mean(frame)  # zero-justified
        volume[idx] = np.abs(frame).sum()
    return volume
# method 2: log10 of square sum
def calVolumeDB(waveData, frameSize, overLap):
    """
    Compute the short-time volume of a signal in decibels (10*log10 of the
    per-frame energy).

    The signal is split into frames of ``frameSize`` samples advancing by
    ``frameSize - overLap`` samples; each frame is zero-justified (its mean
    removed) before its sum of squares is taken.

    :param waveData: 1-D numpy array of samples.
    :param frameSize: number of samples per frame.
    :param overLap: number of overlapping samples between adjacent frames.
    :return: (frameNum, 1) numpy array of per-frame volumes in dB.
    """
    wlen = len(waveData)
    step = frameSize - overLap
    frameNum = int(math.ceil(wlen * 1.0 / step))
    volume = np.zeros((frameNum, 1))
    # Guard against log10(0) on silent (all-equal) frames, which previously
    # emitted a RuntimeWarning and produced -inf.
    eps = np.finfo(np.float64).eps
    for i in range(frameNum):
        curFrame = waveData[np.arange(i * step, min(i * step + frameSize, wlen))]
        curFrame = curFrame - np.mean(curFrame)  # zero-justified
        volume[i] = 10 * np.log10(np.sum(curFrame * curFrame) + eps)
    return volume
| 35.25 | 73 | 0.662614 | 135 | 987 | 4.844444 | 0.318519 | 0.097859 | 0.082569 | 0.085627 | 0.727829 | 0.727829 | 0.727829 | 0.727829 | 0.727829 | 0.602446 | 0 | 0.01788 | 0.206687 | 987 | 27 | 74 | 36.555556 | 0.817369 | 0.125633 | 0 | 0.727273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.