hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ca963c794e6b8ca2f4dbf07be128d72d92a54e5c | 9,514 | py | Python | openGaussBase/testcase/SQL/INNERFUNC/last/Opengauss_Function_Innerfunc_Last_Case0007.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/INNERFUNC/last/Opengauss_Function_Innerfunc_Last_Case0007.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SQL/INNERFUNC/last/Opengauss_Function_Innerfunc_Last_Case0007.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 服务端工具
Case Name : last函数返回最后一个输入,若无输入,则返回一个空行:mot表
Description :
1.创建外表
2.表中未插入数据,使用last函数,返回一个空行
3.给表中插入数据
4.与group by,order by,having 结合使用
5.输入包含null,并排序(nulls last)
6.与函数嵌套使用
7.清理环境
8.恢复参数默认值
Expect :
1.创建外表成功
2.表中未插入数据,使用last函数,返回一个空行
3.给表中插入数据成功
4.与group by,order by,having 结合使用,返回结果正确
5.输入包含null,并排序(nulls last),返回结果正确
6.与函数嵌套使用,返回结果正确
7.清理环境成功
8.恢复参数默认值
History :
"""
import os
import unittest
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
class Jsonb(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.pri_user = CommonSH('PrimaryDbUser')
self.constant = Constant()
self.com = Common()
        text = '-----Precondition: enable_incremental_checkpoint=off-----'
self.log.info(text)
self.default_value = self.com.show_param(
"enable_incremental_checkpoint")
self.log.info(self.default_value)
self.config_item = 'enable_incremental_checkpoint=off'
check_res = self.pri_user.execut_db_sql(
f'''show enable_incremental_checkpoint;''')
if 'off' != check_res.split('\n')[-2].strip():
self.pri_user.execute_gsguc(
'set', self.constant.GSGUC_SUCCESS_MSG, self.config_item)
self.pri_user.restart_db_cluster()
result = self.pri_user.get_db_cluster_status()
            self.assertTrue('Degraded' in result or 'Normal' in result,
                            'Execution failed: ' + text)
def test_mot_table_last(self):
text = f'-----{os.path.basename(__file__)} start-----'
self.log.info(text)
        text = '-----step1. Create the MOT tables; expect: created successfully-----'
self.log.info(text)
self.sql_cmd = f'''drop foreign table if exists student;
drop foreign table if exists score;
create foreign table student(
s_id integer(20),
s_name varchar(20) ,
s_birth date,s_sex varchar(10));
create foreign table score(
s_id integer(20),
c_id integer(20),
s_score float(3));
'''
self.log.info(self.sql_cmd)
msg1 = self.pri_user.execut_db_sql(self.sql_cmd)
self.log.info(msg1)
        self.assertIn(self.constant.CREATE_FOREIGN_SUCCESS_MSG, msg1,
                      'Execution failed: ' + text)
        text = '-----step2. With no data inserted, last() returns an empty row; expect: 1 row returned-----'
self.log.info(text)
self.sql_cmd = f'''select last(s_name) from student;
select last(s_name), last(s_id) from student;
select last(s_score) from score;
select last(c_id), last(s_score) from score;
'''
self.log.info(self.sql_cmd)
msg2 = self.pri_user.execut_db_sql(self.sql_cmd)
self.log.info(msg2)
        self.assertIn('1 row', msg2, 'Execution failed: ' + text)
        text = '-----step3. Insert data into the tables; expect: success-----'
self.log.info(text)
self.sql_cmd = f'''insert into student values (1,'zhaolei',null,'男');
insert into student values (2,'zhoumei','1991-12-01','女');
insert into student values (3,'zhuzhu','1991-06-01','男');
insert into student values (4,'lilei','1992-05-01','男');
insert into student values (null,'lihua','1991-03-01','男');
insert into student values (1,'zhangsan','1992-08-01','男');
insert into student values (2,'sunjin','1991-09-01','女');
insert into student values (3,'wangwu','1992-10-01','女');
insert into student values (4,null,'1990-11-01','女');
insert into student values (5,'ninghao','1993-12-01','女');
insert into score values(1, 101, 69.5),(1, 101, 80);
insert into score values(2, 102, 70),(2, 102, 82);
insert into score values(3, 103, 71),(3, 103, 93);
insert into score values(4, 104, 85),(4, 104, 85);
insert into score values(5, 105, 73),(5, 105, 91);
insert into score values(1, 106, null);
insert into score values(2, 107, 75);
insert into score values(3, null, 78);
insert into score values(4, 109, 86);
insert into score values(null, 110, 99);
'''
self.log.info(self.sql_cmd)
msg3 = self.pri_user.execut_db_sql(self.sql_cmd)
self.log.info(msg3)
        self.assertIn(self.constant.INSERT_SUCCESS_MSG, msg3, 'Execution failed: ' + text)
        text = ('-----step4. Use last() with group by, order by and having; '
                'expect: correct results-----')
self.log.info(text)
self.sql_cmd = f'''select last(s_name order by s_id) from student;
select s_name, last(s_id) as id from student
group by s_name order by s_name;
select s_id, last(s_name) from student group by s_id
having s_id > 2 order by s_id;
select sc.s_id,last(sc.s_score) from score as sc ,
student as st where st.s_sex = '女'
group by sc.s_id order by sc.s_id;
select sc.s_id,last(st.s_name) from score as sc
inner join student as st on sc.s_id = st.s_id
where st.s_sex = '女'group by sc.s_id order by sc.s_id;
'''
self.log.info(self.sql_cmd)
msg4 = self.pri_user.execut_db_sql(self.sql_cmd)
self.log.info(msg4)
        self.assertIn('lihua', msg4, 'Execution failed: ' + text)
        self.assertIn('10 rows', msg4, 'Execution failed: ' + text)
        self.assertIn('3 rows', msg4, 'Execution failed: ' + text)
        self.assertIn('6 rows', msg4, 'Execution failed: ' + text)
        self.assertIn('4 rows', msg4, 'Execution failed: ' + text)
        text = '-----step5. Sort with NULLS LAST; expect: correct results-----'
self.log.info(text)
self.sql_cmd = f'''
select last(s_name order by s_id nulls last) from student;
select s_name, last(s_birth order by s_birth nulls last)
from student group by s_name order by s_name;
select s_id, last(s_score order by s_score desc NULLS last)
from score group by s_id;
select c_id, last(s_score order by s_score desc NULLS last)
from score group by c_id having c_id > 102 order by c_id;
select st.s_id,last(s_score order by s_score NULLS last)
from score as sc inner join student as st on sc.s_id = st.s_id
where st.s_sex = '女' group by st.s_id order by st.s_id;
'''
self.log.info(self.sql_cmd)
msg5 = self.pri_user.execut_db_sql(self.sql_cmd)
self.log.info(msg5)
        self.assertIn('lihua', msg5, 'Execution failed: ' + text)
        self.assertIn('10 rows', msg5, 'Execution failed: ' + text)
        self.assertIn('6 rows', msg5, 'Execution failed: ' + text)
        self.assertIn('7 rows', msg5, 'Execution failed: ' + text)
        self.assertIn('4 rows', msg5, 'Execution failed: ' + text)
        text = '-----step6. Nest last() inside other functions; expect: correct results-----'
self.log.info(text)
self.sql_cmd = f'''
select char_length(last(s_name order by s_name)) from student;
select isfinite(last(s_birth order by s_birth)) from student;
select ceil(last(s_score order by s_score)) from score;
'''
self.log.info(self.sql_cmd)
msg6 = self.pri_user.execut_db_sql(self.sql_cmd)
self.log.info(msg6)
        self.assertIn('6', msg6, 'Execution failed: ' + text)
        self.assertIn('t', msg6, 'Execution failed: ' + text)
        self.assertIn('99', msg6, 'Execution failed: ' + text)
def tearDown(self):
        text = '--step7.1. Clean up: drop the created MOT tables; expect: dropped successfully--'
self.log.info(text)
self.sql_cmd = f'''drop foreign table student cascade;
drop foreign table score cascade;
'''
msg7 = self.pri_user.execut_db_sql(self.sql_cmd)
self.log.info(msg7)
        text = '--step7.2. Restore the default value; expect: restored successfully--'
self.log.info(text)
msg8 = self.pri_user.execute_gsguc('set',
self.constant.GSGUC_SUCCESS_MSG,
f"enable_incremental_checkpoint="
f"{self.default_value}")
self.log.info(msg8)
restart_msg = self.pri_user.restart_db_cluster()
self.log.info(restart_msg)
status = self.pri_user.get_db_cluster_status('detail')
self.log.info(status)
self.recovery_value = self.com.show_param(
"enable_incremental_checkpoint")
        self.assertIn(self.constant.DROP_FOREIGN_SUCCESS_MSG, msg7,
                      'Execution failed: ' + text)
        self.assertTrue("Degraded" in status or "Normal" in status,
                        'Execution failed: ' + text)
        self.assertEqual(self.recovery_value, self.default_value,
                         'Execution failed: ' + text)
text = f'-----{os.path.basename(__file__)} end-----'
self.log.info(text)
| 42.663677 | 84 | 0.586189 | 1,324 | 9,514 | 4.076284 | 0.212991 | 0.037613 | 0.057069 | 0.030573 | 0.523254 | 0.426163 | 0.312025 | 0.254771 | 0.241616 | 0.217528 | 0 | 0.039195 | 0.289363 | 9,514 | 222 | 85 | 42.855856 | 0.759059 | 0.053395 | 0 | 0.196532 | 0 | 0 | 0.513659 | 0.107985 | 0 | 0 | 0 | 0 | 0.115607 | 1 | 0.017341 | false | 0 | 0.034682 | 0 | 0.057803 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca9b95c978ba4ebdf7212bca4a59b66c0c1f46e7 | 356 | py | Python | test/record.py | titulebolide/variometer | 7e5fbacdb9c403d11dd01abc6f5e20db4b922756 | [
"MIT"
] | null | null | null | test/record.py | titulebolide/variometer | 7e5fbacdb9c403d11dd01abc6f5e20db4b922756 | [
"MIT"
] | null | null | null | test/record.py | titulebolide/variometer | 7e5fbacdb9c403d11dd01abc6f5e20db4b922756 | [
"MIT"
] | null | null | null | import flask
import numpy as np
buff = []  # accumulates samples received over HTTP
app = flask.Flask(__name__)
@app.route("/",methods=['POST'])
def index():
global buff
    data = flask.request.get_json(force=True)  # parse JSON body regardless of content-type
    buff.extend(data["data"])
    return "", 200
@app.route("/save/",methods=['GET'])
def save():
np.save("record.npy", buff)
return "",200
app.run("0.0.0.0", 7998)
| 17.8 | 46 | 0.620787 | 53 | 356 | 4.075472 | 0.566038 | 0.027778 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047297 | 0.168539 | 356 | 19 | 47 | 18.736842 | 0.682432 | 0 | 0 | 0.133333 | 0 | 0 | 0.098315 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.133333 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca9bf7b2d5b27c913dc9d1327604f448153b014a | 13,207 | py | Python | framework/cefs.py | shew91/Retropy | 9feb34855b997c48d93a5343a9842788d19582e6 | [
"MIT"
] | 13 | 2018-06-02T09:11:15.000Z | 2020-08-29T01:01:19.000Z | framework/cefs.py | shew91/Retropy | 9feb34855b997c48d93a5343a9842788d19582e6 | [
"MIT"
] | 1 | 2021-01-17T14:03:13.000Z | 2021-01-17T14:03:13.000Z | framework/cefs.py | shew91/Retropy | 9feb34855b997c48d93a5343a9842788d19582e6 | [
"MIT"
] | 6 | 2018-06-02T16:20:47.000Z | 2021-12-30T22:26:54.000Z | # https://www.cefconnect.com/closed-end-funds-screener
# https://www.cefa.com/
# http://cefdata.com/funds/clm/
# https://www.cefconnect.com/fund/CLM
# https://www.cefchannel.com/clm/
from framework.utils import *
from framework.base import *
from framework.meta_data import *
from framework.stats_basic import *
from framework.stats import *
import Retropy_framework as frm
import framework.meta_data_dfs as meta_dfs
# these lists and categories are based on cefconnect
# cef_highyield_taxable = 'ACP|ACV|AFT|AIF|ARDC|AVK|AWF|BBN|BCV|BGB|BGH|BGT|BGX|BHK|BIT|BKT|BLW|BSL|BTZ|CBH|CCD|CHI|CHY|CIF|CIK|DBL|DCF|DFP|DHF|DHY|DMO|DSL|DSU|DUC|EAD|ECC|ECF|EFF|EFL|EFR|EFT|EGF|EHT|ERC|EVF|EVG|EVV|EXD|FCT|FFC|MGF|MPV|NBB|NBD|NCV|NCZ|NHS|NSL|OPP|OXLC|PAI|PCF|PCI|PCM|PCN|PDI|PDT|PFD|PFL|PFN|PFO|PGP|PHD|PHK|PHT|PIM|PKO|PPR|PSF|PTY|RA|TLI|TSI|TSLF|VBF|VGI|VLT|VTA|VVR|WEA|WIA|WIW|XFLT|FIV|FLC|FMY|FPF|FRA|FSD|FT|FTF|GBAB|GDO|GFY|GGM|GHY|HFRO|HIO|HIX|HNW|HPF|HPI|HPS|HYB|HYI|HYT|IGI|IHIT|IHTA|INSI|ISD|IVH|JCO|JFR|JHA|JHB|JHD|JHI|JHS|JHY|JLS|JMM|JMT|JPC|JPI|JPS|JPT|JQC|JRO|JSD|KIO|LDP|MCI'
# cef_highyield_taxable = cef_highyield_taxable.split('|')
# cef_municipal_tax_exempt = 'AKP|AFB|BJZ|BFZ|BFO|BKN|BTA|BZM|BPK|BAF|BYM|MUI|MNE|BTT|MUA|MEN|MUC|MHD|MUH|MFL|MUJ|MHN|MUE|MUS|MVF|MVT|MYD|MZA|MYC|MCA|MYF|MIY|MYJ|MYN|MPA|MYI|MQY|MQT|BKK|BBK|BFK|BLE|BBF|MFT|BLH|BSE|BQH|BFY|BNY|BSD|BHV|DTF|VFL|VMM|KTF|KSM|DMB|DMF|DSM|LEO|EIA|EVM|CEV|MAB|MMV|MIW|EMI|ETX|EIM|EIV|EVN|EMJ|EVJ|NYH|ENX|EVY|EOT|EIO|EVO|EIP|EVP|FMN|VKI|VCV|OIA|VGM|VMO|VKQ|VPV|IQI|VTN|IIM|CCA|CXE|CMU|CXH|MFM|MMD|MHE|NBW|NBH|NBO|NVG|NUW|NEA|NAZ|NKX|NCA|NCB|NAC|NXC|NTC|NEV|NKG|NIQ|NID|NMT|NMY|NUM|NMS|NOM|NHA|NZF|NMZ|NMI|NUV|NNC|NJV|NXJ|NRK|NNY|NYV|NAN|NXN|NUO|NPN|NQP|NAD|NIM|NXP|NXQ|NXR|NTX|NPV|PCQ|PCK|PZC|PMF|PML|PMX|PNF|PNI|PYN|MAV|MHI|PMM|PMO|SBI|MMU|MTT|MHF|MNP'
# cef_municipal_tax_exempt = cef_municipal_tax_exempt.split('|')
# cef_us_equity = 'NIE|ASA|AWP|ADX|PEO|NFJ|BGR|CII|BDJ|BOE|BGY|BME|BCX|BUI|BIF|IGR|CSQ|CEN|CET|CBA|CEM|EMO|CTR|FOF|INB|UTF|MIE|RQI|RNP|RFI|STK|CLM|CRF|SRF|SRV|SZC|DNP|DDF|DNI|DPG|DSE|EOI|EOS|ETJ|ETO|ETG|EVT|ETB|ETV|ETY|ETW|EXG|GRF|FMO|FEN|FIF|FFA|FEI|FPL|FGB|FXBY|GGN|GNT|GCV|GDV|GAB|GLU|GGO|GRX|GUT|GAM|GMZ|GER|GPM|GOF|BTO|HTY|HTD|KYN|KMF|USA|ASG|MFV|MFD|MGU|MCN|MSP|HIE|NML|NRO|NHF|JMLP|JCE|DIAX|JMF|QQQX|JRI|JRS|BXMX|SPXX|JTD|JTA|PGZ|RCG|RIF|UTG|RIV|RMT|RVT|SMM|SOR|SPE|FUND|CEF|PHYS|SPPP|PSLV|HQH|THQ|HQL|THW|NDP|TYG|NTG|TTP|TPZ|TY|ZTR|IGD|IGA|IRR|ERH'
# cef_us_equity = cef_us_equity.split('|')
# cef_non_us_other = 'FAX|IAF|AEF|AGD|FCO|JEQ|AOD|APB|BGIO|BST|BWG|INF|CHW|CGO|CEE|CHN|GLV|GLQ|GLO|DEX|VCF|KMM|KST|EGIF|EEA|FDEU|FEO|FAM|GDL|GGZ|GGT|CUBA|IFN|HEQ|JOF|KF|SCD|LDF|LGI|LOR|MCR|MIN|MMT|APF|CAF|MSF|MSD|EDD|IIF|MXE|MXF|GF|IRL|JDD|JEMD|JGH|RCS|PPT|RGT|EDF|EDI|SWZ|TWN|TDF|EMF|TEI|GIM|ZF|IAE|IHD|IDE|IID|EOD|EMD|EHI'
# cef_non_us_other = cef_non_us_other.split('|')
# all = cef_highyield_taxable + cef_municipal_tax_exempt + cef_us_equity + cef_non_us_other
# http://cefdata.com/screener/
df = meta_dfs.cef_metadata_df
taxable_bond_funds = df.query('sec_main == "Taxable Bond Funds"').index.tolist()
specialty_equity_funds = df.query('sec_main == "Specialty Equity Funds"').index.tolist()
specialty_prefered = df.query('sec_sub == "Prefered Equity Leveraged" and sec_main == "Specialty Equity Funds"').index.tolist()
specialty_covered_call = df.query('sec_sub == "Covered Call Funds" and sec_main == "Specialty Equity Funds"').index.tolist()
specialty_mlp = df.query('sec_sub == "MLP Funds" and sec_main == "Specialty Equity Funds"').index.tolist()
specialty_real_estate = df.query('sec_sub == "Global Real Estate, REIT & Real Assets" and sec_main == "Specialty Equity Funds"').index.tolist()
specialty_utilities = df.query('sec_sub == "Utilities Funds" and sec_main == "Specialty Equity Funds"').index.tolist()
national_muni_bond_funds = df.query('sec_main == "National Muni Bond Funds"').index.tolist()
taxable_muni_bond_funds = df.query('sec_sub == "Taxable Municipal Bond Funds"').index.tolist()
loan_participation_funds = df.query('sec_sub == "Loan Participation"').index.tolist()
fixed_income = taxable_bond_funds + specialty_prefered + specialty_real_estate + taxable_muni_bond_funds
all = df.index.tolist()
pimco = df.query('sponsor == "PacificInvestmentManagementCompany"').index.tolist()
# only_new = set(cef_taxable_bond_funds) - set(cef_highyield_taxable)
# only_old = set(cef_highyield_taxable) - set(cef_taxable_bond_funds)
# cef_old_and_new = set(cef_taxable_bond_funds) | set(cef_highyield_taxable)
# these have broken/corrupt data in Yahoo
cefs_bad_yahoo = ['EHT', 'DCF', 'JHY', 'CBH', 'CCD', 'FIV', 'JPT', 'JHD', 'EFL', 'HFRO', 'GGO', 'BST', 'JCO', 'FTSM']
cef_nav_map = {
'ARDC': 'XADCX',
'OXLC': 'OXLCX',
'RA': 'XRAIX',
'TSI': 'XXCVTXX',
'TSLF': 'XTSLX',
'XFLT': 'XFLTX',
'FT': 'XFUTX',
'GBAB': 'XGBAX',
'HFRO': 'XHFOX',
'HIX': 'XHGIX',
'HYB': 'XHYBX',
'IHIT': 'XHITX',
'IHTA': 'XHTAX',
'INSI': 'XBDFX',
'BGX': 'XXBGX'
}
def get_cef_nav_ticker(s):
if is_series(s):
s = s.name.ticker
nav = get_cef_meta(s, "nav_symbol")
if nav:
return nav
nav = cef_nav_map.get(s, '')
if not nav:
nav = f'X{s}X'
return nav
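# e.g. get_cef_nav_ticker('ARDC') -> 'XADCX' via cef_nav_map (unless the fund's
# metadata supplies a nav_symbol); unmapped tickers fall back to 'X<ticker>X'.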
def get_cef_nav(s, source=None):
if s is None:
return None
if not is_cef(s):
return None
if is_series(s) and source is None:
source = s.name.source
source = source or "Y"
return get(get_cef_nav_ticker(s), source=source, mode="PR", error='ignore', cache_fails=True)
def get_cef_premium(s, source="AV"):
nav = get_cef_nav(s, source=source)
if nav is None:
warn(f'Unable to get NAV for {get_pretty_name(s)}')
return None
pr = get(s, source=source, mode="PR")
if pr.index[-1] > nav.index[-1]:
warn(f"{get_ticker_name(s)} filling NAV history gap from {nav.index[-1]} to {pr.index[-1]}")
nav = nav.reindex(pr.index).fillna(method='ffill')
prem = (pr / nav - 1) * 100
return name(prem, f"{get_name(s)} premium")
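# e.g. a fund priced at 10.50 with a NAV of 10.00 gives (10.50/10.00 - 1) * 100
# = +5.0, a 5% premium; negative values indicate a discount to NAV.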
def show_cef_premium(*all):
frm.show(1, lmap(get_cef_premium, all), ta=False, log=False, title="Premium")
def show_cef_zscore(*all):
    frm.show(-2, -1, 1, 2, lmap(get_cef_zscore, all), ta=False, log=False, title="3y z-score")
def show_cef_nav_and_pr(*all):
frm.show(0, lmap(get_cef_nav, all), lmap(pr, all), ta=False, title="NAV and Price")
def show_cef_nav_and_ntr(*all):
frm.show(lmap(get_cef_nav_ntr, all), lmap(ntr, all), ta=False, title="NAV and market NTR")
def get_cef_curr_premium(s):
if not is_cef(s):
return 0
p = get_cef_premium(s)
if p is None:
# warn(f"can't get calculated premium, using meta_data premium instead for {get_ticker_name(s)}")
p = get_cef_meta(s, "premium")
if not p is None:
warn(f"can't get calculated premium, using meta_data premium instead for {get_ticker_name(s)}")
return p
return None
return p.dropna()[-1]
def get_cef_start_premium(s):
p = get_cef_premium(s)
if p is None:
return None
return p.dropna()[0]
def get_cef_nav_yield_no_fees(s, type='normal'):
return get_cef_nav_yield(s, type=type, reduce_fees=False)
def get_cef_nav_yield(s, type='normal', reduce_fees=True, source="AV"):
nav = get_cef_nav(s)
if nav is None:
return None
res = frm.get_yield(s, type=type, altPriceName=get_cef_nav_ticker(s)+"@"+source, reduce_fees=reduce_fees)
name(res, f"{res.name} NAV")
return res
def get_cef_curr_nav_yield_no_fees(s):
res = get_cef_nav_yield_no_fees(s)
return 0 if res is None or len(res) == 0 else res[-1]
def get_cef_cur_nav_yield(s):
res = get_cef_nav_yield(s)
return 0 if res is None or len(res) == 0 else res[-1]
def get_cef_zscore(s, period=365*3):
p = get_cef_premium(s)
if p is None:
return None
p_avg = ma(p, period)
p_std = mstd(p, period)
p_zscore = (p-p_avg)/p_std
return name(p_zscore, f"{get_name(s)} zscore")
def get_cef_curr_zscore(s, period=365*3):
if not is_cef(s):
return None
res = get_cef_zscore(s, period=period)
return None if res is None or len(res) == 0 else res[-1]
def get_cef_nav_ntr(s):
if not is_cef(s):
return None
if get_cef_nav(s) is None:
return None
return name(getNtr(s, {"mode": "NTR"}, alt_price_symbol=get_cef_nav_ticker(s)), f"{get_name(s, nomode=True)} NAV NTR")
def get_cef_nav_intr(s):
if get_cef_nav(s) is None:
return None
return name(get_intr(s, {"mode": "NTR"}, alt_price_symbol=get_cef_nav_ticker(s)), f"{get_name(s, nomode=True)} NAV INTR")
def show_cef_premium_and_returns(s):
nav = get_cef_nav_ntr(s)
if nav is None:
return
frm.show(get_cef_premium(s), (ntr(s) / nav - 1)*100, ta=False, title="Effect of premium/discount of NTR returns")
def show_cef_relative_premium(a, b):
a = get_cef_premium(a)
b = get_cef_premium(b)
if a is None or b is None:
return
frm.show(1, a - b, ta=False, log=False, title="Relative Premium")
def analyze_cef(s, base='SPY'):
s = get(s)
if is_cef(base):
base_cef = base
base_ntr = ntr(base)
else:
base_cef = None
ntr_s = ntr(s)
frm.show(get_cef_premium(s), frm.get_income(s, smooth=1)/10, ta=False)
if not base_cef is None:
frm.show(get_cef_premium(base_cef), frm.get_income(base_cef, smooth=1)/10, ta=False)
frm.show(0, 5, get_cef_nav_yield(ntr_s, type='true', reduce_fees=False), get_yield_true_no_fees(ntr_s), ta=False, title="NAV and Market net-yield (no fees)")
if not base_cef is None:
frm.show(0, 5, get_cef_nav_yield(base_ntr, type='true', reduce_fees=False), get_yield_true_no_fees(base_ntr), ta=False, title="NAV and Market net-yield (no fees)")
show_cef_premium(s, base_cef)
if not base_cef is None:
show_cef_relative_premium(s, base)
show_cef_premium_and_returns(s)
show_cef_zscore(s, base_cef)
show_cef_nav_and_pr(s, base_cef)
show_cef_nav_and_ntr(s, base_cef)
frm.show_dd(get_cef_nav(s), get_cef_nav_ntr(s), get_cef_nav(base_cef), get_cef_nav_ntr(base_cef), do_get=False, mode='', title_prefix="NAV / NAV-NTR")
frm.show_comp(s, base)
def get_pr_loss_last_week(s):
# if is_cef(s):
# nav = get_cef_nav(s)
# if nav is None:
# return None
# else:
nav = get(s, mode="PR", untrim=True)
nav = nav["2018-10-01":]
if len(nav) < 10:
return None
return (nav[-1] / nav[0] - 1) * 100
def get_cef_nav_loss_2010(s):
if is_cef(s):
nav = get_cef_nav(s)
if nav is None:
return None
return -cagr(lr(nav["2010":]))
else:
return -cagr(lr(get(s, mode="PR", untrim=True)["2010":]))
def get_cef_nav_loss_2013(s):
if is_cef(s):
nav = get_cef_nav(s)
if nav is None:
return None
return -cagr(lr(nav["2013":]))
else:
return -cagr(lr(get(s, mode="PR", untrim=True)["2013":]))
def get_cef_roc_3y(s):
return get_cef_meta(s, "roc_3y")
def get_cef_coverage(s):
r = get_cef_meta(s, "coverage")
if r is None:
return r
return r - 100
def get_cef_leverage(s):
return get_cef_meta(s, "total_leverage")
def get_cef_nav_or_pr(s, untrim):
if is_cef(s):
nav = get_cef_nav(s)
if not nav is None:
if not untrim:
nav = nav[s.index[0]:]
return nav
return get(s, mode="PR", untrim=untrim)
def ulcer_nav(s):
return ulcer(get_cef_nav_or_pr(s, untrim=False))
def ulcer_nav_ntr(s):
if not is_cef(s):
return None
ntr = get_cef_nav_ntr(s)
if ntr is None:
return None
return ulcer(ntr)
def get_cef_section(s):
sec = get_etf_cef_meta(s, 'yc_sub_category', 'sec_sub')
if not sec:
return '<NA>'
return sec.replace(" Bond Funds", "").replace(" Equity Leveraged", "").replace(" Bond", "").replace(" Funds", "").replace("Taxable Municipal", "Municipal").replace("US Government", "US Govt").replace("Emerging Market Income", "EM Income").replace("Global Real Estate, REIT & Real Assets", 'Real Estate')
def get_cef_maxdd_nav_ntr(s):
ntr = get_cef_nav_ntr(s)
if ntr is None:
return None
    #ntr = drop_outliers(ntr)  # this completely messes up NRO, for example
if len(ntr) == 0:
return None
return max_dd(ntr)
def get_cef_maxdd_nav_ntr_2008(s):
nav = get_cef_nav(s)
if nav is None or len(nav[:"2007-02"]) == 0:
return None
return get_cef_maxdd_nav_ntr(get(s, untrim=True)["2007-02":])
def get_sponsor(s):
spn = get_cef_meta(s, "sponsor")
if not spn:
return None
return spn.replace("Fund", '').replace("Advisors", '').replace("Management", '').replace("Partners", '').replace("Investment", '').replace("Company", '').replace("Advisers", '').replace("Services", '').replace("Financial", '').replace("Capital", '').replace("Management", '').replace('Incorporated', '')
| 41.271875 | 678 | 0.673885 | 2,330 | 13,207 | 3.629614 | 0.329185 | 0.048244 | 0.039376 | 0.013007 | 0.371763 | 0.311694 | 0.253754 | 0.215561 | 0.19676 | 0.177486 | 0 | 0.010702 | 0.16514 | 13,207 | 319 | 679 | 41.401254 | 0.756303 | 0.245779 | 0 | 0.274262 | 0 | 0.008439 | 0.17823 | 0.003625 | 0 | 0 | 0 | 0 | 0 | 1 | 0.14346 | false | 0 | 0.029536 | 0.016878 | 0.409283 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ca9c994cab0cb35cf5defa4b1739417be15fb6c9 | 2,616 | py | Python | ecco_v4_py/vector_calc.py | zelunwu/ECCOv4-py | 03b6a1b01fcd17b0b88c25bee205c195df52d7fa | [
"MIT"
] | 1 | 2022-03-08T12:27:08.000Z | 2022-03-08T12:27:08.000Z | ecco_v4_py/vector_calc.py | zelunwu/ECCOv4-py | 03b6a1b01fcd17b0b88c25bee205c195df52d7fa | [
"MIT"
] | null | null | null | ecco_v4_py/vector_calc.py | zelunwu/ECCOv4-py | 03b6a1b01fcd17b0b88c25bee205c195df52d7fa | [
"MIT"
] | 1 | 2020-05-09T01:07:03.000Z | 2020-05-09T01:07:03.000Z | """
Functions defined on vector valued fields
"""
import xarray as xr
import xgcm
from .ecco_utils import get_llc_grid
def UEVNfromUXVY(xfld,yfld,coords,grid=None):
"""Compute east, north facing vector field components from x, y components
by interpolating to cell centers and rotating by grid cell angle
Note: this mirrors gcmfaces_calc/calc_UEVNfromUXVY.m
Parameters
----------
xfld, yfld : xarray DataArray
fields living on west and south grid cell edges, e.g. UVELMASS and VVELMASS
coords : xarray Dataset
must contain CS (cosine of grid orientation) and
SN (sine of grid orientation)
grid : xgcm Grid object, optional
see ecco_utils.get_llc_grid and xgcm.Grid
Returns
-------
u_east, v_north : xarray DataArray
eastward and northward components of input vector field at
grid cell center/tracer points
"""
# Check to make sure 'CS' and 'SN' are in coords
# before doing calculation
required_fields = ['CS','SN']
for var in required_fields:
if var not in coords.variables:
raise KeyError('Could not find %s in coords DataSet' % var)
# If no grid, establish it
if grid is None:
grid = get_llc_grid(coords)
# First, interpolate velocity fields from cell edges to cell centers
velc = grid.interp_2d_vector({'X': xfld, 'Y': yfld},boundary='fill')
# Compute UE VN using cos(), sin()
u_east = velc['X']*coords['CS'] - velc['Y']*coords['SN']
v_north= velc['X']*coords['SN'] + velc['Y']*coords['CS']
return u_east, v_north
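# A minimal usage sketch (assumes `ds` is an ECCO llc grid dataset holding
# UVELMASS/VVELMASS plus the CS/SN grid-orientation coordinates):
#   grid = get_llc_grid(ds)
#   u_east, v_north = UEVNfromUXVY(ds['UVELMASS'], ds['VVELMASS'], ds, grid=grid)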
def get_latitude_masks(lat_val,yc,grid):
"""Compute maskW/S which grabs vector field grid cells along specified latitude
band and corrects the sign associated with X-Y LLC grid
This mirrors the MATLAB function gcmfaces/gcmfaces_calc/gcmfaces_lines_zonal.m
Parameters
----------
lat_val : int
latitude at which to compute mask
yc : xarray DataArray
Contains latitude values at cell centers
grid : xgcm Grid object
llc grid object generated via get_llc_grid
Returns
-------
maskWedge, maskSedge : xarray DataArray
contains masks of latitude band at grid cell west and south grid edges
"""
    # Compute difference in X, Y direction.
    # ones/where converts the boolean latitude test to 1 (True) and 0 (False)
ones = xr.ones_like(yc)
maskC = ones.where(yc>=lat_val,0)
maskWedge = grid.diff( maskC, 'X', boundary='fill')
maskSedge = grid.diff( maskC, 'Y', boundary='fill')
return maskWedge, maskSedge
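# Usage sketch (hypothetical latitude): masks selecting grid edges along the
# 26N band, e.g. for computing meridional transports:
#   maskW, maskS = get_latitude_masks(26, ds['YC'], grid)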
| 30.068966 | 84 | 0.66552 | 371 | 2,616 | 4.609164 | 0.431267 | 0.024561 | 0.023392 | 0.018713 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00303 | 0.243119 | 2,616 | 86 | 85 | 30.418605 | 0.860606 | 0.603211 | 0 | 0 | 0 | 0 | 0.075964 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.15 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
045f42e7e3687860c6585444a23e801817f61e12 | 2,409 | py | Python | StockExporter.py | A1eXFei/StockMarket3 | 01000d0731395868c4477982c6ed6c632c9a7427 | [
"Apache-2.0"
] | null | null | null | StockExporter.py | A1eXFei/StockMarket3 | 01000d0731395868c4477982c6ed6c632c9a7427 | [
"Apache-2.0"
] | null | null | null | StockExporter.py | A1eXFei/StockMarket3 | 01000d0731395868c4477982c6ed6c632c9a7427 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
import os
import logging
from logging.config import fileConfig
from util import DatabaseUtil as dbu
from util import ExportUtil as eu
fileConfig("logging_config.ini")
logger = logging.getLogger(__name__)
class StockExporter:
def __init__(self, export_dir, stock_code, start_date, end_date):
self.dir = export_dir
self.stock_code = stock_code
self.end_year = int(end_date[:4])
self.start_year = int(start_date[:4])
if self.end_year == self.start_year:
self.start_year = self.end_year - 1
self.sql_basic_data = "SELECT * FROM pdtb_stock_basic_data t WHERE t.CODE ='" + self.stock_code + "' "
self.sql_tech_data = "SELECT * FROM pdtb_stock_tech_data t WHERE t.CODE ='" + self.stock_code + "' "
def export(self):
for year in range(self.start_year, self.end_year):
logger.info("Export data for stock code " + self.stock_code + " for year " + str(year))
current_dir = self.dir + os.sep + self.stock_code + os.sep + str(year)
first_day = str(year) + "-01-01"
last_day = str(year) + "-12-31"
'''EXPORT BASIC DATA'''
basic_data_filename = "BASIC_" + self.stock_code + "_" + str(year) + ".csv"
sql_basic_data = self.sql_basic_data + "and t.DATE BETWEEN '" + first_day + "' AND '" + last_day + "'"
basic_data = dbu.get_pd_data(sql_basic_data)
if basic_data.shape[0] > 0:
if not os.path.exists(current_dir):
logger.debug("Make dir because there is no existing dir")
os.makedirs(current_dir)
eu.export(current_dir, basic_data_filename, basic_data)
logger.info("Basic data exported")
'''EXPORT TECH DATA'''
tech_data_filename = "TECH_" + self.stock_code + "_" + str(year) + ".csv"
sql_tech_data = self.sql_tech_data + "and t.DATE BETWEEN '" + first_day + "' AND '" + last_day + "'"
tech_data = dbu.get_pd_data(sql_tech_data)
if tech_data.shape[0] > 0:
if not os.path.exists(current_dir):
logger.debug("Make dir because there is no existing dir")
os.makedirs(current_dir)
eu.export(current_dir, tech_data_filename, tech_data)
logger.info("Tech data exported")
| 43.8 | 114 | 0.602325 | 326 | 2,409 | 4.177914 | 0.236196 | 0.079295 | 0.066814 | 0.037445 | 0.420705 | 0.374449 | 0.311307 | 0.273128 | 0.232012 | 0.232012 | 0 | 0.00927 | 0.28352 | 2,409 | 54 | 115 | 44.611111 | 0.779838 | 0.008717 | 0 | 0.146341 | 0 | 0 | 0.158906 | 0.008971 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.121951 | 0 | 0.195122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
046046d511220f9b7bfb8a21f8d381f0f028cf8d | 34,758 | py | Python | experiments/2021-02-18-knockouts/local/py_analysis/avida.py | amlalejini/plastic-evolvability-avida | 909d944e52a102e09dd714a8b7e0f2274c4a8072 | [
"MIT"
] | 2 | 2021-09-16T14:47:43.000Z | 2021-10-31T04:55:16.000Z | experiments/2021-02-18-knockouts/local/py_analysis/avida.py | amlalejini/evolutionary-consequences-of-plasticity | 909d944e52a102e09dd714a8b7e0f2274c4a8072 | [
"MIT"
] | null | null | null | experiments/2021-02-18-knockouts/local/py_analysis/avida.py | amlalejini/evolutionary-consequences-of-plasticity | 909d944e52a102e09dd714a8b7e0f2274c4a8072 | [
"MIT"
] | 2 | 2020-08-19T20:01:14.000Z | 2020-12-21T21:24:12.000Z | from helpers import *
import copy
import pygame
import random
# Render vars
inst_x = 32
inst_width = 128
arrow_width = inst_x / 2
font_size = 10
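# NOTE: pygame.font.SysFont requires an initialized font system, so
# pygame.init() (or pygame.font.init()) must run before this module is imported.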
font = pygame.font.SysFont('ubuntu', size = font_size)
large_font_size = 20
large_font = pygame.font.SysFont('ubuntu', size = large_font_size)
# Basic Avidian class. Handles the bare minimum of execution
class Organism:
def __init__(self, genome, sensors, in_A = True):
self.genome = copy.deepcopy(list(genome))
self.genome_clean = copy.deepcopy(genome)
self.inst_map = get_inst_map()
self.inst_color_map = get_inst_color_map()
self.sensors_work = sensors
self.in_env_A = in_A
self.reset()
    # Completely reset the organism to its original state *except* for its genome (mutations persist)
def reset(self):
print('########### RESET ###########')
self.reg_a = 0
self.reg_b = 0
self.reg_c = 0
self.reg_a_str = '0'
self.reg_b_str = '0'
self.reg_c_str = '0'
self.mem = []
self.mem = copy.deepcopy(list(self.genome))
self.inst_pointer = 0
self.read_head = 0
self.write_head = 0
self.flow_head = 0
self.stack_a = []
self.stack_b = []
self.stack_a_str = []
self.stack_b_str = []
self.using_stack_a = True
self.cur_input = None
self.copy_history = []
self.inst_executed = 0
self.task_map = {
'NOT' : False,
'AND' : False,
'OR' : False,
'NAND' : False,
'ANDNOT' : False,
'ORNOT' : False}
self.input_list = [
random.randint(0, (1 << 32) - 1),
random.randint(0, (1 << 32) - 1),
random.randint(0, (1 << 32) - 1)]
# Plastic
#self.input_list = [252908703, 856220990, 1432502763]
self.input_idx = 0
self.output_list = []
self.output_list_str = []
# Clear all mutations in genome and reset
def clear(self):
print('########### CLEAR ###########')
self.genome = copy.deepcopy(list(self.genome_clean))
self.reset()
    # Mutate the given site to the next or previous instruction, cycling through the 32-letter alphabet (a-z plus A-F)
def mutate(self, idx, reverse = False):
diff = 1
if reverse:
diff = -1
self.genome[idx] = chr(ord(self.genome[idx]) + diff)
if self.genome[idx] == 'G':
self.genome[idx] = 'a'
elif ord(self.genome[idx]) == ord('a') - 1:
self.genome[idx] = 'F'
elif ord(self.genome[idx]) == ord('z') + 1:
self.genome[idx] = 'A'
elif ord(self.genome[idx]) == ord('A') - 1:
self.genome[idx] = 'z'
self.mem[idx] = self.genome[idx]
print('########### MUTATE ###########')
self.reset()
    # Add the given value to the active stack.
    # Optional arg keeps track of where the value came from
def stack_push(self, val, s = '???'):
if self.using_stack_a:
self.stack_a.append(val)
self.stack_a_str.append(s)
else:
self.stack_b.append(val)
self.stack_b_str.append(s)
# Create a list of no-operations instructions following the current instruction
def get_following_nops(self):
cur_idx = self.inst_pointer + 1
L = []
if cur_idx >= len(self.mem):
return L
while cur_idx != self.inst_pointer:
inst = self.inst_map[self.mem[cur_idx]]
if inst == 'nop-A':
L.append('a')
elif inst == 'nop-B':
L.append('b')
elif inst == 'nop-C':
L.append('c')
else:
break
cur_idx += 1
if cur_idx >= len(self.mem):
cur_idx = 0
return L
    # Given a list of letters in {a, b, c}, return their complements as a list
def get_complement(self, in_L):
out_L = []
for x in in_L:
if x == 'a':
out_L.append('b')
elif x == 'b':
out_L.append('c')
elif x == 'c':
out_L.append('a')
return out_L
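    # e.g. get_complement(['a', 'b']) -> ['b', 'c']; labels in memory are
    # located via their complements, as in Avida's nop-based addressing.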
# Search ahead for the given label (nop pattern). If found return that position in memory
def find_label(self, label):
label_size = len(label)
cur_pos = self.inst_pointer + 1
start_pos = self.inst_pointer
while cur_pos != start_pos:
match = True
for idx in range(label_size):
                if self.mem[(cur_pos + idx) % len(self.mem)] != label[idx]:
match = False
break
if match:
#return cur_pos + label_size - 1
return cur_pos
cur_pos += 1
if cur_pos >= len(self.mem):
cur_pos = 0
return -1
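    # e.g. find_label(['b', 'c']) returns the index in memory where a nop-B,
    # nop-C pair begins (searching forward with wraparound), or -1 if absent.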
    # Handle the organism's output and check for task performance
def do_output(self, val, s = '???'):
print('Output: ', val)
self.output_list.append(val)
self.output_list_str.append(s)
for idx_a in range(len(self.input_list)):
in_a = self.input_list[idx_a]
if not self.task_map['NOT']:
if ~in_a == val:
self.task_map['NOT'] = 'True at IP = ' + str(self.inst_pointer)
for idx_b in range(idx_a + 1, len(self.input_list)):
in_b = self.input_list[idx_b]
if not self.task_map['AND']:
if in_a & in_b == val:
self.task_map['AND'] = 'True at IP = ' + str(self.inst_pointer)
                if not self.task_map['OR']:
                    if in_a | in_b == val:
                        self.task_map['OR'] = 'True at IP = ' + str(self.inst_pointer)
if not self.task_map['NAND']:
if ~(in_a & in_b) == val:
self.task_map['NAND'] = 'True at IP = ' + str(self.inst_pointer)
if not self.task_map['ANDNOT']:
if ~in_a & in_b == val or in_a & ~in_b == val:
self.task_map['ANDNOT'] = 'True at IP = ' + str(self.inst_pointer)
if not self.task_map['ORNOT']:
if ~in_a | in_b == val or in_a | ~in_b == val:
self.task_map['ORNOT'] = 'True at IP = ' + str(self.inst_pointer)
# Return the next input value
def get_input(self):
self.cur_input = self.input_list[self.input_idx]
self.input_idx = (self.input_idx + 1) % 3
return self.cur_input
    # Execute the instruction at the current instruction pointer (may also consume following nops)
def execute_inst(self):
char = self.mem[self.inst_pointer]
inst = self.inst_map[char]
inst_pointer_inc = 1
if inst == 'nop-A' or inst == 'nop-B' or inst == 'nop-C' or inst == 'nop-X':
pass
elif inst == 'if-n-equ':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
if(nop_list[0] == 'a'):
if self.reg_a != self.reg_b:
inst_pointer_inc = 2
else:
inst_pointer_inc = 3
elif(nop_list[0] == 'b'):
if self.reg_b != self.reg_c:
inst_pointer_inc = 2
else:
inst_pointer_inc = 3
elif(nop_list[0] == 'c'):
if self.reg_c != self.reg_a:
inst_pointer_inc = 2
else:
inst_pointer_inc = 3
else:
if self.reg_b != self.reg_c:
inst_pointer_inc = 1
else:
inst_pointer_inc = 2
elif inst == 'if-less':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
if(nop_list[0] == 'a'):
if self.reg_a < self.reg_b:
inst_pointer_inc = 2
else:
inst_pointer_inc = 3
elif(nop_list[0] == 'b'):
if self.reg_b < self.reg_c:
inst_pointer_inc = 2
else:
inst_pointer_inc = 3
elif(nop_list[0] == 'c'):
if self.reg_c < self.reg_a:
inst_pointer_inc = 2
else:
inst_pointer_inc = 3
else:
if self.reg_b < self.reg_c:
inst_pointer_inc = 1
else:
inst_pointer_inc = 2
elif inst == 'if-label':
inst_pointer_inc = 1
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = len(nop_list) + 2
search_label = self.get_complement(nop_list)
if len(search_label) > len(self.copy_history):
pass
else:
match = True
for idx in range(len(search_label)):
if search_label[len(search_label) - (idx + 1)] != \
self.copy_history[len(self.copy_history) - (idx + 1)]:
match = False
break
if match:
inst_pointer_inc = len(nop_list) + 1
elif inst == 'mov-head':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
self.inst_pointer = self.flow_head
inst_pointer_inc = 0
elif(nop_list[0] == 'b'):
self.read_head = self.flow_head
elif(nop_list[0] == 'c'):
self.write_head = self.flow_head
else:
self.inst_pointer = self.flow_head
inst_pointer_inc = 0
elif inst == 'jmp-head':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
self.inst_pointer += 2
                    inst_pointer_inc = 0
self.inst_pointer += self.reg_c
elif(nop_list[0] == 'b'):
self.read_head += self.reg_c
elif(nop_list[0] == 'c'):
self.write_head += self.reg_c
else:
self.inst_pointer += 1
                inst_pointer_inc = 0
self.inst_pointer += self.reg_c
elif inst == 'get-head':
val = self.inst_pointer
val_str = 'IP at ' + str(self.inst_pointer)
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
val = self.inst_pointer
val_str = 'IP at ' + str(self.inst_pointer)
elif(nop_list[0] == 'b'):
val = self.read_head
val_str = 'RH at ' + str(self.inst_pointer)
elif(nop_list[0] == 'c'):
val = self.write_head
val_str = 'WH at ' + str(self.inst_pointer)
self.reg_c = val
self.reg_c_str = val_str
elif inst == 'set-flow':
nop_list = self.get_following_nops()
pos = self.reg_c
if len(nop_list) > 0:
print(nop_list)
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
pos = self.reg_a
elif(nop_list[0] == 'b'):
pos = self.reg_b
elif(nop_list[0] == 'c'):
pos = self.reg_c
if pos < 0:
pos = 0
elif pos > len(self.mem) and pos < 2 * len(self.mem):
pos -= len(self.mem)
elif pos > 2 * len(self.mem):
pos = pos % len(self.mem)
self.flow_head = pos
elif inst == 'shift-r':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
self.reg_a >>= 1
self.reg_a_str = '(' + self.reg_a_str + ') >> 1'
elif(nop_list[0] == 'b'):
self.reg_b >>= 1
self.reg_b_str = '(' + self.reg_b_str + ') >> 1'
elif(nop_list[0] == 'c'):
self.reg_c >>= 1
self.reg_c_str = '(' + self.reg_c_str + ') >> 1'
else:
self.reg_b >>= 1
self.reg_b_str = '(' + self.reg_b_str + ') >> 1'
elif inst == 'shift-l':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
self.reg_a <<= 1
self.reg_a &= ((1 << 32) - 1) # Truncate to 32 bits
self.reg_a_str = '(' + self.reg_a_str + ') << 1'
elif(nop_list[0] == 'b'):
self.reg_b <<= 1
self.reg_b &= ((1 << 32) - 1) # Truncate to 32 bits
self.reg_b_str = '(' + self.reg_b_str + ') << 1'
elif(nop_list[0] == 'c'):
self.reg_c <<= 1
self.reg_c &= ((1 << 32) - 1) # Truncate to 32 bits
self.reg_c_str = '(' + self.reg_c_str + ') << 1'
else:
self.reg_b <<= 1
self.reg_b &= ((1 << 32) - 1) # Truncate to 32 bits
self.reg_b_str = '(' + self.reg_b_str + ') << 1'
elif inst == 'inc':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
self.reg_a += 1
self.reg_a_str = '(' + self.reg_a_str + ')' + ' + 1'
elif(nop_list[0] == 'b'):
self.reg_b += 1
self.reg_b_str = '(' + self.reg_b_str + ')' + ' + 1'
elif(nop_list[0] == 'c'):
self.reg_c += 1
self.reg_c_str = '(' + self.reg_c_str + ')' + ' + 1'
else:
self.reg_b += 1
self.reg_b_str = '(' + self.reg_b_str + ')' + ' + 1'
elif inst == 'dec':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
self.reg_a -= 1
self.reg_a_str = '(' + self.reg_a_str + ')' + ' - 1'
elif(nop_list[0] == 'b'):
self.reg_b -= 1
self.reg_b_str = '(' + self.reg_b_str + ')' + ' - 1'
elif(nop_list[0] == 'c'):
self.reg_c -= 1
self.reg_c_str = '(' + self.reg_c_str + ')' + ' - 1'
else:
self.reg_b -= 1
self.reg_b_str = '(' + self.reg_b_str + ')' + ' - 1'
elif inst == 'push':
nop_list = self.get_following_nops()
val_str = 'NA'
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
val = self.reg_a
val_str = self.reg_a_str
elif(nop_list[0] == 'b'):
val = self.reg_b
val_str = self.reg_b_str
elif(nop_list[0] == 'c'):
val = self.reg_c
val_str = self.reg_c_str
else:
val = self.reg_b
val_str = self.reg_b_str
if self.using_stack_a:
self.stack_a.append(val)
self.stack_a_str.append(val_str)
else:
self.stack_b.append(val)
self.stack_b_str.append(val_str)
elif inst == 'pop':
popped_val = 0
popped_str = '0'
if self.using_stack_a:
if len(self.stack_a) > 0:
popped_val = self.stack_a[-1]
self.stack_a = self.stack_a[:-1]
popped_str = self.stack_a_str[-1]
self.stack_a_str = self.stack_a_str[:-1]
else:
if len(self.stack_b) > 0:
popped_val = self.stack_b[-1]
self.stack_b = self.stack_b[:-1]
popped_str = self.stack_b_str[-1]
self.stack_b_str = self.stack_b_str[:-1]
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
self.reg_a = popped_val
self.reg_a_str = popped_str
elif(nop_list[0] == 'b'):
self.reg_b = popped_val
self.reg_b_str = popped_str
elif(nop_list[0] == 'c'):
self.reg_c = popped_val
self.reg_c_str = popped_str
else:
self.reg_b = popped_val
self.reg_b_str = popped_str
elif inst == 'swap-stk':
self.using_stack_a = not self.using_stack_a
elif inst == 'swap':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
tmp = self.reg_a
tmp_str = self.reg_a_str
self.reg_a = self.reg_b
self.reg_a_str = self.reg_b_str
self.reg_b = tmp
self.reg_b_str = tmp_str
elif(nop_list[0] == 'b'):
tmp = self.reg_b
tmp_str = self.reg_b_str
self.reg_b = self.reg_c
self.reg_b_str = self.reg_c_str
self.reg_c = tmp
self.reg_c_str = tmp_str
elif(nop_list[0] == 'c'):
tmp = self.reg_c
tmp_str = self.reg_c_str
self.reg_c = self.reg_a
self.reg_c_str = self.reg_a_str
self.reg_a = tmp
self.reg_a_str = tmp_str
else:
tmp = self.reg_b
tmp_str = self.reg_b_str
self.reg_b = self.reg_c
self.reg_b_str = self.reg_c_str
self.reg_c = tmp
self.reg_c_str = tmp_str
elif inst == 'add':
res = self.reg_b + self.reg_c
res_str = '(' + self.reg_b_str + ') + (' + self.reg_c_str + ')'
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
self.reg_a = res
self.reg_a_str = res_str
elif(nop_list[0] == 'b'):
self.reg_b = res
self.reg_b_str = res_str
elif(nop_list[0] == 'c'):
self.reg_c = res
self.reg_c_str = res_str
else:
self.reg_b = res
self.reg_b_str = res_str
elif inst == 'sub':
res = self.reg_b - self.reg_c
res_str = '(' + self.reg_b_str + ') - (' + self.reg_c_str + ')'
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
self.reg_a = res
self.reg_a_str = res_str
elif(nop_list[0] == 'b'):
self.reg_b = res
self.reg_b_str = res_str
elif(nop_list[0] == 'c'):
self.reg_c = res
self.reg_c_str = res_str
else:
self.reg_b = res
self.reg_b_str = res_str
elif inst == 'nand':
res = ~(self.reg_b & self.reg_c)
res_str = '(' + self.reg_b_str + ') !& (' + self.reg_c_str + ')'
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
self.reg_a = res
self.reg_a_str = res_str
elif(nop_list[0] == 'b'):
self.reg_b = res
self.reg_b_str = res_str
elif(nop_list[0] == 'c'):
self.reg_c = res
self.reg_c_str = res_str
else:
self.reg_b = res
self.reg_b_str = res_str
elif inst == 'h-alloc':
old_len = len(self.mem)
            len_add = len(self.genome)  # originally self.reg_b
if old_len + len_add >= len(self.genome) * 2:
len_add = len(self.genome) * 2 - old_len
print('Allocation an additional', len_add, 'bytes to existing ', old_len)
#if len_add < 1:
# print('Cannot allocate, too short')
#else:
if len_add > 0:
self.mem = self.mem + (['a'] * len_add)
self.reg_a = old_len
elif inst == 'h-copy':
self.mem[self.write_head] = self.mem[self.read_head]
self.copy_history.append(self.mem[self.read_head])
self.write_head += 1
self.read_head += 1
elif inst == 'h-divide':
#if self.inst_executed > int(len(self.genome) / 2):
if len(self.copy_history) >= len(self.genome):
print('Offspring:')
print(self.mem[self.read_head:self.write_head])
self.reset()
inst_pointer_inc = 0
else:
                print('Unsuccessful divide: not enough instructions copied')
elif inst == 'IO':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = 2
if(nop_list[0] == 'a'):
tmp_str = 'in[' + str(self.input_idx) + ',' + str(self.inst_pointer) + ']'
self.do_output(self.reg_a, '[' + str(self.inst_pointer) + '] ' + self.reg_a_str)
self.reg_a = self.get_input()
self.reg_a_str = tmp_str
elif(nop_list[0] == 'b'):
tmp_str = 'in[' + str(self.input_idx) + ',' + str(self.inst_pointer) + ']'
self.do_output(self.reg_b, '[' + str(self.inst_pointer) + '] ' + self.reg_b_str)
self.reg_b = self.get_input()
self.reg_b_str = tmp_str
elif(nop_list[0] == 'c'):
tmp_str = 'in[' + str(self.input_idx) + ',' + str(self.inst_pointer) + ']'
self.do_output(self.reg_c, '[' + str(self.inst_pointer) + '] ' + self.reg_b_str)
self.reg_c = self.get_input()
self.reg_c_str = tmp_str
else:
tmp_str = 'in[' + str(self.input_idx) + ',' + str(self.inst_pointer) + ']'
self.do_output(self.reg_b, '[' + str(self.inst_pointer) + '] ' + self.reg_b_str)
self.reg_b = self.get_input()
tmp_str = self.reg_b
elif inst == 'h-search':
nop_list = self.get_following_nops()
if len(nop_list) > 0:
inst_pointer_inc = len(nop_list) + 1
pos = self.find_label(self.get_complement(nop_list))
self.reg_b = pos - self.inst_pointer
self.reg_c = len(nop_list)
self.flow_head = pos + len(nop_list)
else:
self.reg_b = 0
self.reg_c = 0
self.flow_head = self.inst_pointer + 1
elif inst == 'sense-react-NAND':
if(not self.sensors_work):
pass
else:
if(not self.in_env_A):
self.stack_push(1, 'sense-pos')
else:
self.stack_push(-1, 'sense-neg')
elif inst == 'sense-react-NOT':
if(not self.sensors_work):
pass
else:
if(self.in_env_A):
self.stack_push(1, 'sense-pos')
else:
self.stack_push(-1, 'sense-neg')
elif inst == 'sense-react-AND':
if(not self.sensors_work):
pass
else:
if(self.in_env_A):
self.stack_push(1, 'sense-pos')
else:
self.stack_push(-1, 'sense-neg')
elif inst == 'sense-react-ORN':
if(not self.sensors_work):
pass
else:
if(not self.in_env_A):
self.stack_push(1, 'sense-pos')
else:
self.stack_push(-1, 'sense-neg')
elif inst == 'sense-react-OR':
if(not self.sensors_work):
pass
else:
if(self.in_env_A):
self.stack_push(1, 'sense-pos')
else:
self.stack_push(-1, 'sense-neg')
elif inst == 'sense-react-ANDN':
if(not self.sensors_work):
pass
else:
if(not self.in_env_A):
self.stack_push(1, 'sense-pos')
else:
self.stack_push(-1, 'sense-neg')
self.inst_pointer += inst_pointer_inc
self.inst_executed += 1
# Render the execution heads
def render_heads(self, surf):
# Instruction pointer
pygame.draw.polygon(surf, (238,0,0), \
((int(inst_x / 2), self.inst_pointer * font_size), \
(inst_x, int((self.inst_pointer + 0.5) * font_size)), \
(int(inst_x / 2), (self.inst_pointer + 1) * font_size)))
# Flow head
if self.flow_head < len(self.genome):
if self.inst_pointer == self.flow_head:
pygame.draw.polygon(surf, (238,238,0), \
((0, self.flow_head * font_size), \
(int(inst_x / 2), int((self.flow_head + 0.5) * font_size)), \
(0, (self.flow_head + 1) * font_size)))
else:
pygame.draw.polygon(surf, (238,238,0), \
((int(inst_x / 2), self.flow_head * font_size), \
(inst_x, int((self.flow_head + 0.5) * font_size)), \
(int(inst_x / 2), (self.flow_head + 1) * font_size)))
else:
x = inst_width + inst_x * 3
y_top = (self.flow_head - len(self.genome)) * font_size
pygame.draw.polygon(surf, (238,238,0), \
( (x - arrow_width, y_top), \
(x, y_top + int(0.5 * font_size)), \
(x - arrow_width,y_top + font_size) ) )
# Read head
pygame.draw.polygon(surf, (0,238,0), \
((inst_x + inst_width + arrow_width, self.read_head * font_size), \
(inst_x + inst_width, int((self.read_head + 0.5) * font_size)), \
(inst_x + inst_width + arrow_width, (self.read_head + 1) * font_size)))
# Write head
if self.write_head < len(self.genome):
if self.read_head == self.write_head:
pygame.draw.polygon(surf, (0,0,238),
((inst_x + inst_width + arrow_width * 2, self.write_head * font_size), \
(inst_x + inst_width + arrow_width, int((self.write_head+0.5)*font_size)), \
(inst_x + inst_width + arrow_width * 2, (self.write_head + 1) * font_size)))
else:
pygame.draw.polygon(surf, (0,0,238), \
((inst_x + inst_width + arrow_width, self.write_head * font_size), \
(inst_x + inst_width, int((self.write_head + 0.5) * font_size)), \
(inst_x + inst_width + arrow_width, (self.write_head + 1) * font_size)))
else:
x = inst_x * 3 + inst_width * 2
pygame.draw.polygon(surf, (0,0,238),
((x + int(inst_x / 2), (self.write_head - len(self.genome)) * font_size), \
(x, int((self.write_head - len(self.genome) + 0.5) * font_size)), \
(x + int(inst_x / 2), (self.write_head + 1 - len(self.genome)) * font_size)))
# Render the status of the organism's registers, stacks, etc as text
def render_states(self, surf):
# Registers
surf.blit(large_font.render('Reg A:' + str(self.reg_a) + '; ' + str(self.reg_a_str), \
0, (255, 255, 255)), (512, 64))
surf.blit(large_font.render('Reg A:' + \
format(self.reg_a if self.reg_a >= 0 else self.reg_a + (1 << 32), '032b'),\
0, (255, 255, 255)), (512 + 32, 64 + large_font_size))
surf.blit(large_font.render('Reg B:' + str(self.reg_b) + '; ' + str(self.reg_b_str), \
0, (255, 255, 255)), (512, 64 + large_font_size * 2))
surf.blit(large_font.render('Reg B:' + \
format(self.reg_b if self.reg_b >= 0 else self.reg_b + (1 << 32), '032b'),\
0, (255, 255, 255)), (512 + 32, 64 + large_font_size * 3))
surf.blit(large_font.render('Reg C:' + str(self.reg_c) + '; ' + str(self.reg_c_str), \
0, (255, 255, 255)), (512, 64 + large_font_size * 4))
surf.blit(large_font.render('Reg C:' + format(self.reg_c, '032b'), 0, (255, 255, 255)), \
(512 + 32, 64 + large_font_size * 5))
# Heads
surf.blit(large_font.render('IP:' + str(self.inst_pointer), 0, (238, 0, 0)), \
(512, 64 + large_font_size * 6))
surf.blit(large_font.render('RH:' + str(self.read_head), 0, (0, 238, 0)), \
(512, 64 + large_font_size * 7))
surf.blit(large_font.render('WH:' + str(self.write_head), 0, (0, 0, 238)), \
(512, 64 + large_font_size * 8))
surf.blit(large_font.render('FH:' + str(self.flow_head), 0, (238, 238,0)), \
(512, 64 + large_font_size * 9))
# Stacks
if self.using_stack_a:
surf.blit(large_font.render('SA:' + str(self.stack_a) + str(self.stack_a_str), \
0, (255, 255,255)), (512, 64 + large_font_size * 10))
surf.blit(large_font.render('SB:' + str(self.stack_b) + str(self.stack_b_str), \
0, (150, 150,150)), (512, 64 + large_font_size * 11))
else:
surf.blit(large_font.render('SA:' + str(self.stack_a) + str(self.stack_a_str), \
0, (150,150,150)), (512, 64 + large_font_size * 10))
surf.blit(large_font.render('SB:' + str(self.stack_b) + str(self.stack_b_str), \
0, (255,255,255)), (512, 64 + large_font_size * 11))
# Tasks
surf.blit(large_font.render('NOT:' + str(self.task_map['NOT']), 0, (255, 255,255)), \
(512, 64 + large_font_size * 12))
surf.blit(large_font.render('AND:' + str(self.task_map['AND']), 0, (255, 255,255)), \
(512, 64 + large_font_size * 13))
surf.blit(large_font.render('OR:' + str(self.task_map['OR']), 0, (255, 255,255)), \
(512, 64 + large_font_size * 14))
surf.blit(large_font.render('NAND:' + str(self.task_map['NAND']), 0, (255, 255,255)), \
(512, 64 + large_font_size * 15))
surf.blit(large_font.render('ANDNOT:' + str(self.task_map['ANDNOT']), 0, (255, 255,255)), \
(512, 64 + large_font_size * 16))
surf.blit(large_font.render('ORNOT:' + str(self.task_map['ORNOT']), 0, (255, 255,255)), \
(512, 64 + large_font_size * 17))
# IO
surf.blit(large_font.render('Inputs:' + str(self.input_list), 0, (255, 255,255)), \
(512, 64 + large_font_size * 18))
cur_idx = 19
for i in range(len(self.input_list)):
val = self.input_list[i]
surf.blit(large_font.render(format(val if val >= 0 else val + (1 << 32), '032b'),\
0, (255, 255, 255)), (512 + 32, 64 + large_font_size * cur_idx))
cur_idx += 1
surf.blit(large_font.render('Outputs:' + str(self.output_list), 0, (255, 255,255)), \
(512, 64 + large_font_size * cur_idx))
cur_idx += 1
for i in range(len(self.output_list)):
val = self.output_list[i]
surf.blit(large_font.render(format(val if val >= 0 else val + (1 << 32), '032b') + \
'; ' + str(self.output_list_str[i]),\
0, (255, 255, 255)), (512 + 32, 64 + large_font_size * cur_idx))
cur_idx += 1
# Render the organism's genome
def render_genome(self, surf):
for locus_idx in range(len(self.genome)):
char = self.mem[locus_idx]
inst_name = self.inst_map[char]
surf.blit(font.render(str(locus_idx), 0, (255,255,255)), (0, locus_idx * font_size))
pygame.draw.rect(surf, self.inst_color_map[char], \
(inst_x, locus_idx * font_size, inst_width, font_size))
surf.blit(font.render(inst_name, 0, (255,255,255)), (inst_x, locus_idx * font_size))
if len(self.mem) > len(self.genome): # If a h-alloc has executed, draw the offspring memory
x = inst_width + inst_x * 3
for locus_idx in range(len(self.genome), len(self.mem)):
char = self.mem[locus_idx]
inst_name = self.inst_map[char]
pygame.draw.rect(surf, self.inst_color_map[char], \
(x, (locus_idx - len(self.genome)) * font_size, inst_width, font_size))
surf.blit(font.render(inst_name, 0, (255,255,255)),\
(x, (locus_idx - len(self.genome)) * font_size))
    # Call all of the render helper methods
def render(self, surf):
self.render_genome(surf)
self.render_heads(surf)
self.render_states(surf)
| 43.831021 | 101 | 0.466857 | 4,435 | 34,758 | 3.407892 | 0.060203 | 0.094019 | 0.047638 | 0.032023 | 0.707821 | 0.631203 | 0.562988 | 0.523025 | 0.488686 | 0.429999 | 0 | 0.040145 | 0.406611 | 34,758 | 792 | 102 | 43.886364 | 0.692655 | 0.039962 | 0 | 0.47561 | 0 | 0 | 0.034444 | 0 | 0.01084 | 0 | 0 | 0 | 0 | 1 | 0.020325 | false | 0.01084 | 0.00542 | 0 | 0.03523 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
046277d3c02237d1299502009394a5bf6a8ac7e5 | 6,262 | py | Python | examples/cvpr2020/reference_trainer.py | UCMerced-ML/LC-model-compression | bf12c3c1baad2bdd8d2aa3c560f8d7d0bed64a1e | [
"BSD-3-Clause"
] | 40 | 2020-05-19T01:31:02.000Z | 2022-01-28T00:25:33.000Z | examples/cvpr2020/reference_trainer.py | UCMerced-ML/LC-model-compression | bf12c3c1baad2bdd8d2aa3c560f8d7d0bed64a1e | [
"BSD-3-Clause"
] | 3 | 2020-09-09T16:04:17.000Z | 2022-03-18T00:59:39.000Z | examples/cvpr2020/reference_trainer.py | UCMerced-ML/LC-model-compression | bf12c3c1baad2bdd8d2aa3c560f8d7d0bed64a1e | [
"BSD-3-Clause"
] | 8 | 2020-09-08T14:11:16.000Z | 2022-01-28T00:25:41.000Z | #!/usr/bin/env python3
from types import ModuleType
from lc.models import torch as model_def
from utils import AverageMeter, Recorder, format_time, data_loader, compute_acc_loss
import argparse
import torch
from torch import optim
import torch.backends.cudnn as cudnn
import torch.nn as nn
import time
import os
if __name__ == '__main__':
if not os.path.exists('references'):
os.makedirs('references')
model_names = model_def.__all__
parser = argparse.ArgumentParser(description='Reference Network Trainer for MNIST and CIFAR10 networks')
parser.add_argument('--arch', '-a', metavar='ARCH', default=model_names[0],
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: {})'.format(model_names[0]))
parser.add_argument('--dataset', choices=['MNIST', 'CIFAR10'], default='MNIST')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--checkpoint', type=int, default=20)
parser.add_argument('--epochs', default=300, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                        metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.09, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--scheduler', choices=['exponential', 'steps'], default='exponential')
parser.add_argument('--milestones', nargs='+', default=[100,150], type=int)
parser.add_argument('--lr_decay', type=float, default=0.99)
parser.add_argument('--print-freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 20)')
parser.add_argument('--resume', action='store_true',
help='resumes from recent checkpoint')
args = parser.parse_args()
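    # Example invocation (hypothetical values; valid --arch names come from model_def.__all__):
    #   python reference_trainer.py --arch <model> --dataset CIFAR10 --epochs 200 --lr 0.1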
print(args)
cudnn.benchmark = True
model = getattr(model_def, args.arch)()
model.cuda()
train_loader, test_loader = data_loader(batch_size=args.batch_size,
n_workers=args.workers,
dataset=args.dataset)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum, nesterov=True)
prev_state = None
if args.resume:
prev_state = torch.load('references/{}_checkpoint.th'.format(args.arch))
epoch_time = AverageMeter()
rec = Recorder()
all_start_time = time.time()
start_epoch = 0
if prev_state:
print()
model.load_state_dict(prev_state['model_state'])
optimizer.load_state_dict(prev_state['optimizer_state'])
epoch_time = prev_state['epoch_time']
rec = prev_state['records']
all_start_time -= prev_state['training_time']
print('Overriding provided arg with prev_state args: ', prev_state['args'])
args = prev_state['args']
start_epoch = prev_state['epoch']
scheduler = None
if args.scheduler == 'exponential':
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=args.lr_decay, last_epoch=start_epoch - 1)
elif args.scheduler == 'steps':
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, args.milestones, gamma=args.lr_decay, last_epoch=start_epoch-1)
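    # (passing last_epoch=start_epoch-1 lets a freshly constructed scheduler resume
    #  its decay schedule at the correct point when restarting from a checkpoint)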
def my_eval(x, target):
out_ = model.forward(x)
return out_, model.loss(out_, target)
training_time = 0
for epoch in range(start_epoch, args.epochs):
start_time = time.time()
model.train()
for batch_idx, (x, target) in enumerate(train_loader):
optimizer.zero_grad()
x, target = x.cuda(), target.cuda()
out = model.forward(x)
loss = model.loss(out, target)
loss.backward()
optimizer.step()
end_time = time.time()
epoch_time.update(end_time - start_time)
training_time = end_time - all_start_time
model.eval()
print('Epoch {0} finished in {et.val:.3f}s (avg.: {et.avg:.3f}s). Training for {1}'
.format(epoch, format_time(training_time), et=epoch_time))
print('\tLR: {:.4}'.format(scheduler.get_lr()[0]))
if (epoch+1)%args.print_freq == 0:
accuracy, ave_loss = compute_acc_loss(my_eval, train_loader)
rec.record('train', [ave_loss, accuracy, training_time, epoch+1])
print('\ttrain loss: {:.6f}, accuracy: {:.4f}'.format(ave_loss, accuracy))
accuracy, ave_loss = compute_acc_loss(my_eval, test_loader)
print('\ttest loss: {:.6f}, accuracy: {:.4f}'.format(ave_loss, accuracy))
rec.record('test', [ave_loss, accuracy, training_time, epoch+1])
scheduler.step()
if args.checkpoint and (epoch+1) % args.checkpoint == 0:
# create and save checkpoint here
to_save = {'records': rec, 'epoch_time': epoch_time, 'training_time': training_time}
to_save['model_state'] = model.state_dict()
to_save['optimizer_state'] = optimizer.state_dict()
to_save['lr'] = scheduler.get_lr()[0]
to_save['epoch'] = epoch + 1
to_save['args'] = args
torch.save(to_save, 'references/{}_checkpoint.th'.format(args.arch))
    # training has finished. save all recorded values, parameters, and optimizer state
to_save = {'records': rec, 'epoch_time': epoch_time, 'training_time': training_time}
to_save['model_state'] = model.state_dict()
to_save['optimizer_state'] = optimizer.state_dict()
to_save['lr'] = scheduler.get_lr()[0]
to_save['epoch'] = epoch + 1
to_save['args'] = args
torch.save(to_save, 'references/{}.th'.format(args.arch))
if args.checkpoint:
        # additionally remove the checkpoint file (os is already imported at the top)
os.remove('references/{}_checkpoint.th'.format(args.arch)) | 44.728571 | 125 | 0.621048 | 770 | 6,262 | 4.848052 | 0.244156 | 0.022502 | 0.059202 | 0.016073 | 0.261988 | 0.226092 | 0.199839 | 0.167694 | 0.129119 | 0.109831 | 0 | 0.013061 | 0.241935 | 6,262 | 140 | 126 | 44.728571 | 0.773331 | 0.026509 | 0 | 0.118644 | 0 | 0.008475 | 0.173314 | 0.013294 | 0.008475 | 0 | 0 | 0 | 0 | 1 | 0.008475 | false | 0.008475 | 0.09322 | 0 | 0.110169 | 0.084746 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0463351ecd48fd4a8685d1b1e0e86fd55a8bbaa2 | 1,249 | py | Python | sa/profiles/Generic/sync_vlans.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | sa/profiles/Generic/sync_vlans.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | sa/profiles/Generic/sync_vlans.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# Copyright (C) 2007-2015 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
from noc.core.script.base import BaseScript
from noc.sa.interfaces.isyncvlans import ISyncVlans
class Script(BaseScript):
name = "Generic.sync_vlans"
interface = ISyncVlans
requires = ["get_vlans", "add_vlan", "remove_vlan"]
def execute(self, vlans, tagged_ports):
v_map = {}
for v in vlans:
v_map[v["vlan_id"]] = v["name"]
r_vlans = self.scripts.get_vlans()
dev_vlans = {v["vlan_id"] for v in r_vlans}
db_vlans = {v["vlan_id"] for v in vlans}
# Do not provision VLAN1
if 1 in dev_vlans:
dev_vlans.remove(1)
if 1 in db_vlans:
db_vlans.remove(1)
#
to_create = db_vlans - dev_vlans
for vlan in to_create:
self.scripts.add_vlan(vlan_id=vlan, name=v_map[vlan], tagged_ports=tagged_ports)
to_remove = dev_vlans - db_vlans
for vlan in to_remove:
self.scripts.remove_vlan(vlan_id=vlan)
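        # Example return value (hypothetical VLAN ids): {"created": [10, 20], "removed": [30]}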
return {"created": list(to_create), "removed": list(to_remove)}
| 33.756757 | 92 | 0.552442 | 160 | 1,249 | 4.0875 | 0.35 | 0.045872 | 0.027523 | 0.033639 | 0.103976 | 0.055046 | 0.055046 | 0 | 0 | 0 | 0 | 0.013771 | 0.244195 | 1,249 | 36 | 93 | 34.694444 | 0.679025 | 0.180945 | 0 | 0 | 0 | 0 | 0.083661 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0463d410530853a1c2fe7284a9f305e2222bedae | 1,875 | py | Python | Day_21/part2.py | Uklusi/AdventOfCode2021 | 3d22ace832bfd6c9855b2ebad3bf7f10c4751982 | [
"MIT"
] | null | null | null | Day_21/part2.py | Uklusi/AdventOfCode2021 | 3d22ace832bfd6c9855b2ebad3bf7f10c4751982 | [
"MIT"
] | null | null | null | Day_21/part2.py | Uklusi/AdventOfCode2021 | 3d22ace832bfd6c9855b2ebad3bf7f10c4751982 | [
"MIT"
] | null | null | null | from types import new_class
from AoCUtils import *
result = 0
partNumber = "2"
writeToLog = True
if writeToLog:
logFile = open("log" + partNumber + ".txt", "w")
else:
logFile = "stdout"
printLog = printLogFactory(logFile)
start1 = 8
start2 = 9
# Example
# start1 = 4
# start2 = 8
score1 = 0
score2 = 0
distribution = {
3: 1,
4: 3,
5: 6,
6: 7,
7: 6,
8: 3,
9: 1
}
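# Sanity check of the hard-coded table above: it is the distribution of the sum
# of three 3-sided (Dirac) dice, reproducible with:
#   from itertools import product
#   from collections import Counter
#   Counter(sum(r) for r in product((1, 2, 3), repeat=3))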
turn = 0
state = (score1 + score2, start1, start2, score1, score2, turn)
numStates = defaultdict(int)
nextState = PriorityQueue()
numStates[state] = 1
nextState.put(state)
win1 = 0
win2 = 0
MAXSCORE = 21
while not nextState.empty():
state = nextState.get()
n = numStates[state]
numStates[state] = 0
s, p1, p2, score1, score2, turn = state
# printLog(f"s: {s}, p1: {p1}, p2: {p2}, score1: {score1}, score2: {score2}, turn: {turn}")
for (k, p) in distribution.items():
flag = False
if turn == 0:
newp1 = (p1 + k) % 10
newscore1 = score1 + (newp1 if newp1 else 10)
s = newscore1 + score2
if newscore1 >= MAXSCORE:
win1 += n * p
flag = True
newState = (s, newp1, p2, newscore1, score2, 1)
else:
newp2 = (p2 + k) % 10
newscore2 = score2 + (newp2 if newp2 else 10)
s = score1 + newscore2
if newscore2 >= MAXSCORE:
win2 += n * p
flag = True
newState = (s, p1, newp2, score1, newscore2, 0)
if not flag:
if newState not in numStates:
nextState.put(newState)
numStates[newState] += n * p
result = max(win1, win2)
with open("output" + partNumber + ".txt", "w") as outputFile:
outputFile.write(str(result))
print(str(result))
if writeToLog:
cast(TextIOWrapper, logFile).close()
| 21.067416 | 95 | 0.5488 | 227 | 1,875 | 4.528634 | 0.356828 | 0.046693 | 0.027237 | 0.019455 | 0.036965 | 0.036965 | 0 | 0 | 0 | 0 | 0 | 0.07613 | 0.327467 | 1,875 | 88 | 96 | 21.306818 | 0.739096 | 0.063467 | 0 | 0.092308 | 0 | 0 | 0.014849 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.030769 | 0 | 0.030769 | 0.030769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
046764af8b8febc01686b2caf4cec52bcb012ff6 | 20,510 | py | Python | dpytools/menus.py | ComfortablyCoding/dpytools | e5588bc1795543eddb94c3d3a58a9ea9668740a0 | [
"MIT"
] | 22 | 2021-01-21T19:57:39.000Z | 2022-03-14T02:34:01.000Z | dpytools/menus.py | ComfortablyCoding/dpytools | e5588bc1795543eddb94c3d3a58a9ea9668740a0 | [
"MIT"
] | 3 | 2021-05-20T16:34:21.000Z | 2021-06-23T19:54:32.000Z | dpytools/menus.py | ComfortablyCoding/dpytools | e5588bc1795543eddb94c3d3a58a9ea9668740a0 | [
"MIT"
] | 7 | 2021-04-28T04:18:09.000Z | 2021-06-23T00:20:06.000Z | # -*- coding: utf-8 -*-
"""
This module holds functions for displaying different kinds of menus.
All menus are reaction based.
"""
import asyncio
from copy import copy
from inspect import isawaitable
from typing import List, Optional, Union, Callable
import discord
from discord import Embed
from discord.ext import commands
from discord.ext.commands import Context, Converter
from dpytools import EmojiNumbers, Emoji, chunkify_string_list, Color
from dpytools.errors import UserAnswerParsingError
from dpytools.waiters import BaseLock
__all__ = (
'try_clear_reactions',
'arrows',
'confirm',
'multichoice',
'TextMenu'
)
async def try_clear_reactions(msg):
"""helper function to remove reactions excepting forbidden
either by context being a dm_channel or bot lacking perms"""
if msg.guild:
try:
await msg.clear_reactions()
except discord.errors.Forbidden:
pass
async def arrows(ctx: commands.Context,
embed_list: List[Embed],
content: Optional[str] = None,
head: int = 0,
timeout: int = 30,
closed_embed: Optional[Embed] = None,
channel: Optional[discord.abc.Messageable] = None):
"""
Sends multiple embeds with a reaction navigation menu.
Parameters
----------
ctx: :class:`discord.ext.commands.Context`
The context where this function is called.
embed_list: :class:`List[Embed]`
An ordered list containing the embeds to be sent.
content: :class:`str`
A static string. This wont change with pagination.
It will be cleared when its closed, but will persist on pause
head: :class:`int`
The index in embed_list of the first Embed to be displayed.
timeout: :class:`int` (seconds)
The time before the bot closes the menu.
This is reset with each interaction.
closed_embed: :class:`Optional[Embed]`
The embed to be displayed when the user closes the menu.
Defaults to plain embed with "Closed by user" in description
channel: :class:`discord.abc.Messageable`
The channel to be used for displaying the menu, defaults to ctx.channel.
Example
-------
::
from dpytools.menus import arrows
@bot.command()
async def test(ctx):
embed_list = [Embed(...), Embed(...), ...)
await arrows(ctx, embed_list)
"""
channel = channel or ctx.channel
closed_embed = closed_embed or Embed(description="Closed by user", color=Color.RED)
if len(embed_list) == 1:
return await channel.send(content=content, embed=embed_list[0])
def get_reactions(_head: int):
_to_react = []
emb_range = range(len(embed_list))
if _head - 2 in emb_range:
_to_react.append(Emoji.LAST_TRACK.value)
if _head - 1 in emb_range:
_to_react.append(Emoji.REVERSE.value)
if _head + 1 in emb_range:
_to_react.append(Emoji.PLAY.value)
if _head + 2 in emb_range:
_to_react.append(Emoji.NEXT_TRACK.value)
_to_react += [Emoji.PAUSE.value, Emoji.X.value]
return _to_react
msg = await channel.send(content=content, embed=embed_list[head])
to_react = get_reactions(head)
for emoji in to_react:
await msg.add_reaction(emoji)
def check(payload_):
return all([
msg.id == payload_.message_id,
payload_.user_id == ctx.author.id,
payload_.emoji.name in to_react,
])
def get_head(head_: int, emoji_) -> Union[bool, int]:
actions = {
Emoji.LAST_TRACK: 0,
Emoji.REVERSE: head_ - 1 if head_ else 0,
            Emoji.PLAY: head_ + 1 if head_ < len(embed_list) - 1 else len(embed_list) - 1,
Emoji.NEXT_TRACK: len(embed_list) - 1,
Emoji.X: False,
Emoji.PAUSE: True,
}
return actions[emoji_]
while True:
try:
payload = await ctx.bot.wait_for('raw_reaction_add', timeout=timeout, check=check)
except asyncio.TimeoutError:
return await try_clear_reactions(msg)
else:
head = get_head(head, payload.emoji.name)
if head is True: # pause emoji triggered
return await try_clear_reactions(msg)
if head is False: # X emoji triggered
await try_clear_reactions(msg)
return await msg.edit(content=None, embed=closed_embed, delete_after=10)
else:
await try_clear_reactions(msg)
await msg.edit(embed=embed_list[head])
to_react = get_reactions(head)
for emoji in to_react:
await msg.add_reaction(emoji)
async def confirm(ctx: commands.Context,
msg: discord.Message,
lock: Union[discord.Member, discord.Role, bool, None] = True,
timeout: int = 30) -> Optional[bool]:
"""
Helps to create a reaction menu to confirm an action.
Parameters
----------
ctx: :class:`discord.ext.commands.Context`
the context for the menu
msg: :class:`Message`
the message to confirm or deny by the user.
lock: :class:`Union[discord.Member, discord.Role, bool, None]`
- **True** (default)
- the menu will only listen for the author's reactions.
- **False**
- ANY user can react to the menu
- :class:`discord.Member`
- Only target member will be able to react
- :class:`discord.Role`
- ANY user with target role will be able to react.
    timeout: :class:`int` (seconds)
Timeout before the menu closes.
Returns
-------
:class:`Optional[bool]`
- **True** if the message was confirmed
- **False** if it was denied
- **None** if timeout
Example
-------
::
from dpytools.menus import confirm
@bot.command()
async def test(ctx):
msg = await ctx.send('Please confirm to this important message')
confirmation = await confirm(ctx, msg)
if confirmation:
await msg.edit(content='Confirmed')
elif confirmation is False:
await msg.edit(content='Cancelled')
else:
await msg.edit(content='Timeout')
"""
emojis = ['👍', '❌']
for emoji in emojis:
await msg.add_reaction(emoji)
def check(payload):
_checks = [
payload.user_id != ctx.bot.user.id,
payload.emoji.name in emojis,
payload.message_id == msg.id,
]
if lock:
if isinstance(lock, bool):
_checks.append(payload.user_id == ctx.author.id)
elif isinstance(lock, discord.Member):
_checks.append(payload.user_id == lock.id)
elif isinstance(lock, discord.Role):
_checks.append(lock in ctx.guild.get_member(payload.user_id).roles)
return all(_checks)
try:
payload = await ctx.bot.wait_for('raw_reaction_add', check=check, timeout=timeout)
except asyncio.TimeoutError:
await try_clear_reactions(msg)
return None
else:
await try_clear_reactions(msg)
if payload.emoji.name == '👍':
return True
else:
return False
async def multichoice(ctx: Context,
options: List[str],
timeout: int = 60,
base_embed: Embed = Embed()
) -> Optional[str]:
"""
Takes a list of strings and creates a selection menu.
**ctx.author** will select and item and the function will return it.
Parameters
----------
ctx: :class:`Context`
The command's context
options: :class:`List[str]`
List of strings that the user must select from
    timeout: :class:`int` (seconds)
Timeout before the menu closes.
base_embed: :class:`Optional[discord.Embed]`
An optional embed object to take as a blueprint.
- The menu will only modify the footer and description.
- All other fields are free to be set by you.
Example
-------
::
from dpytools.menus import multichoice
@bot.command()
async def test(ctx):
            options = [str(uuid4()) for _ in range(110)]
choice = await multichoice(ctx, options)
await ctx.send(f'You selected: {choice}')
Returns
-------
:class:`str`
The item selected by the user.
"""
if not options:
raise ValueError("Options cannot be empty")
elif (t := type(options)) is not list:
raise TypeError(f'"options" param must be :list: but is {t}')
elif not all([type(item) is str for item in options]):
        raise TypeError('All of the "options" param contents must be :str:')
elif any([len(opt) > 2000 for opt in options]):
raise ValueError("The maximum length for any option is 2000")
multiple = len(options) > 10
head = 0
embeds = []
nums = {
EmojiNumbers.ONE.value: 0,
EmojiNumbers.TWO.value: 1,
EmojiNumbers.THREE.value: 2,
EmojiNumbers.FOUR.value: 3,
EmojiNumbers.FIVE.value: 4,
EmojiNumbers.SIX.value: 5,
EmojiNumbers.SEVEN.value: 6,
EmojiNumbers.EIGHT.value: 7,
EmojiNumbers.NINE.value: 8,
EmojiNumbers.TEN.value: 9,
}
for i, chunk in enumerate(chunkify_string_list(options, 10, 2000, separator_length=10)):
description = "".join(f"{list(nums)[i]} {opt.strip()}\n\n" for i, opt in enumerate(chunk))
embed = copy(base_embed)
embed.description = description
embeds.append((chunk, embed))
def get_nums(_chunk):
return list(nums)[:len(_chunk)]
def get_reactions():
to_react = get_nums(embeds[head][0])
if multiple:
if head not in [0, len(embeds) - 1]:
to_react = [Emoji.LAST_TRACK, Emoji.REVERSE] + to_react + [Emoji.PLAY, Emoji.NEXT_TRACK]
elif head == 0:
to_react = to_react + [Emoji.PLAY, Emoji.NEXT_TRACK]
elif head == len(embeds) - 1:
to_react = [Emoji.LAST_TRACK, Emoji.REVERSE] + to_react
return to_react + [Emoji.X]
def adjust_head(head_: int, emoji: str):
if not multiple:
return
else:
if emoji == Emoji.LAST_TRACK:
head_ = 0
elif emoji == Emoji.REVERSE:
head_ -= 1 if head_ > 0 else 0
elif emoji == Emoji.PLAY:
head_ += 1 if head_ < len(embeds) - 1 else 0
elif emoji == Emoji.NEXT_TRACK:
head_ = len(embeds) - 1
return head_
def check(reaction: discord.Reaction,
user: Union[discord.User, discord.Member]):
return all([
user != ctx.bot.user,
user == ctx.author,
ctx.channel == reaction.message.channel,
reaction.emoji in to_react,
])
to_react = get_reactions()
first_embed = embeds[0][1]
first_embed.set_footer(text=f"Page 1/{len(embeds)}")
msg = await ctx.send(embed=first_embed)
for reaction in to_react:
await msg.add_reaction(reaction)
while True:
try:
reaction, user = await ctx.bot.wait_for('reaction_add', check=check, timeout=timeout)
except asyncio.TimeoutError:
await msg.delete()
return
else:
emoji = reaction.emoji
if emoji == Emoji.X:
await msg.delete()
return
else:
if emoji in nums:
await msg.delete()
return embeds[head][0][nums[emoji]]
else:
head = adjust_head(head, emoji)
next_embed = embeds[head][1]
next_embed.set_footer(text=f"Page {head + 1}/{len(embeds)}")
await msg.edit(embed=next_embed)
try:
await msg.clear_reactions()
except discord.errors.Forbidden:
pass
else:
to_react = get_reactions()
for reaction in to_react:
await msg.add_reaction(reaction)
class _QuestionData:
"""This class is not intended to be instantiated or subclassed"""
def __init__(self,
*,
question: str = None,
embed: discord.Embed = None,
parser: Union[Converter, Callable, None] = None,
parse_fail_response: str = None,
parse_fail_embed: discord.Embed = None
):
self.failed = False
if not question and not embed:
raise ValueError('Either question or embed are required to construct the instance')
self.question = question
self.embed = embed
self.parser = parser
self.parse_fail_response = parse_fail_response
self.parse_fail_embed = parse_fail_embed
def __str__(self):
return f"QuestionData(text={self.question})"
class TextMenu:
"""
Constructs the menu instance
Parameters
----------
lock: **Union[discord.Member, discord.Role, bool, None]**
- If **True** (default)
- the menu will only listen for the author's reactions.
- If **False**
- ANY user can react to the menu
- If **member**
- Only target member will be able to react
- If **role**
- ANY user with target role will be able to react.
stop: **str** (Default **'cancel'**)
If the users passes this string in the message content the menu will end, clean up and return False
timeout: **int** (Default **60**)
The amount of time to wait for each question.
If a timeout is reached, the menu is cancelled and cleaned up and returns None
cleanup: **bool** (Default **True**)
Whether to clean up messages or not
.. note::
If the users response matches the **stop** parameter the menu will return an explicit **False**
If timeout occurs however return value will be **None**
This way you can differentiate the output reasons
"""
def __init__(self, *,
lock: Union[discord.Member, discord.Role, bool, None] = True,
stop: str = 'cancel',
timeout: int = 60,
cleanup: bool = False,
retry_parse_fail: bool = False,
):
self._questions: List[_QuestionData] = []
self._messages = []
self.lock = lock
self.stop = stop
self.timeout = timeout
self.cleanup = cleanup
self.retry_parse_fail = retry_parse_fail
def add_question(self,
*,
question: str = None,
embed: discord.Embed = None,
parser: Union[Converter, Callable, None] = None,
parse_fail_response: str = None,
parse_fail_embed: discord.Embed = None,
):
"""
Adds a question to the menu
Parameters
----------
question: str
The bot's question's text to display
embed: discord.Embed
An embed to send with the question
parser: Union[Converter, Callable, None]
- A function that takes a single string argument and returns something else
The function will be passed the user's message.content
- Or a **discord.ext.commands.Converter**
Which will be given the same string and context from the command
parse_fail_response: Optional[str]
If the parser raises an exception and TextMenu .retry_parse_fail is enabled message content will be this
parse_fail_embed: Optional[discord.Embed]
If the parser raises an exception and TextMenu .retry_parse_fail is enabled message embed will be this
.. warning::
            Either **question** or **embed** is required. If you don't pass either, :class:`ValueError` will be raised
"""
parse_fail_response = (parse_fail_response
or 'Failed to convert **"{}"** to desired type, try again'
if not parse_fail_embed else None)
q = _QuestionData(question=question,
embed=embed,
parser=parser,
parse_fail_response=parse_fail_response,
parse_fail_embed=parse_fail_embed)
self._questions.append(q)
return self
async def _try_to_clean(self, ctx: Context):
"""
Tries to clean up messages excepting errors silently
"""
if self.cleanup:
try:
await ctx.channel.delete_messages(self._messages)
except:
pass
async def _ask(self, ctx, question: _QuestionData):
"""Asks an individual question"""
check = BaseLock(ctx, lock=self.lock)
msg_text = question.question if not question.failed else question.parse_fail_response
msg_embed = question.embed if not question.failed else question.parse_fail_embed
self._messages.append(await ctx.send(content=msg_text, embed=msg_embed))
answer_msg = await ctx.bot.wait_for('message', check=check, timeout=self.timeout)
self._messages.append(answer_msg)
if answer_msg.content.lower().strip() == self.stop:
return False
if question.parser:
try:
if isinstance(question.parser, Converter):
answer = await question.parser.convert(ctx, answer_msg.content)
elif isinstance(question.parser, type) and issubclass(question.parser, Converter):
answer = await question.parser().convert(ctx, answer_msg.content)
else:
answer = question.parser(answer_msg.content)
                if isawaitable(answer):
answer = await answer
except Exception as e:
question.failed = True
question.parse_fail_response = (question.parse_fail_response.format(answer_msg.content)
if question.parse_fail_response else None)
raise UserAnswerParsingError(f"Failed to parse {question}") from e
else:
answer = answer_msg.content
return answer
async def call(self, ctx: commands.Context):
"""Activates the menu
Displays the menu one question at the time.
The user can cancel the menu using the :param stop: passed in the constructor
The menu will only listen for messages that pass the base lock
        An attempt to clear all menu messages will be made, with errors suppressed silently
Returns
-------
:class:`Optional[List[Any]]`
Returns the list of answers from the user processed by the optional parser
The menu will return None if a TimeoutError occurs.
Raises
------
:class:`Any`
This menu will raise any exception derived from parsers
"""
answers = []
for question in self._questions:
answer = None
while answer is None:
try:
answer = await self._ask(ctx, question)
if answer is False:
await self._try_to_clean(ctx)
return answer
except asyncio.TimeoutError:
await self._try_to_clean(ctx)
return
except UserAnswerParsingError as error:
if not self.retry_parse_fail:
await self._try_to_clean(ctx)
raise error
answers.append(answer)
await self._try_to_clean(ctx)
return answers
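
# Usage sketch for TextMenu (a minimal example; the command and `parse_age`
# below are hypothetical, not part of dpytools):
#
#   def parse_age(content: str) -> int:
#       return int(content)
#
#   @bot.command()
#   async def signup(ctx):
#       menu = (TextMenu(timeout=60, retry_parse_fail=True)
#               .add_question(question="What is your name?")
#               .add_question(question="How old are you?", parser=parse_age,
#                             parse_fail_response='"{}" is not a number, try again'))
#       answers = await menu.call(ctx)
#       if answers:
#           name, age = answers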
| 35.982456 | 120 | 0.570697 | 2,392 | 20,510 | 4.777174 | 0.150502 | 0.017765 | 0.01934 | 0.012252 | 0.30638 | 0.274788 | 0.21668 | 0.205303 | 0.175199 | 0.136781 | 0 | 0.005776 | 0.341589 | 20,510 | 569 | 121 | 36.045694 | 0.840196 | 0.10941 | 0 | 0.26935 | 0 | 0 | 0.039958 | 0.00253 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037152 | false | 0.009288 | 0.034056 | 0.012384 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
046b23b296bfcedfcca9aaf23e3a6837b6efe373 | 9,706 | py | Python | formulator/models.py | Cahersan/django-formulator | 0b7552f9b0c11b83568ad37350c14a111694a931 | [
"MIT"
] | null | null | null | formulator/models.py | Cahersan/django-formulator | 0b7552f9b0c11b83568ad37350c14a111694a931 | [
"MIT"
] | null | null | null | formulator/models.py | Cahersan/django-formulator | 0b7552f9b0c11b83568ad37350c14a111694a931 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import re
import importlib
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from model_utils import Choices
from collections import OrderedDict, Counter
from autoslug import AutoSlugField
from autoslug.settings import slugify as default_slugify
from positions import PositionField
from crispy_forms.helper import FormHelper
from crispy_forms import layout
import floppyforms as forms
from django_hstore import hstore
from formulator.conf import settings
# slugify modification to obtain slugs that are valid Python variable names
def variable_slugify(value):
return default_slugify(value).replace('-', '_')
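# e.g. variable_slugify("My Field Name") -> "my_field_name" (the default slugify
# would give "my-field-name", which is not a valid Python identifier)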
def create_field_slug(instance):
return variable_slugify("%s %s" % (instance.fieldset.slug, instance.name))
@python_2_unicode_compatible
class Form(models.Model):
"""
    Model that defines a django Form class. The form_class_factory method returns a Form class whose default base is
    a floppyforms Form class. It uses crispy-forms helpers to add the additional form properties that django forms don't
    handle out of the box. It also uses crispy-forms fieldsets by default.
"""
ENCTYPES = Choices((0, 'urlencoded', 'application/x-www-form-urlencoded'),
(1, 'multipart', 'multipart/form-data'),
(2, 'plain', 'text/plain'))
METHODS = Choices((0, 'get', 'GET'), (1, 'post', 'POST'))
name = models.CharField(max_length=100, help_text='Name of the Form type')
# HTML5 FORM ATTRIBUTES for crispy forms
# common form attributes
form_name = models.CharField(max_length=100, blank=True)
form_action = models.CharField(max_length=250, blank=True)
form_method = models.IntegerField(max_length=10, choices=METHODS, default=METHODS.post)
form_id = AutoSlugField(populate_from='name', unique=True, slugify=variable_slugify)
form_class = models.CharField(max_length=250, blank=True)
# secondary attributes
form_accept_charset = models.CharField(max_length=100, blank=True)
form_autocomplete = models.BooleanField(default=False)
form_novalidate = models.BooleanField(default=False)
form_enctype = models.IntegerField(choices=ENCTYPES, default=ENCTYPES.urlencoded)
form_target = models.CharField(max_length=50, blank=True)
# json field for global and event attributes
attrs = hstore.DictionaryField(null=True, blank=True)
objects = hstore.HStoreManager()
def __str__(self):
return "formulator.Form instance: %s" % self.name
def save(self, *args, **kwargs):
super(Form, self).save(*args, **kwargs)
@cached_property
def fieldsets(self):
return self.fieldset_set.all().prefetch_related('field_set')
def form_class_factory(self, form_class=forms.Form):
# again make sure that we have everything we need to create a class
self.full_clean()
helper = FormHelper()
attrs = {}
layouts = []
        # Build a crispy-forms Fieldset layout entry and a bound form field
        # instance for every fieldset/field attached to this form.
for fieldset in self.fieldsets:
fieldset_fields = fieldset.fields
fieldset_layout = layout.Fieldset(fieldset.safe_legend, *[f.field_id for f in fieldset_fields])
layouts.append(fieldset_layout)
for field in fieldset_fields:
attrs[field.field_id] = field.formfield_instance_factory()
helper.form_id = self.form_id
helper.form_action = self.form_action
helper.form_method = self.METHODS[self.form_method]
helper.attrs = {
'accept-charset': self.form_accept_charset,
'autocomplete': self.form_autocomplete,
'novalidate': self.form_novalidate,
'enctype': self.form_enctype,
'target': self.form_target
}
helper.layout = layout.Layout(*layouts)
attrs['helper'] = helper
return type(str(self.form_id), (form_class,), attrs)
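
# Usage sketch (assumption: a Form row with related FieldSets/Fields already exists):
#   form_model = Form.objects.get(form_id='contact_form')
#   ContactForm = form_model.form_class_factory()
#   form = ContactForm(data=request.POST or None)  # behaves like a regular floppyforms Form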
@python_2_unicode_compatible
class FieldSet(models.Model):
form = models.ForeignKey(Form)
position = PositionField(collection='form')
name = models.CharField(max_length=100)
slug = AutoSlugField(unique_with="form", populate_from='name', slugify=variable_slugify)
    legend = models.CharField(max_length=200)
class Meta:
ordering = ['form', 'position']
def __str__(self):
return "FieldSet: %s %s" % (self.name, self.form.name)
@cached_property
def safe_legend(self):
try:
return self.legend
except:
return self.name.title()
@cached_property
def fields(self):
return self.field_set.all()
@python_2_unicode_compatible
class Field(models.Model):
"""
Stores the information for a django form field.
"""
fieldset = models.ForeignKey(FieldSet)
    label = models.CharField(max_length=200,
help_text=_("""A verbose name for this field, for use in displaying this
field in a form. By default, Django will use a "pretty"
version of the form field name, if the Field is part of a
Form. """))
name = models.CharField(max_length=200,
help_text=_("""A short name to build the database field """))
field_id = AutoSlugField(unique_with='fieldset__form', populate_from=create_field_slug, slugify=variable_slugify)
position = PositionField(collection='fieldset')
field = models.CharField(max_length=100, choices=settings.FORMULATOR_FIELDS)
maxlength = models.IntegerField(blank=True, null=True)
attrs = hstore.DictionaryField(blank=True, null=True)
    choices = hstore.DictionaryField(blank=True, null=True)
required = models.BooleanField(default=True)
    help_text = models.TextField(blank=True,
help_text=_("An optional string to use as 'help text' for this Field."))
    initial = models.CharField(max_length=200, blank=True,
help_text=_("""A value to use in this Field's initial display. This value
is *not* used as a fallback if data isn't given. """))
widget = models.CharField(max_length=100, choices=settings.FORMULATOR_WIDGETS, blank=True,
help_text=_("""A Widget class, or instance of a Widget class, that should
be used for this Field when displaying it. Each Field has a
default Widget that it'll use if you don't specify this. In
most cases, the default widget is TextInput."""))
show_hidden_initial = models.BooleanField(
default=False,
help_text=_('Boolean that specifies whether the field is hidden.'))
repeat_min = models.IntegerField(default=1,
help_text=_("The minimum number of times this Field should appear in the Form"))
repeat_max = models.IntegerField(blank=True,
null=True,
help_text=_("The maximum number of times this Field should appear in the Form"))
    class Meta:
        # note: 'order_with_respect_to' was dropped because Django does not
        # allow it together with 'ordering'
        ordering = ['fieldset__form', 'fieldset', 'position']
def __str__(self):
return "Field: %s" % self.name
@cached_property
def safe_label(self):
try:
return self.label
except:
return self.name.title()
@cached_property
def safe_initial(self):
try:
return self.initial
except:
return ''
@cached_property
def safe_help_text(self):
try:
return self.help_text
except:
return ''
def formfield_instance_factory(self, field_class=None, attrs=None):
"""Returns an instance of a form field"""
# Get the field class for this particular field
if field_class is None:
field_class = dict(settings.FORMULATOR_FIELDS)[self.field]
module_name, class_name = field_class.rsplit(".", 1)
module = importlib.import_module(module_name)
field = getattr(module, class_name)
# Get the widget class for this particular field
if not self.widget:
widget = getattr(field, 'widget', None)
else:
widget_class = dict(settings.FORMULATOR_WIDGETS)[self.widget]
module_name, class_name = widget_class.rsplit(".", 1)
module = importlib.import_module(module_name)
widget = getattr(module, class_name)
if attrs is None:
attrs = {
"required": self.required,
"label": self.safe_label,
"initial": self.safe_initial,
"help_text": self.safe_help_text,
"show_hidden_initial": self.show_hidden_initial,
}
if widget:
attrs['widget'] = widget(attrs=self.attrs)
try:
choices = self.choices
except:
choices = None
if choices:
            choices = [(key, _(value)) for key, value in choices.items()]
choices.reverse()
attrs['choices'] = choices
if self.maxlength:
attrs['max_length'] = self.maxlength
return field(**attrs)
| 35.166667 | 119 | 0.637441 | 1,143 | 9,706 | 5.247594 | 0.218723 | 0.022508 | 0.039013 | 0.052017 | 0.204235 | 0.147049 | 0.106536 | 0.087362 | 0.030677 | 0.014338 | 0 | 0.007522 | 0.274057 | 9,706 | 275 | 120 | 35.294545 | 0.843741 | 0.084381 | 0 | 0.167568 | 0 | 0 | 0.170196 | 0.003737 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075676 | false | 0 | 0.102703 | 0.037838 | 0.497297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04700730113a9f3f7622d5a4ec5c89cef15b8e21 | 963 | py | Python | src/python/pyLCIO/io/StdHepReader.py | jstrube/lcio | 763eb323df33d3b0c4240e6b6feb7d0a27b790d5 | [
"BSD-3-Clause"
] | null | null | null | src/python/pyLCIO/io/StdHepReader.py | jstrube/lcio | 763eb323df33d3b0c4240e6b6feb7d0a27b790d5 | [
"BSD-3-Clause"
] | 4 | 2020-08-26T23:35:37.000Z | 2020-09-22T21:13:25.000Z | src/python/pyLCIO/io/StdHepReader.py | JeffersonLab/hps-lcio | b83447dd8189d5f541601343066f8e3b4621f4ec | [
"BSD-3-Clause"
] | 1 | 2020-08-26T19:31:19.000Z | 2020-08-26T19:31:19.000Z | '''
Created on Dec 4, 2012
@author: <a href="mailto:christian.grefe@cern.ch">Christian Grefe</a>
'''
import os
from pyLCIO.io.Reader import Reader
from pyLCIO import EVENT, IMPL, UTIL
class StdHepReader( Reader ):
''' Class to hold an LCStdHepRdr object '''
def __init__( self, fileName=None):
''' Constructor '''
Reader.__init__(self, None, fileName)
self.processedEvents = 0
def __open__( self, fileName ):
if self.isOpen:
self.__close__()
self.reader = UTIL.LCStdHepRdr( fileName )
self.isOpen = True
def __read__( self ):
''' Get the next event from the stream '''
event = IMPL.LCEventImpl()
try:
self.reader.updateNextEvent( event, EVENT.LCIO.MCPARTICLE )
event.setEventNumber( self.processedEvents )
self.processedEvents += 1
return event
        except Exception:
            return
| 25.342105 | 71 | 0.589823 | 102 | 963 | 5.372549 | 0.54902 | 0.104015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010526 | 0.30945 | 963 | 37 | 72 | 26.027027 | 0.813534 | 0.185877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0470d58f55c3b1b314cc0262448436f7712baf8d | 6,440 | py | Python | test/deposit.py | DAism2019/ERCs | f5cfec6b5700685b5515c8106a373b5f71b429c6 | [
"CC0-1.0"
] | null | null | null | test/deposit.py | DAism2019/ERCs | f5cfec6b5700685b5515c8106a373b5f71b429c6 | [
"CC0-1.0"
] | null | null | null | test/deposit.py | DAism2019/ERCs | f5cfec6b5700685b5515c8106a373b5f71b429c6 | [
"CC0-1.0"
] | null | null | null | from contract import Forge,Validator,TOH,NDAO
from privateKey import my_address, private_key
# from web3.auto.infura.rinkeby import w3
from web3.auto import w3
import time  # import the time module
another = "0x2E8b222CFac863Ec6D3446c78fD46aAEA289A9fb"
another_privateKey = "078e9ed558a9afd1e7e27b9884fbcc95f8fa406bd02de3a2a19fbac401d7c74c"
approve_value = 2 ** 256 -1
FIXED_DEPOSIT_AMOUNT = 10000 * 10**18
FIXED_INPUT_AMOUNT = 23 * 10**18
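# (token and ETH amounts are expressed in base units with 18 decimals, wei-style)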
def getOutputPrice():
out_price = Forge.functions.getOutputPrice(FIXED_DEPOSIT_AMOUNT).call()
print("当前所需要ETH价格为:",out_price/10 ** 18)
return out_price
def getInputPrice():
input_price = Forge.functions.getInputPrice(FIXED_INPUT_AMOUNT).call()
print("当前锻造",FIXED_INPUT_AMOUNT/10**18,"ETH得到的NDAO为:",input_price/10 ** 18)
return input_price
def depositByETH():
eth_value = getOutputPrice()
nonce = w3.eth.getTransactionCount(my_address)
unicorn_txn = Validator.functions.depositByETHDemo().buildTransaction({
'nonce': nonce,
'value': eth_value,
'gasPrice': 6 * (10 ** 9)
})
signed_txn = w3.eth.account.signTransaction(
unicorn_txn, private_key=private_key)
hash = w3.eth.sendRawTransaction(signed_txn.rawTransaction)
print("质押交易已经发送")
result = w3.eth.waitForTransactionReceipt(hash)
if result.status == 1:
print("交易成功")
else:
print("交易失败")
# approve the staking (validator) contract
def approve():
nonce = w3.eth.getTransactionCount(another)
unicorn_txn = NDAO.functions.approve(Validator.address,approve_value).buildTransaction({
'from': another,
'nonce': nonce,
'gasPrice': 6 * (10 ** 9)
})
signed_txn = w3.eth.account.signTransaction(
unicorn_txn, private_key=another_privateKey)
hash = w3.eth.sendRawTransaction(signed_txn.rawTransaction)
print("授权交易已经发送")
result = w3.eth.waitForTransactionReceipt(hash)
if result.status == 1:
print("交易成功")
else:
print("交易失败")
# get the NDAO balance
def getNDAOBalance(address):
bal = NDAO.functions.balanceOf(address).call()
print("当前地址:",address,"的NDAO余额为:",bal/10 ** 18)
return bal
# forge NDAO with ETH (exact input amount)
def forgeNDAOInput():
old_bal = getNDAOBalance(another)
out_ndao = getInputPrice()
nonce = w3.eth.getTransactionCount(another)
args = [out_ndao,int(time.time()) + 900,another]
unicorn_txn = Forge.functions.ForgeNdaoInput(*args).buildTransaction({
'from': another,
'nonce': nonce,
'value': FIXED_INPUT_AMOUNT,
'gasPrice': 6 * (10 ** 9)
})
signed_txn = w3.eth.account.signTransaction(
unicorn_txn, private_key=another_privateKey)
hash = w3.eth.sendRawTransaction(signed_txn.rawTransaction)
print("锻造交易已经发送")
result = w3.eth.waitForTransactionReceipt(hash)
if result.status == 1:
print("交易成功")
new_bal = getNDAOBalance(another)
assert old_bal + out_ndao == new_bal
print("测试成功")
else:
print("交易失败")
# forge NDAO with ETH (exact output amount)
def forgeNDAOOutPut():
getNDAOBalance(another)
eth_value = getOutputPrice()
nonce = w3.eth.getTransactionCount(another)
args = [FIXED_DEPOSIT_AMOUNT,eth_value + 1000,int(time.time()) + 900,another,another]
unicorn_txn = Forge.functions.ForgeNdaoOutput(*args).buildTransaction({
'from': another,
'nonce': nonce,
'value': eth_value,
'gasPrice': 6 * (10 ** 9)
})
signed_txn = w3.eth.account.signTransaction(
unicorn_txn, private_key=another_privateKey)
hash = w3.eth.sendRawTransaction(signed_txn.rawTransaction)
print("锻造交易已经发送")
result = w3.eth.waitForTransactionReceipt(hash)
if result.status == 1:
print("交易成功")
getNDAOBalance(another)
else:
print("交易失败")
# deposit using NDAO
def depositByNDAO():
nonce = w3.eth.getTransactionCount(another)
unicorn_txn = Validator.functions.depositByNDAODemo().buildTransaction({
'nonce': nonce,
'from': another,
'gasPrice': 6 * (10 ** 9)
})
signed_txn = w3.eth.account.signTransaction(
unicorn_txn, private_key=another_privateKey)
hash = w3.eth.sendRawTransaction(signed_txn.rawTransaction)
print("质押交易已经发送")
result = w3.eth.waitForTransactionReceipt(hash)
if result.status == 1:
print("交易成功")
else:
print("交易失败")
def getStartTime(address):
start_time = Validator.functions.registration_times(address).call()
print("地址为",address,"的账号的质押时间为:",start_time)
ticks = int(time.time())
maxi = (ticks - start_time) //60
print("从开始时间计算的最大等级为:",maxi)
def getMaxLevel(address):
max_level = TOH.functions.calMaxLevel(address).call()
print("合约可以领取勋章的最大等级为:",max_level)
return max_level
def withdrawAnother(level):
max_level = getMaxLevel(another)
assert level <= max_level
nonce = w3.eth.getTransactionCount(another)
unicorn_txn = TOH.functions.withDrawToken(level).buildTransaction({
'from': another,
'nonce': nonce,
'gasPrice': 6 * (10 ** 9)
})
signed_txn = w3.eth.account.signTransaction(
unicorn_txn, private_key=another_privateKey)
hash = w3.eth.sendRawTransaction(signed_txn.rawTransaction)
print("领取交易已经发送")
result = w3.eth.waitForTransactionReceipt(hash)
if result.status == 1:
print("交易成功")
getMaxLevel(another)
else:
print("交易失败")
def withdraw(level):
max_level = getMaxLevel(my_address)
assert level <= max_level
nonce = w3.eth.getTransactionCount(my_address)
unicorn_txn = TOH.functions.withDrawToken(level).buildTransaction({
'nonce': nonce,
'gasPrice': 6 * (10 ** 9)
})
signed_txn = w3.eth.account.signTransaction(
unicorn_txn, private_key=private_key)
hash = w3.eth.sendRawTransaction(signed_txn.rawTransaction)
print("领取勋章交易已经发送")
result = w3.eth.waitForTransactionReceipt(hash)
if result.status == 1:
print("交易成功")
getMaxLevel(my_address)
else:
print("交易失败")
def getBalance(address):
bal = TOH.functions.balanceOf(address).call()
print("我的纪念币数量为:",bal)
def getTokenInfos(tokenId):
infos = TOH.functions.tokenInfos(tokenId).call()
[val,level] = infos
print("当前ID为",tokenId,"的勋章的验证者为:",val)
print("当前ID为",tokenId,"的勋章的等级为:",level)
# depositByETH()  # can only deposit once
# approve()
# forgeNDAO()
# depositByNDAO()
# forgeNDAOInput()
getTokenInfos(5)
# withdrawAnother(5)
| 29.406393 | 92 | 0.678416 | 695 | 6,440 | 6.151079 | 0.185612 | 0.032749 | 0.016374 | 0.047485 | 0.549942 | 0.511345 | 0.499415 | 0.421053 | 0.382924 | 0.382924 | 0 | 0.033462 | 0.197205 | 6,440 | 218 | 93 | 29.541284 | 0.793424 | 0.031832 | 0 | 0.605882 | 0 | 0 | 0.075563 | 0.017042 | 0 | 0 | 0.006752 | 0 | 0.017647 | 1 | 0.082353 | false | 0 | 0.023529 | 0 | 0.129412 | 0.182353 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0470f24c34be36437ebfed026dd28c5941c7d5c4 | 815 | py | Python | probe/influx.py | phenobarbital/asyncdb | d4ea3d6b60436afce758abbcd632164a6ac6fa0c | [
"Apache-2.0",
"BSD-3-Clause"
] | 4 | 2020-09-07T15:39:10.000Z | 2022-03-15T03:48:34.000Z | probe/influx.py | phenobarbital/asyncdb | d4ea3d6b60436afce758abbcd632164a6ac6fa0c | [
"Apache-2.0",
"BSD-3-Clause"
] | 394 | 2020-10-08T08:05:54.000Z | 2022-03-31T10:28:27.000Z | probe/influx.py | jelitox/asyncdb | c5cad9857baa14fa949d34fa29406024d68d5735 | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2020-09-07T15:38:56.000Z | 2021-09-26T03:52:27.000Z | from asyncdb import AsyncDB, AsyncPool
import asyncio
loop = asyncio.get_event_loop()
asyncio.set_event_loop(loop)
params = {
"host": "127.0.0.1",
"port": "8086",
"database": 'testdb',
"user": "influxdata",
"password": "12345678"
}
DRIVER='influx'
async def test_connect(driver, params, event_loop):
db = AsyncDB(driver, params=params, loop=event_loop)
await db.connection()
print('IS CONNECTED> ', db.is_connected() is True)
await db.create_database('testdb')
result, error = await db.test_connection()
print(result, error)
print(type(result) == list)
await db.close()
if __name__ == '__main__':
try:
loop.run_until_complete(test_connect(DRIVER, params, loop))
except Exception as err:
print(err)
finally:
loop.close()
| 22.638889 | 67 | 0.656442 | 103 | 815 | 4.990291 | 0.514563 | 0.070039 | 0.066148 | 0.089494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027821 | 0.206135 | 815 | 35 | 68 | 23.285714 | 0.766615 | 0 | 0 | 0 | 0 | 0 | 0.121472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.035714 | 0.071429 | 0 | 0.071429 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0472705f71b9f473bebe34fc4fa5caeef5f89456 | 17,557 | py | Python | UI.py | Abdur-rahmaanJ/PygameUI | 91139272f41558176ea105f7f0b613aef9a243f5 | [
"Apache-2.0"
] | 1 | 2020-05-27T04:24:36.000Z | 2020-05-27T04:24:36.000Z | UI.py | Abdur-rahmaanJ/PygameUI | 91139272f41558176ea105f7f0b613aef9a243f5 | [
"Apache-2.0"
] | null | null | null | UI.py | Abdur-rahmaanJ/PygameUI | 91139272f41558176ea105f7f0b613aef9a243f5 | [
"Apache-2.0"
] | null | null | null | #Ui Widgets for pygame
#this is supposed to make it as easy as possible to make and use buttons and other widgets
#Documentation at 'https://github.com/TheBigKahuna353/PygameUI'
#All made by Jordan Withell
import pygame
#make a display window quickly
def Window(w = 500, h = 500):
pygame.init()
return pygame.display.set_mode((w,h))
#instead of updating every widget individually, this func updates all created widgets for you
def update_all():
for widget in _all_widgets:
widget.update()
#this is a list that holds every widget created, used by the func update_all
_all_widgets = []
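# Minimal usage sketch (assumed flow; every widget registers itself in
# _all_widgets on creation, so update_all() draws and updates everything):
#   screen = Window(400, 300)
#   btn = Button(50, 50, text="Click me!", action=lambda: print("clicked"))
#   while True:
#       for e in pygame.event.get():
#           if e.type == pygame.QUIT:
#               raise SystemExit
#       screen.fill((200, 200, 200))
#       update_all()
#       pygame.display.flip()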
#this creates a curved rect, given a w,h and the curve amount, between 0 and 1
def curve_square(width,height,curve, color = (0,0,0)):
if not 0 <= curve <= 1:
raise ValueError("curve value out of range, must be between 0 and 1")
curve *= min(width,height)
curve = int(curve)
surf = pygame.Surface((width,height),pygame.SRCALPHA)
pygame.draw.rect(surf,color,(0,curve,width,height-2*curve))
pygame.draw.rect(surf,color,(curve,0,width - 2 * curve,height))
pygame.draw.circle(surf,color, (curve,curve),curve)
pygame.draw.circle(surf,color, (width - curve,curve),curve)
pygame.draw.circle(surf,color, (curve,height - curve),curve)
pygame.draw.circle(surf,color, (width - curve,height - curve),curve)
return surf
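# e.g. curve_square(200, 100, 0.5, (255, 0, 0)) returns a 200x100 red surface
# whose corners are rounded with radius 0.5 * min(200, 100) = 50 pixels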
#
class Shape:
def __init__(self,type = "rect",col=(255,255,255),w=0,h=0):
pass
#used to simplify outlining the button/checkbox
#instead of many vars in button, create an outline object to give to button
class Outline:
def __init__(self, type="full", outline_amount = 2, outline_color = (0,0,0)):
self.type = type
self.s = outline_amount
self.col = outline_color
def _draw(self,surf,col,w,h,curve_amount):
if self.type == "half":
surf.blit(curve_square(w, h, curve_amount, col), (0,0))
elif self.type == "full":
surf.blit(curve_square(w, h, curve_amount, self.col), (0,0))
surf.blit(curve_square(w-self.s*2, h-self.s*2, curve_amount, col), (self.s,self.s))
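# e.g. passing Outline("full", 2, (0, 0, 0)) to a Button draws a 2px black border
# on the button's generated hover image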
#button class
class Button:
def __init__(self,x,y,w= 0,h=0, calculateSize = False,text="",background = (255,255,255),font = "Calibri", font_size = 30, font_colour = (0,0,0), outline = None,action = None, action_arg = None, surface = None, image = None, enlarge = False, enlarge_amount = 1.1, hover_image = None, dont_generate = False, hover_background_color = None, curve_amount = 0):
self.x = x
self.y = y
self.w = w
self.h = h
_all_widgets.append(self)
self.surface = surface
        #if no surface is supplied, try getting the current display surface
if self.surface == None:
self.surface = pygame.display.get_surface()
if self.surface == None:
raise ValueError("No surface to blit to")
self.text = text
self.text_colour = font_colour
self.background = background
self.curve_amount = curve_amount
self.hover_background = self.background if hover_background_color == None else hover_background_color
self.font = pygame.font.Font(pygame.font.match_font(font),font_size)
self.out = outline
self.action = action
self.image = image.copy() if image else None
self.clicked_on = False
self.hover_image = hover_image
self.enlarge = enlarge
self.enlarge_amount = enlarge_amount
if self.enlarge:
if self.text != "":
self.enlarge_font = pygame.font.Font(pygame.font.match_font(font),int(font_size * enlarge_amount))
self.action_arg = action_arg
self.hover = False
        self.calculateSize = calculateSize
self.prev_clicked_state = False
#create the surfaces for the button to blit every frame
if not dont_generate:
            if self.w == 0 or self.h == 0 or self.calculateSize:
if image != None:
self.w = self.image.get_width()
self.h = self.image.get_height()
else:
if self.text != "":
                        self._calculate_size()
else:
raise ValueError("cannot calculate width and height without text")
self._Generate_images()
def _Generate_images(self):
#generate images
if self.image == None:
self.image = pygame.Surface((self.w,self.h), pygame.SRCALPHA)
self.hover_image = pygame.Surface((self.w,self.h), pygame.SRCALPHA)
self.image.blit(curve_square(self.w, self.h, self.curve_amount, self.background), (0,0))
self.hover_image.blit(curve_square(self.w, self.h, self.curve_amount, self.hover_background), (0,0))
#self.hover_image.fill(self.hover_background)
if self.out:
self.out._draw(self.hover_image,self.hover_background,self.w,self.h,self.curve_amount)
self.hover_image.convert()
self.image.convert()
elif self.hover_image == None:
self.hover_image = self.image.copy()
if self.out:
pygame.draw.rect(self.hover_image,(0,0,0,255),(0,0,self.w,self.out.s))
pygame.draw.rect(self.hover_image,(0,0,0,255),(0,0,self.out.s,self.h))
pygame.draw.rect(self.hover_image,(0,0,0,255),(self.w,self.h,-self.w,-self.out.s))
pygame.draw.rect(self.hover_image,(0,0,0,255),(self.w,self.h,-self.out.s, -self.h))
self.hover_image.convert_alpha()
self.image.convert_alpha()
if self.enlarge:
size = (int(self.w * self.enlarge_amount), int(self.h * self.enlarge_amount))
self.dx, self.dy = size[0] - self.w, size[1] - self.h
self.hover_image = pygame.transform.scale(self.image,size)
if self.text != "":
txt = self.font.render(self.text,True,self.text_colour)
self.image.blit(txt,((self.w - txt.get_width())//2, (self.h - txt.get_height())//2))
if self.enlarge:
txt = self.enlarge_font.render(self.text,True,self.text_colour)
self.hover_image.blit(txt,((self.hover_image.get_width() - txt.get_width())//2, (self.hover_image.get_height() - txt.get_height())//2))
if self.hover_image.get_width() != self.w or self.hover_image.get_height() != self.h:
self.enlarge = True
self.dx, self.dy = self.hover_image.get_width() - self.w, self.hover_image.get_height() - self.h
self.image.convert()
self.hover_image.convert()
#if no width or height is given, calculate it with length of text
    def _calculate_size(self):
txt = self.font.render(self.text,False,(0,0,0))
self.w = txt.get_width() + self.w
self.h = txt.get_height() + self.h
def get_rect(self):
return pygame.Rect(self.x,self.y,self.w,self.h)
#this is what will be shown when print(button)
def __str__(self):
if self.text:
return "Button: '" + self.text + "'"
else:
return "Button: at (" + str(self.x) + ", " + str(self.y) + ")"
#update the text of the button, remake the surfaces for the button
def Update_text(self,text):
self.text = text
        if self.calculateSize:
            self._calculate_size()
self._Generate_images()
#update the button, this should get called every frame
def update(self):
click = pygame.mouse.get_pressed()[0]
mouse_pos = pygame.mouse.get_pos()
self.hover = False
returnee = False
#check if mouse over button
if mouse_pos[0] > self.x and mouse_pos[0] < self.x+self.w:
if mouse_pos[1] > self.y and mouse_pos[1] < self.y+self.h:
self.hover = True
#check for click, if held down, action only gets called once
if click and not self.prev_clicked_state:
self.clicked_on = True
if self.prev_clicked_state and self.clicked_on and click == False:
if self.action:
if self.action_arg:
self.action(self.action_arg)
else:
self.action()
returnee = True
if not click:
self.clicked_on = False
self.prev_clicked_state = click
#draw
self._draw()
#return if the button was clicked on
return returnee
#draw the button
def _draw(self):
if self.hover:
if self.enlarge:
self.surface.blit(self.hover_image,(self.x - self.dx//2,self.y - self.dy//2))
else:
self.surface.blit(self.hover_image,(self.x,self.y))
else:
self.surface.blit(self.image,(self.x,self.y))
#class textbox
class TextBox:
def __init__(self,x, y, w, h = 0,lines = 1, text = "", background = None, font_size = 30, font = "Calibri", text_colour = (0,0,0), surface = None, margin = 2, cursor = True,Enter_action = None, calculateSize = False):
self.x = x
self.y = y
self.w = w
self.h = h
_all_widgets.append(self)
self.cursor = cursor
self.current_line = 0
self.current_col = len(text)
self.lines = lines
self.font = pygame.font.Font(pygame.font.match_font(font),font_size)
self.text_colour = text_colour
self.text = [list(text)]
self.char_length = [self._get_text_width(x) for x in self.text]
self.background = background
        #if no surface is supplied, get the window surface
        self.surface = surface if surface else pygame.display.get_surface()
        if self.surface is None:
            raise ValueError("No surface to blit to")
        self.margin = margin
        self.Enter_action = Enter_action
if calculateSize or self.h == 0:
self.h = self._get_font_height() + h
#get the width of the text using the font
def _get_text_width(self,text):
text = "".join(text)
if len(text) == 0:
return 0
obj = self.font.render(text,True,(0,0,0))
return obj.get_width()
#returns the height of the font
def _get_font_height(self):
obj = self.font.render(" ",True,(0,0,0))
return obj.get_height()
#call this when the user presses a key down, supply the event from `pygame.event.get()`
def key_down(self,e):
#when backspace is pressed, delete last char
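        #pygame reports backspace as the "\x08" control character in event.unicode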
if e.unicode == "":
#if nothing in line, delete line
if len(self.text[self.current_line]) == 0:
if self.current_line > 0:
del self.text[self.current_line]
self.current_line -= 1
self.current_col = len(self.text[self.current_line])
else:
del self.text[self.current_line][-1]
self.current_col -= 1
#if key is enter, create line
elif e.key == 13:
if self.Enter_action:
self.Enter_action()
elif self.current_line < self.lines - 1:
self.current_line += 1
self.text.append([""])
                self.char_length.append(0)
self.current_col = 0
        #if key is a character, put it on screen
elif e.unicode != "":
if len(self.text[self.current_line]) > 0:
if self.text[self.current_line][-1] == "":
del self.text[self.current_line][-1]
self.text[self.current_line] = self.text[self.current_line][:self.current_col] + [e.unicode] + self.text[self.current_line][self.current_col:]
self.current_col += 1
#if the down arrow is pressed
elif e.key == 274:
self.current_line += 1 if self.current_line < len(self.text)-1 else 0
self.current_col = min(self.current_col, len(self.text[self.current_line]))
#if the up arrow is pressed
elif e.key == 273:
self.current_line -= 1 if self.current_line > 0 else 0
self.current_col = min(self.current_col, len(self.text[self.current_line]))
#if the right arrow is pressed
elif e.key == 275:
self.current_col += 1 if len(self.text[self.current_line]) > self.current_col else 0
#if the left arrow is pressed
elif e.key == 276:
self.current_col -= 1 if 0 < self.current_col else 0
#draw the textbox
def _draw(self):
#draw background
if self.background:
pygame.draw.rect(self.surface, self.background, (self.x,self.y,self.w,self.h*self.lines))
#draw all text
for line,text in enumerate(self.text):
if len(text) != 0:
txt = "".join(text)
obj = self.font.render(txt,True,self.text_colour)
self.surface.blit(obj,(self.x + self.margin,self.y +(self.h*line)))
#draw cursor
        if self.cursor:
            total = self._get_text_width(self.text[self.current_line][:self.current_col])
            pygame.draw.line(self.surface,(0,0,0),(self.x + self.margin + total,self.y +(self.h*self.current_line)),
                             (self.x + self.margin + total,self.y + (self.h*(self.current_line+1))),2)
#print(self.current_col)
#update should be called every frame, it draws the textbox
def update(self):
self._draw()
#get the text of a specific line or lines
def get_lines(self, lines= -1,return_as_string = False):
pas = False
if isinstance(lines,int):
if lines == -1:
lines = (0,self.lines)
pas = True
            if not pas:
                if lines < 0 or lines >= self.lines:
                    raise IndexError("line index not in range")
                if len(self.text) <= lines:
                    return ""
                return "".join(self.text[lines])
if isinstance(lines,tuple):
if lines[0] < 0 or lines[0] > self.lines or lines[1] < 0 or lines[1] > self.lines or lines[0] > lines[1]:
raise IndexError("line index is out of range: " + str(lines) + " (0, " + str(str(self.lines)))
string = []
for x in range(lines[0],lines[1]):
if len(self.text) > x:
string.append("".join(self.text[x]))
else:
string.append("")
if return_as_string:
return "\n".join(string)
return string
#CheckBox class
class CheckBox:
def __init__(self,x,y,w,checked=False,background=(255,255,255),outline=None,surface=None,check_width = 2):
self.x = x
self.y = y
self.w = w
_all_widgets.append(self)
self.checked = checked
        self.background = background
self.out = outline
self._prev_click = False
self.surface = surface if surface else pygame.display.get_surface()
self.check_width = check_width
        if self.surface is None:
raise ValueError("No surface to blit to")
#return if checkbox is checked when converting to a bool
def __bool__(self):
return self.checked
    #unambiguous representation, e.g. when inspected in the interactive console
    def __repr__(self):
        return "CheckBox(checked=" + str(self.checked) + ")"
#when represented as a string e.g. 'print(checkbox)'
def __str__(self):
return "Checkbox at (" + str(self.x) + ", " + str(self.y) + "): " + str(self.checked)
#update the checkbox
def update(self):
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()[0]
if mouse[0] > self.x and mouse[0] < self.x + self.w:
if mouse[1] > self.y and mouse[1] < self.y + self.w:
if click:
if not self._prev_click:
self.checked = not self.checked
self._prev_click = True
else:
self._prev_click = False
self._draw()
#draw the checkbox
def _draw(self):
if self.out:
pygame.draw.rect(self.surface,(0,0,0),(self.x,self.y,self.w,self.w))
            pygame.draw.rect(self.surface,self.background,(self.x + self.out.s,self.y + self.out.s,self.w - self.out.s*2,self.w - self.out.s*2))
else:
            pygame.draw.rect(self.surface,self.background,(self.x,self.y,self.w,self.w))
if self.checked:
pygame.draw.line(self.surface,(0,0,0),(self.x,self.y), (self.x + self.w,self.y + self.w),self.check_width)
pygame.draw.line(self.surface,(0,0,0),(self.x,self.y + self.w), (self.x + self.w,self.y),self.check_width)
| 43.8925 | 361 | 0.566327 | 2,382 | 17,557 | 4.054996 | 0.103694 | 0.047831 | 0.038824 | 0.027539 | 0.414846 | 0.330676 | 0.278083 | 0.249094 | 0.197743 | 0.157987 | 0 | 0.01802 | 0.317252 | 17,557 | 400 | 362 | 43.8925 | 0.787687 | 0.111466 | 0 | 0.288961 | 0 | 0 | 0.020593 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087662 | false | 0.003247 | 0.003247 | 0.012987 | 0.159091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04763099553a3f60ab99174135b7fe8f42971ff3 | 2,108 | py | Python | blurple/io/reaction.py | jeremytiki/blurple.py | c8f65955539cc27be588a06592b1c81c03f59c37 | [
"MIT"
] | 4 | 2021-06-30T19:58:59.000Z | 2021-07-27T13:43:49.000Z | blurple/io/reaction.py | jeremytiki/blurple.py | c8f65955539cc27be588a06592b1c81c03f59c37 | [
"MIT"
] | 2 | 2021-07-10T16:08:25.000Z | 2021-07-12T02:15:40.000Z | blurple/io/reaction.py | jeremytiki/blurple.py | c8f65955539cc27be588a06592b1c81c03f59c37 | [
"MIT"
] | 3 | 2021-07-08T03:00:40.000Z | 2021-09-08T19:57:50.000Z | import discord
import blurple.io as io
class ReactionAddBasic(io.Reply):
"""An unopinionated, lower level class to wait for a user to add a reaction."""
event = "raw_reaction_add"
async def on_reply_init(self, message: discord.Message):
"""Sepcialized to pass message object."""
self.message = message
def reply_check(self, payload: discord.RawReactionActionEvent):
"""Specialized to check if the reaction and payload message is valid."""
if payload.message_id == self.message.id and not payload.user_id == self.ctx.me.id:
if self._iscontainer(self.validate):
return str(payload.emoji) in self.validate
return True
class ReactionRemoveBasic(ReactionAddBasic):
"""An unopinionated, lower level class to wait for a user to remove a reaction."""
event = "raw_reaction_remove"
class ReactionAddReply(ReactionAddBasic):
""" Ask for the user's reaction reply.
:Example Usage:
.. code-block:: python
            reply = await io.ReactionAddReply(ctx, validate=["✅", "❎"]).result()
"""
async def on_reply_init(self, message: discord.Message):
"""Specialized to add vaild reaction emojis to message, if validation is on."""
await super().on_reply_init(message)
if self._iscontainer(self.validate):
for react in self.validate:
await self.message.add_reaction(react)
def reply_check(self, payload: discord.RawReactionActionEvent):
"""Specialized to check if payload user and message are valid."""
return payload.user_id == self.ctx.author.id and \
payload.message_id == self.message.id
async def on_reply_attempt(self, payload: discord.RawReactionActionEvent):
"""Specialized to remove the user's reaction."""
await self.message.remove_reaction(payload.emoji, self.ctx.bot.get_user(payload.user_id))
return payload
async def on_reply_complete(self):
"""Specialized to clear all reactions off the message."""
await self.message.clear_reactions()
| 37.642857 | 97 | 0.675047 | 266 | 2,108 | 5.255639 | 0.296992 | 0.062947 | 0.028612 | 0.042918 | 0.418455 | 0.312589 | 0.23319 | 0.23319 | 0.23319 | 0.170243 | 0 | 0 | 0.225332 | 2,108 | 55 | 98 | 38.327273 | 0.854868 | 0.202087 | 0 | 0.222222 | 0 | 0 | 0.024982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.481481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
047794e8835f067229c32824232b6e2929ad5189 | 824 | py | Python | client/events.py | adamstallard/EtherBank | d5b24034d9416b6b61efb9f8cb8699e63cf322e9 | [
"BSD-2-Clause"
] | 1 | 2019-01-09T22:05:03.000Z | 2019-01-09T22:05:03.000Z | client/events.py | adamstallard/EtherBank | d5b24034d9416b6b61efb9f8cb8699e63cf322e9 | [
"BSD-2-Clause"
] | null | null | null | client/events.py | adamstallard/EtherBank | d5b24034d9416b6b61efb9f8cb8699e63cf322e9 | [
"BSD-2-Clause"
] | null | null | null | from web3.auto import w3
import time
# from infura import *
import config
import json
with open("../build/contracts/EtherBank.json") as f:
ether_bank_json = json.load(f)
ether_bank_contract = w3.eth.contract(
address=config.ETHER_BANK_ADDR,
abi=ether_bank_json['abi']
)
accounts = w3.eth.accounts
def handle_event(event):
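    # wait for the transaction that emitted the event to be mined, then decode the contract's event data from the receipt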
receipt = w3.eth.waitForTransactionReceipt(event['transactionHash'])
result = ether_bank_contract.events.greeting.processReceipt(receipt)
print(result[0]['args'])
def log_loop(event_filter, poll_interval):
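    # poll the filter for new log entries at a fixed interval and handle each one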
while True:
for event in event_filter.get_new_entries():
handle_event(event)
time.sleep(poll_interval)
block_filter = w3.eth.filter({
'fromBlock': 'latest',
'address': config.ETHER_BANK_ADDR
})
log_loop(block_filter, 2)
| 23.542857 | 72 | 0.71966 | 111 | 824 | 5.126126 | 0.495496 | 0.094903 | 0.035149 | 0.077329 | 0.091388 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011645 | 0.166262 | 824 | 34 | 73 | 24.235294 | 0.816594 | 0.024272 | 0 | 0 | 0 | 0 | 0.09601 | 0.041147 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.16 | 0 | 0.24 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0477d1fec515526460f168e4daf0936536d74afc | 14,316 | py | Python | train.py | HuipengXu/MixMatch-PyTorch | 026a733de773a58e361f66ee48dc150d23ba9754 | [
"MIT"
] | 1 | 2021-12-20T14:19:10.000Z | 2021-12-20T14:19:10.000Z | train.py | HuipengXu/MixMatch-PyTorch | 026a733de773a58e361f66ee48dc150d23ba9754 | [
"MIT"
] | null | null | null | train.py | HuipengXu/MixMatch-PyTorch | 026a733de773a58e361f66ee48dc150d23ba9754 | [
"MIT"
] | null | null | null | import argparse
import os
import shutil
import time
import random
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchtext.vocab import GloVe
from progress.bar import Bar
from tensorboardX import SummaryWriter
from textcnn import MixTextCNN, Config
from preprocess import get_imdb
from util import mkdir_p, AverageMeter, Logger, accuracy, \
SemiLoss, WeightEMA, save_checkpoint, MyIMDB
parser = argparse.ArgumentParser(description='PyTorch MixMatch Training')
# Optimization options
parser.add_argument('--epochs', default=15, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch-size', default=128, type=int, metavar='N',
help='train batch-size')
parser.add_argument('--vocab-size', default=50000, type=int, metavar='N',
help='vocabulary size')
parser.add_argument('--max-length', default=512, type=int, metavar='N',
help='max text length')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--pad-token', default='<pad>', type=str,
help='token used for pad sentence to max length')
# Checkpoints
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# Miscs
parser.add_argument('--manual-seed', type=int, default=None, help='manual seed')
# Device options
parser.add_argument('--gpus', default='0,1,2,3', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
# Method options
parser.add_argument('--n-labeled', type=int, default=1000,
help='Number of labeled data')
parser.add_argument('--val-iteration', type=int, default=1024,
help='Number of labeled data')
parser.add_argument('--out', default='result',
help='Directory to output the result')
parser.add_argument('--alpha', default=0.75, type=float)
parser.add_argument('--lambda-u', default=0.3, type=float)
parser.add_argument('--T', default=0.5, type=float)
parser.add_argument('--ema-decay', default=0.999, type=float)
def set_seed(args):
random.seed(args.manual_seed)
np.random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
if args.n_gpus > 0:
torch.cuda.manual_seed_all(args.manual_seed)
def main(args):
best_acc = 0
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
use_cuda = torch.cuda.is_available()
# Random seed
random.seed(time.time())
if args.manual_seed is None:
args.manual_seed = random.randint(1, 10000)
if os.path.exists(args.out):
shutil.rmtree(args.out)
mkdir_p(args.out)
args.n_gpus = len(args.gpus.split(','))
state = {k: v for k, v in args._get_kwargs()}
with open(os.path.join(args.out, 'args.json'), 'w', encoding='utf8') as f:
json.dump(state, f)
print('==> saved arguments')
print(json.dumps(state, indent=4))
set_seed(args)
# Data
    print('==> Preparing IMDB')
train_labeled_set, train_unlabeled_set, valid_set, test_set,\
text_field, label_field = get_imdb('./data/aclImdb/')
text_field.build_vocab(train_unlabeled_set, max_size=args.vocab_size,
vectors=GloVe(name='6B', dim=300, cache='./data/'))
label_field.build_vocab(train_unlabeled_set)
text_vocab, label_vocab = text_field.vocab, label_field.vocab
print(f"Unique tokens in TEXT vocabulary: {len(text_vocab)}")
print(f"Unique tokens in LABEL vocabulary: {len(label_vocab)}")
embedding_matrix = text_vocab.vectors
train_labeled_set = MyIMDB(train_labeled_set, text_vocab, label_vocab)
train_unlabeled_set = MyIMDB(train_unlabeled_set, text_vocab, label_vocab, unlabeled=True)
valid_set = MyIMDB(valid_set, text_vocab, label_vocab)
test_set = MyIMDB(test_set, text_vocab, label_vocab)
train_labeled_loader = DataLoader(train_labeled_set, batch_size=args.batch_size, shuffle=True, num_workers=0,
drop_last=True)
train_unlabeled_loader = DataLoader(train_unlabeled_set, batch_size=args.batch_size, shuffle=True, num_workers=0,
drop_last=True)
valid_loader = DataLoader(valid_set, batch_size=args.batch_size, shuffle=False, num_workers=0)
test_loader = DataLoader(test_set, batch_size=args.batch_size, shuffle=False, num_workers=0)
# Model
print("==> creating TextCNN")
def create_model(config, model=MixTextCNN, use_cuda=False, ema=False):
model = model(config)
if use_cuda: model = model.cuda()
if ema:
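            # EMA weights are updated manually by WeightEMA, so detach them from autograd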
for param in model.parameters():
param.detach_()
return model
config = Config(text_field, label_field, embedding=embedding_matrix)
model = create_model(config, use_cuda=use_cuda)
ema_model = create_model(config, use_cuda=use_cuda, ema=True)
cudnn.benchmark = True
print(' Total params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0))
train_criterion = SemiLoss()
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', patience=2)
ema_optimizer = WeightEMA(model, ema_model, args.lr, alpha=args.ema_decay)
start_epoch = 0
# Resume
title = 'noisy-imdb'
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
args.out = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
ema_model.load_state_dict(checkpoint['ema_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger = Logger(os.path.join(args.out, 'log.txt'), title=title, resume=True)
else:
logger = Logger(os.path.join(args.out, 'log.txt'), title=title)
logger.set_names(
['Train Loss', 'Train Loss X', 'Train Loss U', 'Valid Loss', 'Valid Acc.', 'Test Loss', 'Test Acc.'])
writer = SummaryWriter(args.out)
step = 0
test_accs = []
# Train and val
for epoch in range(start_epoch, args.epochs):
        print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, optimizer.param_groups[0]['lr']))
train_loss, train_loss_x, train_loss_u = train(train_labeled_loader, train_unlabeled_loader, text_vocab, model,
optimizer, ema_optimizer, train_criterion, epoch, use_cuda)
_, train_acc = validate(train_labeled_loader, ema_model, criterion, use_cuda, mode='Train Stats')
val_loss, val_acc = validate(valid_loader, ema_model, criterion, use_cuda, mode='Valid Stats')
test_loss, test_acc = validate(test_loader, ema_model, criterion, use_cuda, mode='Test Stats ')
lr_scheduler.step(test_acc)
step = args.val_iteration * (epoch + 1)
writer.add_scalar('losses/train_loss', train_loss, step)
writer.add_scalar('losses/valid_loss', val_loss, step)
writer.add_scalar('losses/test_loss', test_loss, step)
writer.add_scalar('accuracy/train_acc', train_acc, step)
writer.add_scalar('accuracy/val_acc', val_acc, step)
writer.add_scalar('accuracy/test_acc', test_acc, step)
# append logger file
logger.append([train_loss, train_loss_x, train_loss_u, val_loss, val_acc, test_loss, test_acc])
# save model
is_best = val_acc > best_acc
best_acc = max(val_acc, best_acc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'ema_state_dict': ema_model.state_dict(),
'acc': val_acc,
            'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
}, is_best, args.out)
test_accs.append(test_acc)
logger.close()
writer.close()
print('Best val acc:')
print(best_acc)
print('Mean test acc:')
print(np.mean(test_accs[-20:]))
def get_batch(iterator, loader):
try:
inputs, targets = next(iterator)
    except StopIteration:
iterator = iter(loader)
inputs, targets = next(iterator)
return iterator, inputs, targets
def get_max_length(tensors, pad_id):
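    # longest number of non-pad tokens across all rows in the batch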
return max((tensor != pad_id).sum() for tensor in tensors)
def train(labeled_trainloader, unlabeled_trainloader, vocab, model, optimizer,
ema_optimizer, criterion, epoch, use_cuda):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
losses_x = AverageMeter()
losses_u = AverageMeter()
ws = AverageMeter()
end = time.time()
bar = Bar('Training', max=args.val_iteration)
labeled_train_iter = iter(labeled_trainloader)
unlabeled_train_iter = iter(unlabeled_trainloader)
model.train()
for batch_idx in range(args.val_iteration):
labeled_train_iter, inputs_x, targets_x = get_batch(labeled_train_iter, labeled_trainloader)
unlabeled_train_iter, inputs_u, inputs_u2 = get_batch(unlabeled_train_iter, unlabeled_trainloader)
# measure data loading time
data_time.update(time.time() - end)
batch_size = inputs_x.size(0)
# Transform label to one-hot
targets_x = torch.zeros(batch_size, 2).scatter_(1, targets_x.view(-1, 1).long(), 1)
if use_cuda:
inputs_x, targets_x = inputs_x.cuda(), targets_x.cuda(non_blocking=True)
inputs_u = inputs_u.cuda()
inputs_u2 = inputs_u2.cuda()
with torch.no_grad():
# compute guessed labels of unlabel samples
outputs_u = model(inputs_u)
outputs_u2 = model(inputs_u2)
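            # average the predictions over the two augmentations, then sharpen with temperature T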
p = (torch.softmax(outputs_u, dim=1) + torch.softmax(outputs_u2, dim=1)) / 2
pt = p ** (1 / args.T)
targets_u = pt / pt.sum(dim=1, keepdim=True)
targets_u = targets_u.detach()
# mixup
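        # trim all three batches to the longest real (non-pad) sequence before concatenating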
pad_id = vocab.stoi[args.pad_token]
max_len = max(get_max_length(inputs, pad_id) for inputs in [inputs_x, inputs_u, inputs_u2])
        inputs_x, inputs_u, inputs_u2 = inputs_x[:, :max_len], inputs_u[:, :max_len], inputs_u2[:, :max_len]
all_inputs = torch.cat([inputs_x, inputs_u, inputs_u2], dim=0)
all_targets = torch.cat([targets_x, targets_u, targets_u], dim=0)
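        # sample the mixup coefficient from Beta(alpha, alpha); max(l, 1 - l) keeps the mix dominated by the original sample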
l = np.random.beta(args.alpha, args.alpha)
l = max(l, 1 - l)
# shuffle
idx = torch.randperm(all_inputs.size(0))
input_a, input_b = all_inputs, all_inputs[idx]
target_a, target_b = all_targets, all_targets[idx]
        # lines 13 and 14 (of the MixMatch algorithm) are merged into one step here
mixed_target = l * target_a + (1 - l) * target_b
logits_x, logits_u = model(input_a, input_b, l, mix=True)
Lx, Lu, w = criterion(args.lambda_u, logits_x, mixed_target[:batch_size], logits_u, mixed_target[batch_size:],
epoch + batch_idx / args.val_iteration, args.epochs)
loss = Lx + w * Lu
# record loss
losses.update(loss.item(), inputs_x.size(0))
losses_x.update(Lx.item(), inputs_x.size(0))
losses_u.update(Lu.item(), inputs_x.size(0))
ws.update(w, inputs_x.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
ema_optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Loss_x: {loss_x:.4f} | Loss_u: {loss_u:.4f} | W: {w:.4f}'.format(
batch=batch_idx + 1,
size=args.val_iteration,
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
loss_x=losses_x.avg,
loss_u=losses_u.avg,
w=ws.avg,
)
bar.next()
bar.finish()
return (losses.avg, losses_x.avg, losses_u.avg,)
def validate(valloader, model, criterion, use_cuda, mode):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
Acc = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
bar = Bar(f'{mode}', max=len(valloader))
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valloader):
# measure data loading time
data_time.update(time.time() - end)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
# compute output
outputs = model(inputs)
            # note: the loss here is computed a bit differently from training
loss = criterion(outputs[:, 1], targets)
# measure accuracy and record loss
acc = accuracy(outputs, targets)
losses.update(loss.item(), inputs.size(0))
Acc.update(acc.item(), inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
batch=batch_idx + 1,
size=len(valloader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
acc=Acc.avg,
)
bar.next()
bar.finish()
return (losses.avg, Acc.avg)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| 38.074468 | 194 | 0.634325 | 1,902 | 14,316 | 4.564669 | 0.17245 | 0.017623 | 0.033287 | 0.008639 | 0.297742 | 0.229095 | 0.173923 | 0.147892 | 0.101359 | 0.092375 | 0 | 0.011518 | 0.241897 | 14,316 | 375 | 195 | 38.176 | 0.788446 | 0.036323 | 0 | 0.143369 | 0 | 0.007168 | 0.113775 | 0 | 0 | 0 | 0 | 0 | 0.003584 | 1 | 0.02509 | false | 0 | 0.0681 | 0.003584 | 0.111111 | 0.046595 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0477db777b9cee4f58f0a5582459326ea8dada3b | 1,221 | py | Python | api/anubis/utils/visuals/users.py | AnubisLMS/Anubis | 2f66ed2e0518422832816615d6f948239c081400 | [
"MIT"
] | 87 | 2021-11-08T10:58:26.000Z | 2022-03-31T19:02:47.000Z | api/anubis/utils/visuals/users.py | AnubisLMS/Anubis | 2f66ed2e0518422832816615d6f948239c081400 | [
"MIT"
] | 80 | 2021-11-07T04:46:42.000Z | 2022-03-31T23:58:00.000Z | api/anubis/utils/visuals/users.py | AnubisLMS/Anubis | 2f66ed2e0518422832816615d6f948239c081400 | [
"MIT"
] | 15 | 2021-11-07T17:02:21.000Z | 2022-03-28T02:04:16.000Z | from datetime import datetime, timedelta
from anubis.utils.cache import cache
from anubis.utils.data import is_debug, is_job
from anubis.utils.usage.users import get_platform_users
from anubis.utils.visuals.files import convert_fig_bytes
from anubis.utils.visuals.watermark import add_watermark
@cache.memoize(timeout=-1, forced_update=is_job, unless=is_debug)
def get_platform_users_plot(days: int, step: int = 1):
import matplotlib.pyplot as plt
    now = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
start_datetime = now - timedelta(days=days - 1)
xx = []
yy = []
fig, ax = plt.subplots(figsize=(12, 10))
for n in range(0, days, step):
day = start_datetime + timedelta(days=n)
y = get_platform_users(day)
xx.append(day)
yy.append(y)
ax.plot(xx, yy, 'b--', label='Total users registered on platform')
ax.legend(loc='upper left')
ax.grid()
ax.set(
title='Total users registered on Anubis LMS over time',
xlabel='Time',
ylabel='Registered Users',
)
# set y max to the nearest hundred
ax.set_ylim([0, ((yy[-1] // 100) + 1) * 100])
add_watermark(ax)
return convert_fig_bytes(plt, fig)
| 28.395349 | 70 | 0.673219 | 181 | 1,221 | 4.425414 | 0.458564 | 0.062422 | 0.093633 | 0.054931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020683 | 0.208026 | 1,221 | 42 | 71 | 29.071429 | 0.807653 | 0.026208 | 0 | 0 | 0 | 0 | 0.095198 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.233333 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04796e79a9664545eb63e6ba84915a3f7ac3d290 | 438 | py | Python | heatingcontrolstate.py | szbeni/heatingcontroller | 624538fdb8ddb88992b1dc5240b5caa7f7380255 | [
"MIT"
] | null | null | null | heatingcontrolstate.py | szbeni/heatingcontroller | 624538fdb8ddb88992b1dc5240b5caa7f7380255 | [
"MIT"
] | null | null | null | heatingcontrolstate.py | szbeni/heatingcontroller | 624538fdb8ddb88992b1dc5240b5caa7f7380255 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
if len(sys.argv) < 2:
print("Off")
exit()
inp = sys.argv[1]
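# the argument is a 5-character bitstring: all zeros means off, a single set bit selects the mode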
if inp == '00000':
    print("Off")
elif inp == '10000':
    print("Gas Auto Fan")
elif inp == '01000':
    print("Gas Slow Fan")
elif inp == '00100':
    print("Fan")
elif inp == '00010':
    print("Elec Auto Fan")
elif inp == '00001':
    print("Elec Slow Fan")
else:
    print("Error " + sys.argv[1])
| 16.846154 | 33 | 0.538813 | 64 | 438 | 3.6875 | 0.4375 | 0.148305 | 0.169492 | 0.084746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103245 | 0.226027 | 438 | 25 | 34 | 17.52 | 0.59292 | 0.038813 | 0 | 0.1 | 0 | 0 | 0.22619 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
047d17f6bf4f854c158900b4950a6ed69d11af25 | 2,919 | py | Python | user/admin.py | kthaisse/website | be0d0e0763ae2a6b8351c08b432229eae9521f1d | [
"MIT"
] | null | null | null | user/admin.py | kthaisse/website | be0d0e0763ae2a6b8351c08b432229eae9521f1d | [
"MIT"
] | null | null | null | user/admin.py | kthaisse/website | be0d0e0763ae2a6b8351c08b432229eae9521f1d | [
"MIT"
] | null | null | null | from django.contrib import admin, messages
from django.contrib.auth.models import Group
from app.settings import GROUP_BY_DIVISION_NAME
from user.models import Division, History, Role, Team, User
from user.utils import send_imported, send_slack
def send_welcome(modeladmin, request, users):
for user in users:
send_imported(user=user)
messages.success(
request, f"Welcome emails have been sent to {users.count()} user/s."
)
def send_slack_invite(modeladmin, request, users):
for user in users:
send_slack(user=user)
messages.success(
request, f"Slack invitations have been sent to {users.count()} user/s."
)
send_welcome.short_description = "Send welcome email"
send_slack_invite.short_description = "Send Slack invitation"
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
search_fields = ("id", "email", "name", "surname", "type")
list_display = (
"email",
"name",
"surname",
"type",
"email_verified",
"registration_finished",
"is_active",
"slack_user",
"created_at",
)
list_filter = (
"type",
"email_verified",
"registration_finished",
"is_active",
"gender",
"university",
"degree",
"graduation_year",
)
readonly_fields = (
"groups",
"last_login",
"slack_user",
"created_at",
)
exclude = ("password", "user_permissions")
ordering = ("-created_at",)
actions = [send_welcome, send_slack_invite]
@admin.register(Team)
class TeamAdmin(admin.ModelAdmin):
search_fields = ("id",)
list_display = ("id", "starts_at", "ends_at")
list_filter = ("starts_at", "ends_at")
ordering = ("-starts_at",)
@admin.register(Division)
class DivisionAdmin(admin.ModelAdmin):
search_fields = ("id", "name")
list_display = ("id", "name", "team")
list_filter = ("team",)
ordering = ("-team__starts_at", "name")
@admin.register(Role)
class RoleAdmin(admin.ModelAdmin):
search_fields = ("id", "user", "division")
list_display = ("id", "division", "user", "starts_at", "ends_at", "is_head")
list_filter = ("division__team", "user", "starts_at", "ends_at")
ordering = ("-division__team__starts_at", "-starts_at", "user")
def save_model(self, request, obj, form, change):
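        # keep the user's auth groups in sync with their division role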
super().save_model(request, obj, form, change)
group_name = GROUP_BY_DIVISION_NAME.get(obj.division.name)
if group_name:
group = Group.objects.get(name=group_name)
if obj.is_active:
obj.user.groups.add(group)
else:
obj.user.groups.remove(group)
@admin.register(History)
class HistoryAdmin(admin.ModelAdmin):
search_fields = ("id", "title", "body")
list_display = ("title", "time")
list_filter = ("time",)
ordering = ("-time",)
| 28.067308 | 80 | 0.627612 | 337 | 2,919 | 5.204748 | 0.293769 | 0.036488 | 0.059863 | 0.076967 | 0.285633 | 0.165336 | 0.129989 | 0.078677 | 0 | 0 | 0 | 0 | 0.231929 | 2,919 | 103 | 81 | 28.339806 | 0.782337 | 0 | 0 | 0.188235 | 0 | 0 | 0.224735 | 0.023296 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035294 | false | 0.011765 | 0.070588 | 0 | 0.435294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
047d1fce76502daee3bd9c513a2ef6f8cb1425bd | 11,584 | py | Python | chassisml-sdk/chassisml/chassisml.py | modzy/chassis | a147411c5a27dfee371b75d4cba8d12171d89255 | [
"Apache-2.0"
] | 29 | 2021-07-27T15:59:31.000Z | 2022-03-24T17:22:17.000Z | chassisml-sdk/chassisml/chassisml.py | modzy/chassis | a147411c5a27dfee371b75d4cba8d12171d89255 | [
"Apache-2.0"
] | 46 | 2021-07-29T08:47:21.000Z | 2022-03-28T01:59:24.000Z | chassisml-sdk/chassisml/chassisml.py | modzy/chassis | a147411c5a27dfee371b75d4cba8d12171d89255 | [
"Apache-2.0"
] | 2 | 2021-12-06T20:33:38.000Z | 2022-03-11T15:47:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import _io
import os
import time
import json
import requests
import urllib.parse
import tempfile
import shutil
import mlflow
import base64
import string
import numpy as np
from chassisml import __version__
from ._utils import zipdir,fix_dependencies,write_modzy_yaml,NumpyEncoder
###########################################
MODEL_ZIP_NAME = 'model.zip'
MODZY_YAML_NAME = 'model.yaml'
CHASSIS_TMP_DIRNAME = 'chassis_tmp'
routes = {
'build': '/build',
'job': '/job',
'test': '/test'
}
###########################################
class ChassisModel(mlflow.pyfunc.PythonModel):
"""The Chassis Model object.
This class inherits from mlflow.pyfunc.PythonModel and adds Chassis functionality.
Attributes:
predict (function): MLflow pyfunc compatible predict function.
Will wrap user-provided function which takes two arguments: model_input (bytes) and model_context (dict).
chassis_build_url (str): The build url for the Chassis API.
"""
def __init__(self,model_context,process_fn,batch_process_fn,batch_size,chassis_base_url):
if process_fn and batch_process_fn:
if not batch_size:
raise ValueError("Both batch_process_fn and batch_size must be provided for batch support.")
self.predict = self._gen_predict_method(process_fn,model_context)
self.batch_predict = self._gen_predict_method(batch_process_fn,model_context,batch=True)
self.batch_input = True
self.batch_size = batch_size
elif process_fn and not batch_process_fn:
self.predict = self._gen_predict_method(process_fn,model_context)
self.batch_input = False
self.batch_size = None
elif batch_process_fn and not process_fn:
if not batch_size:
raise ValueError("Both batch_process_fn and batch_size must be provided for batch support.")
self.predict = self._gen_predict_method(batch_process_fn,model_context,batch_to_single=True)
self.batch_predict = self._gen_predict_method(batch_process_fn,model_context,batch=True)
self.batch_input = True
self.batch_size = batch_size
else:
raise ValueError("At least one of process_fn or batch_process_fn must be provided.")
self.chassis_build_url = urllib.parse.urljoin(chassis_base_url, routes['build'])
self.chassis_test_url = urllib.parse.urljoin(chassis_base_url, routes['test'])
def _gen_predict_method(self,process_fn,model_context,batch=False,batch_to_single=False):
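        # wrap the user-supplied process function in an mlflow.pyfunc-style predict(context, model_input) closure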
def predict(_,model_input):
if batch_to_single:
output = process_fn([model_input],model_context)[0]
else:
output = process_fn(model_input,model_context)
if batch:
return [json.dumps(out,separators=(",", ":"),cls=NumpyEncoder).encode() for out in output]
else:
return json.dumps(output,separators=(",", ":"),cls=NumpyEncoder).encode()
return predict
def test(self,test_input):
if isinstance(test_input,_io.BufferedReader):
result = self.predict(None,test_input.read())
elif isinstance(test_input,bytes):
result = self.predict(None,test_input)
elif isinstance(test_input,str):
if os.path.exists(test_input):
                with open(test_input,'rb') as f:
                    result = self.predict(None,f.read())
else:
result = self.predict(None,bytes(test_input,encoding='utf8'))
else:
print("Invalid input. Must be buffered reader, bytes, valid filepath, or text input.")
return False
return result
def test_batch(self,test_input):
if not self.batch_input:
raise NotImplementedError("Batch inference not implemented.")
if hasattr(self,'batch_predict'):
batch_method = self.batch_predict
else:
batch_method = self.predict
if isinstance(test_input,_io.BufferedReader):
results = batch_method(None,[test_input.read() for _ in range(self.batch_size)])
elif isinstance(test_input,bytes):
results = batch_method(None,[test_input for _ in range(self.batch_size)])
elif isinstance(test_input,str):
if os.path.exists(test_input):
results = batch_method(None,[open(test_input,'rb').read() for _ in range(self.batch_size)])
else:
results = batch_method(None,[bytes(test_input,encoding='utf8') for _ in range(self.batch_size)])
else:
print("Invalid input. Must be buffered reader, bytes, valid filepath, or text input.")
return False
return results
def test_env(self,test_input_path,conda_env=None,fix_env=True):
model_directory = os.path.join(tempfile.mkdtemp(),CHASSIS_TMP_DIRNAME)
mlflow.pyfunc.save_model(path=model_directory, python_model=self, conda_env=conda_env,
extra_pip_requirements = None if conda_env else ["chassisml=={}".format(__version__)])
if fix_env:
fix_dependencies(model_directory)
# Compress all files in model directory to send them as a zip.
tmppath = tempfile.mkdtemp()
zipdir(model_directory,tmppath,MODEL_ZIP_NAME)
with open('{}/{}'.format(tmppath,MODEL_ZIP_NAME),'rb') as model_f, \
open(test_input_path,'rb') as test_input_f:
files = [
('sample_input', test_input_f),
('model', model_f)
]
print('Starting test job... ', end='', flush=True)
res = requests.post(self.chassis_test_url, files=files)
res.raise_for_status()
print('Ok!')
shutil.rmtree(tmppath)
shutil.rmtree(model_directory)
return res.json()
def save(self,path,conda_env=None,overwrite=False):
if overwrite and os.path.exists(path):
shutil.rmtree(path)
mlflow.pyfunc.save_model(path=path, python_model=self, conda_env=conda_env)
print("Chassis model saved.")
def publish(self,model_name,model_version,registry_user,registry_pass,
conda_env=None,fix_env=True,gpu=False,modzy_sample_input_path=None,
modzy_api_key=None):
if (modzy_sample_input_path or modzy_api_key) and not \
(modzy_sample_input_path and modzy_api_key):
raise ValueError('"modzy_sample_input_path", and "modzy_api_key" must both be provided to publish to Modzy.')
        tmppath = None
        model_directory = None
        try:
            model_directory = os.path.join(tempfile.mkdtemp(),CHASSIS_TMP_DIRNAME)
mlflow.pyfunc.save_model(path=model_directory, python_model=self, conda_env=conda_env,
extra_pip_requirements = None if conda_env else ["chassisml=={}".format(__version__)])
if fix_env:
fix_dependencies(model_directory)
# Compress all files in model directory to send them as a zip.
tmppath = tempfile.mkdtemp()
zipdir(model_directory,tmppath,MODEL_ZIP_NAME)
image_name = "-".join(model_name.translate(str.maketrans('', '', string.punctuation)).lower().split())
image_data = {
'name': "{}/{}".format(registry_user,"{}:{}".format(image_name,model_version)),
'model_name': model_name,
'model_path': tmppath,
'registry_auth': base64.b64encode("{}:{}".format(registry_user,registry_pass).encode("utf-8")).decode("utf-8"),
'publish': True,
'gpu': gpu
}
if modzy_sample_input_path and modzy_api_key:
modzy_metadata_path = os.path.join(tmppath,MODZY_YAML_NAME)
modzy_data = {
'metadata_path': modzy_metadata_path,
'sample_input_path': modzy_sample_input_path,
'deploy': True,
'api_key': modzy_api_key
}
write_modzy_yaml(model_name,model_version,modzy_metadata_path,batch_size=self.batch_size,gpu=gpu)
else:
modzy_data = {}
with open('{}/{}'.format(tmppath,MODEL_ZIP_NAME),'rb') as f:
files = [
('image_data', json.dumps(image_data)),
('modzy_data', json.dumps(modzy_data)),
('model', f)
]
file_pointers = []
for key, file_key in [('metadata_path', 'modzy_metadata_data'),
('sample_input_path', 'modzy_sample_input_data')]:
value = modzy_data.get(key)
if value:
fp = open(value, 'rb')
file_pointers.append(fp)
files.append((file_key, fp))
print('Starting build job... ', end='', flush=True)
res = requests.post(self.chassis_build_url, files=files)
res.raise_for_status()
print('Ok!')
for fp in file_pointers:
fp.close()
shutil.rmtree(tmppath)
shutil.rmtree(model_directory)
return res.json()
except Exception as e:
print(e)
            if tmppath and os.path.exists(tmppath):
                shutil.rmtree(tmppath)
            if model_directory and os.path.exists(model_directory):
                shutil.rmtree(model_directory)
return False
###########################################
class ChassisClient:
"""The Chassis Client object.
This class is used to interact with the Kaniko service.
Attributes:
base_url (str): The base url for the API.
"""
def __init__(self,base_url='http://localhost:5000'):
self.base_url = base_url
def get_job_status(self, job_id):
route = f'{urllib.parse.urljoin(self.base_url, routes["job"])}/{job_id}'
res = requests.get(route)
data = res.json()
return data
def block_until_complete(self,job_id,timeout=1800,poll_interval=5):
endby = time.time() + timeout if (timeout is not None) else None
while True:
status = self.get_job_status(job_id)
if status['status']['succeeded'] or status['status']['failed']:
return status
if (endby is not None) and (time.time() > endby - poll_interval):
print('Timed out before completion.')
return False
time.sleep(poll_interval)
def download_tar(self, job_id, output_filename):
url = f'{urllib.parse.urljoin(self.base_url, routes["job"])}/{job_id}/download-tar'
r = requests.get(url)
if r.status_code == 200:
with open(output_filename, 'wb') as f:
f.write(r.content)
else:
print(f'Error download tar: {r.text}')
def create_model(self,context,process_fn=None,batch_process_fn=None,batch_size=None):
if not (process_fn or batch_process_fn):
raise ValueError("At least one of process_fn or batch_process_fn must be provided.")
if (batch_process_fn and not batch_size) or (batch_size and not batch_process_fn):
raise ValueError("Both batch_process_fn and batch_size must be provided for batch support.")
return ChassisModel(context,process_fn,batch_process_fn,batch_size,self.base_url)
| 41.224199 | 127 | 0.610238 | 1,404 | 11,584 | 4.770655 | 0.165954 | 0.041654 | 0.035533 | 0.018812 | 0.496268 | 0.461332 | 0.397283 | 0.372947 | 0.333831 | 0.299492 | 0 | 0.002878 | 0.280041 | 11,584 | 280 | 128 | 41.371429 | 0.80024 | 0.05827 | 0 | 0.314554 | 0 | 0 | 0.121037 | 0.016971 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061033 | false | 0.00939 | 0.065728 | 0 | 0.201878 | 0.046948 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
047ffe9066fea69b5d6a121fa7e090201689c5c8 | 2,659 | py | Python | test/cli/init_command/location_files_case/test_location_files.py | openeuler-mirror/pkgship | 5aaa4953023fde8ff03892fe5608f0711a26a942 | [
"MulanPSL-1.0"
] | null | null | null | test/cli/init_command/location_files_case/test_location_files.py | openeuler-mirror/pkgship | 5aaa4953023fde8ff03892fe5608f0711a26a942 | [
"MulanPSL-1.0"
] | null | null | null | test/cli/init_command/location_files_case/test_location_files.py | openeuler-mirror/pkgship | 5aaa4953023fde8ff03892fe5608f0711a26a942 | [
"MulanPSL-1.0"
] | 1 | 2021-11-20T00:10:53.000Z | 2021-11-20T00:10:53.000Z | #!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
# -*- coding:utf-8 -*-
import os
from test.cli.init_command import InitTestBase
from packageship.application.initialize.base import del_temporary_file
from packageship.application.initialize.repo import RepoFile
class LocationFiles(InitTestBase):
"""Test the local configuration file"""
_dirname = os.path.dirname(__file__)
def setUp(self):
super(LocationFiles, self).setUp()
self.repo_file = RepoFile(temporary_directory=os.path.join(
self._dirname, "tmp"))
self._location_path = os.path.join(self._dirname, "sqlites")
def _get_repo_file_path(self, repo):
_src_db_file = self.repo_file.location_file(path=repo["src_db_file"])
_bin_db_file = self.repo_file.location_file(path=repo["bin_db_file"])
_file_list = self.repo_file.location_file(
path=repo["bin_db_file"], file_type="filelists")
return (_src_db_file, _bin_db_file, _file_list)
def test_normal_location_files(self):
"""
Normal zip package file
"""
_repo_conf = {
"dbname": "openeuler",
"src_db_file": "file://" + os.path.join(self._location_path, "src"),
"bin_db_file": "file://" + os.path.join(self._location_path, "bin"),
"priority": 1
}
self.assertEqual(True, all(self._get_repo_file_path(
repo=_repo_conf)))
def test_not_exists_location(self):
"""
The location file does not exist
"""
_repo_conf = {
"dbname": "openeuler",
"src_db_file": "file:///home/test-openeuler/src",
"bin_db_file": "file:///home/test-openeuler/bin",
"priority": 1
}
self.assertEqual(False, all(self._get_repo_file_path(
repo=_repo_conf)))
def tearDown(self) -> None:
folder = os.path.join(self._dirname, "tmp")
del_temporary_file(path=folder, folder=True)
| 39.686567 | 98 | 0.616773 | 332 | 2,659 | 4.686747 | 0.391566 | 0.042416 | 0.044987 | 0.044987 | 0.382391 | 0.298201 | 0.219794 | 0.219794 | 0.179949 | 0.105398 | 0 | 0.008142 | 0.214742 | 2,659 | 66 | 99 | 40.287879 | 0.737069 | 0.303498 | 0 | 0.210526 | 0 | 0 | 0.126957 | 0.034676 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.131579 | false | 0 | 0.105263 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04802ec7cded7ac0ac872783dbfebaa1d973ca5f | 2,667 | py | Python | apt/transports/filesytem.py | javajawa/debian-repo-remux | b6626b268acd1743208d8a399f8c975316cfbc80 | [
"BSD-2-Clause"
] | 1 | 2019-10-31T08:36:29.000Z | 2019-10-31T08:36:29.000Z | apt/transports/filesytem.py | javajawa/debian-repo-remux | b6626b268acd1743208d8a399f8c975316cfbc80 | [
"BSD-2-Clause"
] | null | null | null | apt/transports/filesytem.py | javajawa/debian-repo-remux | b6626b268acd1743208d8a399f8c975316cfbc80 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
APT transport for a local OS accessible filesystem
"""
import os.path
import urllib.parse
from typing import IO
from apt.transport import Transport
from apt.transport.exceptions import URIMismatchError
from apt.transport.directorylisting import DirectoryListing
class File(Transport):
"""
APT transport for a local OS accessible filesystem
"""
def exists(self, uri: str) -> bool:
"""
Returns whether a given uri exists.
:param str uri:
:return bool:
:raises URIMismatchError:
"""
url: urllib.parse.ParseResult = urllib.parse.urlparse(uri)
if url.scheme != 'file':
raise URIMismatchError("Scheme must be file:")
return os.path.exists(url.path)
def open_read(self, uri: str) -> IO:
"""
Opens a file as an IO-like for reading
:param string uri:
:return IO:
:raises URIMismatchError:
:raises FileNotFoundError:
"""
url: urllib.parse.ParseResult = urllib.parse.urlparse(uri)
if url.scheme != 'file':
raise URIMismatchError("Scheme must be file:")
if not os.path.exists(url.path):
raise FileNotFoundError(url.path + " does not exist")
return open(url.path, 'rb')
def open_write(self, uri: str) -> IO:
"""
Opens a file as an IO-like for writing
:param string uri:
:return:
:raises URIMismatchError:
"""
url: urllib.parse.ParseResult = urllib.parse.urlparse(uri)
if url.scheme != 'file':
raise URIMismatchError("Scheme must be file:")
os.makedirs(os.path.dirname(url.path), exist_ok=True)
return open(url.path, 'wb')
def list_directory(self, uri: str) -> DirectoryListing:
"""
Returns a list of files and directories in a directory
:param string uri:
:return List[str]:
:raises URIMismatchError:
:raises FileNotFoundError:
"""
url: urllib.parse.ParseResult = urllib.parse.urlparse(uri)
if url.scheme != 'file':
raise URIMismatchError("Scheme must be file:")
if not os.path.exists(url.path):
raise FileNotFoundError(url.path + " does not exist")
listing = DirectoryListing()
with os.scandir(url.path) as iterator:
for entry in iterator:
if entry.is_dir():
listing.directories.append(entry.name)
if entry.is_file():
listing.files.append(entry.name)
return listing
| 24.694444 | 66 | 0.594301 | 305 | 2,667 | 5.177049 | 0.272131 | 0.062698 | 0.025332 | 0.063331 | 0.528816 | 0.516783 | 0.516783 | 0.516783 | 0.462318 | 0.462318 | 0 | 0.001075 | 0.302587 | 2,667 | 107 | 67 | 24.925234 | 0.847849 | 0.229096 | 0 | 0.410256 | 0 | 0 | 0.071116 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
048379e73ffaf235bc1f161c9acc29e96bf1085e | 3,845 | py | Python | code/.ipynb_checkpoints/main-checkpoint.py | Bcastet/DeepReconstruction | 0a91eeda4c34350bee6ebafdfc0528060b8142fc | [
"MIT"
] | 8 | 2019-08-08T16:45:45.000Z | 2021-12-09T07:00:52.000Z | code/.ipynb_checkpoints/main-checkpoint.py | Bcastet/DeepReconstruction | 0a91eeda4c34350bee6ebafdfc0528060b8142fc | [
"MIT"
] | null | null | null | code/.ipynb_checkpoints/main-checkpoint.py | Bcastet/DeepReconstruction | 0a91eeda4c34350bee6ebafdfc0528060b8142fc | [
"MIT"
] | 4 | 2020-05-20T02:08:37.000Z | 2021-12-01T08:47:05.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 17 10:12:12 2019
@author: Phan Huy Thong
"""
import os, sys
sys.path.append(os.getcwd())
import argparse
from system import System
from config import Config as cfg
import utils
import torch
if __name__ == '__main__':
#read argument
parser = argparse.ArgumentParser()
parser.add_argument('config', default='default.cfg', help='Specify config file', metavar='FILE')
args = parser.parse_args()
cfg.load_config(args.config)
#run
s = System(cfg)
if cfg.main_task == 'test':
if cfg.operator == 'MRI':
def h(x):
return utils.h_mri(x, mask=s.mask)
def ht(x):
return utils.ht_mri(x, mask=s.mask)
elif cfg.operator == 'Convolution':
def h(x):
return torch.nn.functional.conv2d(x, s.weight)
def ht(y):
return torch.nn.functional.conv_transpose2d(y, s.weight)
################################################# ADD elif HERE FOR NEW OPERATORS
else:
raise Exception('imaging operator not recognized')
if cfg.task == 'train projector':
s.init_optimizer(cfg.lr1)
s.train(1)
if cfg.reset_optimizer:
s.init_optimizer(cfg.lr2)
s.train(2)
if cfg.reset_optimizer:
s.init_optimizer(cfg.lr3)
s.train(3)
if cfg.plot_loss:
s.plot_loss()
elif cfg.task == 'train1':
s.init_optimizer(cfg.lr1)
s.train(1)
if cfg.plot_loss:
s.plot_loss()
elif cfg.task == 'train2':
s.init_optimizer(cfg.lr2)
s.train(2)
if cfg.plot_loss:
s.plot_loss()
elif cfg.task == 'train3':
s.init_optimizer(cfg.lr3)
s.train(3)
if cfg.plot_loss:
s.plot_loss()
elif cfg.task == 'test':
print('reconstructing at gamma = ', cfg.gamma)
snr0, x0 = utils.RSNR(s.x0, s.t)
best_snr, best_rec = s.test(s.x0, s.t, cfg.gamma, h=h, ht=ht)
utils.plot_sub_fig(torch.cat([utils.compress3d(x0), best_rec, utils.compress3d(s.t)], dim=0),
1,3, title=('RSNR = '+str(round(snr0,2)), 'RSNR = '+str(round(best_snr,2)), 'clean'),
save_path=cfg.fig_save_path+'test.png')
elif cfg.task == 'reconstruct':
print('sweep gamma to find best reconstruction')
snr0, x0 = utils.RSNR(s.x0, s.t)
best_snr, best_rec = s.reconstruct(s.x0, s.t, h=h, ht=ht, idx=str(cfg.test_sample_id+1))
utils.plot_sub_fig(torch.cat([utils.compress3d(x0), best_rec, utils.compress3d(s.t)], dim=0),
1,3, title=('RSNR = '+str(round(snr0,2)), 'RSNR = '+str(round(best_snr,2)), 'clean'),
save_path=cfg.fig_save_path+'reconstruct sample '+str(cfg.test_sample_id+1)+'.png')
elif cfg.task == 'overall snr increase':
t = 0
idx = 1
for s.x0, s.t in s.testloader:
snr0, x0 = utils.RSNR(s.x0, s.t)
snr, rec = s.reconstruct(s.x0, s.t, h=h, ht=ht, idx=str(idx))
if cfg.show_reconstruction:
utils.plot_sub_fig(torch.cat([utils.compress3d(x0), rec, utils.compress3d(s.t)], dim=0),
1,3, title=('RSNR = '+str(round(snr0,2)), 'RSNR = '+str(round(snr,2)), 'clean'),
save_path=cfg.fig_save_path+'reconstruct sample '+str(idx)+'.png')
idx += 1
t += snr-snr0
print('average snr increase among all test samples = ', t/cfg.n_test_samples)
else:
raise Exception('task not recognized')
| 36.971154 | 115 | 0.527698 | 517 | 3,845 | 3.806576 | 0.257253 | 0.025407 | 0.014228 | 0.017785 | 0.502541 | 0.489329 | 0.473069 | 0.473069 | 0.448679 | 0.428354 | 0 | 0.029231 | 0.323797 | 3,845 | 104 | 116 | 36.971154 | 0.727692 | 0.039012 | 0 | 0.407407 | 0 | 0 | 0.11163 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049383 | false | 0 | 0.074074 | 0.049383 | 0.17284 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0487bb220d4008a76d9487e6275983871d38df44 | 2,800 | py | Python | movielens_data.py | CLArg-group/Aspect-Item-Recommender-Systems | 6fe98657cb17bcb8f4eb4606bdea7446909f4a71 | [
"Apache-2.0"
] | 1 | 2021-02-09T21:40:08.000Z | 2021-02-09T21:40:08.000Z | movielens_data.py | CLArg-group/Aspect-Item-Recommender-Systems | 6fe98657cb17bcb8f4eb4606bdea7446909f4a71 | [
"Apache-2.0"
] | null | null | null | movielens_data.py | CLArg-group/Aspect-Item-Recommender-Systems | 6fe98657cb17bcb8f4eb4606bdea7446909f4a71 | [
"Apache-2.0"
] | 1 | 2021-02-09T21:40:55.000Z | 2021-02-09T21:40:55.000Z | ''' script to get predictions for movielens data '''
from measures import predictions
from processing import preprocessing
import time
import pickle
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--movielens_data', choices=['small', '100k'], required=True)
script_arguments = vars(parser.parse_args())
movielens_data = script_arguments['movielens_data']
if movielens_data == 'small':
ratings = pickle.load(open("data/MOVIELENS/ml-latest-small/small_ratings_movielens.pkl","rb"))
films = pickle.load(open("data/MOVIELENS/ml-latest-small/small_films_movielens.pkl","rb"))
elif movielens_data == '100k':
ratings = pickle.load(open("data/MOVIELENS/ml-100k/100k_benchmark_ratings.pkl","rb"))
films = pickle.load(open("data/MOVIELENS/ml-100k/100k_benchmark_films_movielens.pkl","rb"))
# remove from ratings the missing films (that were missing info and hence were discarded)
ids_to_del_rf = set(ratings.keys()).difference(set(films.keys()))
ids_to_del_fr = set(films.keys()).difference(set(ratings.keys()))
ids_to_del = ids_to_del_rf.union(ids_to_del_fr)
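    # ids_to_del is the symmetric difference: ids present in only one of the two dicts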
corrected_ratings = dict()
for x in ratings.keys():
if x not in ids_to_del:
curr_rats = []
for curr_rat in ratings[x]:
temp_dict = dict()
temp_dict['user_rating'] = curr_rat['user_rating']
temp_dict['user_rating_date'] = curr_rat['user_rating_date']
temp_dict['user_id'] = 'x'+curr_rat['user_id']
curr_rats.append(temp_dict)
corrected_ratings[x] = curr_rats
ratings = corrected_ratings
corrected_films = dict()
for x in films.keys():
if x not in ids_to_del:
corrected_films[x] = films[x]
films = corrected_films
assert len(ratings) == len(films)
films, ratings_dict, compressed_test_ratings_dict, sims, movies_all_genres_matrix, movies_all_directors_matrix, movies_all_actors_matrix = preprocessing(ratings, films, movielens_data)
start = time.time()
MUR = 0.1
MUG = 0.6
MUA = 0.1
MUD = 0.1
nr_predictions, accuracy, rmse, mae, precision, recall, f1 = predictions(MUR, MUG, MUA, MUD, films, compressed_test_ratings_dict, ratings_dict, sims, movies_all_genres_matrix, movies_all_directors_matrix, movies_all_actors_matrix, movielens_data)
# print results
print("Number of user-items pairs: %d" % nr_predictions)
print("Accuracy: %.2f " % accuracy)
print("RMSE: %.2f" % rmse)
print("MAE: %.2f" % mae)
print("Precision: %.2f" % precision)
print("Recall: %.2f" % recall)
print("F1: %.2f" % f1)
end = time.time()
print("\nComputing strengths took %d seconds" % (end-start))
| 37.333333 | 250 | 0.675714 | 375 | 2,800 | 4.778667 | 0.296 | 0.058036 | 0.03125 | 0.040179 | 0.233259 | 0.233259 | 0.233259 | 0.225446 | 0.203125 | 0.090402 | 0 | 0.01566 | 0.201786 | 2,800 | 74 | 251 | 37.837838 | 0.78613 | 0.052857 | 0 | 0.037736 | 0 | 0 | 0.185087 | 0.08327 | 0 | 0 | 0 | 0 | 0.018868 | 1 | 0 | false | 0 | 0.09434 | 0 | 0.09434 | 0.150943 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0487ce7010d92a962a3a7ae8eef685aa4dc87679 | 11,608 | py | Python | src/main/python/widgets/syntax_highlighters/nasm_highlighter.py | michaelbradley91/NASM-Debugger | c7b5593cfa2583c48c6607ee6e7d608c486bce0a | [
"MIT"
] | null | null | null | src/main/python/widgets/syntax_highlighters/nasm_highlighter.py | michaelbradley91/NASM-Debugger | c7b5593cfa2583c48c6607ee6e7d608c486bce0a | [
"MIT"
] | null | null | null | src/main/python/widgets/syntax_highlighters/nasm_highlighter.py | michaelbradley91/NASM-Debugger | c7b5593cfa2583c48c6607ee6e7d608c486bce0a | [
"MIT"
] | null | null | null | from PyQt5.QtCore import QRegularExpression, Qt, QRegularExpressionMatchIterator, QRegularExpressionMatch, QRegExp
from PyQt5.QtGui import QSyntaxHighlighter, QTextDocument, QTextCharFormat, QFont, QColor
from widgets.syntax_highlighters.highlighting_rule import HighlightingRule
# noinspection SpellCheckingInspection
KEYWORD_PATTERNS = ["aaa", "aad", "aam", "aas", "adc", "add", "and", "call", "cbw", "clc", "cld", "cli", "cmc", "cmp",
"cmpsb",
"cmpsw", "cwd", "daa", "das", "dec", "div", "esc", "hlt", "idiv", "imul", "in", "inc", "int",
"into",
"iret", "ja", "jae", "jb", "jbe", "jc", "jcxz", "je", "jg", "jge", "jl", "jle", "jna", "jnae",
"jnb",
"jnbe", "jnc", "jne", "jng", "jnge", "jnl", "jnle", "jno", "jnp", "jns", "jnz", "jo", "jp", "jpe",
"jpo",
"js", "jz", "jmp", "lahf", "lds", "lea", "les", "lock", "lodsb", "lodsw", "loop", "loope", "loopne",
"loopnz", "loopz", "mov", "movsb", "movsw", "mul", "neg", "nop", "not", "or", "out", "pop", "popf",
"push",
"pushf", "rcl", "rcr", "rep", "repe", "repne", "repnz", "repz", "ret", "retn", "retf", "rol", "ror",
"sahf",
"sal", "sar", "sbb", "scasb", "scasw", "shl", "shr", "stc", "std", "sti", "stosb", "stosw", "sub",
"test",
"wait", "xchg", "xlat", "xor", "bound", "enter", "ins", "leave", "outs", "popa", "pusha", "arpl",
"clts",
"lar", "lgdt", "lidt", "lldt", "lmsw", "loadall", "lsl", "ltr", "sgdt", "sidt", "sldt", "smsw",
"str",
"verr", "verw", "bsf", "bsr", "bt", "btc", "btr", "bts", "cdq", "cmpsd", "cwde", "insd", "iret",
"iretw",
"iretd", "jcxz", "jecxz", "lfs", "lgs", "lss", "lodsd", "movsd", "movsx", "movzx", "outsd", "popad",
"popfd", "pushad", "pushfd", "scasd", "seta", "setae", "setb", "setbe", "setc", "sete", "setg",
"setge",
"setl", "setle", "setna", "setnae", "setnb", "setnbe", "setnc", "setne", "setng", "setnge", "setnl",
"setnle", "setno", "setnp", "setns", "setnz", "seto", "setp", "setpe", "setpo", "sets", "setz",
"shld",
"shrd", "stosd", "popad", "popfd", "pushad", "pushfd", "scasd", "bswap", "cmpxchg", "invd",
"invlpg",
"wbinvd", "xadd", "cpuid", "cmpxchg8b", "rdmsr", "rdtsc", "wrmsr", "rsm", "rdpmc", "cmova",
"cmovae",
"cmovb", "cmovbe", "cmovc", "cmove", "cmovg", "cmovge", "cmovl", "cmovle", "cmovna", "cmovnae",
"cmovnb",
"cmovnbe", "cmovnc", "cmovne", "cmovng", "cmovnge", "cmovnl", "cmovnle", "cmovno", "cmovnp",
"cmovns",
"cmovnz", "cmovo", "cmovp", "cmovpe", "cmovpo", "cmovs", "cmovz", "f2xm1", "fabs", "fadd", "faddp",
"fbld",
"fbstp", "fchs", "fclex", "fcom", "fcomp", "fcompp", "fdecstp", "fdisi", "fdiv", "fdivp", "fdivr",
"fdivrp",
"feni", "ffree", "fiadd", "ficom", "ficomp", "fidiv", "fidivr", "fild", "fimul", "fincstp", "finit",
"fist",
"fistp", "fisub", "fisubr", "fld", "fld1", "fldcw", "fldenv", "fldenvw", "fldl2e", "fldl2t",
"fldlg2",
"fldln2", "fldpi", "fldz", "fmul", "fmulp", "fnclex", "fndisi", "fneni", "fninit", "fnop", "fnsave",
"fnsavew", "fnstcw", "fnstenv", "fnstenvw", "fnstsw", "fpatan", "fprem", "fptan", "frndint",
"frstor",
"frstorw", "fsave", "fsavew", "fscale", "fsqrt", "fst", "fstcw", "fstenv", "fstenvw", "fstp",
"fstsw",
"fsub", "fsubp", "fsubr", "fsubrp", "ftst", "fwait", "fxam", "fxch", "fxtract", "fyl2x", "fyl2xp1",
"fsetpm", "fcos", "fldenvd", "fsaved", "fstenvd", "fprem1", "frstord", "fsin", "fsincos", "fstenvd",
"fucom", "fucomp", "fucompp", "fcmovb", "fcmovbe", "fcmove", "fcmovnb", "fcmovnbe", "fcmovne",
"fcmovnu",
"fcmovu", "fcomi", "fcomip", "fucomi", "fucomip", "cdqe", "cqo", "movmskps", "movmskpd", "popcnt",
"lzcnt",
"cmpsq", "scasq", "movsq", "lodsq", "stosq", "jrcxz", "iretq", "pushfq", "popfq", "cmpxchg16b",
"jrcxz",
"insb", "insw", "outsb", "outsw", "lfence", "sfence", "mfence", "prefetch", "prefetchl",
"prefetchw",
"clflush", "sysenter", "sysexit", "syscall", "sysret"]
KEYWORD_REGEX = QRegularExpression(str.join("|", [f"\\b{keyword}\\b" for keyword in KEYWORD_PATTERNS]),
                                   QRegularExpression.CaseInsensitiveOption)

# noinspection SpellCheckingInspection
MEMORY_PATTERNS = ["\\bresb\\b", "\\bresw\\b", "\\bresd\\b", "\\bresq\\b", "\\brest\\b", "\\breso\\b", "\\bresy\\b",
                   "\\bddq\\b", "\\bresdq\\b", "\\bdb\\b", "\\bdw\\b", "\\bdd\\b", "\\bdq\\b", "\\bdt\\b", "\\bdo\\b",
                   "\\bdy\\b", "\\bequ\\b", "\\bbyte[\\s\\[]", "\\bword[\\s\\[]", "\\bdword[\\s\\[]",
                   "\\bqword[\\s\\[]", "\\btword[\\s\\[]", "\\boword[\\s\\[]", "\\byword[\\s\\[]", "\\[", "\\]"]
MEMORY_REGEX = QRegularExpression(str.join("|", MEMORY_PATTERNS), QRegularExpression.CaseInsensitiveOption)

# noinspection SpellCheckingInspection
REGISTER_PATTERNS = ["eax", "ebx", "ecx", "edx", "ebp", "esp", "edi", "esi", "ax", "bx", "cx", "dx", "bp", "sp", "si",
                     "di", "al", "ah", "bl", "bh", "cl", "ch", "dl", "dh",
                     # 64 bit registers
                     "rax", "rbx", "rcx", "rdx", "rbp", "rsp", "rdi", "rsi", "spl", "bpl", "sil", "dil", "r8", "r8d",
                     "r8w", "r8b", "r9", "r9d", "r9w", "r9b", "r10", "r10d", "r10w", "r10b", "r11", "r11d", "r11w",
                     "r11b", "r12", "r12d", "r12w", "r12b", "r13", "r13d", "r13w", "r13b", "r14", "r14d", "r14w",
                     "r14b", "r15", "r15d", "r15w", "r15b"]
REGISTER_REGEX = QRegularExpression(str.join("|", [f"\\b{register}\\b" for register in REGISTER_PATTERNS]),
                                    QRegularExpression.CaseInsensitiveOption)

LABEL_PATTERNS = ["\\.[^\\s:]+[^:]", "\\.[^\\s:]+:", "\\S+:"]
LABEL_REGEX = QRegularExpression(str.join("|", LABEL_PATTERNS))

NUMBER_PATTERNS = ["\\b[\\-\\+]?\\d+\\.\\d+\\b", "\\b0[bo]\\d+\\b", "\\b[0-9A-Fa-f]+h\\b", "\\b0[xh][0-9A-Fa-f]+\\b",
                   "\\b[\\-\\+]?\\d+[bod]?\\b"]
NUMBER_REGEX = QRegularExpression(str.join("|", NUMBER_PATTERNS))

# noinspection SpellCheckingInspection
SYSTEM_PATTERNS = ["\\btimes\\b", "\\bsection\\b", "\\.bss\\b", "\\.text\\b", "\\.data\\b", "\\bglobal\\b",
                   "\\.rodata\\b", "\\bextern\\b", "\\%arg\\b", "\\%assign\\b", "\\%clear\\b", "\\%comment\\b",
                   "\\%define\\b", "\\%defstr\\b", "\\%deftok\\b", "\\%depend\\b", "\\%line\\b", "\\%local\\b",
                   "\\%macro\\b", "\\%n\\b", "\\%pathsearch\\b", "\\%pop\\b", "\\%push\\b", "\\%rep\\b", "\\%repl\\b",
                   "\\%rotate\\b", "\\%stacksize\\b", "\\%strcat\\b", "\\%strlen\\b", "\\%substr\\b", "\\%undef\\b",
                   "\\%unmacro\\b", "\\%use\\b", "\\%warning\\b", "\\%xdefine\\b", "\\%endcomment\\b", "\\%endif\\b",
                   "\\%endmacro\\b", "\\%endrep\\b", "\\%error\\b", "\\%exitrep\\b", "\\%fatal\\b", "\\%idefine\\b",
                   "\\%else\\b", "\\%imacro\\b", "\\%include\\b", "\\%if\\b", "\\%ifctx\\b", "\\%ifdef\\b",
                   "\\%ifempty\\b", "\\%ifenv\\b", "\\%ifidn\\b", "\\%ifidni\\b", "\\%ifmacro\\b", "\\%ifstr\\b",
                   "\\%iftoken\\b", "\\%ifnum\\b", "\\%ifid\\b", "\\%elif\\b", "\\%elifctx\\b", "\\%elifdef\\b",
                   "\\%elifempty\\b", "\\%elifenv\\b", "\\%elifidn\\b", "\\%elifidni\\b", "\\%elifmacro\\b",
                   "\\%elifstr\\b", "\\%eliftoken\\b", "\\%elifnum\\b", "\\%elifid\\b", "\\%ifn\\b", "\\%ifnctx\\b",
                   "\\%ifndef\\b", "\\%ifnempty\\b", "\\%ifnenv\\b", "\\%ifnidn\\b", "\\%ifnidni\\b", "\\%ifnmacro\\b",
                   "\\%ifnstr\\b", "\\%ifntoken\\b", "\\%ifnnum\\b", "\\%ifnid\\b", "\\%elifn\\b", "\\%elifnctx\\b",
                   "\\%elifndef\\b", "\\%elifnempty\\b", "\\%elifnenv\\b", "\\%elifnidn\\b", "\\%elifnidni\\b",
                   "\\%elifnmacro\\b", "\\%elifnstr\\b", "\\%elifntoken\\b", "\\%elifnnum\\b", "\\%elifnid\\b"]
SYSTEM_REGEX = QRegularExpression(str.join("|", SYSTEM_PATTERNS), QRegularExpression.CaseInsensitiveOption)

QUOTES_PATTERNS = ["\"[^\"]*\"", "'[^']*'", "`[^`]*`"]
QUOTES_REGEX = QRegularExpression(str.join("|", QUOTES_PATTERNS))

SINGLE_LINE_COMMENT_PATTERNS = [";[^\n]*"]
SINGLE_LINE_COMMENT_REGEX = QRegularExpression(str.join("|", SINGLE_LINE_COMMENT_PATTERNS))
class NASMHighlighter(QSyntaxHighlighter):
    """ Highlight NASM syntax in files! """

    def __init__(self, document: QTextDocument):
        super().__init__(document)
        self.highlighting_rules = []

        keyword_format = QTextCharFormat()
        keyword_format.setForeground(Qt.blue)
        keyword_format.setFontWeight(QFont.Bold)

        memory_format = QTextCharFormat()
        memory_format.setForeground(QColor(0, 128, 255))

        register_format = QTextCharFormat()
        register_format.setForeground(QColor(153, 0, 204))

        label_format = QTextCharFormat()
        label_format.setForeground(QColor(128, 0, 0))

        number_format = QTextCharFormat()
        number_format.setForeground(QColor(255, 122, 0))

        system_format = QTextCharFormat()
        system_format.setForeground(Qt.darkCyan)

        quotes_format = QTextCharFormat()
        quotes_format.setForeground(QColor(128, 128, 128))

        comment_format = QTextCharFormat()
        comment_format.setForeground(Qt.darkGreen)

        # Note that the order these are applied in matters!
        self.highlighting_rules.append(HighlightingRule(pattern=KEYWORD_REGEX, format=keyword_format))
        self.highlighting_rules.append(HighlightingRule(pattern=MEMORY_REGEX, format=memory_format))
        self.highlighting_rules.append(HighlightingRule(pattern=REGISTER_REGEX, format=register_format))
        self.highlighting_rules.append(HighlightingRule(pattern=LABEL_REGEX, format=label_format))
        self.highlighting_rules.append(HighlightingRule(pattern=NUMBER_REGEX, format=number_format))
        self.highlighting_rules.append(HighlightingRule(pattern=SYSTEM_REGEX, format=system_format))
        self.highlighting_rules.append(HighlightingRule(pattern=QUOTES_REGEX, format=quotes_format))
        self.highlighting_rules.append(HighlightingRule(pattern=SINGLE_LINE_COMMENT_REGEX, format=comment_format))

    def highlightBlock(self, text: str):
        """
        Highlight the given text block, which is assumed to be one line of text. This is the case
        with a QPlainTextEdit widget.
        """
        for rule in self.highlighting_rules:
            match_iterator: QRegularExpressionMatchIterator = rule.pattern.globalMatch(text)
            while match_iterator.hasNext():
                match: QRegularExpressionMatch = match_iterator.next()
                self.setFormat(match.capturedStart(), match.capturedLength(), rule.format)
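# Added usage sketch (not part of the original module): attaching the
# highlighter to a QPlainTextEdit. The demo text is an arbitrary NASM snippet.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication, QPlainTextEdit

    app = QApplication(sys.argv)
    editor = QPlainTextEdit()
    highlighter = NASMHighlighter(editor.document())  # keep a reference so it is not garbage collected
    editor.setPlainText("section .text\nglobal _start\n_start:\n    mov eax, 1 ; exit")
    editor.show()
    sys.exit(app.exec_())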
| 67.098266 | 120 | 0.504221 | 1,116 | 11,608 | 5.165771 | 0.603943 | 0.027754 | 0.036427 | 0.041631 | 0.125585 | 0.087771 | 0.067997 | 0 | 0 | 0 | 0 | 0.013193 | 0.249052 | 11,608 | 172 | 121 | 67.488372 | 0.648159 | 0.031702 | 0 | 0.014493 | 0 | 0 | 0.325712 | 0.006609 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014493 | false | 0 | 0.021739 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
048a2c388530b970ba835f4824f7f77e7ebda823 | 1,964 | py | Python | Intuitive.py | Sundragon1993/Smart-real-estate-assistant | 3e901e86e1009bf464a44f870f3ee911217a9916 | [
"MIT"
] | null | null | null | Intuitive.py | Sundragon1993/Smart-real-estate-assistant | 3e901e86e1009bf464a44f870f3ee911217a9916 | [
"MIT"
] | null | null | null | Intuitive.py | Sundragon1993/Smart-real-estate-assistant | 3e901e86e1009bf464a44f870f3ee911217a9916 | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from sklearn import preprocessing
from keras.layers import Dropout
from keras import regularizers
df = pd.read_csv('housepricedata.csv')
print(df)
dataset = df.values
print(dataset)
X = dataset[:, 0:10]
Y = dataset[:, 10]
print(X)
min_max_scaler = preprocessing.MinMaxScaler()
X_scale = min_max_scaler.fit_transform(X)
print(X_scale)
X_train, X_val_and_test, Y_train, Y_val_and_test = train_test_split(X_scale, Y, test_size=0.3)
X_val, X_test, Y_val, Y_test = train_test_split(X_val_and_test, Y_val_and_test, test_size=0.5)
print(X_train.shape, X_val.shape, X_test.shape, Y_train.shape, Y_val.shape, Y_test.shape)
model = Sequential([
    Dense(1000, activation='relu', kernel_regularizer=regularizers.l2(0.01), input_shape=(10,)),
    Dropout(0.3),
    Dense(1000, activation='relu', kernel_regularizer=regularizers.l2(0.01)),
    Dropout(0.3),
    Dense(1000, activation='relu', kernel_regularizer=regularizers.l2(0.01)),
    Dropout(0.3),
    Dense(1000, activation='relu', kernel_regularizer=regularizers.l2(0.01)),
    Dropout(0.3),
    Dense(1, activation='sigmoid', kernel_regularizer=regularizers.l2(0.01)),
])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
hist = model.fit(X_train, Y_train,
                 batch_size=32, epochs=100,
                 validation_data=(X_val, Y_val))
print(model.evaluate(X_test, Y_test)[1])
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper right')
plt.show()
# NB: Keras >= 2.3 / tf.keras name these history keys 'accuracy'/'val_accuracy'
# rather than 'acc'/'val_acc'; adjust to match the installed version.
plt.plot(hist.history['acc'])
plt.plot(hist.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='lower right')
plt.show()
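# Added sketch (not part of the original script): an alternative to the fit()
# call above that stops training early on stalled validation loss. The
# patience value is an illustrative choice.
def fit_with_early_stopping():
    from keras.callbacks import EarlyStopping
    early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
    return model.fit(X_train, Y_train,
                     batch_size=32, epochs=100,
                     validation_data=(X_val, Y_val),
                     callbacks=[early_stop])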
| 29.757576 | 96 | 0.721996 | 304 | 1,964 | 4.470395 | 0.279605 | 0.014717 | 0.106696 | 0.114054 | 0.348786 | 0.272995 | 0.247976 | 0.247976 | 0.19794 | 0.19794 | 0 | 0.036109 | 0.125764 | 1,964 | 65 | 97 | 30.215385 | 0.755387 | 0 | 0 | 0.207547 | 0 | 0 | 0.090631 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.150943 | 0 | 0.150943 | 0.113208 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
048ae9bb8b1e3a03ae719af21c92a5fb4cfdf9ae | 7,934 | py | Python | arxiv_miner/mining_engine.py | valayDave/arxiv-miner | 47751ac731e797d9a0e49959d11c07857e747aee | [
"MIT"
] | 95 | 2021-05-28T23:07:13.000Z | 2022-02-08T02:44:09.000Z | arxiv_miner/mining_engine.py | valayDave/arxiv-miner | 47751ac731e797d9a0e49959d11c07857e747aee | [
"MIT"
] | 7 | 2021-05-30T01:46:49.000Z | 2021-07-15T23:18:54.000Z | arxiv_miner/mining_engine.py | valayDave/arxiv-miner | 47751ac731e797d9a0e49959d11c07857e747aee | [
"MIT"
] | 6 | 2021-05-29T09:58:34.000Z | 2021-06-10T10:32:27.000Z | """
Scraping Engine creates the Identity Data for the papers.

The Mining Engine on Instantiation
    - Will check for the new papers to mine.
    - It will create an ArxivPaper
        --> Download Latex
        --> Parse Latex
        --> Semantically parse the paper here too.
        --> Mine the ontology here too.
"""
from .database import ArxivDatabase
from .record import ArxivRecord,ArxivIdentity,ArxivSematicParsedResearch,Ontology,Author
from .logger import create_logger
from .exception import ArxivAPIException
from .ontology_miner import OntologyMiner
from .paper import ArxivPaper,ResearchPaperFactory
import time
from multiprocessing import Process,Event
from signal import signal, SIGINT
import random
import string
from typing import List
def random_string(stringLength=8):
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for i in range(stringLength))


class MiningEngine:
    """
    Run as Isolated Process
    - Query database for an unmined paper.
    - Mine the paper with an ArxivPaper object.
    - If arXiv responds with 500 errors, wait and retry.
    """
    def __init__(self,
                 database: ArxivDatabase,
                 data_root_path,
                 detex_path):
        self.db = database
        self.data_root_path = data_root_path
        self.detex_path = detex_path
        self.logger = create_logger(self.__class__.__name__ + "__" + random_string())

    def mine_record(self, paper_record: ArxivRecord):
        paper_obj = ArxivPaper.from_arxiv_record(self.data_root_path,
                                                 paper_record,
                                                 detex_path=self.detex_path)
        try:
            paper_obj.mine_paper()
        except Exception as e:
            self.logger.error('Failed Mining Paper : %s\n\n%s' % (paper_obj.identity.identity, str(e)))
            return None
        return paper_obj

    def _paper_mining_logic(self):
        """_paper_mining_logic
        1. Get unmined `ArxivRecord`
        2. Create `ArxivPaper` from `ArxivRecord` and mine it
        3. Create `ArxivSematicParsedResearch` from `ArxivRecord` and save it
        4. Save record and mark as mined
        """
        paper_record = self.db.get_unmined_paper()
        paper_mined = False
        if not paper_record:
            return None, paper_mined

        self.logger.info("Mining Paper %s" % paper_record.identity.identity)
        paper_obj = self.mine_record(paper_record)
        if paper_obj is not None:
            paper_mined = True
            self.db.save_record(paper_obj.to_arxiv_record())
            paper_record = paper_obj.to_arxiv_record()

        ontology = Ontology()
        if OntologyMiner.is_minable:
            ontology = OntologyMiner.mine_paper(paper_record.identity)
            try:
                self.db.set_many_ontology(ontology.union)
            except:
                self.logger.info("No Ontology Saved")
        else:
            self.logger.info("No Ontology Saved")

        try:
            self.db.set_many_authors([Author(name=xp) for xp in paper_record.identity.authors])
        except:
            self.logger.info("No Author Saved")

        self.db.set_semantic_parsed_research(ArxivSematicParsedResearch(
            identity=paper_record.identity,
            research_object=ResearchPaperFactory.from_arxiv_record(paper_record),
            ontology=ontology
        ))
        self.db.set_mined(paper_record.identity, paper_mined)
        return paper_record, paper_mined
class SourceHarvestingEngine:
    """
    Run as Isolated Process
    - Given a list of ids, it will download the tar source of each paper and
      put it into a folder. On failure it waits for arxiv to forgive :)

    Args :
        id_list : List[str] : list of arxiv id strings that will be used for the harvest
    """
    def __init__(self, id_list: List[str], data_root_path, error_sleep_time=5, scrape_sleep_time=3):
        super().__init__()
        self.id_list = id_list
        self.data_root_path = data_root_path
        self.logger = create_logger(self.__class__.__name__ + "__" + random_string())
        self.error_sleep_time = error_sleep_time
        self.scrape_sleep_time = scrape_sleep_time

    def harvest_one(self, arxiv_id: str):
        paper = ArxivPaper(arxiv_id, self.data_root_path, build_paper=False)
        download_path = paper.download_latex()
        return download_path

    def harvest(self):
        harvest_papers_paths = []
        retry_map = {}
        while len(self.id_list) > 0:
            arxiv_id = self.id_list.pop()
            try:
                download_path = self.harvest_one(arxiv_id)
                harvest_papers_paths.append((arxiv_id, download_path))
                time.sleep(self.scrape_sleep_time)
            except Exception as e:
                # Upon exception, retry up to 3 times by adding the id back to
                # the list. After that, give up on the id.
                self.logger.error(f"Latex download failed for ID {arxiv_id}: {str(e)}. Sleeping for {self.error_sleep_time}s")
                if arxiv_id in retry_map:
                    if retry_map[arxiv_id] > 3:
                        continue
                    else:
                        retry_map[arxiv_id] += 1
                        self.id_list.append(arxiv_id)
                else:
                    retry_map[arxiv_id] = 1
                    self.id_list.append(arxiv_id)
                time.sleep(self.error_sleep_time)
        return harvest_papers_paths
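# Added usage sketch (not part of the original module); the arxiv ids and the
# data path below are placeholders.
def _example_harvest():
    harvester = SourceHarvestingEngine(["2003.00001", "2003.00002"], "/tmp/arxiv_data")
    return harvester.harvest()  # -> [(arxiv_id, download_path), ...]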
class MiningProcess(Process, MiningEngine):
    def __init__(self,
                 database: ArxivDatabase,
                 data_root_path,
                 detex_path,
                 mining_interval=5,
                 mining_limit=30,
                 empty_wait_time=600,
                 sleep_interval_count=10):
        # Instantiate the processes.
        Process.__init__(self, daemon=False)  # Not a daemon process.
        MiningEngine.__init__(self, database, data_root_path, detex_path)
        self.mining_interval = mining_interval
        self.empty_wait_time = empty_wait_time
        self.mining_limit = mining_limit
        self.num_mined = 0
        self.exit = Event()
        self.sleep_interval_count = sleep_interval_count
        signal(SIGINT, self.shutdown)

    def run(self):
        """run
        This will run the MiningEngine's miner.
        """
        self.start_mining()

    def shutdown(self, signal_received, frame):
        # Handle any cleanup here
        self.logger.info('SIGINT or CTRL-C detected. Exiting gracefully')
        self.exit.set()
        exit(0)

    def start_mining(self):
        while True:
            if self.mining_limit is not None:
                if self.num_mined == self.mining_limit:
                    break
            if self.exit.is_set():
                break
            # Sleep every `sleep_interval_count` records
            if self.num_mined % self.sleep_interval_count == 0 and self.num_mined > 0:
                time.sleep(self.empty_wait_time)
            time.sleep(self.mining_interval)
            paper_record, mined_status = self._paper_mining_logic()
            self.num_mined += 1
            if not paper_record:  # Sleep if the DB says there are no unmined papers
                self.logger.info("No Record Found Sleeping For %d" % self.empty_wait_time)
                time.sleep(self.empty_wait_time)
                continue
            if mined_status is False:
                self.logger.error('Couldnt Mine Paper : %s' % paper_record.identity.identity)
                time.sleep(self.empty_wait_time)
                continue
            self.logger.info('Saved Paper To DB : %s Completed Mining %d Paper' % (paper_record.identity.identity, self.num_mined))
        self.logger.info('Miner Mined : %d' % self.num_mined)
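# Added usage sketch (not part of the original module); `db` is assumed to be
# a connected ArxivDatabase and the paths below are placeholders.
def _example_mining_process(db):
    proc = MiningProcess(db, "/tmp/arxiv_data", "/usr/bin/detex", mining_limit=10)
    proc.start()  # runs start_mining() in a child process
    proc.join()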
| 37.961722 | 136 | 0.612806 | 947 | 7,934 | 4.87434 | 0.223865 | 0.040511 | 0.025997 | 0.018414 | 0.168328 | 0.14753 | 0.100737 | 0.075823 | 0.063692 | 0.063692 | 0 | 0.005123 | 0.311066 | 7,934 | 209 | 137 | 37.961722 | 0.839371 | 0.159188 | 0 | 0.226027 | 0 | 0.006849 | 0.053413 | 0.00352 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075342 | false | 0 | 0.082192 | 0 | 0.226027 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
048b183da7874e695331f6fac93ac63e58a5596b | 3,190 | py | Python | janus/janus/analysis/tree_pairs_analysis.py | josepablocam/janus-public | 4713092b27d02386bdb408213d8edc0dc5859eec | [
"MIT"
] | null | null | null | janus/janus/analysis/tree_pairs_analysis.py | josepablocam/janus-public | 4713092b27d02386bdb408213d8edc0dc5859eec | [
"MIT"
] | null | null | null | janus/janus/analysis/tree_pairs_analysis.py | josepablocam/janus-public | 4713092b27d02386bdb408213d8edc0dc5859eec | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from argparse import ArgumentParser
from collections import defaultdict
import numpy as np
import os
import pickle
import matplotlib
matplotlib.use('pdf')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['font.size'] = 12
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from janus.repair.tree_pairs import CorpusEntry, TreePairCorpus
def create_distance_df(corpus):
    records = []
    for entry in corpus:
        rec = {"distance": entry.distance, "method": corpus.sample_method}
        records.append(rec)
    return pd.DataFrame(records)


def plot_distance_ecdf(df):
    fig, ax = plt.subplots(1)
    methods = sorted(df["method"].unique())
    colors = sns.color_palette("colorblind", len(methods))
    palette = {m: c for m, c in zip(methods, colors)}
    sns.ecdfplot(
        data=df,
        x="distance",
        hue="method",
        ax=ax,
        palette=palette,
        hue_order=methods,
    )
    return ax


def get_args():
    parser = ArgumentParser(description="Compare post-tree sampling methods")
    parser.add_argument(
        "--input", type=str, nargs="+", help="TreePairCorpus files")
    parser.add_argument(
        "--output_dir", type=str, help="Output directory for analysis results")
    return parser.parse_args()


def main():
    args = get_args()
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    dist_dfs = []
    compute_times = defaultdict(lambda: [])
    for path in args.input:
        with open(path, "rb") as fin:
            corpus = pickle.load(fin)
        dist_df = create_distance_df(corpus)
        dist_dfs.append(dist_df)
        compute_times[corpus.sample_method].append(corpus.compute_time)

    combined_dist_df = pd.concat(dist_dfs, axis=0)
    print("Number of pipeline pairs")
    print(combined_dist_df.groupby("method").size())

    dist_ecdf = plot_distance_ecdf(combined_dist_df)
    dist_ecdf.get_figure().savefig(
        os.path.join(args.output_dir, "distance_ecdf.pdf"))

    summary_df = combined_dist_df.groupby("method")["distance"].agg(
        ["mean", "max", "std"])
    summary_df = summary_df.reset_index()
    compute_times_info = {
        k: (np.mean(v), np.std(v))
        for k, v in compute_times.items()
    }
    summary_df["mean_compute_time_str"] = [
        "{:.2f}(+/- {:.2f})".format(*compute_times_info[m])
        for m in summary_df["method"]
    ]
    summary_df["mean_distance_str"] = [
        "{:.2f}(+/- {:.2f})".format(m, d)
        for m, d in zip(summary_df["mean"], summary_df["std"])
    ]
    summary_df = summary_df[[
        "method", "mean_distance_str", "mean_compute_time_str"
    ]]
    summary_df = summary_df.rename(
        columns={
            "method": "Sampling method",
            "mean_distance_str": "Mean (SD) Distance",
            "mean_compute_time_str": "Mean (SD) Time (s)"
        })
    summary_df.to_latex(
        os.path.join(args.output_dir, "summary.tex"), index=False)


if __name__ == "__main__":
    try:
        main()
    except Exception as err:
        import pdb
        pdb.post_mortem()
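# Added usage note (not part of the original script): a typical invocation,
# with placeholder corpus paths.
#
#   python tree_pairs_analysis.py --input corpus_a.pkl corpus_b.pkl --output_dir results/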
| 28.738739 | 79 | 0.638245 | 413 | 3,190 | 4.719128 | 0.37046 | 0.060031 | 0.02668 | 0.027707 | 0.098512 | 0.023602 | 0 | 0 | 0 | 0 | 0 | 0.005274 | 0.227273 | 3,190 | 110 | 80 | 29 | 0.785396 | 0.006583 | 0 | 0.021978 | 0 | 0 | 0.160354 | 0.019886 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043956 | false | 0 | 0.120879 | 0 | 0.197802 | 0.021978 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
048b8a47b92597ba13d720ac05cab93a23005c88 | 5,807 | py | Python | models/ConvLarge.py | iSarmad/MeanTeacher-SNTG-HybridNet | 7ca9f8fc89a7be0524b2a0fb648678b9556f8843 | [
"MIT"
] | 24 | 2019-01-22T06:20:41.000Z | 2022-03-26T07:51:40.000Z | models/ConvLarge.py | iSarmad/MeanTeacher-SNTG-HybridNet | 7ca9f8fc89a7be0524b2a0fb648678b9556f8843 | [
"MIT"
] | 1 | 2021-04-12T06:27:43.000Z | 2021-04-12T06:27:43.000Z | models/ConvLarge.py | iSarmad/MeanTeacher-SNTG-HybridNet | 7ca9f8fc89a7be0524b2a0fb648678b9556f8843 | [
"MIT"
] | 12 | 2019-01-03T07:18:06.000Z | 2021-12-09T18:24:27.000Z | import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.nn.init import kaiming_normal_
from torch.nn.utils import weight_norm
from torch.autograd.variable import Variable
import math
__all__ = ['convlarge']
# noise function taken from blog: https://ferretj.github.io/ml/2018/01/22/temporal-ensembling.html
class GaussianNoise(nn.Module):
    def __init__(self, shape=(100, 1, 28, 28), std=0.05):
        super(GaussianNoise, self).__init__()
        self.noise1 = Variable(torch.zeros(shape).cuda())
        self.std1 = std
        self.register_buffer('noise2', self.noise1)  # My own contribution: registering a buffer for DataParallel usage

    def forward(self, x):
        c = x.shape[0]
        self.noise2.data.normal_(0, std=self.std1)
        return x + self.noise2[:c]


class Net(nn.Module):
    def __init__(self, args, std=0.15):
        super(Net, self).__init__()
        self.args = args
        self.std = std
        self.gn = GaussianNoise(shape=(args.batch_size, 3, 32, 32), std=self.std)
        if self.args.BN:
            self.BN1a = nn.BatchNorm2d(128)
            self.BN1b = nn.BatchNorm2d(128)
            self.BN1c = nn.BatchNorm2d(128)
        self.conv1a = (nn.Conv2d(3, 128, 3, padding=1))
        self.conv1b = (nn.Conv2d(128, 128, 3, padding=1))
        self.conv1c = (nn.Conv2d(128, 128, 3, padding=1))
        self.pool1 = nn.MaxPool2d(2, 2)
        self.drop1 = nn.Dropout(0.5)
        if self.args.BN:
            self.BN2a = nn.BatchNorm2d(256)
            self.BN2b = nn.BatchNorm2d(256)
            self.BN2c = nn.BatchNorm2d(256)
        self.conv2a = (nn.Conv2d(128, 256, 3, padding=1))
        self.conv2b = (nn.Conv2d(256, 256, 3, padding=1))
        self.conv2c = (nn.Conv2d(256, 256, 3, padding=1))
        self.pool2 = nn.MaxPool2d(2, 2)
        self.drop2 = nn.Dropout(0.5)  # nn.Dropout2d
        if self.args.BN:
            self.BN3a = nn.BatchNorm2d(512)
            self.BN3b = nn.BatchNorm2d(256)
            self.BN3c = nn.BatchNorm2d(128)
        self.conv3a = (nn.Conv2d(256, 512, 3))
        self.conv3b = (nn.Conv2d(512, 256, 1))
        self.conv3c = (nn.Conv2d(256, 128, 1))
        self.pool3 = nn.AvgPool2d(6, 6)
        self.dense = (nn.Linear(128, 10))
        if self.args.BN:
            self.BNdense = nn.BatchNorm1d(10)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        # for m in self.modules():  # TODO THIS IS A BIG PROBLEM
        #     if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv1d) or isinstance(
        #             m, nn.Linear):
        #         kaiming_normal_(m.weight.data)  # initialize weights with normal distribution
        #         if m.bias is not None:
        #             m.bias.data.zero_()  # initialize bias as zero
        #     elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
        #         m.weight.data.fill_(1)
        #         m.bias.data.zero_()

    def forward(self, x):
        if self.training:
            x = self.gn(x)
        if self.args.BN:
            x = F.leaky_relu(self.BN1a(self.conv1a(x)), negative_slope=0.1)
            x = F.leaky_relu(self.BN1b(self.conv1b(x)), negative_slope=0.1)
            x = F.leaky_relu(self.BN1c(self.conv1c(x)), negative_slope=0.1)
            x = self.drop1(self.pool1(x))
            x = F.leaky_relu(self.BN2a(self.conv2a(x)), negative_slope=0.1)
            x = F.leaky_relu(self.BN2b(self.conv2b(x)), negative_slope=0.1)
            x = F.leaky_relu(self.BN2c(self.conv2c(x)), negative_slope=0.1)
            x = self.drop2(self.pool2(x))
            x = F.leaky_relu(self.BN3a(self.conv3a(x)), negative_slope=0.1)
            x = F.leaky_relu(self.BN3b(self.conv3b(x)), negative_slope=0.1)
            x = F.leaky_relu(self.BN3c(self.conv3c(x)), negative_slope=0.1)
            x = self.pool3(x)
            h = x
            x = x.view(-1, 128)
            x = self.BNdense(self.dense(x))  # F.softmax(, dim=1)
        else:
            x = F.leaky_relu(self.conv1a(x), negative_slope=0.1)
            x = F.leaky_relu(self.conv1b(x), negative_slope=0.1)
            x = F.leaky_relu(self.conv1c(x), negative_slope=0.1)
            x = self.drop1(self.pool1(x))
            x = F.leaky_relu(self.conv2a(x), negative_slope=0.1)
            x = F.leaky_relu(self.conv2b(x), negative_slope=0.1)
            x = F.leaky_relu(self.conv2c(x), negative_slope=0.1)
            x = self.drop2(self.pool2(x))
            x = F.leaky_relu(self.conv3a(x), negative_slope=0.1)
            x = F.leaky_relu(self.conv3b(x), negative_slope=0.1)
            x = F.leaky_relu(self.conv3c(x), negative_slope=0.1)
            x = self.pool3(x)
            h = x
            x = x.view(-1, 128)
            x = self.dense(x)  # F.softmax(, dim=1)
        if self.args.sntg == True:
            return x, h
        else:
            return x


def convlarge(args, data=None, nograd=False):
    model = Net(args)
    if data is not None:
        model.load_state_dict(data['state_dict'])
        model = model.cuda()
    model = nn.DataParallel(model).cuda()
    if nograd:
        for param in model.parameters():
            param.detach_()
    return model
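# Added usage sketch (not part of the original module): the attribute names on
# `args` (BN, sntg, batch_size) are the ones Net reads above; the values are
# illustrative and a CUDA device is required.
def _example_build():
    from argparse import Namespace
    args = Namespace(BN=True, sntg=False, batch_size=100)
    return convlarge(args)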
| 37.464516 | 168 | 0.580851 | 841 | 5,807 | 3.919144 | 0.200951 | 0.042476 | 0.038228 | 0.060073 | 0.458131 | 0.359223 | 0.359223 | 0.359223 | 0.326456 | 0.290049 | 0 | 0.075552 | 0.275185 | 5,807 | 154 | 169 | 37.707792 | 0.707531 | 0.165834 | 0 | 0.174312 | 0 | 0 | 0.005206 | 0 | 0 | 0 | 0 | 0.006494 | 0 | 1 | 0.045872 | false | 0 | 0.06422 | 0 | 0.165138 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
048b9de36f4ed588142ded1ea107dfbc36e23fb6 | 932 | py | Python | track/urls.py | IJaccojwang/loop | c534051ac0a0744638624376c814e22dd62fbd08 | [
"MIT"
] | null | null | null | track/urls.py | IJaccojwang/loop | c534051ac0a0744638624376c814e22dd62fbd08 | [
"MIT"
] | 4 | 2021-03-19T01:02:03.000Z | 2021-09-08T01:01:45.000Z | track/urls.py | IJaccojwang/loop | c534051ac0a0744638624376c814e22dd62fbd08 | [
"MIT"
] | null | null | null | from django.conf.urls.static import static
from django.conf.urls import url
from django.conf import settings
from . import views
urlpatterns = [
    url('^$', views.index, name='index'),
    url(r'^profile$', views.profile, name='profile'),
    url(r'^profile/edit$', views.edit_profile, name='edit'),
    url(r'^profile/update$', views.update_profile, name='update_profile'),
    url(r'^notifications$', views.news, name='notifications'),
    url(r'^notifications/new$', views.new_notification, name='new_notification'),
    url(r'^health$', views.health, name='health'),
    url(r'^authorities$', views.authorities, name='authorities'),
    url(r'^businesses$', views.businesses, name='businesses'),
    url(r'^businesses/new$', views.new_business, name='new_business'),
    url(r'^search/', views.search_results, name='search_results'),
]
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 44.380952 | 81 | 0.716738 | 123 | 932 | 5.333333 | 0.252033 | 0.060976 | 0.064024 | 0.054878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.104077 | 932 | 21 | 81 | 44.380952 | 0.785629 | 0 | 0 | 0 | 0 | 0 | 0.261522 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.210526 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
048f525023727acec650d30bcde2c29bc495c599 | 2,401 | py | Python | tests/presentation/test_create_presentation.py | abitoun-42/atpbar | fe5e4c24621b4707c5253be8ab2b5ae48f4801e3 | [
"BSD-3-Clause"
] | 72 | 2019-02-24T15:49:57.000Z | 2022-03-27T19:38:38.000Z | tests/presentation/test_create_presentation.py | abitoun-42/atpbar | fe5e4c24621b4707c5253be8ab2b5ae48f4801e3 | [
"BSD-3-Clause"
] | 24 | 2019-02-18T12:39:04.000Z | 2022-01-19T02:14:56.000Z | tests/presentation/test_create_presentation.py | abitoun-42/atpbar | fe5e4c24621b4707c5253be8ab2b5ae48f4801e3 | [
"BSD-3-Clause"
] | 10 | 2019-04-19T15:39:32.000Z | 2022-01-08T16:57:42.000Z | # Tai Sakuma <tai.sakuma@gmail.com>
import os
import sys
import pytest
import unittest.mock as mock
has_jupyter_notebook = False
try:
import ipywidgets as widgets
from IPython.display import display
has_jupyter_notebook = True
except ImportError:
pass
from atpbar.presentation.create import create_presentation
##__________________________________________________________________||
@pytest.fixture(
params=[True, False]
)
def isatty(request, monkeypatch):
ret = request.param
org_stdout = sys.stdout
f = mock.Mock(**{
'stdout.isatty.return_value': ret,
'stdout.write.side_effect': lambda x : org_stdout.write(x)
})
module = sys.modules['atpbar.presentation.create']
monkeypatch.setattr(module, 'sys', f)
return ret
##__________________________________________________________________||
if has_jupyter_notebook:
is_jupyter_notebook_parames = [True, False]
else:
is_jupyter_notebook_parames = [False]
@pytest.fixture(params=is_jupyter_notebook_parames)
def is_jupyter_notebook(request, monkeypatch):
ret = request.param
f = mock.Mock()
f.return_value = ret
module = sys.modules['atpbar.presentation.create']
monkeypatch.setattr(module, 'is_jupyter_notebook', f)
return ret
##__________________________________________________________________||
@pytest.fixture(
params=[True, False]
)
def del_ProgressBarJupyter(request, monkeypatch):
ret = request.param
module = sys.modules['atpbar.presentation.create']
if ret:
monkeypatch.delattr(module, 'ProgressBarJupyter', raising=False)
else:
m = mock.Mock()
m().__class__.__name__ = 'ProgressBarJupyter'
monkeypatch.setattr(module, 'ProgressBarJupyter', m, raising=False)
return ret
##__________________________________________________________________||
def test_create_presentation(isatty, is_jupyter_notebook, del_ProgressBarJupyter):
actual = create_presentation()
if isatty:
assert 'ProgressBar' == actual.__class__.__name__
elif is_jupyter_notebook:
if del_ProgressBarJupyter:
assert 'ProgressPrint' == actual.__class__.__name__
else:
assert 'ProgressBarJupyter' == actual.__class__.__name__
else:
assert 'ProgressPrint' == actual.__class__.__name__
##__________________________________________________________________||
| 30.392405 | 82 | 0.742191 | 234 | 2,401 | 5.884615 | 0.282051 | 0.108932 | 0.08642 | 0.061002 | 0.309368 | 0.16703 | 0.092956 | 0.092956 | 0.092956 | 0 | 0 | 0 | 0.167014 | 2,401 | 78 | 83 | 30.782051 | 0.6885 | 0.155352 | 0 | 0.311475 | 0 | 0 | 0.128472 | 0.063492 | 0 | 0 | 0 | 0 | 0.065574 | 1 | 0.065574 | false | 0.016393 | 0.131148 | 0 | 0.245902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
048fe32428b42c9f1df5738539aa6f0a4f71bad4 | 4,652 | py | Python | GridTrader_with_db.py | wcen/Leveraged-grid-trading-bot | a6920b9c0d6aecaa6e28c6e12d760b75e5d9bff2 | [
"MIT"
] | 6 | 2021-11-20T21:47:42.000Z | 2022-03-31T05:38:27.000Z | GridTrader_with_db.py | webclinic017/Leveraged-grid-trading-bot | a698bb5bea0fed00c28dce023afa751c4f82e869 | [
"MIT"
] | null | null | null | GridTrader_with_db.py | webclinic017/Leveraged-grid-trading-bot | a698bb5bea0fed00c28dce023afa751c4f82e869 | [
"MIT"
] | 5 | 2021-10-11T20:25:38.000Z | 2022-03-04T15:02:17.000Z | import sys
import time
import datetime
import asyncio
import pytz
from GridTrader import GridTrader
from db_connector import db_connector
class GridTrader_with_db(GridTrader):
def __init__(self, file='setting.json'):
super().__init__(file=file)
info=GridTrader.read_setting(file=file)
self.TableName=info['db_table_name']
self.db=db_connector(hostname=info['db_host'],user=info['db_user'],passwd=info["db_passwd"],database=info['db_database'])
self.db.execute(f'CREATE TABLE IF NOT EXISTS {self.TableName}(\
id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,\
sell_vol TEXT NOT NULL, buy_vol TEXT NOT NULL,\
sell_val TEXT NOT NULL, buy_val TEXT NOT NULL,\
maker BIGINT UNSIGNED NOT NULL, taker BIGINT UNSIGNED NOT NULL,\
fiat TEXT NOT NULL, coin TEXT NOT NULL,\
time TEXT NOT NULL, date TEXT NOT NULL,\
e_day INT NOT NULL, e_hour INT NOT NULL, e_min INT NOT NULL, e_sec INT NOT NULL)')
tmp=self.db.execute(f"SELECT * from {self.TableName} ORDER BY id DESC LIMIT 1")
if len(tmp) > 0:
self.vol['sell'] = float(tmp[0][1])
self.vol['buy'] = float(tmp[0][2])
self.val['sell'] = float(tmp[0][3])
self.val['buy'] = float(tmp[0][4])
self.liquidity['maker'] = float(tmp[0][5])
self.liquidity['taker'] = float(tmp[0][6])
self.fee['fiat'] = float(tmp[0][7])
self.fee['coin'] = float(tmp[0][8])
self.etime = float(tmp[0][11])*86400+float(tmp[0][12])*3600+float(tmp[0][13])*60+float(tmp[0][14])
else:
self.etime = 0.0
def log_trading_info(self):
coin = self.market[:self.market.find('/')]
fiat = self.market[self.market.find('/')+1:]
self.log("##########.Trading Info.##########")
self.log(f"Trade balance: {self.vol['sell']-self.vol['buy']:+.2f} {fiat}, {self.val['buy']-self.val['sell']:+.4f} {coin}",withTime=False)
trade_return=self.prof*min(self.val['buy'],self.val['sell'])/int(time.time()-self.startTime+self.etime)*86400
self.log(f"Return: {self.prof*min(self.val['buy'],self.val['sell']):.4f} {fiat}, {trade_return:.4f} {fiat}/Day",withTime=False)
self.log(f"Volume: {(self.vol['buy']+self.vol['sell']):.2f} {fiat}, {(self.vol['buy']+self.vol['sell'])/int(time.time()-self.startTime)*86400 :.2f} {fiat}/Day",withTime=False)
self.log(f"Maker ratio: {self.liquidity['maker']/max(self.liquidity['maker']+self.liquidity['taker'],1)*100:.2f}%, Total: {self.liquidity['maker']+self.liquidity['taker']:.0f}",withTime=False)
self.log(f"Fee: {self.fee['fiat']:.8f} {fiat}, {self.fee['coin']:.8f} {coin}",withTime=False)
self.log("##########.##########.##########.##########.##########",withTime=False)
info=GridTrader_with_db.time_info(int(time.time()-self.startTime+self.etime))
cmd=f'INSERT INTO {self.TableName}(sell_vol,buy_vol,sell_val,buy_val,maker,taker,fiat,coin,time,date,e_day,e_hour,e_min,e_sec) VALUES(\
"{self.vol["sell"]:.4f}", "{self.vol["buy"]:.4f}",\
"{self.val["sell"]:.4f}", "{self.val["buy"]:.4f}",\
"{self.liquidity["maker"]:.1f}","{self.liquidity["taker"]:.1f}",\
"{self.fee["fiat"]:.8f}","{self.fee["coin"]:.8f}",\
"{datetime.datetime.now(pytz.timezone("Asia/Taipei")).strftime("%H:%M:%S")}",\
"{datetime.datetime.now(pytz.timezone("Asia/Taipei")).strftime("%d/%m-%Y")}",\
{info[0]},{info[1]},{info[2]},{info[3]})'
self.db.execute(cmd)
@staticmethod
def start(trader):
while True:
try:
trader.grid_init()
startTime = time.time()
while True:
if int(time.time()-startTime) > 20 :
trader.log_trading_info()
startTime = time.time()
trader.loop_job()
time.sleep(0.5)
except:
continue
@staticmethod
def time_info(x):
day=0
hour=0
minu=0
if x>86400:
day=x//86400
x-=day*86400
if x>3600:
hour=x//3600
x-=hour*3600
if x>60:
minu=x//60
x-=minu*60
return (day,hour,minu,x)
if __name__ == '__main__':
if len(sys.argv) > 1:
GridTrader_with_db.start(trader=GridTrader_with_db(file=f'{sys.argv[1]}'))
else:
GridTrader_with_db.start(trader=GridTrader_with_db())
| 41.90991 | 200 | 0.557395 | 635 | 4,652 | 3.981102 | 0.218898 | 0.041535 | 0.042722 | 0.031646 | 0.24288 | 0.215585 | 0.160997 | 0.102848 | 0.030063 | 0.030063 | 0 | 0.034788 | 0.246131 | 4,652 | 110 | 201 | 42.290909 | 0.686056 | 0 | 0 | 0.089888 | 0 | 0.089888 | 0.234308 | 0.120163 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044944 | false | 0.011236 | 0.078652 | 0 | 0.146067 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04906bc044f904359850b223172cd50440da19f9 | 1,218 | py | Python | pstests/settings/launch_scheduler.py | sj1104/Het | 81b7e9f0f593108db969fc46a1af3df74b825230 | [
"Apache-2.0"
] | 2 | 2021-12-05T07:11:04.000Z | 2021-12-15T07:53:48.000Z | pstests/settings/launch_scheduler.py | sj1104/Het | 81b7e9f0f593108db969fc46a1af3df74b825230 | [
"Apache-2.0"
] | null | null | null | pstests/settings/launch_scheduler.py | sj1104/Het | 81b7e9f0f593108db969fc46a1af3df74b825230 | [
"Apache-2.0"
] | 3 | 2021-04-01T22:39:13.000Z | 2021-04-21T11:51:57.000Z | from athena import gpu_ops as ad
import os
import sys
import yaml
import json
import multiprocessing
import signal
def main():
def start_scheduler(settings):
for key, value in settings.items():
os.environ[key] = str(value)
assert os.environ['DMLC_ROLE'] == "scheduler"
print('Scheduler starts...')
ad.scheduler_init()
ad.scheduler_finish()
def signal_handler(sig, frame):
print("SIGINT signal caught, stop Training")
proc.kill()
exit(0)
if len(sys.argv) == 1:
settings = json.load(open('./settings/scheduler.json'))
else:
file_path = sys.argv[1]
suffix = file_path.split('.')[-1]
if suffix == 'yml':
settings = yaml.load(open(file_path).read(), Loader=yaml.FullLoader)
elif suffix == 'json':
settings = json.load(open(file_path))
else:
assert False, 'File type not supported.'
print('Scheduler settings:')
print(settings)
proc = multiprocessing.Process(target=start_scheduler, args=(settings,))
proc.start()
signal.signal(signal.SIGINT, signal_handler)
proc.join()
if __name__ == '__main__':
main()
| 26.478261 | 80 | 0.611658 | 145 | 1,218 | 5 | 0.468966 | 0.044138 | 0.022069 | 0.055172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004449 | 0.261905 | 1,218 | 45 | 81 | 27.066667 | 0.802002 | 0 | 0 | 0.052632 | 0 | 0 | 0.128079 | 0.020525 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.078947 | false | 0 | 0.184211 | 0 | 0.263158 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0491276565adb67c9144a9e141d865fe9de613d1 | 10,959 | py | Python | featureio/parsers.py | nijibabulu/featureio | 23c439de645a654f1e548c4ff0df409ecd3a2935 | [
"MIT"
] | null | null | null | featureio/parsers.py | nijibabulu/featureio | 23c439de645a654f1e548c4ff0df409ecd3a2935 | [
"MIT"
] | null | null | null | featureio/parsers.py | nijibabulu/featureio | 23c439de645a654f1e548c4ff0df409ecd3a2935 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import functools
from . import gene
def BedIterator(handle, cls=gene.Gene):
    for n, line in enumerate(handle):
        if line.startswith('#'):
            continue
        if not len(line.strip()):
            continue
        fields = line.strip().split('\t')
        if len(fields) != 12:
            raise ValueError(
                'Incorrect number of fields on line {}:\n{}'.format(
                    n, line))
        yield cls(*fields)


def parse_psl_line(matches, misMatches, repMatches, nCount, qNumInsert,
                   qBaseInsert, tNumInsert, tBaseInsert, strand,
                   name, qSize, qStart, qEnd, chrom, tSize, start, end,
                   block_count,
                   block_sizes, qStarts, block_starts, cls=gene.Gene):
    corr_block_starts = ','.join([str(int(s) - int(start))
                                  for s in block_starts.split(',') if len(s)])
    return cls(chrom, start, end, name, matches, strand, 0, 0, 0,
               block_count, block_sizes, corr_block_starts)


def PslIterator(handle, cls=gene.Gene):
    for line in handle:
        fields = line.strip().split()
        yield parse_psl_line(*fields, cls=cls)


def BlatPslIterator(handle, cls=gene.Gene):
    for _ in range(5):  # skip the header
        next(handle)
    for line in handle:
        # split the line into fields before unpacking into parse_psl_line
        yield parse_psl_line(*line.strip().split(), cls=cls)
def AugustusGtfIterator(handle, cls=gene.Gene):
    while True:
        line = handle.readline()
        if not line:
            return
        if line.startswith("# start gene"):
            break
    while True:
        line = handle.readline()
        if not line:
            return  # stopiteration
        f = line.strip().split('\t')
        if len(f) > 1 and f[2] == 'transcript':
            chrom, start, end, strand, tid = (
                f[0], int(f[3]), int(f[4]), f[6], f[-1])
            bst, bsz = [[], []]
            cs, ce = [None, None]
            seq = ''
            gene_id = None
            while True:
                line = handle.readline()
                if not line:
                    return  # stopiteration
                # if we have already seen protein sequence or we are initiating
                # the protein sequence add to the current sequence
                if line.startswith('# protein sequence') or len(seq):
                    istart = (line.index('[') + 1) if line.count('[') else 2
                    iend = line.index(']') if line.count(']') else len(line)
                    seq += line[istart:iend].strip()
                else:
                    # TODO: change to a non-block initializer to avoid these
                    # calculations
                    f = line.strip().split('\t')
                    if f[2] == 'exon':
                        gene_id = f[-1].split()[-1].strip('";')
                        bst.append(int(f[3]) - start)
                        bsz.append(int(f[4]) - int(f[3]))
                    elif f[2] == 'CDS':
                        gene_id = f[-1].split()[-1].strip('";')
                        cs = min(cs, int(f[3])) if cs else int(f[3])
                        ce = max(cs, int(f[4])) if ce else int(f[4])
                if seq is not None and ']' in line:
                    yield cls(
                        chrom, start, end, tid, 0, strand, cs, ce, 0, len(bst),
                        ','.join(str(x) for x in bsz),
                        ','.join(str(x) for x in bst),
                        seq=seq, gene_id=gene_id)
                    break
'''
def GFF3Iterator(handle, cls=Gene):
    genes = {}
    children = {}
    for lineno, line in enumerate(handle):
        if line.startswith('#'):
            continue
        try:
            chrom, source, ftype, start, end, score, strand, _, a = line.strip().split('\t')
            attrs = dict(tuple(a.strip() for a in attr.split('='))
                         for attr in a.split(';'))
        except ValueError:
            raise ValueError('Could not parse line {}: {}'.format(lineno, line))
        if ftype == 'gene':
            gene_name = attrs.get('ID', attrs.get('Name', attrs.get('gene_id')))
            if gene_name is None:
                raise ValueError('No ID, Name or gene_id found for gene on '
                                 'line {}:\n{}'.format(lineno, line))
            gene = children.get(gene_name, genes.setdefault(
                gene_name, Gene(chrom, start, end, gene_name, score, strand,
                                -1, -1, 0, 0, 0, [], attrs)))
            gene = genes.setdefault(gene_name, Gene(chrom, start, end, ))
'''
_readers = {"bed12": BedIterator, "psl": PslIterator,
"blatpsl": BlatPslIterator,
"augustusgtf": AugustusGtfIterator}
# "gff3": GFF3Iterator}
def parse(maybe_handle, format, mode='r', cls=gene.Gene, **kwargs):
# type: (Union[TextIO, str], str, str, Callable[[...], gene.Gene], ...) -> List[gene.Gene]
# this can be better handled with contextlib.contextmanager
if isinstance(maybe_handle, str):
fp = open(maybe_handle, mode, **kwargs)
else:
fp = maybe_handle
if format in _readers:
gen = _readers[format]
i = gen(fp, cls=cls)
for g in i:
yield g
else:
raise ValueError('Unknown format {}. Should be one of {}'.format(
format, ','.join(_readers.keys())))
def to_dict(gene_iterable):
    return {g.name: g for g in gene_iterable}
class GeneWriter(object):
    def __init__(self, handle, **kwargs):
        self.handle = handle

    def write_header(self):
        pass

    def write_genes(self, genes):
        for i, gene in enumerate(genes):
            self.count = i + 1
            self.write_gene(gene)

    def write_gene(self, _):
        raise NotImplementedError('{} does not implement write_gene'.format(
            self.__class__))

    def write_footer(self):
        pass

    def write_file(self, genes):
        self.write_header()
        for gene in genes:
            self.write_gene(gene)
        self.write_footer()
class AugustusExonHintWriter(GeneWriter):
    def __init__(self, handle, cds_exons=True, feature_type='exon',
                 source='featureio', augustus_source='E', priority=4):
        super(AugustusExonHintWriter, self).__init__(handle)
        self.exon_attr = 'cds_exons' if cds_exons else 'exons'
        self.feature_type = feature_type
        self.source = source
        self.priority = priority
        self.augustus_source = augustus_source

    def write_gene(self, gene):
        for exon in getattr(gene, self.exon_attr):
            attrs = 'grp={};pri={};src={}'.format(
                gene.name, self.priority, self.augustus_source)
            self.handle.write('\t'.join(str(x) for x in [
                gene.chrom,
                self.source,
                self.feature_type,
                exon[0],
                exon[1],
                '.',
                gene.strand,
                '.',
                attrs]))
            self.handle.write('\n')
class Bed12Writer(GeneWriter):
    def write_gene(self, gene):
        self.handle.write('\t'.join(str(item) for item in
                                    [gene.chrom, gene.start, gene.end,
                                     gene.name, gene.score, gene.strand,
                                     gene.cds_start, gene.cds_end,
                                     gene.item_rgb, gene.block_count,
                                     ','.join(str(s) for s in gene.block_sizes),
                                     ','.join(str(s) for s in
                                              gene.block_starts)]) + '\n')
class GFF3Writer(GeneWriter):
    def __init__(self, handle, source='GFF3Conv', **kwargs):
        super(GFF3Writer, self).__init__(handle, **kwargs)
        self.source = source

    def write_feature(self, gene, ftype, start, end, feature_number=1,
                      toplevel=False, phase='.', score='.',
                      name=None, attrs=None):
        attrs = dict() if attrs is None else attrs
        if toplevel:
            attrs.update(gene.attrs)
        self.handle.write('\t'.join(map(str, [
            gene.chrom, self.source, ftype, start, end, score, gene.strand,
            phase])))
        name_base = name or gene.name
        if not toplevel:
            attrs.setdefault('Parent', gene.name)
            attrs['ID'] = '{}.{}.{}'.format(gene.name, ftype, feature_number)
            if name is not None:
                attrs['Name'] = name
        else:
            attrs['Name'] = name_base
            attrs['ID'] = gene.name
        self.handle.write('\t' + ';'.join('{}={}'.format(k, v)
                                          for k, v in attrs.items()))
        self.handle.write('\n')
        return attrs['ID']

    def write_header(self):
        self.handle.write('##gff-version 3\n')

    @staticmethod
    def _sorted_cds(gene):
        return sorted(gene.cds_exons, key=lambda c: c[0],
                      reverse=gene.strand == '-')

    def cds_writer(self, gene, transcript_id):
        def write_cds(phase, enumerated_cds):
            feature_number, cds = enumerated_cds
            self.write_feature(gene, 'CDS', cds[0], cds[1], phase=phase,
                               feature_number=feature_number,
                               attrs={'Parent': transcript_id})
            return (3 - ((cds[1] - cds[0] - phase) % 3)) % 3
        return write_cds

    def write_gene(self, gene):
        self.write_feature(gene, 'gene', gene.start, gene.end, toplevel=True)
        transcript_id = self.write_feature(gene, 'mRNA', gene.start, gene.end)
        for n, e in enumerate(gene.exons):
            self.write_feature(gene, 'exon', e[0], e[1], n,
                               attrs={'Parent': transcript_id})
        functools.reduce(self.cds_writer(gene, transcript_id),
                         enumerate(GFF3Writer._sorted_cds(gene)), 0)
_writers = {"bed12": Bed12Writer,
"augustus_exon_hints": AugustusExonHintWriter,
"gff3": GFF3Writer}
valid_writers = _writers.keys()
valid_readers = _readers.keys()
def write(genes, maybe_handle, format, mode='w', **kwargs):
if isinstance(maybe_handle, str):
fp = open(maybe_handle, mode, **kwargs)
else:
fp = maybe_handle
if format in _writers:
writer = _writers[format](fp, **kwargs)
writer.write_file(genes)
else:
raise ValueError('Unknown format {}. Should be one of {}'.format(
format, ','.join(_writers.keys())))
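# Added usage sketch (not part of the original module): reading one format and
# writing another with the functions above; the file names are placeholders.
def _example_roundtrip():
    genes = list(parse("input.bed", "bed12"))
    write(genes, "output.gff3", "gff3")
    return to_dict(genes)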
def main():
    from Bio import SeqIO
    s = str(SeqIO.read('unmask_split_nv2i5/Chr1.fa', 'fasta').seq)
    for g in parse('bookends_rb/Chr1.BE0.1.gff', 'augustusgtf'):
        print(g.name)
        print(g.strand)
        cds = g.get_cds(s)
        print(len(cds))
        print(g.cds_length)
        print(cds)


if __name__ == '__main__':
    main()
| 35.237942 | 94 | 0.521763 | 1,275 | 10,959 | 4.362353 | 0.19451 | 0.018698 | 0.018878 | 0.012226 | 0.196692 | 0.156778 | 0.123697 | 0.116864 | 0.078029 | 0.070478 | 0 | 0.010492 | 0.347751 | 10,959 | 310 | 95 | 35.351613 | 0.767627 | 0.037686 | 0 | 0.237668 | 0 | 0 | 0.053387 | 0.005486 | 0 | 0 | 0 | 0.003226 | 0 | 1 | 0.112108 | false | 0.008969 | 0.013453 | 0.008969 | 0.183857 | 0.022422 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04929055f4410ade9e4497312f0a6f059a8507e0 | 8,183 | py | Python | fiber/config.py | agrimrules/fiber | e21e7dea019eae6259705b351f0e196f9eaa4835 | [
"Apache-2.0"
] | 1 | 2020-08-11T01:52:08.000Z | 2020-08-11T01:52:08.000Z | fiber/config.py | agrimrules/fiber | e21e7dea019eae6259705b351f0e196f9eaa4835 | [
"Apache-2.0"
] | null | null | null | fiber/config.py | agrimrules/fiber | e21e7dea019eae6259705b351f0e196f9eaa4835 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module deals with Fiber configurations.
There are 3 ways of setting Fiber configurations: config file, environment
variable and Python code. The priorities are: Python code > environment
variable > config file.
#### Config file
Fiber config file is a plain text file following Python's
[configparser](https://docs.python.org/3.6/library/configparser.html) file
format. It needs to be named `.fiberconfig` and put into the directory where
you launch your code.
An example `.fiberconfig` file:
```
[default]
log_level=debug
log_file=stdout
backend=local
```
#### Environment variable
Alternatively, you can also use environment variables to pass configurations to
Fiber. The environment variable names are in format `FIBER_` + config name in
upper case.
For example, an equivalent way of specifying the above config using environment
variables is:
```
FIBER_LOG_LEVEL=debug FIBER_LOG_FILE=stdout FIBER_BACKEND=local python code.py ...
```
#### Python code
You can also set Fiber config in your Python code:
```python
import fiber.config as fiber_config
...
def main():
    fiber_config.log_level = "debug"
    fiber_config.log_file = "stdout"
    fiber_config.backend = "local"
```
Note that almost all of the configurations need to be set before you launch
any Fiber processes.
"""
import os
import logging
import configparser
_current_config = None
logger = logging.getLogger('fiber')
LOG_LEVELS = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
DEFAULT_IMAGE = "fiber-test:latest"
def str2bool(text):
    """Simple function to convert a range of values to True/False."""
    return text.lower() in ["true", "yes", "1"]
class Config(object):
"""Fiber configuration object. Available configurations:
| key | Type | Default | Notes |
| --------------------- |:------|:-----|:------|
| debug | bool | False | Set this to `True` to turn on debugging |
| image | str | None | Docker image to use when starting new processes |
| default_image | str | None | Default docker image to use when `image` config value is not set |
| backend | str | None | Fiber backend to use when starting new processes. Check [here](platforms.md) for available backends |
| default_backend | str | `local` | Default Fiber backend to use when `backend` config is not set |
| log_level | str/int | `logging.INFO` | Fiber log level. This config accepts either a int value (log levels from `logging` module like `logging.INFO`) or strings: `debug`, `info`, `warning`, `error`, `critical` |
| log_file | str | `/tmp/fiber.log` | Default fiber log file path. Fiber will append the process name to this value and create one log file for each process. A special value `stdout` means to print the logs to standard output |
| ipc_admin_master_port | int | `0` | The port that master process uses to communicate with child processes. Default value is `0` which means the master process will choose a random port |
"""
def __init__(self, conf_file=None):
# Not documented, people should not use this
self.merge_output = False
self.debug = False
self.image = None
self.default_image = DEFAULT_IMAGE
self.backend = None
self.default_backend = "local"
# Not documented, this should be removed because it's not used for now
self.use_bash = False
self.log_level = logging.INFO
self.log_file = "/tmp/fiber.log"
# If ipc_active is True, Fiber worker processes will connect
# to the master process. Otherwise, the master process will connect
# to worker processes.
# Not documented, should only be used internally
self.ipc_active = True
# if ipc_active is True, this can be 0, otherwise, it can only be a
# valid TCP port number. Default 0.
self.ipc_admin_master_port = 0
# Not documented, this is only used when `ipc_active` is False
self.ipc_admin_worker_port = 8000
# Not documented, need to fine tune this
self.cpu_per_job = 1
# Not documented, need to fine tune this
self.mem_per_job = None
self.use_push_queue = True
if conf_file is None:
conf_file = ".fiberconfig"
# Load config from config file
if os.path.exists(conf_file):
logger.debug("loading config from %s", conf_file)
config = configparser.ConfigParser()
config.read(conf_file)
for k in config["default"]:
if k in self.__dict__:
self.__dict__[k] = config["default"][k]
else:
raise ValueError(
'unknown config key "{}" in {}. Valid keys: '
'{}'.format(k, conf_file,
[key for key in self.__dict__]))
else:
logger.debug("no fiber config file (%s) found", conf_file)
# load environment variable overwrites
for k in self.__dict__:
name = "FIBER_" + k.upper()
val = os.environ.get(name, None)
if val:
self.__dict__[k] = val
# rewrite values
if isinstance(self.log_level, str):
level = self.log_level.lower()
if level not in LOG_LEVELS:
logger.debug("bad logging level: %s", self.log_level)
level = logging.NOTSET
else:
level = LOG_LEVELS[level]
self.log_level = level
if isinstance(self.ipc_active, str):
self.ipc_active = str2bool(self.ipc_active)
if isinstance(self.cpu_per_job, str):
self.cpu_per_job = int(self.cpu_per_job)
if isinstance(self.mem_per_job, str):
self.mem_per_job = int(self.mem_per_job)
def __repr__(self):
return repr(self.__dict__)
@classmethod
def from_dict(cls, kv):
obj = cls()
for k in kv:
setattr(obj, k, kv[k])
return obj
def get_object():
    """
    Get a Config object representing current Fiber config

    :returns: a Config object
    """
    # When a config object is needed, call this method to get a
    # concrete Fiber config object
    global _current_config
    return Config.from_dict(get_dict())
def get_dict():
    """
    Get current Fiber config in a dictionary

    :returns: a Python dictionary with all the current Fiber configurations
    """
    global_vars = globals()
    return {k: global_vars[k] for k in vars(_current_config)}
def init(**kwargs):
"""
Init Fiber system and set config values.
:param kwargs: If kwargs is not None, init Fiber system with corresponding
key/value pairs in kwargs as config keys and values.
    :returns: A list of config keys that were updated in this function call.
"""
_config = Config()
# handle overwrites
_config.__dict__.update(kwargs)
updates = []
global_vars = globals()
for k in vars(_config):
        # find diffs and write them
val = getattr(_config, k)
if k not in global_vars or global_vars[k] != val:
global_vars[k] = val
updates.append(k)
global_vars["_current_config"] = _config
logger.debug("Inited fiber with config: %s", vars(_config))
return updates
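
# Usage sketch: config values are resolved in increasing priority -- class
# defaults, then the ".fiberconfig" file, then FIBER_* environment variables,
# then init() kwargs. A minimal example, assuming this module is importable
# as `fiber.config`:
#
#     import logging
#     import fiber.config
#     updated = fiber.config.init(log_level=logging.DEBUG, cpu_per_job=2)
#     conf = fiber.config.get_object()
#     print(updated, conf.log_level)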
| 32.995968 | 246 | 0.638397 | 1,088 | 8,183 | 4.674632 | 0.27114 | 0.023791 | 0.011797 | 0.010224 | 0.044436 | 0.025167 | 0.013763 | 0.013763 | 0 | 0 | 0 | 0.004033 | 0.272761 | 8,183 | 247 | 247 | 33.129555 | 0.850613 | 0.546377 | 0 | 0.053191 | 0 | 0 | 0.076577 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074468 | false | 0 | 0.031915 | 0.010638 | 0.180851 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0493ef8fc992e579828c78f162358b41a9e24ff1 | 14,188 | py | Python | compartmental.py | burakbudanur/compartmental | 9729aa2b7a7464c7a866828b72b3a09d95e0017a | [
"MIT"
] | null | null | null | compartmental.py | burakbudanur/compartmental | 9729aa2b7a7464c7a866828b72b3a09d95e0017a | [
"MIT"
] | null | null | null | compartmental.py | burakbudanur/compartmental | 9729aa2b7a7464c7a866828b72b3a09d95e0017a | [
"MIT"
] | null | null | null | import numpy as np
import networkx as nx
import sympy as sp
import matplotlib.pyplot as plt
import matplotlib as mpl
from sys import exit
from scipy.optimize import curve_fit
from scipy.integrate import odeint
class Model(nx.DiGraph):
"""
Base class for compartmental models.
See also:
---------
nx.DiGraph
"""
def set_compartments(self, compartments):
"""
Set model compartments.
Wrapper around nx.DiGraph.add_nodes_from
Parameters
----------
compartments: list of tuples such as
compartments = [
('S' , {"layer" : 1}),
('I' , {"layer" : 2}),
('R' , {"layer" : 3})
]
the layer attributes are only used for visualization
and have no effect on the model function
"""
self.add_nodes_from(compartments)
self.compartments = list(self.nodes)
def set_parameters(self, parameters):
"""
Set model parameters.
Parameters
----------
parameters: list of strings such as
parameters = [
'beta',
'gamma',
]
Note: Most of the lower-case Greek letters are going to
be translated to latex symbols in visualize(), see below.
"""
self.parameters = parameters
def set_inputs(self, inputs):
"""
Set model inputs.
Parameters
----------
inputs: list of strings such as
inputs = [
'f(t)',
'g(t)',
]
The functions f(t) and g(t) will be interpreted
as the model ODE's time-dependent inputs. For an example, see
Example_SIR_seasonality.ipynb
"""
self.inputs = inputs
def set_rates(self, rates):
"""
Set transition rates of the compartmental model.
Wrapper around nx.DiGraph.add_edges_from
Parameters
----------
rates: list of tuples, such as
rates = [
('S', 'I', {"label" : "beta * S * I / N"}),
('I', 'R', {"label" : "gamma * I"})
]
the label attributes of the edges should be sympy-interpretable strings,
composed of model compartments and parameters
"""
self.add_edges_from(rates)
def visualize(self, figsize=None, ax=None, replacements = [], scale = 1.0, show_rates=True):
"""
Generate a network visualization of the compartmental model
Parameters
----------
figsize: see plt.figure
replacements: list of strings to be replaced by short-hands such as
replacements=[
['(0.5 + 0.5 \, tanh(k \, (T_m - T_t)))', '(\Theta [T_m - T_t])'],
[') \\, (', ')$\n$\\times(']
]
"""
def label_to_latex(labelstr):
"""
Convert labelstr to latex-interpretable form for visualization
"""
# Small Greek letters:
latex_label = '$'+labelstr.replace('*', '\,')+'$'
latex_label = latex_label.replace('alpha', '\\alpha')
latex_label = latex_label.replace('beta', '\\beta')
latex_label = latex_label.replace('gamma', '\\gamma')
latex_label = latex_label.replace('delta', '\\delta')
latex_label = latex_label.replace('epsilon', '\\epsilon')
latex_label = latex_label.replace('zeta', '\\zeta')
latex_label = latex_label.replace(' eta', ' \\eta')
latex_label = latex_label.replace('(eta', '(\\eta')
latex_label = latex_label.replace('$eta', '$\\eta')
latex_label = latex_label.replace('theta', '\\theta')
latex_label = latex_label.replace('kappa', '\\kappa')
latex_label = latex_label.replace('lambda', '\\lambda')
latex_label = latex_label.replace('mu', '\\mu')
latex_label = latex_label.replace('nu', '\\nu')
latex_label = latex_label.replace('xi', '\\xi')
latex_label = latex_label.replace('pi', '\\pi')
latex_label = latex_label.replace('sigma', '\\sigma')
latex_label = latex_label.replace('phi', '\\phi')
latex_label = latex_label.replace('chi', '\\chi')
latex_label = latex_label.replace('psi', '\\psi')
latex_label = latex_label.replace('omega', '\\omega')
# Additional replacements
for replacement in replacements:
latex_label = latex_label.replace(replacement[0],
replacement[1])
return latex_label
node_labels = {node:label_to_latex(node)
for node in list(self.nodes)}
node_layers = {node:self.nodes[node]['layer']
for node in list(self.nodes)}
node_positions = {}
i_node_layer = 0 # Counter for the nodes in a layer
for node in list(self.nodes):
layer = node_layers[node]
num_nodes_layer = list(node_layers.values()).count(layer)
y_nodes = np.arange(
0 - num_nodes_layer/2 + 0.5,
num_nodes_layer - num_nodes_layer/2 + 0.5
)
            pos_x = (float(node_layers[node] - 1)  # np.float was removed in NumPy 1.24
/ max(node_layers.values()))
pos_y = y_nodes[i_node_layer]
node_positions[node] = np.array([pos_x, pos_y])
i_node_layer += 1
if i_node_layer == num_nodes_layer: i_node_layer = 0
edge_labels = {
edge:"\n\n"+label_to_latex(self.edges[edge]['label'])
for edge in list(self.edges)
}
# Generate a figure instance, if necessary
        if ax is None:
            if figsize is None:
fig = plt.figure(figsize=(6,6))
else:
fig = plt.figure(figsize=figsize)
mpl.rcParams["scatter.edgecolors"] = 'black'
nx.draw(
self,
ax = ax,
pos=node_positions,
labels=node_labels,
node_size=800 * scale,
font_size=16 * scale,
node_color='white'
)
if show_rates:
nx.draw_networkx_edge_labels(
self,
ax = ax,
pos=node_positions,
edge_labels = edge_labels,
font_size = 14 * scale,
label_pos=0.5,
bbox=dict(fc="w", ec="w", alpha=0, zorder=100),
font_color='red'
)
return
def generate_ode(self):
"""
Generate the right hand side of the ordinary differential equation
described by the compartments and transition rates
"""
# dict for sympify
if hasattr(self, "inputs"):
ns = {
sym : sp.Symbol(sym) for sym in
self.compartments + self.parameters + self.inputs
}
input_symbols = [
sp.Symbol(sym) for sym in self.inputs
]
else:
ns = {
sym : sp.Symbol(sym) for sym in
self.compartments + self.parameters
}
# Symbolic counterparts of the model compartments and parameters
compartment_symbols = [
sp.Symbol(sym) for sym in self.compartments
]
parameter_symbols = [
sp.Symbol(sym) for sym in self.parameters
]
# Dummy time symbol, only for compatibility with scipy.odeint
t_symbol = sp.Symbol('t')
# Symbolic rhs from the edge labels
dim = len(compartment_symbols)
        # Start from a zero column vector; each edge adds/subtracts its rate term
        self.rhs_symbolic = sp.zeros(dim, 1)
for k, edge in enumerate(list(self.edges)):
edge_label = self.edges[edge]['label']
term = sp.sympify(edge_label, locals=ns)
iedge0 = np.argwhere(np.array(list(self.nodes)) == edge[0])[0][0]
iedge1 = np.argwhere(np.array(list(self.nodes)) == edge[1])[0][0]
self.rhs_symbolic[iedge0] -= term
self.rhs_symbolic[iedge1] += term
self.rhs_latex = sp.latex(self.rhs_symbolic)
if hasattr(self, "inputs"):
self.rhs_lambda = sp.lambdify([compartment_symbols,
t_symbol,
parameter_symbols,
input_symbols],
self.rhs_symbolic)
def rhs(pop, tim, pars, inpts):
return self.rhs_lambda(pop, tim, pars, inpts).reshape(-1)
else:
self.rhs_lambda = sp.lambdify([compartment_symbols,
t_symbol,
parameter_symbols],
self.rhs_symbolic)
def rhs(pop, tim, pars):
return self.rhs_lambda(pop, tim, pars).reshape(-1)
return self.rhs_latex, self.rhs_symbolic, rhs
def plot_compartment(self, simulation_time, solution, compartment, ax=None, scale = 1.0):
        if ax is None:
fig = plt.figure(figsize=(6,6))
ax = fig.gca()
show_only = [compartment]
for k, compartment in enumerate(self.compartments):
            if compartment not in show_only:  # don't show these
continue
ax.plot(simulation_time,
solution[:, k],
label='$'+compartment+'$')
ax.legend(fontsize = 16 * scale, framealpha=0.5)
ax.grid(True)
plt.tight_layout()
if ax.get_xlabel() == '':
ax.set_xlabel('Days', fontsize = 16 * scale)
if ax.get_ylabel() == '':
ax.set_ylabel('Compartment population', fontsize = 16 * scale)
return plt.gcf()
def initiate_exponential(
self, population_guess, parameters, fit_horizon, vary, take_from,
tol = 1, max_iterations = 100, verbose=False, scale=1,
):
"""
Generate an initial population such that the dynamics of the compartments
in the list vary can be well-approximated by an exponential for the
fit_horizon.
Parameters
----------
        - population_guess: initial guess for the population, a dictionary
{'compartment': population (float)}
- fit_horizon: array of time points to which an exponential will be fit
- parameters: dictionary of parameter values {'parameter': value (float)}
- vary: list of compartments to be varied
- take_from: compartment to be adjusted as those in vary are varied.
This would be "susceptibles" if the compartmental model is modeling the
early stage of an epidemic.
- tol: tolerance for terminating the fixed point iteration (default = 1).
Usage example
-------------
initial_population_exp = model.initiate_exponential(
initial_population, parameters, np.arange(0, 10), ['R'], 'S'
)
see SIR_init_exp.ipynb for details.
"""
        for compartment in vary:
            if compartment not in self.compartments:
                exit(f"{compartment} is not a compartment.")
        if take_from not in self.compartments:
            exit(f"{take_from} is not a compartment.")
ode_latex, ode_symbolic, ode = self.generate_ode()
def exponential(x, a, b):
return a * np.exp(b * x)
        def initial_deviation_from_exp(xdata, ydata, plot=False):
            popt, _ = curve_fit(exponential, xdata, ydata)
fit = exponential(xdata, *popt)
return fit[0] - ydata[0]
initial_population = population_guess.copy()
simulation = odeint(ode,
list(initial_population.values()),
fit_horizon,
args = (list(parameters.values()),)
)
residuals = np.zeros(len(self.compartments))
for i, compartment in enumerate(self.compartments):
if compartment in vary:
residuals[i] = initial_deviation_from_exp(
fit_horizon, simulation[:, i]
)
n_iterations = 0
if verbose:
print('Starting residuals:', residuals)
while np.any(np.abs(residuals) > tol):
if n_iterations > max_iterations:
print("Exponential initiation did not converge")
return initial_population
new_initial_population = initial_population.copy()
for i, compartment in enumerate(self.compartments):
if compartment in vary:
new_initial_population[compartment] += residuals[i] * scale
new_initial_population[take_from] -= residuals[i] * scale
new_simulation = odeint(ode,
list(new_initial_population.values()),
fit_horizon,
args = (list(parameters.values()),))
for i, compartment in enumerate(self.compartments):
if compartment in vary:
residuals[i] = initial_deviation_from_exp(
fit_horizon, new_simulation[:, i]
)
initial_population = new_initial_population.copy()
n_iterations += 1
if verbose:
print(f'Iteration {n_iterations}, residuals:', residuals)
return initial_population
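
if __name__ == "__main__":
    # Minimal end-to-end sketch built from the docstring examples above
    # (hypothetical parameter values; SIR model with beta, gamma and a
    # population size N).
    model = Model()
    model.set_compartments([
        ('S', {"layer": 1}),
        ('I', {"layer": 2}),
        ('R', {"layer": 3}),
    ])
    model.set_parameters(['beta', 'gamma', 'N'])
    model.set_rates([
        ('S', 'I', {"label": "beta * S * I / N"}),
        ('I', 'R', {"label": "gamma * I"}),
    ])
    _, _, rhs = model.generate_ode()
    days = np.linspace(0, 100, 1001)
    # parameter order must match set_parameters(): beta, gamma, N
    solution = odeint(rhs, [999.0, 1.0, 0.0], days, args=([0.3, 0.1, 1000.0],))
    model.plot_compartment(days, solution, 'I')
    plt.show()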
| 31.740492 | 96 | 0.518889 | 1,487 | 14,188 | 4.801614 | 0.207801 | 0.064426 | 0.046218 | 0.061625 | 0.290056 | 0.187815 | 0.164706 | 0.137815 | 0.114006 | 0.088375 | 0 | 0.008401 | 0.379123 | 14,188 | 446 | 97 | 31.811659 | 0.802134 | 0.214688 | 0 | 0.18894 | 0 | 0 | 0.045778 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059908 | false | 0 | 0.036866 | 0.013825 | 0.147465 | 0.013825 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0495634b91f43d0e7597e99e33589f764a60171d | 13,207 | py | Python | tpx3format/read.py | M4I-nanoscopy/tpx3HitParser | 76147455a75effc1e799b6569c23320c5a4cf21a | [
"MIT"
] | 9 | 2021-03-02T12:13:54.000Z | 2022-03-23T15:36:09.000Z | tpx3format/read.py | M4I-nanoscopy/tpx3HitParser | 76147455a75effc1e799b6569c23320c5a4cf21a | [
"MIT"
] | 1 | 2020-10-08T10:45:15.000Z | 2021-02-15T15:27:56.000Z | tpx3format/read.py | M4I-nanoscopy/tpx3HitParser | 76147455a75effc1e799b6569c23320c5a4cf21a | [
"MIT"
] | 1 | 2022-03-25T08:42:54.000Z | 2022-03-25T08:42:54.000Z | import logging
import struct
import h5py
import numpy as np
from lib.constants import *
import os
# TODO: Logging does not work for multiprocessing processes on Windows
logger = logging.getLogger('root')
def read_positions(f):
control_events = []
i = 0
rollover_counter = 0
approaching_rollover = False
leaving_rollover = False
while True:
b = f.read(8)
cursor = f.tell()
if not b:
# Reached EOF
break
if len(b) < 8:
logger.error("Truncated file, no full header at file position %d. Continuing with what we have." % f.tell())
break
header = struct.unpack('<bbbbbbbb', b)
chip_nr = header[4]
mode = header[5]
# Check for mode
if mode != 0:
logger.error("Header packet with mode %d. Code has been developed for mode 0." % mode)
size = ((0xff & header[7]) << 8) | (0xff & header[6])
# Read the first package of the data package to figure out its type
pkg_data = f.read(8)
if len(pkg_data) < 8:
logger.error("Truncated file, no first data packet found at file position %d. Continuing with what we have." % f.tell())
break
pkg = struct.unpack("<Q", pkg_data)[0]
pkg_type = pkg >> 60
# The SPIDR time is 16 bit (65536). It has a rollover time of 26.843 seconds
time = pkg & 0xffff
rollover = rollover_counter
# Check if the time is nearing the limit of the rollover
if time > 0.9 * 65536.:
if leaving_rollover:
# We have already increased the rollover counter, so we need to reset it
rollover = rollover_counter - 1
elif not approaching_rollover:
# We must be approaching it
logger.debug("Approaching SPIDR timer rollover")
approaching_rollover = True
# We have been approaching the rollover, so if now see a low time, it probably is a rollover
if approaching_rollover and time < 0.01 * 65536.:
logger.debug("SPIDR timer rollover")
approaching_rollover = False
leaving_rollover = True
rollover_counter += 1
rollover = rollover_counter
# We are leaving the rollover, but we're far away by now
if leaving_rollover and time > 0.1 * 65536.:
logger.debug("Leaving SPIDR timer rollover")
approaching_rollover = False
leaving_rollover = False
# Parse the different package types
if pkg_type == 0x7:
control_event = parse_control_packet(pkg, size)
if control_event:
control_events.append(control_event)
elif pkg_type == 0x4:
parse_heartbeat_packet(pkg, size)
# TODO: Use heartbeat packages in calculating time
# Heartbeat packages are always followed by a 0x7145 or 0x7144 control package, and then possibly
# pixels. Continue to parse those pixels, but strip away the control package
if size - (8*2) > 0:
yield [cursor+16, size-(8*2), chip_nr, rollover]
elif pkg_type == 0x6:
logger.debug("TDC timestamp at position %d len %d" % (cursor, size))
# TODO: Use TDC packages
# tdc = parse_tdc_packet(pkg)
elif pkg_type == 0xb:
yield [cursor, size, chip_nr, rollover]
i += 1
else:
logger.warning("Found packet with unknown type %d" % pkg_type)
# Skip over the data packets and to the next header
f.seek(cursor + size, 0)
def parse_heartbeat_packet(pkg, size):
time = pkg >> 16
if pkg >> 56 == 0x44:
lsb = time & 0xffffffff
logger.debug('Heartbeat (LSB). lsb %d. len %d' % (lsb, size))
if pkg >> 56 == 0x45:
msb = (time & 0xFFFFFFFF) << 32
logger.debug('Heartbeat (MSB). msb %d. len %d' % (msb, size))
return
# TDC (Time to Digital Converter) packages can come from the external trigger
def parse_tdc_packet(pkg):
tdc_type = pkg >> 56
counter = (pkg >> 44) & 0xfff
timestamp = (pkg >> 9) & 0x3ffffffff
stamp = (pkg >> 4) & 0xf
logger.debug("TDC package. Type: 0x%04x. Counter: %d. Timestamp: %d. Stamp: %d" % (tdc_type, counter, timestamp, stamp))
return
def parse_control_packet(pkg, size):
# Get SPIDR time and CHIP ID
time = pkg & 0xffff
chip_id = (pkg >> 16) & 0xffff
control_type = pkg >> 48
if size / 8 > 1:
logger.warning("Control data packet is followed by more data. This is unexpected")
if control_type == CONTROL_END_OF_COMMAND:
logger.debug('EndOfCommand on chip ID %04x at SPIDR_TIME %5d' % (chip_id, time))
elif control_type == CONTROL_END_OF_READOUT:
logger.debug('EndOfReadOut on chip ID %04x at SPIDR_TIME %5d' % (chip_id, time))
elif control_type == CONTROL_END_OF_SEQUANTIAL_COMMAND:
logger.debug('EndOfResetSequentialCommand on chip ID %04x at SPIDR_TIME %5d' % (chip_id, time))
elif control_type == CONTROL_OTHER_CHIP_COMMAND:
logger.debug('OtherChipCommand on chip ID %04x at SPIDR_TIME %5d' % (chip_id, time))
else:
logger.debug('Unknown control packet (0x%04x) on chip ID %04x at SPIDR_TIME %5d' % (pkg >> 48, chip_id, time))
return [control_type, chip_id, time]
def check_tot_correction(correct_file):
if correct_file == "0" or correct_file is None:
# No ToT correction requested
return True
if not os.path.exists(correct_file):
return "ToT correction file (%s) does not exists" % correct_file
f = h5py.File(correct_file, 'r')
if 'tot_correction' not in f:
return "ToT correction file %s does not contain a tot_correction matrix" % correct_file
data = f['tot_correction']
logger.info("Found ToT correction file that was created on %s" % data.attrs['creation_date'])
return True
def read_tot_correction(correct_file):
if correct_file == "0":
# No ToT correction requested
return None
f = h5py.File(correct_file, 'r')
data = f['tot_correction']
return data[()]
def remove_cross_hits(hits):
# Maybe not the cleanest way to do this, but it's fast
ind_3x = (hits['chipId'] == 3) & (hits['x'] == 255)
ind_3y = (hits['chipId'] == 3) & (hits['y'] == 255)
ind_0x = (hits['chipId'] == 0) & (hits['x'] == 0)
ind_0y = (hits['chipId'] == 0) & (hits['y'] == 255)
ind_1x = (hits['chipId'] == 1) & (hits['x'] == 255)
ind_1y = (hits['chipId'] == 1) & (hits['y'] == 255)
ind_2x = (hits['chipId'] == 2) & (hits['x'] == 0)
ind_2y = (hits['chipId'] == 2) & (hits['y'] == 255)
# Combine all found hits
ind = ind_3x | ind_3y | ind_0x | ind_0y | ind_1x | ind_1y | ind_2x | ind_2y
indices = np.arange(len(hits))
hits = np.delete(hits, indices[ind], axis=0)
return hits
def apply_tot_correction(tot_correction, ToT, y, x, chip_id):
return tot_correction.item((ToT, y, x, chip_id))
def apply_toa_railroad_correction_phase1_um(x, cToA, chipId):
# The railroad columns for pllConfig 30
if 193 < x < 206:
cToA = cToA - 16
# Chips 2, 3, 0 in Maastricht/Basel
if chipId in (2, 3, 0) and (x == 204 or x == 205):
cToA = cToA + 16
    # Chip 1 in Maastricht/Basel
if chipId == 1 and (x == 186 or x == 187):
cToA = cToA - 16
return cToA
def apply_toa_railroad_correction_phase1_basel(x, cToA, chipId):
# The railroad columns for pllConfig 30
if 193 < x < 206:
cToA = cToA - 16
    # Chips 3, 0 in Basel
    if chipId in (3, 0) and (x == 204 or x == 205):
        cToA = cToA + 16
    # Chip 2 in Basel
if chipId == 2 and (x == 186 or x == 187):
cToA = cToA - 16
return cToA
def apply_toa_railroad_correction_phase2(x, cToA):
# The railroad columns for pllConfig 94
if x == 196 or x == 197 or x == 200 or x == 201 or x == 204 or x == 205:
cToA = cToA - 16
return cToA
def apply_toa_phase2_correction(x, cToA):
# PHASE 2 (pllConfig 94)
if int(x % 4) == 2 or int(x % 4) == 3:
cToA = cToA - 8
return cToA
def calculate_image_shape(hits_cross_extra_offset):
return 512 + 2 * hits_cross_extra_offset
def combine_chips(hits, hits_cross_extra_offset):
# Chip are orientated like this
# 2 1
# 3 0
# Calculate extra offset required for the cross pixels
offset = 256 + 2 * hits_cross_extra_offset
# ChipId 0
ind = tuple([hits['chipId'] == 0])
hits['x'][ind] = hits['x'][ind] + offset
hits['y'][ind] = 255 - hits['y'][ind] + offset
# ChipId 1
ind = tuple([hits['chipId'] == 1])
hits['x'][ind] = 255 - hits['x'][ind] + offset
# hits['y'][ind] = hits['y'][ind]
# ChipId 2
ind = tuple([hits['chipId'] == 2])
hits['x'][ind] = 255 - hits['x'][ind]
# hits['y'][ind] = hits['y'][ind]
# ChipId 3
ind = tuple([hits['chipId'] == 3])
# hits['x'][ind] = hits['x'][ind]
hits['y'][ind] = 255 - hits['y'][ind] + offset
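
# Illustrative note: with hits_cross_extra_offset = 2 the offset above is
# 256 + 2*2 = 260, the four 256x256 chips tile a 516x516 frame (see
# calculate_image_shape), and a 4-pixel-wide gap marks the cross between chips.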
def marker_pixel(hits, pixel):
# Find hits of the marker pixel
ind = (hits['chipId'] == pixel['chipId']) & (hits['x'] == pixel['x']) & (hits['y'] == pixel['y'])
# Marker pixel not found in chunk
if np.count_nonzero(ind) == 0:
return hits, -1, -1
min_toa = np.min(hits[ind]['ToA'])
max_toa = np.max(hits[ind]['ToA'])
# Delete all hits from this marker pixel
indices = np.arange(len(hits))
hits = np.delete(hits, indices[ind], axis=0)
return hits, min_toa, max_toa
def parse_data_packages(positions, f, tot_correction, settings):
# Allocate space for storing hits
n_hits = sum(pos[1] // 8 for pos in positions)
hits = np.zeros(n_hits, dtype=dt_hit)
i = 0
for pos in positions:
for hit in parse_data_package(f, pos, tot_correction, settings.hits_tot_threshold, settings.hits_toa_phase_correction):
if hit is not None:
hits[i] = hit
i += 1
# There may have been hits that were not parsed (failed package), resize those empty rows away.
hits.resize((i,), refcheck=False)
hits, min_toa, max_toa = marker_pixel(hits, {'x': 237, 'y': 176, 'chipId': 0})
if settings.hits_remove_cross:
hits = remove_cross_hits(hits)
if settings.hits_combine_chips:
combine_chips(hits, settings.hits_cross_extra_offset)
hits = np.sort(hits, 0, 'stable', 'ToA')
return hits, min_toa, max_toa
def parse_data_package(f, pos, tot_correction, tot_threshold, toa_phase_correction):
f.seek(pos[0])
b = f.read(pos[1])
# Read pixels as unsigned longs. pos[1] contains number of bytes per position. Unsigned long is 8 bytes
struct_fmt = "<{}Q".format(pos[1] // 8)
try:
pixels = struct.unpack(struct_fmt, b)
except struct.error as e:
logger.error('Failed reading data package at position %d of file (error: %s)' % (pos[0], str(e)))
return
if pixels[0] >> 60 != 0xb:
logger.error('Failed parsing data package at position %d of file' % pos[0])
yield None
return
for i, pixel in enumerate(pixels):
col = (pixel & 0x0FE0000000000000) >> 52
super_pix = (pixel & 0x001F800000000000) >> 45
pix = (pixel & 0x0000700000000000) >> 44
x = int(col + pix / 4)
y = int(super_pix + (pix & 0x3))
spidr_time = (pixel & 0xffff)
coarse_toa = (pixel >> (16 + 14) & 0x3fff)
tot = int((pixel >> (16 + 4)) & 0x3ff)
fine_toa = (pixel >> 16) & 0xf
# Combine coarse ToA with fine ToA to form the combined ToA
toa = int(coarse_toa << 4) - int(fine_toa)
# Check if we would like to correct for phase shifts in the ToA values
if toa_phase_correction > 0:
# Shifting all cToA one full cycle forward, as I do not want to go below zero due to the correction
toa = toa + 16
if toa_phase_correction == 1:
toa = apply_toa_railroad_correction_phase1_um(x, toa, pos[2])
elif toa_phase_correction == 2:
toa = apply_toa_railroad_correction_phase1_basel(x, toa, pos[2])
elif toa_phase_correction == 3:
toa = apply_toa_phase2_correction(x, toa)
toa = apply_toa_railroad_correction_phase2(x, toa)
# Calculate the full time by using the combined info of:
# * the SPIDR time (16 bit)
# * the SPIDR rollover timer (pos[3])
# * the (corrected) combination of coarse toa (14 bit) and the fine toa (4 bit)
        # The ToA can be negative, so it is added arithmetically rather than OR-ed in
global_time = int((spidr_time << 18) | (pos[3] << 34)) + toa
# Apply ToT correction matrix, when requested
if tot_correction is not None:
tot_correct = tot + apply_tot_correction(tot_correction, tot, y, x, pos[2])
else:
tot_correct = tot
if tot_correct < tot_threshold:
yield None
else:
yield pos[2], x, y, tot_correct, global_time
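
# Worked example (hypothetical values) for the timestamp composition above:
#     global_time = ((spidr_time << 18) | (rollover << 34)) + toa
# with spidr_time=1, rollover=0, toa=5:  (1 << 18) + 5 = 262149
# and each SPIDR rollover adds 1 << 34 = 17179869184 ticks.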
| 32.609877 | 132 | 0.602181 | 1,877 | 13,207 | 4.108684 | 0.20032 | 0.035399 | 0.008299 | 0.020228 | 0.314964 | 0.248703 | 0.220436 | 0.169476 | 0.10529 | 0.09777 | 0 | 0.047392 | 0.285833 | 13,207 | 404 | 133 | 32.690594 | 0.77025 | 0.185962 | 0 | 0.222689 | 0 | 0.004202 | 0.122802 | 0.002525 | 0 | 0 | 0.015432 | 0.002475 | 0 | 1 | 0.071429 | false | 0.004202 | 0.02521 | 0.008403 | 0.184874 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04959864b1c4805376ca408fc8be78c66f6a18b8 | 4,717 | py | Python | pysen/ext/isort_wrapper.py | linshoK/pysen | 2b84a15240c5a47cadd8e3fc8392c54c2995b0b1 | [
"MIT"
] | 423 | 2021-03-22T08:45:12.000Z | 2022-03-31T21:05:53.000Z | pysen/ext/isort_wrapper.py | linshoK/pysen | 2b84a15240c5a47cadd8e3fc8392c54c2995b0b1 | [
"MIT"
] | 1 | 2022-02-23T08:53:24.000Z | 2022-03-23T14:11:54.000Z | pysen/ext/isort_wrapper.py | linshoK/pysen | 2b84a15240c5a47cadd8e3fc8392c54c2995b0b1 | [
"MIT"
] | 9 | 2021-03-26T14:20:07.000Z | 2022-03-24T13:17:06.000Z | import copy
import dataclasses
import enum
import functools
import pathlib
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
from pysen import process_utils
from pysen.command import check_command_installed
from pysen.dist_version import get_version
from pysen.error_lines import parse_error_diffs
from pysen.exceptions import IncompatibleVersionError, UnexpectedErrorFormat
from pysen.path import change_dir
from pysen.py_version import VersionRepresentation
from pysen.reporter import Reporter
from pysen.setting import SettingBase
_SettingFileName = "pyproject.toml"
class IsortSectionName(enum.Enum):
FUTURE = "FUTURE"
STDLIB = "STDLIB"
THIRDPARTY = "THIRDPARTY"
FIRSTPARTY = "FIRSTPARTY"
LOCALFOLDER = "LOCALFOLDER"
@functools.lru_cache(1)
def _get_isort_version() -> VersionRepresentation:
version = get_version("isort")
if version.major not in [4, 5]:
raise IncompatibleVersionError(
"pysen only supports isort versions 4 and 5. "
f"version {version} is not supported."
)
return version
def _check_version_compatibility(
ensure_newline_before_comments: Optional[bool],
version: VersionRepresentation,
) -> None:
if version.major == 4 and ensure_newline_before_comments is not None:
raise IncompatibleVersionError(
"isort option `ensure_newline_before_comments`"
f"is not supported in your isort version {version}"
)
@dataclasses.dataclass
class IsortSetting(SettingBase):
force_grid_wrap: int = 0
force_single_line: bool = False
include_trailing_comma: bool = True
known_first_party: Optional[Set[str]] = None
known_third_party: Optional[Set[str]] = None
line_length: int = 88
multi_line_output: int = 3
default_section: Optional[IsortSectionName] = None
sections: Optional[List[IsortSectionName]] = None
use_parentheses: bool = True
ensure_newline_before_comments: Optional[bool] = None
@staticmethod
def default() -> "IsortSetting":
return IsortSetting()
def to_black_compatible(self) -> "IsortSetting":
# NOTE(igarashi)
# multi_line_output: black uses 3 (Vertical Hanging Indent)
# include_trailing_comma: black appends trailing comma
# force_grid_wrap: the property means isort grid-wrap the statement regardless
# of line length if the number of `from` imports is greater than
        #                   the property. black doesn't grid wrap the statement if it
# doesn't exceed the line length.
# use_parentheses: use parenthesis for line continuation instead of `\`
new = copy.deepcopy(self)
new.multi_line_output = 3
new.include_trailing_comma = True
new.force_grid_wrap = 0
new.use_parentheses = True
# See issue #277
isort_version = _get_isort_version().major
if isort_version >= 5:
new.ensure_newline_before_comments = True
return new
def export(self) -> Tuple[List[str], Dict[str, Any]]:
section_name = ["tool", "isort"]
_check_version_compatibility(
self.ensure_newline_before_comments,
_get_isort_version(),
)
entries = self.asdict(
omit_none=True, type_hooks={IsortSectionName: lambda x: x.value}
)
return section_name, entries
def _parse_file_path(file_path: str) -> pathlib.Path:
ret = file_path.split(" ")[0]
before_suffix = ":before"
after_suffix = ":after"
if ret.endswith(before_suffix):
return pathlib.Path(ret.rsplit(before_suffix, 1)[0])
elif ret.endswith(after_suffix):
return pathlib.Path(ret.rsplit(after_suffix, 1)[0])
else:
raise UnexpectedErrorFormat(file_path)
def run(
reporter: Reporter,
base_dir: pathlib.Path,
setting_path: pathlib.Path,
sources: Iterable[pathlib.Path],
inplace_edit: bool,
) -> int:
check_command_installed(*process_utils.add_python_executable("isort", "--version"))
version = _get_isort_version()
targets = [str(d) for d in sources]
if len(targets) == 0:
return 0
cmd = ["isort", "--settings-path", str(setting_path)]
if version.major == 4:
cmd.append("--recursive")
if not inplace_edit:
cmd += ["--diff", "--check-only"]
cmd += targets
with change_dir(base_dir):
ret, stdout, _ = process_utils.run(
process_utils.add_python_executable(*cmd), reporter
)
diagnostics = parse_error_diffs(stdout, _parse_file_path, logger=reporter.logger)
reporter.report_diagnostics(list(diagnostics))
return ret
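
# Usage sketch (hypothetical caller): serialize black-compatible isort
# settings into the [tool.isort] section of a pyproject.toml.
#
#     setting = IsortSetting.default().to_black_compatible()
#     section, entries = setting.export()
#     # section == ["tool", "isort"]; entries maps option names to values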
| 32.088435 | 89 | 0.682213 | 560 | 4,717 | 5.530357 | 0.328571 | 0.026154 | 0.03681 | 0.052309 | 0.080723 | 0.045851 | 0 | 0 | 0 | 0 | 0 | 0.006895 | 0.231291 | 4,717 | 146 | 90 | 32.308219 | 0.847215 | 0.104092 | 0 | 0.018018 | 0 | 0 | 0.081613 | 0.007592 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063063 | false | 0 | 0.135135 | 0.009009 | 0.432432 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0498bc969c8391d24523c65b85a0eaca372772f0 | 1,816 | py | Python | lib/cello_util/plasmid_map/plasmid_html.py | OGalOz/cello | 1b62807e9881900daf8f3f59eafb11743ad84d6d | [
"MIT"
] | null | null | null | lib/cello_util/plasmid_map/plasmid_html.py | OGalOz/cello | 1b62807e9881900daf8f3f59eafb11743ad84d6d | [
"MIT"
] | null | null | null | lib/cello_util/plasmid_map/plasmid_html.py | OGalOz/cello | 1b62807e9881900daf8f3f59eafb11743ad84d6d | [
"MIT"
] | null | null | null | #python3
"""
Takes template and inserts javascript into template html
"""
import json
"""
Inputs:
plasmid_js: (str) filepath to file containing javascript string.
out_fp: (str) filepath to where we'll write the file out.
"""
def html_prepare(plasmid_js_fp, template_html_fp, out_fp, config_fp):
with open (plasmid_js_fp, "r") as f:
js_str = f.read()
with open(template_html_fp, "r") as g:
html_str = g.read()
with open(config_fp, "r") as g:
config_dict = json.loads(g.read())
html_str = html_str.replace("&{--Highlight Color--}&",
'#' + config_dict['js_info']['highlight_color'])
html_str = html_str.replace("SVG_ID_HERE",
uniq_dict['svg_id'])
html_and_js_str = html_str.replace("{--Insert JS--}", js_str)
with open(out_fp, "w") as h:
h.write(html_and_js_str)
return 0
def div_html_prepare(plasmid_js_fp, template_html_fp, config_fp,
plasmid_info_fp, uniq_dict):
with open (plasmid_js_fp, "r") as f:
js_str = f.read()
with open(template_html_fp, "r") as g:
html_str = g.read()
with open(config_fp, "r") as g:
config_dict = json.loads(g.read())
with open(plasmid_info_fp, "r") as g:
plasmid_info_dict = json.loads(g.read())
html_str = html_str.replace("SVG_ID_HERE",
uniq_dict['svg_id'])
html_and_js_str = html_str.replace("{--Insert JS--}", js_str)
html_and_js_str = html_and_js_str.replace("&{--Highlight Color--}&",
'#' + config_dict['js_info']['highlight_color'])
html_dict = {
"plasmid_name": plasmid_info_dict["plasmid_name"],
"complete_div_str": html_and_js_str
}
'''
with open(out_fp, "w") as h:
h.write(html_and_js_str)
'''
return html_dict
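
# Usage sketch (file paths below are hypothetical):
#
#     html_dict = div_html_prepare(
#         "plasmid.js", "template.html", "config.json",
#         "plasmid_info.json", {"svg_id": "plasmid_svg_1"},
#     )
#     # html_dict["complete_div_str"] holds the rendered <div> markup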
| 25.577465 | 72 | 0.623899 | 275 | 1,816 | 3.778182 | 0.196364 | 0.057748 | 0.033686 | 0.080847 | 0.697786 | 0.688162 | 0.66795 | 0.66795 | 0.598653 | 0.598653 | 0 | 0.001441 | 0.235683 | 1,816 | 70 | 73 | 25.942857 | 0.747118 | 0.035242 | 0 | 0.555556 | 0 | 0 | 0.134299 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.027778 | 0 | 0.138889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
049aa68e09eda0578871d1ae318ec877fd5826d7 | 1,985 | py | Python | tests/test_cluster_01.py | wavestoweather/enstools | d0f612b0187b0ad54dfbbb78aa678564f46eaedf | [
"Apache-2.0"
] | 5 | 2021-12-16T14:08:00.000Z | 2022-03-02T14:08:10.000Z | tests/test_cluster_01.py | wavestoweather/enstools | d0f612b0187b0ad54dfbbb78aa678564f46eaedf | [
"Apache-2.0"
] | null | null | null | tests/test_cluster_01.py | wavestoweather/enstools | d0f612b0187b0ad54dfbbb78aa678564f46eaedf | [
"Apache-2.0"
] | null | null | null | import numpy
from enstools.clustering import prepare
from enstools.clustering import cluster
from sklearn.cluster import KMeans
variables = []
ens_members = 20
n_variables = 2
def setup():
"""
create two variables for clustering
"""
for ivar in range(n_variables):
var = numpy.random.randn(ens_members, 10, 10)
for iens in range(ens_members):
if iens % 2 == 0:
var[iens, 0:5, :] += 1
else:
var[iens, 5:10, :] += 1
variables.append(var)
def test_prepare():
"""
test of the variable preparation enstools.cluster.prepare
"""
# input with different shapes
with numpy.testing.assert_raises(ValueError):
x = prepare(numpy.zeros((ens_members, 10, 11)), *variables)
# test with valid input
x = prepare(*variables)
numpy.testing.assert_array_equal(x.shape, (ens_members, 200))
def test_prepare_kmeans():
"""
use the prepare function to perform a kmeans clustering
"""
# prepare the data
x = prepare(*variables)
# perform the clustering
labels = KMeans(n_clusters=2).fit_predict(x)
# all even elements should be in one cluster, all odd in another
even = labels[0:ens_members:2]
odd = labels[1:ens_members:2]
numpy.testing.assert_array_equal(even, numpy.repeat(even[0], ens_members / 2))
numpy.testing.assert_array_equal(odd, numpy.repeat(odd[0], ens_members / 2))
numpy.testing.assert_equal(even[0] != odd[0], True)
def test_prepare_cluster():
"""
test sklearn wrapper with automatic number of cluster estimation
"""
# prepare the data
x = prepare(*variables)
# perform the clustering, the number of clusters should be 2
labels = cluster("kmeans", x)
numpy.testing.assert_equal(numpy.unique(labels).size, 2)
# same test with agglomerative clustering, method ward
labels = cluster("agglo", x)
numpy.testing.assert_equal(numpy.unique(labels).size, 2)
| 28.357143 | 82 | 0.663476 | 267 | 1,985 | 4.827715 | 0.322097 | 0.069822 | 0.09775 | 0.05353 | 0.256788 | 0.235066 | 0.235066 | 0.211016 | 0.150504 | 0.071373 | 0 | 0.024167 | 0.228715 | 1,985 | 69 | 83 | 28.768116 | 0.817766 | 0.250378 | 0 | 0.142857 | 0 | 0 | 0.007746 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.114286 | false | 0 | 0.114286 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
049b4aaa6c9b6af5b5f7a084af5bbd63b6108f08 | 17,349 | py | Python | magpylib/_src/display/display_matplotlib.py | OrtnerMichael/magPyLib | 4c7e7f56f6e0b915ec0e024c172c460fa80126e5 | [
"BSD-2-Clause"
] | null | null | null | magpylib/_src/display/display_matplotlib.py | OrtnerMichael/magPyLib | 4c7e7f56f6e0b915ec0e024c172c460fa80126e5 | [
"BSD-2-Clause"
] | null | null | null | magpylib/_src/display/display_matplotlib.py | OrtnerMichael/magPyLib | 4c7e7f56f6e0b915ec0e024c172c460fa80126e5 | [
"BSD-2-Clause"
] | null | null | null | """ matplotlib draw-functionalities"""
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from magpylib._src.defaults.defaults_classes import default_settings as Config
from magpylib._src.display.display_utility import draw_arrow_from_vertices
from magpylib._src.display.display_utility import draw_arrowed_circle
from magpylib._src.display.display_utility import faces_cuboid
from magpylib._src.display.display_utility import faces_cylinder
from magpylib._src.display.display_utility import faces_cylinder_segment
from magpylib._src.display.display_utility import faces_sphere
from magpylib._src.display.display_utility import get_flatten_objects_properties
from magpylib._src.display.display_utility import get_rot_pos_from_path
from magpylib._src.display.display_utility import MagpyMarkers
from magpylib._src.display.display_utility import place_and_orient_model3d
from magpylib._src.display.display_utility import system_size
from magpylib._src.input_checks import check_excitations
from magpylib._src.style import get_style
def draw_directs_faced(faced_objects, colors, ax, show_path, size_direction):
"""draw direction of magnetization of faced magnets
Parameters
----------
- faced_objects(list of src objects): with magnetization vector to be drawn
- colors: colors of faced_objects
- ax(Pyplot 3D axis): to draw in
- show_path(bool or int): draw on every position where object is displayed
"""
# pylint: disable=protected-access
# pylint: disable=too-many-branches
points = []
for col, obj in zip(colors, faced_objects):
# add src attributes position and orientation depending on show_path
rots, poss, inds = get_rot_pos_from_path(obj, show_path)
# vector length, color and magnetization
if obj._object_type in ("Cuboid", "Cylinder"):
length = 1.8 * np.amax(obj.dimension)
elif obj._object_type == "CylinderSegment":
length = 1.8 * np.amax(obj.dimension[:3]) # d1,d2,h
else:
length = 1.8 * obj.diameter # Sphere
mag = obj.magnetization
# collect all draw positions and directions
draw_pos, draw_direc = [], []
for rot, pos, ind in zip(rots, poss, inds):
if obj._object_type == "CylinderSegment":
# change cylinder_tile draw_pos to barycenter
pos = obj._barycenter[ind]
draw_pos += [pos]
direc = mag / (np.linalg.norm(mag) + 1e-6)
draw_direc += [rot.apply(direc)]
draw_pos = np.array(draw_pos)
draw_direc = np.array(draw_direc)
# use quiver() separately for each object to easier control
# color and vector length
ax.quiver(
draw_pos[:, 0],
draw_pos[:, 1],
draw_pos[:, 2],
draw_direc[:, 0],
draw_direc[:, 1],
draw_direc[:, 2],
length=length * size_direction,
color=col,
)
arrow_tip_pos = ((draw_direc * length * size_direction) + draw_pos)[0]
points.append(arrow_tip_pos)
return points
def draw_markers(markers, ax, color, symbol, size):
"""draws magpylib markers"""
ax.plot(
markers[:, 0],
markers[:, 1],
markers[:, 2],
color=color,
ls="",
marker=symbol,
ms=size,
)
def draw_path(
obj, col, marker_symbol, marker_size, marker_color, line_style, line_width, ax
):
"""draw path in given color and return list of path-points"""
# pylint: disable=protected-access
path = obj._position
if len(path) > 1:
ax.plot(
path[:, 0],
path[:, 1],
path[:, 2],
ls=line_style,
lw=line_width,
color=col,
marker=marker_symbol,
mfc=marker_color,
mec=marker_color,
ms=marker_size,
)
ax.plot(
[path[0, 0]], [path[0, 1]], [path[0, 2]], marker="o", ms=4, mfc=col, mec="k"
)
return list(path)
def draw_faces(faces, col, lw, alpha, ax):
"""draw faces in respective color and return list of vertex-points"""
cuboid_faces = Poly3DCollection(
faces,
facecolors=col,
linewidths=lw,
edgecolors="k",
alpha=alpha,
)
ax.add_collection3d(cuboid_faces)
return faces
def draw_pixel(sensors, ax, col, pixel_col, pixel_size, pixel_symb, show_path):
"""draw pixels and return a list of pixel-points in global CS"""
# pylint: disable=protected-access
# collect sensor and pixel positions in global CS
pos_sens, pos_pixel = [], []
for sens in sensors:
rots, poss, _ = get_rot_pos_from_path(sens, show_path)
pos_pixel_flat = np.reshape(sens.pixel, (-1, 3))
for rot, pos in zip(rots, poss):
pos_sens += [pos]
for pix in pos_pixel_flat:
pos_pixel += [pos + rot.apply(pix)]
pos_all = pos_sens + pos_pixel
pos_pixel = np.array(pos_pixel)
# display pixel positions
ax.plot(
pos_pixel[:, 0],
pos_pixel[:, 1],
pos_pixel[:, 2],
marker=pixel_symb,
mfc=pixel_col,
mew=pixel_size,
mec=col,
ms=pixel_size * 4,
ls="",
)
# return all positions for system size evaluation
return list(pos_all)
def draw_sensors(sensors, ax, sys_size, show_path, size, arrows_style):
"""draw sensor cross"""
# pylint: disable=protected-access
arrowlength = sys_size * size / Config.display.autosizefactor
# collect plot data
possis, exs, eys, ezs = [], [], [], []
for sens in sensors:
rots, poss, _ = get_rot_pos_from_path(sens, show_path)
for rot, pos in zip(rots, poss):
possis += [pos]
exs += [rot.apply((1, 0, 0))]
eys += [rot.apply((0, 1, 0))]
ezs += [rot.apply((0, 0, 1))]
possis = np.array(possis)
coords = np.array([exs, eys, ezs])
# quiver plot of basis vectors
arrow_colors = (
arrows_style.x.color,
arrows_style.y.color,
arrows_style.z.color,
)
arrow_show = (arrows_style.x.show, arrows_style.y.show, arrows_style.z.show)
for acol, ashow, es in zip(arrow_colors, arrow_show, coords):
if ashow:
ax.quiver(
possis[:, 0],
possis[:, 1],
possis[:, 2],
es[:, 0],
es[:, 1],
es[:, 2],
color=acol,
length=arrowlength,
)
def draw_dipoles(dipoles, ax, sys_size, show_path, size, color, pivot):
"""draw dipoles"""
# pylint: disable=protected-access
# collect plot data
possis, moms = [], []
for dip in dipoles:
rots, poss, _ = get_rot_pos_from_path(dip, show_path)
mom = dip.moment / np.linalg.norm(dip.moment)
for rot, pos in zip(rots, poss):
possis += [pos]
moms += [rot.apply(mom)]
possis = np.array(possis)
moms = np.array(moms)
# quiver plot of basis vectors
arrowlength = sys_size * size / Config.display.autosizefactor
ax.quiver(
possis[:, 0],
possis[:, 1],
possis[:, 2],
moms[:, 0],
moms[:, 1],
moms[:, 2],
color=color,
length=arrowlength,
pivot=pivot, # {'tail', 'middle', 'tip'},
)
def draw_circular(circulars, show_path, col, size, width, ax):
"""draw circulars and return a list of positions"""
# pylint: disable=protected-access
# graphical settings
discret = 72 + 1
lw = width
draw_pos = [] # line positions
for circ in circulars:
# add src attributes position and orientation depending on show_path
rots, poss, _ = get_rot_pos_from_path(circ, show_path)
# init orientation line positions
vertices = draw_arrowed_circle(circ.current, circ.diameter, size, discret).T
# apply pos and rot, draw, store line positions
for rot, pos in zip(rots, poss):
possis1 = rot.apply(vertices) + pos
ax.plot(possis1[:, 0], possis1[:, 1], possis1[:, 2], color=col, lw=lw)
draw_pos += list(possis1)
return draw_pos
def draw_line(lines, show_path, col, size, width, ax) -> list:
"""draw lines and return a list of positions"""
# pylint: disable=protected-access
# graphical settings
lw = width
draw_pos = [] # line positions
for line in lines:
# add src attributes position and orientation depending on show_path
rots, poss, _ = get_rot_pos_from_path(line, show_path)
# init orientation line positions
if size != 0:
vertices = draw_arrow_from_vertices(line.vertices, line.current, size)
else:
vertices = np.array(line.vertices).T
# apply pos and rot, draw, store line positions
for rot, pos in zip(rots, poss):
possis1 = rot.apply(vertices.T) + pos
ax.plot(possis1[:, 0], possis1[:, 1], possis1[:, 2], color=col, lw=lw)
draw_pos += list(possis1)
return draw_pos
def draw_model3d_extra(obj, style, show_path, ax, color):
"""positions, orients and draws extra 3d model including path positions
returns True if at least one the traces is now new default"""
extra_model3d_traces = style.model3d.data if style.model3d.data is not None else []
points = []
rots, poss, _ = get_rot_pos_from_path(obj, show_path)
for orient, pos in zip(rots, poss):
for extr in extra_model3d_traces:
if extr.show:
extr.update(extr.updatefunc())
if extr.backend == "matplotlib":
kwargs = extr.kwargs() if callable(extr.kwargs) else extr.kwargs
args = extr.args() if callable(extr.args) else extr.args
kwargs, args, vertices = place_and_orient_model3d(
model_kwargs=kwargs,
model_args=args,
orientation=orient,
position=pos,
coordsargs=extr.coordsargs,
scale=extr.scale,
return_vertices=True,
return_model_args=True,
)
points.append(vertices.T)
if "color" not in kwargs or kwargs["color"] is None:
kwargs.update(color=color)
getattr(ax, extr.constructor)(*args, **kwargs)
return points
def display_matplotlib(
*obj_list_semi_flat,
axis=None,
markers=None,
zoom=0,
color_sequence=None,
**kwargs,
):
"""
Display objects and paths graphically with the matplotlib backend.
- axis: matplotlib axis3d object
- markers: list of marker positions
- path: bool / int / list of ints
- zoom: zoom level, 0=tight boundaries
- color_sequence: list of colors for object coloring
"""
# pylint: disable=protected-access
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
# apply config default values if None
# create or set plotting axis
if axis is None:
fig = plt.figure(dpi=80, figsize=(8, 8))
ax = fig.add_subplot(111, projection="3d")
ax.set_box_aspect((1, 1, 1))
generate_output = True
else:
ax = axis
generate_output = False
# draw objects and evaluate system size --------------------------------------
# draw faced objects and store vertices
points = []
dipoles = []
sensors = []
flat_objs_props = get_flatten_objects_properties(
*obj_list_semi_flat, color_sequence=color_sequence
)
for obj, props in flat_objs_props.items():
color = props["color"]
style = get_style(obj, Config, **kwargs)
path_frames = style.path.frames
if path_frames is None:
path_frames = True
obj_color = style.color if style.color is not None else color
lw = 0.25
faces = None
if obj.style.model3d.data:
pts = draw_model3d_extra(obj, style, path_frames, ax, obj_color)
points += pts
if obj.style.model3d.showdefault:
if obj._object_type == "Cuboid":
lw = 0.5
faces = faces_cuboid(obj, path_frames)
elif obj._object_type == "Cylinder":
faces = faces_cylinder(obj, path_frames)
elif obj._object_type == "CylinderSegment":
faces = faces_cylinder_segment(obj, path_frames)
elif obj._object_type == "Sphere":
faces = faces_sphere(obj, path_frames)
elif obj._object_type == "Line":
if style.arrow.show:
check_excitations([obj])
arrow_size = style.arrow.size if style.arrow.show else 0
arrow_width = style.arrow.width
points += draw_line(
[obj], path_frames, obj_color, arrow_size, arrow_width, ax
)
elif obj._object_type == "Loop":
if style.arrow.show:
check_excitations([obj])
arrow_width = style.arrow.width
arrow_size = style.arrow.size if style.arrow.show else 0
points += draw_circular(
[obj], path_frames, obj_color, arrow_size, arrow_width, ax
)
elif obj._object_type == "Sensor":
sensors.append((obj, obj_color))
points += draw_pixel(
[obj],
ax,
obj_color,
style.pixel.color,
style.pixel.size,
style.pixel.symbol,
path_frames,
)
elif obj._object_type == "Dipole":
dipoles.append((obj, obj_color))
points += [obj.position]
elif obj._object_type == "CustomSource":
draw_markers(
np.array([obj.position]), ax, obj_color, symbol="*", size=10
)
label = (
obj.style.label
if obj.style.label is not None
else str(type(obj).__name__)
)
ax.text(*obj.position, label, horizontalalignment="center")
points += [obj.position]
if faces is not None:
alpha = style.opacity
pts = draw_faces(faces, obj_color, lw, alpha, ax)
points += [np.vstack(pts).reshape(-1, 3)]
if style.magnetization.show:
check_excitations([obj])
pts = draw_directs_faced(
[obj],
[obj_color],
ax,
path_frames,
style.magnetization.size,
)
points += pts
if style.path.show:
marker, line = style.path.marker, style.path.line
points += draw_path(
obj,
obj_color,
marker.symbol,
marker.size,
marker.color,
line.style,
line.width,
ax,
)
# markers -------------------------------------------------------
if markers is not None and markers:
m = MagpyMarkers()
style = get_style(m, Config, **kwargs)
markers = np.array(markers)
s = style.marker
draw_markers(markers, ax, s.color, s.symbol, s.size)
points += [markers]
# draw direction arrows (based on src size) -------------------------
# objects with faces
# determine system size -----------------------------------------
limx1, limx0, limy1, limy0, limz1, limz0 = system_size(points)
# make sure ranges are not null
limits = np.array([[limx0, limx1], [limy0, limy1], [limz0, limz1]])
limits[np.squeeze(np.diff(limits)) == 0] += np.array([-1, 1])
sys_size = np.max(np.diff(limits))
c = limits.mean(axis=1)
m = sys_size.max() / 2
ranges = np.array([c - m * (1 + zoom), c + m * (1 + zoom)]).T
    # draw all system-size-based quantities ----------------------------
    # (per-object loop; not optimal if there are many sensors/dipoles)
for sens in sensors:
sensor, color = sens
style = get_style(sensor, Config, **kwargs)
draw_sensors([sensor], ax, sys_size, path_frames, style.size, style.arrows)
for dip in dipoles:
dipole, color = dip
style = get_style(dipole, Config, **kwargs)
draw_dipoles(
[dipole], ax, sys_size, path_frames, style.size, color, style.pivot
)
# plot styling --------------------------------------------------
ax.set(
**{f"{k}label": f"{k} [mm]" for k in "xyz"},
**{f"{k}lim": r for k, r in zip("xyz", ranges)},
)
# generate output ------------------------------------------------
if generate_output:
plt.show()
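
# Usage sketch, assuming magpylib's Cuboid magnet class is available:
#
#     import magpylib as magpy
#     cube = magpy.magnet.Cuboid(magnetization=(0, 0, 1000), dimension=(1, 1, 1))
#     display_matplotlib(cube, zoom=1)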
| 34.837349 | 88 | 0.560551 | 2,056 | 17,349 | 4.565661 | 0.159047 | 0.01534 | 0.022371 | 0.02578 | 0.320017 | 0.278896 | 0.259188 | 0.198786 | 0.149249 | 0.118994 | 0 | 0.011916 | 0.322785 | 17,349 | 497 | 89 | 34.907445 | 0.787046 | 0.170212 | 0 | 0.236769 | 0 | 0 | 0.012375 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030641 | false | 0 | 0.047354 | 0 | 0.097493 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
049c51b8f00635b7afe59969e4b68f6036acb2b1 | 3,493 | py | Python | solum/builder/controllers/v1/image.py | ed-/solum | 2d23edb7fb53e1bdeff510710824658575d166c4 | [
"Apache-2.0"
] | null | null | null | solum/builder/controllers/v1/image.py | ed-/solum | 2d23edb7fb53e1bdeff510710824658575d166c4 | [
"Apache-2.0"
] | null | null | null | solum/builder/controllers/v1/image.py | ed-/solum | 2d23edb7fb53e1bdeff510710824658575d166c4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 - Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from solum.api.controllers.v1.datamodel import types as api_types
from solum.builder.handlers import image_handler
from solum.common import exception
from solum import objects
STATE_KIND = wtypes.Enum(str, *objects.image.States.values())
IMAGE_KIND = wtypes.Enum(str, 'auto', 'qcow2', 'docker')
SOURCE_KIND = wtypes.Enum(str, 'auto', 'heroku',
'dib', 'dockerfile')
class Image(api_types.Base):
"""The Image resource represents an image."""
source_uri = wtypes.text
"""The URI of the app/element."""
source_format = SOURCE_KIND
"""The source repository format."""
state = STATE_KIND
"""The state of the image. """
base_image_id = wtypes.text
"""The id (in glance) of the image to customize."""
image_format = IMAGE_KIND
"""The image format."""
created_image_id = wtypes.text
"""The id of the created image in glance."""
@classmethod
def sample(cls):
return cls(uri='http://example.com/v1/images/b3e0d79',
source_uri='git://example.com/project/app.git',
source_format='heroku',
name='php-web-app',
type='image',
description='A php web application',
tags=['group_xyz'],
project_id='1dae5a09ef2b4d8cbf3594b0eb4f6b94',
user_id='55f41cf46df74320b9486a35f5d28a11',
base_image_id='4dae5a09ef2b4d8cbf3594b0eb4f6b94',
created_image_id='4afasa09ef2b4d8cbf3594b0ec4f6b94',
image_format='docker')
class ImageController(rest.RestController):
"""Manages operations on a single image."""
def __init__(self, image_id):
super(ImageController, self).__init__()
self._id = image_id
@exception.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(Image)
def get(self):
"""Return this image."""
handler = image_handler.ImageHandler(
pecan.request.security_context)
host_url = pecan.request.host_url
return Image.from_db_model(handler.get(self._id), host_url)
class ImagesController(rest.RestController):
"""Manages operations on the images collection."""
@pecan.expose()
def _lookup(self, image_id, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
return ImageController(image_id), remainder
@wsme_pecan.wsexpose(Image, body=Image, status_code=201)
def post(self, data):
"""Create a new image."""
handler = image_handler.ImageHandler(
pecan.request.security_context)
host_url = pecan.request.host_url
return Image.from_db_model(
handler.create(data.as_dict(objects.registry.Image)), host_url)
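
# Illustrative request (hypothetical host; body fields follow Image.sample()):
#
#     POST /v1/images
#     {"source_uri": "git://example.com/project/app.git",
#      "source_format": "heroku", "name": "php-web-app", "type": "image"}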
| 33.912621 | 75 | 0.665331 | 428 | 3,493 | 5.28271 | 0.399533 | 0.024768 | 0.018576 | 0.022556 | 0.175144 | 0.123839 | 0.104379 | 0.104379 | 0.104379 | 0.104379 | 0 | 0.033271 | 0.234183 | 3,493 | 102 | 76 | 34.245098 | 0.811963 | 0.206127 | 0 | 0.105263 | 0 | 0 | 0.116316 | 0.063914 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087719 | false | 0 | 0.140351 | 0.017544 | 0.45614 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
049db6a644f9d8354d438e91d794bf040ae0bcbe | 1,091 | py | Python | pyAI-OpenMV4/3.机器学习/2.笑脸识别/nn_haar_smile_detection.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | 73 | 2020-05-02T13:48:27.000Z | 2022-03-26T13:15:10.000Z | pyAI-OpenMV4/3.机器学习/2.笑脸识别/nn_haar_smile_detection.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | null | null | null | pyAI-OpenMV4/3.机器学习/2.笑脸识别/nn_haar_smile_detection.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | 50 | 2020-05-15T13:57:28.000Z | 2022-03-30T14:03:33.000Z | # Smile detection example (Haar Cascade + CNN model).
#
# Translated and annotated by: 01Studio
import sensor, time, image, os, nn
# Camera setup
sensor.reset() # Reset and initialize the sensor.
sensor.set_contrast(2)
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)
# Load the smile-detection neural network model
net = nn.load('/smile.network')
# Load the Haar Cascade face-detection model
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)
# FPS clock
clock = time.clock()
while (True):
clock.tick()
# Capture snapshot
img = sensor.snapshot()
    # Find faces in the image.
    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
    # Detect smiles
    for r in objects:
        # Rebuild the detection box, shrinking it to the face region
r = [r[0]+10, r[1]+25, int(r[2]*0.70), int(r[2]*0.70)]
img.draw_rectangle(r)
out = net.forward(img, roi=r, softmax=True)
img.draw_string(r[0], r[1], ':)' if (out[0] > 0.8) else ':(', color=(255), scale=2)
print(clock.fps())
| 25.372093 | 91 | 0.64528 | 159 | 1,091 | 4.345912 | 0.553459 | 0.052098 | 0.014472 | 0.017366 | 0.023155 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057803 | 0.207149 | 1,091 | 42 | 92 | 25.97619 | 0.74104 | 0.221815 | 0 | 0 | 0 | 0 | 0.034772 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
049f32d34a3c6febe0ef73ec987f383e72dcf188 | 1,170 | py | Python | mapshader/__init__.py | lex-c/mapshader | 321246b3011e36abe580d0beff4f07ec5ee38d95 | [
"MIT"
] | null | null | null | mapshader/__init__.py | lex-c/mapshader | 321246b3011e36abe580d0beff4f07ec5ee38d95 | [
"MIT"
] | null | null | null | mapshader/__init__.py | lex-c/mapshader | 321246b3011e36abe580d0beff4f07ec5ee38d95 | [
"MIT"
] | null | null | null | import sys
try:
from ._version import __version__
except ImportError:
__version__ = "Unknown"
def test():
"""Run the mapshader test suite."""
import os
try:
import pytest
except ImportError:
sys.stderr.write("You need to install py.test to run tests.\n\n")
raise
pytest.main([os.path.dirname(__file__)])
def hello(services=None):
msg = r'''
__ __ __ ____ ___ _ _ __ ____ ____ ____
( \/ ) /__\ ( _ \/ __)( )_( ) /__\ ( _ \( ___)( _ \
) ( /(__)\ )___/\__ \ ) _ ( /(__)\ )(_) ))__) ) /
(_/\/\_)(__)(__)(__) (___/(_) (_)(__)(__)(____/(____)(_)\_)
___ ___ ___ ___ ___ ___ ___ ___ ___ ___ ___ ___
(___)(___)(___)(___)(___)(___)(___)(___)(___)(___)(___)(___)
''' + f'\n\t Version: {__version__}\n'
print(msg, file=sys.stdout)
print('\tServices', file=sys.stdout)
print('\t--------\n', file=sys.stdout)
    for s in services or []:  # guard: `services` defaults to None
service_msg = f'\t > {s.name} - {s.service_type} - {s.source.geometry_type} - {s.source.description}'
print(service_msg, file=sys.stdout)
| 31.621622 | 109 | 0.529915 | 102 | 1,170 | 4.362745 | 0.480392 | 0.062921 | 0.116854 | 0.07191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.286325 | 1,170 | 36 | 110 | 32.5 | 0.532934 | 0.024786 | 0 | 0.206897 | 0 | 0.172414 | 0.532159 | 0.118943 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.241379 | 0 | 0.310345 | 0.137931 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
049f8780ded61cd749db86845928ea603541d12d | 1,357 | py | Python | server/apps/bot/dispatcher/callbacks/list_movies.py | LowerDeez/movies_finder | 3763bfe4c0d1cfe36e081c45a9cc9cdaa85e0ee4 | [
"MIT"
] | null | null | null | server/apps/bot/dispatcher/callbacks/list_movies.py | LowerDeez/movies_finder | 3763bfe4c0d1cfe36e081c45a9cc9cdaa85e0ee4 | [
"MIT"
] | null | null | null | server/apps/bot/dispatcher/callbacks/list_movies.py | LowerDeez/movies_finder | 3763bfe4c0d1cfe36e081c45a9cc9cdaa85e0ee4 | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING
from apps.bot.dispatcher.consts import (
    CONSTS,
    ACTION_CHOICES,
    STATE_CHOICES
)
from apps.bot.dispatcher.services import (
    get_current_page,
    render_movies,
    get_last_movie_keyboard
)
from apps.bot.tmdb import TMDBWrapper

if TYPE_CHECKING:
    from telegram import Update
    from telegram.ext import CallbackContext

__all__ = (
    'list_movies_callback',
)

def list_movies_callback(update: 'Update', context: 'CallbackContext'):
    print('List movies...')
    update.callback_query.answer()
    page = get_current_page()
    list_method = context.user_data.get(CONSTS.list_method)
    callback_data = update.callback_query.data
    if callback_data == ACTION_CHOICES.next_movies:
        page += 1
    else:
        # new list method
        if list_method != callback_data:
            page = 1
        list_method = callback_data
    print('List method:', list_method)
    context.user_data[CONSTS.list_method] = list_method
    tmdb = TMDBWrapper(language=context.user_data.get('language'))
    method = getattr(tmdb, list_method)
    movies = method(page=page)
    render_movies(
        context=context,
        movies=movies,
        message=update.callback_query.message,
        reply_markup=get_last_movie_keyboard(movies=movies)
    )
    return STATE_CHOICES.listing_movies
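# Editor's note: a hypothetical registration sketch (python-telegram-bot v13-era
# API, matching the CallbackContext import above — not part of the original file):
#
#     from telegram.ext import CallbackQueryHandler
#     dispatcher.add_handler(CallbackQueryHandler(list_movies_callback))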
| 23.807018 | 71 | 0.704495 | 165 | 1,357 | 5.509091 | 0.30303 | 0.110011 | 0.036304 | 0.072607 | 0.055006 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001869 | 0.211496 | 1,357 | 56 | 72 | 24.232143 | 0.847664 | 0.011054 | 0 | 0 | 0 | 0 | 0.05597 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.142857 | 0 | 0.190476 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04a27c0d4d3c374f1212533d3cfbf7e2202872c1 | 1,704 | py | Python | subt/test_subt.py | robotika/osgar | 6f4f584d5553ab62c08a1c7bb493fefdc9033173 | [
"MIT"
] | 12 | 2017-02-16T10:22:59.000Z | 2022-03-20T05:48:06.000Z | subt/test_subt.py | robotika/osgar | 6f4f584d5553ab62c08a1c7bb493fefdc9033173 | [
"MIT"
] | 618 | 2016-08-30T04:46:12.000Z | 2022-03-25T16:03:10.000Z | subt/test_subt.py | robotika/osgar | 6f4f584d5553ab62c08a1c7bb493fefdc9033173 | [
"MIT"
] | 11 | 2016-08-27T20:02:55.000Z | 2022-03-07T08:53:53.000Z | import unittest
import logging
import math
from unittest.mock import MagicMock, call
from osgar.bus import Bus
from osgar.lib import quaternion
from subt.main import SubTChallenge
from subt.trace import distance3D
from subt import simulation
g_logger = logging.getLogger(__name__)
def entrance_reached(sim):
    corrected = [(rr - oo) for rr, oo in zip(sim.xyz, sim.origin)]
    goal = [0.5, 0, 0]  # note, that in a real run the Y coordinate depends on the choice left/right
    if distance3D(corrected, goal) < 2:
        return True
    return False

class SubTChallengeTest(unittest.TestCase):

    def Xtest_go_to_entrance(self):
        config = {'virtual_world': True, 'max_speed': 1.0, 'gap_size': 0.8, 'wall_dist': 0.8, 'timeout': 600, 'symmetric': False, 'right_wall': 'auto'}
        bus = Bus(simulation.SimLogger())
        app = SubTChallenge(config, bus.handle('app'))
        sim = simulation.Simulation(bus.handle('sim'), end_condition=entrance_reached)
        g_logger.info("connecting:")
        for o in bus.handle('sim').out:
            if o == 'pose2d':
                continue  # connect 'pose3d' only
            g_logger.info(f'  sim.{o} → app.{o}')
            bus.connect(f'sim.{o}', f'app.{o}')
        for o in bus.handle('app').out:
            g_logger.info(f'  app.{o} → sim.{o}')
            bus.connect(f'app.{o}', f'sim.{o}')
        g_logger.info("done.")
        app.start()
        sim.start()
        sim.join()
        app.request_stop()
        app.join()
        self.assertTrue(entrance_reached(sim))

if __name__ == "__main__":
    import sys
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    unittest.main()

# vim: expandtab sw=4 ts=4
| 29.894737 | 151 | 0.627934 | 237 | 1,704 | 4.396624 | 0.464135 | 0.033589 | 0.042226 | 0.017274 | 0.028791 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015349 | 0.235329 | 1,704 | 56 | 152 | 30.428571 | 0.782809 | 0.066901 | 0 | 0 | 0 | 0 | 0.111672 | 0 | 0 | 0 | 0 | 0 | 0.02381 | 1 | 0.047619 | false | 0 | 0.238095 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04a51c3634fa37cb65a083a5e078715450463600 | 260 | py | Python | registration/get_image.py | roguen/jive-derby | bc9e0f63e4298c3f1869288345d79e320fd20b65 | [
"Apache-2.0"
] | null | null | null | registration/get_image.py | roguen/jive-derby | bc9e0f63e4298c3f1869288345d79e320fd20b65 | [
"Apache-2.0"
] | null | null | null | registration/get_image.py | roguen/jive-derby | bc9e0f63e4298c3f1869288345d79e320fd20b65 | [
"Apache-2.0"
] | null | null | null | from picamera import PiCamera
import time
def captureImage():
    camera = PiCamera()
    camera.start_preview()
    camera.rotation = 90
    time.sleep(1)
    camera.capture('static/images/test.jpg')
    camera.stop_preview()

if __name__ == '__main__':
    captureImage()
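# Editor's note: a hedged alternative sketch, not part of the original file —
# PiCamera holds the camera device until closed, so a context-manager variant
# avoids leaking it across repeated calls (behaviour otherwise identical):
#
#     def capture_image_safe():
#         with PiCamera() as camera:
#             camera.start_preview()
#             camera.rotation = 90
#             time.sleep(1)
#             camera.capture('static/images/test.jpg')
#             camera.stop_preview()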
| 17.333333 | 41 | 0.730769 | 32 | 260 | 5.625 | 0.6875 | 0.155556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013514 | 0.146154 | 260 | 14 | 42 | 18.571429 | 0.797297 | 0 | 0 | 0 | 0 | 0 | 0.115385 | 0.084615 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.181818 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04a725fd777c3aa01c9d14a46b6d2bdbf741ca2a | 4,012 | py | Python | gpvdm_gui/gui/scan_tab_ribbon.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 12 | 2016-09-13T08:58:13.000Z | 2022-01-17T07:04:52.000Z | gpvdm_gui/gui/scan_tab_ribbon.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 3 | 2017-11-11T12:33:02.000Z | 2019-03-08T00:48:08.000Z | gpvdm_gui/gui/scan_tab_ribbon.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 6 | 2019-01-03T06:17:12.000Z | 2022-01-01T15:59:00.000Z | # -*- coding: utf-8 -*-
#
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package scan_tab_ribbon
# The ribbon for the scan window.
#
import os
from cal_path import get_css_path
#qt
from PyQt5.QtWidgets import QMainWindow, QTextEdit, QAction, QApplication
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QSize, Qt,QFile,QIODevice
from PyQt5.QtWidgets import QWidget,QSizePolicy,QVBoxLayout,QHBoxLayout,QPushButton,QDialog,QFileDialog,QToolBar, QLineEdit, QToolButton
from PyQt5.QtWidgets import QTabWidget
from icon_lib import icon_get
from about import about_dlg
from util import wrap_text
from ribbon_base import ribbon_base
from play import play
class scan_tab_ribbon(ribbon_base):

    def simulations(self):
        toolbar = QToolBar()
        toolbar.setToolButtonStyle( Qt.ToolButtonTextUnderIcon)
        toolbar.setIconSize(QSize(42, 42))

        self.tb_simulate = play(self,"scan_play",play_icon="forward",run_text=wrap_text(_("Run scan"),2))#QAction(icon_get("build_play2"), wrap_text(_("Run scan"),2), self)
        toolbar.addAction(self.tb_simulate)
        toolbar.addSeparator()

        self.tb_plot = QAction(icon_get("plot"), wrap_text(_("Plot"),4), self)
        toolbar.addAction(self.tb_plot)
        #self.tb_plot_time = QAction(icon_get("plot_time"), wrap_text(_("Time domain plot"),6), self)
        #toolbar.addAction(self.tb_plot_time)
        self.tb_clean = QAction(icon_get("clean"), wrap_text(_("Clean simulation"),4), self)
        toolbar.addAction(self.tb_clean)

        self.box_widget=QWidget()
        self.box=QVBoxLayout()
        self.box_widget.setLayout(self.box)
        self.box_tb0=QToolBar()
        self.box_tb0.setIconSize(QSize(32, 32))
        self.box.addWidget(self.box_tb0)
        self.box_tb1=QToolBar()
        self.box_tb1.setIconSize(QSize(32, 32))
        self.box.addWidget(self.box_tb1)

        self.tb_build = QAction(icon_get("cog"), wrap_text(_("Build scan"),2), self)
        self.box_tb0.addAction(self.tb_build)
        self.tb_rerun = QAction(icon_get("play-green"), wrap_text(_("Rerun"),2), self)
        #self.box_tb0.addAction(self.tb_rerun)
        self.tb_zip = QAction(icon_get("package-x-generic"), wrap_text(_("Archive simulations"),2), self)
        self.box_tb0.addAction(self.tb_zip)
        self.tb_notes = QAction(icon_get("text-x-generic"), wrap_text(_("Notes"),3), self)
        toolbar.addAction(self.tb_notes)
        toolbar.addWidget(self.box_widget)
        return toolbar

    def update(self):
        print("update")
        #self.device.update()
        #self.simulations.update()
        #self.configure.update()
        #self.home.update()

    def callback_about_dialog(self):
        dlg=about_dlg()
        dlg.exec_()

    def __init__(self):
        ribbon_base.__init__(self)
        self.setMaximumHeight(130)
        #self.setStyleSheet("QWidget { background-color:cyan; }")

        self.about = QToolButton(self)
        self.about.setText(_("About"))
        self.about.pressed.connect(self.callback_about_dialog)
        self.setCornerWidget(self.about)

        w=self.simulations()
        self.addTab(w,_("Simulations"))
        #w=self.advanced()
        #self.addTab(w,_("Advanced"))
        #w=self.ml()
        #self.addTab(w,_("ML"))

        sheet=self.readStyleSheet(os.path.join(get_css_path(),"style.css"))
        if sheet!=None:
            sheet=str(sheet,'utf-8')
            self.setStyleSheet(sheet)
| 30.165414 | 166 | 0.738285 | 586 | 4,012 | 4.890785 | 0.366894 | 0.033496 | 0.039079 | 0.04187 | 0.150035 | 0.111654 | 0.06141 | 0.06141 | 0.030007 | 0 | 0 | 0.018721 | 0.134596 | 4,012 | 132 | 167 | 30.393939 | 0.80674 | 0.356431 | 0 | 0 | 0 | 0 | 0.06769 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.3 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04a7d1ad3d2c4355c91792c04e9a6461198ad07a | 2,855 | py | Python | Components/text.py | amauri-bz/az_log_viewer | 3d11bf94993e34ce1253266b9bcf64db4c4ba6c4 | [
"MIT"
] | null | null | null | Components/text.py | amauri-bz/az_log_viewer | 3d11bf94993e34ce1253266b9bcf64db4c4ba6c4 | [
"MIT"
] | null | null | null | Components/text.py | amauri-bz/az_log_viewer | 3d11bf94993e34ce1253266b9bcf64db4c4ba6c4 | [
"MIT"
] | null | null | null | import re
import tkinter as tk
from tkinter import ttk
from Components.db import Database
from Components.pop_up_menu import PopUpMenu
from Components.text_sync import TextSync
from Components.custom_text import *
class TextScrollCombo(tk.Frame):

    def __init__(self, root, tab):
        super().__init__(root, bg='#e6e6e6')
        self.root = root
        self.tab = tab
        self.changed = False
        self.refresh_enable = False
        self.refresh_interv = 5
        # create a Text widget
        self.text = CustomText(self)
        self.linenumbers = TextLineNumbers(self, width=30)
        self.linenumbers.attach(self.text)
        # create a Scrollbar and associate it with txt
        scrollb = ttk.Scrollbar(self, command=self.text.yview)
        self.text['yscrollcommand'] = scrollb.set
        self.linenumbers.pack(side="left", fill="y")
        scrollb.pack(side="right", fill="y")
        self.text.pack(side="right", fill="both", expand=True)
        db = Database.instance()
        if db.global_font != None:
            self.text.config(font=db.global_font, undo=True, wrap='word')
        else:
            self.text.config(font=("consolas", 10), undo=True, wrap='word')
        self.text.config(borderwidth=3, relief="sunken")
        style = ttk.Style()
        style.theme_use('clam')
        self.bind_text_event()
        self.saved_path = ""
        self.popup_menu = PopUpMenu(self.root, self.tab)
        self.text_sync = TextSync(self.tab, self, self.text)

    def bind_text_event(self):
        self.text.bind("<<Highlight>>", self.highlight)
        self.text.bind("<Button-3>", self.popup)
        self.text.bind("<<Change>>", self._on_change)
        self.text.bind("<Configure>", self._on_change)

    def _on_change(self, event):
        if self.text.index(tk.INSERT) != "1.0":
            self.changed = True
        self.linenumbers.redraw()

    def popup(self, event):
        try:
            self.popup_menu.menubar.tk_popup(event.x_root+70, event.y_root, 0)
        finally:
            self.popup_menu.menubar.grab_release()

    def clean_highlight(self):
        for tag in self.text.tag_names():
            x = re.search("ptrn-", tag)
            if x != None:
                self.text.tag_remove(tag, '1.0', tk.END)

    def highlight(self, event):
        self.clean_highlight()
        lastline = self.text.index("end").split(".")[0]
        for i in range(1, int(lastline)):
            contents = self.text.get("%s.0" % i, "%s.end" % i)
            db = Database.instance()
            for pattern in db.get_keys():
                x = re.search(pattern, contents)
                if(x != None):
                    self.text.tag_configure("ptrn-" + pattern, background=db.get_value(pattern))
                    self.text.tag_add("ptrn-" + pattern, "%s.0" % i, "%s.end" % i)
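# Editor's note: an illustrative (hypothetical) wiring of the widget; it assumes
# a Tk root plus a tab object compatible with PopUpMenu and TextSync:
#
#     root = tk.Tk()
#     combo = TextScrollCombo(root, tab)
#     combo.pack(fill="both", expand=True)
#     root.mainloop()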
| 33.588235 | 96 | 0.594396 | 370 | 2,855 | 4.467568 | 0.345946 | 0.101633 | 0.029038 | 0.020569 | 0.031458 | 0.031458 | 0 | 0 | 0 | 0 | 0 | 0.010057 | 0.268651 | 2,855 | 84 | 97 | 33.988095 | 0.781609 | 0.022767 | 0 | 0.030769 | 0 | 0 | 0.055974 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092308 | false | 0 | 0.107692 | 0 | 0.215385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04a9860077d854d091c1198ef151308897080df1 | 1,913 | py | Python | applications/plugins/Compliant/examples/chain.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | applications/plugins/Compliant/examples/chain.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | applications/plugins/Compliant/examples/chain.py | sofa-framework/issofa | 94855f488465bc3ed41223cbde987581dfca5389 | [
"OML"
] | null | null | null | import Sofa
from Compliant import StructuralAPI
StructuralAPI.geometric_stiffness=2
def createScene(root):
    # root node setup
    root.createObject('RequiredPlugin', pluginName = 'Compliant')
    root.createObject('VisualStyle', displayFlags="showBehavior" )
    root.createObject('CompliantAttachButtonSetting' )

    # simulation parameters
    root.dt = 1e-2
    root.gravity = [0, -9.8, 0]

    # ode solver
    ode = root.createObject('CompliantImplicitSolver', neglecting_compliance_forces_in_geometric_stiffness=False, stabilization = "pre-stabilization")

    # numerical solver
    root.createObject('SequentialSolver', name='numsolver', iterations=250, precision=1e-14, iterateOnBilaterals=True)
    root.createObject('LDLTResponse', name='response')
    #root.createObject('LUResponse', name='response')

    # scene node
    scene = root.createChild('scene')

    # script variables
    nbLink = 10
    linkSize = 2

    # links creation
    links = []

    # rigid bodies
    for i in xrange(nbLink):
        body = StructuralAPI.RigidBody(root, "link-{0}".format(i))
        body.setManually(offset = [0, -1.*linkSize * i, 0, 0,0,0,1], inertia_forces = True)
        body.dofs.showObject = True
        body.dofs.showObjectScale = 0.25*linkSize
        links.append( body )

    # attach first link
    links[0].setFixed()

    # joints creation
    for i in xrange( nbLink-1 ):
        off1 = links[i].addOffset("offset-{0}-{1}".format(i, i+1), [0, -0.5*linkSize, 0, 0,0,0,1])
        off2 = links[i+1].addOffset("offset-{0}-{1}".format(i+1, i), [0, 0.5*linkSize, 0, 0,0,0,1])
        StructuralAPI.HingeRigidJoint(2, "joint-{0}-{1}".format(i, i+1), off1.node, off2.node, isCompliance=True, compliance=0)
        #StructuralAPI.BallAndSocketRigidJoint("joint-{0}-{1}".format(i, i+1), off1.node, off2.node, isCompliance=True, compliance=0)
| 35.425926 | 150 | 0.651856 | 229 | 1,913 | 5.414847 | 0.393013 | 0.017742 | 0.014516 | 0.029032 | 0.194355 | 0.16129 | 0.120968 | 0.120968 | 0.120968 | 0.095161 | 0 | 0.04405 | 0.204914 | 1,913 | 53 | 151 | 36.09434 | 0.771203 | 0.170413 | 0 | 0 | 0 | 0 | 0.135324 | 0.032402 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.074074 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04a9cf8fa788b8c75de2f502190ad9f5825f1285 | 5,636 | py | Python | cwe_sfp_to_threatspec.py | threatspec/pythreatspec | 77c0c7d3052adaeb91ea28ae61e60538c6d3fa9b | [
"MIT"
] | 10 | 2018-01-14T22:53:29.000Z | 2019-05-22T08:26:56.000Z | cwe_sfp_to_threatspec.py | threatspec/pythreatspec | 77c0c7d3052adaeb91ea28ae61e60538c6d3fa9b | [
"MIT"
] | 1 | 2019-02-17T23:39:19.000Z | 2019-02-17T23:39:19.000Z | cwe_sfp_to_threatspec.py | threatspec/pythreatspec | 77c0c7d3052adaeb91ea28ae61e60538c6d3fa9b | [
"MIT"
] | 4 | 2018-03-03T03:16:37.000Z | 2019-02-17T03:09:33.000Z | #!/usr/bin/env python
import re
import sys
import xmltodict
import json
import time
import collections
from pprint import pprint
def create_id(obj):
    identifier = obj["@ID"]
    name = obj["@Name"]
    name = re.sub(r'[^a-zA-Z0-9_ \-]', "", name)
    name = re.sub(r'[ \-]', "_", name)
    name = name.lower()
    return "@cwe_{}_{}".format(identifier, name)

def create_name(obj):
    return obj["@Name"]

def create_description(obj):
    if not "Description" in obj:
        return "No description available."
    description = obj["Description"]["Description_Summary"]
    description = re.sub(r'[^a-zA-Z0-9_ \-\;\:\.\,]', " ", description)
    description = re.sub(r' +', " ", description)
    if "Extended_Description" in obj["Description"] and obj["Description"]["Extended_Description"]:
        if isinstance(obj["Description"]["Extended_Description"]["Text"], list):
            extended_description = ".".join(obj["Description"]["Extended_Description"]["Text"])
        else:
            extended_description = obj["Description"]["Extended_Description"]["Text"]
        extended_description = re.sub(r'[^a-zA-Z0-9_ \-\;\:\.\,]', " ", extended_description)
        extended_description = re.sub(r' +', " ", extended_description)
        return description + " " + extended_description
    else:
        return description

def create_refs(obj):
    identifier = "CWE {}".format(obj["@ID"])
    url = "https://cwe.mitre.org/data/definitions/{}.html".format(obj["@ID"])
    return [identifier, url]

def create_category_id(obj):
    name = create_category_name(obj)
    name = re.sub(r'[^a-zA-Z0-9_ \-]', "", name)
    name = re.sub(r'[ \-]', "_", name)
    name = name.lower()
    return "@{}".format(name)

def create_category_name(obj):
    if ":" in obj["@Name"]:
        return obj["@Name"].split(":")[1].strip()
    else:
        return obj["@Name"]

if len(sys.argv) != 2:
    print("Usage: cwe_to_threatspec.py CWE_XML_FILE")
    sys.exit(1)

filename = sys.argv[1]
print("Parsing CWE file {}".format(filename))
with open(filename) as fh:
    cwes = xmltodict.parse(fh.read())

threats = {}
spf_id = ""
spf_categories = {}
for view in cwes["Weakness_Catalog"]["Views"]["View"]:
    if view["@Name"] == "Software Fault Pattern (SFP) Clusters":
        spf_id = view["@ID"]
        for relationship in view["Relationships"]["Relationship"]:
            if relationship["Relationship_Target_Form"] == "Category" and relationship["Relationship_Nature"] == "HasMember":
                lookup_id = relationship["Relationship_Target_ID"]
                spf_categories[lookup_id] = ""

category_map = {}
for category in cwes["Weakness_Catalog"]["Categories"]["Category"]:
    category_id = create_category_id(category)
    category_name = create_category_name(category)
    identifier = category["@ID"]
    category_map[identifier] = {
        "id": category_id,
        "name": category_name
    }
    if identifier in spf_categories:
        # Top level categories
        spf_categories[identifier] = category_id
        threats[category_id] = {
            "name": category_name,
            "parent": "@sfp",
            "refs": [identifier]
        }

for category in cwes["Weakness_Catalog"]["Categories"]["Category"]:
    if "Relationships" in category:
        # Sub-categories
        relationship = category["Relationships"]["Relationship"]
        if not isinstance(relationship, collections.OrderedDict):
            continue
        if isinstance(relationship["Relationship_Views"]["Relationship_View_ID"], list):
            views = relationship["Relationship_Views"]["Relationship_View_ID"]
        else:
            views = [relationship["Relationship_Views"]["Relationship_View_ID"]]
        for view in views:
            if view["#text"] == spf_id:
                identifier = category["@ID"]
                category_id = category_map[identifier]["id"]
                category_name = category_map[identifier]["name"]
                parent_identifier = relationship["Relationship_Target_ID"]
                parent_id = category_map[parent_identifier]["id"]
                spf_categories[identifier] = category_id
                threats[category_id] = {
                    "name": category_name,
                    "parent": parent_id,
                    "refs": [identifier]
                }

for cwe in cwes["Weakness_Catalog"]["Weaknesses"]["Weakness"]:
    sfp_category_id = ""
    if "Relationships" in cwe and "Relationship" in cwe["Relationships"]:
        for relationship in cwe["Relationships"]["Relationship"]:
            if isinstance(relationship, collections.OrderedDict) and relationship["Relationship_Target_Form"] == "Category" and relationship["Relationship_Nature"] == "ChildOf" and relationship["Relationship_Target_ID"] in spf_categories:
                sfp_category_id = spf_categories[relationship["Relationship_Target_ID"]]
                break
    if not sfp_category_id:
        continue
    identifier = create_id(cwe)
    name = create_name(cwe)
    desc = create_description(cwe)
    refs = create_refs(cwe)
    parent = sfp_category_id
    threats[identifier] = {
        "name": name,
        "description": desc,
        "references": refs,
        "parent": parent
    }

now = int(round(time.time() * 1000))
doc = {
    "specification": {
        "version": "0.1.0",
        "name": "ThreatSpec"
    },
    "document": {
        "created": now,
        "updated": now
    },
    "threats": threats
}

print("Writing library to sfp_library.threatspec.json")
with open("sfp_library.threatspec.json", "w") as fh:
    json.dump(doc, fh, indent=2)
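# Editor's note: illustrative invocation (the XML filename is an assumption —
# any CWE catalogue export using the schema fields parsed above should work):
#
#     python cwe_sfp_to_threatspec.py cwec_v2.11.xml
#     # -> writes sfp_library.threatspec.json next to the script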
| 32.767442 | 238 | 0.617814 | 611 | 5,636 | 5.513912 | 0.199673 | 0.044524 | 0.014248 | 0.011873 | 0.280499 | 0.231226 | 0.217275 | 0.162066 | 0.11873 | 0.074206 | 0 | 0.004631 | 0.233676 | 5,636 | 171 | 239 | 32.959064 | 0.775411 | 0.009936 | 0 | 0.189781 | 0 | 0 | 0.24081 | 0.034069 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043796 | false | 0 | 0.051095 | 0.007299 | 0.160584 | 0.029197 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04ad585a9a2b68ae8175682dbfc62c2645117883 | 300 | py | Python | baseline/crosswalk.py | wko/clinical-trial-criteria-translation | 1478c5bab3511c7a92313a488510641d161deeff | [
"Apache-2.0"
] | null | null | null | baseline/crosswalk.py | wko/clinical-trial-criteria-translation | 1478c5bab3511c7a92313a488510641d161deeff | [
"Apache-2.0"
] | null | null | null | baseline/crosswalk.py | wko/clinical-trial-criteria-translation | 1478c5bab3511c7a92313a488510641d161deeff | [
"Apache-2.0"
] | null | null | null | import requests
import os
# translate CUIs into SNOMED Ids
def crosswalk(cui):
    headers = {'Accept': 'application/xml'}
    data = {"data": cui}
    mapping = requests.post(f"{os.environ['METAMAP_WEB_URL']}crosswalk", data = data)
    print(mapping.text)
    return mapping.text.splitlines()
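# Editor's note: a minimal, hypothetical usage sketch — the CUI below and the
# service behind METAMAP_WEB_URL are illustrative assumptions, not part of the
# original module:
#
#     os.environ.setdefault('METAMAP_WEB_URL', 'http://localhost:8000/')
#     for snomed_id in crosswalk('C0011849'):  # a UMLS concept identifier
#         print(snomed_id)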
| 27.272727 | 85 | 0.683333 | 38 | 300 | 5.342105 | 0.710526 | 0.078818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.18 | 300 | 10 | 86 | 30 | 0.825203 | 0.1 | 0 | 0 | 0 | 0 | 0.242537 | 0.149254 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.5 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04ae09bb61ba231f35c7bc90f21c1f434a2f9b74 | 4,469 | py | Python | DAO.py | arturj9/sistema-mercearia-python | 000c7e887350768483d7a7a6058f2a5d0dcaa83d | [
"MIT"
] | null | null | null | DAO.py | arturj9/sistema-mercearia-python | 000c7e887350768483d7a7a6058f2a5d0dcaa83d | [
"MIT"
] | null | null | null | DAO.py | arturj9/sistema-mercearia-python | 000c7e887350768483d7a7a6058f2a5d0dcaa83d | [
"MIT"
] | null | null | null | from Models import *
class DaoCategoria:
    @classmethod
    def salvar(cls, categoria):
        with open('arquivos-txt/categoria.txt', 'a') as arq:
            arq.writelines(categoria)
            arq.writelines('\n')

    @classmethod
    def ler(cls):
        with open('arquivos-txt/categoria.txt', 'r') as arq:
            cls.categoria = arq.readlines()
            cls.categoria = list(map(lambda x: x.replace('\n', ''), cls.categoria))
            cat = []
            for i in cls.categoria:
                cat.append(Categoria(i))
            return cat

class DaoVenda:
    @classmethod
    def salvar(cls, venda: Venda):
        with open('arquivos-txt/venda.txt', 'a') as arq:
            arq.writelines(venda.itensVendido.nome + '|' +
                           venda.itensVendido.preco + '|' +
                           venda.itensVendido.categoria + '|' +
                           venda.vendedor + '|' + venda.comprador + '|' +
                           str(venda.quantidadeVendida) + '|' + venda.data)
            arq.writelines('\n')

    @classmethod
    def ler(cls):
        with open('arquivos-txt/venda.txt', 'r') as arq:
            cls.venda = arq.readlines()
            cls.venda = list(map(lambda x: x.replace('\n', ''), cls.venda))
            cls.venda = list(map(lambda x: x.split('|'), cls.venda))
            vend = []
            for i in cls.venda:
                vend.append(Venda(Produtos(i[0], i[1], i[2]), i[3], i[4], i[5], i[6]))
            return vend

class DaoEstoque:
    @classmethod
    def salvar(cls, produto: Produtos, quantidade):
        with open('arquivos-txt/estoque.txt', 'a') as arq:
            arq.writelines(produto.nome + '|' + produto.preco +
                           '|' + produto.categoria + '|' + str(quantidade))
            arq.writelines('\n')

    @classmethod
    def ler(cls):
        with open('arquivos-txt/estoque.txt', 'r') as arq:
            cls.estoque = arq.readlines()
            cls.estoque = list(map(lambda x: x.replace('\n', ''), cls.estoque))
            cls.estoque = list(map(lambda x: x.split('|'), cls.estoque))
            est = []
            for i in cls.estoque:
                est.append(Estoque(Produtos(i[0], i[1], i[2]), int(i[3])))
            return est

class DaoFornecedor:
    @classmethod
    def salvar(cls, fornecedor: Fornecedor):
        with open('arquivos-txt/fornecedores.txt', 'a') as arq:
            arq.writelines(fornecedor.nome + '|' + fornecedor.cnpj +
                           '|' + fornecedor.telefone + '|' + fornecedor.categoria)
            arq.writelines('\n')

    @classmethod
    def ler(cls):
        with open('arquivos-txt/fornecedores.txt', 'r') as arq:
            cls.fornecedores = arq.readlines()
            cls.fornecedores = list(map(lambda x: x.replace('\n', ''), cls.fornecedores))
            cls.fornecedores = list(map(lambda x: x.split('|'), cls.fornecedores))
            forn = []
            for i in cls.fornecedores:
                forn.append(Fornecedor(i[0], i[1], i[2], i[3]))
            return forn

class DaoPessoa:
    @classmethod
    def salvar(cls, pessoas: Pessoa):
        with open('arquivos-txt/clientes.txt', 'a') as arq:
            arq.writelines(pessoas.nome + '|' + pessoas.telefone +
                           '|' + pessoas.cpf + '|' + pessoas.email +
                           '|' + pessoas.endereco)
            arq.writelines('\n')

    @classmethod
    def ler(cls):
        with open('arquivos-txt/clientes.txt', 'r') as arq:
            cls.clientes = arq.readlines()
            cls.clientes = list(map(lambda x: x.replace('\n', ''), cls.clientes))
            cls.clientes = list(map(lambda x: x.split('|'), cls.clientes))
            clie = []
            for i in cls.clientes:
                clie.append(Pessoa(i[0], i[1], i[2], i[3], i[4]))
            return clie

class DaoFuncionario:
    @classmethod
    def salvar(cls, funcionario: Funcionario):
        with open('arquivos-txt/funcionarios.txt', 'a') as arq:
            arq.writelines(funcionario.clt + '|' + funcionario.nome +
                           '|' + funcionario.telefone + '|' + funcionario.cpf +
                           '|' + funcionario.email + '|' + funcionario.endereco)
            arq.writelines('\n')

    @classmethod
    def ler(cls):
        with open('arquivos-txt/funcionarios.txt', 'r') as arq:
            cls.funcionarios = arq.readlines()
            cls.funcionarios = list(map(lambda x: x.replace('\n', ''), cls.funcionarios))
            cls.funcionarios = list(map(lambda x: x.split('|'), cls.funcionarios))
            func = []
            for i in cls.funcionarios:
                func.append(Funcionario(i[0], i[1], i[2], i[3], i[4], i[5]))
            return func | 42.160377 | 85 | 0.55829 | 532 | 4,469 | 4.68985 | 0.146617 | 0.067335 | 0.076954 | 0.091383 | 0.492585 | 0.470942 | 0.322645 | 0.220441 | 0.154709 | 0.154709 | 0 | 0.008032 | 0.275677 | 4,469 | 106 | 86 | 42.160377 | 0.762743 | 0 | 0 | 0.226415 | 0 | 0 | 0.083221 | 0.069351 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0 | 0.009434 | 0 | 0.235849 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04ae9a2e77bc4446dfbf168a4becf211916997ba | 1,221 | py | Python | invenio_previewer/extensions/gis.py | nyudlts/invenio-previewer | 89da1108a8ed8c38e98bebc470d33e0447bea277 | [
"MIT"
] | null | null | null | invenio_previewer/extensions/gis.py | nyudlts/invenio-previewer | 89da1108a8ed8c38e98bebc470d33e0447bea277 | [
"MIT"
] | null | null | null | invenio_previewer/extensions/gis.py | nyudlts/invenio-previewer | 89da1108a8ed8c38e98bebc470d33e0447bea277 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""GIS data previewer"""
from __future__ import absolute_import, print_function
from flask import render_template
from ..proxies import current_previewer
import zipfile
import cchardet as chardet
from six import binary_type
previewable_extensions = ['zip']
def can_preview(file):
    """Check if file can be previewed."""
    if not (file.is_local() and file.has_extensions('.zip')):
        return False
    # return true if SHP, SHX, and DBF present
    with file.open() as fp:
        zf = zipfile.ZipFile(fp)
        # Detect filenames encoding.
        sample = ' '.join(zf.namelist())
        if not isinstance(sample, binary_type):
            sample = sample.encode('utf-16be')
        encoding = chardet.detect(sample).get('encoding', 'utf-8')
        required = {'.shp', '.shx', '.dbf'}
        found = set()
        for info in zf.infolist():
            name = info.filename.lower()
            for ext in required:
                if name.endswith(ext):
                    found.add(ext)
        return required <= found
def preview(file):
    """Preview file."""
    return render_template(
        'invenio_previewer/gis.html',
        file=file,
        js_bundles=current_previewer.js_bundles + ['gis_js.js'],
        css_bundles=current_previewer.css_bundles + ["gis_css.css"]
    )
| 30.525 | 78 | 0.638002 | 153 | 1,221 | 4.934641 | 0.490196 | 0.063576 | 0.029139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004306 | 0.239148 | 1,221 | 39 | 79 | 31.307692 | 0.808396 | 0.126126 | 0 | 0.074074 | 0 | 0 | 0.082936 | 0.024786 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.222222 | 0 | 0.37037 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04b24242aa80e1798826b605b90f49e757941f2e | 4,153 | py | Python | code/entity2facts.py | Program-Bear/WalkingNetwork | b19ab91ba85e08992bf6afa865655deeaa19c509 | [
"BSD-3-Clause"
] | null | null | null | code/entity2facts.py | Program-Bear/WalkingNetwork | b19ab91ba85e08992bf6afa865655deeaa19c509 | [
"BSD-3-Clause"
] | null | null | null | code/entity2facts.py | Program-Bear/WalkingNetwork | b19ab91ba85e08992bf6afa865655deeaa19c509 | [
"BSD-3-Clause"
] | 3 | 2018-04-13T05:50:09.000Z | 2019-01-05T06:55:53.000Z | import numpy as np
import json
from tqdm import tqdm
def process(temp):
    temp = temp.split('(')[1]
    temp = temp.split(')')[0]
    _id = temp.split(',')[0]
    _start = temp.split(',')[1]
    _len = temp.split(',')[2]
    # print(_id)
    return int(_id), int(_start), int(_len)

def genDict():
    dic = {}
    file_object = open("entityInfo.dat", 'r')
    lines = file_object.readlines()
    print("start generating dictionary")
    for line in tqdm(lines):
        _id, _start, _len = process(line)
        temp = []
        temp.append(_start)
        temp.append(_len)
        dic[_id] = tuple(temp)
    return dic

def get_max(dic, entities):
    ans = 1
    for entity in entities:
        try:
            ans = max(ans, dic[entity][1])
        except:
            ans = ans
    return ans

def get_new_facts(facts, entity_vocab, relation_vocab):
    new_facts = []
    print("start generating new facts list")
    for mem in tqdm(facts):
        temp = []
        temp.append(entity_vocab[mem['e1']] if mem['e1'] in entity_vocab else entity_vocab['UNK'])
        temp.append(relation_vocab[mem['r']] if mem['r'] in relation_vocab else relation_vocab['UNK'])
        temp.append(entity_vocab[mem['e2']] if mem['e2'] in entity_vocab else entity_vocab['UNK'])
        new_facts.append(temp)
    return new_facts

def entity2facts(facts, dic, entity_vocab, relation_vocab, max_mem, entities):
    max_num = get_max(dic, entities)
    answer = np.ones([len(entities), max_num*len(entities), 3])
    answer[:, :, 0].fill(entity_vocab['DUMMY_MEM'])
    answer[:, :, 1].fill(relation_vocab['DUMMY_MEM'])
    answer[:, :, 2].fill(entity_vocab['DUMMY_MEM'])
    mem_counter = 0
    for counter, entity in enumerate(entities):
        try:
            tu = dic[entity]
        except:
            # print("%d does not exist" % entity)
            continue
        for mem_index in xrange(tu[0], tu[0]+tu[1]):
            # print("memory of %d" % mem_counter)
            mem = facts[mem_index]
            #e1_int = entity_vocab[mem['e1']] if mem['e1'] in entity_vocab else entity_vocab['UNK']
            #e2_int = entity_vocab[mem['e2']] if mem['e2'] in entity_vocab else entity_vocab['UNK']
            #r_int = relation_vocab[mem['r']] if mem['r'] in relation_vocab else relation_vocab['UNK']
            e1_int = mem[0]
            r_int = mem[1]
            e2_int = mem[2]
            answer[counter][mem_counter][0] = e1_int
            answer[counter][mem_counter][1] = r_int
            answer[counter][mem_counter][2] = e2_int
            mem_counter += 1
            if (mem_counter == max_mem):
                print("over!")
                break
    return answer

def read_kb_facts():
    facts = []
    #facts_list = defaultdict(list)
    print('Reading kb file at {}'.format("../kb/freebase.spades.txt"))
    with open("../kb/freebase.spades.txt") as fb:
        for counter, line in tqdm(enumerate(fb)):
            line = line.strip()
            line = line[1:-1]
            e1, r1, r2, e2 = [a.strip('\'') for a in [x.strip() for x in line.split(',')]]
            r = r1 + '_' + r2
            facts.append({'e1': e1, 'r': r, 'e2': e2})
            # facts_list[e1].append(counter)  # just store the fact counter instead of the fact
    return facts

def prepare():
    global entity_object, relation_object, entity_voc, relation_voc, facts, new_facts
    entity_object = open("../vocab/entity_vocab.json")
    relation_object = open("../vocab/relation_vocab.json")
    entity_voc = json.loads(entity_object.read())
    relation_voc = json.loads(relation_object.read())
    facts = read_kb_facts()
    new_facts = get_new_facts(facts, entity_voc, relation_voc)

dic = genDict()
prepare()

def get_memory(entities):
    mem = entity2facts(new_facts, dic, entity_voc, relation_voc, 2147483647, entities)
    return mem

if __name__ == "__main__":
    # file_object = open("entityInfo.txt",'r')
    # lines = file_object.readlines()
    # a,b,c = process(lines[0])
    # print(a)
    # print(b)
    # print(c)
    mem = get_memory([824729, 824730, 407779])
    print(mem)
    #print(dic[824729])
    #print(dic[407784])
| 31.462121 | 102 | 0.596196 | 566 | 4,153 | 4.176678 | 0.194346 | 0.079103 | 0.023689 | 0.028765 | 0.230118 | 0.14044 | 0.14044 | 0.14044 | 0.14044 | 0.14044 | 0 | 0.029793 | 0.256441 | 4,153 | 131 | 103 | 31.70229 | 0.735751 | 0.147604 | 0 | 0.065217 | 0 | 0 | 0.088352 | 0.029545 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.032609 | 0 | 0.195652 | 0.054348 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04b54af5409bd211bebf59e0e6f1b320669915e5 | 4,533 | py | Python | EyePatterns/prepare_data/format_data.py | Sale1996/Pattern-detection-of-eye-tracking-scanpaths | 15c832f26dce98bb95445f9f39f454f99bbb6029 | [
"MIT"
] | 1 | 2021-12-07T08:02:30.000Z | 2021-12-07T08:02:30.000Z | EyePatterns/prepare_data/format_data.py | Sale1996/Pattern-detection-of-eye-tracking-scanpaths | 15c832f26dce98bb95445f9f39f454f99bbb6029 | [
"MIT"
] | null | null | null | EyePatterns/prepare_data/format_data.py | Sale1996/Pattern-detection-of-eye-tracking-scanpaths | 15c832f26dce98bb95445f9f39f454f99bbb6029 | [
"MIT"
] | null | null | null | import os
import numpy as np
# collecting all csv files from forwarded directory
def collect_csv_data_collection_from_directory(path):
    import pandas as pd
    import os
    data_collection = []
    for csv_name in os.listdir(path):
        csv_path = os.path.join(path, csv_name)
        data_collection.append(pd.read_csv(csv_path))
    return data_collection

# filtering collected data
def filter_collected_data(data_collection):
    filtered_important_data_collection = []
    for data in data_collection:
        # collecting only important rows (time and AOI labels)
        important_data = data[['TIME', 'AOI']].values
        important_data_without_nan = clear_nan_values(important_data)
        first_time_concatenated_data = concatenate_rows_with_same_successive_aoi(important_data_without_nan)
        # a second concatenation pass is needed because the first pass (which removes rows with
        # less than 200 ms spent on an AOI field) can itself produce new successive rows with the same AOI
        second_time_concatenated_data = concatenate_rows_adding_time_with_same_successive_aoi(
            first_time_concatenated_data)
        filtered_important_data_collection.append(second_time_concatenated_data)
    return filtered_important_data_collection

# clearing NaN values from the beginning and the end of the data sequence
def clear_nan_values(data):
    # clearing NaN values from the front of the data sequence
    found_first_aoi = False
    data_cleared_from_front = []
    for row in data:
        if isinstance(row[1], float):
            if found_first_aoi:
                data_cleared_from_front.append(([row[0], "prazno"]))
        else:
            found_first_aoi = True
            data_cleared_from_front.append(([row[0], row[1]]))
    # clearing NaN values from the end of the sequence
    found_last_aoi = False
    reversed_cleared_data = []
    for i in range(len(data_cleared_from_front) - 1, 0, -1):
        if isinstance(data_cleared_from_front[i][1], float):
            if found_last_aoi:
                reversed_cleared_data.append(([data_cleared_from_front[i][0], data_cleared_from_front[i][1]]))
        else:
            found_last_aoi = True
            reversed_cleared_data.append(([data_cleared_from_front[i][0], data_cleared_from_front[i][1]]))
    cleared_data = list(reversed(reversed_cleared_data))
    return cleared_data

# concatenating time spent on successive AOI fields in the data sequence and removing ones with less than 200 ms spent
def concatenate_rows_with_same_successive_aoi(data):
    import numpy as np
    concatenated_data = []
    current_aoi = ''
    aoi_first_time_seen = 0.0
    for row in data:
        if current_aoi == row[1]:
            continue
        else:
            # Clearing rows which have less than 200 ms time spent on them
            if row[0] - aoi_first_time_seen > 0.20:
                concatenated_data.append([row[0] - aoi_first_time_seen, current_aoi])
            current_aoi = row[1]
            aoi_first_time_seen = row[0]
    # last AOI that is left to append to the sequence
    concatenated_data.append([data[len(data) - 1][0] - aoi_first_time_seen, current_aoi])
    return concatenated_data

# concatenating time spent on successive AOI fields (based on adding time between them, not calculating time spent)
# in the data sequence
def concatenate_rows_adding_time_with_same_successive_aoi(data):
    import numpy as np
    concatenated_data = []
    current_aoi = ''
    time_spent_on_aoi = 0.0
    for row in data:
        if current_aoi == row[1]:
            time_spent_on_aoi += row[0]
            continue
        else:
            concatenated_data.append([time_spent_on_aoi, current_aoi])
            current_aoi = row[1]
            time_spent_on_aoi = row[0]
    # last AOI that is left to append to the sequence
    concatenated_data.append([time_spent_on_aoi, current_aoi])
    concatenated_data = np.array(concatenated_data)
    return concatenated_data

# saving csv files into forwarded path
def save_data_as_csv(data, path):
    import csv
    import os
    filename = os.path.join(path + ".csv")
    with open(filename, "w") as f:
        writer = csv.writer(f)
        writer.writerows(data)

# MAIN
collected_data = collect_csv_data_collection_from_directory("../collected_data")
filtered_data = filter_collected_data(collected_data)
for i in range(0, len(filtered_data)):
    save_data_as_csv(filtered_data[i],
                     "../filtered_colected_data/" + str(i))
| 35.139535 | 118 | 0.698875 | 632 | 4,533 | 4.705696 | 0.197785 | 0.075319 | 0.033289 | 0.060525 | 0.408204 | 0.346671 | 0.284802 | 0.246469 | 0.190989 | 0.167451 | 0 | 0.011432 | 0.228105 | 4,533 | 128 | 119 | 35.414063 | 0.838525 | 0.206927 | 0 | 0.345238 | 0 | 0 | 0.017049 | 0.007267 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.166667 | 0 | 0.297619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04b5bdc0bd251cb1ebbbc0b150a7e991848278cf | 6,432 | py | Python | workon/utils/cache.py | devittek/django-workon | c39ddecac2649406a7a58922646478c5615d4cfd | [
"BSD-3-Clause"
] | 1 | 2018-01-19T16:08:54.000Z | 2018-01-19T16:08:54.000Z | workon/utils/cache.py | devittek/django-workon | c39ddecac2649406a7a58922646478c5615d4cfd | [
"BSD-3-Clause"
] | 1 | 2020-07-06T08:35:18.000Z | 2020-07-06T08:35:18.000Z | workon/utils/cache.py | devittek/django-workon | c39ddecac2649406a7a58922646478c5615d4cfd | [
"BSD-3-Clause"
] | 4 | 2020-04-08T06:14:46.000Z | 2020-12-11T14:28:06.000Z | from time import time
import threading
from functools import partial
from django.utils.functional import cached_property
from django.core.cache import cache
try:
    import asyncio
except (ImportError, SyntaxError):
    asyncio = None

__all__ = [
    "cached_property",
    "cached_property_with_ttl",
    "memoize",
    'cached',
    "cache_get_or_set"
]
class memoize(object):
    """Decorator that caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned, and
    not re-evaluated.
    """

    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        try:
            return self.cache[args]
        except KeyError:
            value = self.func(*args)
            self.cache[args] = value
            return value
        except TypeError:
            # uncachable -- for instance, passing a list as an argument.
            # Better to not cache than to blow up entirely.
            return self.func(*args)

    def __repr__(self):
        """Return the function's docstring."""
        return self.func.__doc__

    def __get__(self, obj, objtype):
        """Support instance methods."""
        fn = partial(self.__call__, obj)
        fn.reset = self._reset
        return fn

    def _reset(self):
        self.cache = {}
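# Editor's note: a minimal usage sketch of memoize (illustrative function only);
# on instance methods the bound wrapper also exposes .reset() via __get__:
#
#     @memoize
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(30)  # computed once; repeat calls are served from self.cache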
class cached(memoize): pass
def cache_get_or_set(key, value, ttl):
    return cache.get_or_set(key, value, ttl)
class cached_property:
    """
    Decorator that converts a method with a single self argument into a
    property cached on the instance.

    Optional ``name`` argument allows you to make cached properties of other
    methods. (e.g. url = cached_property(get_absolute_url, name='url') )
    """

    def __init__(self, func, name=None):
        self.func = func
        self.__doc__ = getattr(func, '__doc__')
        self.name = name or func.__name__

    def __get__(self, instance, cls=None):
        """
        Call the function and put the return value in instance.__dict__ so that
        subsequent attribute access on the instance returns the cached value
        instead of calling cached_property.__get__().
        """
        if instance is None:
            return self
        res = instance.__dict__[self.name] = self.func(instance)
        return res
# class cached_property(object):
#     """
#     A property that is only computed once per instance and then replaces itself
#     with an ordinary attribute. Deleting the attribute resets the property.
#     Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
#     """  # noqa
#
#     def __init__(self, func):
#         self.__doc__ = getattr(func, "__doc__")
#         self.func = func
#
#     def __get__(self, obj, cls):
#         if obj is None:
#             return self
#         if asyncio and asyncio.iscoroutinefunction(self.func):
#             return self._wrap_in_coroutine(obj)
#         value = obj.__dict__[self.func.__name__] = self.func(obj)
#         return value
#
#     def _wrap_in_coroutine(self, obj):
#         @asyncio.coroutine
#         def wrapper():
#             future = asyncio.ensure_future(self.func(obj))
#             obj.__dict__[self.func.__name__] = future
#             return future
#         return wrapper()
class threaded_cached_property(object):
    """
    A cached_property version for use in environments where multiple threads
    might concurrently try to access the property.
    """

    def __init__(self, func):
        self.__doc__ = getattr(func, "__doc__")
        self.func = func
        self.lock = threading.RLock()

    def __get__(self, obj, cls):
        if obj is None:
            return self
        obj_dict = obj.__dict__
        name = self.func.__name__
        with self.lock:
            try:
                # check if the value was computed before the lock was acquired
                return obj_dict[name]
            except KeyError:
                # if not, do the calculation and release the lock
                return obj_dict.setdefault(name, self.func(obj))
class cached_property_with_ttl(object):
    """
    A property that is only computed once per instance and then replaces itself
    with an ordinary attribute. Setting the ttl to a number expresses how long
    the property will last before being timed out.
    """

    def __init__(self, ttl=None):
        if callable(ttl):
            func = ttl
            ttl = None
        else:
            func = None
        self.ttl = ttl
        self._prepare_func(func)

    def __call__(self, func):
        self._prepare_func(func)
        return self

    def __get__(self, obj, cls):
        if obj is None:
            return self
        now = time()
        obj_dict = obj.__dict__
        name = self.__name__
        try:
            value, last_updated = obj_dict[name]
        except KeyError:
            pass
        else:
            ttl_expired = self.ttl and self.ttl < now - last_updated
            if not ttl_expired:
                return value
        value = self.func(obj)
        obj_dict[name] = (value, now)
        return value

    def __delete__(self, obj):
        obj.__dict__.pop(self.__name__, None)

    def __set__(self, obj, value):
        obj.__dict__[self.__name__] = (value, time())

    def _prepare_func(self, func):
        self.func = func
        if func:
            self.__doc__ = func.__doc__
            self.__name__ = func.__name__
            self.__module__ = func.__module__
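# Editor's note: a minimal usage sketch of the TTL variant (names illustrative):
#
#     class Quotes(object):
#         @cached_property_with_ttl(ttl=5)  # recompute at most every 5 seconds
#         def latest(self):
#             return fetch_quotes()  # hypothetical expensive call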
# Aliases to make cached_property_with_ttl easier to use
cached_property_ttl = cached_property_with_ttl
timed_cached_property = cached_property_with_ttl
class threaded_cached_property_with_ttl(cached_property_with_ttl):
    """
    A cached_property version for use in environments where multiple threads
    might concurrently try to access the property.
    """

    def __init__(self, ttl=None):
        super(threaded_cached_property_with_ttl, self).__init__(ttl)
        self.lock = threading.RLock()

    def __get__(self, obj, cls):
        with self.lock:
            return super(threaded_cached_property_with_ttl, self).__get__(obj, cls)
# Alias to make threaded_cached_property_with_ttl easier to use
threaded_cached_property_ttl = threaded_cached_property_with_ttl
timed_threaded_cached_property = threaded_cached_property_with_ttl | 29.504587 | 96 | 0.632929 | 796 | 6,432 | 4.741206 | 0.226131 | 0.09274 | 0.057234 | 0.066773 | 0.360625 | 0.263646 | 0.233439 | 0.175941 | 0.175941 | 0.160042 | 0 | 0.005877 | 0.285759 | 6,432 | 218 | 97 | 29.504587 | 0.815629 | 0.367693 | 0 | 0.33913 | 0 | 0 | 0.021118 | 0.006181 | 0 | 0 | 0 | 0 | 0 | 1 | 0.156522 | false | 0.017391 | 0.06087 | 0.008696 | 0.408696 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04b6a85ac9932e9cbfd03042b0f9d5385ad450ca | 22,179 | py | Python | src/roto/roto.py | joshbriegal/roto | 58694285932e101286e407bc521b2fa80e4eed47 | [
"MIT"
] | null | null | null | src/roto/roto.py | joshbriegal/roto | 58694285932e101286e407bc521b2fa80e4eed47 | [
"MIT"
] | 3 | 2021-09-15T10:08:47.000Z | 2021-09-16T17:15:55.000Z | src/roto/roto.py | joshbriegal/roto | 58694285932e101286e407bc521b2fa80e4eed47 | [
"MIT"
] | null | null | null | import logging
from itertools import cycle
from typing import Dict, List, Optional, Tuple, Union
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from matplotlib.ticker import ScalarFormatter
from scipy.stats import gaussian_kde, median_abs_deviation
from roto.methods.fft import FFTPeriodFinder
from roto.methods.gacf import GACFPeriodFinder
from roto.methods.gaussianprocess import GPPeriodFinder
from roto.methods.lombscargle import LombScarglePeriodFinder
from roto.methods.periodfinder import PeriodResult
from roto.plotting.plotting_tools import (
    calculate_phase,
    create_axis_with_formatter,
    round_sig,
    split_phase,
)
DEFAULT_COLOUR_CYCLE = cycle(plt.rcParams["axes.prop_cycle"].by_key()["color"])
logger = logging.getLogger(__name__)
class RoTo:
    METHODS = {
        "lombscargle": LombScarglePeriodFinder,
        "fft": FFTPeriodFinder,
        "gacf": GACFPeriodFinder,
        "gp": GPPeriodFinder,  # keep at end of dictionary to allow seed period generation from other methods.
    }

    METHOD_NAMES = {
        "lombscargle": "Lomb-Scargle",
        "fft": "Fast Fourier Transform",
        "gacf": "G-ACF",
        "gp": "Gaussian Process Regression",
    }

    PLOTTING_COLOURS = {method: next(DEFAULT_COLOUR_CYCLE) for method in METHODS}

    def __init__(
        self,
        timeseries: np.ndarray,
        flux: np.ndarray,
        flux_errors: Optional[np.ndarray] = None,
        methods_parameters: Optional[dict] = None,
        name: str = "Unnamed RoTo Object",
        time_units: str = "days",
        flux_units: str = "relative flux units",
    ):
        self.name = name
        self.timeseries = timeseries
        self.flux = flux
        self.flux_errors = flux_errors

        timeseries_diffs = np.diff(self.timeseries)
        self.regular_sampling = (timeseries_diffs.max() - timeseries_diffs.min()) < 1e-5

        self.time_units = time_units
        if self.time_units != "days":
            logger.warning(
                "GP prior scaled to expect timeseries data in days. Check prior or convert units."
            )
        self.flux_units = flux_units
        if self.flux_units != "relative flux units":
            logger.warning(
                "GP prior scaled to expect flux data in relative flux units. Check prior or convert units."
            )

        self.methods = self._parse_constructor_parameters(methods_parameters)

        self.periods = {}

    def _parse_constructor_parameters(
        self,
        methods_parameters: Optional[dict] = None,
    ) -> dict:
        if methods_parameters is None:
            return {
                name: method(
                    self.timeseries,
                    self.flux,
                    self.flux_errors,
                    time_units=self.time_units,
                    flux_units=self.flux_units,
                )
                for name, method in self.METHODS.items()
                if (name != "fft") or (self.regular_sampling)
            }

        methods = {}
        if list(methods_parameters.keys()) == ["gp"]:
            # if just a GP, use a lomb scargle also to seed GP period.
            methods_parameters = {"lombscargle": {}, **methods_parameters}

        for method, kwargs in methods_parameters.items():
            methods[method] = self.METHODS[method](
                self.timeseries,
                self.flux,
                self.flux_errors,
                time_units=self.time_units,
                flux_units=self.flux_units,
                **kwargs,
            )

        return methods

    def __call__(self, **kwargs):
        for name, method in self.methods.items():
            if name == "gp":
                if "gp_seed_period" not in kwargs:
                    average_period = np.median(
                        [
                            period_result.period
                            for period_result in self.periods.values()
                        ]
                    )
                    kwargs["gp_seed_period"] = average_period
            try:
                self.periods[name] = method(**kwargs)
            except Exception as e:
                logger.error("Unable to run method %s" % name)
                logger.error(e, exc_info=True)
                continue

    def periods_to_table(self) -> pd.DataFrame:
        """Convert roto.periods into a DataFrame for display.

        Returns:
            pd.DataFrame: Dataframe with all outputted periods
        """
        columns = {"period": [], "neg_error": [], "pos_error": [], "method": []}
        if not self.periods:
            return pd.DataFrame()
        for period_result in self.periods.values():
            columns["period"].append(period_result.period)
            columns["neg_error"].append(period_result.neg_error)
            columns["pos_error"].append(period_result.pos_error)
            columns["method"].append(period_result.method)
        period_df = pd.DataFrame.from_dict(columns)
        return period_df

    def __str__(self):
        return self.periods_to_table().to_string(index=False)

    def best_period(
        self,
        method: str = "mean",
        include: Optional[List] = None,
        exclude: Optional[List] = None,
    ) -> PeriodResult:
        """Calculate best period based on methods already run. If called before
        running the period finding methods, will return None.

        Args:
            method (str, optional): method should be one of 'mean', 'median' or a period finding method. Defaults to "mean".
            include (Optional[List], optional): Method outputs to include. Defaults to [].
            exclude (Optional[List], optional): Method outputs to exclude. Defaults to [].

        Raises:
            ValueError: If method specified incorrectly.

        Returns:
            PeriodResult: CombinedPeriodResult.
        """
        if not self.periods:
            return None

        periods_to_use = self.periods.values()
        try:
            if include:
                include_classes = [
                    self.METHODS[method_to_include].__name__
                    for method_to_include in include
                ]
                periods_to_use = [
                    period_result
                    for period_result in periods_to_use
                    if period_result.method in include_classes
                ]
            if exclude:
                exclude_classes = [
                    self.METHODS[method_to_exclude].__name__
                    for method_to_exclude in exclude
                ]
                periods_to_use = [
                    period_result
                    for period_result in periods_to_use
                    if period_result.method not in exclude_classes
                ]
            if not periods_to_use:
                raise ValueError(
                    f"Provided incompatible list of include / exclude values. No best period calculated. \n include: {include} \n exclude: {exclude}"
                )
        except KeyError:
            raise ValueError(
                f"Unable to parse include / exclude values given. \n include: {include} \n exclude: {exclude}"
            )

        if method == "mean":
            mean = np.mean([p.period for p in periods_to_use])
            std = np.std([p.period for p in periods_to_use]) / np.sqrt(
                len(periods_to_use)
            )
            return PeriodResult(
                period=mean, neg_error=std, pos_error=std, method="CombinedPeriodResult"
            )
        elif method == "median":
            median = np.median([p.period for p in periods_to_use])
            std = (
                1.4826
                * median_abs_deviation([p.period for p in periods_to_use])
                / np.sqrt(len(periods_to_use))
            )
            return PeriodResult(
                period=median,
                neg_error=std,
                pos_error=std,
                method="CombinedPeriodResult",
            )
        elif method in self.periods:
            return self.periods[method]

        raise ValueError(
            f"Parameter 'method' must be one of ['mean', 'median'] or {list(self.METHODS.keys())}]. Did you specify a period extraction method not run?"
        )
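    # Editor's note: an illustrative (hypothetical) end-to-end sketch of the API
    # defined above:
    #
    #     roto = RoTo(times, fluxes, flux_errors, name="my star")
    #     roto()                                   # run all configured methods
    #     print(roto)                              # per-method period table
    #     best = roto.best_period("median", exclude=["fft"])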
    def plot(
        self,
        savefig: bool = False,
        filename: Optional[str] = None,
        include: Optional[List] = None,
        exclude: Optional[List] = None,
        plot_gp: bool = True,
        show: bool = True,
        summary: bool = False,
        scientific: bool = False,
        return_fig_ax: bool = False,
    ) -> Union[None, Tuple[Figure, Dict]]:
        """Generate summary plot of RoTo object run.

        Args:
            savefig (bool, optional): Save figure to pdf. Defaults to False.
            filename (Optional[str], optional): Name of pdf. Defaults to None.
            include (Optional[List], optional): Methods to include. Defaults to None (all methods).
            exclude (Optional[List], optional): Methods to exclude. Defaults to None (no methods).
            plot_gp (bool, optional): Plot Gaussian Process prediction & residuals. Defaults to True.
            show (bool, optional): Show interactive plot. Defaults to True.
            summary (bool, optional): Just plot summary, no methods. Defaults to False.
            scientific (bool, optional): Scientific formatting of numbers vs linear scale. Defaults to False.
            return_fig_ax (bool, optional): Return figure and axis tuples for further processing. Defaults to False.

        Returns:
            Union[None, Tuple[Figure, Dict]]: None or a tuple (matplotlib figure, dictionary of matplotlib axes)
        """
        if savefig and not filename:
            filename = f"{self.name}.pdf"

        if (not include) or (not self.periods):
            include = list(self.periods.keys())

        plot_gp = plot_gp and ("gp" in self.periods)

        fig, ax_dict = self._setup_figure(
            include=include,
            exclude=exclude,
            summary=summary,
            scientific=scientific,
            plot_gp=plot_gp,
        )

        epoch = self.timeseries.min()

        self.plot_data(ax_dict["data"])
        self.plot_periods(ax_dict["distributions"])
        self.plot_phase_folded_data(
            ax_dict["phase_fold"], self.best_period().period, epoch=epoch
        )

        if not summary:
            for method_name, method in self.methods.items():
                try:
                    if method_name == "gp":
                        if plot_gp:
                            ax_dict["data"].get_xaxis().set_visible(False)
                            method.plot_gp_predictions(
                                ax_dict["data"],
                                colour=self.PLOTTING_COLOURS[method_name],
                            )
                            method.plot_gp_residuals(
                                ax_dict["residuals"],
                                colour=self.PLOTTING_COLOURS[method_name],
                            )
                            ax_dict["residuals"].set_xlim(ax_dict["data"].get_xlim())
                    method.plot(
                        ax_dict[method_name]["method"],
                        self.periods[method_name],
                        colour=self.PLOTTING_COLOURS[method_name],
                    )
                    self.plot_phase_folded_data(
                        ax_dict[method_name]["phase_fold"],
                        self.periods[method_name].period,
                        epoch=epoch,
                    )
                except KeyError as err:
                    logger.warning(f"Not plotting method {method} as no results found")
                    continue

        if savefig:
            fig.savefig(filename, bbox_inches="tight", pad_inches=0.25)
        if show:
            plt.show()
        if return_fig_ax:
            return fig, ax_dict
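    # Editor's note: a hypothetical call illustrating the plotting flags above:
    #
    #     fig, ax_dict = roto.plot(show=False, summary=True, return_fig_ax=True)
    #     fig.savefig("summary.pdf")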
def plot_summary(
self,
savefig: bool = False,
filename: Optional[str] = None,
plot_gp: bool = True,
show: bool = True,
):
"""Helper function to create just summary plots, same as calling self.plot(summary=True)
Args:
savefig (bool, optional): Save figure to pdf. Defaults to False.
filename (Optional[str], optional): Name of pdf. Defaults to None.
plot_gp (bool, optional): Plot Gaussian Process prediction & residuals. Defaults to True.
show (bool, optional): Show interactive plot. Defaults to True.
"""
self.plot(
savefig=savefig, filename=filename, plot_gp=plot_gp, show=show, summary=True
)
def plot_periods(self, ax: Axes) -> Axes:
"""Plot figure comparing outputted periods and errors.
Args:
ax (Axes): Matplotlib axis
Returns:
Axes: Matplotlib axis
"""
for name, period in self.periods.items():
if period.period_distribution is not None:
# plot as distribution
density = gaussian_kde(period.period_distribution)
pmin = max(0, period.period - 5 * period.neg_error)
pmax = period.period + 5 * period.pos_error
xs = np.linspace(pmin, pmax, 100)
kde_plot = density(xs)
kde_plot *= 1.0 / kde_plot.max()
ax.plot(xs, kde_plot, color=self.PLOTTING_COLOURS[name])
ax.axvline(
period.period,
label=self.METHOD_NAMES[name],
color=self.PLOTTING_COLOURS[name],
)
ax.axvspan(
period.period - period.neg_error,
period.period + period.pos_error,
color=self.PLOTTING_COLOURS[name],
alpha=0.2,
)
# plot best period as a single point with error bars
best_period = self.best_period()
ax.errorbar(
best_period.period,
0.5,
xerr=[[best_period.neg_error], [best_period.pos_error]],
ms=10,
marker="s",
c="k",
capsize=10,
)
ax.set_xlim(
[
best_period.period - 5 * best_period.neg_error,
best_period.period + 5 * best_period.pos_error,
]
)
ax.set_ylim([0, 1])
ax.get_yaxis().set_visible(False)
ax.set_xlabel("Period")
two_sided_error = np.average([best_period.neg_error, best_period.pos_error])
error_rounded, error_precision = round_sig(two_sided_error, 2, return_dp=True)
ax.set_title(
f"Adopted Period: {round(best_period.period, error_precision)} ± {error_rounded} {self.time_units}"
)
ax.legend()
return ax
def plot_gp_diagnostics(
self,
show: bool = True,
savefig: bool = False,
filename: str = "",
fileext: str = "pdf",
):
"""Plot Gaussian Process diagnostic outputs figures.
Args:
show (bool, optional): Show interactive plot. Defaults to True.
savefig (bool, optional): Save figure to pdf. Defaults to False.
            filename (str, optional): Name of output file; defaults to "" (a name is derived from self.name when saving).
fileext (str, optional): File extension to save figure. Defaults to "pdf".
Raises:
RuntimeError: If no GP found.
"""
if "gp" not in self.methods:
raise RuntimeError("Cannot plot GP diagnostics, no GP method found.")
if savefig and not filename:
filename = f"{self.name}.pdf"
try:
self.methods["gp"].plot_trace(
show=show, savefig=savefig, filename=filename, fileext=fileext
)
except RuntimeError as trace_err:
logger.error("Unable to plot trace")
logger.error(trace_err, exc_info=True)
try:
self.methods["gp"].plot_distributions(
show=show, savefig=savefig, filename=filename, fileext=fileext
)
except (RuntimeError, ValueError) as dist_err:
logger.error("Unable to plot GP distributions")
logger.error(dist_err, exc_info=True)
def plot_data(self, ax: Axes) -> Axes:
"""Scatter plot of input data.
Args:
ax (Axes): Matplotlib axis
Returns:
Axes: Matplotlib axis
"""
if "gp" in self.methods:
mask = self.methods["gp"].mask
else:
mask = np.ones(len(self.timeseries), dtype=bool)
ax.errorbar(
self.timeseries[mask],
self.flux[mask],
self.flux_errors[mask],
markersize=2,
errorevery=1,
linestyle="none",
marker="o",
color="k",
ecolor="gray",
alpha=0.7,
capsize=0,
elinewidth=1,
mec="none",
)
ax.errorbar(
self.timeseries[~mask],
self.flux[~mask],
self.flux_errors[~mask],
markersize=2,
errorevery=1,
linestyle="none",
marker="o",
color="k",
ecolor="gray",
alpha=0.3,
capsize=0,
elinewidth=1,
mec="none",
)
ax.set_xlabel(f"Time / {self.time_units}")
ax.set_ylabel(f"Flux / {self.flux_units}")
ymin = np.min(self.flux - self.flux_errors)
ymax = np.max(self.flux + self.flux_errors)
yextent = ymax - ymin
ax.set_ylim([ymin - (yextent * 0.01), ymax + (yextent * 0.01)])
return ax
def plot_phase_folded_data(self, ax: Axes, period: float, epoch: float = 0) -> Axes:
"""Plot data phase folded on period and epoch.
Colour scale incremented for each period.
Args:
ax (Axes): Matplotlib axis
period (float): Period on which to phase fold.
epoch (float, optional): Epoch on which to phase fold. Defaults to 0.
Returns:
Axes: Matplotlib axis
"""
phased_timeseries = calculate_phase(self.timeseries, period, epoch)
split_phases, split_flux = split_phase(phased_timeseries, self.flux)
colours = iter(cm.viridis(np.r_[0 : 1 : len(split_phases) * 1j]))
for phase, flux in zip(split_phases, split_flux):
ax.scatter(phase, flux, color=next(colours), s=1)
ax.set_title(f"Period: {period:.4f} {self.time_units}")
ax.set_xlim([0, 1])
ax.set_xlabel("Phase")
ax.set_ylabel(f"Flux / {self.flux_units}")
return ax
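    # For reference: the phase fold above is assumed to follow the usual
    # convention phase = ((t - epoch) / period) % 1, mapping each time stamp
    # into [0, 1); calculate_phase is expected to implement the equivalent.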
def _setup_figure(
self,
        include: Optional[List] = None,  # None (not []) avoids the mutable-default-argument pitfall
        exclude: Optional[List] = None,
summary: bool = False,
scientific: bool = False,
plot_gp: bool = False,
):
unit_grid_width = 5
unit_grid_height = 1
data_plot_size = (2, 3) # in units of grid width, height
residuals_plot_size = (2, 1)
distributions_plot_size = (1, 3)
phase_fold_plot_size = (1, 3)
method_plot_size = (1, 3)
spacer_plot_size = (2, 1)
if summary:
# just plot summary stats, no method plots.
methods = {}
else:
methods = {name: method for name, method in self.methods.items()}
if include:
methods = {
name: method for name, method in methods.items() if name in include
}
if exclude:
methods = {
name: method
for name, method in methods.items()
if name not in exclude
}
n_grid_units_width = 2
n_grid_units_height = (
data_plot_size[1]
+ (residuals_plot_size[1] * int(plot_gp))
+ distributions_plot_size[1]
+ method_plot_size[1] * len(methods)
+ spacer_plot_size[1] * (1 + len(methods))
)
figsize = (
unit_grid_width * n_grid_units_width,
unit_grid_height * n_grid_units_height,
)
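        # Worked example (illustrative): with plot_gp=True and two methods kept,
        # n_grid_units_height = 3 + 1 + 3 + 3*2 + 1*(1 + 2) = 16, so the figure
        # is 5*2 = 10 units wide and 1*16 = 16 units tall.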
fig = plt.figure(figsize=figsize)
gridspec = fig.add_gridspec(n_grid_units_height, n_grid_units_width)
plt.subplots_adjust(hspace=0.0, wspace=0.2)
axes = {}
formatter = ScalarFormatter()
formatter.set_scientific(scientific)
height = 0
axes["data"] = create_axis_with_formatter(
fig, gridspec[height : height + data_plot_size[1], :], formatter
)
height += data_plot_size[1]
if plot_gp:
axes["residuals"] = create_axis_with_formatter(
fig,
gridspec[height : height + residuals_plot_size[1], :],
formatter,
sharex=axes["data"],
)
height += residuals_plot_size[1]
height += spacer_plot_size[1]
axes["distributions"] = create_axis_with_formatter(
fig, gridspec[height : height + distributions_plot_size[1], 0], formatter
)
axes["phase_fold"] = create_axis_with_formatter(
fig, gridspec[height : height + phase_fold_plot_size[1], 1], formatter
)
height += phase_fold_plot_size[1]
height += spacer_plot_size[1]
for method in methods:
axes[method] = {
"method": create_axis_with_formatter(
fig, gridspec[height : height + method_plot_size[1], 0], formatter
),
"phase_fold": create_axis_with_formatter(
fig, gridspec[height : height + method_plot_size[1], 1], formatter
),
}
height += method_plot_size[1]
height += spacer_plot_size[1]
axes["data"].set_title(self.name)
return fig, axes
| 34.927559 | 152 | 0.550881 | 2,425 | 22,179 | 4.865567 | 0.148041 | 0.016273 | 0.016018 | 0.013645 | 0.368421 | 0.313925 | 0.257988 | 0.235528 | 0.187982 | 0.158742 | 0 | 0.007081 | 0.356914 | 22,179 | 634 | 153 | 34.98265 | 0.820094 | 0.142026 | 0 | 0.252654 | 0 | 0.008493 | 0.079682 | 0.002971 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027601 | false | 0 | 0.036093 | 0.002123 | 0.101911 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04b810df629398a8386d9f310c6d63fef1c5faa9 | 2,638 | py | Python | testPrintProfile2.py | dmsteck/paper-regularized-qn-benchmark | b07ed54ca50af4bf2cf45952c103f3f80b62f5e2 | [
"MIT"
] | null | null | null | testPrintProfile2.py | dmsteck/paper-regularized-qn-benchmark | b07ed54ca50af4bf2cf45952c103f3f80b62f5e2 | [
"MIT"
] | null | null | null | testPrintProfile2.py | dmsteck/paper-regularized-qn-benchmark | b07ed54ca50af4bf2cf45952c103f3f80b62f5e2 | [
"MIT"
] | 1 | 2019-12-05T11:55:16.000Z | 2019-12-05T11:55:16.000Z | """
...
"""
import numpy as np
import matplotlib.pyplot as plt
from utility import parameters
from utility import perfprof
algorithms = ['regLBFGS', 'armijoLBFGS', 'wolfeLBFGS']
# Read all the data in the ugliest fashion possible
nfM = np.array([np.loadtxt(f"results/{a}_solve.csv", delimiter=',')[:, 0] for a in algorithms])
iterM = np.array([np.loadtxt(f"results/{a}_solve.csv", delimiter=',')[:, 1] for a in algorithms])
fxM = np.array([np.loadtxt(f"results/{a}_solve.csv", delimiter=',')[:, 2] for a in algorithms])
optM = np.array([np.loadtxt(f"results/{a}_solve.csv", delimiter=',')[:, 3] for a in algorithms])
nfN = np.array([np.loadtxt(f"results/{a}_solveNonmonotone.csv", delimiter=',')[:, 0] for a in algorithms])
iterN = np.array([np.loadtxt(f"results/{a}_solveNonmonotone.csv", delimiter=',')[:, 1] for a in algorithms])
fxN = np.array([np.loadtxt(f"results/{a}_solveNonmonotone.csv", delimiter=',')[:, 2] for a in algorithms])
optN = np.array([np.loadtxt(f"results/{a}_solveNonmonotone.csv", delimiter=',')[:, 3] for a in algorithms])
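# A more compact, behaviour-equivalent loader (illustrative sketch only; the
# helper name is hypothetical):
#   def load_columns(suffix):
#       data = [np.loadtxt(f"results/{a}{suffix}.csv", delimiter=',') for a in algorithms]
#       return [np.array([d[:, i] for d in data]) for i in range(4)]
#   nfM, iterM, fxM, optM = load_columns("_solve")
#   nfN, iterN, fxN, optN = load_columns("_solveNonmonotone")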
# Load LMTR results
lmtrResults = np.loadtxt('results/lmtr.csv', delimiter=',')
fxM = np.vstack([fxM, lmtrResults[:, 2]])
optM = np.vstack([optM, lmtrResults[:, 3]])
iterM = np.vstack([iterM, lmtrResults[:, 0]])
nfM = np.vstack([nfM, lmtrResults[:, 1]])
# Discard problems that weren't solved
nfM[optM > parameters.tolGrad] = np.inf
nfN[optN > parameters.tolGrad] = np.inf
# Print!
palette = ['o-r', 'o:b', 'o--c', 'o-.g', 'o:k', 'o-y', 'o:m', 'o--b']
perfprof.perfprof(nfM.T, linestyle=palette, thmax=5., markersize=4, markevery=[0])
plt.legend(['regLBFGS', 'armijoLBFGS', 'wolfeLBFGS', 'eigLBFGS'], loc=4, fontsize=16)
plt.savefig("figures/Monotone2.pdf", bbox_inches='tight', pad_inches=0)
plt.show()
perfprof.perfprof(nfN.T, linestyle=palette, thmax=5., markersize=4, markevery=[0])
plt.legend([r'regLBFGS$_n$', r'armijoLBFGS$_n$', r'wolfeLBFGS$_n$'], loc=4, fontsize=16)
plt.savefig("figures/Nonmonotone2.pdf", bbox_inches='tight', pad_inches=0)
plt.show()
# Print monotone vs nonmonotone comparison
perfprof.perfprof(np.vstack([nfM[1:3, :], nfN[1:3, :]]).T, linestyle=palette, thmax=5., markersize=4, markevery=[0])
plt.legend(['armijoLBFGS', 'wolfeLBFGS', r'armijoLBFGS$_n$', r'wolfeLBFGS$_n$'], loc=4, fontsize=16)
plt.savefig("figures/All21.pdf", bbox_inches='tight', pad_inches=0)
plt.show()
perfprof.perfprof(np.vstack([nfM[0, :], nfM[3, :], nfN[0, :]]).T, linestyle=palette, thmax=5., markersize=4, markevery=[0])
plt.legend(['regLBFGS', 'eigLBFGS', r'regLBFGS$_n$'], loc=4, fontsize=16)
plt.savefig("figures/All22.pdf", bbox_inches='tight', pad_inches=0)
plt.show()
| 46.280702 | 123 | 0.690675 | 397 | 2,638 | 4.534005 | 0.241814 | 0.045 | 0.04 | 0.071111 | 0.613889 | 0.592778 | 0.592778 | 0.5 | 0.482222 | 0.443333 | 0 | 0.022148 | 0.092873 | 2,638 | 56 | 124 | 47.107143 | 0.730046 | 0.059515 | 0 | 0.108108 | 0 | 0 | 0.221952 | 0.104091 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.108108 | 0 | 0.108108 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04b93f24806cca8ae65455322559cca6b35b9e43 | 5,315 | py | Python | src/train_eval.py | 1amG4bor/mlflow-for-model-improvement | 9ec653a8d20b353b1d049dd509cd1b680b6152b1 | [
"MIT"
] | 1 | 2021-11-27T23:05:36.000Z | 2021-11-27T23:05:36.000Z | src/train_eval.py | 1amG4bor/mlflow-for-model-improvement | 9ec653a8d20b353b1d049dd509cd1b680b6152b1 | [
"MIT"
] | null | null | null | src/train_eval.py | 1amG4bor/mlflow-for-model-improvement | 9ec653a8d20b353b1d049dd509cd1b680b6152b1 | [
"MIT"
] | null | null | null | """Train and evaluate Tool
The script combines its two main inputs, the config.yml file and the CLI params.
Depending on this configuration, two execution branches are possible:
- Training workflow:
- Load the dataset
- Create a model with the provided configuration
- Train then save the model
- Evaluate the model
- Log the hyper-parameters, values, model, and the train/test result with MLflow
- Load-evaluate workflow:
- load the dataset and a given model
- Evaluate the model
For more information about configuration see:
:py:func:`~setup ARGS from CLI options <helper.config.setup_arguments>`
:ref:`~config file <config.yml>`
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Set TensorFlow log-level
import logging
import mlflow.keras
from helper import config
from service import data_service, model_service
from helper.config import DEFAULT_TRACKING_URI, DEFAULT_EXPERIMENT_NAME
log_format = '%(asctime)s >>%(levelname)s<< %(filename)s|%(funcName)s: ln.%(lineno)d => %(message)s'
logging.basicConfig(format=log_format, level=logging.INFO)
logger = logging.getLogger('Train_predict')
def init():
""" Initialize parameters and MLflow
Params will set the corresponding MLflow parameters
"""
logger.info('Initialization..')
ARGS = config.setup_arguments()
img_height = img_width = 128
input_shape = (img_height, img_width, 1)
return ARGS, input_shape
def prepare_data(ARGS, input_shape):
""" Data Augmentation Ingestion & Segregation
- Data Ingestion: gather the data that only need to be fed into the pipeline
- Data Preparation: assume that the dataset is already prepared, ready-to-use
(No internal step to analyze and/or modify the dataset)
- Data Segregation: Split the dataset into subsets of training-set and testing-set
Validation-set will be separated from the training-dataset (80/20) just before training
"""
batch = ARGS.batch
dataset_path = data_service.data_sourcing(ARGS)
train_data, test_data, class_labels = data_service.data_segregation(
dataset_path, input_shape, batch, ARGS.test_split)
logger.info(f'Classes of the dataset: ({len(class_labels)}) => {class_labels}')
return train_data, test_data, class_labels
if __name__ == '__main__':
RUN_ARGS, feature_shape = init()
# Data Extraction
train_ds, test_ds, labels = prepare_data(RUN_ARGS, feature_shape)
# Modelling
model_name = RUN_ARGS.name
if RUN_ARGS.create:
# Run with MLflow
run_name = RUN_ARGS.run_name
tracking_uri = config.get_value('mlflow', 'tracking_uri') or DEFAULT_TRACKING_URI
experiment_name = config.get_value('mlflow', 'experiment_name') or DEFAULT_EXPERIMENT_NAME
mlflow.set_tracking_uri(tracking_uri)
mlflow.set_experiment(experiment_name)
mlflow.tensorflow.autolog()
with mlflow.start_run(run_name=run_name) as run:
run_id = run.info.run_id
mlflow.set_tag('experiment_id', run.info.experiment_id)
mlflow.set_tag('run_id', run_id)
mlflow.log_param('input-shape', feature_shape)
params_to_log = ['name', 'subset_name', 'dataset', 'epoch', 'batch', 'test_split', 'validation_split']
params = {i: vars(RUN_ARGS).get(i) for i in params_to_log}
mlflow.log_params({
'cfg_model_name': params.get('subset_name') or params.get('name'),
'cfg_dataset_name': params.get('dataset'),
'cfg_labels': labels,
'HP_epochs': params.get('epoch'),
'HP_batch_size': params.get('batch'),
'HP_test_split': params.get('test_split'),
'HP_validation_split': params.get('validation_split'),
})
# Create, train, and save model
model = model_service.create(model_name, feature_shape, labels)
model_service.train(model, train_ds, RUN_ARGS.epoch, RUN_ARGS.validation_split)
model_service.save(model, model_name)
# Validation
model_service.evaluate_model(model, test_ds)
stat, cumulative_accuracy = model_service.validate_classification(model, test_ds, labels, False)
for key, value in stat.items():
acc = round(value['right'] / (value['right'] + value['wrong']) * 100, 1)
mlflow.log_param(f'accuracy.{key}', acc)
mlflow.log_param(f'stat.{key}', value)
# Register the model
if cumulative_accuracy >= RUN_ARGS.deploy_limit:
logger.info(f"The '{model_name}' model with runId of '{run_id}' and '{cumulative_accuracy}' accuracy "
f"is registered to the model-registry as '{run_name}'.")
mlflow.register_model(
model_uri=f'runs:/{run_id}/model',
name=run_name
)
logger.info(f'The run has been finished, check: `{tracking_uri}` for the result and for more information!')
else:
# Load model
model = model_service.load(model_name)
# Validation
model_service.evaluate_model(model, test_ds)
model_service.validate_classification(model, test_ds, labels, False)
| 41.850394 | 118 | 0.665663 | 690 | 5,315 | 4.908696 | 0.284058 | 0.031887 | 0.012991 | 0.012991 | 0.082079 | 0.082079 | 0.065545 | 0.065545 | 0.065545 | 0.032477 | 0 | 0.003692 | 0.23556 | 5,315 | 126 | 119 | 42.18254 | 0.829929 | 0.253246 | 0 | 0.028169 | 0 | 0.014085 | 0.198106 | 0.017917 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028169 | false | 0 | 0.084507 | 0 | 0.140845 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04b9c4193420eabe9013aae3e035a41bbd7078ad | 5,959 | py | Python | model_tools/brain_transformation/neural.py | franzigeiger/model-tools-1 | 124e1ee688161d1e671dad33c0ebefd001d78ef6 | [
"MIT"
] | null | null | null | model_tools/brain_transformation/neural.py | franzigeiger/model-tools-1 | 124e1ee688161d1e671dad33c0ebefd001d78ef6 | [
"MIT"
] | null | null | null | model_tools/brain_transformation/neural.py | franzigeiger/model-tools-1 | 124e1ee688161d1e671dad33c0ebefd001d78ef6 | [
"MIT"
] | null | null | null | import logging
from collections.abc import Iterable
from typing import Optional, Union
from tqdm import tqdm
from brainscore.metrics import Score
from brainscore.model_interface import BrainModel
from brainscore.utils import fullname
from model_tools.activations.pca import LayerPCA
from model_tools.brain_transformation import TemporalIgnore
from result_caching import store_xarray, store
class LayerMappedModel(BrainModel):
def __init__(self, identifier, activations_model, region_layer_map: Optional[dict] = None):
self.identifier = identifier
self.activations_model = activations_model
self.region_layer_map = region_layer_map or {}
self.recorded_regions = []
def look_at(self, stimuli):
layer_regions = {}
for region in self.recorded_regions:
layers = self.region_layer_map[region]
if not isinstance(layers, Iterable) or isinstance(layers, (str, bytes)):
layers = [layers]
for layer in layers:
assert layer not in layer_regions, f"layer {layer} has already been assigned for {layer_regions[layer]}"
layer_regions[layer] = region
activations = self.activations_model(stimuli, layers=list(layer_regions.keys()))
activations['region'] = 'neuroid', [layer_regions[layer] for layer in activations['layer'].values]
return activations
def start_task(self, task):
if task != BrainModel.Task.passive:
raise NotImplementedError()
def start_recording(self, recording_target: BrainModel.RecordingTarget):
if str(recording_target) not in self.region_layer_map:
raise NotImplementedError(f"Region {recording_target} is not committed")
self.recorded_regions = [recording_target]
def commit(self, region: str, layer: Union[str, list, tuple]):
if isinstance(layer, list):
layer = tuple(layer)
self.region_layer_map[region] = layer
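    # Illustrative usage sketch (model identifier and layer names are
    # hypothetical examples):
    #   mapped = LayerMappedModel('alexnet', activations_model)
    #   mapped.commit('V4', 'features.7')                    # single layer
    #   mapped.commit('IT', ['features.9', 'features.11'])   # stored as a tuple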
class LayerSelection:
def __init__(self, model_identifier, activations_model, layers):
"""
:param model_identifier: this is separate from the container name because it only refers to
the combination of (model, preprocessing), i.e. no mapping.
"""
self.model_identifier = model_identifier
self._layer_scoring = LayerScores(model_identifier=model_identifier, activations_model=activations_model)
self.layers = layers
self._logger = logging.getLogger(fullname(self))
def __call__(self, selection_identifier, benchmark):
# for layer-mapping, attach LayerPCA so that we can cache activations
model_identifier = self.model_identifier
pca_hooked = LayerPCA.is_hooked(self._layer_scoring._activations_model)
if not pca_hooked:
pca_handle = LayerPCA.hook(self._layer_scoring._activations_model, n_components=1000)
identifier = self._layer_scoring._activations_model.identifier
self._layer_scoring._activations_model.identifier = identifier + "-pca_1000"
model_identifier += "-pca_1000"
result = self._call(model_identifier=model_identifier, selection_identifier=selection_identifier,
benchmark=benchmark)
if not pca_hooked:
pca_handle.remove()
self._layer_scoring._activations_model.identifier = identifier
return result
@store(identifier_ignore=['assembly'])
def _call(self, model_identifier, selection_identifier, benchmark):
self._logger.debug("Finding best layer")
layer_scores = self._layer_scoring(benchmark=benchmark, benchmark_identifier=selection_identifier,
layers=self.layers, prerun=True)
self._logger.debug("Layer scores (unceiled): " + ", ".join([
f"{layer} -> {layer_scores.raw.sel(layer=layer, aggregation='center').values:.3f}"
f"+-{layer_scores.raw.sel(layer=layer, aggregation='error').values:.3f}"
for layer in layer_scores['layer'].values]))
best_layer = layer_scores['layer'].values[layer_scores.sel(aggregation='center').argmax()]
return best_layer
class LayerScores:
def __init__(self, model_identifier, activations_model):
self.model_identifier = model_identifier
self._activations_model = activations_model
self._logger = logging.getLogger(fullname(self))
def __call__(self, benchmark, layers, benchmark_identifier=None, prerun=False):
return self._call(model_identifier=self.model_identifier,
benchmark_identifier=benchmark_identifier or benchmark.identifier,
model=self._activations_model, benchmark=benchmark, layers=layers, prerun=prerun)
@store_xarray(identifier_ignore=['model', 'benchmark', 'layers', 'prerun'], combine_fields={'layers': 'layer'})
def _call(self, model_identifier, benchmark_identifier, # storage fields
model, benchmark, layers, prerun=False):
if prerun:
# pre-run activations together to avoid running every layer separately
model(layers=layers, stimuli=benchmark._assembly.stimulus_set)
layer_scores = []
for layer in tqdm(layers, desc="layers"):
layer_model = LayerMappedModel(identifier=f"{model_identifier}-{layer}",
# per-layer identifier to avoid overlap
activations_model=model, region_layer_map={benchmark.region: layer})
layer_model = TemporalIgnore(layer_model)
score = benchmark(layer_model)
score = score.expand_dims('layer')
score['layer'] = [layer]
layer_scores.append(score)
layer_scores = Score.merge(*layer_scores)
layer_scores = layer_scores.sel(layer=layers) # preserve layer ordering
return layer_scores
| 48.056452 | 120 | 0.681322 | 654 | 5,959 | 5.954128 | 0.235474 | 0.084746 | 0.039034 | 0.034669 | 0.248074 | 0.172573 | 0.130971 | 0.025167 | 0.025167 | 0 | 0 | 0.003059 | 0.232086 | 5,959 | 123 | 121 | 48.447154 | 0.847902 | 0.062091 | 0 | 0.0625 | 0 | 0.010417 | 0.079236 | 0.032955 | 0 | 0 | 0 | 0 | 0.010417 | 1 | 0.114583 | false | 0.010417 | 0.104167 | 0.010417 | 0.302083 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04bab0fcc8f0c4e3268cebb2cb3c07963e8a3db4 | 920 | py | Python | test/test_extension/test_analytics.py | maximest-pierre/WebCore | 543bfb79c0737917d1bd2a148eb61761ab6f6319 | [
"MIT"
] | 56 | 2015-05-13T16:08:06.000Z | 2021-12-26T22:24:46.000Z | test/test_extension/test_analytics.py | maximest-pierre/WebCore | 543bfb79c0737917d1bd2a148eb61761ab6f6319 | [
"MIT"
] | 104 | 2015-01-20T23:55:28.000Z | 2021-03-01T03:29:47.000Z | test/test_extension/test_analytics.py | maximest-pierre/WebCore | 543bfb79c0737917d1bd2a148eb61761ab6f6319 | [
"MIT"
] | 12 | 2015-05-22T15:46:39.000Z | 2021-09-16T00:38:54.000Z | # encoding: utf-8
import time
import pytest
from webob import Request
from web.core import Application
from web.core.context import Context
from web.ext.analytics import AnalyticsExtension
def endpoint(context):
time.sleep(0.1)
return "Hi."
sample = Application(endpoint, extensions=[AnalyticsExtension()])
def test_analytics_extension():
ctx = Context(response=Context(headers=dict()))
ext = AnalyticsExtension()
assert not hasattr(ctx, '_start_time')
ext.prepare(ctx)
assert hasattr(ctx, '_start_time')
ext.before(ctx)
time.sleep(0.1)
ext.after(ctx)
assert 100 <= float(ctx.response.headers['X-Generation-Time']) <= 200
def test_analytics_extension_in_context():
try:
__import__('web.dispatch.object')
except ImportError:
pytest.skip("web.dispatch.object not installed")
resp = Request.blank('/').get_response(sample)
assert 100 <= float(resp.headers['X-Generation-Time']) <= 200
| 20.909091 | 70 | 0.746739 | 123 | 920 | 5.463415 | 0.439024 | 0.03125 | 0.032738 | 0.032738 | 0.139881 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021066 | 0.122826 | 920 | 43 | 71 | 21.395349 | 0.811648 | 0.016304 | 0 | 0.074074 | 0 | 0 | 0.124169 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 1 | 0.111111 | false | 0 | 0.296296 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04c05fe0493b6c15658cd872dfc53737c5f84819 | 737 | py | Python | pymongolab/__init__.py | puentesarrin/pymongolab | 159a46c6cf88313c11522503d9243e2e16d3d72c | [
"Apache-2.0"
] | 2 | 2015-04-09T08:17:03.000Z | 2016-05-17T23:42:36.000Z | pymongolab/__init__.py | puentesarrin/pymongolab | 159a46c6cf88313c11522503d9243e2e16d3d72c | [
"Apache-2.0"
] | 1 | 2015-02-12T17:25:17.000Z | 2015-02-12T17:53:09.000Z | pymongolab/__init__.py | puentesarrin/pymongolab | 159a46c6cf88313c11522503d9243e2e16d3d72c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 *-*
"""PyMongo_-flavored package for accessing to MongoLab databases via
`MongoLabClient`.
.. _PyMongo: http://api.mongodb.org/python/current/"""
ASCENDING = 1
"""Ascending sort order."""
DESCENDING = -1
"""Descending sort order."""
OFF = 0
"""No database profiling."""
SLOW_ONLY = 1
"""Only profile slow operations."""
ALL = 2
"""Profile all operations."""
version_tuple = (1, 2, '+')
def get_version_string():
if isinstance(version_tuple[-1], basestring):
return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1]
return '.'.join(map(str, version_tuple))
version = get_version_string()
from pymongolab.connection import Connection
from pymongolab.mongo_client import MongoClient
| 21.057143 | 73 | 0.694708 | 92 | 737 | 5.423913 | 0.586957 | 0.12024 | 0.104208 | 0.064128 | 0.112224 | 0.112224 | 0 | 0 | 0 | 0 | 0 | 0.017405 | 0.142469 | 737 | 34 | 74 | 21.676471 | 0.772152 | 0.215739 | 0 | 0 | 0 | 0 | 0.007059 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04c092098ea8bed63aafcff967ba75911ecb4e0e | 2,065 | py | Python | Week 03/exercise05.py | JessicaHamilton/PHYS-3210 | 997fb9fbc43852ed32badaca68bed39bef2a1b0b | [
"MIT"
] | null | null | null | Week 03/exercise05.py | JessicaHamilton/PHYS-3210 | 997fb9fbc43852ed32badaca68bed39bef2a1b0b | [
"MIT"
] | null | null | null | Week 03/exercise05.py | JessicaHamilton/PHYS-3210 | 997fb9fbc43852ed32badaca68bed39bef2a1b0b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 4 10:03:35 2019
@author: hamil
"""
import numpy as np
import matplotlib.pyplot as plt
def up_harmonic(value_n):
H_up = 0.0
summ_array1 = []
new_x = value_n + 1
x_array1 = np.arange(1,new_x)
for each_value in x_array1:
numm1 = 1/each_value
H_up = H_up + numm1
summ_array1.append(H_up)
return H_up, summ_array1
#test = up_harmonic(20)
#up_sum = test[0]
#up_values = test[1]
#print("Sum up value is:", test[0])
#print("The Up values are:", up_values)
def down_harmonic(value_n):
H_down = 0.0
summ_array2 = []
new_x = value_n + 1
x_array = np.arange(1,new_x)
x_array2 = x_array[::-1]
for each_value in x_array2:
numm2 = 1/each_value
H_down = H_down + numm2
summ_array2.append(H_down)
return H_down, summ_array2
#test1 = down_harmonic(20)
#down_sum = test1[0]
#down_values = test1[1]
#print("Sum down value is:", test1[0])
#print("The down values are:", down_values)
fraction_array = []
x_values = np.arange(1,50)
for new_value in x_values:
test1 = up_harmonic(new_value)
test2 = down_harmonic(new_value)
up_sum = test1[0]
down_sum = test2[0]
up_array = test1[1]
down_array = test2[1]
print("The up sum is:", up_sum)
print("The down sum is:", down_sum)
sub = up_sum - down_sum
abs_add = np.abs(up_sum) + np.abs(down_sum)
fraction = sub / abs_add
fraction_array.append(fraction)
plt.plot(x_values,fraction_array)
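# Illustrative demonstration of the same effect (assumes float32 so that the
# precision loss is visible after a modest number of terms):
#   up = np.float32(0.0)
#   for n in range(1, 10**6):
#       up += np.float32(1.0) / np.float32(n)
#   down = np.float32(0.0)
#   for n in range(10**6 - 1, 0, -1):
#       down += np.float32(1.0) / np.float32(n)
#   print(up, down)   # the downward sum is closer to the true value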
# Comparing the values of the sum-up versus the sum-down loop, the sum down is
#more precise. A float stores only a limited number of significant digits, so
#once the running total is large, terms that are tiny by comparison fall below
#that precision limit; the computer simply drops them and the total does not change. With the sum-down
#approach, you start with the small numbers and then slowly add more and more larger valued numbers. | 27.905405 | 112 | 0.684746 | 355 | 2,065 | 3.794366 | 0.295775 | 0.022272 | 0.020045 | 0.022272 | 0.059391 | 0.017817 | 0 | 0 | 0 | 0 | 0 | 0.039604 | 0.217433 | 2,065 | 74 | 113 | 27.905405 | 0.793936 | 0.426634 | 0 | 0.051282 | 0 | 0 | 0.02584 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.051282 | 0 | 0.153846 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04c0b65e42c25329491e1c4286e88a08b669efe7 | 2,458 | py | Python | src/cliptools/modules/data_loader.py | bigbirdcode/cliptools | 992ddf2088462477992734af8eb00453bde3ce85 | [
"MIT"
] | null | null | null | src/cliptools/modules/data_loader.py | bigbirdcode/cliptools | 992ddf2088462477992734af8eb00453bde3ce85 | [
"MIT"
] | 6 | 2019-04-02T18:25:35.000Z | 2019-08-21T20:24:16.000Z | src/cliptools/modules/data_loader.py | bigbirdcode/cliptools | 992ddf2088462477992734af8eb00453bde3ce85 | [
"MIT"
] | null | null | null | """ClipTools clipboard manager and text processing tools
with a lines based GUI interface
Data loader, search for available personal data.
WARNING, python file will be executed!
When making python personal file, take care not allow uncontrolled changes!
yaml is safer from this point of view.
Note: logging is not part of ClipTools yet; only minimalistic feedback is given.
If the personal data file is not found then the tool will silently read the default.
If file load has an error then a print to a console (if available) is given.
"""
import pathlib
from .. import config
def load_data():
"""Load available personal or sample text data
data is an OrderedDict or similar structure,
where keys are the names, values are list of texts.
"""
ext_data = pathlib.Path(config.EXTERNAL_DATA)
if not ext_data.is_absolute():
ext_data = pathlib.Path.home() / ext_data
if ext_data.exists():
try:
return load_ext_data(ext_data)
except Exception as exc: # pylint: disable=broad-except
# fallback to sample data
print('Cannot load: {}, exception: {}'.format(config.EXTERNAL_DATA, exc))
return load_sample_data()
def load_ext_data(ext_data):
"""Load external data, raise exception if something is not ok."""
if ext_data.suffix.lower() == '.py':
return load_ext_py_data(ext_data)
if ext_data.suffix.lower() == '.yml':
return load_ext_yml_data(ext_data)
raise RuntimeError('Type not supported')
def load_ext_py_data(ext_data):
"""Load external python data.
WARNING, python file will be executed, take care not allow uncontrolled changes!
raise exception if something is not ok."""
content = ext_data.read_text(encoding='utf-8')
glo = dict()
loc = dict()
exec(content, glo, loc) # pylint: disable=exec-used
return loc['DEFINED_TEXTS']
def load_ext_yml_data(ext_data):
"""Load external yaml data,
raise exception if something is not ok."""
import strictyaml # pylint: disable=import-outside-toplevel
content = ext_data.read_text(encoding='utf-8')
return strictyaml.load(content).data
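# Illustrative shape of the expected data (hypothetical content): keys are
# collection names, values are lists of texts, e.g. in YAML form:
#   greetings:
#     - Hello,
#     - Best regards,
#   snippets:
#     - TODO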
def load_sample_data():
"""Load provided sample data"""
try:
from .. import text_data # pylint: disable=import-outside-toplevel
except Exception: # pylint: disable=broad-except
# No data at all, return an empty dictionary
return dict()
return text_data.DEFINED_TEXTS
| 33.671233 | 85 | 0.702197 | 353 | 2,458 | 4.76204 | 0.365439 | 0.070791 | 0.039262 | 0.02677 | 0.350982 | 0.234384 | 0.143962 | 0.083284 | 0 | 0 | 0 | 0.001031 | 0.21074 | 2,458 | 72 | 86 | 34.138889 | 0.865464 | 0.485761 | 0 | 0.117647 | 0 | 0 | 0.064946 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.117647 | 0 | 0.5 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04c1820dcc2e20cd468a64c1d6ca1319a04e2cf5 | 25,660 | py | Python | sysevr/slicer/access_db_operate.py | Saleh-Ibtasham/VulScrape | 738d17e9dd7e5edc2341d106361651fd28f99c61 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | 1 | 2021-04-12T12:59:33.000Z | 2021-04-12T12:59:33.000Z | sysevr/slicer/access_db_operate.py | Jokers-grin/VulScrape | 738d17e9dd7e5edc2341d106361651fd28f99c61 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | sysevr/slicer/access_db_operate.py | Jokers-grin/VulScrape | 738d17e9dd7e5edc2341d106361651fd28f99c61 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | ## -*- coding: utf-8 -*-
from joern.all import JoernSteps
from igraph import *
from .general_op import *
import pickle
from py2neo.packages.httpstream import http
http.socket_timeout = 9999
def get_all_use_bydefnode(db, node_id):
query_str = "g.v(%d).in('USE')" % node_id
results = db.runGremlinQuery(query_str)
    # Keep only non-Statement nodes
    return [node for node in results if node.properties['type'] != 'Statement']
def get_all_def_bydefnode(db, node_id):
query_str = "g.v(%d).in('DEF')" % node_id
results = db.runGremlinQuery(query_str)
    # Keep only non-Statement nodes
    return [node for node in results if node.properties['type'] != 'Statement']
def get_exprstmt_node(db):
query_expr_str = "queryNodeIndex('type:ExpressionStatement')"
#results = db.runGremlinQuery(query_expr_str)
results_1 = db.runGremlinQuery(query_expr_str)
query_iddecl_str = 'queryNodeIndex("type:IdentifierDeclStatement")'
results_2 = db.runGremlinQuery(query_iddecl_str)
results = results_1 + results_2
return results
def get_pointers_node(db):
list_pointers_node = []
query_iddecl_str = 'queryNodeIndex("type:IdentifierDeclStatement")'
results = db.runGremlinQuery(query_iddecl_str)
if results != []:
for re in results:
code = re.properties['code']
if code.find(' = ') != -1:
code = code.split(' = ')[0]
if code.find('*') != -1:
list_pointers_node.append(re)
query_param_str = 'queryNodeIndex("type:Parameter")'
results = db.runGremlinQuery(query_param_str)
if results != []:
for re in results:
code = re.properties['code']
if code.find(' = ') != -1:
code = code.split(' = ')[0]
if code.find('*') != -1:
list_pointers_node.append(re)
return list_pointers_node
def get_arrays_node(db):
list_arrays_node = []
query_iddecl_str = "queryNodeIndex('type:IdentifierDeclStatement')"
results = db.runGremlinQuery(query_iddecl_str)
if results != []:
for re in results:
code = re.properties['code']
if code.find(' = ') != -1:
code = code.split(' = ')[0]
if code.find(' [ ') != -1:
list_arrays_node.append(re)
query_param_str = "queryNodeIndex('type:Parameter')"
results = db.runGremlinQuery(query_param_str)
if results != []:
for re in results:
code = re.properties['code']
if code.find(' = ') != -1:
code = code.split(' = ')[0]
if code.find(' [ ') != -1:
list_arrays_node.append(re)
return list_arrays_node
def get_def_node(db, cfg_node_id):
query_str = "g.v(%d).out('DEF')" % cfg_node_id
results = db.runGremlinQuery(query_str)
return results
def getFunctionNodeByName(db, funcname):
query_str = "queryNodeIndex('type:Function AND name:%s')" % funcname
results = db.runGremlinQuery(query_str)
return results
def get_parameter_by_funcid(db, func_id):
query_str = "g.v(%d).out('IS_FUNCTION_OF_CFG').out('CONTROLS').filter{ it.type == 'Parameter' }.id" % func_id
results = db.runGremlinQuery(query_str)
return results
def isNodeExist(g, nodeName):
if not g.vs:
return False
else:
return nodeName in g.vs['name']
def getALLFuncNode(db):
query_str = "queryNodeIndex('type:Function')"
results = db.runGremlinQuery(query_str)
return results
def getFuncNode(db, func_name):
query_str = 'getFunctionsByName("' + func_name + '")'
func_node = db.runGremlinQuery(query_str)
return func_node
def getFuncFile(db, func_id):
query_str = "g.v(%d).in('IS_FILE_OF').filepath" % func_id
ret = db.runGremlinQuery(query_str)
print(ret)
return ret[0]
def getCFGNodes(db, func_id):
query_str = 'queryNodeIndex("functionId:%s AND isCFGNode:True")' % func_id
cfgNodes = db.runGremlinQuery(query_str)
return cfgNodes
def getDDGEdges(db, func_id):
query_str = """queryNodeIndex('functionId:%s AND isCFGNode:True').outE('REACHES')""" % (func_id)
ddgEdges = db.runGremlinQuery(query_str)
return ddgEdges
def getCDGEdges(db, func_id):
query_str = """queryNodeIndex('functionId:%s AND isCFGNode:True').outE('CONTROLS')""" % (func_id)
cdgEdges = db.runGremlinQuery(query_str)
return cdgEdges
def getCFGEdges(db, func_id):
query_str = """queryNodeIndex('functionId:%s AND isCFGNode:True').outE('FLOWS_TO')""" % (func_id)
cfgEdges = db.runGremlinQuery(query_str)
return cfgEdges
def drawGraph(db, edges, func_entry_node, graph_type):
g = Graph(directed=True)
func_id = func_entry_node._id
filepath = getFuncFile(db, func_id)
for edge in edges:
if edge.start_node.properties['code'] == 'ENTRY':
startNode = str(edge.start_node.properties['functionId'])
else:
startNode = str(edge.start_node._id)
if edge.start_node.properties['code'] == 'ERROR':
continue
if isNodeExist(g, startNode) == False:
if edge.start_node.properties['code'] == 'ENTRY':
node_prop = {'code': func_entry_node.properties['name'], 'type': func_entry_node.properties['type'],
'location': func_entry_node.properties['location'], 'filepath':filepath, 'functionId':str(edge.start_node.properties['functionId'])}
else:
node_prop = {'code': edge.start_node.properties['code'], 'type': edge.start_node.properties['type'],
'location': edge.start_node.properties['location'], 'filepath':filepath, 'functionId':str(edge.start_node.properties['functionId'])}
g.add_vertex(startNode, **node_prop)#id is 'name'
endNode = str(edge.end_node._id)
if isNodeExist(g, endNode) == False:
if graph_type == 'pdg' and edge.end_node.properties['code'] == 'EXIT':
continue
if edge.end_node.properties['code'] == 'ERROR':
continue
node_prop = {'code': edge.end_node.properties['code'], 'type': edge.end_node.properties['type'],
'location': edge.end_node.properties['location'], 'filepath':filepath, 'functionId':str(edge.end_node.properties['functionId'])}
g.add_vertex(endNode, **node_prop)
if graph_type == 'pdg':
edge_prop = {'var': edge.properties['var']}
else:
edge_prop = {'var': edge.properties['flowLabel']}
g.add_edge(startNode, endNode, **edge_prop)
return g
def translatePDGByNode(db, func_node):
func_id = func_node._id
ddgEdges = getDDGEdges(db, func_id)
cdgEdges = getCDGEdges(db, func_id)
Edges = ddgEdges + cdgEdges
graph_type = 'pdg'
g = drawGraph(db, Edges, func_node, graph_type)
return g
def translateCFGByNode(db, func_node):
func_id = func_node._id
Edges = getCFGEdges(db, func_id)
graph_type = 'cfg'
g = drawGraph(db, Edges, func_node, graph_type)
return g
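# Illustrative usage sketch, mirroring deliverCallGraph() below (assumes a
# running joern/neo4j backend):
#   db = JoernSteps()
#   db.connectToDatabase()
#   for func_node in getALLFuncNode(db):
#       pdg = translatePDGByNode(db, func_node)
#       cfg = translateCFGByNode(db, func_node)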
def getUSENodesVar(db, func_id):
query = "g.v(%s).out('USE').code" % func_id
ret = db.runGremlinQuery(query)
if ret == []:
return False
else:
return ret
def getDEFNodesVar(db, func_id):
query = "g.v(%s).out('DEF').code" % func_id
ret = db.runGremlinQuery(query)
if ret == []:
return False
else:
return ret
def getUseDefVarByPDG(db, pdg):
dict_cfg2use = {}
dict_cfg2def = {}
#print pdg
#need_to_addedge_node = []
for node in pdg.vs:
if node['type'] == 'Function':
continue
func_id = node['name']
use_node = getUSENodesVar(db, func_id)
def_node = getDEFNodesVar(db, func_id)
if node['type'] == 'Statement':
if def_node == False:
code = node['code'].replace('\n', ' ')
if code.find(" = ") != -1:
value = code.split(" = ")[0].strip().split(' ')
if value[-1] == ']':
newvalue = code.split(" [ ")[0].strip().split(' ')
if '->' in newvalue:
a_index = newvalue.index('->')
n_value = ' '.join([newvalue[a_index-1], '->', newvalue[a_index+1]])
newvalue[a_index-1] = n_value
del newvalue[a_index]
del newvalue[a_index]
def_node = newvalue
else:
if '->' in value:
a_index = value.index('->')
n_value = ' '.join([value[a_index-1], '->', value[a_index+1]])
ob_value = value[a_index-1]
value[a_index-1] = n_value
del value[a_index]
del value[a_index]
value.append(ob_value.replace('*', ''))
def_node = value
#need_to_addedge_node.append(node['name'])
if use_node == False:
                if code.find(" = ") != -1:
                    value = code.split(" = ")[1].strip().split(' ')
                else:
                    value = code.split(' ')
                # Filter out operator/punctuation tokens, keeping identifiers only
                separators = {'*', '+', '-', '->', '(', ')', '[', ']', '&', '.', '::', ';', ','}
                use_node = [v.strip() for v in value if v not in separators]
if use_node:
use_node = [code.replace('*', '').replace('&', '').strip() for code in use_node]
if def_node:
def_node = [code.replace('*', '').replace('&', '').strip() for code in def_node]
else:#add define node
new_def_node = getReturnVarOfAPI(node['code'])#get modify value of api_func
if node['name'] == '2078':
print("new_def_node", new_def_node)
if new_def_node:
def_node = []
for code in new_def_node:
new_code = code.replace('*', '').replace('&', '').strip()
def_node.append(new_code)
if new_code not in use_node:
use_node.append(new_code)
if use_node:
dict_cfg2use[node['name']] = use_node
if def_node:
dict_cfg2def[node['name']] = def_node
return dict_cfg2use, dict_cfg2def
def getFuncNodeByFile(db, filenodeID):
query_str = 'g.v(%d).out("IS_FILE_OF")' % filenodeID
results = db.runGremlinQuery(query_str)
_list = []
for re in results:
if re.properties['type'] == 'Function':
_list.append(re)
else:
continue
return _list
def getAllFuncfileByTestID(db, testID):
testID = '*/' + testID + '/*'
query_str = "queryNodeIndex('type:File AND filepath:%s').id" % testID
results = db.runGremlinQuery(query_str)
return results
def get_calls_id(db, func_name):
query_str = 'getCallsTo("%s").id' % func_name
results = db.runGremlinQuery(query_str)
return results
def getCFGNodeByCallee(db, node_ast_id):
#print "start"
query_str = "g.v(%s).in('IS_AST_PARENT')" % node_ast_id
results = db.runGremlinQuery(query_str)
#print "end"
if results == []:
return None
for node in results:
if 'isCFGNode' in node.properties and node.properties['isCFGNode'] == 'True':
return node
else:
node = getCFGNodeByCallee(db, node._id)
return node
def getCalleeNode(db, func_id):
query_str = "queryNodeIndex('type:Callee AND functionId:%d')" % func_id
results = db.runGremlinQuery(query_str)
return results
def get_all_calls_node(db, testID):
list_all_funcID = [node._id for node in getFuncNodeInTestID(db, testID)]
print("list_all_funcID", list_all_funcID)
print("lenth", len(list_all_funcID))
    if len(list_all_funcID) > 130:
        print("more than 130 functions in this testID; skipping")
        return False
list_all_callee_node = []
for func_id in list_all_funcID:#allfile in a testID
list_all_callee_node += getCalleeNode(db, func_id)
if list_all_callee_node == []:
return False
else:
return [(str(node._id), node.properties['code'], str(node.properties['functionId'])) for node in list_all_callee_node]
def getFuncNodeInTestID(db, testID):
list_all_file_id = getAllFuncfileByTestID(db, testID)
list_all_func_node = []
if list_all_file_id == []:
return False
for file_id in list_all_file_id:
list_func_node = getFuncNodeByFile(db, file_id)
list_all_func_node += list_func_node
return list_all_func_node
def getClassByObjectAndFuncID(db, objectname, func_id):
#print objectname, func_id
all_cfg_node = getCFGNodes(db, func_id)
for cfg_node in all_cfg_node:
if cfg_node.properties['code'] == objectname and cfg_node.properties['type'] == 'Statement':
print(objectname, func_id, cfg_node.properties['code'], cfg_node._id)
query_str_1 = "queryNodeIndex('type:Statement AND code:%s AND functionId:%s')" % (objectname, func_id)
results_1 = db.runGremlinQuery(query_str_1)
if results_1 == []:
return False
else:
ob_cfgNode = results_1[0]
location_row = ob_cfgNode.properties['location'].split(':')[0]
query_str_2 = "queryNodeIndex('type:ExpressionStatement AND functionId:%s')" % func_id
results_2 = db.runGremlinQuery(query_str_2)
if results_2 == []:
return False
classname = False
for node in results_2:
print(node.properties['location'].split(':')[0], location_row)
if node.properties['location'].split(':')[0] == location_row:
classname = node.properties['code']
break
else:
continue
return classname
elif cfg_node.properties['code'].find(' '+objectname+' = new') != -1:
temp_value = cfg_node.properties['code'].split(' '+objectname+' = new')[1].replace('*', '').strip()
if temp_value.split(' ')[0] != 'const':
classname = temp_value.split(' ')[0]
else:
classname = temp_value.split(' ')[1]
return classname
def getDeleteNode(db, func_id):
query_str = "queryNodeIndex('code:delete AND functionId:%d')" % func_id
results = db.runGremlinQuery(query_str)
return results
def get_all_delete_node(db, testID):
list_all_funcID = [node._id for node in getFuncNodeInTestID(db, testID)]
print("list_all_funcID", list_all_funcID)
list_all_delete_node = []
for func_id in list_all_funcID:#allfile in a testID
list_all_delete_node += getDeleteNode(db, func_id)
if list_all_delete_node == []:
return False
else:
return list_all_delete_node
def getDeclNode(db, func_id):
query_str = "queryNodeIndex('type:IdentifierDeclStatement AND functionId:%d')" % func_id
results = db.runGremlinQuery(query_str)
return results
def get_all_iddecl_node(db, testID):
list_all_funcID = [node._id for node in getFuncNodeInTestID(db, testID)]
print("list_all_funcID", list_all_funcID)
list_all_decl_node = []
for func_id in list_all_funcID:#allfile in a testID
list_all_decl_node += getDeclNode(db, func_id)
if list_all_decl_node == []:
return False
else:
return list_all_decl_node
def getCallGraph(db, testID):
list_all_func_node = getFuncNodeInTestID(db, testID)
#print "list_all_func_node", list_all_func_node
if list_all_func_node == [] or list_all_func_node == False:
return False
call_g = Graph(directed=True)
for func_node in list_all_func_node:
# print(func_node)
prop = {'funcname':func_node.properties['name'], 'type': func_node.properties['type'], 'filepath': func_node.properties['filepath']}
call_g.add_vertex(str(func_node._id), **prop)
list_all_callee = get_all_calls_node(db, testID)#we must limit result in testID, it already get callee node
#print '3 ', list_all_callee
if list_all_callee == False:
return False
for func_node in list_all_func_node:
function_name = func_node.properties['name']
#print "function_name", function_name
tag = False
if function_name.find('::') != -1:#if is a function in class, have two problems
func_name = function_name.split('::')[-1].strip()
classname = function_name.split('::')[0].strip()
if func_name == classname:#is a class::class, is a statementnode or a iddeclnode
print(1)
list_callee_id = []
list_delete_node = get_all_delete_node(db, testID)
if list_delete_node == False:
continue
for node in list_delete_node:
functionID = node.properties["functionId"]
all_cfg_node = getCFGNodes(db, functionID)
delete_loc = node.properties['location'].split(':')[0]
for cfg_node in all_cfg_node:
if cfg_node.properties['location'] != None and cfg_node.properties['location'].split(':')[0] == delete_loc and cfg_node.properties['code'] != 'delete' and cfg_node.properties['code'] != '[' and cfg_node.properties['code'] != '[':
objectname = cfg_node.properties['code']
ob_classname = getClassByObjectAndFuncID(db, objectname, functionID)
pdg = getFuncPDGByfuncIDAndtestID(functionID, testID)
if pdg == False:
continue
if ob_classname == classname:
for p_n in pdg.vs:
#print p_n['name'], str(node._id), str(cfg_node._id)
if p_n['name'] == str(node._id):
list_s = p_n.predecessors()
for edge in pdg.es:
if pdg.vs[edge.tuple[0]] in list_s and pdg.vs[edge.tuple[1]] == p_n and edge['var'] == objectname:
#print (functionID, str(pdg.vs[edge.tuple[0]]['name']))
list_callee_id.append((str(functionID), str(pdg.vs[edge.tuple[0]]['name'])))
else:
continue
elif p_n['name'] == str(cfg_node._id):
list_s = p_n.predecessors()
for edge in pdg.es:
if pdg.vs[edge.tuple[0]] in list_s and pdg.vs[edge.tuple[1]] == p_n and edge['var'] == objectname:
list_callee_id.append((functionID, str(pdg.vs[edge.tuple[0]]['name'])))
else:
continue
else:
continue
else:
continue
elif func_name.replace('~', '') == classname:#is a class::~class
list_callee_id = []
list_delete_node = get_all_delete_node(db, testID)
if list_delete_node == False:
continue
for node in list_delete_node:
functionID = node.properties["functionId"]
all_cfg_node = getCFGNodes(db, functionID)
delete_loc = node.properties['location'].split(':')[0]
for cfg_node in all_cfg_node:
if cfg_node.properties['location'] != None and cfg_node.properties['location'].split(':')[0] == delete_loc and cfg_node.properties['code'] != 'delete' and cfg_node.properties['code'] != '[' and cfg_node.properties['code'] != '[':
objectname = cfg_node.properties['code']
#print objectname
ob_classname = getClassByObjectAndFuncID(db, objectname, functionID)
if ob_classname == classname:
pdg = getFuncPDGByfuncIDAndtestID(functionID, testID)
if pdg == False:
continue
for p_n in pdg.vs:
if p_n['name'] == str(node._id):
list_callee_id.append((functionID, str(node._id)))
elif p_n['name'] == str(cfg_node._id):
list_callee_id.append((functionID, str(cfg_node._id))) #delete and its object node
else:
continue
else:
continue
else:
print(3)
tag = 'func'
list_callee_id = []
for _t in list_all_callee:#_t is a tuple, _t[0] is nodeid, 1 is funcname, 2 is func_id
if _t[1].find('-> '+ func_name) != -1:#maybe is a class->funcname()
objectname = _t[1].split(' -> '+ func_name)[0].strip()
ob_classname = getClassByObjectAndFuncID(db, objectname, _t[2])
if ob_classname == classname:
list_callee_id.append(_t[0])
else:
continue
else:
continue
else:
tag = 'func'
list_callee_id = []
for _t in list_all_callee:
if _t[1] == function_name:
list_callee_id.append(_t[0])
#print 4, list_callee_id
if list_callee_id == []:
continue
else:
#change ast node to cfgnode
list_callee_CFGNode = []
if tag == 'func':
#print 'z'
for node_id in list_callee_id:
#print 1
callee_cfgnode = getCFGNodeByCallee(db, node_id)
#print callee_cfgnode
#print 2
if callee_cfgnode == None:
print('ERROR', callee_cfgnode)
continue
else:
list_callee_CFGNode.append(callee_cfgnode)
#print 'x'
for node in list_callee_CFGNode:
startNode = str(node.properties['functionId'])
endNode = str(func_node._id)
var = str(node._id)
call_g = addDataEdge(call_g, startNode, endNode, var)#var is callee node id
else:
#print 'y'
for node in list_callee_id:
startNode = str(node[0])
endNode = str(func_node._id)
var = str(node[1])
call_g = addDataEdge(call_g, startNode, endNode, var)#var is callee node id
return call_g
def deliverCallGraph(working_directory):
j = JoernSteps()
j.connectToDatabase()
call_graph_path = working_directory + 'dict_call2cfgNodeID_funcID'
pdg_db_path = working_directory + 'pdg_db'
list_testID = os.listdir(pdg_db_path)
for testID in list_testID:
#if testID != '69055':
# continue
if os.path.exists(os.path.join(call_graph_path, str(testID))):
continue
call_g = getCallGraph(j, testID)
if call_g == False:
continue
_dict = {}
for edge in call_g.es:
endnode = call_g.vs[edge.tuple[1]]
if endnode['name'] not in _dict:
_dict[endnode['name']] = [(edge['var'], call_g.vs[edge.tuple[0]]['name'])]
else:
_dict[endnode['name']].append((edge['var'], call_g.vs[edge.tuple[0]]['name']))
if not os.path.exists(os.path.join(call_graph_path, str(testID))):
os.makedirs(os.path.join(call_graph_path, str(testID)))
filepath = os.path.join(call_graph_path, str(testID), "dict.pkl")
f = open(filepath, 'wb')
pickle.dump(_dict, f, True)
f.close()
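# Example entry point (the directory path is hypothetical):
#   deliverCallGraph("/path/to/workspace/")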
| 35.49101 | 253 | 0.539517 | 2,891 | 25,660 | 4.554825 | 0.080249 | 0.054222 | 0.050122 | 0.010024 | 0.576018 | 0.484204 | 0.436437 | 0.396264 | 0.327764 | 0.308779 | 0 | 0.006687 | 0.341465 | 25,660 | 722 | 254 | 35.540166 | 0.772577 | 0.039712 | 0 | 0.467308 | 0 | 0.001923 | 0.088296 | 0.037603 | 0.003846 | 0 | 0 | 0 | 0 | 1 | 0.069231 | false | 0 | 0.009615 | 0 | 0.175 | 0.023077 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04c30ca6dae699405806c7919712bb4e2c2022d3 | 3,818 | py | Python | consolidate_json.py | squirrel2038/archive-archdruid-report | cd121a2466887999062e4e674998af971cd416e2 | [
"MIT"
] | 1 | 2022-01-30T11:01:11.000Z | 2022-01-30T11:01:11.000Z | consolidate_json.py | squirrel2038/thearchdruidreport-archive | cd121a2466887999062e4e674998af971cd416e2 | [
"MIT"
] | null | null | null | consolidate_json.py | squirrel2038/thearchdruidreport-archive | cd121a2466887999062e4e674998af971cd416e2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Consolidate all the raw Blogger JSON files into a single, simplified JSON file.
#
from collections import OrderedDict
import html
import io
import json
import sys
import lxml.etree as ET
import lxml.html
import re
import feeds
import util
posts = feeds.json_post_entries_list()
output = []
for jpost in posts:
npost = OrderedDict()
output.append(npost)
npost["postid"] = re.match(r"tag:blogger.com,1999:blog-27481991.post-(\d+)$", jpost["id"]["$t"]).group(1)
assert jpost["title"]["type"] == "text"
npost["title"] = jpost["title"]["$t"]
(link,) = [x for x in jpost["link"] if x["rel"] == "alternate"]
npost["title_formatted"] = link["title"]
m = re.match(r"http://thearchdruidreport\.blogspot\.com/(20../../.*\.html)$", link["href"])
url = "https://thearchdruidreport.blogspot.com/" + m.group(1)
npost["url"] = url
npost["published"] = jpost["published"]["$t"] # e.g.: 2017-03-08T13:28:00.001-08:00
npost["updated"] = jpost["updated"]["$t"] # e.g.: 2017-03-08T13:32:19.336-08:00
assert jpost["content"]["type"] == "html"
npost["content"] = jpost["content"]["$t"]
npost["comments"] = []
for jcomment in feeds.comments_json(npost["postid"]):
ncomment = OrderedDict()
npost["comments"].append(ncomment)
ncomment["commentid"] = re.match(r"tag:blogger.com,1999:blog-27481991.post-(\d+)$", jcomment["id"]["$t"]).group(1)
(author,) = jcomment["author"]
ncomment["author"] = author["name"]["$t"]
ncomment["profile"] = author["uri"]["$t"]
avatar_url = author["gd$image"]["src"]
avatar_size = (int(author["gd$image"]["width"]), int(author["gd$image"]["height"]))
small_avatar = avatar_size[0] < 30 and avatar_size[1] < 30
if small_avatar:
if avatar_size == (16, 16) and avatar_url == "http://img1.blogblog.com/img/b16-rounded.gif":
ncomment["avatar"] = {"type": "blogger"}
elif avatar_size == (16, 16) and avatar_url == "http://img1.blogblog.com/img/openid16-rounded.gif":
ncomment["avatar"] = {"type": "openid"}
else:
raise RuntimeError("Invalid avatar info on comment (%s/%s, %s, %s)" % (
npost["postid"], ncomment["commentid"], avatar_url, avatar_size))
else:
ncomment["avatar"] = {"type": "url", "size": avatar_size, "url": avatar_url}
ncomment["published"] = jcomment["published"]["$t"]
ncomment["updated"] = jcomment["updated"]["$t"]
(display_time,) = [p for p in jcomment["gd$extendedProperty"] if p["name"] == "blogger.displayTime"]
ncomment["display_time"] = display_time["value"]
ncomment["comment_removed"] = (
len([p for p in jcomment["gd$extendedProperty"] if
(p["name"], p["value"]) == ("blogger.contentRemoved", "true")]) > 0)
related = [x for x in jcomment["link"] if x["rel"] == "related"]
if len(related) > 0:
(related,) = related
related = re.match(r"http://www\.blogger\.com/feeds/27481991/\d+/comments/default/(\d+)\?v=2$", related["href"])
ncomment["in_reply_to"] = related.group(1)
else:
ncomment["in_reply_to"] = None
ncomment["title"] = jcomment["title"]["$t"]
assert jcomment["content"]["type"] == "html"
ncomment["content"] = jcomment["content"]["$t"]
#html_parser = ET.HTMLParser()
#html = ET.HTML(content)
# doc = ET.parse(io.StringIO(content), html_parser)
# print(type(doc))
#print(ET.tostring(html))
#e = lxml.html.fragment_fromstring(content, create_parent="p")
#print(e)
#break
util.set_file_text("blog.json", json.dumps(output, indent=2))
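# Illustrative shape of one record in blog.json (all field values made up):
#   {"postid": "123", "title": "...", "title_formatted": "...", "url": "...",
#    "published": "...", "updated": "...", "content": "<p>...</p>",
#    "comments": [{"commentid": "456", "author": "...", ...}]}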
| 38.959184 | 124 | 0.585385 | 475 | 3,818 | 4.635789 | 0.330526 | 0.031789 | 0.014532 | 0.009991 | 0.156222 | 0.13079 | 0.118074 | 0.118074 | 0.118074 | 0.118074 | 0 | 0.035141 | 0.217391 | 3,818 | 97 | 125 | 39.360825 | 0.701807 | 0.102148 | 0 | 0.045455 | 0 | 0.015152 | 0.285589 | 0.033392 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0 | false | 0 | 0.151515 | 0 | 0.151515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04ca253f5e82296a1e2958dc72286f826aff2f7b | 2,619 | py | Python | preprocess.py | wonderlr/news_aggregator | 123a6b912988013fd83080393ff978ff85a76dc0 | [
"MIT"
] | null | null | null | preprocess.py | wonderlr/news_aggregator | 123a6b912988013fd83080393ff978ff85a76dc0 | [
"MIT"
] | null | null | null | preprocess.py | wonderlr/news_aggregator | 123a6b912988013fd83080393ff978ff85a76dc0 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
import nltk
import pdb
import pickle
import pandas as pd
import numpy as np
import json
stemmer = nltk.stem.porter.PorterStemmer()
stop_words = set(nltk.corpus.stopwords.words('english'))
def is_alphanumeric(character):
to_ord = ord(character)
is_alpha = (to_ord >= ord('A') and to_ord <= ord('Z')) or (to_ord >= ord('a') and to_ord <= ord('z'))
is_numeric = to_ord >= ord('0') and to_ord <= ord('9')
return is_alpha or is_numeric
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def reduce_stem(stem):
# Remove unwanted characters such as punctuation
reduced = []
for character in stem:
if not is_alphanumeric(character):
continue
reduced.append(character)
return ''.join(reduced)
def tokenize(text):
text = text.decode('utf-8').lower()
# Replace periods with spaces. This fixes cases
# where there's no space after a period. Punctuation
# will be dropped later in processing, anyway.
text = text.replace('.', ' ')
tokens = nltk.word_tokenize(text)
stems = stem_tokens(tokens, stemmer)
# Remove punctuations and stop words
stems_reduced = []
for stem in stems:
if stem in stop_words:
continue
reduced_stem = reduce_stem(stem)
if len(reduced_stem) > 0:
stems_reduced.append(reduced_stem)
return stems_reduced
articles = []
article_id = 0
def cache_for_analysis(url, title, stems, feed_id):
global article_id
article_id = article_id + 1
articles.append((article_id, url, title, stems, feed_id))
def dump_articles():
pickle.dump(articles, open("articles.pickle", "wb"))
def article_to_json(row, all_terms):
stems = row[3]
vec = [("1" if term in stems else "0") for term in all_terms]
vec_string = "".join(vec)
return {
"article_id": row[0],
"url": row[1],
"title": row[2],
"feed_id": row[4],
"vec": vec_string
}
def analyze_articles():
seen_stems = set()
# do some kind of clustering with tf/idf!
# build up a data frame with schema:
# terms (string) | a1_terms (bool) | a2_terms
for row in articles:
stems = row[3]
for stem in stems:
seen_stems.add(stem)
all_terms = list(seen_stems)
for row in articles:
json_article = article_to_json(row, all_terms)
print(json.dumps(json_article))
if __name__ == "__main__":
articles = pickle.load(open("articles.pickle", "rb"))
analyze_articles()
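# Hedged usage sketch (the sample sentence is an invented example): tokenize()
# expects UTF-8 encoded bytes and returns lowercased Porter stems with stop
# words and punctuation removed, roughly:
#
#   >>> tokenize(b"The markets rallied today. Stocks surged!")
#   ['market', 'ralli', 'today', 'stock', 'surg']
#
# cache_for_analysis() would then store (article_id, url, title, stems,
# feed_id) tuples, which dump_articles() pickles for analyze_articles().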
| 28.78022 | 105 | 0.644139 | 365 | 2,619 | 4.443836 | 0.369863 | 0.021578 | 0.034525 | 0.020345 | 0.078915 | 0.055487 | 0.025894 | 0.025894 | 0.025894 | 0 | 0 | 0.008538 | 0.239786 | 2,619 | 90 | 106 | 29.1 | 0.806128 | 0.138221 | 0 | 0.085714 | 0 | 0 | 0.040943 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.085714 | 0 | 0.271429 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04cad953622a83f0e9965ec0a898a7a9f104aa3c | 22,312 | py | Python | test/intelliflow/core/application/test_aws_application_execution_hooks.py | amzn/rheoceros | 5e8f79d97f8b21d693d3c869b0df70de3d5fd068 | [
"Apache-2.0",
"MIT-0"
] | 4 | 2022-03-24T04:39:02.000Z | 2022-03-31T16:41:50.000Z | test/intelliflow/core/application/test_aws_application_execution_hooks.py | amzn/rheoceros | 5e8f79d97f8b21d693d3c869b0df70de3d5fd068 | [
"Apache-2.0",
"MIT-0"
] | null | null | null | test/intelliflow/core/application/test_aws_application_execution_hooks.py | amzn/rheoceros | 5e8f79d97f8b21d693d3c869b0df70de3d5fd068 | [
"Apache-2.0",
"MIT-0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import threading
import time
from typing import Callable
import pytest
from mock import MagicMock
import intelliflow.api_ext as flow
from intelliflow.api_ext import *
from intelliflow.core.application.application import Application
from intelliflow.core.platform import development as development_module
from intelliflow.core.platform.compute_targets.email import EMAIL
from intelliflow.core.platform.compute_targets.slack import Slack
from intelliflow.core.platform.constructs import ConstructPermission
from intelliflow.core.platform.definitions.compute import (
ComputeFailedSessionState,
ComputeFailedSessionStateType,
ComputeResourceDesc,
ComputeResponse,
ComputeSessionDesc,
ComputeSessionState,
ComputeSuccessfulResponse,
ComputeSuccessfulResponseType,
)
from intelliflow.core.signal_processing import Slot
from intelliflow.core.signal_processing.signal import *
from intelliflow.mixins.aws.test import AWSTestBase
from intelliflow.utils.test.hook import GenericComputeDescriptorHookVerifier, GenericRoutingHookImpl, OnExecBeginHookImpl
from intelliflow.utils.test.inlined_compute import NOOPCompute
class TestAWSApplicationExecutionHooks(AWSTestBase):
def _create_test_application(self, id_or_app: Union[str, Application]):
if isinstance(id_or_app, str):
id = id_or_app
app = AWSApplication(id, region=self.region)
else:
app = id_or_app
id = app.id
ducsi_data = app.marshal_external_data(
GlueTable("booker", "d_unified_cust_shipment_items", partition_keys=["region_id", "ship_day"]),
"DEXML_DUCSI",
{"region_id": {"type": DimensionType.LONG, "ship_day": {"format": "%Y-%m-%d", "type": DimensionType.DATETIME}}},
{"1": {"*": {"timezone": "PST"}}},
SignalIntegrityProtocol("FILE_CHECK", {"file": ["SNAPSHOT"]}),
)
# add a dimensionless table (important corner-case)
ship_options = app.marshal_external_data(
GlueTable(
"dexbi",
"d_ship_option",
partition_keys=[],
),
"ship_options",
{},
{},
SignalIntegrityProtocol("FILE_CHECK", {"file": ["DELTA", "SNAPSHOT"]}),
)
return app
def _test_all_application_hooks(self, hook_generator: Callable):
from test.intelliflow.core.application.test_aws_application_execution_control import TestAWSApplicationExecutionControl
self.patch_aws_start(glue_catalog_has_all_tables=True)
app = self._create_test_application("exec-hooks")
ducsi_data = app.get_data("DEXML_DUCSI", context=Application.QueryContext.DEV_CONTEXT)[0]
ship_options = app.get_data("ship_options", context=Application.QueryContext.DEV_CONTEXT)[0]
email_obj = EMAIL(sender="if-test-list@amazon.com", recipient_list=["yunusko@amazon.com"])
on_exec_begin_hook = hook_generator()
on_exec_skipped_hook = hook_generator()
on_compute_success_hook = hook_generator()
on_compute_failure_hook = hook_generator()
on_compute_retry_hook = hook_generator()
on_success_hook = hook_generator()
on_failure_hook = hook_generator()
exec_checkpoints = [
RouteCheckpoint(checkpoint_in_secs=10, slot=hook_generator()),
RouteCheckpoint(checkpoint_in_secs=20, slot=hook_generator()),
]
repeat_ducsi = app.create_data(
id="REPEAT_DUCSI",
inputs={
"DEXML_DUCSI": ducsi_data,
},
compute_targets="output=DEXML_DUCSI.limit(100)",
execution_hook=RouteExecutionHook(
on_exec_begin=on_exec_begin_hook,
on_exec_skipped=on_exec_skipped_hook,
on_compute_success=on_compute_success_hook,
on_compute_failure=on_compute_failure_hook,
on_compute_retry=on_compute_retry_hook,
on_success=on_success_hook,
on_failure=on_failure_hook,
checkpoints=exec_checkpoints,
),
)
on_exec_skipped_hook_2 = hook_generator()
on_pending_node_created_hook = hook_generator()
on_expiration_hook = hook_generator()
pending_node_checkpoints = [RouteCheckpoint(checkpoint_in_secs=10, slot=hook_generator())]
# we will be using this second node for Pending Node checks mostly
app.create_data(
id="DUCSI_WITH_SO",
inputs={"DEXML_DUCSI": ducsi_data["*"][:-2], "SHIP_OPTIONS": ship_options},
compute_targets="output=DEXML_DUCSI.limit(100).join(SHIP_OPTIONS, DEXML_DUCSI.customer_ship_option == SHIP_OPTIONS.ship_option)",
execution_hook=RouteExecutionHook(on_exec_skipped=on_exec_skipped_hook_2),
pending_node_hook=RoutePendingNodeHook(
on_pending_node_created=on_pending_node_created_hook, on_expiration=on_expiration_hook, checkpoints=pending_node_checkpoints
),
pending_node_expiration_ttl_in_secs=20,
)
# SERIALIZATION: inject serialize/deserialize sequence for enhanced serialization coverage
json_str = app.dev_context.to_json()
dev_context = CoreData.from_json(json_str)
app._dev_context = dev_context
#
app.activate()
# 1- Inject DUCSI event to trigger execution on the first node/route and create a pending node on the second.
# mock batch_compute response
def compute(
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
assert materialized_output.alias == "REPEAT_DUCSI"
return TestAWSApplicationExecutionControl.create_batch_compute_response(ComputeSuccessfulResponseType.PROCESSING, "job_id")
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
assert session_desc.session_id == "job_id"
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(ComputeSessionStateType.PROCESSING, session_desc)
app.platform.batch_compute.compute = MagicMock(side_effect=compute)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.process(
ducsi_data[1]["2020-12-25"],
# make it SYNC (use the local processor instance in sync mode)
with_activated_processor=False,
)
assert len(app.get_active_routes()) == 1
# check if the first exec hook has been hit and done with its own logic
assert on_exec_begin_hook.verify(app)
assert not on_exec_skipped_hook.verify(app)
assert not on_compute_failure_hook.verify(app)
assert not on_compute_success_hook.verify(app)
assert not on_compute_retry_hook.verify(app)
assert not on_success_hook.verify(app)
assert not on_failure_hook.verify(app)
# check the pending node hooks registered on the second route.
assert on_pending_node_created_hook.verify(app)
assert not on_exec_skipped_hook_2.verify(app)
assert not on_expiration_hook.verify(app)
# emulate runtime Processor behaviour to check the routes; otherwise checkpoints won't be checked.
# reason: in unit-tests the Processor does not 'run' in the background, so the following call acts as
# a 'next cycle/tick'.
app.update_active_routes_status()
assert not any([c.slot.verify(app) for c in exec_checkpoints])
assert not any([c.slot.verify(app) for c in pending_node_checkpoints])
time.sleep(10)
# next-cycle again
app.update_active_routes_status()
# execution passed the checkpoint 10 secs
assert exec_checkpoints[0].slot.verify(app)
# pending node passed the checkpoint 10 secs
assert pending_node_checkpoints[0].slot.verify(app)
# now the second internal data node (route) in the system actually waits for its second input dependency
# 'ship_options'. Previous process call with ducsi has created a pending node in it as well. a signal for
# 'ship_options' will complete that pending node and cause a trigger.
ship_options = app.get_data("ship_options", context=Application.QueryContext.DEV_CONTEXT)[0]
# mock again
def compute(
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
assert materialized_output.alias == "DUCSI_WITH_SO"
return TestAWSApplicationExecutionControl.create_batch_compute_response(ComputeSuccessfulResponseType.PROCESSING, "job_id2")
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(ComputeSessionStateType.PROCESSING, session_desc)
app.platform.batch_compute.compute = MagicMock(side_effect=compute)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.process(
ship_options,
# make it SYNC (use the local processor instance in sync mode)
with_activated_processor=False,
)
assert len(app.get_active_routes()) == 2
# check idempotency
app.process(ducsi_data[1]["2020-12-25"], with_activated_processor=False)
# now we can check the skipped hook due to idempotency related call above
assert on_exec_skipped_hook.verify(app)
# no effect (still the same count on the mock objects)
app.process(ship_options, with_activated_processor=False)
assert on_exec_skipped_hook_2.verify(app)
# initiate another trigger on 'REPEAT_DUCSI' with a different partition (12-26)
def compute(
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
assert materialized_output.alias == "REPEAT_DUCSI"
return TestAWSApplicationExecutionControl.create_batch_compute_response(ComputeSuccessfulResponseType.PROCESSING, "job_id3")
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(ComputeSessionStateType.PROCESSING, session_desc)
app.platform.batch_compute.compute = MagicMock(side_effect=compute)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.process(ducsi_data[1]["2020-12-26"], with_activated_processor=False)
# finish first job (from 12-25 on both routes), since Processor is not running in the background now
# we will have to use related app API to force update RoutingTable status.
# only active record remaining should be the most recent one (12-26):
def compute(
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
raise RuntimeError(
"This should not be called since we are not supposed to yield a new active record "
"at this point in this test"
)
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
if session_desc.session_id in ["job_id"]: # first active record
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(
ComputeSessionStateType.COMPLETED, session_desc
)
else:
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(
ComputeSessionStateType.PROCESSING, session_desc
)
app.platform.batch_compute.compute = MagicMock(side_effect=compute)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.update_active_routes_status()
assert on_compute_success_hook.verify(app)
assert not on_compute_failure_hook.verify(app)
assert not on_compute_retry_hook.verify(app)
assert on_success_hook.verify(app)
assert not on_failure_hook.verify(app)
# we now have only one active record (active batch compute session) and a pending node, move 20 secs to:
# - cause expiration on the only job of the second route
time.sleep(20)
app.update_active_routes_status()
assert on_expiration_hook.verify(app)
# move 25 more secs to:
# - cause second checkpoint to be called on the first route (due to second execution)
time.sleep(25)
app.update_active_routes_status()
assert exec_checkpoints[1].slot.verify(app)
# finish the third job (12-26) of the first route with FAILURE
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
if session_desc.session_id in ["job_id3"]: # third active record
return TestAWSApplicationExecutionControl.create_batch_compute_failed_session_state(
ComputeFailedSessionStateType.APP_INTERNAL, session_desc
)
else:
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(
ComputeSessionStateType.PROCESSING, session_desc
)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.update_active_routes_status()
assert on_compute_failure_hook.verify(app)
assert on_failure_hook.verify(app)
assert not on_compute_retry_hook.verify(app)
self.patch_aws_stop()
def test_all_application_hooks_generic(self):
self._test_all_application_hooks(lambda: GenericRoutingHookImpl())
def test_all_application_hooks_with_EMAIL(self):
email_obj = EMAIL(sender="if-test-list@amazon.com", recipient_list=["yunusko@amazon.com"])
self._test_all_application_hooks(lambda: GenericComputeDescriptorHookVerifier(email_obj.action()))
def test_all_application_hooks_with_slack(self):
slack_obj = Slack(recipient_list=["https://hooks.slack.com/workflows/1/"], message="test message")
self._test_all_application_hooks(lambda: GenericComputeDescriptorHookVerifier(slack_obj.action()))
def test_application_hooks_generate_right_permissions(self):
"""Test system provided compute targets' compatibility and runtime permission contribution as hooks"""
self.patch_aws_start(glue_catalog_has_all_tables=True)
self.app = AWSApplication(app_name="sys-hooks", region=self.region)
email_obj = EMAIL(sender="if-test-list@amazon.com", recipient_list=["yunusko@amazon.com"])
self.app.create_data(
id="dummy_node_EMAIL_as_pending_trigger_hook",
compute_targets=[NOOPCompute],
pending_node_hook=RoutePendingNodeHook(on_pending_node_created=email_obj.action()),
)
# Test permissions applied to runtime / exec role as well
# keep a reference to the actual policy updater method so that we can restore it at the end.
real_put_inlined_policy = development_module.put_inlined_policy
def put_inlined_policy(
role_name: str, policy_name: str, action_resource_pairs: Set[ConstructPermission], base_session: "boto3.Session"
) -> None:
if "IntelliFlowExeRole" in role_name:
# check EMAIL resource in runtime permission resources (SES ARN, etc)
assert any([email_obj.sender in resource for perm in action_resource_pairs for resource in perm.resource])
development_module.put_inlined_policy = MagicMock(side_effect=put_inlined_policy)
# above mock / callback should be called during the activation
self.app.activate()
# just make sure that it was called actually (otherwise there is no point in this test :)
assert development_module.put_inlined_policy.call_count > 0
# restore
development_module.put_inlined_policy = real_put_inlined_policy
self.patch_aws_stop()
# Test permissions applied to runtime / exec role as well
def test_application_retry_hook(self):
from test.intelliflow.core.application.test_aws_application_execution_control import TestAWSApplicationExecutionControl
self.patch_aws_start(glue_catalog_has_all_tables=True)
app = self._create_test_application("exec-hooks")
ducsi_data = app.get_data("DEXML_DUCSI", context=Application.QueryContext.DEV_CONTEXT)[0]
on_failure_hook = GenericRoutingHookImpl()
on_compute_retry_hook = GenericRoutingHookImpl()
on_failure_hook2 = GenericRoutingHookImpl()
on_compute_retry_hook2 = GenericRoutingHookImpl()
app.create_data(
id="REPEAT_DUCSI",
inputs={
"DEXML_DUCSI": ducsi_data,
},
compute_targets=[GlueBatchCompute(code="output=DEXML_DUCSI.limit(100)", retry_count=1)],
execution_hook=RouteExecutionHook(on_compute_retry=on_compute_retry_hook, on_failure=on_failure_hook),
)
app.create_data(
id="REPEAT_DUCSI2",
inputs={
"DEXML_DUCSI": ducsi_data,
},
compute_targets=[GlueBatchCompute(code="output=DEXML_DUCSI.limit(100)", retry_count=0)],
execution_hook=RouteExecutionHook(on_compute_retry=on_compute_retry_hook2, on_failure=on_failure_hook2),
)
app.activate()
# 1- Inject DUCSI event to trigger execution on the nodes/routes
# mock batch_compute response
def compute(
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
# both of the nodes will have a new compute session
return TestAWSApplicationExecutionControl.create_batch_compute_response(
ComputeSuccessfulResponseType.PROCESSING, f"job_id-{materialized_output.alias}"
)
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(ComputeSessionStateType.PROCESSING)
app.platform.batch_compute.compute = MagicMock(side_effect=compute)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.process(
ducsi_data[1]["2020-12-25"],
# make it SYNC (use the local processor instance in sync mode)
with_activated_processor=False,
)
assert not on_compute_retry_hook.verify(app)
assert not on_compute_retry_hook2.verify(app)
assert not on_failure_hook.verify(app)
assert not on_failure_hook2.verify(app)
# now make sure that during the periodical check both of the nodes fail in a transient way.
# this causes implicit retries and yields brand-new sessions; however, this should not count towards the retry limit.
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
return TestAWSApplicationExecutionControl.create_batch_compute_failed_session_state(
ComputeFailedSessionStateType.TRANSIENT, session_desc
)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
# emulate runtime Processor behaviour to check the routes; otherwise checkpoints won't be checked.
# reason: in unit-tests the Processor does not 'run' in the background, so the following call acts as
# a 'next cycle/tick'.
app.update_active_routes_status()
assert not on_compute_retry_hook.verify(app)
assert not on_compute_retry_hook2.verify(app)
assert not on_failure_hook.verify(app)
assert not on_failure_hook2.verify(app)
# now emulate a job failure:
# only the node with retry > 0 should be retried
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
return TestAWSApplicationExecutionControl.create_batch_compute_failed_session_state(
ComputeFailedSessionStateType.APP_INTERNAL, session_desc
)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.update_active_routes_status()
# retried!
assert on_compute_retry_hook.verify(app)
assert not on_failure_hook.verify(app)
# will never be retried since retry_count is 0.
# this should actually be failed and terminated.
assert not on_compute_retry_hook2.verify(app)
assert on_failure_hook2.verify(app)
# now during the second check max_retry_count of 1 must be hit and the compute must fail.
app.update_active_routes_status()
assert on_failure_hook.verify(app)
self.patch_aws_stop()
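# Usage note (illustrative): these tests are normally collected by pytest from
# the repository root, e.g.
#   pytest test/intelliflow/core/application/test_aws_application_execution_hooks.py -k retry
# The patch_aws_start/patch_aws_stop pair wraps each test so the AWS-facing
# calls are patched/mocked rather than issued against a real account.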
| 47.777302 | 141 | 0.701327 | 2,572 | 22,312 | 5.794323 | 0.153966 | 0.023552 | 0.02268 | 0.022948 | 0.642153 | 0.585721 | 0.560558 | 0.529826 | 0.504462 | 0.466282 | 0 | 0.007371 | 0.22777 | 22,312 | 466 | 142 | 47.879828 | 0.857574 | 0.158973 | 0 | 0.443425 | 0 | 0 | 0.068335 | 0.030585 | 0 | 0 | 0 | 0 | 0.143731 | 1 | 0.06422 | false | 0 | 0.061162 | 0.018349 | 0.174312 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04cb89b6281e761d43339b9fbbae2aa13203f250 | 6,988 | py | Python | Plot/4.13.His_Plot_Global.py | hliu119/Phenological-Dynamics-revealed-by-SIF | 2584d09837ea37387dc4d5ad39fcaba0ed714e94 | [
"Apache-2.0"
] | null | null | null | Plot/4.13.His_Plot_Global.py | hliu119/Phenological-Dynamics-revealed-by-SIF | 2584d09837ea37387dc4d5ad39fcaba0ed714e94 | [
"Apache-2.0"
] | null | null | null | Plot/4.13.His_Plot_Global.py | hliu119/Phenological-Dynamics-revealed-by-SIF | 2584d09837ea37387dc4d5ad39fcaba0ed714e94 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 26 14:58:57 2019
@author: Administrator
MCD12 LandCover Types:
DBF == 4:DBF,5:MF
EBF == 2:EBF
NF == 1:ENF,3:DNF
CRO == 12: CRO, 14: CRO&NV
GRA == 10: GRA
SHR == 6:CSH, 7:OSH
SAV == 8:WSA, 9:SAV
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from osgeo import gdal_array
import matplotlib.gridspec as gridspec
import warnings
warnings.filterwarnings('ignore')
in_file_path = r"F:\Chlorophyll_Fluorescence\Process\Europe\Step14.Boreal_Map"
landcover_file = r"F:\Chlorophyll_Fluorescence\Process\Europe\Step14.Boreal_Map\MOD12_LandUse_N.tif"
plot_path = r"F:\Chlorophyll_Fluorescence\Process\Europe\Step4.Table\Polygon\4.11.Box_Plot_His"
Products = ["TROPOMI_SC","TROPOMI_SR","NIRv","EVI"]
LandCover = ["DBF", "NF", "SAV", "GRA", "SHR", "CRO"]
LC_CODE = [[4,5],[1,3],[8,9],[10],[6,7],[12,14]]
Metrics = [1, 3, 5] # 1, 3, 5 denote SOS, EOS, and LOS respectively
Name = ["DBF", "NF", "SAV", "GRA", "SHR", "CRO"]
nrows = 2
ncols = 3
figure_scale_row = nrows * 2.0
figure_scale_col = ncols * 2.0
fig = plt.figure(figsize=(figure_scale_col, figure_scale_row))
gs = gridspec.GridSpec(nrows, ncols, wspace=0.1, hspace=0.6)
fs = 5 # fontsize
x_labels = ['SOS', 'EOS', 'GSL']
y_lables = [0, 100, 200, 300]
min_y = 0
max_y = 365
ny = 4 # number of y-axis ticks
bar_width = 0.15 # bar width
capsize = 1.2 # error-bar cap size
capthick = 0.8 # error-bar cap thickness
elinewidth = 0.8 # error-bar line width
linewidth = 1.0 # axes frame line width
ftsize = 10 # font size
ftfamily = "Times New Roman"
axlength = 2.0 # axis tick length
axwidth = 1.2 # axis tick width
legendcols = 5 # number of legend entries per row
for i in range(len(LandCover)):
mean = []
err = []
for j in range(len(Metrics)):
lc_array = gdal_array.LoadFile(landcover_file)
SIF_SC_array = gdal_array.LoadFile(os.path.join(in_file_path, "14.1.SC_GSL_Mask/SC_{0}.tif".format(Metrics[j])))
SIF_SR_array = gdal_array.LoadFile(os.path.join(in_file_path, "14.1.SR_GSL_Mask/SR_{0}.tif".format(Metrics[j])))
dcSIF_array = gdal_array.LoadFile(os.path.join(in_file_path, "14.1.dcSIF_GSL_Mask/dcSIF_{0}.tif".format(Metrics[j])))
EVI_array = gdal_array.LoadFile(os.path.join(in_file_path, "14.1.EVI_GSL_Mask/EVI_{0}.tif".format(Metrics[j])))
NIRv_array = gdal_array.LoadFile(os.path.join(in_file_path, "14.1.NIRv_GSL_Mask/NIRv_{0}.tif".format(Metrics[j])))
SIF_SC = []
SIF_SR = []
dcSIF = []
NIRv = []
EVI = []
for m in range(len(lc_array)):
for n in range(len(lc_array[0])):
if lc_array[m,n] in LC_CODE[i]:
if abs(SIF_SC_array[m,n]) < 999:
SIF_SC.append(SIF_SC_array[m,n])
if abs(SIF_SR_array[m,n]) < 999:
SIF_SR.append(SIF_SR_array[m,n])
if abs(dcSIF_array[m,n]) < 999:
dcSIF.append(dcSIF_array[m,n])
if abs(NIRv_array[m,n]) < 999:
NIRv.append(NIRv_array[m,n])
if abs(EVI_array[m,n]) < 999:
EVI.append(EVI_array[m,n])
mean.append([np.nanmean(SIF_SC), np.nanmean(SIF_SR), np.nanmean(dcSIF), np.nanmean(NIRv), np.nanmean(EVI)])
err.append([np.nanstd(SIF_SC), np.nanstd(SIF_SR), np.nanstd(dcSIF), np.nanstd(NIRv), np.nanstd(EVI)])
mean = np.array(mean)
err = np.array(err)
# draw the grouped bar chart
x = np.arange(int(len(x_labels)))
col = int(i % ncols)
row = int(i / ncols)
axes = fig.add_subplot(gs[row, col])
SIF_SC = axes.bar(x + 0 * bar_width, mean[:,0], bar_width, yerr = err[:,0], error_kw = {'ecolor' : '0.2', 'elinewidth':elinewidth, 'capsize' :capsize, 'capthick' :capthick}, color="olivedrab", label = "SIF$_T$$_R$$_O$$_P$$_O$$_M$$_I$$_\_$$_t$$_o$$_t$$_a$$_l$$_-$$_S$$_C$", align="center", alpha=1)
SIF_SR = axes.bar(x + 1 * bar_width, mean[:,1], bar_width, yerr = err[:,1], error_kw = {'ecolor' : '0.2', 'elinewidth':elinewidth, 'capsize' :capsize, 'capthick' :capthick}, color="yellowgreen", label = "SIF$_T$$_R$$_O$$_P$$_O$$_M$$_I$$_\_$$_t$$_o$$_t$$_a$$_l$$_-$$_S$$_R$", align="center", alpha=1)
dcSIF = axes.bar(x + 2 * bar_width, mean[:,2], bar_width, yerr = err[:,2], error_kw = {'ecolor' : '0.2', 'elinewidth':elinewidth, 'capsize' :capsize, 'capthick' :capthick}, color="forestgreen", label = "SIF$_T$$_R$$_O$$_P$$_O$$_M$$_I$$_\_$$_O$$_b$$_s$", align="center", alpha=1)
NIRv = axes.bar(x + 3 * bar_width, mean[:,3], bar_width, yerr = err[:,3], error_kw = {'ecolor' : '0.2', 'elinewidth':elinewidth, 'capsize' :capsize, 'capthick' :capthick}, color="darkgoldenrod", label = "NIRv", align="center", alpha=1)
EVI = axes.bar(x + 4 * bar_width, mean[:,4], bar_width, yerr = err[:,4], error_kw = {'ecolor' : '0.2', 'elinewidth':elinewidth, 'capsize' :capsize, 'capthick' :capthick}, color="gold", label = "EVI", align="center", alpha=1)
axes.set_title("({0}) {1}".format(chr(97+i),Name[i]), fontsize = ftsize/1.2, family = ftfamily)
axes.set_xticks(x + 2 * bar_width)
axes.set_xticklabels(x_labels, fontsize = fs, family = ftfamily)
axes.set_ylim(min_y, max_y)
axes.spines['left'].set_linewidth(linewidth)
axes.spines['right'].set_linewidth(linewidth)
axes.spines['top'].set_linewidth(linewidth)
axes.spines['bottom'].set_linewidth(linewidth)
axes.tick_params(axis='both', length = axlength, width = axwidth, labelsize = ftsize/1.5)
if col == 0:
axes.set_ylabel('Day of Year (days)', fontsize = ftsize/1.6, family=ftfamily)
axes.set_yticks(np.linspace(min_y, max_y - 65, ny))
axes.set_yticklabels(y_lables, fontsize = fs + 2, family=ftfamily)
else:
axes.yaxis.set_visible(False)
axes.set_xlabel('Phenological Metrics', fontsize = ftsize/1.5, family=ftfamily)
handles = [SIF_SC, SIF_SR, dcSIF, NIRv, EVI]
labels = ['SIF$_\mathdefault{total-SC}$',\
'SIF$_\mathdefault{total-SR}$',\
'SIF$_\mathdefault{Obs}$',\
'NIR$_\mathdefault{V}$','EVI']
"""
if i == 0:
axes.legend(handles, labels, loc ='upper left', fancybox = False, shadow = False,frameon = False,
ncol = legendcols, prop={'family':ftfamily, 'size':ftsize/3})
"""
fig.legend(handles, labels, loc ='lower center', fancybox = False, shadow = False,frameon = False,
ncol = legendcols, handletextpad = 0.2, columnspacing = 1.5, prop={'family':"Times New Roman", 'size':ftsize/1.3})
fig.tight_layout()
fig.subplots_adjust(left = None, right = None, bottom = 0.15)
Plot_path = os.path.join(plot_path, "Rs2-Global.jpg")
plt.show()
fig.savefig(Plot_path, dpi=600, quality=100,bbox_inches='tight')
| 48.867133 | 306 | 0.598598 | 1,021 | 6,988 | 3.905975 | 0.25857 | 0.024072 | 0.019308 | 0.033099 | 0.31996 | 0.251003 | 0.232698 | 0.21013 | 0.185055 | 0.154714 | 0 | 0.038547 | 0.224098 | 6,988 | 143 | 307 | 48.867133 | 0.696975 | 0.054665 | 0 | 0 | 0 | 0.028302 | 0.180985 | 0.10473 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.056604 | 0 | 0.056604 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04cbe616581fc2cfbc97890b4ef12ba243847e7e | 6,260 | py | Python | CountryReconciler/reconciler/country_normalizer.py | weso/landportal-importers | 6edfa3c301422bbe8c09cb877b1cbddbcd902463 | [
"Unlicense"
] | null | null | null | CountryReconciler/reconciler/country_normalizer.py | weso/landportal-importers | 6edfa3c301422bbe8c09cb877b1cbddbcd902463 | [
"Unlicense"
] | 8 | 2016-02-16T13:05:37.000Z | 2017-01-04T14:38:03.000Z | CountryReconciler/reconciler/country_normalizer.py | landportal/landbook-importers | f0e246f493329b9c5741c50f3a0495d27ee5c54b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on 10/02/2014
@author: Dani
"""
import re
import codecs
from reconciler.entities.normalized_country import NormalizedCountry
from reconciler.exceptions.unknown_country_error import UnknownCountryError
class CountryNormalizer(object):
"""
In this class we'll implement the normalizer methods responsible
for returning a NormalizedCountry object from a certain distinctive
value. If a reconciliation is needed we'll also implement it here.
"""
# Conflictive expressions
EN_REMOVABLE_EXPRESSIONS = "(the|in|of|and|&)"
ES_REMOVABLE_EXPRESSIONS = "(el|las|los|la|lo|de|y|&|del|en)"
FR_REMOVABLE_EXPRESSIONS = "(les|las|le|la|et|&|dans|de|d|l)"
A_VOWEL_FORMS = "(Á|À|Â|Ä|á|à|â|ä)"
E_VOWEL_FORMS = "(É|È|Ê|Ë|é|è|ê|ë)"
I_VOWEL_FORMS = "(Í|Ì|Î|Ï|í|ì|î|ï)"
O_VOWEL_FORMS = "(Ó|Ò|Ô|Ö|ó|ò|ô|ö)"
U_VOWEL_FORMS = "(Ú|Ù|Û|Ü|ú|ù|û|ü)"
N_FORMS = "ñ"
C_FORMS = "(ç|þ)" # For "Curaçao" and "Curaþao"
PUNCTUATION_SYMBOLS = "(\.|,|-|:|;|_|`|'|´|!|¡|¿|\?|\^|¨)"
@staticmethod
def _equals_ignore_case(str1, str2):
if str1.lower() == str2.lower():
return True
return False
#DONE
@staticmethod
def normalize_country_by_en_name(en_name):
return CountryNormalizer._normalize_country_by_given_language_removable_expressions(en_name,
CountryNormalizer.EN_REMOVABLE_EXPRESSIONS)
@staticmethod
def normalize_country_by_es_name(es_name):
return CountryNormalizer._normalize_country_by_given_language_removable_expressions(es_name,
CountryNormalizer.ES_REMOVABLE_EXPRESSIONS)
@staticmethod
def normalize_country_by_fr_name(fr_name):
return CountryNormalizer._normalize_country_by_given_language_removable_expressions(fr_name,
CountryNormalizer.FR_REMOVABLE_EXPRESSIONS)
@staticmethod
def _normalize_country_by_given_language_removable_expressions(original_string, given_exp_removables):
# print "---------# NORMALIZER"
result = str(original_string)
# print result
result = CountryNormalizer._substitute_conflictive_chars(result)
# print result
result = CountryNormalizer._delete_text_between_brackets(result)
# print result
result = result.lower()
# print result
result = CountryNormalizer._substitute_commom_abreviations(result)
# print result
result = CountryNormalizer._rem_words_by_language(result, given_exp_removables)
# print result
result = CountryNormalizer._rem_white_spaces(result)
# print result
# print "---------# NORMALIZER"
return result
@staticmethod
def _substitute_commom_abreviations(original_string):
result = original_string
#Republic
result = re.sub("(republic|republica|republique)", "rep", result)
#Democratic
result = re.sub('(democratic|democratica|democratique)', "dem", result)
#Monarchy
result = re.sub('(monarchy|monarquia|monarchie)', "mon", result)
#Federation
result = re.sub('(federation|federacion)', "fed", result)
return result
@staticmethod
def _rem_white_spaces(original_string):
result = original_string.replace(" ", "")
result = result.replace("\n", "")
result = result.replace("\t", "")
result = result.replace("\r", "")
return result
@staticmethod
def _delete_text_between_brackets(original_string):
if "(" in original_string and ")" in original_string:
index_beg = original_string.index("(")
index_end = original_string.index(")") + 1
# keep the text before "(" and everything after ")"
return original_string[0:index_beg] + original_string[index_end:]
else:
return original_string
@staticmethod
def _substitute_conflictive_chars(original_string):
result = original_string
result = re.sub(CountryNormalizer.A_VOWEL_FORMS, 'a', result)
result = re.sub(CountryNormalizer.E_VOWEL_FORMS, 'e', result)
result = re.sub(CountryNormalizer.I_VOWEL_FORMS, 'i', result)
result = re.sub(CountryNormalizer.O_VOWEL_FORMS, 'o', result)
result = re.sub(CountryNormalizer.U_VOWEL_FORMS, 'u', result)
result = re.sub(CountryNormalizer.N_FORMS, 'n', result)
result = re.sub(CountryNormalizer.C_FORMS, 'c', result)
result = re.sub(CountryNormalizer.PUNCTUATION_SYMBOLS, " ", result)
return result
@staticmethod
def _rem_words_by_language(original, sub_exp):
# regex_exp contains a list of non-significant words that should be replaced
# by a blank. To fit the regex, each word must appear between one of
# these pairs of delimiters:
# - [start_of_string] word [white_space]
# - [white_space] word [white_space]
# - [white_space] word [end_of_string]
#
regex_exp = "(\A" + sub_exp + "\s)|(\s" + sub_exp + "\s)|(\s" + sub_exp + "\Z)"
version1 = ""
version2 = original
while version1 != version2:
version1 = version2
version2 = re.sub(regex_exp, " ", version1)
# The loop above applies re.sub to the string more than once because, when
# several non-significant words come in a streak, a single pass can miss some
# of them. E.g.: in "Republic of the Congo", " of " matches the regex, but
# that match consumes the whitespace that " the " would also need, so only
# " of " is removed and one pass yields "Republic the Congo". Re-applying the
# substitution until the string stops changing avoids this.
return version2
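# Hedged usage sketch (illustrative; the exact output depends on this
# implementation's word lists and abbreviation rules):
#
#   >>> CountryNormalizer.normalize_country_by_en_name("The Republic of Côte d'Ivoire")
#   'repcotedivoire'
#
# Accents are flattened, "republic" is abbreviated to "rep", the non-significant
# words "the"/"of" are dropped, and all whitespace is removed, producing a
# compact key suitable for matching country names across sources.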
| 39.872611 | 116 | 0.62476 | 728 | 6,260 | 5.14011 | 0.315934 | 0.054516 | 0.035275 | 0.059861 | 0.299572 | 0.138963 | 0.113843 | 0.062533 | 0.062533 | 0.062533 | 0 | 0.005717 | 0.273482 | 6,260 | 156 | 117 | 40.128205 | 0.815963 | 0.235623 | 0 | 0.183908 | 0 | 0 | 0.080314 | 0.046408 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114943 | false | 0 | 0.045977 | 0.034483 | 0.436782 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04cc4c2755a30d4af02b82ac645949400c8f8805 | 1,573 | py | Python | tests/affine_test.py | martin-luecke/xdsl | b96d19d97a5282823e7735da06764fa57a781429 | [
"Apache-2.0"
] | null | null | null | tests/affine_test.py | martin-luecke/xdsl | b96d19d97a5282823e7735da06764fa57a781429 | [
"Apache-2.0"
] | null | null | null | tests/affine_test.py | martin-luecke/xdsl | b96d19d97a5282823e7735da06764fa57a781429 | [
"Apache-2.0"
] | null | null | null | from xdsl.dialects.builtin import *
from xdsl.dialects.std import *
from xdsl.dialects.arith import *
from xdsl.printer import Printer
from xdsl.dialects.affine import *
def get_example_affine_program(ctx: MLContext, builtin: Builtin, std: Std,
affine: Affine) -> Operation:
def affine_mm(arg0: BlockArgument, arg1: BlockArgument,
arg2: BlockArgument) -> List[Operation]:
# yapf: disable
return [
affine.for_(0, 256, Block.from_callable([i64], lambda i: [
affine.for_(0, 256, Block.from_callable([i64], lambda j: [
affine.for_(0, 250, Block.from_callable([i64], lambda k: [
l := affine.load(arg0, i, k),
r := affine.load(arg1, k, j),
o := affine.load(arg2, i, j),
m := Mulf.get(l, r),
a := Mulf.get(o, m),
affine.store(a, arg2, i, j)
]))
]))
])),
Return.get(arg2)
]
# yapf: enable
f = FuncOp.from_callable("affine_mm", [f32, f32, f32], [f32], affine_mm)
return f
def test_affine():
ctx = MLContext()
builtin = Builtin(ctx)
std = Std(ctx)
arith = Arith(ctx)
affine = Affine(ctx)
test_empty = new_op("test_empty", 0, 0, 0)
ctx.register_op(test_empty)
op = test_empty()
f = get_example_affine_program(ctx, builtin, std, affine)
f.verify()
printer = Printer()
printer.print_op(f)
| 31.46 | 78 | 0.530833 | 187 | 1,573 | 4.336898 | 0.294118 | 0.049322 | 0.078915 | 0.073983 | 0.192355 | 0.096178 | 0.096178 | 0.096178 | 0.096178 | 0 | 0 | 0.035922 | 0.3452 | 1,573 | 49 | 79 | 32.102041 | 0.751456 | 0.016529 | 0 | 0.051282 | 0 | 0 | 0.012306 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.128205 | 0.025641 | 0.25641 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04cd48f165a97bf20274ed413e6996f8b236819c | 1,632 | py | Python | setup.py | remifan/commplax | e8ee5bc86ab0dfd90773202579237ecf42488cd0 | [
"Apache-2.0"
] | 20 | 2021-03-09T08:33:51.000Z | 2021-11-29T05:04:55.000Z | setup.py | remifan/commplax | e8ee5bc86ab0dfd90773202579237ecf42488cd0 | [
"Apache-2.0"
] | null | null | null | setup.py | remifan/commplax | e8ee5bc86ab0dfd90773202579237ecf42488cd0 | [
"Apache-2.0"
] | 6 | 2021-03-09T08:34:01.000Z | 2021-12-03T15:14:42.000Z | # Copyright 2021 The Commplax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install commplax"""
from setuptools import setup, find_packages
_dct = {}
with open('commplax/version.py') as f:
exec(f.read(), _dct)
__version__ = _dct['__version__']
setup(name='commplax',
version=__version__,
description='differentiable DSP library for optical communication',
author='Commplax team',
author_email='remi.qr.fan@gmail.com',
url='https://github.com/remifan/commplax',
packages=find_packages(),
install_requires=[
'jax>=0.2.13',
'jaxlib>=0.1.66',
'flax>=0.3.4',
'seaborn',
'quantumrandom'
],
extras_require={
'dev': [
'attr',
'mock',
'pytest',
'parameterized',
'ipykernel',
'ipympl',
],
'fs': [
'zarr',
's3fs',
'fsspec'
],
'all': [
'zarr[jupyter]==2.9.5',
's3fs',
'fsspec',
'plotly',
'tqdm'
]
},
license='Apache-2.0',
)
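# Usage note (illustrative): the extras groups above can be selected at install
# time with standard pip extras syntax, e.g.
#   pip install commplax            # core dependencies only
#   pip install "commplax[dev]"     # plus development/test tooling
#   pip install "commplax[all]"     # plus zarr, plotly, tqdm, etc.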
| 26.322581 | 74 | 0.582721 | 188 | 1,632 | 4.952128 | 0.664894 | 0.064447 | 0.027927 | 0.034372 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022453 | 0.290441 | 1,632 | 61 | 75 | 26.754098 | 0.78152 | 0.352941 | 0 | 0.116279 | 0 | 0 | 0.316956 | 0.020231 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.023256 | 0 | 0.023256 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04d0c052c025367c548447ff41b0b30285e71781 | 1,069 | py | Python | src/MetaSeg/functions/meta_nn.py | RonMcKay/Detection-and-Retrieval-of-OOD-Objects | 561dc4362226f67b5a85d94843ee439d67fad4ab | [
"MIT"
] | null | null | null | src/MetaSeg/functions/meta_nn.py | RonMcKay/Detection-and-Retrieval-of-OOD-Objects | 561dc4362226f67b5a85d94843ee439d67fad4ab | [
"MIT"
] | null | null | null | src/MetaSeg/functions/meta_nn.py | RonMcKay/Detection-and-Retrieval-of-OOD-Objects | 561dc4362226f67b5a85d94843ee439d67fad4ab | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.utils.data import Dataset
class MetricDataset(Dataset):
def __init__(self, data):
super(MetricDataset, self).__init__()
self.data = data[0].squeeze()
self.targets = data[1].squeeze()
def __getitem__(self, index):
return (
torch.from_numpy(self.data[index]).float().flatten(),
torch.tensor(self.targets[index]).float().flatten(),
)
def __len__(self):
return self.data.shape[0]
class MetaNN(nn.Module):
def __init__(self, input_size):
super(MetaNN, self).__init__()
self.act = nn.ReLU()
self.layers = nn.Sequential(
nn.Linear(input_size, 50),
self.act,
nn.Linear(50, 40),
self.act,
nn.Linear(40, 30),
self.act,
nn.Linear(30, 20),
self.act,
nn.Linear(20, 10),
self.act,
nn.Linear(10, 1),
)
def forward(self, x):
return self.layers(x).view(x.shape[0], -1)
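# Hedged training sketch (added for illustration, not part of the original
# module): the metric dimensionality (49), batch size, and optimizer settings
# below are assumptions chosen for the demo, not values from the project.
if __name__ == "__main__":
    import numpy as np
    from torch.utils.data import DataLoader

    # Synthetic (metrics, targets) arrays standing in for real segment metrics.
    metrics = np.random.rand(32, 49).astype(np.float32)
    targets = np.random.randint(0, 2, size=(32, 1)).astype(np.float32)
    loader = DataLoader(MetricDataset((metrics, targets)), batch_size=8)

    # One pass of binary-classification training on the synthetic data.
    model = MetaNN(input_size=49)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    for x, y in loader:
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()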
| 25.452381 | 65 | 0.540692 | 131 | 1,069 | 4.206107 | 0.335878 | 0.076225 | 0.098004 | 0.136116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035813 | 0.320861 | 1,069 | 41 | 66 | 26.073171 | 0.723141 | 0 | 0 | 0.147059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.088235 | 0.088235 | 0.382353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04d9a64e16d05c00923b86847acd17f241da5519 | 4,704 | py | Python | python/experiments/lnpdfs/StopAndFrisk/frisk.py | DrawZeroPoint/VIPS | 730f4e18c24afa6f561b13d1fe8af53ae89990a7 | [
"MIT"
] | 12 | 2018-07-11T14:35:51.000Z | 2020-12-07T03:54:28.000Z | python/experiments/lnpdfs/StopAndFrisk/frisk.py | ykwon0407/VIPS | 91d940304b34d702c1a8b12363b5fff38455ef88 | [
"MIT"
] | null | null | null | python/experiments/lnpdfs/StopAndFrisk/frisk.py | ykwon0407/VIPS | 91d940304b34d702c1a8b12363b5fff38455ef88 | [
"MIT"
] | 10 | 2018-07-11T14:36:00.000Z | 2022-01-14T21:41:41.000Z | """
Implementation of the hierarchical poisson glm model, with a precinct-specific
term, an ethnicity specific term, and an offset term.
The data are tuples of (ethnicity, precinct, num_stops, total_arrests), where
the count variables num_stops and total_arrests refer to the number of stops
and total arrests of an ethnicity in the specified precinct over a period of
15 months. The rate we are measuring is the rate of stops-per-arrest
for certain ethnicities in different precincts.
Y_ep = num stops of ethnicity e in precinct p
N_ep = num arests of e in p
log lam_ep = alpha_e + beta_p + mu + log(N_ep * 15/12) #yearly correction term
Y_ep ~ Pois(lam_ep)
"""
import numpy as np
import numpy.random as npr
import scipy.misc as scpm
import pandas as pd
import os
# credit dataset
def process_dataset():
data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir)) + "/data/datasets/frisk_with_noise.dat"
df = pd.read_csv(data_dir, skiprows=6, delim_whitespace=True)
# compute proportion black in precinct, black = 1
# first aggregate by precinct/ethnicity, and sum over populations
popdf = df[['pop', 'precinct', 'eth']]. \
groupby(['precinct', 'eth'])['pop'].apply(sum)
percent_black = np.array([ popdf[i][1] / float(popdf[i].sum())
for i in range(1, 76)] )
precinct_type = pd.cut(percent_black, [0, .1, .4, 1.])
df['precinct_type'] = precinct_type.codes[df.precinct.values-1]
return df
df = process_dataset()
def make_model_funs(crime=1., precinct_type=1):
""" crime: 1=violent, 2=weapons, 3=property, 4=drug
eth : 1=black, 2 = hispanic, 3=white
precincts: 1-75
precinct_type = (0, .1], (.1, .4], (.4, 1.]
"""
# subselect crime/precinct, set up design matrix
sdf = df[ (df['crime']==crime) & (df['precinct_type']==precinct_type) ]
# make dummies for precincts, etc
one_hot = lambda x, k: np.array(x[:,None] == np.arange(k)[None, :], dtype=int)
precincts = np.sort(np.unique(sdf['precinct']))
Xprecinct = one_hot(sdf['precinct'], 76)[:, precincts]
Xeth = one_hot(sdf['eth'], 4)[:, 1:-1]
yep = sdf['stops'].values
lnep = np.log(sdf['past.arrests'].values) + np.log(15./12)
num_eth = Xeth.shape[1]
num_precinct = Xprecinct.shape[1]
# unpack a flat param vector
aslice = slice(0, num_eth)
bslice = slice(num_eth, num_eth + num_precinct)
mslice = slice(bslice.stop, bslice.stop + 1)
lnsa_slice = slice(mslice.stop, mslice.stop + 1)
lnsb_slice = slice(lnsa_slice.stop, lnsa_slice.stop+1)
num_params = lnsb_slice.stop
pname = lambda s, stub: ['%s_%d'%(stub, i)
for i in range(s.stop-s.start)]
param_names = [pname(s, stub)
for s, stub in zip([aslice, bslice, mslice, lnsa_slice, lnsb_slice],
['alpha', 'beta', 'mu', 'lnsigma_a', 'lnsigma_b'])]
param_names = [s for pn in param_names for s in pn]
def unpack(th):
""" unpack vectorized lndf """
th = np.atleast_2d(th)
alpha_eth, beta_prec, mu, lnsigma_alpha, lnsigma_beta = \
th[:, aslice], th[:, bslice], th[:, mslice], \
th[:, lnsa_slice], th[:, lnsb_slice]
return alpha_eth, beta_prec, mu, lnsigma_alpha, lnsigma_beta
hyper_lnstd = np.array([[np.log(10.)]])
def lnpdf(th):
# params
alpha, beta, mu, lns_alpha, lns_beta = unpack(th)
# priors
ll_alpha = normal_lnpdf(alpha, 0, lns_alpha)
ll_beta = normal_lnpdf(beta, 0, lns_beta)
ll_mu = normal_lnpdf(mu, 0, hyper_lnstd)
ll_salpha = normal_lnpdf(np.exp(lns_alpha), 0, hyper_lnstd)
ll_sbeta = normal_lnpdf(np.exp(lns_beta), 0, hyper_lnstd)
logprior = ll_alpha + ll_beta + ll_mu + ll_salpha + ll_sbeta
# likelihood
lnlam = (mu + lnep[None,:]) + \
np.dot(alpha, Xeth.T) + np.dot(beta, Xprecinct.T)
loglike = np.sum(lnpoiss(yep, lnlam), 1)
return (loglike + logprior).squeeze()
return lnpdf, unpack, num_params, sdf, param_names
from scipy.special import gammaln
def lnpoiss(y, lnlam):
""" log likelihood of poisson """
return y*lnlam - np.exp(lnlam) - gammaln(y+1)
def normal_lnpdf(x, mean, ln_std):
x = np.atleast_2d(x)
D = x.shape[1]
dcoef = 1.
if ln_std.shape[1] != D:
dcoef = D
qterm = -.5 * np.sum((x - mean)**2 / np.exp(2.*ln_std), axis=1)
coef = -.5*D * np.log(2.*np.pi) - dcoef * np.sum(ln_std, axis=1)
return qterm + coef
| 38.557377 | 155 | 0.616709 | 704 | 4,704 | 3.975852 | 0.3125 | 0.030011 | 0.010718 | 0.017149 | 0.072883 | 0.040729 | 0.029296 | 0.029296 | 0.029296 | 0 | 0 | 0.020068 | 0.247874 | 4,704 | 121 | 156 | 38.876033 | 0.771057 | 0.244473 | 0 | 0 | 0 | 0 | 0.047018 | 0.010034 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.085714 | 0 | 0.257143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04dc2b56346a0029e3867882b541bc4c488cd002 | 12,578 | py | Python | script.module.saltsrd.shared/lib/kodi.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2019-03-05T09:37:15.000Z | 2019-03-05T09:37:15.000Z | script.module.saltsrd.shared/lib/kodi.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | null | null | null | script.module.saltsrd.shared/lib/kodi.py | TheWardoctor/wardoctors-repo | 893f646d9e27251ffc00ca5f918e4eb859a5c8f0 | [
"Apache-2.0"
] | 1 | 2021-11-05T20:48:09.000Z | 2021-11-05T20:48:09.000Z | """
Salts RD Lite shared module
Copyright (C) 2016 creits -2- tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import xbmcaddon
import xbmcplugin
import xbmcgui
import xbmc
import xbmcvfs
import urllib
import urlparse
import sys
import os
import re
import json
import time
import CustomProgressDialog
addon = xbmcaddon.Addon()
get_setting = addon.getSetting
show_settings = addon.openSettings
sleep = xbmc.sleep
_log = xbmc.log
def execute_jsonrpc(command):
if not isinstance(command, basestring):
command = json.dumps(command)
response = xbmc.executeJSONRPC(command)
return json.loads(response)
def get_path():
return addon.getAddonInfo('path').decode('utf-8')
def get_profile():
return addon.getAddonInfo('profile').decode('utf-8')
def translate_path(path):
return xbmc.translatePath(path).decode('utf-8')
def set_setting(id, value):
if not isinstance(value, basestring): value = str(value)
addon.setSetting(id, value)
def accumulate_setting(setting, addend=1):
cur_value = get_setting(setting)
cur_value = int(cur_value) if cur_value else 0
set_setting(setting, cur_value + addend)
def get_version():
return addon.getAddonInfo('version')
def get_id():
return addon.getAddonInfo('id')
def get_name():
return addon.getAddonInfo('name')
def has_addon(addon_id):
return xbmc.getCondVisibility('System.HasAddon(%s)' % (addon_id)) == 1
def get_kodi_version():
class MetaClass(type):
def __str__(self):
return '|%s| -> |%s|%s|%s|%s|%s|' % (self.version, self.major, self.minor, self.tag, self.tag_version, self.revision)
class KodiVersion(object):
__metaclass__ = MetaClass
version = xbmc.getInfoLabel('System.BuildVersion').decode('utf-8')
match = re.search('([0-9]+)\.([0-9]+)', version)
if match: major, minor = match.groups()
match = re.search('-([a-zA-Z]+)([0-9]*)', version)
if match: tag, tag_version = match.groups()
match = re.search('\w+:(\w+-\w+)', version)
if match: revision = match.group(1)
try: major = int(major)
except: major = 0
try: minor = int(minor)
except: minor = 0
try: revision = revision.decode('utf-8')
except: revision = u''
try: tag = tag.decode('utf-8')
except: tag = u''
try: tag_version = int(tag_version)
except: tag_version = 0
return KodiVersion
def get_plugin_url(queries):
try:
query = urllib.urlencode(queries)
except UnicodeEncodeError:
for k in queries:
if isinstance(queries[k], unicode):
queries[k] = queries[k].encode('utf-8')
query = urllib.urlencode(queries)
return sys.argv[0] + '?' + query
def end_of_directory(cache_to_disc=True):
xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=cache_to_disc)
def set_content(content):
xbmcplugin.setContent(int(sys.argv[1]), content)
def create_item(queries, label, thumb='', fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
if not thumb: thumb = os.path.join(get_path(), 'icon.png')
list_item = xbmcgui.ListItem(label, iconImage=thumb, thumbnailImage=thumb)
add_item(queries, list_item, fanart, is_folder, is_playable, total_items, menu_items, replace_menu)
def add_item(queries, list_item, fanart='', is_folder=None, is_playable=None, total_items=0, menu_items=None, replace_menu=False):
if not fanart: fanart = os.path.join(get_path(), 'fanart.jpg')
if menu_items is None: menu_items = []
if is_folder is None:
is_folder = False if is_playable else True
if is_playable is None:
playable = 'false' if is_folder else 'true'
else:
playable = 'true' if is_playable else 'false'
liz_url = queries if isinstance(queries, basestring) else get_plugin_url(queries)
if not list_item.getProperty('fanart_image'): list_item.setProperty('fanart_image', fanart)
list_item.setInfo('video', {'title': list_item.getLabel()})
list_item.setProperty('isPlayable', playable)
list_item.addContextMenuItems(menu_items, replaceItems=replace_menu)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), liz_url, list_item, isFolder=is_folder, totalItems=total_items)
def parse_query(query):
q = {'mode': 'main'}
if query.startswith('?'): query = query[1:]
queries = urlparse.parse_qs(query)
for key in queries:
if len(queries[key]) == 1:
q[key] = queries[key][0]
else:
q[key] = queries[key]
return q
def notify(header=None, msg='', duration=2000, sound=None, icon_path=None):
if header is None: header = get_name()
if sound is None: sound = get_setting('mute_notifications') == 'false'
if icon_path is None: icon_path = os.path.join(get_path(), 'icon.png')
try:
xbmcgui.Dialog().notification(header, msg, icon_path, duration, sound)
except:
builtin = "XBMC.Notification(%s,%s, %s, %s)" % (header, msg, duration, icon_path)
xbmc.executebuiltin(builtin)
def close_all():
xbmc.executebuiltin('Dialog.Close(all)')
def get_current_view():
window = xbmcgui.Window(xbmcgui.getCurrentWindowId())
return str(window.getFocusId())
def set_view(content, set_view=False, set_sort=False):
# set content type so library shows more views and info
if content:
set_content(content)
if set_view:
view = get_setting('%s_view' % (content))
if view and view != '0':
_log('Setting View to %s (%s)' % (view, content), xbmc.LOGDEBUG)
xbmc.executebuiltin('Container.SetViewMode(%s)' % (view))
# set sort methods - probably we don't need all of them
if set_sort:
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_YEAR)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_MPAA_RATING)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_VIDEO_RUNTIME)
xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_GENRE)
def refresh_container():
xbmc.executebuiltin("XBMC.Container.Refresh")
def update_container(url):
xbmc.executebuiltin('Container.Update(%s)' % (url))
def get_keyboard(heading, default=''):
keyboard = xbmc.Keyboard()
keyboard.setHeading(heading)
if default: keyboard.setDefault(default)
keyboard.doModal()
if keyboard.isConfirmed():
return keyboard.getText()
else:
return None
class Translations(object):
def __init__(self, strings):
self.strings = strings
def i18n(self, string_id):
try:
return addon.getLocalizedString(self.strings[string_id]).encode('utf-8', 'ignore')
except Exception as e:
xbmc.log('%s: Failed String Lookup: %s (%s)' % (get_name(), string_id, e), xbmc.LOGWARNING)
return string_id
class WorkingDialog(object):
wd = None
def __init__(self):
try:
self.wd = xbmcgui.DialogBusy()
self.wd.create()
self.update(0)
except:
xbmc.executebuiltin('ActivateWindow(busydialog)')
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.wd is not None:
self.wd.close()
else:
xbmc.executebuiltin('Dialog.Close(busydialog)')
def is_canceled(self):
if self.wd is not None:
return self.wd.iscanceled()
else:
return False
def update(self, percent):
if self.wd is not None:
self.wd.update(percent)
class ProgressDialog(object):
pd = None
def __init__(self, heading, line1='', line2='', line3='', background=False, active=True, timer=0):
self.begin = time.time()
self.timer = timer
self.background = background
self.heading = heading
if active and not timer:
self.pd = self.__create_dialog(line1, line2, line3)
self.pd.update(0)
def __create_dialog(self, line1, line2, line3):
if self.background:
pd = xbmcgui.DialogProgressBG()
msg = line1 + line2 + line3
pd.create(self.heading, msg)
else:
if xbmc.getCondVisibility('Window.IsVisible(progressdialog)'):
pd = CustomProgressDialog.ProgressDialog()
else:
pd = xbmcgui.DialogProgress()
pd.create(self.heading, line1, line2, line3)
return pd
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.pd is not None:
self.pd.close()
def is_canceled(self):
if self.pd is not None and not self.background:
return self.pd.iscanceled()
else:
return False
def update(self, percent, line1='', line2='', line3=''):
if self.pd is None and self.timer and (time.time() - self.begin) >= self.timer:
self.pd = self.__create_dialog(line1, line2, line3)
if self.pd is not None:
if self.background:
msg = line1 + line2 + line3
self.pd.update(percent, self.heading, msg)
else:
self.pd.update(percent, line1, line2, line3)
class CountdownDialog(object):
__INTERVALS = 5
pd = None
def __init__(self, heading, line1='', line2='', line3='', active=True, countdown=60, interval=5):
self.heading = heading
self.countdown = countdown
self.interval = interval
self.line3 = line3
if active:
if xbmc.getCondVisibility('Window.IsVisible(progressdialog)'):
pd = CustomProgressDialog.ProgressDialog()
else:
pd = xbmcgui.DialogProgress()
if not self.line3: line3 = 'Expires in: %s seconds' % (countdown)
pd.create(self.heading, line1, line2, line3)
pd.update(100)
self.pd = pd
def __enter__(self):
return self
    def __exit__(self, exc_type, exc_val, exc_tb):
if self.pd is not None:
self.pd.close()
def start(self, func, args=None, kwargs=None):
if args is None: args = []
if kwargs is None: kwargs = {}
result = func(*args, **kwargs)
if result:
return result
start = time.time()
expires = time_left = int(self.countdown)
interval = self.interval
while time_left > 0:
for _ in range(CountdownDialog.__INTERVALS):
sleep(interval * 1000 / CountdownDialog.__INTERVALS)
if self.is_canceled(): return
time_left = expires - int(time.time() - start)
if time_left < 0: time_left = 0
progress = time_left * 100 / expires
line3 = 'Expires in: %s seconds' % (time_left) if not self.line3 else ''
self.update(progress, line3=line3)
result = func(*args, **kwargs)
if result:
return result
def is_canceled(self):
if self.pd is None:
return False
else:
return self.pd.iscanceled()
def update(self, percent, line1='', line2='', line3=''):
if self.pd is not None:
self.pd.update(percent, line1, line2, line3)
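# Illustrative use of CountdownDialog (poll_server and code are hypothetical names):
# with CountdownDialog('Authorize', line1='Enter the code', countdown=60) as cd:
#     result = cd.start(poll_server, args=[code])
# start() re-invokes the function every interval until it returns a truthy
# result, the countdown expires, or the user cancels the dialog.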
| 35.430986 | 139 | 0.63134 | 1,561 | 12,578 | 4.950673 | 0.213965 | 0.013975 | 0.025233 | 0.014234 | 0.290502 | 0.259964 | 0.242365 | 0.219332 | 0.179865 | 0.155797 | 0 | 0.012488 | 0.255128 | 12,578 | 354 | 140 | 35.531073 | 0.81236 | 0.062649 | 0 | 0.26259 | 0 | 0 | 0.054717 | 0.015767 | 0 | 0 | 0 | 0 | 0 | 1 | 0.158273 | false | 0 | 0.046763 | 0.039568 | 0.363309 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04dc6f790341edc14815fce9aa1146cef3491bdc | 11,554 | py | Python | data/aicharacter.py | kennethsinder/exit-dash-hyperion | d44ea77ef5e8a6ce9490e3f802636da5b44e6e74 | [
"MIT"
] | 2 | 2018-01-29T14:34:51.000Z | 2020-10-20T23:30:26.000Z | data/aicharacter.py | kennethsinder/exit-dash-hyperion | d44ea77ef5e8a6ce9490e3f802636da5b44e6e74 | [
"MIT"
] | null | null | null | data/aicharacter.py | kennethsinder/exit-dash-hyperion | d44ea77ef5e8a6ce9490e3f802636da5b44e6e74 | [
"MIT"
] | null | null | null | # coding=utf-8
from random import randint
import os
import pygame, math
from character import *
class AICharacter(Character):
def __init__(self, x, y, Vx, Vy, properties=('slime', -1, -1)):
# Properties should be a tuple of the form (STRING mobName, INT leftLimit,
# INT rightLimit) where leftLimit and rightLimit can be -1 to remove the limit
self.mobType = properties[0]
self.limit = [properties[1], properties[2]]
# Call base class implementation
Character.__init__(self, x, y, Vx, Vy)
# Decide colour if slime
self.colour = 'Blue'
if self.mobType == 'slime' and randint(0, 1) == 0:
self.colour = 'Green'
# Load images
# slime
self.slimeDL = pygame.image.load('enemies'+os.sep+'slime'+os.sep+'slime' + self.colour +'_squashed.png').convert_alpha()
self.slimeDR = pygame.image.load('enemies'+os.sep+'slime'+os.sep+'slime' + self.colour + '_squashedR.png').convert_alpha()
self.slimeL = pygame.image.load('enemies'+os.sep+'slime'+os.sep+'slime' + self.colour + '_walk.png').convert_alpha()
self.slimeR = pygame.image.load('enemies'+os.sep+'slime'+os.sep+'slime' + self.colour + '_walkR.png').convert_alpha()
# fly
self.flyDL = pygame.image.load('enemies'+os.sep+'fly'+os.sep+'fly_dead.png').convert_alpha()
self.flyDR = pygame.image.load('enemies'+os.sep+'fly'+os.sep+'fly_dead_r.png').convert_alpha()
self.flyL = pygame.image.load('enemies'+os.sep+'fly'+os.sep+'fly_fly.png').convert_alpha()
self.flyR = pygame.image.load('enemies'+os.sep+'fly'+os.sep+'fly_fly_r.png').convert_alpha()
# fish
self.fishDL = pygame.image.load('enemies'+os.sep+'other'+os.sep+'fishGreen_dead.png').convert_alpha()
self.fishDR = pygame.image.load('enemies'+os.sep+'other'+os.sep+'fishGreen_dead_r.png').convert_alpha()
self.fishL = pygame.image.load('enemies'+os.sep+'other'+os.sep+'fishGreen_swim.png').convert_alpha()
self.fishR = pygame.image.load('enemies'+os.sep+'other'+os.sep+'fishGreen_swim_r.png').convert_alpha()
# snail
self.snailL1 = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailWalk1.png').convert_alpha()
self.snailL2 = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailWalk2.png').convert_alpha()
self.snailR1 = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailWalk1R.png').convert_alpha()
self.snailR2 = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailWalk2R.png').convert_alpha()
self.snailDL = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailShell.png').convert_alpha()
self.snailDR = pygame.image.load('enemies'+os.sep+'other'+os.sep+'snailShellR.png').convert_alpha()
# general image properties
self.imageL1, self.imageL2, self.imageR1, self.imageR2, self.imageDL, self.imageDR = [None] * 6
self.deadWidth, self.deadHeight = [None] * 2
# Other control variables
self.originalHeight = y
self.alive = True
self.health = 1
self.gravity = 1
self.runSpeed = abs(self.Vx)
self.currentStep = 0
self.takenAction = False
self.updateFrequency = 2
# -----------------------------------------------------------------------------------------------------------------
@staticmethod
def distance(p0, p1):
return math.sqrt((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2)
# -----------------------------------------------------------------------------------------------------------------
def updateAI(self, platforms, mainChar, blocks):
# Increment position by velocity
self.x += self.Vx
self.y += self.Vy
# Determine direction for draw() method
if self.Vx > 0:
self.direction = 1
elif self.Vx < 0:
self.direction = 0
# Check if character is still alive
if self.health <= 0:
self.alive = False
# Set a terminal velocity
if self.Vy >= platforms[0].height:
self.Vy = platforms[0].height - 5
if not self.onGround and self.Vy >= platforms[0].height - 15 and self.y > platforms[self.lowestPlatform][1]:
self.dispose()
# Apply gravity if necessary
if self.onGround:
self.Vy = 0
        elif not (self.alive and self.mobType in ('fly', 'fish')):
            # gravity applies to everything except living flies and fish
self.Vy += self.gravity
# Keep character within bounds
if self.limit[0] != -1 and self.x <= self.limit[0]:
self.x += self.runSpeed
self.Vx = abs(self.Vx)
if self.limit[1] != -1 and self.x >= self.limit[1]:
self.x -= self.runSpeed
self.Vx = -abs(self.Vx)
# Switch to a dead state if close to explosion
explosionRadius = 400
for block in blocks:
distanceFromBlock = self.distance((self.x + 0.5 * self.width, self.y + 0.5 * self.height),
(block.x + 0.5 * block.width, block.y + 0.5 * block.height))
if block.disabled and block.willExplode and block.explosionStep == 1 and \
distanceFromBlock < explosionRadius:
self.health = 0
# Prevent AI from falling off the lowest platform
        if self.mobType in ('slime', 'snail'):
testXLeft = self.x - 25
testXRight = self.x + 25 + self.width
lowestPlatLeft = platforms[self.lowestPlatform][0]
lowestPlatRight = platforms[self.lowestPlatform][2]
onLowestPlatform = self.currentPlatform == self.lowestPlatform
if onLowestPlatform and testXLeft <= lowestPlatLeft and self.Vx < 0:
self.x += self.runSpeed
self.Vx *= -1
elif onLowestPlatform and testXRight >= lowestPlatRight and self.Vx > 0:
self.x -= self.runSpeed
self.Vx *= -1
# Implement simple AI
        if self.mobType in ('slime', 'snail') and randint(0, 10 - self.updateFrequency) == 0:
platformsBelowSelf = []
currentPlatformHeight = platforms[self.currentPlatform][1]
limitBackup = [self.limit[0], self.limit[1]]
self.limit[0] = platforms[self.currentPlatform][0] + 5
self.limit[1] = platforms[self.currentPlatform][2] - 40
safePlatformDropLeft, safePlatformDropRight = False, False
for i in range(0, len(platforms)):
if platforms[i][1] > currentPlatformHeight:
platformsBelowSelf.append(platforms[i])
for platform in platformsBelowSelf:
if platform[0] < platforms[self.currentPlatform][0] < platform[2]:
safePlatformDropLeft = True
if platform[0] < platforms[self.currentPlatform][2] and platform[2] > platforms[self.currentPlatform][
2]:
safePlatformDropRight = True
if safePlatformDropLeft:
self.limit[0] = limitBackup[0]
if safePlatformDropRight:
self.limit[1] = limitBackup[1]
elif self.mobType == 'fly' and self.alive and randint(0, 10 - self.updateFrequency) == 0:
self.limit[0] = platforms[0][0]
for i in range(0, len(platforms)):
if self.x + self.width + 5 >= platforms[i][0] and self.x <= platforms[i][2] and \
platforms[i][1] <= self.y <= platforms[i][3]:
self.limit[1] = platforms[i][0]
self.Vx *= -1
self.x -= self.runSpeed
# -----------------------------------------------------------------------------------------------------------------
def update(self, platforms, ev, movableObjects, blocks, aiCharacters, mainChar, pool, surface, FPS, torches=None):
# Collide with other objects
Character.collide(self, platforms, blocks, aiCharacters, pool, torches)
# Update motion and AI actions
self.updateAI(platforms, mainChar, blocks)
# Draw correct character
self.draw(surface, FPS)
# -----------------------------------------------------------------------------------------------------------------
def draw(self, surface, fps=60):
        # Return immediately if mob is invisible
if not self.visible:
return
# Determine the correct image to use
if self.mobType == 'slime' and not self.imageL1:
self.imageL1 = self.imageL2 = self.slimeL
self.imageR1 = self.imageR2 = self.slimeR
self.imageDL = self.slimeDL
self.imageDR = self.slimeDR
elif self.mobType == 'fly' and not self.imageL1:
self.imageL1 = self.imageL2 = self.flyL
self.imageR1 = self.imageR2 = self.flyR
self.imageDL = self.flyDL
self.imageDR = self.flyDR
elif self.mobType == 'fish' and not self.imageL1:
self.imageL1 = self.fishL
self.imageL2 = self.fishL
self.imageR1 = self.fishR
self.imageR2 = self.fishR
self.imageDL = self.fishDL
self.imageDR = self.fishDR
elif self.mobType == 'snail' and not self.imageL1:
self.imageL1 = self.snailL1
self.imageL2 = self.snailL2
self.imageR1 = self.snailR1
self.imageR2 = self.snailR2
self.imageDL = self.snailDL
self.imageDR = self.snailDR
# Get image widths and heights
self.width = pygame.Surface.get_width(self.imageL1)
self.height = pygame.Surface.get_height(self.imageL1)
self.deadWidth = pygame.Surface.get_width(self.imageDL)
self.deadHeight = pygame.Surface.get_height(self.imageDL)
# Increment the walking/moving frame
footstepRarity = 1
if pygame.time.get_ticks() % footstepRarity == 0:
self.walkFrame += 1
if self.walkFrame > 1:
self.walkFrame = 0
if self.direction == 1 and self.alive and self.walkFrame == 0:
surface.blit(self.imageR1, (self.x, self.y))
elif self.direction == 0 and self.alive and self.walkFrame == 0:
surface.blit(self.imageL1, (self.x, self.y))
elif self.direction == 1 and self.alive and self.walkFrame == 1:
surface.blit(self.imageR2, (self.x, self.y))
elif self.direction == 0 and self.alive and self.walkFrame == 1:
surface.blit(self.imageL2, (self.x, self.y))
elif self.direction == 1 and not self.alive:
surface.blit(self.imageDR, (self.x, self.y))
elif self.direction == 0 and not self.alive:
surface.blit(self.imageDL, (self.x, self.y))
# Recalculate the image width and height, and stop horizontal motion if the AI char is dead
if not self.alive:
self.width = self.deadWidth
self.height = self.deadHeight
self.Vx = 0
# -----------------------------------------------------------------------------------------------------------------
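# Illustrative construction (hypothetical values; limits of -1 disable the bounds clamp):
# snail = AICharacter(x=120, y=300, Vx=2, Vy=0, properties=('snail', 100, 400))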
| 49.587983 | 131 | 0.552017 | 1,323 | 11,554 | 4.785336 | 0.176115 | 0.028432 | 0.042647 | 0.062549 | 0.380351 | 0.304375 | 0.26931 | 0.232823 | 0.213236 | 0.138051 | 0 | 0.020906 | 0.283798 | 11,554 | 232 | 132 | 49.801724 | 0.744169 | 0.127402 | 0 | 0.059172 | 0 | 0 | 0.057056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029586 | false | 0 | 0.017751 | 0.005917 | 0.065089 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04e078577ead750421a7020ed8aeebb5726ef2c5 | 2,181 | py | Python | HotPotato/hot_potato.py | caternuson/CircuitPython-Demos | cffb226750614caed5368d7ca31c06be321f54d8 | [
"MIT"
] | null | null | null | HotPotato/hot_potato.py | caternuson/CircuitPython-Demos | cffb226750614caed5368d7ca31c06be321f54d8 | [
"MIT"
] | null | null | null | HotPotato/hot_potato.py | caternuson/CircuitPython-Demos | cffb226750614caed5368d7ca31c06be321f54d8 | [
"MIT"
] | null | null | null | # Circuit Playground Express Hot Potato
#
# Author: Carter Nelson
# MIT License (https://opensource.org/licenses/MIT)
import time
import random
import math
import board
from analogio import AnalogIn
from adafruit_circuitplayground.express import cpx
# This brings in the song to play
import melody
number_of_notes = len(melody.melody)
SHAKE_THRESHOLD = 30
def get_total_accel():
    # Average ten accelerometer samples and return the magnitude of the mean vector
X = 0
Y = 0
Z = 0
for count in range(10):
x,y,z = cpx.acceleration
X = X + x
Y = Y + y
Z = Z + z
time.sleep(0.001)
X = X / 10
Y = Y / 10
Z = Z / 10
return math.sqrt(X*X + Y*Y + Z*Z)
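# At rest the returned magnitude is ~9.8 (gravity alone), so SHAKE_THRESHOLD = 30
# requires roughly three times that before a round of the game begins.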
# Seed the random function with noise
a4 = AnalogIn(board.A4)
a5 = AnalogIn(board.A5)
a6 = AnalogIn(board.A6)
a7 = AnalogIn(board.A7)
seed = a4.value
seed += a5.value
seed += a6.value
seed += a7.value
random.seed(seed)
# Set the NeoPixels all red
cpx.pixels.fill(0xFF0000)
# Loop forever
while True:
# Wait for shaking
while get_total_accel() < SHAKE_THRESHOLD:
pass # do nothing
# Game length
game_length = random.randint(number_of_notes, 6*number_of_notes)
# Game play with melody
note_to_play = 0
for game_step in range(game_length):
# Add some flare using the NeoPixels
cpx.pixels.fill(0)
cpx.pixels[random.randint(0,9)] = ( random.randint(0,255),
random.randint(0,255),
random.randint(0,255) )
# Play the note
note_duration = 1 / melody.tempo[note_to_play]
note = melody.melody[note_to_play]
note = note if note <= 3500 else 3500
if note == 0:
time.sleep(note_duration)
else:
cpx.play_tone(note, note_duration)
# Increment and check the note counter
note_to_play += 1
note_to_play = note_to_play if note_to_play < number_of_notes else 0
#
# GAME OVER
#
# Set the NeoPixels all red
cpx.pixels.fill(0xFF0000)
# Delay a bit so can't just reset with a shake
time.sleep(2) | 23.967033 | 76 | 0.600642 | 311 | 2,181 | 4.096463 | 0.37299 | 0.037677 | 0.054945 | 0.040031 | 0.105965 | 0.105965 | 0.105965 | 0.105965 | 0.065934 | 0 | 0 | 0.046154 | 0.314535 | 2,181 | 91 | 77 | 23.967033 | 0.80602 | 0.220083 | 0 | 0.037037 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009524 | 0 | 0 | 1 | 0.018519 | false | 0.018519 | 0.12963 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04e0b45d4a2c91bb016d8c40ce99ce0942e046b2 | 7,728 | py | Python | evaluator/design_table.py | vita-epfl/Trajectory-Transformer | 922e17741c532678fd34b041257395acfaecec04 | [
"MIT"
] | null | null | null | evaluator/design_table.py | vita-epfl/Trajectory-Transformer | 922e17741c532678fd34b041257395acfaecec04 | [
"MIT"
] | null | null | null | evaluator/design_table.py | vita-epfl/Trajectory-Transformer | 922e17741c532678fd34b041257395acfaecec04 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from collections import OrderedDict
import pandas as pd
from evaluator.evaluator_helpers import Categories, Sub_categories, Metrics
class Table(object):
"""docstring for Table"""
def __init__(self, arg=None):
super(Table, self).__init__()
self.entries = {}
self.sub_entries = {}
self.arg = arg
self.results = {}
self.sub_results = {}
self.collision_test = {}
def add_collision_entry(self, name, result):
self.collision_test[name] = result
def add_entry(self, name, results):
final_results = []
sub_final_results = []
## Overall metrics ADE, FDE, ColI, ColII, Topk_ade, Topk_fde, NLL
table_metrics = Metrics(*([0]*8))
## Metrics for the 4 types of trajectories and interactions
table_categories = Categories(*[Metrics(*([0]*8)) for i in range(1,5)])
table_sub_categories = Sub_categories(*[Metrics(*([0]*8)) for i in range(1,5)])
for dataset, (metrics, categories, sub_categories) in results.items():
## Overall
table_metrics += metrics
## Main Types
table_categories.static_scenes += categories.static_scenes
table_categories.linear_scenes += categories.linear_scenes
table_categories.forced_non_linear_scenes += categories.forced_non_linear_scenes
table_categories.non_linear_scenes += categories.non_linear_scenes
## Sub Types
table_sub_categories.lf += sub_categories.lf
table_sub_categories.ca += sub_categories.ca
table_sub_categories.grp += sub_categories.grp
table_sub_categories.others += sub_categories.others
final_results += table_categories.static_scenes.avg_vals_to_list()
final_results += table_categories.linear_scenes.avg_vals_to_list()
final_results += table_categories.forced_non_linear_scenes.avg_vals_to_list()
final_results += table_categories.non_linear_scenes.avg_vals_to_list()
final_results += table_metrics.avg_vals_to_list()
sub_final_results += table_sub_categories.lf.avg_vals_to_list()
sub_final_results += table_sub_categories.ca.avg_vals_to_list()
sub_final_results += table_sub_categories.grp.avg_vals_to_list()
sub_final_results += table_sub_categories.others.avg_vals_to_list()
self.results[name] = final_results
self.sub_results[name] = sub_final_results
return final_results, sub_final_results
def add_result(self, name, final_results, sub_final_results):
self.results[name] = final_results
self.sub_results[name] = sub_final_results
def render_mpl_table(self, data, col_width=3.0, row_height=0.625, font_size=14,
header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',
bbox=[0, 0, 1, 1], header_columns=0,
ax=None, **kwargs):
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, cellLoc='center', **kwargs)
for (row, col), cell in mpl_table.get_celld().items():
if (row == 0) or (col == 1) or (col == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
return ax
def print_table(self):
fig = plt.figure(figsize=(20, 20))
# ------------------------------------------ TABLES -------------------------------------------
# Overall Table #
ax1 = fig.add_subplot(311)
ax1.axis('tight')
ax1.axis('off')
df = pd.DataFrame(columns=['', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL', 'Col_test'])
it = 0
len_name = 10
for key in self.results:
df.loc[it] = ['Overall'] + [key[:len_name]] + [self.results[key][index].__format__('.2f') for index in range(32, 40)] + [self.collision_test[key]]
it += 1
ax1 = self.render_mpl_table(df, header_columns=0, col_width=2.0, bbox=[0, 0.9, 1, 0.1*len(self.results)], ax=ax1)
ax2 = fig.add_subplot(312)
ax2.axis('tight')
ax2.axis('off')
        # Per-type breakdown table #
df = pd.DataFrame(columns=['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL'])
type_list = [['I', ''], ['II', ''], ['III', ''], ['III', 'LF'], ['III', 'CA'], ['III', 'Grp'], ['III', 'Oth'], ['IV', '']]
it = 0
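        # A fresh header row is appended between the blocks below so every
        # type section in the rendered figure repeats the column labels.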
##Type I
for key in self.results:
df.loc[it] = type_list[0] + [key[:len_name]] + [self.results[key][index].__format__('.2f') for index in range(8)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type II
for key in self.results:
df.loc[it] = type_list[1] + [key[:len_name]] + [self.results[key][index].__format__('.2f') for index in range(8, 16)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type III
for key in self.results:
df.loc[it] = type_list[2] + [key[:len_name]] + [self.results[key][index].__format__('.2f') for index in range(16, 24)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type III: LF
for key in self.results:
df.loc[it] = type_list[3] + [key[:len_name]] + [self.sub_results[key][index].__format__('.2f') for index in range(8)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type III: CA
for key in self.results:
df.loc[it] = type_list[4] + [key[:len_name]] + [self.sub_results[key][index].__format__('.2f') for index in range(8, 16)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type III: Grp
for key in self.results:
df.loc[it] = type_list[5] + [key[:len_name]] + [self.sub_results[key][index].__format__('.2f') for index in range(16, 24)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type III: Others
for key in self.results:
df.loc[it] = type_list[6] + [key[:len_name]] + [self.sub_results[key][index].__format__('.2f') for index in range(24, 32)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type IV
for key in self.results:
df.loc[it] = type_list[7] + [key[:len_name]] + [self.results[key][index].__format__('.2f') for index in range(24, 32)]
it += 1
ax2 = self.render_mpl_table(df, header_columns=0, col_width=2.0, bbox=[0, -1.6, 1, 0.6*len(self.results)], ax=ax2)
fig.savefig('Results.png')
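# Illustrative driver (hypothetical names; results_by_dataset must follow the
# (metrics, categories, sub_categories) per-dataset structure expected by add_entry):
# table = Table()
# table.add_entry('lstm', results_by_dataset)
# table.add_collision_entry('lstm', 0.12)
# table.print_table()  # writes Results.png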
| 43.41573 | 158 | 0.566511 | 1,049 | 7,728 | 3.954242 | 0.156339 | 0.050386 | 0.027001 | 0.039778 | 0.536644 | 0.502411 | 0.492285 | 0.492285 | 0.486017 | 0.474928 | 0 | 0.027826 | 0.260611 | 7,728 | 178 | 159 | 43.41573 | 0.69811 | 0.048395 | 0 | 0.31405 | 0 | 0 | 0.084711 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049587 | false | 0 | 0.049587 | 0 | 0.123967 | 0.008264 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04e1d79de3ac3f1998a9096e9d35c2880a417cdc | 4,667 | py | Python | bench/sample.py | MustardForBreakfast/safetywrap | 170f836e12df455aed9b6dce5e7c634f6b9e8f87 | [
"Apache-2.0"
] | 21 | 2019-10-31T17:43:18.000Z | 2022-03-19T13:46:05.000Z | bench/sample.py | MustardForBreakfast/safetywrap | 170f836e12df455aed9b6dce5e7c634f6b9e8f87 | [
"Apache-2.0"
] | null | null | null | bench/sample.py | MustardForBreakfast/safetywrap | 170f836e12df455aed9b6dce5e7c634f6b9e8f87 | [
"Apache-2.0"
] | 3 | 2019-11-01T17:50:07.000Z | 2021-12-15T07:23:21.000Z | """A benchmark to be run externally.
Executes a program that might make heavy use of Result/Option types
in one of two ways: classically, with exceptions, or using result types.
The program checks several data stores (in memory to minimize interference
from slow IO &c.) in order for a key. If it finds it, it gets the value,
adds something to it, and then overwrites the value.
"""
import sys
import typing as t
from timeit import timeit
from safetywrap import Some, Nothing, Ok, Err, Option, Result
T = t.TypeVar("T")
class ClassicalDataStore:
def __init__(self, values: dict = None) -> None:
self._values = values or {}
def connect(self, fail: bool = False) -> "ClassicalDataStore":
"""'Connect' to the store."""
if fail:
raise RuntimeError("Failed to connect")
return self
def get(self, key: str) -> t.Any:
"""Return a value from the store."""
return self._values.get(key)
def insert(self, key: str, val: T, overwrite: bool = False) -> T:
"""Insert the value and return it."""
if key in self._values and not overwrite:
raise KeyError("Key already exists")
self._values[key] = val
return val
class MonadicDataStore:
"""Using the monadic types."""
def __init__(self, values: dict = None) -> None:
self._values = values or {}
def connect(self, fail: bool = False) -> Result["MonadicDataStore", str]:
if fail:
return Err("failed to connect")
return Ok(self)
def get(self, key: str) -> Option[t.Any]:
"""Return a value from the store."""
if key in self._values:
return Some(self._values[key])
return Nothing()
def insert(
self, key: str, val: T, overwrite: bool = False
) -> Result[T, str]:
"""Insert the value and return it."""
if key in self._values and not overwrite:
return Err("Key already exists")
self._values[key] = val
return Ok(val)
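# A minimal sketch of the Result/Option flow exercised below:
# MonadicDataStore({"you": "me"}).connect().and_then(
#     lambda store: store.get("you").ok_or("no such val")
# )  # evaluates to Ok('me')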
class Classical:
"""Run the program in the classical way."""
def __init__(self) -> None:
self._stores = {
0: ClassicalDataStore(),
1: ClassicalDataStore(),
2: ClassicalDataStore(),
3: ClassicalDataStore({"you": "me"}),
}
def run(self) -> None:
"""Run the program."""
for store in self._stores.values():
try:
store = store.connect()
except RuntimeError:
continue
val = store.get("you")
if val is not None:
new_val = val + "et"
try:
inserted = store.insert("you", new_val)
except KeyError:
# oops, need to specify overwrite
inserted = store.insert("you", new_val, overwrite=True)
assert inserted == "meet"
break
else:
raise RuntimeError("Could not get value anywhere.")
class Monadic:
"""Use the monadic types."""
def __init__(self) -> None:
self._stores = {
0: MonadicDataStore(),
1: MonadicDataStore(),
2: MonadicDataStore(),
3: MonadicDataStore({"you": "me"}),
}
def run(self) -> None:
"""Run the program."""
for unconnected in self._stores.values():
connected = unconnected.connect()
if connected.is_err():
continue
store = connected.unwrap()
inserted = (
store.get("you")
.ok_or("no such val")
.map(lambda val: str(val + "et"))
.and_then(
lambda val: store.insert("you", val).or_else(
lambda _: store.insert("you", val, overwrite=True)
)
)
)
if inserted.is_ok():
assert inserted.unwrap() == "meet"
break
else:
raise RuntimeError("Could not get value anywhere")
if __name__ == "__main__":
to_run = sys.argv[1].lower()
switch: t.Dict[str, t.Callable[[], None]] = {
"classical": lambda: Classical().run(),
"monadic": lambda: Monadic().run(),
}
if to_run not in switch:
raise RuntimeError("No such method: {}".format(to_run))
if len(sys.argv) > 2 and sys.argv[2] == "timeit":
# run internal timings
NUMBER = int(1e6)
taken = timeit("switch[to_run]()", globals=globals(), number=NUMBER)
print(taken / NUMBER)
else:
switch[to_run]()
| 29.916667 | 77 | 0.543604 | 535 | 4,667 | 4.646729 | 0.263551 | 0.044248 | 0.017699 | 0.013274 | 0.341512 | 0.334674 | 0.28399 | 0.263073 | 0.209976 | 0.209976 | 0 | 0.004217 | 0.339404 | 4,667 | 155 | 78 | 30.109677 | 0.802141 | 0.150204 | 0 | 0.238095 | 0 | 0 | 0.07088 | 0 | 0 | 0 | 0 | 0 | 0.019048 | 1 | 0.114286 | false | 0 | 0.038095 | 0 | 0.27619 | 0.009524 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04e23249464eb1af09688cb5261f0910c0a92916 | 1,775 | py | Python | schemato/validationresult.py | emmett9001/schema.to | cfb5eb128faee391e1ac5c69093776b2d2cfd6b6 | [
"Apache-2.0"
] | 38 | 2015-01-16T11:28:12.000Z | 2017-10-26T07:24:32.000Z | schemato/validationresult.py | emmett9001/schema.to | cfb5eb128faee391e1ac5c69093776b2d2cfd6b6 | [
"Apache-2.0"
] | 11 | 2015-03-20T13:54:49.000Z | 2016-10-05T18:38:38.000Z | schemato/validationresult.py | emmett9001/schema.to | cfb5eb128faee391e1ac5c69093776b2d2cfd6b6 | [
"Apache-2.0"
] | 5 | 2015-02-17T18:08:35.000Z | 2021-12-02T12:33:07.000Z | import json
class ValidationResult(object):
ERROR = 1
WARNING = 2
def __init__(self, namespace, classname):
super(ValidationResult, self).__init__()
self.warnings = []
self.errors = []
self.namespace = namespace
self.classname = classname
def add_error(self, warning):
if warning:
if warning.level == ValidationResult.WARNING:
self.warnings.append(warning)
elif warning.level == ValidationResult.ERROR:
self.errors.append(warning)
def to_json(self):
mapping = self.to_dict()
return json.dumps(mapping)
def to_dict(self):
mapping = {}
mapping['warnings'] = []
for warning in self.warnings:
mapping['warnings'].append(warning.to_dict())
mapping['errors'] = []
for error in self.errors:
mapping['errors'].append(error.to_dict())
mapping['namespace'] = self.namespace
mapping['classname'] = self.classname
return mapping
def __len__(self):
return len(self.warnings) + len(self.errors)
class ValidationWarning(object):
def __init__(self, level, string, line, line_num):
super(ValidationWarning, self).__init__()
self.level = level
self.string = string
self.line_num = line_num
self.line_text = line
def to_dict(self):
mapping = {}
mapping['level'] = \
"Error" if self.level == ValidationResult.ERROR else "Warning"
mapping['string'] = self.string
mapping['line'] = self.line_text
mapping['num'] = self.line_num
return mapping
def to_json(self):
mapping = self.to_dict()
return json.dumps(mapping)
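# Typical flow (hypothetical caller; `line` is the offending source line text):
# result = ValidationResult("http://schema.org", "Article")
# result.add_error(ValidationWarning(ValidationResult.ERROR, "missing prop", line, 12))
# payload = result.to_json()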
| 28.629032 | 74 | 0.593239 | 189 | 1,775 | 5.386243 | 0.190476 | 0.035363 | 0.021611 | 0.02554 | 0.155206 | 0.155206 | 0.102161 | 0.102161 | 0.102161 | 0.102161 | 0 | 0.001597 | 0.294648 | 1,775 | 61 | 75 | 29.098361 | 0.811502 | 0 | 0 | 0.24 | 0 | 0 | 0.042817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.02 | 0.02 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04e2eacd84c84c864a83a13c251d959ab593d0a5 | 2,049 | py | Python | robosuite/scripts/playback_demonstrations_from_hdf5.py | gsp-27/robosuite | 642065ebba4ea0c2c1f3ae6e96a8f14262146019 | [
"MIT"
] | 1 | 2021-12-22T13:10:46.000Z | 2021-12-22T13:10:46.000Z | robosuite/scripts/playback_demonstrations_from_hdf5.py | gsp-27/robosuite | 642065ebba4ea0c2c1f3ae6e96a8f14262146019 | [
"MIT"
] | null | null | null | robosuite/scripts/playback_demonstrations_from_hdf5.py | gsp-27/robosuite | 642065ebba4ea0c2c1f3ae6e96a8f14262146019 | [
"MIT"
] | 1 | 2020-12-29T01:38:01.000Z | 2020-12-29T01:38:01.000Z | """
A convenience script to playback random demonstrations from
a set of demonstrations stored in a hdf5 file.
Example:
$ python playback_demonstrations_from_hdf5.py --folder ../models/assets/demonstrations/SawyerPickPlace/
"""
import os
import h5py
import argparse
import random
import numpy as np
import robosuite
from robosuite.utils.mjcf_utils import postprocess_model_xml
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--folder",
type=str,
default=os.path.join(
robosuite.models.assets_root, "demonstrations/SawyerNutAssembly"
),
)
args = parser.parse_args()
demo_path = args.folder
hdf5_path = os.path.join(demo_path, "demo.hdf5")
f = h5py.File(hdf5_path, "r")
env_name = f["data"].attrs["env"]
env = robosuite.make(
env_name,
has_renderer=True,
ignore_done=True,
use_camera_obs=False,
gripper_visualization=True,
reward_shaping=True,
control_freq=100,
)
# list of all demonstrations episodes
demos = list(f["data"].keys())
while True:
print("Playing back random episode... (press ESC to quit)")
        # select an episode randomly
ep = random.choice(demos)
# read the model xml, using the metadata stored in the attribute for this episode
model_file = f["data/{}".format(ep)].attrs["model_file"]
model_path = os.path.join(demo_path, "models", model_file)
with open(model_path, "r") as model_f:
model_xml = model_f.read()
env.reset()
xml = postprocess_model_xml(model_xml)
env.reset_from_xml_string(xml)
env.viewer.set_camera(0)
# load the flattened mujoco states
states = f["data/{}/states".format(ep)].value
# force the sequence of internal mujoco states one by one
for state in states:
env.sim.set_state_from_flattened(state)
env.sim.forward()
env.render()
f.close()
| 28.068493 | 107 | 0.644705 | 261 | 2,049 | 4.873563 | 0.45977 | 0.031447 | 0.023585 | 0.022013 | 0.034591 | 0.034591 | 0 | 0 | 0 | 0 | 0 | 0.007194 | 0.253782 | 2,049 | 72 | 108 | 28.458333 | 0.824722 | 0.223524 | 0 | 0 | 0 | 0 | 0.099493 | 0.020279 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.145833 | 0 | 0.145833 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04e8053cd909a8f30dd024e9c4b9845248ff59fe | 1,518 | py | Python | awacs/xray.py | mprince/awacs | f6a16af326ac7fd11e2e2be3a48180475f150611 | [
"BSD-2-Clause"
] | null | null | null | awacs/xray.py | mprince/awacs | f6a16af326ac7fd11e2e2be3a48180475f150611 | [
"BSD-2-Clause"
] | null | null | null | awacs/xray.py | mprince/awacs | f6a16af326ac7fd11e2e2be3a48180475f150611 | [
"BSD-2-Clause"
] | 1 | 2020-04-03T06:37:42.000Z | 2020-04-03T06:37:42.000Z | # Copyright (c) 2012-2013, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from aws import Action as BaseAction
from aws import BaseARN
service_name = 'AWS XRay'
prefix = 'xray'
class Action(BaseAction):
def __init__(self, action=None):
sup = super(Action, self)
sup.__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource='', region='', account=''):
sup = super(ARN, self)
sup.__init__(service=prefix, resource=resource, region=region,
account=account)
BatchGetTraces = Action('BatchGetTraces')
CreateGroup = Action('CreateGroup')
CreateSamplingRule = Action('CreateSamplingRule')
DeleteGroup = Action('DeleteGroup')
DeleteSamplingRule = Action('DeleteSamplingRule')
GetEncryptionConfig = Action('GetEncryptionConfig')
GetGroup = Action('GetGroup')
GetGroups = Action('GetGroups')
GetSamplingRules = Action('GetSamplingRules')
GetSamplingStatisticSummaries = Action('GetSamplingStatisticSummaries')
GetSamplingTargets = Action('GetSamplingTargets')
GetServiceGraph = Action('GetServiceGraph')
GetTimeSeriesServiceStatistics = Action('GetTimeSeriesServiceStatistics')
GetTraceGraph = Action('GetTraceGraph')
GetTraceSummaries = Action('GetTraceSummaries')
PutEncryptionConfig = Action('PutEncryptionConfig')
PutTelemetryRecords = Action('PutTelemetryRecords')
PutTraceSegments = Action('PutTraceSegments')
UpdateGroup = Action('UpdateGroup')
UpdateSamplingRule = Action('UpdateSamplingRule')
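# These Action instances serialize to IAM action strings of the form 'xray:<name>',
# e.g. PutTraceSegments -> 'xray:PutTraceSegments', when rendered by the awacs
# policy classes (assumed awacs behavior; see the aws base module).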
| 33 | 73 | 0.763505 | 133 | 1,518 | 8.586466 | 0.428571 | 0.014011 | 0.022767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006029 | 0.125823 | 1,518 | 45 | 74 | 33.733333 | 0.854559 | 0.069829 | 0 | 0 | 0 | 0 | 0.24236 | 0.041933 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.060606 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04eacbed2960320b74f280d23dd6e09fb5dbbb17 | 6,839 | py | Python | cmake/CMakeBuild/bin/pyhhi/cmbuild/bjambld.py | myfan/VTM-1.1_sKLT | adf549a14e630167fe22f9b8c73a307174160d7c | [
"BSD-3-Clause"
] | null | null | null | cmake/CMakeBuild/bin/pyhhi/cmbuild/bjambld.py | myfan/VTM-1.1_sKLT | adf549a14e630167fe22f9b8c73a307174160d7c | [
"BSD-3-Clause"
] | null | null | null | cmake/CMakeBuild/bin/pyhhi/cmbuild/bjambld.py | myfan/VTM-1.1_sKLT | adf549a14e630167fe22f9b8c73a307174160d7c | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import os
import shutil
import subprocess
import logging
import pyhhi.build.common.ver as ver
import pyhhi.build.common.bldtools as bldtools
from pyhhi.build.common.system import SystemInfo
class BjamBuilder(object):
"""The BjamBuilder class supports building a new bjam executable."""
def __init__(self, sys_info, top_dir, bb_version):
self._logger = logging.getLogger(__name__)
bjam_src_tree_list = []
self._sys_info = sys_info
self._bjam_src_dir = None
self._top_dir = top_dir
self._bb_version = bb_version
self._toolset = None
self._tmp_dirs = []
if self._sys_info.is_windows():
self._bjam_names = ('b2.exe', 'bjam.exe')
else:
self._bjam_names = ('b2', 'bjam')
if sys_info.is_windows():
build_script = 'build.bat'
else:
build_script = 'build.sh'
# the bjam source is supposed to come from the boost source tree.
assert bb_version is not None
boost_tools_dir = os.path.join(self._top_dir, 'tools')
bjam_src_tree_list.append(os.path.join(boost_tools_dir, 'build', 'src', 'engine'))
bjam_src_tree_list.append(os.path.join(boost_tools_dir, 'build', 'v2', 'engine'))
bjam_src_tree_list.append(os.path.join(boost_tools_dir, 'build', 'v2', 'engine', 'src'))
bjam_src_tree_list.append(os.path.join(boost_tools_dir, 'jam', 'src'))
for d in bjam_src_tree_list:
# check for the build script to figure out which source location holds the bjam source files.
if os.path.exists(os.path.join(d, build_script)):
self._bjam_src_dir = d
break
if self._bjam_src_dir is not None:
# create a new bldtools suitable to build bjam on this platform.
self._toolset = bldtools.BjamToolset(sys_info, bb_version)
def build(self, target_arch='x86_64'):
"""Builds the b2 executable from source and returns the full path to the executable."""
assert self._bjam_src_dir is not None
if self._sys_info.is_windows() and (ver.version_compare(self._bb_version, (1, 66, 0)) >= 0):
target_arch = 'x86'
# create a new list of temporary directories to be removed after the bjam executable has been installed.
self._tmp_dirs = []
bjam_bin_dir = os.path.join(self._bjam_src_dir, self._get_bjam_bin_dir_folder(target_arch))
self._tmp_dirs.append(bjam_bin_dir)
b2_prog_path = os.path.join(bjam_bin_dir, self._bjam_names[0])
bjam_prog_path = os.path.join(bjam_bin_dir, self._bjam_names[1])
bootstrap_dir = os.path.join(self._bjam_src_dir, 'bootstrap')
self._tmp_dirs.append(bootstrap_dir)
if os.path.exists(bootstrap_dir):
# in case a previous build failed to remove the temporary files, remove bootstrap completely.
shutil.rmtree(bootstrap_dir)
cur_dir = os.getcwd()
os.chdir(self._bjam_src_dir)
print("========================================================")
print("Start building bjam in", self._bjam_src_dir, "...")
print("========================================================")
build_script_args = []
if self._sys_info.is_windows():
build_script = os.path.join(self._bjam_src_dir, 'build.bat')
build_script_args.append(build_script)
bjam_toolset_arg = self._toolset.get_bjam_toolset(build_script_format=True)
build_script_args.append(bjam_toolset_arg)
if target_arch == 'x86_64':
# build.bat builds a 32 bit b2 executable by default but we prefer a native b2.
if bjam_toolset_arg in ['vc141', 'vc14']:
build_script_args.append('amd64')
else:
build_script_args.append('x86_amd64')
else:
build_script = os.path.join(self._bjam_src_dir, 'build.sh')
build_script_args.append(build_script)
retv = subprocess.call(build_script_args)
if retv != 0:
raise Exception("Building bjam failed. Please contact technical support.")
# restore the previous current working directory
os.chdir(cur_dir)
if os.path.exists(b2_prog_path):
return b2_prog_path
elif os.path.exists(bjam_prog_path):
return bjam_prog_path
else:
assert False
return None
def remove_tmp_files(self):
"""Removes all temporary files created by the bjam build script."""
for d in self._tmp_dirs:
if os.path.exists(d):
try:
shutil.rmtree(d)
                except OSError as exc:
                    # WindowsError is undefined on POSIX; OSError covers both platforms.
                    print("WARNING: ignoring spurious windows error [" + str(getattr(exc, 'winerror', exc.errno)) + "]: " + str(exc.strerror) + " raised by shutil.rmtree().")
if os.path.exists(d):
file_list = os.listdir(d)
if file_list:
print("The directory '" + d + "' is not empty for unknown reason: ", file_list)
self._tmp_dirs = []
def _get_bjam_bin_dir_folder(self, target_arch='x86_64'):
if self._sys_info.is_windows():
bin_dir = 'bin.nt' + target_arch
elif self._sys_info.is_linux():
bin_dir = 'bin.linux' + target_arch
elif self._sys_info.is_macosx():
bin_dir = 'bin.macosx' + target_arch
else:
assert False
return bin_dir
class BjamLauncher(object):
def __init__(self, sys_info=None, verbosity=1):
self._logger = logging.getLogger(__name__)
if sys_info is None:
sys_info = SystemInfo()
self._sys_info = sys_info
self._verbosity_level = verbosity
def get_optimal_number_bjam_jobs(self):
"""Returns the optimal number of bjam jobs."""
bjam_jobs = self._sys_info.get_number_processors()
if 'BJAM_MAX_JOBS' in os.environ:
bjam_max_jobs = int(os.environ['BJAM_MAX_JOBS'], 10)
if bjam_jobs > bjam_max_jobs:
bjam_jobs = bjam_max_jobs
assert bjam_jobs >= 1
return bjam_jobs
def launch(self, argv):
"""Launch a bjam build and block until it terminates."""
if self._verbosity_level > 0:
# assemble the bjam command line for logging purposes
joiner = ' '
cmd_line = joiner.join(argv)
print("Launching: " + cmd_line)
retv = subprocess.call(argv)
if retv < 0:
self._logger.debug("child was terminated by signal: %d", -retv)
else:
self._logger.debug("child returned: %d", retv)
return retv
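# Illustrative driver (hypothetical boost_dir; assumes an unpacked boost source tree):
# builder = BjamBuilder(SystemInfo(), top_dir=boost_dir, bb_version=(1, 66, 0))
# b2_path = builder.build()
# builder.remove_tmp_files()
# launcher = BjamLauncher()
# launcher.launch([b2_path, '-j{}'.format(launcher.get_optimal_number_bjam_jobs())])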
| 39.531792 | 146 | 0.607106 | 895 | 6,839 | 4.338547 | 0.231285 | 0.027814 | 0.030904 | 0.036055 | 0.288694 | 0.193665 | 0.136235 | 0.110482 | 0.096575 | 0.096575 | 0 | 0.010419 | 0.284252 | 6,839 | 172 | 147 | 39.761628 | 0.78284 | 0.130136 | 0 | 0.193798 | 0 | 0 | 0.09973 | 0.018932 | 0 | 0 | 0 | 0 | 0.03876 | 1 | 0.054264 | false | 0 | 0.062016 | 0 | 0.178295 | 0.054264 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04ebbba0f5550d29b2188709fea832733cf15cb6 | 2,773 | py | Python | Exercise 2-3.py | Falli0o/linear_regression | b21a682bae9e4a3d5aa8aa6223233aa6dea8749c | [
"Apache-2.0"
] | null | null | null | Exercise 2-3.py | Falli0o/linear_regression | b21a682bae9e4a3d5aa8aa6223233aa6dea8749c | [
"Apache-2.0"
] | null | null | null | Exercise 2-3.py | Falli0o/linear_regression | b21a682bae9e4a3d5aa8aa6223233aa6dea8749c | [
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# In[ ]:
from __future__ import division
get_ipython().magic(u'matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import math
import multivarlinreg
import rmse
# In[ ]:
#Linear regression
red_train = np.loadtxt('redwine_training.txt')
red_test = np.loadtxt('redwine_testing.txt')
red_train_data = red_train[:, :11]
red_train_score = red_train[:, 11]
red_test_data = red_test[:, :11]
red_test_score = red_test[:, 11]
#red_train.shape
# In[ ]:
"""
def multivarlinreg(data, ground_truth):
#data = full_data[:, :-1]
X = np.hstack((data, np.repeat(1, data.shape[0]).reshape(-1, 1)))
X_T_X = np.dot(X.T, X)
# if full-rank matrix or positive definite matrix:
#check if it invertible
if np.linalg.det(X_T_X) != 0:
inverse = np.linalg.inv(X_T_X)
w = np.dot(np.dot(inverse, X.T), ground_truth) #w0 at the last column
#print w
return w
else:
print "use other method"
"""
# In[ ]:
#only contains the first feature (fixed acidity)
train_fixed_acidity = red_train_data[:, 0].reshape(-1, 1)
train_w_acidity = multivarlinreg.multivarlinreg(train_fixed_acidity, red_train_score)
train_w_acidity
#the weight on acidity is small while the bias term dominates,
#so fixed acidity alone cannot predict the wine's quality very well
#array([0.05035934, 5.2057261 ])
# In[ ]:
#physiochemical
w_all = multivarlinreg.multivarlinreg(red_train_data, red_train_score)
w_all.shape
np.set_printoptions(suppress=True)
w_all
#weights can be positive or negative: positive ones raise the predicted score, negative ones lower it
#the first weight for acidity is changed
#Some features play important roles in wine's quality. Some features are negatively related.
# In[ ]:
"""#Exercise 3 (Evaluating Linear Regression).
def rmse(predicted_value, ground_truth):
diff = ground_truth - predicted_value
diff_square = np.dot(diff, diff)
#rmse = np.sqrt(np.divide(diff_square, ground_truth.shape[0]))
rmse = np.sqrt(diff_square/ground_truth.shape[0])
return rmse
"""
# In[ ]:
#1-dimensional input variables using the training set
#first feature for the test set
test_fixed_acidity = red_test_data[:, 0].reshape(-1, 1)
test_X_acidity = np.hstack((test_fixed_acidity, np.repeat(1, test_fixed_acidity.shape[0]).reshape(-1, 1)))
predicted_score_acidity = np.dot(test_X_acidity, train_w_acidity.T)
#predicted_score_acidity = predicted_value(train_fixed_acidity, test_fixed_acidity, red_test_score)
rmse.rmse(predicted_score_acidity, red_test_score)
#0.7860892754162216
# In[ ]:
#full 11-dimensional input variables
test_X = np.hstack((red_test_data, np.repeat(1, red_test_data.shape[0]).reshape(-1, 1)))
predicted_score = np.dot(test_X, w_all.T)
rmse.rmse(predicted_score, red_test_score)
#0.644717277241364
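# In[ ]:
#for reference, the closed forms used above:
#w = (X^T X)^{-1} X^T y  (ordinary least squares, with a bias column appended to X)
#RMSE = sqrt(sum((y_true - y_pred)^2) / n)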
| 25.440367 | 106 | 0.728814 | 434 | 2,773 | 4.426267 | 0.31106 | 0.040083 | 0.023425 | 0.026028 | 0.155648 | 0.070276 | 0.030193 | 0 | 0 | 0 | 0 | 0.03658 | 0.152182 | 2,773 | 108 | 107 | 25.675926 | 0.780519 | 0.268301 | 0 | 0 | 0 | 0 | 0.046784 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04ee4a305162e4e3bdcb164213ede6ebc3c2c03c | 3,260 | py | Python | Sketches/JL/IRC/Translator.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/JL/IRC/Translator.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/JL/IRC/Translator.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Axon.Component import component
class Translator(component):
Inboxes = {"inbox" : " standard inbox",
"control": "shutdown messages"}
Outboxes = {"outbox": "",
"signal" : ""}
def __init__(self, nick):
super(Translator, self).__init__()
self.nick = nick
def main(self):
while 1:
if not self.anyReady():
self.pause()
yield 1
data = ""
if self.dataReady('privmsg'):
formatted = self.formatPrivmsg(self.recv('privmsg'))
self.send(formatted)
if self.dataReady('channel'):
formatted = self.formatChannelMsg(self.recv('channel'))
self.send(formatted)
if self.dataReady('nonPrivmsg'):
                formatted = self.formatMisc(self.recv('nonPrivmsg'))
self.send(formatted)
if self.dataReady('notice'):
formatted = self.formatNotice(self.recv('notice'))
self.send(formatted)
if self.dataReady('ERR'):
formatted = self.formatError(self.recv('ERR'))
self.send(formatted)
if self.dataReady('RPL'):
formatted = self.formatNumReply(self.recv('RPL'))
self.send(formatted)
def formatPrivmsg(self, msg):
temp, sender, recipient, body = msg
        if body.startswith('ACTION'):
            send = "*** %s %s" % (sender, body[body.find('ACTION') + 7:])
else:
send = "%s: %s" % (sender, body)
return send
def formatChannelMsg(self, msg):
return msg
def formatMisc(self, msg):
return msg
def formatNotice(self, msg):
return msg
def formatError(self, msg):
return msg
def formatNumReply(self, msg):
return msg
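    # For example (assuming the tuple layout consumed above):
    # formatPrivmsg(('PRIVMSG', 'alice', '#kamtest', 'hello')) -> 'alice: hello'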
if __name__ == '__main__':
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
from Kamaelia.Chassis.Graphline import Graphline
from Kamaelia.Chassis.Pipeline import Pipeline
from Prefab import ComplexIRCClientPrefab
client = Graphline(
prefab = ComplexIRCClientPrefab(host="irc.freenode.net", nick="kamaeliabot", defaultChannel="#kamtest"),
formatter = Translator("kamaeliabot"),
linkages = {("prefab", "outbox") : ("formatter", "privmsg")}
)
Pipeline(ConsoleReader(), client, ConsoleEchoer()).run()
| 34.315789 | 112 | 0.607975 | 353 | 3,260 | 5.569405 | 0.424929 | 0.030519 | 0.045778 | 0.048321 | 0.151577 | 0.096643 | 0.047813 | 0.047813 | 0.047813 | 0 | 0 | 0.006397 | 0.280675 | 3,260 | 94 | 113 | 34.680851 | 0.831983 | 0.235583 | 0 | 0.180328 | 0 | 0 | 0.094622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131148 | false | 0 | 0.081967 | 0.081967 | 0.360656 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04efafcb6db1afd7a3030ec016d52f31b51e110e | 11,926 | py | Python | ClientSide2.py | MatthewGong/DiffractionClassification | 68be6cf3960f09388253c79bab13cbd9dc07edbb | [
"MIT"
] | 7 | 2019-08-14T18:14:06.000Z | 2021-01-14T01:43:50.000Z | ClientSide2.py | MatthewGong/DiffractionClassification | 68be6cf3960f09388253c79bab13cbd9dc07edbb | [
"MIT"
] | null | null | null | ClientSide2.py | MatthewGong/DiffractionClassification | 68be6cf3960f09388253c79bab13cbd9dc07edbb | [
"MIT"
] | 2 | 2019-11-06T20:19:23.000Z | 2020-08-04T21:06:36.000Z | from __future__ import print_function
from __future__ import division
from Notation import SpaceGroupsDict as spgs
SpGr = spgs.spacegroups()
notation_dictionary = spgs.spacegroups()
import PeakFinding2 as pfnd #custom library to handle the functions behind Find_Peaks
import UniversalLoader2 as uvll #custom library to handle the functions behind UniversalLoader
import requests
import numpy as np
def Load_Profile(path,get_metadata=False):
"""
    Loads a diffraction profile and extracts the relevant metadata based on file type
Inputs:
path : string, contains the location of the file on the local machine
Outputs:
        profile : np.array, the intensity values of the radial profile
        scale : np.array, the corresponding scale axis (d-spacing / two-theta)
"""
"""
NEEDS A BUNCH OF CONDITIONALS TO DETERMINE
THE NATURE OF THE DATA, BEING IN SOOOOO MANY FORMS
"""
valid_filetypes={".csv":uvll.csv_extract,
".txt":uvll.txt_extract}
file_type = path[-4:]
# Check that the provided file is a supported file type
if file_type in valid_filetypes.keys():
# Call the appropriate extraction function
profile,scale = valid_filetypes[file_type](path)
else:
raise ValueError("Unsupported file type: please use a {}".format(valid_filetypes.keys()))
return profile,scale
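# Illustrative call (hypothetical file name; the extractor must return (profile, scale)):
# profile, scale = Load_Profile('scan.csv')
# peak_locs, peaks_h = Find_Peaks(profile, scale, max_numpeaks=50, dspace_range=[0.5, 6])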
def Find_Peaks(profile, scale, **kwargs):
"""
Pulls out the peaks from a radial profile
Inputs:
profile : dictionary, contains intensity profile and pixel scale of
diffraction pattern
calibration : dictionary, contains camera parameters to scale data
properly in two theta space
is_profile : boolean, changes processing for profiles vs 2D patterns
scale_bar : string, determines which conversions need to be run
to convert to two theta
display_type: string, determines which plots to show
Outputs:
peak_locs : dictionary, contains two_theta, d_spacings, and input_vector arrays
peaks locations found in the profile
"""
max_numpeaks = kwargs.get('max_numpeaks', 75)
scale_range = kwargs.get('dspace_range',[0.5, 6])
    squished_scale = [scale_range[0] < x < scale_range[1] for x in scale]
print(squished_scale)
filter_size_default=max(int(scale[squished_scale].shape[0]/50),3)
print(filter_size_default)
kwargs['filter_size'] = kwargs.get('filter_size',filter_size_default)
print('filter size')
print(kwargs['filter_size'])
# find the location of the peaks in pixel space
peaks = pfnd.vote_peaks(profile[squished_scale], **kwargs)
peaks_d = scale[squished_scale][peaks>0]
scale_d = scale
thresh = 0
orig_length = len(peaks_d)
if len(peaks_d) > max_numpeaks:
print(len(peaks_d))
print("WARNING: {} peaks were detected," +
" some of the peaks will be trimmed."+
"\nFor best results. Please check calibration or run manual peak detection.".format(len(peaks_d)))
srt_peaks = np.sort(peaks[peaks>0])
thresh = srt_peaks[len(peaks_d)-max_numpeaks]
if len(scale[squished_scale][peaks>thresh]) ==0 and thresh>0:
thresh -=1
peaks_d = scale[squished_scale][peaks>thresh]
print(len(peaks_d))
print(thresh)
print(srt_peaks)
if len(peaks_d) == orig_length:
print("WARNING: reduction based on votes unsuccessful. try other parameters")
elif len(peaks_d)> max_numpeaks:
print("WARNING: partial reduction to {} peaks.".format(len(peaks_d)))
peak_locs = {"d_spacing":scale[squished_scale][peaks>thresh],
"vec":[int(round((x-.5)*164))-1 for x in peaks_d]
}
# Display the data
peaks_h = pfnd.plot_peaks(profile[squished_scale], scale[squished_scale], peaks, thresh, **kwargs)
if len(peak_locs['vec']) <= 4:
print("WARNING: only {} peaks were detected," +
" this is lower than the recommended 4+ peaks needed"+
"\nFor best results. Please check calibration.".format(len(peaks_d)))
return peak_locs, peaks_h
def find_name_in_dict(name, mapping):
    # Return the key whose value equals `name`, or False if it is absent.
    for ind, nm in mapping.items():
        if nm == name:
            return ind
    return False
def Send_For_Classification(peak_locations, chem_vec, mode, crystal_family, user_info, URL, prediction_per_level, fam=None):
"""
Input:
peak_locs : dictionary, contains two_theta, d_spacings, and input_vector arrays
peaks locations found in the profile
user_info : dictionary, contains user profile information for tracking
and security purposes
Outputs:
payload : dictionary, contains classification statistics and predictions
Calls:
URL: POST, sends peak locations to the server for classification
"""
int_to_fam = {0:"triclinic",
1:"monoclinic",
2:"orthorhombic",
3:"tetragonal",
4:"trigonal",
5:"hexagonal",
6:"cubic"}
payload = {'peaks':peak_locations['vec'],
'chemistry':chem_vec,
'level':"Family",
'mode': mode,
'number':0
}
# print(payload)
payload['prediction_per_level'] = prediction_per_level
skip_family = False
# reproduce the gen 1 ability to specify the family to look it. Use this if the family prediction seems suspect.
if crystal_family:
print(" setting the family to search in is old functionality that is no longer needed for most predictions")
number = find_name_in_dict(crystal_family,int_to_fam)
if number:
payload['family'] = crystal_family
payload['family_1'] = crystal_family
payload['fam_confidence_1'] = float("nan")
payload['number'] = number+1
skip_family = True
payload = Classify_Family(payload, user_info, URL, 1, 1)
for k in range(1,prediction_per_level[0]):
payload['family_'+str(1+k)] = float("nan")
payload['fam_confidence_'+str(1+k)] = float("nan")
for l in range(0,prediction_per_level[1]):
num_l = (k)*prediction_per_level[1]+l+1
payload['genus_'+str(num_l)] = float("nan")
payload['gen_confidence_'+str(num_l)] = float("nan")
for m in range(0,prediction_per_level[2]):
num_m = (num_l-1)*prediction_per_level[2]+m+1
payload['species_'+str(num_m)] = float("nan")
payload['spec_confidence_'+str(num_m)] = float("nan")
else:
print("family name not recognized, ignoring input.")
if not skip_family:
print(requests.post(URL+"predict", json=payload).text)
family = requests.post(URL+"predict", json=payload).json()
print(family['votes'])
fam_votes = family['votes']
pred = []
# pred.append(np.argmax(family['votes']))
# payload['family_1'] = int_to_fam[pred[0]]
fam_confidence = confidence(fam_votes)
# payload['fam_confidence_1'] = fam_confidence[pred[0]]
# payload['number'] = int(pred[0])+1
#
# print(pred[0])
# Classify_Family(peak_locations,payload,user_info,URL,1)
# print(fam_confidence)
# print(payload)
for k in range(prediction_per_level[0]):
pred.append(np.argmax(fam_votes))
payload['family'] = int_to_fam[pred[k]]
payload['family_'+str(k+1)] = int_to_fam[pred[k]]
payload['fam_confidence_'+str(k+1)] =fam_confidence[pred[k]]
payload['number'] = int(pred[k])+1
w = fam_confidence[pred[k]]
payload = Classify_Family(payload, user_info, URL, w, k+1)
# for next iteration
fam_votes[pred[k]] = -float("inf")
# print(pred[k])
# print(fam_votes)
return payload
def confidence(array):
    # Softmax-style normalization: maps raw vote counts onto (0, 1) values that sum to 1.
np_array = np.array(array)
total = np.sum(np.exp(np_array))
# total = np.sum(np_array[np_array>0])
# print('softmax -')
# print(np_array)
# print(total)
# print(np.exp(np_array)/total)
#L = -np_array+np.log(total)
#L = -np.log(np.exp(np_array)/total)
L = np.exp(np_array)/total
# L = np_array/total
return L
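# Worked example: confidence([0, 0]) -> array([0.5, 0.5]); equal votes share the
# probability mass because exp(0) / (exp(0) + exp(0)) = 0.5.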
def Classify_Family(payload, user_info, URL, weight, pred_number):
payload['level'] = "Genera"
# Once the family is known, predicts the genus
# print(requests.post(URL+"predict", json=payload,timeout=30))
print("----")
print(payload)
genus = requests.post(URL+"predict", json=payload,timeout=30).json()
print("---genus---")
# print(genus['votes'])
# genera_votes = np.sum(genus['votes'],axis=0).tolist()
# genera_votes_1 = int(np.argmax(genus['votes']))
genera_votes = genus['votes']
genera_con = confidence(genera_votes)
pred=[]
genera_pred = []
for k in range(payload['prediction_per_level'][1]):
pred.append(int(np.argmax(genera_votes)))
# print(pred[k])
g_pred_num = (pred_number-1)*payload['prediction_per_level'][1]+k+1
genera_pred.append(pred[k]+ notation_dictionary.edges["genus"][payload['family']][0])
payload['genus_'+str(g_pred_num)] = genera_pred[k]
payload['gen_confidence_'+str(g_pred_num)] = genera_con[pred[k]] * weight
payload['number'] = genera_pred[k]
# print('genus prediction = ',genera_pred[k])
# print('genus_number = ',g_pred_num)
w = genera_con[pred[k]] * weight
payload = Classify_Genus(payload,user_info,URL,w, g_pred_num)
genera_votes[pred[k]] = - float("inf")
# print(pred[k])
# print(genera_votes)
return payload
def Classify_Genus(payload, user_info, URL, weight, pred_number):
# species prediction 1
print("---species ---")
payload['level'] = "Species"
# print(requests.post(URL+"predict", json=payload,timeout=30))
species = requests.post(URL+"predict", json=payload,timeout=30).json()
# print(species)
# print(species['votes'])
# Formatting the response to be saved more easily
species_votes = species['votes']
spec_confidence = confidence(species_votes)
pred = []
species_pred = []
# print(payload)
for k in range(payload['prediction_per_level'][2]):
pred.append(int(np.argmax(species_votes)))
species_pred.append(pred[k] + notation_dictionary.edges["species"][payload['genus_'+str(pred_number)]][0])
num = (pred_number-1)*payload['prediction_per_level'][2]+k+1
# print('species number = ',num)
payload["species_"+str(num)] = species_pred[k]
payload["spec_confidence_"+str(num)] = spec_confidence[pred[k]] * weight
payload["hall_"+str(num)] = SpGr.sgs_to_group[str(species_pred[k])]
species_votes[pred[k]] = -float("inf")
return payload
| 32.673973 | 124 | 0.606322 | 1,491 | 11,926 | 4.663984 | 0.205902 | 0.015099 | 0.03365 | 0.015531 | 0.303422 | 0.222174 | 0.156888 | 0.109002 | 0.080242 | 0.041703 | 0 | 0.011397 | 0.27897 | 11,926 | 364 | 125 | 32.763736 | 0.797302 | 0.28811 | 0 | 0.060976 | 0 | 0 | 0.147232 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042683 | false | 0 | 0.042683 | 0 | 0.128049 | 0.128049 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04f023bee1d7ff03aceebf8ab92a9d06c9bdc3a1 | 802 | py | Python | examples/application_commands/choices.py | freepizzas-dev/nextcord | 58dd18d4d60d50a0462e87b2400db980e73da72c | [
"MIT"
] | null | null | null | examples/application_commands/choices.py | freepizzas-dev/nextcord | 58dd18d4d60d50a0462e87b2400db980e73da72c | [
"MIT"
] | null | null | null | examples/application_commands/choices.py | freepizzas-dev/nextcord | 58dd18d4d60d50a0462e87b2400db980e73da72c | [
"MIT"
] | null | null | null | import nextcord
from nextcord import SlashOption
from nextcord.interactions import Interaction
client = nextcord.Client()
@client.slash_command(guild_ids=[...])  # Limits the guilds in which this command is registered
async def choose_a_number(
interaction: Interaction,
number: str = SlashOption(
name="picker",
description="The number you want",
choices={"1": 1, "2": 2, "3": 3},
),
):
await interaction.response.send_message(f"You chose {number}!")
@client.slash_command(guild_ids=[...])  # Limits the guilds in which this command is registered
async def hi(
interaction: Interaction,
member: nextcord.Member = SlashOption(name="user", description="the user to say hi to"),
):
await interaction.response.send_message(f"{interaction.user} just said hi to {member.mention}")
client.run("TOKEN")
| 27.655172 | 99 | 0.699501 | 101 | 802 | 5.475248 | 0.475248 | 0.0434 | 0.065099 | 0.083183 | 0.256781 | 0.256781 | 0.126582 | 0 | 0 | 0 | 0 | 0.009023 | 0.170823 | 802 | 28 | 100 | 28.642857 | 0.822556 | 0.067332 | 0 | 0.285714 | 0 | 0 | 0.171812 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04f24dbabf997234bb3f9394b5d4778cde915e92 | 729 | py | Python | debug/testUSB.py | sgoadhouse/dcps-visa | 3021168bb15bb66fdd74d2a235d4dc3e0e3a4733 | [
"MIT"
] | 2 | 2019-10-30T08:23:14.000Z | 2021-09-15T14:20:53.000Z | debug/testUSB.py | sgoadhouse/dcps-visa | 3021168bb15bb66fdd74d2a235d4dc3e0e3a4733 | [
"MIT"
] | null | null | null | debug/testUSB.py | sgoadhouse/dcps-visa | 3021168bb15bb66fdd74d2a235d4dc3e0e3a4733 | [
"MIT"
] | 6 | 2019-10-11T18:52:40.000Z | 2022-03-28T10:14:38.000Z |
import usb.core
import usb.util
import sys
# got these using the command lsusb -vv
VENDOR_ID = 0x1AB1
PRODUCT_ID = 0x0E11
DATA_SIZE = 1
device = usb.core.find(idVendor=VENDOR_ID, idProduct=PRODUCT_ID)
#@@@#print(device.is_kernel_driver_active(0))
# was it found?
if device is None:
raise ValueError('USB Device not found')
try:
# set the active configuration. With no arguments, the first
# configuration will be the active one
device.set_configuration()
except usb.core.USBError as e:
raise Exception("failed to set configuration\n %s" % e)
cfg = device.get_active_configuration()
for cfg in device:
sys.stdout.write(str(cfg.bConfigurationValue) + '\n')
#@@@#device.read(0x81, 255, 1000000)
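# Illustrative follow-up (the endpoint address, length, and timeout mirror the
# commented line above and are assumptions for this particular device):
#     data = device.read(0x81, 255, 1000)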
| 22.78125 | 64 | 0.728395 | 109 | 729 | 4.770642 | 0.633028 | 0.040385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036244 | 0.167353 | 729 | 31 | 65 | 23.516129 | 0.820428 | 0.30727 | 0 | 0 | 0 | 0 | 0.109091 | 0 | 0 | 0 | 0.024242 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04f30aaa86b0d421b2d40dfdaa3750bdc67bcc4e | 809 | py | Python | test_ruge_stuben_solver.py | CoderMoray/AMG-for-finity-element | 1f4194eb3d1b8b6007733cb1122bdab7d67424bc | [
"MIT"
] | null | null | null | test_ruge_stuben_solver.py | CoderMoray/AMG-for-finity-element | 1f4194eb3d1b8b6007733cb1122bdab7d67424bc | [
"MIT"
] | null | null | null | test_ruge_stuben_solver.py | CoderMoray/AMG-for-finity-element | 1f4194eb3d1b8b6007733cb1122bdab7d67424bc | [
"MIT"
] | null | null | null | import pyamg
import numpy as np
import time
from prettytable import PrettyTable
A = pyamg.gallery.poisson((500,500), format='csr') # 2D Poisson problem on 500x500 grid
# print('A : ', A )
ml = pyamg.ruge_stuben_solver(A)
b = np.random.rand(A.shape[0]) # pick a random right hand side
# Solve Ax = b to a tolerance of 1e-10 and time the solve;
# the residual could be checked with np.linalg.norm(b - A*y).
loops = [100]
times = []
for loop in loops:
t0 = time.time()
for i in range(loop):
y = ml.solve(b, tol=1e-10)
t1 = time.time()
t_sc = t1-t0
times.append(t_sc/loop)
tb = PrettyTable()
tb.field_names = [""]+["{} loops".format(loop) for loop in loops]
tb.add_row(["ruge stuben"]+["{:0.4f} ms".format(time*1000) for time in times])
print(tb) | 28.892857 | 88 | 0.608158 | 131 | 809 | 3.70229 | 0.519084 | 0.041237 | 0.037113 | 0.057732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055016 | 0.236094 | 809 | 28 | 89 | 28.892857 | 0.729773 | 0.202719 | 0 | 0 | 0 | 0 | 0.050235 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.190476 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04f33c7544bbc8ed87639ae554a4185b880e29cc | 7,467 | py | Python | app/telegram_handler.py | badgateway666/Telegram2MQTT | 387709bae98e99f1c5a83a27981ebd2172ad356b | [
"MIT"
] | 1 | 2020-05-22T17:00:27.000Z | 2020-05-22T17:00:27.000Z | app/telegram_handler.py | badgateway666/Telegram2MQTT | 387709bae98e99f1c5a83a27981ebd2172ad356b | [
"MIT"
] | null | null | null | app/telegram_handler.py | badgateway666/Telegram2MQTT | 387709bae98e99f1c5a83a27981ebd2172ad356b | [
"MIT"
] | null | null | null | import logging
import re
from queue import SimpleQueue
from telegram.bot import Bot as Telegram_Bot
from telegram.ext import Updater, CommandHandler, Filters
class TelegramHandler(object):
def __init__(self, bot_token, allowed_telegram_user_ids):
self.logger = logging.getLogger("telegram2mqtt.bot")
self.telegram_bot = Telegram_Bot(bot_token)
# self.telegram_bot.get_me() # For debugging purposes
self.allowed_telegram_user_ids = allowed_telegram_user_ids
self.updater = Updater(bot=self.telegram_bot, use_context=True)
self.topics_to_uid = {}
self.sub_queue = SimpleQueue()
self.unsub_queue = SimpleQueue()
self.pub_queue = SimpleQueue()
# Register Handlers
self.updater.dispatcher.add_handler(
CommandHandler(
"sub",
self.sub_handler,
filters=Filters.user(self.allowed_telegram_user_ids),
)
)
self.updater.dispatcher.add_handler(
CommandHandler(
"unsub",
self.unsub_handler,
filters=Filters.user(self.allowed_telegram_user_ids),
)
)
self.updater.dispatcher.add_handler(
CommandHandler(
"pub",
self.pub_handler,
filters=Filters.user(self.allowed_telegram_user_ids),
)
)
self.logger.info("Telegram-Handler is initialized.")
def __call__(self):
self.logger.info("Telegram-Handler started.")
for uid in self.allowed_telegram_user_ids:
self.telegram_bot.send_message(uid, "Telegram2MQTT Bot is online.")
self.updater.start_polling()
def stop(self):
for uid in self.allowed_telegram_user_ids:
self.telegram_bot.send_message(uid, "Telegram2MQTT Bot is offline.")
self.logger.info("Telegram-Handler stopped.")
self.updater.stop()
def sub_handler(self, update, context):
"""
For subscriptions, two wildcard characters are supported:
- A '#' character represents a complete sub-tree of the hierarchy
and thus must be the last character in a subscription topic string, such as SENSOR/#.
This will match any topic starting with SENSOR/, such as SENSOR/1/TEMP and SENSOR/2/HUMIDITY.
- A '+' character represents a single level of the hierarchy and is used between delimiters.
For example, SENSOR/+/TEMP will match SENSOR/1/TEMP and SENSOR/2/TEMP.
+ --> \w+
# --> (\w|\/)+
"""
self.logger.debug(f"Sub Handler received args: '{context.args}'")
# Validate context.args
topic = context.args[0]
if topic.count("#") >= 2:
self.logger.warning(
f"Invalid topic '{topic}' for subscription: Multiple '#' character used."
)
context.bot.send_message(
chat_id=update.effective_chat.id,
text=f"Invalid topic '{topic}' for subscription: Multiple '#' character used.",
)
return
if "#" in topic and not topic.endswith("#"):
self.logger.warning(
f"Invalid topic '{topic}' for subscription: '#' not used as last character."
)
context.bot.send_message(
chat_id=update.effective_chat.id,
text=f"Invalid topic '{topic}' for subscription: '#' not used as last character.",
)
return
if topic not in self.topics_to_uid:
self.logger.info(f"Subscribe to topic '{topic}'")
self.topics_to_uid[topic] = set()
self.sub_queue.put(topic)
if update.effective_chat.id not in self.topics_to_uid[topic]:
self.topics_to_uid[topic].add(update.effective_chat.id)
context.bot.send_message(
chat_id=update.effective_chat.id,
text=f"Sub on topic '{topic}' received.",
)
else:
context.bot.send_message(
chat_id=update.effective_chat.id,
text=f"It seems that you already subcribed to topic '{topic}'.",
)
def unsub_handler(self, update, context):
self.logger.debug(f"Unsub Handler received args: '{context.args}'")
# Validate context.args
topic = context.args[0]
if (
topic not in self.topics_to_uid
or update.effective_chat.id not in self.topics_to_uid[topic]
):
context.bot.send_message(
chat_id=update.effective_chat.id,
text=f"It seems that you aren't subcribed on topic '{topic}'.",
)
return
self.topics_to_uid[topic].remove(update.effective_chat.id)
context.bot.send_message(
chat_id=update.effective_chat.id, text=f"Unsubcribed on topic '{topic}'."
)
if len(self.topics_to_uid[topic]) == 0:
self.logger.info(f"Unsubscribe from topic '{topic}'")
self.unsub_queue.put(topic)
del self.topics_to_uid[topic]
def pub_handler(self, update, context):
topic, message = context.args[0], " ".join(context.args[1:])
self.logger.info(
f"Pub Handler received pub on topic '{topic}'. Message: '{message}'"
)
# Validate topic
if "#" in topic or "+" in topic:
self.logger.warning(
f"Pub Handler received topic with wildcard-character: '{topic}'."
)
context.bot.send_message(
chat_id=update.effective_chat.id,
text=f"Invalid input: wildcard-character in topic '{topic}' detected.",
)
return
# Validate msg, should empty messages be valid?
self.pub_queue.put((topic, message))
context.bot.send_message(
chat_id=update.effective_chat.id,
text=f"Pub on topic '{topic}'.\nMessage: {message}",
)
def publish_to_telegram(self, topic, message):
self.logger.debug(f"Message from MQTT. Topic: '{topic}' Message: '{message}'")
# TODO: Handle wildcards here
# 1. Search for known subs to topics which match the mqtt messages topic
        make_regex_from_topic = lambda t: re.compile(
            t.replace("/", r"\/")
            .replace("+", r"\w+")
            .replace("#", r"(\w|\/)+")
            .replace("$", r"\$")
        )
matched_topics = [
t for t in self.topics_to_uid if re.match(make_regex_from_topic(t), topic)
]
logging.debug(f"Matched topics for topic '{topic}': {matched_topics}")
# 2. Collect all users which subbed to any of these topics, store as set to deduplicate
recipients = set()
for t in matched_topics:
for uid in self.topics_to_uid[t]:
recipients.add(uid)
if len(recipients) == 0:
self.logger.error(
f"Couldn't publish message '{message}' to topic '{topic}', no matching user id found."
)
return
# 3. Forward message
self.logger.info("Forwarding mqtt-message to telegram.")
telegram_msg = f"Received message on topic '{topic}':\n{message}"
for uid in recipients:
self.telegram_bot.send_message(uid, telegram_msg)
| 38.096939 | 109 | 0.583367 | 876 | 7,467 | 4.826484 | 0.206621 | 0.028382 | 0.034059 | 0.042573 | 0.456244 | 0.397824 | 0.342952 | 0.342952 | 0.331835 | 0.306055 | 0 | 0.00331 | 0.312174 | 7,467 | 195 | 110 | 38.292308 | 0.819899 | 0.124146 | 0 | 0.244898 | 0 | 0 | 0.202202 | 0.003256 | 0 | 0 | 0 | 0.005128 | 0 | 1 | 0.047619 | false | 0 | 0.034014 | 0 | 0.122449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04f40581777dd72ef62292b53638cb5d976a365c | 1,574 | py | Python | forcefield_utilities/utils.py | mosdef-hub/forcefield-utilities | 6424f9c8c2935399171ee7056a0d6b408cabda81 | [
"MIT"
] | 1 | 2022-03-15T15:14:02.000Z | 2022-03-15T15:14:02.000Z | forcefield_utilities/utils.py | mosdef-hub/forcefield-utilities | 6424f9c8c2935399171ee7056a0d6b408cabda81 | [
"MIT"
] | 5 | 2021-11-12T01:35:55.000Z | 2022-03-23T15:45:29.000Z | forcefield_utilities/utils.py | mosdef-hub/forcefield-utilities | 6424f9c8c2935399171ee7056a0d6b408cabda81 | [
"MIT"
] | 2 | 2022-03-15T15:14:06.000Z | 2022-03-23T00:11:24.000Z | import functools
import warnings
def call_on_import(func):
    """Decorator that runs `func` once at import time and returns it unchanged."""
    func()
    return func
def get_package_file_path(from_package, relative_path):
"""Use source of a python package to locate and cache the address of a file."""
from pkg_resources import resource_filename
return resource_filename(from_package, relative_path)
def deprecate_kwargs(deprecated_kwargs=None):
if deprecated_kwargs is None:
deprecated_kwargs = set()
def decorate_deprecate_kwargs(func):
@functools.wraps(func)
def wrapper(self_or_cls, *args, **kwargs):
_deprecate_kwargs(kwargs, deprecated_kwargs)
return func(self_or_cls, *args, **kwargs)
return wrapper
return decorate_deprecate_kwargs
def _deprecate_kwargs(kwargs, deprecated_kwargs):
added_args = []
for kwarg in kwargs:
if kwarg in deprecated_kwargs:
added_args.append(kwarg)
if len(added_args) > 1:
message = (
"Keyword arguments `{dep_args}` are deprecated and will be removed in the "
"next minor release of the package. Please update your code accordingly"
)
else:
message = (
"Keyword argument `{dep_args}` is deprecated and will be removed in the "
"next minor release of the package. Please update your code accordingly"
)
if added_args:
warnings.warn(
message.format(dep_args=", ".join(added_args)),
DeprecationWarning,
3,
)
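# Illustrative usage (hypothetical names, not part of this module): the
# decorator targets methods, since the wrapper expects self_or_cls first.
#
#     class Builder:
#         @deprecate_kwargs({"old_flag"})
#         def build(self, **kwargs):
#             ...
#
#     Builder().build(old_flag=True)  # emits a DeprecationWarning naming old_flag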
| 29.698113 | 87 | 0.656925 | 192 | 1,574 | 5.182292 | 0.411458 | 0.096482 | 0.066332 | 0.046231 | 0.293467 | 0.180905 | 0.180905 | 0.180905 | 0.180905 | 0.180905 | 0 | 0.001735 | 0.267471 | 1,574 | 52 | 88 | 30.269231 | 0.861232 | 0.081957 | 0 | 0.105263 | 0 | 0 | 0.199442 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0 | 0.105263 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04f45b90500ae631520f30af9f99758d2ff28520 | 889 | py | Python | decbin.py | faisalkhan91/Dec2Bin | 623f1640aa6a4b619d95fad9df869c4901cd4c96 | [
"MIT"
] | null | null | null | decbin.py | faisalkhan91/Dec2Bin | 623f1640aa6a4b619d95fad9df869c4901cd4c96 | [
"MIT"
] | null | null | null | decbin.py | faisalkhan91/Dec2Bin | 623f1640aa6a4b619d95fad9df869c4901cd4c96 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
decimal = input("Please enter a real number: ")
index = 0
# Scan for the decimal point to split the integer and fractional parts.
while index < len(decimal) and decimal[index] != ".":
    index += 1
if index == len(decimal):  # no decimal point found
    intpart = decimal[:]
    fracpart = ""
else:
    intpart = decimal[:index]
    fracpart = decimal[index+1:]
# print("int len = ", len(intpart), "int part = ", intpart, "frac len = ", len(fracpart), " fracpart = ", fracpart)
if intpart == "":
binint = "0"
else:
    binint = ""
    intpart = int(intpart)
    # Repeated division by 2: each remainder is one binary digit, least significant first.
    while intpart != 0:
        if intpart % 2 == 0:
            binint = "0" + binint
        else:
            binint = "1" + binint
        intpart //= 2
if fracpart == "":
fracint = "0"
else :
fracint = ""
fracpart = float(fracpart) / 10 ** len(fracpart)
while fracpart != 0:
if fracpart * 2 >= 1:
fracint += "1"
else :
fracint += "0"
fracpart = fracpart * 2 - int(fracpart * 2)
print(decimal, "is equivalent to", binint + "." + fracint)
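# Illustrative run (an assumption, not captured output): entering "5.25"
# prints "5.25 is equivalent to 101.01".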
| 18.914894 | 115 | 0.590551 | 112 | 889 | 4.6875 | 0.294643 | 0.068571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033626 | 0.230596 | 889 | 46 | 116 | 19.326087 | 0.733918 | 0.154106 | 0 | 0.151515 | 0 | 0 | 0.069333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04f5f1b977e5486329255efbf34e12d42bdcf0a3 | 2,716 | py | Python | src/installer/src/tortuga/db/nicsDbHandler.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 33 | 2018-03-02T17:07:39.000Z | 2021-05-21T18:02:51.000Z | src/installer/src/tortuga/db/nicsDbHandler.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 201 | 2018-03-05T14:28:24.000Z | 2020-11-23T19:58:27.000Z | src/installer/src/tortuga/db/nicsDbHandler.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 23 | 2018-03-02T17:21:59.000Z | 2020-11-18T14:52:38.000Z | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=not-callable,multiple-statements,no-member,no-self-use
from sqlalchemy.orm.exc import NoResultFound
from tortuga.db.networkDevicesDbHandler import NetworkDevicesDbHandler
from tortuga.db.tortugaDbObjectHandler import TortugaDbObjectHandler
from tortuga.exceptions.nicAlreadyExists import NicAlreadyExists
from tortuga.exceptions.nicNotFound import NicNotFound
from .models.nic import Nic
class NicsDbHandler(TortugaDbObjectHandler):
"""
    This class handles the nics table.
"""
def __init__(self):
super().__init__()
self._networkDevicesDbHandler = NetworkDevicesDbHandler()
def getNic(self, session, mac):
"""
Return nic.
This method should be named 'getNicByMAC()'
"""
self._logger.debug(
'Retrieving NIC with MAC address [%s]' % (mac))
try:
return session.query(Nic).filter(Nic.mac == mac).one()
except NoResultFound:
raise NicNotFound(
'NIC with MAC address [%s] not found.' % (mac))
def getNicById(self, session, _id):
"""
Return nic.
"""
self._logger.debug('Retrieving NIC ID [%s]' % _id)
dbNic = session.query(Nic).get(_id)
if not dbNic:
raise NicNotFound('NIC ID [%s] not found.' % (_id))
return dbNic
def addNic(self, session, nic):
"""
Insert nic into the db.
"""
if nic.getMac():
self._logger.debug('Inserting NIC [%s]' % (nic))
try:
self.getNic(session, nic.getMac())
raise NicAlreadyExists('NIC [%s] already exists' % (nic))
except NicNotFound:
# OK.
pass
dbNic = Nic(
mac=nic.getMac(),
nodeId=nic.getNodeId(),
networkId=nic.getNetworkId(),
ip=nic.getIp(),
boot=nic.getBoot())
dbNic.networkdevice = \
self._networkDevicesDbHandler.createNetworkDeviceIfNotExists(
session, nic.getNetworkDevice().getName())
return dbNic
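# Illustrative usage (the session object is an assumption; any SQLAlchemy
# session bound to the Tortuga schema would do):
#
#     handler = NicsDbHandler()
#     nic = handler.getNic(session, '00:11:22:33:44:55')  # raises NicNotFound if absent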
| 28.893617 | 74 | 0.626657 | 297 | 2,716 | 5.673401 | 0.474747 | 0.035608 | 0.026706 | 0.018991 | 0.052819 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006094 | 0.275037 | 2,716 | 93 | 75 | 29.204301 | 0.84967 | 0.279087 | 0 | 0.095238 | 0 | 0 | 0.084819 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0.02381 | 0.142857 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04fa50f216dce4da7cb555026a335c1664fdf05b | 516 | py | Python | topCoder/srms/300s/srm378/div2/two_rotation_cypher.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 1 | 2020-09-30T19:53:08.000Z | 2020-09-30T19:53:08.000Z | topCoder/srms/300s/srm378/div2/two_rotation_cypher.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | null | null | null | topCoder/srms/300s/srm378/div2/two_rotation_cypher.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 1 | 2020-10-15T09:10:57.000Z | 2020-10-15T09:10:57.000Z | from string import ascii_lowercase
class TwoRotationCypher:
def encrypt(self, firstSize, firstRotate, secondRotate, message):
        # Split the alphabet at firstSize; each half is rotated independently.
        f, s, r = ascii_lowercase[:firstSize], ascii_lowercase[firstSize:], ''
for e in message:
if e == ' ':
r += e
elif e in f:
r += chr(ord(f[0]) + (ord(e) - ord(f[0]) + firstRotate)%firstSize)
else:
r += chr(ord(s[0]) + (ord(e) - ord(s[0]) + secondRotate)%(26-firstSize))
return r
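# Illustrative usage (values are an assumption, not from the original tests):
#     TwoRotationCypher().encrypt(3, 1, 1, "abc d")  # -> "bca e"
# 'a'..'c' rotate within the first block and 'd'..'z' within the second.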
| 36.857143 | 88 | 0.52907 | 63 | 516 | 4.285714 | 0.444444 | 0.155556 | 0.17037 | 0.059259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017291 | 0.327519 | 516 | 13 | 89 | 39.692308 | 0.760807 | 0 | 0 | 0 | 0 | 0 | 0.001938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04faa5333b2d83edb78730a93736b95eddd63ea2 | 5,675 | py | Python | src/CoRe.py | SkBlaz/core | 2833191021ddf7113d375daeb9e9740faa068957 | [
"BSD-3-Clause"
] | null | null | null | src/CoRe.py | SkBlaz/core | 2833191021ddf7113d375daeb9e9740faa068957 | [
"BSD-3-Clause"
] | null | null | null | src/CoRe.py | SkBlaz/core | 2833191021ddf7113d375daeb9e9740faa068957 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from class_neural import GenericAutoencoder
from class_clustering_reduction import ReductionCluster
from class_subspace_reduction import ReductionSubspace
try:
import umap
except Exception as es:
print(f"UMAP unavailable. {es}")
from sklearn.decomposition import NMF, TruncatedSVD
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.random_projection import SparseRandomProjection
import logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%d-%b-%y %H:%M:%S")
logging.getLogger(__name__).setLevel(logging.INFO)
class CoRe:
"""
The main CoRe class.
"""
def __init__(
self,
tau=2,
verbose=True,
embedding_algorithm="CoRe-small",
store_intermediary=False,
):
self.verbose = verbose
self.store_intermediary = store_intermediary
self.intermediary_representations = []
self.k = tau
self.rep_scores = []
self.embedding_algorithm = embedding_algorithm
if "-direct" in self.embedding_algorithm:
self.direct_projection = True
else:
self.direct_projection = False
def dimension_series(self, max_y):
dim_series = []
cdim = max_y
while cdim > self.k:
temp_dim = int(cdim / self.k)
if temp_dim > self.k:
dim_series.append(temp_dim)
cdim = temp_dim
else:
break
dim_series.append(self.k)
self.dim_series = dim_series
logging.info("Initialized dimension series: {}".format(self.dim_series))
def measure_complexity(self, matrix):
norms = np.sqrt(np.einsum("ij,ij->i", matrix, matrix)) / matrix.shape[1]
mnorm = (norms - np.mean(norms)) / (np.max(norms) - np.min(norms))
mnorm = np.std(mnorm)
return mnorm
def fit(self, dataframe):
dimension_y = min(dataframe.shape[1], dataframe.shape[0])
self.dimension_series(dimension_y)
encoders = []
intermediary_representations = [dataframe]
for dim in self.dim_series:
if self.verbose:
logging.info(f"Re-embedding into {dim} dimensions.")
if "CoRe-large" in self.embedding_algorithm:
encoder = GenericAutoencoder(
n_components=dim, verbose=self.verbose, nn_type="large"
)
if "CoRe-small" in self.embedding_algorithm:
encoder = GenericAutoencoder(
n_components=dim, verbose=self.verbose, nn_type="mini"
)
elif "UMAP" in self.embedding_algorithm:
encoder = umap.UMAP(n_components=dim)
elif "RandomSubspace" in self.embedding_algorithm:
encoder = ReductionSubspace(n_components=dim)
elif "SparseRandom" in self.embedding_algorithm:
encoder = SparseRandomProjection(n_components=dim)
elif "NMF" in self.embedding_algorithm:
encoder = NMF(n_components=dim)
elif "Cluster-mean" in self.embedding_algorithm:
encoder = ReductionCluster(n_dim=dim, aggregation="mean")
elif "Cluster-median" in self.embedding_algorithm:
encoder = ReductionCluster(n_dim=dim, aggregation="median")
elif "Cluster-max" in self.embedding_algorithm:
encoder = ReductionCluster(n_dim=dim, aggregation="max")
elif "LLE" in self.embedding_algorithm:
encoder = LocallyLinearEmbedding(n_components=dim)
elif "SVD" in self.embedding_algorithm:
encoder = TruncatedSVD(n_components=dim)
# encode the initial representation
if self.direct_projection:
encoded_representation = encoder.fit_transform(
intermediary_representations[0]
)
# encode current representation
else:
encoded_representation = encoder.fit_transform(
intermediary_representations[-1]
)
self.rep_scores.append(self.measure_complexity(encoded_representation))
encoders.append(encoder)
intermediary_representations.append(encoded_representation)
if self.store_intermediary:
self.intermediary_representations = intermediary_representations
self.encoder_space = encoders
def transform(self, dataframe, keep_intermediary=True):
current_df = dataframe
if self.verbose:
logging.info("Encoding new data.")
if keep_intermediary:
intermediary_representations = [dataframe]
for encoder in self.encoder_space:
tmp_df = encoder.transform(current_df)
if keep_intermediary:
intermediary_representations.append(tmp_df)
if self.direct_projection:
current_df = dataframe
else:
current_df = tmp_df
if self.verbose:
logging.info("Encoding obtained.")
if keep_intermediary:
return intermediary_representations
else:
return current_df
def fit_transform(self, dataframe, keep_intermediary=False):
self.fit(dataframe)
return self.transform(dataframe, keep_intermediary)
if __name__ == "__main__":
import numpy as np
X = np.random.random((100, 100))
core_instance = CoRe(
verbose=False, embedding_algorithm="CoRe-small", store_intermediary=False
)
core_instance.fit(X)
intermediary = core_instance.transform(X, keep_intermediary=True)
print(len(intermediary))
| 32.994186 | 84 | 0.622379 | 584 | 5,675 | 5.847603 | 0.234589 | 0.084334 | 0.083748 | 0.084334 | 0.332943 | 0.194729 | 0.175988 | 0.108053 | 0.108053 | 0.108053 | 0 | 0.003002 | 0.295507 | 5,675 | 171 | 85 | 33.187135 | 0.851176 | 0.014978 | 0 | 0.178295 | 0 | 0 | 0.058855 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.069767 | 0 | 0.155039 | 0.015504 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04fccb1478ce692e14caa6ca3962aae84ed836d8 | 1,771 | py | Python | p314_Binary_Tree_Vertical_Order_Traversal.py | bzhou26/leetcode_sol | 82506521e2cc412f96cd1dfc3c8c3ab635f67f73 | [
"MIT"
] | null | null | null | p314_Binary_Tree_Vertical_Order_Traversal.py | bzhou26/leetcode_sol | 82506521e2cc412f96cd1dfc3c8c3ab635f67f73 | [
"MIT"
] | null | null | null | p314_Binary_Tree_Vertical_Order_Traversal.py | bzhou26/leetcode_sol | 82506521e2cc412f96cd1dfc3c8c3ab635f67f73 | [
"MIT"
] | null | null | null | '''
- Leetcode problem: 314
- Difficulty: Medium
- Brief problem description:
Given a binary tree, return the vertical order traversal of its nodes' values. (ie, from top to bottom, column by column).
If two nodes are in the same row and column, the order should be from left to right.
Example 1:

Input: [3,9,20,null,null,15,7]

      3
     / \
    9  20
       / \
      15  7

Output:
[
  [9],
  [3,15],
  [20],
  [7]
]
Example 2:

Input: [3,9,8,4,0,1,7]

       3
      / \
     9   8
    / \ / \
   4  0 1  7

Output:
[
  [4],
  [9],
  [3,0,1],
  [8],
  [7]
]
Example 3:

Input: [3,9,8,4,0,1,7,null,null,null,2,5] (0's right child is 2 and 1's left child is 5)

       3
      / \
     9   8
    / \ / \
   4  0 1  7
      / \
     5   2

Output:
[
  [4],
  [9,5],
  [3,0,1],
  [8,2],
  [7]
]
- Solution Summary:
  BFS from the root while tagging each node with a column index
  (root = 0, left child = column - 1, right child = column + 1);
  collect the values per column in a defaultdict, then emit the columns
  in ascending column order.
- Used Resources:
--- Bo Zhou
'''
import collections
from typing import List
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def verticalOrder(self, root: TreeNode) -> List[List[int]]:
orderMap = collections.defaultdict(list)
dq = collections.deque()
dq.append((root, 0))
while dq:
n = len(dq)
for i in range(n):
node, order = dq.popleft()
if node:
orderMap[order].append(node.val)
dq.append((node.left, order - 1))
dq.append((node.right, order + 1))
orderedDict = collections.OrderedDict(sorted(orderMap.items()))
result = []
for k, v in orderedDict.items():
result.append(v)
return result
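# Complexity sketch (editorial note, not part of the original solution):
# one BFS visit per node plus a sort over the distinct column keys,
# i.e. O(n + c log c) time and O(n) extra space for n nodes and c columns.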
| 15.8125 | 122 | 0.512705 | 244 | 1,771 | 3.704918 | 0.393443 | 0.013274 | 0.013274 | 0.017699 | 0.042035 | 0.042035 | 0.026549 | 0.026549 | 0 | 0 | 0 | 0.07088 | 0.338792 | 1,771 | 111 | 123 | 15.954955 | 0.70111 | 0.57764 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
04feec89ef011be3222eac2c8a548d3c3d472132 | 2,548 | py | Python | tests/pterradactyl/commands/test_lookup.py | GiampaoloFalqui/pterradactyl | 3d34f8088784c53e3d0871d8ca81f2381f2c2be9 | [
"Apache-2.0"
] | 2 | 2022-03-31T09:02:49.000Z | 2022-03-31T18:56:52.000Z | tests/pterradactyl/commands/test_lookup.py | GiampaoloFalqui/pterradactyl | 3d34f8088784c53e3d0871d8ca81f2381f2c2be9 | [
"Apache-2.0"
] | 4 | 2021-12-20T18:31:47.000Z | 2022-03-14T16:31:49.000Z | tests/pterradactyl/commands/test_lookup.py | GiampaoloFalqui/pterradactyl | 3d34f8088784c53e3d0871d8ca81f2381f2c2be9 | [
"Apache-2.0"
] | null | null | null | import unittest
from pterradactyl.commands.lookup import LookupCommand
from mock import patch
import os
import argparse
import pytest
class TestLookupCommands(unittest.TestCase):
def setUp(self) -> None:
self.base_path = os.path.dirname(os.path.abspath(__file__))
self.config = os.path.join(os.getcwd(), 'tests/resources/config/pterra.yaml')
self.facts = os.path.join(os.getcwd(), 'tests/resources/config/facts.yaml')
self.facts_invalid = os.path.join(os.getcwd(), 'tests/resources/config/facts_invalid.yaml')
self.parser = self.create_parser()
def create_parser(self):
parser = argparse.ArgumentParser()
parser.add_argument('--test', '-t')
return parser
def test_parser_args(self):
with patch('os.getcwd') as cwd_mock:
cwd_mock.return_value = self.config
self.lc = LookupCommand(config=self.config, parser=self.parser)
parsed = self.parser.parse_args((['--backend', 'yaml', '--facts', self.facts, '--set', 'foo=bar', '--set', 'foo1=bar1']))
self.assertEqual(parsed.backend, 'yaml')
self.assertEqual(parsed.set, ['foo=bar', 'foo1=bar1'])
self.assertEqual(parsed.facts, self.facts)
self.lc.execute(parsed, ['hierarchy'])
def test_lookup_should_exit_with_invalid_yaml_file(self):
with patch('os.getcwd') as cwd_mock_exception:
with pytest.raises(SystemExit) as pytest_wrapped_e:
cwd_mock_exception.return_value = self.config
self.lc = LookupCommand(config=self.config, parser=self.parser)
parsed = self.parser.parse_args((['--backend', 'yaml', '--facts', self.facts_invalid, '--set', 'foo=bar', '--set', 'foo1=bar1']))
self.lc.execute(parsed, ['hierarchy'])
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 42
def test_lookup_invalid_set_facts(self):
with patch('os.getcwd') as cwd_mock_exception:
with pytest.raises(SystemExit) as pytest_wrapped_e:
cwd_mock_exception.return_value = self.config
self.lc = LookupCommand(config=self.config, parser=self.parser)
parsed = self.parser.parse_args((['--backend', 'yaml', '--facts', self.facts, '--set', 'foo=bar', '--set', 'foo:bar']))
self.lc.execute(parsed, ['hierarchy'])
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 42
| 49 | 145 | 0.637363 | 310 | 2,548 | 5.070968 | 0.222581 | 0.050891 | 0.053435 | 0.050891 | 0.655216 | 0.608142 | 0.608142 | 0.587786 | 0.544529 | 0.489822 | 0 | 0.005056 | 0.223705 | 2,548 | 51 | 146 | 49.960784 | 0.789687 | 0 | 0 | 0.363636 | 0 | 0 | 0.127943 | 0.042386 | 0 | 0 | 0 | 0 | 0.159091 | 1 | 0.113636 | false | 0 | 0.136364 | 0 | 0.295455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |