hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
496edd8c9bff891faaf9c8bdb6f9aa3387be78dd | 1,684 | py | Python | pinax/checkers/style.py | pinax/pinax-checkers | 29ce93da5dc64228150ef8f36159a35e90cb5f39 | [
"MIT"
] | 1 | 2015-06-02T11:34:07.000Z | 2015-06-02T11:34:07.000Z | pinax/checkers/style.py | pinax/pinax-checkers | 29ce93da5dc64228150ef8f36159a35e90cb5f39 | [
"MIT"
] | null | null | null | pinax/checkers/style.py | pinax/pinax-checkers | 29ce93da5dc64228150ef8f36159a35e90cb5f39 | [
"MIT"
] | null | null | null | import sys
import tokenize
from pylint.interfaces import IRawChecker
from pylint.checkers import BaseChecker
class PinaxStyleChecker(BaseChecker):
    # Base class for Pinax raw checkers: feeds the module's token stream to
    # ``process_tokens`` (implemented by subclasses).
    def process_module(self, node):
        """
        extracts encoding from the stream and decodes each line, so that
        international text's length is properly calculated.
        """
        stream = node.file_stream
        stream.seek(0) # XXX may be removed with astng > 0.23
        readline = stream.readline
        if sys.version_info < (3, 0):
            # On Python 2 the stream yields raw bytes: decode each line with
            # the module's declared encoding (replacing undecodable bytes) so
            # lengths are measured in characters, not bytes.
            if node.file_encoding is not None:
                readline = lambda: stream.readline().decode(
                    node.file_encoding,
                    "replace"
                )
        self.process_tokens(tokenize.generate_tokens(readline))
class QuotationStyleChecker(PinaxStyleChecker):
    """
    Check for use of double-quotes instead of single-quotes
    """
    __implements__ = IRawChecker
    name = "quotation"
    msgs = {
        "C9801": (
            "Single-quotes are in use instead of double-quotes",
            "Pinax coding standard requires double-quotes instead of "
            "single-quotes."
        ),
    }
    options = ()

    def process_tokens(self, tokens):
        """Emit C9801 for every single-quoted string token.

        Strings that themselves contain a double-quote character are
        exempt, since single-quoting them avoids escaping.
        """
        for (tok_type, token, start, _, _) in tokens:
            # Use the named constant instead of the magic number 3.
            if tok_type == tokenize.STRING:
                if token.startswith("'") and token.endswith("'"):
                    if '"' not in token:
                        self.add_message("C9801", line=start[0])
def get_offset(line):
    """Return the column of the first non-whitespace character in *line*.

    A line consisting entirely of whitespace is treated as indented up to
    (but excluding) its final character, i.e. ``len(line) - 1``.
    """
    return len(line) - 1 if line.isspace() else len(line) - len(line.lstrip())
def register(linter):
    """Pylint plugin entry point: attach the Pinax checkers to *linter*."""
    checker = QuotationStyleChecker(linter)
    linter.register_checker(checker)
| 28.542373 | 72 | 0.599169 | 184 | 1,684 | 5.380435 | 0.51087 | 0.024242 | 0.032323 | 0.042424 | 0.066667 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0.014555 | 0.306413 | 1,684 | 58 | 73 | 29.034483 | 0.833048 | 0.124703 | 0 | 0 | 0 | 0 | 0.103714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.102564 | 0 | 0.410256 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
49714af2a28c8cc760393e38c7a02cd5a74a7dee | 379 | py | Python | .config/qtile-wayland/settings/colors.py | q3aql/dotfiles | 92375d2217a7e0ed3a36ed1649bc832ad54753f8 | [
"MIT"
] | 1 | 2022-03-28T19:11:18.000Z | 2022-03-28T19:11:18.000Z | .config/qtile-wayland/settings/colors.py | q3aql/dotfiles | 92375d2217a7e0ed3a36ed1649bc832ad54753f8 | [
"MIT"
] | null | null | null | .config/qtile-wayland/settings/colors.py | q3aql/dotfiles | 92375d2217a7e0ed3a36ed1649bc832ad54753f8 | [
"MIT"
] | null | null | null | # Justine Smithies
# https://github.com/justinesmithies/qtile-wayland-dotfiles
# Color configuration
# Terminal color palette, listed in the classic ANSI color order.
colors = [
    '#292d3e',
    '#dc322f',
    '#00ff2a',
    '#b58900',
    '#268bd2',
    '#d33682',
    '#2aa198',
    '#eee8d5'
]

# Named aliases for the palette entries (same ANSI order as above).
(BLACK, RED, GREEN, YELLOW,
 BLUE, MAGENTA, CYAN, WHITE) = colors
| 15.16 | 59 | 0.609499 | 44 | 379 | 5.25 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.126667 | 0.208443 | 379 | 24 | 60 | 15.791667 | 0.643333 | 0.248021 | 0 | 0 | 0 | 0 | 0.199288 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
497174dd194188260bde3d1825b7411d820cc6c5 | 7,777 | py | Python | webqq_client_sdk/webqq_protocol_gen.py | HotIce0/webqq-client-sdk | dc1bb7cd021d59a493f0474ea94485277e75ab51 | [
"MIT"
] | 5 | 2018-11-12T01:12:04.000Z | 2021-06-06T05:16:04.000Z | webqq_client_sdk/webqq_protocol_gen.py | HotIce0/webqq-client-sdk | dc1bb7cd021d59a493f0474ea94485277e75ab51 | [
"MIT"
] | null | null | null | webqq_client_sdk/webqq_protocol_gen.py | HotIce0/webqq-client-sdk | dc1bb7cd021d59a493f0474ea94485277e75ab51 | [
"MIT"
] | 2 | 2019-08-31T05:34:51.000Z | 2021-04-08T10:53:41.000Z | # -*- coding: UTF-8 -*-
import json
import webqq_utils
# Login status
class LOGIN_STATUS:
    """Web QQ presence states, plus the numeric code -> name mapping."""
    ONLINE = "online"
    OFFLINE = "offline"
    AWAY = "away"
    HIDDEN = "hidden"
    BUSY = "busy"
    CALLME = "callme"
    SILENT = "silent"
    # Numeric status codes (10, 20, ..., 70) mapped to the names above.
    num_to_string_dict = dict(
        zip(range(10, 80, 10),
            ("online", "offline", "away", "hidden", "busy", "callme", "silent"))
    )
class WebQQProtocolGenerator:
    """Protocol generator for the web QQ client.

    Builds the URLs and ``r=``-prefixed JSON POST bodies used by the
    web2.qq.com HTTP endpoints.

    BUG FIX note: integer-to-string conversions now use ``str()``. The
    original ``(bytes)(x)`` calls only worked on Python 2 (where ``bytes``
    is ``str``); on Python 3 ``bytes(int)`` yields a zero-filled bytes
    object and concatenating it to ``str`` raises TypeError.
    """
    # Static protocol parameters shared by every request.
    __client_id = 53999199
    __aid = 501004106
    __face = 603

    def __init__(self):
        # Monotonic message counter; feeds webqq_utils.get_msg_id().
        self.__sequence = 0

    # --- login URLs ---

    def build_get_qr_img_url(self):
        """URL of the login QR-code image."""
        return "https://ssl.ptlogin2.qq.com/ptqrshow?appid=501004106&e=2&l=M&s=3&d=72&v=4&" \
               "t=0.2904360772892842&daid=164&pt_3rd_aid=0"

    def build_check_qr_status_url(self, qrsig):
        """URL polled to check whether the login QR code has been scanned."""
        # milli_timestamps
        milli_timestamps = webqq_utils.get_current_milli_time()
        ptqrtoken = webqq_utils.hash33(qrsig)
        return ("https://ssl.ptlogin2.qq.com/ptqrlogin?u1=https://web2.qq.com/proxy.html&ptqrtoken="
                + str(ptqrtoken) + "&ptredirect=0&h=1&t=1&g=1&from_ui=1&ptlang=2052&action=0-0-"
                + str(milli_timestamps) + "&js_ver=10284&js_type=1&login_sig=&pt_uistyle=40&aid="
                + str(self.__aid) + "&daid=164&mibao_css=m_webqq&")

    def build_report_url(self):
        """Telemetry/report URL pinged after login."""
        return "https://report.url.cn/report/report?strValue=0&nValue=11202&tag=0&qver=0.0.1&t="\
               + str(webqq_utils.get_current_milli_time())

    def build_get_vfwebqq_url(self):
        """URL that returns the vfwebqq session token."""
        return "https://s.web2.qq.com/api/getvfwebqq?ptwebqq=&clientid=" + str(self.__client_id)\
               + "&psessionid=&t=" + str(webqq_utils.get_current_milli_time())

    # Log in. The status cannot be changed here: regardless of whether
    # "status" is offline or busy, the account is online after login.
    def build_login2_post(self):
        """Return (url, post) for the channel/login2 request."""
        url = "https://d1.web2.qq.com/channel/login2"
        post = {
            "ptwebqq": "",
            "clientid": self.__client_id,
            "psessionid": "",
            "status": "online",
        }
        post = "r=" + json.dumps(post)
        return url, post

    def build_change_status_url(self, newstatus, psessionid):
        """URL that switches the account's presence to *newstatus*."""
        url = "https://d1.web2.qq.com/channel/change_status2?newstatus=" + newstatus + "&clientid="\
              + str(self.__client_id) + "&psessionid=" + psessionid + "&t=" \
              + str(webqq_utils.get_current_milli_time())
        return url

    def build_get_qq_avatar_url(self, uid):
        """get qq avatar"""
        return "https://q.qlogo.cn/g?b=qq&nk=" + str(uid) + "&s=100&t=" + str(
            webqq_utils.get_current_milli_time())

    def build_get_user_friends2_post(self, vfwebqq, hash):
        """Return (url, post) that fetches the friend list."""
        url = "https://s.web2.qq.com/api/get_user_friends2"
        post = {
            "vfwebqq": vfwebqq,
            "hash": hash,
        }
        post = "r=" + json.dumps(post)
        return url, post

    def build_get_group_name_list_mask2_post(self, vfwebqq, hash):
        """Return (url, post) that fetches the group name list."""
        url = "https://s.web2.qq.com/api/get_group_name_list_mask2"
        post = {
            "vfwebqq": vfwebqq,
            "hash": hash,
        }
        post = "r=" + json.dumps(post)
        return url, post

    def build_get_discus_list_url(self, psessionid, vfwebqq):
        """URL that fetches the discussion-group list."""
        url = "https://s.web2.qq.com/api/get_discus_list?clientid=" + str(self.__client_id) + "&psessionid=" +\
              psessionid + "&vfwebqq=" + vfwebqq + "&t=" + str(webqq_utils.get_current_milli_time())
        return url

    def build_get_self_info2_url(self):
        """URL that fetches the logged-in account's own profile."""
        url = "https://s.web2.qq.com/api/get_self_info2?t=" + str(webqq_utils.get_current_milli_time())
        return url

    def build_get_online_buddies2_url(self, psessionid, vfwebqq):
        """URL that fetches which buddies are currently online."""
        url = "https://d1.web2.qq.com/channel/get_online_buddies2?vfwebqq=" + vfwebqq + "&clientid="\
              + str(self.__client_id) + "&psessionid=" + psessionid + "&t="\
              + str(webqq_utils.get_current_milli_time())
        return url

    def build_get_single_long_nick2(self, tuin, vfwebqq):
        """URL that fetches a single friend's signature (long nick)."""
        url = "https://s.web2.qq.com/api/get_single_long_nick2?tuin=" + str(tuin) + "&vfwebqq=" + vfwebqq + "&t=" \
              + str(webqq_utils.get_current_milli_time())
        return url

    def build_get_friend_info2_url(self, tuin, vfwebqq, psessionid):
        """URL that fetches a friend's detailed profile."""
        url = "https://s.web2.qq.com/api/get_friend_info2?tuin=" + str(tuin) + "&vfwebqq=" + vfwebqq \
              + "&clientid=" + str(self.__client_id) + "&psessionid=" + psessionid\
              + "&t=" + str(webqq_utils.get_current_milli_time())
        return url

    # gcode is the same value as gid.
    def build_get_group_info_ext2_url(self, gcode, vfwebqq):
        """URL that fetches extended group information."""
        url = "https://s.web2.qq.com/api/get_group_info_ext2?gcode=" + str(gcode) + "&vfwebqq=" + vfwebqq + "&t="\
              + str(webqq_utils.get_current_milli_time())
        return url

    def build_get_discu_info_url(self, did, vfwebqq, psessionid):
        """URL that fetches a discussion group's information."""
        url = "https://d1.web2.qq.com/channel/get_discu_info?did=" + str(did) + "&vfwebqq=" + vfwebqq\
              + "&clientid=" + str(self.__client_id) + "&psessionid=" + psessionid\
              + "&t=" + str(webqq_utils.get_current_milli_time())
        return url

    # Tested: changing the font attributes has no visible effect.
    def __get_content(self, msg, font="宋体", font_size=10, font_style1=0, font_style2=0,
                      font_style3=0, font_color="000000"):
        """Serialize *msg* plus font metadata into the protocol's content JSON."""
        content = [
            msg,
            [
                "font",
                {
                    "name": font,
                    "size": font_size,
                    "style": [font_style1, font_style2, font_style3],
                    "color": font_color,
                }
            ]
        ]
        return json.dumps(content)

    def build_send_buddy_msg2(self, tuin, psessionid, msg=""):
        """Return (url, post) that sends *msg* to the friend *tuin*."""
        url = "https://d1.web2.qq.com/channel/send_buddy_msg2"
        self.__sequence = self.__sequence + 1
        post = {
            "to": tuin,
            "content": self.__get_content(msg),
            "face": self.__face,
            "clientid": self.__client_id,
            "msg_id": webqq_utils.get_msg_id(self.__sequence),
            "psessionid": psessionid,
        }
        post = "r=" + json.dumps(post)
        return url, post

    def build_send_qun_msg2(self, guin, psessionid, msg=""):
        """Return (url, post) that sends *msg* to the group *guin*."""
        url = "https://d1.web2.qq.com/channel/send_qun_msg2"
        self.__sequence = self.__sequence + 1
        post = {
            "group_uin": guin,
            "content": self.__get_content(msg),
            "face": self.__face,
            "clientid": self.__client_id,
            "msg_id": webqq_utils.get_msg_id(self.__sequence),
            "psessionid": psessionid,
        }
        post = "r=" + json.dumps(post)
        return url, post

    def build_send_discu_msg2(self, did, psessionid, msg=""):
        """Return (url, post) that sends *msg* to the discussion group *did*."""
        url = "https://d1.web2.qq.com/channel/send_discu_msg2"
        self.__sequence = self.__sequence + 1
        post = {
            "did": did,
            "content": self.__get_content(msg),
            "face": self.__face,
            "clientid": self.__client_id,
            "msg_id": webqq_utils.get_msg_id(self.__sequence),
            "psessionid": psessionid,
        }
        post = "r=" + json.dumps(post)
        return url, post

    # Messaging strangers: presumably only possible after receiving a
    # message from them. Not implemented.
    def build_get_c2cmsg_sig2(self):
        pass

    # Fetch incoming messages (long poll).
    def build_poll2(self, psessionid):
        """Return (url, post) for the channel/poll2 long-poll request."""
        url = "https://d1.web2.qq.com/channel/poll2"
        post = {
            "ptwebqq": "",
            "clientid": self.__client_id,
            "psessionid": psessionid,
            "key": "",
        }
        post = "r=" + json.dumps(post)
        return url, post
| 36.511737 | 119 | 0.5731 | 929 | 7,777 | 4.500538 | 0.201292 | 0.040182 | 0.036594 | 0.057403 | 0.59005 | 0.58144 | 0.534083 | 0.47381 | 0.42143 | 0.386032 | 0 | 0.032822 | 0.279156 | 7,777 | 212 | 120 | 36.683962 | 0.712986 | 0.02456 | 0 | 0.389535 | 0 | 0.023256 | 0.231064 | 0.024058 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133721 | false | 0.005814 | 0.011628 | 0.017442 | 0.343023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4971b621c2ac7b005c86ac620bb0305793287b00 | 9,124 | py | Python | request_validator/serializers.py | amirasaran/request_validator | 0adb1aae3d4f32a4a8f6658e7e4f6b379545def5 | [
"MIT"
] | 16 | 2017-04-04T08:13:49.000Z | 2021-04-14T15:01:53.000Z | request_validator/serializers.py | amirasaran/request_validator | 0adb1aae3d4f32a4a8f6658e7e4f6b379545def5 | [
"MIT"
] | 2 | 2017-04-04T08:23:55.000Z | 2018-08-03T13:03:53.000Z | request_validator/serializers.py | amirasaran/request_validator | 0adb1aae3d4f32a4a8f6658e7e4f6b379545def5 | [
"MIT"
] | 1 | 2020-02-23T02:05:39.000Z | 2020-02-23T02:05:39.000Z | from __future__ import absolute_import
import copy
from .fields import Field
class BaseSerializer(object):
    """Common state and accessors shared by all serializer variants."""

    def __init__(self, data=None, source=None, required=True, force_valid=False):
        # Raw payload to validate and where in the parent payload it lives.
        self._initial_data = data
        self._source = source
        # Whether a missing payload is an error, and whether partial
        # (error-containing) results should be discarded.
        self._required = required
        self._force_valid = force_valid
        # Populated by subclasses during validation.
        self._errors = None
        self._validated_data = None

    def get_errors(self):
        """Return the accumulated validation errors."""
        return self._errors

    def validate_data(self):
        """Return the data produced by the last validation run."""
        return self._validated_data

    def has_error(self):
        """True when at least one error has been recorded."""
        return len(self._errors) > 0

    @property
    def errors(self):
        """Read-only alias for :meth:`get_errors`."""
        return self.get_errors()
class Serializer(BaseSerializer):
    """Dict validator driven by class-level Field/serializer declarations.

    Subclasses declare ``Field`` (or nested serializer) attributes;
    ``is_valid()`` checks ``self._initial_data`` against each declaration
    and fills ``self._validated_data`` / ``self._errors``.
    """

    def __init__(self, *args, **kwargs):
        super(Serializer, self).__init__(*args, **kwargs)
        self._validated_data = {}
        self._errors = {}
        self._default = {}
        self._all_fields_valid = True

    def validate_data(self):
        """Return the validated dict, or {} when errors forbid partial data."""
        if not (self._force_valid and self.has_error()) and self._all_fields_valid:
            return self._validated_data
        return {}

    def __new__(cls, *args, **kwargs):
        # ``many=True`` wraps the serializer class in a list serializer.
        many = kwargs.pop("many", False)
        # Called for its side effect: builds the class-level field cache.
        cls.fields()
        if many:
            if hasattr(cls, "Meta") and hasattr(cls.Meta, "list_serializer"):
                return cls.Meta.list_serializer(cls, *args, **kwargs)
            return ListSerializer(cls, *args, **kwargs)
        else:
            # BUG FIX: on Python 3, object.__new__() rejects extra arguments
            # when __new__ is overridden (TypeError); Python 2 ignored them.
            return object.__new__(cls)

    @classmethod
    def fields(cls):
        """Return {name: declaration}, moving the declared Field/serializer
        attributes off the class into a cached dict (so instances cannot
        accidentally shadow them)."""
        if "_fields_dict" not in cls.__dict__:
            cls._fields_dict = {}
            for field in cls._get_fields():
                if field in cls.__dict__:
                    cls._fields_dict[field] = getattr(cls, field)
                if hasattr(cls, field):
                    delattr(cls, field)
            if len(cls._get_classes()) > 1:
                # Inherit the parent serializer's declarations as well.
                cls._fields_dict.update(cls._get_parent().fields())
        return cls._fields_dict

    @classmethod
    def _get_fields(cls):
        # Names declared on this class (not its parent) holding a Field or
        # nested serializer instance; cached on the class.
        if '_fields' not in cls.__dict__:
            cls._fields = []
            for field in set(dir(cls)) - set(dir(cls._get_parent())):
                if not isinstance(getattr(cls, field), (Field, BaseSerializer)):
                    continue
                cls._fields.append(field)
            if len(cls._get_classes()) > 1:
                cls._fields = cls._fields + cls._get_parent()._get_fields()
        return cls._fields

    @classmethod
    def _get_classes(cls):
        # Walk the (single-inheritance only) ancestry up to Serializer.
        if "_base_classes" not in cls.__dict__:
            the_class = cls
            cls._base_classes = []
            while True:
                bases = the_class.__bases__
                if len(bases) == 0:
                    break
                assert len(bases) == 1, """ can not use multiple extend"""
                the_class = bases[0]
                cls._base_classes.append(the_class)
                if the_class == Serializer:
                    break
        return cls._base_classes

    @classmethod
    def _get_parent(cls):
        # Immediate ancestor in the serializer hierarchy.
        return cls._get_classes()[0]

    def _get_field(self, key):
        # Deep-copy so per-instance validation state never leaks between
        # instances that share the class-level declaration.
        return copy.deepcopy(getattr(self, '_fields_dict')[key])

    @property
    def data(self):
        """Validated data with every missing field filled from its default."""
        data = self.validate_data()
        for key, value in self.fields().items():
            if key not in data:
                data[key] = value._default
        return data

    def add_error(self, index, value):
        """Record *value* as the error for field *index*."""
        self._errors[index] = value

    def is_valid(self):
        """Run validation; True when no error was recorded."""
        validated_data, serializer_validated = self._validate(self._initial_data)
        self._validated_data = validated_data
        return not self.has_error()

    def _validate(self, initial_data):
        """Apply the user hook, then each declared field/serializer.

        Returns (validated_data, all-fields-passed flag)."""
        serializer_validated = True
        validate_data = {}
        errors, initial_data = self._check_user_validation(initial_data)
        if len(errors) != 0:
            self._all_fields_valid = False
            for error in errors:
                # BUG FIX: dict.iteritems() is Python 2 only; items()
                # behaves identically when only iterated.
                for key, value in error.items():
                    self.add_error(key, value)
        for attr in self.fields():
            field = self._get_field(attr)
            if isinstance(field, Field):
                field.set_data(initial_data, attr)
                if field.has_error():
                    self._all_fields_valid = False
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                    continue
                if not field.validate():
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                    continue
                validate_data[attr] = field.data
            elif isinstance(field, Serializer):
                field.set_initial_data(initial_data, attr)
                if field.has_error():
                    self._all_fields_valid = False
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                    continue
                elif not field.is_valid():
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                validate_data[attr] = field.validate_data()
                continue
            elif isinstance(field, ListSerializer):
                field.set_initial_data(initial_data, attr)
                if field.has_error():
                    self._all_fields_valid = False
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                    continue
                elif not field.is_valid():
                    serializer_validated = False
                    self.add_error(attr, field.get_errors())
                validate_data[attr] = field.validate_data()
                continue
        return validate_data, serializer_validated

    def _check_user_validation(self, data):
        # Run the overridable validate() hook; a ValidationError becomes a
        # list of error dicts, any other result passes through.
        try:
            before_validation = self.validate(data)
            return [], before_validation
        except ValidationError as e:
            return e.details, data

    def validate(self, attr):
        """Hook for subclasses: raise ValidationError or return the
        (possibly transformed) payload."""
        return attr

    @property
    def is_all_fields_valid(self):
        """True when every declared field validated cleanly."""
        return self._all_fields_valid

    def set_initial_data(self, data, index):
        """Pull this nested serializer's payload from the parent *data* by
        source name or by *index*; flag an error when required but missing."""
        self._initial_data = None
        if not data:
            return self
        if self._source in data:
            self._initial_data = data[self._source]
        elif index in data:
            self._initial_data = data[index]
        else:
            if self._required:
                self.add_error(index, "This field is required")
        return self
class ListSerializer(BaseSerializer):
    """Validates a list payload by delegating each element to *serializer*."""
    def __init__(self, serializer, *args, **kwargs):
        super(ListSerializer, self).__init__(*args, **kwargs)
        # ``data`` was already consumed by BaseSerializer; drop it so it is
        # not passed a second time to the per-item serializer in is_valid().
        kwargs.pop("data", False)
        self._serializer = serializer
        self._validated_data = []
        self._errors = []
        # Remaining args/kwargs are re-used to construct each item serializer.
        self._args = args
        self._kwargs = kwargs
        self._data = []
        self._default = []
        # NOTE(review): _allow_null/_can_null are defined but never consulted
        # by is_valid(), which always errors on a None payload.
        self._allow_null = True
    def add_error(self, value):
        # Errors accumulate as a flat list (unlike Serializer's dict).
        self._errors.append(value)
    def _can_null(self):
        # True when a missing (None) payload would be acceptable.
        return self._allow_null and self._initial_data is None
    def is_valid(self):
        """Validate every element; True when no element produced an error."""
        assert isinstance(self._initial_data, (list, tuple)) or self._initial_data is None, \
            """ _initial_data must be list or tuple but get {data_type}""".format(
                data_type=type(self._initial_data).__name__)
        if self._initial_data is not None:
            for initial_data in self._initial_data:
                serializer = self._serializer(data=initial_data, *self._args, **self._kwargs)
                if serializer.is_valid():
                    self._validated_data.append(serializer.validate_data())
                    self._data.append(serializer.data)
                else:
                    self.add_error(serializer.get_errors())
                    # Without force_valid, keep whatever partial data the
                    # item serializer could still produce.
                    if not self._force_valid and serializer.validate_data():
                        self._validated_data.append(serializer.validate_data())
                        self._data.append(serializer.data)
        else:
            self.add_error("can not be null !")
        return not self.has_error()
    @property
    def data(self):
        # Full (defaults-included) representation of all accepted items.
        return self._data
    def set_initial_data(self, data, index):
        """Pull this field's payload out of the parent *data* by source name
        or by *index*; records an error when required but missing."""
        self._initial_data = self._default
        if not data:
            return self
        if self._source in data:
            self._initial_data = data[self._source]
        elif index in data:
            self._initial_data = data[index]
        else:
            if self._required:
                self.add_error("This field is required")
        return self
class ValidationError(Exception):
    """Raised by user validation hooks; carries a list of error dicts."""

    def __init__(self, details):
        # Normalise to a list of dicts: bare (non-dict) values become
        # {'non_field_error': str(value)} entries.
        if not isinstance(details, list):
            details = [details]
        self.details = [
            detail if isinstance(detail, dict) else {'non_field_error': str(detail)}
            for detail in details
        ]
4973e3f7f866d2ed807ba9f4308ede01b2115b67 | 393 | py | Python | examples/processing/bezier.py | strakam/PyEasyGraphics | 57a586aa92385d26725d4ec3d61b2bbbe970195d | [
"BSD-3-Clause"
] | 5 | 2019-09-23T05:15:47.000Z | 2021-01-17T08:06:47.000Z | examples/processing/bezier.py | strakam/PyEasyGraphics | 57a586aa92385d26725d4ec3d61b2bbbe970195d | [
"BSD-3-Clause"
] | 3 | 2019-05-03T05:25:17.000Z | 2021-04-15T04:53:16.000Z | examples/processing/bezier.py | strakam/PyEasyGraphics | 57a586aa92385d26725d4ec3d61b2bbbe970195d | [
"BSD-3-Clause"
] | 4 | 2019-05-04T13:42:40.000Z | 2021-04-15T10:38:48.000Z | from easygraphics.processing import *
from easygraphics import *
def setup():
    # easygraphics setup hook: canvas size and fill colour for the shape.
    set_size(400, 300)
    set_fill_color("red")
# Rotation angle in degrees, advanced one degree per frame.
t = 0
def draw():
    # easygraphics per-frame hook: draw a red bezier "leaf" that rotates.
    global t
    clear()
    t += 1
    t = t % 90  # restart the rotation every 90 degrees
    rotate(t)
    set_line_width(4)
    begin_shape()
    vertex(30, 20)
    bezier_vertex(80, 0, 80, 75, 30, 75)
    bezier_vertex(50, 80, 60, 25, 30, 20)
    end_shape()
# Hand control to easygraphics, which calls setup() once then draw() per frame.
run_app(globals())
| 15.115385 | 41 | 0.597964 | 61 | 393 | 3.688525 | 0.639344 | 0.142222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.132404 | 0.26972 | 393 | 25 | 42 | 15.72 | 0.651568 | 0 | 0 | 0 | 0 | 0 | 0.007634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
497883ad3b28ebcf70614800f9bbf5877c1b06c7 | 1,417 | py | Python | examples/clouds/moultiple_clouds.py | fabioam/python-ffmpeg-video-streaming | e8744edd04d59c4bdf4ace1e618745c4cc21a12a | [
"MIT"
] | 1 | 2020-12-23T13:26:26.000Z | 2020-12-23T13:26:26.000Z | examples/clouds/moultiple_clouds.py | fabioam/python-ffmpeg-video-streaming | e8744edd04d59c4bdf4ace1e618745c4cc21a12a | [
"MIT"
] | null | null | null | examples/clouds/moultiple_clouds.py | fabioam/python-ffmpeg-video-streaming | e8744edd04d59c4bdf4ace1e618745c4cc21a12a | [
"MIT"
] | null | null | null | """
examples.clouds.google_cloud
~~~~~~~~~~~~
Open a file from a local path and save dash files to multiple clouds
:copyright: (c) 2019 by Amin Yazdanpanah.
:website: https://www.aminyazdanpanah.com
:email: contact@aminyazdanpanah.com
:license: MIT, see LICENSE for more details.
"""
import sys
import ffmpeg_streaming
from examples.clouds.aws_cloud import aws_cloud
from examples.clouds.azure_cloud import azure_cloud
from examples.clouds.google_cloud import google_cloud
def transcode_progress(percentage, ffmpeg):
    """Render a one-line console progress bar for the transcode.

    This is the ffmpeg-streaming progress hook; it could equally update a
    database field or push progress to clients over a socket.
    """
    bar = '#' * percentage + '-' * (100 - percentage)
    sys.stdout.write("\rTranscoding...({0}%)[{1}]".format(percentage, bar))
    sys.stdout.flush()
def main():
    # Build the (download, upload) cloud adapters for each provider.
    from_aws_cloud, to_aws_cloud = aws_cloud('bucket_name', 'key')
    from_azure_cloud, to_azure_cloud = azure_cloud('container', 'blob')
    from_google_cloud, to_google_cloud = google_cloud('bucket_name', 'object_name')
    # Transcode the local file to DASH (HEVC, auto representations) and
    # upload the packaged output to all three clouds at once.
    (
        ffmpeg_streaming
        .dash('/var/www/media/video.mkv', adaption='"id=0,streams=v id=1,streams=a"')
        .format('libx265')
        .auto_rep()
        .package(output='/var/www/media/stream.mpd', clouds=[to_aws_cloud, to_azure_cloud, to_google_cloud],
                 progress=transcode_progress)
    )
if __name__ == "__main__":
    sys.exit(main())
| 30.804348 | 112 | 0.692308 | 192 | 1,417 | 4.880208 | 0.479167 | 0.082177 | 0.057631 | 0.053362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010318 | 0.179252 | 1,417 | 45 | 113 | 31.488889 | 0.795357 | 0.276641 | 0 | 0 | 0 | 0 | 0.17126 | 0.075787 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.227273 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
497da71deb84ae0cfe90e501ef8f61d274540a38 | 1,092 | py | Python | mlapp/MLAPP_CODE/MLAPP-C4-Code/SampleDiscreate.py | xishansnow/MLAPP | 2f30cd94fd852a3f66fe92a124f65722bd2af509 | [
"MIT"
] | null | null | null | mlapp/MLAPP_CODE/MLAPP-C4-Code/SampleDiscreate.py | xishansnow/MLAPP | 2f30cd94fd852a3f66fe92a124f65722bd2af509 | [
"MIT"
] | null | null | null | mlapp/MLAPP_CODE/MLAPP-C4-Code/SampleDiscreate.py | xishansnow/MLAPP | 2f30cd94fd852a3f66fe92a124f65722bd2af509 | [
"MIT"
] | null | null | null | import numpy as np
def sample_discreate(prob, n_samples):
    """Sample class labels from a categorical (class-prior) distribution.

    M = sample_discreate(prob, n_samples)

    Input:
        prob: class prior probabilities, shape (n_classes,)
        n_samples: number of labels to draw
    Output:
        M: sampled labels in {0, ..., n_classes - 1}, shape (n_samples,)
    Example:
        sample_discreate(np.array([0.8, 0.2]), n) draws n labels from {0, 1}
        with P(0) = 0.8 and P(1) = 0.2.
    Note:
        The RNG is re-seeded (seed 1) on every call, so repeated calls
        return identical samples; kept for reproducibility.
    """
    np.random.seed(1)  # make every call reproducible
    R = np.random.rand(n_samples)  # uniform draws in [0, 1)
    cumprob = np.cumsum(prob)      # CDF of the class distribution
    # A draw's label is the number of interior CDF values strictly below it.
    # searchsorted(..., side='left') computes exactly that count, replacing
    # the original pair of equivalent loops (over classes / over samples).
    M = np.searchsorted(cumprob[:-1], R, side='left').astype(float)
    return M
# Smoke test: draw 10 labels from a 0.8/0.2 two-class prior.
if __name__ == '__main__':
    print(sample_discreate(np.array([0.8,0.2]),10))
| 33.090909 | 62 | 0.52381 | 132 | 1,092 | 4.151515 | 0.44697 | 0.145985 | 0.069343 | 0.072993 | 0.142336 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033046 | 0.362637 | 1,092 | 32 | 63 | 34.125 | 0.75431 | 0.358974 | 0 | 0 | 0 | 0 | 0.012461 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.176471 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
497ef66dfa80b56eff78cc19db264435307b6a92 | 7,239 | py | Python | MusicPlayer.py | JonathanLayman/Music_For_Mom | e08341a135f641e6f61897b55dc677e1980c6ffc | [
"MIT"
] | null | null | null | MusicPlayer.py | JonathanLayman/Music_For_Mom | e08341a135f641e6f61897b55dc677e1980c6ffc | [
"MIT"
] | null | null | null | MusicPlayer.py | JonathanLayman/Music_For_Mom | e08341a135f641e6f61897b55dc677e1980c6ffc | [
"MIT"
] | null | null | null | import PySimpleGUI as sg
import vlc
from gmusicapi import Mobileclient
import requests
from datetime import timedelta
class MusicPlayer:
    def __init__(self, device_id, token):
        """Log into Google Music, load the library, prime the first track
        of the first playlist, and build the PySimpleGUI window layout."""
        # Gmusic API initialization
        print("initializing client")
        self.api = Mobileclient()
        print("logging in")
        self.api.oauth_login(device_id, token)
        print("loading all songs")
        self.all_songs = self.api.get_all_songs()
        print("loading playlists")
        self.all_playlists = self.api.get_all_user_playlist_contents()
        # Map playlist name -> playlist id for quick lookups.
        self.all_playlist_names = {}
        for playlist in self.all_playlists:
            self.all_playlist_names[playlist["name"]] = playlist["id"]
        # VLC initialization
        self.track_file = vlc.MediaPlayer()
        self.track_list = []
        self.titles = []
        self.track_number = 0
        self.playlists = []
        # Playback position/length in milliseconds; -1 means "unknown yet".
        self.current_time = -1
        self.max_time = -1
        # Get playlists, songs from the first playlist, and load the first song
        self.get_playlists()
        self.get_songs_from_playlist(self.playlists[0])
        self.song = self.track_list[self.track_number]["trackId"]
        self.load_track()
        # GUI initialization
        print("creating window")
        # NOTE(review): this clobbers the trackId assigned a few lines up --
        # confirm whether self.song is meant to start empty.
        self.song = ""
        self.player_layout = [
            [sg.Text("I love you Mom!", size=(15, 1), font=("Helvetica", 25))],
            [sg.Listbox(values=self.playlists, size=(30, 20), bind_return_key=True, key="_playlists_"),
             # sg.Image(),
             sg.Listbox(values=self.titles, size=(30, 20), bind_return_key=True, key="_Tracks_")],
            [sg.Text("Click Play or select song", key="_SongName_", enable_events=True)],
            [sg.Text("Volume:"), sg.Slider(range=(0, 100), orientation="h", size=(20, 15),
                                           default_value=self.track_file.audio_get_volume(), key="_volume_"),
             sg.Button("Play"), sg.Button("Pause"), sg.Button("Next")]
        ]
        self.title = "Music Player"
        self.window = sg.Window(self.title).Layout(self.player_layout)
def get_playlists(self):
data = self.api.get_all_playlists()
self.playlists = []
for playlist in data:
if not playlist['deleted']:
self.playlists.append(playlist['name'])
print(self.playlists)
    def change_playlists(self, name):
        """Switch the active playlist to *name* and reload its tracks."""
        # NOTE(review): the loop only locates *name* by position and then
        # indexes the same list, so self.playlists[pos] equals name again.
        for pos, title in enumerate(self.playlists):
            if title == name:
                self.get_songs_from_playlist(self.playlists[pos])
def get_songs_from_playlist(self, name):
print("Obtaining track list")
tracks = []
if name in self.all_playlist_names:
for playlist in self.all_playlists:
if playlist["name"] == name:
for track in playlist["tracks"]:
tracks.append(track)
break
self.track_list = tracks
self.get_playlist_song_titles()
def get_playlist_song_titles(self):
print("Getting playlist song titles")
titles = []
for song in self.track_list:
if song["source"] == "2":
titles.append(song["track"]["title"])
else:
for track in self.all_songs:
if track["id"] == song["trackId"]:
print("match found")
titles.append(track["title"])
else:
print("No match found")
print(titles)
self.titles = titles
def get_song_position_from_title(self, title):
for pos, name in enumerate(self.titles):
if name == title:
return pos
else:
print("Couldn't find song in tracks")
def download_song(self):
print("downloading song")
url = self.api.get_stream_url(self.song)
doc = requests.get(url)
with open("song.mp3", "wb") as f:
f.write(doc.content)
def load_track(self):
self.track_file = vlc.MediaPlayer("song.mp3")
print("Time:", self.track_file.get_length())
def play(self):
self.track_file.play()
self.window.FindElement("_SongName_").Update(value=self.titles[self.track_number])
def stop(self):
self.track_file.stop()
def pause(self):
self.track_file.pause()
def next(self):
self.track_number += 1
self.song = self.track_list[self.track_number]["trackId"]
self.window.FindElement("_Tracks_").SetValue(self.titles[self.track_number])
self.download_song()
self.track_file.stop()
self.load_track()
self.track_file.play()
self.max_time = self.track_file.get_time()
def run(self):
print("launching program")
while True:
self.current_time = self.track_file.get_time()
if self.max_time == -1:
self.max_time = self.track_file.get_length()
elif self.max_time == 0:
self.max_time = -1
else:
current = timedelta(milliseconds=self.current_time)
max = timedelta(milliseconds=self.max_time)
# print("Current", current, "Max", max)
# print(int((self.current_time / self.max_time) * 100))
if (self.current_time + 500) > self.max_time:
self.next()
event, values = self.window.Read(timeout=100)
if event is not None:
if event == "Play":
self.play()
elif event == "Stop":
self.stop()
elif event == "Pause":
self.pause()
elif event == "Next":
self.next()
elif event == "_Tracks_":
self.track_number = self.get_song_position_from_title(values[event][0])
self.song = self.track_list[self.track_number]["trackId"]
self.download_song()
self.track_file.stop()
self.load_track()
self.play()
elif event == "_playlists_":
print(values[event][0])
self.change_playlists(values[event][0])
self.window.FindElement("_Tracks_").Update(self.titles)
elif event == "_volume_":
print("Volume", event, values)
else:
self.track_file.audio_set_volume(values["_volume_"])
if event == "Quit" or values is None:
break
def _start_player():
    """Read the stored device id and launch the player event loop.

    Raises FileNotFoundError if the oauth device-id file is missing.
    """
    with open("oauth/device_id.txt", "r") as f:
        device_id = f.read()
    mp = MusicPlayer(device_id, "oauth/oauth_code.txt")
    mp.run()


if __name__ == "__main__":
    try:
        _start_player()
    except FileNotFoundError:
        # No stored credentials yet: offer to run the login flow, then retry.
        print("Authorization Token Missing. Run login.py")
        answer = input("Would you like to run now? y/n: ")
        if answer == "y":
            import login
            _start_player()
| 36.933673 | 109 | 0.550076 | 828 | 7,239 | 4.618357 | 0.211353 | 0.0659 | 0.047594 | 0.017782 | 0.262814 | 0.195868 | 0.180178 | 0.146705 | 0.132061 | 0.132061 | 0 | 0.009106 | 0.332504 | 7,239 | 195 | 110 | 37.123077 | 0.782285 | 0.032739 | 0 | 0.230303 | 0 | 0 | 0.097655 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078788 | false | 0 | 0.036364 | 0 | 0.127273 | 0.109091 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
498071b90e14f0c10981cbbf33de97458dfc9968 | 8,138 | py | Python | stanza/models/depparse/arbori.py | giorgianb/multi-stanza | 43f606785027b015417f0d31f7cb9066e8d76558 | [
"Apache-2.0"
] | null | null | null | stanza/models/depparse/arbori.py | giorgianb/multi-stanza | 43f606785027b015417f0d31f7cb9066e8d76558 | [
"Apache-2.0"
] | null | null | null | stanza/models/depparse/arbori.py | giorgianb/multi-stanza | 43f606785027b015417f0d31f7cb9066e8d76558 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import random
import heapq
class Edge:
    """Directed edge u -> v with an optional display name."""

    def __init__(self, u, v, name=""):
        self.u = u
        self.v = v
        self.name = name

    def __iter__(self):
        # Allows `tail, head = edge` unpacking.
        yield self.u
        yield self.v

    def __repr__(self):
        return repr((self.u, self.v)) if self.name == "" else self.name
def Get1Best(V, E, score):
    """Return the best incoming edge per vertex for a maximum-score
    arborescence over vertices V and edges E, rooted at vertex 0.

    Follows a Chu-Liu/Edmonds-style contraction: greedily pick the highest
    scoring in-edge for every non-root vertex; if that selection contains a
    cycle, contract the cycle into a fresh vertex, recurse, and expand.
    Returns a dict mapping vertex -> chosen Edge ({} if no valid tree covers
    all vertices).
    """
    # print("Get1Best(")
    # print("\tV =", V)
    # print("\tE =", E)
    # print("\tscore =", score)
    # print(")")
    n_vertices = len(score)
    best_in_edge = {}
    # Greedy step: best-scoring incoming edge for each non-root vertex.
    for v in V - {0}:
        best_score = -np.inf
        for e in E:
            u, t = e
            if t != v:
                continue
            if score[e] > best_score:
                best_in_edge[v] = e
                best_score = score[e]
    if (cycle := contains_cycle(best_in_edge)) is not None:
        C, C_E = cycle  # vertices and edges of the detected cycle
        real = {}       # contracted edge -> original edge
        v_c = max(V) + 1            # fresh vertex representing the cycle
        V_p = (V | {v_c}) - C       # contracted vertex set
        E_p = set()
        kicks_out = {}  # edge into v_c -> cycle edge it would displace
        score_p = {}
        for e in E:
            e_p = None
            t, u = e
            if t not in C and u not in C:
                e_p = e
                score_p[e_p] = score[e]
            elif t in C and u not in C:
                # Edge leaving the cycle: re-root it at v_c.
                e_p = Edge(v_c, u, e.name)
                score_p[e_p] = score[e]
            elif u in C and t not in C:
                # Edge entering the cycle: its effective score is the gain
                # over the cycle edge it kicks out.
                e_p = Edge(t, v_c, e.name)
                kicks_out[e_p] = best_in_edge[u]
                score_p[e_p] = score[e] - score[kicks_out[e_p]]
            else:
                continue  # remove internal edges within the cycle
            real[e_p] = e
            E_p.add(e_p)
        A = Get1Best(V_p, E_p, score_p)
        # If the recursion failed to cover every contracted vertex, give up.
        if V_p & set(A.keys()) != V_p - {0}:
            # print("V_p:", V_p)
            # print("A:", A)
            return {}
        # Expand: translate contracted edges back to their originals.
        for e in A.values():
            u, v = real[e]
            best_in_edge[v] = real[e]
        # Keep every cycle edge except the one displaced by v_c's in-edge.
        kicked_out_edge = kicks_out[A[v_c]]
        for e in C_E - {kicked_out_edge}:
            u, v = e
            best_in_edge[v] = e
        return best_in_edge
    return best_in_edge
def find_next_edge(e, A, V, E, score):
    """Return the cheapest valid replacement for edge `e` in tree `A`.

    Scans E for another edge into e.v whose source is not a descendant of
    e.v (so swapping it in cannot create a cycle), minimizing the score gap
    score[e] - score[candidate].  Returns None if no candidate exists.
    """
    best_gap = np.inf
    best_alt = None
    for candidate in E:
        # Skip the edge itself and edges with a different destination.
        if candidate == e or candidate.v != e.v:
            continue
        # Skip sources lying below e.v in A: taking them would form a cycle.
        if is_ancestor(A, e.v, candidate.u):
            continue
        gap = score[e] - score[candidate]
        if gap < best_gap:
            best_gap, best_alt = gap, candidate
    return best_alt
def is_ancestor(A, u, v):
    """Return True if vertex u lies on the path from v back to the root 0.

    A maps each vertex to its incoming edge; edge.u is the parent vertex.
    """
    node = v
    while node != 0:
        if node == u:  # reached the vertex we were looking for
            return True
        # print("taking edge:", A[node])
        node = A[node].u  # step up to the parent
    # print("-----------------------")
    return False
def FindEdgeToBan(A, V, E, score, req, banned):
    """Find the tree edge of A whose removal costs the least.

    Mirrors Get1Best's contraction structure: for each vertex it considers
    the current tree edge (unless banned) and all alternatives, and tracks
    the non-required tree edge with the smallest gap to its best substitute.
    Returns (edge_to_ban, diff) where diff is that score gap (np.inf / None
    when nothing can be banned).
    """
    # print("FindEdgeToBan(")
    # print("\tA=", A)
    # print("\tV =", V)
    # print("\tE =", E)
    # print("\treq =", req)
    # print("\tbanned =", banned)
    # print("\tscore=", score)
    # print(")")
    E = E - banned
    n_vertices = len(score)
    best_in_edge = {}
    diff = np.inf
    edge_to_ban = None
    for v in V - {0}:
        # Seed with the tree's current in-edge so ties keep the tree stable.
        if v in A and A[v] not in banned:
            best_score = score[A[v]]
            best_in_edge[v] = A[v]
        else:
            best_score = -np.inf
        for e in E:
            u, t = e
            if t != v:
                continue
            if score[e] > best_score:
                best_in_edge[v] = e
                best_score = score[e]
        # Only tree edges that are not required may be banned; rank them by
        # how little score is lost when swapping in the next-best edge.
        if best_in_edge[v] not in req and best_in_edge[v] in A.values():
            next_edge = find_next_edge(best_in_edge[v], A, V, E, score)
            if next_edge is not None and score[best_in_edge[v]] - score[next_edge] < diff:
                edge_to_ban = best_in_edge[v]
                diff = score[best_in_edge[v]] - score[next_edge]
    # print({e:score[e] for e in E})
    if (cycle := contains_cycle(best_in_edge)) is not None:
        C, C_E = cycle
        real = {}
        v_c = max(V) + 1
        V_p = (V | {v_c}) - C
        E_p = set()
        kicks_out = {}
        score_p = {}
        req_p = set()
        banned_p = set()
        A_p = dict()
        # Contract the cycle, carrying the req/banned/tree memberships along.
        for e in E | set(A.values()):
            e_p = None
            t, u = e
            if t not in C and u not in C:
                e_p = e
                score_p[e_p] = score[e]
            elif t in C and u not in C:
                e_p = Edge(v_c, u, e.name)
                score_p[e_p] = score[e]
            elif u in C and t not in C:
                e_p = Edge(t, v_c, e.name)
                kicks_out[e_p] = best_in_edge[u]
                score_p[e_p] = score[e] - score[kicks_out[e_p]]
            else:
                continue  # remove internal edges within the cycle
            real[e_p] = e
            if e in E:
                E_p.add(e_p)
            if e in req:
                req_p.add(e_p)
            if e in banned:
                banned_p.add(e_p)
            if e in A.values():
                A_p[e_p.v] = e_p
        edge_to_ban_p, diff_p = FindEdgeToBan(A_p, V_p, E_p, score_p, req_p, banned_p)
        # Prefer the contracted result only when it is strictly cheaper;
        # diff_p is np.inf when the recursion found nothing, so the original
        # candidate survives in that case.
        if diff_p < diff:
            diff = diff_p
            edge_to_ban = real[edge_to_ban_p]
        return edge_to_ban, diff
    return edge_to_ban, diff
# edges is an array in the form (vertex, parent)
def contains_cycle(edges):
visited_vertices = set()
visited_edges = set()
unvisited = set(edges.keys())
def dfs(v):
if v in visited_vertices:
return (visited_vertices, visited_edges)
if v in unvisited:
unvisited.remove(v)
visited_vertices.add(v)
for child, in_edge in edges.items():
# disallow self-loops
if in_edge.u == v and v != child:
visited_edges.add(in_edge)
cycle_found = dfs(child)
if cycle_found:
return True
visited_edges.remove(in_edge)
visited_vertices.remove(v)
return False
while len(unvisited) > 0:
v = random.sample(unvisited, 1)[0]
if cycle_found := dfs(v):
return visited_vertices, visited_edges
return None
def GetConstrained1Best(V, E, score, req, banned):
    """1-best arborescence subject to required and banned edge sets.

    Banned edges are removed outright; for any vertex that is the head of a
    required edge, every other incoming candidate is dropped so the required
    edge must be chosen.
    """
    # print("GetConstrained1Best(req={}, banned={})".format(req, banned))
    allowed = E - banned
    required_heads = {edge.v for edge in req}
    allowed = {edge for edge in allowed
               if edge.v not in required_heads or edge in req}
    return Get1Best(V, allowed, score)
def tree_score(A, score):
    """Total score of arborescence A (vertex -> incoming edge) under `score`."""
    return sum(score[edge] for edge in A.values())
def GetKBest(k, V, E, score):
    """Enumerate up to k best arborescences of (V, E) rooted at 0.

    Appears to follow a Camerini-style branch-and-bound: each frontier state
    pins a set of required edges and a set of banned edges, and is ranked on
    a heap by (negated tree score + cheapest ban cost).  Returns a list of
    (tree, score) pairs, best first; may return fewer than k when no further
    edge can be banned.

    NOTE(review): heap entries embed sets, which are unorderable — a tie on
    the float key would raise TypeError when heapq compares entries; confirm
    whether ties can occur for the intended inputs.
    """
    req = set()
    banned = set()
    A = Get1Best(V, E, score)
    # print("A =", A)
    As = [(A, tree_score(A, score))]
    Q = []
    e_ban, diff = FindEdgeToBan(A, V, E, score, req, banned)
    heapq.heappush(Q, (-tree_score(A, score) + diff, req, banned, A, e_ban))
    while len(As) < k:
        weight, req, banned, A, e_ban = heapq.heappop(Q)
        # Skip exhausted states (no edge left to ban) while the queue lasts.
        while len(Q) > 0 and e_ban is None:
            weight, req, banned, A, e_ban = heapq.heappop(Q)
        if e_ban is None:
            return As
        # Branch: either e_ban is required from now on, or it is banned.
        req_p = req | {e_ban}
        banned_p = banned | {e_ban}
        A_p = GetConstrained1Best(V, E, score, req, banned_p)
        if set(A_p.keys()) == V - {0}:  # found a complete spanning tree
            As.append((A_p, tree_score(A_p, score)))
            e_ban, diff = FindEdgeToBan(A_p, V, E, score, req, banned_p)
            heapq.heappush(Q, (-tree_score(A_p, score) + diff, req, banned_p, A_p, e_ban))
        e_ban, diff = FindEdgeToBan(A, V, E, score, req_p, banned)
        heapq.heappush(Q, (-tree_score(A, score) + diff, req_p, banned, A, e_ban))
    return As
| 28.756184 | 90 | 0.477882 | 1,184 | 8,138 | 3.086149 | 0.105574 | 0.017515 | 0.051998 | 0.033114 | 0.455939 | 0.379584 | 0.331144 | 0.28763 | 0.271757 | 0.23618 | 0 | 0.003919 | 0.404276 | 8,138 | 282 | 91 | 28.858156 | 0.749794 | 0.104448 | 0 | 0.453659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058537 | false | 0 | 0.014634 | 0.009756 | 0.17561 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4980d5cdf65fd5892a48bb3006780338f4879dca | 36,864 | py | Python | src/scenic/syntax/translator.py | HaoranBai17/Scenic | 65372e488ec9323e550ccc1f157369aad88ad94d | [
"BSD-3-Clause"
] | 1 | 2019-06-14T21:04:37.000Z | 2019-06-14T21:04:37.000Z | src/scenic/syntax/translator.py | yuul/Scenic | 66fbf7aa67e649cf2379ee6e4d4273ff4980c04c | [
"BSD-3-Clause"
] | null | null | null | src/scenic/syntax/translator.py | yuul/Scenic | 66fbf7aa67e649cf2379ee6e4d4273ff4980c04c | [
"BSD-3-Clause"
] | 2 | 2020-01-02T12:37:46.000Z | 2020-07-30T02:02:01.000Z |
#### TRANSLATOR
#### turns a Scenic program into a Scenario object
import sys
import os
import io
import builtins
import traceback
import time
import inspect
import types
import importlib
import importlib.abc
import importlib.util
import itertools
from collections import namedtuple
from contextlib import contextmanager
import tokenize
from tokenize import NAME, NL, NEWLINE, ENDMARKER, OP, NUMBER, COLON, COMMENT, ENCODING
from tokenize import LPAR, RPAR, LSQB, RSQB, COMMA, DOUBLESLASH, DOUBLESLASHEQUAL
from tokenize import AT, LEFTSHIFT, RIGHTSHIFT, VBAR, AMPER, TILDE, CIRCUMFLEX, STAR
from tokenize import LEFTSHIFTEQUAL, RIGHTSHIFTEQUAL, VBAREQUAL, AMPEREQUAL, CIRCUMFLEXEQUAL
from tokenize import INDENT, DEDENT
import ast
from ast import parse, dump, NodeVisitor, NodeTransformer, copy_location, fix_missing_locations
from ast import Load, Store, Name, Call, Tuple, BinOp, MatMult, BitAnd, BitOr, BitXor, LShift
from ast import RShift, Starred, Lambda, AnnAssign, Set, Str, Num, Subscript, Index
from scenic.core.distributions import Samplable, needsSampling
from scenic.core.lazy_eval import needsLazyEvaluation
from scenic.core.workspaces import Workspace
from scenic.core.scenarios import Scenario
from scenic.core.object_types import Constructible
from scenic.core.utils import ParseError, RuntimeParseError
import scenic.core.pruning as pruning
import scenic.syntax.veneer as veneer
import scenic.syntax.relations as relations
### THE TOP LEVEL: compiling a Scenic program
def scenarioFromString(string, filename='<string>'):
    """Compile a string of Scenic code into a Scenario.

    BUG FIX: the original accepted a `filename` parameter but never passed it
    on, so errors were always reported against '<stream>'.
    """
    stream = io.BytesIO(string.encode())
    return scenarioFromStream(stream, filename=filename)
def scenarioFromFile(path):
    """Compile a Scenic file into a Scenario.

    Raises FileNotFoundError if `path` does not exist and RuntimeError if it
    lacks a recognized Scenic extension.
    """
    if not os.path.exists(path):
        raise FileNotFoundError(path)
    fullpath = os.path.realpath(path)
    # Only the extension matters here; the original also split off the
    # directory and base name but never used them.
    _, extension = os.path.splitext(fullpath)
    if not extension or extension[1:] not in scenicExtensions:
        ok = ', '.join(scenicExtensions)
        err = f'Scenic scenario does not have valid extension ({ok})'
        raise RuntimeError(err)
    with open(path, 'rb') as stream:
        return scenarioFromStream(stream, filename=fullpath, path=path)
def scenarioFromStream(stream, filename='<stream>', path=None):
    """Compile a binary stream of Scenic code into a Scenario.

    The stream is compiled inside a script-like namespace (see
    topLevelNamespace) and the resulting module state is turned into a
    Scenario object.
    """
    with topLevelNamespace(path) as namespace:
        compileStream(stream, namespace, filename=filename)
        return constructScenarioFrom(namespace)
@contextmanager
def topLevelNamespace(path=None):
    """Creates an environment like that of a Python script being run directly.

    Specifically, __name__ is '__main__', __file__ is the path used to invoke
    the script (not necessarily its absolute path), and the parent directory is
    added to the path so that 'import blobbo' will import blobbo from that
    directory if it exists there.
    """
    if path is None:
        directory = os.getcwd()
    else:
        directory = os.path.dirname(path)
    namespace = {'__name__': '__main__'}
    if path is not None:
        namespace['__file__'] = path
    sys.path.insert(0, directory)
    try:
        yield namespace
    finally:
        # Undo the sys.path modification no matter how the body exits.
        del sys.path[0]
def compileStream(stream, namespace, filename='<stream>'):
    """Compile a Scenic token stream into `namespace`.

    Pipeline: tokenize -> partition into blocks at import boundaries ->
    per block: translate tokens to Python, parse, transform the AST,
    compile and execute into `namespace`.  Returns the namespace.

    NOTE(review): several verbose/debug messages below say "(unknown)" —
    this looks like a redaction of an original {filename} placeholder in
    the f-strings; confirm against the upstream source before relying on
    the message text.
    """
    if verbosity >= 2:
        veneer.verbosePrint(f' Compiling Scenic module from (unknown)...')
        startTime = time.time()
    # Tokenize input stream
    try:
        tokens = list(tokenize.tokenize(stream.readline))
    except tokenize.TokenError as e:
        line = e.args[1][0] if isinstance(e.args[1], tuple) else e.args[1]
        raise TokenParseError(line, 'file ended during multiline string or expression')
    # Partition into blocks with all imports at the end (since imports could
    # pull in new constructor definitions, which change the way subsequent
    # tokens are transformed)
    blocks = partitionByImports(tokens)
    veneer.activate()
    try:
        # Execute preamble
        exec(compile(preamble, '<veneer>', 'exec'), namespace)
        # Execute each block
        for blockNum, block in enumerate(blocks):
            # Find all custom constructors defined so far
            constructors = findConstructorsIn(namespace)
            # Translate tokens to valid Python syntax
            translator = TokenTranslator(constructors)
            newSource, lineMap, allConstructors = translator.translate(block)
            if dumpTranslatedPython:
                print(f'### Begin translated Python from block {blockNum} of (unknown)')
                print(newSource)
                print('### End translated Python')
            # Parse the translated source
            tree = parseTranslatedSource(newSource, lineMap, filename)
            # Modify the parse tree to produce the correct semantics
            newTree, requirements = translateParseTree(tree, lineMap, allConstructors)
            if dumpFinalAST:
                print(f'### Begin final AST from block {blockNum} of (unknown)')
                print(ast.dump(newTree, include_attributes=True))
                print('### End final AST')
            # Compile the modified tree
            code = compileTranslatedTree(newTree, lineMap, filename)
            # Execute it
            executeCodeIn(code, namespace, lineMap, filename)
        # Extract scenario state from veneer and store it
        # NOTE(review): `requirements` here is the value from the final loop
        # iteration; partitionByImports should make the last block the only
        # one with requirements, but confirm.
        storeScenarioStateIn(namespace, requirements, filename)
    finally:
        veneer.deactivate()
    if verbosity >= 2:
        totalTime = time.time() - startTime
        veneer.verbosePrint(f' Compiled Scenic module in {totalTime:.4g} seconds.')
    return namespace
### TRANSLATION PHASE ZERO: definitions of language elements not already in Python
## Options

showInternalBacktrace = False  # chain internal exceptions onto parse errors
dumpTranslatedPython = False   # print the Python generated from each block
dumpFinalAST = False           # print the AST after tree surgery
verbosity = 0                  # >= 2 enables compile-time progress messages
usePruning = True              # enable scenario pruning after compilation

## Preamble
# (included at the beginning of every module to be translated;
# imports the implementations of the public language features)
preamble = """\
from scenic.syntax.veneer import *
"""

## Get Python names of various elements
## (for checking consistency between the translator and the veneer)

api = set(veneer.__all__)  # every public name the veneer exports

## Functions used internally

rangeConstructor = 'Range'
createDefault = 'PropertyDefault'
internalFunctions = { rangeConstructor, createDefault }

# sanity check: these functions actually exist
for imp in internalFunctions:
    assert imp in api, imp

## Statements implemented by functions

requireStatement = 'require'
functionStatements = { requireStatement, 'param', 'mutate' }

# sanity check: implementations of statements actually exist
for imp in functionStatements:
    assert imp in api, imp

## Built-in functions

builtinFunctions = { 'resample', 'verbosePrint' }

# sanity check: implementations of built-in functions actually exist
for imp in builtinFunctions:
    assert imp in api, imp

## Constructors and specifiers

constructorStatement = 'constructor'  # statement defining a new constructor
Constructor = namedtuple('Constructor', ('name', 'parent', 'specifiers'))

# Scenic specifier phrases available on Point, keyed by their word sequence
# and mapped to the veneer function implementing them.
pointSpecifiers = {
    ('visible', 'from'): 'VisibleFrom',
    ('offset', 'by'): 'OffsetBy',
    ('offset', 'along'): 'OffsetAlongSpec',
    ('at',): 'At',
    ('in',): 'In',
    ('on',): 'In',
    ('beyond',): 'Beyond',
    ('visible',): 'VisibleSpec',
    ('left', 'of'): 'LeftSpec',
    ('right', 'of'): 'RightSpec',
    ('ahead', 'of'): 'Ahead',
    ('behind',): 'Behind',
}
# Additional specifiers available on OrientedPoint.
orientedPointSpecifiers = {
    ('apparently', 'facing'): 'ApparentlyFacing',
    ('facing', 'toward'): 'FacingToward',
    ('facing',): 'Facing'
}
# Object adds no specifiers of its own.
objectSpecifiers = {
}

# sanity check: implementations of specifiers actually exist
for imp in pointSpecifiers.values():
    assert imp in api, imp
for imp in orientedPointSpecifiers.values():
    assert imp in api, imp
for imp in objectSpecifiers.values():
    assert imp in api, imp

# The three built-in constructors and their inheritance chain.
builtinConstructors = {
    'Point': Constructor('Point', None, pointSpecifiers),
    'OrientedPoint': Constructor('OrientedPoint', 'Point', orientedPointSpecifiers),
    'Object': Constructor('Object', 'OrientedPoint', objectSpecifiers)
}
functionStatements.update(builtinConstructors)

# sanity check: built-in constructors actually exist
for const in builtinConstructors:
    assert const in api, const
## Prefix operators

# Scenic prefix operator phrases (1 or 2 words) mapped to their veneer
# implementations.
prefixOperators = {
    ('relative', 'position'): 'RelativePosition',
    ('relative', 'heading'): 'RelativeHeading',
    ('apparent', 'heading'): 'ApparentHeading',
    ('distance', 'from'): 'DistanceFrom',
    ('distance', 'to'): 'DistanceFrom',
    ('angle', 'from'): 'AngleFrom',
    ('angle', 'to'): 'AngleTo',
    ('ego', '='): 'ego',
    ('front', 'left'): 'FrontLeft',
    ('front', 'right'): 'FrontRight',
    ('back', 'left'): 'BackLeft',
    ('back', 'right'): 'BackRight',
    ('front',): 'Front',
    ('back',): 'Back',
    ('left',): 'Left',
    ('right',): 'Right',
    ('follow',): 'Follow',
    ('visible',): 'Visible'
}
assert all(1 <= len(op) <= 2 for op in prefixOperators)
prefixIncipits = { op[0] for op in prefixOperators }
assert not any(op in functionStatements for op in prefixIncipits)

# sanity check: implementations of prefix operators actually exist
for imp in prefixOperators.values():
    assert imp in api, imp

## Infix operators

# pseudo-operator for encoding argument packages for (3+)-ary ops
packageToken = (RIGHTSHIFT, '>>')
packageNode = RShift

# Each infix operator: Scenic syntax, veneer implementation (None for pure
# syntactic sugar), arity, the Python token it is rewritten to, and the AST
# node that token parses into.
InfixOp = namedtuple('InfixOp', ('syntax', 'implementation', 'arity', 'token', 'node'))
infixOperators = (
    # existing Python operators with new semantics
    InfixOp('@', 'Vector', 2, None, MatMult),
    # operators not in Python (in decreasing precedence order)
    InfixOp('at', 'FieldAt', 2, (LEFTSHIFT, '<<'), LShift),
    InfixOp('relative to', 'RelativeTo', 2, (AMPER, '&'), BitAnd),
    InfixOp('offset by', 'RelativeTo', 2, (AMPER, '&'), BitAnd),
    InfixOp('offset along', 'OffsetAlong', 3, (CIRCUMFLEX, '^'), BitXor),
    InfixOp('can see', 'CanSee', 2, (VBAR, '|'), BitOr),
    # just syntactic conveniences, not really operators
    InfixOp('from', None, 2, (COMMA, ','), None),
    InfixOp('for', None, 2, (COMMA, ','), None),
    InfixOp('to', None, 2, (COMMA, ','), None),
    InfixOp('by', None, 2, packageToken, None)
)

# Derived lookup tables, built from infixOperators below.
infixTokens = {}            # word sequence -> stand-in Python token
infixImplementations = {}   # AST node -> (arity, veneer function)
infixIncipits = set()       # first words of all infix operators
for op in infixOperators:
    # if necessary, set up map from Scenic to Python syntax
    if op.token is not None:
        tokens = tuple(op.syntax.split(' '))
        assert 1 <= len(tokens) <= 2, op
        assert tokens not in infixTokens, op
        infixTokens[tokens] = op.token
        incipit = tokens[0]
        assert incipit not in functionStatements, op
        infixIncipits.add(incipit)
    # if necessary, set up map from Python to Scenic semantics
    imp = op.implementation
    if imp is not None:
        assert imp in api, op
        node = op.node
        if node in infixImplementations:  # two operators may have the same implementation
            oldArity, oldName = infixImplementations[node]
            assert op.arity == oldArity, (op, oldName)
            assert imp == oldName, (op, oldName)
        else:
            infixImplementations[node] = (op.arity, imp)

allIncipits = prefixIncipits | infixIncipits

## Direct syntax replacements

replacements = {  # TODO police the usage of these? could yield bizarre error messages
    'of': tuple(),
    'deg': ((STAR, '*'), (NUMBER, '0.01745329252')),
    'ego': ((NAME, 'ego'), (LPAR, '('), (RPAR, ')'))
}

## Illegal and reserved syntax

# Python operators whose tokens Scenic repurposes as infix stand-ins; their
# direct use in Scenic source is an error.
illegalTokens = {
    LEFTSHIFT, RIGHTSHIFT, VBAR, AMPER, TILDE, CIRCUMFLEX,
    LEFTSHIFTEQUAL, RIGHTSHIFTEQUAL, VBAREQUAL, AMPEREQUAL, CIRCUMFLEXEQUAL,
    DOUBLESLASH, DOUBLESLASHEQUAL
}

# sanity check: stand-in tokens for infix operators must be illegal
for token in infixTokens.values():
    ttype = token[0]
    assert (ttype is COMMA or ttype in illegalTokens), token

# All words reserved by Scenic (cannot name constructors, etc.).
keywords = ({constructorStatement}
    | internalFunctions | functionStatements
    | replacements.keys())
### TRANSLATION PHASE ONE: handling imports
## Meta path finder and loader for Scenic files
scenicExtensions = ('sc', 'scenic')  # file extensions recognized as Scenic

class ScenicMetaFinder(importlib.abc.MetaPathFinder):
    """Meta path finder locating Scenic modules by their file extension."""

    def find_spec(self, name, paths, target):
        if paths is None:
            paths = sys.path
        else:
            # Submodule import: only the final component names the file.
            name = name.rpartition('.')[2]
        candidates = (
            (path, name + '.' + extension)
            for path in paths
            for extension in scenicExtensions
        )
        for path, filename in candidates:
            filepath = os.path.join(path, filename)
            if os.path.exists(filepath):
                loader = ScenicLoader(filepath, filename)
                return importlib.util.spec_from_file_location(
                    name, filepath, loader=loader)
        return None
class ScenicLoader(importlib.abc.Loader):
    """Loader that compiles a Scenic source file when its module executes."""

    def __init__(self, filepath, filename):
        self.filepath = filepath
        self.filename = filename

    def create_module(self, spec):
        # Defer to the default module-creation machinery.
        return None

    def exec_module(self, module):
        # Compile the Scenic source straight into the module's namespace.
        with open(self.filepath, 'rb') as stream:
            compileStream(stream, module.__dict__, filename=self.filepath)
        # Tag it so the import hook can harvest its scenario state.
        module._isScenicModule = True
# register the meta path finder
sys.meta_path.insert(0, ScenicMetaFinder())

## Post-import hook to inherit objects, etc. from imported Scenic modules

def hooked_import(*args, **kwargs):
    """Wrap builtins.__import__: after a Scenic module is imported while the
    veneer is active, merge its objects, global parameters and requirements
    into the current scenario's state."""
    module = original_import(*args, **kwargs)
    if getattr(module, '_isScenicModule', False):
        if veneer.isActive():
            veneer.allObjects.extend(module._objects)
            veneer.globalParameters.update(module._params)
            veneer.inheritedReqs.extend(module._requirements)
    return module

# Install the hook process-wide, keeping a handle on the original so the
# wrapper can delegate to it.
original_import = builtins.__import__
builtins.__import__ = hooked_import
## Miscellaneous utilities
def partitionByImports(tokens):
    """Partition a token stream into blocks at import boundaries.

    A new block begins at the first logical line that follows one or more
    import statements, so that constructors pulled in by the imports are
    known before the subsequent tokens are translated.  Returns a list of
    token lists whose concatenation is the input stream.
    """
    blocks = []
    currentBlock = []
    duringImport = False
    haveImported = False
    finishLine = False
    for token in tokens:
        startNewBlock = False
        if finishLine:
            if token.type == NEWLINE or token.type == NL:
                finishLine = False
                if duringImport:
                    duringImport = False
                    haveImported = True
        else:
            assert not duringImport
            finishLine = True
            # BUG FIX: the original condition was
            #   token.type == NAME and token.string == 'import' or token.string == 'from'
            # which, due to precedence, matched the 'from' string regardless
            # of the token's type.
            if token.type == NAME and token.string in ('import', 'from'):
                duringImport = True
            elif token.type in (NEWLINE, NL, COMMENT, ENCODING):
                finishLine = False
            elif haveImported:
                # could use new constructors; needs to be in a new block
                startNewBlock = True
        if startNewBlock:
            blocks.append(currentBlock)
            currentBlock = [token]
            haveImported = False
        else:
            currentBlock.append(token)
    blocks.append(currentBlock)  # add last block
    return blocks
def findConstructorsIn(namespace):
    """Collect user-defined Scenic constructors found in `namespace`.

    Returns a list of Constructor records for every Constructible subclass,
    excluding the built-in constructors themselves.
    """
    found = []
    for name, value in namespace.items():
        if not (inspect.isclass(value) and issubclass(value, Constructible)):
            continue
        if name in builtinConstructors:
            continue
        parent = None
        for base in value.__bases__:
            if issubclass(base, Constructible):
                # Exactly one Constructible base is expected.
                assert parent is None
                parent = base
        found.append(Constructor(name, parent.__name__, {}))
    return found
### TRANSLATION PHASE TWO: translation at the level of tokens
class TokenParseError(ParseError):
    """Parse error detected during token translation."""

    def __init__(self, tokenOrLine, message):
        # Accept either a token (which carries a .start position) or a bare
        # line number.
        if hasattr(tokenOrLine, 'start'):
            line = tokenOrLine.start[0]
        else:
            line = tokenOrLine
        self.lineno = line
        super().__init__('Parse error in line ' + str(line) + ': ' + message)
class Peekable:
    """Utility class to allow iterator lookahead.

    NOTE: None is used as the exhaustion sentinel, so the wrapped iterable
    must not itself yield None.
    """

    def __init__(self, gen):
        self.gen = iter(gen)
        self.current = next(self.gen, None)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.current
        if value is None:
            raise StopIteration
        self.current = next(self.gen, None)
        return value

    def peek(self):
        """Return the next item without consuming it (None when exhausted)."""
        return self.current
def peek(thing):
    """Module-level convenience wrapper: look at `thing`'s next item without
    consuming it (delegates to its peek() method)."""
    return thing.peek()
class TokenTranslator:
    """Rewrites a Scenic token stream into valid Python tokens.

    Tracks the constructors (built-in plus user-defined) so that constructor
    statements and their specifiers can be recognized, and maintains a stack
    of open function calls introduced by the translation.
    """

    def __init__(self, constructors=()):
        self.functions = set(functionStatements)
        self.constructors = dict(builtinConstructors)
        for constructor in constructors:
            name = constructor.name
            assert name not in self.constructors
            self.constructors[name] = constructor
            self.functions.add(name)

    def createConstructor(self, name, parent, specs={}):
        """Register a new constructor and return the name of its superclass."""
        if parent is None:
            parent = 'Object'  # default superclass
        self.constructors[name] = Constructor(name, parent, specs)
        self.functions.add(name)
        return parent

    def specifiersForConstructor(self, const):
        """Return the specifiers valid for `const`, including inherited ones."""
        name, parent, specs = self.constructors[const]
        if parent is None:
            return specs
        else:
            ps = dict(self.specifiersForConstructor(parent))
            ps.update(specs)
            return ps

    def translate(self, tokens):
        """Process the token stream, adding or modifying tokens as necessary to
        produce valid Python syntax."""
        tokens = Peekable(tokens)
        newTokens = []
        functionStack = []          # stack of (function name, paren level at entry)
        inConstructor = False  # inside a constructor or one of its specifiers
        specifiersIndented = False
        parenLevel = 0
        lineCount = 0               # logical line counter for lineMap
        lastLine = max(1, peek(tokens).start[0]) - 1
        lineMap = { 0: 0 }          # logical line -> original source line
        startOfLine = True  # TODO improve hack?
        functions = self.functions
        constructors = self.constructors
        for token in tokens:
            ttype = token.exact_type
            tstring = token.string
            skip = False
            # Catch Python operators that can't be used in Scenic
            if ttype in illegalTokens:
                raise TokenParseError(token, f'illegal operator "{tstring}"')
            # Determine which operators are allowed in current context
            context, startLevel = functionStack[-1] if functionStack else (None, None)
            inConstructorContext = (context in constructors and parenLevel == startLevel)
            if inConstructorContext:
                inConstructor = True
                # Inside a constructor, only its specifiers are legal prefixes
                # and infix operators are disallowed entirely.
                allowedPrefixOps = self.specifiersForConstructor(context)
                allowedInfixOps = dict()
            else:
                allowedPrefixOps = prefixOperators
                allowedInfixOps = infixTokens
            # Parse next token
            if ttype == LPAR or ttype == LSQB:  # keep track of nesting level
                parenLevel += 1
            elif ttype == RPAR or ttype == RSQB:  # ditto
                parenLevel -= 1
            elif ttype in (NEWLINE, NL, ENDMARKER):  # track non-logical lines for error reporting
                lineCount += 1
                lineMap[lineCount] = lastLine + 1
                lastLine = token.start[0]
            elif ttype == DEDENT and specifiersIndented:
                # elide dedent corresponding to indented specifiers, if present
                skip = True
                specifiersIndented = False
            elif ttype == NAME:  # the interesting case: all new syntax falls in here
                function = None
                argument = None
                # try to match 2-word language constructs
                matched = False
                nextToken = peek(tokens)  # lookahead so we can give 2-word ops precedence
                if nextToken is not None:
                    nextString = nextToken.string
                    twoWords = (tstring, nextString)
                    if startOfLine and tstring == 'for':  # TODO improve hack?
                        matched = True
                    elif startOfLine and tstring == constructorStatement:  # constructor definition
                        if nextToken.type != NAME or nextString in keywords:
                            raise TokenParseError(nextToken,
                                f'invalid constructor name "{nextString}"')
                        next(tokens)  # consume name
                        parent = None
                        if peek(tokens).exact_type == LPAR:  # superclass specification
                            next(tokens)
                            parentToken = next(tokens)
                            parent = parentToken.string
                            if parentToken.exact_type != NAME or parent in keywords:
                                raise TokenParseError(parentToken,
                                    f'invalid constructor superclass "{parent}"')
                            if parent not in self.constructors:
                                raise TokenParseError(parentToken,
                                    f'constructor cannot subclass non-object "{parent}"')
                            if next(tokens).exact_type != RPAR:
                                raise TokenParseError(parentToken,
                                    'malformed constructor definition')
                        if peek(tokens).exact_type != COLON:
                            raise TokenParseError(nextToken, 'malformed constructor definition')
                        # Emit an ordinary Python class definition.
                        parent = self.createConstructor(nextString, parent)
                        newTokens.append((NAME, 'class'))
                        newTokens.append((NAME, nextString))
                        newTokens.append((LPAR, '('))
                        newTokens.append((NAME, parent))
                        newTokens.append((RPAR, ')'))
                        skip = True
                        matched = True
                    elif twoWords in allowedPrefixOps:  # 2-word prefix operator
                        function = allowedPrefixOps[twoWords]
                        next(tokens)  # consume second word
                        matched = True
                    elif not startOfLine and twoWords in allowedInfixOps:  # 2-word infix operator
                        newTokens.append(allowedInfixOps[twoWords])
                        next(tokens)
                        skip = True
                        matched = True
                    elif inConstructorContext and tstring == 'with':  # special case for 'with' specifier
                        function = 'With'
                        argument = '"' + nextString + '"'
                        next(tokens)
                        matched = True
                    elif tstring == requireStatement and nextString == '[':  # special case for require[p]
                        next(tokens)  # consume '['
                        prob = next(tokens)
                        if prob.exact_type != NUMBER:
                            raise TokenParseError(prob,
                                'soft requirement must have constant probability')
                        if next(tokens).exact_type != RSQB:
                            raise TokenParseError(prob, 'malformed soft requirement')
                        function = requireStatement
                        argument = prob.string
                        matched = True
                if not matched:
                    # 2-word constructs don't match; try 1-word
                    oneWord = (tstring,)
                    if oneWord in allowedPrefixOps:  # 1-word prefix operator
                        function = allowedPrefixOps[oneWord]
                    elif not startOfLine and oneWord in allowedInfixOps:  # 1-word infix operator
                        newTokens.append(allowedInfixOps[oneWord])
                        skip = True
                    elif inConstructorContext:  # couldn't match any 1- or 2-word specifier
                        raise TokenParseError(token, f'unknown constructor specifier "{tstring}"')
                    elif tstring in functions:  # built-in function
                        function = tstring
                    elif tstring in replacements:  # direct replacement
                        newTokens.extend(replacements[tstring])
                        skip = True
                    elif startOfLine and tstring == 'from':  # special case to allow 'from X import Y'
                        pass
                    elif tstring in keywords:  # some malformed usage
                        raise TokenParseError(token, f'unexpected keyword "{tstring}"')
                    else:
                        pass  # nothing matched; pass through unchanged to Python
                # generate new tokens for function calls
                if function is not None:
                    functionStack.append((function, parenLevel))
                    newTokens.append((NAME, function))
                    newTokens.append((LPAR, '('))
                    if argument is not None:
                        newTokens.append((NAME, argument))
                        newTokens.append((COMMA, ','))
                    skip = True
            # Detect the end of function argument lists
            if len(functionStack) > 0:
                context, startLevel = functionStack[-1]
                while parenLevel < startLevel:  # we've closed all parens for the current function
                    functionStack.pop()
                    newTokens.append((RPAR, ')'))
                    context, startLevel = (None, 0) if len(functionStack) == 0 else functionStack[-1]
                if inConstructor and parenLevel == startLevel and ttype == COMMA:  # starting a new specifier
                    # Close every call open since the last specifier began.
                    while functionStack and context not in constructors:
                        functionStack.pop()
                        newTokens.append((RPAR, ')'))
                        context, startLevel = (None, 0) if len(functionStack) == 0 else functionStack[-1]
                    # allow the next specifier to be on the next line, if indented
                    nextToken = peek(tokens)
                    if nextToken.exact_type in (NEWLINE, COMMENT):
                        if nextToken.exact_type == COMMENT:
                            next(tokens)  # consume comment
                            nextToken = peek(tokens)
                            if nextToken.exact_type != NEWLINE:
                                raise TokenParseError(nextToken, 'comma with no specifier following')
                        next(tokens)  # consume newline
                        if not specifiersIndented:
                            nextToken = next(tokens)  # consume indent
                            if nextToken.exact_type != INDENT:
                                raise TokenParseError(nextToken,
                                    'expected indented specifier (extra comma on previous line?)')
                            specifiersIndented = True
                elif ttype == NEWLINE or ttype == ENDMARKER or ttype == COMMENT:  # end of line
                    inConstructor = False
                    if parenLevel != 0:
                        raise TokenParseError(token, 'unmatched parens/brackets')
                    while len(functionStack) > 0:
                        functionStack.pop()
                        newTokens.append((RPAR, ')'))
            # Output token unchanged, unless handled above
            if not skip:
                token = token[:2]  # hack to get around bug in untokenize
                newTokens.append(token)
            startOfLine = (ttype in (ENCODING, NEWLINE, NL, INDENT, DEDENT))
        rewrittenSource = tokenize.untokenize(newTokens)
        if not isinstance(rewrittenSource, str):  # TODO improve?
            rewrittenSource = str(rewrittenSource, encoding='utf-8')
        return rewrittenSource, lineMap, self.constructors
### TRANSLATION PHASE THREE: parsing of Python resulting from token translation
class PythonParseError(SyntaxError, ParseError):
    """Syntax error in the Python produced by token translation, reported
    against the corresponding line of the original source."""
    @classmethod
    def fromSyntaxError(cls, exc, lineMap):
        """Wrap a SyntaxError, mapping its line number back through lineMap."""
        msg, (filename, lineno, offset, line) = exc.args
        lineno = lineMap[lineno]
        try:    # attempt to recover line from original file
            with open(filename, 'r') as sourceFile:
                lines = list(itertools.islice(sourceFile, lineno - 1, lineno))
                assert len(lines) == 1
                line = lines[0]
                offset = min(offset, len(line))    # TODO improve?
        except FileNotFoundError:
            # no original file available; keep the translated line
            pass
        newExc = cls(msg, (filename, lineno, offset, line))
        return newExc.with_traceback(exc.__traceback__)
def parseTranslatedSource(source, lineMap, filename):
    """Parse the translated Python source, mapping syntax errors back to the
    original file via lineMap."""
    try:
        return parse(source, filename=filename)
    except SyntaxError as exc:
        cause = exc if showInternalBacktrace else None
        raise PythonParseError.fromSyntaxError(exc, lineMap) from cause
### TRANSLATION PHASE FOUR: modifying the parse tree
# Empty argument list: used to wrap requirement conditions in zero-argument lambdas.
# NOTE(review): ast.arguments grew a 'posonlyargs' field in Python 3.8 -- confirm
# this construction still compiles Lambdas on the targeted Python versions.
noArgs = ast.arguments(
    args=[], vararg=None,
    kwonlyargs=[], kw_defaults=[],
    kwarg=None, defaults=[])
# Argument list taking only 'self': used for property-default closures.
selfArg = ast.arguments(
    args=[ast.arg(arg='self', annotation=None)], vararg=None,
    kwonlyargs=[], kw_defaults=[],
    kwarg=None, defaults=[])
class AttributeFinder(NodeVisitor):
    """utility class for finding all referenced attributes of a given name"""
    @staticmethod
    def find(target, node):
        """Return the set of attribute names accessed directly on `target` in `node`."""
        finder = AttributeFinder(target)
        finder.visit(node)
        return finder.attributes
    def __init__(self, target):
        super().__init__()
        self.target = target      # name whose attribute accesses we collect
        self.attributes = set()   # attribute names found so far
    def visit_Attribute(self, node):
        # Record attributes accessed directly on the target name, then keep
        # walking the value so chained accesses (e.g. self.a.b) are covered.
        base = node.value
        if isinstance(base, Name) and base.id == self.target:
            self.attributes.add(node.attr)
        self.visit(base)
class ASTParseError(ParseError):
    """Parse error detected while rewriting the translated AST."""
    def __init__(self, line, message):
        self.lineno = line    # original source line the error refers to
        super().__init__(f'Parse error in line {line}: {message}')
class ASTSurgeon(NodeTransformer):
    """Rewrites the parse tree of translated code into proper Python.

    Handles custom infix operators, interval tuples, require statements,
    and constructor (object-class) definitions emitted by token translation.
    """
    def __init__(self, lineMap, constructors):
        super().__init__()
        self.lineMap = lineMap    # maps translated line numbers to original ones
        self.constructors = { const for const in constructors }    # names of object classes
        self.requirements = []    # require-statement condition ASTs, by ID
    def parseError(self, node, message):
        """Raise an ASTParseError located at the original line of `node`."""
        line = self.lineMap[node.lineno]
        raise ASTParseError(line, message)
    def unpack(self, arg, expected, node):
        """unpacks arguments to ternary (and up) infix operators"""
        assert expected > 0
        if isinstance(arg, BinOp) and isinstance(arg.op, packageNode):
            # `arg` is itself an argument package; recurse into its left side.
            if expected == 1:
                # parseError always raises; the `raise` makes that explicit
                raise self.parseError(node, 'gave too many arguments to infix operator')
            else:
                return self.unpack(arg.left, expected - 1, node) + [self.visit(arg.right)]
        elif expected > 1:
            raise self.parseError(node, 'gave too few arguments to infix operator')
        else:
            return [self.visit(arg)]
    def visit_BinOp(self, node):
        """Convert operators with non-Python semantics into function calls."""
        left = node.left
        right = node.right
        op = node.op
        if isinstance(op, packageNode):    # unexpected argument package
            raise self.parseError(node, 'unexpected keyword "by"')
        elif type(op) in infixImplementations:    # an operator with non-Python semantics
            arity, impName = infixImplementations[type(op)]
            implementation = Name(impName, Load())
            copy_location(implementation, node)
            assert arity >= 2
            # left operand plus (arity - 1) operands unpacked from the right side
            args = [self.visit(left)] + self.unpack(right, arity-1, node)
            newNode = Call(implementation, args, [])
        else:    # all other operators have the Python semantics
            newNode = BinOp(self.visit(left), op, self.visit(right))
        return copy_location(newNode, node)
    def visit_Tuple(self, node):
        """Interpret 2-tuples as random-range (interval) constructor calls."""
        if len(node.elts) != 2:
            raise self.parseError(node, 'interval must have exactly two endpoints')
        newElts = [self.visit(elt) for elt in node.elts]
        return copy_location(Call(Name(rangeConstructor, Load()), newElts, []), node)
    def visit_Call(self, node):
        """Rewrite require statements and unpack argument packages in calls."""
        func = node.func
        if isinstance(func, Name) and func.id == requireStatement:    # Require statement
            # Soft reqs have 2 arguments, including the probability, which is given as the
            # first argument by the token translator; so we allow an extra argument here and
            # validate it later on (in case the user wrongly gives 2 arguments to require).
            if not (1 <= len(node.args) <= 2):
                raise self.parseError(node, 'require takes exactly one argument')
            if len(node.keywords) != 0:
                raise self.parseError(node, 'require takes no keyword arguments')
            cond = node.args[-1]    # the condition is always the last argument
            if isinstance(cond, Starred):
                raise self.parseError(node, 'argument unpacking cannot be used with require')
            req = self.visit(cond)
            line = self.lineMap[node.lineno]
            reqID = Num(len(self.requirements))    # save ID number
            self.requirements.append(req)    # save condition for later inspection
            closure = Lambda(noArgs, req)    # enclose requirement in a lambda
            lineNum = Num(line)    # save line number for error messages
            copy_location(closure, req)
            copy_location(lineNum, req)
            newArgs = [reqID, closure, lineNum]
            if len(node.args) == 2:    # get probability for soft requirements
                prob = node.args[0]
                if not isinstance(prob, Num):
                    raise self.parseError(node, 'malformed requirement '
                        '(should be a single expression)')
                newArgs.append(prob)
            return copy_location(Call(func, newArgs, []), node)
        else:    # Ordinary function call
            newArgs = []
            # Translate arguments, unpacking any argument packages
            for arg in node.args:
                if isinstance(arg, BinOp) and isinstance(arg.op, packageNode):
                    newArgs.extend(self.unpack(arg, 2, node))
                else:
                    newArgs.append(self.visit(arg))
            newKeywords = [self.visit(kwarg) for kwarg in node.keywords]
            return copy_location(Call(func, newArgs, newKeywords), node)
    def visit_ClassDef(self, node):
        """Rewrite property defaults in constructor definitions; for ordinary
        classes, check that no object class is wrongly subclassed."""
        if node.name in self.constructors:    # constructor definition
            newBody = []
            for child in node.body:
                child = self.visit(child)
                if isinstance(child, AnnAssign):    # default value for property
                    origValue = child.annotation
                    target = child.target
                    metaAttrs = []
                    if isinstance(target, Subscript):
                        # property has meta-attributes, e.g. "pos[additive]: value"
                        sl = target.slice
                        if not isinstance(sl, Index):
                            self.parseError(sl, 'malformed attributes for property default')
                        sl = sl.value
                        if isinstance(sl, Name):
                            metaAttrs.append(sl.id)
                        elif isinstance(sl, Tuple):
                            for elt in sl.elts:
                                if not isinstance(elt, Name):
                                    self.parseError(elt,
                                        'malformed attributes for property default')
                                metaAttrs.append(elt.id)
                        else:
                            self.parseError(sl, 'malformed attributes for property default')
                        # replace the subscripted target by a plain name
                        newTarget = Name(target.value.id, Store())
                        copy_location(newTarget, target)
                        target = newTarget
                    # wrap the default value in a closure over `self`
                    properties = AttributeFinder.find('self', origValue)
                    args = [
                        Set([Str(prop) for prop in properties]),
                        Set([Str(attr) for attr in metaAttrs]),
                        Lambda(selfArg, origValue)
                    ]
                    value = Call(Name(createDefault, Load()), args, [])
                    copy_location(value, origValue)
                    newChild = AnnAssign(
                        target=target, annotation=value,
                        value=None, simple=True)
                    child = copy_location(newChild, child)
                newBody.append(child)
            node.body = newBody
            return node
        else:    # ordinary Python class
            # catch some mistakes where 'class' was used instead of 'constructor'
            for base in node.bases:
                name = None
                if isinstance(base, Call):
                    name = base.func.id
                elif isinstance(base, Name):
                    name = base.id
                if name is not None and name in self.constructors:
                    self.parseError(node, f'must use "{constructorStatement}" to subclass objects')
            return self.generic_visit(node)
def translateParseTree(tree, lineMap, constructors):
    """Run the AST surgeon over the tree; return the new tree and the
    requirement ASTs gathered along the way."""
    surgeon = ASTSurgeon(lineMap, constructors)
    newTree = fix_missing_locations(surgeon.visit(tree))
    return newTree, surgeon.requirements
### TRANSLATION PHASE FIVE: AST compilation
def compileTranslatedTree(tree, lineMap, filename):
    """Compile the modified AST, mapping any syntax errors back to the
    original source via lineMap."""
    try:
        return compile(tree, filename, 'exec')
    except SyntaxError as exc:
        cause = exc if showInternalBacktrace else None
        raise PythonParseError.fromSyntaxError(exc, lineMap) from cause
### TRANSLATION PHASE SIX: Python execution
def generateTracebackFrom(exc, lineMap, sourceFile, full=False):
    """Trim `exc`'s traceback to end at the last frame in `sourceFile`,
    correcting line numbers via `lineMap`.

    Returns (traceback, line), where `line` is the corrected line number of
    the final frame. With full=True, all frames are kept instead of trimming.
    """
    # find last stack frame in the source file
    tbexc = traceback.TracebackException.from_exception(exc)
    last = None       # index of the last frame to keep
    tbs = []          # traceback objects, outermost first
    lms = []          # per-frame line map (or None for frames in other files)
    currentTb = exc.__traceback__
    for depth, frame in enumerate(tbexc.stack):
        assert currentTb is not None
        tbs.append(currentTb)
        currentTb = currentTb.tb_next
        if frame.filename == sourceFile:
            last = depth
            lms.append(lineMap)
        else:
            lms.append(None)
        if full:
            # keep every frame: `last` ends up as the innermost one
            last = depth
    assert last is not None
    # create new trimmed traceback with corrected line numbers
    lastTb = tbs[last]
    lastLine = lastTb.tb_lineno
    if lms[last] is not None:
        lastLine = lms[last][lastLine]
    tbs = tbs[:last]
    lms = lms[:last]
    try:
        # rebuild the chain from the innermost frame outwards
        currentTb = types.TracebackType(None, lastTb.tb_frame,
                                        lastTb.tb_lasti, lastLine)
    except TypeError:
        # Python 3.6 does not allow creation of traceback objects, so we just
        # return the original traceback
        return exc.__traceback__, lastLine
    for tb, lm in zip(reversed(tbs), reversed(lms)):
        line = lm[tb.tb_lineno] if lm else tb.tb_lineno
        currentTb = types.TracebackType(currentTb, tb.tb_frame,
                                        tb.tb_lasti, line)
    return currentTb, lastLine
class InterpreterParseError(ParseError):
    """Parse error raised while the translated code is being executed."""
    def __init__(self, exc, line):
        self.lineno = line    # original source line where the error occurred
        super().__init__(f'Parse error in line {line}: {type(exc).__name__}: {exc}')
def executeCodeIn(code, namespace, lineMap, filename):
    """Execute the compiled scenario code in `namespace`, rewriting any
    resulting tracebacks to point at the original source."""
    try:
        exec(code, namespace)
    except RuntimeParseError as err:
        # translate DSL-level errors into InterpreterParseError at the right line
        cause = err if showInternalBacktrace else None
        tb, line = generateTracebackFrom(err, lineMap, filename)
        raise InterpreterParseError(err, line).with_traceback(tb) from cause
    except Exception as err:
        # for ordinary Python errors, keep the exception but fix its traceback
        tb, line = generateTracebackFrom(err, lineMap, filename, full=True)
        raise err.with_traceback(tb) from None
### TRANSLATION PHASE SEVEN: scenario construction
class InvalidScenarioError(Exception):
    """Raised when the executed scenario is not well-formed."""
def storeScenarioStateIn(namespace, requirementSyntax, filename):
    """Post-process the executed namespace: extract created objects, global
    parameters, and requirements into the keys the Scenario constructor reads.

    (The `filename` parameter is currently unused.)
    """
    # extract created Objects
    namespace['_objects'] = tuple(veneer.allObjects)
    namespace['_egoObject'] = veneer.egoObject
    # extract global parameters
    namespace['_params'] = veneer.globalParameters
    for name, value in veneer.globalParameters.items():
        if needsLazyEvaluation(value):
            raise InvalidScenarioError(f'parameter {name} uses value {value}'
                                       ' undefined outside of object definition')
    # extract requirements and create proper closures
    requirements = veneer.pendingRequirements
    finalReqs = veneer.inheritedReqs
    requirementDeps = set()    # things needing to be sampled to evaluate the requirements
    namespace['_requirements'] = finalReqs
    namespace['_requirementDeps'] = requirementDeps
    def makeClosure(req, bindings, ego, line):
        # Factory so each closure captures its own req/bindings/ego/line.
        def closure(values):
            # rebind any names referring to sampled objects
            for name, value in bindings.items():
                if value in values:
                    namespace[name] = values[value]
            # rebind ego object, which can be referred to implicitly
            if ego is not None:
                veneer.egoObject = values[ego]
            # evaluate requirement condition, reporting errors on the correct line
            try:
                veneer.evaluatingRequirement = True
                result = req()
                assert not needsSampling(result)
                if needsLazyEvaluation(result):
                    raise RuntimeParseError(f'requirement on line {line} uses value'
                                            ' undefined outside of object definition')
                return result
            except RuntimeParseError as e:
                cause = e if showInternalBacktrace else None
                raise InterpreterParseError(e, line) from cause
            finally:
                veneer.evaluatingRequirement = False
        return closure
    for reqID, (req, bindings, ego, line, prob) in requirements.items():
        # Check whether requirement implies any relations used for pruning
        reqNode = requirementSyntax[reqID]
        relations.inferRelationsFrom(reqNode, bindings, ego, line)
        # Gather dependencies of the requirement
        for value in bindings.values():
            if needsSampling(value):
                requirementDeps.add(value)
            if needsLazyEvaluation(value):
                raise InvalidScenarioError(f'requirement on line {line} uses value {value}'
                                           ' undefined outside of object definition')
        if ego is not None:
            assert isinstance(ego, Samplable)
            requirementDeps.add(ego)
        # Construct closure
        finalReqs.append((makeClosure(req, bindings, ego, line), prob))
def constructScenarioFrom(namespace):
    """Assemble the final Scenario from the state stored in `namespace`,
    validating the ego object and workspace along the way."""
    # extract ego object
    if namespace['_egoObject'] is None:
        raise InvalidScenarioError('did not specify ego object')
    # extract workspace, if one is specified
    workspace = None
    if 'workspace' in namespace:
        workspace = namespace['workspace']
        if not isinstance(workspace, Workspace):
            raise InvalidScenarioError(f'workspace {workspace} is not a Workspace')
        if needsSampling(workspace):
            raise InvalidScenarioError('workspace must be a fixed region')
        if needsLazyEvaluation(workspace):
            raise InvalidScenarioError('workspace uses value undefined '
                                       'outside of object definition')
    scenario = Scenario(workspace,
                        namespace['_objects'], namespace['_egoObject'],
                        namespace['_params'],
                        namespace['_requirements'], namespace['_requirementDeps'])
    if usePruning:
        # prune impossible samples using relations inferred from requirements
        pruning.prune(scenario, verbosity=verbosity)
    return scenario
| 34.94218 | 103 | 0.712728 | 4,375 | 36,864 | 5.959314 | 0.188571 | 0.002877 | 0.004142 | 0.004296 | 0.127263 | 0.090058 | 0.057725 | 0.046832 | 0.032487 | 0.026158 | 0 | 0.003325 | 0.184218 | 36,864 | 1,054 | 104 | 34.975332 | 0.863632 | 0.164497 | 0 | 0.166069 | 0 | 0 | 0.098941 | 0.000785 | 0 | 0 | 0 | 0.001898 | 0.032258 | 1 | 0.052569 | false | 0.004779 | 0.063321 | 0.004779 | 0.172043 | 0.007168 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4980dcb5c6a0ba79a60f2f54a50ced3303043821 | 19,227 | py | Python | ircutils3/events.py | PiPeep/python-ircutils | a8c9525427af7c15737947a2052c37f4ce98d9e5 | [
"MIT"
] | 3 | 2015-02-21T01:27:32.000Z | 2015-03-25T17:36:32.000Z | ircutils3/events.py | PiPeep/python-ircutils | a8c9525427af7c15737947a2052c37f4ce98d9e5 | [
"MIT"
] | 2 | 2015-07-16T23:38:42.000Z | 2015-07-17T10:52:17.000Z | ircutils3/events.py | bgw/python-ircutils | a8c9525427af7c15737947a2052c37f4ce98d9e5 | [
"MIT"
] | 1 | 2019-12-13T04:05:28.000Z | 2019-12-13T04:05:28.000Z | """ This module gets used by :class:`ircutils.client.SimpleClient` and
:class:`ircutils.bot.SimpleBot` for event handling and management.
Each line sent from the IRC server represents its own event. This information
is parsed to fill in the values for the event object. In some cases, these
single-line events are combined together to build more complex events that span
multiple lines of data from the server. This information
is parsed to fill in the values for the event object.
"""
import bisect
import collections
import traceback
from . import protocol
class EventDispatcher(object):
    """ The event dispatcher is in charge of three major tasks. (1) Registering
    listeners to the dispatcher, (2) providing a way to interact with the
    listeners, and (3) dispatching events.
    """
    def __init__(self):
        # Maps listener name -> listener instance.
        self._listeners = {}

    def register_listener(self, name, listener):
        """ Adds a listener to the dispatcher. """
        self._listeners[name] = listener

    def __setitem__(self, name, listener):
        self.register_listener(name, listener)

    def __getitem__(self, name):
        return self._listeners[name]

    def __iter__(self):
        # Iterating a dict yields its keys directly; no list copy needed.
        return iter(self._listeners)

    def dispatch(self, client, event):
        """ Notifies all of the listeners that an event is available.
            Any listener which analyses the event and finds it to have what
            the listener is looking for will then activate its event handlers.
        """
        # Snapshot the listeners so handlers may (un)register during dispatch.
        for listener in list(self._listeners.values()):
            # Skip listeners with no handlers bound (truthiness, not != []).
            if listener.handlers:
                listener.notify(client, event)
# ------------------------------------------------------------------------------
# > BEGIN EVENT OBJECTS
# ------------------------------------------------------------------------------
#
class Event(object):
    """Base class for all IRC event objects."""
class ConnectionEvent(Event):
    """ Handles events for connecting and disconnecting. Currently, the only useful data in
    the event object is the command. It will either be CONN_CONNECT or CONN_DISCONNECT.
    """
    def __init__(self, command):
        self.command = command
        # Connection events carry no originating user or destination.
        self.source = self.target = None
        self.params = []
class StandardEvent(Event):
    """ Represents a standard event. """
    def __init__(self, prefix, command, params):
        self.command = command
        self.prefix = prefix
        self.source, self.user, self.host = protocol.parse_prefix(prefix)
        # The first parameter is the target, unless this command never has one.
        if params and command not in protocol.commands_with_no_target:
            self.target = params[0]
            self.params = params[1:]
        else:
            self.target = None
            # keep the original list object when present; fresh list otherwise
            self.params = params if params else []
class MessageEvent(StandardEvent):
    """ MessageEvent has all of the attributes as
    :class:`ircutils.events.StandardEvent` with the added attribute ``message``
    which holds the message data.
    ::

        from ircutils import bot

        class PrinterBot(bot.SimpleBot):
            def on_message(self, event):
                print("<{0}> {1}".format(event.source, event.message))

    """
    def __init__(self, prefix, command, params):
        StandardEvent.__init__(self, prefix, command, params)
        # The trailing parameter carries the message text.
        self.message = params[-1]
class CTCPEvent(StandardEvent):
    """ Represents a Client-To-Client Protocol (CTCP) event. """
    def __init__(self):
        # Fields are filled in by the parser after construction; note this
        # intentionally does not call StandardEvent.__init__.
        self.source = self.target = self.command = None
        self.params = []
# ------------------------------------------------------------------------------
# > BEGIN EventListener AND HELPER CODE
# ------------------------------------------------------------------------------
class EventListener(object):
    """ This class is a simple event listener designed to be subclassed. Each
    event listener is in charge of activating its handlers.
    """
    def __init__(self):
        # List of (priority, handler) pairs, kept sorted by priority.
        self.handlers = []

    def add_handler(self, handler, priority=0):
        """ Add a handler to the event listener. It will be called when the
            listener decides it's time. It will place it in order depending
            on the priority specified. The default is 0.
            Event handlers take the form of::

                def my_handler(client, event):
                    # Do stuff with the client and event here
                    # Example:
                    client.send_message(event.target, "Hi!")

            If :class:`ircutils.bot.SimpleBot` is being used, you do not need to
            use this method as handlers are automatically added.
        """
        # bisect on an empty priority list returns 0, so no special case
        # for the first handler is needed.
        priorities = [p for p, _ in self.handlers]
        position = bisect.bisect(priorities, priority)
        self.handlers.insert(position, (priority, handler))

    def remove_handler(self, handler):
        """ This removes all handlers that are equal to ``handler`` which
            are bound to the event listener.
        """
        # Rebuild the list instead of removing during iteration; the old
        # loop mutated self.handlers while iterating it and could skip
        # adjacent duplicates.
        self.handlers = [(p, h) for p, h in self.handlers if h != handler]

    def activate_handlers(self, *args):
        """ This activates each handler that's bound to the listener. It works
            in order, so handlers with a higher priority will be activated
            before all others. The ``args`` sent to this will be sent to each
            handler. It's a good idea to always make sure to send in the client
            and the event.
        """
        for _priority, handler in self.handlers:
            handler(*args)

    def notify(self, client, event):
        """ This is to be overridden when subclassed. It gets called after each
            event generated by the system. If the event listener decides to, it
            should run its handlers from here.
        """
        raise NotImplementedError("notify() must be overridden.")
class _CustomListener(EventListener):
    """Listener matching events on an optional command/target/source triple."""
    def __init__(self, command, target, source):
        EventListener.__init__(self)
        self.command = command
        self.target = target
        self.source = source
    def notify(self, client, event):
        # A criterion of None acts as a wildcard.
        matches = (self.command in (None, event.command)
                   and self.target in (None, event.target)
                   and self.source in (None, event.source))
        if matches:
            self.activate_handlers(client, event)
def create_listener(command=None, target=None, source=None):
    """ Create a listener on-the-fly. This is the simplest way of creating event
    listeners, but also very limited. Examples::

        # Creates a listener that looks for events where the command is PRIVMSG
        msg_listener = events.create_listener(command="PRIVMSG")

        # Listens for events from the NickServ service
        ns_listener = events.create_listener(source="NickServ")

        # Listens for events that are messages to a specific channel
        example = events.create_listener(command="PRIVMSG", target="#channel")

    """
    return _CustomListener(command, target, source)
# ------------------------------------------------------------------------------
# > BEGIN BUILT-IN EVENT LISTENERS
# ------------------------------------------------------------------------------
class ConnectListener(EventListener):
    """Runs its handlers when the connection is established."""
    def notify(self, client, event):
        if event.command != "CONN_CONNECT":
            return
        self.activate_handlers(client, event)


class DisconnectListener(EventListener):
    """Runs its handlers when the connection is closed."""
    def notify(self, client, event):
        if event.command != "CONN_DISCONNECT":
            return
        self.activate_handlers(client, event)


connection = {
    "connect": ConnectListener,
    "disconnect": DisconnectListener
    }
class AnyListener(EventListener):
    """Runs its handlers for every event, unconditionally."""
    def notify(self, client, event):
        self.activate_handlers(client, event)


class WelcomeListener(EventListener):
    """Runs its handlers on the server welcome (RPL_WELCOME) reply."""
    def notify(self, client, event):
        if event.command != "RPL_WELCOME":
            return
        self.activate_handlers(client, event)


class NickChangeListener(EventListener):
    """Runs its handlers on NICK changes."""
    def notify(self, client, event):
        if event.command != "NICK":
            return
        self.activate_handlers(client, event)


class PingListener(EventListener):
    """Runs its handlers on server PINGs."""
    def notify(self, client, event):
        if event.command != "PING":
            return
        self.activate_handlers(client, event)


class InviteListener(EventListener):
    """Runs its handlers on channel INVITEs."""
    def notify(self, client, event):
        if event.command != "INVITE":
            return
        self.activate_handlers(client, event)


class KickListener(EventListener):
    """Runs its handlers on channel KICKs."""
    def notify(self, client, event):
        if event.command != "KICK":
            return
        self.activate_handlers(client, event)


class JoinListener(EventListener):
    """Runs its handlers on channel JOINs."""
    def notify(self, client, event):
        if event.command != "JOIN":
            return
        self.activate_handlers(client, event)


class QuitListener(EventListener):
    """Runs its handlers on QUITs."""
    def notify(self, client, event):
        if event.command != "QUIT":
            return
        self.activate_handlers(client, event)


class PartListener(EventListener):
    """Runs its handlers on channel PARTs."""
    def notify(self, client, event):
        if event.command != "PART":
            return
        self.activate_handlers(client, event)


class ErrorListener(EventListener):
    """Runs its handlers on server ERROR messages."""
    def notify(self, client, event):
        if event.command != "ERROR":
            return
        self.activate_handlers(client, event)


class ModeListener(EventListener):
    """Runs its handlers on MODE changes."""
    def notify(self, client, event):
        if event.command != "MODE":
            return
        self.activate_handlers(client, event)


standard = {
    "any": AnyListener,
    "welcome": WelcomeListener,
    "ping": PingListener,
    "invite": InviteListener,
    "kick": KickListener,
    "join": JoinListener,
    "quit": QuitListener,
    "part": PartListener,
    "nick_change": NickChangeListener,
    "error": ErrorListener,
    "mode": ModeListener,
    }
class MessageListener(EventListener):
    """Runs its handlers for all PRIVMSG events."""
    def notify(self, client, event):
        if event.command == "PRIVMSG":
            self.activate_handlers(client, event)


class PrivateMessageListener(MessageListener):
    """Runs its handlers for PRIVMSGs addressed directly to the client."""
    def notify(self, client, event):
        if event.command == "PRIVMSG" and not protocol.is_channel(event.target):
            self.activate_handlers(client, event)


class ChannelMessageListener(MessageListener):
    """Runs its handlers for PRIVMSGs addressed to a channel."""
    def notify(self, client, event):
        if event.command == "PRIVMSG" and protocol.is_channel(event.target):
            self.activate_handlers(client, event)


class NoticeListener(MessageListener):
    """Runs its handlers for all NOTICE events."""
    def notify(self, client, event):
        if event.command == "NOTICE":
            self.activate_handlers(client, event)


class PrivateNoticeListener(NoticeListener):
    """Runs its handlers for NOTICEs addressed directly to the client."""
    def notify(self, client, event):
        if event.command == "NOTICE" and not protocol.is_channel(event.target):
            self.activate_handlers(client, event)


class ChannelNoticeListener(NoticeListener):
    """Runs its handlers for NOTICEs addressed to a channel."""
    def notify(self, client, event):
        if event.command == "NOTICE" and protocol.is_channel(event.target):
            self.activate_handlers(client, event)


messages = {
    "message": MessageListener,
    "channel_message": ChannelMessageListener,
    "private_message": PrivateMessageListener,
    "notice": NoticeListener,
    "channel_notice": ChannelNoticeListener,
    "private_notice": PrivateNoticeListener
    }
class CTCPListener(EventListener):
    """Runs its handlers for any CTCP_* event."""
    def notify(self, client, event):
        if not event.command.startswith("CTCP_"):
            return
        self.activate_handlers(client, event)


class CTCPActionListener(CTCPListener):
    """Runs its handlers for CTCP ACTION (/me) events."""
    def notify(self, client, event):
        if event.command != "CTCP_ACTION":
            return
        self.activate_handlers(client, event)


class CTCPUserInfoListener(CTCPListener):
    """Runs its handlers for CTCP USERINFO queries."""
    def notify(self, client, event):
        if event.command != "CTCP_USERINFO":
            return
        self.activate_handlers(client, event)
class CTCPClientInfoListener(CTCPListener):
    """Runs its handlers for CTCP CLIENTINFO queries."""
    def notify(self, client, event):
        # Previously this returned a bool and never activated its handlers,
        # unlike every other CTCP listener; fire them like its siblings do.
        if event.command == "CTCP_CLIENTINFO":
            self.activate_handlers(client, event)
class CTCPVersionListener(CTCPListener):
    """Runs its handlers for CTCP VERSION queries."""
    def notify(self, client, event):
        if event.command != "CTCP_VERSION":
            return
        self.activate_handlers(client, event)


class CTCPPingListener(CTCPListener):
    """Runs its handlers for CTCP PING queries."""
    def notify(self, client, event):
        if event.command != "CTCP_PING":
            return
        self.activate_handlers(client, event)


class CTCPErrorListener(CTCPListener):
    """Runs its handlers for CTCP ERRMSG/error events."""
    def notify(self, client, event):
        if event.command != "CTCP_ERROR":
            return
        self.activate_handlers(client, event)


class CTCPTimeListener(CTCPListener):
    """Runs its handlers for CTCP TIME queries."""
    def notify(self, client, event):
        if event.command != "CTCP_TIME":
            return
        self.activate_handlers(client, event)


class DCCListener(CTCPListener):
    """Runs its handlers for any CTCP DCC event."""
    def notify(self, client, event):
        if not event.command.startswith("CTCP_DCC"):
            return
        self.activate_handlers(client, event)


ctcp = {
    "ctcp": CTCPListener,
    "ctcp_action": CTCPActionListener,
    "ctcp_userinfo": CTCPUserInfoListener,
    "ctcp_clientinfo": CTCPClientInfoListener,
    "ctcp_version": CTCPVersionListener,
    "ctcp_ping": CTCPPingListener,
    "ctcp_error": CTCPErrorListener,
    "ctcp_time": CTCPTimeListener,
    "dcc": DCCListener
    }
class ReplyListener(EventListener):
    """Base listener for server numeric reply (RPL_*) events."""
    def notify(self, client, event):
        if not event.command.startswith("RPL_"):
            return
        self.activate_handlers(client, event)
class NameReplyListener(ReplyListener):
    """Aggregates multi-line NAMES replies into one event per channel."""

    class NameReplyEvent(Event):
        """Accumulated NAMES reply for a single channel."""
        def __init__(self):
            self.channel = None
            self.name_list = []

    def __init__(self):
        ReplyListener.__init__(self)
        # Partial name lists, keyed by lowercased channel name.
        self._name_lists = collections.defaultdict(self.NameReplyEvent)

    def notify(self, client, event):
        if event.command == "RPL_NAMREPLY":
            # "( "=" / "*" / "@" ) <channel>
            # :[ "@" / "+" ] <nick> *( " " [ "@" / "+" ] <nick> )
            #
            # - "@" is used for secret channels, "*" for private
            #   channels, and "=" for others (public channels).
            channel = event.params[1].lower()
            names = event.params[2].strip().split(" ")
            # TODO: This line below is wrong. It doesn't use name symbols.
            names = list(map(protocol.strip_name_symbol, names))
            self._name_lists[channel].name_list.extend(names)
        elif event.command == "RPL_ENDOFNAMES":
            # <channel> :End of NAMES list
            # Lowercase the key to match the RPL_NAMREPLY branch; without it,
            # mixed-case channel names looked up a fresh (empty) entry and the
            # accumulated name list was lost.
            channel_name = event.params[0].lower()
            name_event = self._name_lists[channel_name]
            name_event.channel = channel_name
            self.activate_handlers(client, name_event)
            del self._name_lists[channel_name]
class ListReplyListener(ReplyListener):
    """Aggregates multi-line LIST replies into a single event."""

    class ListReplyEvent(Event):
        """Carries the complete channel list for one LIST query."""
        def __init__(self, channel_list):
            self.channel_list = channel_list

    def __init__(self):
        ReplyListener.__init__(self)
        # (channel, visible_count, topic) tuples collected so far.
        self.channel_list = []

    def notify(self, client, event):
        if event.command == "RPL_LIST":
            # <channel> <# visible> :<topic>
            entry = (event.params[0].lower(), event.params[1], event.params[2])
            self.channel_list.append(entry)
        elif event.command == "RPL_LISTEND":
            # :End of LIST -- emit the collected list and reset for the next query.
            reply = self.ListReplyEvent(self.channel_list)
            self.activate_handlers(client, reply)
            self.channel_list = []
class WhoisReplyListener(ReplyListener):
    """ Collects the numeric replies of a WHOIS query into a single event.

    http://tools.ietf.org/html/rfc1459#section-4.5.2
    """

    class WhoisReplyEvent(Event):
        """Accumulated WHOIS data for a single nick."""
        def __init__(self):
            self.nick = None
            self.user = None
            self.host = None
            self.real_name = None
            self.channels = []
            self.server = None
            self.is_operator = False
            self.idle_time = 0    # seconds

    def __init__(self):
        ReplyListener.__init__(self)
        # Partial replies, keyed by the queried nick.
        self._whois_replies = collections.defaultdict(self.WhoisReplyEvent)

    def notify(self, client, event):
        if event.command == "RPL_WHOISUSER":
            # <nick> <user> <host> * :<real name>
            # Key by the nick (params[0]); the old code keyed this branch by
            # params[1] (the username), so its data was stored under a key no
            # other branch -- including RPL_ENDOFWHOIS -- ever looked up.
            reply = self._whois_replies[event.params[0]]
            reply.nick = event.params[0]
            reply.user = event.params[1]
            reply.host = event.params[2]
            reply.real_name = event.params[4]
        elif event.command == "RPL_WHOISCHANNELS":
            # <nick> :*( ( "@" / "+" ) <channel> " " )
            channels = event.params[1].strip().split()
            channels = list(map(protocol.strip_name_symbol, channels))
            self._whois_replies[event.params[0]].channels.extend(channels)
        elif event.command == "RPL_WHOISSERVER":
            # <nick> <server> :<server info>
            self._whois_replies[event.params[0]].server = event.params[1]
        elif event.command == "RPL_WHOISIDLE":
            # <nick> <integer> :seconds idle
            self._whois_replies[event.params[0]].idle_time = event.params[1]
        elif event.command == "RPL_WHOISOPERATOR":
            # <nick> :is an IRC operator
            self._whois_replies[event.params[0]].is_operator = True
        elif event.command == "RPL_ENDOFWHOIS":
            # <nick> :End of WHOIS list
            self.activate_handlers(client, self._whois_replies[event.params[0]])
            del self._whois_replies[event.params[0]]
class WhoReplyListener(ReplyListener):
    """ Collects RPL_WHOREPLY lines into one event per channel.

    http://tools.ietf.org/html/rfc1459#section-4.5.2
    """

    class WhoReplyEvent(Event):
        """Accumulated WHO data for a single channel."""
        def __init__(self):
            self.channel_name = None
            self.user_list = []

    def __init__(self):
        ReplyListener.__init__(self)
        # Partial replies, keyed by lowercased channel name.
        self._who_replies = collections.defaultdict(self.WhoReplyEvent)

    def notify(self, client, event):
        if event.command == "RPL_WHOREPLY":
            channel = event.params[0].lower()
            user = protocol.User()
            user.user = event.params[1]
            user.host = event.params[2]
            user.server = event.params[3]
            user.nick = event.params[4]
            # The trailing parameter is "<hopcount> <real name>"; split off only
            # the hop count so multi-word real names stay intact (the old
            # split()[1] truncated "John Smith" to "John").
            user.real_name = event.params[6].split(" ", 1)[1]
            self._who_replies[channel].user_list.append(user)
        elif event.command == "RPL_ENDOFWHO":
            channel = event.params[0].lower()
            who_event = self._who_replies[channel]
            who_event.channel_name = channel
            self.activate_handlers(client, who_event)
            # Drop the accumulated entry (as WhoisReplyListener does) so a
            # later WHO on the same channel starts fresh.
            del self._who_replies[channel]
class ErrorReplyListener(ReplyListener):
    """Runs its handlers for server error (ERR_*) replies."""
    def notify(self, client, event):
        if not event.command.startswith("ERR_"):
            return
        self.activate_handlers(client, event)


replies = {
    "reply": ReplyListener,
    "name_reply": NameReplyListener,
    "list_reply": ListReplyListener,
    "whois_reply": WhoisReplyListener,
    "who_reply": WhoReplyListener,
    "error_reply": ErrorReplyListener
    }
4983a092318741ac4b8c3dce7ea36ad19260adbd | 3,377 | py | Python | model.py | parksurk/CVND---Image-Captioning-Project | 536c7b93581f471e2b4b17744fcff31b53bfc14c | [
"MIT"
] | null | null | null | model.py | parksurk/CVND---Image-Captioning-Project | 536c7b93581f471e2b4b17744fcff31b53bfc14c | [
"MIT"
] | null | null | null | model.py | parksurk/CVND---Image-Captioning-Project | 536c7b93581f471e2b4b17744fcff31b53bfc14c | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
    """CNN feature extractor: a frozen pretrained ResNet-50 followed by a
    trainable linear projection into the caption embedding space."""

    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        backbone = models.resnet50(pretrained=True)
        # Freeze the pretrained backbone; only the projection layer trains.
        for weight in backbone.parameters():
            weight.requires_grad_(False)
        # Drop the final classification layer, keeping pooled conv features.
        feature_layers = list(backbone.children())[:-1]
        self.resnet = nn.Sequential(*feature_layers)
        self.embed = nn.Linear(backbone.fc.in_features, embed_size)

    def forward(self, images):
        """Map a batch of images to embedding vectors of size embed_size."""
        pooled = self.resnet(images)
        flattened = pooled.view(pooled.size(0), -1)
        return self.embed(flattened)
class DecoderRNN(nn.Module):
    """LSTM caption decoder: embeds word tokens, conditions on image
    features, and predicts vocabulary scores for each time step."""

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        super(DecoderRNN, self).__init__()
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        # Token embedding, recurrent core, and output projection.
        self.word_embeddings = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(input_size=embed_size,
                            hidden_size=hidden_size,
                            num_layers=num_layers,
                            batch_first=True)
        self.linear = nn.Linear(hidden_size, vocab_size)

    def forward(self, features, captions):
        """Return vocabulary scores of shape (batch, cap_length, vocab_size)."""
        # Embed every caption token except the last (it has no successor to
        # predict): (batch, cap_length) -> (batch, cap_length - 1, embed_size).
        token_vectors = self.word_embeddings(captions[:, :-1])
        # Prepend the image features as the first "word" of each sequence.
        sequence = torch.cat((features.unsqueeze(dim=1), token_vectors), dim=1)
        recurrent_out, _ = self.lstm(sequence)    # (batch, cap_length, hidden_size)
        return self.linear(recurrent_out)         # (batch, cap_length, vocab_size)

    def sample(self, inputs, states=None, max_len=20):
        """Greedily decode a caption from a pre-processed image tensor,
        returning a list of max_len predicted word ids."""
        caption = []
        # Random initial hidden/cell state on the same device as the input.
        hidden = (torch.randn(self.num_layers, 1, self.hidden_size).to(inputs.device),
                  torch.randn(self.num_layers, 1, self.hidden_size).to(inputs.device))
        for _ in range(max_len):
            recurrent_out, hidden = self.lstm(inputs, hidden)    # (1, 1, hidden_size)
            scores = self.linear(recurrent_out).squeeze(1)       # (1, vocab_size)
            word_id = scores.argmax(dim=1)
            caption.append(word_id.item())
            # Feed the predicted word back in as the next input.
            inputs = self.word_embeddings(word_id.unsqueeze(0))  # (1, 1, embed_size)
        return caption
4983a6ce405e93ba0082edda8d39e78a36280b62 | 5,810 | py | Python | spyder/plugins/projects/api.py | ximion/spyder | 50911555cefd95947c887f8a412a58ad96ff8d9e | [
"MIT"
] | 2 | 2020-12-13T04:57:52.000Z | 2021-02-23T03:30:31.000Z | spyder/plugins/projects/api.py | ximion/spyder | 50911555cefd95947c887f8a412a58ad96ff8d9e | [
"MIT"
] | 1 | 2020-11-02T21:11:19.000Z | 2020-11-02T21:11:19.000Z | spyder/plugins/projects/api.py | ximion/spyder | 50911555cefd95947c887f8a412a58ad96ff8d9e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Projects Plugin API.
"""
# Standard library imports
import os
import os.path as osp
from collections import OrderedDict
# Local imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.translations import get_translation
from spyder.config.base import get_project_config_folder
from spyder.plugins.projects.utils.config import (ProjectMultiConfig,
PROJECT_NAME_MAP,
PROJECT_DEFAULTS,
PROJECT_CONF_VERSION,
WORKSPACE, CODESTYLE,
ENCODING, VCS)
# Localization
_ = get_translation("spyder")
class BaseProjectType:
    """
    Spyder base project.
    This base class must not be used directly, but inherited from. It does not
    assume that python is specific to this project.
    """
    # Unique string identifier for the project type; subclasses must set it.
    ID = None

    def __init__(self, root_path, parent_plugin=None):
        # root_path: directory containing the project.
        # parent_plugin: owning Projects plugin instance, if any.
        self.plugin = parent_plugin
        self.root_path = root_path
        self.open_project_files = []
        self.open_non_project_files = []
        # Per-project configuration lives under <root>/<config folder>/config.
        path = os.path.join(root_path, get_project_config_folder(), 'config')
        self.config = ProjectMultiConfig(
            PROJECT_NAME_MAP,
            path=path,
            defaults=PROJECT_DEFAULTS,
            load=True,
            version=PROJECT_CONF_VERSION,
            backup=True,
            raw_mode=True,
            remove_obsolete=False,
        )
        # Record which project type created this configuration.
        self.set_option("project_type", self.ID)

    # --- Helpers
    # -------------------------------------------------------------------------
    def get_option(self, option, section=WORKSPACE, default=None):
        """Get project configuration option."""
        return self.config.get(section=section, option=option, default=default)

    def set_option(self, option, value, section=WORKSPACE):
        """Set project configuration option."""
        self.config.set(section=section, option=option, value=value)

    def set_recent_files(self, recent_files):
        """Set a list of files opened by the project."""
        processed_recent_files = []
        for recent_file in recent_files:
            if os.path.isfile(recent_file):
                # Store paths relative to the project root when possible so
                # the list survives moving the project directory.
                try:
                    relative_recent_file = os.path.relpath(
                        recent_file, self.root_path)
                    processed_recent_files.append(relative_recent_file)
                except ValueError:
                    # relpath can fail (e.g. across drives on Windows);
                    # fall back to the absolute path.
                    processed_recent_files.append(recent_file)
        # De-duplicate while preserving insertion order.
        files = list(OrderedDict.fromkeys(processed_recent_files))
        self.set_option("recent_files", files)

    def get_recent_files(self):
        """Return a list of files opened by the project."""
        recent_files = self.get_option("recent_files", default=[])
        # Resolve stored relative paths against the project root.
        recent_files = [recent_file if os.path.isabs(recent_file)
                        else os.path.join(self.root_path, recent_file)
                        for recent_file in recent_files]
        # Iterate over a copy so removing entries does not skip items.
        for recent_file in recent_files[:]:
            if not os.path.isfile(recent_file):
                recent_files.remove(recent_file)
        return list(OrderedDict.fromkeys(recent_files))

    # --- API
    # ------------------------------------------------------------------------
    @staticmethod
    def get_name():
        """
        Provide a human readable version of NAME.
        """
        raise NotImplementedError("Must implement a `get_name` method!")

    @staticmethod
    def validate_name(path, name):
        """
        Validate the project's name.
        Returns
        -------
        tuple
            The first item (bool) indicates if the name was validated
            successfully, and the second item (str) indicates the error
            message, if any.
        """
        return True, ""

    def create_project(self):
        """
        Create a project and do any additional setup for this project type.
        Returns
        -------
        tuple
            The first item (bool) indicates if the project was created
            successfully, and the second item (str) indicates the error
            message, if any.
        """
        return False, "A ProjectType must define a `create_project` method!"

    def open_project(self):
        """
        Open a project and do any additional setup for this project type.
        Returns
        -------
        tuple
            The first item (bool) indicates if the project was opened
            successfully, and the second item (str) indicates the error
            message, if any.
        """
        return False, "A ProjectType must define an `open_project` method!"

    def close_project(self):
        """
        Close a project and do any additional setup for this project type.
        Returns
        -------
        tuple
            The first item (bool) indicates if the project was closed
            successfully, and the second item (str) indicates the error
            message, if any.
        """
        return False, "A ProjectType must define a `close_project` method!"
class EmptyProject(BaseProjectType):
    # Minimal project type with no extra setup: every lifecycle hook
    # succeeds immediately.
    ID = 'empty-project-type'

    @staticmethod
    def get_name():
        # Translated, human-readable name.
        return _("Empty project")

    def create_project(self):
        return True, ""

    def open_project(self):
        return True, ""

    def close_project(self):
        return True, ""
| 33.2 | 80 | 0.561274 | 611 | 5,810 | 5.176759 | 0.256956 | 0.055643 | 0.015175 | 0.025292 | 0.296238 | 0.267151 | 0.262409 | 0.262409 | 0.24344 | 0.205501 | 0 | 0.00026 | 0.337005 | 5,810 | 174 | 81 | 33.390805 | 0.820613 | 0.281928 | 0 | 0.2125 | 0 | 0 | 0.07385 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.175 | false | 0 | 0.0875 | 0.05 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
49840c226d6c3af814c0af5672e31fa549c5b562 | 3,775 | py | Python | bot.py | nQuisition/pineapple | 8862c366aef4352af3ef7c810532bc88b64677fe | [
"MIT"
] | null | null | null | bot.py | nQuisition/pineapple | 8862c366aef4352af3ef7c810532bc88b64677fe | [
"MIT"
] | null | null | null | bot.py | nQuisition/pineapple | 8862c366aef4352af3ef7c810532bc88b64677fe | [
"MIT"
] | null | null | null | import logging
import traceback
import discord
from PluginManager import PluginManager
logging.basicConfig(filename='pineapple.log', filemode='a', level=logging.INFO,
format=('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger = logging.getLogger('discord')
logger.setLevel(logging.NOTSET)
logging.info("Starting Pineapple")
logging.info("Starting Discord Client")
# Creates a discord client, which we will use to connect and interact with the server.
# All methods with @client.event annotations are event handlers for this client.
client = discord.Client()
logging.info("Loading plugins")
# Loads and initializes the plugin manager for the bot
pm = PluginManager("plugins", "cache", client)
pm.load_plugins()
pm.register_events()
logging.info("Plugins loaded and registered")
@client.event
async def on_ready():
    """
    Event handler, fires when the bot has connected and is logged in
    """
    logging.info('Logged in as ' + client.user.name + " (" + str(client.user.id) + ")")
    # Change nickname to nickname in configuration
    for instance in client.guilds:
        await instance.me.edit(nick=pm.botPreferences.nickName)
        # Load rank bindings
        pm.botPreferences.bind_roles(instance.id)
    # Advertise how to get help in the bot's presence line.
    game = discord.Game('Use ' + pm.botPreferences.commandPrefix + 'help for help')
    await client.change_presence(status=discord.Status.online, activity=game)
    # Hand control to the plugin manager's loop (presumably long-running).
    await pm.handle_loop()
@client.event
async def on_message(message):
    """
    Event handler, fires when a message is received in the server.
    :param message: discord.Message object containing the received message
    """
    try:
        # Only treat prefixed text as a command, and ignore the bot's own
        # messages so it cannot trigger itself.
        if message.content.startswith(pm.botPreferences.commandPrefix) and client.user.id != message.author.id:
            # Send the received message off to the Plugin Manager to handle the command
            # words = (command, separator, remainder-of-line)
            words = message.content.partition(' ')
            await pm.handle_command(message, words[0][len(pm.botPreferences.commandPrefix):], words[1:])
        elif message.guild is not None:
            # Non-command guild message: let plugins inspect it.
            await pm.handle_message(message)
    except Exception as e:
        # Surface plugin failures in-channel instead of dying silently.
        await message.channel.send("Error: " + str(e))
        if pm.botPreferences.get_config_value("client", "debug") == "1":
            traceback.print_exc()
@client.event
async def on_typing(channel, user, when):
    """
    Event handler, fires when a user is typing in a channel
    :param channel: discord.Channel object containing channel information
    :param user: discord.Member object containing the user information
    :param when: datetime timestamp
    """
    try:
        # Let plugins react to typing activity.
        await pm.handle_typing(channel, user, when)
    except Exception as e:
        # Report the failure in the channel where the typing occurred.
        await channel.send("Error: " + str(e))
        if pm.botPreferences.get_config_value("client", "debug") == "1":
            traceback.print_exc()
@client.event
async def on_message_delete(message):
    """
    Event handler, fires when a message is deleted
    :param message: discord.Message object containing the deleted message
    """
    try:
        # Skip deletions of the bot's own messages (matched by author name).
        if message.author.name != "PluginBot":
            await pm.handle_message_delete(message)
    except Exception as e:
        await message.channel.send("Error: " + str(e))
        if pm.botPreferences.get_config_value("client", "debug") == "1":
            traceback.print_exc()
@client.event
async def on_member_join(member):
    # Delegate member-join handling (e.g. welcome logic) to the plugins.
    await pm.handle_member_join(member)

@client.event
async def on_member_remove(member):
    # Delegate member-leave handling to the plugins.
    await pm.handle_member_leave(member)

@client.event
async def on_server_join(server):
    # Re-bind configured roles for every guild after joining a new server.
    for instance in pm.client.guilds:
        pm.botPreferences.bind_roles(instance.id)
# Run the client and login with the bot token (yes, this needs to be down here)
client.run(pm.botPreferences.token)
| 32.826087 | 111 | 0.698278 | 494 | 3,775 | 5.265182 | 0.303644 | 0.061515 | 0.04306 | 0.051134 | 0.320261 | 0.275663 | 0.209919 | 0.175317 | 0.146098 | 0.146098 | 0 | 0.001647 | 0.196026 | 3,775 | 114 | 112 | 33.114035 | 0.855354 | 0.114437 | 0 | 0.353846 | 0 | 0 | 0.097642 | 0.012896 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.061538 | 0 | 0.061538 | 0.046154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4984ccebdd30b982fdf9bfeab51ddbaea9e196a1 | 2,398 | py | Python | vmtkScripts/vmtkmeshvectorfromcomponents.py | CemrgDevelopers/vmtk | a555b3c1686e70dcad0346acaaed9b3481dcb338 | [
"Apache-2.0"
] | 3 | 2016-02-26T17:30:04.000Z | 2017-11-09T03:24:04.000Z | vmtkScripts/vmtkmeshvectorfromcomponents.py | CemrgDevelopers/vmtk | a555b3c1686e70dcad0346acaaed9b3481dcb338 | [
"Apache-2.0"
] | null | null | null | vmtkScripts/vmtkmeshvectorfromcomponents.py | CemrgDevelopers/vmtk | a555b3c1686e70dcad0346acaaed9b3481dcb338 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkmeshvectorfromcomponents.py,v $
## Language: Python
## Date: $Date: 2005/09/14 09:49:59 $
## Version: $Revision: 1.6 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
import vtk
import sys
import pypes
vmtkmeshvectorfromcomponents = 'vmtkMeshVectorFromComponents'
class vmtkMeshVectorFromComponents(pypes.pypeScript):
    """Pype script that packs several scalar point-data arrays into a single
    multi-component vector array on an unstructured-grid mesh."""

    def __init__(self):
        pypes.pypeScript.__init__(self)
        self.Mesh = None
        self.VectorArrayName = None
        self.ComponentsArrayNames = None
        self.RemoveComponentArrays = False
        self.SetScriptName('vmtkmeshvectorfromcomponents')
        self.SetScriptDoc('create a vector array from a number of scalar arrays treated as vector components')
        self.SetInputMembers([
            ['Mesh','i','vtkUnstructuredGrid',1,'','the input mesh','vmtkmeshreader'],
            ['VectorArrayName','vector','str',1,'',''],
            ['ComponentsArrayNames','components','str',-1,'',''],
            ['RemoveComponentArrays','removecomponents','bool',1,'','']
            ])
        self.SetOutputMembers([
            ['Mesh','o','vtkUnstructuredGrid',1,'','the output mesh','vmtkmeshwriter']
            ])

    def Execute(self):
        # PEP 8: compare to None with identity, not equality (was `== None`).
        if self.Mesh is None:
            self.PrintError('Error: no Mesh.')
        numberOfComponents = len(self.ComponentsArrayNames)
        vectorArray = vtk.vtkDoubleArray()
        vectorArray.SetName(self.VectorArrayName)
        vectorArray.SetNumberOfComponents(numberOfComponents)
        vectorArray.SetNumberOfTuples(self.Mesh.GetNumberOfPoints())
        # Copy each named scalar array into the matching vector component.
        for i in range(numberOfComponents):
            componentArray = self.Mesh.GetPointData().GetArray(self.ComponentsArrayNames[i])
            vectorArray.CopyComponent(i, componentArray, 0)
            if self.RemoveComponentArrays:
                self.Mesh.GetPointData().RemoveArray(self.ComponentsArrayNames[i])
        self.Mesh.GetPointData().AddArray(vectorArray)
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
| 32.849315 | 110 | 0.66347 | 225 | 2,398 | 7 | 0.546667 | 0.030476 | 0.038095 | 0.020317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01174 | 0.218515 | 2,398 | 72 | 111 | 33.305556 | 0.828709 | 0.204337 | 0 | 0.05 | 0 | 0 | 0.192369 | 0.040806 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.075 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
49853b6c297a1e39a4291f0b28f92bdf1f5cfa06 | 629 | py | Python | xyjxyf/tools/dxhtmlparser.py | saurabh896/python-1 | f8d3aedf4c0fe6e24dfa3269ea7e642c9f7dd9b7 | [
"MIT"
] | 3,976 | 2015-01-01T15:49:39.000Z | 2022-03-31T03:47:56.000Z | xyjxyf/tools/dxhtmlparser.py | dwh65416396/python | 1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a | [
"MIT"
] | 97 | 2015-01-11T02:59:46.000Z | 2022-03-16T14:01:56.000Z | xyjxyf/tools/dxhtmlparser.py | dwh65416396/python | 1a7e3edd1cd3422cc0eaa55471a0b42e004a9a1a | [
"MIT"
] | 3,533 | 2015-01-01T06:19:30.000Z | 2022-03-28T13:14:54.000Z | # encoding = utf-8
from html.parser import HTMLParser
class DXHTMLParser(HTMLParser):
    """HTML parser that collects the values of one attribute (e.g. 'href')
    from every occurrence of one tag, prefixing root-relative values with a
    base URL."""

    def __init__(self, tag, tag_name, url):
        # tag: element name to match (e.g. 'a').
        # tag_name: attribute whose value is collected (e.g. 'href').
        # url: base URL used to absolutize root-relative values.
        HTMLParser.__init__(self)
        self.tag = tag
        self.tag_name = tag_name
        self.url = url
        # Collected attribute values, in document order.
        self.rets = []

    def handle_starttag(self, tag, attrs):
        # Called by HTMLParser for every opening tag in the fed document.
        if tag == self.tag:
            for name,value in attrs:
                if name == self.tag_name:
                    # Root-relative path ("/...") -> prefix with the base URL.
                    if value.startswith("/") and len(self.url) > 0:
                        value = self.url + value
                    self.rets.append(value)
def getrets(self):
return self.rets | 27.347826 | 67 | 0.54372 | 76 | 629 | 4.328947 | 0.407895 | 0.12766 | 0.06079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004938 | 0.356121 | 629 | 23 | 68 | 27.347826 | 0.807407 | 0.025437 | 0 | 0 | 0 | 0 | 0.001634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.058824 | 0.058824 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4987070a0979ff49f74a01c79dfa56c4bd9bb2f4 | 3,161 | py | Python | src/fhir_types/FHIR_Immunization_ProtocolApplied.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | 2 | 2022-02-03T00:51:30.000Z | 2022-02-03T18:42:43.000Z | src/fhir_types/FHIR_Immunization_ProtocolApplied.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | src/fhir_types/FHIR_Immunization_ProtocolApplied.py | anthem-ai/fhir-types | 42348655fb3a9b3f131b911d6bc0782da8c14ce4 | [
"Apache-2.0"
] | null | null | null | from typing import Any, List, Literal, TypedDict
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_Element import FHIR_Element
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
# Describes the event of a patient being administered a vaccine or a record of an immunization as reported by a patient, a clinician or another party.
FHIR_Immunization_ProtocolApplied = TypedDict(
"FHIR_Immunization_ProtocolApplied",
{
# Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
"id": FHIR_string,
# May be used to represent additional information that is not part of the basic definition of the element. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension.
"extension": List[Any],
# May be used to represent additional information that is not part of the basic definition of the element and that modifies the understanding of the element in which it is contained and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself).
"modifierExtension": List[Any],
# One possible path to achieve presumed immunity against a disease - within the context of an authority.
"series": FHIR_string,
# Extensions for series
"_series": FHIR_Element,
# Indicates the authority who published the protocol (e.g. ACIP) that is being followed.
"authority": FHIR_Reference,
# The vaccine preventable disease the dose is being administered against.
"targetDisease": List[FHIR_CodeableConcept],
# Nominal position in a series.
"doseNumberPositiveInt": float,
# Extensions for doseNumberPositiveInt
"_doseNumberPositiveInt": FHIR_Element,
# Nominal position in a series.
"doseNumberString": str,
# Extensions for doseNumberString
"_doseNumberString": FHIR_Element,
# The recommended number of doses to achieve immunity.
"seriesDosesPositiveInt": float,
# Extensions for seriesDosesPositiveInt
"_seriesDosesPositiveInt": FHIR_Element,
# The recommended number of doses to achieve immunity.
"seriesDosesString": str,
# Extensions for seriesDosesString
"_seriesDosesString": FHIR_Element,
},
total=False,
)
| 70.244444 | 836 | 0.748814 | 408 | 3,161 | 5.737745 | 0.340686 | 0.021358 | 0.015378 | 0.009398 | 0.332337 | 0.311833 | 0.311833 | 0.311833 | 0.311833 | 0.311833 | 0 | 0 | 0.208478 | 3,161 | 44 | 837 | 71.840909 | 0.935651 | 0.657387 | 0 | 0 | 0 | 0 | 0.236398 | 0.113508 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.192308 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
498819c88d72ad9e6c8dcc724899648d36a16a08 | 4,411 | py | Python | Main/AlphaZero/DistributedSelfPlay/Trainer.py | FreddeFrallan/AlphaHero | bda8f78424294a4f52359b6591296abb229611ae | [
"MIT"
] | null | null | null | Main/AlphaZero/DistributedSelfPlay/Trainer.py | FreddeFrallan/AlphaHero | bda8f78424294a4f52359b6591296abb229611ae | [
"MIT"
] | null | null | null | Main/AlphaZero/DistributedSelfPlay/Trainer.py | FreddeFrallan/AlphaHero | bda8f78424294a4f52359b6591296abb229611ae | [
"MIT"
] | null | null | null | import datetime
import os
import time
from Main import MachineSpecificSettings, Hyperparameters
from Main.AlphaZero.DistributedSelfPlay import Connection, FitModel
from Main.AlphaZero import Utils
from Main.Training.Connect4 import MemoryBuffers
STATUS_TRAIN_DATA = "trainData"
STATUS_INIT_MODEL = "initModel"
def _getModelPath():
    """Absolute path of the scratch file where the trainer stores the model."""
    model_file_name = "TrainerModel"
    return os.path.abspath(model_file_name)
def _writeModelToDiskAsBytes(modelAsBytes):
    """Persist a serialized model to the trainer's scratch file.

    modelAsBytes: raw bytes of a saved model file.
    Returns the absolute path the bytes were written to.
    """
    tempFilePath = _getModelPath()
    # Context manager guarantees the handle is closed even if write() fails
    # (the original leaked the file descriptor on error).
    with open(tempFilePath, 'wb') as f:
        f.write(modelAsBytes)
    return tempFilePath
def _readModelFromDisk():
    """Return the trainer's scratch model file contents as bytes.

    Counterpart of _writeModelToDiskAsBytes().
    """
    # Context manager guarantees the handle is closed even if read() fails
    # (the original leaked the file descriptor on error).
    with open(_getModelPath(), 'rb') as f:
        return f.read()
def _getLearningRate(generation):
    """Look up the learning rate for a model generation.

    Hyperparameters.LEARNING_RATE_SCHEDULE is a sequence of
    (generation_threshold, lr) pairs: the first entry whose threshold
    exceeds `generation` wins; past the last threshold the final lr is used.
    """
    schedule = Hyperparameters.LEARNING_RATE_SCHEDULE
    for threshold, lr in schedule:
        if generation < threshold:
            return lr
    # Generation is beyond every threshold: keep the terminal learning rate.
    return schedule[-1][1]
def _init(port):
    """Connect to the overlord on localhost:<port>, receive the initial model
    plus trainer settings, and cache the model on disk.

    Returns (connection, absolute path of the on-disk model file).
    """
    connection = Connection.Connection(ip='localhost', port=port, server=False)
    status, data = connection.readMessage()
    # First message from the overlord must be the model-initialization one.
    assert status == STATUS_INIT_MODEL
    modelAsBytes, trainerSettings = data
    modelAbsPath = _writeModelToDiskAsBytes(modelAsBytes)
    # Overlord-supplied replay-buffer settings override local defaults.
    Hyperparameters.REPLAY_BUFFER_LENGTH = trainerSettings[0]
    Hyperparameters.SLIDING_WINDOW_TURNS_TO_FULL = trainerSettings[1]
    # Interactive choice: optionally reload previously generated self-play
    # training data from disk.
    if ("Y" in input("Use old training data (Y/N):").upper()):
        MemoryBuffers.loadOldTrainingDataFromDisk()
    return connection, modelAbsPath
'''
The looping trainer is passed Self-Play data from the Overlord
This data is appended to the replay buffer, where all data contained in the buffer is used in the supervised learning
Upon finish, the updated network is sent back to the overlord
'''
def loopingTrainer(port, gpuSettings):
    """Trainer main loop: receive self-play data from the overlord, fit the
    network on the replay buffer, and send the updated weights back.

    port: local port used to reach the overlord.
    gpuSettings: value assigned to CUDA_VISIBLE_DEVICES (e.g. "0").
    """
    connection, modelAbsPath = _init(port)
    import os, StartInit
    StartInit.init()
    print("Starting Trainer GPU-Settings: {}".format(gpuSettings))
    # Must be set before keras/TensorFlow is imported below so the GPU
    # restriction takes effect.
    os.environ['CUDA_VISIBLE_DEVICES'] = gpuSettings
    from Main.AlphaZero import NeuralNetworks
    import numpy as np
    import keras
    MachineSpecificSettings.setupHyperparameters()
    singleModel = keras.models.load_model(modelAbsPath)
    # In our experiments we ended up using only a single GPU for training. Since a to big batch-size gave weird results
    if (MachineSpecificSettings.AMOUNT_OF_GPUS > 1):
        trainingModel = NeuralNetworks.createMultipleGPUModel(singleModel)
    else:
        trainingModel = singleModel
    # Training Loop
    while (True):
        status, data = connection.readMessage()
        print("Got msg:", status)
        if (status == STATUS_TRAIN_DATA):  # TODO: Create an informative else statement
            t1 = time.time()  # Only used for displaying elapsed time to the user
            modelVersion, states, values, policies, weights = data
            # Setup settings for this training turn
            keras.backend.set_value(trainingModel.optimizer.lr, _getLearningRate(modelVersion))
            MemoryBuffers.CURRENT_MODEL_VERSION = modelVersion
            MemoryBuffers.addLabelsToReplayBuffer(states, values, policies)
            # Get all the data contained in the Replay Buffers. With pre-calculated average of similair states
            inStates, valueLabels, policyLabels = MemoryBuffers.getDistinctTrainingData()
            s = np.array(inStates)
            v = np.array(valueLabels)
            p = np.array(policyLabels)
            # Run the supervised-learning
            dataProcessingTime = time.time() - t1
            print("Data preprocessing finished: {}".format(dataProcessingTime))
            print("Using LR:", keras.backend.get_value(trainingModel.optimizer.lr))
            trainingModel.fit(np.array(s), [np.array(v), np.array(p)],
                              epochs=Hyperparameters.EPOCHS_PER_TRAINING, batch_size=Hyperparameters.MINI_BATCH_SIZE,
                              verbose=2,
                              shuffle=True)
            # Save both to the shared scratch path and as a versioned snapshot.
            singleModel.save(modelAbsPath, overwrite=True)
            singleModel.save(Hyperparameters.MODELS_SAVE_PATH + str(modelVersion + 1))
            trainedModelAsBytes = _readModelFromDisk()
            print("Training finished:", time.time() - t1)
            # Ship the updated weights back to the overlord.
            connection.sendMessage("Finished", (trainedModelAsBytes,))
            MemoryBuffers.storeTrainingDataToDisk()
| 35.861789 | 119 | 0.697348 | 463 | 4,411 | 6.544276 | 0.444924 | 0.013861 | 0.016832 | 0.015182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002915 | 0.222399 | 4,411 | 122 | 120 | 36.155738 | 0.880466 | 0.096123 | 0 | 0.049383 | 0 | 0 | 0.05338 | 0 | 0 | 0 | 0 | 0.008197 | 0.012346 | 1 | 0.074074 | false | 0 | 0.135802 | 0.012346 | 0.283951 | 0.061728 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
49881a03b11a0638bc293766738e036c0f55673f | 3,348 | py | Python | vision/ssd/mobilenetv1_ssd_lite.py | idenc/SSD-translate | 6cf8c83b42d6d47282d98822b90491264288d6f1 | [
"MIT"
] | 2 | 2020-03-17T02:49:26.000Z | 2020-05-21T04:10:12.000Z | vision/ssd/mobilenetv1_ssd_lite.py | idenc/SSD-translate | 6cf8c83b42d6d47282d98822b90491264288d6f1 | [
"MIT"
] | 2 | 2021-08-25T15:47:57.000Z | 2022-02-10T00:31:17.000Z | vision/ssd/mobilenetv1_ssd_lite.py | idenc/SSD-translate | 6cf8c83b42d6d47282d98822b90491264288d6f1 | [
"MIT"
] | 1 | 2020-05-21T04:10:13.000Z | 2020-05-21T04:10:13.000Z | from tensorflow.python.keras.layers import Conv2D, SeparableConv2D
from vision.nn.mobilenet import MobileNet
from .config import mobilenetv1_ssd_config as config
from .predictor import Predictor
from .ssd import SSD
def create_mobilenetv1_ssd_lite(num_classes, is_test=False, is_train=False):
    """Build an SSD-Lite detector on a MobileNetV1 backbone.

    num_classes: number of object classes predicted per default box.
    is_test/is_train: mode flags forwarded unchanged to the SSD wrapper.
    Returns an SSD model instance.
    """
    base_net = MobileNet(input_shape=(config.image_size, config.image_size, 3),
                         include_top=False, weights=None)  # disable dropout layer
    # Backbone layer indices whose feature maps feed the detection heads.
    source_layer_indexes = [
        73,
        85,
    ]

    def _extra_block(bottleneck_filters, out_filters):
        # One extra feature-map stage: 1x1 bottleneck conv followed by a
        # stride-2 separable conv (same structure as the original inline lists).
        return [
            Conv2D(filters=bottleneck_filters, kernel_size=1, activation='relu'),
            SeparableConv2D(filters=out_filters, kernel_size=3, strides=2,
                            padding="same"),
        ]

    # De-duplicated: the four extra stages only differ in their filter counts.
    extras = [
        _extra_block(256, 512),
        _extra_block(128, 256),
        _extra_block(128, 256),
        _extra_block(128, 256),
    ]
    # 6 default boxes per location x 4 box-offset coordinates each.
    regression_headers = [
        SeparableConv2D(filters=6 * 4, kernel_size=3, padding="same",
                        activation='relu')
        for _ in range(5)
    ] + [Conv2D(filters=6 * 4, kernel_size=1)]
    # 6 default boxes per location x class scores; names match the original
    # 'sep_conv_extra_<i>_<6*num_classes>' scheme exactly.
    classification_headers = [
        SeparableConv2D(filters=6 * num_classes, kernel_size=3, padding="same",
                        activation='relu',
                        name='sep_conv_extra_' + str(i) + '_' + str(6 * num_classes))
        for i in range(1, 6)
    ] + [Conv2D(filters=6 * num_classes, kernel_size=1)]
    return SSD(num_classes, base_net, source_layer_indexes,
               extras, classification_headers, regression_headers,
               is_test=is_test, config=config, is_train=is_train)
def create_mobilenetv1_ssd_lite_predictor(net, candidate_size=200, nms_method=None, sigma=0.5):
    """Wrap a trained SSD net in a Predictor using the standard config.

    net: SSD model built by create_mobilenetv1_ssd_lite.
    candidate_size: number of candidate boxes kept before NMS.
    nms_method/sigma: NMS variant selector and soft-NMS sigma.
    """
    predictor = Predictor(net, config.image_size, config.image_mean,
                          config.image_std,
                          nms_method=nms_method,
                          iou_threshold=config.iou_threshold,
                          candidate_size=candidate_size,
                          sigma=sigma)
    return predictor
| 46.5 | 98 | 0.631123 | 389 | 3,348 | 5.192802 | 0.200514 | 0.09901 | 0.076238 | 0.089109 | 0.643069 | 0.581188 | 0.567327 | 0.544059 | 0.544059 | 0.544059 | 0 | 0.044382 | 0.252987 | 3,348 | 71 | 99 | 47.15493 | 0.763295 | 0.006272 | 0 | 0.290323 | 0 | 0 | 0.059248 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.080645 | 0 | 0.145161 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
498858075f297b3801e95791f233bcdeee02d803 | 2,159 | py | Python | main.py | escaperoomdoc/smartsnake | a9d658108669efa6aded0533a49196af5e0f46ab | [
"MIT"
] | null | null | null | main.py | escaperoomdoc/smartsnake | a9d658108669efa6aded0533a49196af5e0f46ab | [
"MIT"
] | null | null | null | main.py | escaperoomdoc/smartsnake | a9d658108669efa6aded0533a49196af5e0f46ab | [
"MIT"
] | null | null | null | import pygame
import sys
import json
from snake import Snake, Generation
# Optionally seed the population with genes loaded from a JSON file whose
# path is given as the first command-line argument.
genes = None
if len(sys.argv) > 1:
    fname = sys.argv[1]
    with open(fname, 'r', encoding='utf-8') as f:
        data = f.read()
    obj = json.loads(data)
    # Keys presumably name the input->hidden and hidden->output weight
    # matrices of the snake's network — confirm against Snake/Generation.
    genes = [obj['i2h_1'], obj['i2h_2'], obj['h2o_1'], obj['h2o_2']]
    print(f'genes parsed from file {fname}')
cell_size = 15
cells = 40
BG_COLOR = (0, 0, 0)
resolution = (cell_size * cells, cell_size * cells)
fps = 20
screen = pygame.display.set_mode(resolution)
clock = pygame.time.Clock()
pygame.display.set_caption('snake')
def draw_rect(y: int, x: int, color):
    # Draw one grid cell at (row y, column x) on the module-level screen;
    # the -1/-2 offsets shrink the rect so a 1px gap separates cells.
    pygame.draw.rect(screen, color, (x * cell_size - 1, y * cell_size - 1, cell_size - 2, cell_size - 2))
# Build the snake population, optionally seeded with genes loaded above.
gen = Generation(genes=genes)
# silent_mode: skip rendering; auto_mode: advance the simulation every frame.
silent_mode = False
auto_mode = False
while True:
    # handle events
    exit_flag = False
    step = False
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit_flag = True
        if event.type == pygame.KEYDOWN:
            dir = None
            # Arrow keys steer; SPACE single-steps the generation;
            # S toggles rendering; A toggles automatic stepping.
            if event.key == pygame.K_LEFT: dir = (0, -1)
            elif event.key == pygame.K_RIGHT: dir = (0, 1)
            elif event.key == pygame.K_UP: dir = (-1, 0)
            elif event.key == pygame.K_DOWN: dir = (1, 0)
            elif event.key == pygame.K_SPACE: step = True
            elif event.key == pygame.K_s: silent_mode = not silent_mode
            elif event.key == pygame.K_a: auto_mode = not auto_mode
            if dir:
                for snake in gen.snakes:
                    if pygame.key.get_mods() & pygame.KMOD_SHIFT:
                        # Shift+arrow: debug-print the snake's sensor scans
                        # in that direction instead of moving it.
                        wall = snake.scan(dir, Snake.Wall)
                        body = snake.scan(dir, Snake.Body)
                        food = snake.scan(dir, Snake.Food)
                        print(f'wall={wall}, body={body}, food={food}')
                    else:
                        snake.move(dir)
    if step: gen.step()
    if exit_flag: break
    # silent mode
    if auto_mode: gen.step()
    # draw snakes
    if not silent_mode:
        screen.fill(BG_COLOR)
        for snake in gen.snakes:
            if not snake.alive: continue
            for body in snake.body:
                # Best snake rendered green, the rest grey.
                if snake == gen.best_snake:
                    draw_rect(body[0], body[1], (0, 255, 0))
                else:
                    draw_rect(body[0], body[1], (196, 196, 196))
            draw_rect(snake.food[0], snake.food[1], (255, 0, 0))
        # flip buffers
        pygame.display.flip()
    # Only throttle to the frame rate when auto-advancing.
    if auto_mode: clock.tick(fps)
print('exit')
498b5d220b8646ed905f2a8b76bf81c0acfeeb03 | 1,441 | bzl | Python | tools/py_cffi/build_defs.bzl | Antetokounpo/plaidml | 06891b0a4a2691994580add063232f83294f7fec | [
"Apache-2.0"
] | null | null | null | tools/py_cffi/build_defs.bzl | Antetokounpo/plaidml | 06891b0a4a2691994580add063232f83294f7fec | [
"Apache-2.0"
] | null | null | null | tools/py_cffi/build_defs.bzl | Antetokounpo/plaidml | 06891b0a4a2691994580add063232f83294f7fec | [
"Apache-2.0"
] | null | null | null | def _py_cffi_impl(ctx):
args = ctx.actions.args()
args.add_all(ctx.files.srcs, before_each = "--source")
args.add("--module", ctx.attr.module)
args.add("--output", ctx.outputs.out)
ctx.actions.run(
inputs = ctx.files.srcs,
outputs = [ctx.outputs.out],
arguments = [args],
tools = [ctx.executable._tool],
executable = ctx.executable._tool,
mnemonic = "PyCffi",
)
return [DefaultInfo(files = depset([ctx.outputs.out]))]
# Rule wrapper around _py_cffi_impl; the order of `srcs` is significant
# (see the py_cffi macro below).
py_cffi_rule = rule(
    attrs = {
        "srcs": attr.label_list(
            allow_files = True,
            mandatory = True,
        ),
        "module": attr.string(
            mandatory = True,
        ),
        "out": attr.output(),
        # The generator binary, built for the host platform.
        "_tool": attr.label(
            default = Label("//tools/py_cffi"),
            allow_single_file = True,
            executable = True,
            cfg = "host",
        ),
    },
    implementation = _py_cffi_impl,
)
# It's named srcs_ordered because we want to prevent buildifier from automatically sorting this list.
def py_cffi(name, module, srcs_ordered, tags = [], **kwargs):
    """Generates a cffi Python module and wraps it in a py_library.

    Args:
        name: base target name; the generated source file is "<name>.py".
        module: cffi module name handed to the generator tool.
        srcs_ordered: input sources, kept in the caller's order (the name
            prevents buildifier from sorting the list, per the comment above
            this macro).
        tags: extra tags; "conda" is always appended.
        **kwargs: forwarded unchanged to the wrapping py_library.
    """
    out = name + ".py"
    py_cffi_rule(
        name = name + "_py_cffi",
        module = module,
        srcs = srcs_ordered,
        out = out,
        tags = tags + ["conda"],
    )
    native.py_library(
        name = name,
        srcs = [out],
        tags = tags + ["conda"],
        **kwargs
    )
| 27.188679 | 101 | 0.537821 | 158 | 1,441 | 4.734177 | 0.405063 | 0.05615 | 0.052139 | 0.042781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.323387 | 1,441 | 52 | 102 | 27.711538 | 0.767179 | 0.068702 | 0 | 0.145833 | 0 | 0 | 0.065672 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
498c4803dab070505af5f2df598669f122a51b66 | 11,265 | py | Python | Analyses/analyses_table.py | alexgonzl/TreeMazeAnalyses | a834dc6b59beffe6bce59cdd9749b761fab3fe08 | [
"MIT"
] | null | null | null | Analyses/analyses_table.py | alexgonzl/TreeMazeAnalyses | a834dc6b59beffe6bce59cdd9749b761fab3fe08 | [
"MIT"
] | null | null | null | Analyses/analyses_table.py | alexgonzl/TreeMazeAnalyses | a834dc6b59beffe6bce59cdd9749b761fab3fe08 | [
"MIT"
] | null | null | null | # creates a table with progress of analyses for each session
# update analyses table
from pathlib import Path
import os,sys, json, datetime, getopt
import pickle as pkl
import time
import pandas as pd
import numpy as np
sys.path.append('../PreProcessing/')
sys.path.append('../Lib/')
sys.path.append('../Analyses/')
import TrialAnalyses as TA
import TreeMazeFunctions as TMF
nTetrodes=16
def getOakPaths():
    """Return the standard Oak storage locations as a name -> Path dict."""
    root = Path('/mnt/o/giocomo/alexg/')
    return {
        'Root': root,
        'Clustered': root / 'Clustered',
        'PreProcessed': root / 'PreProcessed',
        'Raw': root / 'RawData' / 'InVivo',
        'Analyses': root / 'Analyses',
    }
def getAnimalPaths(rootPath, animal):
    """Return per-animal directories derived from the shared root paths.

    'PreProcessed' stays at the root level; the other three gain a
    per-animal subdirectory.
    """
    return {
        'Clusters': rootPath['Clustered'] / animal,
        'Raw': rootPath['Raw'] / animal,
        'PreProcessed': rootPath['PreProcessed'],
        'Analyses': rootPath['Analyses'] / animal,
    }
def getSessionPaths(rootPath, session,step=0.02,SR=32000):
    """Build every file/directory path used by one session's analyses.

    Side effect: creates the Analyses directory tree (including plot
    subfolders) if missing.

    :param rootPath: dict of Oak roots as returned by getOakPaths
    :param session: session id formatted 'animal_task_date'
    :param step: binning step in seconds (default 20 ms)
    :param SR: acquisition sampling rate in Hz
    :return: dict mapping product names to Path objects (plus a few
        plain metadata values: session/animal/task/date/step/SR)
    """
    # Session ids encode animal, task and date separated by underscores.
    tmp = session.split('_')
    animal = tmp[0]
    task = tmp[1]
    date = tmp[2]
    Paths = {}
    Paths['session'] = session
    Paths['animal']=animal
    Paths['task'] = task
    Paths['date'] = date
    Paths['step'] = step
    Paths['SR'] = SR
    Paths['Clusters'] = rootPath['Clustered'] / animal /(session+'_KSClusters')
    Paths['Raw'] = rootPath['Raw'] / animal / session
    Paths['PreProcessed'] = rootPath['PreProcessed'] / animal / (session + '_Results')
    Paths['Analyses'] = rootPath['Analyses'] / animal/ (session + '_Analyses')
    Paths['ClusterTable'] = rootPath['Clustered'] / animal / (animal+'_ClusteringSummary.json')
    # Ensure the analyses output folder exists before deriving files in it.
    Paths['Analyses'].mkdir(parents=True, exist_ok=True)
    # Binned products embed the step size in milliseconds in the filename.
    Paths['BehavTrackDat'] = Paths['Analyses'] / ('BehTrackVariables_{}ms.h5'.format(int(step*1000)))
    # Per-unit-type products: single/isolated units ('Cell') and multi-unit
    # activity ('Mua').
    for ut in ['Cell','Mua']:
        Paths[ut + '_Spikes'] = Paths['Analyses'] / (ut+'_Spikes.json')
        Paths[ut + '_WaveForms'] = Paths['Analyses'] / (ut+'_WaveForms.pkl')
        Paths[ut + '_WaveFormInfo'] = Paths['Analyses'] / (ut+'_WaveFormInfo.pkl')
        Paths[ut + '_Bin_Spikes'] = Paths['Analyses'] / ('{}_Bin_Spikes_{}ms.npy'.format(ut,int(step*1000)))
        Paths[ut + '_FR'] = Paths['Analyses'] / ('{}_FR_{}ms.npy'.format(ut,int(step*1000)))
    Paths['Spike_IDs'] = Paths['Analyses'] / 'Spike_IDs.json'
    Paths['ZoneAnalyses'] = Paths['Analyses'] / 'ZoneAnalyses.pkl'
    Paths['TrialInfo'] = Paths['Analyses'] / 'TrInfo.pkl'
    Paths['TrialCondMat'] = Paths['Analyses'] / 'TrialCondMat.csv'
    Paths['TrLongPosMat'] = Paths['Analyses'] / 'TrLongPosMat.csv'
    Paths['TrLongPosFRDat'] = Paths['Analyses'] / 'TrLongPosFRDat.csv'
    Paths['TrModelFits'] = Paths['Analyses'] / 'TrModelFits.csv'
    Paths['TrModelFits2'] = Paths['Analyses'] / 'TrModelFits2.csv'
    Paths['CueDesc_SegUniRes'] = Paths['Analyses'] / 'CueDesc_SegUniRes.csv'
    Paths['CueDesc_SegDecRes'] = Paths['Analyses'] / 'CueDesc_SegDecRes.csv'
    Paths['CueDesc_SegDecSumRes'] = Paths['Analyses'] / 'CueDesc_SegDecSumRes.csv'
    Paths['PopCueDesc_SegDecSumRes'] = Paths['Analyses'] / 'PopCueDesc_SegDecSumRes.csv'
    # plots directories
    Paths['Plots'] = Paths['Analyses'] / 'Plots'
    Paths['Plots'].mkdir(parents=True, exist_ok=True)
    Paths['SampCountsPlots'] = Paths['Plots'] / 'SampCountsPlots'
    Paths['SampCountsPlots'].mkdir(parents=True, exist_ok=True)
    Paths['ZoneFRPlots'] = Paths['Plots'] / 'ZoneFRPlots'
    Paths['ZoneFRPlots'].mkdir(parents=True, exist_ok=True)
    Paths['ZoneCorrPlots'] = Paths['Plots'] / 'ZoneCorrPlots'
    Paths['ZoneCorrPlots'].mkdir(parents=True, exist_ok=True)
    Paths['SIPlots'] = Paths['Plots'] / 'SIPlots'
    Paths['SIPlots'].mkdir(parents=True, exist_ok=True)
    Paths['TrialPlots'] = Paths['Plots'] / 'TrialPlots'
    Paths['TrialPlots'].mkdir(parents=True, exist_ok=True)
    Paths['CueDescPlots'] = Paths['Plots'] / 'CueDescPlots'
    Paths['CueDescPlots'].mkdir(parents=True, exist_ok=True)
    return Paths
def checkRaw(sePaths,aTable):
    """Flag sessions whose raw Neuralynx recordings are complete.

    Sets aTable['Raw'] to 1 only when every CSC channel file exists for
    all `nTetrodes` tetrodes (channels a-d); otherwise 0.

    :param sePaths: dict of per-session path dicts, keyed by session id
    :param aTable: pandas DataFrame indexed by session id (mutated in place)
    :return: the updated aTable
    """
    for se in aTable.index:
        rawFlag = 1
        for ch in ['a','b','c','d']:
            for tt in np.arange(1,nTetrodes+1):
                if not (sePaths[se]['Raw'] / ('CSC{}{}.ncs'.format(tt,ch))).exists():
                    rawFlag = 0
                    # only exits the tetrode loop; remaining channels are
                    # still scanned, harmlessly (flag stays 0)
                    break
        aTable.loc[se,'Raw']=rawFlag
    return aTable
def checkPrePro(sePaths,aTable):
    """Flag pre-processing progress per session.

    'PP' = 1 if at least one tetrode binary exists (partial progress);
    'PP_A' = 1 only if all `nTetrodes` binaries exist.

    :return: the updated aTable (also mutated in place)
    """
    for se in aTable.index:
        allTTFlag = 1
        partialFlag = 0
        for tt in np.arange(1,nTetrodes+1):
            if not (sePaths[se]['PreProcessed'] / ('tt_{}.bin'.format(tt))).exists():
                allTTFlag=0
            else:
                partialFlag=1
        aTable.loc[se,'PP']=partialFlag
        aTable.loc[se,'PP_A']=allTTFlag
    return aTable
def checkSort(sePaths,aTable):
    """Flag spike-sorting (KiloSort rez.mat) progress per session.

    'Sort' = 1 if at least one tetrode was sorted; 'Sort_A' = 1 only if
    every tetrode has a rez.mat output.

    :return: the updated aTable (also mutated in place)
    """
    for se in aTable.index:
        allTTFlag = 1
        partialFlag = 0
        for tt in np.arange(1,nTetrodes+1):
            if not (sePaths[se]['Clusters'] / ('tt_{}'.format(tt)) / 'rez.mat').exists():
                allTTFlag=0
            else:
                partialFlag=1
        aTable.loc[se,'Sort']=partialFlag
        aTable.loc[se,'Sort_A']=allTTFlag
    return aTable
def checkClust(sePaths,aTable):
    """Flag manual curation (phy cluster_group.tsv) progress per session.

    'Clust' = 1 if at least one tetrode was curated; 'Clust_A' = 1 only if
    every tetrode has a cluster_group.tsv.

    :return: the updated aTable (also mutated in place)
    """
    for se in aTable.index:
        allTTFlag = 1
        partialFlag = 0
        for tt in np.arange(1,nTetrodes+1):
            if not (sePaths[se]['Clusters'] / ('tt_{}'.format(tt)) / 'cluster_group.tsv').exists():
                allTTFlag=0
            else:
                partialFlag=1
        aTable.loc[se,'Clust']=partialFlag
        aTable.loc[se,'Clust_A']=allTTFlag
    return aTable
def checkFR(sePaths,aTable):
    """Flag firing-rate/binned-spike computation per session.

    'FR_A' = 1 only if the Cell_Bin_Spikes file exists AND every unit has
    at least one spike; 'FR' = 1 when the file exists and all rows are
    nonzero.  NOTE(review): when the file exists but some unit rows sum to
    zero, both flags end up 0 -- confirm that is the intended meaning of
    "partial".

    :return: the updated aTable (also mutated in place)
    """
    for se in aTable.index:
        allFR = 1
        partialFR = 0
        if not (sePaths[se]['Cell_Bin_Spikes']).exists():
            allFR = 0
        else:
            dat = np.load(sePaths[se]['Cell_Bin_Spikes'])
            # every unit (row) must have at least one binned spike
            if np.all(dat.sum(axis=1)>0):
                partialFR = 1
            else:
                allFR = 0
        aTable.loc[se,'FR']=partialFR
        aTable.loc[se,'FR_A']=allFR
    return aTable
def checkZoneAnalyses(sePaths,aTable):
    """Flag zone-analysis output for non-open-field ('OF') sessions.

    Sessions with Task == 'OF' are skipped (their 'Zone' cell is left as-is).

    :return: the updated aTable (also mutated in place)
    """
    for se in aTable.index:
        if not (aTable.loc[se,'Task']=='OF'):
            if sePaths[se]['ZoneAnalyses'].exists():
                aTable.loc[se,'Zone']=1
            else:
                aTable.loc[se,'Zone']=0
    return aTable
def checkTrialAnalyses(sePaths,aTable):
    """Flag trial-analysis and model-fit outputs for non-'OF' sessions.

    'Trial' tracks TrialCondMat.csv, 'TrModels' tracks TrModelFits.csv.
    Open-field sessions are skipped entirely.

    :return: the updated aTable (also mutated in place)
    """
    for se in aTable.index:
        if not (aTable.loc[se,'Task']=='OF'):
            if sePaths[se]['TrialCondMat'].exists():
                aTable.loc[se,'Trial']= 1
            else:
                aTable.loc[se,'Trial']= 0
            if sePaths[se]['TrModelFits'].exists():
                aTable.loc[se,'TrModels'] = 1
            else:
                aTable.loc[se,'TrModels'] = 0
    return aTable
def loadSessionData(sessionPaths,vars = ['all']):
    """Load previously computed analysis products for one session.

    :param sessionPaths: dict of paths as returned by getSessionPaths
    :param vars: list of product names to load; 'all' expands to every
        known product.  NOTE(review): `vars` shadows the builtin and uses a
        mutable default list -- it is only read here, so this is harmless,
        but worth renaming.
    :return: dict keyed by product name
    """
    if 'all' in vars:
        vars = ['wfi','bin_spikes','fr','ids','za','PosDat','TrialLongMat',
                'TrialFRLongMat','fitTable','TrialConds']
    dat = {}
    # Build model-number -> model-name strings, e.g. 'Loc-IO-Cue'.
    mods = {}
    params = TA.getParamSet()
    for k,pp in params.items():
        s =''
        for p in pp:
            s+='-'+p
        mods[k]=s[1:]
    # Per-unit-type products are nested dicts keyed 'Cell'/'Mua'.
    for a in ['wfi','bin_spikes','fr']:
        if a in vars:
            dat[a] = {}
    for ut in ['Cell','Mua']:
        if 'wfi' in vars:
            with sessionPaths[ut+'_WaveFormInfo'].open(mode='rb') as f:
                dat['wfi'][ut] = pkl.load(f)
        if 'bin_spikes' in vars:
            dat['bin_spikes'][ut]=np.load(sessionPaths[ut+'_Bin_Spikes'])
        if 'fr' in vars:
            dat['fr'][ut] = np.load(sessionPaths[ut+'_FR'])
    if 'ids' in vars:
        with sessionPaths['Spike_IDs'].open() as f:
            dat['ids'] = json.load(f)
    if 'za' in vars:
        with sessionPaths['ZoneAnalyses'].open(mode='rb') as f:
            dat['za'] = pkl.load(f)
    if 'PosDat' in vars:
        dat['PosDat'] = TMF.getBehTrackData(sessionPaths)
    if 'TrialLongMat' in vars:
        dat['TrialLongMat'] = pd.read_csv( sessionPaths['TrLongPosMat'],index_col=0)
    if 'TrialFRLongMat' in vars:
        dat['TrialFRLongMat'] = pd.read_csv(sessionPaths['TrLongPosFRDat'],index_col=0)
    if 'TrialConds' in vars:
        dat['TrialConds'] = pd.read_csv(sessionPaths['TrialCondMat'] ,index_col=0)
    if 'fitTable' in vars:
        def addModelName(fitTable,fitNum):
            # Attach a human-readable 'selMod' column derived from each
            # unit's selected model number; -1 / unmatched -> 'UnCla'.
            # fitNum selects which parameter set was used for the fit file.
            mods = {}
            if fitNum==1:
                params = TA.getParamSet()
            else:
                params = TA.getParamSet(params=['Loc','IO','Cue','Sp','Co'])
            for k,pp in params.items():
                s =''
                for p in pp:
                    s+='-'+p
                mods[k]=s[1:]
            selModels = []
            for u in fitTable['modelNum']:
                if u>-1:
                    selModels.append(mods[int(u)])
                else:
                    selModels.append('UnCla')
            fitTable['selMod'] = selModels
            return fitTable
        if sessionPaths['TrModelFits'].exists():
            dat['fitTable'] = pd.read_csv(sessionPaths['TrModelFits'],index_col=0)
            if not ('selMod' in dat['fitTable'].columns):
                dat['fitTable'] = addModelName(dat['fitTable'] ,1)
        if sessionPaths['TrModelFits2'].exists():
            dat['fitTable2'] = pd.read_csv(sessionPaths['TrModelFits2'],index_col=0)
            if not ('selMod' in dat['fitTable2'].columns):
                dat['fitTable2'] = addModelName(dat['fitTable2'] ,2)
        #if isinstance(dat['fitTable'] ,pd.core.frame.DataFrame):
        #    nUnits = dat['fitTable'] .shape[0]
        #    x=[]
        #    for i in np.arange(nUnits):
        #        if np.isnan(dat['fitTable'] ['modelNum'][i]):
        #            x.append('UnCla')
        #        else:
        #            x.append(mods[dat['fitTable'] ['modelNum'][i]])
        #    dat['fitTable']['selMod'] = x
    return dat
# if __name__ == '__main__':
# ID = ''
# minFileSize = 16384
# TetrodeRecording = 1
# nTetrodes = 16
#
# if len(sys.argv)<2:
# print("Usage: %s -a ID " % sys.argv[0])
# sys.exit('Invalid input.')
#
# print(sys.argv[1:])
# myopts, args = getopt.getopt(sys.argv[1:],"a:p:")
# for o, a in myopts:
# print(o,a)
# if o == '-a':
# ID = str(a)
# elif o == '-p':
# if str(a)=='NR32':
# TetrodeRecording = 0
# nChannels = 32
# elif str(a)=='TT16':
# TetrodeRecording = 1
# nTetrodes=16
# else:
# sys.exit('Invalid Probe Type.')
# else:
# print("Usage: %s -a ID " % sys.argv[0])
# sys.exit('Invalid input. Aborting.')
#
# oakPaths = getOakPaths()
| 34.768519 | 109 | 0.546915 | 1,246 | 11,265 | 4.882825 | 0.187801 | 0.047009 | 0.030736 | 0.027613 | 0.287968 | 0.20069 | 0.190993 | 0.143984 | 0.126233 | 0.10503 | 0 | 0.013026 | 0.291256 | 11,265 | 323 | 110 | 34.876161 | 0.748998 | 0.109277 | 0 | 0.283186 | 0 | 0 | 0.219694 | 0.034857 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053097 | false | 0 | 0.035398 | 0 | 0.141593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
498d6ae2c1adc0f223fd5b27d183fc36d3ccafca | 2,969 | py | Python | spikeinterface/toolkit/postprocessing/tests/test_curationsorting.py | vncntprvst/spikeinterface | dd5ae94f85fe5d9082b45321d2c96ba316eb4b77 | [
"MIT"
] | null | null | null | spikeinterface/toolkit/postprocessing/tests/test_curationsorting.py | vncntprvst/spikeinterface | dd5ae94f85fe5d9082b45321d2c96ba316eb4b77 | [
"MIT"
] | null | null | null | spikeinterface/toolkit/postprocessing/tests/test_curationsorting.py | vncntprvst/spikeinterface | dd5ae94f85fe5d9082b45321d2c96ba316eb4b77 | [
"MIT"
] | null | null | null | import pytest
from spikeinterface.core import NumpySorting
import numpy as np
from spikeinterface.toolkit.postprocessing import CurationSorting, MergeUnitsSorting, SplitUnitSorting
def test_split_merge():
    """Splitting a unit and re-merging the parts must reproduce the parent
    sorting's spike trains and keep its unit properties; merging with
    duplicate removal must drop near-coincident spikes."""
    spikestimes = [{
        0: np.arange(15),
        1: np.arange(17),
        2: np.arange(17) + 5,
        4: np.concatenate([np.arange(10), np.arange(20, 30)]),
        5: np.arange(9)
    }, {
        0: np.arange(15),
        1: np.arange(17),
        2: np.arange(40, 140),
        4: np.arange(40, 140),
        5: np.arange(40, 140)
    }]
    # 1 kHz sampling so that 1 sample == 1 ms
    parent_sort = NumpySorting.from_dict(spikestimes, sampling_frequency=1000)
    parent_sort.set_property('someprop', [float(k) for k in spikestimes[0].keys()])  # float

    # Split unit 4 by even/odd spike time, then merge the halves back.
    split_index = [v[4] % 2 for v in spikestimes]
    splited = SplitUnitSorting(parent_sort, split_unit_id=4, indices_list=split_index,
                               new_unit_ids=[8, 10], properties_policy='keep')
    merged = MergeUnitsSorting(splited, new_unit_id=4, units_to_merge=[8, 10],
                               properties_policy='keep')
    for i in range(len(spikestimes)):
        # BUG FIX: message read "splir or merge error"; also dropped the
        # redundant "== True" comparison.
        assert all(parent_sort.get_unit_spike_train(4, segment_index=i) ==
                   merged.get_unit_spike_train(4, segment_index=i)), 'split or merge error'
    # BUG FIX: message was written as 'wasn''t' -- two adjacent string
    # literals that concatenate to "wasnt".
    assert parent_sort.get_unit_property(4, 'someprop') == \
        merged.get_unit_property(4, 'someprop'), "property wasn't kept"

    # Unit 0's spikes all coincide with unit 1's, so merging with duplicate
    # removal should yield exactly unit 1's spike train.
    merged_with_dups = MergeUnitsSorting(parent_sort, new_unit_id=8, units_to_merge=[0, 1],
                                         properties_policy='remove', delta_time_ms=0.5)
    for i in range(len(spikestimes)):
        assert all(merged_with_dups.get_unit_spike_train(8, segment_index=i) ==
                   parent_sort.get_unit_spike_train(1, segment_index=i)), 'error removing duplications'
    assert np.isnan(merged_with_dups.get_unit_property(8, 'someprop')), 'error creating empty property'
def test_curation():
    """Exercise CurationSorting: merge, split, and undo/redo bookkeeping."""
    spikestimes = [{
        'a':np.arange(15),
        'b':np.arange(5,10),
        'c':np.arange(20),
    },{
        'a':np.arange(12,15),
        'b':np.arange(3,17),
        'c':np.arange(50)
    }]
    parent_sort = NumpySorting.from_dict(spikestimes, sampling_frequency=1000) #to have 1 sample=1ms
    parent_sort.set_property('some_names',['unit_{}'.format(k) for k in spikestimes[0].keys()]) #float
    cs = CurationSorting(parent_sort, properties_policy='remove')
    # %%
    cs.merge(['a','c'])
    split_index = [v['b']<6 for v in spikestimes] #split unit 'b' at spike time 6
    cs.split('b', split_index)
    after_split = cs.sorting
    all_units = cs.sorting.get_unit_ids()
    # merging everything should leave exactly one unit
    cs.merge(all_units)
    assert len(cs.sorting.get_unit_ids())==1, 'error merging units'
    # undo must restore the exact pre-merge sorting object
    cs.undo()
    assert cs.sorting is after_split
    cs.redo()
    unit = cs.sorting.get_unit_ids()[0]
    for i in range(len(spikestimes)):
        assert all(cs.sorting.get_unit_spike_train(unit,segment_index=i)==parent_sort.get_unit_spike_train('c',segment_index=i))
# Allow running this test module directly without pytest.
if __name__ == '__main__':
    test_split_merge()
    test_curation()
| 40.671233 | 160 | 0.686426 | 445 | 2,969 | 4.350562 | 0.269663 | 0.070248 | 0.03719 | 0.052686 | 0.435434 | 0.335744 | 0.330579 | 0.330579 | 0.216942 | 0.17562 | 0 | 0.040667 | 0.171775 | 2,969 | 72 | 161 | 41.236111 | 0.746645 | 0.038397 | 0 | 0.254237 | 0 | 0 | 0.070952 | 0 | 0 | 0 | 0 | 0 | 0.118644 | 1 | 0.033898 | false | 0 | 0.067797 | 0 | 0.101695 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
49914e5236b7426399f8134735899144f4eed2fe | 3,218 | py | Python | pycukes/console.py | hltbra/pycukes | 7eca6d9bf6577f6bc735a3893a3b1d7c0af950f1 | [
"MIT"
] | 1 | 2018-05-22T07:10:39.000Z | 2018-05-22T07:10:39.000Z | pycukes/console.py | hugobr/pycukes | 7eca6d9bf6577f6bc735a3893a3b1d7c0af950f1 | [
"MIT"
] | 1 | 2016-08-24T01:17:34.000Z | 2016-08-24T01:17:34.000Z | pycukes/console.py | hugobr/pycukes | 7eca6d9bf6577f6bc735a3893a3b1d7c0af950f1 | [
"MIT"
] | null | null | null | from finder import (find_steps_modules,
find_text_specs,
find_before_all,
find_before_each,
find_after_all,
find_after_each,)
from runner import StoryRunner
from optparse import OptionParser
import sys
import os
def pycukes_console(stories_dir, steps_dir, output, colored=False):
    """Run every story spec found under ``stories_dir`` against the step
    definitions found under ``steps_dir``, writing results to ``output``.
    """
    step_modules = find_steps_modules(steps_dir)
    for story_spec in find_text_specs(stories_dir):
        runner = StoryRunner(story_spec, output,
                             colored=colored, modules=step_modules)
        runner.run()
def main():
    """Command-line entry point.

    Positional arguments are story files; options select a stories
    directory, a step-definitions directory, coloring, and language.
    Exits with 0 if every story passed, 1 otherwise.
    """
    steps_modules = []
    files = []
    before_all_methods = []
    before_each_methods = []
    after_all_methods = []
    after_each_methods = []
    stories_dirname = 'stories'
    # Collect positional story files up to the first option flag; the last
    # file's directory becomes the default stories dir.
    for arg in sys.argv[1:]:
        if arg.startswith('-'):
            break
        files.append(arg)
        stories_dirname = os.path.dirname(arg) or '.'
    parser = OptionParser()
    parser.add_option('-s', '--stories-dir', default=None, dest='stories_dir')
    parser.add_option('-t', '--steps-dir', default=None, dest='steps_dir')
    parser.add_option('-n', '--no-colors', default=None, action='store_true', dest='no_colors')
    parser.add_option('-c', '--colored', default=None, action='store_true', dest='colored')
    parser.add_option('-l', '--language', default='en-us', dest='language')
    values, args = parser.parse_args()
    try:
        # Explicit --stories-dir wins; otherwise scan the default dir only
        # when no story files were given positionally.
        if values.stories_dir:
            files.extend([values.stories_dir+'/'+filename for filename in os.listdir(values.stories_dir)
                          if filename.endswith('.story')])
            stories_dirname = values.stories_dir
        elif files == []:
            files.extend([stories_dirname+'/'+filename for filename in os.listdir(stories_dirname)
                          if filename.endswith('.story')])
        steps_modules = find_steps_modules(values.steps_dir or stories_dirname+'/step_definitions')
    except OSError:
        # Missing directories are tolerated; hooks/steps stay empty.
        pass
    # Optional per-project hook methods live in a 'support' directory.
    if os.path.exists(stories_dirname+'/support'):
        before_all_methods = find_before_all(stories_dirname+'/support')
        after_all_methods = find_after_all(stories_dirname+'/support')
        before_each_methods = find_before_each(stories_dirname+'/support')
        after_each_methods = find_after_each(stories_dirname+'/support')
    # Colored output by default; --no-colors disables it unless --colored
    # is also given.
    colored = True
    if values.no_colors and not values.colored:
        colored = False
    exit_code = True
    for index, story in enumerate(files):
        story_status = StoryRunner(open(story).read(),
                                   sys.stdout,
                                   colored=colored,
                                   modules=steps_modules,
                                   language=values.language,
                                   before_all=before_all_methods,
                                   before_each=before_each_methods,
                                   after_all=after_all_methods,
                                   after_each=after_each_methods).run()
        # one failing story makes the whole run fail
        exit_code = exit_code and story_status
        if index < len(files)-1:
            sys.stdout.write('\n\n')
    exit(int(not exit_code))
| 40.734177 | 104 | 0.587632 | 355 | 3,218 | 5.053521 | 0.259155 | 0.085842 | 0.041806 | 0.025641 | 0.140468 | 0.06689 | 0 | 0 | 0 | 0 | 0 | 0.000898 | 0.307645 | 3,218 | 78 | 105 | 41.25641 | 0.804309 | 0 | 0 | 0.029412 | 0 | 0 | 0.067433 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0.014706 | 0.073529 | 0 | 0.102941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4993e23cbe0cf6952dc8121b967680dd2f369001 | 1,732 | py | Python | lightning_pass/util/database.py | kucera-lukas/lightning-pass | 6be2860dae5e93a09702c180321d75bc5fe01b93 | [
"MIT"
] | null | null | null | lightning_pass/util/database.py | kucera-lukas/lightning-pass | 6be2860dae5e93a09702c180321d75bc5fe01b93 | [
"MIT"
] | null | null | null | lightning_pass/util/database.py | kucera-lukas/lightning-pass | 6be2860dae5e93a09702c180321d75bc5fe01b93 | [
"MIT"
] | null | null | null | """Module containing various utils connected to database management."""
import contextlib
from typing import TYPE_CHECKING, Iterator
import mysql.connector
if TYPE_CHECKING:
from mysql.connector import MySQLConnection
from mysql.connector.cursor import MySQLCursor
@contextlib.contextmanager
def database_manager() -> Iterator["MySQLCursor"]:
    """Manage database queries easily with context manager.

    Automatically yields a buffered cursor on __enter__ and commits and
    closes the connection on __exit__.

    :yields: database connection cursor
    :raises ConnectionRefusedError: if the MySQL server is unreachable
    """
    # avoid circular import
    from lightning_pass.settings import Credentials

    try:
        con: MySQLConnection = mysql.connector.connect(
            host=Credentials.db_host,
            user=Credentials.db_user,
            password=Credentials.db_password,
            database=Credentials.db_database,
        )
        # fix unread results with buffered cursor
        cur: MySQLCursor = con.cursor(buffered=True)
    except mysql.connector.errors.InterfaceError as e:
        raise ConnectionRefusedError(
            "Please make sure that your database is running.",
        ) from e
    else:
        yield cur
    finally:
        # `con` is unbound when connect() failed, hence the suppress.
        with contextlib.suppress(UnboundLocalError):
            con.commit()
            con.close()
@contextlib.contextmanager
def enable_db_safe_mode() -> Iterator[None]:
    """Turn OFF MySQL safe-update mode for the duration of the block.

    Safe updates are disabled (SET SQL_SAFE_UPDATES = 0) on entry and
    re-enabled on exit, even if the body raises.

    NOTE(review): the original docstring said "Enable database safe mode",
    but the SQL actually disables it within the block; the function name is
    similarly misleading but cannot be changed without breaking callers.
    """
    with database_manager() as db:
        sql = "SET SQL_SAFE_UPDATES = 0"
        db.execute(sql)
    try:
        yield
    finally:
        with database_manager() as db:
            sql = "SET SQL_SAFE_UPDATES = 1"
            db.execute(sql)
__all__ = [
"enable_db_safe_mode",
"database_manager",
]
| 27.0625 | 76 | 0.669746 | 189 | 1,732 | 5.962963 | 0.47619 | 0.062112 | 0.031943 | 0.028394 | 0.076309 | 0.076309 | 0.076309 | 0.076309 | 0.076309 | 0.076309 | 0 | 0.001552 | 0.255774 | 1,732 | 63 | 77 | 27.492063 | 0.87277 | 0.198614 | 0 | 0.238095 | 0 | 0 | 0.096083 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.047619 | 0.142857 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4995b8da2e97fc72c831cb7203fa33f05403b258 | 1,360 | py | Python | codeDojo/TirePressureMonitoringSystem/test_tire_pressure_monitoring.py | kelesi/coedcop | 2bdbac207cf6f81de70b92c644c40663bbea8c8a | [
"MIT"
] | 1 | 2017-12-08T15:55:17.000Z | 2017-12-08T15:55:17.000Z | codeDojo/TirePressureMonitoringSystem/test_tire_pressure_monitoring.py | kelesi/coedcop | 2bdbac207cf6f81de70b92c644c40663bbea8c8a | [
"MIT"
] | null | null | null | codeDojo/TirePressureMonitoringSystem/test_tire_pressure_monitoring.py | kelesi/coedcop | 2bdbac207cf6f81de70b92c644c40663bbea8c8a | [
"MIT"
] | null | null | null | import unittest
import decimal
from mock import MagicMock
from mock import patch
from parameterized import parameterized
from tire_pressure_monitoring import Alarm
from tire_pressure_monitoring import Sensor
class AlarmTest(unittest.TestCase):
    """Parameterized tests for Alarm with Sensor readings mocked out.

    Each case sweeps a pressure range and asserts the expected alarm state.
    """

    @parameterized.expand([
        # (pressure_start, pressure_stop, expected alarm state)
        [15, 16, True],
        [17, 21, False],
        [22, 30, True]
    ])
    def test_alarm(self, pressure_start, pressure_stop, is_alarm_on):
        # BUG FIX: `xrange` is Python-2-only; `range` behaves identically
        # here and works on both interpreters.
        for pressure in range(pressure_start, pressure_stop, 1):
            with patch.object(Sensor, 'pop_next_pressure_psi_value',
                              return_value=pressure) as mock_method:
                alarm = Alarm()
                alarm.check()
                alarm_status = alarm.is_alarm_on
                # check() must read the sensor exactly once
                mock_method.assert_called_once()
                # BUG FIX: replaced the placeholder message "Blabla %s"
                # with a meaningful one.
                self.assertEqual(alarm_status, is_alarm_on,
                                 "unexpected alarm state at pressure %s" % pressure)
# Run the suite directly with verbose output (bypasses unittest.main()).
if __name__ == "__main__":
    #unittest.main()
    suite = unittest.TestLoader().loadTestsFromTestCase(AlarmTest)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 34 | 107 | 0.682353 | 164 | 1,360 | 5.347561 | 0.384146 | 0.039909 | 0.051311 | 0.059293 | 0.502851 | 0.429875 | 0.429875 | 0.429875 | 0.429875 | 0.429875 | 0 | 0.013295 | 0.225735 | 1,360 | 39 | 108 | 34.871795 | 0.819563 | 0.011029 | 0 | 0 | 0 | 0 | 0.044715 | 0.027439 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.041667 | false | 0 | 0.291667 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
499644bc4d4ae01dfab3ce06a5de9abedfa01c13 | 6,867 | py | Python | tests/niftynet_global_config_test.py | krishnapalS/NiftyNet | c31019b14137d5a39d59c81221fe2b8e066b38b4 | [
"Apache-2.0"
] | null | null | null | tests/niftynet_global_config_test.py | krishnapalS/NiftyNet | c31019b14137d5a39d59c81221fe2b8e066b38b4 | [
"Apache-2.0"
] | null | null | null | tests/niftynet_global_config_test.py | krishnapalS/NiftyNet | c31019b14137d5a39d59c81221fe2b8e066b38b4 | [
"Apache-2.0"
] | null | null | null | from glob import glob
from os import (remove, makedirs)
from os.path import (expanduser, join, isdir, isfile)
from os.path import getmtime
from shutil import rmtree
from unittest import TestCase
from niftynet.utilities.niftynet_global_config import NiftyNetGlobalConfig
class NiftyNetGlobalConfigTestBase(TestCase):
    """Tests included here all pertain to the NiftyNet global configuration
    file and `NiftyNetGlobalConfig` is a singleton. These require each test
    to be run separately. This is why all tests are decorated with
    `skipUnless`.

    NOTE(review): no `skipUnless` decorators are visible in this chunk --
    confirm the docstring still matches the actual decoration.
    """

    @classmethod
    def typify(cls, file_path):
        """Append file type extension to passed file path."""
        return '.'.join([file_path, cls.file_type])

    @classmethod
    def remove_path(cls, path):
        """Remove passed item, whether it's a file or directory."""
        print("removing {}".format(path))
        if isdir(path):
            rmtree(path)
        elif isfile(path):
            remove(path)

    @classmethod
    def setUpClass(cls):
        # use this method in sub-classes: subclasses delegate here so the
        # shared class attributes are always set on the base class
        if cls is not NiftyNetGlobalConfigTestBase:
            NiftyNetGlobalConfigTestBase.setUpClass()
            return
        cls.config_home = join(expanduser('~'), '.niftynet')
        cls.file_type = 'ini'
        cls.config_file = join(cls.config_home, cls.typify('config'))
        cls.header = '[global]'
        # expected defaults written by NiftyNetGlobalConfig
        cls.default_config_opts = {
            'home': '~/niftynet',
            'ext': 'extensions',
            'ext_mods': ['network']
        }

    def setUp(self):
        # start every test from a clean slate: no config dir, no home dir
        NiftyNetGlobalConfigTestBase.remove_path(
            NiftyNetGlobalConfigTestBase.config_home)
        NiftyNetGlobalConfigTestBase.remove_path(
            expanduser(NiftyNetGlobalConfigTestBase.default_config_opts['home'])
        )

    def tearDown(self):
        # same cleanup after each test
        self.setUp()
class TestGlobalConfigSingleton(NiftyNetGlobalConfigTestBase):
    """NiftyNetGlobalConfig() must always return the same instance."""

    @classmethod
    def setUpClass(cls):
        NiftyNetGlobalConfigTestBase.setUpClass()

    def test_global_config_singleton(self):
        global_config_1 = NiftyNetGlobalConfig()
        global_config_2 = NiftyNetGlobalConfig()
        self.assertEqual(global_config_1, global_config_2)
        # identity, not just equality
        self.assertTrue(global_config_1 is global_config_2)
class TestNonExistingConfigFileCreated(NiftyNetGlobalConfigTestBase):
    """setup() must create the config file when none exists."""

    def test_non_existing_config_file_created(self):
        self.assertFalse(isfile(NiftyNetGlobalConfigTestBase.config_file))
        global_config = NiftyNetGlobalConfig().setup()
        self.assertTrue(isfile(NiftyNetGlobalConfigTestBase.config_file))
        self.assertEqual(global_config.get_niftynet_config_folder(),
                         NiftyNetGlobalConfigTestBase.config_home)
class TestExistingConfigFileLoaded(NiftyNetGlobalConfigTestBase):
    """setup() must honour the home setting of an existing config file."""

    def test_existing_config_file_loaded(self):
        # create a config file with a custom NiftyNet home
        makedirs(NiftyNetGlobalConfigTestBase.config_home)
        custom_niftynet_home = '~/customniftynethome'
        custom_niftynet_home_abs = expanduser(custom_niftynet_home)
        config = ''.join(['home = ', custom_niftynet_home])
        with open(NiftyNetGlobalConfigTestBase.config_file, 'w') as config_file:
            config_file.write('\n'.join(
                [NiftyNetGlobalConfigTestBase.header, config]))
        global_config = NiftyNetGlobalConfig().setup()
        self.assertEqual(global_config.get_niftynet_home_folder(),
                         custom_niftynet_home_abs)
        # the custom home is outside the base tearDown's paths, so remove
        # it here explicitly
        NiftyNetGlobalConfigTestBase.remove_path(custom_niftynet_home_abs)
class TestIncorrectConfigFileBackedUp(NiftyNetGlobalConfigTestBase):
    """An invalid config file must be backed up and replaced by defaults."""

    def test_incorrect_config_file_backed_up(self):
        # create an incorrect config file at the correct location
        makedirs(NiftyNetGlobalConfigTestBase.config_home)
        incorrect_config = '\n'.join([NiftyNetGlobalConfigTestBase.header,
                                      'invalid_home_tag = ~/niftynet'])
        with open(NiftyNetGlobalConfigTestBase.config_file, 'w') as config_file:
            config_file.write(incorrect_config)
        # the following should back it up and replace it with default config
        global_config = NiftyNetGlobalConfig().setup()
        self.assertTrue(isfile(NiftyNetGlobalConfigTestBase.config_file))
        self.assertEqual(global_config.get_niftynet_config_folder(),
                         NiftyNetGlobalConfigTestBase.config_home)
        # check if incorrect file was backed up (config-backup-* pattern)
        found_files = glob(
            join(NiftyNetGlobalConfigTestBase.config_home,
                 NiftyNetGlobalConfigTestBase.typify('config-backup-*')))
        self.assertTrue(len(found_files) == 1)
        # backup must contain the original, incorrect content verbatim
        with open(found_files[0], 'r') as backup_file:
            self.assertEqual(backup_file.read(), incorrect_config)
        # cleanup: remove backup file
        NiftyNetGlobalConfigTestBase.remove_path(found_files[0])
class TestNonExistingNiftynetHomeCreated(NiftyNetGlobalConfigTestBase):
    """setup() must create the NiftyNet home tree, including the extension
    package's __init__.py files."""

    def test_non_existing_niftynet_home_created(self):
        niftynet_home = expanduser(
            NiftyNetGlobalConfigTestBase.default_config_opts['home'])
        NiftyNetGlobalConfigTestBase.remove_path(niftynet_home)
        self.assertFalse(isdir(niftynet_home))
        niftynet_ext = join(
            niftynet_home, NiftyNetGlobalConfigTestBase.default_config_opts['ext']
        )
        # precondition: neither the extension package nor its submodules exist
        self.assertFalse(isfile(join(niftynet_ext, '__init__.py')))
        for mod in NiftyNetGlobalConfigTestBase.default_config_opts['ext_mods']:
            self.assertFalse(isfile(join(niftynet_ext, mod, '__init__.py')))
        global_config = NiftyNetGlobalConfig().setup()
        self.assertTrue(isdir(niftynet_home))
        self.assertTrue(isfile(join(niftynet_ext, '__init__.py')))
        for mod in NiftyNetGlobalConfigTestBase.default_config_opts['ext_mods']:
            self.assertTrue(isfile(join(niftynet_ext, mod, '__init__.py')))
class TestExistingNiftynetHomeNotTouched(NiftyNetGlobalConfigTestBase):
    """An already-populated NiftyNet home must not be modified."""

    def test_existing_niftynet_home_not_touched(self):
        niftynet_home = expanduser(
            NiftyNetGlobalConfigTestBase.default_config_opts['home'])
        makedirs(niftynet_home)
        niftynet_ext = join(
            niftynet_home, NiftyNetGlobalConfigTestBase.default_config_opts['ext']
        )
        makedirs(niftynet_ext)
        niftynet_ext_init = join(niftynet_ext, '__init__.py')
        open(niftynet_ext_init, 'w').close()
        mtime_before = getmtime(niftynet_ext_init)
        # NOTE(review): other tests call .setup(); this one only constructs
        # the singleton -- confirm that is intentional.
        global_config = NiftyNetGlobalConfig()
        mtime_after = getmtime(niftynet_ext_init)
        # the pre-existing __init__.py must not have been rewritten
        self.assertEqual(mtime_before, mtime_after)
| 40.157895 | 83 | 0.684142 | 660 | 6,867 | 6.84697 | 0.221212 | 0.047798 | 0.030095 | 0.069706 | 0.318655 | 0.293649 | 0.238991 | 0.225714 | 0.225714 | 0.19252 | 0 | 0.001713 | 0.234892 | 6,867 | 170 | 84 | 40.394118 | 0.858394 | 0.085773 | 0 | 0.294118 | 0 | 0 | 0.041021 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.10084 | false | 0 | 0.058824 | 0 | 0.235294 | 0.008403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
49975226b6d53bc6703ae8e4d0c13e17efcb92ac | 23,084 | py | Python | acoustics-server.py | klange/amppy | 4f6d5b701baaaae4dd990137887d11533ed68b47 | [
"NCSA"
] | 6 | 2015-05-07T00:40:47.000Z | 2018-08-27T02:39:32.000Z | acoustics-server.py | klange/amppy | 4f6d5b701baaaae4dd990137887d11533ed68b47 | [
"NCSA"
] | null | null | null | acoustics-server.py | klange/amppy | 4f6d5b701baaaae4dd990137887d11533ed68b47 | [
"NCSA"
] | 3 | 2018-03-24T17:28:13.000Z | 2021-11-11T18:15:32.000Z | #!/usr/bin/env python3
import http.server
import http.cookies
import socketserver
import base64
import subprocess
import socket
import os
import json
import sys
import time
import uuid
import tempfile
import importlib
from urllib.parse import urlparse, parse_qs
sys.path.append('lib')
from amp import db, config
import amp.players
import amp.rpc.local
PORT = 6969
mode_registry = {}
class ModeMeta(type):
    """Metaclass that auto-registers Mode subclasses in `mode_registry`.

    A subclass declaring a `name` attribute (or a `names` list) becomes
    reachable by those keys, so the server can dispatch requests to the
    right handler by mode string.
    """
    def __new__(cls, name, bases, dct):
        new_cls = super(ModeMeta, cls).__new__(cls, name, bases, dct)
        if 'name' in dct:
            mode_registry[dct['name']] = new_cls
        if 'names' in dct:
            # `name` is deliberately reused as the loop variable here;
            # the class-name argument is no longer needed at this point
            for name in dct['names']:
                mode_registry[name] = new_cls
        return new_cls
class Mode(metaclass=ModeMeta):
    """Base class for API request handlers ("modes").

    Subclasses set `name` (or `names`) and are auto-registered in
    `mode_registry` by ModeMeta.
    """

    def __init__(self, server, session):
        # `server` owns the DB handle and player list; `session` carries
        # the requesting user's state.
        self.owner = server
        self.session = session

    def get(self, args):
        """Fallback GET handler; subclasses override this."""
        # BUG FIX: error message read "initialized by not implemented".
        return (400, {"api_error": "Internal API error: You have requested a mode which was initialized but not implemented by the server."})
class ModeStatus(Mode):
    """Report the full state of the session's selected player."""
    name = "status"

    def get(self, args):
        """Return (200, payload) with player, queue, user and permission info."""
        output = {}
        output["selected_player"] = self.session._player
        output["players"] = self.owner.players
        output["player"] = self.session.player()
        output["now_playing"] = self.session.currentSong()
        output["playlist"] = self.owner.db.PlayerQueue(self.session._player)
        output["who"] = self.session.user()
        output["can_skip"] = self.session.can_skip()
        output["is_admin"] = self.session.is_admin()
        return (200, output)
class ModeGlobalStatus(Mode):
    """Report the state of every known player plus its current song."""
    name = "global_status"

    def get(self, args):
        output = {}
        output["who"] = self.session.user()
        output["player_names"] = self.owner.players
        output["players"] = {}
        for i in output['player_names']:
            x = self.owner.db.SELECT("players", {"player_id": i})
            # players without a DB row (never started) are simply omitted
            if x:
                pl = {}
                pl["info"] = x[0]
                pl["song"] = self.owner.db.SELECT(
                    "songs", {"song_id": x[0]["song_id"]})[0]
                output["players"][i] = pl
        return (200, output)
class ModeSearch(Mode):
    """Paged full-text search over the song library."""
    name = "paged_search"

    def get(self, args):
        """Handle GET: requires 'value'; optional 'limit' (10), 'offset' (0).

        NOTE(review): 'limit'/'offset' are passed through as received --
        presumably strings from the query parser; confirm db.Search
        accepts them unconverted.
        """
        if "value" not in args:
            return (400, {"api_error": "Search requests require a 'value' argument."})
        limit = 10
        offset = 0
        if "limit" in args:
            limit = args["limit"]
        if "offset" in args:
            offset = args["offset"]
        results = self.owner.db.Search(args["value"], limit, offset)
        return (200, results)
class ModeQuickSearch(Mode):
    """Lightweight search used for autocomplete-style queries."""
    name = "quick_search"

    def get(self, args):
        """Handle GET: requires 'q'; optional 'limit' (default 10)."""
        if "q" not in args:
            return (400, {"api_error": "Quick search results a 'q' argument."})
        limit = 10
        if "limit" in args:
            limit = args["limit"]
        results = self.owner.db.QuickSearch(args["q"], limit)
        return (200, results)
class ModeRandom(Mode):
    """API mode ``random``: return ``amount`` random songs (default 10)."""
    name = "random"

    def get(self, args):
        how_many = args["amount"] if "amount" in args else 10
        return (200, self.owner.db.Random(how_many))
class ModeRecent(Mode):
    """API mode ``recent``: return ``amount`` recently added songs (default 10)."""
    name = "recent"

    def get(self, args):
        how_many = args["amount"] if "amount" in args else 10
        return (200, self.owner.db.Recent(how_many))
class ModeHistory(Mode):
    """API mode ``history``: recently played songs, grouped by play event."""
    name = "history"

    def get(self, args):
        """Return play history, optionally filtered by ``voter``.

        The DB yields one row per (song, voter); consecutive rows for the
        same song at the same timestamp are collapsed into one entry whose
        ``who`` field is the list of voters.
        """
        limit = 10
        voter = None
        if "amount" in args:
            limit = args["amount"]
        if "voter" in args:
            voter = args["voter"]
        results = self.owner.db.History(voter=voter, limit=limit)
        history = []
        for song in results:
            if history and song["song_id"] == history[-1]["song_id"] and history[-1]["time"] == song["time"]:
                # Same play event as the previous entry: merge this voter in.
                history[-1]["who"].append(song["who"])
            else:
                song["who"] = [song["who"]]
                history.append(song)
        # Bug fix: previously returned the raw ``results`` list, silently
        # discarding the merged ``history`` built above.
        return (200, history)
class ModeDetails(Mode):
    """API mode ``get_details``: one song's row plus its current voters."""
    name = "get_details"

    def get(self, args):
        if "song_id" not in args:
            return (400, {"api_error": "get_details requires a 'song_id'"})
        db = self.owner.db
        song = db.SELECT("songs", {"song_id": args["song_id"]})[0]
        song["now"] = int(time.time())
        voters = db.SELECT(
            "votes", {"song_id": song["song_id"], "player_id": self.session._player})
        song["who"] = [row['who'] for row in voters]
        return (200, {"song": song})
class ModeArt(Mode):
    """API mode ``art``: resized cover art for a song, via ImageMagick."""
    name = "art"

    def get(self, args):
        if "song_id" not in args:
            return (400, {"api_error": "art requires a 'song_id'"})
        if "size" not in args:
            size = 500
        else:
            size = min(int(args["size"]), 1000)  # cap the edge length
        song = self.owner.db.SELECT("songs", {"song_id": args["song_id"]})[0]
        candidates = ["acoustics-art.png", "acoustics-art.jpg",
                      "cover.png", "cover.jpg", "Folder.png", "Folder.jpg"]
        # Fallback artwork used when no cover file sits next to the song.
        path = "web/www-data/images/cd_case.png"
        song_dir = os.path.dirname(song["path"])
        for candidate in candidates:
            full = os.path.join(song_dir, candidate)
            if os.path.exists(full):
                path = full
                break
        tmp = tempfile.NamedTemporaryFile()
        # ImageMagick writes the resized image to the temp file's path.
        subprocess.call(
            ["convert", path, "-resize", "%dx%d" % (size, size), tmp.name])
        data = tmp.read()
        tmp.close()
        return (200, data, "image/png")
class ModeReorderQueue(Mode):
    """API mode ``reorder_queue``: rewrite vote priorities in the given order."""
    name = "reorder_queue"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to vote for songs."})
        if "song_id" not in args:
            return ModeStatus.get(self, [])
        # The ';'-separated song ids arrive in the desired queue order;
        # their positions become the new vote priorities.
        for priority, song_id in enumerate(args["song_id"].split(";")):
            self.owner.db.UpdateVote(
                song_id, self.session.user(), self.session._player, priority)
        return ModeStatus.get(self, [])
class ModeSelect(Mode):
    """API mode ``select``: exact-match lookup on a single field."""
    name = "select"

    def get(self, args):
        for required in ("field", "value"):
            if required not in args:
                return (400, {"api_error": "select requires a '%s' argument." % required})
        return (200, self.owner.db.Select(args["field"], args["value"]))
class ModeChangePlayer(Mode):
    """API mode ``change_player``: point the session at a different player."""
    name = "change_player"

    def get(self, args):
        if "player_id" not in args:
            return (400, {"api_error": "Player change requires a player to change to."})
        player_id = args["player_id"]
        if player_id not in self.owner.players:
            return (400, {"api_error": "Bad player id."})
        self.session._player = player_id
        return ModeStatus.get(self, args)
class ModeTopVoted(Mode):
    """API mode ``top_voted``: the most-voted songs (default limit 10)."""
    name = "top_voted"

    def get(self, args):
        limit = args["limit"] if "limit" in args else 10
        return (200, self.owner.db.TopVoted(limit))
class ModeAlbumSearch(Mode):
    """API mode ``album_search``: find songs by album title."""
    name = "album_search"

    def get(self, args):
        if "album" not in args:
            return (400, {"api_error": "album_search requires an 'album' argument."})
        return (200, self.owner.db.AlbumSearch(args["album"]))
class ModeVote(Mode):
    """API mode ``vote``: vote for one or more ';'-separated song ids."""
    name = "vote"

    def get(self, args):
        user = self.session.user()
        if not user:
            return (500, {"auth_error": "You must login to vote for songs."})
        if "song_id" not in args:
            return (400, {"api_error": "vote requires a 'song_id' argument."})
        priority = self.owner.db.NextVote(user, self.session._player)
        # XXX: We don't check max-votes
        # split(";") on an id with no separator yields [id], so one loop
        # covers both the single- and multi-song cases.
        for song_id in args["song_id"].split(";"):
            self.owner.db.AddVote(user, self.session._player, song_id, priority)
        return ModeStatus.get(self, args)
class ModeUnvote(Mode):
    """API mode ``unvote``: remove votes for ';'-separated song ids."""
    name = "unvote"

    def get(self, args):
        user = self.session.user()
        if not user:
            return (500, {"auth_error": "You must login to vote for songs."})
        if "song_id" not in args:
            return (400, {"api_error": "vote requires a 'song_id' argument."})
        # split(";") covers both the single- and multi-song cases.
        for song_id in args["song_id"].split(";"):
            self.owner.db.Unvote(user, song_id)
        return ModeStatus.get(self, args)
class ModePlaylists(Mode):
    """API mode ``playlists``: list playlists filtered by owner and title."""
    name = "playlists"

    def get(self, args):
        who = args.get("who", "")
        title = args.get("title", "")
        return (200, self.owner.db.Playlists(who, title))
class ModePlaylistsLoose(Mode):
    """API mode ``playlists_loose``: fuzzy playlist search."""
    name = "playlists_loose"

    def get(self, args):
        return (200, self.owner.db.PlaylistsLoose(args.get("value", "")))
class ModePlaylistContents(Mode):
    """API mode ``playlist_contents``: list the songs in one playlist."""
    name = "playlist_contents"

    def get(self, args):
        if "playlist_id" not in args:
            return (400, {"api_error": "playlist_contents requires a 'playlist_id' argument."})
        return (200, self.owner.db.PlaylistContents(args["playlist_id"]))
class ModePlaylistInfo(Mode):
    """API mode ``playlist_info``: metadata for one playlist."""
    name = "playlist_info"

    def get(self, args):
        if "playlist_id" not in args:
            return (400, {"api_error": "playlist_info requires a 'playlist_id' argument."})
        return (200, self.owner.db.PlaylistInfo(args["playlist_id"]))
class ModeAddToPlaylist(Mode):
    """API mode ``add_to_playlist``: append songs to a playlist you own."""
    name = "add_to_playlist"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to modify playlists."})
        if "song_id" not in args:
            return (400, {"api_error": "add_to_playlist requires a 'song_id' argument."})
        if "playlist_id" not in args:
            return (400, {"api_error": "add_to_playlist requires a 'playlist_id' argument."})
        _status, playlist = ModePlaylistInfo.get(self, args)
        # Only the playlist owner or an admin may modify it.
        if playlist['who'] != self.session.user() and not self.session.is_admin():
            return (500, {"auth_error": "You are not permitted to modify this playlist."})
        # split(";") covers both the single- and multi-song cases.
        for song_id in args["song_id"].split(";"):
            self.owner.db.AddToPlaylist(args["playlist_id"], song_id)
        return ModePlaylistContents.get(self, args)
class ModeRemoveFromPlaylist(Mode):
    """API mode ``remove_from_playlist``: drop songs from a playlist you own."""
    name = "remove_from_playlist"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to modify playlists."})
        if "song_id" not in args:
            return (400, {"api_error": "remove_from_playlist requires a 'song_id' argument."})
        if "playlist_id" not in args:
            return (400, {"api_error": "remove_from_playlist requires a 'playlist_id' argument."})
        _status, playlist = ModePlaylistInfo.get(self, args)
        # Only the playlist owner or an admin may modify it.
        if playlist['who'] != self.session.user() and not self.session.is_admin():
            return (500, {"auth_error": "You are not permitted to modify this playlist."})
        # split(";") covers both the single- and multi-song cases.
        for song_id in args["song_id"].split(";"):
            self.owner.db.RemoveFromPlaylist(args["playlist_id"], song_id)
        return ModePlaylistContents.get(self, args)
class ModeCreatePlaylist(Mode):
    """API mode ``create_playlist``: create a playlist for the current user."""
    name = "create_playlist"

    def get(self, args):
        user = self.session.user()
        if not user:
            return (500, {"auth_error": "You must login to modify playlists."})
        if "title" not in args:
            return (400, {"api_error": "create_playlist requires a 'title' argument."})
        self.owner.db.CreatePlaylist(user, args["title"])
        args["who"] = user  # list only the creator's playlists in the reply
        return ModePlaylists.get(self, args)
class ModeDeletePlaylist(Mode):
    """API mode ``delete_playlist``: delete a playlist you own."""
    name = "delete_playlist"

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to modify playlists."})
        if "playlist_id" not in args:
            return (400, {"api_error": "delete_playlist requires a 'playlist_id' argument."})
        _status, playlist = ModePlaylistInfo.get(self, args)
        # Only the playlist owner or an admin may delete it.
        if playlist['who'] != self.session.user() and not self.session.is_admin():
            return (500, {"auth_error": "You are not permitted to modify this playlist."})
        self.owner.db.DeletePlaylist(args["playlist_id"])
        args["who"] = self.session.user()
        return ModePlaylists.get(self, args)
class ModePurge(Mode):
    """API mode ``purge``: clear a user's votes on the current player."""
    name = "purge"

    def get(self, args):
        """Purge votes; non-admins may only purge their own votes.

        Bug fix: the auth error message previously contained the typo
        "uesrs"; corrected to "users".
        """
        if not self.session.user():
            return (500, {"auth_error": "This action can only be executed by logged-in users."})
        # Non-admins always purge their own votes, regardless of 'who'.
        if "who" not in args or not self.session.is_admin():
            args["who"] = self.session.user()
        self.owner.db.Purge(args["who"], self.session._player)
        return ModeStatus.get(self, args)
class ModeStats(Mode):
    """API mode ``stats``: library stats, optionally scoped to one user."""
    name = "stats"

    def get(self, args):
        who = args.get("who")
        output = {
            "total_songs": self.owner.db.SongCount(),
            "top_artists": self.owner.db.TopArtists(who),
        }
        return (200, output)
class ModeControls(Mode):
    """Player-control modes: start, stop, skip, pause, volume, zap."""
    names = ["start", "stop", "skip", "pause", "volume", "zap"]

    def get(self, args):
        if not self.session.user():
            return (500, {"auth_error": "You must login to control the player."})
        rpc_args = [args["mode"]]
        if "value" in args:
            rpc_args.append(args["value"])
        # Skipping is gated separately from the login check.
        if args["mode"] == "skip" and not self.session.can_skip():
            return (500, {"auth_error": "You can not skip this song."})
        self.owner.rpc(self.session._player, rpc_args)
        time.sleep(1)  # Give the players a bit of time to update the DB
        return ModeStatus.get(self, [])
class ModeSessions(Mode):
    """API mode ``sessions``: admin-only dump of all active sessions."""
    name = "sessions"

    def get(self, args):
        if not self.session.is_admin():
            return (500, {"auth_error": "You are not permitted to view this information."})
        sessions = {
            sid: {
                'is_admin': sess.is_admin(),
                'user': sess.user(),
                'player': sess._player,
                'created': sess.created,
                'address': sess.remote,
            }
            for sid, sess in self.owner.sessions.items()
        }
        return (200, sessions)
class ModeScan(Mode):
    """API mode ``scan``: admin-only trigger for the filesystem scanner."""
    name = "scan"

    def get(self, args):
        if not self.session.is_admin():
            return (500, {"auth_error": "You are not permitted to perform this action."})
        if 'path' not in args:
            return (500, {"api_error": "Expected a path to scan."})
        # Runs the scanner synchronously; the request blocks until it exits.
        subprocess.call(["./acoustics-scanner.py", args['path']])
        return (200, {})
class ModeRawAudio(Mode):
    """API mode ``audio-data``: return a song's raw file bytes."""
    name = "audio-data"

    def get(self, args):
        """Return ``(200, bytes, mimetype)`` for a song, or an error tuple.

        Bug fix: the original indexed ``SELECT(...)[0]`` before testing the
        result, so a missing song raised IndexError and the 404 branch was
        unreachable. It also crashed with KeyError when 'song_id' was
        missing entirely; that now yields a 400.
        """
        if not self.session.user():
            return (500, {"auth_error": "You must login to perform this action."})
        if "song_id" not in args:
            return (400, {"api_error": "audio-data requires a 'song_id' argument."})
        rows = self.owner.db.SELECT("songs", {"song_id": args['song_id']})
        if not rows:
            return (404, {'error': "Not found."})
        song = rows[0]
        import mimetypes
        with open(song["path"], "rb") as f:
            filecontents = f.read()
        mimetypes.init()
        mimetype = mimetypes.guess_type(song["path"])[0]
        return (200, filecontents, mimetype)
class AcousticsSession(object):
    """Per-client session: identity, selected player, and creation info."""

    def __init__(self, sessid, owner, remote_host):
        self.owner = owner
        self.sessid = sessid
        self._user = None  # set by the auth handler after a successful login
        self._player = self.owner.players[0]  # default to the first player
        self.created = int(time.time())
        self.remote = remote_host

    def is_admin(self):
        # NOTE(review): every logged-in user currently counts as an admin;
        # tighten this if a real role system is added.
        return self._user is not None

    def can_skip(self):
        # Any logged-in user may skip the current song.
        return self._user is not None

    def user(self):
        """Return the logged-in username, or None when anonymous."""
        return self._user

    def player(self):
        """Return the DB row for the selected player, or None if unknown."""
        rows = self.owner.db.SELECT("players", {"player_id": self._player})
        return rows[0] if rows else None

    def currentSong(self):
        """Return the currently playing song with its voters, or None.

        Improvement: the original called self.player() twice (once in the
        guard and once for the lookup), issuing the same DB query twice;
        the row is now fetched once.
        """
        player = self.player()
        if not player:
            return None
        obj = self.owner.db.SELECT(
            "songs", {"song_id": player["song_id"]})[0]
        obj["now"] = int(time.time())
        voters = self.owner.db.SELECT(
            "votes", {"song_id": obj["song_id"], "player_id": self._player})
        obj["who"] = [v['who'] for v in voters]
        return obj
class AcousticsServer(object):
    """Application core: configuration, DB handle, sessions, and dispatch
    of the API modes registered in ``mode_registry``."""

    def __init__(self):
        self.modes = mode_registry
        self.config = config.AcousticsConfig()
        # Assumes data_source is a colon-separated string with the SQLite
        # path last (DBI-style) — TODO confirm against the config format.
        self.db = db.Sqlite(
            self.config["database"]["data_source"].split(":")[-1])
        self.sessions = {}  # session id -> AcousticsSession
        # NOTE(review): "{}" looks like a literal top-level section name in
        # the config file — verify against amp.config.AcousticsConfig.
        self.players = self.config["{}"]['players'].split(",")

    def newSession(self, client_address):
        """Create a fresh session for ``client_address``; return its id."""
        sid = str(uuid.uuid1())
        self.sessions[sid] = AcousticsSession(sid, self, client_address)
        return sid

    def rpc(self, player_id, args):
        """Forward a control command to a known player.

        The RPC implementation module is named per player in the config
        ('player.<id>' section, 'rpc' key) and imported on each call.
        """
        if player_id in self.players:
            rpc_module = importlib.import_module(
                self.config.translate(self.config['player.' + player_id]["rpc"]))
            player = rpc_module.RPC()
            player.execute(player_id, args)

    def execute(self, session, args):
        """Dispatch a request to the mode named by ``args["mode"]``.

        Returns the mode's (status, payload[, content_type]) tuple, or a
        400 tuple for unknown modes.
        """
        if args["mode"] in self.modes:
            return self.modes[args["mode"]](self, self.sessions[session]).get(args)
        else:
            return (400, {"api_error": "Unrecognized mode `%s`" % args["mode"]})
def AcousticsHandlerFactory(server):
    """Attach ``server`` and a shared failure counter to the handler class.

    Note that this sets class attributes on AcousticsHandler itself, so
    every handler instance shares the same server and failure map.
    """
    AcousticsHandler.acoustics_server = server
    AcousticsHandler.failures = {}
    return AcousticsHandler
class AcousticsHandler(http.server.SimpleHTTPRequestHandler):
    """HTTP front end: session cookies, basic auth, the JSON API, and
    static file serving. ``acoustics_server`` and ``failures`` are set as
    class attributes by AcousticsHandlerFactory."""

    def address_string(self):
        # Return the raw client IP (skips the base class's name resolution).
        host, port = self.client_address[:2]
        return '%s' % host

    def do_GET(self):
        # --- Session handling via the 'sessid' cookie -------------------
        if 'cookie' in self.headers:
            self.cookie = http.cookies.SimpleCookie(self.headers["cookie"])
        else:
            self.cookie = http.cookies.SimpleCookie()
        session = None
        if "sessid" in self.cookie and self.cookie['sessid'].value in self.acoustics_server.sessions:
            session = self.cookie["sessid"].value
        else:
            # No valid session: hand out a new one and redirect the client
            # back to the same URL so the cookie takes effect.
            if "sessid" in self.cookie:
                print("Bad session from previous instance?",
                      self.cookie["sessid"].value)
            # ``failures`` counts cookie round-trips per client IP to detect
            # browsers that never accept the cookie.
            if self.client_address[0] not in self.failures.keys():
                print(
                    "First time, trying to give out session key for " + self.client_address[0])
                self.failures[self.client_address[0]] = 0
            self.failures[self.client_address[0]] += 1
            session = self.acoustics_server.newSession(self.client_address)
            self.cookie["sessid"] = session
            print("serving fresh session", session)
            if self.failures[self.client_address[0]] > 5:
                # Too many redirects without the cookie coming back: give up
                # (but reset the counter partially so the client can retry).
                print("Failed too many times, going to have to duck out.")
                self.failures[self.client_address[0]] -= 5
                self.send_response(400)
                self.send_header('Content-type', 'text/html')
                self.send_header(
                    'Set-Cookie', self.cookie.output(header="").strip())
                self.end_headers()
                self.wfile.write(
                    b"Your browser is not accepting a required session cookie, please try refreshing.")
                return
            else:
                # Temporary redirect back to the same path with the cookie.
                self.send_response(307)
                self.send_header('Location', self.path)
                self.send_header(
                    'Set-Cookie', self.cookie['sessid'].output(header="").strip())
                self.end_headers()
                return
        # --- Routing ----------------------------------------------------
        if self.path.startswith("/www-data/auth"):
            # Replace this with your own authorization as necessary.
            if 'Authorization' in self.headers:
                if self.headers["Authorization"].startswith('Basic '):
                    tmp = base64.standard_b64decode(
                        bytes(self.headers['Authorization'].split(" ")[1].encode('utf-8'))).decode("utf-8")
                    # TODO: Actual authentication
                    self.acoustics_server.sessions[
                        session]._user = tmp.split(":")[0]
                    self.send_response(200)
                    self.send_header('Location', '/json.pl?mode=status')
                    self.end_headers()
                    # NOTE(review): execution falls through to the 401 below
                    # even after the 200 above — a ``return`` here looks
                    # intended; confirm before changing.
            self.send_response(401)
            self.send_header('WWW-Authenticate', 'Basic realm="Acoustics"')
            self.end_headers()
            return
        elif self.path.startswith("/json."):
            # Parse API call
            path = urlparse(self.path)
            args = parse_qs(path.query, keep_blank_values=True)
            # parse_qs yields lists; collapse repeated params with ';' (the
            # separator the vote/playlist modes split on).
            for k in args.keys():
                args[k] = ";".join(args[k])
            if "mode" not in args:
                args["mode"] = "status"
            result = self.acoustics_server.execute(session, args)
            if len(result) == 2:
                (status, results) = result
                output = json.dumps(results).encode("utf-8")
                ctype = 'application/json'
            elif len(result) == 3:
                # Modes like 'art' and 'audio-data' supply raw bytes + type.
                (status, output, ctype) = result
            else:
                # NOTE(review): this branch leaves ``ctype`` unset (NameError
                # at send_header below) and ``output`` is a str, which
                # wfile.write would reject — confirm it is unreachable.
                output = "?"
                status = 400
            # Respond appropriately
            self.send_response(status)
            self.send_header('Content-type', ctype)
            self.end_headers()
            self.wfile.write(output)
        else:
            # Static files: rebase the URL under ./web and refuse any path
            # that escapes that directory.
            self.path = os.path.join("web", self.path.replace("/", "", 1))
            if not os.path.realpath(self.path).startswith(os.path.realpath("web")):
                self.send_response(400)
                self.end_headers()
                return
            return http.server.SimpleHTTPRequestHandler.do_GET(self)
class AcousticsSocket(socketserver.TCPServer):
    """Dual-stack TCP server: binds IPv6 with V6ONLY off so IPv4 clients
    can connect to the same socket."""

    allow_reuse_address = True
    address_family = socket.AF_INET6

    def server_bind(self):
        # Accept IPv4-mapped connections on this IPv6 socket.
        self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, False)
        super().server_bind()
if __name__ == "__main__":
    # Entry point: build the application core and serve the web API on
    # every interface at PORT until interrupted.
    server = AcousticsServer()
    httpd = AcousticsSocket(("", PORT), AcousticsHandlerFactory(server))
    httpd.serve_forever()
| 33.89721 | 140 | 0.568532 | 2,768 | 23,084 | 4.641257 | 0.147399 | 0.041099 | 0.035962 | 0.033782 | 0.43084 | 0.370125 | 0.335098 | 0.283023 | 0.263953 | 0.260839 | 0 | 0.016648 | 0.294836 | 23,084 | 680 | 141 | 33.947059 | 0.772576 | 0.009487 | 0 | 0.339416 | 0 | 0 | 0.173768 | 0.002319 | 0 | 0 | 0 | 0.001471 | 0 | 1 | 0.085766 | false | 0 | 0.034672 | 0.007299 | 0.40146 | 0.007299 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
499775111df96a88bea8809972dfa3ed80b7927f | 5,375 | py | Python | flask_app/utils/fetch_wiktionary_word.py | AtilioA/wiktionary-french-homophones | 85539065364089346b544dd9ada9ce26594fb752 | [
"Apache-2.0"
] | null | null | null | flask_app/utils/fetch_wiktionary_word.py | AtilioA/wiktionary-french-homophones | 85539065364089346b544dd9ada9ce26594fb752 | [
"Apache-2.0"
] | null | null | null | flask_app/utils/fetch_wiktionary_word.py | AtilioA/wiktionary-french-homophones | 85539065364089346b544dd9ada9ce26594fb752 | [
"Apache-2.0"
] | null | null | null | import os
import re
import sys
import urllib.parse
from multiprocessing import Pool
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
from wiktionaryparser import WiktionaryParser
parser = WiktionaryParser()
def unquote_url():
with open("french_homophones.txt", "r", encoding="utf8", errors='ignore') as fR:
with open("french_homophones_unquoted.txt", "w+", encoding="utf8", errors='ignore') as fW:
words = [urllib.parse.unquote(line) for line in fR]
fW.writelines(words)
def find_infinitive_form(verbDefinition):
match = re.search(r".*of (\w+)\s*$", verbDefinition)
if match:
return match.group(1)
else:
return None
def fetch_wiktionary_word(word, isInfinitive=False):
""" Extract word information with WiktionaryParser """
try:
word = word.strip()
print(word)
homophone = {'word': f"{word}"}
parsedHomophone = parser.fetch(word, "french")[0]
# print(parsedHomophone)
if not (parsedHomophone['definitions'] or parsedHomophone['etymology'] or parsedHomophone['pronunciations']['text']):
with open("failed.txt", "a+", encoding="utf8", errors='ignore') as failed:
failed.write(f"{word}\n")
print("Word not found.")
print(parsedHomophone)
# PRONUNCIATIONS
parsedHomophone['pronunciations']['homophones'] = None
try:
for element in parsedHomophone['pronunciations']['text']:
if "IPA" in element:
parsedHomophone['pronunciations']['IPA'] = element
if "Homophone" in element:
parsedHomophone['pronunciations']['homophones'] = element.split(", ")
# # If IPA entry doesn't exist
# if "Homophone" in parsedHomophone['pronunciations']['text'][0]:
# # print("Doesn't have IPA, has homophones")
# parsedHomophone['pronunciations']['IPA'] = None
# parsedHomophone['pronunciations']['homophones'] = parsedHomophone['pronunciations']['text'][0].split(", ")
except (IndexError, KeyError) as err:
print("Error in pronunciations input.")
print(err)
if not parsedHomophone['pronunciations']['homophones']:
print("Entry doesn't have homophones!")
with open("failed.txt", "a+", encoding="utf8", errors='ignore') as failed:
failed.write(f"No homophones: {word}\n")
return None
# Delete "text" key from "pronunciations" value
parsedHomophone['pronunciations'].pop('text', None)
# DEFINITIONS
for i, pOS in enumerate(parsedHomophone['definitions']):
try:
# Split "text" value from "definitions" key into keys "word" and "meanings"
parsedHomophone['definitions'][i]['meanings'] = parsedHomophone['definitions'][i]['text'][1:]
except (IndexError, KeyError) as err:
print("Error in definitions input.")
print(err)
# Introduce "infinitive" key
if pOS['partOfSpeech'] == "verb" and not isInfinitive:
# Look for infinitive form if it's a verb
try:
infinitive = find_infinitive_form(pOS['text'][1]).strip()
# print(f"\nFetching infinitive: {pOS['text'][1]}")
# print(infinitive)
parsedInfinitive = parser.fetch(infinitive, "french")[0]
# print(f"infinitive dictionary: {parsedInfinitive}")
parsedHomophone['definitions'][i]['infinitive'] = {
"text": parsedInfinitive['definitions'][0]['text'][0],
"meanings": parsedInfinitive['definitions'][0]['text'][1:]
}
except (IndexError, TypeError, AttributeError):
print("No infinitive form.")
else:
parsedHomophone['definitions'][i]['infinitive'] = None
# Remove "text" value from definitions key
word = parsedHomophone['definitions'][i].pop('text', None)
except KeyboardInterrupt:
raise KeyboardInterruptError()
# Put dictionary after "word" key
homophone.update(parsedHomophone)
return homophone
def request_homophones_wiktionary():
""" Read french_homophones.txt to request words' informations with WiktionaryParser.
Write to homophones.txt.
"""
with open("french_homophones.txt", "r+", encoding="utf8", errors='ignore') as fHomophones:
with open("homophones.txt", "a+", encoding="utf8", errors='ignore') as fOut:
words = fHomophones.readlines()
with Pool(10) as p:
try:
fetchedHomophones = list(p.map(fetch_wiktionary_word, words))
except KeyboardInterrupt:
print('Got ^C while pool mapping, terminating the pool')
p.terminate()
print('Terminating pool...')
p.terminate()
p.join()
print('Done!')
for fetchedHomophone in fetchedHomophones:
fOut.write(f"{fetchedHomophone}\n")
if __name__ == "__main__":
request_homophones_wiktionary()
| 39.522059 | 125 | 0.580093 | 513 | 5,375 | 6.019493 | 0.2846 | 0.112694 | 0.034974 | 0.046632 | 0.137306 | 0.111399 | 0.111399 | 0.101684 | 0.07513 | 0.07513 | 0 | 0.005267 | 0.293581 | 5,375 | 135 | 126 | 39.814815 | 0.808006 | 0.165023 | 0 | 0.218391 | 0 | 0 | 0.181143 | 0.016202 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045977 | false | 0 | 0.068966 | 0 | 0.16092 | 0.137931 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4998534a23c5a08fc87272e477b56a3ff9f61a24 | 1,050 | py | Python | src/utils/convert_tags_to_npy.py | pkf-thesis/playground | 70133fb0ea1b3627af85b1ba11b0d8609f714d1a | [
"MIT"
] | 1 | 2019-05-01T08:20:56.000Z | 2019-05-01T08:20:56.000Z | src/utils/convert_tags_to_npy.py | pkf-thesis/playground | 70133fb0ea1b3627af85b1ba11b0d8609f714d1a | [
"MIT"
] | null | null | null | src/utils/convert_tags_to_npy.py | pkf-thesis/playground | 70133fb0ea1b3627af85b1ba11b0d8609f714d1a | [
"MIT"
] | null | null | null | import numpy as np
import sqllite_repository as sql
import utils.msd_tags as msd
def convert_tags_to_npy(ids, npy_path):
y = np.empty((len(ids), len(msd.TAGS)), dtype=bool)
tid_tag = sql.fetch_tags_from_songs_above_treshold(ids, 50)
if len(tid_tag) != len(ids):
print("Skipped dataset %s" % npy_path)
return
for i, song in enumerate(ids):
tags = []
for tag in msd.TAGS:
if tag in tid_tag[song]:
tags.append(True)
else:
tags.append(False)
y[i] = tags
np.savez_compressed(npy_path, y)
train_ids = [song.split('/')[-1].rstrip() for song in open("../../data/msd/train_path.txt")]
valid_ids = [song.split('/')[-1].rstrip() for song in open("../../data/msd/valid_path.txt")]
test_ids = [song.split('/')[-1].rstrip() for song in open("../../data/msd/test_path.txt")]
convert_tags_to_npy(train_ids, "../../data/msd/y_train")
convert_tags_to_npy(valid_ids, "../../data/msd/y_valid")
convert_tags_to_npy(test_ids, "../../data/msd/y_test")
| 35 | 92 | 0.626667 | 166 | 1,050 | 3.73494 | 0.331325 | 0.067742 | 0.083871 | 0.103226 | 0.18871 | 0.18871 | 0.18871 | 0.18871 | 0.18871 | 0.18871 | 0 | 0.005924 | 0.19619 | 1,050 | 29 | 93 | 36.206897 | 0.728673 | 0 | 0 | 0 | 0 | 0 | 0.16381 | 0.14381 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.208333 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
49997d7ba1396e63db8a84c1b6ca053ca4b41b0d | 809 | py | Python | _collections/articles/obstoanki_setup.py | SubZeroX/SubZeroX.github.io | 1df9c43d538af7812e68ac07d7591f258c8c1619 | [
"MIT"
] | null | null | null | _collections/articles/obstoanki_setup.py | SubZeroX/SubZeroX.github.io | 1df9c43d538af7812e68ac07d7591f258c8c1619 | [
"MIT"
] | null | null | null | _collections/articles/obstoanki_setup.py | SubZeroX/SubZeroX.github.io | 1df9c43d538af7812e68ac07d7591f258c8c1619 | [
"MIT"
] | null | null | null | import urllib.request
import sys
import subprocess
import os
SCRIPT_URL = "".join(
[
"https://github.com/Pseudonium/Obsidian_to_Anki/releases/latest",
"/download/obsidian_to_anki.py"
]
)
REQUIRE_URL = "".join(
[
"https://github.com/Pseudonium/Obsidian_to_Anki/releases/latest",
"/download/requirements.txt"
]
)
with urllib.request.urlopen(SCRIPT_URL) as script:
with open("obsidian_to_anki.py", "wb") as f:
f.write(script.read())
with urllib.request.urlopen(REQUIRE_URL) as require:
with open("obstoankirequire.txt", "wb") as f:
f.write(require.read())
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", "obstoankirequire.txt"]
)
os.remove("obstoankirequire.txt")
| 26.096774 | 79 | 0.631644 | 96 | 809 | 5.1875 | 0.416667 | 0.080321 | 0.11245 | 0.072289 | 0.313253 | 0.269076 | 0.269076 | 0.269076 | 0.269076 | 0.269076 | 0 | 0 | 0.221261 | 809 | 30 | 80 | 26.966667 | 0.790476 | 0 | 0 | 0.076923 | 0 | 0 | 0.3543 | 0.070603 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
499ba879bb77d8378d75403366b117d411f57180 | 1,103 | py | Python | tk_gitStatus.py | CrazyJ36/python | 4cff6e7240672a273d978521bb511065f45d4312 | [
"MIT"
] | null | null | null | tk_gitStatus.py | CrazyJ36/python | 4cff6e7240672a273d978521bb511065f45d4312 | [
"MIT"
] | null | null | null | tk_gitStatus.py | CrazyJ36/python | 4cff6e7240672a273d978521bb511065f45d4312 | [
"MIT"
] | null | null | null | # This checks git status in any platform on any local repo
import os
from tkinter import *
win = Tk()
win.pack_propagate(0)
win.title(string="Git Status")
win.geometry("400x300")
repo = Entry(win)
# easier than below, use os.environ['HOME'] + regex for development dir name
if sys.platform == "win32" and os.environ['USERNAME'] == "Thomas":
devdir = "C:\\Users\\Thomas\\Development\\"
elif sys.platform == "win32" and os.environ['USERNAME'] == "CrazyJ36":
devdir = "C:\\Users\\CrazyJ36\\Development\\"
elif sys.platform == "win32" and os.environ['USERNAME'] == "thomas":
devdir = "C:\\Users\\thomas\\Development\\"
elif sys.platform == "linux":
devdir = "/home/thomas/development/"
else:
envwarn = Label(win, text="failed to get environment")
envwarn.pack()
def gitcmd():
cmdstr = "cd " + devdir + repo.get() + " && git status"
cmdfileobj = os.popen(cmdstr)
out = cmdfileobj.read()
cmdfileobj.close()
outtxt = Label(win, text=out)
outtxt.pack()
btn = Button(win, text="Check Git Repo", command=lambda: gitcmd())
repo.pack()
btn.pack()
win.mainloop()
| 29.810811 | 76 | 0.663645 | 147 | 1,103 | 4.972789 | 0.482993 | 0.049248 | 0.065663 | 0.077975 | 0.290014 | 0.290014 | 0.290014 | 0.290014 | 0.290014 | 0.235294 | 0 | 0.018418 | 0.163191 | 1,103 | 36 | 77 | 30.638889 | 0.773564 | 0.118767 | 0 | 0 | 0 | 0 | 0.268318 | 0.126935 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.068966 | 0 | 0.103448 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
499bc714b43e62c974c3b0170d94cd4ec62f3127 | 874 | py | Python | day-19.py | analuisadev/100-Days-Of-Code | b1dafabc335cd2c3c9b1cecd50597b42d8959d4a | [
"MIT"
] | 2 | 2021-01-08T22:13:21.000Z | 2021-03-17T10:44:12.000Z | day-19.py | analuisadev/100-Days-Of-Code | b1dafabc335cd2c3c9b1cecd50597b42d8959d4a | [
"MIT"
] | null | null | null | day-19.py | analuisadev/100-Days-Of-Code | b1dafabc335cd2c3c9b1cecd50597b42d8959d4a | [
"MIT"
] | null | null | null | sumage = 0
mAge = 0
bigageMan = 0
nameOld = 0
twoman20 = 0
print ('\033[1m{:=^40}\033[m'.format(' COMPLETE ANALYZER '))
for people in range(1, 5):
print ('=-=-=-=--=-= {}ª PEOPLE -=-=-=-=-=-='.format(people))
name = str(input('\033[1mName:\033[m ')).strip()
age = int(input('\033[1mAge:\033[m '))
sex = str(input('\033[1mSex M/F:\033[m ')).strip()
sumage += age
if people == 1 and sex in 'Mm':
sumage = age
nameOld = name
if sex in 'Mm' and age > bigageMan:
bigageMan = age
nameOld = name
if sex in 'Ff' and age < 20 :
twoman20 +=1
mAge = sumage / 4
print ('\033[1;32mThe average age of the group is {} years\033[m'.format(mAge))
print ('\033[1;32mThe oldest man is {} years old and is called {}\033[m'.format(bigageMan,nameOld))
print ('\033[1;32mThere are {} women under 20\033[m'.format(twoman20))
| 34.96 | 99 | 0.584668 | 135 | 874 | 3.785185 | 0.392593 | 0.054795 | 0.078278 | 0.062622 | 0.082192 | 0.082192 | 0 | 0 | 0 | 0 | 0 | 0.112903 | 0.21968 | 874 | 24 | 100 | 36.416667 | 0.636364 | 0 | 0 | 0.083333 | 0 | 0 | 0.345538 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.208333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
499c9a9a86e06de3e57ea0b94c44e665e71182be | 711 | py | Python | src/main/python/metadata.py | sudoparsa/paperECG | 85c9413ec023e31d6602b973b7e55934713a6116 | [
"MIT"
] | null | null | null | src/main/python/metadata.py | sudoparsa/paperECG | 85c9413ec023e31d6602b973b7e55934713a6116 | [
"MIT"
] | null | null | null | src/main/python/metadata.py | sudoparsa/paperECG | 85c9413ec023e31d6602b973b7e55934713a6116 | [
"MIT"
] | null | null | null | import os
import json
import copy
def metadata(file_name, path, metadata_path):
out_path = path + '.paperecg\\'
if not os.path.isdir(out_path):
os.mkdir(out_path)
if not os.path.isfile(metadata_path):
print('Warning: No metadata file found!')
else:
with open(metadata_path) as metadata_json:
metadata = json.load(metadata_json)
data = copy.deepcopy(metadata)
data['image']['name'] = file_name
data['image']['directory'] = path
name, extension = file_name.split('.')
outfile = open(out_path + name + '-' + extension + '.json', 'w')
json.dump(data, outfile)
outfile.close()
| 30.913043 | 76 | 0.586498 | 86 | 711 | 4.709302 | 0.406977 | 0.069136 | 0.034568 | 0.054321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.284107 | 711 | 22 | 77 | 32.318182 | 0.795678 | 0 | 0 | 0 | 0 | 0 | 0.104079 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.210526 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
499e8860b06a3805c1e64c26eba0b047c9de0590 | 3,906 | py | Python | envplus/cli.py | vqh2308/envplus | 4bea95985e69d1f102543b9ff948742a4ac17d63 | [
"MIT"
] | 54 | 2015-01-25T06:26:53.000Z | 2022-02-28T08:50:53.000Z | envplus/cli.py | vqh2308/envplus | 4bea95985e69d1f102543b9ff948742a4ac17d63 | [
"MIT"
] | 1 | 2017-11-02T14:14:34.000Z | 2017-11-02T14:22:32.000Z | envplus/cli.py | vqh2308/envplus | 4bea95985e69d1f102543b9ff948742a4ac17d63 | [
"MIT"
] | 8 | 2015-11-07T14:02:55.000Z | 2021-07-06T22:44:59.000Z | #!/usr/bin/env python
import sys, os
import argparse
from subprocess import Popen
from envplus import pathfile, helpers
if "VIRTUAL_ENV" not in os.environ:
raise Exception("$VIRTUAL_ENV missing. It seems you're not currently in a virtualenv.")
else: pass
def run_in_env(pf, args):
env = os.environ.copy()
paths = env["PATH"].split(":")
bin_paths = pf.get_binpaths()
new_paths = paths[:1] + bin_paths + paths[1:]
env["PATH"] = ":".join(new_paths)
sp = Popen(args, env=env)
out, err = sp.communicate()
return out
def cmd_add(pf, args):
map(pf.add_env, args.envs)
pf.save()
def cmd_rm(pf, args):
map(pf.remove_env, args.envs)
pf.save()
def cmd_pause(pf, args):
envs = args.envs if len(args.envs) else pf.ls()
map(pf.pause_env, envs)
pf.save()
def cmd_resume(pf, args):
    """Resume the named virtualenvs (defaults to all paused ones), then save.

    BUG FIX: the original used ``map(pf.resume_env, envs)``, which is lazy in
    Python 3 and was never consumed, so nothing was ever resumed.
    """
    envs = args.envs if len(args.envs) else pf.ls_paused()
    for env in envs:
        pf.resume_env(env)
    pf.save()
def cmd_ls(pf, args):
    """List virtualenvs from the pathfile, one per line, on stdout.

    Default: active envs. With --paused: paused envs instead. With --all:
    both. Output is sorted so runs are deterministic — the original iterated
    a set, whose order is arbitrary.
    """
    active = set(pf.ls())
    paused = set(pf.ls_paused())
    envs = (paused if args.paused else active) | \
        ((paused | active) if args.all else set())
    out = "".join(e + "\n" for e in sorted(envs))
    sys.stdout.write(out)
def cmd_cat(pf, args):
    """Dump the raw pathfile contents to stdout, exactly as stored."""
    print(pf.to_string(), end="")
def cmd_edit(pf, args):
    """Open the pathfile in the user's $EDITOR, run inside the combined env."""
    run_in_env(pf, [os.environ["EDITOR"], pf.filepath])
def cmd_path(pf, args):
    """Print the path of the envplus pathfile (newline-terminated)."""
    print(pf.filepath)
def cmd_run(pf, args):
    """Run *args.cmd* through an interactive $SHELL inside the combined env."""
    joined = " ".join(args.cmd)
    run_in_env(pf, [os.environ["SHELL"], "-c", "-i", joined])
def parse_args():
    """Build the envplus command-line interface and parse sys.argv.

    Subcommands (registered in display order): add, rm, pause, resume,
    ls, run, path, cat, edit.
    """
    parser = argparse.ArgumentParser(
        description="Combine your virtualenvs.", prog="envplus")
    subparsers = parser.add_subparsers(title="Subcommands", dest="command")

    # Subcommands that take a list of virtualenv names.
    env_list_specs = [
        ("add", "+", "virtualenvs to add to current virtualenv's path"),
        ("rm", "+", "virtualenvs to remove from current virtualenv's path"),
        ("pause", "*", "virtualenvs to pause. Defaults to all."),
        ("resume", "*", "virtualenvs to resume. Defaults to all."),
    ]
    for name, nargs, help_text in env_list_specs:
        sub = subparsers.add_parser(name)
        sub.add_argument("envs", nargs=nargs, help=help_text)

    # envplus ls
    parser_ls = subparsers.add_parser("ls")
    parser_ls.add_argument(
        "--paused", "-p", action="store_true",
        help="Show paused virtualenvs instead of active ones.")
    parser_ls.add_argument(
        "--all", "-a", action="store_true",
        help="Show paused *and* active virtualenvs")

    # envplus run: everything after "run" is forwarded verbatim.
    parser_run = subparsers.add_parser("run")
    parser_run.add_argument(
        "cmd", nargs=argparse.REMAINDER,
        help="Command to run, with optional arguments.")

    # Subcommands without extra arguments.
    for name in ("path", "cat", "edit"):
        subparsers.add_parser(name)

    return parser.parse_args()
def get_pathfile_path(pathfile_name="_envplus.pth"):
    """Locate the envplus pathfile inside the active virtualenv's
    site-packages directory (read from $VIRTUAL_ENV)."""
    site_packages = helpers.get_site_packages_dir(os.environ["VIRTUAL_ENV"])
    return os.path.join(site_packages, pathfile_name)
def dispatch_command(args):
    """Look up the handler for *args.command* and call it with the pathfile."""
    handlers = {
        "run": cmd_run,
        "add": cmd_add,
        "rm": cmd_rm,
        "pause": cmd_pause,
        "resume": cmd_resume,
        "ls": cmd_ls,
        "cat": cmd_cat,
        "path": cmd_path,
        "edit": cmd_edit,
    }
    handler = handlers[args.command]
    handler(pathfile.PathFile(get_pathfile_path()), args)
def main():
    """Entry point: parse the command line and dispatch to a subcommand."""
    dispatch_command(parse_args())

if __name__ == "__main__":
    main()
| 27.702128 | 93 | 0.641065 | 539 | 3,906 | 4.458256 | 0.218924 | 0.027466 | 0.071161 | 0.02164 | 0.195589 | 0.171452 | 0.113192 | 0.032459 | 0.032459 | 0.032459 | 0 | 0.000654 | 0.217614 | 3,906 | 140 | 94 | 27.9 | 0.785668 | 0.03405 | 0 | 0.095238 | 0 | 0 | 0.162902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0.009524 | 0.038095 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
49a11d361db231c8c3207f15b599e764c10fe725 | 986 | py | Python | hiapi/hi.py | GradysGhost/pyhiapi | f2ff18c65ac1e37f42af80f34e1e1fa1ce13396f | [
"Apache-2.0"
] | null | null | null | hiapi/hi.py | GradysGhost/pyhiapi | f2ff18c65ac1e37f42af80f34e1e1fa1ce13396f | [
"Apache-2.0"
] | null | null | null | hiapi/hi.py | GradysGhost/pyhiapi | f2ff18c65ac1e37f42af80f34e1e1fa1ce13396f | [
"Apache-2.0"
] | 1 | 2020-10-30T16:10:03.000Z | 2020-10-30T16:10:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import flask
# HTTP status served for every request; main() overwrites this module-level
# value from the -c/--response_code CLI option and hello() reads it per request.
RESPONSE_CODE = 200
# Flask application; the catch-all routes below are registered on it.
app = flask.Flask(__name__)
@app.route('/', defaults={'path': ''}, methods=['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'])
@app.route('/<path:path>', methods=['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS'])
def hello(path):
    """Answer any path and method with 'Hi!' — or abort with the configured
    status code when RESPONSE_CODE is not 200."""
    global RESPONSE_CODE
    if RESPONSE_CODE != 200:
        flask.abort(RESPONSE_CODE)
    return 'Hi!\n'
def parse_args():
    """Parse the CLI options: bind address, port and the HTTP code to serve."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--bind-address', dest='bind', default='127.0.0.1')
    parser.add_argument('-p', '--port', dest='port', type=int, default=4000)
    parser.add_argument('-c', '--response_code', dest='code', type=int, default=200)
    return parser.parse_args()
def main():
    """Configure the module-level response code from the CLI, then serve."""
    global RESPONSE_CODE
    options = parse_args()
    RESPONSE_CODE = options.code
    app.run(host=options.bind, port=options.port)

if __name__ == "__main__":
    main()
| 27.388889 | 99 | 0.617647 | 128 | 986 | 4.5625 | 0.453125 | 0.143836 | 0.087329 | 0.061644 | 0.130137 | 0.130137 | 0.130137 | 0.130137 | 0 | 0 | 0 | 0.024876 | 0.184584 | 986 | 35 | 100 | 28.171429 | 0.701493 | 0.042596 | 0 | 0.076923 | 0 | 0 | 0.154989 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.076923 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8cbb5e459f9d84664f8fbddc97d9f6ac9c9ed83 | 18,978 | py | Python | scripts/postprocessing/bcdi_rocking_curves.py | sjleake/bcdi | bf071ad085a11622158e1e651857a8a172c51cf1 | [
"CECILL-B"
] | null | null | null | scripts/postprocessing/bcdi_rocking_curves.py | sjleake/bcdi | bf071ad085a11622158e1e651857a8a172c51cf1 | [
"CECILL-B"
] | null | null | null | scripts/postprocessing/bcdi_rocking_curves.py | sjleake/bcdi | bf071ad085a11622158e1e651857a8a172c51cf1 | [
"CECILL-B"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# Jerome Carnis, carnis_jerome@yahoo.fr
try:
import hdf5plugin # for P10, should be imported before h5py or PyTables
except ModuleNotFoundError:
pass
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.ndimage.measurements import center_of_mass
from bcdi.experiment.detector import create_detector
from bcdi.experiment.setup import Setup
import bcdi.preprocessing.bcdi_utils as bu
import bcdi.graph.graph_utils as gu
import bcdi.utils.utilities as util
import bcdi.utils.validation as valid
helptext = """
Open a series of rocking curve data and track the position of the Bragg peak over the
series. Supported beamlines: ESRF ID01, PETRAIII P10, SOLEIL SIXS, SOLEIL CRISTAL,
MAX IV NANOMAX.
"""
# scan numbers to process: four voltage series concatenated below
scans = np.arange(1460, 1475 + 1, step=3) # list or array of scan numbers
scans = np.concatenate((scans, np.arange(1484, 1586 + 1, 3)))
scans = np.concatenate((scans, np.arange(1591, 1633 + 1, 3)))
scans = np.concatenate((scans, np.arange(1638, 1680 + 1, 3)))
root_folder = "D:/data/P10_OER/data/"
sample_name = "dewet2_2" # list of sample names. If only one name is indicated,
# it will be repeated to match the number of scans
save_dir = "D:/data/P10_OER/analysis/candidate_12/"
# images will be saved here, leave it to None otherwise (default to root_folder)
# Build one x-axis value per scan; the total length must equal len(scans)
# (checked further below).
x_axis = [0.740 for _ in range(16)]
for _ in range(10):
    x_axis.append(0.80)
for _ in range(15):
    x_axis.append(-0.05)
for _ in range(15):
    x_axis.append(0.3)
for _ in range(15):
    x_axis.append(0.8)
# values against which the Bragg peak center of mass evolution will be plotted,
# leave [] otherwise
x_label = "voltage (V)" # label for the X axis in plots, leave '' otherwise
comment = "_BCDI_RC" # comment for the saving filename, should start with _
strain_range = 0.00005 # range for the plot of the q value
peak_method = (
    "max_com" # Bragg peak determination: 'max', 'com', 'max_com' (max then com)
)
debug = False # set to True to see more plots
###############################
# beamline related parameters #
###############################
beamline = (
    "P10" # name of the beamline, used for data loading and normalization by monitor
)
# supported beamlines: 'ID01', 'SIXS_2018', 'SIXS_2019', 'CRISTAL', 'P10'
custom_scan = False # True for a stack of images acquired without scan,
# e.g. with ct in a macro (no info in spec file)
custom_images = np.arange(11353, 11453, 1) # list of image numbers for the custom_scan
custom_monitor = np.ones(
    len(custom_images)
) # monitor values for normalization for the custom_scan
# motor positions used only when custom_scan is True (per-beamline motor names):
custom_motors = {
    "eta": np.linspace(16.989, 18.989, num=100, endpoint=False),
    "phi": 0,
    "nu": -0.75,
    "delta": 36.65,
}
# ID01: eta, phi, nu, delta
# CRISTAL: mgomega, gamma, delta
# P10: om, phi, chi, mu, gamma, delta
# SIXS: beta, mu, gamma, delta
rocking_angle = "outofplane" # "outofplane" or "inplane"
is_series = False # specific to series measurement at P10
specfile_name = ""
# template for ID01: name of the spec file without '.spec'
# template for SIXS_2018: full path of the alias dictionnary,
# typically root_folder + 'alias_dict_2019.txt'
# template for all other beamlines: ''
###############################
# detector related parameters #
###############################
detector = "Eiger4M" # "Eiger2M" or "Maxipix" or "Eiger4M"
x_bragg = 1387 # horizontal pixel number of the Bragg peak,
# can be used for the definition of the ROI
y_bragg = 809 # vertical pixel number of the Bragg peak,
# can be used for the definition of the ROI
roi_detector = [
    y_bragg - 200,
    y_bragg + 200,
    x_bragg - 400,
    x_bragg + 400,
] # [Vstart, Vstop, Hstart, Hstop]
# leave it as None to use the full detector.
# Use with center_fft='skip' if you want this exact size.
debug_pix = 40 # half-width in pixels of the ROI centered on the Bragg peak
hotpixels_file = None # root_folder + 'hotpixels.npz' # non empty file path or None
flatfield_file = (
    None # root_folder + "flatfield_8.5kev.npz" # non empty file path or None
)
template_imagefile = "_master.h5"
# template for ID01: 'data_mpx4_%05d.edf.gz' or 'align_eiger2M_%05d.edf.gz'
# template for SIXS_2018: 'align.spec_ascan_mu_%05d.nxs'
# template for SIXS_2019: 'spare_ascan_mu_%05d.nxs'
# template for Cristal: 'S%d.nxs'
# template for P10: '_master.h5'
# template for NANOMAX: '%06d.h5'
# template for 34ID: 'Sample%dC_ES_data_51_256_256.npz'
####################################
# q calculation related parameters #
####################################
convert_to_q = True # True to convert from pixels to q values using parameters below
beam_direction = (1, 0, 0) # beam along z
directbeam_x = 476 # x horizontal, cch2 in xrayutilities
directbeam_y = 1374 # y vertical, cch1 in xrayutilities
direct_inplane = -2.0 # outer angle in xrayutilities
direct_outofplane = 0.8
sdd = 1.83 # sample to detector distance in m
energy = 10300 # in eV, offset of 6eV at ID01
##################################
# end of user-defined parameters #
##################################
###################
# define colormap #
###################
bad_color = "1.0" # white
bckg_color = "0.7" # grey
colormap = gu.Colormap(bad_color=bad_color)
my_cmap = colormap.cmap
########################################
# check and initialize some parameters #
########################################
print(f"\n{len(scans)} scans: {scans}")
print(f"\n {len(x_axis)} x_axis values provided:")
# default x_axis to scan indices when the user left it empty
if len(x_axis) == 0:
    x_axis = np.arange(len(scans))
if len(x_axis) != len(scans):
    raise ValueError("the length of x_axis should be equal to the number of scans")
# a single sample name applies to every scan
if isinstance(sample_name, str):
    sample_name = [sample_name for idx in range(len(scans))]
valid.valid_container(
    sample_name,
    container_types=(tuple, list),
    length=len(scans),
    item_types=str,
    name="preprocess_bcdi",
)
if peak_method not in [
    "max",
    "com",
    "max_com",
]:
    raise ValueError('invalid value for "peak_method" parameter')
# per-scan accumulators, filled by the loop below
int_sum = [] # integrated intensity in the detector ROI
int_max = [] # maximum intensity in the detector ROI
zcom = [] # center of mass for the first data axis
ycom = [] # center of mass for the second data axis
xcom = [] # center of mass for the third data axis
tilt_com = [] # center of mass for the incident rocking angle
q_com = [] # q value of the center of mass
check_roi = [] # a small ROI around the Bragg peak will be stored for each scan,
# to see if the peak is indeed
# captured by the rocking curve
#######################
# Initialize detector #
#######################
# NOTE: rebinds the name `detector` from the string above to a Detector object
detector = create_detector(
    name=detector,
    template_imagefile=template_imagefile,
    roi=roi_detector,
)
####################
# Initialize setup #
####################
setup = Setup(
    beamline=beamline,
    detector=detector,
    energy=energy,
    rocking_angle=rocking_angle,
    distance=sdd,
    beam_direction=beam_direction,
    custom_scan=custom_scan,
    custom_images=custom_images,
    custom_monitor=custom_monitor,
    custom_motors=custom_motors,
    is_series=is_series,
)
########################################
# print the current setup and detector #
########################################
print("\n##############\nSetup instance\n##############")
print(setup)
print("\n#################\nDetector instance\n#################")
print(detector)
###############################################
# load recursively the scans and update lists #
###############################################
flatfield = util.load_flatfield(flatfield_file)
hotpix_array = util.load_hotpixels(hotpixels_file)
# Main loop: for each scan, load the rocking-curve data, locate the Bragg
# peak with the selected peak_method, record its position/intensity and
# (optionally) convert the peak position to a wavevector transfer q.
for scan_idx, scan_nb in enumerate(scans, start=1):
    tmp_str = f"Scan {scan_idx}/{len(scans)}: S{scan_nb}"
    print(f'\n{"#" * len(tmp_str)}\n' + tmp_str + "\n" + f'{"#" * len(tmp_str)}')
    # initialize the paths
    setup.init_paths(
        sample_name=sample_name[scan_idx - 1],
        scan_number=scan_nb,
        root_folder=root_folder,
        save_dir=save_dir,
        verbose=True,
        specfile_name=specfile_name,
        template_imagefile=template_imagefile,
    )
    # override the saving directory, we want to save results at the same place
    detector.savedir = save_dir
    logfile = setup.create_logfile(
        scan_number=scan_nb, root_folder=root_folder, filename=detector.specfile
    )
    data, mask, frames_logical, monitor = bu.load_bcdi_data(
        logfile=logfile,
        scan_number=scan_nb,
        detector=detector,
        setup=setup,
        flatfield=flatfield,
        hotpixels=hotpix_array,
        normalize=True,
        debugging=debug,
    )
    tilt, grazing, inplane, outofplane = setup.diffractometer.goniometer_values(
        frames_logical=frames_logical, logfile=logfile, scan_number=scan_nb, setup=setup
    )
    nbz, nby, nbx = data.shape
    if peak_method == "max":
        piz, piy, pix = np.unravel_index(data.argmax(), shape=(nbz, nby, nbx))
    elif peak_method == "com":
        piz, piy, pix = center_of_mass(data)
    else: # 'max_com'
        # first find the global maximum, then refine with a center of mass
        # restricted to a (2*debug_pix)^2 window around it
        max_z, max_y, max_x = np.unravel_index(data.argmax(), shape=data.shape)
        com_z, com_y, com_x = center_of_mass(
            data[
                :,
                int(max_y) - debug_pix : int(max_y) + debug_pix,
                int(max_x) - debug_pix : int(max_x) + debug_pix,
            ]
        )
        # correct the pixel offset due to the ROI defined by debug_pix around the max
        piz = com_z # the data was not cropped along the first axis
        piy = com_y + max_y - debug_pix
        pix = com_x + max_x - debug_pix
    if debug:
        fig, _, _ = gu.multislices_plot(
            data,
            sum_frames=True,
            plot_colorbar=True,
            cmap=my_cmap,
            title="scan" + str(scan_nb),
            scale="log",
            is_orthogonal=False,
            reciprocal_space=True,
        )
        fig.text(
            0.60, 0.30, f"(piz, piy, pix) = ({piz:.1f}, {piy:.1f}, {pix:.1f})", size=12
        )
        plt.draw()
        if peak_method == "max_com":
            fig, _, _ = gu.multislices_plot(
                data[
                    :,
                    int(max_y) - debug_pix : int(max_y) + debug_pix,
                    int(max_x) - debug_pix : int(max_x) + debug_pix,
                ],
                sum_frames=True,
                plot_colorbar=True,
                cmap=my_cmap,
                title="scan" + str(scan_nb),
                scale="log",
                is_orthogonal=False,
                reciprocal_space=True,
            )
            fig.text(
                0.60,
                0.30,
                f"(com_z, com_y, com_x) = ({com_z:.1f}, {com_y:.1f}, {com_x:.1f})",
                size=12,
            )
            plt.draw()
    print("")
    # record peak position and intensities for this scan
    zcom.append(piz)
    ycom.append(piy)
    xcom.append(pix)
    int_sum.append(data.sum())
    int_max.append(data.max())
    check_roi.append(
        data[:, :, int(pix) - debug_pix : int(pix) + debug_pix].sum(axis=1)
    )
    # interpolate the rocking angle at the (fractional) peak index piz
    interp_tilt = interp1d(np.arange(data.shape[0]), tilt, kind="linear")
    tilt_com.append(interp_tilt(piz))
    ##############################
    # convert pixels to q values #
    ##############################
    if convert_to_q:
        (
            setup.outofplane_angle,
            setup.inplane_angle,
            setup.tilt_angle,
            setup.grazing_angle,
        ) = (outofplane, inplane, tilt, grazing)
        # calculate the position of the Bragg peak in full detector pixels
        bragg_x = detector.roi[2] + pix
        bragg_y = detector.roi[0] + piy
        # calculate the position of the direct beam at 0 detector angles
        x_direct_0 = directbeam_x + setup.inplane_coeff * (
            direct_inplane * np.pi / 180 * sdd / detector.pixelsize_x
        ) # inplane_coeff is +1 or -1
        y_direct_0 = (
            directbeam_y
            - setup.outofplane_coeff
            * direct_outofplane
            * np.pi
            / 180
            * sdd
            / detector.pixelsize_y
        ) # outofplane_coeff is +1 or -1
        # calculate corrected detector angles for the Bragg peak
        bragg_inplane = setup.inplane_angle + setup.inplane_coeff * (
            detector.pixelsize_x * (bragg_x - x_direct_0) / sdd * 180 / np.pi
        ) # inplane_coeff is +1 or -1
        bragg_outofplane = (
            setup.outofplane_angle
            - setup.outofplane_coeff
            * detector.pixelsize_y
            * (bragg_y - y_direct_0)
            / sdd
            * 180
            / np.pi
        ) # outofplane_coeff is +1 or -1
        print(
            f"\nBragg angles before correction (gam, del): ({setup.inplane_angle:.4f}, "
            f"{setup.outofplane_angle:.4f})"
        )
        print(
            f"Bragg angles after correction (gam, del): ({bragg_inplane:.4f}, "
            f"{bragg_outofplane:.4f})"
        )
        # update setup with the corrected detector angles
        setup.inplane_angle = bragg_inplane
        setup.outofplane_angle = bragg_outofplane
        ##############################################################
        # wavevector transfer calculations (in the laboratory frame) #
        ##############################################################
        kin = 2 * np.pi / setup.wavelength * np.asarray(beam_direction)
        # in lab frame z downstream, y vertical, x outboard
        kout = (
            setup.exit_wavevector
        ) # in lab.frame z downstream, y vertical, x outboard
        q = (kout - kin) / 1e10 # convert from 1/m to 1/angstrom
        q_com.append(np.linalg.norm(q))
        print(f"Wavevector transfer of Bragg peak: {q}, Qnorm={np.linalg.norm(q):.4f}")
##########################################################
# plot the ROI centered on the Bragg peak for each scan  #
##########################################################
plt.ion()
# plot maximum 7x7 ROIs per figure
nb_fig = 1 + len(scans) // 49
if nb_fig == 1:
    nb_rows = np.floor(np.sqrt(len(scans)))
    nb_columns = np.ceil(len(scans) / nb_rows)
else:
    nb_rows = 7
    nb_columns = 7
scan_counter = 0
for fig_idx in range(nb_fig):
    fig = plt.figure(figsize=(12, 9))
    for idx in range(min(49, len(scans) - scan_counter)):
        axis = plt.subplot(nb_rows, nb_columns, idx + 1)
        axis.imshow(np.log10(check_roi[scan_counter]))
        axis.set_title("S{:d}".format(scans[scan_counter]))
        scan_counter = scan_counter + 1
    plt.tight_layout()
    plt.pause(0.1)
    fig.savefig(detector.savedir + f"check-roi{fig_idx+1}" + comment + ".png")
##########################################################
# plot the evolution of the center of mass and intensity #
##########################################################
fig, ((ax0, ax1, ax2), (ax3, ax4, ax5)) = plt.subplots(
    nrows=2, ncols=3, figsize=(12, 9)
)
ax0.plot(scans, x_axis, "-o")
ax0.set_xlabel("Scan number")
ax0.set_ylabel(x_label)
ax1.scatter(x_axis, int_sum, s=24, c=scans, cmap=my_cmap)
ax1.set_xlabel(x_label)
ax1.set_ylabel("Integrated intensity")
ax1.set_facecolor(bckg_color)
ax2.scatter(x_axis, int_max, s=24, c=scans, cmap=my_cmap)
ax2.set_xlabel(x_label)
ax2.set_ylabel("Maximum intensity")
ax2.set_facecolor(bckg_color)
ax3.scatter(x_axis, xcom, s=24, c=scans, cmap=my_cmap)
ax3.set_xlabel(x_label)
# axis labels depend on how the peak position was determined
if peak_method in ["com", "max_com"]:
    ax3.set_ylabel("xcom (pixels)")
else: # 'max'
    ax3.set_ylabel("xmax (pixels)")
ax3.set_facecolor(bckg_color)
ax4.scatter(x_axis, ycom, s=24, c=scans, cmap=my_cmap)
ax4.set_xlabel(x_label)
if peak_method in ["com", "max_com"]:
    ax4.set_ylabel("ycom (pixels)")
else: # 'max'
    ax4.set_ylabel("ymax (pixels)")
ax4.set_facecolor(bckg_color)
plt5 = ax5.scatter(x_axis, zcom, s=24, c=scans, cmap=my_cmap)
gu.colorbar(plt5, scale="linear", numticks=min(len(scans), 20), label="scan #")
ax5.set_xlabel(x_label)
if peak_method in ["com", "max_com"]:
    ax5.set_ylabel("zcom (pixels)")
else: # 'max'
    ax5.set_ylabel("zmax (pixels)")
ax5.set_facecolor(bckg_color)
plt.tight_layout()
plt.pause(0.1)
fig.savefig(detector.savedir + "summary" + comment + ".png")
############################################
# plot the evolution of the incident angle #
############################################
tilt_com = np.asarray(tilt_com)
x_axis = np.asarray(x_axis)
# mean/std of the Bragg angle for each distinct x_axis value (e.g. voltage)
uniq_xaxis = np.unique(x_axis)
mean_tilt = np.empty(len(uniq_xaxis))
std_tilt = np.empty(len(uniq_xaxis))
for idx, item in enumerate(uniq_xaxis):
    mean_tilt[idx] = np.mean(tilt_com[x_axis == item])
    std_tilt[idx] = np.std(tilt_com[x_axis == item])
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(12, 9))
ax0.plot(scans, tilt_com, "-o")
ax0.set_xlabel("Scan number")
ax0.set_ylabel("Bragg angle (deg)")
ax1.errorbar(
    uniq_xaxis,
    mean_tilt,
    yerr=std_tilt,
    elinewidth=2,
    capsize=6,
    capthick=2,
    linestyle="",
    marker="o",
    markersize=6,
    markerfacecolor="w",
)
ax1.set_xlabel(x_label)
ax1.set_ylabel("Bragg angle (deg)")
plt2 = ax2.scatter(x_axis, tilt_com, s=24, c=scans, cmap=my_cmap)
gu.colorbar(plt2, scale="linear", numticks=min(len(scans), 20), label="scan #")
ax2.set_xlabel(x_label)
ax2.set_ylabel("Bragg angle (deg)")
ax2.set_facecolor(bckg_color)
plt.tight_layout()
plt.pause(0.1)
fig.savefig(detector.savedir + "Bragg angle" + comment + ".png")
##############################################
# plot the evolution of the diffusion vector #
##############################################
if convert_to_q:
    q_com = np.asarray(q_com)
    # mean/std of |q| for each distinct x_axis value, mirroring the tilt plots
    mean_q = np.empty(len(uniq_xaxis))
    std_q = np.empty(len(uniq_xaxis))
    for idx, item in enumerate(uniq_xaxis):
        mean_q[idx] = np.mean(q_com[x_axis == item])
        std_q[idx] = np.std(q_com[x_axis == item])
    fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(12, 9))
    ax0.plot(scans, q_com, "-o")
    ax0.set_xlabel("Scan number")
    ax0.set_ylabel("q (1/A)")
    ax1.errorbar(
        uniq_xaxis,
        mean_q,
        yerr=std_q,
        elinewidth=2,
        capsize=6,
        capthick=2,
        linestyle="",
        marker="o",
        markersize=6,
        markerfacecolor="w",
    )
    ax1.set_xlabel(x_label)
    ax1.set_ylabel("q (1/A)")
    plt2 = ax2.scatter(x_axis, q_com, s=24, c=scans, cmap=my_cmap)
    gu.colorbar(plt2, scale="linear", numticks=min(len(scans), 20), label="scan #")
    ax2.set_xlabel(x_label)
    ax2.set_ylabel("q (1/A)")
    ax2.set_ylim(bottom=min(q_com) - strain_range, top=max(q_com) + strain_range)
    ax2.set_facecolor(bckg_color)
    plt.tight_layout()
    plt.pause(0.1)
    fig.savefig(detector.savedir + "diffusion vector" + comment + ".png")
plt.ioff()
plt.show()
| 34.886029 | 88 | 0.605649 | 2,612 | 18,978 | 4.230092 | 0.213629 | 0.012218 | 0.010861 | 0.012218 | 0.302652 | 0.256675 | 0.212236 | 0.187347 | 0.160377 | 0.143814 | 0 | 0.031712 | 0.215723 | 18,978 | 543 | 89 | 34.950276 | 0.710629 | 0.232849 | 0 | 0.231144 | 0 | 0.007299 | 0.113082 | 0.023874 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.002433 | 0.026764 | 0 | 0.026764 | 0.026764 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8cc924d479e5abac34c42f3e20b9c86e8bebea6 | 3,211 | py | Python | portfolio/gui/ui_components/charts/total_equity.py | timeerr/portfolio | 256032eb638048f3cd3c824f2bb4976a8ec320b1 | [
"MIT"
] | null | null | null | portfolio/gui/ui_components/charts/total_equity.py | timeerr/portfolio | 256032eb638048f3cd3c824f2bb4976a8ec320b1 | [
"MIT"
] | null | null | null | portfolio/gui/ui_components/charts/total_equity.py | timeerr/portfolio | 256032eb638048f3cd3c824f2bb4976a8ec320b1 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from datetime import datetime
from PyQt5.QtChart import QChartView, QDateTimeAxis, QChart
from PyQt5.QtChart import QValueAxis, QSplineSeries
from PyQt5.QtCore import QDateTime, Qt
from PyQt5.QtGui import QBrush, QColor, QFont, QPen, QPainter
from portfolio.utils import confighandler
class TotalEquityChartView(QChartView):
    """
    Chart that displays the balance between several dates
    from an account, token or whole portfolio
    """

    def __init__(self, *args, **kwargs):
        # Start with an empty chart so setupChartWithData can always replace
        # self.chart without special-casing the first call.
        super().__init__(*args, **kwargs)
        self.chart = QChart()

    def setupChartWithData(self, data, linecolor='#422F8A'):
        """
        Chart gets updated displaying the new data.

        Data has to be expressed on a dictionary form:
        - keys are timestamps (str or numeric epoch seconds)
        - values are total balance for that timestamp

        linecolor: hex color string for the spline series pen.
        """
        # A brand new QChart is built on every call; the old one is discarded.
        self.chart = QChart()
        self.chart.setTheme(QChart.ChartThemeDark)
        self.chart.setAnimationOptions(QChart.SeriesAnimations)
        self.chart.setBackgroundBrush(QBrush(QColor("transparent")))
        # self.chart.setTitle("")
        # self.chart.setTitleBrush(QBrush(QColor('white')))
        # Axis X (Dates)
        self.x_axis = QDateTimeAxis()
        self.x_axis.setTickCount(11)
        self.x_axis.setLabelsAngle(70)
        font = QFont()
        font.setFamily('Roboto')
        font.setLetterSpacing(QFont.PercentageSpacing, 110)
        font.setPointSize(8)
        self.x_axis.setLabelsFont(font)
        self.x_axis.setFormat("dd-MM-yy")
        self.x_axis.setTitleText(self.tr('Date'))
        self.x_axis.setTitleVisible(False)
        self.x_axis.setLineVisible(False)
        self.x_axis.setGridLineVisible(False)
        # Axis Y (Balances); min/max padded by 5% so the curve never touches
        # the chart edges. Left at defaults when data is empty.
        self.y_axis = QValueAxis()
        if data != {}:
            self.y_axis.setMax(max(data.values())*1.05)
            self.y_axis.setMin(min(data.values())*0.95)
        # self.y_axis.setMinorGridLineVisible(False)
        self.y_axis.setLineVisible(False)
        self.y_axis.setGridLineColor(QColor("#ECE9F1"))
        self.chart.addAxis(self.y_axis, Qt.AlignLeft)
        self.chart.addAxis(self.x_axis, Qt.AlignBottom)
        # Series: one point per (timestamp, balance) pair
        self.btcseries = QSplineSeries()
        for date in data:
            balance = data[date]
            # keys may be strings; go through float->int to tolerate "123.0"
            date = QDateTime(datetime.fromtimestamp(int(float(date))))
            self.btcseries.append(date.toMSecsSinceEpoch(), balance)
        self.btcseries.setName("BTC")
        pen = QPen(QColor(linecolor))
        pen.setWidth(3)
        self.btcseries.setPen(pen)
        # Series functionality: show the hovered point's value in the title
        self.btcseries.hovered.connect(self.selectPoint)
        self.chart.addSeries(self.btcseries)
        self.btcseries.attachAxis(self.x_axis)
        self.btcseries.attachAxis(self.y_axis)
        self.setChart(self.chart)
        self.setRenderHint(QPainter.Antialiasing)
        self.setStyleSheet("border: 0px; background-color: rgba(0,0,0,0); ")
        self.chart.legend().hide()

    def selectPoint(self, point, state):
        """ Shows point where mouse is hovered """
        self.chart.setTitle(
            f"{int(point.y())} {confighandler.get_fiat_currency().upper()}")
| 33.103093 | 76 | 0.64715 | 363 | 3,211 | 5.644628 | 0.449036 | 0.057101 | 0.048316 | 0.021474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012669 | 0.237932 | 3,211 | 96 | 77 | 33.447917 | 0.824683 | 0.15914 | 0 | 0.035714 | 0 | 0 | 0.058015 | 0.016412 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053571 | false | 0 | 0.107143 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8d01e449561f13aea5bee7e189eba4cb4893fa4 | 7,635 | py | Python | test_airsim_v1.2/demo_20181101_ok/PythonClient/multirotor/_multi_drone_threading.py | kumokay/randomstuff | 4fa674cd6b5f0b080381cc4a548ad0039510bd51 | [
"Apache-2.0"
] | null | null | null | test_airsim_v1.2/demo_20181101_ok/PythonClient/multirotor/_multi_drone_threading.py | kumokay/randomstuff | 4fa674cd6b5f0b080381cc4a548ad0039510bd51 | [
"Apache-2.0"
] | null | null | null | test_airsim_v1.2/demo_20181101_ok/PythonClient/multirotor/_multi_drone_threading.py | kumokay/randomstuff | 4fa674cd6b5f0b080381cc4a548ad0039510bd51 | [
"Apache-2.0"
] | null | null | null | import setup_path
import airsim
import numpy as np
import os
import tempfile
import pprint
import msgpackrpc
import time
import base64
import threading
from msgpackrpc.error import RPCError
# Local folder where take_picture() stores PNG captures, one subfolder per drone.
image_folder = 'C:\\NESLProjects\\airsim_v1.2.0\\screenshot'
# Per-drone counter used to build sequential image file names.
image_id = {
    'Drone1': 1,
    'Drone2': 1,
}
def take_picture(drone_name, is_save=True):
    """Request a front-camera scene image from *drone_name* via the global
    AirSim client; when is_save is True, write each compressed (PNG) response
    to image_folder/<drone_name>/<counter>-<idx>.png, bumping the per-drone
    counter in image_id. Returns the raw list of ImageResponse objects.
    """
    responses = client.simGetImages([
        airsim.ImageRequest("front_center", airsim.ImageType.Scene),
        # airsim.ImageRequest("bottom_center", airsim.ImageType.Scene),
    ], vehicle_name=drone_name)
    if is_save:
        drone_image_folder = '{}\\{}'.format(image_folder, drone_name)
        if not os.path.isdir(drone_image_folder):
            os.makedirs(drone_image_folder)
        for idx, response in enumerate(responses):
            if response.compress: # compressed responses are PNG data
                print('image type {}, size {}'.format(
                    response.image_type, len(response.image_data_uint8)))
                filename = '{}\\{}-{}.png'.format(
                    drone_image_folder, image_id[drone_name], idx)
                image_id[drone_name] += 1
                airsim.write_file(filename, response.image_data_uint8)
                print('save image: {}'.format(filename))
            else:
                # uncompressed raw images are not handled by this script
                print('error: image format not support')
    return responses
def send_image_async(image_data_uint8, server_ip, server_port):
    """Base64-encode the image bytes and push them, with a send timestamp,
    to the remote msgpack-RPC server; returns the server's reply."""
    rpc_client = msgpackrpc.Client(
        msgpackrpc.Address(server_ip, server_port), timeout=1)
    encoded = base64.b64encode(image_data_uint8)
    reply = rpc_client.call('push', encoded, time.time())
    print('image sent to {}:{}'.format(server_ip, server_port))
    return reply
def get_cur_pos(vehicle_name=''):
    """Return the current estimated position (a Vector3r) of *vehicle_name*,
    read from the global AirSim client."""
    cur_state = client.getMultirotorState(vehicle_name=vehicle_name)
    return cur_state.kinematics_estimated.position
def move_drone(drone_name, dx, dy, dz, yaw, speed, is_async=False):
    """Move *drone_name* by the (dx, dy, dz) offset from its current position.

    yaw (degrees) and speed are forwarded to AirSim. With is_async=True the
    pending move task is returned so the caller can join() it later;
    otherwise the call blocks and prints the final position.
    """
    cur_pos = get_cur_pos(vehicle_name=drone_name)
    next_pos = airsim.Vector3r(
        cur_pos.x_val + dx, cur_pos.y_val + dy, cur_pos.z_val + dz)
    print("try to move: {} -> {}, yaw={}, speed={}".format(
        cur_pos, next_pos, yaw, speed))
    thread = client.moveToPositionAsync(
        next_pos.x_val, next_pos.y_val, next_pos.z_val, speed,
        yaw_mode=airsim.YawMode(is_rate=False, yaw_or_rate=yaw),
        drivetrain=airsim.DrivetrainType.MaxDegreeOfFreedom,
        vehicle_name=drone_name)
    if is_async:
        return thread
    thread.join()
    cur_pos = get_cur_pos(vehicle_name=drone_name)
    print(cur_pos)
def move_to_pos(drone_name, x, y, z, yaw, speed):
    """Fly *drone_name* to the absolute position (x, y, z), blocking until
    the move completes; prints the position before and after the move."""
    cur_pos = get_cur_pos(vehicle_name=drone_name)
    print("try to move: {} -> {}, yaw={}, speed={}".format(
        cur_pos, (x, y, z), yaw, speed))
    rc = client.moveToPositionAsync(
        x, y, z, speed,
        yaw_mode=airsim.YawMode(is_rate=False, yaw_or_rate=yaw),
        drivetrain=airsim.DrivetrainType.MaxDegreeOfFreedom,
        vehicle_name=drone_name).join()
    cur_pos = get_cur_pos(vehicle_name=drone_name)
    print(cur_pos)
# connect to the AirSim simulator and take API control of both drones
client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True, "Drone1")
client.enableApiControl(True, "Drone2")
client.armDisarm(True, "Drone1")
client.armDisarm(True, "Drone2")
# take off both drones in parallel, then wait for both to finish
f1 = client.takeoffAsync(vehicle_name="Drone1")
f2 = client.takeoffAsync(vehicle_name="Drone2")
f1.join()
f2.join()
state1 = client.getMultirotorState(vehicle_name="Drone1")
s = pprint.pformat(state1)
print("state: %s" % s)
state2 = client.getMultirotorState(vehicle_name="Drone2")
s = pprint.pformat(state2)
print("state: %s" % s)
airsim.wait_key('Press any key to start workers')
# cross-thread signalling flags shared by the worker threads and rpcServer
is_car_found = threading.Event()
is_follow = threading.Event()
is_stop = threading.Event()
class rpcServer(object):
    """msgpack-RPC endpoint: remote detection results drive the event flags."""

    def push(self, result, timestamp):
        """Receive a detection result; a 'car' hit triggers the follower."""
        print("recv result={}; sender_time={}".format(result, timestamp))
        if 'car' not in result:
            return
        is_car_found.set()
        is_follow.set()

    def stop(self, result, timestamp):
        """Remote request to end the simulation loops."""
        print('stop simulation')
        is_stop.set()
server = msgpackrpc.Server(rpcServer())
def actuator(server):
    """Worker: serve incoming RPC calls on 172.17.15.21:18800 until the main
    thread calls server.stop(), then release the socket."""
    server.listen(msgpackrpc.Address("172.17.15.21", 18800))
    server.start()
    server.close()
def control_drone1_move(is_stop):
    """Worker: patrol Drone1 back and forth (45 m forward, then back, yaw 90)
    until *is_stop* is set.

    BUG FIX: the original never consulted its *is_stop* argument, so this
    thread looped forever and ``worker1.join()`` at the end of the script
    hung. The two identical retry loops are also factored into one helper.
    """
    def _patrol_leg(dx):
        # The sim occasionally raises RuntimeError while a previous command
        # is still in flight; retry until the move is accepted or we stop.
        while not is_stop.is_set():
            try:
                thread = move_drone('Drone1', dx, 0, 0, 90, 5, is_async=True)
                time.sleep(15)
                thread.join()
                return
            except RuntimeError:
                print('wait a sec')
                time.sleep(3)
    while not is_stop.is_set():
        _patrol_leg(45)
        _patrol_leg(-45)
def control_drone1_pic(is_stop):
    """Worker: capture one frame per second from Drone1 and forward it to the
    remote detector at 172.17.20.12:18800 until *is_stop* is set.

    Errors from the simulator or the RPC link are logged and swallowed so a
    transient failure does not kill the capture loop. BUG FIX: the original
    listed ``except RuntimeError`` after ``except Exception``, which made the
    RuntimeError handlers unreachable; the broad handler covers them.
    """
    while 1:
        time.sleep(1) # fps = 1
        print('take_picture')
        responses = None
        try:
            responses = take_picture('Drone1', is_save=False)
        except RPCError as err:
            print('RPCError but it is okay: {}'.format(err))
        except Exception as exp:
            # Covers RuntimeError and anything else non-fatal.
            print('Exception but it is okay: {}'.format(exp))
        if responses:
            try:
                send_image_async(responses[0].image_data_uint8, '172.17.20.12', 18800)
            except RPCError as err:
                print('RPCError but it is okay: {}'.format(err))
            except Exception as exp:
                print('Exception but it is okay: {}'.format(exp))
        if is_stop.isSet():
            break
def control_drone2(is_follow):
    """Chase worker for Drone2: once a car is spotted, tail Drone1.

    Polls is_follow once per second; when it is set, flies Drone2 to
    just above Drone1's position, photographs, repeats once with a 10 m
    x offset at higher speed, then exits.  Uses is_set() instead of the
    deprecated isSet(); drops unused return-value assignments.

    Args:
        is_follow: threading.Event set when the target car is detected.
    """
    while not is_follow.is_set():
        time.sleep(1)
    # First pass: hover 1 m above Drone1 and take a picture.
    next_pos = get_cur_pos(vehicle_name='Drone1')
    move_to_pos('Drone2', next_pos.x_val, next_pos.y_val, next_pos.z_val - 1, 0, 10)
    take_picture('Drone2')
    # Second pass: 10 m ahead in x, faster approach.
    next_pos = get_cur_pos(vehicle_name='Drone1')
    move_to_pos('Drone2', next_pos.x_val + 10, next_pos.y_val, next_pos.z_val - 1, 0, 1)
    take_picture('Drone2')
worker0 = threading.Thread(
target=actuator, args=(server,), name='actuator')
worker1 = threading.Thread(
target=control_drone1_move, args=(is_stop,), name='control_drone1_move')
worker2 = threading.Thread(
target=control_drone1_pic, args=(is_stop,), name='control_drone1_pic')
worker3 = threading.Thread(
target=control_drone2, args=(is_follow,), name='control_drone2')
print('Start worker threads')
worker0.start()
worker1.start()
worker2.start()
worker3.start()
print('Waiting for worker threads')
worker2.join()
worker3.join()
server.stop()
worker1.join()
worker0.join()
airsim.wait_key('Press any key to reset to original state')
client.armDisarm(False, "Drone1")
client.armDisarm(False, "Drone2")
client.reset()
# that's enough fun for now. let's quit cleanly
client.enableApiControl(False, "Drone1")
client.enableApiControl(False, "Drone2")
| 33.634361 | 98 | 0.619515 | 950 | 7,635 | 4.785263 | 0.218947 | 0.023757 | 0.024637 | 0.030796 | 0.33018 | 0.305983 | 0.287945 | 0.276507 | 0.276507 | 0.254949 | 0 | 0.024364 | 0.263523 | 7,635 | 226 | 99 | 33.783186 | 0.784101 | 0.025147 | 0 | 0.315789 | 0 | 0 | 0.117753 | 0.005964 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057895 | false | 0.010526 | 0.057895 | 0 | 0.142105 | 0.136842 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8d0c892ddfa64fb32150298ff488a5d6ba587d7 | 2,470 | py | Python | tests/test_fits.py | mjcarter95/httpstan | 4a15b0316d7cb0a50193555d80fb1785f557f645 | [
"ISC"
] | null | null | null | tests/test_fits.py | mjcarter95/httpstan | 4a15b0316d7cb0a50193555d80fb1785f557f645 | [
"ISC"
] | null | null | null | tests/test_fits.py | mjcarter95/httpstan | 4a15b0316d7cb0a50193555d80fb1785f557f645 | [
"ISC"
] | null | null | null | """Test sampling."""
import statistics
from typing import Any, Dict, List, Optional, Union
import aiohttp
import numpy as np
import pytest
import helpers
headers = {"content-type": "application/json"}
program_code = "parameters {real y;} model {y ~ normal(0, 0.0001);}"
@pytest.mark.asyncio
async def test_fits(api_url: str) -> None:
    """Draw samples and check their count and mean."""
    expected = 5000
    payload = {
        "function": "stan::services::sample::hmc_nuts_diag_e_adapt",
        "num_samples": expected,
        "num_warmup": expected,
    }
    draws = await helpers.sample_then_extract(api_url, program_code, payload, "y")
    assert len(draws) == expected, (len(draws), expected)
    # y ~ normal(0, 0.0001), so the sample mean must sit very close to 0.
    assert -0.01 < statistics.mean(draws) < 0.01
@pytest.mark.asyncio
async def test_models_actions_bad_args(api_url: str) -> None:
    """Posting an invalid payload must yield a 422 with field errors."""
    model_name = await helpers.get_model_name(api_url, program_code)
    model_id = model_name.split('/')[-1]
    fits_url = f"{api_url}/models/{model_id}/fits"
    bad_payload = {"wrong_key": "wrong_value"}
    expected_errors = {
        "function": ["Missing data for required field."],
        "wrong_key": ["Unknown field."],
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(fits_url, json=bad_payload) as resp:
            assert resp.status == 422
            body = await resp.json()
            assert "json" in body
            assert body["json"] == expected_errors
@pytest.mark.asyncio
async def test_fits_random_seed(api_url: str) -> None:
    """Sampling is reproducible iff the random seed is fixed."""

    async def draws(random_seed: Optional[int] = None) -> List[Union[int, float]]:
        payload: Dict[str, Any] = {
            "function": "stan::services::sample::hmc_nuts_diag_e_adapt"
        }
        if random_seed is not None:
            payload["random_seed"] = random_seed
        return await helpers.sample_then_extract(api_url, program_code, payload, "y")

    seeded_a = np.array(await draws(random_seed=123))
    seeded_b = np.array(await draws(random_seed=123))
    seeded_c = np.array(await draws(random_seed=456))
    unseeded = np.array(await draws())

    # First draw: identical for the repeated seed, different otherwise.
    assert seeded_a[0] == seeded_b[0] != unseeded[0]
    assert seeded_b[0] != unseeded[0]
    assert seeded_a[0] != seeded_c[0] != unseeded[0]
    # Whole chains: equal only when the seed was repeated.
    assert np.allclose(seeded_a, seeded_b)
    assert not np.allclose(seeded_a, seeded_c)
| 34.788732 | 95 | 0.660324 | 334 | 2,470 | 4.697605 | 0.344311 | 0.057361 | 0.038241 | 0.04334 | 0.3174 | 0.291906 | 0.256214 | 0.175908 | 0.133843 | 0.079031 | 0 | 0.027138 | 0.209312 | 2,470 | 70 | 96 | 35.285714 | 0.776242 | 0.01336 | 0 | 0.098039 | 0 | 0 | 0.155344 | 0.060147 | 0 | 0 | 0 | 0 | 0.196078 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.137255 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8d3a9c4a41286b6466da986ff0ba508db0a7e0e | 365 | py | Python | CODE/makeLabels.py | ldr1997/Mid-Or-Feed | 70bd30e408d4ae11e16f9cf30daeaed07e76f27d | [
"MIT"
] | null | null | null | CODE/makeLabels.py | ldr1997/Mid-Or-Feed | 70bd30e408d4ae11e16f9cf30daeaed07e76f27d | [
"MIT"
] | null | null | null | CODE/makeLabels.py | ldr1997/Mid-Or-Feed | 70bd30e408d4ae11e16f9cf30daeaed07e76f27d | [
"MIT"
] | null | null | null | '''
makeLabels - script for making the label vector
input: list of players + their roles ('prettyProPlayers.txt')
output: labels.csv
Authors:
DELOS SANTOS, Angelo Vincent
DEL ROSARIO, Luis Gabriel
MIRANDA, Edrene Bryze
'''
# Context managers guarantee both files are closed even if a line is
# malformed (the original left them open on any exception).
with open('prettyProPlayers.txt', 'r') as src, open('labels.csv', 'w+') as dst:
    for line in src:
        fields = line.split(',')
        # The second comma-separated field is the player's role (the label).
        dst.write(fields[1])
b8d47e4eb5e7c82237c2fbe390f5641a0cb56062 | 1,332 | py | Python | test/espnet2/bin/test_tts_inference.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | test/espnet2/bin/test_tts_inference.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | test/espnet2/bin/test_tts_inference.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | import string
from argparse import ArgumentParser
from pathlib import Path
import pytest
from espnet2.bin.tts_inference import Text2Speech, get_parser, main
from espnet2.tasks.tts import TTSTask
def test_get_parser():
    """get_parser() must build an argparse.ArgumentParser."""
    parser = get_parser()
    assert isinstance(parser, ArgumentParser)
def test_main():
    """main() without arguments must exit via argparse (SystemExit)."""
    with pytest.raises(SystemExit):
        main()
@pytest.fixture()
def token_list(tmp_path: Path):
    """Write a char-level token list file and return its path."""
    tokens = ["<blank>", *string.ascii_letters, "<unk>", "<sos/eos>"]
    path = tmp_path / "tokens.txt"
    with path.open("w") as f:
        f.write("\n".join(tokens) + "\n")
    return path
@pytest.fixture()
def config_file(tmp_path: Path, token_list):
    """Dry-run TTSTask to materialize a default config.yaml under tmp_path.

    Returns the path of the generated configuration file, used by the
    Text2Speech test below.
    """
    # Write default configuration file
    TTSTask.main(
        cmd=[
            "--dry_run",
            "true",
            "--output_dir",
            str(tmp_path),
            "--token_list",
            str(token_list),
            "--token_type",
            "char",
            "--cleaner",
            "none",
            "--g2p",
            "none",
            "--normalize",
            "none",
        ]
    )
    return tmp_path / "config.yaml"
@pytest.mark.execution_timeout(5)
def test_Text2Speech(config_file):
    """Synthesize a short string with a freshly built Text2Speech."""
    tts = Text2Speech(train_config=config_file)
    tts("aiueo")
| 22.2 | 67 | 0.577327 | 155 | 1,332 | 4.793548 | 0.464516 | 0.056528 | 0.043069 | 0.043069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009534 | 0.291291 | 1,332 | 59 | 68 | 22.576271 | 0.777542 | 0.024024 | 0 | 0.108696 | 0 | 0 | 0.122496 | 0 | 0 | 0 | 0 | 0 | 0.021739 | 1 | 0.108696 | false | 0 | 0.130435 | 0 | 0.282609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8d73f3bf72ede75bd8369eb4ba6cfd0ba856c01 | 3,206 | py | Python | pairidentification/model_playground.py | marivasq/gamma-ai | 735953e80901afea3e5cdeb2a7b27c9ab5725434 | [
"MIT"
] | 6 | 2020-01-29T07:24:14.000Z | 2022-03-16T10:05:25.000Z | pairidentification/model_playground.py | marivasq/gamma-ai | 735953e80901afea3e5cdeb2a7b27c9ab5725434 | [
"MIT"
] | 6 | 2020-07-03T00:31:10.000Z | 2021-09-10T07:45:01.000Z | pairidentification/model_playground.py | marivasq/gamma-ai | 735953e80901afea3e5cdeb2a7b27c9ab5725434 | [
"MIT"
] | 5 | 2019-02-27T22:56:49.000Z | 2019-08-24T19:01:41.000Z | ###################################################################################################
#
# PairIdentification.py
#
# Copyright (C) by Andreas Zoglauer & Harrison Costatino.
#
# Please see the file LICENSE in the main repository for the copyright-notice.
#
###################################################################################################
###################################################################################################
import tensorflow as tf
import numpy as np
#from mpl_toolkits.mplot3d import Axes3D
#import matplotlib.pyplot as plt
import random
import signal
import sys
import time
import math
import csv
import os
import argparse
from datetime import datetime
from functools import reduce
print("\nPair Identification")
print("============================\n")
# Step 1: Input parameters
###################################################################################################
# Default parameters
# X, Y, Z bins
XBins = 32
YBins = 32
ZBins = 64
# Depends on GPU memory and layout
BatchSize = 128
MaxEvents = 100000
# Determine derived parameters
OutputDataSpaceSize = ZBins
XMin = -43
XMax = 43
# XMin = -5
# XMax = +5
YMin = -43
YMax = 43
# YMin = -5
# YMax = +5
ZMin = 13
ZMax = 45
###################################################################################################
# Step 4: Setting up the neural network
###################################################################################################
#TODO: Tweak/optimize model
# Is there a better loss function?
#Make more efficient for larger data sets
print("Info: Setting up neural network...")
print("Info: Setting up 3D CNN...")
conv_model = tf.keras.models.Sequential(name='Pair Identification CNN')
conv_model.add(tf.keras.layers.Conv3D(filters=64, kernel_size=3, strides=2, input_shape=(XBins, YBins, ZBins, 1)))
# conv_model.add(tf.keras.layers.MaxPooling3D((2,2,1)))
conv_model.add(tf.keras.layers.LeakyReLU(alpha=0.25))
conv_model.add(tf.keras.layers.BatchNormalization())
conv_model.add(tf.keras.layers.Conv3D(filters=96, kernel_size=3, strides=1, activation='relu'))
conv_model.add(tf.keras.layers.BatchNormalization())
# conv_model.add(tf.keras.layers.MaxPooling3D((2,2,1)))
conv_model.add(tf.keras.layers.Flatten())
conv_model.add(tf.keras.layers.Dense(3*OutputDataSpaceSize, activation='relu'))
conv_model.add(tf.keras.layers.BatchNormalization())
print("Conv Model Summary: ")
print(conv_model.summary())
print("Info: Setting up Numerical/Categorical Data...")
base_model = tf.keras.models.Sequential(name='Base Model')
base_model.add(tf.keras.layers.Dense(3*OutputDataSpaceSize, activation='relu', input_shape=(1,)))
base_model.add(tf.keras.layers.BatchNormalization())
print("Base Model Summary: ")
print(base_model.summary())
print("Info: Setting up Combined NN...")
combinedInput = tf.keras.layers.concatenate([conv_model.output, base_model.output])
combinedLayer = tf.keras.layers.Dense(OutputDataSpaceSize, activation='softmax')(combinedInput)
combined_model = tf.keras.models.Model([conv_model.input, base_model.input], combinedLayer)
print("Combined Model Summary: ")
print(combined_model.summary())
| 26.495868 | 114 | 0.609482 | 372 | 3,206 | 5.180108 | 0.395161 | 0.061754 | 0.094447 | 0.093409 | 0.365854 | 0.339907 | 0.269331 | 0.243384 | 0.216917 | 0.187338 | 0 | 0.020991 | 0.093575 | 3,206 | 120 | 115 | 26.716667 | 0.64212 | 0.195883 | 0 | 0.058824 | 0 | 0 | 0.155181 | 0.026034 | 0 | 0 | 0 | 0.008333 | 0 | 1 | 0 | false | 0 | 0.235294 | 0 | 0.235294 | 0.235294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8d8c40f78302cda3e88a3cd2895f4efd0efa2fa | 1,713 | py | Python | twitter_media_downloader.py | kiriphorito/twitter_media_downloader | 4f267b061c975daa64d9331cc73524978846b038 | [
"Apache-2.0"
] | null | null | null | twitter_media_downloader.py | kiriphorito/twitter_media_downloader | 4f267b061c975daa64d9331cc73524978846b038 | [
"Apache-2.0"
] | null | null | null | twitter_media_downloader.py | kiriphorito/twitter_media_downloader | 4f267b061c975daa64d9331cc73524978846b038 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import os.path
import sys
import csv
# Import local functions
from src.args import parse_args, parse_file_arg
from src.config import get_oauth
from src.parser import get_medias
from src.mapper import generate_results
from src.downloader import download
if __name__ == '__main__':
# Parse program arguments
args = parse_args(sys.argv[1:])
user_ids = []
if (len(args.userid) == 0 and args.csv is None) or (len(args.userid) > 0 and args.csv is not None):
raise Exception('You need to either provider a csv or list of userIds')
elif len(args.userid) > 0:
user_ids = parse_file_arg(args.userid)
elif args.csv is not None:
if not os.access(args.csv, os.R_OK):
raise Exception('Could not read file ' + args.csv)
with open(args.csv) as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
user_ids.append(row['userId'])
# Twitter OAuth
auth = get_oauth('.oauth.json')
# Suppress output if the "quiet" flag is enabled
if args.quiet:
sys.stdout = open(os.devnull, 'w')
# For each user in the ID list
for user_id in user_ids:
# Create output directory if necessary
outputDir = os.path.join(args.output, user_id + os.sep if args.o_userid else '')
if not os.path.exists(outputDir):
os.makedirs(outputDir)
# Start the download
medias = get_medias(auth, user_id, args.retweets, args.image_size, args.since, args.since_id, args.until, args.until_id, args.likes)
results = generate_results(medias, args.format)
download(results, outputDir, False, True)
| 31.145455 | 140 | 0.663164 | 258 | 1,713 | 4.263566 | 0.414729 | 0.038182 | 0.035455 | 0.038182 | 0.068182 | 0.047273 | 0.047273 | 0.047273 | 0 | 0 | 0 | 0.003843 | 0.240514 | 1,713 | 54 | 141 | 31.722222 | 0.84166 | 0.132516 | 0 | 0 | 0 | 0 | 0.066351 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8d9ffae96c27e70a87dd467b060ca1df3eb23b9 | 6,703 | py | Python | pyprof/prof/linear.py | herberthum/PyProf | 1645f55674c59d57a40e6acb6038a4bac19a58a3 | [
"Apache-2.0"
] | null | null | null | pyprof/prof/linear.py | herberthum/PyProf | 1645f55674c59d57a40e6acb6038a4bac19a58a3 | [
"Apache-2.0"
] | null | null | null | pyprof/prof/linear.py | herberthum/PyProf | 1645f55674c59d57a40e6acb6038a4bac19a58a3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .tc import TC_Whitelist
from .utility import Utility
from .base import OperatorLayerBase
class Linear(OperatorLayerBase):
    """Profiling model for torch.nn.functional.linear (y = x @ W^T + b).

    Notes:
    If the bias occurs before the GEMM, then its 1 write (bias expansion).
    If the bias occurs after, then its 1 read and 1 write.
    bias in bprop is a reduction and hence is 1 read.
    """

    # Kernel-name substrings that identify the GEMM itself.
    gemmKernels = [
        "gemm", "gemv", "dot_kernel", "splitKreduce_kernel",
        "reduce_1Block_kernel", "cutlass"
    ]
    # Kernel-name substrings that identify the bias add / reduction.
    biasKernels = [
        "kernelReduceContigDim", "kernelReduceNoncontigDim_shared",
        "elementwise_kernel", "reduce_kernel", "kernelPointwiseApply2",
        "2d_grouped_direct_kernel"
    ]

    def setXWBMNK(self, args):
        """Extract input (x), weight (w), bias (b) shapes and GEMM dims M, N, K."""
        x = None
        w = None
        b = None
        if (len(args) == 2):
            x, w = args
        elif (len(args) == 3):
            x, w, b = args
            assert (x['type'] == w['type'] == "tensor")
            if (b['type'] == "tensor"):
                # b can be of shape [n] or [1, n] (broadcast bias); the
                # shape check happens below once n is known.
                pass
            elif (b['type'] == "NoneType"):
                assert b['value'] is None
                b = None
            else:
                assert False
        else:
            assert False

        assert (len(w['shape']) == 2)
        k1 = x['shape'][-1]
        n, k2 = w['shape']
        assert (k1 == k2)
        if b is not None:
            assert (b['shape'][0] == n or
                    (b['shape'][0] == 1 and b['shape'][1] == n))

        t1 = x['dtype']
        # x and w may have different dtypes (e.g. mixed precision), so no
        # dtype assert here; the input dtype is used for byte estimates.

        # X, W, B
        self.x = x['shape']
        self.w = w['shape']
        self.b = b['shape'] if b is not None else None
        self.type = t1

        # M, N, K of the GEMM: x is (..., K), w is (M, K).
        n = self.x[0:-1]
        k = self.x[-1]
        m, k1 = self.w
        assert (k == k1)

        self.m = m
        self.n = n
        self.k = k

    def tc(self):
        """Tensor Core usage: 1/0 for the GEMM kernel, "-" for the bias kernel."""
        if self.op() == "linear":
            return 1 if self.name in TC_Whitelist() else 0
        return "-"

    def __init__(self, d):
        self.name = d.name
        self.dir = d.dir
        self.sub = d.sub

        marker = eval(d.argMarker[0])
        mod = marker['mod']
        op = marker['op']
        args = marker['args']

        assert (mod == "torch.nn.functional")
        assert (op == "linear")
        # BUGFIX: mod_ was never stored, so mod() raised AttributeError.
        self.mod_ = mod

        self.setXWBMNK(args)

        # Classify the kernel as the GEMM itself or the bias add.
        if any(x in d.name for x in Linear.gemmKernels):
            self.op_ = "linear"
        else:
            assert any(x in d.name for x in Linear.biasKernels), f"Kernel name: {d.name}"
            self.op_ = "bias"

    def params(self):
        """Ordered summary of the GEMM/bias problem size for reporting."""
        m, n, k, x, w, t = self.m, self.n, self.k, self.x, self.w, self.type
        if len(n) == 1:
            n = n[0]
        if self.op_ == "linear":
            if self.dir == "fprop":
                p = OrderedDict([('M', m), ('N', n), ('K', k), ('type', t)])
            elif self.dir == "bprop":
                if self.sub == 0:    # dgrad (most likely)
                    p = OrderedDict([('M', k), ('N', n), ('K', m), ('type', t)])
                elif self.sub == 1:  # wgrad (most likely)
                    p = OrderedDict([('M', k), ('N', m), ('K', n), ('type', t)])
                else:
                    # Additional kernels for reduction
                    p = OrderedDict([('X', x), ('W', w), ('type', t)])
            else:
                assert False
        elif self.op_ == "bias":
            p = OrderedDict([('M', m), ('N', n), ('type', t)])
        else:
            assert False
        return p

    def op(self):
        return self.op_

    def bytesFlops(self):
        """Return (bytes, flops) estimates for this kernel invocation."""
        m = self.m
        n = Utility.numElems(self.n)
        k = self.k

        if self.op_ == "linear":
            if self.dir == "fprop" or (self.dir == "bprop" and self.sub in (0, 1)):
                # fprop, dgrad (sub 0) and wgrad (sub 1) are all M*N*K GEMMs
                # with identical byte/flop counts.
                f = m * n * k * 2
                # BUGFIX: parenthesize the element count; previously only the
                # n*k term was scaled by the element size.
                b = (m * n + m * k + n * k) * Utility.typeToBytes(self.type)
            elif self.dir == "bprop":
                # Additional kernels for reduction: not attributed.
                f = 0
                b = 0
            else:
                assert False
        elif self.op_ == "bias":
            f = m * n
            b = 2 * m * n * Utility.typeToBytes(self.type)
        else:
            assert False
        return b, f

    # TODO: Fix bytes and flops with CUTLASS kernels.
    def bytes(self):
        b, _ = self.bytesFlops()
        return b

    def flops(self):
        _, f = self.bytesFlops()
        return f

    def mod(self):
        return self.mod_
| 30.058296 | 133 | 0.494256 | 867 | 6,703 | 3.784314 | 0.232987 | 0.007924 | 0.032002 | 0.004877 | 0.259982 | 0.22737 | 0.195672 | 0.157879 | 0.140811 | 0.126791 | 0 | 0.015746 | 0.36521 | 6,703 | 222 | 134 | 30.193694 | 0.755347 | 0.207967 | 0 | 0.287879 | 0 | 0 | 0.094977 | 0.021277 | 0 | 0 | 0 | 0.009009 | 0.113636 | 1 | 0.068182 | false | 0 | 0.030303 | 0.015152 | 0.189394 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8dcb033c22350d524424aeac323310029a1a273 | 3,703 | py | Python | 4-EDOs/taylor.py | mzahnd/algoritmos-mn | 122be9a0dcd2cdbf5ddf9e23a0e203c8f12380ab | [
"MIT"
] | null | null | null | 4-EDOs/taylor.py | mzahnd/algoritmos-mn | 122be9a0dcd2cdbf5ddf9e23a0e203c8f12380ab | [
"MIT"
] | null | null | null | 4-EDOs/taylor.py | mzahnd/algoritmos-mn | 122be9a0dcd2cdbf5ddf9e23a0e203c8f12380ab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
from numpy import linspace, logspace, diff, zeros
from numpy import cos, sin, exp, log, pi
import matplotlib.pyplot as plt
####################################
# Implementación genérica de Euler
# f(t,x): derivada de x respecto al tiempo
# x0: condición inicial
# t0, tf: tiempo inicial y final
# h: paso de integración
####################################
def euler(f, x0, t0, tf, h):
    """Explicit (forward) Euler integration of x' = f(t, x).

    Arguments:
        f:  right-hand side, called as f(t, x)
        x0: initial state, shape (n,)
        t0, tf: initial and final time
        h:  integration step

    Returns:
        t: time grid, shape (N+1,)
        x: states, shape (n, N+1); column k holds x(t[k])
    """
    steps = int((tf - t0) / h)   # number of steps
    dim = x0.shape[0]            # dimension of the problem
    t = linspace(t0, tf, steps + 1)
    x = zeros((dim, steps + 1))
    x[:, 0] = x0
    for k in range(steps):
        x[:, k + 1] = x[:, k] + h * f(t[k], x[:, k])
    return t, x
def taylor(x0, t0, tf, h, *derivatives):
    """
    Generic Taylor-series integrator of order len(derivatives).

    Arguments:
        x0: Initial value, shape (n,)
        t0 / tf: Initial and final time to evaluate
        h: Desired step
        derivatives: derivatives of the solution, starting from x'
            up to the desired order; each is called as g(t, x)

    Returns:
        t, x
        t: Array with the time points used for the function's calculation.
           Shape: (N+1,)
        x: Obtained values at a given t.
           Shape: (n, N+1) - one column per element in t -
    """
    N = int((tf - t0) / h)  # number of points
    t = linspace(t0, tf, N + 1)
    n = x0.shape[0]  # dimension of the problem
    x = zeros((n, N + 1))
    x[:, 0] = x0
    for k in range(N):
        x[:, k + 1] = x[:, k]
        for order, der in enumerate(derivatives, start=1):
            # Add the order-th Taylor term: g(t_k, x_k) * h^order / order!
            # BUGFIX: np.math was an accidental alias removed in NumPy 2.0;
            # use the stdlib math module instead.
            x[:, k + 1] += der(t[k], x[:, k]) * (h ** order) / math.factorial(order)
    return t, x
# The following is the example given in class using the taylor function.
# If you run the error-estimation function, remember to update its data
# there according to the order you use (same for plotaylor).
########################
# EJEMPLO
########################
R = 1e3 # Valor de la resistencia
C = 1e-6 # Valor de la capacidad
w = 2.0 * pi * 1000 # frecuencia angular de la señal de entrada
A = 1.0 # amplitud de la señal de entrada
T = 5 * 2 * pi / w # simulo cinco ciclos
####################################
# Derivada primera de x
def dx(t, x):
    # x' = (v_in - x) / (R*C) for the RC low-pass driven by v_in = A*cos(w*t).
    # Reads the module-level constants A, w, R, C.
    return ((A * cos(w * t) - x) / (R * C))
####################################
# Derivada segunda de x
def d2x(t, x):
    # Second derivative: d/dt of dx, substituting x' = (A*cos(w*t) - x)/(R*C).
    return ((-A * w * sin(w * t) - ((A * cos(w * t) - x) / (R * C))) / (R * C))
####################################
# Plot ejemplo
def plotaylor(h):
    """Integrate the RC example with the 2nd-order Taylor method and plot x(t).

    h is the integration step; T, dx, d2x are module-level.
    """
    x0 = zeros(1)
    t, xt = taylor(x0, 0, T, h, dx, d2x)
    fig, ax = plt.subplots()
    ax.plot(t, xt[0, :], label='x(t)')
    ax.legend()
    plt.title('Ejercicio')
    plt.show()
####################################
# Estimación error
def esterrorejemplo(h):
    """Estimate the error at t = T by step halving (Richardson):
    |x_h(T) - x_{h/2}(T)| / (2**i - 1) for a method of order i."""
    # i is the order of the Taylor method used below (dx, d2x -> order 2);
    # change it here if you use a different number of derivatives.
    i = 2
    x0 = zeros(1)
    t, xt1 = taylor(x0, 0, T, h, dx, d2x)
    t, xt2 = taylor(x0, 0, T, h/2, dx, d2x)
    eet = abs(xt1[0, -1] - xt2[0, -1]) / ((2**i)-1)
    return eet
plotaylor(T / 10000)
print(esterrorejemplo(T / 10000))
| 27.634328 | 93 | 0.491493 | 542 | 3,703 | 3.357934 | 0.328413 | 0.010989 | 0.015385 | 0.024176 | 0.389011 | 0.36044 | 0.36044 | 0.332967 | 0.332967 | 0.332967 | 0 | 0.033757 | 0.320011 | 3,703 | 133 | 94 | 27.842105 | 0.689039 | 0.432352 | 0 | 0.313725 | 0 | 0 | 0.008409 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.078431 | 0.039216 | 0.294118 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8dfb78e26b67882837b41546d35e46c07255069 | 2,617 | py | Python | project/src/core/dao.py | romulocollopy/exchange_rates | f345d135a20993bcd94ec432ad498911cf2e1214 | [
"MIT"
] | 2 | 2019-02-10T17:57:06.000Z | 2019-02-10T18:38:35.000Z | project/src/core/dao.py | romulocollopy/exchange_rates | f345d135a20993bcd94ec432ad498911cf2e1214 | [
"MIT"
] | null | null | null | project/src/core/dao.py | romulocollopy/exchange_rates | f345d135a20993bcd94ec432ad498911cf2e1214 | [
"MIT"
] | null | null | null | import datetime
from itertools import chain
from django.conf import settings
import grequests
from .models import DailyExchangeRate
# Currencies fetched from the API, quoted against USD.
CURRENCIES = ['BRL', 'ARS', 'EUR']

# Base URL for currencylayer's historical endpoint; a &date=YYYY-MM-DD
# query parameter is appended per request.
# BUGFIX: the query parameter name was mangled to '¤cies' ("&curren"
# collapsed into the HTML entity for ¤); restored to '&currencies'.
CURRENCY_LAYER_BASE = (
    'http://apilayer.net/api/historical'
    '?access_key={}'
    '&currencies={}'
).format(
    settings.CURRENCYLAYER_API_KEY,
    ','.join(CURRENCIES)
)
class DAO(object):
    """Data-access object for daily USD exchange rates.

    Serves rates from the local DB and transparently backfills any
    missing days from the currencylayer API.

    BUGFIX: the helper methods defined without `self` are now proper
    @staticmethods; previously they only worked when accessed through
    the class object and broke on instance access.
    """

    @classmethod
    def get_rates_interval(cls, start_date, end_date):
        """Return one DailyExchangeRate per day in [start_date, end_date],
        sorted by date, fetching any missing days from the API."""
        number_of_days = cls._get_number_of_days(start_date, end_date)
        db_rates = cls.get_rates_interval_from_db(start_date, end_date)
        if len(db_rates) == number_of_days:
            return db_rates
        api_rates = cls.retrieve_missing_from_api(db_rates, start_date, end_date)
        return sorted(chain(db_rates, api_rates), key=lambda rate: rate.date)

    @staticmethod
    def get_rates_interval_from_db(start_date, end_date):
        """Query rates already stored in the DB (inclusive date range)."""
        return DailyExchangeRate.objects.filter(
            date__gte=start_date,
            date__lte=end_date
        )

    @classmethod
    def retrieve_missing_from_api(cls, db_rates, start_date, end_date):
        """Fetch and persist rates for the days absent from db_rates."""
        missing_dates = cls._get_missing_dates(db_rates, start_date, end_date)
        return cls.get_from_currencylayer(missing_dates)

    @classmethod
    def _get_missing_dates(cls, db_rates, start_date, end_date):
        """Dates in the interval with no corresponding row in db_rates."""
        date_delta = cls._get_number_of_days(start_date, end_date)
        all_dates = {start_date + datetime.timedelta(days=offset)
                     for offset in range(date_delta)}
        db_dates = {rate.date for rate in db_rates}
        return all_dates - db_dates

    @classmethod
    def get_from_currencylayer(cls, missing_dates):
        """Download quotes for missing_dates and bulk-insert them."""
        responses = cls._get_responses(missing_dates)
        daily_exchange_rates = []
        for response in responses:
            data = response.json()
            year, month, day = (int(part) for part in data['date'].split('-'))
            daily_exchange_rates.append(DailyExchangeRate(
                date=datetime.date(year, month, day),
                timestamp=data['timestamp'],
                brl=data['quotes']['USDBRL'],
                ars=data['quotes']['USDARS'],
                eur=data['quotes']['USDEUR'],
            ))
        return DailyExchangeRate.objects.bulk_create(daily_exchange_rates)

    @staticmethod
    def _get_responses(missing_dates):
        """Issue all API requests concurrently via grequests."""
        request_set = [
            grequests.get("{}&date={:%Y-%m-%d}".format(CURRENCY_LAYER_BASE, date))
            for date in missing_dates
        ]
        return grequests.map(request_set)

    @staticmethod
    def _get_number_of_days(start_date, end_date):
        """Inclusive day count of [start_date, end_date]."""
        return (end_date - start_date).days + 1
| 31.914634 | 82 | 0.645013 | 331 | 2,617 | 4.755287 | 0.277946 | 0.074333 | 0.076239 | 0.101652 | 0.184879 | 0.173443 | 0.144219 | 0.111182 | 0.091487 | 0 | 0 | 0.000511 | 0.252579 | 2,617 | 81 | 83 | 32.308642 | 0.804192 | 0 | 0 | 0.060606 | 0 | 0 | 0.053878 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106061 | false | 0 | 0.075758 | 0.030303 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8e55df5a344d7bce1a9bee982ad1a59b43e42ad | 962 | py | Python | tests/test_jmfdloader.py | irukafe/jmfdtk | e25ce3d9edccd7aa46079caddd88bacfe0326698 | [
"MIT"
] | null | null | null | tests/test_jmfdloader.py | irukafe/jmfdtk | e25ce3d9edccd7aa46079caddd88bacfe0326698 | [
"MIT"
] | null | null | null | tests/test_jmfdloader.py | irukafe/jmfdtk | e25ce3d9edccd7aa46079caddd88bacfe0326698 | [
"MIT"
] | null | null | null | from jmfdtk import load_jmfd, JMFD_PATH
def test_load_jmfd():
    """Loading the JMFD yields the expected per-foundation word counts
    and code-to-foundation mapping."""
    df, foundation = load_jmfd(JMFD_PATH)

    expected_counts = {
        'HarmVirtue': 51, 'HarmVice': 93,
        'FairnessVirtue': 43, 'FairnessVice': 34,
        'IngroupVirtue': 99, 'IngroupVice': 42,
        'AuthorityVirtue': 130, 'AuthorityVice': 52,
        'PurityVirtue': 90, 'PurityVice': 88,
        'MoralityGeneral': 43,
    }
    expected_codes = {
        '01': 'HarmVirtue', '02': 'HarmVice',
        '03': 'FairnessVirtue', '04': 'FairnessVice',
        '05': 'IngroupVirtue', '06': 'IngroupVice',
        '07': 'AuthorityVirtue', '08': 'AuthorityVice',
        '09': 'PurityVirtue', '10': 'PurityVice',
        '11': 'MoralityGeneral',
    }

    for name, count in df['foundation'].value_counts().items():
        assert expected_counts[name] == count, '{} count does not match.'.format(name)
    assert foundation == expected_codes
| 24.666667 | 66 | 0.534304 | 89 | 962 | 5.651685 | 0.629213 | 0.047714 | 0.047714 | 0.063618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067873 | 0.310811 | 962 | 38 | 67 | 25.315789 | 0.690799 | 0 | 0 | 0 | 0 | 0 | 0.334719 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 1 | 0.03125 | false | 0 | 0.03125 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8e630e5ae041622841e532be7884fd0a994dee3 | 1,239 | py | Python | signify/check.py | romab/python-signify | 1486d191f4e481e655df1093e998879c5c627b0c | [
"MIT"
] | 11 | 2016-01-02T07:17:07.000Z | 2021-02-23T00:16:54.000Z | signify/check.py | romab/python-signify | 1486d191f4e481e655df1093e998879c5c627b0c | [
"MIT"
] | 2 | 2017-02-25T18:07:44.000Z | 2017-03-05T21:31:35.000Z | signify/check.py | romab/python-signify | 1486d191f4e481e655df1093e998879c5c627b0c | [
"MIT"
] | 2 | 2017-02-24T13:47:51.000Z | 2018-08-23T13:54:27.000Z | # -*- coding: utf-8 -*-
# Copyright (c) Björn Edström <be@bjrn.se> 2015. See LICENSE for details.
import hashlib
import re
import os
# Read files in 64 KiB chunks to bound memory use.
BUFSIZE = 64*1024

# Map the algorithm tag used in checksum files to a hashlib constructor.
ALGO_TO_CLS = {
    'SHA256': hashlib.sha256,
    'SHA512': hashlib.sha512,
}


def hash_file(hashobj, path):
    """Feed the entire file at `path` into `hashobj`, chunk by chunk.

    Returns True on completion.
    """
    with open(path, 'rb') as fobj:
        while True:
            buf = fobj.read(BUFSIZE)
            if not buf:
                # EOF.  BUGFIX: the original tested `buf != BUFSIZE`
                # (bytes vs int, always True), so only the first chunk
                # of files larger than BUFSIZE was ever hashed.
                break
            hashobj.update(buf)
    return True
def openbsd_sha_files(algo_str, root, files):
    """Produce OpenBSD-style checksum lines for `files` relative to `root`."""
    hash_cls = ALGO_TO_CLS[algo_str]
    lines = []
    for rel_path in files:
        hashobj = hash_cls()
        hash_file(hashobj, os.path.join(root, rel_path))
        lines.append('%s (%s) = %s' % (algo_str, rel_path, hashobj.hexdigest()))
    return '\n'.join(lines) + '\n'
def checkfiles(root, checkfile):
    """Verify files listed in an OpenBSD-style checksum document.

    Yields (path, status) pairs, where status is True/False for a digest
    match/mismatch, or the exception raised while hashing the file.
    """
    # Each line looks like: "SHA256 (relative/path) = hexdigest"
    for algo, path, ref_digest in re.findall(
            r'^(\S+) [(]([^)]+)[)] = ([0-9a-fA-F]+)$', checkfile, re.M):
        hash_cls = ALGO_TO_CLS[algo]
        hashobj = hash_cls()
        try:
            hash_file(hashobj, os.path.join(root, path))
            digest = hashobj.hexdigest()
            # Case-insensitive digest comparison.
            status = digest.upper() == ref_digest.upper()
        except Exception as e:
            status = e
        yield (path, status)
b8e646279dffa998c6fe41d3d3b5e907df6a8bdd | 3,110 | py | Python | examples/ev3/screen_extra/main.py | thesynman/pybricks-api | fbbb81caf0703d3d862d5417416adb9295754de0 | [
"MIT"
] | 51 | 2020-04-02T10:03:45.000Z | 2022-03-27T23:49:39.000Z | examples/ev3/screen_extra/main.py | thesynman/pybricks-api | fbbb81caf0703d3d862d5417416adb9295754de0 | [
"MIT"
] | 77 | 2020-03-22T17:32:14.000Z | 2022-03-28T18:02:43.000Z | examples/ev3/screen_extra/main.py | thesynman/pybricks-api | fbbb81caf0703d3d862d5417416adb9295754de0 | [
"MIT"
] | 25 | 2020-03-18T23:35:17.000Z | 2022-01-01T12:52:01.000Z | #!/usr/bin/env pybricks-micropython
import math
from pybricks.hubs import EV3Brick
from pybricks.parameters import Color
from pybricks.tools import wait
from pybricks.media.ev3dev import Font, Image
# Initialize the EV3 brick (gives access to ev3.screen below)
ev3 = EV3Brick()

# SPLIT SCREEN ################################################################

# Make a sub-image for the left half of the screen
# (sub=True shares pixels with ev3.screen instead of copying them)
left = Image(ev3.screen, sub=True, x1=0, y1=0,
             x2=ev3.screen.width // 2 - 1, y2=ev3.screen.height - 1)

# Make a sub-image for the right half of the screen
right = Image(ev3.screen, sub=True, x1=ev3.screen.width // 2, y1=0,
              x2=ev3.screen.width - 1, y2=ev3.screen.height - 1)

# Use a monospaced font so that text is vertically aligned when we print
# fixed-width numeric columns on the right panel
right.set_font(Font(size=8, monospace=True))
# Graphing y = sin(x)
def f(x):
    """The function being graphed on the left panel: y = sin(x)."""
    y = math.sin(x)
    return y
# Scrolling oscilloscope-style plot of y = sin(x) on the left panel,
# printing every 10th (x, y) sample on the right panel.
for t in range(200):
    # Graph on left side
    # Scale t to x-axis and compute previous/current y values
    x0 = (t - 1) * 2 * math.pi / left.width
    y0 = f(x0)
    x1 = t * 2 * math.pi / left.width
    y1 = f(x1)
    # Scale y values ([-1, 1], y up) to screen coordinates (pixels, y down)
    sy0 = (-y0 + 1) * left.height / 2
    sy1 = (-y1 + 1) * left.height / 2
    # Shift the current graph to the left one pixel
    left.draw_image(-1, 0, left)
    # Fill the last column with white to erase the previous plot point
    left.draw_line(left.width - 1, 0, left.width - 1, left.height - 1, 1, Color.WHITE)
    # Draw the new value of the graph in the last column
    left.draw_line(left.width - 2, int(sy0), left.width - 1, int(sy1), 3)
    # Print every 10th value on right side (two 10-char fixed-width columns)
    if t % 10 == 0:
        right.print('{:10.2f}{:10.2f}'.format(x1, y1))
    wait(100)
# SPRITE ANIMATION ############################################################

# Copy of screen for double-buffering (draw off-screen, then blit at once
# to avoid visible flicker)
buf = Image(ev3.screen)

# Load images from file
bg = Image('background.png')
sprite = Image('sprite.png')

# Number of cells in each sprite animation
NUM_CELLS = 8

# Each cell in the sprite is 75 x 100 pixels
CELL_WIDTH, CELL_HEIGHT = 75, 100

# Get sub-images for each individual cell
# This is more efficient than loading individual images
# (row 0 of the sprite sheet = walking right, row 1 = walking left)
walk_right = [Image(sprite, sub=True, x1=x * CELL_WIDTH, y1=0,
                    x2=(x + 1) * CELL_WIDTH - 1, y2=CELL_HEIGHT - 1)
              for x in range(NUM_CELLS)]
walk_left = [Image(sprite, sub=True, x1=x * CELL_WIDTH, y1=CELL_HEIGHT,
                   x2=(x + 1) * CELL_WIDTH - 1, y2=2 * CELL_HEIGHT - 1)
             for x in range(NUM_CELLS)]

# Walk from left to right
for x in range(-100, 200, 2):
    # Start with the background image
    buf.draw_image(0, 0, bg)
    # Draw the current sprite - purple is treated as transparent
    buf.draw_image(x, 5, walk_right[x // 5 % NUM_CELLS], Color.PURPLE)
    # Copy the double-buffer to the screen
    ev3.screen.draw_image(0, 0, buf)
    # 20 frames per second
    wait(50)

# Walk from right to left (same loop, mirrored cells)
for x in range(200, -100, -2):
    buf.draw_image(0, 0, bg)
    buf.draw_image(x, 5, walk_left[x // 5 % NUM_CELLS], Color.PURPLE)
    ev3.screen.draw_image(0, 0, buf)
    wait(50)

# Hold the final frame briefly before the program exits
wait(1000)
| 29.619048 | 86 | 0.623473 | 511 | 3,110 | 3.739726 | 0.287671 | 0.047096 | 0.018838 | 0.023025 | 0.289901 | 0.251177 | 0.10989 | 0.069074 | 0.069074 | 0.035583 | 0 | 0.059826 | 0.226045 | 3,110 | 104 | 87 | 29.903846 | 0.734109 | 0.322187 | 0 | 0.166667 | 0 | 0 | 0.020439 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.104167 | 0.020833 | 0.145833 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8e71229b5e81e54aae093ac996553be3f2deac6 | 8,652 | py | Python | tests/validators_basic_tests.py | derekjamescurtis/veritranspay | 0367f8b261293e49ee8a6e395f6c44455212cf7b | [
"BSD-3-Clause"
] | 6 | 2015-07-12T09:46:59.000Z | 2017-06-19T18:38:12.000Z | tests/validators_basic_tests.py | derekjamescurtis/veritranspay | 0367f8b261293e49ee8a6e395f6c44455212cf7b | [
"BSD-3-Clause"
] | 8 | 2015-01-21T17:00:42.000Z | 2017-07-06T05:26:30.000Z | tests/validators_basic_tests.py | derekjamescurtis/veritranspay | 0367f8b261293e49ee8a6e395f6c44455212cf7b | [
"BSD-3-Clause"
] | 6 | 2015-07-21T16:49:57.000Z | 2017-07-05T07:55:35.000Z | '''
Validation logic tests for our simple validators.
These generally serve as building block for more complex validator
types (but not always. In some cases they're used directly).
All the validators live together in the veritranspay.validators module.
'''
from random import randint
import unittest
from faker import Faker
from veritranspay import validators
fake = Faker()
class ValidatorBase_UnitTests(unittest.TestCase):
    """Unit tests for veritranspay.validators.ValidatorBase."""

    def test_init_accepts_any_args(self):
        """__init__ tolerates arbitrary positional/keyword args, or none."""
        # arbitrary positional arguments (mixed text and numbers)
        positional = (fake.words(fake.random_digit()) +
                      [fake.random_number() for _ in range(randint(5, 10))])
        validators.ValidatorBase(*positional)
        # arbitrary keyword arguments
        keyword = fake.pydict(randint(5, 10))
        validators.ValidatorBase(**keyword)
        # no arguments at all
        validators.ValidatorBase()
        # reaching this point without an exception is the success condition
        self.assertTrue(True)

    def test_validate_accepts_any_single_arg(self):
        """validate() takes exactly one argument (None included) and
        returns None on a successful call."""
        v = validators.ValidatorBase()
        # zero arguments is a TypeError
        with self.assertRaises(TypeError):
            v.validate()
        # two arguments is a TypeError
        extra = fake.words(2)
        with self.assertRaises(TypeError):
            v.validate(*extra)
        # successful calls return nothing
        self.assertIsNone(v.validate(value=fake.word()))
        self.assertIsNone(v.validate(None))
class DummyValidator_UnitTests(ValidatorBase_UnitTests):
    """Unit tests for veritranspay.validators.DummyValidator.

    DummyValidator shares ValidatorBase's contract, so the inherited
    tests are re-run unchanged.
    """
class RequiredValidator_UnitTests(unittest.TestCase):
    """Unit tests for veritranspay.validators.RequiredValidator."""

    def test_is_required_default_true(self):
        """is_required defaults to True when not passed to __init__."""
        validator = validators.RequiredValidator()
        self.assertTrue(validator.is_required)

    def test_validate_None_raises_ValidationError(self):
        """validate(None) raises ValidationError when is_required is True."""
        validator = validators.RequiredValidator()
        with self.assertRaises(validators.ValidationError):
            validator.validate(None)

    def test_always_validates_if_is_required_eq_False(self):
        """validate(None) succeeds when is_required is False."""
        validators.RequiredValidator(is_required=False).validate(None)

    def test_non_None_values_always_pass(self):
        """Any non-None value passes validation whether required or not."""
        required = validators.RequiredValidator()
        optional = validators.RequiredValidator(is_required=False)
        # the first few values boolean-evaluate false on purpose, to make
        # sure validation is not doing a plain truthiness check
        samples = ([],
                   False,
                   '',
                   fake.text(),
                   fake.pydict(),
                   )
        for sample in samples:
            required.validate(sample)
            optional.validate(sample)

    def test_validate_returns_None(self):
        """A passing validate() call returns None."""
        validator = validators.RequiredValidator()
        self.assertIsNone(validator.validate(fake.pystr()))
class LengthValidator_UnitTests(unittest.TestCase):
    """Unit tests for veritranspay.validators.LengthValidator."""

    @staticmethod
    def _random_letters(count):
        # helper: a random string of exactly *count* letters
        return ''.join([fake.random_letter() for _ in range(count)])

    def test_invalid_init_args_raise_ValueError(self):
        """__init__ rejects a max_length smaller than min_length."""
        with self.assertRaises(ValueError):
            validators.LengthValidator(min_length=10, max_length=5)

    def test_max_enforced(self):
        """max_length alone rejects over-long values and accepts the rest
        (including '' and None)."""
        v = validators.LengthValidator(max_length=10)
        for ok in ('', None, self._random_letters(5), self._random_letters(10)):
            self.assertIsNone(v.validate(ok))
        with self.assertRaises(validators.ValidationError):
            v.validate(self._random_letters(11))

    def test_min_enforced(self):
        """min_length alone rejects too-short values, including ''."""
        v = validators.LengthValidator(min_length=5)
        self.assertIsNone(v.validate(self._random_letters(5)))
        for bad in ('', self._random_letters(3)):
            with self.assertRaises(validators.ValidationError):
                v.validate(bad)

    def test_min_max_enforced(self):
        """min_length and max_length are both applied when both are given."""
        v = validators.LengthValidator(min_length=5, max_length=5)
        self.assertIsNone(v.validate(self._random_letters(5)))
        for bad in (self._random_letters(4), self._random_letters(6)):
            with self.assertRaises(validators.ValidationError):
                v.validate(bad)
class RegexValidator_UnitTests(unittest.TestCase):
    """Unit tests for veritranspay.validators.RegexValidator."""

    def test_pattern_matching(self):
        """Values are accepted or rejected according to the regex pattern."""
        v = validators.RegexValidator(pattern=r'^\d+$')
        letters_only = ''.join([fake.random_letter() for _ in range(10)])
        digits_only = ''.join([str(fake.random_digit()) for _ in range(10)])
        with self.assertRaises(validators.ValidationError):
            v.validate(letters_only)
        self.assertIsNone(v.validate(digits_only))
class StringValidator_UnitTests(unittest.TestCase):
    """Unit tests for veritranspay.validators.StringValidator."""

    def test_strings_accepted(self):
        """Any string value (word or sentence) passes validation."""
        v = validators.StringValidator()
        samples = fake.words(randint(5, 10)) + fake.sentences(randint(5, 10))
        for sample in samples:
            self.assertIsNone(v.validate(sample))

    def test_numbers_rejected(self):
        """Numeric values raise a ValidationError."""
        v = validators.StringValidator()
        with self.assertRaises(validators.ValidationError):
            v.validate(fake.random_number())
class NumericValidator_UnitTests(unittest.TestCase):
    """Unit tests for veritranspay.validators.NumericValidator."""

    def test_numbers_accepted(self):
        """floats, ints (and py2 longs) should all pass validation."""
        v = validators.NumericValidator()
        good_float = fake.pyfloat()
        good_int = fake.pyint()
        # BUGFIX: the previous ``hasattr(__builtins__, 'long')`` probe is
        # unreliable -- ``__builtins__`` is a dict (not a module) in imported
        # modules, so the py2 branch could never be selected there.
        # Probing the name directly works under both interpreters.
        try:
            good_long = long(good_int)  # noqa: F821 -- py2 only
        except NameError:
            # python 3 removed long (ints are unbounded)
            good_long = fake.pyint()
        for good in [good_float, good_int, good_long]:
            self.assertIsNone(v.validate(good))

    def test_letters_rejected(self):
        """str values (even stringified numbers) raise a ValidationError."""
        v = validators.NumericValidator()
        bad_stringified_int = str(fake.pyint())
        bad_letters = fake.word()
        for bad in [bad_stringified_int, bad_letters]:
            with self.assertRaises(validators.ValidationError):
                v.validate(bad)
| 34.608 | 79 | 0.635922 | 1,004 | 8,652 | 5.303785 | 0.222112 | 0.032113 | 0.020657 | 0.033803 | 0.333146 | 0.258592 | 0.231362 | 0.210329 | 0.125258 | 0.125258 | 0 | 0.005842 | 0.268031 | 8,652 | 249 | 80 | 34.746988 | 0.834991 | 0.249191 | 0 | 0.244094 | 0 | 0 | 0.001462 | 0 | 0 | 0 | 0 | 0 | 0.165354 | 1 | 0.125984 | false | 0.015748 | 0.031496 | 0 | 0.212598 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8e83e94162637704558794922c4ae072258aba9 | 2,150 | py | Python | backend/edw/tasks/update_related_data_marts.py | MMotionMan/django-edw | 0f686429d29e0f40409a3b2318664973b2844c08 | [
"BSD-3-Clause"
] | 4 | 2019-09-18T05:51:12.000Z | 2020-10-23T08:50:00.000Z | backend/edw/tasks/update_related_data_marts.py | Vvvnukova/django-edw | 18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f | [
"BSD-3-Clause"
] | 10 | 2020-04-29T11:46:44.000Z | 2022-03-11T23:38:27.000Z | backend/edw/tasks/update_related_data_marts.py | Vvvnukova/django-edw | 18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f | [
"BSD-3-Clause"
] | 13 | 2020-04-09T07:49:48.000Z | 2022-03-02T07:06:28.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from celery import shared_task
from edw.models.entity import EntityModel
from edw.models.data_mart import DataMartModel
from edw.models.related import EntityRelatedDataMartModel
@shared_task(name='update_entities_related_data_marts')
def update_entities_related_data_marts(entities_ids, to_set_datamart_ids, to_unset_datamart_ids):
    """
    Celery task: attach and/or detach data-marts for a batch of entities.

    :param entities_ids: ids of the entities to update
    :param to_set_datamart_ids: DataMart ids to attach to every entity
    :param to_unset_datamart_ids: DataMart ids to detach from every entity
    :return: summary dict echoing the inputs plus the entity/datamart ids
        that did not match any existing row
    """
    does_not_exist_entities_ids = []
    does_not_exist_datamarts_ids = []
    # --- attach phase ---
    if to_set_datamart_ids:
        to_set_related_data_marts = DataMartModel.objects.filter(id__in=to_set_datamart_ids)
        # record the requested ids that matched no DataMart row
        # (evaluating the queryset here also populates its result cache)
        does_not_exist_datamarts_ids.extend(
            list(
                set(to_set_datamart_ids) - set([i.id for i in to_set_related_data_marts])
            )
        )
        if len(to_set_related_data_marts):
            for entity_id in entities_ids:
                try:
                    entity = EntityModel.objects.get(id=entity_id)
                except EntityModel.DoesNotExist:
                    does_not_exist_entities_ids.append(entity_id)
                else:
                    # get_or_create keeps the attach idempotent per pair
                    for data_mart in to_set_related_data_marts:
                        EntityRelatedDataMartModel.objects.get_or_create(
                            entity=entity, data_mart=data_mart
                        )
    # --- detach phase ---
    if to_unset_datamart_ids:
        to_unset_related_data_marts = DataMartModel.objects.filter(id__in=to_unset_datamart_ids)
        does_not_exist_datamarts_ids.extend(
            list(
                set(to_unset_datamart_ids) - set([i.id for i in to_unset_related_data_marts])
            )
        )
        if len(to_unset_related_data_marts):
            # bulk delete of every matching relation in one query
            EntityRelatedDataMartModel.objects.filter(
                entity__id__in=entities_ids,
                data_mart__in=to_unset_related_data_marts
            ).delete()
    return {
        'entities_ids': entities_ids,
        'to_set_datamart_ids': to_set_datamart_ids,
        'to_unset_datamart_ids': to_unset_datamart_ids,
        'does_not_exist_entities_ids': does_not_exist_entities_ids,
        'does_not_exist_datamarts_ids': does_not_exist_datamarts_ids
    }
| 39.090909 | 97 | 0.671163 | 268 | 2,150 | 4.847015 | 0.201493 | 0.101617 | 0.123172 | 0.092379 | 0.688992 | 0.511162 | 0.378753 | 0.350269 | 0.350269 | 0.211701 | 0 | 0.000635 | 0.267907 | 2,150 | 54 | 98 | 39.814815 | 0.824651 | 0.009767 | 0 | 0.086957 | 0 | 0 | 0.066291 | 0.051716 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.108696 | 0 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8e9b6fcea8c94eddf8cb51b907af34eabf6dc64 | 702 | py | Python | spotpuppy/utils/pid_control.py | JoshPattman/Spot-Puppy-Lib | 90172c269ccaf7feefe55257606e0c519871a66d | [
"MIT"
] | 1 | 2021-11-16T13:24:16.000Z | 2021-11-16T13:24:16.000Z | spotpuppy/utils/pid_control.py | JoshPattman/spotpuppy | 90172c269ccaf7feefe55257606e0c519871a66d | [
"MIT"
] | null | null | null | spotpuppy/utils/pid_control.py | JoshPattman/spotpuppy | 90172c269ccaf7feefe55257606e0c519871a66d | [
"MIT"
] | null | null | null | import time
class pid_controller:
    """
    A simple real-time PID controller.

    Gains are set at construction; the time step is measured with
    time.time() between successive update() calls.
    """

    def __init__(self, Kp, Ki, Kd):
        """
        :param Kp: proportional gain
        :param Ki: integral gain
        :param Kd: derivative gain
        """
        self.Kp = Kp
        self.Ki = Ki
        self.Kd = Kd
        self.last_error = 0
        self.integral = 0
        self.reset_time()
        self.target = 0

    def update(self, val):
        """
        Feed a new measurement and return the control output.

        :param val: the current measured value
        :return: Kp*error + Ki*integral + Kd*derivative
        """
        t = time.time()
        dt = t - self.last_time
        self.last_time = t
        error = self.target - val
        # BUGFIX: two calls landing on the same clock tick give dt == 0,
        # which previously raised ZeroDivisionError; skip the derivative
        # term in that case.
        d = (error - self.last_error) / dt if dt > 0 else 0.0
        self.integral += error * dt
        self.last_error = error
        return (self.Kp * error) + (self.Ki * self.integral) + (self.Kd * d)

    def reset_time(self):
        """Restart the time base (call after a pause to avoid a huge dt)."""
        self.last_time = time.time()

    def set_target(self, target):
        """Set the setpoint that update() steers towards."""
        self.target = target
b8ea1cdb9d23d1904c677e4fd6ebdc6efb3aea52 | 1,154 | py | Python | LR.py | ajaymuktha/Linear_Regression | 87792064c9bc44a006d74ab1720f90b501d7b308 | [
"MIT"
] | null | null | null | LR.py | ajaymuktha/Linear_Regression | 87792064c9bc44a006d74ab1720f90b501d7b308 | [
"MIT"
] | null | null | null | LR.py | ajaymuktha/Linear_Regression | 87792064c9bc44a006d74ab1720f90b501d7b308 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import datasets
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:,:-1].values
Y = dataset.iloc[:,1].values
#splitting data into dataset and training set
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test = train_test_split(X,Y, test_size=1/3, random_state = 0)
#Fitting simple linear resgreesion to training set
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
regressor = LinearRegression()
regressor.fit(X_train,Y_train)
#predicting testset results
y_pred = regressor.predict(X_test)
print(r2_score(Y_test,y_pred))
#Visualising training set results
plt.scatter(X_train,Y_train, color = 'red')
plt.plot(X_train , regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
#Visualising test set results
plt.scatter(X_test,Y_test, color = 'red')
plt.plot(X_train , regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
| 26.227273 | 86 | 0.779029 | 184 | 1,154 | 4.722826 | 0.369565 | 0.048331 | 0.058688 | 0.041427 | 0.33832 | 0.289988 | 0.289988 | 0.289988 | 0.289988 | 0.289988 | 0 | 0.00677 | 0.103986 | 1,154 | 43 | 87 | 26.837209 | 0.833656 | 0.168111 | 0 | 0.384615 | 0 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8eca528897b018b340224a5411347cee1142be0 | 25,339 | py | Python | bak/google_bak01.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 4 | 2016-12-17T20:06:10.000Z | 2021-11-19T04:45:29.000Z | bak/google_bak01.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 11 | 2021-01-06T05:35:11.000Z | 2022-03-11T23:28:31.000Z | bak/google_bak01.py | thorwhalen/ut | 353a4629c35a2cca76ef91a4d5209afe766433b4 | [
"MIT"
] | 3 | 2015-06-12T10:44:16.000Z | 2021-07-26T18:39:47.000Z | __author__ = 'thorwhalen'
from bs4 import BeautifulSoup
import re
# import os.path
# from urllib2 import urlopen
# from ut.pfile import to
from lxml import etree
import tldextract
import ut.parse.util as util
from urllib.parse import urlparse, parse_qs
from ut.util import extract_section
# RE_HAS_NEW_LINE = re.compile('\n|\r')
# Regex fragments for pulling "number of results" style figures out of a page.
# BUGFIX: these patterns contained ``\d``/``\.`` escapes in non-raw strings,
# which emits DeprecationWarning on python 3.6+; raw strings give the exact
# same patterns without the warning.
RE_NUM_SEP = r"[,\.\s]"  # characters that may separate digit groups
RE_NUMBER = r"\d[\d,\.\s]+\d|\d"  # a separated multi-digit number, or one digit
CRE_NUMBER = re.compile(RE_NUMBER)
XP_NRESULT = '//*[@id="ab_ps_r"]/text()'
# 'center_col':{
# 'expand':{
# 'taw':{'attrs':{'id':'taw'},'name':'span'},
# 'res':{'attrs':{'id':'res'},'name':'div'},
# 'extrares':{'attrs':{'id':'extrares'},'name':'div'}
# }}}
##################################################################################
# IMPORTANT FUNCTIONS
##################################################################################
def mk_gresult_tag_dict(input):
    """
    mk_gresult_tag_dict(input)
    takes a soup, html string, or filename of google result htmls as an input
    and returns a dict containing components of the html we're interested in.

    Keys (all optional, prefixed with '_', present only when found):
    _resultStats, _center_col, _tads, _top_ads_list, _res, _topstuff,
    _spell, _search, _ires, _organic_results_list, _related_search,
    _rhs_block, _mbEnd, _nobr, _rhs_ads_list.  Values are the raw soup
    tags (or lists of tags); nested sections are looked up only inside
    their parent section.
    """
    input = util.x_to_soup(input)
    d = dict()
    # number of results ("About N results" banner)
    resultStats = input.find(name='div',attrs={'id':'resultStats'})
    if resultStats: d['_resultStats'] = resultStats
    # center_col: top ads + organic results + related searches
    center_col = input.find(name='div',attrs={'id':'center_col'})
    if center_col:
        d['_center_col'] = center_col
        # tads: container of the ads shown above the organic results
        tads = d['_center_col'].find(name='div',attrs={'id':'tads'})
        if tads:
            d['_tads'] = tads
            # top_ads: one <li> per top ad
            top_ads = d['_tads'].findAll('li')
            if top_ads:
                d['_top_ads_list'] = top_ads
        # res: the organic results container
        res = d['_center_col'].find(name='div',attrs={'id':'res'})
        if res:
            d['_res'] = res
            # topstuff: "showing results for / did you mean" block
            topstuff = d['_res'].find(name='div',attrs={'id':'topstuff'})
            if topstuff:
                d['_topstuff'] = topstuff
                # spell: the spelling-suggestion link, when present
                spell = d['_topstuff'].find(name='a',attrs={'class':'spell'})
                if spell: d['_spell'] = spell
            # search > ires > one <li> per organic result
            search = d['_res'].find(name='div',attrs={'id':'search'})
            if search:
                d['_search'] = search
                ires = d['_search'].find(name='div',attrs={'id':'ires'})
                if ires:
                    d['_ires'] = ires
                    # organicResults
                    organic_results = d['_ires'].findAll('li')
                    if organic_results:
                        d['_organic_results_list'] = organic_results
            # related_search: first <table> in whatever follows the results
            after_res = d['_res'].nextSibling
            if after_res:
                related_search = after_res.find('table')
                if related_search:
                    d['_related_search'] = related_search
    # rhs_block: the right-hand-side column (ads)
    rhs_block = input.find(name='div',attrs={'id':'rhs_block'})
    if rhs_block:
        d['_rhs_block'] = rhs_block
        rhs_ads = []  # initializing; filled from both possible containers
        # rhs_ads from mbEnd
        mbEnd = d['_rhs_block'].find(name='div',attrs={'id':'mbEnd'})
        if mbEnd:
            d['_mbEnd'] = mbEnd
            rhs_ads = rhs_ads + mbEnd.findAll('li')
        # rhs_ads from nobr
        nobr = d['_rhs_block'].find(name='ol',attrs={'class':'nobr'})
        if nobr:
            d['_nobr'] = nobr
            rhs_ads = rhs_ads + nobr.findAll('li')
        # puting rhs_ads in the dict
        if rhs_ads:
            d['_rhs_ads_list'] = rhs_ads
    # Okay, no more parsing wishes, return the dict d
    return d
def parse_tag_dict(tag_dict):
    """
    Turn the raw tag dict from mk_gresult_tag_dict() into plain parsed data.

    :param tag_dict: dict of '_'-prefixed soup tags/tag lists
    :return: dict with keys number_of_results, top_ads_list,
        organic_results_list, rhs_ads_list, related_search_list
        (lists are always present, possibly empty)

    Cleanup: a large commented-out earlier draft of this function was removed
    from the body.
    """
    d = {
        # NOTE(review): number_of_results is never populated here even though
        # mk_gresult_tag_dict extracts '_resultStats' -- confirm whether that
        # was intentional
        'number_of_results': None,
        'top_ads_list': [],
        'organic_results_list': [],
        'rhs_ads_list': [],
        'related_search_list': []
    }
    if '_top_ads_list' in tag_dict:
        for tag in tag_dict['_top_ads_list']:
            parsed = parse_ad(tag)
            if parsed:
                d['top_ads_list'].append(parsed)
    if '_organic_results_list' in tag_dict:
        for tag in tag_dict['_organic_results_list']:
            parsed = parse_organic_result(tag)
            if parsed:
                d['organic_results_list'].append(parsed)
    if '_rhs_ads_list' in tag_dict:
        for tag in tag_dict['_rhs_ads_list']:
            parsed = parse_ad(tag)
            if parsed:
                d['rhs_ads_list'].append(parsed)
    if '_related_search' in tag_dict:
        # NOTE(review): '_related_search' holds a single <table> tag, so this
        # iterates its *children* and parses each one -- confirm this is the
        # intended granularity (vs. parsing the table itself once)
        for tag in tag_dict['_related_search']:
            parsed = parse_related_search_list(tag)
            if parsed:
                d['related_search_list'].append(parsed)
    return d
def parse_organic_result(search_ires_li_instance):
    """
    Parse one organic-result <li> tag into a flat dict.

    Keys (all optional, present only when found): title_html, title_text,
    cite_text, domain, f_slp_text, st_text, osl, table_tds, the td0_*/td1_*
    fields for tabular (e.g. hotel meta) results, and organic_result_type:
        1 = default organic result
        2 = table whose cells look like a specific hotel meta listing
        3 = some other result containing a table
    An unparseable tag yields an empty dict.

    Idiom cleanup: the original rebuilt the dict with ``d = dict(d, **{...})``
    at every step; plain item assignment is equivalent and clearer.
    """
    d = dict()
    # title: keep only the last <h3 class="r"> found
    title_html = search_ires_li_instance.findAll(name='h3', attrs={'class': 'r'})
    if title_html:
        title_html = title_html[-1]
        d['title_html'] = title_html
        d['title_text'] = title_html.text  # the text of the title
    # <div class="s"> holds cite/snippet/extras; take the last one
    # (hopefully the one just after the last class='r')
    div_class_s = search_ires_li_instance.findAll(name='div', attrs={'class': 's'})
    if div_class_s:
        div_class_s = div_class_s[-1]
        cite_text = get_section(div_class_s, attrs={}, name='cite')
        if cite_text:
            d['cite_text'] = cite_text.text
            d['domain'] = tldextract.extract(cite_text.text).domain
        f_slp_text = get_section(div_class_s, attrs={'class': 'f slp'}, name='div')
        if f_slp_text:
            d['f_slp_text'] = f_slp_text.text
        st_text = get_section(div_class_s, attrs={'class': 'st'}, name='span')
        if st_text:
            st_text = st_text.text
            if st_text:
                d['st_text'] = st_text
        osl = get_section(div_class_s, attrs={'class': 'osl'}, name='div')
        if osl:
            d['osl'] = osl
        orp_table = get_section(div_class_s, attrs={}, name='table')
        if orp_table:
            # keep only the <td> cells that actually carry text
            table_tds = [x for x in orp_table.findAll('td') if x.text]
            d['table_tds'] = table_tds
            # the following is specific to single hotel hits; may not be
            # applicable to all orp_table.table_tds encountered
            if len(table_tds) >= 1:
                td = table_tds[0]
                span = td.find(name='span')
                if span:
                    d['td0_span_text'] = span.get_text(separator="\n", strip=True)
                fl = td.findAll(name='a', attrs={'class': 'fl'})
                if fl:
                    d['td0_fl'] = [x.get_text(separator="\n", strip=True) for x in fl]
            if len(table_tds) >= 2:
                td1 = table_tds[1]
                if td1:
                    d['td1'] = td1.find(name='a').get('href')
                    d['td1_text'] = td1.get_text(separator="\n", strip=True)
    # classify the result; nothing parsed at all leaves the dict empty
    if d:
        organic_result_type = 1  # default organic result type
        if 'table_tds' in d:
            if all(k in d for k in ['td0_fl', 'td1_text']):
                organic_result_type = 2  # a specific hotel google meta listing
            else:
                organic_result_type = 3  # something else with a table in it
        d['organic_result_type'] = organic_result_type
    return d
def parse_ad(rad):
    """
    Parse a single ad <li> tag (top or right-hand-side ad) into a dict.

    Keys (present only when found): dest_url, dest_url_parsed, adurl,
    adurl_domain, title, disp_url, disp_url_domain, ad_text_html,
    ad_text_line_1, ad_text_line_2, div_f, div_f_text.

    Idiom cleanup: the original rebuilt the dict with ``d = dict(d, **{...})``
    at every step; plain item assignment is equivalent and clearer.
    """
    d = dict()
    t = rad.find('h3').find('a')
    # destination url (google redirect) and its query-string parameters
    dest_url = t.get('href')
    if dest_url:
        d['dest_url'] = dest_url
        dest_url_parsed = parse_qs(dest_url)
        if dest_url_parsed:
            # parse_qs maps each key to a list; keep the first value only
            dest_url_parsed = {k: v[0] for k, v in dest_url_parsed.items()}
            if dest_url_parsed:
                d['dest_url_parsed'] = dest_url_parsed
                if 'adurl' in dest_url_parsed:
                    adurl = dest_url_parsed['adurl']
                    if adurl:
                        d['adurl'] = adurl
                        d['adurl_domain'] = tldextract.extract(adurl).domain
    title = t.getText()
    if title:
        d['title'] = title
    # displayed url (the visible url line under the title)
    disp_url = rad.find('div','kv')
    if disp_url:
        d['disp_url'] = disp_url.getText()
        d['disp_url_domain'] = tldextract.extract(d['disp_url']).domain
    # ad creative text: up to two lines separated by <br/>
    ad_text_html = rad.find('span','ac')
    if ad_text_html:
        # NOTE(review): under bs4 on python 3, renderContents() returns
        # bytes, which would break the str split/re.sub below -- confirm
        # which soup flavor this runs against
        d['ad_text_html'] = ad_text_html.renderContents()
        ad_text_lines = [re.sub(r"</?b>","",x) for x in d['ad_text_html'].split('<br/>')]
        if len(ad_text_lines) >= 1:
            d['ad_text_line_1'] = ad_text_lines[0]
            if len(ad_text_lines) >= 2:
                d['ad_text_line_2'] = ad_text_lines[1]
            else:
                d['ad_text_line_2'] = ''
        else:
            d['ad_text_line_1'] = ''
    div_f = rad.find('div','f')
    if div_f:
        d['div_f'] = div_f.renderContents()
        d['div_f_text'] = div_f.get_text('|||')
    return d
def parse_related_search_list(rel_search_tag):
    """
    Return the text of each <td> cell found under *rel_search_tag*.

    :param rel_search_tag: a soup tag supporting findAll('td')
    :return: list of cell texts, or [] when there are no cells

    Cleanup: removed the unused local ``d = dict()`` from the original.
    """
    rel_search_list = rel_search_tag.findAll('td')
    if rel_search_list:
        return [x.get_text() for x in rel_search_list]
    return []
#
# def mk_result_dict(input):
# """
# mk_result_dict(input)
# takes a soup, html string, or filename of google result htmls as an input
# and returns a dict containing components of the html we're interested in
# """
# input = util.x_to_soup(input)
# d = dict()
#
# # number of results
# resultStats = extract_section(input,attrs={'id':'resultStats'},name='div')
# if resultStats: d = dict(d,**{'resultStats':resultStats})
#
# # center_col
# center_col = extract_section(input,attrs={'id':'center_col'},name='div')
# if center_col:
# d = dict(d,**{'center_col':center_col})
# # tads
# tads = extract_section(d['center_col'],attrs={'id':'tads'},name='div')
# if tads:
# d = dict(d,**{'tads':tads})
# # top_ads
# top_ads = d['tads'].findAll('li')
# if top_ads:
# d = dict(d,**{'top_ads':top_ads})
# # res
# res = extract_section(d['center_col'],attrs={'id':'res'},name='div')
# if res:
# d = dict(d,**{'res':res})
# # searchInstead
# topstuff = extract_section(d['res'],attrs={'id':'topstuff'},name='div')
# if topstuff:
# d = dict(d,**{'topstuff':topstuff})
# # spell
# spell = extract_section(d['topstuff'],attrs={'class':'spell'},name='a')
# if spell: d = dict(d,**{'spell':spell})
# # search
# search = extract_section(d['res'],attrs={'id':'search'},name='div')
# if search:
# d = dict(d,**{'search':search})
# # ires
# ires = extract_section(d['search'],attrs={'id':'ires'},name='div')
# if ires:
# d = dict(d,**{'ires':ires})
# # organicResults
# organic_results = d['ires'].findAll('li')
# if organic_results:
# d = dict(d,**{'organic_results':organic_results})
# organic_results_parsed = []
# for org_res in d['organic_results']:
# org_result_parsed_item = organic_result_parser(org_res)
# if org_result_parsed_item:
# organic_results_parsed.append(org_result_parsed_item)
# if organic_results_parsed:
# d = dict(d,**{'organic_results_parsed':organic_results_parsed})
# # related_search
# t = [x.find(name='table') for x in d['center_col'].findAll(name='div')]
# t = [xx for xx in t if xx!=None]
# if t:
# related_search = t[0].findAll('td')
# d = dict(d,**{'related_search':related_search})
#
# # rhs_block
# rhs_block = extract_section(input,attrs={'id':'rhs_block'},name='div')
# if rhs_block:
# d = dict(d,**{'rhs_block':rhs_block})
# rhs_ads = [] # initializing
# # rhs_ads from mbEnd
# mbEnd = extract_section(d['rhs_block'],attrs={'id':'mbEnd'},name='div')
# if mbEnd:
# d = dict(d,**{'mbEnd':mbEnd})
# rhs_ads = rhs_ads + mbEnd.findAll('li')
# # rhs_ads from nobr
# nobr = extract_section(d['rhs_block'],attrs={'class':'nobr'},name='ol')
# if nobr:
# d = dict(d,**{'nobr':nobr})
# rhs_ads = rhs_ads + nobr.findAll('li')
# # puting rhs_ads in the dict
# if rhs_ads:
# d = dict(d,**{'rhs_ads':rhs_ads})
# # Okay, no more parsing wishes, return the dict d
# return d
# # below is the first version of organic_result_parser. It assumed there'd be only one class='r', but I found multiple in some special results
# def organic_result_parser(input):
# input = x_to_soup(input)
# d = dict()
# title_html = get_section(input,attrs={'class':'r'},name='h3')
# if title_html:
# d = dict(d,**{'title_html':title_html})
# d = dict(d,**{'title_text':title_html.text})
#
# div_class_s = get_section(input,attrs={'class':'s'},name='div')
# if div_class_s:
# cite_text = get_section(div_class_s,attrs={},name='cite')
# if cite_text: d = dict(d,**{'cite_text':cite_text.text})
#
# f_slp_text = get_section(div_class_s,attrs={'class':'f slp'},name='div')
# if f_slp_text: d = dict(d,**{'f_slp_text':f_slp_text.text})
#
# st_text = get_section(div_class_s,attrs={'class':'st'},name='span')
# if st_text: d = dict(d,**{'st_text':st_text.text})
#
# osl = get_section(div_class_s,attrs={'class':'osl'},name='div')
# if osl: d = dict(d,**{'osl':osl})
#
# tbody = get_section(div_class_s,attrs={},name='tbody')
# if tbody:
# tbody_tds = tbody.findAll('td')
# d = dict(d,**{'tbody_tds':tbody_tds})
#
# return d
##################################################################################
# UTILS
##################################################################################
##################################################################################
# OTHER FUNCTIONS
# TODO: move or remove (don't think we'll need these any more)
# TODO: But before (re)moving, consider the logic in these functions: May want to reuse some of it
##################################################################################
def mk_rhs_ads_dict(li_tag):
    """
    Extract the named sub-sections of a right-hand-side ad <li> tag.

    :param li_tag: the ad's <li> soup tag
    :return: dict with any of the keys 'h3', 'kv', 'f', 'extrares';
        sections that were not found are dropped

    Idiom cleanup: filter with ``is not None`` (was ``!= None``) and drop
    the unnecessary ``list()`` around ``items()``.
    """
    d = {
        'h3': extract_section(li_tag, attrs={'id': 'taw'}, name='h3'),
        'kv': extract_section(li_tag, attrs={'class': 'kv'}, name='div'),
        'f': extract_section(li_tag, attrs={'class': 'f'}, name='div'),
        'extrares': extract_section(li_tag, attrs={'class': 'ac'}, name='span')
    }
    return {i: j for i, j in d.items() if j is not None}
def expand_node(node, expand_def):
    """
    Expand *node* according to *expand_def*.

    expand_def may be either a string (treated as a div id) or a dict with
    an 'attrs' key and an optional 'name' key (tag name, default 'div').
    Any other expand_def yields None.
    """
    # TODO: exception throwing for unrecognized expand_def shapes
    if isinstance(expand_def, str):
        return extract_section(node, attrs={'id': expand_def}, name='div')
    if isinstance(expand_def, dict) and 'attrs' in expand_def:
        tag_name = expand_def['name'] if 'name' in expand_def else 'div'
        return extract_section(node, expand_def['attrs'], name=tag_name)
def rm_empty_dict_values(d):
    """
    Remove every key whose value is falsy (None, '', 0, [], ...) from *d*,
    in place.  Returns None, like the stdlib in-place mutators.
    """
    # collect first so we never mutate the dict while iterating it
    empty_keys = [k for k, v in d.items() if not v]
    for k in empty_keys:
        del d[k]
    # TODO: test if a dict comprehension returning a new dict would be
    # faster (that approach changes the interface from in-place mutation)
def mk_tag_dict(soup):
    """Build the root tag dict for *soup* and merge in known sub-sections.

    Bug fix: the original never returned *d*; since `dict(d, **...)` rebinds
    a local, the function silently discarded all of its work and returned
    None. The accumulated dict is now returned.

    Note: 'rhscol' appears twice in *to_expand* in the original; the second
    pass is a no-op (the 'rhscol' spec yields {}), so it is kept as-is.
    """
    d = root_dict(soup)
    to_expand = ['center_col', 'rhscol', 'taw', 'res', 'extrares', 'rhscol']
    for t in to_expand:
        if t in d:
            d = dict(d, **extract_tag_dict_from_node(d[t], t))
    return d
def root_dict(soup):
    """Return the top-level ('root') tag dict of *soup* (coerced to soup first)."""
    return extract_tag_dict_from_node(util.x_to_soup(soup), 'root')
def extract_tag_dict_from_node(node, dict_spec):
    """Extract a dict of named page sections from *node*.

    *dict_spec* selects one of several known Google-SERP layouts ('root',
    'rhs_ads', 'center_col', 'taw', 'ires', 'extrares', 'rhs_block');
    unknown specs yield {}. Keys whose section was not found (None) are
    removed from the result before returning.
    """
    if dict_spec=='root':
        appbar = extract_section(node,attrs={'id':'appbar'},name='div')
        rcnt = extract_section(node,attrs={'id':'rcnt'},name='div')
        # center_col and rhscol are nested inside the rcnt container.
        center_col = extract_section(rcnt,attrs={'id':'center_col'},name='div')
        rhscol = extract_section(rcnt,attrs={'id':'rhscol'},name='div')
        d = {
            'appbar':appbar,
            'center_col':center_col,
            'rhscol':rhscol
        }
    elif dict_spec=='rhs_ads':
        d = {
            'h3':extract_section(node,attrs={'id':'taw'},name='h3'),
            'kv':extract_section(node,attrs={'class':'kv'},name='div'),
            'f':extract_section(node,attrs={'class':'f'},name='div'),
            'extrares':extract_section(node,attrs={'class':'ac'},name='span')
        }
    elif dict_spec=='center_col':
        d = {
            'taw':extract_section(node,attrs={'id':'taw'},name='span'),
            'res':extract_section(node,attrs={'id':'res'},name='div'),
            'extrares':extract_section(node,attrs={'id':'extrares'},name='div')
        }
    elif dict_spec=='taw':
        d = {
            'tads':extract_section(node,attrs={'id':'tads'},name='div'), # top ads
            'cu':extract_section(node,attrs={'id':'cu'},name='div') # hotel finder
        }
    elif dict_spec=='ires':
        d = {
            'search':extract_section(node,attrs={'id':'search'},name='div') # organic search results
        }
    elif dict_spec=='extrares':
        d = {
            'brs':extract_section(node,attrs={'id':'brs'},name='div') # related searches
        }
    elif dict_spec=='rhs_block':
        d = {
            # NOTE(review): 'mbEnd' searches id 'brs' -- the same id as the
            # 'extrares' spec above; looks like a copy-paste slip, confirm
            # whether the intended id was 'mbEnd'.
            'mbEnd':extract_section(node,attrs={'id':'brs'},name='div'), # side ads
            'knop':extract_section(node,attrs={'id':'knop'},name='div'),
            'rhsvw vk_rhsc':extract_section(node,attrs={'id':'rhsvw vk_rhsc'},name='div') # hotel finder (specific)
        }
    else:
        d = {}
    d = {i:j for i,j in list(d.items()) if j != None} # remove keys with empty values
    return d
def get_section(soup, attrs=None, name='div', all=False):
    """
    gets the bs4.element.Tag (or list thereof) of a section specified by the attrs (dict or list of dicts)

    Parameters
    ----------
    soup : bs4 soup/tag to search in.
    attrs : dict of attributes, or a list of dicts that are followed
        successively (each step searches inside the previous match).
        Defaults to {} (fixes the original shared-mutable-default hazard).
    name : tag name to match at each step.
    all : when True, return every match (findAll) instead of the first.
        (Name kept for caller compatibility even though it shadows the builtin.)

    Returns
    -------
    A bs4 Tag, a list of Tags (all=True), or None.
    """
    if attrs is None:
        attrs = {}
    if not all:
        if isinstance(attrs, dict):
            return soup.find(name=name, attrs=attrs)
        # A list of attr dicts: narrow the match step by step.
        tag = soup
        for step_attrs in attrs:
            tag = tag.find(name=name, attrs=step_attrs)
        return tag
    if isinstance(attrs, dict):
        return soup.findAll(name=name, attrs=attrs)
    # A list of attr dicts combined with all=True is not supported.
    print("haven't coded this yet")
    return None
def num_of_results(input):
    """Return the number of Google results (delegates to the soup parser).

    The count is taken as the number just before the last "result" match in
    the appbar div, since other counts (e.g. "personal results") may appear
    earlier.
    """
    return num_of_results_soup01(input)
def top_elements(input):
    """Return the top-of-page elements (top ads, hotel finder, ...) that
    come before the web results."""
    soup = util.x_to_soup(input)
    return soup.findAll('span', attrs={'id': 'taw'})
def google_webResults(input):
    """Parse the organic web results (delegates to the BeautifulSoup parser)."""
    return google_webResults_soup(input)
def rh_ads(input):
    """
    right hand side ads

    Returns the <li> elements (stringified) found in the first <ol> of the
    rhs_block section, or an empty list when nothing is found.

    Fixes: the original left an unused ``output = []`` and fell through to
    an implicit None return even though its trailing comment promised an
    empty list.
    """
    soup = util.x_to_soup(input)
    rhs_block_element = rhs_block(soup)
    if len(rhs_block_element) != 0:
        ol_section = rhs_block_element[0].findAll('ol')
        if len(ol_section):
            # TODO: do we want string, unicode, or the tag itself as is?
            return [str(li) for li in ol_section[0].findAll('li')]
    return []
####################################
# MENU OF POSSIBLE PARSING FUNCTIONS
def num_of_results_lxml(file_spec):
    """
    num_of_results using lxml
    """
    parsed = etree.parse(util.x_to_file(file_spec), etree.HTMLParser())
    matches = parsed.xpath(XP_NRESULT)
    number_text = CRE_NUMBER.search(matches[-1]).group(0)
    return int(re.sub(RE_NUM_SEP, "", number_text))
def num_of_results_soup01(soup):
    """
    num_of_results using Beautifulsoup
    """
    soup = util.x_to_soup(soup)
    assert isinstance(soup, BeautifulSoup), "hey, I'm expecting soup!"
    appbar = get_section(soup, [{'id': 'appbar'}, {'id': 'topabar'}])
    matches = re.findall("(" + RE_NUMBER + ").*?result", appbar.text)
    return int(re.sub(RE_NUM_SEP, "", matches[-1]))
def google_webResults_soup(source):
    """
    google_webResults using Beautifulsoup
    """
    soup = util.x_to_soup(source)
    results = []
    for pos, li in enumerate(soup.findAll('li', attrs={'class': 'g'}), start=1):
        results.append({
            'pos': pos,
            'title': li.find('a'),
            'text': li.find('span', attrs={'class': 'st'}),
        })
    return results
##################################################################################
# Parsing sections from soup (returns soup bs4.element.Tag objects)
##################################################################################
########### Level 1
def topabar(soup):
    """All <div id="topabar"> elements of *soup*."""
    return soup.findAll('div', attrs={'id':'topabar'})
def tvcap(soup):
    """All <div id="tvcap"> elements of *soup*."""
    return soup.findAll('div', attrs={'id':'tvcap'})
def res__main(soup):
    """All <div id="res" role="main"> elements of *soup*."""
    return soup.findAll('div', attrs={'id':'res','role':'main'})
def extrares(soup):
    """All <div id="extrares"> elements of *soup*."""
    return soup.findAll('div', attrs={'id':'extrares'})
def rhs_block(soup):
    """All <div id="rhs_block"> elements of *soup*."""
    return soup.findAll('div', attrs={'id':'rhs_block'})
if __name__ == "__main__":
    # Smoke-test entry point; the exploratory (Python-2) driver below is commented out.
    print("you just ran ut.parse.google")
# is_none = lambda x : [xx==None for xx in x]
# index_of_trues = lambda x : [idx for idx in range(len(x)) if x[idx]==True]
# idx_of_nones_of = lambda x : index_of_trues(is_none(x))
# folder = '/D/Dropbox/dev/py/data/tmp'
# file_list = ['www hidden camera in hotel com.html',
# 'beekmann tower hotel in new york.html',
# 'campo prague hotel.html',
# 'b&b romantica venezia.html',
# '5 star hotels in salta argentina.html',
# '$100 hotel in amsterdam.html',
# '1 star hotel.html',
# 'albergo studi vernazza.html',
# 'asport.html']
# #file_list = os.listdir(folder)
# #file_list = filter ((lambda s: re.search(r"\.html$", s)), file_list)
# filepath_list = [os.path.join(folder,f) for f in file_list]
# my_parser = lambda f: mk_result_dict(f)
# # test if the parser returned any Nones for the filepath_list
# r = [my_parser(f) for f in filepath_list]
# idx = idx_of_nones_of(r)
# if len(idx)==0:
# print "all {} htmls had some data".format(len(filepath_list))
# else:
# print "{}/{} htmls didn't have any parsed data".format(len(idx),len(filepath_list))
# print "the indices of these are in the variable idx"
#
# # Check what keys were parse
# key_list = ['center_col', 'ires', 'mbEnd', 'nobr', 'organic_results', 'related_search', 'res', 'resultStats', 'rhs_ads', 'rhs_block', 'search', 'spell', 'tads', 'top_ads', 'topstuff']
# idx_of_items_not_having_key = dict()
# for key in key_list:
# t = [x[key] for x in r if x.has_key(key)]
# lidx = [x.has_key(key) for x in r]
# idx = [i for i,x in enumerate(lidx) if x==False]
# idx_of_items_not_having_key = dict(idx_of_items_not_having_key,**{key : [i for i,x in enumerate(lidx) if x==False]})
# print "{}/{} have a {}".format(len(t),len(r),key)
# print ""
# print "--> Indices of items not having specific keys may be found in the dict: idx_of_items_not_having_key"
# files_not_having_key = lambda key : [filepath_list[i] for i in idx_of_items_not_having_key[key]]
# print "--> Filenames of items not having specific keys may be found with the function: files_not_having_key(key)"
#
# # see files with missing key...
# key = 'rhs_ads'
# files_not_having_key('rhs_ads')
| 38.276435 | 189 | 0.557046 | 3,414 | 25,339 | 3.907147 | 0.11365 | 0.024665 | 0.017992 | 0.029313 | 0.452358 | 0.357148 | 0.294325 | 0.22408 | 0.188845 | 0.162231 | 0 | 0.003483 | 0.263546 | 25,339 | 661 | 190 | 38.334342 | 0.711323 | 0.411184 | 0 | 0.11875 | 0 | 0 | 0.130769 | 0.006375 | 0 | 0 | 0 | 0.003026 | 0.003125 | 1 | 0.075 | false | 0 | 0.021875 | 0.015625 | 0.184375 | 0.00625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8eddbfac7e564583fa94dddc5f82e826c8958f3 | 273 | py | Python | atlas-to-text.py | Chlumsky/shadron-jpeg | e131d68bc1ab9f12a088881a4c24189e4ceee7ea | [
"MIT"
] | null | null | null | atlas-to-text.py | Chlumsky/shadron-jpeg | e131d68bc1ab9f12a088881a4c24189e4ceee7ea | [
"MIT"
] | null | null | null | atlas-to-text.py | Chlumsky/shadron-jpeg | e131d68bc1ab9f12a088881a4c24189e4ceee7ea | [
"MIT"
] | null | null | null | from PIL import Image
from PIL import Image

# Dump the first channel of atlas.png as C-style rows of hex bytes,
# e.g. "{ 0x10, 0x20, },". Assumes getdata() yields indexable pixels
# (multi-channel image) -- TODO confirm for grayscale atlases.
im = Image.open("atlas.png")
pixels = list(im.getdata())
width, height = im.size

# Build each row with join instead of the original quadratic `text +=`.
rows = []
for y in range(height):
    row_pixels = pixels[y * width:(y + 1) * width]
    rows.append("{ " + "".join("0x%0.2x, " % pel[0] for pel in row_pixels) + "},")
# Trailing "\n" reproduces the original output exactly (it ended with a
# stripped, dangling row opener, leaving a final newline before print's own).
text = "\n".join(rows) + "\n"
print(text)
b8eeaa2be0ef34d0e968012791af674b681426bb | 2,939 | py | Python | src/modules/functions.py | Yasushi-Shinohara/TD2LM | e37771e5f9db425c2262eeaba514ff1d9880cf51 | [
"MIT"
] | null | null | null | src/modules/functions.py | Yasushi-Shinohara/TD2LM | e37771e5f9db425c2262eeaba514ff1d9880cf51 | [
"MIT"
] | null | null | null | src/modules/functions.py | Yasushi-Shinohara/TD2LM | e37771e5f9db425c2262eeaba514ff1d9880cf51 | [
"MIT"
] | null | null | null | # coding: UTF-8
#Relevant functions are written
# This is created 2020/04/17 by Y. Shinohara
# This is lastly modified 2020/05/20 by Y. Shinohara
import os
import math
import numpy as np
from modules.constants import *
#
def get_hD(param):
    """Diagonal part of the two-level Hamiltonian: diag(+Delta/2, -Delta/2)."""
    half_gap = 0.5 * param.Delta
    return np.array([[half_gap, 0.0], [0.0, -half_gap]], dtype=np.complex128)
#
def get_hD_IntPict(param):
    """Diagonal Hamiltonian in the interaction picture: identically zero
    (the gap phase is carried by the exp(+zI*S) factor in ES_hOD_IntPict).
    *param* is accepted only for signature symmetry."""
    return np.zeros((2, 2), dtype=np.complex128)
#
def ES_hOD(param, E, S):  # The action S is not used for this function.
    """Off-diagonal (dipole-coupling) Hamiltonian for field amplitude E."""
    coupling = -param.a * E  # The negative sign is from elementary charge of electron
    hOD = np.zeros((2, 2), dtype=np.complex128)
    hOD[0, 1] = coupling
    hOD[1, 0] = np.conj(coupling)
    return hOD
#
def ES_hOD_IntPict(param, E, S):
    """Off-diagonal coupling in the interaction picture: the dipole term
    dressed with the action phase exp(+zI*S)."""
    upper = -np.exp(+zI * S) * param.a * E  # negative sign: electron's elementary charge
    hOD = np.zeros((2, 2), dtype=np.complex128)
    hOD[0, 1] = upper
    hOD[1, 0] = np.conj(upper)
    return hOD
#
def psih_Ene(param, psi, h):  # *param* kept for a uniform call signature; unused here.
    """Real part of the expectation value <psi|h|psi>."""
    return np.real(np.vdot(psi, h @ psi))
#
def psih_Ene_IntPict(param, psi, h):
    """Energy expectation in the interaction picture.

    The physical energy needs the diagonal gap term that the
    interaction-picture *h* lacks, so diag(+Delta/2, -Delta/2) is restored
    before evaluating Re <psi|h|psi>.

    Bug fix: the original wrote the diagonal into the caller's *h* in
    place, a surprising side effect; the computation now works on a copy
    and the return value is unchanged.
    """
    h_full = h.copy()
    h_full[0, 0] = 0.5 * param.Delta
    h_full[1, 1] = -0.5 * param.Delta
    return np.real(np.vdot(psi, np.dot(h_full, psi)))
#
def h_U(param, h):
    """One-step propagator U = exp(-i*h*dt) via the spectral decomposition
    of the Hermitian 2x2 matrix *h*.

    Bug fix: np.linalg.eigh returns the eigenvectors as the COLUMNS
    v[:, i]; the original indexed the rows (v[0,:], v[1,:]), which gives a
    wrong propagator whenever *h* is complex (as it is here, since the
    off-diagonal coupling is complex).
    """
    w, v = np.linalg.eigh(h)
    U = np.exp(-zI * w[0] * param.dt) * np.outer(v[:, 0], np.conj(v[:, 0])) + np.exp(
        -zI * w[1] * param.dt
    ) * np.outer(v[:, 1], np.conj(v[:, 1]))
    return U
#
def psih2psi_exp(param, psi, h):
    """Propagate *psi* one time step with the exact exponential propagator of *h*."""
    return np.dot(h_U(param, h), psi)
#
def psih_hpsi(param, psi, h):  # *param* kept for a uniform call signature; unused here.
    """Apply the Hamiltonian: return h @ psi."""
    return h @ psi
#
def psih2psi_RK4(param, psi, h):
    """Advance *psi* by one dt with classical 4th-order Runge-Kutta for
    the Schroedinger equation i d(psi)/dt = h psi (hence the /zI)."""
    dt = param.dt
    k1 = psih_hpsi(param, psi, h) / zI
    k2 = psih_hpsi(param, psi + 0.5 * dt * k1, h) / zI
    k3 = psih_hpsi(param, psi + 0.5 * dt * k2, h) / zI
    k4 = psih_hpsi(param, psi + dt * k3, h) / zI
    return psi + (k1 + 2.0 * k2 + 2.0 * k3 + k4) * dt / 6.0
#
def Make_Efield(param):
    """Construct the time axis t and the driving electric field E(t).

    Each color is a sin^nenvelope-envelope pulse of duration Tpulse with
    carrier omegac and carrier-envelope phase phi_CEP; for several colors
    the fields are summed.

    Returns
    -------
    t : (Nt,) float64 time grid, t[it] = dt*it.
    E : the field; shape (Nt, 1) for Ncolor == 1 and (Nt,) for Ncolor > 1.
        NOTE(review): this shape inconsistency mirrors the original code --
        confirm against callers before unifying it.

    Raises
    ------
    ValueError
        If param.Ncolor < 1. (Bug fix: the original called sys.exit()
        without importing sys, so this branch raised NameError instead.)
    """
    t = np.zeros([param.Nt], dtype=np.float64)
    E = np.zeros([param.Nt, param.Ncolor], dtype=np.float64)
    for it in range(param.Nt):
        t[it] = param.dt * it
    if param.Ncolor == 1:
        icolor = 0
        for it in range(param.Nt):
            if t[it] < param.Tpulse:
                E[it, icolor] = (
                    param.E0
                    * (np.sin(pi * t[it] / param.Tpulse)) ** param.nenvelope
                    * np.sin(param.omegac * (t[it] - 0.5 * param.Tpulse) + param.phi_CEP)
                )
    elif param.Ncolor > 1:
        for icolor in range(param.Ncolor):
            for it in range(param.Nt):
                if t[it] < param.Tpulse[icolor]:
                    E[it, icolor] = (
                        param.E0[icolor]
                        * (np.sin(pi * t[it] / param.Tpulse[icolor])) ** param.nenvelope[icolor]
                        * np.sin(
                            param.omegac[icolor] * (t[it] - 0.5 * param.Tpulse[icolor])
                            + param.phi_CEP[icolor]
                        )
                    )
        E = np.sum(E, axis=1)
    else:
        raise ValueError('ERROR: The parameter ' + str(param.Ncolor) + ' is improper.')
    return t, E
| 34.576471 | 204 | 0.603607 | 532 | 2,939 | 3.295113 | 0.218045 | 0.041072 | 0.031945 | 0.045636 | 0.50656 | 0.436395 | 0.347975 | 0.324016 | 0.295493 | 0.257844 | 0 | 0.048282 | 0.217761 | 2,939 | 84 | 205 | 34.988095 | 0.714224 | 0.129636 | 0 | 0.279412 | 0 | 0 | 0.013402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.161765 | false | 0 | 0.058824 | 0 | 0.382353 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8efae7ff69082b7654045609a0e7610be5d6a9e | 436 | py | Python | Projetos Python/pythonexercicios/des060.py | Moyses-Nunes/Projetos-Python | 71ae170fb0d7be6afea18608bca630b57b9f0dff | [
"MIT"
] | null | null | null | Projetos Python/pythonexercicios/des060.py | Moyses-Nunes/Projetos-Python | 71ae170fb0d7be6afea18608bca630b57b9f0dff | [
"MIT"
] | null | null | null | Projetos Python/pythonexercicios/des060.py | Moyses-Nunes/Projetos-Python | 71ae170fb0d7be6afea18608bca630b57b9f0dff | [
"MIT"
] | null | null | null | from random import randint
from random import randint

# Guessing game: the computer draws a number in [0, 10] and counts attempts.
print('Vou pensar em um número de 0 à 10, tente advinhar..')
np = int(input('Qual número eu pensei? '))
nc = randint(0, 10)
tot = 0
while nc != np:
    # Bug fix: the original used two independent `if`s, so a too-high guess
    # followed by a too-low re-guess prompted TWICE in one iteration while
    # incrementing `tot` only once (undercounting attempts). One hint and
    # one prompt per iteration now.
    if np > nc:
        print('ERROU! menos..')
    else:
        print('ERROU! mais..')
    np = int(input('Outro número: '))
    tot += 1
print('Acertou mizeravi!!! Depois de {} tentativas'.format(tot + 1))
| 29.066667 | 68 | 0.582569 | 65 | 436 | 3.907692 | 0.553846 | 0.059055 | 0.11811 | 0.086614 | 0.291339 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027607 | 0.252294 | 436 | 14 | 69 | 31.142857 | 0.751534 | 0 | 0 | 0.142857 | 0 | 0 | 0.394495 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8f09da4dbeebdbb50f2fe7aee198aa0fbceeeea | 3,424 | py | Python | tools/grit/preprocess_if_expr.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,219 | 2018-03-26T02:57:34.000Z | 2022-03-31T00:27:59.000Z | tools/grit/preprocess_if_expr.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 250 | 2018-02-02T23:16:57.000Z | 2022-03-21T06:09:53.000Z | tools/grit/preprocess_if_expr.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 473 | 2019-03-24T16:34:23.000Z | 2022-03-31T02:01:05.000Z | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import errno
import io
import json
import os
import sys
# For Node, EvaluateExpression
import grit.node.base
# For CheckConditionalElements
import grit.format.html_inline
_CWD = os.getcwd()  # resolved once; all CLI path arguments are joined against it
class PreprocessIfExprNode(grit.node.base.Node):
    """Minimal grit Node used only to evaluate <if expr> preprocessing
    directives, parameterized by -D defines and a target platform."""

    def __init__(self):
        super(PreprocessIfExprNode, self).__init__()

    def PreprocessIfExpr(self, content):
        """Strip/keep conditional sections of *content* per the defines."""
        return grit.format.html_inline.CheckConditionalElements(self, content)

    def EvaluateCondition(self, expr):
        """Evaluate one <if expr="..."> expression against the defines/platform."""
        return grit.node.base.Node.EvaluateExpression(expr, self.defines,
                                                      self.target_platform, {})

    def SetDefines(self, defines):
        self.defines = defines

    def SetTargetPlatform(self, target_platform):
        self.target_platform = target_platform

    @staticmethod
    def Construct(defines, target_platform):
        """Build a configured node; falls back to sys.platform when no
        target platform is given."""
        node = PreprocessIfExprNode()
        node.SetDefines(defines)
        node.SetTargetPlatform(target_platform or sys.platform)
        return node
return node
def ParseDefinesArg(definesArg):
    """Turn repeated -D arguments into a {name: value} dict.

    Each entry of *definesArg* is a one-element list holding "NAME" or
    "NAME=VALUE". A bare NAME and the values "1"/"true" map to True,
    "0"/"false" map to False; any other value stays a (stripped) string.
    """
    defines = {}
    for (define,) in definesArg:
        name, sep, raw = define.partition('=')
        name = name.strip()
        if not sep:
            value = True
        else:
            raw = raw.strip()
            if raw in ("1", "true"):
                value = True
            elif raw in ("0", "false"):
                value = False
            else:
                value = raw
        defines[name] = value
    return defines
def main(argv):
    """Preprocess each --in-files entry from --in-folder into --out-folder,
    evaluating <if expr> sections against the -D defines and --target
    platform; optionally write a JSON manifest of the outputs."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--in-folder', required=True)
    parser.add_argument('--out-folder', required=True)
    parser.add_argument('--out-manifest')
    parser.add_argument('--in-files', required=True, nargs="*")
    parser.add_argument('-D', '--defines', nargs="*", action='append')
    parser.add_argument('-E', '--environment')
    parser.add_argument('-t', '--target')
    args = parser.parse_args(argv)
    # Paths are resolved relative to the invocation cwd captured in _CWD.
    in_folder = os.path.normpath(os.path.join(_CWD, args.in_folder))
    out_folder = os.path.normpath(os.path.join(_CWD, args.out_folder))
    defines = ParseDefinesArg(args.defines)
    node = PreprocessIfExprNode.Construct(defines, args.target)
    for input_file in args.in_files:
        content = ""
        with io.open(os.path.join(in_folder, input_file),
                     encoding='utf-8',
                     mode='r') as f:
            content = f.read()
        preprocessed = node.PreprocessIfExpr(content)
        out_path = os.path.join(out_folder, input_file)
        out_dir = os.path.dirname(out_path)
        # Refuse to escape the output folder (e.g. via "../" in input names).
        assert out_dir.startswith(out_folder), \
            'Cannot preprocess files to locations not under %s.' % out_dir
        try:
            os.makedirs(out_dir)
        except OSError as e:
            # Ignore directory exists errors. This can happen if two build rules
            # for overlapping directories hit the makedirs line at the same time.
            if e.errno != errno.EEXIST:
                raise
        # Written as bytes so the utf-8 encoding is explicit and unmodified.
        with io.open(out_path, mode='wb') as f:
            f.write(preprocessed.encode('utf-8'))
    if args.out_manifest:
        manifest_data = {}
        manifest_data['base_dir'] = '%s' % args.out_folder
        manifest_data['files'] = args.in_files
        manifest_file = io.open(
            os.path.normpath(os.path.join(_CWD, args.out_manifest)), 'w',
            encoding='utf-8', newline='\n')
        json.dump(manifest_data, manifest_file)
    return
if __name__ == '__main__':
    # Entry point: forward the CLI args (minus the program name).
    main(sys.argv[1:])
| 30.300885 | 75 | 0.681075 | 449 | 3,424 | 5.044543 | 0.347439 | 0.023841 | 0.052539 | 0.021192 | 0.082561 | 0.082561 | 0.082561 | 0.049007 | 0.049007 | 0 | 0 | 0.005087 | 0.196262 | 3,424 | 112 | 76 | 30.571429 | 0.817951 | 0.101636 | 0 | 0 | 0 | 0 | 0.064211 | 0 | 0 | 0 | 0 | 0 | 0.012048 | 1 | 0.096386 | false | 0 | 0.096386 | 0.024096 | 0.26506 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8f23b725362bf3e6ee6b383a329f52a225397d3 | 13,328 | py | Python | examples/MuscularSnake/muscular_snake.py | bhosale2/PyElastica | 520374672cbd6b0c89a912c5019559e66c5535e3 | [
"MIT"
] | null | null | null | examples/MuscularSnake/muscular_snake.py | bhosale2/PyElastica | 520374672cbd6b0c89a912c5019559e66c5535e3 | [
"MIT"
] | null | null | null | examples/MuscularSnake/muscular_snake.py | bhosale2/PyElastica | 520374672cbd6b0c89a912c5019559e66c5535e3 | [
"MIT"
] | null | null | null | __doc__ = """Muscular snake example from Zhang et. al. Nature Comm 2019 paper."""
import sys
import numpy as np
sys.path.append("../../")
from elastica import *
from examples.MuscularSnake.post_processing import (
plot_video_with_surface,
plot_snake_velocity,
)
from examples.MuscularSnake.muscle_forces import MuscleForces
from elastica.experimental.connection_contact_joint.parallel_connection import (
SurfaceJointSideBySide,
get_connection_vector_straight_straight_rod,
)
# Set base simulator class
class MuscularSnakeSimulator(
    BaseSystemCollection, Constraints, Connections, Forcing, CallBacks
):
    """Simulator assembled from the elastica mixins this example needs."""
    pass
muscular_snake_simulator = MuscularSnakeSimulator()

# Simulation parameters
final_time = 16.0
time_step = 5e-6
total_steps = int(final_time / time_step)
rendering_fps = 30
step_skip = int(1.0 / (rendering_fps * time_step))  # record every N steps for the video fps

rod_list = []
# Snake body
n_elem_body = 100
density_body = 1000
base_length_body = 1.0
base_radius_body = 0.025
E = 1e7
nu = 4e-3
# G = E / (2 * (1 + nu_poisson)) with nu_poisson = 0.5 (incompressible)
shear_modulus = E / 2 * (0.5 + 1.0)
poisson_ratio = 0.5  # NOTE(review): assigned but not passed anywhere below
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
# Body axis starts at the origin, lifted by one radius so it rests on z=0.
start = np.array([0.0, 0.0, base_radius_body])

snake_body = CosseratRod.straight_rod(
    n_elem_body,
    start,
    direction,
    normal,
    base_length_body,
    base_radius_body,
    density_body,
    nu,
    youngs_modulus=E,
    shear_modulus=shear_modulus,
)
body_elem_length = snake_body.rest_lengths[0]

# Define muscle fibers
n_muscle_fibers = 8
# Muscle force amplitudes
muscle_force_amplitudes = (
    np.array([22.96, 22.96, 20.95, 20.95, 9.51, 9.51, 13.7, 13.7])[::-1] / 2
)
# Set connection index of first node of each muscle with body
muscle_start_connection_index = [4, 4, 33, 33, 23, 23, 61, 61]
muscle_end_connection_index = []
muscle_glue_connection_index = (
    []
)  # These are the middle node idx of muscles that are glued to body
muscle_rod_list = []
"""
The muscle density is higher than the physiological one, since
we lump many muscles (SSP-SP, LD and IC) into one actuator. These rods
also represent the two tendons on the sides of the muscle which biologically
have a higher density than the muscle itself. For these reasons,we set the
muscle density to approximately twice the biological value.
"""
density_muscle = 2000
E_muscle = 1e4
nu_muscle = nu
shear_modulus_muscle = E_muscle / 2 * (0.5 + 1.0)

# Muscle group 1 and 3, define two antagonistic muscle pairs
n_elem_muscle_group_one_to_three = 13 * 3
base_length_muscle = 0.39
"""
In our simulation, we lump many biological tendons into one computational
tendon. As a result, our computational tendon is bigger in size, set as elements other than 4-8
below.
"""
muscle_radius = np.zeros((n_elem_muscle_group_one_to_three))
muscle_radius[:] = 0.003  # First set tendon radius for whole rod.
muscle_radius[4 * 3 : 9 * 3] = 0.006  # Change the radius of muscle elements
# Build muscle groups 1 and 3 (the first half of the fibers).
for i in range(int(n_muscle_fibers / 2)):
    index = muscle_start_connection_index[i]
    # Chose which side of body we are attaching the muscles. Note that these muscles are antagonistic pairs.
    # So they are at the opposite sides of the body and side_sign determines that.
    side_sign = -1 if i % 2 == 0 else 1
    # Muscle starts one muscle-radius off the body surface, at the body node `index`.
    start_muscle = np.array(
        [
            index * body_elem_length,
            side_sign * (base_radius_body + 0.003),
            base_radius_body,
        ]
    )
    muscle_rod = CosseratRod.straight_rod(
        n_elem_muscle_group_one_to_three,
        start_muscle,
        direction,
        normal,
        base_length_muscle,
        muscle_radius,
        density_muscle,
        nu_muscle,
        youngs_modulus=E_muscle,
        shear_modulus=shear_modulus_muscle,
    )
    """
    The biological tendons have a high Young's modulus E.,but are very slender.
    As a result, they resist extension (stretch) but can bend easily.
    Due to our decision to lump tendons and in order to mimic the above behavior
    of the biological tendons, we use a lower Young's
    Modulus and harden the stiffness of the shear and stretch modes only.
    Numerically, this is done by putting a pre-factor of 50000 before the
    shear/stretch matrix below. The actual value of the prefactor does not matter,
    what is important is that it is a high value to high stretch/shear stiffness.
    """
    # Stiffen only the tendon elements (outside 4*3 .. 9*3).
    muscle_rod.shear_matrix[..., : 4 * 3] *= 50000
    muscle_rod.shear_matrix[..., 9 * 3 :] *= 50000
    muscle_rod_list.append(muscle_rod)
    muscle_end_connection_index.append(index + n_elem_muscle_group_one_to_three)
    muscle_glue_connection_index.append(
        np.hstack(
            (
                np.arange(0, 4 * 3, 1, dtype=np.int64),
                np.arange(9 * 3, n_elem_muscle_group_one_to_three, 1, dtype=np.int64),
            )
        )
    )

# Muscle group 2 and 4, define two antagonistic muscle pairs
n_elem_muscle_group_two_to_four = 33
base_length_muscle = 0.33
"""
In our simulation, we lump many biological tendons into one computational
tendon. As a result, our computational tendon is bigger in size, set as rm_t
below.
"""
muscle_radius = np.zeros((n_elem_muscle_group_two_to_four))
muscle_radius[:] = 0.003  # First set tendon radius for whole rod.
muscle_radius[4 * 3 : 9 * 3] = 0.006  # Change the radius of muscle elements

# Build muscle groups 2 and 4 (the second half of the fibers).
for i in range(int(n_muscle_fibers / 2), n_muscle_fibers):
    index = muscle_start_connection_index[i]
    # Chose which side of body we are attaching the muscles. Note that these muscles are antagonistic pairs.
    # So they are at the opposite sides of the body and side_sign determines that.
    side_sign = -1 if i % 2 == 0 else 1
    start_muscle = np.array(
        [
            index * body_elem_length,
            side_sign * (base_radius_body + 0.003),
            base_radius_body,
        ]
    )
    muscle_rod = CosseratRod.straight_rod(
        n_elem_muscle_group_two_to_four,
        start_muscle,
        direction,
        normal,
        base_length_muscle,
        muscle_radius,
        density_muscle,
        nu_muscle,
        youngs_modulus=E_muscle,
        shear_modulus=shear_modulus_muscle,
    )
    """
    The biological tendons have a high Young's modulus E.,but are very slender.
    As a result, they resist extension (stretch) but can bend easily.
    Due to our decision to lump tendons and in order to mimic the above behavior
    of the biological tendons, we use a lower Young's
    Modulus and harden the stiffness of the shear and stretch modes only.
    Numerically, this is done by putting a pre-factor of 50000 before the
    shear/stretch matrix below. The actual value of the prefactor does not matter,
    what is important is that it is a high value to high stretch/shear stiffness.
    """
    muscle_rod.shear_matrix[..., : 4 * 3] *= 50000
    muscle_rod.shear_matrix[..., 9 * 3 :] *= 50000
    muscle_rod_list.append(muscle_rod)
    muscle_end_connection_index.append(index + n_elem_muscle_group_two_to_four)
    muscle_glue_connection_index.append(
        # np.array([0,1, 2, 3, 9, 10 ], dtype=np.int)
        np.hstack(
            (
                np.arange(0, 4 * 3, 1, dtype=np.int64),
                np.arange(9 * 3, n_elem_muscle_group_two_to_four, 1, dtype=np.int64),
            )
        )
    )
# After initializing the rods append them on to the simulation
rod_list.append(snake_body)
rod_list = rod_list + muscle_rod_list
for _, my_rod in enumerate(rod_list):
    muscular_snake_simulator.append(my_rod)

# Muscle actuation: each fiber gets a MuscleForces forcing with its own
# amplitude and a position-dependent time delay (traveling activation wave).
post_processing_forces_dict_list = []
for i in range(n_muscle_fibers):
    post_processing_forces_dict_list.append(defaultdict(list))
    muscle_rod = muscle_rod_list[i]
    side_of_body = 1 if i % 2 == 0 else -1
    time_delay = muscle_start_connection_index[::-1][i] * 1.0 / 101.76
    muscular_snake_simulator.add_forcing_to(muscle_rod).using(
        MuscleForces,
        amplitude=muscle_force_amplitudes[i],
        wave_number=2.0 * np.pi / 1.0,
        arm_length=(base_radius_body + 0.003),
        time_delay=time_delay,
        side_of_body=side_of_body,
        muscle_start_end_index=np.array([4 * 3, 9 * 3], np.int64),  # muscle (non-tendon) span
        step=step_skip,
        post_processing=post_processing_forces_dict_list[i],
    )

# Connect every muscle rod element-by-element to the matching body elements.
straight_straight_rod_connection_list = []
straight_straight_rod_connection_post_processing_dict = defaultdict(list)
for idx, rod_two in enumerate(muscle_rod_list):
    rod_one = snake_body
    (
        rod_one_direction_vec_in_material_frame,
        rod_two_direction_vec_in_material_frame,
        offset_btw_rods,
    ) = get_connection_vector_straight_straight_rod(
        rod_one,
        rod_two,
        (muscle_start_connection_index[idx], muscle_end_connection_index[idx]),
        (0, rod_two.n_elems),
    )
    straight_straight_rod_connection_list.append(
        [
            rod_one,
            rod_two,
            rod_one_direction_vec_in_material_frame.copy(),
            rod_two_direction_vec_in_material_frame.copy(),
            offset_btw_rods.copy(),
        ]
    )
    for k in range(rod_two.n_elems):
        rod_one_index = k + muscle_start_connection_index[idx]
        rod_two_index = k
        # Spring constant from the harmonic mean of the element radii,
        # scaled by element length and the body's Young's modulus E.
        k_conn = (
            rod_one.radius[rod_one_index]
            * rod_two.radius[rod_two_index]
            / (rod_one.radius[rod_one_index] + rod_two.radius[rod_two_index])
            * body_elem_length
            * E
            / (rod_one.radius[rod_one_index] + rod_two.radius[rod_two_index])
        )
        # Tendon elements (k < 12 or k >= 27) are glued stiffly; the muscle
        # belly in between is connected much more softly.
        if k < 12 or k >= 27:
            scale = 1 * 2
            scale_contact = 20
        else:
            scale = 0.01 * 5
            scale_contact = 20
        muscular_snake_simulator.connect(
            first_rod=rod_one,
            second_rod=rod_two,
            first_connect_idx=rod_one_index,
            second_connect_idx=rod_two_index,
        ).using(
            SurfaceJointSideBySide,
            k=k_conn * scale,
            nu=1e-4,
            k_repulsive=k_conn * scale_contact,
            rod_one_direction_vec_in_material_frame=rod_one_direction_vec_in_material_frame[
                ..., k
            ],
            rod_two_direction_vec_in_material_frame=rod_two_direction_vec_in_material_frame[
                ..., k
            ],
            offset_btw_rods=offset_btw_rods[k],
            post_processing_dict=straight_straight_rod_connection_post_processing_dict,
            step_skip=step_skip,
        )
# Friction forces
# Only apply to the snake body.
gravitational_acc = -9.81
muscular_snake_simulator.add_forcing_to(snake_body).using(
    GravityForces, acc_gravity=np.array([0.0, 0.0, gravitational_acc])
)

origin_plane = np.array([0.0, 0.0, 0.0])
normal_plane = normal
slip_velocity_tol = 1e-8
froude = 0.1
period = 1.0
# Friction coefficient chosen so the Froude number Fr = L / (T^2 g mu) = 0.1.
mu = base_length_body / (period * period * np.abs(gravitational_acc) * froude)
kinetic_mu_array = np.array(
    [1.0 * mu, 1.5 * mu, 2.0 * mu]
)  # [forward, backward, sideways]
static_mu_array = 2 * kinetic_mu_array
muscular_snake_simulator.add_forcing_to(snake_body).using(
    AnisotropicFrictionalPlane,
    k=1e1,
    nu=40,
    plane_origin=origin_plane,
    plane_normal=normal_plane,
    slip_velocity_tol=slip_velocity_tol,
    static_mu_array=static_mu_array,
    kinetic_mu_array=kinetic_mu_array,
)
class MuscularSnakeCallBack(CallBackBaseClass):
    """Record rod state every *step_skip* steps into *callback_params*."""

    def __init__(self, step_skip: int, callback_params: dict):
        CallBackBaseClass.__init__(self)
        self.every = step_skip
        self.callback_params = callback_params

    def make_callback(self, system, time, current_step: int):
        """Append the system's kinematic state for this diagnostic step."""
        if current_step % self.every == 0:
            self.callback_params["time"].append(time)
            self.callback_params["step"].append(current_step)
            self.callback_params["position"].append(system.position_collection.copy())
            self.callback_params["com"].append(system.compute_position_center_of_mass())
            self.callback_params["radius"].append(system.radius.copy())
            self.callback_params["velocity"].append(system.velocity_collection.copy())
            self.callback_params["avg_velocity"].append(
                system.compute_velocity_center_of_mass()
            )
            # NOTE(review): "center_of_mass" stores the same quantity as
            # "com" above -- both keys are kept for downstream consumers.
            self.callback_params["center_of_mass"].append(
                system.compute_position_center_of_mass()
            )
# Attach one diagnostic callback (with its own storage dict) per rod.
post_processing_dict_list = []
for idx, rod in enumerate(rod_list):
    post_processing_dict_list.append(defaultdict(list))
    muscular_snake_simulator.collect_diagnostics(rod).using(
        MuscularSnakeCallBack,
        step_skip=step_skip,
        callback_params=post_processing_dict_list[idx],
    )

muscular_snake_simulator.finalize()
timestepper = PositionVerlet()
integrate(timestepper, muscular_snake_simulator, final_time, total_steps)

plot_video_with_surface(
    post_processing_dict_list,
    video_name="muscular_snake.mp4",
    fps=rendering_fps,
    step=1,
    # The following parameters are optional
    x_limits=(-0.1, 1.0),  # Set bounds on x-axis
    y_limits=(-0.3, 0.3),  # Set bounds on y-axis
    z_limits=(-0.3, 0.3),  # Set bounds on z-axis
    dpi=100,  # Set the quality of the image
    vis3D=True,  # Turn on 3D visualization
    vis2D=True,  # Turn on projected (2D) visualization
)
# Velocity plot uses the body rod's diagnostics (rod_list[0] is snake_body).
plot_snake_velocity(
    post_processing_dict_list[0], period=period, filename="muscular_snake_velocity.png"
)
b8f403b08f3a02fec6fbd48d949e6b9c1b06f6c6 | 4,670 | py | Python | pnums/int_to_xcoords.py | SimLeek/pnums | 6ad207d8ba083f7ff53617caa37cf629b20cd755 | [
"MIT"
] | 3 | 2021-02-10T15:38:22.000Z | 2021-12-13T02:10:17.000Z | pnums/int_to_xcoords.py | SimLeek/pnums | 6ad207d8ba083f7ff53617caa37cf629b20cd755 | [
"MIT"
] | null | null | null | pnums/int_to_xcoords.py | SimLeek/pnums | 6ad207d8ba083f7ff53617caa37cf629b20cd755 | [
"MIT"
] | null | null | null | """Translate int/float coordinates to formats more suitable for transformer neural networks."""
import math as m
import numpy as np
from pnums.ith_middle import ith_middle, mask_ith_middle
from typing import Dict, Any
def _int_to_packed_uint8(in_int):
"""
Convert an int to a unit array.
>>> _int_to_packed_uint8(16)
array([16], dtype=uint8)
"""
in_bytes = int(in_int).to_bytes(
length=int(m.ceil(m.log2(in_int + 1) / 8.0)), byteorder="big", signed=False
)
in_array = np.frombuffer(in_bytes, dtype=np.uint8)
return in_array
def int_to_bool_array(in_int, length):
    """Convert an int to a bit array, left-padded with zeros to *length*.

    >>> int_to_bool_array(16, 32)
    array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
           0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.])
    """
    # Pack the int into big-endian bytes, then expand each byte to 8 bits.
    n_bytes = int(m.ceil(m.log2(in_int + 1) / 8.0))
    packed = np.frombuffer(
        int(in_int).to_bytes(length=n_bytes, byteorder="big", signed=False),
        dtype=np.uint8,
    )
    bits = np.unpackbits(packed)
    missing = length - len(bits)
    if missing > 0:
        bits = np.concatenate((np.zeros((missing,)), bits))
    return bits
def inversion_double(in_array):
    """
    Get the input boolean array along with its element-wise logical not beside it. For error correction.

    The doctest previously used ``np.bool``, an alias removed in NumPy 1.24;
    the builtin ``bool`` is the supported spelling and behaves identically.

    >>> inversion_double(np.array([1, 0, 1, 1, 1, 0, 0, 1], dtype=bool))
    array([[ True, False,  True,  True,  True, False, False,  True],
           [False,  True, False, False, False,  True,  True, False]])
    """
    return np.stack((in_array, np.logical_not(in_array)))
# Cache of pre-sorted shift orders keyed by ``ext``; avoids recomputing the
# ith_middle ordering on every call.
_i_extend_dict: Dict[Any, Any] = {}  # optimization dictionary


def _i_extend(in_array, ext, sort=True):
    """
    Create `ext`*2 duplicates and shifts the duplicates so each number has a unique representation.

    Error correction could fetch back two numbers if they were recorded on the same array.
    Useful for neural algorithms that would guess multiple locations represented by these coordiantes.

    >>> _i_extend(np.array([ 1, 0, 1, 1, 1, 0, 0, 1], dtype=bool), 8).astype(np.uint8)
    array([[[1, 0, 1, 1, 1, 0, 0, 1],
            [0, 1, 0, 0, 0, 1, 1, 0]],
    <BLANKLINE>
           [[1, 1, 1, 1, 1, 0, 0, 1],
            [0, 0, 0, 0, 0, 1, 1, 0]],
    <BLANKLINE>
           [[1, 1, 0, 1, 1, 0, 0, 1],
            [0, 0, 1, 0, 0, 1, 1, 0]],
    <BLANKLINE>
           [[1, 1, 0, 1, 1, 0, 0, 1],
            [0, 0, 1, 0, 0, 1, 1, 0]],
    <BLANKLINE>
           [[1, 1, 0, 0, 1, 0, 1, 1],
            [0, 0, 1, 1, 0, 1, 0, 0]],
    <BLANKLINE>
           [[0, 1, 0, 0, 1, 0, 1, 1],
            [1, 0, 1, 1, 0, 1, 0, 0]],
    <BLANKLINE>
           [[0, 1, 0, 0, 1, 1, 1, 1],
            [1, 0, 1, 1, 0, 0, 0, 0]],
    <BLANKLINE>
           [[0, 1, 0, 0, 0, 1, 1, 1],
            [1, 0, 1, 1, 1, 0, 0, 0]]], dtype=uint8)
    """
    # First duplicate: the unshifted input with its inverse.
    base_array = inversion_double(in_array)
    base_array = np.expand_dims(base_array, axis=0)
    if sort:
        # Reuse the cached shift order for this ``ext`` when available.
        if ext in _i_extend_dict.keys():
            i_list = _i_extend_dict[ext]
        else:
            # Order shift indices by their ith_middle value so duplicates
            # are generated in a deterministic, sorted order.
            i_list = [
                ith
                for _, ith in sorted(
                    zip(
                        [ith_middle(i) for i in range(ext - 1)],
                        [j for j in range(ext - 1)],
                    )
                )
            ]
            _i_extend_dict[ext] = i_list
    else:
        i_list = range(ext - 1)
    for i in i_list:
        # Each masked/shifted variant is paired with its logical inverse and
        # appended as one more duplicate along axis 0.
        next_array = mask_ith_middle(in_array, i)
        double_next_array = inversion_double(next_array)
        double_next_array = np.expand_dims(double_next_array, axis=0)
        base_array = np.concatenate((base_array, double_next_array), axis=0)
    return base_array
def int_to_1d(in_int, max_bits, new_dim_length):
    """Encode an integer as an error-correcting 1D representation for transformers."""
    bits = int_to_bool_array(in_int, max_bits)
    return _i_extend(bits, new_dim_length)
def ints_to_2d(
    in_int_1, max_bits_1, new_dim_length_1, in_int_2, max_bits_2, new_dim_length_2
):
    """Convert two integers representing x,y coordinates to a correlated output for 2D transformers.

    For each bit plane, the outer product of the two 1D encodings marks
    every (x, y) cell where both coordinate bits are set.
    """
    # ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` produces
    # the same dtype and works on all NumPy versions this code supports.
    bool_ento_1 = int_to_1d(in_int_1, max_bits_1, new_dim_length_1).astype(bool)
    bool_ento_2 = int_to_1d(in_int_2, max_bits_2, new_dim_length_2).astype(bool)
    positive_array = np.zeros(
        (new_dim_length_1, new_dim_length_2, max(max_bits_1, max_bits_2)), dtype=bool
    )
    for i in range(bool_ento_1.shape[-1]):
        # Column vector (N, 1) x row vector (1, M) -> (N, M) outer product.
        region_1 = bool_ento_1[:, 0, i, np.newaxis]
        region_2 = bool_ento_2[np.newaxis, :, 0, i]
        square_region = np.matmul(region_1, region_2)
        positive_array[:, :, i] = square_region
    return positive_array
| 33.597122 | 104 | 0.581585 | 760 | 4,670 | 3.339474 | 0.205263 | 0.045705 | 0.041371 | 0.044129 | 0.234831 | 0.196612 | 0.152482 | 0.128842 | 0.128842 | 0.116627 | 0 | 0.070243 | 0.277516 | 4,670 | 138 | 105 | 33.84058 | 0.68198 | 0.405353 | 0 | 0.032258 | 0 | 0 | 0.001171 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.064516 | 0 | 0.258065 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8f96c9deea1368255f55da7e5fd63f1cc258fd6 | 1,695 | py | Python | data_structures/stacks_and_queues/queue.py | tomwalton78/Data-Structures-in-Python | b7222453e524660f0831082a789a06bd3e630e3a | [
"MIT"
] | null | null | null | data_structures/stacks_and_queues/queue.py | tomwalton78/Data-Structures-in-Python | b7222453e524660f0831082a789a06bd3e630e3a | [
"MIT"
] | null | null | null | data_structures/stacks_and_queues/queue.py | tomwalton78/Data-Structures-in-Python | b7222453e524660f0831082a789a06bd3e630e3a | [
"MIT"
] | null | null | null | from data_structures.linked_lists.singly_linked_list import LinkedList
from data_structures.stacks_and_queues.stack import Stack
class Queue(Stack):
    """A FIFO queue layered on top of the Stack class.

    Stack itself extends LinkedList, so Queue reuses the same linked-list
    storage while exposing queue (front/back) semantics.
    """

    def add(self, data):
        """Enqueue *data* at the back, using LinkedList's append directly."""
        super(Stack, self).append(data)

    def remove(self):
        """Dequeue and return the item at the front (head of the list)."""
        return self.pop()

    def __str__(self):
        """Render the queue as a heading plus its comma separated contents.

        Returns
        -------
        str
            Heading line, then comma separated list of Queue contents
        """
        rendered = super().__str__()
        # LinkedList's rendering speaks in Stack terms; translate for Queue.
        if rendered == 'Stack is empty':
            return 'Queue is empty'
        # Drop the base-class heading; keep only the contents line.
        body = rendered.split('\n')[1]
        return ''.join(['Queue contents: front -> back\n', body])
if __name__ == '__main__':
    # Quick interactive demonstration of the Queue API.
    q = Queue()
    [q.add(i) for i in range(5)]  # enqueue 0..4
    print(q)
    print('peek:', q.peek())
    print('Removed:', q.remove(), q.remove(), q.remove())
    print(q)
    print('Is queue empty?', q.is_empty())
    print('Removed:', q.remove(), q.remove())
    print(q)
    print('Is queue empty?', q.is_empty())
| 23.541667 | 71 | 0.59587 | 212 | 1,695 | 4.59434 | 0.391509 | 0.035934 | 0.033881 | 0.043121 | 0.12423 | 0.12423 | 0.092402 | 0.092402 | 0.092402 | 0.092402 | 0 | 0.001663 | 0.290265 | 1,695 | 71 | 72 | 23.873239 | 0.80798 | 0.352802 | 0 | 0.2 | 0 | 0 | 0.123839 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.08 | 0 | 0.36 | 0.32 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8fad1295268174dc8aebaf56924e8dc086b76b9 | 3,347 | py | Python | androguard/core/binaries/elf.py | yang-guangliang/android_guard | 704fea36d796d5fbda2badf2237fc9dba74c3a96 | [
"Apache-2.0"
] | 3 | 2015-10-23T13:36:07.000Z | 2021-07-22T23:30:41.000Z | androguard/core/binaries/elf.py | yang-guangliang/android_guard | 704fea36d796d5fbda2badf2237fc9dba74c3a96 | [
"Apache-2.0"
] | null | null | null | androguard/core/binaries/elf.py | yang-guangliang/android_guard | 704fea36d796d5fbda2badf2237fc9dba74c3a96 | [
"Apache-2.0"
] | 1 | 2015-10-31T08:38:25.000Z | 2015-10-31T08:38:25.000Z | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections

from elfesteem import *
from miasm.tools.pe_helper import *
from miasm.core import asmbloc
from miasm.arch import arm_arch
from miasm.core import bin_stream

from androguard.core import bytecode
from androguard.core.androconf import CONF, debug
def disasm_at_addr(in_str, ad_to_dis, symbol_pool):
    """Disassemble ARM code starting at *ad_to_dis* and print every basic block.

    :param in_str: miasm binary stream over the ELF's virtual memory
    :param ad_to_dis: address at which to start disassembling
    :param symbol_pool: miasm symbol pool used to label known addresses
    """
    kargs = {}
    all_bloc = asmbloc.dis_bloc_all(arm_arch.arm_mn, in_str, ad_to_dis, set(),
                                    symbol_pool=symbol_pool,
                                    dontdis_retcall = False,
                                    follow_call = False,
                                    **kargs)
    # Print each basic block's label followed by its instructions.
    for i in all_bloc:
        bytecode._PrintDefault("%s\n" % i.label)
        for j in i.lines:
            bytecode._PrintDefault("\t %s\n" % j)
        bytecode._PrintDefault("\n")
class Function(object):
    """A named ELF symbol together with its symtab/dynsym entry."""

    def __init__(self, cm, name, info):
        self.cm = cm      # ClassManager used to drive disassembly
        self.name = name  # symbol name
        self.info = info  # elfesteem symbol entry; ``.value`` is the address

    def show(self):
        """Print a banner with this function's name/address and disassemble it."""
        bytecode._PrintSubBanner("Function")
        bytecode._PrintDefault("name=%s addr=0x%x\n" % (self.name, self.info.value))
        self.cm.disasm_at_addr( self.info.value )
class ClassManager(object):
    """Bundles the binary stream and symbol pool needed for disassembly."""

    def __init__(self, in_str, symbol_pool):
        self.in_str = in_str            # miasm binary stream
        self.symbol_pool = symbol_pool  # miasm symbol pool

    def disasm_at_addr(self, ad_to_dis):
        """Disassemble and print the code starting at *ad_to_dis*."""
        disasm_at_addr( self.in_str, ad_to_dis, self.symbol_pool )
class ELF(object):
    """Wrapper around an ELF binary providing symbol-driven disassembly."""

    def __init__(self, buff):
        self.E = elf_init.ELF( buff )
        # Binary stream over the ELF's virtual address space.
        self.in_str = bin_stream.bin_stream(self.E.virt)
        self.symbol_pool = None
        self.functions = collections.deque()
        self.create_symbol_pool()
        self.CM = ClassManager( self.in_str, self.symbol_pool )
        self.create_functions()

    def create_symbol_pool(self):
        """Register every imported (dynamic) symbol address in a symbol pool."""
        dll_dyn_funcs = get_import_address_elf(self.E)
        self.symbol_pool = asmbloc.asm_symbol_pool()
        for (n,f), ads in dll_dyn_funcs.items():
            for ad in ads:
                # Label each import address as "<libname>_<funcname>".
                l = self.symbol_pool.getby_name_create("%s_%s"%(n, f))
                l.offset = ad
                self.symbol_pool.s_offset[l.offset] = l

    def show(self):
        """Disassemble and print every discovered function."""
        for i in self.get_functions():
            i.show()

    def get_functions(self):
        """Return the collected Function objects."""
        return self.functions

    def create_functions(self):
        """Collect non-empty symbols from .symtab (when present) and .dynsym."""
        try:
            for k, v in self.E.sh.symtab.symbols.items():
                if v.size != 0:
                    self.functions.append( Function(self.CM, k, v) )
        except AttributeError:
            # Stripped binaries have no .symtab section; dynsym still applies.
            pass
        for k, v in self.E.sh.dynsym.symbols.items():
            if v.size != 0:
                self.functions.append( Function(self.CM, k, v) )
| 32.813725 | 84 | 0.620854 | 464 | 3,347 | 4.288793 | 0.346983 | 0.075377 | 0.049246 | 0.013568 | 0.106533 | 0.069347 | 0.069347 | 0.055276 | 0.055276 | 0.055276 | 0 | 0.005408 | 0.281745 | 3,347 | 101 | 85 | 33.138614 | 0.822379 | 0.188527 | 0 | 0.090909 | 0 | 0 | 0.016673 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0.015152 | 0.121212 | 0.015152 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8fb5c140dd1e9df336a0245a2a5791042aa45d4 | 959 | py | Python | Python Basics/While Loop/More Exercise/Task02.py | DonikaChervenkova/SoftUni | bff579c037ec48f39ed193b34bc3502a32e90732 | [
"MIT"
] | 1 | 2022-03-16T10:23:04.000Z | 2022-03-16T10:23:04.000Z | Python Basics/While Loop/More Exercise/Task02.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | null | null | null | Python Basics/While Loop/More Exercise/Task02.py | IvanTodorovBG/SoftUni | 7b667f6905d9f695ab1484efbb02b6715f6d569e | [
"MIT"
] | 1 | 2021-12-04T12:30:57.000Z | 2021-12-04T12:30:57.000Z | needed_money = int(input())
x = input()
count = 0
cash = 0
card = 0
total_money = 0
cash_count = 0
card_count = 0
while x != "End":
count += 1
money = int(x)
if count % 2 != 0:
if money > 100:
print("Error in transaction!")
elif money <= 100:
total_money += money
cash += money
cash_count += 1
print("Product sold!")
elif count % 2 == 0:
if money < 10:
print("Error in transaction!")
elif money >= 10:
total_money += money
card += money
card_count += 1
print("Product sold!")
if total_money >= needed_money:
average_cs = cash / cash_count
average_cc = card / card_count
print(f"Average CS: {average_cs:.2f}")
print(f"Average CC: {average_cc:.2f}")
break
x = input()
if x == "End":
print("Failed to collect required money for charity.")
| 23.975 | 58 | 0.520334 | 122 | 959 | 3.959016 | 0.295082 | 0.082816 | 0.028986 | 0.037267 | 0.281574 | 0.132505 | 0 | 0 | 0 | 0 | 0 | 0.040917 | 0.362878 | 959 | 39 | 59 | 24.589744 | 0.749591 | 0 | 0 | 0.222222 | 0 | 0 | 0.183054 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.194444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8fc5997902d40ab35fcd22899d1aba70a2bcaa0 | 2,008 | py | Python | config.py | Yaasha/Maturita | a007d6ebea70c1e1730512ad3395c7c36d004f9b | [
"MIT"
] | 2 | 2019-04-17T08:34:32.000Z | 2019-05-01T01:09:07.000Z | config.py | Yaasha/Maturita | a007d6ebea70c1e1730512ad3395c7c36d004f9b | [
"MIT"
] | 1 | 2018-05-09T15:14:09.000Z | 2019-02-11T09:13:31.000Z | config.py | Yaasha/Maturita | a007d6ebea70c1e1730512ad3395c7c36d004f9b | [
"MIT"
] | null | null | null | import os.path
# filepaths
DATASET_DIRECTORY = 'dataset' # directory of the dataset and mapping files
DATASET_FILE = 'emnist-byclass.mat' # name of the dataset file
DATASET_MAPPING_FILE = 'mapping.p' # name of the mapping file
MODEL_DIRECTORY = 'saves_tensorflow' # directory of the model and weights
MODEL_FILE = 'model.h5' # name of the model file
HISTORY_FILE = 'history.p' # name of the model`s history file
HISTORY_BATCH_FILE = 'history_batch.p' # name of the model`s batch history file
CUR_E_FILE = 'epoch.p' # name of the current epoch file
MODEL_VISUALIZE = 'model.png' # name of the image of the model
# joined directories and filenames
dataset_path = os.path.join(DATASET_DIRECTORY, DATASET_FILE)
mapping_path = os.path.join(DATASET_DIRECTORY, DATASET_MAPPING_FILE)
model_path = os.path.join(MODEL_DIRECTORY, MODEL_FILE)
history_path = os.path.join(MODEL_DIRECTORY, HISTORY_FILE)
history_batch_path = os.path.join(MODEL_DIRECTORY, HISTORY_BATCH_FILE)
cur_epoch_path = os.path.join(MODEL_DIRECTORY, CUR_E_FILE)
visualize_path = os.path.join(MODEL_DIRECTORY, MODEL_VISUALIZE)
# input data parameters
HEIGHT = 28 # height of the images
WIDTH = 28 # width of the images
# neural net model parameters
BACKEND = "tensorflow" # backend for keras (tensorflow, theano, cntk)
NB_FILTERS = 64 # number of convolutional filters
POOL_SIZE = (2, 2) # size of pooling area for max pooling
KERNEL_SIZE = (3, 3) # size of the convolutional kernel
DROPOUT = 0.25 # dropout chance
BATCH_SIZE = 1000 # number of inputs in one epoch
EPOCHS = 10 # number of training cycles
# constants for creating pdf
# offset of each side of document in cm
X_OFFSET_CM = 2 # horizontal
Y_OFFSET_CM = 2 # vertical
# dimensions of cell in cm
CELL_WIDTH_CM = 0.65
CELL_HEIGHT_CM = 0.65
FONT_SIZE = 20
# constants for reading pdf
# number of characters per line
LINE_BREAK = 26
| 37.886792 | 87 | 0.717131 | 298 | 2,008 | 4.651007 | 0.328859 | 0.046898 | 0.045455 | 0.070707 | 0.194805 | 0.194805 | 0.151515 | 0 | 0 | 0 | 0 | 0.020266 | 0.213645 | 2,008 | 52 | 88 | 38.615385 | 0.857505 | 0.399402 | 0 | 0 | 0 | 0 | 0.091837 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.03125 | 0 | 0.03125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b8fe31fd8a63d28d817343991bf9a9ed38a771af | 1,224 | py | Python | project/submissions/akshathaj/src/astar_4_path.py | naveenmoto/lablet102 | 24de9daa4ae75cbde93567a3239ede43c735cf03 | [
"MIT"
] | 1 | 2021-07-09T16:48:44.000Z | 2021-07-09T16:48:44.000Z | project/submissions/akshathaj/src/astar_4_path.py | naveenmoto/lablet102 | 24de9daa4ae75cbde93567a3239ede43c735cf03 | [
"MIT"
] | null | null | null | project/submissions/akshathaj/src/astar_4_path.py | naveenmoto/lablet102 | 24de9daa4ae75cbde93567a3239ede43c735cf03 | [
"MIT"
] | null | null | null | import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
def euclidean(node1, node2):
    """Straight-line (Euclidean) distance between two 2D grid nodes."""
    dx = node2[0] - node1[0]
    dy = node2[1] - node1[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def gen_astar_path(grid,start,goal):
    """Compute an A* path through a 0/1 occupancy grid.

    :param grid: 2D array where 1 marks an obstacle cell and 0 free space
    :param start: (x, y) start cell
    :param goal: (x, y) goal cell
    :return: (path, extent_limits) where path is a list of (x, y) cells and
        extent_limits is [xmin, xmax, ymin, ymax] for plotting
    """
    #initialize graph
    # Flip vertically so graph row 0 corresponds to the bottom of the plot.
    grid_path = np.flipud(grid)
    grid_size = grid_path.shape
    G = nx.grid_2d_graph(*grid_size)
    nx.set_edge_attributes(G,1,'cost')
    # Remove obstacle cells so A* cannot route through them.
    for i in range(grid_size[0]):
        for j in range(grid_size[1]):
            if grid_path[i,j] == 1:
                G.remove_node((i,j))
    # networkx grid nodes are (row, col) = (y, x); swap the coordinates.
    start_path = (start[1],start[0])
    goal_path = (goal[1],goal[0])
    astar_path = nx.astar_path(G,start_path,goal_path,heuristic=euclidean,weight='cost')
    # Swap back to (x, y) for the caller.
    astar_final = [(y,x) for x,y in astar_path]
    extent_limits = [0,grid_size[1],0,grid_size[0]]
    return astar_final,extent_limits
def plot_astar(grid,start,goal,astar_path,extent_limits):
    """Plot the occupancy grid with start, goal and the A* path overlaid.

    :param grid: the same 0/1 occupancy grid passed to gen_astar_path
    :param start: (x, y) start cell, drawn as a yellow cross
    :param goal: (x, y) goal cell, drawn as a red cross
    :param astar_path: list of (x, y) cells from gen_astar_path
    :param extent_limits: [xmin, xmax, ymin, ymax] from gen_astar_path
    """
    fig, ax = plt.subplots(figsize=(15,19))
    # Flip to match the coordinate convention used when building the graph.
    ax.imshow(np.flipud(grid), cmap=plt.cm.Dark2,origin='Lower',extent=extent_limits)
    ax.scatter(start[0],start[1], marker = "+", color = "yellow", s = 200)
    ax.scatter(goal[0],goal[1], marker = "+", color = "red", s = 200)
    # Draw each path cell (skipping the start) as a small red cross.
    for s in astar_path[1:]:
        ax.plot(s[0], s[1],'r+')
    ax.grid()
b8fe61f6e864865e60f9cb776ee400fc083dc787 | 7,976 | py | Python | LEA_cmd.py | dnlstmffj/LEA_SECU_Python | afe8c64b8b14275623bf406b4f13665c5dccaad7 | [
"MIT"
] | null | null | null | LEA_cmd.py | dnlstmffj/LEA_SECU_Python | afe8c64b8b14275623bf406b4f13665c5dccaad7 | [
"MIT"
] | null | null | null | LEA_cmd.py | dnlstmffj/LEA_SECU_Python | afe8c64b8b14275623bf406b4f13665c5dccaad7 | [
"MIT"
] | null | null | null | #-*- coding: utf-8 -*-
import argparse
import LEA_Test
import LEA
def printTest(result):
    """Print *result* only when it is a failing test tuple (first item != 0)."""
    if result is None:
        return
    # ``type(...) is tuple`` (not isinstance) preserved: only exact tuples
    # are treated as test results.
    if type(result) is tuple and result[0] != 0:
        print(result)
def test():
    """Run the LEA known-answer test vectors, then an ECB benchmark.

    Each ``lea_*_test`` returns a result that printTest shows only when the
    test failed (non-zero first element).
    """
    leaVS = LEA_Test.LEAvs()
    # Multi-block message tests for each mode of operation.
    printTest(leaVS.lea_mmt_ecb_test())
    printTest(leaVS.lea_mmt_cbc_test())
    printTest(leaVS.lea_mmt_ctr_test())
    printTest(leaVS.lea_mmt_cfb_test())
    printTest(leaVS.lea_mmt_ofb_test())
    # MAC and authenticated-encryption tests.
    printTest(leaVS.lea_cmac_g_test())
    printTest(leaVS.lea_ccm_ge_test())
    printTest(leaVS.lea_gcm_ae_test())
    LEA_Test.benchmark().lea_ecb_benchmark()
def makeBinary(filename, length, fillValue=None):
    """Write *length* bytes to *filename*.

    Each byte is *fillValue* when given (must fit in 0..255), otherwise a
    uniformly random value in [0, 255].  Output is buffered and flushed in
    roughly 4 KiB chunks.  (The previously unused ``import struct`` was
    removed.)
    """
    import random
    buffer = bytearray()
    with open(filename, 'wb') as fp:
        for idx in range(length):
            if fillValue is not None:
                buffer.append(fillValue)
            else:
                buffer.append(random.randint(0, 255))
            if len(buffer) > 4096:
                fp.write(buffer)
                buffer = bytearray()
        # Flush whatever remains after the loop.
        fp.write(buffer)
def doEnc(env):
    """Run LEA block-cipher en/decryption for the ``enc`` sub-command.

    Reads the key (and the IV for non-ECB modes) from the files in *env*,
    streams ``env.input`` through the selected mode of operation in 4 KiB
    chunks, and writes the result to ``env.output``.

    Returns True on success.
    Raises AttributeError when the mode needs an IV and none was given, or
    when the mode name is unknown.
    """
    key = env.key.read()
    data = env.input
    data_len = 0
    iv = None
    if env.mode != 'ECB':
        if env.iv is None:
            # Fixed: the original format string had no %s placeholder, so
            # this raise itself crashed with a TypeError instead of
            # reporting the missing IV.
            raise AttributeError('IV is required in %s mode.' % env.mode)
        else:
            iv = env.iv.read()
    if env.mode == 'ECB':
        leaEnc = LEA.ECB(env.enc, key)
    elif env.mode == 'CBC':
        leaEnc = LEA.CBC(env.enc, key, iv)
    elif env.mode == 'CTR':
        leaEnc = LEA.CTR(env.enc, key, iv)
    elif env.mode == 'CFB':
        leaEnc = LEA.CFB(env.enc, key, iv)
    elif env.mode == 'OFB':
        leaEnc = LEA.OFB(env.enc, key, iv)
    else:
        raise AttributeError('Unknown Mode')
    while True:
        buffer = data.read(4096)
        if len(buffer) == 0:
            break
        data_len += len(buffer)
        env.output.write(leaEnc.update(buffer))
    env.output.write(leaEnc.final())
    return True
def doAE(env):
    """Run LEA authenticated en/decryption (CCM or GCM) for the ``ae`` sub-command.

    Reads key, IV/nonce and optional AAD from the files in *env*, streams
    ``env.input`` through the chosen AE mode in 4 KiB chunks and writes the
    output to ``env.output``.

    Returns True on success, False when tag verification fails on decrypt.
    Raises AttributeError for an invalid tag length.
    """
    key = env.key.read()
    iv = env.iv.read()
    if env.aad is None:
        aad = b''
    else:
        aad = env.aad.read()
    tag_len = env.taglen
    if tag_len > 16 or tag_len <= 0:
        raise AttributeError('Invalid Tag Length')
    data = env.input
    data_len = 0
    try:
        # CCM needs the total message length up front.  On Python 2, a
        # non-file input (which may not support seek) is read whole instead.
        # NOTE(review): the ``file`` builtin only exists on Python 2; the
        # short-circuit on ``py_under3`` keeps this line safe on Python 3.
        if env.mode == 'CCM' and (LEA.LEA.py_under3 and not isinstance(data, file)):
            buffer = data.read()
            leaCCM = LEA.CCM(env.enc, key, iv, aad, tag_len, len(buffer))
            env.output.write(leaCCM.update(buffer))
            env.output.write(leaCCM.final())
        else:
            if env.mode == 'GCM':
                leaAE = LEA.GCM(env.enc, key, iv, aad, tag_len)
            else:
                # CCM: find the payload length by seeking to the end and
                # subtracting the tag.  NOTE(review): this subtraction also
                # happens on encrypt — confirm that is intended.
                data.seek(0,2)
                data_len = data.tell() - tag_len
                data.seek(0,0)
                leaAE = LEA.CCM(env.enc, key, iv, aad, tag_len, data_len)
            while True:
                buffer = data.read(4096)
                if len(buffer) == 0:
                    break
                data_len += len(buffer)
                env.output.write(leaAE.update(buffer))
            env.output.write(leaAE.final())
    except LEA.TagError:
        # Authentication failed during decryption.
        print('LEA %s Dec. - Tag value is inconsistent.' % env.mode)
        return False
    return True
def doCMAC(env):
    """Compute a LEA-CMAC over ``env.input`` and write the tag to ``env.output``.

    The key is read from ``env.key``; the input is streamed in 4 KiB chunks.
    Always returns True.
    """
    key = env.key.read()
    data = env.input
    leaCMAC = LEA.CMAC(key)
    data_len = 0
    while True:
        buffer = data.read(4096)
        if len(buffer) == 0:
            break
        data_len += len(buffer)
        leaCMAC.update(buffer)
    env.output.write(leaCMAC.final())
    return True
    #print('LEA CMAC - Done')
def buildParser():
    """Build the argparse parser with the enc/ae/cmac sub-commands.

    Each sub-command stores its handler (doEnc, doAE or doCMAC) in the
    ``func`` default via ``set_defaults``; main() dispatches on it.
    """
    import sys
    import os
    basename = os.path.basename(sys.argv[0])
    parser = argparse.ArgumentParser(description='LEA for Python command line tool',
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     epilog='Example: %s enc --help'%basename)
    subparsers = parser.add_subparsers()
    parser_enc = subparsers.add_parser('enc', description='Operatie en/decrpyt with LEA',
                                       help='Operatie en/decrpyt with LEA',
                                       epilog='Example: %s enc -e -m CTR -k key.bin --iv iv.bin -i pt.bin -o ct.bin'%basename)
    parser_enc_e = parser_enc.add_mutually_exclusive_group(required=True)
    parser_enc_e.add_argument('-e', '--encrypt', dest='enc', action='store_true', help='Encrypt file')
    parser_enc_e.add_argument('-d', '--decrypt', dest='enc', action='store_false', help='Decrypt file')
    parser_enc.add_argument('-m', '--mode', type=str.upper, choices=['ECB','CBC','CTR','CFB','OFB'],
                            help='Mode of operation', required=True)
    parser_enc.add_argument('-k', '--key', type=argparse.FileType('rb'), help='Key file(binary)', required=True)
    parser_enc.add_argument('--iv', '--nonce', type=argparse.FileType('rb'), help='IV file')
    parser_enc.add_argument('-i', '--input', type=argparse.FileType('rb'), help='Input file(binary)', required=True)
    parser_enc.add_argument('-o', '--output', type=argparse.FileType('wb'), help='Output file(binary)', required=True)
    parser_enc.set_defaults(func=doEnc)
    parser_ae = subparsers.add_parser('ae', description='Operate authenticated encryption with LEA',
                                      help='Operate authenticated encryption with LEA',
                                      epilog='Example: %s ae -e -m GCM -k key.bin --iv iv.bin --tag tag.bin -i pt.bin -o ct.bin'%basename)
    parser_ae_e = parser_ae.add_mutually_exclusive_group(required=True)
    parser_ae_e.add_argument('-e', '--encrypt', dest='enc', action='store_true', help='Encrypt file')
    parser_ae_e.add_argument('-d', '--decrypt', dest='enc', action='store_false', help='Decrypt file')
    parser_ae.add_argument('-m', '--mode', choices=['CCM','GCM'], type=str.upper, help='Mode of operation',
                           required=True)
    parser_ae.add_argument('-k', '--key', type=argparse.FileType('rb'), help='Key file(binary)', required=True)
    parser_ae.add_argument('--iv', '--nonce', type=argparse.FileType('rb'), help='IV file', required=True)
    parser_ae.add_argument('--aad', type=argparse.FileType('rb'), help='Additional authenticated data file(binary)')
    parser_ae.add_argument('--taglen', type=int, default=16, help='Tag length. should be equal or less than 16.')
    parser_ae.add_argument('-i', '--input', type=argparse.FileType('rb'), help='Input file(binary)', required=True)
    # Fixed: the short option was misspelled '-o,' (trailing comma), so
    # plain '-o' did not work for this sub-command.
    parser_ae.add_argument('-o', '--output', type=argparse.FileType('wb'), help='Output file(binary)', required=True)
    parser_ae.set_defaults(func=doAE)
    parser_cmac = subparsers.add_parser('cmac', help='operation CMAC with LEA',
                                        epilog='Example: %s cmac -k key.bin -i data.bin -o cmac.bin'%basename)
    parser_cmac.add_argument('-k', '--key', type=argparse.FileType('rb'), help='Key file(binary)', required=True)
    parser_cmac.add_argument('-i', '--input', type=argparse.FileType('rb'), help='Input file(binary)', required=True)
    # Fixed: same '-o,' typo as the ae sub-command above.
    parser_cmac.add_argument('-o', '--output', type=argparse.FileType('wb'), help='Output file(binary)', required=True)
    parser_cmac.set_defaults(func=doCMAC)
    return parser
def main():
    """Parse the command line and dispatch to the selected sub-command."""
    args = buildParser().parse_args()
    if not hasattr(args, 'func'):
        # No sub-command given: rebuild the parser and show its help.
        buildParser().parse_args(['-h'])
    else:
        args.func(args)
if __name__ == "__main__":
#test()
main()
| 37.446009 | 139 | 0.57999 | 1,019 | 7,976 | 4.412169 | 0.176644 | 0.046486 | 0.05605 | 0.044039 | 0.530472 | 0.42371 | 0.371441 | 0.316726 | 0.300267 | 0.275801 | 0 | 0.007274 | 0.276078 | 7,976 | 212 | 140 | 37.622642 | 0.771389 | 0.011911 | 0 | 0.254438 | 0 | 0.011834 | 0.153947 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047337 | false | 0 | 0.04142 | 0 | 0.118343 | 0.065089 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77025ccb785e7d28bf7ebe6f8ab161f279cb5152 | 1,436 | py | Python | ctmc/ctmc_class.py | kmedian/ctmc | 6b2e27955067638648c0feefb9dfe2ac20983052 | [
"MIT"
] | 10 | 2018-09-30T08:03:37.000Z | 2022-03-22T17:32:19.000Z | ctmc/ctmc_class.py | kmedian/ctmc | 6b2e27955067638648c0feefb9dfe2ac20983052 | [
"MIT"
] | 11 | 2018-10-23T10:48:20.000Z | 2019-12-01T11:06:32.000Z | ctmc/ctmc_class.py | kmedian/ctmc | 6b2e27955067638648c0feefb9dfe2ac20983052 | [
"MIT"
] | 3 | 2018-12-25T06:06:31.000Z | 2021-04-13T10:12:25.000Z | from sklearn.base import BaseEstimator
from .ctmc_func import ctmc
from .simulate import simulate
import numpy as np
class Ctmc(BaseEstimator):
    """Continuous Time Markov Chain with a scikit-learn style API."""

    def __init__(self, numstates: int = None, transintv: float = 1.0,
                 toltime: float = 1e-8, autocorrect: bool = False,
                 debug: bool = False):
        # Store hyper-parameters verbatim, as sklearn estimators require.
        self.numstates = numstates
        self.transintv = transintv
        self.toltime = toltime
        self.autocorrect = autocorrect
        self.debug = debug

    def fit(self, X: list, y=None):
        """Estimate the CTMC matrices from data via the ctmc function.

        Parameters
        ----------
        X : list of lists
            (see ctmc function 'data')
        y
            not used, present for API consistence purpose.
        """
        fitted = ctmc(
            X,
            numstates=self.numstates,
            transintv=self.transintv,
            toltime=self.toltime,
            autocorrect=self.autocorrect,
            debug=self.debug)
        self.transmat, self.genmat, self.transcount, self.statetime = fitted
        return self

    def predict(self, X: np.ndarray, steps: int = 1) -> np.ndarray:
        """Simulate the chain *steps* steps ahead.

        Parameters
        ----------
        X : ndarray
            vector with state variables at t

        Returns
        -------
        C : ndarray
            vector with state variables at t+1
        """
        return simulate(X, self.transmat, steps)
| 28.156863 | 75 | 0.56337 | 155 | 1,436 | 5.187097 | 0.43871 | 0.048507 | 0.042289 | 0.054726 | 0.084577 | 0.084577 | 0.084577 | 0 | 0 | 0 | 0 | 0.006309 | 0.337744 | 1,436 | 50 | 76 | 28.72 | 0.839117 | 0.236769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.173913 | 0 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77047b151c15e4eb59596e258b4ca3bb1f389e2f | 1,945 | py | Python | NiaPy/tests/test_mts.py | lukapecnik/NiaPy | a40ac08a4c06a13019ec5e39cc137461884928b0 | [
"MIT"
] | 1 | 2020-03-16T11:15:43.000Z | 2020-03-16T11:15:43.000Z | NiaPy/tests/test_mts.py | lukapecnik/NiaPy | a40ac08a4c06a13019ec5e39cc137461884928b0 | [
"MIT"
] | null | null | null | NiaPy/tests/test_mts.py | lukapecnik/NiaPy | a40ac08a4c06a13019ec5e39cc137461884928b0 | [
"MIT"
] | 1 | 2020-03-25T16:20:36.000Z | 2020-03-25T16:20:36.000Z | # encoding=utf8
from NiaPy.algorithms.other import MultipleTrajectorySearch, MultipleTrajectorySearchV1
from NiaPy.tests.test_algorithm import AlgorithmTestCase, MyBenchmark
class MTSTestCase(AlgorithmTestCase):
	r"""Tests for the MultipleTrajectorySearch algorithm."""

	def test_type_parameters(self):
		r"""typeParameters validators accept valid and reject invalid values."""
		d = MultipleTrajectorySearch.typeParameters()
		# NoLsTests / NoLs / NoLsBest accept any non-negative value.
		self.assertTrue(d['NoLsTests'](10))
		self.assertTrue(d['NoLsTests'](0))
		self.assertFalse(d['NoLsTests'](-10))
		self.assertTrue(d['NoLs'](10))
		self.assertTrue(d['NoLs'](0))
		self.assertFalse(d['NoLs'](-10))
		self.assertTrue(d['NoLsBest'](10))
		self.assertTrue(d['NoLsBest'](0))
		self.assertFalse(d['NoLsBest'](-10))
		# NoEnabled must be strictly positive.
		self.assertTrue(d['NoEnabled'](10))
		self.assertFalse(d['NoEnabled'](0))
		self.assertFalse(d['NoEnabled'](-10))

	def test_custom_works_fine(self):
		r"""The algorithm runs on the custom benchmark."""
		mts_custom = MultipleTrajectorySearch(n=10, C_a=2, C_r=0.5, seed=self.seed)
		mts_customc = MultipleTrajectorySearch(n=10, C_a=2, C_r=0.5, seed=self.seed)
		AlgorithmTestCase.algorithm_run_test(self, mts_custom, mts_customc, MyBenchmark())

	def test_griewank_works_fine(self):
		r"""The algorithm runs on the Griewank benchmark."""
		mts_griewank = MultipleTrajectorySearch(n=10, C_a=5, C_r=0.5, seed=self.seed)
		mts_griewankc = MultipleTrajectorySearch(n=10, C_a=5, C_r=0.5, seed=self.seed)
		AlgorithmTestCase.algorithm_run_test(self, mts_griewank, mts_griewankc)
class MTSv1TestCase(AlgorithmTestCase):
	r"""Tests for the MultipleTrajectorySearchV1 algorithm variant."""

	def test_custom_works_fine(self):
		r"""The V1 variant runs on the custom benchmark."""
		mts_custom = MultipleTrajectorySearchV1(n=10, C_a=2, C_r=0.5, seed=self.seed)
		mts_customc = MultipleTrajectorySearchV1(n=10, C_a=2, C_r=0.5, seed=self.seed)
		AlgorithmTestCase.algorithm_run_test(self, mts_custom, mts_customc, MyBenchmark())

	def test_griewank_works_fine(self):
		r"""The V1 variant runs on the Griewank benchmark."""
		mts_griewank = MultipleTrajectorySearchV1(n=10, C_a=5, C_r=0.5, seed=self.seed)
		mts_griewankc = MultipleTrajectorySearchV1(n=10, C_a=5, C_r=0.5, seed=self.seed)
		AlgorithmTestCase.algorithm_run_test(self, mts_griewank, mts_griewankc)
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 44.204545 | 87 | 0.772237 | 281 | 1,945 | 5.149466 | 0.185053 | 0.038701 | 0.022115 | 0.027643 | 0.678645 | 0.670352 | 0.568072 | 0.568072 | 0.519696 | 0.519696 | 0 | 0.039459 | 0.087918 | 1,945 | 43 | 88 | 45.232558 | 0.776212 | 0.034447 | 0 | 0.235294 | 0 | 0 | 0.048 | 0 | 0 | 0 | 0 | 0 | 0.352941 | 1 | 0.147059 | false | 0 | 0.058824 | 0 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7706af7457995b46e5d3ede1dfa6c612815211a0 | 1,073 | py | Python | combustion/unets/tests/test_utils.py | AI4SIM/model-collection | 4e69558300e78d134d97d5a9665c5d0b717391eb | [
"Apache-2.0"
] | null | null | null | combustion/unets/tests/test_utils.py | AI4SIM/model-collection | 4e69558300e78d134d97d5a9665c5d0b717391eb | [
"Apache-2.0"
] | null | null | null | combustion/unets/tests/test_utils.py | AI4SIM/model-collection | 4e69558300e78d134d97d5a9665c5d0b717391eb | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase, main
from utils import RandomCropper3D
from numpy import copy
from numpy.random import rand
class TestData(TestCase):
    """Unit tests for the RandomCropper3D utility."""

    def test_random_cropper(self):
        """Cropping a volume and its copy yields aligned sub-cubes of the target size."""
        n, n_ = 64, 32
        x = rand(n, n, n)
        y = copy(x)  # y starts as an exact copy of x
        random_cropper = RandomCropper3D(n_)
        x_, y_ = random_cropper(x, y)
        # Both crops have the requested cubic shape ...
        self.assertEqual(x_.shape, (n_, n_, n_))
        self.assertEqual(y_.shape, (n_, n_, n_))
        # ... and were taken from the same (random) location in both volumes.
        self.assertEqual(x_[0, 0, 0], y_[0, 0, 0])
if __name__ == '__main__':
    # Allow running this test module directly via unittest's CLI.
    main()
| 31.558824 | 74 | 0.691519 | 162 | 1,073 | 4.419753 | 0.518519 | 0.019553 | 0.01257 | 0.044693 | 0.064246 | 0.064246 | 0 | 0 | 0 | 0 | 0 | 0.019025 | 0.216216 | 1,073 | 33 | 75 | 32.515152 | 0.832342 | 0.485555 | 0 | 0 | 0 | 0 | 0.014815 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0.0625 | false | 0 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7706e8c7c8b89396e5645ccad3f13456e7c4a9c6 | 2,136 | py | Python | Post-op-Lung-Cancer-Surgery-Mortality-Predictor .py | rudibakaal/Post-op-Lung-Cancer-Surgery-Mortality-Predictor | 4729d39b3644e7c28fbf327d4fefa5a5baf0bea1 | [
"MIT"
] | null | null | null | Post-op-Lung-Cancer-Surgery-Mortality-Predictor .py | rudibakaal/Post-op-Lung-Cancer-Surgery-Mortality-Predictor | 4729d39b3644e7c28fbf327d4fefa5a5baf0bea1 | [
"MIT"
] | null | null | null | Post-op-Lung-Cancer-Surgery-Mortality-Predictor .py | rudibakaal/Post-op-Lung-Cancer-Surgery-Mortality-Predictor | 4729d39b3644e7c28fbf327d4fefa5a5baf0bea1 | [
"MIT"
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from keras.utils.vis_utils import plot_model
import matplotlib.style as style

# Post-operative 1-year mortality prediction for lung-cancer surgery patients
# (UCI "Thoracic Surgery" data set). 'Risk1Yr' is the binary target.
columns = ['DGN', 'PRE4', 'PRE5', 'PRE6', 'PRE7', 'PRE8', 'PRE9', 'PRE10', 'PRE11', 'PRE14', 'PRE17', 'PRE19', 'PRE25', 'PRE30', 'PRE32', 'AGE', 'Risk1Yr']
ds = pd.read_csv('ThoraricSurgery.csv', names=columns, delim_whitespace=True)

# Shuffle the rows so the later validation_split is not ordered.
ds = ds.reindex(np.random.permutation(ds.index))

train_features = ds.drop('Risk1Yr', axis=1)
train_label = ds['Risk1Yr']

# Encode the categorical columns as integer codes; PRE4, PRE5 and AGE are
# numeric and must be left as-is.
# BUG FIX: the original condition was `x != ['PRE4','PRE5','AGE']`, which
# compares a column name (a str) against a list and is therefore always
# True, so the numeric columns were category-encoded as well.
for x in train_features.columns:
    if x not in ('PRE4', 'PRE5', 'AGE'):
        train_features[x] = train_features[x].astype('category').cat.codes

# Standardise every feature column to zero mean / unit variance.
s = StandardScaler()
for x in train_features.columns:
    train_features[x] = s.fit_transform(train_features[x].values.reshape(-1, 1)).astype('float64')

# Map the target labels ('T'/'F') to integers 0/1.
label_encoder = LabelEncoder()
train_label = label_encoder.fit_transform(train_label)

input_dim = train_features.shape[1]

# Small fully-connected binary classifier.
model = keras.models.Sequential()
model.add(keras.layers.Dense(32, input_dim=input_dim, activation=tf.keras.layers.LeakyReLU(), kernel_initializer='he_uniform'))
model.add(keras.layers.Dense(16, activation=tf.keras.layers.LeakyReLU(), kernel_initializer='he_uniform'))
model.add(keras.layers.Dense(16, activation=tf.keras.layers.LeakyReLU(), kernel_initializer='he_uniform'))
model.add(keras.layers.Dense(1, activation='sigmoid', kernel_initializer='he_uniform'))

model.compile(optimizer='rmsprop', loss='binary_crossentropy',
              metrics=['binary_accuracy'])

history = model.fit(train_features, train_label, epochs=50, validation_split=0.5)

metrics = np.mean(history.history['val_binary_accuracy'])

# NOTE(review): evaluating on the training data measures fit, not
# generalisation; the validation metrics above are the honest numbers.
results = model.evaluate(train_features, train_label)
print('\nLoss, Binary_accuracy: \n', (results))

# Plot loss/accuracy curves over the training epochs.
style.use('dark_background')
pd.DataFrame(history.history).plot(figsize=(11, 7), linewidth=4)
plt.title('Binary Cross-entropy', fontsize=14, fontweight='bold')
plt.xlabel('Epochs', fontsize=13)
plt.ylabel('Metrics', fontsize=13)
plt.show()
77072fdae53cdd1a6845e697f296b01fe5c5e7e7 | 564 | py | Python | src/globus_cli/login_manager/__init__.py | globusonline/globus-cli | 696857baafac198141edc3c1c29c72215f217df1 | [
"Apache-2.0"
] | null | null | null | src/globus_cli/login_manager/__init__.py | globusonline/globus-cli | 696857baafac198141edc3c1c29c72215f217df1 | [
"Apache-2.0"
] | 1 | 2016-04-09T17:26:05.000Z | 2016-04-11T16:13:50.000Z | src/globus_cli/login_manager/__init__.py | globusonline/globus-cli | 696857baafac198141edc3c1c29c72215f217df1 | [
"Apache-2.0"
] | null | null | null | from .client_login import get_client_login, is_client_login
from .errors import MissingLoginError
from .local_server import is_remote_session
from .manager import LoginManager
from .tokenstore import (
delete_templated_client,
internal_auth_client,
internal_native_client,
token_storage_adapter,
)
__all__ = [
"MissingLoginError",
"is_remote_session",
"LoginManager",
"delete_templated_client",
"internal_auth_client",
"internal_native_client",
"token_storage_adapter",
"is_client_login",
"get_client_login",
]
| 24.521739 | 59 | 0.762411 | 64 | 564 | 6.1875 | 0.375 | 0.138889 | 0.070707 | 0.146465 | 0.393939 | 0.393939 | 0.393939 | 0.393939 | 0.393939 | 0.393939 | 0 | 0 | 0.163121 | 564 | 22 | 60 | 25.636364 | 0.838983 | 0 | 0 | 0 | 0 | 0 | 0.289007 | 0.117021 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.238095 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
770748c97bba11b806c7330e84d55a8fd43fb597 | 41,761 | py | Python | game/tichu/cards/cards.py | lukaspestalozzi/Master_Semester_Project | 4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e | [
"MIT"
] | null | null | null | game/tichu/cards/cards.py | lukaspestalozzi/Master_Semester_Project | 4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e | [
"MIT"
] | null | null | null | game/tichu/cards/cards.py | lukaspestalozzi/Master_Semester_Project | 4e71d4034ae3f5e7efa0864b48c6fd4d876fef4e | [
"MIT"
] | null | null | null | import random
from collections import abc as collectionsabc
import abc
from collections import defaultdict
import base64 as b64
import itertools
from .card import Card, CardSuit, CardValue
from game.utils import check_param, check_isinstance, check_all_isinstance, check_true, ignored
__author__ = 'Lukas Pestalozzi'
class ImmutableCards(collectionsabc.Collection):
    """
    An immutable collection of Card instances with helpers to enumerate the
    Tichu combinations (singles, pairs, trios, straights, fullhouses,
    pairsteps and bombs) that can be formed from it.
    """
    # TODO change all "isinstance(x, ImmutableClass)" to "self.__class__ == x.__class__"

    __slots__ = ("_cards", "_hash", "_repr", "_str")

    # card height -> the SWORD card of that height; used to give the Phoenix
    # a concrete value when it substitutes a card inside a straight.
    _card_val_to_sword_card = {
        2: Card.TWO_SWORD,
        3: Card.THREE_SWORD,
        4: Card.FOUR_SWORD,
        5: Card.FIVE_SWORD,
        6: Card.SIX_SWORD,
        7: Card.SEVEN_SWORD,
        8: Card.EIGHT_SWORD,
        9: Card.NINE_SWORD,
        10: Card.TEN_SWORD,
        11: Card.J_SWORD,
        12: Card.Q_SWORD,
        13: Card.K_SWORD,
        14: Card.A_SWORD,
    }

    def __init__(self, cards):
        """
        :param cards: An iterable containing Card instances or another Card instance.
        """
        if isinstance(cards, ImmutableCards):
            self._cards = frozenset(cards.cards_list)
        elif all([isinstance(c, Card) for c in cards]):
            self._cards = frozenset(cards)
        else:
            raise TypeError("Only instances of 'Card' can be put into 'Cards'. But was {}".format(cards))

        # hash/repr/str are precomputed once; safe because the set is frozen.
        self._hash = hash(self._cards)
        self._repr = "(len: {}, cards: {})".format(len(self._cards), repr(self._cards))
        self._str = "({})".format(', '.join([str(c) for c in sorted(self._cards)]))

    @property
    def cards_list(self):
        return list(self._cards)

    @property
    def cards(self):
        return frozenset(self._cards)

    @property
    def any_card(self):
        # An arbitrary card of this collection.
        return next(iter(self._cards))

    @property
    def highest_card(self):
        return max(self._cards, key=lambda c: c.card_value)

    @property
    def lowest_card(self):
        return min(self._cards, key=lambda c: c.card_value)

    def copy(self):
        """
        :return: copy of this ImmutableCards instance
        """
        return ImmutableCards(self._cards)

    def union(self, other):
        """
        :param other:
        :return: frozenset of the union of both cards sets
        """
        return frozenset(self.cards.union(other.cards))

    def count_points(self):
        """
        :return the Tichu points in this set of cards.
        """
        pts = sum([c.points for c in self._cards])
        return pts

    def issubset(self, other):
        """
        :param other: Cards instance
        :return True iff this cards all appear in 'other'.
        """
        return self._cards.issubset(other._cards)

    def sorted_tuple(self, *args, **kwargs):
        """
        :param args, kwargs: same parameters as for the built in 'sorted' method
        :return: The elements as a sorted tuple
        """
        return sorted(tuple(self._cards), *args, **kwargs)

    def partitions(self):
        """
        :return: a set of partitions of the cards
        """
        from .partition import Partition
        # TODO test
        # remove PHOENIX
        no_phoenix_cards = [c for c in self._cards if c is not Card.PHOENIX]

        # TODO handle phoenix (replace it once with every card not in this set)

        # start from the 'all singles' partition and evolve it
        final_partitions = set()
        open_partitions = set()
        open_partitions.add(Partition([Single(c) for c in no_phoenix_cards]))

        done = {}

        # repeat "evolve" until no new partitions are generated
        while len(open_partitions) > 0:
            pton = open_partitions.pop()
            if pton not in done:
                res = pton.evolve()
                if len(res) > 0:
                    open_partitions.update(res)
                done[pton] = res
                final_partitions.add(pton)  # TODO question, can be put in if clause?
        return final_partitions

    def value_dict(self, include_special=True):
        """
        :param include_special: bool: if False, the special cards are not in the dict
        :return: a dict mapping the card_values appearing in self._cards to the list of corresponding cards.
        """
        # TODO precompute, -> must be overridden by mutable subclasses
        val_dict = defaultdict(lambda: [])
        for c in self._cards:
            if include_special or c.suit is not CardSuit.SPECIAL:
                val_dict[c.card_value].append(c)
        return val_dict

    # TODO cache the results -> (only in immutable cards)

    def all_bombs(self, contains_value=None):
        """Yield all square bombs and straight bombs in these cards."""
        return itertools.chain(self.squarebombs(contains_value=contains_value),
                               self.straightbombs(contains_value=contains_value))

    def squarebombs(self, contains_value=None):
        """Yield all SquareBombs (4 equal-valued cards), optionally filtered."""
        must_contain_val = isinstance(contains_value, CardValue)
        for l in self.value_dict().values():
            if len(l) == 4:
                b = SquareBomb(*l)
                if not must_contain_val or (must_contain_val and b.contains_cardval(contains_value)):
                    yield b

    def straightbombs(self, contains_value=None):
        """Yield all StraightBombs (same-suit straights), optionally filtered."""
        # group by card suit
        suitdict = defaultdict(lambda: [])
        for c in self._cards:
            suitdict[c.suit].append(c)
        # look only at cards of same suit
        for suit, cards in suitdict.items():
            if len(cards) >= 5:  # must be at least 5 to be a straight (also excludes special cards)
                yield from (StraightBomb(st) for st in ImmutableCards(cards).straights(contains_value=contains_value))

    def singles(self, contains_value=None):
        """Yield one Single per distinct card value in these cards."""
        unique_cardvals = [crds[0] for cv, crds in self.value_dict().items()]
        sgls = (Single(c) for c in unique_cardvals)
        if isinstance(contains_value, CardValue):
            return (s for s in sgls if s.contains_cardval(contains_value))
        else:
            return sgls

    def pairs(self, ignore_phoenix=False, contains_value=None):
        """Yield all distinct Pairs (including Phoenix pairs unless ignored)."""
        valdict = self.value_dict(include_special=False)
        # if contains_value is specified, filter all other values out
        if isinstance(contains_value, CardValue):
            valdict = {k: v for k, v in valdict.items() if k is contains_value}

        # phoenix pairs
        if not ignore_phoenix and Card.PHOENIX in self._cards:
            for l in valdict.values():
                assert len(l) > 0
                yield Pair(l[0], Card.PHOENIX)

        # normal pairs
        for l in valdict.values():
            if len(l) >= 2:
                # 2 or more same valued cards -> take 2 of them
                yield Pair(l[0], l[1])
            if len(l) == 4:
                # 4 same valued cards -> make 2 different pairs (l[0] and l[1] already yielded)
                yield Pair(l[2], l[3])

    def trios(self, ignore_phoenix=False, contains_value=None):
        """Yield all distinct Trios (including Phoenix trios unless ignored)."""
        valdict = self.value_dict()
        # if contains_value is specified, filter all other values out
        if isinstance(contains_value, CardValue):
            valdict = {k: v for k, v in valdict.items() if k is contains_value}

        # phoenix trios
        if not ignore_phoenix and Card.PHOENIX in self._cards:
            for l in valdict.values():
                if len(l) >= 2:
                    yield Trio(l[0], l[1], Card.PHOENIX)

        # normal trios
        for l in valdict.values():
            if len(l) >= 3:
                # 3 or more same valued cards -> take 2 of them
                yield Trio(l[0], l[1], l[2])

    def straights_old(self, length=None, ignore_phoenix=False, contains_value=None):
        # NOTE(review): superseded by straights(); kept for backward
        # compatibility since it is part of the public interface.
        check_param(length is None or length >= 5, length)
        can_use_phoenix = not ignore_phoenix and Card.PHOENIX in self._cards

        if len(self._cards) < (5 if length is None else length):
            # if not enough cards are available -> return.
            return
        elif isinstance(contains_value, CardValue) and contains_value not in (c.card_value for c in self._cards):
            # does not contain the 'contains_value' card -> return
            return
        else:
            sorted_cards = sorted([c for c in self._cards
                                   if c is not Card.PHOENIX and c is not Card.DOG and c is not Card.DRAGON],
                                  key=lambda c: c.card_value)

            next_c = defaultdict(lambda: [])  # card val height -> list of cards with height 1 higher
            for c in sorted_cards:
                next_c[c.card_height - 1].append(c)

            def gen_from(card, remlength, ph):
                if remlength <= 1:
                    yield [card]  # finish a straight with this card

                # a straight for one possible continuation
                next_cards = next_c[card.card_height]
                if len(next_cards) > 0:
                    for st in gen_from(next_cards[0], remlength - 1, ph=ph):
                        yield [card] + st

                # Phoenix:
                if ph is None and can_use_phoenix:
                    if remlength <= 2 and card.card_value is not CardValue.A:
                        phoenix_as = ImmutableCards._card_val_to_sword_card[card.card_height + 1]
                        yield [card, (Card.PHOENIX, phoenix_as)]  # finish the straight with the Phoenix

                    # take phoenix instead of card
                    if card is not Card.MAHJONG:
                        if len(next_cards) > 0:
                            for st in gen_from(next_cards[0], remlength - 1, ph=card):
                                yield [(Card.PHOENIX, card)] + st

                    # take phoenix to jump a value
                    if card.card_value < CardValue.K:  # can not jump the As
                        after_next_cards = next_c[card.card_height + 1]
                        if len(after_next_cards) > 0:
                            phoenix_as = ImmutableCards._card_val_to_sword_card[card.card_height + 1]
                            for st in gen_from(after_next_cards[0], remlength - 2, ph=phoenix_as):
                                yield [card, (Card.PHOENIX, phoenix_as)] + st

            def gen_all_straights():
                """ Take all possible starting cards and generate straights from them """
                must_contain_val = isinstance(contains_value, CardValue)
                max_card_val = CardValue.TEN  # there is no possible straight starting from J (must have length 5)
                if must_contain_val:
                    max_card_val = min(max_card_val, contains_value)  # straight starting from a higher value than contains_val, can not contain that val
                for c in sorted_cards:
                    if c.card_value <= max_card_val:
                        yield from gen_from(c, 5, ph=None)  # all straights starting with normal card

                        if can_use_phoenix and c.card_value > CardValue.TWO:
                            # all straights starting with the Phoenix
                            phoenix = ImmutableCards._card_val_to_sword_card[c.card_height - 1]
                            for st in gen_from(c, 4, ph=phoenix):
                                yield [(Card.PHOENIX, phoenix)] + st

            # make the Straights
            must_contain_val = isinstance(contains_value, CardValue)
            for st in gen_all_straights():
                # TODO speed, make more efficient
                straight = None
                try:  # raises Stop Iteration when phoenix is not in the straight
                    (phoenix, phoenix_as) = next(elem for elem in st if isinstance(elem, tuple))
                    st.remove((phoenix, phoenix_as))  # TODO switch to dictionaries {card->card, phoenix->card ...}
                    st.append(phoenix)
                    straight = Straight(st, phoenix_as=phoenix_as)
                except StopIteration:
                    straight = Straight(st)
                if not must_contain_val or (must_contain_val and straight.contains_cardval(contains_value)):
                    yield straight

    def straights(self, length=None, ignore_phoenix=False, contains_value=None):
        """
        Yield all Straights (length >= 5) in these cards, using the Phoenix as
        a wildcard unless ignore_phoenix is True.
        """
        check_param(length is None or length >= 5, length)
        can_use_phoenix = not ignore_phoenix and Card.PHOENIX in self._cards

        if len(self._cards) < (5 if length is None else length):
            # if not enough cards are available -> return.
            return
        elif isinstance(contains_value, CardValue) and contains_value not in (c.card_value for c in self._cards):
            # does not contain the 'contains_value' card -> return
            return
        else:
            sorted_cards = sorted([c for c in self._cards
                                   if c is not Card.PHOENIX and c is not Card.DOG and c is not Card.DRAGON],
                                  key=lambda c: c.card_value)

            next_card = defaultdict(lambda: [])  # card val height -> list of cards with height 1 higher
            for c in sorted_cards:
                next_card[c.card_height - 1].append(c)

            def gen_from(card, remlength, ph):
                # Yields dicts mapping {card used -> card it represents};
                # the Phoenix key maps to the card it substitutes.
                if remlength <= 1:
                    yield {card: card}  # finish a straight with this card

                # a straight for one possible continuation
                next_cards = next_card[card.card_height]
                if len(next_cards) > 0:
                    for st in gen_from(next_cards[0], remlength - 1, ph=ph):
                        yield {card: card, **st}

                # Phoenix:
                if ph is None and can_use_phoenix:
                    # finish the straight with the Phoenix:
                    if remlength <= 2 and card.card_value is not CardValue.A:
                        phoenix_as = ImmutableCards._card_val_to_sword_card[card.card_height + 1]
                        yield {card: card, Card.PHOENIX: phoenix_as}

                    # take phoenix instead of card
                    if card is not Card.MAHJONG:
                        if len(next_cards) > 0:
                            for st in gen_from(next_cards[0], remlength - 1, ph=card):
                                yield {Card.PHOENIX: card, **st}

                    # take phoenix to jump a value
                    if card.card_value < CardValue.K and len(next_card[card.card_height]) == 0:  # can not jump the As, and only jump if there is no next card
                        after_next_cards = next_card[card.card_height + 1]
                        if len(after_next_cards) > 0:  # there is a card to 'land'
                            phoenix_as = ImmutableCards._card_val_to_sword_card[card.card_height + 1]
                            for st in gen_from(after_next_cards[0], remlength - 2, ph=phoenix_as):
                                yield {card: card, Card.PHOENIX: phoenix_as, **st}

            def gen_all_straights():
                """ Take all possible starting cards and generate straights from them """
                must_contain_val = isinstance(contains_value, CardValue)
                max_card_val = CardValue.TEN  # there is no possible straight starting from J (must have length 5)
                if must_contain_val:
                    max_card_val = min(max_card_val, contains_value)  # straight starting from a higher value than contains_val, can not contain that val

                for c in sorted_cards:
                    if c.card_value <= max_card_val:
                        yield from gen_from(c, 5, ph=None)  # all straights starting with normal card

                        # all straights starting with the Phoenix:
                        if can_use_phoenix and c.card_value > CardValue.TWO:
                            phoenix = ImmutableCards._card_val_to_sword_card[c.card_height - 1]
                            for st in gen_from(c, 4, ph=phoenix):
                                yield {Card.PHOENIX: phoenix, **st}

            # make and yield the Straights:
            gen = (Straight(set(st.keys()), phoenix_as=st[Card.PHOENIX] if Card.PHOENIX in st else None) for st in gen_all_straights())
            if isinstance(contains_value, CardValue):
                yield from (st for st in gen if st.contains_cardval(contains_value))
            else:
                yield from gen

    def fullhouses(self, ignore_phoenix=False, contains_value=None):
        """Yield all FullHouses combinable from the trios and pairs in these cards."""
        trios = list(self.trios(ignore_phoenix=ignore_phoenix))
        pairs = list(self.pairs(ignore_phoenix=ignore_phoenix))
        if isinstance(contains_value, CardValue):
            for t in trios:
                t_contains = t.contains_cardval(contains_value)
                for p in pairs:
                    if t_contains or p.contains_cardval(contains_value):
                        # FullHouse raises for invalid trio/pair combinations
                        # (e.g. Phoenix used twice); those are simply skipped.
                        try:
                            fh = FullHouse(pair=p, trio=t)
                            yield fh
                        except Exception:
                            pass
        else:
            for t in trios:
                for p in pairs:
                    try:
                        fh = FullHouse(pair=p, trio=t)
                        yield fh
                    except Exception:
                        pass

    def pairsteps(self, ignore_phoenix=False, length=None, contains_value=None):
        """Yield all PairSteps (consecutive pairs) in these cards."""
        # NOTE(review): the `length` parameter is validated but never used to
        # filter the generated pairsteps here (callers filter afterwards via
        # can_be_played_on) -- confirm before relying on it.
        check_param(length is None or length > 0, length)
        sorted_pairs = sorted(self.pairs(ignore_phoenix=ignore_phoenix))

        next_pair_no_ph = defaultdict(lambda: [])
        next_pair_with_ph = defaultdict(lambda: [])
        for p in sorted_pairs:
            if p.contains_phoenix():
                next_pair_with_ph[p.height - 1].append(p)
            else:
                next_pair_no_ph[p.height - 1].append(p)

        def gen_from(pair, remlength, ph_used):
            if remlength <= 1:
                yield [pair]

            # continue without phoenix:
            try:
                for ps in gen_from(next_pair_no_ph[pair.height][0], remlength - 1, ph_used=ph_used):
                    yield [pair] + ps
            except (StopIteration, IndexError):
                pass

            # continue with phoenix:
            if not ph_used:
                try:
                    for ps in gen_from(next_pair_with_ph[pair.height][0], remlength - 1, ph_used=True):
                        yield [pair] + ps
                except (StopIteration, IndexError):
                    pass

        def gen_all_pairsteps():
            """ Take all possible starting pairs and generate pairsteps from them """
            max_height = CardValue.A.value  # there is no possible pairstep starting from As (must have length 2)
            if isinstance(contains_value, CardValue):
                max_height = min(max_height, contains_value.value)  # straight starting from a higher value than contains_val, can not contain that val

            for pair in sorted_pairs:
                if pair.height <= max_height:
                    yield from gen_from(pair, 2, ph_used=pair.contains_phoenix())  # all steps starting with the pair

        # make and yield the pairsteps:
        gen = (PairSteps(pairs) for pairs in gen_all_pairsteps())
        if isinstance(contains_value, CardValue):
            yield from (ps for ps in gen if ps.contains_cardval(contains_value))
        else:
            yield from gen

    def pairsteps_old(self, ignore_phoenix=False, length=None, contains_value=None):
        # NOTE(review): superseded by pairsteps(); kept for backward
        # compatibility since it is part of the public interface.
        check_param(length is None or length > 0, length)
        pairs_s = sorted(self.pairs(ignore_phoenix=ignore_phoenix))
        if len(pairs_s) < 2:
            return

        def ps_len2():
            # find all pairsteps of length 2
            for i1, p1 in enumerate(pairs_s):
                for i2 in range(i1 + 1, len(pairs_s)):
                    p2 = pairs_s[i2]
                    try:
                        yield PairSteps([p1, p2])
                    except Exception:
                        pass

        ps_length2 = list(ps_len2())
        # BUG FIX: removed leftover debug print of ps_length2.

        def ps_len_le_than(l):
            if l <= 2:
                yield from ps_length2
            else:
                for ps in ps_len_le_than(l - 1):
                    for p in pairs_s:
                        try:
                            yield ps.extend(p)
                        except Exception:
                            pass

        gen = (ps for ps in ps_len_le_than(length) if len(ps) == length) if length is not None else ps_len_le_than(len(pairs_s))
        if isinstance(contains_value, CardValue):
            yield from (ps for ps in gen if ps.contains_cardval(contains_value))
        else:
            yield from gen

    def all_combinations(self, played_on=None, ignore_phoenix=False, contains_value=None):
        """
        Yield all combinations in these cards; when `played_on` is given,
        yield only the combinations that can beat it.
        """
        check_param(contains_value is None or isinstance(contains_value, CardValue), contains_value)

        if played_on is None:
            yield from itertools.chain(
                self.singles(contains_value=contains_value),
                self.all_bombs(contains_value=contains_value),
                self.pairs(ignore_phoenix=ignore_phoenix, contains_value=contains_value),
                self.trios(ignore_phoenix=ignore_phoenix, contains_value=contains_value),
                self.straights(ignore_phoenix=ignore_phoenix, contains_value=contains_value),
                self.fullhouses(ignore_phoenix=ignore_phoenix, contains_value=contains_value),
                self.pairsteps(ignore_phoenix=ignore_phoenix, contains_value=contains_value)
            )
        elif isinstance(played_on, Combination):
            if Card.DOG in played_on:
                assert len(played_on) == 1
                return  # it is not possible to play on the dog

            if isinstance(played_on, Bomb):
                yield from (b for b in self.all_bombs(contains_value=contains_value) if b.can_be_played_on(played_on))  # only higher bombs
            else:
                yield from self.all_bombs(contains_value=contains_value)  # all bombs

            if Card.DRAGON in played_on:
                assert len(played_on) == 1
                return  # only bombs can beat the Dragon

            elif isinstance(played_on, Single):
                # all single cards higher than the played_on
                yield from (single for single in self.singles(contains_value=contains_value) if single.can_be_played_on(played_on))

            elif isinstance(played_on, Pair):
                # all pairs higher than the played_on.any_card
                yield from (pair for pair in self.pairs(contains_value=contains_value) if pair.can_be_played_on(played_on))

            elif isinstance(played_on, Trio):
                # all trios higher than the played_on.any_card
                yield from (trio for trio in self.trios(contains_value=contains_value) if trio.can_be_played_on(played_on))

            elif isinstance(played_on, PairSteps):
                # all higher pairsteps
                yield from (ps for ps in self.pairsteps(length=len(played_on), contains_value=contains_value) if ps.can_be_played_on(played_on))

            elif isinstance(played_on, Straight):
                # all higher straights
                yield from (st for st in self.straights(length=len(played_on), contains_value=contains_value) if st.can_be_played_on(played_on))
        else:
            raise ValueError("Wrong arguments! (played_on was {})".format(played_on))

    def random_cards(self, n=1):
        """
        :param n: int > 0
        :return: n random cards.
        """
        cds = list(self._cards)
        random.shuffle(cds)
        return cds[:n]

    def unique_id(self) -> str:
        """
        A string that has following property:
        - A.unique_id() == B.unique_id() implies A == B
        - A.unique_id() != B.unique_id() implies A != B

        :return: A unique string for this instance
        """
        s = ''.join([str(c) for c in sorted(c.number for c in self._cards)])
        return b64.b64encode(s.encode()).decode()

    def pretty_string(self):
        # TODO
        return self._str

    def __str__(self):
        return self._str

    def __repr__(self):
        return "[{}]({})".format(type(self).__name__, self._repr)

    def __len__(self):
        return len(self._cards)

    def __iter__(self):
        return self._cards.__iter__()

    def __contains__(self, item):
        return self._cards.__contains__(item)

    def __add__(self, other):
        check_isinstance(other, ImmutableCards)
        return ImmutableCards(self._cards.union(other._cards))

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        if self.__class__ is other.__class__ and len(self._cards) == len(other._cards):
            for c in self._cards:
                if c not in other:
                    return False
            return True
        else:
            return False
class Cards(ImmutableCards):
    """
    A mutable set of Cards with some helpful functions.
    """

    # A mutable collection must not be hashable. BUG FIX: the original
    # assigned `self.__hash__ = None` on the instance, which has no effect
    # because Python looks special methods up on the type; the class-level
    # assignment actually disables hash().
    __hash__ = None

    def __init__(self, cards=()):
        # BUG FIX: default is now an immutable empty tuple instead of the
        # original mutable-default-argument `set()`.
        super().__init__(cards)
        self._cards = set(self._cards)

    def add(self, card):
        """
        Adds the card to this Cards set

        :param card: the Card to add
        :return: Nothing
        """
        if isinstance(card, Card):
            self._cards.add(card)
            assert card in self._cards
        else:
            raise TypeError("Only instances of 'Card' can be put into 'Cards', but was {}.".format(card))

    def add_all(self, other):
        """
        Adds all elements in 'other' to this Cards set.

        :param other: Iterable containing only Card instances.
        :return self
        """
        for card in other:
            self.add(card)
        return self

    def remove(self, card):
        """
        Removes the card to this Cards set

        :param card: the Card to remove
        :return: Nothing
        """
        assert card in self._cards, "card: {}; remove from cards: {}".format(card, self._cards)
        self._cards.remove(card)
        assert card not in self._cards

    def remove_all(self, other):
        """
        Removes all elements in 'other' from this Cards set.

        :param other: Iterable containing only Card instances.
        :return: self
        """
        for card in other:
            self.remove(card)
        return self

    def to_immutable(self):
        """
        :return: An ImmutableCards instance containing the same cards as calling instance
        """
        return ImmutableCards(self._cards)

    def copy(self):
        """
        :return: copy of this Cards instance
        """
        return Cards(self._cards)

    def __str__(self):
        return "({})".format(', '.join([str(c) for c in sorted(self._cards)]))

    def __repr__(self):
        return "(len: {}, cards: {})".format(len(self._cards), repr(self._cards))
class Combination(metaclass=abc.ABCMeta):
    """
    Abstract base class for all Tichu combinations (Single, Pair, Trio,
    Straight, FullHouse, PairSteps and the bombs). Comparison operators
    order combinations by their `height`.
    """

    def __init__(self, cards):
        check_param(len(cards) > 0, cards)
        check_all_isinstance(cards, Card)
        self._cards = ImmutableCards(cards)
        # guards against duplicate cards in `cards` (frozenset would drop them)
        check_true(len(self._cards) == len(cards))

    @property
    def cards(self):
        return self._cards

    @property
    @abc.abstractmethod
    def height(self):
        # Subclasses must provide the value used for ordering combinations.
        raise NotImplementedError()

    @property
    def points(self):
        # Sum of the Tichu point values of all contained cards.
        return sum(c.points for c in self._cards)

    @staticmethod
    def make(cards):
        """
        makes a combiantion out of the given cards

        :param cards: the cards
        :return: the Combination
        :raise ValueError: if cards don't make a valid combination
        """
        nbr_cards = len(cards)
        err = None
        try:
            check_param(0 < nbr_cards <= 15, nbr_cards)
            if nbr_cards == 1:
                return Single(*cards)

            if nbr_cards == 2:
                return Pair(*cards)

            if nbr_cards == 3:
                return Trio(*cards)

            # even card counts may form a pairstep
            if nbr_cards % 2 == 0:
                with ignored(Exception):
                    ps = PairSteps.from_cards(cards)
                    return ps

            if nbr_cards == 4:
                return SquareBomb(*cards)

            if nbr_cards == 5:
                with ignored(Exception):
                    fh = FullHouse.from_cards(cards)
                    return fh

            # 5+ cards: try a straight, preferring a straight bomb if possible
            if nbr_cards >= 5:
                st, sb = None, None
                with ignored(Exception):
                    st = Straight(cards)
                    sb = StraightBomb(st)
                if sb:
                    return sb
                if st:
                    return st

        except Exception as e:
            err = e
        raise ValueError("Is no combination: {}\ncards: {}".format(err, str(cards)))

    def contains_phoenix(self):
        # True iff the Phoenix is part of this combination.
        return Card.PHOENIX in self._cards

    def issubset(self, other):
        # True iff every card of this combination appears in `other`.
        for c in self._cards:
            if c not in other:
                return False
        return True

    def fulfills_wish(self, wish):
        # True iff a card of the wished CardValue is part of this combination.
        return wish in (c.card_value for c in self._cards)

    def contains_cardval(self, cardval):
        # True iff a card of the given CardValue is part of this combination.
        return cardval in (c.card_value for c in self._cards)

    def can_be_played_on(self, other_comb):
        # True iff this combination beats `other_comb` (or nothing was played).
        # Incomparable combinations (different type/length) yield False, not
        # an exception.
        try:
            return other_comb is None or other_comb < self
        except TypeError:
            return False

    def unique_id(self) -> str:
        """
        A string that has following property:
        - A.unique_id() == B.unique_id() implies A == B
        - A.unique_id() != B.unique_id() implies A != B

        :return: A unique string for this instance
        """
        return self.cards.unique_id()

    def __iter__(self):
        return iter(self._cards)

    def __eq__(self, other):
        return self.__class__ is other.__class__ and self.cards == other.cards and self.height == other.height

    def __hash__(self):
        return hash(self._cards)

    def __len__(self):
        return len(self._cards)

    def __contains__(self, other):
        return self._cards.__contains__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)

    def __gt__(self, other):
        # defined via __le__; relies on heights forming a total order
        return not self.__le__(other)

    def __lt__(self, other):
        # Only same-type combinations (or bombs) are comparable; raises
        # TypeError otherwise (can_be_played_on converts that to False).
        check_isinstance(other, (type(self), Bomb), msg="Can't compare")
        return self.height < other.height

    def __str__(self):
        return "{}({})".format(self.__class__.__name__.upper(), ",".join(str(c) for c in sorted(self._cards)))

    def __repr__(self):
        return self.__str__()
class Single(Combination):
    """A combination consisting of exactly one card."""

    __slots__ = ("_card", "_height")

    def __init__(self, card):
        super().__init__([card])
        self._card = card
        self._height = card.card_height

    @property
    def card(self):
        return self._card

    @property
    def height(self):
        return self._height

    def set_phoenix_height(self, newheight):
        """
        Set this single's height to `newheight`, but only when its card is
        the Phoenix; for any other card the call leaves the height untouched.

        :param newheight: the Phoenix's own card height, or a value in [2, 15)
            (i.e. between TWO and As).
        :return: the height of the single after this call
        """
        check_isinstance(newheight, (int, float))
        check_param(newheight == Card.PHOENIX.card_height or 2 <= newheight < 15, param=newheight)
        if self._card is Card.PHOENIX:
            self._height = newheight
        return self.height

    def is_phoenix(self):
        """Return True iff this single is the Phoenix."""
        return self._card is Card.PHOENIX

    def contains_cardval(self, cardval):
        """Return True iff this single's card has the given card value."""
        return cardval is self._card.card_value

    def __lt__(self, other):
        # A bomb beats every single card.
        if isinstance(other, Bomb):
            return True
        check_isinstance(other, Single)
        # The Dog can never be compared against another single.
        check_true(self.card is not Card.DOG and other.card is not Card.DOG, ex=TypeError, msg="Can't compare")
        if self.card is Card.DRAGON:
            # Nothing (apart from bombs, handled above) beats the Dragon.
            return False
        if other.is_phoenix() and other.height == Card.PHOENIX.card_height:
            # A Phoenix whose height was never assigned beats any card when it
            # appears on the right-hand side of '<'.
            return True
        return self.height < other.height

    def __contains__(self, item):
        return self._card is item
class Pair(Combination):
    """Two cards of the same card value; the Phoenix may stand in for one card."""

    __slots__ = ("_card_value", "_height")

    def __init__(self, card1, card2):
        check_param(card1 is not card2, param=(card1, card2))  # different cards
        super().__init__((card1, card2))

        if Card.PHOENIX in self._cards:
            if card1 is Card.PHOENIX:
                card1, card2 = card2, card1  # make sure card1 is not Phoenix
            # the non-phoenix card must be a normal (non special) card
            check_param(card1.suit is not CardSuit.SPECIAL, card1)
        else:
            check_param(card1.card_value is card2.card_value, (card1, card2))  # same value

        # height/value are taken from the (guaranteed non-phoenix) card1
        self._height = card1.card_height
        self._card_value = card1.card_value

    @property
    def height(self):
        return self._height
class Trio(Combination):
    """Three cards of the same card value; the Phoenix may stand in for one card."""

    __slots__ = ("_card_value", "_height")

    def __init__(self, card1, card2, card3):
        check_param(card1 is not card2 and card1 is not card3 and card2 is not card3,
                    param=(card1, card2, card3))  # 3 different cards
        super().__init__((card1, card2, card3))

        if Card.PHOENIX in self._cards:
            # BUG FIX: the original only handled card1 being the Phoenix and
            # never checked that the two remaining cards share the same card
            # value, so invalid trios such as (TWO, THREE, PHOENIX) were
            # accepted. Normalise to the two non-phoenix cards and validate.
            card1, card2 = [c for c in (card1, card2, card3) if c is not Card.PHOENIX]
            check_param(card1.suit is not CardSuit.SPECIAL)
            check_param(card1.card_value is card2.card_value)  # same values
        else:
            check_param(card1.card_value is card2.card_value is card3.card_value)  # same values

        # height/value are taken from the (guaranteed non-phoenix) card1
        self._height = card1.card_height
        self._card_value = card1.card_value

    @property
    def height(self):
        return self._height
class FullHouse(Combination):
    """A trio plus a pair; the Phoenix may be used in at most one of the two."""

    __slots__ = ("_pair", "_trio", "_height")

    def __init__(self, pair, trio):
        check_isinstance(pair, Pair)
        # BUG FIX: the original called check_param(trio, Trio), which only
        # tested the truthiness of `trio` and never verified its type.
        check_isinstance(trio, Trio)
        check_param(not (pair.contains_phoenix() and trio.contains_phoenix()))  # phoenix can only be used once
        cards = set(pair.cards + trio.cards)
        check_param(len(cards) == 5, param=(pair, trio))
        super().__init__(cards)
        # the trio determines the height of a fullhouse
        self._height = trio.height
        self._pair = pair
        self._trio = trio

    @property
    def height(self):
        return self._height

    @property
    def trio(self):
        return self._trio

    @property
    def pair(self):
        return self._pair

    @classmethod
    def from_cards(cls, cards):
        """
        Build a FullHouse from 5 distinct cards.

        :param cards: iterable of 5 different cards (Phoenix not allowed)
        :raise ValueError: if the cards do not form a fullhouse
        """
        check_param(len(set(cards)) == 5)  # 5 different cards
        check_param(Card.PHOENIX not in cards, "can't make from cards when Phoenix is present")
        pair = None
        trio = None
        for cs in ImmutableCards(cards).value_dict().values():
            if len(cs) == 2:
                pair = Pair(*cs)
            elif len(cs) == 3:
                trio = Trio(*cs)
            else:
                # if this fails, then there is no fullhouse in the cards
                check_true(len(cs) == 0, ex=ValueError, msg="there is no fullhouse in the cards (cards: {})".format(cards))
        return cls(pair, trio)

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.trio == other.trio and self.pair == other.pair

    def __hash__(self):
        return hash((self._trio, self._pair))

    def __str__(self):
        return "{}(<{}><{}>)".format(self.__class__.__name__.upper(), ",".join(str(c) for c in self._trio), ",".join(str(c) for c in self._pair))
class PairSteps(Combination):
    """Two or more pairs whose heights form a gap-free run (e.g. 55-66-77)."""
    __slots__ = ("_lowest_pair_height", "_height", "_pairs")

    def __init__(self, pairs):
        check_param(len(pairs) >= 2)
        check_all_isinstance(pairs, Pair)
        heights = {p.height for p in pairs}
        # Every pair has a distinct height and the heights are consecutive.
        check_param(len(heights) == len(pairs))
        check_param(max(heights) - min(heights) + 1 == len(pairs))
        cards = set(itertools.chain(*[p.cards for p in pairs]))
        # Exactly 2 cards per pair with no duplicates (guards multiple Phoenix use).
        check_param(len(cards) == 2 * len(pairs), param=pairs)
        super().__init__(cards)
        self._height = max(heights)
        self._lowest_pair_height = min(heights)
        self._pairs = pairs

    @property
    def height(self):
        """Height of the highest pair."""
        return self._height

    @property
    def pairs(self):
        """The constituent pairs."""
        return self._pairs

    @property
    def lowest_card_height(self):
        """Height of the lowest pair."""
        return self._lowest_pair_height

    @classmethod
    def from_cards(cls, cards):
        """Build a PairSteps from an even number (>= 4) of cards; no Phoenix allowed."""
        check_param(len(cards) >= 4 and len(cards) % 2 == 0)
        check_param(Card.PHOENIX not in cards, "can't make pairstep from cards when Phoenix is present")
        pairs = []
        for group in ImmutableCards(cards).value_dict().values():
            if len(group) == 2:
                pairs.append(Pair(*group))
            else:
                # Any other group size means the cards are not pair steps.
                check_true(len(group) == 0, ex=ValueError, msg="Not a pairstep")
        return cls(pairs)

    def extend(self, pair):
        """Return a new PairSteps with `pair` appended to this one's pairs."""
        return PairSteps(self._pairs + [pair])

    def __str__(self):
        rendered = ", ".join("{0}{1}".format(*sorted(p.cards)) for p in self._pairs)
        return "{}({})".format(self.__class__.__name__.upper(), rendered)

    def __lt__(self, other):
        # Bombs beat any pair-step regardless of height.
        if isinstance(other, Bomb):
            return True
        check_isinstance(other, PairSteps)
        check_true(len(other) == len(self), ex=TypeError, msg="Can't compare")
        return self.height < other.height
class Straight(Combination):
    """A run of at least 5 cards with consecutive values.

    The Phoenix may be included, in which case `phoenix_as` names the
    regular card it stands in for.
    """
    __slots__ = ("_height", "_ph_as")

    def __init__(self, cards, phoenix_as=None):
        check_param(len(cards) >= 5)
        if Card.PHOENIX in cards:
            check_isinstance(phoenix_as, Card)
            check_param(phoenix_as not in cards, param=(phoenix_as, cards))
            check_param(phoenix_as.suit is not CardSuit.SPECIAL, param=phoenix_as)
        # Substitute the Phoenix with its stand-in card for validation.
        if phoenix_as:
            effective = [c for c in cards if c is not Card.PHOENIX] + [phoenix_as]
        else:
            effective = cards
        check_param(len({c.card_value for c in effective}) == len(effective))  # all values distinct
        heights = [c.card_height for c in effective]
        check_param(max(heights) - min(heights) + 1 == len(effective))  # values consecutive
        super().__init__(cards)
        self._height = max(heights)
        self._ph_as = phoenix_as

    @property
    def height(self):
        """Height of the highest card (Phoenix counted as its stand-in)."""
        return self._height

    @property
    def phoenix_as(self):
        """The card the Phoenix stands in for, or None."""
        return self._ph_as

    def __lt__(self, other):
        # Bombs beat any straight regardless of height.
        if isinstance(other, Bomb):
            return True
        check_isinstance(other, Straight)
        check_true(len(other) == len(self), ex=TypeError, msg="Can't compare")
        return self.height < other.height

    def __eq__(self, other):
        if not self.contains_phoenix():
            return super().__eq__(other)
        return super().__eq__(other) and self.phoenix_as.card_value is other.phoenix_as.card_value

    def __str__(self):
        if not self.contains_phoenix():
            return super().__str__()
        parts = []
        for c in sorted(self._cards):
            if c is Card.PHOENIX:
                parts.append(str(c) + ":" + str(self.phoenix_as))
            else:
                parts.append(str(c))
        return "{}({})".format(self.__class__.__name__.upper(), ",".join(parts))

    def __hash__(self):
        if self.contains_phoenix():
            return hash((self._cards, self.height, self.phoenix_as.card_value))
        return hash((self._cards, self.height))
class Bomb(Combination):
    """Marker base class for bomb combinations (they beat any non-bomb)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class SquareBomb(Bomb):
    """Four distinct cards of the same value (the Phoenix cannot take part)."""
    __slots__ = ("_height", )

    def __init__(self, card1, card2, card3, card4):
        super().__init__((card1, card2, card3, card4))
        check_param(len(set(self.cards)) == 4)  # four distinct cards
        # A single shared card value also rules out the Phoenix.
        check_param(len({c.card_value for c in self.cards}) == 1)
        # Offset by 500 so a square bomb outranks every non-bomb combination.
        self._height = card1.card_height + 500

    @property
    def height(self):
        """Combination height (card height + 500 bomb offset)."""
        return self._height

    @classmethod
    def from_cards(cls, cards):
        """Build a SquareBomb from an iterable of four cards."""
        return cls(*cards)

    def __lt__(self, other):
        # Straight bombs always beat square bombs; otherwise compare heights.
        return True if isinstance(other, StraightBomb) else self.height < other.height
class StraightBomb(Bomb):
    """A straight whose cards all share one suit — the strongest combination kind."""
    __slots__ = ("_height", )

    def __init__(self, straight):
        check_isinstance(straight, Straight)
        check_true(len({c.suit for c in straight}) == 1)  # single suit (also excludes the Phoenix)
        super().__init__(straight.cards)
        # Offset by 1000 so it outranks everything but longer/higher straight bombs.
        self._height = straight.height + 1000

    @property
    def height(self):
        """Combination height (straight height + 1000 bomb offset)."""
        return self._height

    @classmethod
    def from_cards(cls, *cards):
        """Build a StraightBomb from individual cards (validated via Straight)."""
        return cls(Straight(cards))

    def __lt__(self, other):
        # Only another straight bomb can beat a straight bomb: first by
        # length, then by height; anything else loses.
        if not isinstance(other, StraightBomb):
            return False
        if len(self) != len(other):
            return len(self) < len(other)
        return self.height < other.height
| 37.420251 | 181 | 0.585331 | 5,253 | 41,761 | 4.429088 | 0.078812 | 0.045818 | 0.00851 | 0.007307 | 0.554199 | 0.503567 | 0.439311 | 0.396157 | 0.372647 | 0.325926 | 0 | 0.008717 | 0.324202 | 41,761 | 1,115 | 182 | 37.453812 | 0.815676 | 0.144633 | 0 | 0.415119 | 0 | 0 | 0.020124 | 0 | 0 | 0 | 0 | 0.003587 | 0.007958 | 1 | 0.159151 | false | 0.007958 | 0.011936 | 0.072944 | 0.355438 | 0.001326 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77093cd28af85b82979f5dedf21e2001bb2b475c | 3,828 | py | Python | saarthi/mun_dashboard/views.py | muskangarg21/Lucid-project | 1b0c68ade3e24b17a8575bfb9672369cbb2846c7 | [
"MIT"
] | null | null | null | saarthi/mun_dashboard/views.py | muskangarg21/Lucid-project | 1b0c68ade3e24b17a8575bfb9672369cbb2846c7 | [
"MIT"
] | 4 | 2019-06-29T15:52:03.000Z | 2019-07-08T06:08:26.000Z | saarthi/mun_dashboard/views.py | muskangarg21/Lucid-project | 1b0c68ade3e24b17a8575bfb9672369cbb2846c7 | [
"MIT"
] | 3 | 2019-06-25T10:49:50.000Z | 2019-07-08T07:21:45.000Z | from django.shortcuts import render, redirect, get_object_or_404
from .models import *
from users.models import *
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .decorators import officials_only
@login_required
@officials_only
def welcome(request):
    """Landing page shown to municipal officials after login."""
    return render(request, 'mun_dashboard/welcome.html')
@login_required
@officials_only
def dashboard(request):
    """Show all unverified complaints, newest first, for official review."""
    pending = Complaint.objects.filter(is_verified=False).order_by('-date_filed')
    context = {'complaints': pending}
    return render(request, 'mun_dashboard/dashboard.html', context)
@login_required
@officials_only
def decline(request, pk):
    """Delete a complaint and notify its filer that it was declined."""
    complaint = get_object_or_404(Complaint, pk=pk)
    short_id = str(complaint.complaint_id)[:8]
    Notification.objects.create(
        user=complaint.filer,
        notification="Your complaint with ID " + short_id + " is declined",
    )
    complaint.delete()
    messages.success(request, 'Complaint deleted successfully!')
    return redirect('dashboard')
@login_required
@officials_only
def mark_spam(request, pk):
    """Mark a complaint as spam, warn the filer, and penalise repeat spammers.

    Every 5th spam strike after the 4th deducts 10 reward points.
    """
    complaint = get_object_or_404(Complaint, pk=pk)
    short_id = str(complaint.complaint_id)[:8]
    warning = ("Your complaint with ID " + short_id +
               " is marked as spam, this is a warning that you should file truthful complaints")
    Notification.objects.create(user=complaint.filer, notification=warning)
    profile = complaint.filer.profile
    profile.spamcount += 1
    if profile.spamcount > 4 and profile.spamcount % 5 == 0:
        profile.rewards -= 10
        deduction = ("Your complaint with ID " + short_id +
                     " is marked as spam, and due to continuous spamming, a reward of 10 is deducted from your account")
        Notification.objects.create(user=complaint.filer, notification=deduction)
    profile.save()
    complaint.delete()
    messages.success(request, 'Complaint marked as spam successfully!')
    return redirect('dashboard')
@login_required
@officials_only
def approve_success(request, pk):
    """Verify a complaint, credit the filer 10 reward points, and notify them."""
    complaint = get_object_or_404(Complaint, pk=pk)
    complaint.is_verified = True
    complaint.status = 'Verified'
    points = 10
    complaint.filer.profile.rewards += points
    complaint.filer.profile.save()
    complaint.save()
    notice = ("Your complaint with ID " + str(complaint.complaint_id)[:8] +
              " is approved and reward of 10 is credited to your account. Thanks for bringing this to our notice.")
    Notification.objects.create(user=complaint.filer, notification=notice)
    messages.success(request, 'Complaint verified successfully!')
    return render(request, 'mun_dashboard/approved_complaint.html',
                  {'complaint': complaint, 'reward': points})
@login_required
@officials_only
def mark_solved(request, pk):
    """Mark a verified complaint as settled and notify the filer."""
    complaint = get_object_or_404(Complaint, pk=pk)
    complaint.is_settled = True
    complaint.status = 'Solved'
    complaint.save()
    note = ("Your complaint with ID " + str(complaint.complaint_id)[:8] +
            " is settled, Keep helping us ahead.")
    Notification.objects.create(user=complaint.filer, notification=note)
    messages.success(request, 'Complaint marked solved successfully!')
    return render(request, 'mun_dashboard/solved_complaint.html', {'complaint': complaint})
@login_required
@officials_only
def verified_complaints(request):
    """List complaints that are verified but not yet settled, newest first."""
    open_complaints = Complaint.objects.filter(is_verified=True, is_settled=False).order_by('-date_filed')
    return render(request, 'mun_dashboard/verified_complaints.html', {'complaints': open_complaints})
@login_required
@officials_only
def solved_complaints(request):
    """List settled complaints, newest first."""
    settled = Complaint.objects.filter(is_settled=True).order_by('-date_filed')
    return render(request, 'mun_dashboard/solved_complaints.html', {'complaints': settled})
| 41.608696 | 184 | 0.75418 | 471 | 3,828 | 5.991507 | 0.229299 | 0.059532 | 0.062367 | 0.073707 | 0.620836 | 0.546775 | 0.483345 | 0.296244 | 0.258682 | 0.196669 | 0 | 0.009738 | 0.141588 | 3,828 | 91 | 185 | 42.065934 | 0.849057 | 0 | 0 | 0.378378 | 0 | 0 | 0.233063 | 0.052315 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.081081 | 0.013514 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77094c63eb0fe5e5d8597019b9d3226251e51e8f | 921 | py | Python | project_euler/001-050/10.py | floppp/programming_challenges | 42df1b72faf5ddf907296f90e9b14e014d2ea13b | [
"MIT"
] | null | null | null | project_euler/001-050/10.py | floppp/programming_challenges | 42df1b72faf5ddf907296f90e9b14e014d2ea13b | [
"MIT"
] | null | null | null | project_euler/001-050/10.py | floppp/programming_challenges | 42df1b72faf5ddf907296f90e9b14e014d2ea13b | [
"MIT"
] | null | null | null | '''
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
'''
import time
from numpy import sqrt
def primos(n_max):
    """Yield the odd primes up to and including `n_max` (2 is never yielded)."""
    for candidate in range(3, n_max + 1, 2):
        # Trial division by odd numbers up to sqrt(candidate).
        has_divisor = False
        for divisor in range(3, int(sqrt(candidate)) + 1, 2):
            if candidate % divisor == 0:
                has_divisor = True
                break
        if not has_divisor:
            yield candidate
# Sum all primes below two million (Project Euler #10).
# `suma` starts at 2 — the only even prime — because primos() yields odd primes only.
n = 1
p = primos(3000000)
a = 0
suma = 2
# Bug fix: time.clock() was removed in Python 3.8; perf_counter() is the
# drop-in replacement for measuring elapsed wall time.
t0 = time.perf_counter()
while a < 2000000:
    n += 1
    suma += a
    a = next(p)
print("iteraciones: {} - primo: {} - suma: {}".format(n, a, suma))
print("La suma es {}".format(suma))
print("Tiempo transcurrido = {:.3f} seg".format(time.perf_counter() - t0))
# RESPUESTA: 142913828922 - t: 9.377 seg
# iteraciones: 148934 - primo: 2000003 - suma: 142913828922
# La suma es 142913828922
# Tiempo transcurrido = 9.377 seg | 24.891892 | 75 | 0.560261 | 143 | 921 | 3.594406 | 0.447552 | 0.023346 | 0.031128 | 0.097276 | 0.143969 | 0.143969 | 0.143969 | 0.143969 | 0 | 0 | 0 | 0.152568 | 0.281216 | 921 | 37 | 76 | 24.891892 | 0.623867 | 0.39848 | 0 | 0 | 0 | 0 | 0.15342 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.15 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
770a5744015c669293aefcf0911dbfc79a39291b | 927 | py | Python | socket/server.py | Panfilwk/SocketVsHttpDemo | f3da4689dfd838729f35fb29b96b438105a20784 | [
"MIT"
] | null | null | null | socket/server.py | Panfilwk/SocketVsHttpDemo | f3da4689dfd838729f35fb29b96b438105a20784 | [
"MIT"
] | null | null | null | socket/server.py | Panfilwk/SocketVsHttpDemo | f3da4689dfd838729f35fb29b96b438105a20784 | [
"MIT"
] | null | null | null | import tornado.ioloop
import tornado.web
import tornado.websocket
class MainHandler(tornado.web.RequestHandler):
    """Serves the demo page at the root URL."""
    def get(self):
        # Render the button-counter demo page.
        self.render('index.html')
class ReSTHandler(tornado.websocket.WebSocketHandler):
    """WebSocket handler broadcasting a shared button-press counter.

    `pressCount` is a class-level dict so every handler instance shares a
    single mutable counter; `activeConnections` tracks the open sockets
    that should receive broadcasts.
    """
    pressCount = {'count': 0}
    activeConnections = set()

    def open(self):
        # Register this connection and send the current count to the new client.
        self.activeConnections.add(self)
        print(self.activeConnections)  # debug output of current connections
        self.write_message(str(self.pressCount['count']))

    def on_message(self, message):
        # Any incoming message counts as one press; broadcast the new total.
        self.pressCount['count'] += 1
        for sock in self.activeConnections:
            sock.write_message(str(self.pressCount['count']))

    def on_close(self):
        # Bug fix: without this, closed sockets stayed in `activeConnections`
        # forever and subsequent broadcasts raised on the dead connection.
        self.activeConnections.discard(self)
class Server:
    """Builds the tornado application and runs it on port 8888."""

    def start_app(self):
        """Start serving; blocks on the tornado IOLoop."""
        routes = [
            (r"/", MainHandler),
            (r"/count", ReSTHandler),
        ]
        app = tornado.web.Application(routes)
        app.listen(8888)
        tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
    # Entry point: start the demo server (blocks on the IOLoop).
    server = Server()
    server.start_app()
770d11d6fa51fd28850eee16d83a6c995e1f6741 | 1,996 | py | Python | models.py | Cranial-XIX/TRPO-and-its-variant | aa74102d013c998a666683667073c22aad8c5bce | [
"MIT"
] | null | null | null | models.py | Cranial-XIX/TRPO-and-its-variant | aa74102d013c998a666683667073c22aad8c5bce | [
"MIT"
] | null | null | null | models.py | Cranial-XIX/TRPO-and-its-variant | aa74102d013c998a666683667073c22aad8c5bce | [
"MIT"
] | null | null | null | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
class Policy(nn.Module):
    """Gaussian policy network with two 64-unit hidden layers.

    Outputs the action mean plus a learned, state-independent log
    standard deviation (and its exponential).
    """

    def __init__(self, dim_inputs, dim_outputs):
        super(Policy, self).__init__()
        self.affine1 = nn.Linear(dim_inputs, 64)
        self.affine2 = nn.Linear(64, 64)
        self.action_mean = nn.Linear(64, dim_outputs)
        # Shrink the head's initial weights and zero its bias so initial
        # action means start near zero.
        self.action_mean.weight.data.mul_(0.1)
        self.action_mean.bias.data.mul_(0.0)
        # One log-std per action dimension, shared across states.
        self.action_log_std = nn.Parameter(torch.zeros(1, dim_outputs))
        # Episode bookkeeping used by the training loop.
        self.saved_actions = []
        self.rewards = []
        self.final_value = 0
        self.act = nn.LeakyReLU()

    def forward(self, x):
        hidden = self.act(self.affine1(x))
        hidden = self.act(self.affine2(hidden))
        mean = self.action_mean(hidden)
        log_std = self.action_log_std.expand_as(mean)
        return mean, log_std, torch.exp(log_std)
class Value(nn.Module):
    """State-value network: two 64-unit hidden layers, scalar output."""

    def __init__(self, dim_inputs):
        super(Value, self).__init__()
        self.affine1 = nn.Linear(dim_inputs, 64)
        self.affine2 = nn.Linear(64, 64)
        self.value_head = nn.Linear(64, 1)
        # Shrink the head's initial weights and zero its bias so initial
        # value estimates start near zero.
        self.value_head.weight.data.mul_(0.1)
        self.value_head.bias.data.mul_(0.0)
        self.act = nn.LeakyReLU()

    def forward(self, x):
        hidden = self.act(self.affine1(x))
        hidden = self.act(self.affine2(hidden))
        return self.value_head(hidden)
class Adv(nn.Module):
    """Advantage estimator: two 32-unit hidden layers with dropout."""

    def __init__(self, dim_inputs, dropout):
        super(Adv, self).__init__()
        self.affine1 = nn.Linear(dim_inputs, 32)
        self.affine2 = nn.Linear(32, 32)
        self.adv_head = nn.Linear(32, 1)
        self.act = nn.LeakyReLU()
        self.drop = nn.Dropout(p=dropout)

    def forward(self, x):
        hidden = self.drop(self.act(self.affine1(x)))
        hidden = self.drop(self.act(self.affine2(hidden)))
        return self.adv_head(hidden)
770f2b75c70b18d41508ca22916ab1638811f0e1 | 1,008 | py | Python | warehouse/migrations/versions/a65114e48d6f_set_user_last_login_automatically_in_.py | matt-land/warehouse | 0acb5d94528099ed5356253457cf8dc0b4e50aad | [
"Apache-2.0"
] | 2 | 2015-11-08T12:57:16.000Z | 2020-11-19T09:43:14.000Z | warehouse/migrations/versions/a65114e48d6f_set_user_last_login_automatically_in_.py | matt-land/warehouse | 0acb5d94528099ed5356253457cf8dc0b4e50aad | [
"Apache-2.0"
] | 11 | 2020-01-06T18:55:57.000Z | 2022-03-11T23:27:05.000Z | warehouse/migrations/versions/a65114e48d6f_set_user_last_login_automatically_in_.py | matt-land/warehouse | 0acb5d94528099ed5356253457cf8dc0b4e50aad | [
"Apache-2.0"
] | 1 | 2020-12-18T08:29:01.000Z | 2020-12-18T08:29:01.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Set User.last_login automatically in the DB
Revision ID: a65114e48d6f
Revises: 104b4c56862b
Create Date: 2016-06-11 00:28:39.176496
"""
from alembic import op
import sqlalchemy as sa
revision = 'a65114e48d6f'
down_revision = '104b4c56862b'
def upgrade():
    """Apply: give accounts_user.last_login a DB-side default of now()."""
    op.alter_column(
        "accounts_user",
        "last_login",
        server_default=sa.func.now(),
    )
def downgrade():
    """Revert: drop the DB-side default on accounts_user.last_login."""
    op.alter_column("accounts_user", "last_login", server_default=None)
| 26.526316 | 74 | 0.739087 | 147 | 1,008 | 5 | 0.659864 | 0.081633 | 0.053061 | 0.043537 | 0.127891 | 0.127891 | 0.127891 | 0.127891 | 0.127891 | 0 | 0 | 0.069712 | 0.174603 | 1,008 | 37 | 75 | 27.243243 | 0.813702 | 0.645833 | 0 | 0 | 0 | 0 | 0.20649 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7710a19ccdfec1a2c18670ed66b7c9c696c643f6 | 11,093 | py | Python | drosoph_vae/settings/config.py | samuelsmal/drosophVAE | 4b1887e55a5eed1d26c07b6c43de59ffab5fc7c7 | [
"MIT"
] | null | null | null | drosoph_vae/settings/config.py | samuelsmal/drosophVAE | 4b1887e55a5eed1d26c07b6c43de59ffab5fc7c7 | [
"MIT"
] | null | null | null | drosoph_vae/settings/config.py | samuelsmal/drosophVAE | 4b1887e55a5eed1d26c07b6c43de59ffab5fc7c7 | [
"MIT"
] | null | null | null | from functools import reduce
from datetime import datetime
import json
import numpy as np
from drosoph_vae.helpers.misc import EEnum, get_hostname
from drosoph_vae.settings.data import Behavior
class DataType(EEnum):
    """Input data representation used by a run."""
    # Can't start a member with a number...
    ANGLE_3D = 0  # 3d angle data
    POS_2D = 1    # 2d position data
class ModelType(EEnum):
    """Available model implementations (see `model_impl` in RunConfig)."""
    TEMP_CONV = 0       # presumably a temporal convolution model — confirm in model code
    PADD_CONV = 1       # presumably a padded convolution variant — confirm in model code
    SKIP_PADD_CONV = 2  # presumably padded convolution with skip connections — confirm in model code
class BaseConfig(dict):
    """Dict-backed configuration object with hashing and nested lookup."""

    def __init__(self, **kwargs):
        super(BaseConfig, self).__init__()
        self.update(kwargs)

    def __getitem__(self, key):
        return super(BaseConfig, self).__getitem__(key)

    def __setitem__(self, key, val):
        super(BaseConfig, self).__setitem__(key, val)

    def hash(self, digest_length=5):
        """Return a short string digest of the config.

        The current timestamp is mixed in, so the digest changes on every
        call — it identifies a run, not just the config values.
        """
        payload = {**self, '_executed_at_': str(datetime.now())}
        digest = hash(json.dumps(payload, sort_keys=True))
        return str(digest)[:digest_length]

    @classmethod
    def value(cls, *keys):
        """Helper to return one single value (may be what ever that value is).
        E.g. value('hubert', 'fly_id')
        """
        try:
            val = reduce(lambda acc, key: acc[key], keys, cls())
        except KeyError:
            raise ValueError("Could not find a value for the given key: {}".format(keys))
        # Placeholder values containing 'UNKOWN' (sic) count as unset.
        if isinstance(val, str) and 'UNKOWN' in val:
            raise ValueError('{0} value is not set! Reading {1}'.format(keys, val))
        return val
####################################################
# #
# Setup config: Paths, experiment definitions, ... #
# #
####################################################
class SetupConfig(BaseConfig):
    """Static experiment setup: recording constants, black lists, and paths.

    Keyword arguments override ``DEFAULT_VALUES``; host-specific paths are
    filled in at construction time.
    """
    # some values are "dynamic" constants, see __init__ method
    DEFAULT_VALUES = {
        'debug': True,
        'frames_per_second': 100,
        'legs': [0, 1, 2],  # no longer in use
        'camera_of_interest': 1,
        'n_axis': 2,
        'n_tracked_points': 5,  # per leg, igoring the rest for now
        'n_cameras': 7,
        'experiment_black_list': ['181220_Rpr_R57C10_GC6s_tdTom'],  # all other experiments are used
        'fly_black_list': ['180920_aDN_PR-Fly2-005_SG1',
                           '180921_aDN_CsCh-Fly6-003_SG1'],  # for those flys the angle conversion give odd results,
                                                             # and their distributions seem way off compared to the others)
        'hubert': {  # there can only be one. hubert is the fly with which I started, use him to debug stuff
            'study_id': '180920_aDN_CsCh',
            'experiment_id': '001_SG1',
            'fly_id': 'Fly2',
        },
        'training': {
            'vae': {
                'n_epochs': 200,
                'n_epochs_eval': 25,
            },
            'supervised': {
                'n_epochs': 50,
                'n_epochs_eval': 10,
            }
        },
    }

    def __init__(self, **kwargs):
        # Note: super(BaseConfig, self) deliberately skips BaseConfig.__init__
        # and initialises the dict directly from the merged mapping.
        super(BaseConfig, self).__init__({**SetupConfig.DEFAULT_VALUES, **kwargs})

        # In debug mode off the lab server, shrink epoch counts for fast runs.
        if self['debug'] and not SetupConfig.runs_on_lab_server():
            self['training'] = {'vae': {'n_epochs': 6,
                                        'n_epochs_eval': 2},
                                'supervised': {'n_epochs': 3,
                                               'n_epochs_eval': 1}}

        # host specific location for data
        if get_hostname() == 'upramdyapc6':
            data_root_path = '/home/samuel/neural_clustering_data'
            experiment_root_path = '/ramdya-nas/SVB/experiments'
        elif get_hostname() == 'contosam':
            data_root_path = '/home/sam/proj/epfl/neural_clustering_data'
            experiment_root_path = '/home/sam/Dropbox'
        else:
            data_root_path = '/home/sam/proj/epfl/neural_clustering_data'
            experiment_root_path = f"{data_root_path}/experiments"

        # This is so much pain, but since the data is all somewhere in some folder...
        self['data_root_path'] = data_root_path
        self['experiment_root_path'] = experiment_root_path
        self['video_root_path'] = f"{data_root_path}/videos"
        self['figures_root_path'] = f"{data_root_path}/figures"
        self['grid_search_root_path'] = f"{data_root_path}/grid_search"
        self['experiment_path_template'] = '{base_path}/{study_id}/{fly_id}/{experiment_id}'
        # to be filled with `experiment_path_template` as `base_experiment_path`
        self['experiment_limb_pos_data_dir'] = '{base_experiment_path}/behData/images/'
        self['fly_image_template'] = f"{{base_experiment_path}}/behData/images/camera_{self['camera_of_interest']}_img_{{image_id:0>6}}.jpg"

    @classmethod
    def runs_on_lab_server(cls):
        # NOTE(review): 'upramdyapc6' appears to be the lab server's hostname — confirm.
        return get_hostname() == 'upramdyapc6'
#################################################
# #
# Run config, model definition, hyperparameters #
# #
#################################################
# Note that not all variables will be used by all models
class RunConfig(BaseConfig):
    """Per-run model definition and hyperparameters.

    Not every value is used by every model implementation. Keyword
    arguments override ``DEFAULT_VALUES``; dependent values such as
    ``n_conv_layers`` and ``latent_dim`` are derived in ``__init__``.
    """
    # some values are "dynamic" constants, see __init__ method
    DEFAULT_VALUES = {
        'debug': False,                # general flag for debug mode, triggers all `d_.*`-options.
        'd_zero_data': False,          # overwrite the data with zeroed out data, the overall shape is kept.
        'd_sinoid_data': False,
        'd_sinoid_cluster_data': True,
        'd_no_compression': False,     # if true, the latent_space will be the same dimension as the input,
                                       # allowing the model to learn the identity function.
        'use_single_fly': True,
        'data_type': DataType.ANGLE_3D,
        'use_time_series': True,       # triggers time series application, without this the model is only dense layers
        'time_series_length': 16,      # note that this is equal to the minimal wanted receptive field length
        'conv_layer_kernel_size': 2,   # must be set; `n_conv_layers` can be derived from it (see __init__).
        'n_conv_layers': None,         # automatically computed from `conv_layer_kernel_size` when None.
        'latent_dim': None,            # should be adapted given the input dim
        'batch_size': 128,             # if you plan on using the supervised parts as well keep this
                                       # small. it will create a batch_size^3 dim tensor to compute
                                       # the loss
        'loss_weight_reconstruction': 1.0,
        'loss_weight_kl': 0.0,         # if zero it will not even be computed
        'dropout_rate': 0.,
        'with_batch_norm': True,
        'model_impl': ModelType.SKIP_PADD_CONV,
        'optimizer': 'Adam',
        'train_test_ratio': 0.7,
        'preprocessing': {
            'common': {
                'blacklist_behavior': [Behavior.REST, Behavior.NONE],
            },
            DataType.ANGLE_3D: {
                'low_variance_cutoff': 0.,
                'normalize_features': False
            },
            DataType.POS_2D: {
                'normalize_for_each_experiment': True,
            }
        },
        'model_created_at': None,
        'vae_learning_rate': 1e-4,
        'supervised_learning_rate': 1e-5,
    }

    def __init__(self, **kwargs):
        """Merge ``DEFAULT_VALUES`` with ``kwargs`` and derive dependent values.

        Raises:
            NotImplementedError: if ``data_type`` is unsupported or
                ``conv_layer_kernel_size`` is None.
            ValueError: if no ``latent_dim`` rule exists for ``data_type``.
        """
        # Note: super(BaseConfig, self) deliberately skips BaseConfig.__init__
        # and initialises the dict directly from the merged mapping.
        super(BaseConfig, self).__init__({**RunConfig.DEFAULT_VALUES, **kwargs})

        if self['use_single_fly']:
            self['batch_size'] = 128
            self['train_test_ratio'] = 0.9

        if self['data_type'] not in DataType:
            raise NotImplementedError(f"This data type is not supported. Must be one of either"
                                      f"{DataType.list()}")

        # Bug fix: this check must run *before* the `n_conv_layers`
        # computation below, which would otherwise crash with a TypeError
        # on `None - 1` instead of raising a clear error.
        if self['conv_layer_kernel_size'] is None:
            raise NotImplementedError('conv_layer_kernel_size must be set '
                                      '(deriving it from n_conv_layers is not implemented)')

        if self['n_conv_layers'] is None:
            # Smallest number of layers whose receptive field covers
            # `time_series_length` given the kernel size.
            # Bug fix: `np.int` was removed in NumPy 1.24; use builtin `int`.
            self['n_conv_layers'] = int(np.ceil(np.log2((self['time_series_length'] - 1) / (2 * (self['conv_layer_kernel_size'] - 1)) + 1)))

        if self['data_type'] == DataType.POS_2D:
            # goes from 15 * 2 = 30 -> 8
            self['latent_dim'] = 8
        elif self['data_type'] == DataType.ANGLE_3D:
            # goes from 18 -> 4
            self['latent_dim'] = 4
        else:
            raise ValueError(f"this data_type is not supported: {self['data_type']}")

    def preprocessing_parameters(self):
        """Return the common preprocessing options merged with the
        data-type-specific ones (type-specific values win)."""
        return {**self.value('preprocessing', 'common'),
                **(self['preprocessing'][self['data_type']])}

    def description(self, short=True, verbosity=6):
        """Build a human-readable run identifier from the config.

        Args:
            short: use abbreviated field labels.
            verbosity: number of leading fields to include.
        """
        def _bool_(v):
            return 'T' if self[v] else 'F'

        def _combine_(a, b):
            if a == '':
                return b
            else:
                return f"{a}-{b}"

        values_of_interest = [
            ('model_impl', '', self.get('model_impl').name),
            ('data', '', self['data_type'].name),
            ('time', 't', self['time_series_length'] if self['use_time_series'] else 'F'),
            ('latent_dim', 'ld', self['latent_dim']),
            ('vae_learning_rate', 'vlr', self['vae_learning_rate']),
            ('supervised_learning_rate', 'slr', self['supervised_learning_rate']),
            ('use_single_fly', 'mf', _bool_('use_single_fly')),
            ('loss_weight_recon', 'lwr', self.get('loss_weight_reconstruction')),
            ('loss_weight_kl', 'lwkl', self.get('loss_weight_kl')),
            ('dropout_rate', 'dr', self.get('dropout_rate')),
            ('kernel', 'k', self['conv_layer_kernel_size']),
            ('optimizer', 'opt', self.get('optimizer')),
            ('with_batch_norm', 'bn', _bool_('with_batch_norm')),
            ('n_clayers', 'ncl', self['n_conv_layers']),
        ]

        values_of_interest = values_of_interest[:verbosity]
        descr_idx = 1 if short else 0
        descr_str = '-'.join((_combine_(v[descr_idx], v[2]) for v in values_of_interest))

        if self['debug']:
            # Append the names of all active debug flags, e.g. `_d_sinoid_data`.
            descr_str += '_' + ''.join([k for k, v in self.items() if k.startswith('d_') and v])

        if self['model_created_at']:
            descr_str += '_' + self['model_created_at']

        return descr_str

    def value(self, *keys):
        """Helper to return one single value (may be what ever that value is).
        E.g. value('hubert', 'fly_id')

        NOTE THAT THIS IS NOT THE SAME METHOD FROM THE BASE CLASS.
        This one here is "alive" (reads this instance), the base-class one
        is "dead" (reads a fresh default instance).
        """
        try:
            val = reduce(lambda d, k: d[k], keys, self)
            if isinstance(val, str) and 'UNKOWN' in val:
                raise ValueError('{0} value is not set! Reading {1}'.format(keys, val))
        except KeyError:
            raise ValueError("Could not find a value for the given key: {}".format(keys))
        return val

    @classmethod
    def POS_2D(cls):
        """Preset config for 2d-position data."""
        return cls(data_type=DataType.POS_2D)

    @classmethod
    def ANGLE_3D(cls):
        """Preset config for 3d-angle data."""
        return cls(data_type=DataType.ANGLE_3D)
| 40.338182 | 143 | 0.554223 | 1,320 | 11,093 | 4.391667 | 0.290152 | 0.02346 | 0.01863 | 0.016388 | 0.253579 | 0.208729 | 0.172158 | 0.150768 | 0.136967 | 0.127997 | 0 | 0.017813 | 0.311728 | 11,093 | 274 | 144 | 40.485401 | 0.741454 | 0.188858 | 0 | 0.135417 | 0 | 0.005208 | 0.282724 | 0.100299 | 0.005208 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0 | 0.03125 | 0.036458 | 0.234375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77113ba42c3ff66adcd83ef5fbb3a1affd209149 | 11,521 | py | Python | pype/vendor/pico/__init__.py | tws0002/pype | 80b1aad9990f6c7efabf0430a3da6633054bf4a8 | [
"MIT"
] | null | null | null | pype/vendor/pico/__init__.py | tws0002/pype | 80b1aad9990f6c7efabf0430a3da6633054bf4a8 | [
"MIT"
] | null | null | null | pype/vendor/pico/__init__.py | tws0002/pype | 80b1aad9990f6c7efabf0430a3da6633054bf4a8 | [
"MIT"
] | null | null | null | """
Pico is a minimalistic HTTP API framework for Python.
Copyright (c) 2012, Fergal Walsh.
License: BSD
"""
from __future__ import unicode_literals
import sys
import traceback
import inspect
import importlib
import logging
import os.path
from io import open
from collections import defaultdict
from functools import partial
from werkzeug.exceptions import HTTPException, NotFound, BadRequest, InternalServerError
from werkzeug.wrappers import Request, Response
from . import pragmaticjson as json
from .decorators import base_decorator
from .wrappers import JsonResponse, JsonErrorResponse
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())  # library default: silent unless the app configures logging

# Python 2/3 compatibility: `unicode` only exists on Python 2.
try:
    unicode
except NameError:
    unicode = str

__author__ = 'Fergal Walsh'
__version__ = '2.0.4'

# Maps module name -> {function name -> exposed handler}; filled by @expose.
registry = defaultdict(dict)
def expose(*args, **kwargs):
    """Decorator factory marking a function as an HTTP-exposed endpoint.

    The decorated function is wrapped via `base_decorator` and recorded in
    the module-level `registry` under its module and function name.
    """
    @base_decorator()
    def call_through(wrapped, args, kwargs, request):
        # Plain pass-through: invoke the wrapped function with the parsed args.
        return wrapped(*args, **kwargs)

    def decorator(func):
        func = call_through(func)
        registry[func.__module__][func.__name__] = func
        return func
    return decorator
def prehandle(*args, **kwargs):
    """Decorator factory registering a function as its module's request pre-handler.

    The function is stored as `_prehandle` on its own module object and
    returned unchanged.
    """
    def decorator(func):
        sys.modules[func.__module__]._prehandle = func
        return func
    return decorator
class PicoApp(object):
def __init__(self, debug=False):
self.debug = debug
self.registry = defaultdict(dict)
self.modules = {}
self.definitions = {}
self.aliases = {}
self.url_map = {}
path = os.path.dirname((inspect.getfile(inspect.currentframe())))
with open(path + '/pico.min.js') as f:
self._pico_js = f.read()
def register_module(self, module, alias=None):
if type(module) == str:
module = importlib.import_module(module)
module_name = module.__name__
alias = alias or module_name
self.aliases[module_name] = alias
self.modules[alias] = module
self.registry[alias] = registry[module_name]
self.definitions[alias] = {}
for func_name, func in self.registry[alias].items():
self.definitions[alias][func_name] = self.function_definition(func)
self._build_url_map()
def _get_alias(self, module_name):
return self.aliases.get(module_name, module_name)
def _build_url_map(self):
self.url_map = {}
self.url_map['/pico.js'] = self.pico_js
self.url_map['/'] = self.app_definition_handler
self.url_map['/picoapp.js'] = partial(self.app_definition_handler, 'pico.loadAppDefinition')
for module_name in self.registry:
url = self.module_url(module_name)
# assign definition response handler to function to urls
self.url_map[url] = partial(self.module_definition_handler, module_name)
self.url_map[url + '.js'] = partial(self.module_definition_handler, module_name, 'pico.loadModuleDefinition')
for func_name, func in self.registry[module_name].items():
url = self.func_url(func)
# assign the handler function to the the url
self.url_map[url] = func
def module_url(self, module_name, pico_url='/'):
module_path = module_name.replace('.', '/')
url = '{pico_url}{module}'.format(module=module_path, pico_url=pico_url)
return url
def func_url(self, func, pico_url='/'):
module_path = self._get_alias(func.__module__).replace('.', '/')
url = '{pico_url}{module}/{func_name}'.format(module=module_path, func_name=func.__name__, pico_url=pico_url)
return url
def app_definition_handler(self, callback=None, _request=None):
app_def = self.app_definition(pico_url=_request.url_root)
response = JsonResponse(app_def)
if callback:
response = response.to_jsonp(callback)
return response
def module_definition_handler(self, module_name, callback=None, _request=None):
module_def = self.module_definition(module_name, pico_url=_request.url_root)
response = JsonResponse(module_def)
if callback:
response = response.to_jsonp(callback)
return response
def app_definition(self, pico_url='/'):
    """Return a dict describing the app: its base url and all module definitions."""
    return {
        'url': pico_url,
        'modules': [self.module_definition(name, pico_url) for name in self.registry],
    }
def module_definition(self, module_name, pico_url='/'):
d = {}
d['name'] = module_name
d['doc'] = inspect.getdoc(self.modules[module_name])
d['url'] = self.module_url(module_name, pico_url)
d['functions'] = []
for func_name, func in self.registry[module_name].items():
func_def = dict(self.definitions[module_name][func_name])
func_def['url'] = self.func_url(func, pico_url)
d['functions'].append(func_def)
return d
def function_definition(self, func, pico_url='/'):
    """Build the serialisable definition dict for an exposed function.

    Returns a dict with the function name, cleaned docstring, url and the
    list of its public arguments (with defaults where present), plus an
    ``accept_extra_args`` flag when the function takes ``**kwargs``. Any
    custom annotations (except ``request_args``) are merged in.
    """
    annotations = dict(func._annotations)
    # request_args are injected by the framework, never exposed to clients
    request_args = set(annotations.pop('request_args', []))
    # inspect.getargspec() was removed in Python 3.11; use the compatible
    # getfullargspec() and fall back to getargspec() on old interpreters.
    try:
        spec = inspect.getfullargspec(func)
        varkw = spec.varkw
    except AttributeError:  # pragma: no cover - Python 2 fallback
        spec = inspect.getargspec(func)
        varkw = spec.keywords
    args = []
    for i, arg_name in enumerate(spec.args):
        if arg_name and arg_name != 'self' and arg_name not in request_args:
            arg = {'name': arg_name}
            # defaults align with the tail of the argument list, so the
            # index into spec.defaults is negative for non-default args
            di = (len(spec.defaults or []) - len(spec.args)) + i
            if di >= 0:
                arg['default'] = spec.defaults[di]
            args.append(arg)
    d = dict(
        name=func.__name__,
        doc=inspect.getdoc(func),
        url=self.func_url(func, pico_url),
        args=args,
    )
    if varkw is not None:
        d['accept_extra_args'] = True
    d.update(annotations)
    return d
def pico_js(self, **kwargs):
    """Serve the bundled pico.js client library as javascript."""
    return Response(self._pico_js, content_type='text/javascript')
def parse_args(self, request):
    """Assemble handler keyword arguments from every part of *request*.

    Precedence (later sources override earlier ones): GET query string,
    POST form fields, uploaded files, then a JSON request body. String
    values from the query string / form are JSON-decoded when possible.
    The request itself is always available under the '_request' key.
    """
    # start from the GET querystring, then let form data override it
    args = _multidict_to_dict(request.args)
    args.update(_multidict_to_dict(request.form))
    # try to parse any strings as json (list-valued params item by item)
    for key in args:
        value = args[key]
        if isinstance(value, list):
            for i, item in enumerate(value):
                value[i] = self._try_json_load(item)
        else:
            args[key] = self._try_json_load(value)
    # uploaded files override plain values
    args.update(_multidict_to_dict(request.files))
    # a JSON request body overrides everything else
    if 'application/json' in request.headers.get('content-type', ''):
        data = request.get_data(as_text=True)
        if data:
            args.update(self.json_load(data))
    args['_request'] = request
    return args
def json_load(self, value):
    """Decode *value* from JSON; override for custom decoding."""
    return json.loads(value)
def json_dump(self, value):
    """Encode *value* as JSON; override for custom encoding."""
    return json.dumps(value)
def _try_json_load(self, value):
    """Best-effort JSON decode: return *value* unchanged when it is not JSON."""
    try:
        result = self.json_load(value)
    except ValueError:
        result = value
    return result
def dispatch_request(self, request):
    """Resolve *request.path* to a handler and invoke it.

    A trailing slash is stripped; if the bare path is unknown, the
    script root is prepended as a second attempt before giving up
    with a 404 response.
    """
    path = request.path
    if len(path) > 1 and path.endswith('/'):
        path = path[:-1]
    request.path = path
    handler = self.url_map.get(path)
    if handler is None:
        # retry with the script root prefixed (mounted applications)
        prefixed = request.script_root + path
        handler = self.url_map.get(prefixed)
        if handler is None:
            return NotFound()
        request.path = prefixed
    return self.handle_request(request, handler)
def check_args(self, handler, kwargs):
    """Validate *kwargs* against the stored definition of *handler*.

    Raises BadRequest listing unexpected and/or missing parameters.
    Names starting with '_' are internal and never reported as extra;
    extras are also allowed when the handler accepts **kwargs.
    """
    module_name = self._get_alias(handler.__module__)
    func_def = self.definitions[module_name][handler.__name__]
    declared = {arg['name']: arg for arg in func_def['args']}
    supplied = set(kwargs.keys())
    missing = [name for name in set(declared.keys()) - supplied
               if 'default' not in declared[name]]
    extra = [name for name in supplied - set(declared.keys())
             if name[0] != '_']
    problems = ''
    if extra and not func_def.get('accept_extra_args', False):
        problems += 'Unexpected parameters: [%s]. ' % ', '.join(extra)
    if missing:
        problems += 'Missing required parameters: [%s]. ' % ', '.join(missing)
    if problems:
        raise BadRequest(problems)
def prehandle(self, request, kwargs):
    """Default pre-handler: set the debug flag and extract the auth token."""
    # '_debug' is only honoured when the app itself runs in debug mode;
    # it is popped so it never reaches the handler as an argument.
    request.use_debugger = bool(self.debug and kwargs.pop('_debug', None))
    try:
        auth = request.headers.get('Authorization', '')
        request.token = auth.split('Token ')[-1]
    except Exception:
        request.token = None
def posthandle(self, request, response):
    """Hook called after every request; default is a no-op for subclasses to override."""
    pass
def handle_exception(self, exception, request, **kwargs):
    """Convert an exception raised by a handler into a JSON error response.

    HTTP errors pass through with their own status code; anything else is
    logged and reported as a 500. In debugger mode the original exception
    is re-raised so the interactive debugger middleware can catch it; in
    debug mode a filtered stack trace is attached to the response.
    """
    if isinstance(exception, HTTPException):
        # expected errors (404, 400, ...) keep their own status code
        return JsonErrorResponse(exception, **kwargs)
    else:
        logger.exception(exception)
        if request.use_debugger:
            # re-raise the active exception for the werkzeug debugger
            raise
        e = InternalServerError()
        if self.debug:
            _, _, exc_tb = sys.exc_info()
            trace = traceback.extract_tb(exc_tb)
            # hide pico-internal frames from the reported trace
            trace = ['%s:%i in %s: %s' % t for t in trace if '/pico/' not in t[0]]
            del exc_tb  # break the traceback reference cycle
            d = dict(
                name=type(exception).__name__,
                # NOTE(review): ``unicode`` exists only on Python 2; this
                # line fails under Python 3 — confirm the intended target.
                message=unicode(exception),
                stack_trace=trace,
            )
            kwargs['__debug__'] = d
    return JsonErrorResponse(e, **kwargs)
def handle_request(self, request, handler):
    """Run *handler* for *request*: parse arguments, run hooks, build the reply.

    Exceptions are converted to JSON error responses via handle_exception();
    a ``_callback`` argument turns the reply into JSONP. posthandle() always
    runs afterwards.
    """
    try:
        kwargs = self.parse_args(request)
        callback = kwargs.pop('_callback', None)
        if hasattr(handler, '__module__') and handler.__module__ in self.aliases:
            # handler belongs to a registered user module: run the app and
            # module pre-handlers and validate the supplied arguments
            module = self.modules.get(self._get_alias(handler.__module__))
            if module and self.prehandle:
                self.prehandle(request, kwargs)
            if hasattr(module, '_prehandle'):
                module._prehandle(request, kwargs)
            self.check_args(handler, kwargs)
        result = handler(**kwargs)
        if isinstance(result, Response):
            response = result
        else:
            response = JsonResponse(json_string=self.json_dump(result))
        if callback:
            response = response.to_jsonp(callback)
    except Exception as e:
        response = self.handle_exception(e, request)
    finally:
        # NOTE(review): if handle_exception re-raises (debugger mode),
        # ``response`` is unbound here and this line raises
        # UnboundLocalError — confirm whether that path matters.
        self.posthandle(request, response)
    return response
def wsgi_app(self, environ, start_response):
    """WSGI entry point: honour X-Script-Name and dispatch the request."""
    prefix = environ.get('HTTP_X_SCRIPT_NAME', '')
    if prefix:
        # reverse-proxy mounting: move the prefix from PATH_INFO to SCRIPT_NAME
        environ['SCRIPT_NAME'] = prefix
        path_info = environ['PATH_INFO']
        if path_info.startswith(prefix):
            environ['PATH_INFO'] = path_info[len(prefix):]
    request = Request(environ)
    request.app = self
    response = self.dispatch_request(request)
    return response(environ, start_response)
def __call__(self, environ, start_response):
    """Make the app instance directly usable as a WSGI callable."""
    return self.wsgi_app(environ, start_response)
def _multidict_to_dict(m):
    """ Returns a dict with list values only when a key has multiple values. """
    result = {}
    for key, values in m.lists():
        result[key] = values[0] if len(values) == 1 else values
    return result
| 35.449231 | 121 | 0.599514 | 1,360 | 11,521 | 4.852941 | 0.165441 | 0.040909 | 0.015152 | 0.012879 | 0.161515 | 0.119545 | 0.093788 | 0.031818 | 0.031818 | 0.031818 | 0 | 0.001964 | 0.292943 | 11,521 | 324 | 122 | 35.558642 | 0.808249 | 0.039059 | 0 | 0.172932 | 0 | 0 | 0.04814 | 0.006968 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112782 | false | 0.003759 | 0.06015 | 0.018797 | 0.278195 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7712c68e892849d5024854ccffea95e168d82721 | 11,822 | py | Python | src/ptpip/connection.py | DethCount/ptpip-d5300 | ed2f8a46600f2bb3330a7e5f4d5d47cf93fc10bd | [
"MIT"
] | null | null | null | src/ptpip/connection.py | DethCount/ptpip-d5300 | ed2f8a46600f2bb3330a7e5f4d5d47cf93fc10bd | [
"MIT"
] | null | null | null | src/ptpip/connection.py | DethCount/ptpip-d5300 | ed2f8a46600f2bb3330a7e5f4d5d47cf93fc10bd | [
"MIT"
] | null | null | null | import asyncio
import socket
import struct
import time
from ptpip.constants.cmd_type import CmdType
from ptpip.constants.property_type import PropertyType
from ptpip.constants.response_code import ResponseCode
from ptpip.constants.device.property_type import DevicePropertyType
from ptpip.constants.data_object_transfer_mode import DataObjectTransferMode
from ptpip.data_object.data_object import DataObject
from ptpip.data_object.device_info import DeviceInfo
from ptpip.data_object.device_prop_desc import DevicePropDesc
from ptpip.data_object.live_view_object import LiveViewObject
from ptpip.data_object.object_handle_array import ObjectHandleArray
from ptpip.data_object.object_info import ObjectInfo
from ptpip.data_object.object_prop_code_array import ObjectPropCodeArray
from ptpip.data_object.storage_id_array import StorageIdArray
from ptpip.data_object.storage_info import StorageInfo
from ptpip.event.factory import EventFactory
from ptpip.packet.packet import Packet
from ptpip.packet.stream_reader import StreamReader
from ptpip.packet.stream_writer import StreamWriter
from ptpip.packet.factory import PacketFactory
from ptpip.packet.cmd_request import CmdRequest
from ptpip.packet.cmd_response import CmdResponse
from ptpip.packet.start_data import StartDataPacket
from ptpip.packet.data import DataPacket
from ptpip.packet.end_data import EndDataPacket
from ptpip.packet.event_req import EventReq
from ptpip.packet.init_cmd_ack import InitCmdAck
from ptpip.packet.init_cmd_req import InitCmdReq
from ptpip.packet.ping import Ping
class Connection():
    """A PTP/IP connection to a camera.

    Maintains two TCP sessions (one for commands/data, one for events),
    a transaction counter, and queues for outgoing commands, decoded
    events, and decoded data objects.
    """

    DEFAULT_HOST = '192.168.1.1'
    DEFAULT_PORT = 15740

    """docstring for PtpIP"""
    def __init__(self, debug = False):
        super(Connection, self).__init__()
        self.debug = debug
        # command/data TCP session and the separate event TCP session
        self.session = None
        self.sessionEvents = None
        self.sessionId = None
        # outgoing commands, decoded events, decoded data objects
        self.cmdQueue = []
        self.eventQueue = []
        self.objectQueue = []
        self.lastTransactionId = 2021;

    def createTransaction(self):
        """Return the next PTP transaction id."""
        self.lastTransactionId += 1
        return self.lastTransactionId

    def open(self,
        host = None,
        port = None,
        transactionId = None
    ):
        """Connect both sessions, then open a PTP session on the camera."""
        # Open both sessions, first one for commands, second for events
        self.session = self.connect(host = host, port = port)
        self.sendReceivePacket(InitCmdReq(), self.session)
        self.sessionEvents = self.connect(host = host, port = port)
        self.sendReceivePacket(EventReq(), self.sessionEvents)
        cmd = CmdRequest(
            transactionId = transactionId \
                if transactionId != None \
                else self.createTransaction(),
            cmd = CmdType.OpenSession.value,
            param1 = self.sessionId,
            paramType1 = PropertyType.Uint32
        )
        self.sendReceivePacket(cmd, self.session)

    def communicationThread(self, delay = 0):
        """Worker loop: pop queued commands and send them over the command session."""
        try:
            while True:
                if len(self.cmdQueue) == 0:
                    # do a ping receive a pong (same as ping) as reply to keep the connection alive
                    # couldnt get any reply onto a propper Ping packet so i am querying the status
                    # of the device
                    """
                    reply = self.sendReceivePacket(
                        CmdRequest(
                            transactionId = self.createTransaction(),
                            cmd = CmdType.DeviceReady.value
                        ),
                        self.session
                    )
                    """
                else:
                    cmd = self.cmdQueue.pop()
                    reply = self.sendReceivePacket(cmd, self.session)
                    if self.debug \
                        and reply.code != ResponseCode.OK.value \
                        and reply.code != ResponseCode.DeviceBusy.value \
                    :
                        print("CCC Cmd reply is: " + str(reply.code))
                time.sleep(delay)
        except Exception as err:
            raise(Exception('Error in communication thread: ' + str(err)))
        print(str('End of communication'))

    def queueObject(self, dataObject):
        """Decode *dataObject* according to the command that produced it and queue it."""
        # raw (non-command) objects are queued untouched
        if not isinstance(dataObject.packet, CmdRequest):
            self.objectQueue.append(dataObject)
            return
        # events go to their own queue, one entry per decoded event
        if dataObject.packet.cmd == CmdType.GetEvent:
            events = EventFactory(dataObject.data).getEvents()
            for event in events:
                self.eventQueue.append(event)
            return
        if dataObject.packet.cmd == CmdType.GetDeviceInfo:
            device = DeviceInfo(dataObject.packet, dataObject.data)
            self.objectQueue.append(device)
            return
        if dataObject.packet.cmd == CmdType.GetDevicePropDesc \
            and dataObject.data != None \
        :
            devicePropDesc = DevicePropDesc(dataObject.packet, dataObject.data)
            self.objectQueue.append(devicePropDesc)
            return
        if dataObject.packet.cmd == CmdType.GetStorageIDs:
            storageIds = StorageIdArray(dataObject.packet, dataObject.data)
            self.objectQueue.append(storageIds)
            return
        if dataObject.packet.cmd == CmdType.GetStorageInfo:
            storage = StorageInfo(dataObject.packet, dataObject.data)
            self.objectQueue.append(storage)
            return
        if dataObject.packet.cmd == CmdType.GetObjectHandles:
            handles = ObjectHandleArray(dataObject.packet, dataObject.data)
            self.objectQueue.append(handles)
            return
        if dataObject.packet.cmd == CmdType.GetObjectInfo:
            self.objectQueue.append(ObjectInfo(dataObject.packet, dataObject.data))
            return
        if dataObject.packet.cmd == CmdType.GetObjectPropsSupported:
            self.objectQueue.append(ObjectPropCodeArray(dataObject.packet, dataObject.data))
            return
        if dataObject.packet.cmd == CmdType.GetLiveViewImage:
            self.objectQueue.append(LiveViewObject(dataObject.packet, dataObject.data))
            return
        # fallback: keep the undecoded data object
        self.objectQueue.append(dataObject)

    async def listenObjectDataQueue(self, delay = 0):
        """Async generator yielding every queued data object, forever."""
        while True:
            # print('OOO Objects in queue: ' + str(len(self.objectQueue)))
            for idx, dataObject in enumerate(self.objectQueue):
                yield dataObject
            # NOTE(review): time.sleep() blocks the event loop inside an
            # async generator — presumably asyncio.sleep() was intended.
            time.sleep(delay)
        pass

    async def listenEventQueue(self, delay = 0):
        """Async generator: poll the camera for events and yield queued ones."""
        # poll at most once per second
        if delay < 1:
            delay = 1
        while True:
            # print('OOO Events in queue: ' + str(len(self.eventQueue)))
            cmd = CmdRequest(
                cmd = CmdType.GetEvent.value,
                transactionId = self.createTransaction()
            )
            self.sendCmd(cmd)
            for idx, event in enumerate(self.eventQueue):
                yield event
            # NOTE(review): blocking sleep, see listenObjectDataQueue.
            time.sleep(delay)
        pass

    def sendCmd(self, packet):
        """Queue a command packet for the communication thread to send."""
        self.cmdQueue.append(packet)

    def connect(self, host = None, port = None):
        """Open a keep-alive TCP socket to the camera and return it."""
        if host == None:
            host = self.DEFAULT_HOST
        if port == None:
            port = self.DEFAULT_PORT
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            s.connect((host, port))
        except socket.error as err:
            # NOTE(review): if socket.socket() itself raised, ``s`` is
            # unbound here and this raises NameError instead.
            if s:
                s.close()
            raise(Exception("Could not open socket: " + str(err)))
        return s

    def sendReceivePacket(self, packet: Packet, session):
        """Send *packet* (plus any outgoing data phase) and read the reply.

        Handles the three-packet PTP/IP data phase in both directions and
        queues received payloads via queueObject().
        """
        if isinstance(packet, EventReq) and packet.sessionId is None:
            packet.sessionId = self.sessionId
        self.sendPacket(packet, session)
        # print(str(packet.transactionId))
        # send data object
        if packet.dataObject != None \
            and (
                packet.dataObjectTransferMode
                    == DataObjectTransferMode.Send
                or packet.dataObjectTransferMode
                    == DataObjectTransferMode.SendAndReceive
            ) \
        :
            self.sendPacket(
                StartDataPacket(
                    transactionId = packet.transactionId,
                    length = len(packet.dataObject.data)
                ),
                session
            )
            self.sendPacket(
                DataPacket(
                    content = packet.dataObject.data,
                    transactionId = packet.transactionId
                ),
                session
            )
            self.sendPacket(
                EndDataPacket(
                    transactionId = packet.transactionId
                ),
                session
            )
        # receive response
        reply = self.receivePacket(session)
        if isinstance(reply, InitCmdAck):
            # remember the session id granted by the camera
            self.sessionId = reply.sessionId
        # receive data object
        if packet.dataObjectTransferMode \
            != DataObjectTransferMode.NoTransfer \
        :
            dataLength = 0
            if isinstance(reply, StartDataPacket):
                dataLength = reply.length
                reply = self.receivePacket(session, request = packet)
                data = reply.data
                while isinstance(reply, DataPacket):
                    data += reply.content
                    reply = self.receivePacket(session, request = packet)
                if dataLength == len(data):
                    # full payload received: decode/queue it and read the
                    # final command response
                    self.queueObject(DataObject(packet, data = data))
                    reply = self.receivePacket(session, request = packet)
            else:
                # no data phase: queue the bare reply with empty payload
                self.queueObject(DataObject(reply, data = None))
        return reply

    def sendEventReq(self, packet: Packet, session):
        """Send an event request, defaulting its session id to ours."""
        # add the session id of the object itself if it is not specified in the package
        if packet.sessionId is None:
            packet.sessionId = self.sessionId
        self.sendPacket(packet, session)

    def sendPacket(self, packet, session):
        """Serialise *packet* and write it to *session* (debug-logged)."""
        if self.debug:
            print('---- SEND ----')
            print(str(packet))
        self.sendData(packet.pack(), session)
        if self.debug:
            print('---- End SEND ----')

    def receivePacket(self, session, request: Packet = None):
        """Read one length-prefixed message and decode it via PacketFactory."""
        if self.debug:
            print('---- RECV ----')
        packet = PacketFactory.createPacket(
            data = self.receiveData(session),
            request = request,
            sessionId = self.sessionId
        )
        if self.debug:
            print(str(packet))
            print('---- End RECV ----')
        return packet

    def sendData(self, data, session):
        """Write *data* prefixed with the total message length (payload + 4)."""
        err = session.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if 0 != err:
            raise OSError('Socket error: ' + str(err))
        session.send(
            StreamWriter() \
                .writeUint32(len(data) + 4) \
                .writeBytes(data) \
                .data
        )

    def receiveData(self, session, nbTries = 3):
        """Read one length-prefixed message from *session*, return its payload."""
        if self.debug:
            print('RECV DATA')
        err = session.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if 0 != err:
            raise OSError('Socket error: ' + str(err))
        # the first 4 bytes carry the total message length (prefix included)
        data = session.recv(4)
        if len(data) < 4:
            if nbTries > 1:
                # NOTE(review): the recursive retry's return value is
                # discarded and execution falls through with the short
                # ``data`` — looks like a bug; confirm intended behaviour.
                self.receiveData(session, nbTries - 1)
            else:
                raise(Exception('Communication lost'))
        reader = StreamReader(data = data)
        dataLength = reader.readUint32()
        if self.debug:
            print("RECV Data length: " + str(dataLength))
        # keep reading until the whole message (length prefix included) arrived
        while dataLength > len(reader.data):
            reader.data += session.recv(dataLength - len(reader.data))
        return reader.readRest()
| 33.301408 | 99 | 0.589917 | 1,140 | 11,822 | 6.063158 | 0.195614 | 0.036458 | 0.028212 | 0.02474 | 0.245804 | 0.174913 | 0.115307 | 0.078414 | 0.064525 | 0.064525 | 0 | 0.005291 | 0.32854 | 11,822 | 354 | 100 | 33.39548 | 0.865457 | 0.043817 | 0 | 0.225191 | 0 | 0 | 0.021946 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049618 | false | 0.007634 | 0.122137 | 0 | 0.240458 | 0.038168 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7712cf6206466d0ea2beef8574de8cc0252cad4c | 1,570 | py | Python | setup.py | hactar-is/yell | a61f0928dc89f3de0842a0aa11b7cd9c9004b9ef | [
"MIT"
] | null | null | null | setup.py | hactar-is/yell | a61f0928dc89f3de0842a0aa11b7cd9c9004b9ef | [
"MIT"
] | null | null | null | setup.py | hactar-is/yell | a61f0928dc89f3de0842a0aa11b7cd9c9004b9ef | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
import yell
# Distribution metadata handed to setuptools.setup() below.
METADATA = {
    'name': 'yell',
    'version': '0.6.0',
    'author': 'Alen Mujezinovic',
    'author_email': 'flashingpumpkin@gmail.com',
    'description': 'User notification library with pluggable backends. Compatible with popular frameworks such as Django, Flask, Celery.',
    'long_description': open('README.rst').read(),
    'url': 'https://github.com/caffeinehit/yell',
    'keywords': 'django flask celery user notifications yell buffalo',
    'include_package_data': True,
    'classifiers': [
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: MacOS X',
        'Environment :: Web Environment',
        'Environment :: Other Environment',
        'Environment :: X11 Applications',
        'Framework :: Django',
        'Framework :: Paste',
        'Framework :: Pylons',
        'Framework :: TurboGears',
        'Framework :: Twisted',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Communications',
        'Topic :: Communications :: Email',
        'Topic :: Database',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
    ],
    'packages': find_packages(),
    'test_suite': 'yell.tests',
}

if __name__ == '__main__':
    setup(**METADATA)
| 34.888889 | 135 | 0.63121 | 147 | 1,570 | 6.639456 | 0.687075 | 0.02459 | 0.034836 | 0.067623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005008 | 0.236943 | 1,570 | 44 | 136 | 35.681818 | 0.809683 | 0.012739 | 0 | 0 | 0 | 0 | 0.593932 | 0.016139 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77135215095bb43377bc4685c510bd101c3566fa | 3,696 | py | Python | hyperelastic/helpers.py | hecbarcab/energy-models | 0b9ec56f19c0062755be915664f9af2d968c8f00 | [
"MIT"
] | null | null | null | hyperelastic/helpers.py | hecbarcab/energy-models | 0b9ec56f19c0062755be915664f9af2d968c8f00 | [
"MIT"
] | null | null | null | hyperelastic/helpers.py | hecbarcab/energy-models | 0b9ec56f19c0062755be915664f9af2d968c8f00 | [
"MIT"
] | null | null | null | import sympy as sym
import sympy.utilities.codegen as cgen
import sympy.codegen.rewriting as copt
import numpy as np
import re
'''
Computes the gradient of f w.r.t. the given variables.
'''
def gradient(f, v):
    """Row-vector gradient of the scalar expression *f* w.r.t. symbols *v*."""
    row = sym.Matrix([f])
    return row.jacobian(v)
'''
Computes the hessian of f w.r.t. the given variables.
'''
def hessian(f, v):
    # Thin wrapper around sympy.hessian for symmetry with gradient() above.
    return sym.hessian(f, v)
'''
Helper tables that relate variable types and dimensions to specific Eigen types.
'''
# C scalar type -> Eigen type-suffix letter (e.g. MatrixXd, VectorXf).
DTypeToEigen = { "double": "d", "float": "f", "int": "i" }
# Indexed by number of dimensions: 0 -> plain scalar type, 1 -> VectorX?,
# 2 -> MatrixX?.  Each entry is a format string taking (dtype, suffix);
# the "%.0s" conversions consume-and-discard the unused argument.
NDimsToEigen = [ "%s%.0s", "%.0sVectorX%s", "%.0sMatrixX%s" ]
'''
Helper function to convert a generated SymPy variable to its corresponding
Eigen type. Also performs the changes necessary to ensure column-major access
to the data.
'''
def convert_var_to_eigen(var, lines, output=False):
    """Rewrite generated C code in *lines* so *var* uses Eigen types.

    var:    a sympy codegen argument (has .name, .dimensions, .get_datatype).
    lines:  list of generated C source lines.
    output: True when *var* is a result variable (gets a resize() and is
            passed by non-const reference).
    Returns the rewritten list of lines.
    """
    name = str(var.name)
    # dimensions entries look like (low, high) bounds, so high+1 is the
    # extent along that axis — assumption carried over from the original
    # code; TODO confirm against sympy codegen.
    shape = [] if var.dimensions is None else [ 1+siz for _, siz in var.dimensions if siz != 0 ]
    ndims = len(shape)
    dtype = var.get_datatype('c')
    mtype = NDimsToEigen[ndims] % (dtype, DTypeToEigen[dtype])
    # Replace definition of variable (for vector and matrix result types).
    lines = [ line.replace(dtype + ' *' + name, ("const " if not output else "") + mtype + '& ' + name) for line in lines ]
    # If the variable is an output, initialize with correct shape before assigning.
    if output is True and ndims > 0:
        index = np.where([ ("%s[0] =" % name) in line for line in lines ])[0][0]
        lines.insert(index, "    %s.resize(%s);" % (name, ", ".join([ str(size) for size in shape ])))
    # In case the result is a matrix, rewrite flat indices with Eigen's
    # parenthesis operator. SymPy emits row-major flat indices
    # (idx = row * n_cols + col), so decompose by the column count.
    # BUGFIX: the original computed the column as ``idx % shape[0]``
    # (the row count), which is only correct for square matrices.
    if ndims == 2:
        n_cols = shape[1]
        for j in range(len(lines)):
            match = re.search(r'%s\[([0-9]+)\]' % name, lines[j])
            if match:
                idx = int(match.group(1))
                row, col = divmod(idx, n_cols)
                lines[j] = lines[j][:match.start(1)-1] + ("(%i, %i)" % (row, col)) + lines[j][match.end(1)+1:]
    # Done!
    return lines
'''
Given a set of SymPy symbols, generates the corresponding C++ code to evaluate
them in runtime.
'''
def generate(export):
    """Generate sanitized C++ snippets for the (name, expression) pairs in *export*."""
    # Generate code from exported routines.
    generator = cgen.CCodeGen(cse=True)
    # Simplify each expression and apply C99-targeted rewrites before codegen.
    routines = [ generator.routine(name, copt.optimize(sym.simplify(expr), copt.optims_c99), None, None) for name, expr in export]
    # generator.write yields ((source_name, source), (header_name, header)) pairs.
    codes = [ generator.write((routine,), "", header=False, empty=True) for routine in routines ]
    # Keep only the generated source code strings.
    codes = [ code for (_, code), (_, _) in codes ]
    # Sanitize generated code.
    for i in range(len(codes)):
        # Split code into lines. Remove include ones.
        lines = codes[i].split('\n')
        lines = lines[3:]
        # Reindent from 3 spaces to 4 spaces :|
        lines = [ line.replace('   ', '    ') for line in lines ]
        # Convert input and output argument variables to Eigen types.
        for var in routines[i].arguments:
            lines = convert_var_to_eigen(var, lines, var in routines[i].result_variables)
        # Rename result variable to something legible.
        result = routines[i].result_variables[-1]
        name = str(result.name) if result.dimensions is not None else str(routines[i].name) + '_result'
        lines = [ line.replace(name, 'out') for line in lines ]
        # Rejoin code.
        codes[i] = '\n'.join(lines)
    # Done!
    return codes
'''
Saves the given string to a file.
'''
def save(fname, str):
    """Write *str* to the file *fname*, truncating any existing content."""
    # NOTE: the parameter name shadows the builtin ``str`` inside this scope.
    with open(fname, "w") as handle:
        handle.write(str)
77152e36d6791031fca0f4d06f5ea9d02b00f213 | 1,174 | py | Python | problems/all-nodes-distance-k-in-binary-tree.py | sailikhithk/tech-interview-prep | e833764cf98915d56118bddfa0e01871c58de75e | [
"Apache-2.0"
] | null | null | null | problems/all-nodes-distance-k-in-binary-tree.py | sailikhithk/tech-interview-prep | e833764cf98915d56118bddfa0e01871c58de75e | [
"Apache-2.0"
] | null | null | null | problems/all-nodes-distance-k-in-binary-tree.py | sailikhithk/tech-interview-prep | e833764cf98915d56118bddfa0e01871c58de75e | [
"Apache-2.0"
] | null | null | null | class Solution(object):
def distanceK(self, root, target, k):
graph = collections.defaultdict(list)
q = collections.deque([root]) #for traverse binary tree
q2 = collections.deque([(target, k)]) #for bfs the graph
visited = set() #for bfs the graph
ans = []
#build graph
while q:
node = q.popleft()
if node.left:
graph[node].append(node.left)
graph[node.left].append(node)
q.append(node.left)
if node.right:
graph[node].append(node.right)
graph[node.right].append(node)
q.append(node.right)
#bfs graph
while q2:
node, distance = q2.popleft()
if node.val in visited: continue
visited.add(node.val)
if distance==0: ans.append(node.val)
if distance<0 or distance>k: continue
for nei in graph[node]:
q2.append((nei, distance-1))
return ans
| 29.35 | 64 | 0.460818 | 122 | 1,174 | 4.434426 | 0.352459 | 0.12939 | 0.033272 | 0.051756 | 0.144177 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01072 | 0.443782 | 1,174 | 40 | 65 | 29.35 | 0.817764 | 0.06644 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77161fb80f8bb865d8dac4dbe77fb3933d655aaa | 3,269 | py | Python | tools/Demo.py | IceFlameWorm/iCAN | c4430c023851a1ada72f7bdc2b4f38b7679e1365 | [
"MIT"
] | 257 | 2018-08-31T01:41:42.000Z | 2022-01-11T07:38:12.000Z | tools/Demo.py | IceFlameWorm/iCAN | c4430c023851a1ada72f7bdc2b4f38b7679e1365 | [
"MIT"
] | 50 | 2018-09-16T16:48:49.000Z | 2022-02-11T02:28:26.000Z | tools/Demo.py | IceFlameWorm/iCAN | c4430c023851a1ada72f7bdc2b4f38b7679e1365 | [
"MIT"
] | 69 | 2018-08-31T06:51:43.000Z | 2022-03-12T16:07:41.000Z | # --------------------------------------------------------
# Tensorflow iCAN
# Licensed under The MIT License [see LICENSE for details]
# Written by Chen Gao, based on code from Zheqi he and Xinlei Chen
# --------------------------------------------------------
"""
Demo script generating HOI detections in sample images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import tensorflow as tf
import numpy as np
import argparse
import pickle
import json
import ipdb
import os
import os.path as osp
from ult.config import cfg
from models.test_demo import test_net
def parse_args():
    """Build and parse the command line options for the V-COCO HOI demo."""
    parser = argparse.ArgumentParser(description='Test an iCAN on VCOCO')
    parser.add_argument('--num_iteration', dest='iteration', type=int,
                        default=300000, help='Specify which weight to load')
    parser.add_argument('--model', dest='model', type=str,
                        default='iCAN_ResNet50_VCOCO', help='Select model')
    parser.add_argument('--prior_flag', dest='prior_flag', type=int,
                        default=3, help='whether use prior_flag')
    parser.add_argument('--object_thres', dest='object_thres', type=float,
                        default=0.4, help='Object threshold')
    parser.add_argument('--human_thres', dest='human_thres', type=float,
                        default=0.8, help='Human threshold')
    parser.add_argument('--img_dir', dest='img_dir', type=str,
                        default='/', help='Please specify the img folder')
    parser.add_argument('--Demo_RCNN', dest='Demo_RCNN', type=str,
                        default='/', help='The object detection .pkl file')
    parser.add_argument('--HOI_Detection', dest='HOI_Detection', type=str,
                        default='/', help='Where to save the final HOI_Detection')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()

    # Prior mask and the action name -> index mapping shipped with the data.
    prior_mask = pickle.load( open( cfg.DATA_DIR + '/' + 'prior_mask.pkl', "rb" ) )
    Action_dic = json.load( open( cfg.DATA_DIR + '/' + 'action_index.json'))
    # Invert the mapping to index -> name.  BUGFIX: dict.items() works on
    # both Python 2 and 3, unlike the Python-2-only iteritems() used before
    # (which crashes under Python 3 despite the __future__ imports above).
    Action_dic_inv = {y: x for x, y in Action_dic.items()}

    # Pre-computed object detections for the demo images.
    Demo_RCNN = pickle.load( open( args.Demo_RCNN, "rb" ) )

    weight = cfg.ROOT_DIR + '/Weights/' + args.model + '/HOI_iter_' + str(args.iteration) + '.ckpt'

    print ('Human thres = ' + str(args.human_thres) + ', Object thres = ' + str(args.object_thres) + ', iter = ' + str(args.iteration) + ', path = ' + weight )

    # init session
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)

    # Pick the network architecture matching the requested weights.
    if args.model == 'iCAN_ResNet50_VCOCO':
        from networks.iCAN_ResNet50_VCOCO import ResNet50
    if args.model == 'iCAN_ResNet50_VCOCO_Early':
        from networks.iCAN_ResNet50_VCOCO_Early import ResNet50
    net = ResNet50()
    net.create_architecture(False)

    saver = tf.train.Saver()
    saver.restore(sess, weight)
    print('Pre-trained weights loaded.')

    test_net(sess, net, Demo_RCNN, prior_mask, Action_dic_inv, args.img_dir, args.HOI_Detection, args.object_thres, args.human_thres, args.prior_flag)
    sess.close()
| 35.150538 | 160 | 0.639033 | 413 | 3,269 | 4.828087 | 0.358354 | 0.036108 | 0.068205 | 0.024072 | 0.168506 | 0.059178 | 0 | 0 | 0 | 0 | 0 | 0.010465 | 0.210768 | 3,269 | 92 | 161 | 35.532609 | 0.762403 | 0.098195 | 0 | 0.046875 | 0 | 0 | 0.202044 | 0.008518 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015625 | false | 0 | 0.25 | 0 | 0.28125 | 0.046875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
771667b9589368a3855a6a3ed0fb98fb70367c24 | 6,083 | py | Python | Archive/reconstructions_fisher.py | Europium-152/isttok-tomography | e09119b680fbec2f6c9075d0c1b5e4efb141beee | [
"MIT"
] | 1 | 2018-12-05T15:57:54.000Z | 2018-12-05T15:57:54.000Z | Archive/reconstructions_fisher.py | Europium-152/isttok-tomography | e09119b680fbec2f6c9075d0c1b5e4efb141beee | [
"MIT"
] | null | null | null | Archive/reconstructions_fisher.py | Europium-152/isttok-tomography | e09119b680fbec2f6c9075d0c1b5e4efb141beee | [
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from skimage.draw import ellipse
import sys
from scipy.ndimage.measurements import center_of_mass
from numpy import unravel_index
import scipy
# Close any stale figures and bump the default font size for all plots below.
plt.close("all")
plt.rcParams.update({'font.size': 18})
def find_nearest(array, value):
    """Return (index, element) of the entry in *array* closest to *value*."""
    arr = np.asarray(array)
    idx = np.abs(arr - value).argmin()
    return (idx, arr[idx])
#########################################################################
#                                                                       #
#                        PREPARATION SPECIFIC                           #
#                                                                       #
#########################################################################
# Tomographic reconstruction of ISTTOK plasma emissivity at one time
# instant, using minimum Fisher information regularization.

# Projections vector p ------------------------------------------------------
fname = 'projections.npy'
print('Reading:', fname)
projections = np.load(fname)
print('projections:', projections.shape, projections.dtype)
# flatten each detector's 2D geometry contribution into one matrix row
P = projections.reshape((projections.shape[0], -1))
print('P:', P.shape, P.dtype)
# Signals and vector f -----------------------------------------------------
fname = 'signals_data.npy'
print('Reading:', fname)
signals_data = np.load(fname)
print('signals_data:', signals_data.shape, signals_data.dtype)
fname = 'signals_time.npy'
print('Reading:', fname)
signals_time = np.load(fname)
print('signals_time:', signals_time.shape, signals_time.dtype)
# pick the sample closest to the requested time (presumably microseconds
# — TODO confirm the time base of signals_time)
time=18000.
time_index,time=find_nearest(signals_time[0],time)
f=signals_data[:,time_index]
# Reconstruction Resolution -----------------------------------------------
n_rows = projections.shape[1]
n_cols = projections.shape[2]
res=[4.4444,4.4444] # x,y (mm)
# x and y arrays for ploting purposes
x_array_plot=( np.arange(n_cols+1) - n_cols/2. )*res[0]
y_array_plot=( n_rows/2. - np.arange(n_rows+1) )*res[1]
# x and y arrays for calculation purposes
x_array=np.arange(n_cols)*res[0]-n_cols/2.*res[0]
y_array=n_rows/2.*res[1]-np.arange(n_rows)*res[1]
# Convergence parameters --------------------------------------------------
stop_criteria=1e-4
max_iterations=10
# Regularization parameters -----------------------------------------------
alpha_1 = 1e-5
alpha_2 = alpha_1
alpha_3 = alpha_1*10
#########################################################################
#                                                                       #
#                       RECONSTRUCTION SPECIFIC                         #
#                                                                       #
#########################################################################
# x and y grandient matrices ----------------------------------------------
Dh = np.eye(n_rows*n_cols) - np.roll(np.eye(n_rows*n_cols), 1, axis=1)
Dv = np.eye(n_rows*n_cols) - np.roll(np.eye(n_rows*n_cols), n_cols, axis=1)
print('Dh:', Dh.shape, Dh.dtype)
print('Dv:', Dv.shape, Dv.dtype)
# norm matrix --------------------------------------------------------------
# penalize emissivity outside the inscribed ellipse (vessel boundary)
ii, jj = ellipse(n_rows//2, n_cols//2, n_rows//2, n_cols//2)
mask = np.ones((n_rows, n_cols))
mask[ii,jj] = 0.
Io = np.eye(n_rows*n_cols) * mask.flatten()
print('Io:', Io.shape, Io.dtype)
# p transpose and PtP ------------------------------------------------------
Pt = np.transpose(P)
PtP = np.dot(Pt, P)
# Norm matrix transposed ---------------------------------------------------
ItIo = np.dot(np.transpose(Io), Io)
###################### FIRST ITERATION ##################################
# Weight matrix, first iteration sets W to 1 -------------------------------
W=np.eye(n_rows*n_cols)
# Fisher information (weighted derivatives) --------------------------------
DtWDh=np.dot(np.transpose(Dh), np.dot(W, Dh))
DtWDv=np.dot(np.transpose(Dv), np.dot(W, Dv))
# Inversion and calculation of vector g, storage of first guess ------------
inv = np.linalg.inv(PtP + alpha_1*DtWDh + alpha_2*DtWDv + alpha_3*ItIo)
M = np.dot(inv, Pt)
g_old = np.dot(M, f)
first_g = np.array(g_old)
# Iterative process --------------------------------------------------------
# reweight by 1/|g| each pass (minimum Fisher) until the update is small
i=0
while True:
    i=i+1;
    W=np.diag(1.0/np.abs(g_old))
    DtWDh=np.dot(np.transpose(Dh), np.dot(W, Dh))
    DtWDv=np.dot(np.transpose(Dv), np.dot(W, Dv))
    inv = np.linalg.inv(PtP + alpha_1*DtWDh + alpha_2*DtWDv + alpha_3*ItIo)
    M = np.dot(inv, Pt)
    g_new = np.dot(M, f)
    # mean absolute change between consecutive solutions
    error=np.sum(np.abs(g_new-g_old))/len(g_new)
    print (error)
    if error<stop_criteria:
        print ("Minimum Fisher converged after ",i," iterations.")
        break
    if i>max_iterations:
        print ("WARNING: Minimum Fisher did not converge after ",i," iterations.")
        break
    g_old=np.array(g_new) # Explicitly copy because python will not
    # TODO: Swaping instead of copying
#########################################################################
#                                                                       #
#                          PLOTING SPECIFIC                             #
#                                                                       #
#########################################################################
# first (unweighted Tikhonov) guess
plt.figure()
plt.imshow(first_g.reshape((n_rows, n_cols)))
plt.colorbar()
# centroid and maximum of the converged emissivity profile
g_matrix=g_new.reshape((n_rows,n_cols))
centroid=center_of_mass(g_matrix)
print ('centroid in index coordinates:',(centroid[1],centroid[0]))
# convert (row, col) indices to physical (x, y) in mm
center_y=n_rows*res[1]/2.-centroid[0]*res[1]
center_x=-n_cols*res[0]/2.+centroid[1]*res[0]
maximum=unravel_index(g_matrix.argmax(), g_matrix.shape)
max_y=n_rows*res[1]/2.-maximum[0]*res[1]-res[1]/2.
max_x=-n_cols*res[0]/2.+maximum[1]*res[0]+res[1]/2.
print('coordinates of maximum:',(max_x,max_y))
print ('centroid in space coordinates:', (center_x,center_y))
# final reconstruction with centroid (red) and maximum (blue) marked
plt.figure()
plt.axes().set_aspect('equal', 'datalim')
plt.pcolormesh(x_array_plot,y_array_plot,g_new.reshape((n_rows, n_cols)))
#plt.imshow(g_new.reshape((n_rows, n_cols)))
plt.plot(center_x, center_y, 'r+')
plt.plot(max_x, max_y, 'b+')
plt.colorbar()
7720c67400df014e01d1152193d7b58d68930417 | 2,490 | py | Python | signa/providers/aws.py | 05bit/python-signa | 825767a6d6970d357b627de4c5b5e88a14dc0e75 | [
"MIT"
] | 1 | 2017-06-04T10:26:30.000Z | 2017-06-04T10:26:30.000Z | signa/providers/aws.py | 05bit/python-signa | 825767a6d6970d357b627de4c5b5e88a14dc0e75 | [
"MIT"
] | null | null | null | signa/providers/aws.py | 05bit/python-signa | 825767a6d6970d357b627de4c5b5e88a14dc0e75 | [
"MIT"
] | null | null | null | import datetime
import hashlib
import hmac
import urllib.parse
from signa.logger import get_logger
# Shorthand for the current UTC time (SigV4 timestamps must be in UTC).
utcnow = datetime.datetime.utcnow
# Module-level logger provided by the project's logging helper.
logger = get_logger(__name__)
def aws_headers(method=None, region=None, service=None, uri=None,
                auth=None, headers=None, payload=None):
    """Build AWS Signature Version 4 headers for a request.

    Args:
        method: HTTP verb; defaults to ``GET`` when omitted.
        region: AWS region used in the credential scope and signing key.
        service: AWS service name used in the credential scope and signing key.
        uri: full request URI; only its path and query string are signed.
        auth: dict with ``access_key`` and ``secret_key`` entries.
        payload: request body as a string, or the literal string
            ``'UNSIGNED-PAYLOAD'`` to skip body hashing.
        headers: extra headers to include in the signature; the caller's
            dict is not mutated.

    Returns:
        A new dict of headers including ``Authorization``, ``x-amz-date``
        and ``x-amz-content-sha256``.
    """
    headers = headers.copy() if headers else {}
    access_key = auth['access_key']
    secret_key = auth['secret_key']

    timestamp = utcnow().strftime('%Y%m%dT%H%M%SZ')
    date_only = timestamp[:8]
    scope = '%s/%s/%s/aws4_request' % (date_only, region, service)

    if payload == 'UNSIGNED-PAYLOAD':
        payload_hash = 'UNSIGNED-PAYLOAD'
    elif payload:
        payload_hash = _sha256(payload)
    else:
        payload_hash = _sha256('')

    headers['x-amz-content-sha256'] = payload_hash
    headers['x-amz-date'] = timestamp

    if uri:
        uri_parts = urllib.parse.urlparse(uri)
        path = uri_parts.path
        query = uri_parts.query
    else:
        path = '/'
        query = ''

    headers_keys = sorted(list(headers.keys()))
    # SigV4 requires the signed-headers list to be lowercase both inside the
    # canonical request AND in the Authorization header (the original only
    # lowercased it in the canonical request, breaking capitalized headers).
    signed_headers = ';'.join(headers_keys).lower()

    canonical_request = '\n'.join([
        method or 'GET',
        path,
        query,
        '\n'.join(['%s:%s' % (k.lower(), headers[k])
                   for k in headers_keys]),
        '',
        signed_headers,
        payload_hash,
    ]).strip()
    logger.debug(canonical_request)

    str_to_sign = '\n'.join([
        'AWS4-HMAC-SHA256',
        timestamp,
        scope,
        _sha256(canonical_request),
    ])

    # Derive the signing key by chaining HMACs over date, region, service and
    # the constant terminator, per the SigV4 specification.
    base_key = ('AWS4' + secret_key).encode('utf-8')
    date_key = _hmac(base_key, date_only)
    date_region_key = _hmac(date_key, region)
    # BUG FIX: this step previously hard-coded 's3', which broke signing for
    # every other AWS service despite the `service` parameter being used in
    # the credential scope above.
    date_region_service_key = _hmac(date_region_key, service)
    signing_key = _hmac(date_region_service_key, 'aws4_request')
    signature = _hmac(signing_key, str_to_sign, hexdigest=True)

    headers['Authorization'] = (
        'AWS4-HMAC-SHA256 '
        'Credential=%s/%s,'
        'SignedHeaders=%s,'
        'Signature=%s' % (
            access_key,
            scope,
            signed_headers,
            signature)
    )
    return headers
def _sha256(data):
return hashlib.sha256(data.encode('utf-8')).hexdigest()
def _hmac(key, msg, hexdigest=False):
h = hmac.new(key, msg=msg.encode('utf-8'),
digestmod=hashlib.sha256)
if hexdigest:
return h.hexdigest()
else:
return h.digest()
| 24.653465 | 66 | 0.602008 | 300 | 2,490 | 4.776667 | 0.29 | 0.038381 | 0.018842 | 0.027913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020098 | 0.260643 | 2,490 | 100 | 67 | 24.9 | 0.758284 | 0.03012 | 0 | 0.067568 | 0 | 0 | 0.107424 | 0.00871 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.067568 | 0.013514 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7726bbbebd82a0a62defb470c750f3a3ffba127c | 4,067 | py | Python | core/domain/html_cleaner_test.py | spiritus72/oppia | 62464ec4583e970bc70e13be13d81fcd6397174f | [
"Apache-2.0"
] | 2 | 2016-09-25T23:08:37.000Z | 2016-09-25T23:08:48.000Z | core/domain/html_cleaner_test.py | spiritus72/oppia | 62464ec4583e970bc70e13be13d81fcd6397174f | [
"Apache-2.0"
] | null | null | null | core/domain/html_cleaner_test.py | spiritus72/oppia | 62464ec4583e970bc70e13be13d81fcd6397174f | [
"Apache-2.0"
] | 1 | 2021-08-04T13:03:16.000Z | 2021-08-04T13:03:16.000Z | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the HTML sanitizer."""
from core.domain import html_cleaner
from core.tests import test_utils
class HtmlCleanerUnitTests(test_utils.GenericTestBase):
    """Test the HTML sanitizer."""

    def setUp(self):
        super(HtmlCleanerUnitTests, self).setUp()
        self.longMessage = True

    def _check_clean_output(self, cases):
        """Assert that html_cleaner.clean maps each input to its expected output."""
        for source, expected in cases:
            self.assertEqual(
                html_cleaner.clean(source), expected,
                '\n\nOriginal text: %s' % source)

    def test_good_tags_allowed(self):
        self._check_clean_output([
            ('<a href="http://www.google.com">Hello</a>',
             '<a href="http://www.google.com">Hello</a>'),
            ('Just some text 12345',
             'Just some text 12345'),
            ('<code>Unfinished HTML',
             '<code>Unfinished HTML</code>'),
            ('<br/>',
             '<br>'),
            ('A big mix <div>Hello</div> Yes <span>No</span>',
             'A big mix <div>Hello</div> Yes <span>No</span>'),
        ])

    def test_bad_tags_suppressed(self):
        self._check_clean_output([
            ('<incomplete-bad-tag>',
             ''),
            ('<complete-bad-tag></complete-bad-tag>',
             ''),
            ('<incomplete-bad-tag><div>OK tag</div>',
             '<div>OK tag</div>'),
            ('<complete-bad-tag></complete-bad-tag><span>OK tag</span>',
             '<span>OK tag</span>'),
            ('<bad-tag></bad-tag>Just some text 12345',
             'Just some text 12345'),
            ('<script>alert(\'Here is some JS\');</script>',
             'alert(\'Here is some JS\');'),
            ('<iframe src="https://oppiaserver.appspot.com"></iframe>',
             ''),
        ])

    def test_oppia_custom_tags(self):
        self._check_clean_output([
            ('<oppia-noninteractive-image filepath-with-value="1"/>',
             '<oppia-noninteractive-image filepath-with-value="1">'
             '</oppia-noninteractive-image>'),
            ('<oppia-noninteractive-image filepath-with-value="1">'
             '</oppia-noninteractive-image>',
             '<oppia-noninteractive-image filepath-with-value="1">'
             '</oppia-noninteractive-image>'),
            ('<oppia-fake-tag></oppia-fake-tag>',
             ''),
        ])
class HtmlStripperUnitTests(test_utils.GenericTestBase):
    """Test the HTML stripper."""

    def test_strip_html_tags(self):
        cases = (
            ('<a href="http://www.google.com">Hello</a>', 'Hello'),
            ('Just some text 12345', 'Just some text 12345'),
            ('<code>Unfinished HTML', 'Unfinished HTML'),
            ('<br/>', ''),
            ('A big mix <div>Hello</div> Yes <span>No</span>',
             'A big mix Hello Yes No'),
            ('Text with\nnewlines', 'Text with\nnewlines'),
        )
        for source, expected in cases:
            self.assertEqual(html_cleaner.strip_html_tags(source), expected)
| 31.527132 | 78 | 0.527908 | 456 | 4,067 | 4.642544 | 0.309211 | 0.030231 | 0.079358 | 0.048181 | 0.507322 | 0.504487 | 0.42324 | 0.42324 | 0.394426 | 0.373642 | 0 | 0.019665 | 0.324809 | 4,067 | 128 | 79 | 31.773438 | 0.751275 | 0.164986 | 0 | 0.652632 | 0 | 0 | 0.366013 | 0.135769 | 0 | 0 | 0 | 0 | 0.042105 | 1 | 0.052632 | false | 0 | 0.021053 | 0 | 0.094737 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7729980405e745a69820e95ad43e6629e3977748 | 2,199 | py | Python | sdk/python-sdk/verity_sdk/protocols/Protocol.py | tw-bc-group/verity-sdk | e932209ab849f04a389bdda0718cd6227187e5cf | [
"Apache-2.0"
] | 40 | 2020-07-09T01:52:31.000Z | 2022-02-19T04:01:23.000Z | sdk/python-sdk/verity_sdk/protocols/Protocol.py | tw-bc-group/verity-sdk | e932209ab849f04a389bdda0718cd6227187e5cf | [
"Apache-2.0"
] | 45 | 2020-06-19T11:00:20.000Z | 2022-03-02T14:48:12.000Z | sdk/python-sdk/verity_sdk/protocols/Protocol.py | tw-bc-group/verity-sdk | e932209ab849f04a389bdda0718cd6227187e5cf | [
"Apache-2.0"
] | 37 | 2020-06-19T10:37:04.000Z | 2022-03-15T14:06:40.000Z | from verity_sdk.transports import send_packed_message
from verity_sdk.utils import pack_message_for_verity, uuid
from verity_sdk.utils.MessageFamily import MessageFamily
class Protocol(MessageFamily):
"""The base class for all protocols"""
# Messages
STATUS = 'status-report'
"""Name for 'status-report' signal message"""
PROBLEM_REPORT = 'problem-report'
"""Name for 'problem-report' signal message"""
def __init__(self, msg_family: str, msg_family_version: str, msg_qualifier: str = None, thread_id: str = None):
"""
Args:
msg_family (str): the family name for the message family
msg_family_version (str): the version for the message family
msg_qualifier (str): the qualifier for the message family
thread_id (str): given ID used for the thread.
"""
super().__init__(msg_family, msg_family_version, msg_qualifier)
if thread_id is not None:
self.thread_id = thread_id
else:
self.thread_id = uuid()
def _add_thread(self, msg):
msg['~thread'] = {
'thid': self.thread_id
}
@staticmethod
def _add_relationship(msg, for_relationship):
msg['~for_relationship'] = for_relationship
@staticmethod
async def get_message_bytes(context, message) -> bytes:
"""
Packs the connection message for the verity
Args:
context (Context): an instance of Context that has been initialized with your wallet and key details
message (dict): the message to be packed for the verity-application
Returns:
bytes: Encrypted connection message ready to be sent to the verity
"""
return await pack_message_for_verity(context, message)
@staticmethod
async def send_message(context, packed_message):
"""
Sends a given packed message to Verity
Args:
context (Context): an instance of the Context object initialized to a verity-application agent
packed_message (bytes): the encrypted message bytes to send to Verity
"""
send_packed_message(context, packed_message)
| 36.65 | 115 | 0.657572 | 270 | 2,199 | 5.155556 | 0.3 | 0.04023 | 0.028017 | 0.040948 | 0.083333 | 0.051724 | 0.051724 | 0 | 0 | 0 | 0 | 0 | 0.267849 | 2,199 | 59 | 116 | 37.271186 | 0.864596 | 0.130514 | 0 | 0.12 | 0 | 0 | 0.048976 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.12 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7729c36d0821852beab76be6febcdff37b789840 | 5,323 | py | Python | src/graphConstruction/extract_nodes_relations_from_texts.py | ubayram/COVIDGraphProject | 57cdb4e3cbcf3867c153c86eb99b26af634601c1 | [
"MIT"
] | null | null | null | src/graphConstruction/extract_nodes_relations_from_texts.py | ubayram/COVIDGraphProject | 57cdb4e3cbcf3867c153c86eb99b26af634601c1 | [
"MIT"
] | null | null | null | src/graphConstruction/extract_nodes_relations_from_texts.py | ubayram/COVIDGraphProject | 57cdb4e3cbcf3867c153c86eb99b26af634601c1 | [
"MIT"
] | null | null | null | #Author: Ulya Bayram
#email : ulya.bayram@comu.edu.tr
#
#------------------------------------------------------------------------------------------------------
#
#The content of this project is licensed under the MIT license. 2021 All rights reserved.
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software
#and associated documentation files (the "Software"), to deal with the Software without restriction,
#including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
#and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#Redistributions of source code must retain the above License notice, this list of conditions and
#the following disclaimers.
#
#Redistributions in binary form must reproduce the above License notice, this list of conditions and
#the following disclaimers in the documentation and/or other materials provided with the distribution.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
#LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE CONTRIBUTORS OR LICENSE HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
#OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
#
#------------------------------------------------------------------------------------------------------
#
#These code are writen for a research project, published in OIR. If you use any of them, please cite:
#Ulya Bayram, Runia Roy, Aqil Assalil, Lamia Ben Hiba,
#"The Unknown Knowns: A Graph-Based Approach for Temporal COVID-19 Literature Mining",
#Online Information Review (OIR), COVID-19 Special Issue, 2021.
#
#------------------------------------------------------------------------------------------------------
# This is where the node and edge extraction as well as the edge weight computation happens
import subject_verb_object_extraction as sci # get Lamia's code
import help_NER_UB as help_ner
import spacy
import en_core_web_sm
import pandas as pd
import numpy as np
import nltk
#from codetiming import Timer
import time
#t = Timer()
# use spacy small model
nlp = en_core_web_sm.load()
stopwords = help_ner.getStopwords()
for word in stopwords:
nlp.vocab[word].is_stop = True
def cleanText(curr_text):
curr_text = curr_text.lower()
return curr_text.split(' . ')
def collectSaveEdges(df, pd_eval, row_indices, date_flag):
for i_row in row_indices:
curr_text = df.full_text[i_row]
if not help_ner.isEnglish(curr_text): # skip non-English data rows
continue
list_sentences = cleanText(curr_text)
del curr_text
if date_flag:
curr_date = df.date[i_row]
else:
curr_date = df.year[i_row]
curr_tuples = []
#t.start()
t0 = time.time()
for curr_sent in list_sentences:
if len(curr_sent.split(' ')) > 4: # make sure a sentence has at least 4 words - to eliminate noise
#print('\n'+curr_sent)
links = sci.extract_link(nlp(curr_sent))
#if len(links): # Ulya: if links returns an empty list, curr_tuples won't be affected. I'm removing this if for speed
# print(links)
curr_tuples += links
#t.stop()
t1 = time.time()
print(t1-t0)
pd_eval = pd_eval.append(pd.DataFrame({'filename' : df.fullname[i_row], 'list_of_edges': [curr_tuples], 'timestamp': curr_date}))
print('Processing row ' + str(i_row))
return pd_eval
def eliminateImproperDatesPost2020(row_indices_pre, df):
row_indices_pre_clean = []
filenames_ = [] # store them too for just in case row orders gets messed up
for i_row in row_indices_pre: # clean up the indices - some dates are messed up
curr_date = df.date[i_row]
year_ = int(curr_date.split('-')[0])
if year_ >= 2020:
row_indices_pre_clean.append(i_row)
filenames_.append(df.fullname[i_row])
return row_indices_pre_clean, filenames_ # after done, write these indices, will be necessary to Aqil.
def extract_nodes_edges(input_dir):
# read the corpus
all_data = pd.read_csv(input_dir)
print('Processing after 2020, so COVID-19 specific papers only')
# list of rows where years are after 2020
row_indices_pre_ = all_data[all_data['year'] >= 2020].index.tolist()
row_indices_pre, filenames_ = eliminateImproperDatesPost2020(row_indices_pre_, all_data)
#new_df = pd.DataFrame({'filename': filenames_, 'i_rows': row_indices_pre})
#new_df.to_csv('post2020_qualifying_filenames_row_indices.csv')
#half_index = int(len(row_indices_pre)/2)
#row_indices_pre = row_indices_pre[half_index:]
empty_list = []
pd_eval = pd.DataFrame(data={'filename' : empty_list, 'list_of_edges': empty_list, 'timestamp': empty_list})
pd_eval = collectSaveEdges(all_data, pd_eval, row_indices_pre, True)
pd_eval.to_csv('../../graphs/list_of_edges_post2020.csv')
del pd_eval
| 40.633588 | 137 | 0.667105 | 743 | 5,323 | 4.600269 | 0.391655 | 0.046811 | 0.049444 | 0.021065 | 0.088356 | 0.060854 | 0.039204 | 0.039204 | 0.039204 | 0.039204 | 0 | 0.012658 | 0.198572 | 5,323 | 130 | 138 | 40.946154 | 0.788561 | 0.533158 | 0 | 0.035088 | 0 | 0 | 0.073221 | 0.016043 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0 | 0.140351 | 0 | 0.263158 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
772d39e17b72b4731537fcaabc814e137a5349d8 | 6,777 | py | Python | examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter.py | g-pichler/flower | e455cdc3678921ece960287a0e1ae5123500c948 | [
"Apache-2.0"
] | 895 | 2020-03-22T20:34:16.000Z | 2022-03-31T15:20:42.000Z | examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter.py | g-pichler/flower | e455cdc3678921ece960287a0e1ae5123500c948 | [
"Apache-2.0"
] | 322 | 2020-02-19T10:16:33.000Z | 2022-03-31T09:49:08.000Z | examples/android/tflite_convertor/tfltransfer/tflite_transfer_converter.py | g-pichler/flower | e455cdc3678921ece960287a0e1ae5123500c948 | [
"Apache-2.0"
] | 234 | 2020-03-31T10:52:16.000Z | 2022-03-31T14:04:42.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFLite converter for transfer learning models.
This converter is the first stage in the transfer learning pipeline. It
allows to convert a pair of models representing fixed base and trainable
head models to a set of TFLite models, which can be then used by the
transfer learning library.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.compat import v1 as tfv1
class TFLiteTransferConverter(object):
    """Converter for transfer learning models.

    There are three parts of the input to the converter: base and
    head model configurations, and the optimizer configuration.
    Each of them has several variants, defined in the respective
    submodules, which are configured separately outside of the
    converter.

    The converter output format is currently a directory containing
    multiple TFLite models, but this should be considered an
    implementation detail and not relied upon.
    """

    def __init__(
        self, num_classes, base_model, head_model, optimizer, train_batch_size
    ):
        """Creates a new converter instance.

        Args:
          num_classes: number of classes for the classification task.
          base_model: base model configuration of one of the supported types.
          head_model: head model configuration of one of the supported types.
          optimizer: optimizer configuration of one of the supported types.
          train_batch_size: batch size that will be used for training.
        """
        # Number of classes; fixes the width of the one-hot label placeholder.
        self.num_classes = num_classes
        # Fixed (non-trainable) base model configuration.
        self.base_model = base_model
        # Trainable head model configuration.
        self.head_model = head_model
        # Optimizer configuration; converted last, once parameter shapes are known.
        self.optimizer = optimizer
        # Batch size baked into the train-head graph's placeholder shapes.
        self.train_batch_size = train_batch_size

    def convert_and_save(self, out_model_dir):
        """Saves the converted model to a target directory."""
        if not out_model_dir):
            os.makedirs(out_model_dir)
        models = self._convert()
        # One "<name>.tflite" file per converted sub-model.
        for name, model in models.items():
            model_file_path = os.path.join(out_model_dir, name + ".tflite")
            with open(model_file_path, "wb") as model_file:
                model_file.write(model)

    def _convert(self):
        """Converts all underlying models.

        Returns:
          dict mapping sub-model name to serialized TFLite flatbuffer bytes.
        """
        initialize_model_lite = self._generate_initialize_model()
        bottleneck_model_lite = self._generate_bottleneck_model()
        train_head_model_lite = self._generate_train_head_model()
        inference_model_lite = self._generate_inference_model()
        # The optimizer model needs the trainable-parameter shapes, which are
        # recovered from the inference model's inputs.
        parameter_shapes = self._read_parameter_shapes(inference_model_lite)
        optimizer_model_lite = self.optimizer.generate_optimizer_model(parameter_shapes)
        return {
            "initialize": initialize_model_lite,
            "bottleneck": bottleneck_model_lite,
            "train_head": train_head_model_lite,
            "inference": inference_model_lite,
            "optimizer": optimizer_model_lite,
        }

    def _read_parameter_shapes(self, inference_model):
        """Infers shapes of model parameters from the inference model."""
        interpreter = tfv1.lite.Interpreter(model_content=inference_model)
        # Input 0 is the bottleneck tensor; the remaining inputs are the
        # trainable parameters, whose shapes are what we need here.
        return [
            parameter_in["shape"].tolist()
            for parameter_in in interpreter.get_input_details()[1:]
        ]

    def _generate_initialize_model(self):
        """Generates a model that outputs initial parameter values."""
        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [self.head_model.generate_initial_params().get_concrete_function()]
        )
        return converter.convert()

    def _generate_bottleneck_model(self):
        """Converts the bottleneck model, i.e. the base model.

        Bottleneck is a name used in the transfer learning context for
        the base model outputs, which are at the same time head model
        inputs.

        Returns:
          TFLite bottleneck model.
        """
        return self.base_model.tflite_model()

    def _generate_train_head_model(self):
        """Converts the head training model.

        Head training model is constructed from the head model passed
        as converter input by adding a cross-entropy loss and gradient
        calculation for all variables in the input SavedModel.

        Returns:
          TFLite train head model.
        """
        with tf.Graph().as_default(), tfv1.Session() as sess:
            # Inputs: a batch of bottlenecks plus one-hot labels; batch size
            # is fixed at conversion time.
            bottleneck_shape = (self.train_batch_size,) + self.head_model.input_shape()
            bottleneck = tfv1.placeholder(
                tf.float32, bottleneck_shape, "placeholder_bottleneck"
            )
            # One-hot ground truth
            labels = tfv1.placeholder(
                tf.float32,
                (self.train_batch_size, self.num_classes),
                "placeholder_labels",
            )
            loss, gradients, variables = self.head_model.train(bottleneck, labels)
            # Graph inputs: data + current parameter values; outputs: loss +
            # one gradient tensor per parameter (same order as `variables`).
            converter = tfv1.lite.TFLiteConverter.from_session(
                sess, [bottleneck, labels] + variables, [loss] + gradients
            )
            # Some head models need TF ops not in the builtin TFLite op set.
            if self.head_model.train_requires_flex():
                converter.target_ops = [tf.lite.OpsSet.SELECT_TF_OPS]
            return converter.convert()

    def _generate_inference_model(self):
        """Converts the head inference model.

        Inference model is constructed from the head model passed
        as converted input. It accepts as inputs the bottlenecks
        produces by the base model, and values for all trainable
        head model parameters.

        Returns:
          TFLite inference model.
        """
        with tf.Graph().as_default(), tfv1.Session() as sess:
            # Inference always runs on a single sample (batch size 1).
            bottleneck_shape = (1,) + self.head_model.input_shape()
            bottleneck = tfv1.placeholder(
                tf.float32, bottleneck_shape, "placeholder_bottleneck"
            )
            predictions, head_variables = self.head_model.predict(bottleneck)
            converter = tfv1.lite.TFLiteConverter.from_session(
                sess, [bottleneck] + head_variables, [predictions]
            )
            return converter.convert()
| 39.631579 | 88 | 0.676701 | 818 | 6,777 | 5.40709 | 0.294621 | 0.042731 | 0.020574 | 0.018992 | 0.190142 | 0.13995 | 0.13995 | 0.131585 | 0.086819 | 0.067827 | 0 | 0.005159 | 0.256308 | 6,777 | 170 | 89 | 39.864706 | 0.872421 | 0.393242 | 0 | 0.139241 | 0 | 0 | 0.032649 | 0.011585 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101266 | false | 0 | 0.075949 | 0 | 0.265823 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
773012eae1f47ed3aecb131888fdc05041550424 | 941 | py | Python | src/pretix/plugins/paypal/urls.py | pajowu/pretix | d6985123b4528f134ead71ce0a4613c9a309fd2c | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-04-25T00:11:00.000Z | 2020-04-25T00:11:00.000Z | src/pretix/plugins/paypal/urls.py | pajowu/pretix | d6985123b4528f134ead71ce0a4613c9a309fd2c | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2019-07-08T10:29:54.000Z | 2020-01-08T17:32:07.000Z | src/pretix/plugins/paypal/urls.py | pajowu/pretix | d6985123b4528f134ead71ce0a4613c9a309fd2c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from django.conf.urls import include, url
from pretix.multidomain import event_url
from .views import (
abort, oauth_disconnect, oauth_return, redirect_view, success, webhook,
)
# URL patterns served under an event's own (sub)domain.
event_patterns = [
    url(r'^paypal/', include([
        url(r'^abort/$', abort, name='abort'),
        url(r'^return/$', success, name='return'),
        url(r'^redirect/$', redirect_view, name='redirect'),
        # Cart-namespaced variants of abort/return.
        # NOTE(review): these two patterns lack the ^ anchor and trailing $
        # used by the other routes -- presumably intentional, but confirm.
        url(r'w/(?P<cart_namespace>[a-zA-Z0-9]{16})/abort/', abort, name='abort'),
        url(r'w/(?P<cart_namespace>[a-zA-Z0-9]{16})/return/', success, name='return'),
        # Webhook must stay reachable even when the event is not live.
        event_url(r'^webhook/$', webhook, name='webhook', require_live=False),
    ])),
]
# Global (non event-scoped) URLs: organizer-level OAuth management plus
# PayPal-facing webhook and OAuth return endpoints.
urlpatterns = [
    url(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/paypal/disconnect/',
        oauth_disconnect, name='oauth.disconnect'),
    url(r'^_paypal/webhook/$', webhook, name='webhook'),
    url(r'^_paypal/oauth_return/$', oauth_return, name='oauth.return'),
]
| 32.448276 | 86 | 0.630181 | 123 | 941 | 4.699187 | 0.308943 | 0.069204 | 0.051903 | 0.065744 | 0.16609 | 0.16609 | 0.093426 | 0.093426 | 0.093426 | 0.093426 | 0 | 0.010063 | 0.155154 | 941 | 28 | 87 | 33.607143 | 0.716981 | 0 | 0 | 0 | 0 | 0 | 0.339001 | 0.194474 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
773111856b2b964c48b6aaccb02a91602aef7876 | 3,484 | py | Python | getFCEndPortStats.py | louisjia/dcnm-rest-sample | b3f589061eb33035337175e1fc527122c7cda174 | [
"BSD-3-Clause"
] | 7 | 2018-11-05T21:19:22.000Z | 2022-01-05T11:05:43.000Z | getFCEndPortStats.py | louisjia/dcnm-rest-sample | b3f589061eb33035337175e1fc527122c7cda174 | [
"BSD-3-Clause"
] | 3 | 2018-08-31T21:49:42.000Z | 2020-08-08T00:47:06.000Z | getFCEndPortStats.py | louisjia/dcnm-rest-sample | b3f589061eb33035337175e1fc527122c7cda174 | [
"BSD-3-Clause"
] | 9 | 2018-08-20T21:05:41.000Z | 2021-05-24T09:30:37.000Z | import http.client
import ssl
import base64
import string
import json
__author__ = "Louis Jia"
__copyright__ = "Copyright (C) 2018 Cisco System"
def getRestToken(username, password, serverip):
    """Log on to a DCNM server and return a REST session token.

    Args:
        username: DCNM user name.
        password: DCNM password.
        serverip: DCNM server address (host or host:port).

    Returns:
        The session token string from the /rest/logon response.
    """
    # NOTE: certificate verification is disabled globally (self-signed certs).
    ssl._create_default_https_context = ssl._create_unverified_context
    conn = http.client.HTTPSConnection(serverip)
    payload = "{\"expirationTime\" : 10000000000}\n"
    # BUG FIX: build the Basic credential with b64encode instead of the old
    # repr()-stripping hack around base64.encodebytes, which left embedded
    # newlines (a corrupt header) for credentials longer than 57 bytes.
    # The debug prints of the Authorization header and raw token response
    # were removed -- they leaked credentials to stdout.
    authenStr = "%s:%s" % (username, password)
    credentials = base64.b64encode(authenStr.encode('utf-8')).decode('ascii')
    headers = {
        'content-type': "application/json",
        'authorization': "Basic %s" % credentials,
        'cache-control': "no-cache"
    }
    conn.request("POST", "/rest/logon", payload, headers)
    res = conn.getresponse()
    body = res.read().decode("utf-8")
    # The response is a single-key JSON object, e.g. {"Dcnm-Token": "<token>"};
    # parse it instead of splitting on quote characters.
    return next(iter(json.loads(body).values()))
def getRrdID(serverip, switchid, interface, resttoken):
    """Look up the RRD file id for a switch interface via the DCNM REST API.

    Returns the 'rrdFile' value of the matching end-port entry, or -1 when
    no entry matches "<switchid> <interface>".
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    conn = http.client.HTTPSConnection(serverip)
    headers = {
        'dcnm-token': resttoken,
        'content-type': "application/x-www-form-urlencoded",
        'cache-control': "no-cache"
    }
    conn.request("GET", "/fm/fmrest/statistics/endportStat?interval=Day&endDeviceType=All&navId=-1", headers=headers)
    response = conn.getresponse()
    entries = json.loads(response.read().decode("utf-8"))
    wanted = "%s %s" % (switchid, interface)
    for entry in entries:
        if entry['swIfName'] == wanted:
            return entry['rrdFile']
    return -1
def getFabricId(serverip, switchname, resttoken):
    """Return the fabric id ('fid') of the switch with the given logical name.

    Returns -1 when no switch matches.  NOTE(review): the query string
    hard-codes name=foo1; filtering actually happens client-side in the loop
    below -- confirm whether the server-side filter should use *switchname*.
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    conn = http.client.HTTPSConnection(serverip)
    headers = {
        'dcnm-token': resttoken,
        'content-type': "application/x-www-form-urlencoded",
        'cache-control': "no-cache"
    }
    conn.request("GET", "/fm/fmrest/inventory/switches/?name=foo1&navId=-1", headers=headers)
    response = conn.getresponse()
    switches = json.loads(response.read().decode("utf-8"))
    for switch in switches:
        if switch['logicalName'] == switchname:
            return switch['fid']
    return -1
def getInterfaceStats(serverip, fid, rrdid, resttoken):
    """Fetch and print rx/tx chart data for one interface's RRD file."""
    ssl._create_default_https_context = ssl._create_unverified_context
    conn = http.client.HTTPSConnection(serverip)
    headers = {
        'dcnm-token': resttoken,
        'content-type': "application/x-www-form-urlencoded",
        'cache-control': "no-cache"
    }
    request_path = ("/fm/fmrest/statistics/pmChartData?rrdFile=" + rrdid
                    + "&fid=" + str(fid) + "&pmType=3&interval=1")
    conn.request("GET", request_path, headers=headers)
    response = conn.getresponse()
    chart = json.loads(response.read().decode("utf-8"))
    # items[0]/items[1] hold the rx/tx series; xLabels holds the timestamps.
    print("rx")
    print(chart['items'][0])
    print("tx")
    print(chart['items'][1])
    print("millisec")
    print(chart['xLabels'])
    return
# DCNM server address.
server="10.10.10.10"
# Log on with DCNM username/password and get a session token.
# NOTE(review): credentials are hard-coded placeholders; replace before use
# (consider reading them from the environment instead of the source).
restToken=getRestToken("admin", "xxxxxxx", server)
print(restToken)
fid=getFabricId(server, "minishan", restToken)
# args: DCNM server address, switch logical name, interface, rest token
rrdID=getRrdID(server, "minishan", "fc1/2", restToken)
print(rrdID)
# args: server address, fabric id, RRD file id, rest token
getInterfaceStats(server,fid, rrdID, restToken)
| 25.430657 | 115 | 0.689437 | 415 | 3,484 | 5.701205 | 0.313253 | 0.030431 | 0.02705 | 0.035503 | 0.455621 | 0.439983 | 0.413779 | 0.398563 | 0.398563 | 0.37574 | 0 | 0.016343 | 0.157003 | 3,484 | 136 | 116 | 25.617647 | 0.789241 | 0.065729 | 0 | 0.428571 | 0 | 0 | 0.219618 | 0.081123 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043956 | false | 0.021978 | 0.054945 | 0 | 0.164835 | 0.10989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7733a976c75446b4be37deffff747e9e28f0902f | 860 | py | Python | ML Scripts/humidity.py | Susmit-A/Looking-Globe-Ally | ce9e3cde6e80b49d51addd2e295ae0ed99dc0d81 | [
"MIT"
] | null | null | null | ML Scripts/humidity.py | Susmit-A/Looking-Globe-Ally | ce9e3cde6e80b49d51addd2e295ae0ed99dc0d81 | [
"MIT"
] | 1 | 2018-10-27T10:30:17.000Z | 2018-10-27T10:35:52.000Z | ML Scripts/humidity.py | Susmit-A/Looking-Globe-Ally | ce9e3cde6e80b49d51addd2e295ae0ed99dc0d81 | [
"MIT"
] | 3 | 2018-10-20T09:47:03.000Z | 2018-10-28T13:54:58.000Z | import requests
urlp1 = 'https://weather.cit.api.here.com/weather/1.0/report.json?product=observation'
urlp2 = '&oneobservation=true&app_id=Yzd1hH5IOppt9cttpCvL&app_code=wiDCHLDcJlt3aUAD8pAJkQ'

# Query the HERE weather API over a grid of coordinates and print
# "latitude,longitude,humidity" for each observation.
#
# BUG FIX: the original incremented both i and j inside the inner loop and
# never reset j, so only the diagonal (0,0), (1,1), ... was queried and the
# outer loop was effectively dead.  j now restarts for every i, and i advances
# once per completed inner sweep.  The dead initial url/latitude/longitude
# assignments (immediately overwritten) were removed.
# NOTE(review): valid latitudes are -90..90 (longitudes -180..180); the
# original 0..360 sweep is kept as-is -- confirm the intended range.
i = 0.0
while i < 360:
    j = 0.0
    while j < 360:
        try:
            url = urlp1 + '&latitude=' + str(i) + '&longitude=' + str(j) + urlp2
            res = requests.get(url).json()
            res = res['observations']
            res = res['location'][0]
            print(res['latitude'], end=',')
            print(res['longitude'], end=',')
            hum = res['observation'][0]
            hum = hum['humidity']
            print(hum)
        except Exception:
            # Best effort: skip coordinates whose response is missing data.
            pass
        j = j + 1
    i = i + 1
| 29.655172 | 90 | 0.523256 | 104 | 860 | 4.307692 | 0.394231 | 0.017857 | 0.071429 | 0.084821 | 0.026786 | 0.026786 | 0 | 0 | 0 | 0 | 0 | 0.056027 | 0.315116 | 860 | 28 | 91 | 30.714286 | 0.704584 | 0 | 0 | 0.16 | 0 | 0.04 | 0.297674 | 0.093023 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.04 | 0 | 0.04 | 0.12 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77344f34d92422630db394ff84bdcbb2f12c3046 | 957 | py | Python | book-store/book_store.py | rdlu/exercism-python | 61e89f4969af5bcad028e843aaae6a8869b0187d | [
"MIT"
] | null | null | null | book-store/book_store.py | rdlu/exercism-python | 61e89f4969af5bcad028e843aaae6a8869b0187d | [
"MIT"
] | null | null | null | book-store/book_store.py | rdlu/exercism-python | 61e89f4969af5bcad028e843aaae6a8869b0187d | [
"MIT"
] | null | null | null | # just rewrote this: https://exercism.io/tracks/python/exercises/book-store/solutions/736ace5096a74f079343773f0d13ad57
# and made some comments for clarification
# basically we need to simulate different sizes of bundles
# BUT in this case the edge case [3, 5] is known
# so this solution hardcodes that
from collections import Counter
# Price (in cents) of a bundle of N distinct books, indexed by N.
# One book costs 800; each extra distinct title adds a growing discount.
bundle_cost = [0, 800, 1520, 2160, 2560, 3000]


def total(basket: list) -> int:
    """Return the cheapest total price (in cents) for ``basket``.

    ``basket`` is a list of book identifiers (duplicates allowed).  The
    greedy pass repeatedly forms one bundle out of every remaining
    distinct title; the only known counterexample to pure greed — two
    bundles of 4 being cheaper than a (3, 5) split — is patched up
    explicitly afterwards.
    """
    books = Counter(basket)
    bundles = []
    # Each pass peels off one copy of every remaining title, so the bundle
    # size is the number of distinct titles still in stock.
    while bundle_size := len(books):
        bundles.append(bundle_size)
        books.subtract(books.keys())
        books += Counter()  # adding an empty Counter drops zeroed items
    # Edge case: a (4, 4) split is cheaper than (3, 5).
    # Non-scalable: a hypothetical bundle of 6 would add new edge cases.
    while 3 in bundles and 5 in bundles:
        bundles.remove(3)
        bundles.remove(5)
        bundles += [4, 4]
    # Generator expression instead of a throwaway list inside sum().
    return sum(bundle_cost[size] for size in bundles)
77368cb5dc20a23713b08ed418ee572a518ecd8c | 4,869 | py | Python | tests/contrib.data_sources.standard/sources/test_plr.py | pyramidoereb/pyramid_oereb | 764c03e98e01ebc709cd17bd0ffd817bfe318892 | [
"BSD-2-Clause"
] | 2 | 2018-01-23T13:16:12.000Z | 2018-01-26T06:27:29.000Z | tests/contrib.data_sources.standard/sources/test_plr.py | camptocamp/pyramid_oereb | 2d33aceb796f0afada6728820fa9d4691f7e273a | [
"BSD-2-Clause"
] | 298 | 2017-08-30T07:12:10.000Z | 2019-01-31T10:52:07.000Z | tests/contrib.data_sources.standard/sources/test_plr.py | pyramidoereb/pyramid_oereb | 764c03e98e01ebc709cd17bd0ffd817bfe318892 | [
"BSD-2-Clause"
] | 4 | 2017-12-01T09:51:42.000Z | 2018-11-21T11:02:47.000Z | import pytest
from unittest.mock import patch
from sqlalchemy import String
from sqlalchemy.orm import declarative_base
from pyramid_oereb.contrib.data_sources.standard.models import get_view_service, get_legend_entry
from pyramid_oereb.contrib.data_sources.standard.sources.plr import DatabaseSource
from pyramid_oereb.core import b64
from pyramid_oereb.core.records.theme import ThemeRecord
from pyramid_oereb.core.records.view_service import LegendEntryRecord
@pytest.fixture
def source_params(db_connection):
    """Complete configuration dict for a standard ``DatabaseSource``.

    Mirrors a real pyramid_oereb theme configuration (land use plans) so
    that ``DatabaseSource(**source_params)`` can be constructed in tests.
    """
    yield {
        "code": "ch.Nutzungsplanung",
        "geometry_type": "GEOMETRYCOLLECTION",
        # Measurement tolerances applied to geometry intersections.
        "thresholds": {
            "length": {
                "limit": 1.0,
                "unit": 'm',
                "precision": 2
            },
            "area": {
                "limit": 1.0,
                "unit": 'm²',
                "precision": 2
            },
            "percentage": {
                "precision": 1
            }
        },
        "language": "de",
        "federal": False,
        "standard": True,
        "view_service": {
            "layer_index": 1,
            "layer_opacity": 1.0
        },
        # Dotted path of the source class plus its constructor parameters.
        "source": {
            "class": "pyramid_oereb.contrib.data_sources.standard.sources.plr.DatabaseSource",
            "params": {
                "db_connection": db_connection,
                "model_factory": "pyramid_oereb.contrib.data_sources.standard.models.theme.model_factory_string_pk",  # noqa: E501
                "schema_name": "land_use_plans"
            }
        },
        "hooks": {
            "get_symbol": "pyramid_oereb.contrib.data_sources.standard.hook_methods.get_symbol",
            "get_symbol_ref": "pyramid_oereb.core.hook_methods.get_symbol_ref"
        },
        # data_code -> transfer/extract code mapping for law statuses.
        "law_status_lookup": [{
            "data_code": "inKraft",
            "transfer_code": "inKraft",
            "extract_code": "inForce"
        }, {
            "data_code": "AenderungMitVorwirkung",
            "transfer_code": "AenderungMitVorwirkung",
            "extract_code": "changeWithPreEffect"
        }, {
            "data_code": "AenderungOhneVorwirkung",
            "transfer_code": "AenderungOhneVorwirkung",
            "extract_code": "changeWithoutPreEffect"
        }],
        # data_code -> transfer/extract code mapping for document types.
        "document_types_lookup": [{
            "data_code": "Rechtsvorschrift",
            "transfer_code": "Rechtsvorschrift",
            "extract_code": "LegalProvision"
        }, {
            "data_code": "GesetzlicheGrundlage",
            "transfer_code": "GesetzlicheGrundlage",
            "extract_code": "Law"
        }, {
            "data_code": "Hinweis",
            "transfer_code": "Hinweis",
            "extract_code": "Hint"
        }]
    }
@pytest.fixture(autouse=True)
def config(app_config):
    """Patch the global theme config for the duration of every test.

    Registers two ThemeRecords with the same code — one with and one
    without a sub-code — so resolution by code and sub-code is covered.
    """
    themes = [
        ThemeRecord(
            "ch.Nutzungsplanung",
            {"de": "Nutzungsplanung (kantonal/kommunal)"},
            20
        ), ThemeRecord(
            "ch.Nutzungsplanung",
            {"de": "Nutzungsplanung (kantonal/kommunal)"},
            20,
            "ch.Subcode"
        )
    ]
    with patch('pyramid_oereb.core.config.Config.themes', themes):
        yield
@pytest.fixture
def all_result_session(session, query):
    """Session class whose queries always return an empty result set."""
    class Query(query):
        def all(self):
            # No rows — lets DatabaseSource construction succeed without
            # touching a database.
            return []

    class Session(session):
        def query(self, term):
            return Query()

    yield Session
@pytest.fixture
def legend_entry_model_class():
    # Build a standalone SQLAlchemy model (String primary keys) so legend
    # entry rows can be instantiated without a database schema.
    Base = declarative_base()
    ViewService = get_view_service(Base, 'test', String)
    yield get_legend_entry(Base, 'test', String, ViewService)
# Parametrized with and without a sub-theme: both variants must map onto
# the matching ThemeRecord installed by the autouse `config` fixture.
@pytest.mark.parametrize('legend_entry_params', [{
    'id': '1',
    'legend_text': {'de': 'testlegende'},
    'type_code': 'testCode',
    'type_code_list': 'testCode,testCode2,testCode3',
    'theme': 'ch.Nutzungsplanung',
    'sub_theme': None,
    'view_service_id': '1'
}, {
    'id': '1',
    'legend_text': {'de': 'testlegende'},
    'type_code': 'testCode',
    'type_code_list': 'testCode,testCode2,testCode3',
    'theme': 'ch.Nutzungsplanung',
    'sub_theme': "ch.Subcode",
    'view_service_id': '1'
}])
def test_from_db_to_legend_entry_record(source_params, all_result_session, legend_entry_model_class, png_binary, legend_entry_params):  # noqa: E501
    # The symbol column stores the PNG base64-encoded.
    legend_entry_params.update({'symbol': b64.encode(png_binary)})
    legend_entry_from_db = legend_entry_model_class(**legend_entry_params)
    # Stub out the DB session so the source never opens a connection.
    with patch('pyramid_oereb.core.adapter.DatabaseAdapter.get_session', return_value=all_result_session()):
        source = DatabaseSource(**source_params)
        legend_entry_record = source.from_db_to_legend_entry_record(legend_entry_from_db)
    assert isinstance(legend_entry_record, LegendEntryRecord)
    assert legend_entry_record.theme.code == 'ch.Nutzungsplanung'
| 32.898649 | 148 | 0.597043 | 471 | 4,869 | 5.876858 | 0.292994 | 0.063584 | 0.034682 | 0.041546 | 0.263728 | 0.226156 | 0.194364 | 0.159682 | 0.07948 | 0.07948 | 0 | 0.009423 | 0.280756 | 4,869 | 147 | 149 | 33.122449 | 0.780982 | 0.004313 | 0 | 0.215385 | 0 | 0 | 0.329205 | 0.112487 | 0 | 0 | 0 | 0 | 0.015385 | 1 | 0.053846 | false | 0 | 0.069231 | 0.015385 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
773aabe8010d12babb7bc2d2c59fb026b29fad23 | 1,720 | py | Python | create_comment_list.py | renxinqiang/NLTK_EMOTION | f7935c11c97bba0e447f3c9430e5889d993b8907 | [
"MIT"
] | 2 | 2020-12-11T10:09:56.000Z | 2021-07-28T13:38:01.000Z | create_comment_list.py | renxinqiang/NLTK_EMOTION | f7935c11c97bba0e447f3c9430e5889d993b8907 | [
"MIT"
] | null | null | null | create_comment_list.py | renxinqiang/NLTK_EMOTION | f7935c11c97bba0e447f3c9430e5889d993b8907 | [
"MIT"
] | 1 | 2018-07-03T08:04:31.000Z | 2018-07-03T08:04:31.000Z | #!/usr/local/bin/python3
# exit('shuijiao')
import pymysql
import math
import jieba.analyse
import re
# Export jieba-extracted keywords for every row of the MySQL `comment`
# table into commentN.py files (one file per page of `size` rows), each
# containing a `ci = ` Python literal of quoted keywords.
comment_file = 'comment.py'
connect = pymysql.connect('127.0.0.1', 'root', 'root', 'comment', charset='utf8')
cursor = connect.cursor()
id = 0
size = 50000
sql = "SELECT count(1) as num FROM comment"
cursor.execute(sql)
count = cursor.fetchone()[0]
page = math.ceil(count / size)
jieba.analyse.set_stop_words('stop_words.txt')
jieba.add_word('脑子瓦塔', freq=20000)
jieba.add_word('玛吉亚巴库内', freq=20000)
# Punctuation/symbols to strip from each comment; pre-compiled once here
# instead of re-compiling per row (users may extend the character class).
r1 = u'[a-zA-Z0-9’!"#$%&\'()*+,-./:;<=>?@,。?★、…【】《》?“”‘’![\\]^_`{|}~~……]+'
punct_re = re.compile(r1)
for x in range(1, page + 1):
    where = ''
    if id:
        where = " AND id > " + str(id)
    limit = " LIMIT " + str(size)
    # NOTE(review): keyset pagination without an ORDER BY relies on the
    # server returning rows in id order — confirm or add ORDER BY id.
    sql = "SELECT id,comment FROM comment WHERE 1 " + where + limit
    print(sql)
    cursor.execute(sql)
    result = cursor.fetchall()
    comment_file = 'comment' + str(x) + '.py'
    print(comment_file)
    with open(comment_file, 'a', encoding="utf-8") as f:
        f.write('ci = ')
        for res in result:
            comm = res[1]
            # BUG FIX: the original tested `comm is 'None'` — identity
            # against a *string literal*, which is never true.  A NULL
            # column arrives as the None object.
            if comm is None:
                continue
            comm = punct_re.sub('', comm)
            if not comm:
                continue
            jieba_res = jieba.analyse.extract_tags(comm)
            if not jieba_res:
                continue
            write_str = ''
            for row in jieba_res:
                # BUG FIX: `row is ' '` compared identity, not equality;
                # use == for both the ASCII and the wide space.
                if not row or row == ' ' or row == ' ':
                    continue
                write_str += "'" + row + "'" + ','
            if not write_str:
                continue
            write_str = write_str + '\\\n'
            f.write(write_str)
    # Advance the keyset cursor to the last id of this page.
    # NOTE(review): raises IndexError if a page comes back empty — confirm
    # whether that can happen with this pagination scheme.
    id = result[-1][0]
cursor.close()
connect.close()
773b4b6f9337c5610318a46da5a0bc4b27c37f1a | 1,721 | py | Python | migrations/versions/5fa860be380f_.py | andela-football-league/afl-ug-backend | f95daa3c3eaa9660cb4be202b0fb03e99651e66b | [
"MIT"
] | null | null | null | migrations/versions/5fa860be380f_.py | andela-football-league/afl-ug-backend | f95daa3c3eaa9660cb4be202b0fb03e99651e66b | [
"MIT"
] | 25 | 2019-04-12T09:03:00.000Z | 2019-07-09T07:56:36.000Z | migrations/versions/5fa860be380f_.py | andela-football-league/afl-ug-backend | f95daa3c3eaa9660cb4be202b0fb03e99651e66b | [
"MIT"
] | 1 | 2019-04-18T07:48:03.000Z | 2019-04-18T07:48:03.000Z | """empty message
Revision ID: 5fa860be380f
Revises: a0089a928d64
Create Date: 2019-04-20 06:07:06.835430
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "5fa860be380f"
down_revision = "a0089a928d64"
branch_labels = None
depends_on = None
def upgrade():
    """Make person.email/name mandatory; rename FK person_profile.person -> person_id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        "person", "email", existing_type=sa.VARCHAR(length=50), nullable=False
    )
    op.alter_column(
        "person", "name", existing_type=sa.VARCHAR(length=50), nullable=False
    )
    # Column rename done as add-new / repoint-FK / drop-old, since a plain
    # rename would not also move the foreign key constraint.
    op.add_column(
        "person_profile", sa.Column("person_id", sa.Integer(), nullable=False)
    )
    op.drop_constraint(
        "person_profile_person_fkey", "person_profile", type_="foreignkey"
    )
    op.create_foreign_key(
        None, "person_profile", "person", ["person_id"], ["person_id"]
    )
    op.drop_column("person_profile", "person")
    # ### end Alembic commands ###
def downgrade():
    """Exact reverse of upgrade(): restore the `person` FK column and nullability."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "person_profile",
        sa.Column("person", sa.INTEGER(), autoincrement=False, nullable=False),
    )
    # Drop the auto-named FK created in upgrade(), restore the original one.
    op.drop_constraint(None, "person_profile", type_="foreignkey")
    op.create_foreign_key(
        "person_profile_person_fkey",
        "person_profile",
        "person",
        ["person"],
        ["person_id"],
    )
    op.drop_column("person_profile", "person_id")
    op.alter_column(
        "person", "name", existing_type=sa.VARCHAR(length=50), nullable=True
    )
    op.alter_column(
        "person", "email", existing_type=sa.VARCHAR(length=50), nullable=True
    )
    # ### end Alembic commands ###
| 27.758065 | 79 | 0.650203 | 201 | 1,721 | 5.358209 | 0.313433 | 0.111421 | 0.10585 | 0.070566 | 0.654596 | 0.600743 | 0.545961 | 0.545961 | 0.23584 | 0.224698 | 0 | 0.044053 | 0.2086 | 1,721 | 61 | 80 | 28.213115 | 0.746696 | 0.171412 | 0 | 0.232558 | 0 | 0 | 0.233981 | 0.037437 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.046512 | 0 | 0.093023 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7740f23e87f0dfa24fcd85a238507b0827b348c3 | 18,444 | py | Python | vispy/ext/freetype.py | lcampagn/vispy | 28c25d6904d697cde9bb4c37909bc3f934621134 | [
"BSD-3-Clause"
] | 1 | 2017-04-14T19:04:59.000Z | 2017-04-14T19:04:59.000Z | vispy/ext/freetype.py | lcampagn/vispy | 28c25d6904d697cde9bb4c37909bc3f934621134 | [
"BSD-3-Clause"
] | 19 | 2015-06-16T14:33:22.000Z | 2015-07-27T21:18:15.000Z | vispy/ext/freetype.py | campagnola/vispy | 28c25d6904d697cde9bb4c37909bc3f934621134 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
FreeType high-level python API
Adapted from freetype-py.
'''
import sys
import struct
from ctypes import (byref, c_char_p, c_ushort, cast, util, CDLL, Structure,
POINTER, c_int, c_short, c_long, c_void_p, c_uint,
c_char, c_ubyte, CFUNCTYPE)
from .six import string_types
from ..util import load_data_file
# FreeType glyph-load flags and kerning modes (values from freetype.h).
FT_LOAD_RENDER = 4
FT_KERNING_DEFAULT = 0
FT_KERNING_UNFITTED = 1
FT_LOAD_NO_HINTING = 2
FT_LOAD_FORCE_AUTOHINT = 32
FT_LOAD_NO_AUTOHINT = 32768
FT_LOAD_TARGET_LCD = 196608
FT_LOAD_TARGET_LIGHT = 65536
FT_LOAD_NO_SCALE = 1
FT_FACE_FLAG_SCALABLE = 1

# True on a 64-bit Python (pointer size is 8 bytes); used to pick the
# matching bundled DLL on Windows.
_64_bit = (8 * struct.calcsize("P")) == 64


##############################################################################
# ft_structs

# ctypes typedefs mirroring FreeType's basic C integer types.
FT_Int = c_int
FT_UInt = c_uint
FT_F2Dot14 = c_short
FT_Pos = FT_Fixed = FT_Long = c_long
FT_Glyph_Format = c_int
FT_String_p = c_char_p
FT_Short = c_short  # A typedef for signed short.
FT_UShort = c_ushort  # A typedef for unsigned short.
# Finalizer callback stored in FT_Generic.
FT_Generic_Finalizer = CFUNCTYPE(None, c_void_p)
FT_Encoding = c_int
# ctypes mirrors of the FreeType structs used by this wrapper.  Field
# names, types and order must match the C declarations in freetype.h
# exactly — do not reorder or retype fields.

class FT_LibraryRec(Structure):
    # Opaque handle; only ever used through a pointer.
    _fields_ = []
FT_Library = POINTER(FT_LibraryRec)


class FT_Vector(Structure):
    # A 2D vector (font units or 26.6 fixed point, context dependent).
    _fields_ = [('x', FT_Pos), ('y', FT_Pos)]


class FT_UnitVector(Structure):
    # A 2D unit vector in 2.14 fixed point.
    _fields_ = [('x', FT_F2Dot14), ('y', FT_F2Dot14)]


class FT_Matrix(Structure):
    # A 2x2 transform matrix in 16.16 fixed point.
    _fields_ = [('xx', FT_Fixed), ('xy', FT_Fixed),
                ('yx', FT_Fixed), ('yy', FT_Fixed)]


class FT_GlyphRec(Structure):
    # Root glyph object shared by all glyph image formats.
    _fields_ = [('library', FT_Library), ('clazz', c_void_p),
                ('format', FT_Glyph_Format), ('advance', FT_Vector)]
FT_Glyph = POINTER(FT_GlyphRec)


class FT_Bitmap(Structure):
    # A rendered bitmap; `buffer` points at rows*pitch bytes of pixels.
    _fields_ = [('rows', c_int), ('width', c_int),
                ('pitch', c_int), ('buffer', POINTER(c_ubyte)),
                ('num_grays', c_short), ('pixel_mode', c_ubyte),
                ('palette_mode', c_char), ('palette', c_void_p)]


class FT_BitmapGlyphRec(Structure):
    # Glyph specialization holding a bitmap plus its left/top bearing.
    _fields_ = [('root', FT_GlyphRec), ('left', FT_Int),
                ('top', FT_Int), ('bitmap', FT_Bitmap)]
FT_BitmapGlyph = POINTER(FT_BitmapGlyphRec)


class FT_Glyph_Metrics(Structure):
    # Per-glyph metrics (26.6 pixels or font units depending on scaling).
    _fields_ = [('width', FT_Pos), ('height', FT_Pos),
                ('horiBearingX', FT_Pos), ('horiBearingY', FT_Pos),
                ('horiAdvance', FT_Pos), ('vertBearingX', FT_Pos),
                ('vertBearingY', FT_Pos), ('vertAdvance', FT_Pos)]


class FT_Outline(Structure):
    # A scalable glyph outline: points, per-point tags and contour ends.
    _fields_ = [('n_contours', c_short), ('n_points', c_short),
                ('points', POINTER(FT_Vector)), ('tags', POINTER(c_ubyte)),
                ('contours', POINTER(c_short)), ('flags', c_int)]


class FT_Size_Metrics(Structure):
    # Metrics of the active size object (ppem values and 16.16 scales).
    _fields_ = [('x_ppem', FT_UShort), ('y_ppem', FT_UShort),
                ('x_scale', FT_Fixed), ('y_scale', FT_Fixed),
                ('ascender', FT_Pos), ('descender', FT_Pos),
                ('height', FT_Pos), ('max_advance', FT_Pos)]


class FT_BBox(Structure):
    # An axis-aligned bounding box.
    _fields_ = [('xMin', FT_Pos), ('yMin', FT_Pos),
                ('xMax', FT_Pos), ('yMax', FT_Pos)]


class FT_Generic(Structure):
    # Client data slot with an optional finalizer callback.
    _fields_ = [('data', c_void_p), ('finalizer', FT_Generic_Finalizer)]


class FT_SizeRec(Structure):
    # A face's active size object.
    _fields_ = [('face', c_void_p), ('generic', FT_Generic),
                ('metrics', FT_Size_Metrics), ('internal', c_void_p)]
FT_Size = POINTER(FT_SizeRec)


class FT_CharmapRec(Structure):
    # A character map (charcode -> glyph index translation table).
    _fields_ = [('face', c_void_p), ('encoding', FT_Encoding),
                ('platform_id', FT_UShort), ('encoding_id', FT_UShort)]
FT_Charmap = POINTER(FT_CharmapRec)


class FT_Bitmap_Size(Structure):
    # One fixed strike size embedded in a bitmap font.
    _fields_ = [('height', FT_Short), ('width', FT_Short),
                ('size', FT_Pos), ('x_ppem', FT_Pos), ('y_ppem', FT_Pos)]


class FT_GlyphSlotRec(Structure):
    # The face's glyph slot, filled by FT_Load_Glyph / FT_Load_Char.
    _fields_ = [('library', FT_Library), ('face', c_void_p),
                ('next', c_void_p), ('reserved', c_uint),
                ('generic', FT_Generic), ('metrics', FT_Glyph_Metrics),
                ('linearHoriAdvance', FT_Fixed),
                ('linearVertAdvance', FT_Fixed),
                ('advance', FT_Vector), ('format', FT_Glyph_Format),
                ('bitmap', FT_Bitmap), ('bitmap_left', FT_Int),
                ('bitmap_top', FT_Int), ('outline', FT_Outline),
                ('num_subglyphs', FT_UInt), ('subglyphs', c_void_p),
                ('control_data', c_void_p), ('control_len', c_long),
                ('lsb_delta', FT_Pos), ('rsb_delta', FT_Pos),
                ('other', c_void_p), ('internal', c_void_p)]
FT_GlyphSlot = POINTER(FT_GlyphSlotRec)


class FT_FaceRec(Structure):
    # A loaded typeface: global metrics, charmaps and the glyph slot.
    _fields_ = [('num_faces', FT_Long), ('face_index', FT_Long),
                ('face_flags', FT_Long), ('style_flags', FT_Long),
                ('num_glyphs', FT_Long), ('family_name', FT_String_p),
                ('style_name', FT_String_p), ('num_fixed_sizes', FT_Int),
                ('available_sizes', POINTER(FT_Bitmap_Size)),
                ('num_charmaps', c_int), ('charmaps', POINTER(FT_Charmap)),
                ('generic', FT_Generic), ('bbox', FT_BBox),
                ('units_per_EM', FT_UShort), ('ascender', FT_Short),
                ('descender', FT_Short), ('height', FT_Short),
                ('max_advance_width', FT_Short),
                ('max_advance_height', FT_Short),
                ('underline_position', FT_Short),
                ('underline_thickness', FT_Short),
                ('glyph', FT_GlyphSlot), ('size', FT_Size),
                ('charmap', FT_Charmap),
                ('driver', c_void_p), ('memory', c_void_p),
                ('stream', c_void_p), ('sizes_list_head', c_void_p),
                ('sizes_list_tail', c_void_p), ('autohint', FT_Generic),
                ('extensions', c_void_p), ('internal', c_void_p)]
FT_Face = POINTER(FT_FaceRec)
##############################################################################
# __init__.py

# Locate and load the FreeType shared library at import time.
__dll__ = None
FT_Library_filename = util.find_library('freetype')
if not FT_Library_filename and sys.platform.startswith('win'):
    # On Windows, fall back to the DLL bundled with vispy, picking the
    # 64- or 32-bit variant to match the running interpreter.
    fname_end = '_x64.dll' if _64_bit else '.dll'
    FT_Library_filename = load_data_file('freetype/freetype253' + fname_end)
if not FT_Library_filename:
    raise ImportError('Freetype library not found')
if not __dll__:
    __dll__ = CDLL(FT_Library_filename)

# Core library entry points.
FT_Init_FreeType = __dll__.FT_Init_FreeType
FT_Done_FreeType = __dll__.FT_Done_FreeType
FT_Library_Version = __dll__.FT_Library_Version

# Lazily-created singleton FT_Library handle (see get_handle()).
__handle__ = None

# Comment out to avoid segfaults on Py34
# def __del_library__(self):
#     global __handle__
#     if __handle__:
#         try:
#             FT_Done_FreeType(self)
#             __handle__ = None
#         except:
#             pass
# FT_Library.__del__ = __del_library__
def get_handle():
    """Return the process-wide FT_Library handle, initializing it lazily.

    Raises RuntimeError (with the hex FreeType error code) if the library
    fails to initialize.
    """
    global __handle__
    if __handle__:
        return __handle__
    __handle__ = FT_Library()
    err = FT_Init_FreeType(byref(__handle__))
    if err:
        raise RuntimeError(hex(err))
    return __handle__
def version():
    """Return the loaded FreeType version as (major, minor, patch)."""
    major, minor, patch = FT_Int(), FT_Int(), FT_Int()
    lib = get_handle()
    FT_Library_Version(lib, byref(major), byref(minor), byref(patch))
    return (major.value, minor.value, patch.value)
# Older FreeType builds lack FT_Library_SetLcdFilter; install a no-op
# fallback so callers can invoke it unconditionally.
try:
    FT_Library_SetLcdFilter = __dll__.FT_Library_SetLcdFilter
except AttributeError:
    # BUG FIX: was a bare `except:` — a missing CDLL symbol raises
    # AttributeError, so catch exactly that instead of everything.
    def FT_Library_SetLcdFilter(*args, **kwargs):
        return 0
# Bind the remaining C entry points from the loaded library, gating each
# symbol on the FreeType version that introduced it.
if version() >= (2, 4, 0):
    FT_Library_SetLcdFilterWeights = __dll__.FT_Library_SetLcdFilterWeights

# Face creation / destruction.
FT_New_Face = __dll__.FT_New_Face
FT_New_Memory_Face = __dll__.FT_New_Memory_Face
FT_Open_Face = __dll__.FT_Open_Face
FT_Attach_File = __dll__.FT_Attach_File
FT_Attach_Stream = __dll__.FT_Attach_Stream
if version() >= (2, 4, 2):
    FT_Reference_Face = __dll__.FT_Reference_Face
FT_Done_Face = __dll__.FT_Done_Face
FT_Done_Glyph = __dll__.FT_Done_Glyph

# Sizing, glyph loading and rendering.
FT_Select_Size = __dll__.FT_Select_Size
FT_Request_Size = __dll__.FT_Request_Size
FT_Set_Char_Size = __dll__.FT_Set_Char_Size
FT_Set_Pixel_Sizes = __dll__.FT_Set_Pixel_Sizes
FT_Load_Glyph = __dll__.FT_Load_Glyph
FT_Load_Char = __dll__.FT_Load_Char
FT_Set_Transform = __dll__.FT_Set_Transform
FT_Render_Glyph = __dll__.FT_Render_Glyph

# Kerning and glyph access.
FT_Get_Kerning = __dll__.FT_Get_Kerning
FT_Get_Track_Kerning = __dll__.FT_Get_Track_Kerning
FT_Get_Glyph_Name = __dll__.FT_Get_Glyph_Name
FT_Get_Glyph = __dll__.FT_Get_Glyph
FT_Glyph_Get_CBox = __dll__.FT_Glyph_Get_CBox
FT_Get_Postscript_Name = __dll__.FT_Get_Postscript_Name
FT_Get_Postscript_Name.restype = c_char_p  # returns a C string

# Charmap handling.
FT_Select_Charmap = __dll__.FT_Select_Charmap
FT_Set_Charmap = __dll__.FT_Set_Charmap
FT_Get_Charmap_Index = __dll__.FT_Get_Charmap_Index
FT_Get_CMap_Language_ID = __dll__.FT_Get_CMap_Language_ID
FT_Get_CMap_Format = __dll__.FT_Get_CMap_Format
FT_Get_Char_Index = __dll__.FT_Get_Char_Index
FT_Get_First_Char = __dll__.FT_Get_First_Char
FT_Get_Next_Char = __dll__.FT_Get_Next_Char
FT_Get_Name_Index = __dll__.FT_Get_Name_Index
FT_Get_SubGlyph_Info = __dll__.FT_Get_SubGlyph_Info
if version() >= (2, 3, 8):
    FT_Get_FSType_Flags = __dll__.FT_Get_FSType_Flags
    FT_Get_FSType_Flags.restype = c_ushort
FT_Get_X11_Font_Format = __dll__.FT_Get_X11_Font_Format
FT_Get_X11_Font_Format.restype = c_char_p  # returns a C string
FT_Get_Sfnt_Name_Count = __dll__.FT_Get_Sfnt_Name_Count
FT_Get_Sfnt_Name = __dll__.FT_Get_Sfnt_Name
FT_Get_Advance = __dll__.FT_Get_Advance

# Outline and stroker API.
FT_Outline_GetInsideBorder = __dll__.FT_Outline_GetInsideBorder
FT_Outline_GetOutsideBorder = __dll__.FT_Outline_GetOutsideBorder
FT_Outline_Get_BBox = __dll__.FT_Outline_Get_BBox
FT_Outline_Get_CBox = __dll__.FT_Outline_Get_CBox
FT_Stroker_New = __dll__.FT_Stroker_New
FT_Stroker_Set = __dll__.FT_Stroker_Set
FT_Stroker_Rewind = __dll__.FT_Stroker_Rewind
FT_Stroker_ParseOutline = __dll__.FT_Stroker_ParseOutline
FT_Stroker_BeginSubPath = __dll__.FT_Stroker_BeginSubPath
FT_Stroker_EndSubPath = __dll__.FT_Stroker_EndSubPath
FT_Stroker_LineTo = __dll__.FT_Stroker_LineTo
FT_Stroker_ConicTo = __dll__.FT_Stroker_ConicTo
FT_Stroker_CubicTo = __dll__.FT_Stroker_CubicTo
FT_Stroker_GetBorderCounts = __dll__.FT_Stroker_GetBorderCounts
FT_Stroker_ExportBorder = __dll__.FT_Stroker_ExportBorder
FT_Stroker_GetCounts = __dll__.FT_Stroker_GetCounts
FT_Stroker_Export = __dll__.FT_Stroker_Export
FT_Stroker_Done = __dll__.FT_Stroker_Done
FT_Glyph_Stroke = __dll__.FT_Glyph_Stroke
FT_Glyph_StrokeBorder = __dll__.FT_Glyph_StrokeBorder
FT_Glyph_To_Bitmap = __dll__.FT_Glyph_To_Bitmap

# Friendly aliases for the math structs.
Vector = FT_Vector
Matrix = FT_Matrix
class Bitmap(object):
    """Read-only Python view over an FT_Bitmap structure."""

    def __init__(self, bitmap):
        self._FT_Bitmap = bitmap

    @property
    def rows(self):
        """Number of bitmap rows."""
        return self._FT_Bitmap.rows

    @property
    def width(self):
        """Number of pixels per row."""
        return self._FT_Bitmap.width

    @property
    def pitch(self):
        """Bytes per row (may exceed width due to padding)."""
        return self._FT_Bitmap.pitch

    @property
    def buffer(self):
        """The pixel data copied out as a flat list of rows * pitch bytes."""
        raw = self._FT_Bitmap.buffer
        return [raw[i] for i in range(self.rows * self.pitch)]
class Glyph(object):
    # Owns an FT_Glyph handle and frees it via FT_Done_Glyph on deletion.
    def __init__(self, glyph):
        self._FT_Glyph = glyph

    def __del__(self):
        # Guard both names: during interpreter shutdown the module global
        # FT_Done_Glyph may already have been cleared to None.
        if self._FT_Glyph is not None and FT_Done_Glyph is not None:
            FT_Done_Glyph(self._FT_Glyph)

    def to_bitmap(self, mode, origin, destroy=False):
        # Render the glyph into a bitmap glyph; raises RuntimeError with
        # the hex FreeType error code on failure.
        error = FT_Glyph_To_Bitmap(byref(self._FT_Glyph),
                                   mode, origin, destroy)
        if error:
            raise RuntimeError(hex(error))
        return BitmapGlyph(self._FT_Glyph)
class BitmapGlyph(object):
    """View over an FT_BitmapGlyph: a rendered bitmap plus its bearings."""

    def __init__(self, glyph):
        self._FT_BitmapGlyph = cast(glyph, FT_BitmapGlyph)

    @property
    def bitmap(self):
        """The rendered pixels, wrapped as a Bitmap."""
        return Bitmap(self._FT_BitmapGlyph.contents.bitmap)

    @property
    def left(self):
        """Left-side bearing in pixels."""
        return self._FT_BitmapGlyph.contents.left

    @property
    def top(self):
        """Top-side bearing in pixels."""
        return self._FT_BitmapGlyph.contents.top
class GlyphSlot(object):
    """View over an FT_GlyphSlot pointer (the face's working glyph)."""

    def __init__(self, slot):
        self._FT_GlyphSlot = slot

    def get_glyph(self):
        """Extract the slot's glyph image as a Glyph; raises on error."""
        out = FT_Glyph()
        err = FT_Get_Glyph(self._FT_GlyphSlot, byref(out))
        if err:
            raise RuntimeError(hex(err))
        return Glyph(out)

    @property
    def bitmap(self):
        return Bitmap(self._FT_GlyphSlot.contents.bitmap)

    @property
    def metrics(self):
        return self._FT_GlyphSlot.contents.metrics

    @property
    def next(self):
        return GlyphSlot(self._FT_GlyphSlot.contents.next)

    @property
    def advance(self):
        return self._FT_GlyphSlot.contents.advance

    @property
    def format(self):
        return self._FT_GlyphSlot.contents.format

    @property
    def bitmap_top(self):
        return self._FT_GlyphSlot.contents.bitmap_top

    @property
    def bitmap_left(self):
        return self._FT_GlyphSlot.contents.bitmap_left
class Face(object):
    """A loaded font face (wraps FT_Face); owns the handle and frees it."""

    def __init__(self, filename, index=0):
        library = get_handle()
        face = FT_Face()
        self._FT_Face = None
        # error = FT_New_Face( library, filename, 0, byref(face) )
        # Encode the path so non-ASCII filenames survive the C call.
        u_filename = c_char_p(filename.encode('utf-8'))
        error = FT_New_Face(library, u_filename, index, byref(face))
        if error:
            raise RuntimeError(hex(error))
        self._filename = filename
        self._index = index
        self._FT_Face = face

    def __del__(self):
        # Guard against interpreter shutdown: the module global
        # FT_Done_Face may already have been cleared to None.
        if self._FT_Face is not None and FT_Done_Face is not None:
            FT_Done_Face(self._FT_Face)

    def attach_file(self, filename):
        """Attach auxiliary data (e.g. an AFM metrics file) to the face."""
        error = FT_Attach_File(self._FT_Face, filename)
        if error:
            raise RuntimeError(hex(error))

    def set_char_size(self, width=0, height=0, hres=72, vres=72):
        """Set the character size (26.6 fractional points at hres/vres dpi)."""
        error = FT_Set_Char_Size(self._FT_Face, width, height, hres, vres)
        if error:
            raise RuntimeError('Could not set size: %s' % hex(error))

    def set_pixel_sizes(self, width, height):
        """Set the nominal glyph size in integer pixels."""
        error = FT_Set_Pixel_Sizes(self._FT_Face, width, height)
        if error:
            raise RuntimeError(hex(error))

    def select_charmap(self, encoding):
        """Select the charmap matching ``encoding``; raises on failure."""
        error = FT_Select_Charmap(self._FT_Face, encoding)
        if error:
            raise RuntimeError(hex(error))

    def set_charmap(self, charmap):
        """Make ``charmap`` the active character map of the face."""
        error = FT_Set_Charmap(self._FT_Face, charmap._FT_Charmap)
        if error:
            raise RuntimeError(hex(error))

    def get_char_index(self, charcode):
        """Return the glyph index for a character (str or int charcode)."""
        if isinstance(charcode, string_types):
            charcode = ord(charcode)
        return FT_Get_Char_Index(self._FT_Face, charcode)

    def get_first_char(self):
        """Return (charcode, glyph_index) of the first mapped character."""
        agindex = FT_UInt()
        charcode = FT_Get_First_Char(self._FT_Face, byref(agindex))
        return charcode, agindex.value

    def get_next_char(self, charcode, agindex):
        """Return (charcode, glyph_index) of the character after ``charcode``."""
        # NOTE(review): the agindex argument is ignored and recreated
        # locally — kept for signature compatibility with freetype-py.
        agindex = FT_UInt(0)
        charcode = FT_Get_Next_Char(self._FT_Face, charcode, byref(agindex))
        return charcode, agindex.value

    def get_name_index(self, name):
        """Return the glyph index for a glyph ``name`` (bytes)."""
        return FT_Get_Name_Index(self._FT_Face, name)

    def set_transform(self, matrix, delta):
        """Set the 2x2 transform matrix and translation applied at load time."""
        FT_Set_Transform(self._FT_Face,
                         byref(matrix), byref(delta))

    def select_size(self, strike_index):
        """Select a fixed bitmap strike by index; raises on failure."""
        error = FT_Select_Size(self._FT_Face, strike_index)
        if error:
            raise RuntimeError(hex(error))

    def load_glyph(self, index, flags=FT_LOAD_RENDER):
        """Load (and by default render) the glyph at ``index`` into the slot."""
        error = FT_Load_Glyph(self._FT_Face, index, flags)
        if error:
            raise RuntimeError(hex(error))

    def load_char(self, char, flags=FT_LOAD_RENDER):
        """Load (and by default render) the glyph for character ``char``."""
        if len(char) == 1:
            char = ord(char)
        error = FT_Load_Char(self._FT_Face, char, flags)
        if error:
            raise RuntimeError(hex(error))

    def get_advance(self, gindex, flags):
        """Return the (possibly unscaled) advance of glyph ``gindex``."""
        padvance = FT_Fixed(0)
        error = FT_Get_Advance(self._FT_Face, gindex, flags, byref(padvance))
        if error:
            raise RuntimeError(hex(error))
        return padvance.value

    def get_kerning(self, left, right, mode=FT_KERNING_DEFAULT):
        """Return the kerning FT_Vector between two characters."""
        left_glyph = self.get_char_index(left)
        right_glyph = self.get_char_index(right)
        kerning = FT_Vector(0, 0)
        error = FT_Get_Kerning(self._FT_Face,
                               left_glyph, right_glyph, mode, byref(kerning))
        if error:
            raise RuntimeError(hex(error))
        return kerning

    def get_format(self):
        """Return the font format string (e.g. b'TrueType')."""
        return FT_Get_X11_Font_Format(self._FT_Face)

    # Read-only views onto the underlying FT_FaceRec fields / helpers.
    sfnt_name_count = property(lambda self:
                               FT_Get_Sfnt_Name_Count(self._FT_Face))
    postscript_name = property(lambda self:
                               FT_Get_Postscript_Name(self._FT_Face))
    num_faces = property(lambda self: self._FT_Face.contents.num_faces)
    face_index = property(lambda self: self._FT_Face.contents.face_index)
    face_flags = property(lambda self: self._FT_Face.contents.face_flags)
    style_flags = property(lambda self: self._FT_Face.contents.style_flags)
    num_glyphs = property(lambda self: self._FT_Face.contents.num_glyphs)
    family_name = property(lambda self: self._FT_Face.contents.family_name)
    style_name = property(lambda self: self._FT_Face.contents.style_name)
    num_fixed_sizes = property(lambda self:
                               self._FT_Face.contents.num_fixed_sizes)
    num_charmaps = property(lambda self: self._FT_Face.contents.num_charmaps)
    units_per_EM = property(lambda self: self._FT_Face.contents.units_per_EM)
    ascender = property(lambda self: self._FT_Face.contents.ascender)
    descender = property(lambda self: self._FT_Face.contents.descender)
    height = property(lambda self: self._FT_Face.contents.height)
    max_advance_width = property(lambda self:
                                 self._FT_Face.contents.max_advance_width)
    max_advance_height = property(lambda self:
                                  self._FT_Face.contents.max_advance_height)
    underline_position = property(lambda self:
                                  self._FT_Face.contents.underline_position)
    underline_thickness = property(lambda self:
                                   self._FT_Face.contents.underline_thickness)
    glyph = property(lambda self: GlyphSlot(self._FT_Face.contents.glyph))
| 36.814371 | 79 | 0.667588 | 2,439 | 18,444 | 4.507995 | 0.128741 | 0.035471 | 0.03638 | 0.056025 | 0.257481 | 0.192269 | 0.159891 | 0.097135 | 0.008367 | 0 | 0 | 0.005781 | 0.21221 | 18,444 | 500 | 80 | 36.888 | 0.750929 | 0.05129 | 0 | 0.09434 | 0 | 0 | 0.059617 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.016173 | 0.008086 | 0.320755 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7742e56e6336d1a7ed7c53c3c0b6c2037a895f7b | 1,887 | py | Python | animal.py | ComputerScientist-01/Tile-Matching-Game | 29fd9c54d1d7e1bc7360243dc788db4ea9c805ef | [
"MIT"
] | 6 | 2020-06-01T11:37:31.000Z | 2021-12-20T03:50:10.000Z | animal.py | ComputerScientist-01/Tile-Matching-Game | 29fd9c54d1d7e1bc7360243dc788db4ea9c805ef | [
"MIT"
] | null | null | null | animal.py | ComputerScientist-01/Tile-Matching-Game | 29fd9c54d1d7e1bc7360243dc788db4ea9c805ef | [
"MIT"
] | null | null | null | import random
import os
import game_config as gc
from pygame import image, transform
# Per-animal placement count: one key per asset file, starting at 0.
# Animal.__init__ bumps the chosen name's count; a matching game needs
# exactly two tiles per animal.
animals_count = dict((a, 0) for a in gc.ASSET_FILES)


def available_animals():
    # Animals placed fewer than twice are still available for a new tile.
    return [animal for animal, count in animals_count.items() if count < 2]
class Animal:
    # One tile on the board: a randomly assigned animal image, its grid
    # position derived from the flat tile index, and the grey cover box
    # drawn until the tile is revealed.
    def __init__(self, index):
        self.index = index
        # Pick a name used fewer than twice so each animal gets two tiles.
        self.name = random.choice(available_animals())
        self.image_path = os.path.join(gc.ASSET_DIR, self.name)
        # Flat index -> grid coordinates.
        self.row = index // gc.NUM_TILES_SIDE
        self.col = index % gc.NUM_TILES_SIDE
        # Matched tiles are skipped when drawing (removed from play).
        self.skip = False
        self.image = image.load(self.image_path)
        # Scale to the tile size minus a margin on each side.
        self.image = transform.scale(self.image, (gc.IMAGE_SIZE - 2 * gc.MARGIN, gc.IMAGE_SIZE - 2 * gc.MARGIN))
        # Light-grey cover box, same size as the image.
        self.box = self.image.copy()
        self.box.fill((200, 200, 200))
        # Record that this animal has been placed once more.
        animals_count[self.name] += 1
| 35.603774 | 113 | 0.63911 | 279 | 1,887 | 4.250896 | 0.405018 | 0.045531 | 0.016863 | 0.025295 | 0.141653 | 0.109612 | 0.075885 | 0.075885 | 0.075885 | 0.075885 | 0 | 0.012039 | 0.295707 | 1,887 | 52 | 114 | 36.288462 | 0.880361 | 0.432432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0.05 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |