Dataset schema (one row per source file; ⌀ in the original dump marks a nullable column):

| Column | Type | Length / range | Nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 4–996k | no |
| ext | string | 8 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 4–245 | no |
| max_stars_repo_name | string | length 6–130 | no |
| max_stars_repo_head_hexsha | string | length 40 | no |
| max_stars_repo_licenses | list | length 1–10 | no |
| max_stars_count | int64 | 1–191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 4–245 | no |
| max_issues_repo_name | string | length 6–130 | no |
| max_issues_repo_head_hexsha | string | length 40 | no |
| max_issues_repo_licenses | list | length 1–10 | no |
| max_issues_count | int64 | 1–67k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 4–245 | no |
| max_forks_repo_name | string | length 6–130 | no |
| max_forks_repo_head_hexsha | string | length 40 | no |
| max_forks_repo_licenses | list | length 1–10 | no |
| max_forks_count | int64 | 1–105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 4–996k | no |
| avg_line_length | float64 | 1.33–58.2k | no |
| max_line_length | int64 | 2–323k | no |
| alphanum_fraction | float64 | 0–0.97 | no |
| content_no_comment | string | length 0–946k | no |
| is_comment_constant_removed | bool | 2 classes | no |
| is_sharp_comment_removed | bool | 1 class | no |
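The schema above matches code-corpus exports in the style of The Stack: one row per source file, with star/issue/fork metadata joined from the hosting repositories. As a minimal sketch of how such a shard could be inspected, assuming the rows are available as a local Parquet file (the file name below is hypothetical):

```python
import pandas as pd

# Hypothetical shard name; substitute the actual export of this dataset.
df = pd.read_parquet("python_shard.parquet")

# Nullable columns (max_stars_count and the event datetimes) come back as
# NaN/None where the viewer rendered a null.
starred = df[df["max_stars_count"].notna()]
print(starred[["max_stars_repo_name", "max_stars_repo_path", "max_stars_count"]])

# Each row carries the file twice: `content` verbatim and `content_no_comment`
# with comments stripped; the two boolean flags presumably record which
# comment-removal passes touched the text.
row = df.iloc[0]
print(len(row["content"]), len(row["content_no_comment"]))
```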
hexsha: 79066daedd611efcd4250452e2493c5793c201b7 | size: 4,199 | ext: py | lang: Python
max_stars_repo_path: jhunt/qt/main.py | max_stars_repo_name: jeremiedecock/jhunt | max_stars_repo_head_hexsha: d5b189a72c6eb0f152508eeedb9541f28f907937 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: jhunt/qt/main.py | max_issues_repo_name: jeremiedecock/jhunt | max_issues_repo_head_hexsha: d5b189a72c6eb0f152508eeedb9541f28f907937 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: jhunt/qt/main.py | max_forks_repo_name: jeremiedecock/jhunt | max_forks_repo_head_hexsha: d5b189a72c6eb0f152508eeedb9541f28f907937 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from dve.io.table import TableDataBase
from jhunt.qt.widgets.mainwindow import MainWindow
import datetime
from PyQt5.QtWidgets import QApplication
APPLICATION_NAME = "JHunt"
def main():
adverts_file_name = ".jhunt_adverts"
adverts_data_schema = [
{"header": "Date", "default_value": datetime.datetime.now(), "dtype": datetime.datetime, "mapped": False},
{"header": "Score", "default_value": int(0), "dtype": int, "mapped": False, "min_value": 0, "max_value": 5},
{"header": "Application", "default_value": False, "dtype": bool, "mapped": False},
{"header": "Category", "default_value": "Entreprise", "dtype": str, "mapped": False, "values": ("Entreprise", "IR/IE", "PostDoc")},
{"header": "Organization", "default_value": "", "dtype": str, "mapped": False},
{"header": "Ref.", "default_value": "", "dtype": str, "mapped": False},
{"header": "Title", "default_value": "", "dtype": str, "mapped": False},
{"header": "URL", "default_value": "", "dtype": str, "mapped": True, "widget": "QLineEdit"},
{"header": "Pros", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"},
{"header": "Cons", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"},
{"header": "Description", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"}
]
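    # Schema keys, as used above: "mapped" columns are edited through the Qt
    # widget named in "widget"; "values" enumerates the allowed choices; and
    # "min_value"/"max_value" bound the integer "Score" column.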
adverts_database = TableDataBase(adverts_data_schema, adverts_file_name)
websites_file_name = ".jhunt_websites"
websites_data_schema = [
{"header": "Date", "default_value": datetime.datetime.now(), "dtype": datetime.datetime, "mapped": False, "hidden": True},
{"header": "Name", "default_value": "", "dtype": str, "mapped": False},
{"header": "Score", "default_value": int(0), "dtype": int, "mapped": False, "min_value": 0, "max_value": 3},
{"header": "Category", "default_value": "Private Company", "dtype": str, "mapped": False, "values": ("Private Company", "Public Research", "School", "Search Engine")},
{"header": "Last visit", "default_value": datetime.datetime.now(), "dtype": datetime.datetime, "mapped": False},
{"header": "Today status", "default_value": "None", "dtype": str, "mapped": False, "values": ("None", "Partial", "Full")},
{"header": "Description", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"},
{"header": "URL", "default_value": "", "dtype": str, "mapped": True, "widget": "QLineEdit"}
]
websites_database = TableDataBase(websites_data_schema, websites_file_name)
adverts_data = adverts_database.load() # TODO ?
websites_data = websites_database.load() # TODO ?
app = QApplication(sys.argv)
app.setApplicationName(APPLICATION_NAME)
# Make widgets
window = MainWindow(adverts_data, websites_data)
    # The main loop of the application; event handling starts from this point.
    # The exec_() method carries a trailing underscore because exec is a
    # Python keyword, so PyQt names the method exec_() instead.
exit_code = app.exec_()
adverts_database.save(adverts_data) # TODO ?
websites_database.save(websites_data) # TODO ?
    # sys.exit() ensures a clean exit and reports the application's exit
    # status back to the environment.
sys.exit(exit_code)
if __name__ == '__main__':
main()
avg_line_length: 57.520548 | max_line_length: 201 | alphanum_fraction: 0.526554
content_no_comment:
import sys
from dve.io.table import TableDataBase
from jhunt.qt.widgets.mainwindow import MainWindow
import datetime
from PyQt5.QtWidgets import QApplication
APPLICATION_NAME = "JHunt"
def main():
adverts_file_name = ".jhunt_adverts"
adverts_data_schema = [
{"header": "Date", "default_value": datetime.datetime.now(), "dtype": datetime.datetime, "mapped": False},
{"header": "Score", "default_value": int(0), "dtype": int, "mapped": False, "min_value": 0, "max_value": 5},
{"header": "Application", "default_value": False, "dtype": bool, "mapped": False},
{"header": "Category", "default_value": "Entreprise", "dtype": str, "mapped": False, "values": ("Entreprise", "IR/IE", "PostDoc")},
{"header": "Organization", "default_value": "", "dtype": str, "mapped": False},
{"header": "Ref.", "default_value": "", "dtype": str, "mapped": False},
{"header": "Title", "default_value": "", "dtype": str, "mapped": False},
{"header": "URL", "default_value": "", "dtype": str, "mapped": True, "widget": "QLineEdit"},
{"header": "Pros", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"},
{"header": "Cons", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"},
{"header": "Description", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"}
]
adverts_database = TableDataBase(adverts_data_schema, adverts_file_name)
websites_file_name = ".jhunt_websites"
websites_data_schema = [
{"header": "Date", "default_value": datetime.datetime.now(), "dtype": datetime.datetime, "mapped": False, "hidden": True},
{"header": "Name", "default_value": "", "dtype": str, "mapped": False},
{"header": "Score", "default_value": int(0), "dtype": int, "mapped": False, "min_value": 0, "max_value": 3},
{"header": "Category", "default_value": "Private Company", "dtype": str, "mapped": False, "values": ("Private Company", "Public Research", "School", "Search Engine")},
{"header": "Last visit", "default_value": datetime.datetime.now(), "dtype": datetime.datetime, "mapped": False},
{"header": "Today status", "default_value": "None", "dtype": str, "mapped": False, "values": ("None", "Partial", "Full")},
{"header": "Description", "default_value": "", "dtype": str, "mapped": True, "widget": "QPlainTextEdit"},
{"header": "URL", "default_value": "", "dtype": str, "mapped": True, "widget": "QLineEdit"}
]
websites_database = TableDataBase(websites_data_schema, websites_file_name)
adverts_data = adverts_database.load()
websites_data = websites_database.load()
app = QApplication(sys.argv)
app.setApplicationName(APPLICATION_NAME)
window = MainWindow(adverts_data, websites_data)
exit_code = app.exec_()
adverts_database.save(adverts_data)
websites_database.save(websites_data)
sys.exit(exit_code)
if __name__ == '__main__':
main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79066e7dd47e2b519bc4867cd9c1c11f8de00430 | size: 4,468 | ext: py | lang: Python
max_stars_repo_path: hooks/commit-msg.py | max_stars_repo_name: wotsen/learning_platform_server | max_stars_repo_head_hexsha: 38b3483ae5496927acec53c179a78f86f403e2d3 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: hooks/commit-msg.py | max_issues_repo_name: wotsen/learning_platform_server | max_issues_repo_head_hexsha: 38b3483ae5496927acec53c179a78f86f403e2d3 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: hooks/commit-msg.py | max_forks_repo_name: wotsen/learning_platform_server | max_forks_repo_head_hexsha: 38b3483ae5496927acec53c179a78f86f403e2d3 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-02-16T09:57:01.000Z | max_forks_repo_forks_event_max_datetime: 2020-02-16T09:57:01.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/1/28 14:07
# @Author : ywl
# @Email : astralrovers@outlook.com
# @File : commit-msg.py
import os
import sys
import re
import json
crc_list = (
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
)
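# Note (added for clarity): this 256-entry table is the standard lookup table
# for the CCITT polynomial 0x1021 processed MSB-first, e.g. crc_list[1] == 0x1021.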
def sum_crc16(crc, file_bit):
"""
计算CRC16
@param crc:初始校验码
@param file_bit:文件2进制流
@return:校验码
"""
for bit in file_bit:
crc = 0xffff & crc
# temp = crc // 256
temp = crc >> 8
crc = 0xffff & crc
crc <<= 8
crc = 0xffff & crc
crc ^= crc_list[0xff & (temp ^ bit)]
return crc
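# Quick sanity check (hedged): with an initial value of 0 and no bit
# reflection, this routine matches the CRC-16/XMODEM parameterization, whose
# catalog check value over b"123456789" is 0x31C3:
#   assert sum_crc16(0, b"123456789") == 0x31C3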
def sum_file_crc16(file_name):
"""
计算文件校验码,每次计算4096字节
@param file_name:文件名
@return:校验码
"""
crc = 0
with open(file_name, 'rb') as f:
crc = sum_crc16(crc, f.read())
return crc
def get_version():
with open("pack/release_version.json", "r+", encoding="utf8") as f:
try:
versions = json.load(f)
return 'v' + versions["release_versions"][-1][-1]
except Exception as e:
print("读取版本失败 : ", e)
exit(-1)
def get_srv_crc():
srv_name = "src/AIService"
if not os.path.exists(srv_name) or os.path.isdir(srv_name):
print("no srv build AIService")
exit(-1)
return sum_file_crc16(srv_name)
# crc = hex(sum_file_crc16(srv_name)).upper()[2:]
# crc = '0' * (4 - len(crc)) + crc
# return crc
def check_first_line(line):
return re.match(r'\[version : [vV](0|[1-9][0-9]*.?)\.(0|[1-9][0-9]*.?)\.(0|[1-9][0-9]*.?)\] '
r'\[srv-crc16 : [0-9A-F]{4}\]$', line)
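# Example of a first line accepted by check_first_line (illustrative values):
#   [version : v1.2.3] [srv-crc16 : 0A1B]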
def add_pre_msg():
commit_msg_file = sys.argv[1]
with open(commit_msg_file, "r+") as f:
text = list()
while True:
line = f.readline()
if not line:
break
text.append(line)
pre = "[version : %s] [srv-crc16 : %04X]\n" % (get_version(), get_srv_crc())
if check_first_line(text[0]):
text[0] = pre
else:
text.insert(0, pre)
f.seek(0)
f.truncate()
f.writelines(text)
if __name__ == "__main__":
add_pre_msg()
avg_line_length: 33.096296 | max_line_length: 98 | alphanum_fraction: 0.597359
content_no_comment:
import os
import sys
import re
import json
crc_list = (
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
)
def sum_crc16(crc, file_bit):
for bit in file_bit:
crc = 0xffff & crc
temp = crc >> 8
crc = 0xffff & crc
crc <<= 8
crc = 0xffff & crc
crc ^= crc_list[0xff & (temp ^ bit)]
return crc
def sum_file_crc16(file_name):
crc = 0
with open(file_name, 'rb') as f:
crc = sum_crc16(crc, f.read())
return crc
def get_version():
with open("pack/release_version.json", "r+", encoding="utf8") as f:
try:
versions = json.load(f)
return 'v' + versions["release_versions"][-1][-1]
except Exception as e:
print("读取版本失败 : ", e)
exit(-1)
def get_srv_crc():
srv_name = "src/AIService"
if not os.path.exists(srv_name) or os.path.isdir(srv_name):
print("no srv build AIService")
exit(-1)
return sum_file_crc16(srv_name)
def check_first_line(line):
return re.match(r'\[version : [vV](0|[1-9][0-9]*.?)\.(0|[1-9][0-9]*.?)\.(0|[1-9][0-9]*.?)\] '
r'\[srv-crc16 : [0-9A-F]{4}\]$', line)
def add_pre_msg():
commit_msg_file = sys.argv[1]
with open(commit_msg_file, "r+") as f:
text = list()
while True:
line = f.readline()
if not line:
break
text.append(line)
pre = "[version : %s] [srv-crc16 : %04X]\n" % (get_version(), get_srv_crc())
if check_first_line(text[0]):
text[0] = pre
else:
text.insert(0, pre)
f.seek(0)
f.truncate()
f.writelines(text)
if __name__ == "__main__":
add_pre_msg()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79066e8a12ef024ead477fe9eba93cf2181701e0 | size: 17,919 | ext: py | lang: Python
max_stars_repo_path: pjproject_android/tests/cdash/builder.py | max_stars_repo_name: WachterJud/qaul.net_legacy | max_stars_repo_head_hexsha: 9c2be0a38ad6e90fadc0d1150340e37d220997ae | max_stars_repo_licenses: ["MIT", "BSD-2-Clause", "BSD-3-Clause"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2019-11-11T08:16:08.000Z | max_stars_repo_stars_event_max_datetime: 2020-08-25T03:08:44.000Z
max_issues_repo_path: pjproject_android/tests/cdash/builder.py | max_issues_repo_name: WachterJud/qaul.net_legacy | max_issues_repo_head_hexsha: 9c2be0a38ad6e90fadc0d1150340e37d220997ae | max_issues_repo_licenses: ["MIT", "BSD-2-Clause", "BSD-3-Clause"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2020-02-20T06:58:16.000Z | max_issues_repo_issues_event_max_datetime: 2020-02-20T07:08:07.000Z
max_forks_repo_path: my_softphone/pjproject-2.9/tests/cdash/builder.py | max_forks_repo_name: sashkaseltsov1/reposCpp | max_forks_repo_head_hexsha: 3ff5ce2a14a368a36b1758099ce4f3e8c4cdf11d | max_forks_repo_licenses: ["Unlicense"] | max_forks_count: 5 | max_forks_repo_forks_event_min_datetime: 2019-07-02T02:03:24.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-30T09:58:52.000Z
content:
#
# builder.py - PJSIP test scenarios builder
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import ccdash
import os
import platform
import re
import subprocess
import sys
import time
class Operation:
"""\
The Operation class describes the individual ccdash operation to be
performed.
"""
# Types:
UPDATE = "update" # Update operation
CONFIGURE = "configure" # Configure operation
BUILD = "build" # Build operation
TEST = "test" # Unit test operation
def __init__(self, type, cmdline, name="", wdir=""):
self.type = type
self.cmdline = cmdline
self.name = name
self.wdir = wdir
if self.type==self.TEST and not self.name:
raise "name required for tests"
def encode(self, base_dir):
s = [self.type]
if self.type == self.TEST:
s.append(self.name)
if self.type != self.UPDATE:
s.append(self.cmdline)
s.append("-w")
if self.wdir:
s.append(base_dir + "/" + self.wdir)
else:
s.append(base_dir)
return s
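# For illustration, a TEST operation such as
#   Operation(Operation.TEST, "./pjlib-test", name="pjlib test", wdir="pjlib/bin")
# encodes (for base_dir "pjproject") to:
#   ["test", "pjlib test", "./pjlib-test", "-w", "pjproject/pjlib/bin"]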
#
# Update operation
#
update_ops = [Operation(Operation.UPDATE, "")]
#
# The standard library tests (e.g. pjlib-test, pjsip-test, etc.)
#
std_test_ops= [
Operation(Operation.TEST, "./pjlib-test$SUFFIX", name="pjlib test",
wdir="pjlib/bin"),
Operation(Operation.TEST, "./pjlib-util-test$SUFFIX",
name="pjlib-util test", wdir="pjlib-util/bin"),
Operation(Operation.TEST, "./pjnath-test$SUFFIX", name="pjnath test",
wdir="pjnath/bin"),
Operation(Operation.TEST, "./pjmedia-test$SUFFIX", name="pjmedia test",
wdir="pjmedia/bin"),
Operation(Operation.TEST, "./pjsip-test$SUFFIX", name="pjsip test",
wdir="pjsip/bin")
]
#
# These are pjsua Python based unit test operations
#
def build_pjsua_test_ops(pjsua_exe=""):
ops = []
if pjsua_exe:
exe = " -e ../../pjsip-apps/bin/" + pjsua_exe
else:
exe = ""
cwd = os.getcwd()
os.chdir("../pjsua")
os.system("python runall.py --list > list")
f = open("list", "r")
for e in f:
e = e.rstrip("\r\n ")
(mod,param) = e.split(None,2)
name = mod[4:mod.find(".py")] + "_" + \
param[param.find("/")+1:param.find(".py")]
ops.append(Operation(Operation.TEST, "python run.py" + exe + " " + \
e, name=name, wdir="tests/pjsua"))
f.close()
os.remove("list")
os.chdir(cwd)
return ops
#
# Get gcc version
#
def gcc_version(gcc):
proc = subprocess.Popen(gcc + " -v", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
ver = ""
while True:
s = proc.stdout.readline()
if not s:
break
if s.find("gcc version") >= 0:
ver = s.split(None, 3)[2]
break
proc.wait()
return "gcc-" + ver
#
# Get Visual Studio version
#
def vs_get_version():
proc = subprocess.Popen("cl", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
while True:
s = proc.stdout.readline()
if s=="":
break
pos = s.find("Version")
if pos > 0:
proc.wait()
s = s[pos+8:]
ver = s.split(None, 1)[0]
major = ver[0:2]
if major=="12":
return "vs6"
elif major=="13":
return "vs2003"
elif major=="14":
return "vs2005"
elif major=="15":
return "vs2008"
else:
return "vs-" + major
proc.wait()
return "vs-unknown"
#
# Test config
#
class BaseConfig:
def __init__(self, base_dir, url, site, group, options=None):
self.base_dir = base_dir
self.url = url
self.site = site
self.group = group
self.options = options
#
# Base class for test configurator
#
class TestBuilder:
def __init__(self, config, build_config_name="",
user_mak="", config_site="", exclude=[], not_exclude=[]):
self.config = config # BaseConfig instance
self.build_config_name = build_config_name # Optional build suffix
self.user_mak = user_mak # To be put in user.mak
        self.config_site = config_site          # To be put in config_site.h
self.saved_user_mak = "" # To restore user.mak
        self.saved_config_site = ""             # To restore config_site.h
self.exclude = exclude # List of exclude pattern
self.not_exclude = not_exclude # List of include pattern
self.ccdash_args = [] # ccdash cmd line
def stamp(self):
return time.strftime("%Y%m%d-%H%M", time.localtime())
def pre_action(self):
# Override user.mak
name = self.config.base_dir + "/user.mak"
if os.access(name, os.F_OK):
f = open(name, "r")
self.saved_user_mak = f.read()
f.close()
if True:
f = open(name, "w")
f.write(self.user_mak)
f.close()
# Override config_site.h
name = self.config.base_dir + "/pjlib/include/pj/config_site.h"
if os.access(name, os.F_OK):
f = open(name, "r")
self.saved_config_site= f.read()
f.close()
if True:
f = open(name, "wt")
f.write(self.config_site)
f.close()
def post_action(self):
# Restore user.mak
name = self.config.base_dir + "/user.mak"
f = open(name, "wt")
f.write(self.saved_user_mak)
f.close()
# Restore config_site.h
name = self.config.base_dir + "/pjlib/include/pj/config_site.h"
f = open(name, "wt")
f.write(self.saved_config_site)
f.close()
def build_tests(self):
# This should be overridden by subclasses
pass
def execute(self):
if len(self.ccdash_args)==0:
self.build_tests()
self.pre_action()
mandatory_op = ["update", "configure", "build"]
counter = 0
for a in self.ccdash_args:
# Check if this test is in exclusion list
fullcmd = " ".join(a)
excluded = False
included = False
for pat in self.exclude:
if pat and re.search(pat, fullcmd) != None:
excluded = True
break
if excluded:
for pat in self.not_exclude:
if pat and re.search(pat, fullcmd) != None:
included = True
break
if excluded and not included:
if len(fullcmd)>60:
fullcmd = fullcmd[0:60] + ".."
print "Skipping '%s'" % (fullcmd)
continue
b = ["ccdash.py"]
b.extend(a)
a = b
#print a
try:
rc = ccdash.main(a)
except Exception, e:
errmsg = str(e)
print "**** Error: ccdash got exception %s ****" % errmsg
rc = -1
except:
print "**** Error: ccdash got unknown exception ****"
rc = -1
if rc!=0 and a[1] in mandatory_op:
print "Stopping because of error.."
break
counter = counter + 1
self.post_action()
#
# GNU test configurator
#
class GNUTestBuilder(TestBuilder):
"""\
    This class creates a list of tests suitable for GNU targets.
"""
def __init__(self, config, build_config_name="", user_mak="", \
config_site="", cross_compile="", exclude=[], not_exclude=[]):
"""\
Parameters:
config - BaseConfig instance
build_config_name - Optional name to be added as suffix to the build
name. Sample: "min-size", "O4", "TLS", etc.
user_mak - Contents to be put on user.mak
config_site - Contents to be put on config_site.h
cross_compile - Optional cross-compile prefix. Must include the
trailing dash, e.g. "arm-unknown-linux-"
exclude - List of regular expression patterns for tests
that will be excluded from the run
not_exclude - List of regular expression patterns for tests
that will be run regardless of whether they
match the excluded pattern.
"""
TestBuilder.__init__(self, config, build_config_name=build_config_name,
user_mak=user_mak, config_site=config_site,
exclude=exclude, not_exclude=not_exclude)
self.cross_compile = cross_compile
if self.cross_compile and self.cross_compile[-1] != '-':
            self.cross_compile += "-"
def build_tests(self):
if self.cross_compile:
suffix = "-" + self.cross_compile[0:-1]
build_name = self.cross_compile + \
gcc_version(self.cross_compile + "gcc")
else:
proc = subprocess.Popen("sh "+self.config.base_dir+"/config.guess",
shell=True, stdout=subprocess.PIPE)
plat = proc.stdout.readline().rstrip(" \r\n")
build_name = plat + "-"+gcc_version(self.cross_compile + "gcc")
suffix = "-" + plat
if self.build_config_name:
build_name = build_name + "-" + self.build_config_name
cmds = []
cmds.extend(update_ops)
cmds.append(Operation(Operation.CONFIGURE, "sh ./configure"))
if sys.platform=="win32":
# Don't build python module on Mingw
cmds.append(Operation(Operation.BUILD,
"sh -c 'make distclean && make dep && make'"))
else:
cmds.append(Operation(Operation.BUILD,
"sh -c 'make distclean && make dep && make" + \
" && cd pjsip-apps/src/python && " + \
"python setup.py clean build'"))
cmds.extend(std_test_ops)
cmds.extend(build_pjsua_test_ops())
self.ccdash_args = []
for c in cmds:
c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
args = c.encode(self.config.base_dir)
args.extend(["-U", self.config.url,
"-S", self.config.site,
"-T", self.stamp(),
"-B", build_name,
"-G", self.config.group])
args.extend(self.config.options)
self.ccdash_args.append(args)
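# Illustrative use of the GNU builder (hedged: the URL/site/group values below
# are placeholders, and BaseConfig.options must be a list, e.g. []):
#   cfg = BaseConfig("/path/to/pjproject", "http://cdash.example", "mysite",
#                    "Experimental", options=[])
#   GNUTestBuilder(cfg, build_config_name="default").execute()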
#
# MSVC test configurator
#
class MSVCTestBuilder(TestBuilder):
"""\
    This class creates a list of tests suitable for Visual Studio builds.
You need to set the MSVC environment variables (typically by calling
vcvars32.bat) prior to running this class.
"""
def __init__(self, config, target="Release|Win32", build_config_name="",
config_site="", exclude=[], not_exclude=[]):
"""\
Parameters:
config - BaseConfig instance
target - Visual Studio build configuration to build.
Sample: "Debug|Win32", "Release|Win32".
build_config_name - Optional name to be added as suffix to the build
name. Sample: "Debug", "Release", "IPv6", etc.
config_site - Contents to be put on config_site.h
exclude - List of regular expression patterns for tests
that will be excluded from the run
not_exclude - List of regular expression patterns for tests
that will be run regardless of whether they
match the excluded pattern.
"""
TestBuilder.__init__(self, config, build_config_name=build_config_name,
config_site=config_site, exclude=exclude,
not_exclude=not_exclude)
self.target = target.lower()
def build_tests(self):
(vsbuild,sys) = self.target.split("|",2)
build_name = sys + "-" + vs_get_version() + "-" + vsbuild
if self.build_config_name:
build_name = build_name + "-" + self.build_config_name
vccmd = "vcbuild.exe /nologo /nohtmllog /nocolor /rebuild " + \
"pjproject-vs8.sln " + " \"" + self.target + "\""
suffix = "-i386-win32-vc8-" + vsbuild
pjsua = "pjsua_vc8"
if vsbuild=="debug":
pjsua = pjsua + "d"
cmds = []
cmds.extend(update_ops)
cmds.append(Operation(Operation.CONFIGURE, "CMD /C echo Nothing to do"))
cmds.append(Operation(Operation.BUILD, vccmd))
cmds.extend(std_test_ops)
cmds.extend(build_pjsua_test_ops(pjsua))
self.ccdash_args = []
for c in cmds:
c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
args = c.encode(self.config.base_dir)
args.extend(["-U", self.config.url,
"-S", self.config.site,
"-T", self.stamp(),
"-B", build_name,
"-G", self.config.group])
args.extend(self.config.options)
self.ccdash_args.append(args)
#
# Symbian test configurator
#
class SymbianTestBuilder(TestBuilder):
"""\
    This class creates a list of tests suitable for Symbian builds. You need to
set the command line build settings prior to running this class (typically
that involves setting the EPOCROOT variable and current device).
"""
def __init__(self, config, target="gcce urel", build_config_name="",
config_site="", exclude=[], not_exclude=[]):
"""\
Parameters:
config - BaseConfig instance
target - Symbian target to build. Default is "gcce urel".
build_config_name - Optional name to be added as suffix to the build
name. Sample: "APS", "VAS", etc.
config_site - Contents to be put on config_site.h
exclude - List of regular expression patterns for tests
that will be excluded from the run
not_exclude - List of regular expression patterns for tests
that will be run regardless of whether they
match the excluded pattern.
"""
TestBuilder.__init__(self, config, build_config_name=build_config_name,
config_site=config_site, exclude=exclude,
not_exclude=not_exclude)
self.target = target.lower()
def build_tests(self):
# Check that EPOCROOT is set
if not "EPOCROOT" in os.environ:
print "Error: EPOCROOT environment variable is not set"
sys.exit(1)
epocroot = os.environ["EPOCROOT"]
# EPOCROOT must have trailing backslash
if epocroot[-1] != "\\":
epocroot = epocroot + "\\"
os.environ["EPOCROOT"] = epocroot
sdk1 = epocroot.split("\\")[-2]
# Check that correct device is set
proc = subprocess.Popen("devices", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
sdk2 = ""
while True:
line = proc.stdout.readline()
if line.find("- default") > 0:
sdk2 = line.split(":",1)[0]
break
proc.wait()
if sdk1 != sdk2:
print "Error: default SDK in device doesn't match EPOCROOT"
print "Default device SDK =", sdk2
print "EPOCROOT SDK =", sdk1
sys.exit(1)
build_name = sdk2.replace("_", "-") + "-" + \
self.target.replace(" ", "-")
if self.build_config_name:
build_name = build_name + "-" + self.build_config_name
cmdline = "cmd /C \"cd build.symbian && bldmake bldfiles && abld build %s\"" % (self.target)
cmds = []
cmds.extend(update_ops)
cmds.append(Operation(Operation.CONFIGURE, "CMD /C echo Nothing to do"))
cmds.extend([Operation(Operation.BUILD, cmdline)])
self.ccdash_args = []
suffix = ""
for c in cmds:
c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
args = c.encode(self.config.base_dir)
args.extend(["-U", self.config.url,
"-S", self.config.site,
"-T", self.stamp(),
"-B", build_name,
"-G", self.config.group])
args.extend(self.config.options)
self.ccdash_args.append(args)
avg_line_length: 35.695219 | max_line_length: 100 | alphanum_fraction: 0.53686
content_no_comment:
import ccdash
import os
import platform
import re
import subprocess
import sys
import time
class Operation:
"""\
The Operation class describes the individual ccdash operation to be
performed.
"""
UPDATE = "update"
CONFIGURE = "configure"
BUILD = "build"
TEST = "test"
def __init__(self, type, cmdline, name="", wdir=""):
self.type = type
self.cmdline = cmdline
self.name = name
self.wdir = wdir
if self.type==self.TEST and not self.name:
raise "name required for tests"
def encode(self, base_dir):
s = [self.type]
if self.type == self.TEST:
s.append(self.name)
if self.type != self.UPDATE:
s.append(self.cmdline)
s.append("-w")
if self.wdir:
s.append(base_dir + "/" + self.wdir)
else:
s.append(base_dir)
return s
update_ops = [Operation(Operation.UPDATE, "")]
std_test_ops= [
Operation(Operation.TEST, "./pjlib-test$SUFFIX", name="pjlib test",
wdir="pjlib/bin"),
Operation(Operation.TEST, "./pjlib-util-test$SUFFIX",
name="pjlib-util test", wdir="pjlib-util/bin"),
Operation(Operation.TEST, "./pjnath-test$SUFFIX", name="pjnath test",
wdir="pjnath/bin"),
Operation(Operation.TEST, "./pjmedia-test$SUFFIX", name="pjmedia test",
wdir="pjmedia/bin"),
Operation(Operation.TEST, "./pjsip-test$SUFFIX", name="pjsip test",
wdir="pjsip/bin")
]
def build_pjsua_test_ops(pjsua_exe=""):
ops = []
if pjsua_exe:
exe = " -e ../../pjsip-apps/bin/" + pjsua_exe
else:
exe = ""
cwd = os.getcwd()
os.chdir("../pjsua")
os.system("python runall.py --list > list")
f = open("list", "r")
for e in f:
e = e.rstrip("\r\n ")
(mod,param) = e.split(None,2)
name = mod[4:mod.find(".py")] + "_" + \
param[param.find("/")+1:param.find(".py")]
ops.append(Operation(Operation.TEST, "python run.py" + exe + " " + \
e, name=name, wdir="tests/pjsua"))
f.close()
os.remove("list")
os.chdir(cwd)
return ops
def gcc_version(gcc):
proc = subprocess.Popen(gcc + " -v", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
ver = ""
while True:
s = proc.stdout.readline()
if not s:
break
if s.find("gcc version") >= 0:
ver = s.split(None, 3)[2]
break
proc.wait()
return "gcc-" + ver
def vs_get_version():
proc = subprocess.Popen("cl", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
while True:
s = proc.stdout.readline()
if s=="":
break
pos = s.find("Version")
if pos > 0:
proc.wait()
s = s[pos+8:]
ver = s.split(None, 1)[0]
major = ver[0:2]
if major=="12":
return "vs6"
elif major=="13":
return "vs2003"
elif major=="14":
return "vs2005"
elif major=="15":
return "vs2008"
else:
return "vs-" + major
proc.wait()
return "vs-unknown"
class BaseConfig:
def __init__(self, base_dir, url, site, group, options=None):
self.base_dir = base_dir
self.url = url
self.site = site
self.group = group
self.options = options
class TestBuilder:
def __init__(self, config, build_config_name="",
user_mak="", config_site="", exclude=[], not_exclude=[]):
self.config = config
self.build_config_name = build_config_name
self.user_mak = user_mak
self.config_site = config_site
self.saved_user_mak = ""
self.saved_config_site = ""
self.exclude = exclude
self.not_exclude = not_exclude
self.ccdash_args = []
def stamp(self):
return time.strftime("%Y%m%d-%H%M", time.localtime())
def pre_action(self):
name = self.config.base_dir + "/user.mak"
if os.access(name, os.F_OK):
f = open(name, "r")
self.saved_user_mak = f.read()
f.close()
if True:
f = open(name, "w")
f.write(self.user_mak)
f.close()
name = self.config.base_dir + "/pjlib/include/pj/config_site.h"
if os.access(name, os.F_OK):
f = open(name, "r")
self.saved_config_site= f.read()
f.close()
if True:
f = open(name, "wt")
f.write(self.config_site)
f.close()
def post_action(self):
name = self.config.base_dir + "/user.mak"
f = open(name, "wt")
f.write(self.saved_user_mak)
f.close()
name = self.config.base_dir + "/pjlib/include/pj/config_site.h"
f = open(name, "wt")
f.write(self.saved_config_site)
f.close()
def build_tests(self):
pass
def execute(self):
if len(self.ccdash_args)==0:
self.build_tests()
self.pre_action()
mandatory_op = ["update", "configure", "build"]
counter = 0
for a in self.ccdash_args:
fullcmd = " ".join(a)
excluded = False
included = False
for pat in self.exclude:
if pat and re.search(pat, fullcmd) != None:
excluded = True
break
if excluded:
for pat in self.not_exclude:
if pat and re.search(pat, fullcmd) != None:
included = True
break
if excluded and not included:
if len(fullcmd)>60:
fullcmd = fullcmd[0:60] + ".."
print "Skipping '%s'" % (fullcmd)
continue
b = ["ccdash.py"]
b.extend(a)
a = b
try:
rc = ccdash.main(a)
except Exception, e:
errmsg = str(e)
print "**** Error: ccdash got exception %s ****" % errmsg
rc = -1
except:
print "**** Error: ccdash got unknown exception ****"
rc = -1
if rc!=0 and a[1] in mandatory_op:
print "Stopping because of error.."
break
counter = counter + 1
self.post_action()
class GNUTestBuilder(TestBuilder):
"""\
    This class creates a list of tests suitable for GNU targets.
"""
def __init__(self, config, build_config_name="", user_mak="", \
config_site="", cross_compile="", exclude=[], not_exclude=[]):
"""\
Parameters:
config - BaseConfig instance
build_config_name - Optional name to be added as suffix to the build
name. Sample: "min-size", "O4", "TLS", etc.
user_mak - Contents to be put on user.mak
config_site - Contents to be put on config_site.h
cross_compile - Optional cross-compile prefix. Must include the
trailing dash, e.g. "arm-unknown-linux-"
exclude - List of regular expression patterns for tests
that will be excluded from the run
not_exclude - List of regular expression patterns for tests
that will be run regardless of whether they
match the excluded pattern.
"""
TestBuilder.__init__(self, config, build_config_name=build_config_name,
user_mak=user_mak, config_site=config_site,
exclude=exclude, not_exclude=not_exclude)
self.cross_compile = cross_compile
if self.cross_compile and self.cross_compile[-1] != '-':
            self.cross_compile += "-"
def build_tests(self):
if self.cross_compile:
suffix = "-" + self.cross_compile[0:-1]
build_name = self.cross_compile + \
gcc_version(self.cross_compile + "gcc")
else:
proc = subprocess.Popen("sh "+self.config.base_dir+"/config.guess",
shell=True, stdout=subprocess.PIPE)
plat = proc.stdout.readline().rstrip(" \r\n")
build_name = plat + "-"+gcc_version(self.cross_compile + "gcc")
suffix = "-" + plat
if self.build_config_name:
build_name = build_name + "-" + self.build_config_name
cmds = []
cmds.extend(update_ops)
cmds.append(Operation(Operation.CONFIGURE, "sh ./configure"))
if sys.platform=="win32":
cmds.append(Operation(Operation.BUILD,
"sh -c 'make distclean && make dep && make'"))
else:
cmds.append(Operation(Operation.BUILD,
"sh -c 'make distclean && make dep && make" + \
" && cd pjsip-apps/src/python && " + \
"python setup.py clean build'"))
cmds.extend(std_test_ops)
cmds.extend(build_pjsua_test_ops())
self.ccdash_args = []
for c in cmds:
c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
args = c.encode(self.config.base_dir)
args.extend(["-U", self.config.url,
"-S", self.config.site,
"-T", self.stamp(),
"-B", build_name,
"-G", self.config.group])
args.extend(self.config.options)
self.ccdash_args.append(args)
#
# MSVC test configurator
#
class MSVCTestBuilder(TestBuilder):
"""\
    This class creates a list of tests suitable for Visual Studio builds.
You need to set the MSVC environment variables (typically by calling
vcvars32.bat) prior to running this class.
"""
def __init__(self, config, target="Release|Win32", build_config_name="",
config_site="", exclude=[], not_exclude=[]):
"""\
Parameters:
config - BaseConfig instance
target - Visual Studio build configuration to build.
Sample: "Debug|Win32", "Release|Win32".
build_config_name - Optional name to be added as suffix to the build
name. Sample: "Debug", "Release", "IPv6", etc.
config_site - Contents to be put on config_site.h
exclude - List of regular expression patterns for tests
that will be excluded from the run
not_exclude - List of regular expression patterns for tests
that will be run regardless of whether they
match the excluded pattern.
"""
TestBuilder.__init__(self, config, build_config_name=build_config_name,
config_site=config_site, exclude=exclude,
not_exclude=not_exclude)
self.target = target.lower()
def build_tests(self):
(vsbuild,sys) = self.target.split("|",2)
build_name = sys + "-" + vs_get_version() + "-" + vsbuild
if self.build_config_name:
build_name = build_name + "-" + self.build_config_name
vccmd = "vcbuild.exe /nologo /nohtmllog /nocolor /rebuild " + \
"pjproject-vs8.sln " + " \"" + self.target + "\""
suffix = "-i386-win32-vc8-" + vsbuild
pjsua = "pjsua_vc8"
if vsbuild=="debug":
pjsua = pjsua + "d"
cmds = []
cmds.extend(update_ops)
cmds.append(Operation(Operation.CONFIGURE, "CMD /C echo Nothing to do"))
cmds.append(Operation(Operation.BUILD, vccmd))
cmds.extend(std_test_ops)
cmds.extend(build_pjsua_test_ops(pjsua))
self.ccdash_args = []
for c in cmds:
c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
args = c.encode(self.config.base_dir)
args.extend(["-U", self.config.url,
"-S", self.config.site,
"-T", self.stamp(),
"-B", build_name,
"-G", self.config.group])
args.extend(self.config.options)
self.ccdash_args.append(args)
#
# Symbian test configurator
#
class SymbianTestBuilder(TestBuilder):
"""\
    This class creates a list of tests suitable for Symbian builds. You need to
set the command line build settings prior to running this class (typically
that involves setting the EPOCROOT variable and current device).
"""
def __init__(self, config, target="gcce urel", build_config_name="",
config_site="", exclude=[], not_exclude=[]):
"""\
Parameters:
config - BaseConfig instance
target - Symbian target to build. Default is "gcce urel".
build_config_name - Optional name to be added as suffix to the build
name. Sample: "APS", "VAS", etc.
config_site - Contents to be put on config_site.h
exclude - List of regular expression patterns for tests
that will be excluded from the run
not_exclude - List of regular expression patterns for tests
that will be run regardless of whether they
match the excluded pattern.
"""
TestBuilder.__init__(self, config, build_config_name=build_config_name,
config_site=config_site, exclude=exclude,
not_exclude=not_exclude)
self.target = target.lower()
def build_tests(self):
# Check that EPOCROOT is set
if not "EPOCROOT" in os.environ:
print "Error: EPOCROOT environment variable is not set"
sys.exit(1)
epocroot = os.environ["EPOCROOT"]
# EPOCROOT must have trailing backslash
if epocroot[-1] != "\\":
epocroot = epocroot + "\\"
os.environ["EPOCROOT"] = epocroot
sdk1 = epocroot.split("\\")[-2]
# Check that correct device is set
proc = subprocess.Popen("devices", stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
sdk2 = ""
while True:
line = proc.stdout.readline()
if line.find("- default") > 0:
sdk2 = line.split(":",1)[0]
break
proc.wait()
if sdk1 != sdk2:
print "Error: default SDK in device doesn't match EPOCROOT"
print "Default device SDK =", sdk2
print "EPOCROOT SDK =", sdk1
sys.exit(1)
build_name = sdk2.replace("_", "-") + "-" + \
self.target.replace(" ", "-")
if self.build_config_name:
build_name = build_name + "-" + self.build_config_name
cmdline = "cmd /C \"cd build.symbian && bldmake bldfiles && abld build %s\"" % (self.target)
cmds = []
cmds.extend(update_ops)
cmds.append(Operation(Operation.CONFIGURE, "CMD /C echo Nothing to do"))
cmds.extend([Operation(Operation.BUILD, cmdline)])
self.ccdash_args = []
suffix = ""
for c in cmds:
c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
args = c.encode(self.config.base_dir)
args.extend(["-U", self.config.url,
"-S", self.config.site,
"-T", self.stamp(),
"-B", build_name,
"-G", self.config.group])
args.extend(self.config.options)
self.ccdash_args.append(args)
is_comment_constant_removed: false | is_sharp_comment_removed: true
hexsha: 79066f0e2be157515e702c9b507e1dcd97481d92 | size: 15,329 | ext: py | lang: Python
max_stars_repo_path: tensorflow_probability/python/distributions/zipf_test.py | max_stars_repo_name: nagachika/probability | max_stars_repo_head_hexsha: 2a5609ceec01a388ec03b583b4f8e813cfbad981 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-07-12T22:40:42.000Z | max_stars_repo_stars_event_max_datetime: 2020-07-12T22:40:42.000Z
max_issues_repo_path: tensorflow_probability/python/distributions/zipf_test.py | max_issues_repo_name: nagachika/probability | max_issues_repo_head_hexsha: 2a5609ceec01a388ec03b583b4f8e813cfbad981 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2019-08-01T18:31:41.000Z | max_issues_repo_issues_event_max_datetime: 2019-08-01T19:42:15.000Z
max_forks_repo_path: tensorflow_probability/python/distributions/zipf_test.py | max_forks_repo_name: nagachika/probability | max_forks_repo_head_hexsha: 2a5609ceec01a388ec03b583b4f8e813cfbad981 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-04-17T18:01:47.000Z | max_forks_repo_forks_event_max_datetime: 2020-04-17T18:01:47.000Z
content:
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import stats
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_case
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class ZipfTest(test_case.TestCase):
def assertBetween(self, x, minimum, maximum):
self.assertGreaterEqual(x, minimum)
self.assertLessEqual(x, maximum)
def assertAllBetween(self, a, minval, maxval, atol=1e-6):
a = self._GetNdArray(a)
minval = self._GetNdArray(minval)
maxval = self._GetNdArray(maxval)
self.assertEqual(a.shape, minval.shape)
self.assertEqual(a.shape, maxval.shape)
for idx, _ in np.ndenumerate(a):
self.assertBetween(a[idx], minval[idx] - atol, maxval[idx] + atol)
def testZipfShape(self):
power = tf.constant([3.0] * 5)
zipf = tfd.Zipf(power=power)
self.assertEqual(self.evaluate(zipf.batch_shape_tensor()), (5,))
self.assertEqual(zipf.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(zipf.event_shape_tensor()), [])
self.assertEqual(zipf.event_shape, tf.TensorShape([]))
def testInvalidPower(self):
invalid_powers = [-.02, 0.5, -2., .99, 1.]
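    # Zipf requires power > 1: the normalizing constant is the Riemann zeta
    # function zeta(power), and sum(1 / k**power) diverges for power <= 1.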
for power in invalid_powers:
with self.assertRaisesOpError("Condition x > y"):
zipf = tfd.Zipf(power=power, validate_args=True)
self.evaluate(zipf.power)
def testNanPower(self):
zipf = tfd.Zipf(power=np.nan, validate_args=False)
self.assertAllNan(self.evaluate(zipf.power))
def testValidPower_ImplicitlyConvertsToFloat32(self):
powers = [2, 10, 1.1]
for power in powers:
zipf = tfd.Zipf(power=power, validate_args=True)
self.assertEqual(zipf.power.dtype, tf.float32)
def testEventDtype(self):
for power_dtype in [tf.float32, tf.float64]:
for event_dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
power_dtype = tf.float32
event_dtype = tf.int32
power = tf.constant(5., dtype=power_dtype)
zipf = tfd.Zipf(power=power, dtype=event_dtype)
self.assertEqual(zipf.dtype, event_dtype)
self.assertEqual(zipf.dtype, zipf.sample(10).dtype)
self.assertEqual(zipf.dtype, zipf.sample(1).dtype)
self.assertEqual(zipf.dtype, zipf.mode().dtype)
def testInvalidEventDtype(self):
with self.assertRaisesWithPredicateMatch(
TypeError, "power.dtype .* not a supported .* type"):
power = tf.constant(5., dtype=tf.float16)
zipf = tfd.Zipf(power=power, dtype=tf.int32, validate_args=True)
self.evaluate(zipf.sample())
def testZipfLogPmf_InvalidArgs(self):
power = tf.constant([4.0])
# Non-integer samples are rejected if validate_args is True and
# interpolate_nondiscrete is False.
non_integer_samples = [0.99, 4.5, 5.001, 1e-6, -3, -2, -1, -0., 0]
for x in non_integer_samples:
zipf = tfd.Zipf(
power=power, interpolate_nondiscrete=False, validate_args=True)
with self.assertRaisesOpError("Condition (x == y|x > 0)"):
self.evaluate(zipf.log_prob(x))
with self.assertRaisesOpError("Condition (x == y|x > 0)"):
self.evaluate(zipf.prob(x))
def testZipfLogPmf_IntegerArgs(self):
batch_size = 9
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = np.array([-3., -0., 0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
# Check that log_pmf(x) of tfd.Zipf is between the values of
# stats.zipf.logpmf for ceil(x) and floor(x).
log_pmf_values = self.evaluate(log_pmf)
floor_x = np.floor(x)
ceil_x = np.ceil(x)
self.assertAllBetween(log_pmf_values, stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
# Check that pmf(x) of tfd.Zipf is between the values of stats.zipf.pmf for
# ceil(x) and floor(x).
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllBetween(pmf_values, stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfLogPmf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(power=power, interpolate_nondiscrete=False)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
log_pmf_values = self.evaluate(log_pmf)
self.assertAllClose(log_pmf_values, stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllClose(pmf_values, stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.1, 3.5, 4.9, 5., 6.6, 7.]], dtype=np.int32).T
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3.2, 4.3, 5.5, 6.9, 7.]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllBetween(
self.evaluate(log_pmf), stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllBetween(
self.evaluate(pmf), stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfCdf_IntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
zipf = tfd.Zipf(power=power, interpolate_nondiscrete=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsInterpolated(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.3, 3.5, 4.1, 5.5, 6.8, 7.9]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfMean(self):
power_v = [2.0, 3.0, 2.5]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.mean().shape)
self.assertAllClose(self.evaluate(zipf.mean()), stats.zipf.mean(power_v))
def testZipfVariance(self):
power_v = [4.0, 3.0, 5.5] # var is undefined for power <= 3
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.variance().shape)
stat_vars = np.vectorize(stats.zipf.var)(power_v)
self.assertAllClose(self.evaluate(zipf.variance()), stat_vars)
def testZipfStd(self):
power_v = [4.0, 3.5, 4.5]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.stddev().shape)
stat_stddevs = np.vectorize(stats.zipf.std)(power_v)
self.assertAllClose(self.evaluate(zipf.stddev()), stat_stddevs)
def testZipfMode(self):
power_v = [10.0, 3.0, 2.5, 3.2, 1.1, 0.05]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((6,), zipf.mode().shape)
self.assertAllClose(self.evaluate(zipf.mode()), np.ones_like(power_v))
def testZipfSample(self):
power_v = 5.
n = int(500e4)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n,), samples.shape)
self.assertEqual((n,), sample_values.shape)
self.assertAllClose(
sample_values.mean(), stats.zipf.mean(power_v), rtol=.01)
self.assertAllClose(
sample_values.std(), stats.zipf.std(power_v), rtol=.03)
def testZipfSample_ValidateArgs(self):
power_v = 3.
n = int(100e3)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
self.evaluate(samples)
def testZipfSampleMultidimensionalMean(self):
power_v = np.array([np.arange(5, 15, dtype=np.float32)]) # 1 x 10
zipf = tfd.Zipf(power=power_v)
n = int(100e3)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 10,), samples.shape)
self.assertEqual((n, 1, 10,), sample_values.shape)
# stats.zipf wants float64 params.
stats_mean = np.vectorize(stats.zipf.mean)(power_v.astype(np.float64))
self.assertAllClose(sample_values.mean(axis=0), stats_mean, rtol=.01)
def testZipfSampleMultidimensionalStd(self):
power_v = np.array([np.arange(5, 10, dtype=np.float32)]) # 1 x 5
zipf = tfd.Zipf(power=power_v)
n = int(100e4)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 5), samples.shape)
self.assertEqual((n, 1, 5), sample_values.shape)
# stats.zipf wants float64 params.
stats_std = np.vectorize(stats.zipf.std)(power_v.astype(np.float64))
self.assertAllClose(sample_values.std(axis=0), stats_std, rtol=.04)
# Test that sampling with the same seed twice gives the same results.
def testZipfSampleMultipleTimes(self):
n = 1000
seed = tfp_test_util.test_seed()
power = 1.5
zipf1 = tfd.Zipf(power=power, name="zipf1")
tf.compat.v1.set_random_seed(seed)
samples1 = self.evaluate(zipf1.sample(n, seed=seed))
zipf2 = tfd.Zipf(power=power, name="zipf2")
tf.compat.v1.set_random_seed(seed)
samples2 = self.evaluate(zipf2.sample(n, seed=seed))
self.assertAllEqual(samples1, samples2)
def testZipfSample_AvoidsInfiniteLoop(self):
zipf = tfd.Zipf(power=1.)
n = 1000
self.evaluate(zipf.sample(n, seed=tfp_test_util.test_seed()))
if __name__ == "__main__":
tf.test.main()
avg_line_length: 36.584726 | max_line_length: 115 | alphanum_fraction: 0.666058
content_no_comment:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import stats
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_case
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class ZipfTest(test_case.TestCase):
def assertBetween(self, x, minimum, maximum):
self.assertGreaterEqual(x, minimum)
self.assertLessEqual(x, maximum)
def assertAllBetween(self, a, minval, maxval, atol=1e-6):
a = self._GetNdArray(a)
minval = self._GetNdArray(minval)
maxval = self._GetNdArray(maxval)
self.assertEqual(a.shape, minval.shape)
self.assertEqual(a.shape, maxval.shape)
for idx, _ in np.ndenumerate(a):
self.assertBetween(a[idx], minval[idx] - atol, maxval[idx] + atol)
def testZipfShape(self):
power = tf.constant([3.0] * 5)
zipf = tfd.Zipf(power=power)
self.assertEqual(self.evaluate(zipf.batch_shape_tensor()), (5,))
self.assertEqual(zipf.batch_shape, tf.TensorShape([5]))
self.assertAllEqual(self.evaluate(zipf.event_shape_tensor()), [])
self.assertEqual(zipf.event_shape, tf.TensorShape([]))
def testInvalidPower(self):
invalid_powers = [-.02, 0.5, -2., .99, 1.]
for power in invalid_powers:
with self.assertRaisesOpError("Condition x > y"):
zipf = tfd.Zipf(power=power, validate_args=True)
self.evaluate(zipf.power)
def testNanPower(self):
zipf = tfd.Zipf(power=np.nan, validate_args=False)
self.assertAllNan(self.evaluate(zipf.power))
def testValidPower_ImplicitlyConvertsToFloat32(self):
powers = [2, 10, 1.1]
for power in powers:
zipf = tfd.Zipf(power=power, validate_args=True)
self.assertEqual(zipf.power.dtype, tf.float32)
def testEventDtype(self):
for power_dtype in [tf.float32, tf.float64]:
for event_dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
power = tf.constant(5., dtype=power_dtype)
zipf = tfd.Zipf(power=power, dtype=event_dtype)
self.assertEqual(zipf.dtype, event_dtype)
self.assertEqual(zipf.dtype, zipf.sample(10).dtype)
self.assertEqual(zipf.dtype, zipf.sample(1).dtype)
self.assertEqual(zipf.dtype, zipf.mode().dtype)
def testInvalidEventDtype(self):
with self.assertRaisesWithPredicateMatch(
TypeError, "power.dtype .* not a supported .* type"):
power = tf.constant(5., dtype=tf.float16)
zipf = tfd.Zipf(power=power, dtype=tf.int32, validate_args=True)
self.evaluate(zipf.sample())
def testZipfLogPmf_InvalidArgs(self):
power = tf.constant([4.0])
non_integer_samples = [0.99, 4.5, 5.001, 1e-6, -3, -2, -1, -0., 0]
for x in non_integer_samples:
zipf = tfd.Zipf(
power=power, interpolate_nondiscrete=False, validate_args=True)
with self.assertRaisesOpError("Condition (x == y|x > 0)"):
self.evaluate(zipf.log_prob(x))
with self.assertRaisesOpError("Condition (x == y|x > 0)"):
self.evaluate(zipf.prob(x))
def testZipfLogPmf_IntegerArgs(self):
batch_size = 9
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = np.array([-3., -0., 0., 2., 3., 4., 5., 6., 7.], dtype=np.float32)
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
log_pmf_values = self.evaluate(log_pmf)
floor_x = np.floor(x)
ceil_x = np.ceil(x)
self.assertAllBetween(log_pmf_values, stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllBetween(pmf_values, stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfLogPmf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3., -0.5, 0., 2., 2.2, 3., 3.1, 4., 5., 5.5, 6., 7.2]
zipf = tfd.Zipf(power=power, interpolate_nondiscrete=False)
log_pmf = zipf.log_prob(x)
self.assertEqual((batch_size,), log_pmf.shape)
log_pmf_values = self.evaluate(log_pmf)
self.assertAllClose(log_pmf_values, stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((batch_size,), pmf.shape)
pmf_values = self.evaluate(pmf)
self.assertAllClose(pmf_values, stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.1, 3.5, 4.9, 5., 6.6, 7.]], dtype=np.int32).T
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllClose(self.evaluate(log_pmf), stats.zipf.logpmf(x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllClose(self.evaluate(pmf), stats.zipf.pmf(x, power_v))
def testZipfLogPmfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3.2, 4.3, 5.5, 6.9, 7.]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_pmf = zipf.log_prob(x)
self.assertEqual((6, 3), log_pmf.shape)
self.assertAllBetween(
self.evaluate(log_pmf), stats.zipf.logpmf(ceil_x, power_v),
stats.zipf.logpmf(floor_x, power_v))
pmf = zipf.prob(x)
self.assertEqual((6, 3), pmf.shape)
self.assertAllBetween(
self.evaluate(pmf), stats.zipf.pmf(ceil_x, power_v),
stats.zipf.pmf(floor_x, power_v))
def testZipfCdf_IntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsNoInterpolation(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
zipf = tfd.Zipf(power=power, interpolate_nondiscrete=False)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdf_NonIntegerArgsInterpolated(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdf_NonIntegerArgs(self):
batch_size = 12
power = tf.constant([3.0] * batch_size)
power_v = 3.0
x = [-3.5, -0.5, 0., 1, 1.1, 2.2, 3.1, 4., 5., 5.5, 6.4, 7.8]
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((batch_size,), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((batch_size,), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfCdfMultidimensional_IntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2., 3., 4., 5., 6., 7.]], dtype=np.float32).T
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllClose(self.evaluate(log_cdf), stats.zipf.logcdf(x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllClose(self.evaluate(cdf), stats.zipf.cdf(x, power_v))
def testZipfCdfMultidimensional_NonIntegerArgs(self):
batch_size = 6
power = tf.constant([[2.0, 4.0, 5.0]] * batch_size)
power_v = [2.0, 4.0, 5.0]
x = np.array([[2.3, 3.5, 4.1, 5.5, 6.8, 7.9]], dtype=np.float32).T
floor_x = np.floor(x)
ceil_x = np.ceil(x)
zipf = tfd.Zipf(power=power)
log_cdf = zipf.log_cdf(x)
self.assertEqual((6, 3), log_cdf.shape)
self.assertAllBetween(
self.evaluate(log_cdf), stats.zipf.logcdf(floor_x, power_v),
stats.zipf.logcdf(ceil_x, power_v))
cdf = zipf.cdf(x)
self.assertEqual((6, 3), cdf.shape)
self.assertAllBetween(
self.evaluate(cdf), stats.zipf.cdf(floor_x, power_v),
stats.zipf.cdf(ceil_x, power_v))
def testZipfMean(self):
power_v = [2.0, 3.0, 2.5]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.mean().shape)
self.assertAllClose(self.evaluate(zipf.mean()), stats.zipf.mean(power_v))
def testZipfVariance(self):
power_v = [4.0, 3.0, 5.5]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.variance().shape)
stat_vars = np.vectorize(stats.zipf.var)(power_v)
self.assertAllClose(self.evaluate(zipf.variance()), stat_vars)
def testZipfStd(self):
power_v = [4.0, 3.5, 4.5]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((3,), zipf.stddev().shape)
stat_stddevs = np.vectorize(stats.zipf.std)(power_v)
self.assertAllClose(self.evaluate(zipf.stddev()), stat_stddevs)
def testZipfMode(self):
power_v = [10.0, 3.0, 2.5, 3.2, 1.1, 0.05]
zipf = tfd.Zipf(power=power_v)
self.assertEqual((6,), zipf.mode().shape)
self.assertAllClose(self.evaluate(zipf.mode()), np.ones_like(power_v))
def testZipfSample(self):
power_v = 5.
n = int(500e4)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n,), samples.shape)
self.assertEqual((n,), sample_values.shape)
self.assertAllClose(
sample_values.mean(), stats.zipf.mean(power_v), rtol=.01)
self.assertAllClose(
sample_values.std(), stats.zipf.std(power_v), rtol=.03)
def testZipfSample_ValidateArgs(self):
power_v = 3.
n = int(100e3)
for power_dtype in [tf.float32, tf.float64]:
power = tf.constant(power_v, dtype=power_dtype)
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64]:
zipf = tfd.Zipf(power=power, dtype=dtype, validate_args=True)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
self.evaluate(samples)
def testZipfSampleMultidimensionalMean(self):
power_v = np.array([np.arange(5, 15, dtype=np.float32)])
zipf = tfd.Zipf(power=power_v)
n = int(100e3)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 10,), samples.shape)
self.assertEqual((n, 1, 10,), sample_values.shape)
stats_mean = np.vectorize(stats.zipf.mean)(power_v.astype(np.float64))
self.assertAllClose(sample_values.mean(axis=0), stats_mean, rtol=.01)
def testZipfSampleMultidimensionalStd(self):
power_v = np.array([np.arange(5, 10, dtype=np.float32)])
zipf = tfd.Zipf(power=power_v)
n = int(100e4)
samples = zipf.sample(n, seed=tfp_test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n, 1, 5), samples.shape)
self.assertEqual((n, 1, 5), sample_values.shape)
stats_std = np.vectorize(stats.zipf.std)(power_v.astype(np.float64))
self.assertAllClose(sample_values.std(axis=0), stats_std, rtol=.04)
def testZipfSampleMultipleTimes(self):
n = 1000
seed = tfp_test_util.test_seed()
power = 1.5
zipf1 = tfd.Zipf(power=power, name="zipf1")
tf.compat.v1.set_random_seed(seed)
samples1 = self.evaluate(zipf1.sample(n, seed=seed))
zipf2 = tfd.Zipf(power=power, name="zipf2")
tf.compat.v1.set_random_seed(seed)
samples2 = self.evaluate(zipf2.sample(n, seed=seed))
self.assertAllEqual(samples1, samples2)
def testZipfSample_AvoidsInfiniteLoop(self):
zipf = tfd.Zipf(power=1.)
n = 1000
self.evaluate(zipf.sample(n, seed=tfp_test_util.test_seed()))
if __name__ == "__main__":
tf.test.main()
| true
| true
|
79066f15425046ee9018415f5aaf5407552e7a17
| 749
|
py
|
Python
|
services/backend/migrations/versions/f6d196dc5629_.py
|
YA-androidapp/vuejs-flask-docker
|
de0fe6bbee7c80fe7de4a91c70767494289aa448
|
[
"MIT"
] | 16
|
2020-05-03T19:58:58.000Z
|
2021-08-23T19:37:13.000Z
|
services/backend/migrations/versions/f6d196dc5629_.py
|
YA-androidapp/vuejs-flask-docker
|
de0fe6bbee7c80fe7de4a91c70767494289aa448
|
[
"MIT"
] | 3
|
2021-09-01T20:34:17.000Z
|
2022-02-27T18:09:27.000Z
|
services/backend/migrations/versions/f6d196dc5629_.py
|
YA-androidapp/vuejs-flask-docker
|
de0fe6bbee7c80fe7de4a91c70767494289aa448
|
[
"MIT"
] | 13
|
2020-05-03T22:00:30.000Z
|
2021-06-20T02:44:20.000Z
|
"""empty message
Revision ID: f6d196dc5629
Revises: fd5076041bff
Create Date: 2019-04-06 22:25:32.133764
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f6d196dc5629'
down_revision = 'fd5076041bff'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('admin', sa.Boolean(), nullable=True))
op.execute('UPDATE users SET admin=False')
op.alter_column('users', 'admin', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'admin')
# ### end Alembic commands ###
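The upgrade above uses a three-step pattern: add the column as nullable so existing rows remain valid, backfill every row with a concrete value, then tighten the constraint to NOT NULL. A hedged sketch of the same pattern applied to a hypothetical is_active column (not part of this migration; op and sa as imported above):

def upgrade_is_active_sketch():
    # 1. add the column as nullable so the ALTER succeeds on populated tables
    op.add_column('users', sa.Column('is_active', sa.Boolean(), nullable=True))
    # 2. backfill existing rows with a concrete value
    op.execute('UPDATE users SET is_active=True')
    # 3. only now enforce NOT NULL
    op.alter_column('users', 'is_active', nullable=False)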
| 24.16129
| 75
| 0.688919
|
from alembic import op
import sqlalchemy as sa
revision = 'f6d196dc5629'
down_revision = 'fd5076041bff'
branch_labels = None
depends_on = None
def upgrade():
| true
| true
|
79066fc2a841fa7bdb3b73ee9a5121a5360e3603
| 2,576
|
py
|
Python
|
tests/test_client.py
|
goodeggs/dbt-rpc-client
|
48843f67d72ed732aa4cb7d70c436fad25bf23d6
|
[
"MIT"
] | 2
|
2020-03-07T11:19:03.000Z
|
2021-07-20T16:18:31.000Z
|
tests/test_client.py
|
goodeggs/dbt-rpc-client
|
48843f67d72ed732aa4cb7d70c436fad25bf23d6
|
[
"MIT"
] | 2
|
2020-01-03T18:33:49.000Z
|
2020-05-07T10:21:01.000Z
|
tests/test_client.py
|
goodeggs/dbt-rpc-client
|
48843f67d72ed732aa4cb7d70c436fad25bf23d6
|
[
"MIT"
] | 1
|
2020-11-23T23:14:21.000Z
|
2020-11-23T23:14:21.000Z
|
import pytest
import responses
def test_url(client):
assert client.url == "http://0.0.0.0:8580/jsonrpc"
@pytest.mark.parametrize('method', ['status', 'poll', 'kill', 'cli_args'])
def test_default_request(client, method):
expected = {
"jsonrpc": client.jsonrpc_version,
"method": method,
"params": {}
}
resp = client._default_request(method=method)
assert resp["jsonrpc"] == expected["jsonrpc"]
assert resp["method"] == expected["method"]
assert resp["params"] == expected["params"]
def test_selection(client):
expected = {
"models": "@model_1 +model_2+ model_3+",
"select": "snapshot_1 snapshot_2 snapshot_3",
"exclude": "model_4+"
}
data = client._selection(models=["@model_1", "+model_2+", "model_3+", "model_3+"],
select=["snapshot_1", "snapshot_2", "snapshot_3"],
exclude=["model_4+"])
assert set(data["models"].split(' ')) == set(expected["models"].split(' '))
assert set(data["select"].split(' ')) == set(expected["select"].split(' '))
assert set(data["exclude"].split(' ')) == set(expected["exclude"].split(' '))
def test_status(client):
with responses.RequestsMock() as rsps:
expected = {
"result": {
"status": "ready",
"error": "null",
"logs": [],
"timestamp": "2019-10-07T16:30:09.875534Z",
"pid": 76715
},
"id": "2db9a2fe-9a39-41ef-828c-25e04dd6b07d",
"jsonrpc": client.jsonrpc_version
}
rsps.add(responses.POST, client.url,
json=expected, status=202)
resp = client.status()
assert resp.json() == expected
def test_poll(client):
with responses.RequestsMock() as rsps:
expected = {
"result": {
"results": [],
"generated_at": "2019-10-11T18:25:22.477203Z",
"elapsed_time": 0.8381369113922119,
"logs": [],
"tags": {
"command": "run --models my_model",
"branch": "abc123"
},
"status": "success"
},
"id": "2db9a2fe-9a39-41ef-828c-25e04dd6b07d",
"jsonrpc": client.jsonrpc_version
}
rsps.add(responses.POST, client.url,
json=expected, status=202)
resp = client.poll(request_token="f86926fa-6535-4891-8d24-2cfc65d2a347")
assert resp.json() == expected
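These tests depend on a client pytest fixture defined elsewhere, presumably in a conftest.py that is not shown. A minimal sketch of such a fixture; the DbtRpcClient class name, import path, and constructor signature are assumptions, not confirmed by this file:

import pytest
from dbt_rpc_client import DbtRpcClient  # assumed import path

@pytest.fixture
def client():
    # host and port chosen to match the URL asserted in test_url above
    return DbtRpcClient(host="0.0.0.0", port=8580)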
| 32.607595
| 86
| 0.527562
|
import pytest
import responses
def test_url(client):
assert client.url == "http://0.0.0.0:8580/jsonrpc"
@pytest.mark.parametrize('method', ['status', 'poll', 'kill', 'cli_args'])
def test_default_request(client, method):
expected = {
"jsonrpc": client.jsonrpc_version,
"method": method,
"params": {}
}
resp = client._default_request(method=method)
assert resp["jsonrpc"] == expected["jsonrpc"]
assert resp["method"] == expected["method"]
assert resp["params"] == expected["params"]
def test_selection(client):
expected = {
"models": "@model_1 +model_2+ model_3+",
"select": "snapshot_1 snapshot_2 snapshot_3",
"exclude": "model_4+"
}
data = client._selection(models=["@model_1", "+model_2+", "model_3+", "model_3+"],
select=["snapshot_1", "snapshot_2", "snapshot_3"],
exclude=["model_4+"])
assert set(data["models"].split(' ')) == set(expected["models"].split(' '))
assert set(data["select"].split(' ')) == set(expected["select"].split(' '))
assert set(data["exclude"].split(' ')) == set(expected["exclude"].split(' '))
def test_status(client):
with responses.RequestsMock() as rsps:
expected = {
"result": {
"status": "ready",
"error": "null",
"logs": [],
"timestamp": "2019-10-07T16:30:09.875534Z",
"pid": 76715
},
"id": "2db9a2fe-9a39-41ef-828c-25e04dd6b07d",
"jsonrpc": client.jsonrpc_version
}
rsps.add(responses.POST, client.url,
json=expected, status=202)
resp = client.status()
assert resp.json() == expected
def test_poll(client):
with responses.RequestsMock() as rsps:
expected = {
"result": {
"results": [],
"generated_at": "2019-10-11T18:25:22.477203Z",
"elapsed_time": 0.8381369113922119,
"logs": [],
"tags": {
"command": "run --models my_model",
"branch": "abc123"
},
"status": "success"
},
"id": "2db9a2fe-9a39-41ef-828c-25e04dd6b07d",
"jsonrpc": client.jsonrpc_version
}
rsps.add(responses.POST, client.url,
json=expected, status=202)
resp = client.poll(request_token="f86926fa-6535-4891-8d24-2cfc65d2a347")
assert resp.json() == expected
| true
| true
|
790670acb878af0d0ba02cbf021cbb8fcaffcb96
| 2,512
|
py
|
Python
|
examples/crate.py
|
einarf/ModernGL
|
e4a7f53289043a0ac06130c67edc75b878484a0e
|
[
"MIT"
] | 1
|
2019-10-18T03:23:16.000Z
|
2019-10-18T03:23:16.000Z
|
examples/crate.py
|
einarf/ModernGL
|
e4a7f53289043a0ac06130c67edc75b878484a0e
|
[
"MIT"
] | null | null | null |
examples/crate.py
|
einarf/ModernGL
|
e4a7f53289043a0ac06130c67edc75b878484a0e
|
[
"MIT"
] | null | null | null |
import os
import moderngl
import numpy as np
from objloader import Obj
from PIL import Image
from pyrr import Matrix44
import data
from window import Example, run_example
class CrateExample(Example):
title = "Crate"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.prog = self.ctx.program(
vertex_shader='''
#version 330
uniform mat4 Mvp;
in vec3 in_vert;
in vec3 in_norm;
in vec2 in_text;
out vec3 v_vert;
out vec3 v_norm;
out vec2 v_text;
void main() {
gl_Position = Mvp * vec4(in_vert, 1.0);
v_vert = in_vert;
v_norm = in_norm;
v_text = in_text;
}
''',
fragment_shader='''
#version 330
uniform vec3 Light;
uniform sampler2D Texture;
in vec3 v_vert;
in vec3 v_norm;
in vec2 v_text;
out vec4 f_color;
void main() {
float lum = clamp(dot(normalize(Light - v_vert), normalize(v_norm)), 0.0, 1.0) * 0.8 + 0.2;
f_color = vec4(texture(Texture, v_text).rgb * lum, 1.0);
}
''',
)
self.mvp = self.prog['Mvp']
self.light = self.prog['Light']
obj = Obj.open(data.find('crate.obj'))
img = Image.open(data.find('crate.png')).transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
self.texture = self.ctx.texture(img.size, 3, img.tobytes())
self.texture.use()
self.vbo = self.ctx.buffer(obj.pack('vx vy vz nx ny nz tx ty'))
self.vao = self.ctx.simple_vertex_array(self.prog, self.vbo, 'in_vert', 'in_norm', 'in_text')
def render(self, time, frame_time):
angle = time
self.ctx.clear(1.0, 1.0, 1.0)
self.ctx.enable(moderngl.DEPTH_TEST)
camera_pos = (np.cos(angle) * 5.0, np.sin(angle) * 5.0, 2.0)
proj = Matrix44.perspective_projection(45.0, self.aspect_ratio, 0.1, 1000.0)
lookat = Matrix44.look_at(
camera_pos,
(0.0, 0.0, 0.5),
(0.0, 0.0, 1.0),
)
self.mvp.write((proj * lookat).astype('f4').tobytes())
self.light.value = camera_pos
self.vao.render()
if __name__ == '__main__':
run_example(CrateExample)
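A note on the buffer layout: obj.pack('vx vy vz nx ny nz tx ty') interleaves position (3 floats), normal (3 floats) and texture coordinate (2 floats) per vertex, and simple_vertex_array maps those onto in_vert, in_norm and in_text by introspecting the program. A hedged equivalent with the layout spelled out explicitly, as a drop-in replacement for the simple_vertex_array line above (assuming ModernGL's Context.vertex_array API):

self.vao = self.ctx.vertex_array(
    self.prog, [(self.vbo, '3f 3f 2f', 'in_vert', 'in_norm', 'in_text')])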
| 27.604396
| 111
| 0.513137
|
import os
import moderngl
import numpy as np
from objloader import Obj
from PIL import Image
from pyrr import Matrix44
import data
from window import Example, run_example
class CrateExample(Example):
title = "Crate"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.prog = self.ctx.program(
vertex_shader='''
#version 330
uniform mat4 Mvp;
in vec3 in_vert;
in vec3 in_norm;
in vec2 in_text;
out vec3 v_vert;
out vec3 v_norm;
out vec2 v_text;
void main() {
gl_Position = Mvp * vec4(in_vert, 1.0);
v_vert = in_vert;
v_norm = in_norm;
v_text = in_text;
}
''',
fragment_shader='''
#version 330
uniform vec3 Light;
uniform sampler2D Texture;
in vec3 v_vert;
in vec3 v_norm;
in vec2 v_text;
out vec4 f_color;
void main() {
float lum = clamp(dot(normalize(Light - v_vert), normalize(v_norm)), 0.0, 1.0) * 0.8 + 0.2;
f_color = vec4(texture(Texture, v_text).rgb * lum, 1.0);
}
''',
)
self.mvp = self.prog['Mvp']
self.light = self.prog['Light']
obj = Obj.open(data.find('crate.obj'))
img = Image.open(data.find('crate.png')).transpose(Image.FLIP_TOP_BOTTOM).convert('RGB')
self.texture = self.ctx.texture(img.size, 3, img.tobytes())
self.texture.use()
self.vbo = self.ctx.buffer(obj.pack('vx vy vz nx ny nz tx ty'))
self.vao = self.ctx.simple_vertex_array(self.prog, self.vbo, 'in_vert', 'in_norm', 'in_text')
def render(self, time, frame_time):
angle = time
self.ctx.clear(1.0, 1.0, 1.0)
self.ctx.enable(moderngl.DEPTH_TEST)
camera_pos = (np.cos(angle) * 5.0, np.sin(angle) * 5.0, 2.0)
proj = Matrix44.perspective_projection(45.0, self.aspect_ratio, 0.1, 1000.0)
lookat = Matrix44.look_at(
camera_pos,
(0.0, 0.0, 0.5),
(0.0, 0.0, 1.0),
)
self.mvp.write((proj * lookat).astype('f4').tobytes())
self.light.value = camera_pos
self.vao.render()
if __name__ == '__main__':
run_example(CrateExample)
| true
| true
|
7906710d6af97d58759f8acac35239116f393087
| 243
|
py
|
Python
|
tagger/calculate_tagger.py
|
buoyancy99/glove
|
aa0e6eaaaafb47fc3a5d4eb693c8f24e90b8f402
|
[
"MIT"
] | 2
|
2019-06-10T18:16:14.000Z
|
2020-03-27T19:10:40.000Z
|
tagger/calculate_tagger.py
|
buoyancy99/glove
|
aa0e6eaaaafb47fc3a5d4eb693c8f24e90b8f402
|
[
"MIT"
] | null | null | null |
tagger/calculate_tagger.py
|
buoyancy99/glove
|
aa0e6eaaaafb47fc3a5d4eb693c8f24e90b8f402
|
[
"MIT"
] | 3
|
2019-02-27T23:07:58.000Z
|
2020-02-11T06:00:33.000Z
|
"""Author: Brandon Trabucco
Calculate the part of speech tagger using the brown corpus.
"""
import glove.configuration
import glove.tagger
config = glove.configuration.TaggerConfiguration(
tagger_dir="./")
glove.tagger.dump(config)
| 15.1875
| 59
| 0.761317
|
import glove.configuration
import glove.tagger
config = glove.configuration.TaggerConfiguration(
tagger_dir="./")
glove.tagger.dump(config)
| true
| true
|
790671d115575aa5ed8617405544c36d6f9865d2
| 478
|
py
|
Python
|
pynzb/lxml_nzb.py
|
DavidM42/pynzb
|
cfb7f4f88b928f00bae637be655c592a2493c2e1
|
[
"BSD-3-Clause"
] | null | null | null |
pynzb/lxml_nzb.py
|
DavidM42/pynzb
|
cfb7f4f88b928f00bae637be655c592a2493c2e1
|
[
"BSD-3-Clause"
] | null | null | null |
pynzb/lxml_nzb.py
|
DavidM42/pynzb
|
cfb7f4f88b928f00bae637be655c592a2493c2e1
|
[
"BSD-3-Clause"
] | null | null | null |
from pynzb.base import BaseETreeNZBParser, NZBFile, NZBSegment
try:
from lxml import etree
except ImportError:
raise ImportError("You must have lxml installed before you can use the " +
"lxml NZB parser.")
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
class LXMLNZBParser(BaseETreeNZBParser):
def get_etree_iter(self, xml, et=etree):
return iter(et.iterparse(StringIO(xml), events=("start", "end")))
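A hedged usage sketch: BaseETreeNZBParser (from pynzb.base, not shown here) is assumed to provide a parse(xml) method that walks the iterator returned by get_etree_iter and yields NZBFile objects:

parser = LXMLNZBParser()
with open('example.nzb') as f:  # example.nzb is a hypothetical input file
    files = parser.parse(f.read())
for nzb_file in files:
    print(nzb_file.subject, len(nzb_file.segments))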
| 29.875
| 78
| 0.732218
|
from pynzb.base import BaseETreeNZBParser, NZBFile, NZBSegment
try:
from lxml import etree
except ImportError:
raise ImportError("You must have lxml installed before you can use the " +
"lxml NZB parser.")
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
class LXMLNZBParser(BaseETreeNZBParser):
def get_etree_iter(self, xml, et=etree):
return iter(et.iterparse(StringIO(xml), events=("start", "end")))
| true
| true
|
790671d2bc8c19473d0c70d4a838f7a209b6d26d
| 590
|
py
|
Python
|
phd_courses/theoretical_low_energy_astroparticle/figures/make_all_figures.py
|
jacopok/notes
|
805ebe1be49bbd14c6b46b24055f9fc7d1cd2586
|
[
"Apache-2.0"
] | 6
|
2019-10-10T13:10:57.000Z
|
2022-01-13T14:52:50.000Z
|
phd_courses/theoretical_high_energy_astroparticle/figures/make_all_figures.py
|
jacopok/notes
|
805ebe1be49bbd14c6b46b24055f9fc7d1cd2586
|
[
"Apache-2.0"
] | null | null | null |
phd_courses/theoretical_high_energy_astroparticle/figures/make_all_figures.py
|
jacopok/notes
|
805ebe1be49bbd14c6b46b24055f9fc7d1cd2586
|
[
"Apache-2.0"
] | 3
|
2019-10-03T16:20:19.000Z
|
2021-08-06T16:11:07.000Z
|
from tqdm import tqdm
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
rc('text.latex', preamble=r'''\usepackage{amsmath}
\usepackage{physics}
\usepackage{siunitx}
''')
rc('figure', dpi=150)
def plot_and_save(plotting_func):
plotting_func()
plt.savefig(str(plotting_func.__name__).split(sep='.')[0] + '.pdf', bbox_inches='tight', pad_inches = 0)
plt.close()
if __name__ == "__main__":
plotter_list = []
for plotting_func in tqdm(plotter_list):
plot_and_save(plotting_func)
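plot_and_save names each output PDF after the plotting function that produced it, so a figure only needs to be added to plotter_list. A hedged example entry (the body is illustrative, not from the original file):

def example_figure():
    # rendered by plot_and_save as example_figure.pdf
    plt.plot([0, 1], [0, 1], label='$y = x$')
    plt.xlabel('$x$')
    plt.ylabel('$y$')
    plt.legend()

# e.g. plotter_list = [example_figure] would render it in the main loop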
| 26.818182
| 108
| 0.69661
|
from tqdm import tqdm
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
rc('text.latex', preamble=r'''\usepackage{amsmath}
\usepackage{physics}
\usepackage{siunitx}
''')
rc('figure', dpi=150)
def plot_and_save(plotting_func):
plotting_func()
plt.savefig(str(plotting_func.__name__).split(sep='.')[0] + '.pdf', bbox_inches='tight', pad_inches = 0)
plt.close()
if __name__ == "__main__":
plotter_list = []
for plotting_func in tqdm(plotter_list):
plot_and_save(plotting_func)
| true
| true
|
790671ddc269e09bd844f0beb38916b50bed5073
| 41,319
|
py
|
Python
|
pycryptobot.py
|
treggit/pycryptobot
|
47ed7f260d19fd1ec7e607e0654ebb768a3f035c
|
[
"Apache-2.0"
] | null | null | null |
pycryptobot.py
|
treggit/pycryptobot
|
47ed7f260d19fd1ec7e607e0654ebb768a3f035c
|
[
"Apache-2.0"
] | null | null | null |
pycryptobot.py
|
treggit/pycryptobot
|
47ed7f260d19fd1ec7e607e0654ebb768a3f035c
|
[
"Apache-2.0"
] | null | null | null |
"""Python Crypto Bot consuming Coinbase Pro or Binance APIs"""
import functools
import os
import sched
import sys
import time
import pandas as pd
from datetime import datetime
from models.PyCryptoBot import PyCryptoBot, truncate as _truncate
from models.AppState import AppState
from models.Trading import TechnicalAnalysis
from models.TradingAccount import TradingAccount
from models.helper.MarginHelper import calculate_margin
from views.TradingGraphs import TradingGraphs
from models.Strategy import Strategy
from models.helper.LogHelper import Logger
# minimal traceback
sys.tracebacklimit = 1
app = PyCryptoBot()
account = TradingAccount(app)
technical_analysis = None
state = AppState(app, account)
state.initLastAction()
s = sched.scheduler(time.time, time.sleep)
def executeJob(sc=None, app: PyCryptoBot=None, state: AppState=None, trading_data=pd.DataFrame()):
"""Trading bot job which runs at a scheduled interval"""
global technical_analysis
# connectivity check (only when running live)
if app.isLive() and app.getTime() is None:
Logger.warning('Your connection to the exchange has gone down, will retry in 1 minute!')
# poll every 5 minutes
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
return
# increment state.iterations
state.iterations = state.iterations + 1
if not app.isSimulation():
# retrieve the app.getMarket() data
trading_data = app.getHistoricalData(app.getMarket(), app.getGranularity())
else:
if len(trading_data) == 0:
return None
# analyse the market data
if app.isSimulation() and len(trading_data.columns) > 8:
df = trading_data
else:
trading_dataCopy = trading_data.copy()
technical_analysis = TechnicalAnalysis(trading_dataCopy)
technical_analysis.addAll()
df = technical_analysis.getDataFrame()
if app.isSimulation():
df_last = app.getInterval(df, state.iterations)
else:
df_last = app.getInterval(df)
if len(df_last.index.format()) > 0:
current_df_index = str(df_last.index.format()[0])
else:
current_df_index = state.last_df_index
formatted_current_df_index = f'{current_df_index} 00:00:00' if len(current_df_index) == 10 else current_df_index
if app.getSmartSwitch() == 1 and app.getGranularity() == 3600 and app.is1hEMA1226Bull() is True and app.is6hEMA1226Bull() is True:
Logger.info('*** smart switch from granularity 3600 (1 hour) to 900 (15 min) ***')
app.notifyTelegram(app.getMarket() + " smart switch from granularity 3600 (1 hour) to 900 (15 min)")
app.setGranularity(900)
list(map(s.cancel, s.queue))
s.enter(5, 1, executeJob, (sc, app, state))
if app.getSmartSwitch() == 1 and app.getGranularity() == 900 and app.is1hEMA1226Bull() is False and app.is6hEMA1226Bull() is False:
Logger.info("*** smart switch from granularity 900 (15 min) to 3600 (1 hour) ***")
app.notifyTelegram(app.getMarket() + " smart switch from granularity 900 (15 min) to 3600 (1 hour)")
app.setGranularity(3600)
list(map(s.cancel, s.queue))
s.enter(5, 1, executeJob, (sc, app, state))
if app.getExchange() == 'binance' and app.getGranularity() == 86400:
if len(df) < 250:
# data frame should have 250 rows, if not retry
Logger.error('error: data frame length is < 250 (' + str(len(df)) + ')')
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
else:
if len(df) < 300:
if not app.isSimulation():
# data frame should have 300 rows, if not retry
Logger.error('error: data frame length is < 300 (' + str(len(df)) + ')')
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
if len(df_last) > 0:
now = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
if not app.isSimulation():
ticker = app.getTicker(app.getMarket())
now = ticker[0]
price = ticker[1]
if price < df_last['low'].values[0] or price == 0:
price = float(df_last['close'].values[0])
else:
price = float(df_last['close'].values[0])
if price < 0.0001:
raise Exception(app.getMarket() + ' is unsuitable for trading, quote price is less than 0.0001!')
# technical indicators
ema12gtema26 = bool(df_last['ema12gtema26'].values[0])
ema12gtema26co = bool(df_last['ema12gtema26co'].values[0])
goldencross = bool(df_last['goldencross'].values[0])
macdgtsignal = bool(df_last['macdgtsignal'].values[0])
macdgtsignalco = bool(df_last['macdgtsignalco'].values[0])
ema12ltema26 = bool(df_last['ema12ltema26'].values[0])
ema12ltema26co = bool(df_last['ema12ltema26co'].values[0])
macdltsignal = bool(df_last['macdltsignal'].values[0])
macdltsignalco = bool(df_last['macdltsignalco'].values[0])
obv = float(df_last['obv'].values[0])
obv_pc = float(df_last['obv_pc'].values[0])
elder_ray_buy = bool(df_last['eri_buy'].values[0])
elder_ray_sell = bool(df_last['eri_sell'].values[0])
# if simulation iterations < 200, set goldencross to true
if app.isSimulation() and state.iterations < 200:
goldencross = True
# candlestick detection
hammer = bool(df_last['hammer'].values[0])
inverted_hammer = bool(df_last['inverted_hammer'].values[0])
hanging_man = bool(df_last['hanging_man'].values[0])
shooting_star = bool(df_last['shooting_star'].values[0])
three_white_soldiers = bool(df_last['three_white_soldiers'].values[0])
three_black_crows = bool(df_last['three_black_crows'].values[0])
morning_star = bool(df_last['morning_star'].values[0])
evening_star = bool(df_last['evening_star'].values[0])
three_line_strike = bool(df_last['three_line_strike'].values[0])
abandoned_baby = bool(df_last['abandoned_baby'].values[0])
morning_doji_star = bool(df_last['morning_doji_star'].values[0])
evening_doji_star = bool(df_last['evening_doji_star'].values[0])
two_black_gapping = bool(df_last['two_black_gapping'].values[0])
strategy = Strategy(app, state, df, state.iterations)
state.action = strategy.getAction()
immediate_action = False
margin, profit, sell_fee = 0, 0, 0
if state.last_buy_size > 0 and state.last_buy_price > 0 and price > 0 and state.last_action == 'BUY':
# update last buy high
if price > state.last_buy_high:
state.last_buy_high = price
if state.last_buy_high > 0:
change_pcnt_high = ((price / state.last_buy_high) - 1) * 100
else:
change_pcnt_high = 0
# buy and sell calculations
state.last_buy_fee = round(state.last_buy_size * app.getTakerFee(), 8)
state.last_buy_filled = round(((state.last_buy_size - state.last_buy_fee) / state.last_buy_price), 8)
# if not a simulation, sync with exchange orders
if not app.isSimulation():
exchange_last_buy = app.getLastBuy()
if exchange_last_buy is not None:
if state.last_buy_size != exchange_last_buy['size']:
state.last_buy_size = exchange_last_buy['size']
if state.last_buy_filled != exchange_last_buy['filled']:
state.last_buy_filled = exchange_last_buy['filled']
if state.last_buy_price != exchange_last_buy['price']:
state.last_buy_price = exchange_last_buy['price']
if app.getExchange() == 'coinbasepro':
if state.last_buy_fee != exchange_last_buy['fee']:
state.last_buy_fee = exchange_last_buy['fee']
margin, profit, sell_fee = calculate_margin(
buy_size=state.last_buy_size,
buy_filled=state.last_buy_filled,
buy_price=state.last_buy_price,
buy_fee=state.last_buy_fee,
sell_percent=app.getSellPercent(),
sell_price=price,
sell_taker_fee=app.getTakerFee())
# handle immediate sell actions
if strategy.isSellTrigger(price, technical_analysis.getTradeExit(price), margin, change_pcnt_high, obv_pc, macdltsignal):
state.action = 'SELL'
state.last_action = 'BUY'
immediate_action = True
# handle overriding wait actions (do not sell if sell at loss disabled!)
if strategy.isWaitTrigger(margin):
state.action = 'WAIT'
state.last_action = 'BUY'
immediate_action = False
bullbeartext = ''
if app.disableBullOnly() is True or (df_last['sma50'].values[0] == df_last['sma200'].values[0]):
bullbeartext = ''
elif goldencross is True:
bullbeartext = ' (BULL)'
elif goldencross is False:
bullbeartext = ' (BEAR)'
# polling is every 5 minutes (even for hourly intervals), but only process once per interval
if (immediate_action is True or state.last_df_index != current_df_index):
precision = 4
if (price < 0.01):
precision = 8
# Since precision does not change after this point, it is safe to prepare a tailored `truncate()` that would
# work with this precision. It should save a couple of `precision` uses, one for each `truncate()` call.
truncate = functools.partial(_truncate, n=precision)
price_text = 'Close: ' + truncate(price)
ema_text = app.compare(df_last['ema12'].values[0], df_last['ema26'].values[0], 'EMA12/26', precision)
macd_text = ''
if app.disableBuyMACD() is False:
macd_text = app.compare(df_last['macd'].values[0], df_last['signal'].values[0], 'MACD', precision)
obv_text = ''
if app.disableBuyOBV() is False:
obv_text = 'OBV: ' + truncate(df_last['obv'].values[0]) + ' (' + str(
truncate(df_last['obv_pc'].values[0])) + '%)'
state.eri_text = ''
if app.disableBuyElderRay() is False:
if elder_ray_buy is True:
state.eri_text = 'ERI: buy | '
elif elder_ray_sell is True:
state.eri_text = 'ERI: sell | '
else:
state.eri_text = 'ERI: | '
if hammer is True:
log_text = '* Candlestick Detected: Hammer ("Weak - Reversal - Bullish Signal - Up")'
Logger.info(log_text)
if shooting_star is True:
log_text = '* Candlestick Detected: Shooting Star ("Weak - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
if hanging_man is True:
log_text = '* Candlestick Detected: Hanging Man ("Weak - Continuation - Bearish Pattern - Down")'
Logger.info(log_text)
if inverted_hammer is True:
log_text = '* Candlestick Detected: Inverted Hammer ("Weak - Continuation - Bullish Pattern - Up")'
Logger.info(log_text)
if three_white_soldiers is True:
log_text = '*** Candlestick Detected: Three White Soldiers ("Strong - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if three_black_crows is True:
log_text = '* Candlestick Detected: Three Black Crows ("Strong - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if morning_star is True:
log_text = '*** Candlestick Detected: Morning Star ("Strong - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if evening_star is True:
log_text = '*** Candlestick Detected: Evening Star ("Strong - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if three_line_strike is True:
log_text = '** Candlestick Detected: Three Line Strike ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if abandoned_baby is True:
log_text = '** Candlestick Detected: Abandoned Baby ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if morning_doji_star is True:
log_text = '** Candlestick Detected: Morning Doji Star ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if evening_doji_star is True:
log_text = '** Candlestick Detected: Evening Doji Star ("Reliable - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if two_black_gapping is True:
log_text = '*** Candlestick Detected: Two Black Gapping ("Reliable - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
ema_co_prefix = ''
ema_co_suffix = ''
if ema12gtema26co is True:
ema_co_prefix = '*^ '
ema_co_suffix = ' ^*'
elif ema12ltema26co is True:
ema_co_prefix = '*v '
ema_co_suffix = ' v*'
elif ema12gtema26 is True:
ema_co_prefix = '^ '
ema_co_suffix = ' ^'
elif ema12ltema26 is True:
ema_co_prefix = 'v '
ema_co_suffix = ' v'
macd_co_prefix = ''
macd_co_suffix = ''
if app.disableBuyMACD() is False:
if macdgtsignalco is True:
macd_co_prefix = '*^ '
macd_co_suffix = ' ^*'
elif macdltsignalco is True:
macd_co_prefix = '*v '
macd_co_suffix = ' v*'
elif macdgtsignal is True:
macd_co_prefix = '^ '
macd_co_suffix = ' ^'
elif macdltsignal is True:
macd_co_prefix = 'v '
macd_co_suffix = ' v'
obv_prefix = ''
obv_suffix = ''
if app.disableBuyOBV() is False:
if float(obv_pc) > 0:
obv_prefix = '^ '
obv_suffix = ' ^ | '
elif float(obv_pc) < 0:
obv_prefix = 'v '
obv_suffix = ' v | '
if not app.isVerbose():
if state.last_action != '':
output_text = formatted_current_df_index + ' | ' + app.getMarket() + bullbeartext + ' | ' + \
app.printGranularity() + ' | ' + price_text + ' | ' + ema_co_prefix + \
ema_text + ema_co_suffix + ' | ' + macd_co_prefix + macd_text + macd_co_suffix + \
obv_prefix + obv_text + obv_suffix + state.eri_text + ' | ' + state.action + \
' | Last Action: ' + state.last_action
else:
output_text = formatted_current_df_index + ' | ' + app.getMarket() + bullbeartext + ' | ' + \
app.printGranularity() + ' | ' + price_text + ' | ' + ema_co_prefix + \
ema_text + ema_co_suffix + ' | ' + macd_co_prefix + macd_text + macd_co_suffix + \
obv_prefix + obv_text + obv_suffix + state.eri_text + ' | ' + state.action + ' '
if state.last_action == 'BUY':
if state.last_buy_size > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
output_text += ' | ' + margin_text + ' (delta: ' + str(round(price - state.last_buy_price, precision)) + ')'
Logger.info(output_text)
# Seasonal Autoregressive Integrated Moving Average (ARIMA) model (ML prediction for 3 intervals from now)
if not app.isSimulation():
try:
prediction = technical_analysis.seasonalARIMAModelPrediction(int(app.getGranularity() / 60) * 3) # 3 intervals from now
Logger.info(f'Seasonal ARIMA model predicts the closing price will be {str(round(prediction[1], 2))} at {prediction[0]} (delta: {round(prediction[1] - price, 2)})')
except Exception:
pass
if state.last_action == 'BUY':
# display support, resistance and fibonacci levels
Logger.info(technical_analysis.printSupportResistanceFibonacciLevels(price))
else:
Logger.debug('-- Iteration: ' + str(state.iterations) + ' --' + bullbeartext)
if state.last_action == 'BUY':
if state.last_buy_size > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
Logger.debug('-- Margin: ' + margin_text + ' --')
Logger.debug('price: ' + truncate(price))
Logger.debug('ema12: ' + truncate(float(df_last['ema12'].values[0])))
Logger.debug('ema26: ' + truncate(float(df_last['ema26'].values[0])))
Logger.debug('ema12gtema26co: ' + str(ema12gtema26co))
Logger.debug('ema12gtema26: ' + str(ema12gtema26))
Logger.debug('ema12ltema26co: ' + str(ema12ltema26co))
Logger.debug('ema12ltema26: ' + str(ema12ltema26))
Logger.debug('sma50: ' + truncate(float(df_last['sma50'].values[0])))
Logger.debug('sma200: ' + truncate(float(df_last['sma200'].values[0])))
Logger.debug('macd: ' + truncate(float(df_last['macd'].values[0])))
Logger.debug('signal: ' + truncate(float(df_last['signal'].values[0])))
Logger.debug('macdgtsignal: ' + str(macdgtsignal))
Logger.debug('macdltsignal: ' + str(macdltsignal))
Logger.debug('obv: ' + str(obv))
Logger.debug('obv_pc: ' + str(obv_pc))
Logger.debug('action: ' + state.action)
# informational output on the most recent entry
Logger.info('')
Logger.info('================================================================================')
txt = ' Iteration : ' + str(state.iterations) + bullbeartext
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Timestamp : ' + str(df_last.index.format()[0])
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('--------------------------------------------------------------------------------')
txt = ' Close : ' + truncate(price)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' EMA12 : ' + truncate(float(df_last['ema12'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' EMA26 : ' + truncate(float(df_last['ema26'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Crossing Above : ' + str(ema12gtema26co)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Above : ' + str(ema12gtema26)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Crossing Below : ' + str(ema12ltema26co)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Below : ' + str(ema12ltema26)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
if (ema12gtema26 is True and ema12gtema26co is True):
txt = ' Condition : EMA12 is currently crossing above EMA26'
elif (ema12gtema26 is True and ema12gtema26co is False):
txt = ' Condition : EMA12 is currently above EMA26 and has crossed over'
elif (ema12ltema26 is True and ema12ltema26co is True):
txt = ' Condition : EMA12 is currently crossing below EMA26'
elif (ema12ltema26 is True and ema12ltema26co is False):
txt = ' Condition : EMA12 is currently below EMA26 and has crossed over'
else:
txt = ' Condition : -'
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' SMA20 : ' + truncate(float(df_last['sma20'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' SMA200 : ' + truncate(float(df_last['sma200'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('--------------------------------------------------------------------------------')
txt = ' MACD : ' + truncate(float(df_last['macd'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Signal : ' + truncate(float(df_last['signal'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Above : ' + str(macdgtsignal)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Below : ' + str(macdltsignal)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
if (macdgtsignal is True and macdgtsignalco is True):
txt = ' Condition : MACD is currently crossing above Signal'
elif (macdgtsignal is True and macdgtsignalco is False):
txt = ' Condition : MACD is currently above Signal and has crossed over'
elif (macdltsignal is True and macdltsignalco is True):
txt = ' Condition : MACD is currently crossing below Signal'
elif (macdltsignal is True and macdltsignalco is False):
txt = ' Condition : MACD is currently below Signal and has crossed over'
else:
txt = ' Condition : -'
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('--------------------------------------------------------------------------------')
txt = ' Action : ' + state.action
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('================================================================================')
if state.last_action == 'BUY':
txt = ' Margin : ' + margin_text
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('================================================================================')
# if a buy signal
if state.action == 'BUY':
state.last_buy_price = price
state.last_buy_high = state.last_buy_price
# if live
if app.isLive():
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') BUY at ' + price_text)
if not app.isVerbose():
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | BUY')
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing LIVE Buy Order *** |')
Logger.info('--------------------------------------------------------------------------------')
# display balances
Logger.info(app.getBaseCurrency() + ' balance before order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance before order: ' + str(account.getBalance(app.getQuoteCurrency())))
# execute a live market buy
state.last_buy_size = float(account.getBalance(app.getQuoteCurrency()))
if app.getBuyMaxSize() and state.last_buy_size > app.getBuyMaxSize():
state.last_buy_size = app.getBuyMaxSize()
resp = app.marketBuy(app.getMarket(), state.last_buy_size, app.getBuyPercent())
Logger.debug(resp)
# display balances
Logger.info(app.getBaseCurrency() + ' balance after order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance after order: ' + str(account.getBalance(app.getQuoteCurrency())))
# if not live
else:
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') TEST BUY at ' + price_text)
# TODO: Improve simulator calculations by including calculations for buy and sell limit configurations.
if state.last_buy_size == 0 and state.last_buy_filled == 0:
state.last_buy_size = 1000
state.first_buy_size = 1000
state.buy_count = state.buy_count + 1
state.buy_sum = state.buy_sum + state.last_buy_size
if not app.isVerbose():
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | BUY')
bands = technical_analysis.getFibonacciRetracementLevels(float(price))
Logger.info(' Fibonacci Retracement Levels:' + str(bands))
technical_analysis.printSupportResistanceLevel(float(price))
if len(bands) >= 1 and len(bands) <= 2:
if len(bands) == 1:
first_key = list(bands.keys())[0]
if first_key == 'ratio1':
state.fib_low = 0
state.fib_high = bands[first_key]
if first_key == 'ratio1_618':
state.fib_low = bands[first_key]
state.fib_high = bands[first_key] * 2
else:
state.fib_low = bands[first_key]
elif len(bands) == 2:
first_key = list(bands.keys())[0]
second_key = list(bands.keys())[1]
state.fib_low = bands[first_key]
state.fib_high = bands[second_key]
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing TEST Buy Order *** |')
Logger.info('--------------------------------------------------------------------------------')
if app.shouldSaveGraphs():
tradinggraphs = TradingGraphs(technical_analysis)
ts = datetime.now().timestamp()
filename = app.getMarket() + '_' + app.printGranularity() + '_buy_' + str(ts) + '.png'
tradinggraphs.renderEMAandMACD(len(trading_data), 'graphs/' + filename, True)
# if a sell signal
elif state.action == 'SELL':
# if live
if app.isLive():
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') SELL at ' +
price_text + ' (margin: ' + margin_text + ', (delta: ' +
str(round(price - state.last_buy_price, precision)) + ')')
if not app.isVerbose():
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | SELL')
bands = technical_analysis.getFibonacciRetracementLevels(float(price))
Logger.info(' Fibonacci Retracement Levels:' + str(bands))
if len(bands) >= 1 and len(bands) <= 2:
if len(bands) == 1:
first_key = list(bands.keys())[0]
if first_key == 'ratio1':
state.fib_low = 0
state.fib_high = bands[first_key]
if first_key == 'ratio1_618':
state.fib_low = bands[first_key]
state.fib_high = bands[first_key] * 2
else:
state.fib_low = bands[first_key]
elif len(bands) == 2:
first_key = list(bands.keys())[0]
second_key = list(bands.keys())[1]
state.fib_low = bands[first_key]
state.fib_high = bands[second_key]
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing LIVE Sell Order *** |')
Logger.info('--------------------------------------------------------------------------------')
# display balances
Logger.info(app.getBaseCurrency() + ' balance before order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance before order: ' + str(account.getBalance(app.getQuoteCurrency())))
# execute a live market sell
resp = app.marketSell(app.getMarket(), float(account.getBalance(app.getBaseCurrency())),
app.getSellPercent())
Logger.debug(resp)
# display balances
Logger.info(app.getBaseCurrency() + ' balance after order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance after order: ' + str(account.getBalance(app.getQuoteCurrency())))
# if not live
else:
margin, profit, sell_fee = calculate_margin(
buy_size=state.last_buy_size,
buy_filled=state.last_buy_filled,
buy_price=state.last_buy_price,
buy_fee=state.last_buy_fee,
sell_percent=app.getSellPercent(),
sell_price=price,
sell_taker_fee=app.getTakerFee())
if state.last_buy_size > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') TEST SELL at ' +
price_text + ' (margin: ' + margin_text + ', (delta: ' +
str(round(price - state.last_buy_price, precision)) + ')')
# Preserve next buy values for simulator
state.sell_count = state.sell_count + 1
buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
state.last_buy_size = buy_size - sell_fee
state.sell_sum = state.sell_sum + state.last_buy_size
if not app.isVerbose():
if price > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' +
app.printGranularity() + ' | SELL | ' + str(price) + ' | BUY | ' +
str(state.last_buy_price) + ' | DIFF | ' + str(price - state.last_buy_price) +
' | DIFF | ' + str(profit) + ' | MARGIN NO FEES | ' +
margin_text + ' | MARGIN FEES | ' + str(round(sell_fee, precision)))
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing TEST Sell Order *** |')
Logger.info('--------------------------------------------------------------------------------')
if app.shouldSaveGraphs():
tradinggraphs = TradingGraphs(technical_analysis)
ts = datetime.now().timestamp()
filename = app.getMarket() + '_' + app.printGranularity() + '_sell_' + str(ts) + '.png'
tradinggraphs.renderEMAandMACD(len(trading_data), 'graphs/' + filename, True)
# last significant action
if state.action in ['BUY', 'SELL']:
state.last_action = state.action
state.last_df_index = str(df_last.index.format()[0])
if not app.isLive() and state.iterations == len(df):
Logger.info("\nSimulation Summary: ")
if state.buy_count > state.sell_count and app.allowSellAtLoss():
# Calculate last sell size
state.last_buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
# Reduce sell fee from last sell size
state.last_buy_size = state.last_buy_size - state.last_buy_price * app.getTakerFee()
state.sell_sum = state.sell_sum + state.last_buy_size
state.sell_count = state.sell_count + 1
elif state.buy_count > state.sell_count and not app.allowSellAtLoss():
Logger.info("\n")
Logger.info(' Note : "sell at loss" is disabled and you have an open trade, if the margin')
Logger.info(' result below is negative it will assume you sold at the end of the')
Logger.info(' simulation which may not be ideal. Try setting --sellatloss 1')
Logger.info("\n")
Logger.info(' Buy Count : ' + str(state.buy_count))
Logger.info(' Sell Count : ' + str(state.sell_count))
Logger.info(' First Buy : ' + str(state.first_buy_size))
Logger.info(' Last Sell : ' + str(state.last_buy_size))
app.notifyTelegram(f"Simulation Summary\n Buy Count: {state.buy_count}\n Sell Count: {state.sell_count}\n First Buy: {state.first_buy_size}\n Last Sell: {state.last_buy_size}\n")
if state.sell_count > 0:
Logger.info("\n")
Logger.info(' Margin : ' + _truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4) + '%')
Logger.info("\n")
Logger.info(' ** non-live simulation, assuming highest fees')
app.notifyTelegram(f" Margin: {_truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4)}%\n ** non-live simulation, assuming highest fees\n")
else:
if state.last_buy_size > 0 and state.last_buy_price > 0 and price > 0 and state.last_action == 'BUY':
# show profit and margin if already bought
Logger.info(now + ' | ' + app.getMarket() + bullbeartext + ' | ' + app.printGranularity() + ' | Current Price: ' + str(price) + ' | Margin: ' + str(margin) + ' | Profit: ' + str(profit))
else:
Logger.info(now + ' | ' + app.getMarket() + bullbeartext + ' | ' + app.printGranularity() + ' | Current Price: ' + str(price))
# decrement ignored iteration
state.iterations = state.iterations - 1
# if live
if not app.disableTracker() and app.isLive():
# update order tracker csv
if app.getExchange() == 'binance':
account.saveTrackerCSV(app.getMarket())
elif app.getExchange() == 'coinbasepro':
account.saveTrackerCSV()
if app.isSimulation():
if state.iterations < 300:
if app.simuluationSpeed() in ['fast', 'fast-sample']:
# fast processing
list(map(s.cancel, s.queue))
s.enter(0, 1, executeJob, (sc, app, state, df))
else:
# slow processing
list(map(s.cancel, s.queue))
s.enter(1, 1, executeJob, (sc, app, state, df))
else:
# poll every 1 minute
list(map(s.cancel, s.queue))
s.enter(60, 1, executeJob, (sc, app, state))
def main():
try:
message = 'Starting '
if app.getExchange() == 'coinbasepro':
message += 'Coinbase Pro bot'
elif app.getExchange() == 'binance':
message += 'Binance bot'
message += ' for ' + app.getMarket() + ' using granularity ' + app.printGranularity()
app.notifyTelegram(message)
# initialise and start application
trading_data = app.startApp(account, state.last_action)
def runApp():
# run the first job immediately after starting
if app.isSimulation():
executeJob(s, app, state, trading_data)
else:
executeJob(s, app, state)
s.run()
try:
runApp()
except KeyboardInterrupt:
raise
except(BaseException, Exception) as e:
if app.autoRestart():
# Wait 30 seconds and try to relaunch the application
time.sleep(30)
Logger.critical('Restarting application after exception: ' + repr(e))
app.notifyTelegram('Auto restarting bot for ' + app.getMarket() + ' after exception: ' + repr(e))
# Cancel the events queue
list(map(s.cancel, s.queue))
# Restart the app
runApp()
else:
raise
# catches a keyboard break of app, exits gracefully
except KeyboardInterrupt:
Logger.warning(str(datetime.now()) + ' bot is closed via keyboard interrupt...')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
except(BaseException, Exception) as e:
# catch all not managed exceptions and send a Telegram message if configured
app.notifyTelegram('Bot for ' + app.getMarket() + ' got an exception: ' + repr(e))
Logger.critical(repr(e))
raise
main()
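The polling idiom used throughout executeJob (drain the scheduler queue, then re-enter the job with a fresh delay) reduces to this minimal standalone sketch; the delay, names and print are illustrative, and the loop runs until interrupted:

import sched
import time

s = sched.scheduler(time.time, time.sleep)

def job(sc):
    print('tick')
    # cancel anything still queued, then reschedule this job in 60 seconds,
    # mirroring the list(map(s.cancel, s.queue)) / s.enter(...) idiom above
    list(map(sc.cancel, sc.queue))
    sc.enter(60, 1, job, (sc,))

s.enter(0, 1, job, (s,))
s.run()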
| 51.137376
| 202
| 0.504417
|
import functools
import os
import sched
import sys
import time
import pandas as pd
from datetime import datetime
from models.PyCryptoBot import PyCryptoBot, truncate as _truncate
from models.AppState import AppState
from models.Trading import TechnicalAnalysis
from models.TradingAccount import TradingAccount
from models.helper.MarginHelper import calculate_margin
from views.TradingGraphs import TradingGraphs
from models.Strategy import Strategy
from models.helper.LogHelper import Logger
sys.tracebacklimit = 1
app = PyCryptoBot()
account = TradingAccount(app)
technical_analysis = None
state = AppState(app, account)
state.initLastAction()
s = sched.scheduler(time.time, time.sleep)
def executeJob(sc=None, app: PyCryptoBot=None, state: AppState=None, trading_data=pd.DataFrame()):
global technical_analysis
if app.isLive() and app.getTime() is None:
        Logger.warning('Your connection to the exchange has gone down, will retry in 5 minutes!')
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
return
state.iterations = state.iterations + 1
if not app.isSimulation():
trading_data = app.getHistoricalData(app.getMarket(), app.getGranularity())
else:
if len(trading_data) == 0:
return None
if app.isSimulation() and len(trading_data.columns) > 8:
df = trading_data
else:
trading_dataCopy = trading_data.copy()
technical_analysis = TechnicalAnalysis(trading_dataCopy)
technical_analysis.addAll()
df = technical_analysis.getDataFrame()
if app.isSimulation():
df_last = app.getInterval(df, state.iterations)
else:
df_last = app.getInterval(df)
if len(df_last.index.format()) > 0:
current_df_index = str(df_last.index.format()[0])
else:
current_df_index = state.last_df_index
formatted_current_df_index = f'{current_df_index} 00:00:00' if len(current_df_index) == 10 else current_df_index
if app.getSmartSwitch() == 1 and app.getGranularity() == 3600 and app.is1hEMA1226Bull() is True and app.is6hEMA1226Bull() is True:
Logger.info('*** smart switch from granularity 3600 (1 hour) to 900 (15 min) ***')
app.notifyTelegram(app.getMarket() + " smart switch from granularity 3600 (1 hour) to 900 (15 min)")
app.setGranularity(900)
list(map(s.cancel, s.queue))
s.enter(5, 1, executeJob, (sc, app, state))
if app.getSmartSwitch() == 1 and app.getGranularity() == 900 and app.is1hEMA1226Bull() is False and app.is6hEMA1226Bull() is False:
Logger.info("*** smart switch from granularity 900 (15 min) to 3600 (1 hour) ***")
app.notifyTelegram(app.getMarket() + " smart switch from granularity 900 (15 min) to 3600 (1 hour)")
app.setGranularity(3600)
list(map(s.cancel, s.queue))
s.enter(5, 1, executeJob, (sc, app, state))
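    # Note on the two smart-switch blocks above: trading moves between 1-hour
    # (3600s) and 15-minute (900s) candles based on the EMA12/EMA26 trend on
    # both the 1-hour and 6-hour timeframes. Both checks must agree (bullish
    # on both to switch down to 15 min, bearish on both to switch back up),
    # and the job is re-queued after 5 seconds so the new granularity takes
    # effect on the next iteration.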
if app.getExchange() == 'binance' and app.getGranularity() == 86400:
if len(df) < 250:
Logger.error('error: data frame length is < 250 (' + str(len(df)) + ')')
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
else:
if len(df) < 300:
if not app.isSimulation():
Logger.error('error: data frame length is < 300 (' + str(len(df)) + ')')
list(map(s.cancel, s.queue))
s.enter(300, 1, executeJob, (sc, app, state))
if len(df_last) > 0:
now = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
if not app.isSimulation():
ticker = app.getTicker(app.getMarket())
now = ticker[0]
price = ticker[1]
if price < df_last['low'].values[0] or price == 0:
price = float(df_last['close'].values[0])
else:
price = float(df_last['close'].values[0])
if price < 0.0001:
raise Exception(app.getMarket() + ' is unsuitable for trading, quote price is less than 0.0001!')
ema12gtema26 = bool(df_last['ema12gtema26'].values[0])
ema12gtema26co = bool(df_last['ema12gtema26co'].values[0])
goldencross = bool(df_last['goldencross'].values[0])
macdgtsignal = bool(df_last['macdgtsignal'].values[0])
macdgtsignalco = bool(df_last['macdgtsignalco'].values[0])
ema12ltema26 = bool(df_last['ema12ltema26'].values[0])
ema12ltema26co = bool(df_last['ema12ltema26co'].values[0])
macdltsignal = bool(df_last['macdltsignal'].values[0])
macdltsignalco = bool(df_last['macdltsignalco'].values[0])
obv = float(df_last['obv'].values[0])
obv_pc = float(df_last['obv_pc'].values[0])
elder_ray_buy = bool(df_last['eri_buy'].values[0])
elder_ray_sell = bool(df_last['eri_sell'].values[0])
if app.isSimulation() and state.iterations < 200:
goldencross = True
hammer = bool(df_last['hammer'].values[0])
inverted_hammer = bool(df_last['inverted_hammer'].values[0])
hanging_man = bool(df_last['hanging_man'].values[0])
shooting_star = bool(df_last['shooting_star'].values[0])
three_white_soldiers = bool(df_last['three_white_soldiers'].values[0])
three_black_crows = bool(df_last['three_black_crows'].values[0])
morning_star = bool(df_last['morning_star'].values[0])
evening_star = bool(df_last['evening_star'].values[0])
three_line_strike = bool(df_last['three_line_strike'].values[0])
abandoned_baby = bool(df_last['abandoned_baby'].values[0])
morning_doji_star = bool(df_last['morning_doji_star'].values[0])
evening_doji_star = bool(df_last['evening_doji_star'].values[0])
two_black_gapping = bool(df_last['two_black_gapping'].values[0])
strategy = Strategy(app, state, df, state.iterations)
state.action = strategy.getAction()
immediate_action = False
margin, profit, sell_fee = 0, 0, 0
if state.last_buy_size > 0 and state.last_buy_price > 0 and price > 0 and state.last_action == 'BUY':
if price > state.last_buy_high:
state.last_buy_high = price
if state.last_buy_high > 0:
change_pcnt_high = ((price / state.last_buy_high) - 1) * 100
else:
change_pcnt_high = 0
state.last_buy_fee = round(state.last_buy_size * app.getTakerFee(), 8)
state.last_buy_filled = round(((state.last_buy_size - state.last_buy_fee) / state.last_buy_price), 8)
if not app.isSimulation():
exchange_last_buy = app.getLastBuy()
if exchange_last_buy is not None:
if state.last_buy_size != exchange_last_buy['size']:
state.last_buy_size = exchange_last_buy['size']
if state.last_buy_filled != exchange_last_buy['filled']:
state.last_buy_filled = exchange_last_buy['filled']
if state.last_buy_price != exchange_last_buy['price']:
state.last_buy_price = exchange_last_buy['price']
if app.getExchange() == 'coinbasepro':
if state.last_buy_fee != exchange_last_buy['fee']:
state.last_buy_fee = exchange_last_buy['fee']
margin, profit, sell_fee = calculate_margin(
buy_size=state.last_buy_size,
buy_filled=state.last_buy_filled,
buy_price=state.last_buy_price,
buy_fee=state.last_buy_fee,
sell_percent=app.getSellPercent(),
sell_price=price,
sell_taker_fee=app.getTakerFee())
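        # Worked example of the margin inputs (illustrative numbers only):
        # buy_size=100, buy_price=10 and a 0.5% taker fee give buy_fee=0.5 and
        # buy_filled=(100 - 0.5) / 10 = 9.95 units. If price rises to 11 with
        # sell_percent=100, the position is worth ~9.95 * 11 = 109.45 before
        # the sell-side fee, i.e. a positive margin on the 100 spent.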
if strategy.isSellTrigger(price, technical_analysis.getTradeExit(price), margin, change_pcnt_high, obv_pc, macdltsignal):
state.action = 'SELL'
state.last_action = 'BUY'
immediate_action = True
if strategy.isWaitTrigger(margin):
state.action = 'WAIT'
state.last_action = 'BUY'
immediate_action = False
bullbeartext = ''
if app.disableBullOnly() is True or (df_last['sma50'].values[0] == df_last['sma200'].values[0]):
bullbeartext = ''
elif goldencross is True:
bullbeartext = ' (BULL)'
elif goldencross is False:
bullbeartext = ' (BEAR)'
if (immediate_action is True or state.last_df_index != current_df_index):
precision = 4
if (price < 0.01):
precision = 8
truncate = functools.partial(_truncate, n=precision)
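        # functools.partial pins n, so truncate(x) behaves like _truncate(x,
        # n=precision) -- e.g. truncate(1.23456789) is '1.2345' at precision 4.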
price_text = 'Close: ' + truncate(price)
ema_text = app.compare(df_last['ema12'].values[0], df_last['ema26'].values[0], 'EMA12/26', precision)
macd_text = ''
if app.disableBuyMACD() is False:
macd_text = app.compare(df_last['macd'].values[0], df_last['signal'].values[0], 'MACD', precision)
obv_text = ''
if app.disableBuyOBV() is False:
obv_text = 'OBV: ' + truncate(df_last['obv'].values[0]) + ' (' + str(
truncate(df_last['obv_pc'].values[0])) + '%)'
state.eri_text = ''
if app.disableBuyElderRay() is False:
if elder_ray_buy is True:
state.eri_text = 'ERI: buy | '
elif elder_ray_sell is True:
state.eri_text = 'ERI: sell | '
else:
state.eri_text = 'ERI: | '
if hammer is True:
log_text = '* Candlestick Detected: Hammer ("Weak - Reversal - Bullish Signal - Up")'
Logger.info(log_text)
if shooting_star is True:
log_text = '* Candlestick Detected: Shooting Star ("Weak - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
if hanging_man is True:
log_text = '* Candlestick Detected: Hanging Man ("Weak - Continuation - Bearish Pattern - Down")'
Logger.info(log_text)
if inverted_hammer is True:
log_text = '* Candlestick Detected: Inverted Hammer ("Weak - Continuation - Bullish Pattern - Up")'
Logger.info(log_text)
if three_white_soldiers is True:
log_text = '*** Candlestick Detected: Three White Soldiers ("Strong - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if three_black_crows is True:
log_text = '* Candlestick Detected: Three Black Crows ("Strong - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if morning_star is True:
log_text = '*** Candlestick Detected: Morning Star ("Strong - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if evening_star is True:
log_text = '*** Candlestick Detected: Evening Star ("Strong - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if three_line_strike is True:
log_text = '** Candlestick Detected: Three Line Strike ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if abandoned_baby is True:
log_text = '** Candlestick Detected: Abandoned Baby ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if morning_doji_star is True:
log_text = '** Candlestick Detected: Morning Doji Star ("Reliable - Reversal - Bullish Pattern - Up")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if evening_doji_star is True:
log_text = '** Candlestick Detected: Evening Doji Star ("Reliable - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
if two_black_gapping is True:
log_text = '*** Candlestick Detected: Two Black Gapping ("Reliable - Reversal - Bearish Pattern - Down")'
Logger.info(log_text)
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') ' + log_text)
ema_co_prefix = ''
ema_co_suffix = ''
if ema12gtema26co is True:
ema_co_prefix = '*^ '
ema_co_suffix = ' ^*'
elif ema12ltema26co is True:
ema_co_prefix = '*v '
ema_co_suffix = ' v*'
elif ema12gtema26 is True:
ema_co_prefix = '^ '
ema_co_suffix = ' ^'
elif ema12ltema26 is True:
ema_co_prefix = 'v '
ema_co_suffix = ' v'
macd_co_prefix = ''
macd_co_suffix = ''
if app.disableBuyMACD() is False:
if macdgtsignalco is True:
macd_co_prefix = '*^ '
macd_co_suffix = ' ^*'
elif macdltsignalco is True:
macd_co_prefix = '*v '
macd_co_suffix = ' v*'
elif macdgtsignal is True:
macd_co_prefix = '^ '
macd_co_suffix = ' ^'
elif macdltsignal is True:
macd_co_prefix = 'v '
macd_co_suffix = ' v'
obv_prefix = ''
obv_suffix = ''
if app.disableBuyOBV() is False:
if float(obv_pc) > 0:
obv_prefix = '^ '
obv_suffix = ' ^ | '
elif float(obv_pc) < 0:
obv_prefix = 'v '
obv_suffix = ' v | '
if not app.isVerbose():
if state.last_action != '':
output_text = formatted_current_df_index + ' | ' + app.getMarket() + bullbeartext + ' | ' + \
app.printGranularity() + ' | ' + price_text + ' | ' + ema_co_prefix + \
ema_text + ema_co_suffix + ' | ' + macd_co_prefix + macd_text + macd_co_suffix + \
obv_prefix + obv_text + obv_suffix + state.eri_text + ' | ' + state.action + \
' | Last Action: ' + state.last_action
else:
output_text = formatted_current_df_index + ' | ' + app.getMarket() + bullbeartext + ' | ' + \
app.printGranularity() + ' | ' + price_text + ' | ' + ema_co_prefix + \
ema_text + ema_co_suffix + ' | ' + macd_co_prefix + macd_text + macd_co_suffix + \
obv_prefix + obv_text + obv_suffix + state.eri_text + ' | ' + state.action + ' '
if state.last_action == 'BUY':
if state.last_buy_size > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
output_text += ' | ' + margin_text + ' (delta: ' + str(round(price - state.last_buy_price, precision)) + ')'
Logger.info(output_text)
if not app.isSimulation():
try:
prediction = technical_analysis.seasonalARIMAModelPrediction(int(app.getGranularity() / 60) * 3)
Logger.info(f'Seasonal ARIMA model predicts the closing price will be {str(round(prediction[1], 2))} at {prediction[0]} (delta: {round(prediction[1] - price, 2)})')
                except Exception:
pass
if state.last_action == 'BUY':
Logger.info(technical_analysis.printSupportResistanceFibonacciLevels(price))
else:
Logger.debug('-- Iteration: ' + str(state.iterations) + ' --' + bullbeartext)
if state.last_action == 'BUY':
if state.last_buy_size > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
Logger.debug('-- Margin: ' + margin_text + ' --')
Logger.debug('price: ' + truncate(price))
Logger.debug('ema12: ' + truncate(float(df_last['ema12'].values[0])))
Logger.debug('ema26: ' + truncate(float(df_last['ema26'].values[0])))
Logger.debug('ema12gtema26co: ' + str(ema12gtema26co))
Logger.debug('ema12gtema26: ' + str(ema12gtema26))
Logger.debug('ema12ltema26co: ' + str(ema12ltema26co))
Logger.debug('ema12ltema26: ' + str(ema12ltema26))
Logger.debug('sma50: ' + truncate(float(df_last['sma50'].values[0])))
Logger.debug('sma200: ' + truncate(float(df_last['sma200'].values[0])))
Logger.debug('macd: ' + truncate(float(df_last['macd'].values[0])))
Logger.debug('signal: ' + truncate(float(df_last['signal'].values[0])))
Logger.debug('macdgtsignal: ' + str(macdgtsignal))
Logger.debug('macdltsignal: ' + str(macdltsignal))
Logger.debug('obv: ' + str(obv))
Logger.debug('obv_pc: ' + str(obv_pc))
Logger.debug('action: ' + state.action)
Logger.info('')
Logger.info('================================================================================')
txt = ' Iteration : ' + str(state.iterations) + bullbeartext
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Timestamp : ' + str(df_last.index.format()[0])
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('--------------------------------------------------------------------------------')
txt = ' Close : ' + truncate(price)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' EMA12 : ' + truncate(float(df_last['ema12'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' EMA26 : ' + truncate(float(df_last['ema26'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Crossing Above : ' + str(ema12gtema26co)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Above : ' + str(ema12gtema26)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Crossing Below : ' + str(ema12ltema26co)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Below : ' + str(ema12ltema26)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
if (ema12gtema26 is True and ema12gtema26co is True):
txt = ' Condition : EMA12 is currently crossing above EMA26'
elif (ema12gtema26 is True and ema12gtema26co is False):
txt = ' Condition : EMA12 is currently above EMA26 and has crossed over'
elif (ema12ltema26 is True and ema12ltema26co is True):
txt = ' Condition : EMA12 is currently crossing below EMA26'
elif (ema12ltema26 is True and ema12ltema26co is False):
txt = ' Condition : EMA12 is currently below EMA26 and has crossed over'
else:
txt = ' Condition : -'
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' SMA20 : ' + truncate(float(df_last['sma20'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' SMA200 : ' + truncate(float(df_last['sma200'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('--------------------------------------------------------------------------------')
txt = ' MACD : ' + truncate(float(df_last['macd'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Signal : ' + truncate(float(df_last['signal'].values[0]))
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Above : ' + str(macdgtsignal)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
txt = ' Currently Below : ' + str(macdltsignal)
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
if (macdgtsignal is True and macdgtsignalco is True):
txt = ' Condition : MACD is currently crossing above Signal'
elif (macdgtsignal is True and macdgtsignalco is False):
txt = ' Condition : MACD is currently above Signal and has crossed over'
elif (macdltsignal is True and macdltsignalco is True):
txt = ' Condition : MACD is currently crossing below Signal'
elif (macdltsignal is True and macdltsignalco is False):
txt = ' Condition : MACD is currently below Signal and has crossed over'
else:
txt = ' Condition : -'
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('--------------------------------------------------------------------------------')
txt = ' Action : ' + state.action
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('================================================================================')
if state.last_action == 'BUY':
txt = ' Margin : ' + margin_text
Logger.info(' | ' + txt + (' ' * (75 - len(txt))) + ' | ')
Logger.info('================================================================================')
if state.action == 'BUY':
state.last_buy_price = price
state.last_buy_high = state.last_buy_price
if app.isLive():
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') BUY at ' + price_text)
if not app.isVerbose():
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | BUY')
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing LIVE Buy Order *** |')
Logger.info('--------------------------------------------------------------------------------')
Logger.info(app.getBaseCurrency() + ' balance before order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance before order: ' + str(account.getBalance(app.getQuoteCurrency())))
state.last_buy_size = float(account.getBalance(app.getQuoteCurrency()))
if app.getBuyMaxSize() and state.last_buy_size > app.getBuyMaxSize():
state.last_buy_size = app.getBuyMaxSize()
resp = app.marketBuy(app.getMarket(), state.last_buy_size, app.getBuyPercent())
Logger.debug(resp)
Logger.info(app.getBaseCurrency() + ' balance after order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance after order: ' + str(account.getBalance(app.getQuoteCurrency())))
else:
app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') TEST BUY at ' + price_text)
if state.last_buy_size == 0 and state.last_buy_filled == 0:
state.last_buy_size = 1000
state.first_buy_size = 1000
state.buy_count = state.buy_count + 1
state.buy_sum = state.buy_sum + state.last_buy_size
if not app.isVerbose():
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | BUY')
bands = technical_analysis.getFibonacciRetracementLevels(float(price))
Logger.info(' Fibonacci Retracement Levels:' + str(bands))
technical_analysis.printSupportResistanceLevel(float(price))
if len(bands) >= 1 and len(bands) <= 2:
if len(bands) == 1:
first_key = list(bands.keys())[0]
if first_key == 'ratio1':
state.fib_low = 0
state.fib_high = bands[first_key]
                            elif first_key == 'ratio1_618':
state.fib_low = bands[first_key]
state.fib_high = bands[first_key] * 2
else:
state.fib_low = bands[first_key]
elif len(bands) == 2:
first_key = list(bands.keys())[0]
second_key = list(bands.keys())[1]
state.fib_low = bands[first_key]
state.fib_high = bands[second_key]
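                        # Band bookkeeping above, with illustrative values:
                        # {'ratio1': 100.0} -> fib_low=0, fib_high=100.0
                        # {'ratio1_618': 100.0} -> fib_low=100.0, fib_high=200.0
                        # {'ratio1': 100.0, 'ratio1_618': 161.8}
                        #     -> fib_low=100.0, fib_high=161.8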
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing TEST Buy Order *** |')
Logger.info('--------------------------------------------------------------------------------')
if app.shouldSaveGraphs():
tradinggraphs = TradingGraphs(technical_analysis)
ts = datetime.now().timestamp()
filename = app.getMarket() + '_' + app.printGranularity() + '_buy_' + str(ts) + '.png'
tradinggraphs.renderEMAandMACD(len(trading_data), 'graphs/' + filename, True)
elif state.action == 'SELL':
if app.isLive():
                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') SELL at ' +
                                   price_text + ' (margin: ' + margin_text + ', delta: ' +
                                   str(round(price - state.last_buy_price, precision)) + ')')
if not app.isVerbose():
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' + app.printGranularity() + ' | ' + price_text + ' | SELL')
bands = technical_analysis.getFibonacciRetracementLevels(float(price))
Logger.info(' Fibonacci Retracement Levels:' + str(bands))
if len(bands) >= 1 and len(bands) <= 2:
if len(bands) == 1:
first_key = list(bands.keys())[0]
if first_key == 'ratio1':
state.fib_low = 0
state.fib_high = bands[first_key]
                            elif first_key == 'ratio1_618':
state.fib_low = bands[first_key]
state.fib_high = bands[first_key] * 2
else:
state.fib_low = bands[first_key]
elif len(bands) == 2:
first_key = list(bands.keys())[0]
second_key = list(bands.keys())[1]
state.fib_low = bands[first_key]
state.fib_high = bands[second_key]
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing LIVE Sell Order *** |')
Logger.info('--------------------------------------------------------------------------------')
Logger.info(app.getBaseCurrency() + ' balance before order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance before order: ' + str(account.getBalance(app.getQuoteCurrency())))
resp = app.marketSell(app.getMarket(), float(account.getBalance(app.getBaseCurrency())),
app.getSellPercent())
Logger.debug(resp)
Logger.info(app.getBaseCurrency() + ' balance after order: ' + str(account.getBalance(app.getBaseCurrency())))
Logger.info(app.getQuoteCurrency() + ' balance after order: ' + str(account.getBalance(app.getQuoteCurrency())))
else:
margin, profit, sell_fee = calculate_margin(
buy_size=state.last_buy_size,
buy_filled=state.last_buy_filled,
buy_price=state.last_buy_price,
buy_fee=state.last_buy_fee,
sell_percent=app.getSellPercent(),
sell_price=price,
sell_taker_fee=app.getTakerFee())
if state.last_buy_size > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
                app.notifyTelegram(app.getMarket() + ' (' + app.printGranularity() + ') TEST SELL at ' +
                                   price_text + ' (margin: ' + margin_text + ', delta: ' +
                                   str(round(price - state.last_buy_price, precision)) + ')')
state.sell_count = state.sell_count + 1
buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
state.last_buy_size = buy_size - sell_fee
state.sell_sum = state.sell_sum + state.last_buy_size
if not app.isVerbose():
if price > 0:
margin_text = truncate(margin) + '%'
else:
margin_text = '0%'
Logger.info(formatted_current_df_index + ' | ' + app.getMarket() + ' | ' +
app.printGranularity() + ' | SELL | ' + str(price) + ' | BUY | ' +
str(state.last_buy_price) + ' | DIFF | ' + str(price - state.last_buy_price) +
' | DIFF | ' + str(profit) + ' | MARGIN NO FEES | ' +
margin_text + ' | MARGIN FEES | ' + str(round(sell_fee, precision)))
else:
Logger.info('--------------------------------------------------------------------------------')
Logger.info('| *** Executing TEST Sell Order *** |')
Logger.info('--------------------------------------------------------------------------------')
if app.shouldSaveGraphs():
tradinggraphs = TradingGraphs(technical_analysis)
ts = datetime.now().timestamp()
filename = app.getMarket() + '_' + app.printGranularity() + '_sell_' + str(ts) + '.png'
tradinggraphs.renderEMAandMACD(len(trading_data), 'graphs/' + filename, True)
if state.action in ['BUY', 'SELL']:
state.last_action = state.action
state.last_df_index = str(df_last.index.format()[0])
if not app.isLive() and state.iterations == len(df):
Logger.info("\nSimulation Summary: ")
if state.buy_count > state.sell_count and app.allowSellAtLoss():
state.last_buy_size = ((app.getSellPercent() / 100) * ((price / state.last_buy_price) * (state.last_buy_size - state.last_buy_fee)))
state.last_buy_size = state.last_buy_size - state.last_buy_price * app.getTakerFee()
state.sell_sum = state.sell_sum + state.last_buy_size
state.sell_count = state.sell_count + 1
elif state.buy_count > state.sell_count and not app.allowSellAtLoss():
Logger.info("\n")
Logger.info(' Note : "sell at loss" is disabled and you have an open trade, if the margin')
Logger.info(' result below is negative it will assume you sold at the end of the')
Logger.info(' simulation which may not be ideal. Try setting --sellatloss 1')
Logger.info("\n")
Logger.info(' Buy Count : ' + str(state.buy_count))
Logger.info(' Sell Count : ' + str(state.sell_count))
Logger.info(' First Buy : ' + str(state.first_buy_size))
Logger.info(' Last Sell : ' + str(state.last_buy_size))
app.notifyTelegram(f"Simulation Summary\n Buy Count: {state.buy_count}\n Sell Count: {state.sell_count}\n First Buy: {state.first_buy_size}\n Last Sell: {state.last_buy_size}\n")
if state.sell_count > 0:
Logger.info("\n")
Logger.info(' Margin : ' + _truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4) + '%')
Logger.info("\n")
Logger.info(' ** non-live simulation, assuming highest fees')
app.notifyTelegram(f" Margin: {_truncate((((state.last_buy_size - state.first_buy_size) / state.first_buy_size) * 100), 4)}%\n ** non-live simulation, assuming highest fees\n")
else:
if state.last_buy_size > 0 and state.last_buy_price > 0 and price > 0 and state.last_action == 'BUY':
Logger.info(now + ' | ' + app.getMarket() + bullbeartext + ' | ' + app.printGranularity() + ' | Current Price: ' + str(price) + ' | Margin: ' + str(margin) + ' | Profit: ' + str(profit))
else:
Logger.info(now + ' | ' + app.getMarket() + bullbeartext + ' | ' + app.printGranularity() + ' | Current Price: ' + str(price))
state.iterations = state.iterations - 1
if not app.disableTracker() and app.isLive():
if app.getExchange() == 'binance':
account.saveTrackerCSV(app.getMarket())
elif app.getExchange() == 'coinbasepro':
account.saveTrackerCSV()
if app.isSimulation():
if state.iterations < 300:
if app.simuluationSpeed() in ['fast', 'fast-sample']:
list(map(s.cancel, s.queue))
s.enter(0, 1, executeJob, (sc, app, state, df))
else:
list(map(s.cancel, s.queue))
s.enter(1, 1, executeJob, (sc, app, state, df))
else:
list(map(s.cancel, s.queue))
s.enter(60, 1, executeJob, (sc, app, state))
def main():
try:
message = 'Starting '
if app.getExchange() == 'coinbasepro':
message += 'Coinbase Pro bot'
elif app.getExchange() == 'binance':
message += 'Binance bot'
message += ' for ' + app.getMarket() + ' using granularity ' + app.printGranularity()
app.notifyTelegram(message)
trading_data = app.startApp(account, state.last_action)
def runApp():
if app.isSimulation():
executeJob(s, app, state, trading_data)
else:
executeJob(s, app, state)
s.run()
try:
runApp()
except KeyboardInterrupt:
raise
except(BaseException, Exception) as e:
if app.autoRestart():
time.sleep(30)
Logger.critical('Restarting application after exception: ' + repr(e))
app.notifyTelegram('Auto restarting bot for ' + app.getMarket() + ' after exception: ' + repr(e))
                list(map(s.cancel, s.queue))
runApp()
else:
raise
except KeyboardInterrupt:
Logger.warning(str(datetime.now()) + ' bot is closed via keyboard interrupt...')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
except(BaseException, Exception) as e:
app.notifyTelegram('Bot for ' + app.getMarket() + ' got an exception: ' + repr(e))
Logger.critical(repr(e))
raise
main()
| true
| true
|
79067294fd51c172579948486cb84fd93fd219a3
| 2,472
|
py
|
Python
|
WolfEyes/Utils/TypeChecker.py
|
TBIproject/WolfEye
|
adaa604ed07dcef81162f59fe4f6fb5f2e9b6f09
|
[
"BSD-3-Clause"
] | 2
|
2015-12-14T15:20:44.000Z
|
2016-11-17T07:54:10.000Z
|
WolfEyes/Utils/TypeChecker.py
|
TBIproject/WolfEyes
|
adaa604ed07dcef81162f59fe4f6fb5f2e9b6f09
|
[
"BSD-3-Clause"
] | null | null | null |
WolfEyes/Utils/TypeChecker.py
|
TBIproject/WolfEyes
|
adaa604ed07dcef81162f59fe4f6fb5f2e9b6f09
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
WOLFEYES'S FRAMEWORK
Python 3 / OpenCV 3
This file describes some TypeCheking decorators.
Might be useless, but allows for almost very precise type checking,
especially on keyworded args, which might help.
"""
# 'kargs' get the arguments and passes the decorator
def args(*types, **ktypes):
"""Allow testing of input types:
argkey=(types) or argkey=type"""
# The decorator modifies the function
def decorator(func):
def modified(*args, **kargs):
            # The modified function takes some args and kargs,
            # which we need to check before passing them to func.
# Works much like a proxy/firewall.
# Check args:
position = 1
for arg, T in zip(args, types):
if not isinstance(arg, T):
raise TypeError("Positional arg (%d) should be of type(s) %s, got %s" % (position, T, type(arg)))
position += 1
# Check kargs:
for key, arg in kargs.items():
if key in ktypes:
T = ktypes[key]
if not isinstance(arg, T):
raise TypeError("Keyworded arg '%s' should be of type(s) %s, got %s" % (key, T, type(arg)))
# Actual result after check
return func(*args, **kargs)
# The decorator has decorated 'func' in 'modified'
return modified
# We pass the actual decorator right after getting the kargs
return decorator
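# Minimal usage sketch for 'args' (illustrative only; '_demo_args' is a
# hypothetical function that is defined here but never called).
@args(int, name=str)
def _demo_args(count, name=""):
    """Accepts an int positionally and an optional 'name' keyword string."""
    return "%s x%d" % (name, count)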
# 'ret' gets the possible output types
def ret(*types, **kargs):
# Garde-fou
    if len(types) == 1 and not isinstance(types[0], type) and callable(types[0]):
        raise ValueError("You should not pass a function to TypeChecker.ret, maybe you did not write it as '@TypeChecker.ret()...'")
# This decorator will modify the function 'func'
def decorator(func):
def modified(*args, **kargs):
# This is the modified function, 'modified' Works
# like a proxy, just passes the arguments and
# checks the type of the return's value
ret = func(*args, **kargs)
if not isinstance(ret, types):
raise TypeError("The function %s is returning an abnormal value, expected %s but got %s" % (func, types, type(ret)))
# func's return
return ret
# Modified function
return modified
# The actual decorator
return decorator
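# Minimal usage sketch for 'ret' (illustrative only; '_demo_ret' is defined
# but never called). Note the decorator must be called: '@ret(str)', not '@ret'.
@ret(str)
def _demo_ret():
    return "ok"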
| 33.405405
| 132
| 0.588592
|
def args(*types, **ktypes):
def decorator(func):
def modified(*args, **kargs):
position = 1
for arg, T in zip(args, types):
if not isinstance(arg, T):
raise TypeError("Positional arg (%d) should be of type(s) %s, got %s" % (position, T, type(arg)))
position += 1
for key, arg in kargs.items():
if key in ktypes:
T = ktypes[key]
if not isinstance(arg, T):
raise TypeError("Keyworded arg '%s' should be of type(s) %s, got %s" % (key, T, type(arg)))
return func(*args, **kargs)
return modified
return decorator
def ret(*types, **kargs):
    if len(types) == 1 and not isinstance(types[0], type) and callable(types[0]):
        raise ValueError("You should not pass a function to TypeChecker.ret, maybe you did not write it as '@TypeChecker.ret()...'")
def decorator(func):
def modified(*args, **kargs):
ret = func(*args, **kargs)
if not isinstance(ret, types):
raise TypeError("The function %s is returning an abnormal value, expected %s but got %s" % (func, types, type(ret)))
# func's return
return ret
return modified
return decorator
| true
| true
|
790672b510ce3c4c7c76a9f1a7a9fd7deb79ce39
| 12,346
|
py
|
Python
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/iam_user.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 1
|
2019-04-16T21:23:15.000Z
|
2019-04-16T21:23:15.000Z
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/iam_user.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 5
|
2020-02-26T20:10:50.000Z
|
2021-09-23T23:23:18.000Z
|
ansible/my_env/lib/python2.7/site-packages/ansible/modules/cloud/amazon/iam_user.py
|
otus-devops-2019-02/yyashkin_infra
|
0cd0c003884155ac922e3e301305ac202de7028c
|
[
"MIT"
] | 1
|
2020-02-13T14:24:57.000Z
|
2020-02-13T14:24:57.000Z
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_user
short_description: Manage AWS IAM users
description:
- Manage AWS IAM users
version_added: "2.5"
author: Josh Souza, @joshsouza
options:
name:
description:
- The name of the user to create.
required: true
managed_policy:
description:
- A list of managed policy ARNs or friendly names to attach to the user. To embed an inline policy, use M(iam_policy).
required: false
state:
description:
- Create or remove the IAM user
required: true
choices: [ 'present', 'absent' ]
purge_policy:
description:
- Detach policies which are not included in managed_policy list
required: false
default: false
requirements: [ botocore, boto3 ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Note: This module does not allow management of groups that users belong to.
# Groups should manage their membership directly using `iam_group`,
# as users belong to them.
# Create a user
- iam_user:
name: testuser1
state: present
# Create a user and attach a managed policy using its ARN
- iam_user:
name: testuser1
managed_policy:
- arn:aws:iam::aws:policy/AmazonSNSFullAccess
state: present
# Remove all managed policies from an existing user with an empty list
- iam_user:
name: testuser1
state: present
purge_policy: true
# Delete the user
- iam_user:
name: testuser1
state: absent
'''
RETURN = '''
user:
description: dictionary containing all the user information
returned: success
type: complex
contains:
arn:
description: the Amazon Resource Name (ARN) specifying the user
type: string
sample: "arn:aws:iam::1234567890:user/testuser1"
create_date:
description: the date and time, in ISO 8601 date-time format, when the user was created
type: string
sample: "2017-02-08T04:36:28+00:00"
user_id:
description: the stable and unique string identifying the user
type: string
sample: AGPAIDBWE12NSFINE55TM
user_name:
description: the friendly name that identifies the user
type: string
sample: testuser1
path:
description: the path to the user
type: string
sample: /
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info, boto3_conn
from ansible.module_utils.ec2 import HAS_BOTO3
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError
except ImportError:
pass # caught by imported HAS_BOTO3
def compare_attached_policies(current_attached_policies, new_attached_policies):
# If new_attached_policies is None it means we want to remove all policies
if len(current_attached_policies) > 0 and new_attached_policies is None:
return False
current_attached_policies_arn_list = []
for policy in current_attached_policies:
current_attached_policies_arn_list.append(policy['PolicyArn'])
if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)):
return True
else:
return False
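# Illustrative behaviour of compare_attached_policies:
#   current=[{'PolicyArn': 'arn:aws:iam::aws:policy/ReadOnlyAccess'}],
#   new=['arn:aws:iam::aws:policy/ReadOnlyAccess'] -> True (sets match)
#   current=[], new=['arn:aws:iam::aws:policy/ReadOnlyAccess'] -> False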
def convert_friendly_names_to_arns(connection, module, policy_names):
# List comprehension that looks for any policy in the 'policy_names' list
# that does not begin with 'arn'. If there aren't any, short circuit.
# If there are, translate friendly name to the full arn
if not any([not policy.startswith('arn:') for policy in policy_names if policy is not None]):
return policy_names
allpolicies = {}
paginator = connection.get_paginator('list_policies')
policies = paginator.paginate().build_full_result()['Policies']
for policy in policies:
allpolicies[policy['PolicyName']] = policy['Arn']
allpolicies[policy['Arn']] = policy['Arn']
try:
return [allpolicies[policy] for policy in policy_names]
except KeyError as e:
module.fail_json(msg="Couldn't find policy: " + str(e))
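# Illustrative shape of the 'allpolicies' lookup built above: both the
# friendly name and the ARN map to the ARN, e.g.
#   {'ReadOnlyAccess': 'arn:aws:iam::aws:policy/ReadOnlyAccess',
#    'arn:aws:iam::aws:policy/ReadOnlyAccess': 'arn:aws:iam::aws:policy/ReadOnlyAccess'}
# so the final list comprehension accepts either form of input.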
def create_or_update_user(connection, module):
params = dict()
params['UserName'] = module.params.get('name')
managed_policies = module.params.get('managed_policy')
purge_policy = module.params.get('purge_policy')
changed = False
if managed_policies:
managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
# Get user
user = get_user(connection, module, params['UserName'])
# If user is None, create it
if user is None:
# Check mode means we would create the user
if module.check_mode:
module.exit_json(changed=True)
try:
connection.create_user(**params)
changed = True
except ClientError as e:
module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc())
# Manage managed policies
current_attached_policies = get_attached_policy_list(connection, module, params['UserName'])
if not compare_attached_policies(current_attached_policies, managed_policies):
current_attached_policies_arn_list = []
for policy in current_attached_policies:
current_attached_policies_arn_list.append(policy['PolicyArn'])
        # If purge_policy is set, detach any attached policies that are not in the managed_policies list
if purge_policy:
# Detach policies not present
for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
changed = True
if not module.check_mode:
try:
connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
except ClientError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc())
# If there are policies to adjust that aren't in the current list, then things have changed
# Otherwise the only changes were in purging above
if set(managed_policies).difference(set(current_attached_policies_arn_list)):
changed = True
# If there are policies in managed_policies attach each policy
if managed_policies != [None] and not module.check_mode:
for policy_arn in managed_policies:
try:
connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
except ClientError as e:
module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc())
if module.check_mode:
module.exit_json(changed=changed)
# Get the user again
user = get_user(connection, module, params['UserName'])
module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user))
def destroy_user(connection, module):
params = dict()
params['UserName'] = module.params.get('name')
if get_user(connection, module, params['UserName']):
# Check mode means we would remove this user
if module.check_mode:
module.exit_json(changed=True)
# Remove any attached policies otherwise deletion fails
try:
for policy in get_attached_policy_list(connection, module, params['UserName']):
connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy['PolicyArn'])
except ClientError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy['PolicyArn'], params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy['PolicyArn'], params['UserName'], to_native(e)),
exception=traceback.format_exc())
try:
connection.delete_user(**params)
except ClientError as e:
module.fail_json(msg="Unable to delete user {0}: {1}".format(params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to delete user {0}: {1}".format(params['UserName'], to_native(e)),
exception=traceback.format_exc())
else:
module.exit_json(changed=False)
module.exit_json(changed=True)
def get_user(connection, module, name):
params = dict()
params['UserName'] = name
try:
return connection.get_user(**params)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
return None
else:
module.fail_json(msg="Unable to get user {0}: {1}".format(name, to_native(e)),
**camel_dict_to_snake_dict(e.response))
def get_attached_policy_list(connection, module, name):
try:
return connection.list_attached_user_policies(UserName=name)['AttachedPolicies']
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
return None
else:
module.fail_json(msg="Unable to get policies for user {0}: {1}".format(name, to_native(e)),
**camel_dict_to_snake_dict(e.response))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
managed_policy=dict(default=[], type='list'),
state=dict(choices=['present', 'absent'], required=True),
purge_policy=dict(default=False, type='bool')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_params)
state = module.params.get("state")
if state == 'present':
create_or_update_user(connection, module)
else:
destroy_user(connection, module)
if __name__ == '__main__':
main()
| 37.987692
| 126
| 0.640936
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_user
short_description: Manage AWS IAM users
description:
- Manage AWS IAM users
version_added: "2.5"
author: Josh Souza, @joshsouza
options:
name:
description:
- The name of the user to create.
required: true
managed_policy:
description:
- A list of managed policy ARNs or friendly names to attach to the user. To embed an inline policy, use M(iam_policy).
required: false
state:
description:
- Create or remove the IAM user
required: true
choices: [ 'present', 'absent' ]
purge_policy:
description:
- Detach policies which are not included in managed_policy list
required: false
default: false
requirements: [ botocore, boto3 ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Note: This module does not allow management of groups that users belong to.
# Groups should manage their membership directly using `iam_group`,
# as users belong to them.
# Create a user
- iam_user:
name: testuser1
state: present
# Create a user and attach a managed policy using its ARN
- iam_user:
name: testuser1
managed_policy:
- arn:aws:iam::aws:policy/AmazonSNSFullAccess
state: present
# Remove all managed policies from an existing user with an empty list
- iam_user:
name: testuser1
state: present
purge_policy: true
# Delete the user
- iam_user:
name: testuser1
state: absent
'''
RETURN = '''
user:
description: dictionary containing all the user information
returned: success
type: complex
contains:
arn:
description: the Amazon Resource Name (ARN) specifying the user
type: string
sample: "arn:aws:iam::1234567890:user/testuser1"
create_date:
description: the date and time, in ISO 8601 date-time format, when the user was created
type: string
sample: "2017-02-08T04:36:28+00:00"
user_id:
description: the stable and unique string identifying the user
type: string
sample: AGPAIDBWE12NSFINE55TM
user_name:
description: the friendly name that identifies the user
type: string
sample: testuser1
path:
description: the path to the user
type: string
sample: /
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info, boto3_conn
from ansible.module_utils.ec2 import HAS_BOTO3
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError
except ImportError:
pass
def compare_attached_policies(current_attached_policies, new_attached_policies):
if len(current_attached_policies) > 0 and new_attached_policies is None:
return False
current_attached_policies_arn_list = []
for policy in current_attached_policies:
current_attached_policies_arn_list.append(policy['PolicyArn'])
if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)):
return True
else:
return False
def convert_friendly_names_to_arns(connection, module, policy_names):
# If there are, translate friendly name to the full arn
if not any([not policy.startswith('arn:') for policy in policy_names if policy is not None]):
return policy_names
allpolicies = {}
paginator = connection.get_paginator('list_policies')
policies = paginator.paginate().build_full_result()['Policies']
for policy in policies:
allpolicies[policy['PolicyName']] = policy['Arn']
allpolicies[policy['Arn']] = policy['Arn']
try:
return [allpolicies[policy] for policy in policy_names]
except KeyError as e:
module.fail_json(msg="Couldn't find policy: " + str(e))
def create_or_update_user(connection, module):
params = dict()
params['UserName'] = module.params.get('name')
managed_policies = module.params.get('managed_policy')
purge_policy = module.params.get('purge_policy')
changed = False
if managed_policies:
managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
user = get_user(connection, module, params['UserName'])
if user is None:
if module.check_mode:
module.exit_json(changed=True)
try:
connection.create_user(**params)
changed = True
except ClientError as e:
module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc())
current_attached_policies = get_attached_policy_list(connection, module, params['UserName'])
if not compare_attached_policies(current_attached_policies, managed_policies):
current_attached_policies_arn_list = []
for policy in current_attached_policies:
current_attached_policies_arn_list.append(policy['PolicyArn'])
if purge_policy:
for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
changed = True
if not module.check_mode:
try:
connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
except ClientError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc())
# Otherwise the only changes were in purging above
if set(managed_policies).difference(set(current_attached_policies_arn_list)):
changed = True
# If there are policies in managed_policies attach each policy
if managed_policies != [None] and not module.check_mode:
for policy_arn in managed_policies:
try:
connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
except ClientError as e:
module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc())
if module.check_mode:
module.exit_json(changed=changed)
# Get the user again
user = get_user(connection, module, params['UserName'])
module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user))
def destroy_user(connection, module):
params = dict()
params['UserName'] = module.params.get('name')
if get_user(connection, module, params['UserName']):
# Check mode means we would remove this user
if module.check_mode:
module.exit_json(changed=True)
# Remove any attached policies otherwise deletion fails
try:
for policy in get_attached_policy_list(connection, module, params['UserName']):
connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy['PolicyArn'])
except ClientError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy['PolicyArn'], params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy['PolicyArn'], params['UserName'], to_native(e)),
exception=traceback.format_exc())
try:
connection.delete_user(**params)
except ClientError as e:
module.fail_json(msg="Unable to delete user {0}: {1}".format(params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to delete user {0}: {1}".format(params['UserName'], to_native(e)),
exception=traceback.format_exc())
else:
module.exit_json(changed=False)
module.exit_json(changed=True)
def get_user(connection, module, name):
params = dict()
params['UserName'] = name
try:
return connection.get_user(**params)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
return None
else:
module.fail_json(msg="Unable to get user {0}: {1}".format(name, to_native(e)),
**camel_dict_to_snake_dict(e.response))
def get_attached_policy_list(connection, module, name):
try:
return connection.list_attached_user_policies(UserName=name)['AttachedPolicies']
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
return None
else:
module.fail_json(msg="Unable to get policies for user {0}: {1}".format(name, to_native(e)),
**camel_dict_to_snake_dict(e.response))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
managed_policy=dict(default=[], type='list'),
state=dict(choices=['present', 'absent'], required=True),
purge_policy=dict(default=False, type='bool')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_params)
state = module.params.get("state")
if state == 'present':
create_or_update_user(connection, module)
else:
destroy_user(connection, module)
if __name__ == '__main__':
main()
| true
| true
|
7906734319e39a54e78ce0361f0f7d13a787c3d4
| 12,188
|
py
|
Python
|
testing/agent_test.py
|
jkondic/overcooked_ai
|
a8d5fb1f9c16c410c47ea4b639ddc20a64276e86
|
[
"MIT"
] | 302
|
2019-08-15T17:00:41.000Z
|
2022-03-31T07:07:22.000Z
|
testing/agent_test.py
|
jkondic/overcooked_ai
|
a8d5fb1f9c16c410c47ea4b639ddc20a64276e86
|
[
"MIT"
] | 55
|
2019-08-15T00:53:50.000Z
|
2022-02-01T16:05:45.000Z
|
testing/agent_test.py
|
jkondic/overcooked_ai
|
a8d5fb1f9c16c410c47ea4b639ddc20a64276e86
|
[
"MIT"
] | 60
|
2019-09-30T11:43:04.000Z
|
2022-03-29T21:59:39.000Z
|
import unittest
import numpy as np
from overcooked_ai_py.agents.agent import AgentPair, FixedPlanAgent, GreedyHumanModel, RandomAgent, SampleAgent
from overcooked_ai_py.mdp.actions import Direction, Action
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld, OvercookedState, PlayerState, ObjectState
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.planning.planners import MediumLevelActionManager, NO_COUNTERS_PARAMS
from overcooked_ai_py.agents.benchmarking import AgentEvaluator
np.random.seed(42)
n, s = Direction.NORTH, Direction.SOUTH
e, w = Direction.EAST, Direction.WEST
stay, interact = Action.STAY, Action.INTERACT
P, Obj = PlayerState, ObjectState
force_compute_large = False
force_compute = True
DISPLAY = False
simple_mdp = OvercookedGridworld.from_layout_name('cramped_room')
large_mdp = OvercookedGridworld.from_layout_name('corridor')
class TestAgentEvaluator(unittest.TestCase):
def setUp(self):
self.agent_eval = AgentEvaluator.from_layout_name({"layout_name": "cramped_room"}, {"horizon": 100})
def test_human_model_pair(self):
trajs = self.agent_eval.evaluate_human_model_pair()
try:
AgentEvaluator.check_trajectories(trajs, verbose=False)
except AssertionError as e:
self.fail("Trajectories were not returned in standard format:\n{}".format(e))
def test_rollouts(self):
ap = AgentPair(RandomAgent(), RandomAgent())
trajs = self.agent_eval.evaluate_agent_pair(ap, num_games=5)
try:
AgentEvaluator.check_trajectories(trajs, verbose=False)
except AssertionError as e:
self.fail("Trajectories were not returned in standard format:\n{}".format(e))
def test_mlam_computation(self):
try:
self.agent_eval.env.mlam
except Exception as e:
self.fail("Failed to compute MediumLevelActionManager:\n{}".format(e))
class TestBasicAgents(unittest.TestCase):
def setUp(self):
self.mlam_large = MediumLevelActionManager.from_pickle_or_compute(large_mdp, NO_COUNTERS_PARAMS, force_compute=force_compute_large)
def test_fixed_plan_agents(self):
a0 = FixedPlanAgent([s, e, n, w])
a1 = FixedPlanAgent([s, w, n, e])
agent_pair = AgentPair(a0, a1)
env = OvercookedEnv.from_mdp(large_mdp, horizon=10)
trajectory, time_taken, _, _ = env.run_agents(agent_pair, include_final_state=True, display=DISPLAY)
end_state = trajectory[-1][0]
self.assertEqual(time_taken, 10)
self.assertEqual(env.mdp.get_standard_start_state().player_positions, end_state.player_positions)
def test_two_greedy_human_open_map(self):
scenario_2_mdp = OvercookedGridworld.from_layout_name('scenario2')
mlam = MediumLevelActionManager.from_pickle_or_compute(scenario_2_mdp, NO_COUNTERS_PARAMS, force_compute=force_compute)
a0 = GreedyHumanModel(mlam)
a1 = GreedyHumanModel(mlam)
agent_pair = AgentPair(a0, a1)
start_state = OvercookedState(
[P((8, 1), s),
P((1, 1), s)],
{},
all_orders=scenario_2_mdp.start_all_orders
)
env = OvercookedEnv.from_mdp(scenario_2_mdp, start_state_fn=lambda: start_state, horizon=100)
trajectory, time_taken, _, _ = env.run_agents(agent_pair, include_final_state=True, display=DISPLAY)
def test_sample_agent(self):
agent = SampleAgent([RandomAgent(all_actions=False), RandomAgent(all_actions=True)])
probs = agent.action(None)[1]["action_probs"]
expected_probs = np.array([0.18333333, 0.18333333, 0.18333333, 0.18333333, 0.18333333, 0.08333333])
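        # Derivation of expected_probs: SampleAgent averages the two policies.
        # RandomAgent(all_actions=False) is uniform over the 5 non-interact
        # actions (1/5 each); RandomAgent(all_actions=True) is uniform over
        # all 6 (1/6 each). So the first five entries are (1/5 + 1/6) / 2
        # = 0.18333... and the interact entry is (0 + 1/6) / 2 = 0.08333...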
self.assertTrue(np.allclose(probs, expected_probs))
class TestAgentEvaluatorStatic(unittest.TestCase):
layout_name_lst = ["asymmetric_advantages", "asymmetric_advantages_tomato", "bonus_order_test", "bottleneck",
"centre_objects", "centre_pots", "corridor", "forced_coordination_tomato", "unident",
"marshmallow_experiment", "marshmallow_experiment_coordination", "you_shall_not_pass"]
def test_from_mdp(self):
for layout_name in self.layout_name_lst:
            original_mdp = OvercookedGridworld.from_layout_name(layout_name)
            ae = AgentEvaluator.from_mdp(mdp=original_mdp, env_params={"horizon": 400})
            ae_mdp = ae.env.mdp
            self.assertEqual(original_mdp, ae_mdp, "mdp with name " + layout_name + " experienced an inconsistency")
def test_from_mdp_params_layout(self):
for layout_name in self.layout_name_lst:
            original_mdp = OvercookedGridworld.from_layout_name(layout_name)
            ae = AgentEvaluator.from_layout_name(mdp_params={"layout_name": layout_name}, env_params={"horizon": 400})
            ae_mdp = ae.env.mdp
            self.assertEqual(original_mdp, ae_mdp, "mdp with name " + layout_name + " experienced an inconsistency")
mdp_gen_params_1 = {
"inner_shape": (10, 7),
"prop_empty": 0.95,
"prop_feats": 0.1,
"start_all_orders": [
{"ingredients": ["onion", "onion", "onion"]}
],
"display": False,
}
mdp_gen_params_2 = {
"inner_shape": (10, 7),
"prop_empty": 0.7,
"prop_feats": 0.5,
"start_all_orders": [
{"ingredients": ["onion", "onion", "onion"]}
],
"display": False,
}
mdp_gen_params_3 = {
"inner_shape": (10, 7),
"prop_empty": 0.5,
"prop_feats": 0.4,
"start_all_orders": [
{"ingredients": ["onion", "onion", "onion"]}
],
"display": False,
}
mdp_gen_params_lst = [mdp_gen_params_1, mdp_gen_params_2, mdp_gen_params_3]
outer_shape = (10, 7)
def test_from_mdp_params_variable_across(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae0 = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape)
ae1 = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape)
self.assertFalse(ae0.env.mdp == ae1.env.mdp,
"2 randomly generated layouts across 2 evaluators are the same, which is wrong")
def test_from_mdp_params_variable_infinite(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape)
mdp_0 = ae.env.mdp.copy()
for _ in range(5):
ae.env.reset(regen_mdp=True)
mdp_1 = ae.env.mdp
self.assertFalse(mdp_0 == mdp_1,
"with infinite layout generator and regen_mdp=True, the 2 layouts should not be the same")
def test_from_mdp_params_variable_infinite_no_regen(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape)
mdp_0 = ae.env.mdp.copy()
for _ in range(5):
ae.env.reset(regen_mdp=False)
mdp_1 = ae.env.mdp
self.assertTrue(mdp_0 == mdp_1,
"with infinite layout generator and regen_mdp=False, the 2 layouts should be the same")
def test_from_mdp_params_variable_infinite_specified(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_infinite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": np.inf},
outer_shape=self.outer_shape)
mdp_0 = ae.env.mdp.copy()
for _ in range(5):
ae.env.reset(regen_mdp=True)
mdp_1 = ae.env.mdp
self.assertFalse(mdp_0 == mdp_1,
"with infinite layout generator and regen_mdp=True, the 2 layouts should not be the same")
def test_from_mdp_params_variable_finite(self):
for mdp_gen_params in self.mdp_gen_params_lst:
ae = AgentEvaluator.from_mdp_params_finite(mdp_params=mdp_gen_params,
env_params={"horizon": 400, "num_mdp": 2},
outer_shape=self.outer_shape)
mdp_0 = ae.env.mdp.copy()
seen = [mdp_0]
for _ in range(20):
ae.env.reset(regen_mdp=True)
mdp_i = ae.env.mdp
if len(seen) == 1:
if mdp_i != seen[0]:
seen.append(mdp_i.copy())
elif len(seen) == 2:
mdp_0, mdp_1 = seen
                    self.assertTrue((mdp_i == mdp_0 or mdp_i == mdp_1),
                                    "more than 2 MDPs were created; the finite generator failed to honor num_mdp=2")
                else:
                    self.fail("theoretically unreachable statement")
layout_name_short_lst = ["cramped_room", "cramped_room_tomato", "simple_o", "simple_tomato", "simple_o_t"]
biased = [0.1, 0.15, 0.2, 0.25, 0.3]
num_reset = 200000
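    # With 200000 resets, the standard error of an empirical frequency near 0.2 is
    # sqrt(0.2 * 0.8 / 200000) ~= 0.0009, comfortably inside the ~0.005 tolerance that
    # assertAlmostEqual(..., places=2) allows, so the sampling checks below are stable.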
def test_from_mdp_lst_default(self):
mdp_lst = [OvercookedGridworld.from_layout_name(name) for name in self.layout_name_short_lst]
ae = AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params={"horizon": 400})
counts = {}
for _ in range(self.num_reset):
ae.env.reset(regen_mdp=True)
if ae.env.mdp.layout_name in counts:
counts[ae.env.mdp.layout_name] += 1
else:
counts[ae.env.mdp.layout_name] = 1
for k, v in counts.items():
            self.assertAlmostEqual(0.2, v/self.num_reset, 2, "frequency off by more than 2 decimal places for " + k)
def test_from_mdp_lst_uniform(self):
mdp_lst = [OvercookedGridworld.from_layout_name(name) for name in self.layout_name_short_lst]
ae = AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params={"horizon": 400}, sampling_freq=[0.2, 0.2, 0.2, 0.2, 0.2])
counts = {}
for _ in range(self.num_reset):
ae.env.reset(regen_mdp=True)
if ae.env.mdp.layout_name in counts:
counts[ae.env.mdp.layout_name] += 1
else:
counts[ae.env.mdp.layout_name] = 1
for k, v in counts.items():
            self.assertAlmostEqual(0.2, v/self.num_reset, 2, "frequency off by more than 2 decimal places for " + k)
def test_from_mdp_lst_biased(self):
mdp_lst = [OvercookedGridworld.from_layout_name(name) for name in self.layout_name_short_lst]
ae = AgentEvaluator.from_mdp_lst(mdp_lst=mdp_lst, env_params={"horizon": 400}, sampling_freq=self.biased)
counts = {}
for _ in range(self.num_reset):
ae.env.reset(regen_mdp=True)
if ae.env.mdp.layout_name in counts:
counts[ae.env.mdp.layout_name] += 1
else:
counts[ae.env.mdp.layout_name] = 1
# construct the ground truth
gt = {self.layout_name_short_lst[i]: self.biased[i] for i in range(len(self.layout_name_short_lst))}
for k, v in counts.items():
            self.assertAlmostEqual(gt[k], v/self.num_reset, 2, "frequency off by more than 2 decimal places for " + k)
if __name__ == '__main__':
unittest.main()
| 45.992453
| 139
| 0.6138
|
| true
| true
|
790673c5f5f5d80b053cc251929fc8e0806e5951
| 14,103
|
py
|
Python
|
tests/system/python/e2e/test_e2e_notification_service_with_plugins.py
|
doug-dianomic/fledge
|
cab620d1f31e6dca8e31ca8e483adaad7ce94834
|
[
"Apache-2.0"
] | null | null | null |
tests/system/python/e2e/test_e2e_notification_service_with_plugins.py
|
doug-dianomic/fledge
|
cab620d1f31e6dca8e31ca8e483adaad7ce94834
|
[
"Apache-2.0"
] | null | null | null |
tests/system/python/e2e/test_e2e_notification_service_with_plugins.py
|
doug-dianomic/fledge
|
cab620d1f31e6dca8e31ca8e483adaad7ce94834
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge.readthedocs.io/
# FLEDGE_END
""" Test end to end flow with:
Notification service with
Threshold in-built rule plugin
notify-python35 delivery channel plugin
"""
import os
import time
import subprocess
import http.client
import json
from threading import Event
import urllib.parse
import pytest
__author__ = "Ashish Jabble"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
SERVICE = "notification"
SERVICE_NAME = "NotificationServer #1"
NOTIFY_PLUGIN = "python35"
NOTIFY_INBUILT_RULES = ["Threshold"]
def _configure_and_start_service(service_branch, fledge_url, remove_directories):
try:
subprocess.run(["$FLEDGE_ROOT/tests/system/python/scripts/install_c_service {} {}"
.format(service_branch, SERVICE)], shell=True, check=True, stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError:
assert False, "{} installation failed".format(SERVICE)
finally:
remove_directories("/tmp/fledge-service-{}".format(SERVICE))
# Start service
conn = http.client.HTTPConnection(fledge_url)
data = {"name": SERVICE_NAME,
"type": "notification",
"enabled": "true"
}
conn.request("POST", '/fledge/service', json.dumps(data))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert 2 == len(jdoc)
assert SERVICE_NAME == jdoc['name']
def _install_notify_plugin(notify_branch, plugin_name, remove_directories):
try:
subprocess.run(["$FLEDGE_ROOT/tests/system/python/scripts/install_c_plugin {} notify {}".format(
notify_branch, plugin_name)], shell=True, check=True, stdout=subprocess.DEVNULL)
except subprocess.CalledProcessError:
assert False, "{} installation failed".format(plugin_name)
finally:
remove_directories("/tmp/fledge-notify-{}".format(plugin_name))
def _get_result(fledge_url, path):
conn = http.client.HTTPConnection(fledge_url)
conn.request("GET", path)
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
return jdoc
def _verify_service(fledge_url, status):
jdoc = _get_result(fledge_url, '/fledge/service')
srvc = [s for s in jdoc['services'] if s['name'] == SERVICE_NAME]
assert 1 == len(srvc)
svc = srvc[0]
assert SERVICE.capitalize() == svc['type']
assert status == svc['status']
def _verify_audit_log_entry(fledge_url, path, name, severity='INFORMATION', count=1):
jdoc = _get_result(fledge_url, path)
assert len(jdoc['audit'])
assert count == jdoc['totalCount']
audit_detail = jdoc['audit'][0]
assert severity == audit_detail['severity']
assert name == audit_detail['details']['name']
def _add_notification_instance(fledge_url, payload):
conn = http.client.HTTPConnection(fledge_url)
conn.request("POST", '/fledge/notification', json.dumps(payload))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Notification {} created successfully".format(payload['name']) == jdoc['result']
def pause_for_x_seconds(x=1):
wait_e = Event()
wait_e.clear()
wait_e.wait(timeout=x)
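# Event().wait(timeout=x) is used here as a plain sleep: the event is never set,
# so wait() simply blocks for the full timeout before returning.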
class TestNotificationService:
def test_service(self, reset_and_start_fledge, service_branch, fledge_url, wait_time, retries, remove_directories):
_configure_and_start_service(service_branch, fledge_url, remove_directories)
        retry_count = 0
        # only 2 services are up by default, i.e. core and storage
        default_registry_count = 2
        service_registry = []
        while len(service_registry) != default_registry_count + 1 and retry_count < retries:
            svc = _get_result(fledge_url, '/fledge/service')
            service_registry = svc['services']
            retry_count += 1
            pause_for_x_seconds(x=wait_time * 2)
        if len(service_registry) != default_registry_count + 1:
            assert False, "Failed to start the {} service".format(SERVICE)
_verify_service(fledge_url, status='running')
_verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFST', name=SERVICE_NAME)
def test_get_default_notification_plugins(self, fledge_url, remove_directories):
remove_directories(os.environ['FLEDGE_ROOT'] + '/plugins/notificationDelivery')
remove_directories(os.environ['FLEDGE_ROOT'] + '/plugins/notificationRule')
        remove_directories(os.environ['FLEDGE_ROOT'] + '/cmake_build/C/plugins/notificationDelivery')
        remove_directories(os.environ['FLEDGE_ROOT'] + '/cmake_build/C/plugins/notificationRule')
jdoc = _get_result(fledge_url, '/fledge/notification/plugin')
assert [] == jdoc['delivery']
assert 1 == len(jdoc['rules'])
assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][0]['name']
class TestNotificationCRUD:
@pytest.mark.parametrize("data", [
{"name": "Test 1", "description": "Test 1 notification", "rule": NOTIFY_INBUILT_RULES[0],
"channel": NOTIFY_PLUGIN, "enabled": "false", "notification_type": "retriggered"},
{"name": "Test2", "description": "Test 2 notification", "rule": NOTIFY_INBUILT_RULES[0],
"channel": NOTIFY_PLUGIN, "enabled": "false", "notification_type": "toggled"},
{"name": "Test #3", "description": "Test 3 notification", "rule": NOTIFY_INBUILT_RULES[0],
"channel": NOTIFY_PLUGIN, "enabled": "false", "notification_type": "one shot"}
])
def test_create_notification_instances_with_default_rule_and_channel_python35(self, fledge_url, notify_branch,
data,
remove_directories):
if data['name'] == 'Test 1':
_install_notify_plugin(notify_branch, NOTIFY_PLUGIN, remove_directories)
_add_notification_instance(fledge_url, data)
def test_inbuilt_rule_plugin_and_notify_python35_delivery(self, fledge_url):
jdoc = _get_result(fledge_url, '/fledge/notification/plugin')
assert 1 == len(jdoc['delivery'])
assert NOTIFY_PLUGIN == jdoc['delivery'][0]['name']
assert 1 == len(jdoc['rules'])
assert NOTIFY_INBUILT_RULES[0] == jdoc['rules'][0]['name']
def test_get_notifications_and_audit_entry(self, fledge_url):
jdoc = _get_result(fledge_url, '/fledge/notification')
assert 3 == len(jdoc['notifications'])
# Test 1, Test2 and Test #3
jdoc = _get_result(fledge_url, '/fledge/audit?source=NTFAD')
assert 3 == jdoc['totalCount']
def test_update_notification(self, fledge_url, name="Test 1"):
conn = http.client.HTTPConnection(fledge_url)
data = {"notification_type": "toggled"}
conn.request("PUT", '/fledge/notification/{}'.format(urllib.parse.quote(name))
, json.dumps(data))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Notification {} updated successfully".format(name) == jdoc["result"]
# Verify updated notification info
jdoc = _get_result(fledge_url, '/fledge/notification/{}'.format(urllib.parse.quote(name)))
assert "toggled" == jdoc['notification']['notificationType']
def test_delete_notification(self, fledge_url, name="Test #3"):
conn = http.client.HTTPConnection(fledge_url)
conn.request("DELETE", '/fledge/notification/{}'.format(urllib.parse.quote(name)))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Notification {} deleted successfully.".format(name) == jdoc["result"]
        # Verify that only two notifications exist now, not 3
jdoc = _get_result(fledge_url, '/fledge/notification')
notifications = jdoc['notifications']
assert 2 == len(notifications)
assert "Test 1" == notifications[0]['name']
assert "Test2" == notifications[1]['name']
jdoc = _get_result(fledge_url, '/fledge/audit?source=NTFDL')
assert 1 == jdoc['totalCount']
class TestSentAndReceiveNotification:
FOGBENCH_TEMPLATE = "fogbench-template.json"
SENSOR_VALUE = 20
SOUTH_PLUGIN_NAME = "coap"
ASSET_NAME = "{}".format(SOUTH_PLUGIN_NAME)
@pytest.fixture
def start_south(self, add_south, remove_data_file, remove_directories, south_branch, fledge_url):
""" This fixture clone a south repo and starts south instance
add_south: Fixture that starts any south service with given configuration
remove_data_file: Fixture that remove data file created during the tests
remove_directories: Fixture that remove directories created during the tests """
fogbench_template_path = self.prepare_template_reading_from_fogbench()
add_south(self.SOUTH_PLUGIN_NAME, south_branch, fledge_url, service_name=self.SOUTH_PLUGIN_NAME)
yield self.start_south
# Cleanup code that runs after the test is over
remove_data_file(fogbench_template_path)
remove_directories("/tmp/fledge-south-{}".format(self.SOUTH_PLUGIN_NAME))
def prepare_template_reading_from_fogbench(self):
""" Define the template file for fogbench readings """
fogbench_template_path = os.path.join(
os.path.expandvars('${FLEDGE_ROOT}'), 'data/{}'.format(self.FOGBENCH_TEMPLATE))
with open(fogbench_template_path, "w") as f:
f.write(
'[{"name": "%s", "sensor_values": '
'[{"name": "sensor", "type": "number", "min": %d, "max": %d, "precision": 0}]}]' % (
self.ASSET_NAME, self.SENSOR_VALUE, self.SENSOR_VALUE))
return fogbench_template_path
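    # For reference, with the defaults above the template written to disk is (a sketch):
    #   [{"name": "coap", "sensor_values":
    #     [{"name": "sensor", "type": "number", "min": 20, "max": 20, "precision": 0}]}]
    # i.e. fogbench always emits the constant reading 20, which the Threshold rule
    # configured below can trigger on deterministically.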
def ingest_readings_from_fogbench(self, fledge_url, wait_time):
pause_for_x_seconds(x=wait_time*3)
conn = http.client.HTTPConnection(fledge_url)
subprocess.run(["cd $FLEDGE_ROOT/extras/python; python3 -m fogbench -t ../../data/{}; cd -"
.format(self.FOGBENCH_TEMPLATE)], shell=True, check=True, stdout=subprocess.DEVNULL)
pause_for_x_seconds(x=wait_time)
conn.request("GET", '/fledge/asset')
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
val = json.loads(r)
assert 1 == len(val)
assert self.ASSET_NAME == val[0]["assetCode"]
assert 1 == val[0]["count"]
conn.request("GET", '/fledge/asset/{}'.format(self.ASSET_NAME))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
val = json.loads(r)
assert 1 == len(val)
assert {'sensor': self.SENSOR_VALUE} == val[0]["reading"]
def configure_rule_with_single_item_eval_type(self, fledge_url, cat_name):
conn = http.client.HTTPConnection(fledge_url)
data = {"asset": self.ASSET_NAME,
"datapoint": "sensor",
"evaluation_data": "Single Item",
"condition": ">",
"trigger_value": str(self.SENSOR_VALUE - 10),
}
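        # With SENSOR_VALUE = 20 this sets trigger_value = "10", so the Threshold rule's
        # "sensor > 10" condition is expected to fire on every ingested reading
        # (assumed semantics of the in-built Threshold rule).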
conn.request("PUT", '/fledge/category/rule{}'.format(cat_name), json.dumps(data))
r = conn.getresponse()
assert 200 == r.status
def enable_notification(self, fledge_url, cat_name, is_enabled=True):
_enabled = "true" if is_enabled else "false"
data = {"value": _enabled}
conn = http.client.HTTPConnection(fledge_url)
conn.request("PUT", '/fledge/category/{}/enable'.format(cat_name), json.dumps(data))
r = conn.getresponse()
assert 200 == r.status
def test_sent_and_receive_notification(self, fledge_url, start_south, wait_time):
data = {"name": "Test4",
"description": "Test4_Notification",
"rule": NOTIFY_INBUILT_RULES[0],
"channel": NOTIFY_PLUGIN,
"enabled": True,
"notification_type": "one shot"
}
name = data['name']
_add_notification_instance(fledge_url, data)
self.configure_rule_with_single_item_eval_type(fledge_url, name)
        # upload the delivery script; NotifyPython35::configure() saves it as
        # lowercase(categoryName) + "_script_" + method_name + ".py"
cat_name = "delivery{}".format(name)
script_path = '$FLEDGE_ROOT/tests/system/python/data/notify35.py'
url = 'http://' + fledge_url + '/fledge/category/' + cat_name + '/script/upload'
upload_script = 'curl -F "script=@{}" {}'.format(script_path, url)
subprocess.run(upload_script, shell=True, check=True, stdout=subprocess.DEVNULL)
        # enable notification delivery (it gets disabled automatically while no script file is available)
self.enable_notification(fledge_url, "delivery" + name)
self.ingest_readings_from_fogbench(fledge_url, wait_time)
time.sleep(wait_time)
_verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFSN', name=name)
class TestStartStopNotificationService:
def test_shutdown_service_with_schedule_disable(self, fledge_url, disable_schedule, wait_time):
disable_schedule(fledge_url, SERVICE_NAME)
_verify_service(fledge_url, status='shutdown')
pause_for_x_seconds(x=wait_time)
# After shutdown there should be 1 entry for NTFSD (shutdown)
_verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFSD', name=SERVICE_NAME, count=1)
def test_restart_notification_service(self, fledge_url, enable_schedule, wait_time):
enable_schedule(fledge_url, SERVICE_NAME)
pause_for_x_seconds(x=wait_time)
_verify_service(fledge_url, status='running')
# After restart there should be 2 entries for NTFST (start)
_verify_audit_log_entry(fledge_url, '/fledge/audit?source=NTFST', name=SERVICE_NAME, count=2)
| 41.973214
| 119
| 0.657307
|
| true
| true
|
7906749671c5df5c56f040275e25cf58665e7525
| 833
|
py
|
Python
|
tests/utils.py
|
dkomisar/conda-concourse-ci
|
0d8515089884d4559fa4b7df448b6311664a0d45
|
[
"BSD-3-Clause"
] | 12
|
2016-12-06T03:18:41.000Z
|
2020-04-01T16:12:20.000Z
|
tests/utils.py
|
dkomisar/conda-concourse-ci
|
0d8515089884d4559fa4b7df448b6311664a0d45
|
[
"BSD-3-Clause"
] | 82
|
2016-11-16T14:53:49.000Z
|
2021-09-09T15:40:12.000Z
|
tests/utils.py
|
dkomisar/conda-concourse-ci
|
0d8515089884d4559fa4b7df448b6311664a0d45
|
[
"BSD-3-Clause"
] | 28
|
2016-11-16T03:29:55.000Z
|
2021-09-15T17:23:51.000Z
|
import os
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
test_config_dir = os.path.join(test_data_dir, 'config-test')
graph_data_dir = os.path.join(test_data_dir, 'graph_data')
default_worker = {"platform": 'linux',
'arch': '64',
'label': 'linux',
'pool_name': 'linux_pool'}
def make_recipe(name, dependencies=()):
os.makedirs(name)
with open(os.path.join(name, 'meta.yaml'), 'w') as f:
# not valid meta.yaml. Doesn't matter for test.
f.write('package:\n')
f.write(' name: {0}\n'.format(name))
f.write(' version: 1.0\n')
if dependencies:
f.write('requirements:\n')
f.write(' build:\n')
for dep in dependencies:
f.write(' - {0}\n'.format(dep))
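# For example (a sketch), make_recipe('foo', dependencies=('bar',)) writes foo/meta.yaml as:
#
#   package:
#     name: foo
#     version: 1.0
#   requirements:
#     build:
#       - bar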
| 33.32
| 63
| 0.552221
|
| true
| true
|
790674bca8d9e8074241bdbbd9ab9878eeeeffce
| 2,751
|
py
|
Python
|
rova/rova.py
|
synoniem/rova
|
6b94b56748608866308afdfae0399039cb0fdb0f
|
[
"MIT"
] | null | null | null |
rova/rova.py
|
synoniem/rova
|
6b94b56748608866308afdfae0399039cb0fdb0f
|
[
"MIT"
] | null | null | null |
rova/rova.py
|
synoniem/rova
|
6b94b56748608866308afdfae0399039cb0fdb0f
|
[
"MIT"
] | null | null | null |
"""
Wrapper to get ROVA calendar from Rova's API
Acces to this ROVA API has been simplified since version 0.2.1 of this wrapper
Just use https://www.rova.nl/api/waste-calendar/upcoming?postalcode=1000AA&houseNumber=1&addition=&take=5
with a existing combination of postalcode, housenumber, housenumber addition
Be aware that this API has not been officially published by ROVA.
"""
from datetime import datetime
import requests
__title__ = "rova"
__version__ = "0.3.0"
__author__ = "Gido Hakvoort and synoniem <synoniem@hotmail.com>"
__license__ = "MIT"
class Rova:
"""
ROVA class
"""
def __init__(self, zip_code, house_number, house_addition=""):
"""
To fetch the garbage calendar, you need to set a zip_code and house_number.
"""
self.zip_code = zip_code.replace(' ', '')
self.house_number = house_number.strip()
self.house_addition = house_addition.strip()
def is_rova_area(self):
"""
Check if ROVA collects garbage at this address
"""
url = 'https://www.rova.nl/api/waste-calendar/upcoming'
        # request data from the ROVA API and check whether garbage is collected at this address;
        # requesting with a non-existing postal code results in an error message
response = requests.get(url, params={
'postalcode': self.zip_code,
'houseNumber': self.house_number,
'addition': self.house_addition,
'take': '1',
})
response.raise_for_status()
        # an empty JSON array means no upcoming pickups, i.e. not a ROVA address
        return response.text.strip() != '[]'
def get_calendar_items(self, take=5):
"""
Get next pickup date for each garbage types
"""
url = 'https://www.rova.nl/api/waste-calendar/upcoming'
        # request data from the ROVA API; `take` caps the number of items returned (5 by default)
response = requests.get(url, params={
'postalcode': self.zip_code,
'houseNumber': self.house_number,
'addition': self.house_addition,
'take': take,
})
response.raise_for_status()
rova_response = response.json()
        items = []
# add next pickup date for each garbage type
for item in rova_response:
date = datetime.strptime(item["date"], "%Y-%m-%dT%H:%M:%SZ")
date = date.strftime("%Y-%m-%dT%H:%M:%S")
garbage_type = item["garbageTypeCode"].upper()
items.append({
'GarbageTypeCode': garbage_type,
'Date': date
})
return items
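# Minimal usage sketch (the postal code and house number below are placeholders,
# not a verified ROVA address):
#
#   rova = Rova("1000AA", "1")
#   if rova.is_rova_area():
#       for entry in rova.get_calendar_items(take=5):
#           print(entry["Date"], entry["GarbageTypeCode"])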
| 31.261364
| 105
| 0.608142
|
| true
| true
|
7906767174c2238844344663057dc570f09adb70
| 1,660
|
py
|
Python
|
get_data/get_last_price.py
|
lte2000/cwfx
|
dc8daee44cea4b7c0286a7676e4a2829744fee64
|
[
"MIT"
] | null | null | null |
get_data/get_last_price.py
|
lte2000/cwfx
|
dc8daee44cea4b7c0286a7676e4a2829744fee64
|
[
"MIT"
] | null | null | null |
get_data/get_last_price.py
|
lte2000/cwfx
|
dc8daee44cea4b7c0286a7676e4a2829744fee64
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from pytdx.hq import TdxHq_API
from pytdx.params import TDXParams
import pandas as pd
import numpy as np
import re
import csv
import io
import time
import traceback
if __name__ == '__main__':
with io.open(r'..\all_other_data\symbol.txt', 'r', encoding='utf-8') as f:
symbol = [s.strip() for s in f.readlines()]
TDXHQ = TdxHq_API(raise_exception=True, auto_retry=True)
if not TDXHQ.connect('121.14.110.200', 443):
raise Exception("Can't connect.")
#symbol = symbol[0:5]
first_df = True
for code in symbol:
if code[0:2] == 'SH':
market = 1
else:
market = 0
        code = code[2:]
#quote_info = TDXHQ.get_security_quotes([(market, code)])
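        # category 9 selects daily K-line bars in pytdx (KLINE_TYPE_RI_K — an assumption
        # based on pytdx's parameter table); start=0, count=1 fetches only the latest bar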
quote_info = TDXHQ.get_security_bars(9, market, code, 0, 1)
try:
if first_df:
columns = ['code', 'price']
quote_df = pd.DataFrame(columns=columns)
first_df = False
values = [code, quote_info[0]['close']]
quote_df.loc[quote_df.shape[0]] = values
        except Exception as e:
            print("code {}, process bars error, skipped.".format(code))
            print(e)
            print(quote_info)
quote_df = quote_df.rename(columns={
'code':'代码',
'price':'价格',
})
# string_columns = ['代码']
# quote_df[string_columns] = quote_df[string_columns].applymap(
# lambda x: '=""' if type(x) is float else '="' + str(x) + '"')
quote_df.to_csv(r"..\all_other_data\all_last_price.csv", encoding="gbk", quoting=csv.QUOTE_NONE, index=False)
TDXHQ.disconnect()
| 28.62069
| 113
| 0.586747
|
| false
| true
|
7906769bda7c8153d00596b0636a37bfdf58136a
| 1,851
|
py
|
Python
|
src/pytorch-template/old/models/baseline_3D_single.py
|
kaderghal/ADNI_Data_processing
|
454462d3913d77e3bc4de2b9725b456301c7b351
|
[
"MIT"
] | 5
|
2021-01-07T10:11:57.000Z
|
2022-01-16T04:57:51.000Z
|
src/pytorch-template/old/models/baseline_3D_single.py
|
kaderghal/ADNI_Data_processing
|
454462d3913d77e3bc4de2b9725b456301c7b351
|
[
"MIT"
] | null | null | null |
src/pytorch-template/old/models/baseline_3D_single.py
|
kaderghal/ADNI_Data_processing
|
454462d3913d77e3bc4de2b9725b456301c7b351
|
[
"MIT"
] | 1
|
2021-08-05T07:34:16.000Z
|
2021-08-05T07:34:16.000Z
|
import os
import sys
import errno
import random
import pickle
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
from torch.utils.data.dataset import Dataset
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import BatchSampler
from torchvision.datasets import DatasetFolder
from torchvision import transforms
from torch import nn
from torch import optim
import matplotlib.pyplot as plt
#==============================================================================
# Network definition
#==============================================================================
class SE_HIPP_3D_Net(nn.Module):
def __init__(self):
super(SE_HIPP_3D_Net, self).__init__()
self.conv1 = nn.Conv2d(28, 32, kernel_size=4, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(32)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(32, 64, kernel_size=2, stride=1, padding=0)
self.bn2 = nn.BatchNorm2d(64)
self.fc1 = nn.Linear(64*7*7, 120)
self.dropout = nn.Dropout(0.5)
self.fc2 = nn.Linear(120, 2)
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=0)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1)
x = self.bn2(x)
x = self.relu(x)
# print("size", x.size())
x = x.view(-1, self.num_flat_features(x))
x = self.dropout(x)
# print("size", x.size())
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features
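# Shape sketch for the sizes above (assuming a 28x28 spatial input, e.g. a 28x28x28
# hippocampus patch whose depth axis is fed as the 28 input channels):
#   conv1 (k=4, s=1, p=1):   28 -> 27
#   maxpool (k=3, s=2):      27 -> 13
#   conv2 (k=2, s=1):        13 -> 12
#   maxpool (k=2, s=2, p=1): 12 -> 7   => 64 * 7 * 7 features, matching fc1
#
#   net = SE_HIPP_3D_Net()
#   logits = net(torch.randn(4, 28, 28, 28))   # -> torch.Size([4, 2])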
| 27.626866
| 79
| 0.562939
|
| true
| true
|
790676f6e483d94edfb45d6ee9cb0c992f85cf7e
| 364
|
py
|
Python
|
service/scripts/create_superuser.py
|
jellyocean/django-project-starter
|
73a7f01dd22a2cd4f2b9f041e93d9f9da785736f
|
[
"MIT"
] | null | null | null |
service/scripts/create_superuser.py
|
jellyocean/django-project-starter
|
73a7f01dd22a2cd4f2b9f041e93d9f9da785736f
|
[
"MIT"
] | null | null | null |
service/scripts/create_superuser.py
|
jellyocean/django-project-starter
|
73a7f01dd22a2cd4f2b9f041e93d9f9da785736f
|
[
"MIT"
] | 1
|
2020-12-16T16:21:12.000Z
|
2020-12-16T16:21:12.000Z
|
from django.contrib.auth import get_user_model
User = get_user_model()
superuser_username = 'admin'
superuser_email = 'superuser@admin.test'
superuser_password = 'admin_test'
try:
User.objects.get(username=superuser_username)
except User.DoesNotExist:
User.objects.create_superuser(
superuser_username, superuser_email, superuser_password
)
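# The get()/create_superuser() pair makes the script idempotent: re-running it
# neither duplicates the account nor raises once the superuser already exists.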
| 24.266667
| 63
| 0.785714
|
| true
| true
|
7906776168799cb2302e84c58cbeeb52ff286d68
| 251
|
py
|
Python
|
core/__init__.py
|
shijieqin/flatfish
|
2db30ede58493da3b6970518422f6fce53cbc5fb
|
[
"Apache-2.0"
] | 7
|
2018-11-14T11:18:15.000Z
|
2020-02-24T05:59:08.000Z
|
core/__init__.py
|
shijieqin/flatfish
|
2db30ede58493da3b6970518422f6fce53cbc5fb
|
[
"Apache-2.0"
] | 7
|
2019-05-19T11:17:57.000Z
|
2021-06-10T21:14:45.000Z
|
core/__init__.py
|
shijieqin/flatfish
|
2db30ede58493da3b6970518422f6fce53cbc5fb
|
[
"Apache-2.0"
] | 1
|
2018-11-14T11:18:45.000Z
|
2018-11-14T11:18:45.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: Shijie Qin
@license: Apache Licence
@contact: qsj4work@gmail.com
@site: https://shijieqin.github.io
@software: PyCharm
@file: __init__.py
@time: 2018/11/8 3:13 PM
"""
| 17.928571
| 35
| 0.669323
| true
| true
|
|
790677733159ec098d7e4053e1c3c79a29c123ad
| 20,058
|
py
|
Python
|
espnet/bin/asr_train.py
|
Advanjef/espnet
|
47f51a77906c4c44d0da23da04e68676e4b931ab
|
[
"Apache-2.0"
] | 4
|
2021-06-18T01:57:08.000Z
|
2021-12-23T05:26:02.000Z
|
espnet/bin/asr_train.py
|
Advanjef/espnet
|
47f51a77906c4c44d0da23da04e68676e4b931ab
|
[
"Apache-2.0"
] | null | null | null |
espnet/bin/asr_train.py
|
Advanjef/espnet
|
47f51a77906c4c44d0da23da04e68676e4b931ab
|
[
"Apache-2.0"
] | 1
|
2021-07-19T07:35:46.000Z
|
2021-07-19T07:35:46.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Automatic speech recognition model training script."""
import logging
import os
import random
import subprocess
import sys
from distutils.version import LooseVersion
import configargparse
import numpy as np
import torch
from espnet.utils.cli_utils import strtobool
from espnet.utils.training.batchfy import BATCH_COUNT_CHOICES
is_torch_1_2_plus = LooseVersion(torch.__version__) >= LooseVersion("1.2")
# NOTE: you need this func to generate our sphinx doc
def get_parser(parser=None, required=True):
"""Get default arguments."""
if parser is None:
parser = configargparse.ArgumentParser(
description="Train an automatic speech recognition (ASR) model on one CPU, "
"one or multiple GPUs",
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
)
# general configuration
parser.add("--config", is_config_file=True, help="config file path")
parser.add(
"--config2",
is_config_file=True,
help="second config file path that overwrites the settings in `--config`.",
)
parser.add(
"--config3",
is_config_file=True,
help="third config file path that overwrites the settings in "
"`--config` and `--config2`.",
)
parser.add_argument(
"--ngpu",
default=None,
type=int,
help="Number of GPUs. If not given, use all visible devices",
)
parser.add_argument(
"--train-dtype",
default="float32",
choices=["float16", "float32", "float64", "O0", "O1", "O2", "O3"],
help="Data type for training (only pytorch backend). "
"O0,O1,.. flags require apex. "
"See https://nvidia.github.io/apex/amp.html#opt-levels",
)
parser.add_argument(
"--backend",
default="chainer",
type=str,
choices=["chainer", "pytorch"],
help="Backend library",
)
parser.add_argument(
"--outdir", type=str, required=required, help="Output directory"
)
parser.add_argument("--debugmode", default=1, type=int, help="Debugmode")
parser.add_argument("--dict", required=required, help="Dictionary")
parser.add_argument("--seed", default=1, type=int, help="Random seed")
parser.add_argument("--debugdir", type=str, help="Output directory for debugging")
parser.add_argument(
"--resume",
"-r",
default="",
nargs="?",
help="Resume the training from snapshot",
)
parser.add_argument(
"--minibatches",
"-N",
type=int,
default="-1",
help="Process only N minibatches (for debug)",
)
parser.add_argument("--verbose", "-V", default=0, type=int, help="Verbose option")
parser.add_argument(
"--tensorboard-dir",
default=None,
type=str,
nargs="?",
help="Tensorboard log dir path",
)
parser.add_argument(
"--report-interval-iters",
default=100,
type=int,
help="Report interval iterations",
)
parser.add_argument(
"--save-interval-iters",
default=0,
type=int,
help="Save snapshot interval iterations",
)
# task related
parser.add_argument(
"--train-json",
type=str,
default=None,
help="Filename of train label data (json)",
)
parser.add_argument(
"--valid-json",
type=str,
default=None,
help="Filename of validation label data (json)",
)
# network architecture
parser.add_argument(
"--model-module",
type=str,
default=None,
help="model defined module (default: espnet.nets.xxx_backend.e2e_asr:E2E)",
)
# encoder
parser.add_argument(
"--num-encs", default=1, type=int, help="Number of encoders in the model."
)
# loss related
parser.add_argument(
"--ctc_type",
default="warpctc",
type=str,
choices=["builtin", "warpctc"],
help="Type of CTC implementation to calculate loss.",
)
parser.add_argument(
"--mtlalpha",
default=0.5,
type=float,
help="Multitask learning coefficient, "
"alpha: alpha*ctc_loss + (1-alpha)*att_loss ",
)
parser.add_argument(
"--lsm-weight", default=0.0, type=float, help="Label smoothing weight"
)
# recognition options to compute CER/WER
parser.add_argument(
"--report-cer",
default=False,
action="store_true",
help="Compute CER on development set",
)
parser.add_argument(
"--report-wer",
default=False,
action="store_true",
help="Compute WER on development set",
)
parser.add_argument("--nbest", type=int, default=1, help="Output N-best hypotheses")
parser.add_argument("--beam-size", type=int, default=4, help="Beam size")
parser.add_argument("--penalty", default=0.0, type=float, help="Incertion penalty")
parser.add_argument(
"--maxlenratio",
default=0.0,
type=float,
help="""Input length ratio to obtain max output length.
        If maxlenratio=0.0 (default), it uses an end-detect function
        to automatically find maximum hypothesis lengths""",
)
parser.add_argument(
"--minlenratio",
default=0.0,
type=float,
help="Input length ratio to obtain min output length",
)
parser.add_argument(
"--ctc-weight", default=0.3, type=float, help="CTC weight in joint decoding"
)
parser.add_argument(
"--rnnlm", type=str, default=None, help="RNNLM model file to read"
)
parser.add_argument(
"--rnnlm-conf", type=str, default=None, help="RNNLM model config file to read"
)
parser.add_argument("--lm-weight", default=0.1, type=float, help="RNNLM weight.")
parser.add_argument("--sym-space", default="<space>", type=str, help="Space symbol")
parser.add_argument("--sym-blank", default="<blank>", type=str, help="Blank symbol")
# minibatch related
parser.add_argument(
"--sortagrad",
default=0,
type=int,
nargs="?",
help="How many epochs to use sortagrad for. 0 = deactivated, -1 = all epochs",
)
parser.add_argument(
"--batch-count",
default="auto",
choices=BATCH_COUNT_CHOICES,
help="How to count batch_size. "
"The default (auto) will find how to count by args.",
)
parser.add_argument(
"--batch-size",
"--batch-seqs",
"-b",
default=0,
type=int,
help="Maximum seqs in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-bins",
default=0,
type=int,
help="Maximum bins in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-frames-in",
default=0,
type=int,
help="Maximum input frames in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-frames-out",
default=0,
type=int,
help="Maximum output frames in a minibatch (0 to disable)",
)
parser.add_argument(
"--batch-frames-inout",
default=0,
type=int,
help="Maximum input+output frames in a minibatch (0 to disable)",
)
parser.add_argument(
"--maxlen-in",
"--batch-seq-maxlen-in",
default=800,
type=int,
metavar="ML",
help="When --batch-count=seq, "
"batch size is reduced if the input sequence length > ML.",
)
parser.add_argument(
"--maxlen-out",
"--batch-seq-maxlen-out",
default=150,
type=int,
metavar="ML",
help="When --batch-count=seq, "
"batch size is reduced if the output sequence length > ML",
)
parser.add_argument(
"--n-iter-processes",
default=0,
type=int,
help="Number of processes of iterator",
)
parser.add_argument(
"--preprocess-conf",
type=str,
default=None,
nargs="?",
help="The configuration file for the pre-processing",
)
# optimization related
parser.add_argument(
"--opt",
default="adadelta",
type=str,
choices=["adadelta", "adam", "noam"],
help="Optimizer",
)
parser.add_argument(
"--accum-grad", default=1, type=int, help="Number of gradient accumuration"
)
parser.add_argument(
"--eps", default=1e-8, type=float, help="Epsilon constant for optimizer"
)
parser.add_argument(
"--eps-decay", default=0.01, type=float, help="Decaying ratio of epsilon"
)
parser.add_argument(
"--weight-decay", default=0.0, type=float, help="Weight decay ratio"
)
parser.add_argument(
"--criterion",
default="acc",
type=str,
choices=["loss", "acc"],
help="Criterion to perform epsilon decay",
)
parser.add_argument(
"--threshold", default=1e-4, type=float, help="Threshold to stop iteration"
)
parser.add_argument(
"--epochs", "-e", default=30, type=int, help="Maximum number of epochs"
)
parser.add_argument(
"--early-stop-criterion",
default="validation/main/acc",
type=str,
nargs="?",
help="Value to monitor to trigger an early stopping of the training",
)
parser.add_argument(
"--patience",
default=3,
type=int,
nargs="?",
help="Number of epochs to wait without improvement "
"before stopping the training",
)
parser.add_argument(
"--grad-clip", default=5, type=float, help="Gradient norm threshold to clip"
)
parser.add_argument(
"--num-save-attention",
default=3,
type=int,
help="Number of samples of attention to be saved",
)
parser.add_argument(
"--num-save-ctc",
default=3,
type=int,
help="Number of samples of CTC probability to be saved",
)
parser.add_argument(
"--grad-noise",
type=strtobool,
default=False,
help="The flag to switch to use noise injection to gradients during training",
)
# asr_mix related
parser.add_argument(
"--num-spkrs",
default=1,
type=int,
choices=[1, 2],
help="Number of speakers in the speech.",
)
# decoder related
parser.add_argument(
"--context-residual",
default=False,
type=strtobool,
nargs="?",
help="The flag to switch to use context vector residual in the decoder network",
)
# finetuning related
parser.add_argument(
"--enc-init",
default=None,
type=str,
help="Pre-trained ASR model to initialize encoder.",
)
parser.add_argument(
"--enc-init-mods",
default="enc.enc.",
type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
help="List of encoder modules to initialize, separated by a comma.",
)
parser.add_argument(
"--dec-init",
default=None,
type=str,
help="Pre-trained ASR, MT or LM model to initialize decoder.",
)
parser.add_argument(
"--dec-init-mods",
default="att., dec.",
type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
help="List of decoder modules to initialize, separated by a comma.",
)
parser.add_argument(
"--freeze-mods",
default=None,
type=lambda s: [str(mod) for mod in s.split(",") if s != ""],
help="List of modules to freeze, separated by a comma.",
)
# front end related
parser.add_argument(
"--use-frontend",
type=strtobool,
default=False,
help="The flag to switch to use frontend system.",
)
# WPE related
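    # (WPE = Weighted Prediction Error, a linear-prediction based
    # dereverberation method; the options below configure the neural mask
    # estimator it uses.)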
parser.add_argument(
"--use-wpe",
type=strtobool,
default=False,
help="Apply Weighted Prediction Error",
)
parser.add_argument(
"--wtype",
default="blstmp",
type=str,
choices=[
"lstm",
"blstm",
"lstmp",
"blstmp",
"vgglstmp",
"vggblstmp",
"vgglstm",
"vggblstm",
"gru",
"bgru",
"grup",
"bgrup",
"vgggrup",
"vggbgrup",
"vgggru",
"vggbgru",
],
help="Type of encoder network architecture "
"of the mask estimator for WPE. "
"",
)
parser.add_argument("--wlayers", type=int, default=2, help="")
parser.add_argument("--wunits", type=int, default=300, help="")
parser.add_argument("--wprojs", type=int, default=300, help="")
parser.add_argument("--wdropout-rate", type=float, default=0.0, help="")
parser.add_argument("--wpe-taps", type=int, default=5, help="")
parser.add_argument("--wpe-delay", type=int, default=3, help="")
parser.add_argument(
"--use-dnn-mask-for-wpe",
type=strtobool,
default=False,
help="Use DNN to estimate the power spectrogram. "
"This option is experimental.",
)
# Beamformer related
parser.add_argument("--use-beamformer", type=strtobool, default=True, help="")
parser.add_argument(
"--btype",
default="blstmp",
type=str,
choices=[
"lstm",
"blstm",
"lstmp",
"blstmp",
"vgglstmp",
"vggblstmp",
"vgglstm",
"vggblstm",
"gru",
"bgru",
"grup",
"bgrup",
"vgggrup",
"vggbgrup",
"vgggru",
"vggbgru",
],
help="Type of encoder network architecture "
"of the mask estimator for Beamformer.",
)
parser.add_argument("--blayers", type=int, default=2, help="")
parser.add_argument("--bunits", type=int, default=300, help="")
parser.add_argument("--bprojs", type=int, default=300, help="")
parser.add_argument("--badim", type=int, default=320, help="")
parser.add_argument(
"--bnmask",
type=int,
default=2,
help="Number of beamforming masks, " "default is 2 for [speech, noise].",
)
parser.add_argument(
"--ref-channel",
type=int,
default=-1,
help="The reference channel used for beamformer. "
"By default, the channel is estimated by DNN.",
)
parser.add_argument("--bdropout-rate", type=float, default=0.0, help="")
# Feature transform: Normalization
parser.add_argument(
"--stats-file",
type=str,
default=None,
help="The stats file for the feature normalization",
)
parser.add_argument(
"--apply-uttmvn",
type=strtobool,
default=True,
help="Apply utterance level mean " "variance normalization.",
)
parser.add_argument("--uttmvn-norm-means", type=strtobool, default=True, help="")
parser.add_argument("--uttmvn-norm-vars", type=strtobool, default=False, help="")
# Feature transform: Fbank
parser.add_argument(
"--fbank-fs",
type=int,
default=16000,
help="The sample frequency used for " "the mel-fbank creation.",
)
parser.add_argument(
"--n-mels", type=int, default=80, help="The number of mel-frequency bins."
)
parser.add_argument("--fbank-fmin", type=float, default=0.0, help="")
parser.add_argument("--fbank-fmax", type=float, default=None, help="")
return parser
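# Usage sketch (hypothetical paths): values from later config files override
# earlier ones, and explicit command-line flags override every config file:
#   python <this script> --config conf/train.yaml --config2 conf/tuning.yaml --ngpu 1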
def main(cmd_args):
"""Run the main training function."""
parser = get_parser()
args, _ = parser.parse_known_args(cmd_args)
if args.backend == "chainer" and args.train_dtype != "float32":
raise NotImplementedError(
f"chainer backend does not support --train-dtype {args.train_dtype}."
"Use --dtype float32."
)
if args.ngpu == 0 and args.train_dtype in ("O0", "O1", "O2", "O3", "float16"):
raise ValueError(
f"--train-dtype {args.train_dtype} does not support the CPU backend."
)
from espnet.utils.dynamic_import import dynamic_import
if args.model_module is None:
model_module = "espnet.nets." + args.backend + "_backend.e2e_asr:E2E"
else:
model_module = args.model_module
model_class = dynamic_import(model_module)
model_class.add_arguments(parser)
args = parser.parse_args(cmd_args)
args.model_module = model_module
if "chainer_backend" in args.model_module:
args.backend = "chainer"
if "pytorch_backend" in args.model_module:
args.backend = "pytorch"
# logging info
if args.verbose > 0:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
else:
logging.basicConfig(
level=logging.WARN,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
logging.warning("Skip DEBUG/INFO messages")
# If --ngpu is not given,
# 1. if CUDA_VISIBLE_DEVICES is set, all visible devices
# 2. if nvidia-smi exists, use all devices
# 3. else ngpu=0
if args.ngpu is None:
cvd = os.environ.get("CUDA_VISIBLE_DEVICES")
if cvd is not None:
ngpu = len(cvd.split(","))
else:
logging.warning("CUDA_VISIBLE_DEVICES is not set.")
try:
p = subprocess.run(
["nvidia-smi", "-L"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except (subprocess.CalledProcessError, FileNotFoundError):
ngpu = 0
else:
                # nvidia-smi -L prints one GPU per line on stdout
                ngpu = len(p.stdout.decode().split("\n")) - 1
else:
if is_torch_1_2_plus and args.ngpu != 1:
logging.debug(
"There are some bugs with multi-GPU processing in PyTorch 1.2+"
+ " (see https://github.com/pytorch/pytorch/issues/21108)"
)
ngpu = args.ngpu
logging.info(f"ngpu: {ngpu}")
# display PYTHONPATH
logging.info("python path = " + os.environ.get("PYTHONPATH", "(None)"))
# set random seed
logging.info("random seed = %d" % args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
# load dictionary for debug log
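    # Each line of the dict file is expected to look like "<token> <index>",
    # e.g. "a 1"; only the token (the first field) is kept below.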
if args.dict is not None:
with open(args.dict, "rb") as f:
dictionary = f.readlines()
char_list = [entry.decode("utf-8").split(" ")[0] for entry in dictionary]
char_list.insert(0, "<blank>")
char_list.append("<eos>")
# for non-autoregressive training using Transformer
if hasattr(args, "decoder_mode") and args.decoder_mode == "maskctc":
char_list.append("<mask>")
args.char_list = char_list
else:
args.char_list = None
# train
logging.info("backend = " + args.backend)
if args.num_spkrs == 1:
if args.backend == "chainer":
from espnet.asr.chainer_backend.asr import train
train(args)
elif args.backend == "pytorch":
from espnet.asr.pytorch_backend.asr import train
train(args)
else:
raise ValueError("Only chainer and pytorch are supported.")
else:
# FIXME(kamo): Support --model-module
if args.backend == "pytorch":
from espnet.asr.pytorch_backend.asr_mix import train
train(args)
else:
raise ValueError("Only pytorch is supported.")
if __name__ == "__main__":
main(sys.argv[1:])
| 31.438871
| 88
| 0.577376
|
790677a74eaad2595faf06747accff6c5405ae03
| 23
|
py
|
Python
|
rl/action.py
|
nickswalker/counterpoint-reinforcement-learning
|
1d0481bd2c9976533175339e411a41f4eb1650aa
|
[
"MIT"
] | 1
|
2016-12-09T18:29:30.000Z
|
2016-12-09T18:29:30.000Z
|
rl/action.py
|
nickswalker/counterpoint-reinforcement-learning
|
1d0481bd2c9976533175339e411a41f4eb1650aa
|
[
"MIT"
] | null | null | null |
rl/action.py
|
nickswalker/counterpoint-reinforcement-learning
|
1d0481bd2c9976533175339e411a41f4eb1650aa
|
[
"MIT"
] | null | null | null |
class Action:
    pass
| 7.666667
| 15
| 0.478261
|
790677bc6463dfcc7132724f898a5d50b3702bae
| 13,300
|
py
|
Python
|
addon.py
|
jonjomckay/plugin.audio.bbcsounds
|
c4d9b3296a7ad3f2f7648bfea4a849ff65fd481b
|
[
"MIT"
] | 7
|
2020-05-31T13:13:38.000Z
|
2021-09-07T12:06:42.000Z
|
addon.py
|
jonjomckay/plugin.audio.bbcsounds
|
c4d9b3296a7ad3f2f7648bfea4a849ff65fd481b
|
[
"MIT"
] | 6
|
2020-04-12T21:11:45.000Z
|
2021-02-17T20:58:57.000Z
|
addon.py
|
jonjomckay/plugin.audio.bbcsounds
|
c4d9b3296a7ad3f2f7648bfea4a849ff65fd481b
|
[
"MIT"
] | 1
|
2020-09-28T15:05:24.000Z
|
2020-09-28T15:05:24.000Z
|
import datetime
import json
import os
import sys
import urllib
import urlparse
from collections import OrderedDict
from time import mktime
import dateutil.parser
import feedparser
import requests
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
from bs4 import BeautifulSoup
stations = {
'p00fzl68': {'name': 'BBC Asian Network', 'image': 'bbc_asian_network_colour'},
'p00fzl78': {'name': 'BBC Coventry & Warwickshire', 'image': 'bbc_radio_coventry_warwickshire_colour'},
'p00fzl7f': {'name': 'BBC Essex', 'image': 'bbc_radio_essex_colour'},
'p00fzl7q': {'name': 'BBC Hereford & Worcester', 'image': 'bbc_radio_hereford_worcester_colour'},
'p00fzl82': {'name': 'BBC Newcastle', 'image': 'bbc_radio_newcastle_colour'},
'p00fzl86': {'name': 'BBC Radio 1', 'image': 'bbc_radio_one_colour'},
'p00fzl64': {'name': 'BBC Radio 1Xtra', 'image': 'bbc_1xtra_colour'},
'p00fzl8v': {'name': 'BBC Radio 2', 'image': 'bbc_radio_two_colour'},
'p00fzl8t': {'name': 'BBC Radio 3', 'image': 'bbc_radio_three_colour'},
'p00fzl7j': {'name': 'BBC Radio 4 FM', 'image': 'bbc_radio_fourfm_colour'},
'p00fzl7k': {'name': 'BBC Radio 4 LW', 'image': 'bbc_radio_four_colour'},
'p00fzl7l': {'name': 'BBC Radio 4 Extra', 'image': 'bbc_radio_four_extra_colour'},
'p00fzl7g': {'name': 'BBC Radio 5 live', 'image': 'bbc_radio_five_live_colour'},
'p00fzl7h': {'name': 'BBC Radio 5 live sports extra', 'image': 'bbc_radio_five_live_sports_extra_colour'},
'p00fzl65': {'name': 'BBC Radio 6 Music', 'image': 'bbc_6music_colour'},
'p00fzl74': {'name': 'BBC Radio Berkshire', 'image': 'bbc_radio_berkshire_colour'},
'p00fzl75': {'name': 'BBC Radio Bristol', 'image': 'bbc_radio_bristol_colour'},
'p00fzl76': {'name': 'BBC Radio Cambridgeshire', 'image': 'bbc_radio_cambridge_colour'},
'p00fzl77': {'name': 'BBC Radio Cornwall', 'image': 'bbc_radio_cornwall_colour'},
'p00fzl79': {'name': 'BBC Radio Cumbria', 'image': 'bbc_radio_cumbria_colour'},
'p00fzl7b': {'name': 'BBC Radio Cymru', 'image': 'bbc_radio_cymru_colour'},
'p00fzl7c': {'name': 'BBC Radio Derby', 'image': 'bbc_radio_derby_colour'},
'p00fzl7d': {'name': 'BBC Radio Devon', 'image': 'bbc_radio_devon_colour'},
'p00fzl7m': {'name': 'BBC Radio Foyle', 'image': 'bbc_radio_foyle_colour'},
'p00fzl7n': {'name': 'BBC Radio Gloucestershire', 'image': 'bbc_radio_gloucestershire_colour'},
'p00fzl7p': {'name': 'BBC Radio Guernsey', 'image': 'bbc_radio_guernsey_colour'},
'p00fzl7r': {'name': 'BBC Radio Humberside', 'image': 'bbc_radio_humberside_colour'},
'p00fzl7s': {'name': 'BBC Radio Jersey', 'image': 'bbc_radio_jersey_colour'},
'p00fzl7t': {'name': 'BBC Radio Kent', 'image': 'bbc_radio_kent_colour'},
'p00fzl7v': {'name': 'BBC Radio Lancashire', 'image': 'bbc_radio_lancashire_colour'},
'p00fzl7w': {'name': 'BBC Radio Leeds', 'image': 'bbc_radio_leeds_colour'},
'p00fzl7x': {'name': 'BBC Radio Leicester', 'image': 'bbc_radio_leicester_colour'},
'p00fzl7y': {'name': 'BBC Radio Lincolnshire', 'image': 'bbc_radio_lincolnshire_colour'},
'p00fzl6f': {'name': 'BBC Radio London', 'image': 'bbc_london_colour'},
'p00fzl7z': {'name': 'BBC Radio Manchester', 'image': 'bbc_radio_manchester_colour'},
'p00fzl80': {'name': 'BBC Radio Merseyside', 'image': 'bbc_radio_merseyside_colour'},
'p00fzl81': {'name': 'BBC Radio Nan Gaidheal', 'image': 'bbc_radio_nan_gaidheal_colour'},
'p00fzl83': {'name': 'BBC Radio Norfolk', 'image': 'bbc_radio_norfolk_colour'},
'p00fzl84': {'name': 'BBC Radio Northampton', 'image': 'bbc_radio_northampton_colour'},
'p00fzl85': {'name': 'BBC Radio Nottingham', 'image': 'bbc_radio_nottingham_colour'},
'p00fzl8c': {'name': 'BBC Radio Oxford', 'image': 'bbc_radio_oxford_colour'},
'p00fzl8d': {'name': 'BBC Radio Scotland (FM)', 'image': 'bbc_radio_scotland_fm_colour'},
'p00fzl8g': {'name': 'BBC Radio Scotland (MW)', 'image': 'bbc_radio_scotland_colour'},
'p00fzl8b': {'name': 'BBC Radio Scotland (Orkney)', 'image': 'bbc_radio_scotland_colour'},
'p00fzl8j': {'name': 'BBC Radio Scotland (Shetland)', 'image': 'bbc_radio_scotland_colour'},
'p00fzl8h': {'name': 'BBC Radio Sheffield', 'image': 'bbc_radio_sheffield_colour'},
'p00fzl8k': {'name': 'BBC Radio Shropshire', 'image': 'bbc_radio_shropshire_colour'},
'p00fzl8l': {'name': 'BBC Radio Solent', 'image': 'bbc_radio_solent_colour'},
'p00fzl8n': {'name': 'BBC Radio Stoke', 'image': 'bbc_radio_stoke_colour'},
'p00fzl8p': {'name': 'BBC Radio Suffolk', 'image': 'bbc_radio_suffolk_colour'},
'p00fzl8w': {'name': 'BBC Radio Ulster', 'image': 'bbc_radio_ulster_colour'},
'p00fzl8y': {'name': 'BBC Radio Wales (FM)', 'image': 'bbc_radio_wales_fm_colour'},
'p00fzl8x': {'name': 'BBC Radio Wales (LW)', 'image': 'bbc_radio_wales_colour'},
'p00fzl90': {'name': 'BBC Radio York', 'image': 'bbc_radio_york_colour'},
'p00fzl8m': {'name': 'BBC Somerset', 'image': 'bbc_radio_somerset_sound_colour'},
'p00fzl8q': {'name': 'BBC Surrey', 'image': 'bbc_radio_surrey_colour'},
'p00fzl8r': {'name': 'BBC Sussex', 'image': 'bbc_radio_sussex_colour'},
'p00fzl93': {'name': 'BBC Tees', 'image': 'bbc_tees_colour'},
'p00fzl96': {'name': 'BBC Three Counties Radio', 'image': 'bbc_three_counties_radio_colour'},
'p00fzl8z': {'name': 'BBC Wiltshire', 'image': 'bbc_radio_wiltshire_colour'},
'p00fzl9f': {'name': 'BBC WM 95.6', 'image': 'bbc_wm_colour'},
'p02zbmb3': {'name': 'BBC World Service', 'image': 'bbc_world_service_colour'},
'p02jf21y': {'name': 'CBeebies Radio', 'image': 'cbeebies_radio_colour'},
}
stations_ordered = OrderedDict(sorted(stations.items(), key=lambda x: x[1]['name']))
def get_page(url):
# download the source HTML for the page using requests
# and parse the page using BeautifulSoup
return BeautifulSoup(requests.get(url).text, 'html.parser')
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
# Parse the arguments passed to the addon by Kodi
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = dict(urlparse.parse_qsl(sys.argv[2][1:]))
xbmcplugin.setContent(addon_handle, 'audio')
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
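# For illustration (the plugin:// base comes from sys.argv[0] at runtime):
#   build_url({'mode': 'station', 'pid': 'p00fzl86'})
#   -> 'plugin://plugin.audio.bbcsounds/?mode=station&pid=p00fzl86'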
def mode_default():
categories = {
'podcasts': 'Podcasts',
'stations': 'Stations'
}
for mode, category in categories.items():
url = build_url({'mode': mode})
li = xbmcgui.ListItem(category)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def mode_episode(pid):
programme = requests.get('https://www.bbc.co.uk/programmes/' + pid + '.json')
programme_json = programme.json()["programme"]
picked_url = None
for version in programme_json["versions"]:
playlist = requests.get(
'https://open.live.bbc.co.uk/mediaselector/6/select/version/2.0/mediaset/iptv-all/vpid/' + version["pid"] + '/format/json')
playlist_json = playlist.json()
if "media" not in playlist_json:
# TODO
continue
# Filter by only audio items, and order with the highest bitrate first
audio_items = [item for item in playlist_json['media'] if item['kind'] == 'audio']
audio_items.sort(key=lambda x: x['bitrate'], reverse=True)
xbmc.log('Found {0} audio items for the programme version {1}'.format(len(audio_items), version['pid']), level=xbmc.LOGNOTICE)
        # Pick the first stream available for the highest bitrate item
        if not audio_items:
            # No audio streams were returned for this version; try the next one
            continue
        picked_stream = audio_items[0]
        picked_url = picked_stream["connection"][1]["href"]
        xbmc.log('Picked the {0} stream with the bitrate {1}'.format(picked_stream['encoding'], picked_stream['bitrate']), level=xbmc.LOGNOTICE)
        play_item = xbmcgui.ListItem(path=picked_url)
        play_item.setArt({
            'thumb': 'https://ichef.bbci.co.uk/images/ic/480xn/' + programme_json["image"]["pid"] + '.jpg',
            'icon': 'https://ichef.bbci.co.uk/images/ic/480xn/' + programme_json["image"]["pid"] + '.jpg'
        })
        play_item.setInfo('music', {
            'title': programme_json["display_title"]["title"],
            'artist': programme_json["display_title"]["subtitle"],
            'album': programme_json["ownership"]["service"]["title"],
            'comment': programme_json["short_synopsis"]
        })
        xbmcplugin.setResolvedUrl(addon_handle, True, listitem=play_item)
        # A stream has been resolved, so stop inspecting the remaining versions
        break
if picked_url is None:
xbmcgui.Dialog().notification(__addonname__, "Episode not available to stream", icon=xbmcgui.NOTIFICATION_ERROR)
def mode_podcasts():
podcasts = requests.get('https://www.bbc.co.uk/podcasts.json')
podcasts_json = podcasts.json()["podcasts"]
# Sort the podcasts by title
podcasts_ordered = sorted(podcasts_json, key=lambda x: x["title"])
for podcast in podcasts_ordered:
url = build_url({'mode': 'podcast', 'pid': podcast["shortTitle"]})
li = xbmcgui.ListItem(podcast["title"])
li.setInfo('video', {'plot': podcast["description"]})
if "imageUrl" in podcast:
li.setThumbnailImage(podcast["imageUrl"].replace('{recipe}', '624x624'))
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def mode_podcast(pid):
podcast = feedparser.parse('https://podcasts.files.bbci.co.uk/' + pid + '.rss')
image_url = None
if "image" in podcast.feed:
image_url = podcast.feed.image.url
for entry in podcast.entries:
entry_pid = entry.ppg_canonical.split('/')
entry_date = datetime.datetime.fromtimestamp(mktime(entry.published_parsed)).strftime('%Y-%m-%d')
entry_title = entry_date + ": " + entry.title
if len(entry_pid) > 2:
url = build_url({'mode': 'episode', 'pid': entry_pid[2]})
li = xbmcgui.ListItem(entry_title)
li.setInfo('video', {'plot': entry.description})
            if image_url is not None:
                li.setThumbnailImage(image_url)
li.setProperty('IsPlayable', 'true')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
else:
xbmc.log('No pid could be found for the item at ' + entry.link, level=xbmc.LOGERROR)
xbmcplugin.endOfDirectory(addon_handle)
def mode_stations():
for pid, station in stations_ordered.items():
url = build_url({'mode': 'station', 'pid': pid})
li = xbmcgui.ListItem(station['name'])
li.setThumbnailImage(xbmc.translatePath(os.path.join(__addon__.getAddonInfo('path'), 'resources', station['image'] + '.png')))
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def mode_station(pid):
base = datetime.datetime.today()
# Create a range of the last 30 days
for delta in range(30):
date = base - datetime.timedelta(days=delta)
year = '%04d' % date.year
month = '%02d' % date.month
day = '%02d' % date.day
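        # e.g. 2020-06-05 becomes year='2020', month='06', day='05', matching
        # the /schedules/<pid>/<year>/<month>/<day> pages fetched later in
        # mode_station_date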
url = build_url({'mode': 'station_date', 'pid': pid, 'year': year, 'month': month, 'day': day})
list_item = xbmcgui.ListItem(date.strftime('%Y-%m-%d (%A)'))
xbmcplugin.addDirectoryItem(addon_handle, url, list_item, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def mode_station_date(pid, year, month, day):
# Load the schedules for the station
schedule = get_page('https://www.bbc.co.uk/schedules/' + pid + '/' + year + '/' + month + '/' + day)
result = None
for tag in schedule.find_all('script', type='application/ld+json'):
if 'RadioEpisode' in tag.contents[0]:
result = json.loads(tag.contents[0])
if result is None:
xbmcgui.Dialog().notification(__addonname__, "Something went wrong parsing the station's schedule",
icon=xbmcgui.NOTIFICATION_ERROR)
return
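    # Each entry in result["@graph"] is a schema.org RadioEpisode; the fields
    # used below look roughly like:
    #   {"identifier": "<pid>", "name": "...", "description": "...",
    #    "image": "<url>", "publication": {"startDate": "<ISO 8601 date>"},
    #    "partOfSeries": {"name": "..."}}   # partOfSeries is optional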
for episode in result["@graph"]:
date = dateutil.parser.parse(episode["publication"]["startDate"])
time = date.strftime('%Y-%m-%d, %H:%M')
if "partOfSeries" in episode:
title = time + ": " + episode["partOfSeries"]["name"] + " - " + episode["name"]
else:
title = time + ": " + episode["name"]
url = build_url({'mode': 'episode', 'pid': episode["identifier"]})
list_item = xbmcgui.ListItem(title)
list_item.setInfo('video', {'plot': episode["description"]})
list_item.setPath(url)
list_item.setProperty('IsPlayable', "true")
list_item.setThumbnailImage(episode["image"])
xbmcplugin.addDirectoryItem(addon_handle, url, list_item, isFolder=False)
xbmcplugin.endOfDirectory(addon_handle)
mode = args.get('mode', None)
if mode is None:
mode_default()
elif mode == 'episode':
mode_episode(args['pid'])
elif mode == 'podcasts':
mode_podcasts()
elif mode == 'podcast':
mode_podcast(args['pid'])
elif mode == 'stations':
mode_stations()
elif mode == 'station':
mode_station(args['pid'])
elif mode == 'station_date':
mode_station_date(args['pid'], args['year'], args['month'], args['day'])
| 44.781145
| 144
| 0.656917
|
790677da85f3962f3021eb3ed6ec45da968b32e4
| 9,211
|
py
|
Python
|
components/aws/sagemaker/tests/unit_tests/tests/test_ground_truth.py
|
Intellicode/pipelines
|
f1d90407a8a2f56db11199c9c73e6df6c4a8b093
|
[
"Apache-2.0"
] | null | null | null |
components/aws/sagemaker/tests/unit_tests/tests/test_ground_truth.py
|
Intellicode/pipelines
|
f1d90407a8a2f56db11199c9c73e6df6c4a8b093
|
[
"Apache-2.0"
] | null | null | null |
components/aws/sagemaker/tests/unit_tests/tests/test_ground_truth.py
|
Intellicode/pipelines
|
f1d90407a8a2f56db11199c9c73e6df6c4a8b093
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from unittest.mock import patch, call, Mock, MagicMock, mock_open
from botocore.exceptions import ClientError
from ground_truth.src import ground_truth
from common import _utils
required_args = [
'--region', 'us-west-2',
'--role', 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'--job_name', 'test_job',
'--manifest_location', 's3://fake-bucket/manifest',
'--output_location', 's3://fake-bucket/output',
'--task_type', 'fake-task',
'--worker_type', 'fake_worker',
'--ui_template', 's3://fake-bucket/ui_template',
'--title', 'fake-image-labelling-work',
'--description', 'fake job',
'--num_workers_per_object', '1',
'--time_limit', '180',
]
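# All of the values above are placeholders; the tests below only assert on the
# shape of the requests the component builds, not on real AWS resources.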
class GroundTruthTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
parser = ground_truth.create_parser()
cls.parser = parser
def test_create_parser(self):
self.assertIsNotNone(self.parser)
def test_main(self):
# Mock out all of utils except parser
ground_truth._utils = MagicMock()
ground_truth._utils.add_default_client_arguments = _utils.add_default_client_arguments
# Set some static returns
ground_truth._utils.get_labeling_job_outputs.return_value = ('s3://fake-bucket/output', 'arn:aws:sagemaker:us-east-1:999999999999:labeling-job')
with patch('builtins.open', mock_open()) as file_open:
ground_truth.main(required_args)
# Check if correct requests were created and triggered
ground_truth._utils.create_labeling_job.assert_called()
ground_truth._utils.wait_for_labeling_job.assert_called()
ground_truth._utils.get_labeling_job_outputs.assert_called()
# Check the file outputs
file_open.assert_has_calls([
call('/tmp/output_manifest_location.txt', 'w'),
call('/tmp/active_learning_model_arn.txt', 'w')
], any_order=True)
file_open().write.assert_has_calls([
call('s3://fake-bucket/output'),
call('arn:aws:sagemaker:us-east-1:999999999999:labeling-job')
], any_order=False)
def test_ground_truth(self):
mock_client = MagicMock()
mock_args = self.parser.parse_args(required_args)
response = _utils.create_labeling_job(mock_client, vars(mock_args))
mock_client.create_labeling_job.assert_called_once_with(
HumanTaskConfig={'WorkteamArn': None, 'UiConfig': {'UiTemplateS3Uri': 's3://fake-bucket/ui_template'},
'PreHumanTaskLambdaArn': '', 'TaskTitle': 'fake-image-labelling-work',
'TaskDescription': 'fake job', 'NumberOfHumanWorkersPerDataObject': 1,
'TaskTimeLimitInSeconds': 180,
'AnnotationConsolidationConfig': {'AnnotationConsolidationLambdaArn': ''}},
InputConfig={'DataSource': {'S3DataSource': {'ManifestS3Uri': 's3://fake-bucket/manifest'}}},
LabelAttributeName='test_job', LabelingJobName='test_job',
OutputConfig={'S3OutputPath': 's3://fake-bucket/output', 'KmsKeyId': ''},
RoleArn='arn:aws:iam::123456789012:user/Development/product_1234/*', Tags=[]
)
self.assertEqual(response, 'test_job')
def test_sagemaker_exception_in_ground_truth(self):
mock_client = MagicMock()
mock_exception = ClientError({"Error": {"Message": "SageMaker broke"}}, "ground_truth")
mock_client.create_labeling_job.side_effect = mock_exception
mock_args = self.parser.parse_args(required_args)
with self.assertRaises(Exception):
      _utils.create_labeling_job(mock_client, vars(mock_args))
def test_wait_for_labeling_job_creation(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.side_effect = [
{"LabelingJobStatus": "InProgress"},
{"LabelingJobStatus": "Completed"},
{"LabelingJobStatus": "Should not be called"}
]
_utils.wait_for_labeling_job(mock_client, 'test-batch', 0)
self.assertEqual(mock_client.describe_labeling_job.call_count, 2)
  def test_wait_for_labeling_job_failure(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.side_effect = [
{"LabelingJobStatus": "InProgress"},
{"LabelingJobStatus": "Failed"},
{"LabelingJobStatus": "Should not be called"}
]
with self.assertRaises(Exception):
_utils.wait_for_labeling_job(mock_client, 'test-batch', 0)
self.assertEqual(mock_client.describe_labeling_job.call_count, 2)
def test_get_labeling_job_output_from_job(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.return_value = {"LabelingJobOutput": {
"OutputDatasetS3Uri": "s3://path/",
"FinalActiveLearningModelArn": "fake-arn"
}}
output_manifest, active_learning_model_arn = _utils.get_labeling_job_outputs(mock_client, 'labeling-job', True)
self.assertEqual(output_manifest, 's3://path/')
self.assertEqual(active_learning_model_arn, 'fake-arn')
def test_pass_most_args(self):
required_args = [
'--region', 'us-west-2',
'--role', 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'--job_name', 'test_job',
'--manifest_location', 's3://fake-bucket/manifest',
'--output_location', 's3://fake-bucket/output',
'--task_type', 'image classification',
'--worker_type', 'fake_worker',
'--ui_template', 's3://fake-bucket/ui_template',
'--title', 'fake-image-labelling-work',
'--description', 'fake job',
'--num_workers_per_object', '1',
'--time_limit', '180',
]
arguments = required_args + ['--label_attribute_name', 'fake-attribute',
'--max_human_labeled_objects', '10',
'--max_percent_objects', '50',
'--enable_auto_labeling', 'True',
'--initial_model_arn', 'fake-model-arn',
'--task_availibility', '30',
'--max_concurrent_tasks', '10',
'--task_keywords', 'fake-keyword',
'--worker_type', 'public',
'--no_adult_content', 'True',
'--no_ppi', 'True',
'--tags', '{"fake_key": "fake_value"}'
]
response = _utils.create_labeling_job_request(vars(self.parser.parse_args(arguments)))
print(response)
self.assertEqual(response, {'LabelingJobName': 'test_job',
'LabelAttributeName': 'fake-attribute',
'InputConfig': {'DataSource': {'S3DataSource': {'ManifestS3Uri': 's3://fake-bucket/manifest'}},
'DataAttributes': {'ContentClassifiers': ['FreeOfAdultContent', 'FreeOfPersonallyIdentifiableInformation']}},
'OutputConfig': {'S3OutputPath': 's3://fake-bucket/output', 'KmsKeyId': ''},
'RoleArn': 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'StoppingConditions': {'MaxHumanLabeledObjectCount': 10, 'MaxPercentageOfInputDatasetLabeled': 50},
'LabelingJobAlgorithmsConfig': {'LabelingJobAlgorithmSpecificationArn': 'arn:aws:sagemaker:us-west-2:027400017018:labeling-job-algorithm-specification/image-classification',
'InitialActiveLearningModelArn': 'fake-model-arn',
'LabelingJobResourceConfig': {'VolumeKmsKeyId': ''}},
'HumanTaskConfig': {'WorkteamArn': 'arn:aws:sagemaker:us-west-2:394669845002:workteam/public-crowd/default',
'UiConfig': {'UiTemplateS3Uri': 's3://fake-bucket/ui_template'},
'PreHumanTaskLambdaArn': 'arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass',
'TaskKeywords': ['fake-keyword'],
'TaskTitle': 'fake-image-labelling-work',
'TaskDescription': 'fake job',
'NumberOfHumanWorkersPerDataObject': 1,
'TaskTimeLimitInSeconds': 180,
'TaskAvailabilityLifetimeInSeconds': 30,
'MaxConcurrentTaskCount': 10,
'AnnotationConsolidationConfig': {'AnnotationConsolidationLambdaArn': 'arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass'},
'PublicWorkforceTaskPrice': {'AmountInUsd': {'Dollars': 0, 'Cents': 0, 'TenthFractionsOfACent': 0}}},
'Tags': [{'Key': 'fake_key', 'Value': 'fake_value'}]}
)
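# A minimal, runnable sketch of the '--tags' conversion exercised above, assuming
# only that the component parses the JSON string with json.loads and emits
# SageMaker's Key/Value list format (the helper name below is illustrative):
import json
def tags_to_key_value_list(tags_json):
    """Convert a JSON object string into a SageMaker-style Tags list."""
    return [{'Key': k, 'Value': v} for k, v in json.loads(tags_json).items()]
assert tags_to_key_value_list('{"fake_key": "fake_value"}') == [
    {'Key': 'fake_key', 'Value': 'fake_value'}]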
| 51.747191
| 205
| 0.594072
|
import unittest
from unittest.mock import patch, call, Mock, MagicMock, mock_open
from botocore.exceptions import ClientError
from ground_truth.src import ground_truth
from common import _utils
required_args = [
'--region', 'us-west-2',
'--role', 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'--job_name', 'test_job',
'--manifest_location', 's3://fake-bucket/manifest',
'--output_location', 's3://fake-bucket/output',
'--task_type', 'fake-task',
'--worker_type', 'fake_worker',
'--ui_template', 's3://fake-bucket/ui_template',
'--title', 'fake-image-labelling-work',
'--description', 'fake job',
'--num_workers_per_object', '1',
'--time_limit', '180',
]
class GroundTruthTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
parser = ground_truth.create_parser()
cls.parser = parser
def test_create_parser(self):
self.assertIsNotNone(self.parser)
def test_main(self):
ground_truth._utils = MagicMock()
ground_truth._utils.add_default_client_arguments = _utils.add_default_client_arguments
ground_truth._utils.get_labeling_job_outputs.return_value = ('s3://fake-bucket/output', 'arn:aws:sagemaker:us-east-1:999999999999:labeling-job')
with patch('builtins.open', mock_open()) as file_open:
ground_truth.main(required_args)
ground_truth._utils.create_labeling_job.assert_called()
ground_truth._utils.wait_for_labeling_job.assert_called()
ground_truth._utils.get_labeling_job_outputs.assert_called()
file_open.assert_has_calls([
call('/tmp/output_manifest_location.txt', 'w'),
call('/tmp/active_learning_model_arn.txt', 'w')
], any_order=True)
file_open().write.assert_has_calls([
call('s3://fake-bucket/output'),
call('arn:aws:sagemaker:us-east-1:999999999999:labeling-job')
], any_order=False)
def test_ground_truth(self):
mock_client = MagicMock()
mock_args = self.parser.parse_args(required_args)
response = _utils.create_labeling_job(mock_client, vars(mock_args))
mock_client.create_labeling_job.assert_called_once_with(
HumanTaskConfig={'WorkteamArn': None, 'UiConfig': {'UiTemplateS3Uri': 's3://fake-bucket/ui_template'},
'PreHumanTaskLambdaArn': '', 'TaskTitle': 'fake-image-labelling-work',
'TaskDescription': 'fake job', 'NumberOfHumanWorkersPerDataObject': 1,
'TaskTimeLimitInSeconds': 180,
'AnnotationConsolidationConfig': {'AnnotationConsolidationLambdaArn': ''}},
InputConfig={'DataSource': {'S3DataSource': {'ManifestS3Uri': 's3://fake-bucket/manifest'}}},
LabelAttributeName='test_job', LabelingJobName='test_job',
OutputConfig={'S3OutputPath': 's3://fake-bucket/output', 'KmsKeyId': ''},
RoleArn='arn:aws:iam::123456789012:user/Development/product_1234/*', Tags=[]
)
self.assertEqual(response, 'test_job')
def test_sagemaker_exception_in_ground_truth(self):
mock_client = MagicMock()
mock_exception = ClientError({"Error": {"Message": "SageMaker broke"}}, "ground_truth")
mock_client.create_labeling_job.side_effect = mock_exception
mock_args = self.parser.parse_args(required_args)
with self.assertRaises(Exception):
            _utils.create_labeling_job(mock_client, vars(mock_args))
def test_wait_for_labeling_job_creation(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.side_effect = [
{"LabelingJobStatus": "InProgress"},
{"LabelingJobStatus": "Completed"},
{"LabelingJobStatus": "Should not be called"}
]
_utils.wait_for_labeling_job(mock_client, 'test-batch', 0)
self.assertEqual(mock_client.describe_labeling_job.call_count, 2)
    def test_wait_for_labeling_job_failure(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.side_effect = [
{"LabelingJobStatus": "InProgress"},
{"LabelingJobStatus": "Failed"},
{"LabelingJobStatus": "Should not be called"}
]
with self.assertRaises(Exception):
_utils.wait_for_labeling_job(mock_client, 'test-batch', 0)
self.assertEqual(mock_client.describe_labeling_job.call_count, 2)
def test_get_labeling_job_output_from_job(self):
mock_client = MagicMock()
mock_client.describe_labeling_job.return_value = {"LabelingJobOutput": {
"OutputDatasetS3Uri": "s3://path/",
"FinalActiveLearningModelArn": "fake-arn"
}}
output_manifest, active_learning_model_arn = _utils.get_labeling_job_outputs(mock_client, 'labeling-job', True)
self.assertEqual(output_manifest, 's3://path/')
self.assertEqual(active_learning_model_arn, 'fake-arn')
def test_pass_most_args(self):
required_args = [
'--region', 'us-west-2',
'--role', 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'--job_name', 'test_job',
'--manifest_location', 's3://fake-bucket/manifest',
'--output_location', 's3://fake-bucket/output',
'--task_type', 'image classification',
'--worker_type', 'fake_worker',
'--ui_template', 's3://fake-bucket/ui_template',
'--title', 'fake-image-labelling-work',
'--description', 'fake job',
'--num_workers_per_object', '1',
'--time_limit', '180',
]
arguments = required_args + ['--label_attribute_name', 'fake-attribute',
'--max_human_labeled_objects', '10',
'--max_percent_objects', '50',
'--enable_auto_labeling', 'True',
'--initial_model_arn', 'fake-model-arn',
'--task_availibility', '30',
'--max_concurrent_tasks', '10',
'--task_keywords', 'fake-keyword',
'--worker_type', 'public',
'--no_adult_content', 'True',
'--no_ppi', 'True',
'--tags', '{"fake_key": "fake_value"}'
]
response = _utils.create_labeling_job_request(vars(self.parser.parse_args(arguments)))
print(response)
self.assertEqual(response, {'LabelingJobName': 'test_job',
'LabelAttributeName': 'fake-attribute',
'InputConfig': {'DataSource': {'S3DataSource': {'ManifestS3Uri': 's3://fake-bucket/manifest'}},
'DataAttributes': {'ContentClassifiers': ['FreeOfAdultContent', 'FreeOfPersonallyIdentifiableInformation']}},
'OutputConfig': {'S3OutputPath': 's3://fake-bucket/output', 'KmsKeyId': ''},
'RoleArn': 'arn:aws:iam::123456789012:user/Development/product_1234/*',
'StoppingConditions': {'MaxHumanLabeledObjectCount': 10, 'MaxPercentageOfInputDatasetLabeled': 50},
'LabelingJobAlgorithmsConfig': {'LabelingJobAlgorithmSpecificationArn': 'arn:aws:sagemaker:us-west-2:027400017018:labeling-job-algorithm-specification/image-classification',
'InitialActiveLearningModelArn': 'fake-model-arn',
'LabelingJobResourceConfig': {'VolumeKmsKeyId': ''}},
'HumanTaskConfig': {'WorkteamArn': 'arn:aws:sagemaker:us-west-2:394669845002:workteam/public-crowd/default',
'UiConfig': {'UiTemplateS3Uri': 's3://fake-bucket/ui_template'},
'PreHumanTaskLambdaArn': 'arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass',
'TaskKeywords': ['fake-keyword'],
'TaskTitle': 'fake-image-labelling-work',
'TaskDescription': 'fake job',
'NumberOfHumanWorkersPerDataObject': 1,
'TaskTimeLimitInSeconds': 180,
'TaskAvailabilityLifetimeInSeconds': 30,
'MaxConcurrentTaskCount': 10,
'AnnotationConsolidationConfig': {'AnnotationConsolidationLambdaArn': 'arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass'},
'PublicWorkforceTaskPrice': {'AmountInUsd': {'Dollars': 0, 'Cents': 0, 'TenthFractionsOfACent': 0}}},
'Tags': [{'Key': 'fake_key', 'Value': 'fake_value'}]}
)
| true
| true
|
7906784fec245ebf3d7cee120ef91221bb8f933f
| 7,076
|
py
|
Python
|
blueapps/contrib/bk_commands/management/commands/startweixin.py
|
xianmao/bk-sops
|
2bc89a81f332122ef215adb9b7dc82641f35eb70
|
[
"Apache-2.0"
] | null | null | null |
blueapps/contrib/bk_commands/management/commands/startweixin.py
|
xianmao/bk-sops
|
2bc89a81f332122ef215adb9b7dc82641f35eb70
|
[
"Apache-2.0"
] | null | null | null |
blueapps/contrib/bk_commands/management/commands/startweixin.py
|
xianmao/bk-sops
|
2bc89a81f332122ef215adb9b7dc82641f35eb70
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import io
import json
import os
import sys
import shutil
from os import path
import django
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from django.conf import settings
import blueapps
PY_VER = sys.version
class Command(TemplateCommand):
help = u"基于蓝鲸开发框架初始化开发样例"
def add_arguments(self, parser):
parser.add_argument('directory', nargs='?', default='./',
help='Optional destination directory')
def handle(self, **options):
target = options.pop('directory')
        # Fetch the original config content first
if not path.exists('config/default.py'):
raise CommandError("config/default.py does not exist,"
" please init a django project first.")
if PY_VER[0] == '2':
old_file = open('config/default.py')
else:
old_file = open('config/default.py', encoding='utf-8')
# if some directory is given, make sure it's nicely expanded
top_dir = path.abspath(path.expanduser(target))
if not path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please init first." % top_dir)
if not path.exists(path.join(top_dir, 'manage.py')):
raise CommandError("Current directory '%s' is not "
"a django project dir, please init first. "
"(bk-admin init ${app_code})" %
top_dir)
base_subdir = 'weixin_template'
append_file_tuple = (('', 'requirements.txt'),)
# Setup a stub settings environment for template rendering
if not settings.configured:
settings.configure()
django.setup()
template_dir = path.join(blueapps.__path__[0], 'conf', base_subdir)
run_ver = None
if PY_VER[0] == '2':
conf_file = open(path.join(os.getcwd(), 'config', '__init__.py'))
else:
conf_file = open(path.join(os.getcwd(), 'config', '__init__.py'), encoding='utf-8')
for line in conf_file.readlines():
if line.startswith('RUN_VER'):
run_ver = line[11:-2]
conf_file.close()
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
relative_dir = root[prefix_length:]
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
flag = root.endswith('sites')
for dirname in dirs[:]:
if (
dirname.startswith('.') or
dirname == '__pycache__' or
(flag and dirname != run_ver)
):
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class', '.json')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir, filename)
for old_suffix, new_suffix in self.rewrite_template_suffixes:
if new_path.endswith(old_suffix):
new_path = new_path[:-len(old_suffix)] + new_suffix
break # Only rewrite once
with io.open(old_path, 'rb') as template_file:
content = template_file.read()
w_mode = 'wb'
for _root, _filename in append_file_tuple:
if _root == relative_dir and _filename == filename:
w_mode = 'ab'
with io.open(new_path, w_mode) as new_file:
new_file.write(content)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
        # Modify the config file
        modify_default_file(old_file)
# Take the original default file and append to / overwrite its content
def modify_default_file(old_file):
    # Open the file to be rewritten and the replacement JSON file
if PY_VER[0] == '2':
with open("%s/conf/weixin_template/config/default.json" % blueapps.__path__[0],
'r') as json_file:
get_default_content(old_file, json_file)
else:
with open("%s/conf/weixin_template/config/default.json" % blueapps.__path__[0],
'r', encoding='utf-8') as json_file:
get_default_content(old_file, json_file)
def get_default_content(old_file, json_file):
with old_file as old_file:
        # Load the JSON data
        result_content = old_file.read()
        json_dict = json.load(json_file)
        # Replace or append content according to each key
for replace_property in json_dict:
            # Get this key's settings
            propertys = json_dict.get(replace_property)
            # Find where the key occurs in the original content
            start_index = result_content.find(str(replace_property))
            # Get the content payload for this key
            content = propertys.get('content')
            # mode 'add': append content
if propertys.get('mode') == 'add':
end_index = result_content.find(')', start_index) - 1
temp_content = result_content[start_index:end_index]
                # Check whether the block already ends with a comma (or an opening parenthesis)
if temp_content[-1] == ',' or temp_content[-1] == '(':
temp_content += '\n'
else:
temp_content += ',\n'
                # Insert the content (converted via str())
result_content = ''.join(
[result_content[:start_index], temp_content,
str(content),
result_content[end_index:]])
            # mode 'cover': overwrite content
elif propertys.get('mode') == 'cover':
end_index = result_content.find('\n', start_index)
                # If the line already reads 'IS_USE_CELERY = False', no overwrite is needed
if result_content[start_index: end_index].strip() == 'IS_USE_CELERY = False':
continue
                # Shift start_index past the key so the variable name is not overwritten
start_index += len(replace_property)
                # Overwrite the content
result_content = ''.join(
[result_content[:start_index],
'%s' % str(content),
result_content[end_index:]])
else:
                # Any other mode: stop processing
break
if PY_VER[0] == '2':
with open('config/default.py', 'w') as default_file:
default_file.write(result_content)
else:
with open('config/default.py', 'w',
encoding='utf-8') as default_file:
default_file.write(result_content)
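# A minimal sketch of the default.json format consumed above; the keys and
# payloads here are illustrative, not the real file shipped with blueapps:
EXAMPLE_DEFAULT_JSON = {
    'INSTALLED_APPS = (': {'mode': 'add', 'content': "    'weixin.core',"},
    'IS_USE_CELERY =': {'mode': 'cover', 'content': ' True'},
}
# With mode 'add', the content is appended inside the matching parenthesised
# block; with mode 'cover', the rest of the matching line is replaced.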
| 39.093923
| 95
| 0.535048
|
import io
import json
import os
import sys
import shutil
from os import path
import django
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
from django.conf import settings
import blueapps
PY_VER = sys.version
class Command(TemplateCommand):
help = u"基于蓝鲸开发框架初始化开发样例"
def add_arguments(self, parser):
parser.add_argument('directory', nargs='?', default='./',
help='Optional destination directory')
def handle(self, **options):
target = options.pop('directory')
if not path.exists('config/default.py'):
raise CommandError("config/default.py does not exist,"
" please init a django project first.")
if PY_VER[0] == '2':
old_file = open('config/default.py')
else:
old_file = open('config/default.py', encoding='utf-8')
top_dir = path.abspath(path.expanduser(target))
if not path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please init first." % top_dir)
if not path.exists(path.join(top_dir, 'manage.py')):
raise CommandError("Current directory '%s' is not "
"a django project dir, please init first. "
"(bk-admin init ${app_code})" %
top_dir)
base_subdir = 'weixin_template'
append_file_tuple = (('', 'requirements.txt'),)
# Setup a stub settings environment for template rendering
if not settings.configured:
settings.configure()
django.setup()
template_dir = path.join(blueapps.__path__[0], 'conf', base_subdir)
run_ver = None
if PY_VER[0] == '2':
conf_file = open(path.join(os.getcwd(), 'config', '__init__.py'))
else:
conf_file = open(path.join(os.getcwd(), 'config', '__init__.py'), encoding='utf-8')
for line in conf_file.readlines():
if line.startswith('RUN_VER'):
run_ver = line[11:-2]
conf_file.close()
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
relative_dir = root[prefix_length:]
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
flag = root.endswith('sites')
for dirname in dirs[:]:
if (
dirname.startswith('.') or
dirname == '__pycache__' or
(flag and dirname != run_ver)
):
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class', '.json')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir, filename)
for old_suffix, new_suffix in self.rewrite_template_suffixes:
if new_path.endswith(old_suffix):
new_path = new_path[:-len(old_suffix)] + new_suffix
break # Only rewrite once
with io.open(old_path, 'rb') as template_file:
content = template_file.read()
w_mode = 'wb'
for _root, _filename in append_file_tuple:
if _root == relative_dir and _filename == filename:
w_mode = 'ab'
with io.open(new_path, w_mode) as new_file:
new_file.write(content)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
        # Modify the config file
        modify_default_file(old_file)
# Take the original default file and append to / overwrite its content
def modify_default_file(old_file):
    # Open the file to be rewritten and the replacement JSON file
if PY_VER[0] == '2':
with open("%s/conf/weixin_template/config/default.json" % blueapps.__path__[0],
'r') as json_file:
get_default_content(old_file, json_file)
else:
with open("%s/conf/weixin_template/config/default.json" % blueapps.__path__[0],
'r', encoding='utf-8') as json_file:
get_default_content(old_file, json_file)
def get_default_content(old_file, json_file):
with old_file as old_file:
        # Load the JSON data
        result_content = old_file.read()
        json_dict = json.load(json_file)
        # Replace or append content according to each key
for replace_property in json_dict:
            # Get this key's settings
            propertys = json_dict.get(replace_property)
            # Find where the key occurs in the original content
            start_index = result_content.find(str(replace_property))
            # Get the content payload for this key
            content = propertys.get('content')
            # mode 'add': append content
if propertys.get('mode') == 'add':
end_index = result_content.find(')', start_index) - 1
temp_content = result_content[start_index:end_index]
                # Check whether the block already ends with a comma (or an opening parenthesis)
if temp_content[-1] == ',' or temp_content[-1] == '(':
temp_content += '\n'
else:
temp_content += ',\n'
                # Insert the content (converted via str())
result_content = ''.join(
[result_content[:start_index], temp_content,
str(content),
result_content[end_index:]])
            # mode 'cover': overwrite content
elif propertys.get('mode') == 'cover':
end_index = result_content.find('\n', start_index)
                # If the line already reads 'IS_USE_CELERY = False', no overwrite is needed
if result_content[start_index: end_index].strip() == 'IS_USE_CELERY = False':
continue
                # Shift start_index past the key so the variable name is not overwritten
start_index += len(replace_property)
                # Overwrite the content
result_content = ''.join(
[result_content[:start_index],
'%s' % str(content),
result_content[end_index:]])
else:
                # Any other mode: stop processing
break
if PY_VER[0] == '2':
with open('config/default.py', 'w') as default_file:
default_file.write(result_content)
else:
with open('config/default.py', 'w',
encoding='utf-8') as default_file:
default_file.write(result_content)
| true
| true
|
790678ac92e434d7078e43c30e6ed24239787f76
| 12,552
|
py
|
Python
|
examples/getting_started/plot_getting_started.py
|
JuliaSprenger/spikeinterface
|
d5d3d3992a6d430d7008e16db4ee030734e685e5
|
[
"MIT"
] | 116
|
2019-07-12T14:33:43.000Z
|
2022-03-29T01:10:00.000Z
|
examples/getting_started/plot_getting_started.py
|
JuliaSprenger/spikeinterface
|
d5d3d3992a6d430d7008e16db4ee030734e685e5
|
[
"MIT"
] | 424
|
2019-07-15T13:29:34.000Z
|
2022-03-30T13:30:45.000Z
|
examples/getting_started/plot_getting_started.py
|
JuliaSprenger/spikeinterface
|
d5d3d3992a6d430d7008e16db4ee030734e685e5
|
[
"MIT"
] | 60
|
2019-08-26T11:59:07.000Z
|
2022-03-24T20:05:38.000Z
|
"""
Getting started tutorial
========================
In this introductory example, you will see how to use :code:`spikeinterface` to perform a full electrophysiology analysis.
We will first download some simulated data, and we will then perform some pre-processing, run a couple of spike sorting
algorithms, inspect and validate the results, export to Phy, and compare the spike sorters.
"""
import matplotlib.pyplot as plt
##############################################################################
# The spikeinterface module by itself imports only the spikeinterface.core submodule,
# which is not useful for end users
import spikeinterface
##############################################################################
# We need to import the different submodules one by one (this is the preferred way).
# There are 5 modules:
#
# - :code:`extractors` : file IO
# - :code:`toolkit` : processing toolkit for pre-, post-processing, validation, and automatic curation
# - :code:`sorters` : Python wrappers of spike sorters
# - :code:`comparison` : comparison of spike sorting output
# - :code:`widgets` : visualization
import spikeinterface as si # import core only
import spikeinterface.extractors as se
import spikeinterface.toolkit as st
import spikeinterface.sorters as ss
import spikeinterface.comparison as sc
import spikeinterface.widgets as sw
##############################################################################
# We can also import all submodules at once; this internally imports
# core+extractors+toolkit+sorters+comparison+widgets+exporters
#
# This is useful for notebooks, but it is a heavier import because many more
# dependencies are imported internally (scipy/sklearn/networkx/matplotlib/h5py...)
import spikeinterface.full as si
##############################################################################
# First, let's download a simulated dataset from the
# 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data' repo
#
# Then we can open it. Note that a `MEArec <https://mearec.readthedocs.io>`_ simulated file
# contains both a "recording" and a "sorting" object.
local_path = si.download_dataset(remote_path='mearec/mearec_test_10s.h5')
recording, sorting_true = se.read_mearec(local_path)
print(recording)
print(sorting_true)
##############################################################################
# :code:`recording` is a :code:`RecordingExtractor` object, which extracts information about channel ids, channel locations
# (if present), the sampling frequency of the recording, and the extracellular traces. :code:`sorting_true` is a
# :code:`SortingExtractor` object, which contains spike-sorting related information, including unit ids,
# spike trains, etc. Since the data are simulated, :code:`sorting_true` has ground-truth information of the spiking
# activity of each unit.
#
# Let's use the :code:`widgets` module to visualize the traces and the raster plots.
w_ts = sw.plot_timeseries(recording, time_range=(0, 5))
w_rs = sw.plot_rasters(sorting_true, time_range=(0, 5))
##############################################################################
# This is how you retrieve info from a :code:`RecordingExtractor`...
channel_ids = recording.get_channel_ids()
fs = recording.get_sampling_frequency()
num_chan = recording.get_num_channels()
num_seg = recording.get_num_segments()
print('Channel ids:', channel_ids)
print('Sampling frequency:', fs)
print('Number of channels:', num_chan)
print('Number of segments:', num_seg)
##############################################################################
# ...and a :code:`SortingExtractor`
num_seg = recording.get_num_segments()
unit_ids = sorting_true.get_unit_ids()
spike_train = sorting_true.get_unit_spike_train(unit_id=unit_ids[0])
print('Number of segments:', num_seg)
print('Unit ids:', unit_ids)
print('Spike train of first unit:', spike_train)
##################################################################
# :code:`spikeinterface` internally uses :code:`probeinterface`
# to handle Probe and ProbeGroup objects.
# Any probe in the probeinterface collection can therefore be downloaded
# and set on a Recording object.
# In this case, the MEArec dataset already carries a Probe, so we don't need to set one.
probe = recording.get_probe()
print(probe)
from probeinterface.plotting import plot_probe
plot_probe(probe)
##############################################################################
# Using the :code:`toolkit`, you can perform preprocessing on the recordings.
# Each pre-processing function also returns a :code:`RecordingExtractor`,
# which makes it easy to build pipelines. Here, we filter the recording and
# apply common median reference (CMR).
# All these preprocessing steps are "lazy". The computation is done on demand when we call
# `recording.get_traces(...)` or when we save the object to disk.
recording_cmr = recording
recording_f = st.bandpass_filter(recording, freq_min=300, freq_max=6000)
print(recording_f)
recording_cmr = st.common_reference(recording_f, reference='global', operator='median')
print(recording_cmr)
# this computes and saves the recording after applying the preprocessing chain
recording_preprocessed = recording_cmr.save(format='binary')
print(recording_preprocessed)
##############################################################################
# Now you are ready to spike sort using the :code:`sorters` module!
# Let's first check which sorters are implemented and which are installed
print('Available sorters', ss.available_sorters())
print('Installed sorters', ss.installed_sorters())
##############################################################################
# :code:`ss.installed_sorters()` will list the sorters installed on the machine.
# We can see we have HerdingSpikes and Tridesclous installed.
# Spike sorters come with a set of parameters that users can change.
# The available parameters are dictionaries and can be accessed with:
print(ss.get_default_params('herdingspikes'))
print(ss.get_default_params('tridesclous'))
##############################################################################
# Let's run herdingspikes and change one of the parameters, say, the detect_threshold:
sorting_HS = ss.run_herdingspikes(recording=recording_preprocessed, detect_threshold=4)
print(sorting_HS)
##############################################################################
# Alternatively we can pass a full dictionary containing the parameters:
other_params = ss.get_default_params('herdingspikes')
other_params['detect_threshold'] = 5
# parameters set by params dictionary
sorting_HS_2 = ss.run_herdingspikes(recording=recording_preprocessed, output_folder="herdingspikes_output2",
**other_params)
print(sorting_HS_2)
##############################################################################
# Let's run tridesclous as well, with default parameters:
sorting_TDC = ss.run_tridesclous(recording=recording_preprocessed)
##############################################################################
# The :code:`sorting_HS` and :code:`sorting_TDC` are :code:`SortingExtractor`
# objects. We can print the units found using:
print('Units found by herdingspikes:', sorting_HS.get_unit_ids())
print('Units found by tridesclous:', sorting_TDC.get_unit_ids())
##############################################################################
# :code:`spikeinterface` provides an efficient way to extract waveform snippets from paired recording/sorting objects.
# The :code:`WaveformExtractor` class samples some spikes (:code:`max_spikes_per_unit=500`) for each cluster and stores
# them on disk. These waveforms per cluster are helpful to compute the average waveform, or "template", for each unit
# and then to compute, for example, quality metrics.
we_TDC = si.WaveformExtractor.create(recording_preprocessed, sorting_TDC, 'waveforms', remove_if_exists=True)
we_TDC.set_params(ms_before=3., ms_after=4., max_spikes_per_unit=500)
we_TDC.run(n_jobs=-1, chunk_size=30000)
print(we_TDC)
unit_id0 = sorting_TDC.unit_ids[0]
waveforms = we_TDC.get_waveforms(unit_id0)
print(waveforms.shape)
template = we_TDC.get_template(unit_id0)
print(template.shape)
##############################################################################
# Once we have the `WaveformExtractor` object
# we can post-process, validate, and curate the results. With
# the :code:`toolkit.postprocessing` submodule, one can, for example,
# get waveforms, templates, maximum channels, PCA scores, or export the data
# to Phy. `Phy <https://github.com/cortex-lab/phy>`_ is a GUI for manual
# curation of the spike sorting output. To export to phy you can run:
from spikeinterface.exporters import export_to_phy
export_to_phy(we_TDC, './phy_folder_for_TDC',
compute_pc_features=False, compute_amplitudes=True)
##############################################################################
# Then you can run the template-gui with: :code:`phy template-gui phy/params.py`
# and manually curate the results.
##############################################################################
# Quality metrics for the spike sorting output are very important to assess the spike sorting performance.
# The :code:`spikeinterface.toolkit.qualitymetrics` module implements several quality metrics
# to assess the goodness of sorted units. Among those, for example,
# are signal-to-noise ratio, ISI violation ratio, isolation distance, and many more.
# These metrics are built on top of the WaveformExtractor class and return a dictionary with the unit ids as keys:
snrs = st.compute_snrs(we_TDC)
print(snrs)
isi_violations_rate, isi_violations_count = st.compute_isi_violations(we_TDC, isi_threshold_ms=1.5)
print(isi_violations_rate)
print(isi_violations_count)
##############################################################################
# All these quality metrics can be computed in one shot and returned as
# a :code:`pandas.DataFrame`
metrics = st.compute_quality_metrics(we_TDC, metric_names=['snr', 'isi_violation', 'amplitude_cutoff'])
print(metrics)
##############################################################################
# Quality metrics can also be used to automatically curate the spike sorting
# output. For example, you can select sorted units with a SNR above a
# certain threshold:
keep_mask = (metrics['snr'] > 7.5) & (metrics['isi_violations_rate'] < 0.01)
print(keep_mask)
keep_unit_ids = keep_mask[keep_mask].index.values
print(keep_unit_ids)
curated_sorting = sorting_TDC.select_units(keep_unit_ids)
print(curated_sorting)
##############################################################################
# The final part of this tutorial deals with comparing spike sorting outputs.
# We can either (1) compare the spike sorting results with the ground-truth
# sorting :code:`sorting_true`, (2) compare the output of two sorters (HerdingSpikes
# and Tridesclous), or (3) compare the output of multiple sorters:
comp_gt_TDC = sc.compare_sorter_to_ground_truth(gt_sorting=sorting_true, tested_sorting=sorting_TDC)
comp_TDC_HS = sc.compare_two_sorters(sorting1=sorting_TDC, sorting2=sorting_HS)
comp_multi = sc.compare_multiple_sorters(sorting_list=[sorting_TDC, sorting_HS],
name_list=['tdc', 'hs'])
##############################################################################
# When comparing with a ground-truth sorting extractor (1), you can get the sorting performance and plot a confusion
# matrix
comp_gt_TDC.get_performance()
w_conf = sw.plot_confusion_matrix(comp_gt_TDC)
w_agr = sw.plot_agreement_matrix(comp_gt_TDC)
##############################################################################
# When comparing two sorters (2), we can see the matching of units between sorters.
# Units which are not matched have -1 as unit id:
comp_TDC_HS.hungarian_match_12
##############################################################################
# or the reverse:
comp_TDC_HS.hungarian_match_21
##############################################################################
# When comparing multiple sorters (3), you can extract a :code:`SortingExtractor` object with units in agreement
# between sorters. You can also plot a graph showing how the units are matched between the sorters.
sorting_agreement = comp_multi.get_agreement_sorting(minimum_agreement_count=2)
print('Units in agreement between Tridesclous and HerdingSpikes:', sorting_agreement.get_unit_ids())
w_multi = sw.plot_multicomp_graph(comp_multi)
plt.show()
| 44.669039
| 126
| 0.654557
|
import matplotlib.pyplot as plt
| true
| true
|
7906797e7c32060d28729d82d76b97fda2ac04e6
| 1,430
|
py
|
Python
|
osf_tests/management_commands/test_move_egap_regs_to_provider.py
|
tsukaeru/RDM-osf.io
|
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
|
[
"Apache-2.0"
] | 11
|
2018-12-11T16:39:40.000Z
|
2022-02-26T09:51:32.000Z
|
osf_tests/management_commands/test_move_egap_regs_to_provider.py
|
tsukaeru/RDM-osf.io
|
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
|
[
"Apache-2.0"
] | 52
|
2018-04-13T05:03:21.000Z
|
2022-03-22T02:56:19.000Z
|
osf_tests/management_commands/test_move_egap_regs_to_provider.py
|
tsukaeru/RDM-osf.io
|
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
|
[
"Apache-2.0"
] | 16
|
2018-07-09T01:44:51.000Z
|
2021-06-30T01:57:16.000Z
|
# encoding: utf-8
import pytest
from osf_tests.factories import (
RegistrationFactory,
RegistrationProviderFactory
)
from osf.models import (
RegistrationSchema,
)
from osf.management.commands.move_egap_regs_to_provider import (
main as move_egap_regs
)
from django.conf import settings
@pytest.mark.django_db
class TestEGAPMoveToProvider:
@pytest.fixture()
def egap_provider(self):
return RegistrationProviderFactory(name=settings.EGAP_PROVIDER_NAME)
@pytest.fixture()
def non_egap_provider(self):
return RegistrationProviderFactory()
@pytest.fixture()
def egap_reg(self):
egap_schema = RegistrationSchema.objects.filter(
name='EGAP Registration'
).order_by(
'-schema_version'
)[0]
cos = RegistrationProviderFactory(_id='osf')
return RegistrationFactory(schema=egap_schema, provider=cos)
@pytest.fixture()
def egap_non_reg(self, non_egap_provider):
return RegistrationFactory(provider=non_egap_provider)
def test_move_to_provider(self, egap_provider, egap_reg, non_egap_provider, egap_non_reg):
assert egap_reg.provider != egap_provider
assert egap_non_reg.provider != egap_provider
move_egap_regs(dry_run=False)
egap_reg.refresh_from_db()
assert egap_reg.provider == egap_provider
assert egap_non_reg.provider != egap_provider
| 26.481481
| 94
| 0.718881
|
import pytest
from osf_tests.factories import (
RegistrationFactory,
RegistrationProviderFactory
)
from osf.models import (
RegistrationSchema,
)
from osf.management.commands.move_egap_regs_to_provider import (
main as move_egap_regs
)
from django.conf import settings
@pytest.mark.django_db
class TestEGAPMoveToProvider:
@pytest.fixture()
def egap_provider(self):
return RegistrationProviderFactory(name=settings.EGAP_PROVIDER_NAME)
@pytest.fixture()
def non_egap_provider(self):
return RegistrationProviderFactory()
@pytest.fixture()
def egap_reg(self):
egap_schema = RegistrationSchema.objects.filter(
name='EGAP Registration'
).order_by(
'-schema_version'
)[0]
cos = RegistrationProviderFactory(_id='osf')
return RegistrationFactory(schema=egap_schema, provider=cos)
@pytest.fixture()
def egap_non_reg(self, non_egap_provider):
return RegistrationFactory(provider=non_egap_provider)
def test_move_to_provider(self, egap_provider, egap_reg, non_egap_provider, egap_non_reg):
assert egap_reg.provider != egap_provider
assert egap_non_reg.provider != egap_provider
move_egap_regs(dry_run=False)
egap_reg.refresh_from_db()
assert egap_reg.provider == egap_provider
assert egap_non_reg.provider != egap_provider
| true
| true
|
79067a6c907d66d1d68d24f6c136f39728d2235b
| 878
|
py
|
Python
|
nova_powervm/virt/powervm/volume/gpfs.py
|
openstack/nova-powervm
|
376d9493e2a10313068508daf9054d7ecf6d121f
|
[
"Apache-2.0"
] | 24
|
2015-10-18T02:55:20.000Z
|
2021-11-17T11:43:51.000Z
|
nova_powervm/virt/powervm/volume/gpfs.py
|
openstack/nova-powervm
|
376d9493e2a10313068508daf9054d7ecf6d121f
|
[
"Apache-2.0"
] | null | null | null |
nova_powervm/virt/powervm/volume/gpfs.py
|
openstack/nova-powervm
|
376d9493e2a10313068508daf9054d7ecf6d121f
|
[
"Apache-2.0"
] | 12
|
2015-10-26T17:38:05.000Z
|
2021-07-21T12:45:19.000Z
|
# Copyright 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova_powervm.virt.powervm.volume import fileio
class GPFSVolumeAdapter(fileio.FileIOVolumeAdapter):
"""Connects GPFS Cinder Volumes to PowerVM VMs."""
def _get_path(self):
return self.connection_info.get("data")['device_path']
| 35.12
| 78
| 0.730068
|
from nova_powervm.virt.powervm.volume import fileio
class GPFSVolumeAdapter(fileio.FileIOVolumeAdapter):
def _get_path(self):
return self.connection_info.get("data")['device_path']
| true
| true
|
79067ca4aa4dfdf1c6aeaac791d4bd5fcdd3188e
| 1,707
|
py
|
Python
|
firebot/modules/bash.py
|
vikas04599/Fire-X
|
527d57c29785f36e1b07aa739f3ac7c969cc916c
|
[
"MIT"
] | 20
|
2021-08-16T18:49:49.000Z
|
2022-02-23T08:35:57.000Z
|
firebot/modules/bash.py
|
elizamusic/Fire-X
|
1ec6ba73b3033ad03e7859fcf6917fc6aff89efc
|
[
"MIT"
] | null | null | null |
firebot/modules/bash.py
|
elizamusic/Fire-X
|
1ec6ba73b3033ad03e7859fcf6917fc6aff89efc
|
[
"MIT"
] | 93
|
2021-08-16T19:16:20.000Z
|
2022-03-31T02:02:06.000Z
|
import asyncio
import io
import time
from firebot import CMD_HELP
from firebot.utils import edit_or_reply, fire_on_cmd, sudo_cmd
@fire.on(fire_on_cmd(pattern="bash ?(.*)"))
@fire.on(sudo_cmd(pattern="bash ?(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
PROCESS_RUN_TIME = 100
cmd = event.pattern_match.group(1)
tflyf = await edit_or_reply(event, "Processing Your Request...")
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
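    # Note: the next expression computes a would-be deadline from
    # PROCESS_RUN_TIME, but its result is discarded and never used below.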
time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
e = stderr.decode()
if not e:
e = "No Error"
o = stdout.decode()
if not o:
o = "**Tip**: \n`If you want to see the results of your code, I suggest printing them to stdout.`"
else:
_o = o.split("\n")
o = "`\n".join(_o)
OUTPUT = f"**QUERY:**\n__Command:__\n`{cmd}` \n__PID:__\n`{process.pid}`\n\n**stderr:** \n`{e}`\n**Output:**\n{o}"
if len(OUTPUT) > 4095:
with io.BytesIO(str.encode(OUTPUT)) as out_file:
out_file.name = "exec.text"
await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=cmd,
reply_to=reply_to_id,
)
await event.delete()
await tflyf.edit(OUTPUT)
CMD_HELP.update(
{
"bash": "**Bash**\
\n\n**Syntax : **`.bash <cmd>`\
\n**Usage :** Run Commands Using Userbot"
}
)
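# A minimal standalone sketch of the subprocess pattern used above, assuming
# only the standard library ('echo hi' is an illustrative command):
async def _run_shell_example(cmd):
    proc = await asyncio.create_subprocess_shell(
        cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate()
    return stdout.decode(), stderr.decode()
if __name__ == '__main__':
    print(asyncio.run(_run_shell_example('echo hi')))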
| 29.947368
| 118
| 0.596368
|
import asyncio
import io
import time
from firebot import CMD_HELP
from firebot.utils import edit_or_reply, fire_on_cmd, sudo_cmd
@fire.on(fire_on_cmd(pattern="bash ?(.*)"))
@fire.on(sudo_cmd(pattern="bash ?(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
PROCESS_RUN_TIME = 100
cmd = event.pattern_match.group(1)
tflyf = await edit_or_reply(event, "Processing Your Request...")
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
e = stderr.decode()
if not e:
e = "No Error"
o = stdout.decode()
if not o:
o = "**Tip**: \n`If you want to see the results of your code, I suggest printing them to stdout.`"
else:
_o = o.split("\n")
o = "`\n".join(_o)
OUTPUT = f"**QUERY:**\n__Command:__\n`{cmd}` \n__PID:__\n`{process.pid}`\n\n**stderr:** \n`{e}`\n**Output:**\n{o}"
if len(OUTPUT) > 4095:
with io.BytesIO(str.encode(OUTPUT)) as out_file:
out_file.name = "exec.text"
await bot.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=cmd,
reply_to=reply_to_id,
)
await event.delete()
await tflyf.edit(OUTPUT)
CMD_HELP.update(
{
"bash": "**Bash**\
\n\n**Syntax : **`.bash <cmd>`\
\n**Usage :** Run Commands Using Userbot"
}
)
| true
| true
|
79067cbce81efd04c8395cb3c3418bb2680872bb
| 2,472
|
py
|
Python
|
benchutils/transformers.py
|
qiyunzhu/taxa-assign-benchmarking
|
df0fbe6a84f20ac2e2febbbb21bd686ec90e84e3
|
[
"MIT"
] | null | null | null |
benchutils/transformers.py
|
qiyunzhu/taxa-assign-benchmarking
|
df0fbe6a84f20ac2e2febbbb21bd686ec90e84e3
|
[
"MIT"
] | null | null | null |
benchutils/transformers.py
|
qiyunzhu/taxa-assign-benchmarking
|
df0fbe6a84f20ac2e2febbbb21bd686ec90e84e3
|
[
"MIT"
] | null | null | null |
import pandas as pd
kraken_rank_dictionary = {
'P': 'phylum',
'C': 'class',
'O': 'order',
'F': 'family',
'G': 'genus',
'S': 'species'
}
greengenes_rank_dict = {
'k__': 'kingdom',
'p__': 'phylum',
'c__': 'class',
'o__': 'order',
'f__': 'family',
'g__': 'genus',
's__': 'species'
}
kraken_columns = ['PERCENTAGE', 'lca_read_count', 'read_count', 'rank',
'@@TAXID', 'TAXNAME']
def kraken2_transformer(all_rank_summary, output_rank_summaries, ranks):
    # TODO finish docs
    """Convert a summary of all ranks from kraken2 into rank-wise profiles
    similar to the CAMI-SIM output.
    Parameters
    ----------
    all_rank_summary : str or file-like
        The kraken2 report covering all taxonomic ranks.
    output_rank_summaries : list
        Output destinations, one per entry in `ranks`.
    ranks : list of str
        The taxonomic ranks to extract (e.g. 'genus', 'species').
    Returns
    -------
    None
    """
# TODO COULD be split into two format functions: one to reformat,
# and one to split on rank
# TODO give error for invalid rank value
all_ranks = pd.read_csv(all_rank_summary, sep='\t')
all_ranks.columns = kraken_columns
# TODO for kraken is it okay to just take the first part (drop the number)
all_ranks['rank'] = all_ranks['rank'].str[0]
all_ranks = all_ranks.loc[all_ranks['rank'].isin(kraken_rank_dictionary)]
all_ranks['RANK'] = [kraken_rank_dictionary[key] for key in
all_ranks['rank']]
keep_cols = ['@@TAXID', 'RANK', 'TAXNAME', 'PERCENTAGE']
for output_, rank in zip(output_rank_summaries, ranks):
sub_df = all_ranks.loc[all_ranks['RANK'] == rank]
sub_df_matching = sub_df[keep_cols]
sub_df_matching.to_csv(output_, sep='\t', index=False)
def metaphlan2_transformer(all_rank_summary, output_rank_summaries, ranks):
all_ranks = pd.read_csv(all_rank_summary, sep='\t', skiprows=3)
def last_entry(x): return x.split('|')[-1]
all_ranks['last_clade'] = all_ranks['#clade_name'].map(last_entry)
all_ranks['@@TAXID'] = all_ranks['NCBI_tax_id'].map(last_entry)
all_ranks['RANK'] = all_ranks['last_clade'].map(
lambda x: greengenes_rank_dict[x[:3]])
all_ranks['TAXNAME'] = all_ranks['last_clade'].map(lambda x: x[3:])
all_ranks['PERCENTAGE'] = all_ranks['relative_abundance']
keep_cols = ['@@TAXID', 'RANK', 'TAXNAME', 'PERCENTAGE']
for output_, rank in zip(output_rank_summaries, ranks):
sub_df = all_ranks.loc[all_ranks['RANK'] == rank]
sub_df_matching = sub_df[keep_cols]
sub_df_matching.to_csv(output_, sep='\t', index=False)
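# A minimal runnable sketch of the kraken2 transformation on synthetic data
# (pandas accepts file-like objects, so StringIO stands in for real files):
if __name__ == '__main__':
    import io
    fake_report = io.StringIO(
        'pct\tlca\treads\trank\ttaxid\tname\n'
        '90.0\t45\t45\tG\t561\tEscherichia\n'
        '10.0\t5\t5\tS\t562\tEscherichia coli\n')
    species_out = io.StringIO()
    kraken2_transformer(fake_report, [species_out], ['species'])
    print(species_out.getvalue())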
| 33.863014
| 78
| 0.642799
|
import pandas as pd
kraken_rank_dictionary = {
'P': 'phylum',
'C': 'class',
'O': 'order',
'F': 'family',
'G': 'genus',
'S': 'species'
}
greengenes_rank_dict = {
'k__': 'kingdom',
'p__': 'phylum',
'c__': 'class',
'o__': 'order',
'f__': 'family',
'g__': 'genus',
's__': 'species'
}
kraken_columns = ['PERCENTAGE', 'lca_read_count', 'read_count', 'rank',
'@@TAXID', 'TAXNAME']
def kraken2_transformer(all_rank_summary, output_rank_summaries, ranks):
all_ranks = pd.read_csv(all_rank_summary, sep='\t')
all_ranks.columns = kraken_columns
all_ranks['rank'] = all_ranks['rank'].str[0]
all_ranks = all_ranks.loc[all_ranks['rank'].isin(kraken_rank_dictionary)]
all_ranks['RANK'] = [kraken_rank_dictionary[key] for key in
all_ranks['rank']]
keep_cols = ['@@TAXID', 'RANK', 'TAXNAME', 'PERCENTAGE']
for output_, rank in zip(output_rank_summaries, ranks):
sub_df = all_ranks.loc[all_ranks['RANK'] == rank]
sub_df_matching = sub_df[keep_cols]
sub_df_matching.to_csv(output_, sep='\t', index=False)
def metaphlan2_transformer(all_rank_summary, output_rank_summaries, ranks):
all_ranks = pd.read_csv(all_rank_summary, sep='\t', skiprows=3)
def last_entry(x): return x.split('|')[-1]
all_ranks['last_clade'] = all_ranks['#clade_name'].map(last_entry)
all_ranks['@@TAXID'] = all_ranks['NCBI_tax_id'].map(last_entry)
all_ranks['RANK'] = all_ranks['last_clade'].map(
lambda x: greengenes_rank_dict[x[:3]])
all_ranks['TAXNAME'] = all_ranks['last_clade'].map(lambda x: x[3:])
all_ranks['PERCENTAGE'] = all_ranks['relative_abundance']
keep_cols = ['@@TAXID', 'RANK', 'TAXNAME', 'PERCENTAGE']
for output_, rank in zip(output_rank_summaries, ranks):
sub_df = all_ranks.loc[all_ranks['RANK'] == rank]
sub_df_matching = sub_df[keep_cols]
sub_df_matching.to_csv(output_, sep='\t', index=False)
| true
| true
|
79067d4cb9698a68a9594018b1385a9783a309dc
| 1,850
|
py
|
Python
|
ooobuild/dyn/ucb/content_event.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/ucb/content_event.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/ucb/content_event.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is an auto-generated file created by Cheetah
# Namespace: com.sun.star.ucb
# Libre Office Version: 7.3
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME, UNO_NONE
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
import uno
def _get_class():
orig_init = None
ordered_keys = ('Source', 'Action', 'Content', 'Id')
def init(self, *args, **kwargs):
if len(kwargs) == 0 and len(args) == 1 and getattr(args[0], "__class__", None) == self.__class__:
orig_init(self, args[0])
return
kargs = kwargs.copy()
for i, arg in enumerate(args):
kargs[ordered_keys[i]] = arg
orig_init(self, **kargs)
type_name = 'com.sun.star.ucb.ContentEvent'
struct = uno.getClass(type_name)
struct.__ooo_ns__ = 'com.sun.star.ucb'
struct.__ooo_full_ns__= type_name
struct.__ooo_type_name__ = 'struct'
orig_init = struct.__init__
struct.__init__ = init
return struct
ContentEvent = _get_class()
else:
from ...lo.ucb.content_event import ContentEvent as ContentEvent
__all__ = ['ContentEvent']
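# A minimal standalone sketch of what the generated __init__ above does with
# positional arguments (re-implemented here purely for illustration):
def _map_positional(ordered_keys, args, kwargs):
    kargs = kwargs.copy()
    for i, arg in enumerate(args):
        kargs[ordered_keys[i]] = arg
    return kargs
assert _map_positional(('Source', 'Action'), ('src',), {'Action': 1}) == \
    {'Source': 'src', 'Action': 1}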
| 34.259259
| 109
| 0.672973
|
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME, UNO_NONE
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
import uno
def _get_class():
orig_init = None
ordered_keys = ('Source', 'Action', 'Content', 'Id')
def init(self, *args, **kwargs):
if len(kwargs) == 0 and len(args) == 1 and getattr(args[0], "__class__", None) == self.__class__:
orig_init(self, args[0])
return
kargs = kwargs.copy()
for i, arg in enumerate(args):
kargs[ordered_keys[i]] = arg
orig_init(self, **kargs)
type_name = 'com.sun.star.ucb.ContentEvent'
struct = uno.getClass(type_name)
struct.__ooo_ns__ = 'com.sun.star.ucb'
struct.__ooo_full_ns__= type_name
struct.__ooo_type_name__ = 'struct'
orig_init = struct.__init__
struct.__init__ = init
return struct
ContentEvent = _get_class()
else:
from ...lo.ucb.content_event import ContentEvent as ContentEvent
__all__ = ['ContentEvent']
| true
| true
|
79067d5bf2ddbe70b607765307b0df5ef3ff6b90
| 883
|
py
|
Python
|
src/discolight/writers/annotation/widthheightcsv.py
|
denzel-datature/discolight
|
7c8309d3f883263b2e4cae0b289f17be1d1c07ea
|
[
"MIT"
] | 27
|
2020-07-23T08:09:25.000Z
|
2022-03-01T08:24:43.000Z
|
src/discolight/writers/annotation/widthheightcsv.py
|
denzel-datature/discolight
|
7c8309d3f883263b2e4cae0b289f17be1d1c07ea
|
[
"MIT"
] | 7
|
2020-08-05T07:26:55.000Z
|
2020-12-31T04:20:40.000Z
|
src/discolight/writers/annotation/widthheightcsv.py
|
denzel-datature/discolight
|
7c8309d3f883263b2e4cae0b289f17be1d1c07ea
|
[
"MIT"
] | 6
|
2020-07-27T04:30:01.000Z
|
2020-08-13T02:39:25.000Z
|
"""A CSV annotation writer that writes the bbox in x, y, w, h format."""
from .types import CSVAnnotationWriter
class WidthHeightCSV(CSVAnnotationWriter):
"""Writes annotations to a CSV file in the following format.
image_name, x_min, y_min, width, height, label
"""
def get_csv_fieldnames(self):
"""Return the field names for the CSV file."""
return ["image_name", "x_min", "y_min", "width", "height", "label"]
def get_csv_row(self, image_name, _image, annotation):
"""Return the CSV row corresponding to the given annotation."""
return {
"image_name": image_name,
"x_min": annotation.x_min,
"y_min": annotation.y_min,
"width": annotation.x_max - annotation.x_min,
"height": annotation.y_max - annotation.y_min,
"label": annotation.class_idx
}
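# A minimal sketch (illustrative numbers) of the min/max to width/height
# conversion performed in get_csv_row above:
_x_min, _y_min, _x_max, _y_max = 10, 20, 110, 70
assert (_x_max - _x_min, _y_max - _y_min) == (100, 50)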
| 33.961538
| 75
| 0.629672
|
from .types import CSVAnnotationWriter
class WidthHeightCSV(CSVAnnotationWriter):
def get_csv_fieldnames(self):
return ["image_name", "x_min", "y_min", "width", "height", "label"]
def get_csv_row(self, image_name, _image, annotation):
return {
"image_name": image_name,
"x_min": annotation.x_min,
"y_min": annotation.y_min,
"width": annotation.x_max - annotation.x_min,
"height": annotation.y_max - annotation.y_min,
"label": annotation.class_idx
}
| true
| true
|
79067eb7a3522d267d15a67f9850fe61c835b4db
| 27,430
|
py
|
Python
|
nodes/axis.py
|
MarcoStb1993/axis_camera
|
cd08406cdb519488743bf6c69f79a62dbbb536e3
|
[
"BSD-3-Clause"
] | 2
|
2017-09-30T14:41:37.000Z
|
2018-05-29T19:33:10.000Z
|
nodes/axis.py
|
MarcoStb1993/axis_camera
|
cd08406cdb519488743bf6c69f79a62dbbb536e3
|
[
"BSD-3-Clause"
] | null | null | null |
nodes/axis.py
|
MarcoStb1993/axis_camera
|
cd08406cdb519488743bf6c69f79a62dbbb536e3
|
[
"BSD-3-Clause"
] | 1
|
2020-02-11T13:05:31.000Z
|
2020-02-11T13:05:31.000Z
|
#!/usr/bin/env python
"""
Axis camera video driver. Inspired by:
https://code.ros.org/svn/wg-ros-pkg/branches/trunk_cturtle/sandbox/axis_camera/axis.py
Communication with the camera is done using the Axis VAPIX API described at
http://www.axis.com/global/en/support/developer-support/vapix
.. note::
This is a major rewrite of the former ros-drivers/axis_camera node, so it contains a (deprecated) backwards
compatibility layer for the previous (non-released) API.
"""
import math
import re
import rospy
from sensor_msgs.msg import CompressedImage, CameraInfo
import camera_info_manager
import dynamic_reconfigure.server
from diagnostic_updater import Updater, DiagnosedPublisher, TimeStampStatusParam, FrequencyStatusParam, \
FunctionDiagnosticTask, DiagnosticStatusWrapper
from axis_camera.cfg import VideoStreamConfig
from axis_camera.srv import TakeSnapshot, TakeSnapshotResponse
from axis_camera.vapix import VAPIX
from axis_camera.video_streaming import ImageStreamingThread
from axis_camera.dynamic_reconfigure_tools import change_enum_items
# BACKWARDS COMPATIBILITY LAYER
StreamThread = ImageStreamingThread # deprecated
class Axis(rospy.SubscribeListener):
"""The ROS-VAPIX interface for video streaming."""
def __init__(self, hostname, username, password, width, height, frame_id, camera_info_url, use_encrypted_password,
camera_id=1, auto_wakeup_camera=True, compression=0, fps=24, use_color=True,
use_square_pixels=False):
"""Create the ROS-VAPIX interface.
:param hostname: Hostname of the camera (without http://, can be an IP address).
:type hostname: basestring
:param username: If login is needed, provide a username here.
:type username: :py:obj:`basestring` | None
:param password: If login is needed, provide a password here.
:type password: :py:obj:`basestring` | None
:param width: Width of the requested video stream in pixels (can be changed later). Must be one of the supported
resolutions. If `None`, the resolution will be chosen by height only. If also `height` is `None`,
then the default camera resolution will be used.
:type width: int|None
:param height: Height of the requested video stream in pixels (can be changed later). Must be one of the
supported resolutions. If `None`, the resolution will be chosen by width only. If also `width` is
`None`, then the default camera resolution will be used.
:type height: int|None
:param frame_id: The ROS TF frame assigned to the camera.
:type frame_id: basestring
        :param camera_info_url: The URL pointing to the camera calibration, if available.
:type camera_info_url: basestring
:param use_encrypted_password: Whether to use Plain HTTP Auth (False) or Digest HTTP Auth (True).
:type use_encrypted_password: bool
:param camera_id: ID (number) of the camera. Can be 1 to 4.
:type camera_id: int
        :param auto_wakeup_camera: If True, a wakeup attempt will be made after the first unsuccessful network command.
:type auto_wakeup_camera: bool
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:param fps: The desired frames per second.
:type fps: int
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
        :raises: :py:exc:`ValueError` if the requested resolution (either the `resolution`, or `width`+`height`)
                 is not supported.
"""
# True every time the video parameters have changed and the URL has to be altered (set from other threads).
self.video_params_changed = False
self.__initializing = True
self._hostname = hostname
self._camera_id = camera_id
self.diagnostic_updater = Updater()
self.diagnostic_updater.setHardwareID(hostname)
self._api = None
# autodetect the VAPIX API and connect to it; try it forever
while self._api is None and not rospy.is_shutdown():
try:
self._api = VAPIX.get_api_for_camera(hostname, username, password, camera_id, use_encrypted_password)
except (IOError, ValueError):
rospy.loginfo("Retrying connection to VAPIX on host %s, camera %d in 2 seconds." %
(hostname, camera_id))
rospy.sleep(2)
if rospy.is_shutdown():
return
self._allowed_resolutions = self._get_allowed_resolutions()
rospy.loginfo("The following resolutions are available for camera %d:\n%s" %
(camera_id, "\n".join([str(res) for res in self._allowed_resolutions])))
rospy.set_param("~allowed_resolutions", [res.get_vapix_representation() for res in self._allowed_resolutions])
# Sometimes the camera falls into power saving mode and stops streaming.
# This setting allows the script to try to wake up the camera.
self._auto_wakeup_camera = auto_wakeup_camera
# dynamic-reconfigurable properties - definitions
self._width = None # deprecated
self._height = None # deprecated
self._resolution = None
self._compression = None
self._fps = None
self._use_color = None
self._use_square_pixels = None
# treat empty strings as None in width and height params
width = width if width != "" else None
height = height if height != "" else None
# dynamic-reconfigurable properties - defaults
if width is None and height is None:
# TODO change to perform default resolution detection from VAPIX
self.set_resolution(self._allowed_resolutions[0])
else:
resolution = self.find_resolution_by_size(width, height)
self.set_resolution(resolution.get_vapix_representation())
self.set_compression(compression)
self.set_fps(fps)
self.set_use_color(use_color)
self.set_use_square_pixels(use_square_pixels)
# only advertise the supported resolutions on dynamic reconfigure
change_enum_items(
VideoStreamConfig,
"resolution",
[{
'name': res.name if isinstance(res, CIFVideoResolution) else str(res),
'value': res.get_vapix_representation(),
'description': str(res)
} for res in self._allowed_resolutions],
self._resolution.get_vapix_representation()
)
# dynamic reconfigure server
self._video_stream_param_change_server = dynamic_reconfigure.server.Server(VideoStreamConfig,
self.reconfigure_video)
# camera info setup
self._frame_id = frame_id
self._camera_info_url = camera_info_url
# generate a valid camera name based on the hostname
self._camera_name = camera_info_manager.genCameraName(self._hostname)
self._camera_info = camera_info_manager.CameraInfoManager(cname=self._camera_name, url=self._camera_info_url)
self._camera_info.loadCameraInfo() # required before getCameraInfo()
# the thread used for streaming images (is instantiated when the first image subscriber subscribes)
self._streaming_thread = None
# the publishers are started/stopped lazily in peer_subscribe/peer_unsubscribe
self._video_publisher_frequency_diagnostic = FrequencyStatusParam({'min': self._fps, 'max': self._fps})
self._video_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("image_raw/compressed", CompressedImage, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._camera_info_publisher = PausableDiagnosedPublisher(
self,
rospy.Publisher("camera_info", CameraInfo, self, queue_size=100),
self.diagnostic_updater, self._video_publisher_frequency_diagnostic, TimeStampStatusParam()
)
self._snapshot_server = rospy.Service("take_snapshot", TakeSnapshot, self.take_snapshot)
self.diagnostic_updater.add(FunctionDiagnosticTask("Camera parameters", self._camera_diagnostic_callback))
# BACKWARDS COMPATIBILITY LAYER
self.username = username # deprecated
self.password = password # deprecated
self.use_encrypted_password = use_encrypted_password # deprecated
self.st = None # deprecated
self.pub = self._video_publisher # deprecated
self.caminfo_pub = self._camera_info_publisher # deprecated
self.__initializing = False
def __str__(self):
(width, height) = self._resolution.get_resolution(self._use_square_pixels)
return 'Axis driver on host %s, camera %d (%dx%d px @ %d FPS)' % \
(self._hostname, self._api.camera_id, width, height, self._fps)
def peer_subscribe(self, topic_name, topic_publish, peer_publish):
"""Lazy-start the image-publisher."""
if self._streaming_thread is None:
self._streaming_thread = ImageStreamingThread(self)
self._streaming_thread.start()
else:
self._streaming_thread.resume()
def peer_unsubscribe(self, topic_name, num_peers):
"""Lazy-stop the image-publisher when nobody is interested"""
if num_peers == 0:
self._streaming_thread.pause()
def take_snapshot(self, request):
"""Retrieve a snapshot from the camera.
:param request: The service request.
:type request: :py:class:`axis_camera.srv.TakeSnapshotRequest`
:return: The response containing the image.
:rtype: :py:class:`axis_camera.srv.TakeSnapshotResponse`
:raises: :py:exc:`IOError`, :py:exc:`urllib2.URLError`
"""
image_data = self._api.take_snapshot()
image = CompressedImage()
image.header.stamp = rospy.Time.now()
image.header.frame_id = self._frame_id
image.format = "jpeg"
image.data = image_data
response = TakeSnapshotResponse()
response.image = image
return response
def reconfigure_video(self, config, level):
"""Dynamic reconfigure callback for video parameters.
:param config: The requested configuration.
:type config: dict
:param level: Unused here.
:type level: int
:return: The config corresponding to what was really achieved.
:rtype: dict
"""
if self.__initializing:
# in the initialization phase, we want to give precedence to the values given to the constructor
config.compression = self._compression
config.fps = self._fps
config.use_color = self._use_color
config.use_square_pixels = self._use_square_pixels
config.resolution = self._resolution.get_vapix_representation()
else:
self.__try_set_value_from_config(config, 'compression', self.set_compression)
self.__try_set_value_from_config(config, 'fps', self.set_fps)
self.__try_set_value_from_config(config, 'use_color', self.set_use_color)
self.__try_set_value_from_config(config, 'use_square_pixels', self.set_use_square_pixels)
try:
self.set_resolution(config.resolution)
except ValueError:
config.resolution = self._resolution.get_vapix_representation()
return config
def __try_set_value_from_config(self, config, field, setter):
"""First, try to call `setter(config[field])`, and if this call doesn't succeed. set the field in config to
its value stored in this class.
:param config: The dynamic reconfigure config dictionary.
:type config: dict
:param field: The field name (both in :py:obj:`config` and in :py:obj:`self`).
:type field: basestring
:param setter: The setter to use to set the value.
        :type setter: callable
"""
try:
setter(config[field])
except ValueError:
config[field] = getattr(self, field)
#################################
# DYNAMIC RECONFIGURE CALLBACKS #
#################################
def set_resolution(self, resolution_value):
"""Request a new resolution for the video stream.
        :param resolution_value: A string of the form `width`x`height` or a :py:class:`VideoResolution` object.
:type resolution_value: basestring|VideoResolution
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
resolution = None
if isinstance(resolution_value, VideoResolution):
resolution = resolution_value
elif isinstance(resolution_value, basestring):
resolution = self._get_resolution_from_param_value(resolution_value)
if resolution is None:
raise ValueError("Unsupported resolution type specified: %r" % resolution_value)
if self._resolution is None or resolution != self._resolution:
self._resolution = resolution
self.video_params_changed = True
# deprecated values
self._width = resolution.get_resolution(self._use_square_pixels)[0]
self._height = resolution.get_resolution(self._use_square_pixels)[1]
def _get_resolution_from_param_value(self, value):
"""Return a :py:class:`VideoResolution` object corresponding to the given video resolution param string.
:param value: Value of the resolution parameter to parse (of form `width`x`height`).
:type value: basestring
:return: The :py:class:`VideoResolution` corresponding to the given resolution param string.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if the resolution is unknown/unsupported.
"""
for resolution in self._allowed_resolutions:
if resolution.get_vapix_representation() == value:
return resolution
raise ValueError("%s is not a valid valid resolution." % value)
def find_resolution_by_size(self, width, height):
"""Return a :py:class:`VideoResolution` object with the given dimensions.
        If multiple resolutions have the same size, any of them may be returned.
:param width: Image width in pixels. If `None`, resolutions will be matched only by height.
:type width: int|None
:param height: Image height in pixels. If `None`, resolutions will be matched only by width.
:type height: int|None
:return: The corresponding resolution object.
:rtype: :py:class:`VideoResolution`
:raises: :py:exc:`ValueError` if no resolution with the given dimensions can be found.
:raises: :py:exc:`ValueError` if both `width` and `height` are None.
"""
if width is None and height is None:
raise ValueError("Either width or height of the desired resolution must be specified.")
for resolution in self._allowed_resolutions:
size = resolution.get_resolution(use_square_pixels=False)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
size = resolution.get_resolution(use_square_pixels=True)
if (width is None or width == size[0]) and (height is None or height == size[1]):
return resolution
raise ValueError("Cannot find a supported resolution with dimensions %sx%s" % (width, height))
def _get_allowed_resolutions(self):
"""Return a list of resolutions supported both by the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
camera_resolutions = self._get_resolutions_supported_by_camera()
return camera_resolutions
def _get_resolutions_supported_by_camera(self):
"""Return a list of resolutions supported the camera.
:return: The supported resolutions list.
:rtype: list of :py:class:`VideoResolution`
"""
try:
names = self._api.parse_list_parameter_value(self._api.get_parameter("Properties.Image.Resolution"))
return [VideoResolution.parse_from_vapix_param_value(name, self._api) for name in names]
except (IOError, ValueError):
rospy.logwarn("Could not determine resolutions supported by the camera. Asssuming only CIF.")
return [CIFVideoResolution("CIF", 384, 288)]
def set_compression(self, compression):
"""Request the given compression level for the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
if compression != self._compression:
self._compression = self.sanitize_compression(compression)
self.video_params_changed = True
@staticmethod
def sanitize_compression(compression):
"""Make sure the given value can be used as a compression level of the video stream.
:param compression: Compression of the image (0 - no compression, 100 - max compression).
:type compression: int
:return: The given compression converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given compression level is outside the allowed range.
"""
compression = int(compression)
if not (0 <= compression <= 100):
raise ValueError("%s is not a valid value for compression." % str(compression))
return compression
def set_fps(self, fps):
"""Request the given compression level for the video stream.
:param fps: The desired frames per second.
:type fps: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
if fps != self._fps:
self._fps = self.sanitize_fps(fps)
self.video_params_changed = True
if hasattr(self, "_video_publisher_frequency_diagnostic"):
self._video_publisher_frequency_diagnostic.freq_bound['min'] = self._fps
self._video_publisher_frequency_diagnostic.freq_bound['max'] = self._fps
@staticmethod
def sanitize_fps(fps):
"""Make sure the given value can be used as FPS of the video stream.
:param fps: The desired frames per second.
:type fps: int
:return: The given FPS converted to an int.
:rtype: int
:raises: :py:exc:`ValueError` if the given FPS is outside the allowed range.
"""
fps = int(fps)
if not (1 <= fps <= 30):
raise ValueError("%s is not a valid value for FPS." % str(fps))
return fps
def set_use_color(self, use_color):
"""Request using/not using color in the video stream.
:param use_color: If True, send a color stream, otherwise send only grayscale image.
:type use_color: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_color != self._use_color:
self._use_color = self.sanitize_bool(use_color, "use_color")
self.video_params_changed = True
def set_use_square_pixels(self, use_square_pixels):
"""Request using/not using square pixels.
:param use_square_pixels: If True, the resolution will be stretched to match 1:1 pixels.
By default, the pixels have a ratio of 11:12.
:type use_square_pixels: bool
:raises: :py:exc:`ValueError` if the given argument is not a bool.
"""
if use_square_pixels != self._use_square_pixels:
self._use_square_pixels = self.sanitize_bool(use_square_pixels, "use_square_pixels")
self.video_params_changed = True
@staticmethod
def sanitize_bool(value, field_name):
"""Convert the given value to a bool.
        :param value: Either True, False, "1", "0", 1 or 0.
:type value: :py:class:`basestring` | :py:class:`bool` | :py:class:`int`
:param field_name: Name of the field this value belongs to (just for debug messages).
:type field_name: basestring
:return: The bool value of the given value.
:rtype: :py:class:`bool`
:raises: :py:exc:`ValueError` if the given value is not supported in this conversion.
"""
if value not in (True, False, "1", "0", 1, 0):
raise ValueError("%s is not a valid value for %s." % (str(value), field_name))
# bool("0") returns True because it is a nonempty string
if value == "0":
return False
return bool(value)
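    # Examples of the conversion above: sanitize_bool("1", "f") -> True,
    # sanitize_bool("0", "f") -> False (handled explicitly, since bool("0")
    # would be True for a nonempty string), and sanitize_bool(0, "f") -> False.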
def _camera_diagnostic_callback(self, diag_message):
assert isinstance(diag_message, DiagnosticStatusWrapper)
diag_message.summary(DiagnosticStatusWrapper.OK, "Video parameters")
diag_message.add("FPS", self._fps)
diag_message.add("Resolution", self._resolution)
diag_message.add("Compression", self._compression)
diag_message.add("Color image", self._use_color)
diag_message.add("Square pixels used", self._use_square_pixels)
class VideoResolution(object):
"""A class representing a video resolution."""
def __init__(self, width, height):
"""Create a representation of the resolution.
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(VideoResolution, self).__init__()
self.width = int(width)
self.height = int(height)
self.square_pixel_conversion_ratio_width = 12.0 / 11.0
self.square_pixel_conversion_ratio_height = 1
def __str__(self):
return "%dx%d" % (self.width, self.height)
def __repr__(self):
return "VideoResolution(width=%r,height=%r)" % (self.width, self.height)
def __eq__(self, other):
# compare by attribute values
return self.__dict__ == other.__dict__
def __ne__(self, other):
# reuse the former __eq__ definition
return not self == other
def get_resolution(self, use_square_pixels=False):
"""Get the image dimensions corresponding to this resolution.
        :param use_square_pixels: Whether to stretch the resulting resolution to square pixels.
:type use_square_pixels: bool
:return: A tuple (width, height)
:rtype: tuple
"""
width = self.width
height = self.height
if use_square_pixels:
width = int(math.ceil(self.square_pixel_conversion_ratio_width * self.width))
height = int(math.ceil(self.square_pixel_conversion_ratio_height * self.height))
return width, height
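    # Worked example for get_resolution() (values assumed, not read from a
    # camera): VideoResolution(704, 576).get_resolution() returns (704, 576),
    # while get_resolution(use_square_pixels=True) returns (768, 576), because
    # ceil(704 * 12 / 11) == 768 and the height conversion ratio is 1.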
def get_vapix_representation(self):
return "%dx%d" % (self.width, self.height)
@staticmethod
def parse_from_vapix_param_value(value, api):
assert isinstance(value, basestring)
assert isinstance(api, VAPIX)
numeric_regexp = re.compile(r"(\d+)x(\d+)")
match = numeric_regexp.match(value)
if match is not None:
return VideoResolution(int(match.group(1)), int(match.group(2)))
else: # resolution given by CIF name
name = value
width, height = api.resolve_video_resolution_name(name)
return CIFVideoResolution(name, width, height)
class CIFVideoResolution(VideoResolution):
"""A class representing a CIF standard resolution."""
def __init__(self, name, width, height):
"""Create a representation of a CIF resolution.
:param name: CIF standard name of the resolution.
:type name: basestring
:param width: Width of the resolution in pixels.
:type width: int
:param height: Height of the resolution in pixels.
:type height: int
"""
super(CIFVideoResolution, self).__init__(width, height)
self.name = name
def __str__(self):
return "%s (%dx%d)" % (self.name, self.width, self.height)
def __repr__(self):
return "CIFVideoResolution(name=%r,width=%r,height=%r)" % (self.name, self.width, self.height)
def main():
"""Start the ROS driver and ROS node."""
rospy.init_node("axis_driver")
arg_defaults = {
'hostname': '192.168.0.90', # default IP address
'username': None, # default login name
'password': None,
'width': 704,
'height': 576,
'frame_id': 'axis_camera',
'camera_info_url': '',
'use_encrypted_password': False,
'camera_id': 1,
'auto_wakeup_camera': True,
'compression': 0,
'fps': 24,
'use_color': True,
'use_square_pixels': False,
}
args = read_args_with_defaults(arg_defaults)
axis = Axis(**args)
rate = rospy.Rate(1)
while not rospy.is_shutdown():
axis.diagnostic_updater.update()
try:
rate.sleep()
except rospy.ROSTimeMovedBackwardsException:
rospy.logwarn("Detected jump back in time.")
class PausableDiagnosedPublisher(DiagnosedPublisher):
def __init__(self, axis, pub, diag, freq, stamp):
DiagnosedPublisher.__init__(self, pub, diag, freq, stamp)
self._axis = axis
def run(self, stat):
if self._axis._streaming_thread is None or self._axis._streaming_thread.is_paused():
stat.summary(DiagnosticStatusWrapper.OK, "Video not subscribed")
else:
stat = DiagnosedPublisher.run(self, stat)
return stat
def read_args_with_defaults(arg_defaults):
"""Look up parameters starting in the driver's private parameter space, but also searching outer namespaces.
Defining them in a higher namespace allows the axis_ptz.py script to share parameters with the driver."""
args = {}
for name, val in arg_defaults.iteritems():
full_name = rospy.search_param(name)
if full_name is None:
args[name] = val
else:
args[name] = rospy.get_param(full_name, val)
# resolve frame_id with tf_prefix (unless already absolute)
if args['frame_id'][0] != '/': # not absolute?
tf_prefix = rospy.search_param('tf_prefix')
prefix_val = ''
if tf_prefix is not None: # prefix defined?
prefix_val = rospy.get_param(tf_prefix)
if prefix_val[0] != '/': # prefix not absolute?
prefix_val = '/' + prefix_val
args['frame_id'] = prefix_val + '/' + args['frame_id']
return args
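# Example of the search described above (namespace names are hypothetical): for
# a driver running as /axis/axis_driver, rospy.search_param('fps') starts at the
# private ~fps (/axis/axis_driver/fps) and then walks the outer namespaces
# (/axis/fps, /fps); the first key found is the one that gets read.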
if __name__ == "__main__":
main()
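# Example invocation (the package and script names are assumptions about the
# surrounding ROS package; the parameter names come from read_args_with_defaults):
#   rosrun axis_camera axis.py _hostname:=192.168.0.90 _username:=root \
#       _password:=secret _fps:=24 _compression:=30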
#!/usr/bin/python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Patch an orderfile.
Starting with a list of symbols in a binary and an orderfile (ordered list of
symbols), matches the symbols in the orderfile and augments each symbol with the
symbols residing at the same address (due to having identical code).
Note: It is possible to have:
- Several symbols mapping to the same offset in the binary.
- Several offsets for a given symbol (because we strip the ".clone." suffix)
TODO(lizeb): Since the suffix ".clone." is only used with -O3 that we don't
currently use, simplify the logic by removing the suffix handling.
The general pipeline is:
1. Get the symbol infos (name, offset, size, section) from the binary
2. Get the symbol names from the orderfile
3. Find the orderfile symbol names in the symbols coming from the binary
4. For each symbol found, get all the symbols at the same address
5. Output them to an updated orderfile, with several different prefixes
"""
import collections
import logging
import optparse
import sys
import cygprofile_utils
import symbol_extractor
# Prefixes for the symbols. We strip them from the incoming symbols, and add
# them back in the output file.
_PREFIXES = ('.text.startup.', '.text.hot.', '.text.unlikely.', '.text.')
def _RemoveClone(name):
"""Return name up to the ".clone." marker."""
clone_index = name.find('.clone.')
if clone_index != -1:
return name[:clone_index]
return name
def _GroupSymbolInfos(symbol_infos):
"""Group the symbol infos by name and offset.
Args:
symbol_infos: an iterable of SymbolInfo
Returns:
The same output as _GroupSymbolInfosFromBinary.
"""
# Map the addresses to symbols.
offset_to_symbol_infos = collections.defaultdict(list)
name_to_symbol_infos = collections.defaultdict(list)
for symbol in symbol_infos:
symbol = symbol_extractor.SymbolInfo(name=_RemoveClone(symbol.name),
offset=symbol.offset,
size=symbol.size,
section=symbol.section)
offset_to_symbol_infos[symbol.offset].append(symbol)
name_to_symbol_infos[symbol.name].append(symbol)
return (dict(offset_to_symbol_infos), dict(name_to_symbol_infos))
def _GroupSymbolInfosFromBinary(binary_filename):
"""Group all the symbols from a binary by name and offset.
Args:
binary_filename: path to the binary.
Returns:
A tuple of dict:
(offset_to_symbol_infos, name_to_symbol_infos):
- offset_to_symbol_infos: {offset: [symbol_info1, ...]}
- name_to_symbol_infos: {name: [symbol_info1, ...]}
"""
symbol_infos = symbol_extractor.SymbolInfosFromBinary(binary_filename)
return _GroupSymbolInfos(symbol_infos)
def _StripPrefix(line):
"""Get the symbol from a line with a linker section name.
Args:
line: a line from an orderfile, usually in the form:
.text.SymbolName
Returns:
The symbol, SymbolName in the example above.
"""
line = line.rstrip('\n')
for prefix in _PREFIXES:
if line.startswith(prefix):
return line[len(prefix):]
return line # Unprefixed case
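# Example behavior of _StripPrefix (inputs invented):
#   '.text.hot.MySymbol\n' -> 'MySymbol'
#   'MySymbol'             -> 'MySymbol'  (unprefixed lines pass through)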
def _GetSymbolsFromStream(lines):
"""Get the symbols from an iterable of lines.
Filters out wildcards and lines which do not correspond to symbols.
Args:
lines: iterable of lines from an orderfile.
Returns:
Same as GetSymbolsFromOrderfile
"""
# TODO(lizeb): Retain the prefixes later in the processing stages.
symbols = []
unique_symbols = set()
for line in lines:
line = _StripPrefix(line)
name = _RemoveClone(line)
if name == '' or name == '*' or name == '.text':
continue
    if line not in unique_symbols:
symbols.append(line)
unique_symbols.add(line)
return symbols
def GetSymbolsFromOrderfile(filename):
"""Return the symbols from an orderfile.
Args:
filename: The name of the orderfile.
Returns:
A list of symbol names.
"""
with open(filename, 'r') as f:
return _GetSymbolsFromStream(f.xreadlines())
def _SymbolsWithSameOffset(profiled_symbol, name_to_symbol_info,
offset_to_symbol_info):
"""Expand a profiled symbol to include all symbols which share an offset
with that symbol.
Args:
profiled_symbol: the string symbol name to be expanded.
name_to_symbol_info: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary
offset_to_symbol_info: {offset: [symbol_info1, ...], ...}
Returns:
A list of symbol names, or an empty list if profiled_symbol was not in
name_to_symbol_info.
"""
  if profiled_symbol not in name_to_symbol_info:
return []
symbol_infos = name_to_symbol_info[profiled_symbol]
expanded = []
for symbol_info in symbol_infos:
expanded += (s.name for s in offset_to_symbol_info[symbol_info.offset])
return expanded
def _ExpandSymbols(profiled_symbols, name_to_symbol_infos,
offset_to_symbol_infos):
"""Expand all of the symbols in profiled_symbols to include any symbols which
share the same address.
Args:
profiled_symbols: Symbols to match
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
Returns:
A list of the symbol names.
"""
found_symbols = 0
missing_symbols = []
all_symbols = []
for name in profiled_symbols:
expansion = _SymbolsWithSameOffset(name,
name_to_symbol_infos, offset_to_symbol_infos)
if expansion:
found_symbols += 1
all_symbols += expansion
else:
all_symbols.append(name)
missing_symbols.append(name)
logging.info('symbols found: %d\n' % found_symbols)
  # Test the list itself; a list-to-int comparison is always True in Python 2.
  if missing_symbols:
logging.warning('%d missing symbols.' % len(missing_symbols))
missing_symbols_to_show = min(100, len(missing_symbols))
logging.warning('First %d missing symbols:\n%s' % (
missing_symbols_to_show,
'\n'.join(missing_symbols[:missing_symbols_to_show])))
return all_symbols
def _PrintSymbolsWithPrefixes(symbol_names, output_file):
"""For each symbol, outputs it to output_file with the prefixes."""
unique_outputs = set()
for name in symbol_names:
for prefix in _PREFIXES:
linker_section = prefix + name
      if linker_section not in unique_outputs:
output_file.write(linker_section + '\n')
unique_outputs.add(linker_section)
def main(argv):
parser = optparse.OptionParser(usage=
'usage: %prog [options] <unpatched_orderfile> <library>')
parser.add_option('--target-arch', action='store', dest='arch',
choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'],
help='The target architecture for the library.')
options, argv = parser.parse_args(argv)
if not options.arch:
options.arch = cygprofile_utils.DetectArchitecture()
if len(argv) != 3:
parser.print_help()
return 1
orderfile_filename = argv[1]
binary_filename = argv[2]
symbol_extractor.SetArchitecture(options.arch)
(offset_to_symbol_infos, name_to_symbol_infos) = _GroupSymbolInfosFromBinary(
binary_filename)
profiled_symbols = GetSymbolsFromOrderfile(orderfile_filename)
expanded_symbols = _ExpandSymbols(
profiled_symbols, name_to_symbol_infos, offset_to_symbol_infos)
_PrintSymbolsWithPrefixes(expanded_symbols, sys.stdout)
# The following is needed otherwise Gold only applies a partial sort.
print '.text' # gets methods not in a section, such as assembly
print '.text.*' # gets everything else
return 0
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main(sys.argv))
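# Example invocation (file names are placeholders); the patched orderfile is
# written to stdout:
#   patch_orderfile.py --target-arch=arm unpatched.orderfile libchrome.so \
#       > patched.orderfile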
"""Patch an orderfile.
Starting with a list of symbols in a binary and an orderfile (ordered list of
symbols), matches the symbols in the orderfile and augments each symbol with the
symbols residing at the same address (due to having identical code).
Note: It is possible to have.
- Several symbols mapping to the same offset in the binary.
- Several offsets for a given symbol (because we strip the ".clone." suffix)
TODO(lizeb): Since the suffix ".clone." is only used with -O3 that we don't
currently use, simplify the logic by removing the suffix handling.
The general pipeline is:
1. Get the symbol infos (name, offset, size, section) from the binary
2. Get the symbol names from the orderfile
3. Find the orderfile symbol names in the symbols coming from the binary
4. For each symbol found, get all the symbols at the same address
5. Output them to an updated orderfile, with several different prefixes
"""
import collections
import logging
import optparse
import sys
import cygprofile_utils
import symbol_extractor
# Prefixes for the symbols. We strip them from the incoming symbols, and add
# them back in the output file.
_PREFIXES = ('.text.startup.', '.text.hot.', '.text.unlikely.', '.text.')
def _RemoveClone(name):
"""Return name up to the ".clone." marker."""
clone_index = name.find('.clone.')
if clone_index != -1:
return name[:clone_index]
return name
def _GroupSymbolInfos(symbol_infos):
"""Group the symbol infos by name and offset.
Args:
symbol_infos: an iterable of SymbolInfo
Returns:
The same output as _GroupSymbolInfosFromBinary.
"""
# Map the addresses to symbols.
offset_to_symbol_infos = collections.defaultdict(list)
name_to_symbol_infos = collections.defaultdict(list)
for symbol in symbol_infos:
symbol = symbol_extractor.SymbolInfo(name=_RemoveClone(symbol.name),
offset=symbol.offset,
size=symbol.size,
section=symbol.section)
offset_to_symbol_infos[symbol.offset].append(symbol)
name_to_symbol_infos[symbol.name].append(symbol)
return (dict(offset_to_symbol_infos), dict(name_to_symbol_infos))
def _GroupSymbolInfosFromBinary(binary_filename):
"""Group all the symbols from a binary by name and offset.
Args:
binary_filename: path to the binary.
Returns:
A tuple of dict:
(offset_to_symbol_infos, name_to_symbol_infos):
- offset_to_symbol_infos: {offset: [symbol_info1, ...]}
- name_to_symbol_infos: {name: [symbol_info1, ...]}
"""
symbol_infos = symbol_extractor.SymbolInfosFromBinary(binary_filename)
return _GroupSymbolInfos(symbol_infos)
def _StripPrefix(line):
"""Get the symbol from a line with a linker section name.
Args:
line: a line from an orderfile, usually in the form:
.text.SymbolName
Returns:
The symbol, SymbolName in the example above.
"""
line = line.rstrip('\n')
for prefix in _PREFIXES:
if line.startswith(prefix):
return line[len(prefix):]
return line # Unprefixed case
def _GetSymbolsFromStream(lines):
"""Get the symbols from an iterable of lines.
Filters out wildcards and lines which do not correspond to symbols.
Args:
lines: iterable of lines from an orderfile.
Returns:
Same as GetSymbolsFromOrderfile
"""
# TODO(lizeb): Retain the prefixes later in the processing stages.
symbols = []
unique_symbols = set()
for line in lines:
line = _StripPrefix(line)
name = _RemoveClone(line)
if name == '' or name == '*' or name == '.text':
continue
if not line in unique_symbols:
symbols.append(line)
unique_symbols.add(line)
return symbols
def GetSymbolsFromOrderfile(filename):
"""Return the symbols from an orderfile.
Args:
filename: The name of the orderfile.
Returns:
A list of symbol names.
"""
with open(filename, 'r') as f:
return _GetSymbolsFromStream(f.xreadlines())
def _SymbolsWithSameOffset(profiled_symbol, name_to_symbol_info,
offset_to_symbol_info):
"""Expand a profiled symbol to include all symbols which share an offset
with that symbol.
Args:
profiled_symbol: the string symbol name to be expanded.
name_to_symbol_info: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary
offset_to_symbol_info: {offset: [symbol_info1, ...], ...}
Returns:
A list of symbol names, or an empty list if profiled_symbol was not in
name_to_symbol_info.
"""
if not profiled_symbol in name_to_symbol_info:
return []
symbol_infos = name_to_symbol_info[profiled_symbol]
expanded = []
for symbol_info in symbol_infos:
expanded += (s.name for s in offset_to_symbol_info[symbol_info.offset])
return expanded
def _ExpandSymbols(profiled_symbols, name_to_symbol_infos,
offset_to_symbol_infos):
"""Expand all of the symbols in profiled_symbols to include any symbols which
share the same address.
Args:
profiled_symbols: Symbols to match
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
Returns:
A list of the symbol names.
"""
found_symbols = 0
missing_symbols = []
all_symbols = []
for name in profiled_symbols:
expansion = _SymbolsWithSameOffset(name,
name_to_symbol_infos, offset_to_symbol_infos)
if expansion:
found_symbols += 1
all_symbols += expansion
else:
all_symbols.append(name)
missing_symbols.append(name)
logging.info('symbols found: %d\n' % found_symbols)
if missing_symbols > 0:
logging.warning('%d missing symbols.' % len(missing_symbols))
missing_symbols_to_show = min(100, len(missing_symbols))
logging.warning('First %d missing symbols:\n%s' % (
missing_symbols_to_show,
'\n'.join(missing_symbols[:missing_symbols_to_show])))
return all_symbols
def _PrintSymbolsWithPrefixes(symbol_names, output_file):
"""For each symbol, outputs it to output_file with the prefixes."""
unique_outputs = set()
for name in symbol_names:
for prefix in _PREFIXES:
linker_section = prefix + name
if not linker_section in unique_outputs:
output_file.write(linker_section + '\n')
unique_outputs.add(linker_section)
def main(argv):
parser = optparse.OptionParser(usage=
'usage: %prog [options] <unpatched_orderfile> <library>')
parser.add_option('--target-arch', action='store', dest='arch',
choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'],
help='The target architecture for the library.')
options, argv = parser.parse_args(argv)
if not options.arch:
options.arch = cygprofile_utils.DetectArchitecture()
if len(argv) != 3:
parser.print_help()
return 1
orderfile_filename = argv[1]
binary_filename = argv[2]
symbol_extractor.SetArchitecture(options.arch)
(offset_to_symbol_infos, name_to_symbol_infos) = _GroupSymbolInfosFromBinary(
binary_filename)
profiled_symbols = GetSymbolsFromOrderfile(orderfile_filename)
expanded_symbols = _ExpandSymbols(
profiled_symbols, name_to_symbol_infos, offset_to_symbol_infos)
_PrintSymbolsWithPrefixes(expanded_symbols, sys.stdout)
# The following is needed otherwise Gold only applies a partial sort.
print '.text' # gets methods not in a section, such as assembly
print '.text.*' # gets everything else
return 0
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main(sys.argv))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import multiprocessing as mp
import numpy as np
import os
import torch
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.logger import setup_logger
from pytorch3d.io import save_obj
from pytorch3d.structures import Meshes
# required so that .register() calls are executed in module scope
import meshrcnn.data # noqa
import meshrcnn.modeling # noqa
import meshrcnn.utils # noqa
from meshrcnn.config import get_meshrcnn_cfg_defaults
from meshrcnn.evaluation import transform_meshes_to_camera_coord_system
def get_parser():
parser = argparse.ArgumentParser(description="MeshRCNN Demo")
parser.add_argument(
"--config-file",
default="configs/pix3d/meshrcnn_R50_FPN.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--input", help="A path to an input image")
parser.add_argument("--output", help="A directory to save output visualizations")
parser.add_argument(
"--focal-length", type=float, default=20.0, help="Focal length for the image"
)
parser.add_argument(
"--onlyhighest", action="store_true", help="will return only the highest scoring detection"
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
# NOTE: arguments are parsed once at import time so that the Pix3D dataset can
# be registered before VisualizationDemo looks up its metadata below; this call
# requires at least two free-form opts, since args.opts[1] is passed through.
args = get_parser().parse_args()
from meshrcnn.data.datasets.register_pix3d import register_pix3d
register_pix3d(args.opts[1])
import cv2
logger = logging.getLogger("demo")
class VisualizationDemo(object):
def __init__(self, cfg, vis_highest_scoring=True, output_dir="./vis"):
"""
Args:
cfg (CfgNode):
            vis_highest_scoring (bool): If set to True visualizes only
                the highest scoring prediction
            output_dir (str): The directory where output visualizations are saved
        """
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
self.colors = self.metadata.thing_colors
self.cat_names = self.metadata.thing_classes
self.cpu_device = torch.device("cpu")
self.vis_highest_scoring = vis_highest_scoring
self.predictor = DefaultPredictor(cfg)
os.makedirs(output_dir, exist_ok=True)
self.output_dir = output_dir
def run_on_image(self, image, focal_length=10.0):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
focal_length (float): the focal_length of the image
Returns:
predictions (dict): the output of the model.
"""
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
# camera matrix
imsize = [image.shape[0], image.shape[1]]
# focal <- focal * image_width / 32
focal_length = image.shape[1] / 32 * focal_length
K = [focal_length, image.shape[1] / 2, image.shape[0] / 2]
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
scores = instances.scores
boxes = instances.pred_boxes
labels = instances.pred_classes
masks = instances.pred_masks
meshes = Meshes(
verts=[mesh[0] for mesh in instances.pred_meshes],
faces=[mesh[1] for mesh in instances.pred_meshes],
)
pred_dz = instances.pred_dz[:, 0] * (boxes.tensor[:, 3] - boxes.tensor[:, 1])
tc = pred_dz.abs().max() + 1.0
zranges = torch.stack(
[
torch.stack(
[
tc - tc * pred_dz[i] / 2.0 / focal_length,
tc + tc * pred_dz[i] / 2.0 / focal_length,
]
)
for i in range(len(meshes))
],
dim=0,
)
Ks = torch.tensor(K).to(self.cpu_device).view(1, 3).expand(len(meshes), 3)
meshes = transform_meshes_to_camera_coord_system(
meshes, boxes.tensor, zranges, Ks, imsize
)
if self.vis_highest_scoring:
det_ids = [scores.argmax().item()]
else:
det_ids = range(len(scores))
for det_id in det_ids:
self.visualize_prediction(
det_id,
image,
boxes.tensor[det_id],
labels[det_id],
scores[det_id],
masks[det_id],
meshes[det_id],
)
return predictions
def visualize_prediction(
self, det_id, image, box, label, score, mask, mesh, alpha=0.6, dpi=200
):
mask_color = np.array(self.colors[label], dtype=np.float32)
cat_name = self.cat_names[label]
thickness = max([int(np.ceil(0.001 * image.shape[0])), 1])
box_color = (0, 255, 0) # '#00ff00', green
text_color = (218, 227, 218) # gray
composite = image.copy().astype(np.float32)
# overlay mask
idx = mask.nonzero()
composite[idx[:, 0], idx[:, 1], :] *= 1.0 - alpha
composite[idx[:, 0], idx[:, 1], :] += alpha * mask_color
# overlay box
(x0, y0, x1, y1) = (int(x + 0.5) for x in box)
composite = cv2.rectangle(
composite, (x0, y0), (x1, y1), color=box_color, thickness=thickness
)
composite = composite.astype(np.uint8)
# overlay text
font_scale = 0.001 * image.shape[0]
font_thickness = thickness
font = cv2.FONT_HERSHEY_TRIPLEX
text = "%s %.3f" % (cat_name, score)
((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, font_thickness)
# Place text background.
if x0 + text_w > composite.shape[1]:
x0 = composite.shape[1] - text_w
if y0 - int(1.2 * text_h) < 0:
y0 = int(1.2 * text_h)
back_topleft = x0, y0 - int(1.3 * text_h)
back_bottomright = x0 + text_w, y0
cv2.rectangle(composite, back_topleft, back_bottomright, box_color, -1)
# Show text
text_bottomleft = x0, y0 - int(0.2 * text_h)
cv2.putText(
composite,
text,
text_bottomleft,
font,
font_scale,
text_color,
thickness=font_thickness,
lineType=cv2.LINE_AA,
)
save_file = os.path.join(self.output_dir, "%d_mask_%s_%.3f.png" % (det_id, cat_name, score))
cv2.imwrite(save_file, composite[:, :, ::-1])
save_file = os.path.join(self.output_dir, "%d_mesh_%s_%.3f.obj" % (det_id, cat_name, score))
verts, faces = mesh.get_mesh_verts_faces(0)
save_obj(save_file, verts, faces)
def setup_cfg(args):
cfg = get_cfg()
get_meshrcnn_cfg_defaults(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
logger = setup_logger(name="demo")
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
im_name = args.input.split("/")[-1].split(".")[0]
demo = VisualizationDemo(
cfg, vis_highest_scoring=args.onlyhighest, output_dir=os.path.join(args.output, im_name)
)
# use PIL, to be consistent with evaluation
img = read_image(args.input, format="BGR")
predictions = demo.run_on_image(img, focal_length=args.focal_length)
logger.info("Predictions saved in %s" % (os.path.join(args.output, im_name)))
# https://repl.it/@thakopian/day-4-2-exercise#main.py
# write a program which will select a random name from a list of names
# name selected will pay for everyone's bill
# cannot use choice() function
# inputs for the names - Angela, Ben, Jenny, Michael, Chloe
# import modules
import random
# set a variable for the raw input and another that splits the string on commas
names_string = input("Give me everybody's names, separated by a comma. ")
names = names_string.split(", ")
# get name at index of list (example)
print(names[0])
# you can also print len(names) to see how many names there are
print(len(names))
# use the random module to pick an index
# the standard form is random.randint(0, x), which returns an integer between 0 and x inclusive
# substitute len(names) for x by storing the list length in a variable
num_items = len(names)
# use num_items - 1 in place of x so the result lines up with the list's zero-based indices
# set the function to a variable
choice = random.randint(0, num_items - 1)
# index the names list with the random choice and store the selected name in another variable
person_who_pays = names[choice]
# print that stored named variable out with a message
print(person_who_pays + " is going to buy the meal today")
#######
# This exercise isn't a practical application of random choice, since it deliberately avoids the .choice() function
# the idea is to substitute variables, learn by repetition, and problem-solve
# building your own random selection shows how the code can achieve the same result without the .choice() function
# that way you learn to work through problem challenges and build your own workaround when the out-of-the-box tools aren't everything you need for a given problem
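# For comparison only (an addition, not part of the original exercise): the
# built-in helper the exercise avoids does the same index-picking in one call
alternative_payer = random.choice(names)
print(alternative_payer + " would be picked by random.choice in one step")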
| 41.295455
| 176
| 0.760044
|
| true
| true
|
7906810ca3e53172179e0508842ca7cdc2d85ad2
| 4,110
|
py
|
Python
|
pyyolo/utils.py
|
isarandi/pyyolo
|
0f26210fd72f7ce973b34d51b6a38b5dd0f57115
|
[
"Apache-2.0"
] | null | null | null |
pyyolo/utils.py
|
isarandi/pyyolo
|
0f26210fd72f7ce973b34d51b6a38b5dd0f57115
|
[
"Apache-2.0"
] | null | null | null |
pyyolo/utils.py
|
isarandi/pyyolo
|
0f26210fd72f7ce973b34d51b6a38b5dd0f57115
|
[
"Apache-2.0"
] | null | null | null |
"""
File name: utils
Author: rameshpr
Date: 11/5/18
"""
import numpy as np
from ctypes import *
from typing import List, Tuple
import cv2
from pyyolo.darknet import c_array, IMAGE, METADATA, predict_image, get_network_boxes, \
do_nms_obj, do_nms_sort, free_image, free_detections, ndarray_image
import pyyolo.darknet
from pyyolo.yolo_data import BBox, YoloData
def load_image(filename, flags=None):
# type: (str, int) -> IMAGE
"""
This will call cv2.imread() with the given arguments and convert
the resulting numpy array to a darknet image
:param filename: Image file name
:param flags: imread flags
:return: Given image file as a darknet image
:rtype: IMAGE
"""
    # cv2.imread does not accept None for its flags argument, so only pass it when given
    if flags is None:
        image = cv2.imread(filename)
    else:
        image = cv2.imread(filename, flags)
return array_to_image(image)
def array_to_image(arr):
# type: (np.ndarray) -> IMAGE
"""
    Convert the given numpy array to a
    darknet image.
    Remember to call the free_image(im) function after using this image
:rtype: IMAGE
:param arr: numpy array
:return: darknet image
"""
data = arr.ctypes.data_as(POINTER(c_ubyte))
im = ndarray_image(data, arr.ctypes.shape, arr.ctypes.strides)
return im
def classify(net, meta, im):
    # type: (object, METADATA, IMAGE) -> List[Tuple[str, float]]
out = predict_image(net, im)
res = []
for i in range(meta.classes):
res.append((meta.names[i], out[i]))
res = sorted(res, key=lambda x: -x[1])
return res
def detect(net, meta, im, thresh=.2, hier_thresh=0, nms=.4):
# type: (object, METADATA, IMAGE, float, float, float) -> List[YoloData]
"""
    Detect the objects in the given image. free_image() is called inside this function,
    so the input darknet image is not usable after this function returns.
    :param net: darknet network object
    :param meta: darknet metadata object
    :param im: darknet image to run detection on
    :param thresh: detection confidence threshold
    :param hier_thresh: hierarchical detection threshold
    :param nms: non-maximum suppression threshold (0 disables NMS)
    :return: detections sorted by descending confidence
"""
num = c_int(0)
pnum = pointer(num)
predict_image(net, im)
dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
num = pnum[0]
if nms:
do_nms_sort(dets, num, meta.classes, nms)
res = []
for j in range(num):
for i in range(meta.classes):
if dets[j].prob[i] > 0:
b = dets[j].bbox
res.append(YoloData(id=i, name=meta.names[i], bbox=BBox(b.x - b.w/2.0, b.y - b.h/2.0, b.w, b.h, dets[j].prob[i])))
res = sorted(res, key=lambda x: -x.bbox.c)
free_image(im)
free_detections(dets, num)
return res
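# Note (an illustrative addition, derived from the code above): get_network_boxes()
# returns centre-based boxes; the BBox built in detect() converts them to a
# top-left origin, i.e. x_tl = b.x - b.w / 2 and y_tl = b.y - b.h / 2.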
def load_net(cfg_filepath, weights_filepath, clear):
# type: (str, str, bool) -> object
"""
:param cfg_filepath: cfg file name
:param weights_filepath: weights file name
:param clear: True if you want to clear the weights otherwise False
:return: darknet network object
"""
return pyyolo.darknet.load_net(cfg_filepath, weights_filepath, clear)
def load_meta(meta_filepath):
# type: (str) -> METADATA
"""
Recommend using load_names(str) function instead.
:param meta_filepath: metadata file path
:return: darknet metadata object
"""
return pyyolo.darknet.load_meta(meta_filepath)
def load_names(names_filepath):
# type: (str) -> METADATA
"""
    Loading metadata from a data file (e.g. coco.data) is cumbersome, as that file must also be edited to point to the names file.
    Using this function you can directly load the names file as a METADATA object.
    The older function is still available if you need it.
:param names_filepath: Filepath of the names file. Eg: coco.names
:return: darknet metadata object
"""
    with open(names_filepath) as f:
        data = f.readlines()
    # open() already raises if the file is missing, so the remaining failure mode is an empty file
    if not data:
        raise ValueError("Names file is empty: %s" % names_filepath)
n_cls = len(data)
p_names = (c_char_p * n_cls)()
for cls in range(n_cls):
        # strip surrounding whitespace/newline instead of blindly dropping the last character
        name = data[cls].strip().encode('utf-8')
        c_name = c_char_p()
        c_name.value = name
p_names[cls] = c_name
return METADATA(n_cls, cast(p_names, POINTER(c_char_p)))
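# Minimal usage sketch (an addition; the file paths below are placeholders and
# assume a compiled darknet library plus matching cfg/weights/names files):
if __name__ == "__main__":
    net = load_net("cfg/yolov3.cfg", "weights/yolov3.weights", False)
    meta = load_names("data/coco.names")
    im = load_image("dog.jpg")
    # detect() frees `im` internally, so it must not be reused afterwards
    for det in detect(net, meta, im):
        print(det.name, det.bbox)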
| 29.148936
| 130
| 0.652798
|
| true
| true
|
790682d1bb4627a5c0a8dbb0c149452c906adf07
| 16,991
|
py
|
Python
|
tests/test_nibbles.py
|
knovichikhin/pyiso8583
|
99c9ccdd75f399328b79248532f28080cb97b33a
|
[
"MIT"
] | 17
|
2020-09-10T08:24:58.000Z
|
2022-03-08T01:33:32.000Z
|
tests/test_nibbles.py
|
knovichikhin/pyiso8583
|
99c9ccdd75f399328b79248532f28080cb97b33a
|
[
"MIT"
] | 13
|
2020-11-21T01:46:14.000Z
|
2022-02-14T06:20:32.000Z
|
tests/test_nibbles.py
|
knovichikhin/pyiso8583
|
99c9ccdd75f399328b79248532f28080cb97b33a
|
[
"MIT"
] | 7
|
2020-10-09T20:25:56.000Z
|
2022-02-18T13:54:34.000Z
|
"""Test length measured in half bytes (nibbles). Nibbles were added in v2.1"""
import copy
import iso8583
import iso8583.specs
import pytest
# fmt: off
@pytest.mark.parametrize(
["data_enc", "len_enc", "len_type", "max_len", "len_count", "result", "result_f2_len"],
[
("ascii", "ascii", 2, 8, "bytes", b"02004000000000000000041234", b"04"),
("ascii", "ascii", 2, 8, "nibbles", b"02004000000000000000081234", b"08"),
("ascii", "b", 2, 8, "bytes", b"02004000000000000000\x00\x041234", b"\x00\x04"),
("ascii", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x081234", b"\x00\x08"),
("b", "ascii", 2, 8, "bytes", b"0200400000000000000002\x12\x34", b"02"),
("b", "ascii", 2, 8, "nibbles", b"0200400000000000000004\x12\x34", b"04"),
("b", "b", 2, 8, "bytes", b"02004000000000000000\x00\x02\x12\x34", b"\x00\x02"),
("b", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x04\x12\x34", b"\x00\x04"),
("ascii", "ascii", 0, 4, "bytes", b"020040000000000000001234", b""),
("ascii", "ascii", 0, 8, "nibbles", b"020040000000000000001234", b""),
("b", "ascii", 0, 2, "bytes", b"02004000000000000000\x12\x34", b""),
("b", "ascii", 0, 4, "nibbles", b"02004000000000000000\x12\x34", b""),
],
)
# fmt: on
def test_encode_nibbles(
data_enc: str,
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
result: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = data_enc
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
decoded = {"t": "0200", "2": "1234"}
s, encoded = iso8583.encode(decoded, spec)
assert s == result
assert encoded["2"]["len"] == result_f2_len
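# Illustrative note (an addition, not part of the test suite): a nibble is half
# a byte, so a field counted in nibbles reports twice its byte length. ASCII
# b"1234" occupies 4 bytes = 8 nibbles, while the same digits packed as binary
# b"\x12\x34" occupy 2 bytes = 4 nibbles -- which is why the cases above expect
# encoded lengths of "08" and "04" respectively.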
# fmt: off
@pytest.mark.parametrize(
["len_enc", "len_type", "max_len", "len_count", "pad", "result", "result_f2_len"],
[
("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x01\x23", b"03"),
("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x01\x23", b""),
("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\xF1\x23", b"03"),
("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\xF1\x23", b""),
("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x01\x23", b"03"),
("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x01\x23", b""),
("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\xF1\x23", b"03"),
("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\xF1\x23", b""),
],
)
# fmt: on
def test_encode_nibbles_odd_left_pad(
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
pad: str,
result: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
spec["2"]["left_pad"] = pad
decoded = {"t": "0200", "2": "123"}
s, encoded = iso8583.encode(decoded, spec)
assert s == result
assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
["len_enc", "len_type", "max_len", "len_count", "pad", "result", "result_f2_len"],
[
("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x12\x30", b"03"),
("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x12\x30", b""),
("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\x12\x3F", b"03"),
("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\x12\x3F", b""),
("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x12\x30", b"03"),
("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x12\x30", b""),
("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\x12\x3F", b"03"),
("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\x12\x3F", b""),
],
)
# fmt: on
def test_encode_nibbles_odd_right_pad(
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
pad: str,
result: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
spec["2"]["right_pad"] = pad
decoded = {"t": "0200", "2": "123"}
s, encoded = iso8583.encode(decoded, spec)
assert s == result
assert encoded["2"]["len"] == result_f2_len
def test_encode_nibbles_odd_no_pad() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = "b"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 8
spec["2"]["len_count"] = "nibbles"
decoded = {"t": "0200", "2": "1"}
with pytest.raises(
iso8583.EncodeError,
match="Failed to encode .*: field 2",
):
iso8583.encode(decoded, spec=spec)
# fmt: off
@pytest.mark.parametrize(
["data_enc", "len_enc", "len_type", "max_len", "len_count", "data", "result_f2_len"],
[
("ascii", "ascii", 2, 8, "bytes", b"02004000000000000000041234", b"04"),
("ascii", "ascii", 2, 8, "nibbles", b"02004000000000000000081234", b"08"),
("ascii", "b", 2, 8, "bytes", b"02004000000000000000\x00\x041234", b"\x00\x04"),
("ascii", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x081234", b"\x00\x08"),
("b", "ascii", 2, 8, "bytes", b"0200400000000000000002\x12\x34", b"02"),
("b", "ascii", 2, 8, "nibbles", b"0200400000000000000004\x12\x34", b"04"),
("b", "b", 2, 8, "bytes", b"02004000000000000000\x00\x02\x12\x34", b"\x00\x02"),
("b", "b", 2, 8, "nibbles", b"02004000000000000000\x00\x04\x12\x34", b"\x00\x04"),
("ascii", "ascii", 0, 4, "bytes", b"020040000000000000001234", b""),
("ascii", "ascii", 0, 8, "nibbles", b"020040000000000000001234", b""),
("b", "ascii", 0, 2, "bytes", b"02004000000000000000\x12\x34", b""),
("b", "ascii", 0, 4, "nibbles", b"02004000000000000000\x12\x34", b""),
],
)
# fmt: on
def test_decode_nibbles(
data_enc: str,
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
data: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = data_enc
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
decoded, encoded = iso8583.decode(data, spec)
assert decoded["2"] == "1234"
assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
["len_enc", "len_type", "max_len", "len_count", "pad", "data", "result_f2_len"],
[
("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x01\x23", b"03"),
("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x01\x23", b""),
("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\xF1\x23", b"03"),
("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\xF1\x23", b""),
("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x01\x23", b"03"),
("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x01\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x01\x23", b""),
("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\xF1\x23", b"03"),
("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\xF1\x23", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\xF1\x23", b""),
],
)
# fmt: on
def test_decode_nibbles_left_pad(
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
pad: str,
data: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
spec["2"]["left_pad"] = pad
decoded, encoded = iso8583.decode(data, spec)
assert decoded["2"] == "123"
assert encoded["2"]["len"] == result_f2_len
# fmt: off
@pytest.mark.parametrize(
["len_enc", "len_type", "max_len", "len_count", "pad", "data", "result_f2_len"],
[
("ascii", 2, 8, "nibbles", "0", b"0200400000000000000003\x12\x30", b"03"),
("b", 2, 8, "nibbles", "0", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "0", b"02004000000000000000\x12\x30", b""),
("ascii", 2, 8, "nibbles", "F", b"0200400000000000000003\x12\x3F", b"03"),
("b", 2, 8, "nibbles", "F", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F", b"02004000000000000000\x12\x3F", b""),
("ascii", 2, 8, "nibbles", "01", b"0200400000000000000003\x12\x30", b"03"),
("b", 2, 8, "nibbles", "01", b"02004000000000000000\x00\x03\x12\x30", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "01", b"02004000000000000000\x12\x30", b""),
("ascii", 2, 8, "nibbles", "F1", b"0200400000000000000003\x12\x3F", b"03"),
("b", 2, 8, "nibbles", "F1", b"02004000000000000000\x00\x03\x12\x3F", b"\x00\x03"),
("ascii", 0, 3, "nibbles", "F1", b"02004000000000000000\x12\x3F", b""),
],
)
# fmt: on
def test_decode_nibbles_right_pad(
len_enc: str,
len_type: int,
max_len: int,
len_count: str,
pad: str,
data: bytes,
result_f2_len: bytes,
) -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = len_enc
spec["2"]["len_type"] = len_type
spec["2"]["max_len"] = max_len
spec["2"]["len_count"] = len_count
spec["2"]["right_pad"] = pad
decoded, encoded = iso8583.decode(data, spec)
assert decoded["2"] == "123"
assert encoded["2"]["len"] == result_f2_len
def test_decode_nibbles_odd_no_pad() -> None:
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "b"
spec["2"]["len_enc"] = "b"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 8
spec["2"]["len_count"] = "nibbles"
data = b"02004000000000000000\x00\x03\x12\x30"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 4 nibbles, expecting 3: field 2 pos 22",
):
iso8583.decode(data, spec=spec)
def test_encode_nibbles_variable_over_max() -> None:
"""Variable field length is over maximum allowed"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
decoded = {"t": "0200", "2": "1234"}
with pytest.raises(
iso8583.EncodeError,
match="Field data is 8 nibbles, larger than maximum 4: field 2",
):
iso8583.encode(decoded, spec=spec)
def test_encode_nibbles_fixed_partial() -> None:
"""Fixed field is provided partially"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 0
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
decoded = {"t": "0200", "2": "1"}
with pytest.raises(
iso8583.EncodeError,
match="Field data is 2 nibbles, expecting 4: field 2",
):
iso8583.encode(decoded, spec=spec)
def test_encode_nibbles_fixed_missing() -> None:
"""Fixed field is missing"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 0
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
decoded = {"t": "0200", "2": ""}
with pytest.raises(
iso8583.EncodeError,
match="Field data is 0 nibbles, expecting 4: field 2",
):
iso8583.encode(decoded, spec=spec)
def test_decode_nibbles_variable_over_max() -> None:
"""Variable field length is over maximum allowed"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
s = b"02004000000000000000081234"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 8 nibbles, larger than maximum 4: field 2 pos 20",
):
iso8583.decode(s, spec=spec)
def test_decode_nibbles_variable_partial() -> None:
"""Variable field is provided partially"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
s = b"02004000000000000000041"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 2 nibbles, expecting 4: field 2 pos 22",
):
iso8583.decode(s, spec=spec)
def test_decode_nibbles_variable_missing() -> None:
"""Variable field is missing"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 2
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
s = b"0200400000000000000004"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 0 nibbles, expecting 4: field 2 pos 22",
):
iso8583.decode(s, spec=spec)
def test_decode_nibbles_fixed_partial() -> None:
"""Fixed field is provided partially"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 0
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
s = b"020040000000000000001"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 2 nibbles, expecting 4: field 2 pos 20",
):
iso8583.decode(s, spec=spec)
def test_decode_nibbles_fixed_missing() -> None:
"""Fixed field is missing"""
spec = copy.deepcopy(iso8583.specs.default)
spec["t"]["data_enc"] = "ascii"
spec["p"]["data_enc"] = "ascii"
spec["2"]["data_enc"] = "ascii"
spec["2"]["len_enc"] = "ascii"
spec["2"]["len_type"] = 0
spec["2"]["max_len"] = 4
spec["2"]["len_count"] = "nibbles"
s = b"02004000000000000000"
with pytest.raises(
iso8583.DecodeError,
match="Field data is 0 nibbles, expecting 4: field 2 pos 20",
):
iso8583.decode(s, spec=spec)
| 35.397917
| 98
| 0.571126
|
| true
| true
|
790683767e43ace504142e8a04e6684fbbaceb56
| 2,558
|
py
|
Python
|
pymatch/utils/KFold.py
|
raharth/PyMatch
|
93cf10fd9ca0fa104b0f2a30e613f75fd0561b92
|
[
"MIT"
] | 10
|
2021-07-06T17:26:17.000Z
|
2022-01-11T13:02:20.000Z
|
pymatch/utils/KFold.py
|
raharth/PyMatch
|
93cf10fd9ca0fa104b0f2a30e613f75fd0561b92
|
[
"MIT"
] | 105
|
2020-10-02T13:01:17.000Z
|
2021-07-27T19:06:38.000Z
|
pymatch/utils/KFold.py
|
raharth/PyMatch
|
93cf10fd9ca0fa104b0f2a30e613f75fd0561b92
|
[
"MIT"
] | null | null | null |
import torch
class KFold:
def __init__(self, dataset, n_fold=10, batch_size=32, num_workers=0, pin_memory=False):
self.fold = 0
self.batch_size = batch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
self.dataset = dataset
self.n_fold = n_fold
self.fold_size = len(self.dataset) // self.n_fold
self.folded_size = self.n_fold * self.fold_size
self.fold_idx = self.fold_split()
def fold_split(self, random_seed=None):
"""
Splitting the folds.
Args:
random_seed: Random seed for reproducibility
Returns:
tensor containing indices for folds, where dim=0 is the fold number
"""
if random_seed is not None:
torch.manual_seed(random_seed)
        fold_idx = torch.randperm(len(self.dataset))
fold_idx = fold_idx[:self.folded_size].view(-1, self.fold_size)
return fold_idx
def fold_loaders(self, fold=-1):
"""
Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of
the original data set.
Args:
fold: fold number to return
Returns:
(train data loader, test data loader)
"""
if fold == -1:
fold = self.fold
test_fold_idx = self.fold_idx[fold]
train_fold_idx = self.fold_idx[[i for i in range(self.n_fold) if i != fold]].view(-1)
        train_loader = torch.utils.data.DataLoader(self.dataset,
                                                   batch_size=self.batch_size,
                                                   num_workers=self.num_workers,
                                                   pin_memory=self.pin_memory,
                                                   sampler=torch.utils.data.SubsetRandomSampler(train_fold_idx))
        test_loader = torch.utils.data.DataLoader(self.dataset,
                                                  batch_size=self.batch_size,
                                                  num_workers=self.num_workers,
                                                  pin_memory=self.pin_memory,
                                                  sampler=torch.utils.data.SubsetRandomSampler(test_fold_idx))
self.fold = (self.fold + 1) % self.n_fold
return train_loader, test_loader
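# Minimal usage sketch (an addition; any map-style dataset works -- a TensorDataset
# of 100 random samples is used here purely for illustration):
if __name__ == "__main__":
    dataset = torch.utils.data.TensorDataset(torch.randn(100, 3), torch.randint(0, 2, (100,)))
    kfold = KFold(dataset, n_fold=5, batch_size=16)
    for _ in range(kfold.n_fold):
        # with no argument, fold_loaders() returns the current fold and advances to the next
        train_loader, test_loader = kfold.fold_loaders()
        print(len(train_loader.sampler), len(test_loader.sampler))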
| 39.353846
| 158
| 0.555903
|
| true
| true
|
79068379f7a8d34470e9e01eae7af8d8fed90bba
| 12,886
|
py
|
Python
|
sdks/python/client/openapi_client/model/fc_volume_source.py
|
Siebjee/argo-workflows
|
1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/client/openapi_client/model/fc_volume_source.py
|
Siebjee/argo-workflows
|
1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c
|
[
"Apache-2.0"
] | 3
|
2022-03-22T11:49:02.000Z
|
2022-03-24T14:13:59.000Z
|
sdks/python/client/openapi_client/model/fc_volume_source.py
|
Siebjee/argo-workflows
|
1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c
|
[
"Apache-2.0"
] | null | null | null |
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from openapi_client.exceptions import ApiAttributeError
class FCVolumeSource(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'fs_type': (str,), # noqa: E501
'lun': (int,), # noqa: E501
'read_only': (bool,), # noqa: E501
'target_wwns': ([str],), # noqa: E501
'wwids': ([str],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'fs_type': 'fsType', # noqa: E501
'lun': 'lun', # noqa: E501
'read_only': 'readOnly', # noqa: E501
'target_wwns': 'targetWWNs', # noqa: E501
'wwids': 'wwids', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""FCVolumeSource - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501
lun (int): Optional: FC target lun number. [optional] # noqa: E501
read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501
target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501
wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""FCVolumeSource - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
fs_type (str): Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.. [optional] # noqa: E501
lun (int): Optional: FC target lun number. [optional] # noqa: E501
read_only (bool): Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.. [optional] # noqa: E501
target_wwns ([str]): Optional: FC target worldwide names (WWNs). [optional] # noqa: E501
wwids ([str]): Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 47.375
| 228
| 0.579621
|
| true
| true
|
7906841bba77c73ef7765d83bc5c66a34245f795
| 9,735
|
py
|
Python
|
manim/animation/transform_matching_parts.py
|
aburousan/manim
|
c11b649e9aed34976844e6a131fb12e2a30c7bc8
|
[
"MIT"
] | null | null | null |
manim/animation/transform_matching_parts.py
|
aburousan/manim
|
c11b649e9aed34976844e6a131fb12e2a30c7bc8
|
[
"MIT"
] | null | null | null |
manim/animation/transform_matching_parts.py
|
aburousan/manim
|
c11b649e9aed34976844e6a131fb12e2a30c7bc8
|
[
"MIT"
] | null | null | null |
"""Animations that try to transform Mobjects while keeping track of identical parts."""
__all__ = ["TransformMatchingShapes", "TransformMatchingTex"]
from typing import TYPE_CHECKING, List, Optional
import numpy as np
from .._config import config
from ..mobject.mobject import Group, Mobject
from ..mobject.opengl_mobject import OpenGLGroup, OpenGLMobject
from ..mobject.types.opengl_vectorized_mobject import OpenGLVGroup, OpenGLVMobject
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from .composition import AnimationGroup
from .fading import FadeIn, FadeOut
from .transform import FadeTransformPieces, Transform
if TYPE_CHECKING:
from ..scene.scene import Scene
class TransformMatchingAbstractBase(AnimationGroup):
"""Abstract base class for transformations that keep track of matching parts.
Subclasses have to implement the two static methods
:meth:`~.TransformMatchingAbstractBase.get_mobject_parts` and
:meth:`~.TransformMatchingAbstractBase.get_mobject_key`.
Basically, this transformation first maps all submobjects returned
by the ``get_mobject_parts`` method to certain keys by applying the
``get_mobject_key`` method. Then, submobjects with matching keys
are transformed into each other.
Parameters
----------
mobject
The starting :class:`~.Mobject`.
target_mobject
The target :class:`~.Mobject`.
transform_mismatches
Controls whether submobjects without a matching key are transformed
into each other by using :class:`~.Transform`. Default: ``False``.
fade_transform_mismatches
Controls whether submobjects without a matching key are transformed
into each other by using :class:`~.FadeTransform`. Default: ``False``.
key_map
Optional. A dictionary mapping keys belonging to some of the starting mobject's
submobjects (i.e., the return values of the ``get_mobject_key`` method)
to some keys belonging to the target mobject's submobjects that should
be transformed although the keys don't match.
kwargs
All further keyword arguments are passed to the submobject transformations.
Note
----
If neither ``transform_mismatches`` nor ``fade_transform_mismatches``
are set to ``True``, submobjects without matching keys in the starting
mobject are faded out in the direction of the unmatched submobjects in
the target mobject, and unmatched submobjects in the target mobject
are faded in from the direction of the unmatched submobjects in the
start mobject.
"""
def __init__(
self,
mobject: "Mobject",
target_mobject: "Mobject",
transform_mismatches: bool = False,
fade_transform_mismatches: bool = False,
key_map: Optional[dict] = None,
**kwargs
):
assert type(mobject) is type(target_mobject)
if isinstance(mobject, OpenGLVMobject):
group_type = OpenGLVGroup
elif isinstance(mobject, OpenGLMobject):
group_type = OpenGLGroup
elif isinstance(mobject, VMobject):
group_type = VGroup
else:
group_type = Group
source_map = self.get_shape_map(mobject)
target_map = self.get_shape_map(target_mobject)
if key_map is None:
key_map = {}
# Create two mobjects whose submobjects all match each other
# according to whatever keys are used for source_map and
# target_map
transform_source = group_type()
transform_target = group_type()
kwargs["final_alpha_value"] = 0
for key in set(source_map).intersection(target_map):
transform_source.add(source_map[key])
transform_target.add(target_map[key])
anims = [Transform(transform_source, transform_target, **kwargs)]
# User can manually specify when one part should transform
# into another despite not matching by using key_map
key_mapped_source = group_type()
key_mapped_target = group_type()
for key1, key2 in key_map.items():
if key1 in source_map and key2 in target_map:
key_mapped_source.add(source_map[key1])
key_mapped_target.add(target_map[key2])
source_map.pop(key1, None)
target_map.pop(key2, None)
if len(key_mapped_source) > 0:
anims.append(
FadeTransformPieces(key_mapped_source, key_mapped_target, **kwargs)
)
fade_source = group_type()
fade_target = group_type()
for key in set(source_map).difference(target_map):
fade_source.add(source_map[key])
for key in set(target_map).difference(source_map):
fade_target.add(target_map[key])
if transform_mismatches:
if "replace_mobject_with_target_in_scene" not in kwargs:
kwargs["replace_mobject_with_target_in_scene"] = True
anims.append(Transform(fade_source, fade_target, **kwargs))
elif fade_transform_mismatches:
anims.append(FadeTransformPieces(fade_source, fade_target, **kwargs))
else:
anims.append(FadeOut(fade_source, target_position=fade_target, **kwargs))
anims.append(
FadeIn(fade_target.copy(), target_position=fade_target, **kwargs)
)
super().__init__(*anims)
self.to_remove = mobject
self.to_add = target_mobject
def get_shape_map(self, mobject: "Mobject") -> dict:
shape_map = {}
for sm in self.get_mobject_parts(mobject):
key = self.get_mobject_key(sm)
if key not in shape_map:
if config["renderer"] == "opengl":
shape_map[key] = OpenGLVGroup()
else:
shape_map[key] = VGroup()
shape_map[key].add(sm)
return shape_map
def clean_up_from_scene(self, scene: "Scene") -> None:
for anim in self.animations:
anim.interpolate(0)
scene.remove(self.mobject)
scene.remove(self.to_remove)
scene.add(self.to_add)
@staticmethod
def get_mobject_parts(mobject: "Mobject"):
raise NotImplementedError("To be implemented in subclass.")
@staticmethod
def get_mobject_key(mobject: "Mobject"):
raise NotImplementedError("To be implemented in subclass.")
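# A minimal sketch (not part of manim) of a concrete subclass, illustrating
# the contract described in the base class docstring: parts returned by
# ``get_mobject_parts`` are bucketed by the value of ``get_mobject_key``, and
# parts with equal keys are transformed into each other. Matching by color is
# an illustrative choice only.
class TransformMatchingColor(TransformMatchingAbstractBase):
    @staticmethod
    def get_mobject_parts(mobject: "Mobject") -> List["Mobject"]:
        return mobject.family_members_with_points()

    @staticmethod
    def get_mobject_key(mobject: "Mobject") -> str:
        # assumes every relevant submobject exposes get_color()
        return str(mobject.get_color())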
class TransformMatchingShapes(TransformMatchingAbstractBase):
"""An animation trying to transform groups by matching the shape
of their submobjects.
Two submobjects match if the hash of their point coordinates after
normalization (i.e., after translation to the origin, fixing the submobject
height at 1 unit, and rounding the coordinates to three decimal places)
matches.
See also
--------
:class:`~.TransformMatchingAbstractBase`
Examples
--------
.. manim:: Anagram
class Anagram(Scene):
def construct(self):
src = Text("the morse code")
tar = Text("here come dots")
self.play(Write(src))
self.wait(0.5)
self.play(TransformMatchingShapes(src, tar, path_arc=PI/2))
self.wait(0.5)
"""
def __init__(
self,
mobject: "Mobject",
target_mobject: "Mobject",
transform_mismatches: bool = False,
fade_transform_mismatches: bool = False,
key_map: Optional[dict] = None,
**kwargs
):
super().__init__(
mobject,
target_mobject,
transform_mismatches=transform_mismatches,
fade_transform_mismatches=fade_transform_mismatches,
key_map=key_map,
**kwargs
)
@staticmethod
def get_mobject_parts(mobject: "Mobject") -> List["Mobject"]:
return mobject.family_members_with_points()
@staticmethod
def get_mobject_key(mobject: "Mobject") -> int:
mobject.save_state()
mobject.center()
mobject.set_height(1)
result = hash(np.round(mobject.points, 3).tobytes())
mobject.restore()
return result
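# The key computed above is invariant under translation and uniform scaling:
# two glyphs drawn at different positions and sizes hash identically as long
# as their point clouds agree to three decimal places once centered and
# scaled to height 1. A quick sketch (assuming Text renders identical glyphs
# for identical characters):
#
#     src, tar = Text("stop"), Text("pots")
#     src_keys = {TransformMatchingShapes.get_mobject_key(m)
#                 for m in TransformMatchingShapes.get_mobject_parts(src)}
#     # every glyph of ``tar`` should reuse a key from ``src_keys``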
class TransformMatchingTex(TransformMatchingAbstractBase):
"""A transformation trying to transform rendered LaTeX strings.
Two submobjects match if their ``tex_string`` matches.
See also
--------
:class:`~.TransformMatchingAbstractBase`
Examples
--------
.. manim:: MatchingEquationParts
class MatchingEquationParts(Scene):
def construct(self):
eq1 = MathTex("{{a^2}} + {{b^2}} = {{c^2}}")
eq2 = MathTex("{{a^2}} = {{c^2}} - {{b^2}}")
self.add(eq1)
self.wait(0.5)
self.play(TransformMatchingTex(eq1, eq2))
self.wait(0.5)
"""
def __init__(
self,
mobject: "Mobject",
target_mobject: "Mobject",
transform_mismatches: bool = False,
fade_transform_mismatches: bool = False,
key_map: Optional[dict] = None,
**kwargs
):
assert hasattr(mobject, "tex_string")
assert hasattr(target_mobject, "tex_string")
super().__init__(
mobject,
target_mobject,
transform_mismatches=transform_mismatches,
fade_transform_mismatches=fade_transform_mismatches,
key_map=key_map,
**kwargs
)
@staticmethod
def get_mobject_parts(mobject: "Mobject") -> List["Mobject"]:
return mobject.submobjects
@staticmethod
def get_mobject_key(mobject: "Mobject") -> str:
return mobject.tex_string
| 34.521277
| 87
| 0.642732
|
__all__ = ["TransformMatchingShapes", "TransformMatchingTex"]
from typing import TYPE_CHECKING, List, Optional
import numpy as np
from .._config import config
from ..mobject.mobject import Group, Mobject
from ..mobject.opengl_mobject import OpenGLGroup, OpenGLMobject
from ..mobject.types.opengl_vectorized_mobject import OpenGLVGroup, OpenGLVMobject
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from .composition import AnimationGroup
from .fading import FadeIn, FadeOut
from .transform import FadeTransformPieces, Transform
if TYPE_CHECKING:
from ..scene.scene import Scene
class TransformMatchingAbstractBase(AnimationGroup):
def __init__(
self,
mobject: "Mobject",
target_mobject: "Mobject",
transform_mismatches: bool = False,
fade_transform_mismatches: bool = False,
key_map: Optional[dict] = None,
**kwargs
):
assert type(mobject) is type(target_mobject)
if isinstance(mobject, OpenGLVMobject):
group_type = OpenGLVGroup
elif isinstance(mobject, OpenGLMobject):
group_type = OpenGLGroup
elif isinstance(mobject, VMobject):
group_type = VGroup
else:
group_type = Group
source_map = self.get_shape_map(mobject)
target_map = self.get_shape_map(target_mobject)
if key_map is None:
key_map = {}
transform_source = group_type()
transform_target = group_type()
kwargs["final_alpha_value"] = 0
for key in set(source_map).intersection(target_map):
transform_source.add(source_map[key])
transform_target.add(target_map[key])
anims = [Transform(transform_source, transform_target, **kwargs)]
key_mapped_source = group_type()
key_mapped_target = group_type()
for key1, key2 in key_map.items():
if key1 in source_map and key2 in target_map:
key_mapped_source.add(source_map[key1])
key_mapped_target.add(target_map[key2])
source_map.pop(key1, None)
target_map.pop(key2, None)
if len(key_mapped_source) > 0:
anims.append(
FadeTransformPieces(key_mapped_source, key_mapped_target, **kwargs)
)
fade_source = group_type()
fade_target = group_type()
for key in set(source_map).difference(target_map):
fade_source.add(source_map[key])
for key in set(target_map).difference(source_map):
fade_target.add(target_map[key])
if transform_mismatches:
if "replace_mobject_with_target_in_scene" not in kwargs:
kwargs["replace_mobject_with_target_in_scene"] = True
anims.append(Transform(fade_source, fade_target, **kwargs))
elif fade_transform_mismatches:
anims.append(FadeTransformPieces(fade_source, fade_target, **kwargs))
else:
anims.append(FadeOut(fade_source, target_position=fade_target, **kwargs))
anims.append(
FadeIn(fade_target.copy(), target_position=fade_target, **kwargs)
)
super().__init__(*anims)
self.to_remove = mobject
self.to_add = target_mobject
def get_shape_map(self, mobject: "Mobject") -> dict:
shape_map = {}
for sm in self.get_mobject_parts(mobject):
key = self.get_mobject_key(sm)
if key not in shape_map:
if config["renderer"] == "opengl":
shape_map[key] = OpenGLVGroup()
else:
shape_map[key] = VGroup()
shape_map[key].add(sm)
return shape_map
def clean_up_from_scene(self, scene: "Scene") -> None:
for anim in self.animations:
anim.interpolate(0)
scene.remove(self.mobject)
scene.remove(self.to_remove)
scene.add(self.to_add)
@staticmethod
def get_mobject_parts(mobject: "Mobject"):
raise NotImplementedError("To be implemented in subclass.")
@staticmethod
def get_mobject_key(mobject: "Mobject"):
raise NotImplementedError("To be implemented in subclass.")
class TransformMatchingShapes(TransformMatchingAbstractBase):
def __init__(
self,
mobject: "Mobject",
target_mobject: "Mobject",
transform_mismatches: bool = False,
fade_transform_mismatches: bool = False,
key_map: Optional[dict] = None,
**kwargs
):
super().__init__(
mobject,
target_mobject,
transform_mismatches=transform_mismatches,
fade_transform_mismatches=fade_transform_mismatches,
key_map=key_map,
**kwargs
)
@staticmethod
def get_mobject_parts(mobject: "Mobject") -> List["Mobject"]:
return mobject.family_members_with_points()
@staticmethod
def get_mobject_key(mobject: "Mobject") -> int:
mobject.save_state()
mobject.center()
mobject.set_height(1)
result = hash(np.round(mobject.points, 3).tobytes())
mobject.restore()
return result
class TransformMatchingTex(TransformMatchingAbstractBase):
def __init__(
self,
mobject: "Mobject",
target_mobject: "Mobject",
transform_mismatches: bool = False,
fade_transform_mismatches: bool = False,
key_map: Optional[dict] = None,
**kwargs
):
assert hasattr(mobject, "tex_string")
assert hasattr(target_mobject, "tex_string")
super().__init__(
mobject,
target_mobject,
transform_mismatches=transform_mismatches,
fade_transform_mismatches=fade_transform_mismatches,
key_map=key_map,
**kwargs
)
@staticmethod
def get_mobject_parts(mobject: "Mobject") -> List["Mobject"]:
return mobject.submobjects
@staticmethod
def get_mobject_key(mobject: "Mobject") -> str:
return mobject.tex_string
| true
| true
|
7906842aa04794cd3c211bb571d43e53d2015736
| 1,936
|
py
|
Python
|
object_database/service_manager/ServiceBase.py
|
braxtonmckee/nativepython
|
5c64e91eb959fcd1c2c42655b40c7cceb3436f1d
|
[
"Apache-2.0"
] | 7
|
2018-08-07T15:41:54.000Z
|
2019-02-19T12:47:57.000Z
|
object_database/service_manager/ServiceBase.py
|
braxtonmckee/nativepython
|
5c64e91eb959fcd1c2c42655b40c7cceb3436f1d
|
[
"Apache-2.0"
] | 38
|
2018-10-17T13:37:46.000Z
|
2019-04-11T20:50:14.000Z
|
object_database/service_manager/ServiceBase.py
|
braxtonmckee/nativepython
|
5c64e91eb959fcd1c2c42655b40c7cceb3436f1d
|
[
"Apache-2.0"
] | 4
|
2019-02-11T17:44:55.000Z
|
2019-03-20T07:38:18.000Z
|
# Copyright 2018 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import object_database
class ServiceRuntimeConfig:
def __init__(self, serviceTemporaryStorageRoot, authToken, ownIpAddress):
self.serviceTemporaryStorageRoot = serviceTemporaryStorageRoot
self.authToken = authToken
self.ownIpAddress = ownIpAddress
class ServiceBase:
coresUsed = 1
gbRamUsed = 1
def __init__(self, db, serviceObject, runtimeConfig):
self.db = db
self.serviceObject = serviceObject
self.runtimeConfig = runtimeConfig
if self.serviceObject is not None:
self.serializationContext = self.serviceObject.getSerializationContext()
else:
self.serializationContext = None
@staticmethod
def configureFromCommandline(db, serviceObject, args):
"""Subclasses should take the remaining args from the commandline and configure using them"""
pass
    def initialize(self):
        """One-time setup hook; subclasses may override before doWork runs."""
        pass
def doWork(self, shouldStop):
# subclasses actually do work in here.
shouldStop.wait()
@staticmethod
def serviceDisplay(serviceObject, instance=None, objType=None, queryArgs=None):
return object_database.web.cells.Card("No details provided for service '%s'" % serviceObject.name)
@staticmethod
def serviceHeaderToggles(serviceObject, instance=None):
return []
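# A minimal sketch (not part of this module) of a concrete service built on
# ServiceBase. It assumes ``shouldStop`` behaves like a threading.Event,
# which the ``shouldStop.wait()`` call in the base class suggests.
class HeartbeatService(ServiceBase):
    def initialize(self):
        self.beats = 0

    def doWork(self, shouldStop):
        # loop until the service manager signals shutdown
        while not shouldStop.is_set():
            self.beats += 1
            shouldStop.wait(timeout=1.0)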
| 32.813559
| 106
| 0.710744
|
import object_database
class ServiceRuntimeConfig:
def __init__(self, serviceTemporaryStorageRoot, authToken, ownIpAddress):
self.serviceTemporaryStorageRoot = serviceTemporaryStorageRoot
self.authToken = authToken
self.ownIpAddress = ownIpAddress
class ServiceBase:
coresUsed = 1
gbRamUsed = 1
def __init__(self, db, serviceObject, runtimeConfig):
self.db = db
self.serviceObject = serviceObject
self.runtimeConfig = runtimeConfig
if self.serviceObject is not None:
self.serializationContext = self.serviceObject.getSerializationContext()
else:
self.serializationContext = None
@staticmethod
def configureFromCommandline(db, serviceObject, args):
pass
def initialize(self):
pass
def doWork(self, shouldStop):
shouldStop.wait()
@staticmethod
def serviceDisplay(serviceObject, instance=None, objType=None, queryArgs=None):
return object_database.web.cells.Card("No details provided for service '%s'" % serviceObject.name)
@staticmethod
def serviceHeaderToggles(serviceObject, instance=None):
return []
| true
| true
|
790684e667fe1f5a1bffb218997b9ba8a49a28d3
| 1,192
|
py
|
Python
|
dec06/test.py
|
einarssons/adventofcode2020
|
f5c6251b4eacf14e3947bfaaa27f3fac624ea923
|
[
"MIT"
] | null | null | null |
dec06/test.py
|
einarssons/adventofcode2020
|
f5c6251b4eacf14e3947bfaaa27f3fac624ea923
|
[
"MIT"
] | null | null | null |
dec06/test.py
|
einarssons/adventofcode2020
|
f5c6251b4eacf14e3947bfaaa27f3fac624ea923
|
[
"MIT"
] | null | null | null |
import unittest
from collections import namedtuple
import m
sample = """\
ab
ac
b
b\
"""
TestCase = namedtuple("TestCase", ["text", "output"])
class TestDec6(unittest.TestCase):
def test_get_groups(self):
cases = [
TestCase(sample, [['ab', 'ac'], ['b', 'b']]),
]
for c in cases:
result = m.read_groups(c.text)
self.assertEqual(result, c.output, c)
def test_count_answers(self):
cases = [
TestCase(sample, [3, 1]),
]
for c in cases:
groups = m.read_groups(c.text)
nrs = []
for group in groups:
result = m.union_answers(group)
nrs.append(result)
self.assertEqual(nrs, c.output, c)
def test_count_intersection_answers(self):
cases = [
TestCase(sample, [1, 1]),
]
for c in cases:
groups = m.read_groups(c.text)
nrs = []
for group in groups:
result = m.intersection_answers(group)
nrs.append(result)
self.assertEqual(nrs, c.output, c)
if __name__ == '__main__':
unittest.main()
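# The module ``m`` under test is not included here; from the assertions above
# it plausibly looks like the following sketch (an assumption, not the
# original implementation):
#
#     def read_groups(text):
#         return [group.splitlines() for group in text.split("\n\n")]
#
#     def union_answers(group):
#         return len(set().union(*map(set, group)))
#
#     def intersection_answers(group):
#         return len(set.intersection(*map(set, group)))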
| 22.074074
| 57
| 0.519295
|
import unittest
from collections import namedtuple
import m
sample = """\
ab
ac
b
b\
"""
TestCase = namedtuple("TestCase", ["text", "output"])
class TestDec6(unittest.TestCase):
def test_get_groups(self):
cases = [
TestCase(sample, [['ab', 'ac'], ['b', 'b']]),
]
for c in cases:
result = m.read_groups(c.text)
self.assertEqual(result, c.output, c)
def test_count_answers(self):
cases = [
TestCase(sample, [3, 1]),
]
for c in cases:
groups = m.read_groups(c.text)
nrs = []
for group in groups:
result = m.union_answers(group)
nrs.append(result)
self.assertEqual(nrs, c.output, c)
def test_count_intersection_answers(self):
cases = [
TestCase(sample, [1, 1]),
]
for c in cases:
groups = m.read_groups(c.text)
nrs = []
for group in groups:
result = m.intersection_answers(group)
nrs.append(result)
self.assertEqual(nrs, c.output, c)
if __name__ == '__main__':
unittest.main()
| true
| true
|
790685577165263740d7cb840e9c16824b5f5b8b
| 824
|
py
|
Python
|
demo_flask.py
|
archfool/nlp
|
8281053a60fd0ef0fae0a2a0649c8814adad6df5
|
[
"MIT"
] | 1
|
2019-06-05T05:44:53.000Z
|
2019-06-05T05:44:53.000Z
|
demo_flask.py
|
archfool/nlp
|
8281053a60fd0ef0fae0a2a0649c8814adad6df5
|
[
"MIT"
] | null | null | null |
demo_flask.py
|
archfool/nlp
|
8281053a60fd0ef0fae0a2a0649c8814adad6df5
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
return render_template('hello.html', name=name)
# Bind this handler to http://127.0.0.1:5000/user
@app.route("/user", methods=['GET', 'POST'])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
if username == "user" and password == "password":
return redirect("http://www.baidu.com")
else:
message = "Failed Login"
return render_template('login.html', message=message)
return render_template('login.html')
if __name__ == '__main__':
app.run()
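# Usage note (not part of the original script): run ``python demo_flask.py``,
# then visit http://127.0.0.1:5000/hello/world for the template route, or
# submit username/password to http://127.0.0.1:5000/user for the login flow.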
| 24.235294
| 65
| 0.654126
|
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
return render_template('hello.html', name=name)
@app.route("/user", methods=['GET', 'POST'])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
if username == "user" and password == "password":
return redirect("http://www.baidu.com")
else:
message = "Failed Login"
return render_template('login.html', message=message)
return render_template('login.html')
if __name__ == '__main__':
app.run()
| true
| true
|
790686288c54c7a0b265237e5541e7a2ebfc4a51
| 133,495
|
py
|
Python
|
lib/sqlalchemy/orm/query.py
|
slafs/sqlalchemy
|
156f473de00024688404d73aea305cd4fc452638
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/query.py
|
slafs/sqlalchemy
|
156f473de00024688404d73aea305cd4fc452638
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/orm/query.py
|
slafs/sqlalchemy
|
156f473de00024688404d73aea305cd4fc452638
|
[
"MIT"
] | null | null | null |
# orm/query.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The Query class and support.
Defines the :class:`.Query` class, the central
construct used by the ORM to construct database queries.
The :class:`.Query` class should not be confused with the
:class:`.Select` class, which defines database
SELECT operations at the SQL (non-ORM) level. ``Query`` differs from
``Select`` in that it returns ORM-mapped objects and interacts with an
ORM session, whereas the ``Select`` construct interacts directly with the
database to return iterable result sets.
"""
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
"""ORM-level SQL construction object.
:class:`.Query` is the source of all SELECT statements generated by the
ORM, both those formulated by end-user query operations as well as by
high level internal operations such as related collection loading. It
features a generative interface whereby successive calls return a new
:class:`.Query` object, a copy of the former with additional
criteria and options associated with it.
:class:`.Query` objects are normally initially generated using the
:meth:`~.Session.query` method of :class:`.Session`. For a full
walkthrough of :class:`.Query` usage, see the
:ref:`ormtutorial_toplevel`.
"""
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
"""Adapt incoming clauses to transformations which
have been applied within this query."""
adapters = []
# do we adapt all expression elements or only those
# tagged as 'ORM' constructs ?
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
# for the "from obj" alias, apply extra rule to the
# 'ORM only' check, if this query were generated from a
# subquery of itself, i.e. _from_selectable(), apply adaption
# to all SQL constructs.
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
# if 'orm only', look for ORM annotations
# in the element before adapting.
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
"""The full SELECT statement represented by this Query.
The statement by default will not have disambiguating labels
applied to the construct unless with_labels(True) is called
first.
"""
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
# TODO: there's no tests covering effects of
# the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
"""return the full SELECT statement represented by
this :class:`.Query`, embedded within an :class:`.Alias`.
Eager JOIN generation within the query is disabled.
:param name: string name to be assigned as the alias;
this is passed through to :meth:`.FromClause.alias`.
If ``None``, a name will be deterministically generated
at compile time.
:param with_labels: if True, :meth:`.with_labels` will be called
on the :class:`.Query` first to apply table-qualified labels
to all columns.
:param reduce_columns: if True, :meth:`.Select.reduce_columns` will
be called on the resulting :func:`.select` construct,
to remove same-named columns where one also refers to the other
via foreign key or WHERE clause equivalence.
.. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns``
keyword arguments were added.
"""
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
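    # A usage sketch (entity names are illustrative, not from this module):
    # embed a row-limited query as a named subquery and select from it.
    #
    #     subq = session.query(User.id, User.name).limit(5).subquery("top5")
    #     rows = session.query(subq.c.name).all()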
def cte(self, name=None, recursive=False):
"""Return the full SELECT statement represented by this
:class:`.Query` represented as a common table expression (CTE).
.. versionadded:: 0.7.6
Parameters and usage are the same as those of the
:meth:`.SelectBase.cte` method; see that method for
further details.
Here is the `Postgresql WITH
RECURSIVE example
<http://www.postgresql.org/docs/8.4/static/queries-with.html>`_.
Note that, in this example, the ``included_parts`` cte and the
``incl_alias`` alias of it are Core selectables, which
means the columns are accessed via the ``.c.`` attribute. The
``parts_alias`` object is an :func:`.orm.aliased` instance of the
``Part`` entity, so column-mapped attributes are available
directly::
from sqlalchemy.orm import aliased
class Part(Base):
__tablename__ = 'part'
part = Column(String, primary_key=True)
sub_part = Column(String, primary_key=True)
quantity = Column(Integer)
included_parts = session.query(
Part.sub_part,
Part.part,
Part.quantity).\\
filter(Part.part=="our part").\\
cte(name="included_parts", recursive=True)
incl_alias = aliased(included_parts, name="pr")
parts_alias = aliased(Part, name="p")
included_parts = included_parts.union_all(
session.query(
parts_alias.sub_part,
parts_alias.part,
parts_alias.quantity).\\
filter(parts_alias.part==incl_alias.c.sub_part)
)
q = session.query(
included_parts.c.sub_part,
func.sum(included_parts.c.quantity).
label('total_quantity')
).\\
group_by(included_parts.c.sub_part)
.. seealso::
:meth:`.SelectBase.cte`
"""
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted
to a scalar subquery with a label of the given name.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
"""Return the full SELECT statement represented by this
:class:`.Query`, converted to a scalar subquery.
Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`.
.. versionadded:: 0.6.5
"""
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
"""Return the :class:`.Select` object emitted by this :class:`.Query`.
Used for :func:`.inspect` compatibility, this is equivalent to::
query.enable_eagerloads(False).with_labels().statement
"""
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
"""Control whether or not eager joins and subqueries are
rendered.
When set to False, the returned Query will not render
eager joins regardless of :func:`~sqlalchemy.orm.joinedload`,
:func:`~sqlalchemy.orm.subqueryload` options
or mapper-level ``lazy='joined'``/``lazy='subquery'``
configurations.
This is used primarily when nesting the Query's
statement into a subquery or other
selectable, or when using :meth:`.Query.yield_per`.
"""
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
"""Apply column labels to the return value of Query.statement.
Indicates that this Query's `statement` accessor should return
a SELECT statement that applies labels to all columns in the
form <tablename>_<columnname>; this is commonly used to
disambiguate columns from multiple tables which have the same
name.
When the `Query` actually issues SQL to load rows, it always
uses column labeling.
"""
self._with_labels = True
@_generative()
def enable_assertions(self, value):
"""Control whether assertions are generated.
When set to False, the returned Query will
not assert its state before certain operations,
including that LIMIT/OFFSET has not been applied
when filter() is called, no criterion exists
when get() is called, and no "from_statement()"
exists when filter()/order_by()/group_by() etc.
is called. This more permissive mode is used by
custom Query subclasses to specify criterion or
other modifiers outside of the usual usage patterns.
Care should be taken to ensure that the usage
pattern is even possible. A statement applied
by from_statement() will override any criterion
set by filter() or order_by(), for example.
"""
self._enable_assertions = value
@property
def whereclause(self):
"""A readonly attribute which returns the current WHERE criterion for
this Query.
This returned value is a SQL expression construct, or ``None`` if no
criterion has been established.
"""
return self._criterion
@_generative()
def _with_current_path(self, path):
"""indicate that this query applies to objects loaded
within a certain path.
Used by deferred loaders (see strategies.py) which transfer
query options from an originating query to a newly generated
query intended for the deferred load.
"""
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
"""Load columns for inheriting classes.
:meth:`.Query.with_polymorphic` applies transformations
to the "main" mapped class represented by this :class:`.Query`.
The "main" mapped class here means the :class:`.Query`
object's first argument is a full class, i.e.
``session.query(SomeClass)``. These transformations allow additional
tables to be present in the FROM clause so that columns for a
joined-inheritance subclass are available in the query, both for the
purposes of load-time efficiency as well as the ability to use
these columns at query time.
See the documentation section :ref:`with_polymorphic` for
details on how this method is used.
.. versionchanged:: 0.8
A new and more flexible function
:func:`.orm.with_polymorphic` supersedes
:meth:`.Query.with_polymorphic`, as it can apply the equivalent
functionality to any set of columns or classes in the
:class:`.Query`, not just the "zero mapper". See that
function for a description of arguments.
"""
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
"""Yield only ``count`` rows at a time.
The purpose of this method is when fetching very large result sets
(> 10K rows), to batch results in sub-collections and yield them
out partially, so that the Python interpreter doesn't need to declare
very large areas of memory which is both time consuming and leads
to excessive memory use. The performance from fetching hundreds of
thousands of rows can often double when a suitable yield-per setting
(e.g. approximately 1000) is used, even with DBAPIs that buffer
rows (which are most).
The :meth:`.Query.yield_per` method **is not compatible with most
eager loading schemes, including subqueryload and joinedload with
collections**. For this reason, it may be helpful to disable
eager loads, either unconditionally with
:meth:`.Query.enable_eagerloads`::
q = sess.query(Object).yield_per(100).enable_eagerloads(False)
Or more selectively using :func:`.lazyload`; such as with
an asterisk to specify the default loader scheme::
q = sess.query(Object).yield_per(100).\\
options(lazyload('*'), joinedload(Object.some_related))
.. warning::
Use this method with caution; if the same instance is
present in more than one batch of rows, end-user changes
to attributes will be overwritten.
In particular, it's usually impossible to use this setting
with eagerly loaded collections (i.e. any lazy='joined' or
'subquery') since those collections will be cleared for a
new load when encountered in a subsequent result batch.
In the case of 'subquery' loading, the full result for all
rows is fetched which generally defeats the purpose of
:meth:`~sqlalchemy.orm.query.Query.yield_per`.
Also note that while
:meth:`~sqlalchemy.orm.query.Query.yield_per` will set the
``stream_results`` execution option to True, currently
this is only understood by
:mod:`~sqlalchemy.dialects.postgresql.psycopg2` dialect
which will stream results using server side cursors
instead of pre-buffer all rows for this query. Other
DBAPIs **pre-buffer all rows** before making them
available. The memory use of raw database rows is much less
than that of an ORM-mapped object, but should still be taken into
consideration when benchmarking.
.. seealso::
:meth:`.Query.enable_eagerloads`
"""
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
E.g.::
my_user = session.query(User).get(5)
some_object = session.query(VersionedFoo).get((5, 10))
:meth:`~.Query.get` is special in that it provides direct
access to the identity map of the owning :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`~.Query.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:meth:`~.Query.get` is only used to return a single
mapped instance, not multiple instances or
individual column constructs, and strictly
on a single primary key value. The originating
:class:`.Query` must be constructed in this way,
i.e. against a single mapped entity,
with no additional filtering criterion. Loading
options via :meth:`~.Query.options` may be applied
however, and will be used if the object is not
yet locally present.
A lazy-loading, many-to-one attribute configured
by :func:`.relationship`, using a simple
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
before querying the database. See :doc:`/orm/loading`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
the primary key. For a composite primary key,
the order of identifiers corresponds in most cases
to that of the mapped :class:`.Table` object's
primary key columns. For a :func:`.mapper` that
was given the ``primary key`` argument during
construction, the order of identifiers corresponds
to the elements present in this collection.
:return: The object instance, or ``None``.
"""
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
@_generative()
def correlate(self, *args):
"""Return a :class:`.Query` construct which will correlate the given
FROM clauses to that of an enclosing :class:`.Query` or
:func:`~.expression.select`.
The method here accepts mapped classes, :func:`.aliased` constructs,
and :func:`.mapper` constructs as arguments, which are resolved into
expression constructs, in addition to appropriate expression
constructs.
The correlation arguments are ultimately passed to
:meth:`.Select.correlate` after coercion to expression constructs.
The correlation arguments take effect in such cases
as when :meth:`.Query.from_self` is used, or when
a subquery as returned by :meth:`.Query.subquery` is
embedded in another :func:`~.expression.select` construct.
"""
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
"""Return a Query with a specific 'autoflush' setting.
Note that a Session with autoflush=False will
not autoflush, even if this flag is set to True at the
Query level. Therefore this flag is usually used only
to disable autoflush for a specific Query.
"""
self._autoflush = setting
@_generative()
def populate_existing(self):
"""Return a :class:`.Query` that will expire and refresh all instances
as they are loaded, or reused from the current :class:`.Session`.
:meth:`.populate_existing` does not improve behavior when
the ORM is used normally - the :class:`.Session` object's usual
behavior of maintaining a transaction and expiring all attributes
after rollback or commit handles object state automatically.
This method is not intended for general use.
"""
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
"""Set the 'invoke all eagers' flag which causes joined- and
subquery loaders to traverse into already-loaded related objects
and collections.
Default is that of :attr:`.Query._invoke_all_eagers`.
"""
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
"""Add filtering criterion that relates the given instance
to a child object or collection, using its attribute state
as well as an established :func:`.relationship()`
configuration.
The method uses the :func:`.with_parent` function to generate
the clause, the result of which is passed to :meth:`.Query.filter`.
Parameters are the same as :func:`.with_parent`, with the exception
that the given property can be None, in which case a search is
performed against this :class:`.Query` object's target mapper.
"""
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
"""add a mapped entity to the list of result columns
to be returned."""
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
"""Return a :class:`.Query` that will use the given :class:`.Session`.
"""
self.session = session
def from_self(self, *entities):
"""return a Query that selects from this Query's
SELECT statement.
\*entities - optional list of entities which will replace
those being selected.
"""
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
"""Return an iterator yielding result tuples corresponding
to the given list of columns"""
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
"""Return a scalar result corresponding to the given
column expression."""
try:
return next(self.values(column))[0]
except StopIteration:
return None
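    # Usage sketch for values()/value() (entity names are illustrative):
    #
    #     for name, email in session.query(User).values(User.name, User.email):
    #         ...
    #     newest_id = session.query(User).value(func.max(User.id))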
@_generative()
def with_entities(self, *entities):
"""Return a new :class:`.Query` replacing the SELECT list with the
given entities.
e.g.::
# Users, filtered on some arbitrary criterion
# and then ordered by related email address
q = session.query(User).\\
join(User.address).\\
filter(User.name.like('%ed%')).\\
order_by(Address.email)
# given *only* User.id==5, Address.email, and 'q', what
# would the *next* User in the result be ?
subq = q.with_entities(Address.email).\\
order_by(None).\\
filter(User.id==5).\\
subquery()
q = q.join((subq, subq.c.email < Address.email)).\\
limit(1)
.. versionadded:: 0.6.5
"""
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
"""Add one or more column expressions to the list
of result columns to be returned."""
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
"""Add a column expression to the list of result columns to be
returned.
Pending deprecation: :meth:`.add_column` will be superseded by
:meth:`.add_columns`.
"""
return self.add_columns(column)
def options(self, *args):
"""Return a new Query object, applying the given list of
mapper options.
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
:ref:`deferred` and :doc:`/orm/loading` for reference
documentation.
"""
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
"""Return a new :class:`.Query` object transformed by
the given function.
E.g.::
def filter_something(criterion):
def transform(q):
return q.filter(criterion)
return transform
q = q.with_transformation(filter_something(x==5))
This allows ad-hoc recipes to be created for :class:`.Query`
objects. See the example at :ref:`hybrid_transformers`.
.. versionadded:: 0.7.4
"""
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing or other executional context
hint for the given entity or selectable to
this :class:`.Query`.
Functionality is passed straight through to
:meth:`~sqlalchemy.sql.expression.Select.with_hint`,
with the addition that ``selectable`` can be a
:class:`.Table`, :class:`.Alias`, or ORM entity / mapped class
/etc.
.. seealso::
:meth:`.Query.with_statement_hint`
"""
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
"""add a statement hint to this :class:`.Select`.
This method is similar to :meth:`.Select.with_hint` except that
it does not require an individual table, and instead applies to the
statement as a whole.
This feature calls down into :meth:`.Select.with_statement_hint`.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.Query.with_hint`
"""
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
""" Set non-SQL options which take effect during execution.
The options are the same as those accepted by
:meth:`.Connection.execution_options`.
Note that the ``stream_results`` execution option is enabled
automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()`
method is used.
"""
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
"""Return a new :class:`.Query` object with the specified "locking mode",
which essentially refers to the ``FOR UPDATE`` clause.
.. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`.
:param mode: a string representing the desired locking mode.
Valid values are:
* ``None`` - translates to no lockmode
* ``'update'`` - translates to ``FOR UPDATE``
(standard SQL, supported by most dialects)
* ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT``
(supported by Oracle, PostgreSQL 8.1 upwards)
* ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL),
and ``FOR SHARE`` (for PostgreSQL)
.. seealso::
:meth:`.Query.with_for_update` - improved API for
specifying the ``FOR UPDATE`` clause.
"""
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
"""return a new :class:`.Query` with the specified options for the
``FOR UPDATE`` clause.
The behavior of this method is identical to that of
:meth:`.SelectBase.with_for_update`. When called with no arguments,
the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause
appended. When additional arguments are specified, backend-specific
options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE``
can take effect.
E.g.::
q = sess.query(User).with_for_update(nowait=True, of=User)
The above query on a Postgresql backend will render like::
SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT
.. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes
the :meth:`.Query.with_lockmode` method.
.. seealso::
:meth:`.GenerativeSelect.with_for_update` - Core level method with
full argument and behavioral description.
"""
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
"""add values for bind parameters which may have been
specified in filter().
parameters may be specified using \**kwargs, or optionally a single
dictionary as the first positional argument. The reason for both is
that \**kwargs is convenient, however some parameter dictionaries
contain unicode keys in which case \**kwargs cannot be used.
"""
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
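    # Usage sketch (illustrative): bind parameters declared in a textual
    # filter expression are supplied afterwards via params().
    #
    #     session.query(User).filter(text("id = :ident")).params(ident=5)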
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using SQL expressions.
e.g.::
session.query(MyClass).filter(MyClass.name == 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter(MyClass.name == 'some name', MyClass.id > 5)
The criterion is any SQL expression object applicable to the
WHERE clause of a select. String expressions are coerced
into SQL expression constructs via the :func:`.text` construct.
.. versionchanged:: 0.7.5
Multiple criteria joined by AND.
.. seealso::
:meth:`.Query.filter_by` - filter on keyword expressions.
"""
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
"""apply the given filtering criterion to a copy
of this :class:`.Query`, using keyword expressions.
e.g.::
session.query(MyClass).filter_by(name = 'some name')
Multiple criteria are joined together by AND::
session.query(MyClass).\\
filter_by(name = 'some name', id = 5)
The keyword expressions are extracted from the primary
entity of the query, or the last entity that was the
target of a call to :meth:`.Query.join`.
.. seealso::
:meth:`.Query.filter` - filter on SQL expressions.
"""
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
"""apply one or more ORDER BY criterion to the query and return
the newly resulting ``Query``
All existing ORDER BY settings can be suppressed by
passing ``None`` - this will suppress any ORDER BY configured
on mappers as well.
Alternatively, an existing ORDER BY setting on the Query
object can be entirely cancelled by passing ``False``
as the value - use this before calling methods where
an ORDER BY is invalid.
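
        E.g., an illustrative sketch (the ``User`` mapping is assumed)::

            q = session.query(User).order_by(User.name, User.id.desc())

            # suppress all ORDER BY, including mapper-configured ordering
            q = session.query(User).order_by(None)
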
"""
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
"""apply one or more GROUP BY criterion to the query and return
the newly resulting :class:`.Query`"""
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
"""apply a HAVING criterion to the query and return the
newly resulting :class:`.Query`.
:meth:`~.Query.having` is used in conjunction with
:meth:`~.Query.group_by`.
HAVING criterion makes it possible to use filters on aggregate
        functions like COUNT, SUM, AVG, MAX, and MIN, e.g.::
q = session.query(User.id).\\
join(User.addresses).\\
group_by(User.id).\\
having(func.count(Address.id) > 2)
"""
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
"""Produce a UNION of this Query against one or more queries.
e.g.::
q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar')
q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo')
q3 = q1.union(q2)
The method accepts multiple Query objects so as to control
the level of nesting. A series of ``union()`` calls such as::
x.union(y).union(z).all()
will nest on each ``union()``, and produces::
SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION
SELECT * FROM y) UNION SELECT * FROM Z)
Whereas::
x.union(y, z).all()
produces::
SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION
SELECT * FROM Z)
Note that many database backends do not allow ORDER BY to
be rendered on a query called within UNION, EXCEPT, etc.
To disable all ORDER BY clauses including those configured
on mappers, issue ``query.order_by(None)`` - the resulting
:class:`.Query` object will not render ORDER BY within
its SELECT statement.
"""
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
"""Produce a UNION ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
"""Produce an INTERSECT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
"""Produce an INTERSECT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
"""Produce an EXCEPT of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
"""Produce an EXCEPT ALL of this Query against one or more queries.
Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See
that method for usage examples.
"""
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
"""Create a SQL JOIN against this :class:`.Query` object's criterion
and apply generatively, returning the newly resulting :class:`.Query`.
**Simple Relationship Joins**
Consider a mapping between two classes ``User`` and ``Address``,
with a relationship ``User.addresses`` representing a collection
of ``Address`` objects associated with each ``User``. The most
common usage of :meth:`~.Query.join` is to create a JOIN along this
relationship, using the ``User.addresses`` attribute as an indicator
for how this should occur::
q = session.query(User).join(User.addresses)
Where above, the call to :meth:`~.Query.join` along ``User.addresses``
will result in SQL equivalent to::
SELECT user.* FROM user JOIN address ON user.id = address.user_id
In the above example we refer to ``User.addresses`` as passed to
:meth:`~.Query.join` as the *on clause*, that is, it indicates
how the "ON" portion of the JOIN should be constructed. For a
single-entity query such as the one above (i.e. we start by selecting
only from ``User`` and nothing else), the relationship can also be
specified by its string name::
q = session.query(User).join("addresses")
:meth:`~.Query.join` can also accommodate multiple
"on clause" arguments to produce a chain of joins, such as below
where a join across four related entities is constructed::
q = session.query(User).join("orders", "items", "keywords")
The above would be shorthand for three separate calls to
:meth:`~.Query.join`, each using an explicit attribute to indicate
the source entity::
q = session.query(User).\\
join(User.orders).\\
join(Order.items).\\
join(Item.keywords)
**Joins to a Target Entity or Selectable**
A second form of :meth:`~.Query.join` allows any mapped entity
or core selectable construct as a target. In this usage,
:meth:`~.Query.join` will attempt
to create a JOIN along the natural foreign key relationship between
two entities::
q = session.query(User).join(Address)
The above calling form of :meth:`~.Query.join` will raise an error if
either there are no foreign keys between the two entities, or if
there are multiple foreign key linkages between them. In the
above calling form, :meth:`~.Query.join` is called upon to
create the "on clause" automatically for us. The target can
be any mapped entity or selectable, such as a :class:`.Table`::
q = session.query(User).join(addresses_table)
**Joins to a Target with an ON Clause**
The third calling form allows both the target entity as well
as the ON clause to be passed explicitly. Suppose for
example we wanted to join to ``Address`` twice, using
an alias the second time. We use :func:`~sqlalchemy.orm.aliased`
to create a distinct alias of ``Address``, and join
to it using the ``target, onclause`` form, so that the
alias can be specified explicitly as the target along with
the relationship to instruct how the ON clause should proceed::
a_alias = aliased(Address)
q = session.query(User).\\
join(User.addresses).\\
join(a_alias, User.addresses).\\
filter(Address.email_address=='ed@foo.com').\\
filter(a_alias.email_address=='ed@bar.com')
Where above, the generated SQL would be similar to::
SELECT user.* FROM user
JOIN address ON user.id = address.user_id
JOIN address AS address_1 ON user.id=address_1.user_id
WHERE address.email_address = :email_address_1
AND address_1.email_address = :email_address_2
The two-argument calling form of :meth:`~.Query.join`
also allows us to construct arbitrary joins with SQL-oriented
"on clause" expressions, not relying upon configured relationships
at all. Any SQL expression can be passed as the ON clause
when using the two-argument form, which should refer to the target
entity in some way as well as an applicable source entity::
q = session.query(User).join(Address, User.id==Address.user_id)
.. versionchanged:: 0.7
In SQLAlchemy 0.6 and earlier, the two argument form of
:meth:`~.Query.join` requires the usage of a tuple:
``query(User).join((Address, User.id==Address.user_id))``\ .
This calling form is accepted in 0.7 and further, though
is not necessary unless multiple join conditions are passed to
a single :meth:`~.Query.join` call, which itself is also not
generally necessary as it is now equivalent to multiple
calls (this wasn't always the case).
        **Advanced Join Targeting and Adaptation**
There is a lot of flexibility in what the "target" can be when using
:meth:`~.Query.join`. As noted previously, it also accepts
:class:`.Table` constructs and other selectables such as
:func:`.alias` and :func:`.select` constructs, with either the one
or two-argument forms::
addresses_q = select([Address.user_id]).\\
where(Address.email_address.endswith("@bar.com")).\\
alias()
q = session.query(User).\\
join(addresses_q, addresses_q.c.user_id==User.id)
:meth:`~.Query.join` also features the ability to *adapt* a
        :func:`~sqlalchemy.orm.relationship` -driven ON clause to the target
selectable. Below we construct a JOIN from ``User`` to a subquery
against ``Address``, allowing the relationship denoted by
``User.addresses`` to *adapt* itself to the altered target::
address_subq = session.query(Address).\\
filter(Address.email_address == 'ed@foo.com').\\
subquery()
q = session.query(User).join(address_subq, User.addresses)
Producing SQL similar to::
SELECT user.* FROM user
JOIN (
SELECT address.id AS id,
address.user_id AS user_id,
address.email_address AS email_address
FROM address
WHERE address.email_address = :email_address_1
) AS anon_1 ON user.id = anon_1.user_id
The above form allows one to fall back onto an explicit ON
clause at any time::
q = session.query(User).\\
join(address_subq, User.id==address_subq.c.user_id)
**Controlling what to Join From**
While :meth:`~.Query.join` exclusively deals with the "right"
side of the JOIN, we can also control the "left" side, in those
cases where it's needed, using :meth:`~.Query.select_from`.
Below we construct a query against ``Address`` but can still
make usage of ``User.addresses`` as our ON clause by instructing
the :class:`.Query` to select first from the ``User``
entity::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which will produce SQL similar to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
**Constructing Aliases Anonymously**
:meth:`~.Query.join` can construct anonymous aliases
using the ``aliased=True`` flag. This feature is useful
when a query is being joined algorithmically, such as
when querying self-referentially to an arbitrary depth::
q = session.query(Node).\\
join("children", "children", aliased=True)
When ``aliased=True`` is used, the actual "alias" construct
is not explicitly available. To work with it, methods such as
:meth:`.Query.filter` will adapt the incoming entity to
the last join point::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1')
When using automatic aliasing, the ``from_joinpoint=True``
argument can allow a multi-node join to be broken into
multiple calls to :meth:`~.Query.join`, so that
each path along the way can be further filtered::
q = session.query(Node).\\
join("children", aliased=True).\\
                filter(Node.name == 'child 1').\\
join("children", aliased=True, from_joinpoint=True).\\
filter(Node.name == 'grandchild 1')
The filtering aliases above can then be reset back to the
original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
q = session.query(Node).\\
join("children", "children", aliased=True).\\
filter(Node.name == 'grandchild 1').\\
reset_joinpoint().\\
                filter(Node.name == 'parent 1')
For an example of ``aliased=True``, see the distribution
example :ref:`examples_xmlpersistence` which illustrates
an XPath-like query system using algorithmic joins.
:param \*props: A collection of one or more join conditions,
each consisting of a relationship-bound attribute or string
relationship name representing an "on clause", or a single
target entity, or a tuple in the form of ``(target, onclause)``.
          A special two-argument calling form, ``target, onclause``, is
          also accepted.
:param aliased=False: If True, indicate that the JOIN target should be
anonymously aliased. Subsequent calls to :meth:`~.Query.filter`
and similar will adapt the incoming criterion to the target
alias, until :meth:`~.Query.reset_joinpoint` is called.
:param isouter=False: If True, the join used will be a left outer join,
just as if the :meth:`.Query.outerjoin` method were called. This
flag is here to maintain consistency with the same flag as accepted
by :meth:`.FromClause.join` and other Core constructs.
.. versionadded:: 1.0.0
:param from_joinpoint=False: When using ``aliased=True``, a setting
of True here will cause the join to be from the most recent
joined target, rather than starting back from the original
FROM clauses of the query.
.. seealso::
:ref:`ormtutorial_joins` in the ORM tutorial.
:ref:`inheritance_toplevel` for details on how
:meth:`~.Query.join` is used for inheritance relationships.
:func:`.orm.join` - a standalone ORM-level join function,
used internally by :meth:`.Query.join`, which in previous
SQLAlchemy versions was the primary ORM-level joining interface.
"""
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
        if kwargs:
            raise TypeError("unknown arguments: %s" %
                            ','.join(kwargs))
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
"""Create a left outer join against this ``Query`` object's criterion
and apply generatively, returning the newly resulting ``Query``.
Usage is the same as the ``join()`` method.
"""
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
"""consumes arguments from join() or outerjoin(), places them into a
consistent format with which to form the actual JOIN constructs.
"""
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# The child's prev reference might be stale --
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
# and then mutate the child, which might be
# shared by a different query object.
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
# TODO: no coverage here
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
"""append a JOIN to the query's from clause."""
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
# if the target is a joined inheritance mapping,
# be more liberal about auto-aliasing.
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap # test for overlap:
# orm/inheritance/relationships.py
# SelfReferentialM2MTest
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
# if an alias() of the right side was generated here,
# apply an adapter to all subsequent filter() calls
# until reset_joinpoint() is called.
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
# if the onclause is a ClauseElement, adapt it with any
# adapters that are in place right now
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
# if an alias() on the right side was generated,
        # which is intended to wrap the right side in a subquery,
# ensure that columns retrieved from this target in the result
# set are also adapted.
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
"""Return a new :class:`.Query`, where the "join point" has
been reset back to the base FROM entities of the query.
This method is usually used in conjunction with the
``aliased=True`` feature of the :meth:`~.Query.join`
method. See the example in :meth:`~.Query.join` for how
this is used.
"""
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
"""Set the FROM clause of this :class:`.Query` explicitly.
:meth:`.Query.select_from` is often used in conjunction with
:meth:`.Query.join` in order to control which entity is selected
from on the "left" side of the join.
The entity or selectable object here effectively replaces the
"left edge" of any calls to :meth:`~.Query.join`, when no
joinpoint is otherwise established - usually, the default "join
point" is the leftmost entity in the :class:`~.Query` object's
list of entities to be selected.
A typical example::
q = session.query(Address).select_from(User).\\
join(User.addresses).\\
filter(User.name == 'ed')
Which produces SQL equivalent to::
SELECT address.* FROM user
JOIN address ON user.id=address.user_id
WHERE user.name = :name_1
:param \*from_obj: collection of one or more entities to apply
to the FROM clause. Entities can be mapped classes,
:class:`.AliasedClass` objects, :class:`.Mapper` objects
as well as core :class:`.FromClause` elements like subqueries.
.. versionchanged:: 0.9
This method no longer applies the given FROM object
to be the selectable from which matching entities
select from; the :meth:`.select_entity_from` method
now accomplishes this. See that method for a description
of this behavior.
.. seealso::
:meth:`~.Query.join`
:meth:`.Query.select_entity_from`
"""
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
"""Set the FROM clause of this :class:`.Query` to a
core selectable, applying it as a replacement FROM clause
for corresponding mapped entities.
This method is similar to the :meth:`.Query.select_from`
method, in that it sets the FROM clause of the query. However,
where :meth:`.Query.select_from` only affects what is placed
in the FROM, this method also applies the given selectable
to replace the FROM which the selected entities would normally
select from.
The given ``from_obj`` must be an instance of a :class:`.FromClause`,
e.g. a :func:`.select` or :class:`.Alias` construct.
An example would be a :class:`.Query` that selects ``User`` entities,
but uses :meth:`.Query.select_entity_from` to have the entities
selected from a :func:`.select` construct instead of the
base ``user`` table::
select_stmt = select([User]).where(User.id == 7)
q = session.query(User).\\
select_entity_from(select_stmt).\\
filter(User.name == 'ed')
The query generated will select ``User`` entities directly
from the given :func:`.select` construct, and will be::
SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name
FROM (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE anon_1.name = :name_1
Notice above that even the WHERE criterion was "adapted" such that
the ``anon_1`` subquery effectively replaces all references to the
``user`` table, except for the one that it refers to internally.
Compare this to :meth:`.Query.select_from`, which as of
version 0.9, does not affect existing entities. The
statement below::
q = session.query(User).\\
select_from(select_stmt).\\
filter(User.name == 'ed')
Produces SQL where both the ``user`` table as well as the
``select_stmt`` construct are present as separate elements
in the FROM clause. No "adaptation" of the ``user`` table
is applied::
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user", (SELECT "user".id AS id, "user".name AS name
FROM "user"
WHERE "user".id = :id_1) AS anon_1
WHERE "user".name = :name_1
:meth:`.Query.select_entity_from` maintains an older
behavior of :meth:`.Query.select_from`. In modern usage,
similar results can also be achieved using :func:`.aliased`::
select_stmt = select([User]).where(User.id == 7)
user_from_select = aliased(User, select_stmt.alias())
q = session.query(user_from_select)
:param from_obj: a :class:`.FromClause` object that will replace
the FROM clause of this :class:`.Query`.
.. seealso::
:meth:`.Query.select_from`
.. versionadded:: 0.8
            :meth:`.Query.select_entity_from` was added to specify
            the specific behavior of entity replacement; the
            :meth:`.Query.select_from` method maintained this behavior
            as well until 0.9.
"""
self._set_select_from([from_obj], True)
def __getitem__(self, item):
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
# perhaps we should execute a count() here so that we
# can still use LIMIT/OFFSET ?
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
"""apply LIMIT/OFFSET to the ``Query`` based on a "
"range and return the newly resulting ``Query``."""
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
"""Apply a ``LIMIT`` to the query and return the newly resulting
``Query``.
"""
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
"""Apply an ``OFFSET`` to the query and return the newly resulting
``Query``.
"""
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
"""Apply a ``DISTINCT`` to the query and return the newly resulting
``Query``.
        :param \*criterion: optional column expressions.  When present,
         the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
         construct.
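
        E.g., on the Postgresql backend, an illustrative sketch (the
        ``User`` mapping is assumed)::

            # SELECT DISTINCT ON (users.name) ...
            q = session.query(User).distinct(User.name)
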
"""
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
"""Apply the prefixes to the query and return the newly resulting
``Query``.
        :param \*prefixes: optional prefixes, typically strings, which
         are rendered after the ``SELECT`` keyword separated by spaces
         rather than commas.  This is particularly useful for MySQL
         keywords.
e.g.::
query = sess.query(User.name).\\
prefix_with('HIGH_PRIORITY').\\
prefix_with('SQL_SMALL_RESULT', 'ALL')
Would render::
SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
FROM users
.. versionadded:: 0.7.7
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
"""Return the results represented by this ``Query`` as a list.
This results in an execution of the underlying query.
"""
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
"""Execute the given SELECT statement and return results.
This method bypasses all internal statement compilation, and the
statement is executed without modification.
The statement is typically either a :func:`~.expression.text`
or :func:`~.expression.select` construct, and should return the set
of columns
appropriate to the entity class represented by this :class:`.Query`.
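
        E.g., a minimal sketch (table and column names are illustrative)::

            from sqlalchemy import text

            q = session.query(User).\\
                from_statement(
                    text("SELECT * FROM users WHERE users.name=:name")).\\
                params(name='ed')
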
.. seealso::
:ref:`orm_tutorial_literal_sql` - usage examples in the
ORM tutorial
"""
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
"""Return the first result of this ``Query`` or
None if the result doesn't contain any row.
first() applies a limit of one within the generated SQL, so that
only one primary entity row is generated on the server side
(note this may consist of multiple result rows if join-loaded
collections are present).
Calling ``first()`` results in an execution of the underlying query.
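
        E.g., an illustrative sketch (the ``User`` mapping is assumed)::

            user = session.query(User).\\
                filter(User.name == 'ed').\\
                first()
            if user is None:
                print("no such user")
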
"""
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
"""Return exactly one result or raise an exception.
Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
if multiple object identities are returned, or if multiple
rows are returned for a query that does not return object
identities.
Note that an entity query, that is, one which selects one or
more mapped classes as opposed to individual column attributes,
may ultimately represent many rows but only one row of
unique entity or entities - this is a successful result for one().
Calling ``one()`` results in an execution of the underlying query.
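
        E.g., a minimal sketch (the ``User`` mapping is assumed)::

            from sqlalchemy.orm.exc import (
                NoResultFound, MultipleResultsFound)

            try:
                user = session.query(User).filter(User.id == 5).one()
            except NoResultFound:
                user = None
            except MultipleResultsFound:
                raise
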
.. versionchanged:: 0.6
``one()`` fully fetches all results instead of applying
any kind of limit, so that the "unique"-ing of entities does not
conceal multiple object identities.
"""
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
"""Return the first element of the first result or None
if no rows present. If multiple rows are returned,
raises MultipleResultsFound.
>>> session.query(Item).scalar()
<Item>
>>> session.query(Item.id).scalar()
1
>>> session.query(Item.id).filter(Item.id < 0).scalar()
None
>>> session.query(Item.id, Item.name).scalar()
1
>>> session.query(func.count(Parent.id)).scalar()
20
This results in an execution of the underlying query.
"""
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
"""Return metadata about the columns which would be
returned by this :class:`.Query`.
Format is a list of dictionaries::
user_alias = aliased(User, name='user2')
q = sess.query(User, User.id, user_alias)
# this expression:
q.column_descriptions
# would return:
[
{
'name':'User',
'type':User,
'aliased':False,
'expr':User,
},
{
'name':'id',
'type':Integer(),
'aliased':False,
'expr':User.id,
},
{
'name':'user2',
'type':User,
'aliased':True,
'expr':user_alias
}
]
"""
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
"""Given a ResultProxy cursor as returned by connection.execute(),
return an ORM result as an iterator.
e.g.::
result = engine.execute("select * from users")
for u in session.query(User).instances(result):
                print(u)
"""
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session.
Given an iterator returned by a :class:`.Query` of the same structure
as this one, return an identical iterator of results, with all mapped
instances merged into the session using :meth:`.Session.merge`. This
is an optimized method which will merge all mapped instances,
preserving the structure of the result rows and unmapped columns with
less method overhead than that of calling :meth:`.Session.merge`
explicitly for each value.
The structure of the results is determined based on the column list of
this :class:`.Query` - if these do not correspond, unchecked errors
will occur.
The 'load' argument is the same as that of :meth:`.Session.merge`.
For an example of how :meth:`~.Query.merge_result` is used, see
the source code for the example :ref:`examples_caching`, where
:meth:`~.Query.merge_result` is used to efficiently restore state
from a cache back into a target :class:`.Session`.
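
        A hedged usage sketch; ``load_rows_from_cache()`` here is a
        hypothetical application-level helper, not part of SQLAlchemy::

            q = session.query(User)
            cached_rows = load_rows_from_cache(cache_key)
            for user in q.merge_result(cached_rows, load=False):
                pass  # instances are now merged into the session
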
"""
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
"""A convenience method that turns a query into an EXISTS subquery
of the form EXISTS (SELECT 1 FROM ... WHERE ...).
e.g.::
q = session.query(User).filter(User.name == 'fred')
session.query(q.exists())
Producing SQL similar to::
SELECT EXISTS (
SELECT 1 FROM users WHERE users.name = :name_1
) AS anon_1
The EXISTS construct is usually used in the WHERE clause::
session.query(User.id).filter(q.exists()).scalar()
Note that some databases such as SQL Server don't allow an
EXISTS expression to be present in the columns clause of a
SELECT. To select a simple boolean value based on the exists
as a WHERE, use :func:`.literal`::
from sqlalchemy import literal
session.query(literal(True)).filter(q.exists()).scalar()
.. versionadded:: 0.8.1
"""
# .add_columns() for the case that we are a query().select_from(X),
# so that ".statement" can be produced (#2995) but also without
# omitting the FROM clause from a query(X) (#2818);
# .with_only_columns() after we have a core select() so that
# we get just "SELECT 1" without any entities.
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
"""Return a count of rows this Query would return.
This generates the SQL for this Query as follows::
SELECT count(1) AS count_1 FROM (
SELECT <rest of query follows...>
) AS anon_1
.. versionchanged:: 0.7
The above scheme is newly refined as of 0.7b3.
For fine grained control over specific columns
to count, to skip the usage of a subquery or
otherwise control of the FROM clause,
or to use other aggregate functions,
use :attr:`~sqlalchemy.sql.expression.func`
expressions in conjunction
with :meth:`~.Session.query`, i.e.::
from sqlalchemy import func
# count User records, without
# using a subquery.
session.query(func.count(User.id))
# return count of user "id" grouped
# by "name"
session.query(func.count(User.id)).\\
group_by(User.name)
from sqlalchemy import distinct
# count distinct "name" values
session.query(func.count(distinct(User.name)))
"""
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
"""Perform a bulk delete query.
Deletes rows matched by this query from the database.
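
        E.g., an illustrative sketch (the ``User`` mapping is assumed)::

            affected = session.query(User).\\
                filter(User.name == 'ed').\\
                delete(synchronize_session=False)
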
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, objects may still remain in
the session which were in fact deleted which can lead to confusing
results if they are accessed via get() or already loaded
collections.
``'fetch'`` - performs a select query before the delete to find
objects that are matched by the delete query and need to be
removed from the session. Matched objects are removed from the
session.
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an error is raised. In that case you probably
want to use the 'fetch' strategy as a fallback.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
for any foreign key references which require it, otherwise the
database may emit an integrity violation if foreign key references
are being enforced.
After the DELETE, dependent objects in the :class:`.Session` which
were impacted by an ON DELETE may not contain the current
state, or may have been deleted. This issue is resolved once the
:class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`. Accessing an expired object
whose row has been deleted will invoke a SELECT to locate the
row; when the row is not found, an
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
* The :meth:`.MapperEvents.before_delete` and
:meth:`.MapperEvents.after_delete`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_delete` method is provided to act
upon a mass DELETE of entity rows.
.. seealso::
:meth:`.Query.update`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
# TODO: cascades need handling.
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate'):
"""Perform a bulk update query.
Updates rows matched by this query in the database.
E.g.::
sess.query(User).filter(User.age == 25).\
update({User.age: User.age - 10}, synchronize_session='fetch')
sess.query(User).filter(User.age == 25).\
update({"age": User.age - 10}, synchronize_session='evaluate')
:param values: a dictionary with attributes names, or alternatively
mapped attributes or SQL expressions, as keys, and literal
values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
strings were passed as literal column names with no mapper-level
translation.
:param synchronize_session: chooses the strategy to update the
attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
typically occurs after a commit(), or explicitly using
expire_all(). Before the expiration, updated objects may still
remain in the session with stale values on their attributes, which
can lead to confusing results.
``'fetch'`` - performs a select query before the update to find
objects that are matched by the update query. The updated
attributes are expired on matched objects.
``'evaluate'`` - Evaluate the Query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
implemented, an exception is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
"row count" feature.
This method has several key caveats:
* The method does **not** offer in-Python cascading of relationships
- it is assumed that ON UPDATE CASCADE is configured for any foreign
key references which require it, otherwise the database may emit an
integrity violation if foreign key references are being enforced.
After the UPDATE, dependent objects in the :class:`.Session` which
were impacted by an ON UPDATE CASCADE may not contain the current
state; this issue is resolved once the :class:`.Session` is expired,
which normally occurs upon :meth:`.Session.commit` or can be forced
by using :meth:`.Session.expire_all`.
* The method supports multiple table updates, as
detailed in :ref:`multi_table_updates`, and this behavior does
extend to support updates of joined-inheritance and other multiple
table mappings. However, the **join condition of an inheritance
mapper is currently not automatically rendered**.
Care must be taken in any multiple-table update to explicitly
include the joining condition between those tables, even in mappings
where this is normally automatic.
E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
the ``Engineer`` local table using criteria against the ``Employee``
local table might look like::
session.query(Engineer).\\
filter(Engineer.id == Employee.id).\\
filter(Employee.name == 'dilbert').\\
update({"engineer_type": "programmer"})
* The :meth:`.MapperEvents.before_update` and
:meth:`.MapperEvents.after_update`
events are **not** invoked from this method. Instead, the
:meth:`.SessionEvents.after_bulk_update` method is provided to act
upon a mass UPDATE of entity rows.
.. seealso::
:meth:`.Query.delete`
:ref:`inserts_and_updates` - Core SQL tutorial
"""
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
# "load from explicit FROMs" mode,
# i.e. when select_from() or join() is used
context.froms = list(context.from_clause)
else:
# "load from discrete FROMs" mode,
# i.e. when each _MappedEntity has its own FROM
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
# for eager joins present and LIMIT/OFFSET/DISTINCT,
# wrap the query inside a select,
# then append eager joins onto that
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
# TODO: this order_by is only needed if
# LIMIT/OFFSET is present in self._select_args,
# else the application on the outside is enough
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
# EagerLoader places a 'stop_on' attribute on the join,
# giving us a marker as to where the "splice point" of
# the join should be
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
"""Apply single-table-inheritance filtering.
For all distinct single-table-inheritance mappers represented in
the columns clause of this query, add criterion to the WHERE
clause of the given QueryContext such that only the appropriate
subtypes are selected from the total results.
"""
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
    def parse_legacy_query(cls, mode):
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
"""represent an entity column returned within a Query result."""
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
"""mapper/class/AliasedClass entity"""
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
"""Receive an update from a call to query.with_polymorphic().
        Note that the newer style of using a free-standing
        with_polymorphic() construct doesn't make use of this method.
"""
if self.is_aliased_class:
# TODO: invalidrequest ?
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
"""A grouping of SQL expressions that are returned by a :class:`.Query`
under one namespace.
The :class:`.Bundle` essentially allows nesting of the tuple-based
results returned by a column-oriented :class:`.Query` object. It also
is extensible via simple subclassing, where the primary capability
to override is that of how the set of expressions should be returned,
allowing post-processing as well as custom return types, without
involving ORM identity-mapped classes.
.. versionadded:: 0.9.0
.. seealso::
:ref:`bundles`
"""
single_entity = False
"""If True, queries for a single Bundle will be returned as a single
entity, rather than an element within a keyed tuple."""
def __init__(self, name, *exprs, **kw):
"""Construct a new :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
for row in session.query(bn).filter(
bn.c.x == 5).filter(bn.c.y == 4):
print(row.mybundle.x, row.mybundle.y)
:param name: name of the bundle.
:param \*exprs: columns or SQL expressions comprising the bundle.
:param single_entity=False: if True, rows for this :class:`.Bundle`
can be returned as a "single entity" outside of any enclosing tuple
in the same manner as a mapped entity.
"""
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
"""A namespace of SQL expressions referred to by this :class:`.Bundle`.
e.g.::
bn = Bundle("mybundle", MyClass.x, MyClass.y)
q = sess.query(bn).filter(bn.c.x == 5)
Nesting of bundles is also supported::
b1 = Bundle("b1",
Bundle('b2', MyClass.a, MyClass.b),
Bundle('b3', MyClass.x, MyClass.y)
)
q = sess.query(b1).filter(
b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9)
.. seealso::
:attr:`.Bundle.c`
"""
c = None
"""An alias for :attr:`.Bundle.columns`."""
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
"""Provide a copy of this :class:`.Bundle` passing a new label."""
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
"""Produce the "row processing" function for this :class:`.Bundle`.
May be overridden by subclasses.
.. seealso::
:ref:`bundles` - includes an example of subclassing.
"""
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
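    # Illustrative sketch (not part of the original module): subclasses may
    # override create_row_processor() to change the returned row type, e.g.
    # to yield plain dictionaries instead of named tuples::
    #
    #     class DictBundle(Bundle):
    #         def create_row_processor(self, query, procs, labels):
    #             def proc(row):
    #                 return dict(zip(labels, (p(row) for p in procs)))
    #             return proc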
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
# TODO: this seems to have no effect for
# _ColumnEntity either
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
# c._label_name = self._label_name
# c.entity_zero = self.entity_zero
# c.entities = self.entities
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
"""Column/expression based entity."""
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
# If the Column is unnamed, give it a
# label() so that mutable column expressions
# can be located in the result even
# if the expression's identity has been changed
# due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query`
that the main table has been aliased.
This is a seldom-used option to suit the
very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
ulist = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = ulist.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias(ulist),
contains_eager(User.addresses))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
| 36.305412
| 84
| 0.594135
|
from itertools import chain
from . import (
attributes, interfaces, object_mapper, persistence,
exc as orm_exc, loading
)
from .base import _entity_descriptor, _is_aliased_class, \
_is_mapped_class, _orm_columns, _generative
from .path_registry import PathRegistry
from .util import (
AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased
)
from .. import sql, util, log, exc as sa_exc, inspect, inspection
from ..sql.expression import _interpret_as_from
from ..sql import (
util as sql_util,
expression, visitors
)
from ..sql.base import ColumnCollection
from . import properties
__all__ = ['Query', 'QueryContext', 'aliased']
_path_registry = PathRegistry.root
@inspection._self_inspects
@log.class_logger
class Query(object):
_enable_eagerloads = True
_enable_assertions = True
_with_labels = False
_criterion = None
_yield_per = None
_order_by = False
_group_by = False
_having = None
_distinct = False
_prefixes = None
_offset = None
_limit = None
_for_update_arg = None
_statement = None
_correlate = frozenset()
_populate_existing = False
_invoke_all_eagers = True
_version_check = False
_autoflush = True
_only_load_props = None
_refresh_state = None
_from_obj = ()
_join_entities = ()
_select_from_entity = None
_mapper_adapter_map = {}
_filter_aliases = None
_from_obj_alias = None
_joinpath = _joinpoint = util.immutabledict()
_execution_options = util.immutabledict()
_params = util.immutabledict()
_attributes = util.immutabledict()
_with_options = ()
_with_hints = ()
_enable_single_crit = True
_current_path = _path_registry
def __init__(self, entities, session=None):
self.session = session
self._polymorphic_adapters = {}
self._set_entities(entities)
def _set_entities(self, entities, entity_wrapper=None):
if entity_wrapper is None:
entity_wrapper = _QueryEntity
self._entities = []
self._primary_entity = None
for ent in util.to_list(entities):
entity_wrapper(self, ent)
self._set_entity_selectables(self._entities)
def _set_entity_selectables(self, entities):
self._mapper_adapter_map = d = self._mapper_adapter_map.copy()
for ent in entities:
for entity in ent.entities:
if entity not in d:
ext_info = inspect(entity)
if not ext_info.is_aliased_class and \
ext_info.mapper.with_polymorphic:
if ext_info.mapper.mapped_table not in \
self._polymorphic_adapters:
self._mapper_loads_polymorphically_with(
ext_info.mapper,
sql_util.ColumnAdapter(
ext_info.selectable,
ext_info.mapper._equivalent_columns
)
)
aliased_adapter = None
elif ext_info.is_aliased_class:
aliased_adapter = ext_info._adapter
else:
aliased_adapter = None
d[entity] = (
ext_info,
aliased_adapter
)
ent.setup_entity(*d[entity])
def _mapper_loads_polymorphically_with(self, mapper, adapter):
for m2 in mapper._with_polymorphic_mappers or [mapper]:
self._polymorphic_adapters[m2] = adapter
for m in m2.iterate_to_root():
self._polymorphic_adapters[m.local_table] = adapter
def _set_select_from(self, obj, set_base_alias):
fa = []
select_from_alias = None
for from_obj in obj:
info = inspect(from_obj)
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
if set_base_alias:
raise sa_exc.ArgumentError(
"A selectable (FromClause) instance is "
"expected when the base alias is being set.")
fa.append(info.selectable)
elif not info.is_selectable:
raise sa_exc.ArgumentError(
"argument is not a mapped class, mapper, "
"aliased(), or FromClause instance.")
else:
if isinstance(from_obj, expression.SelectBase):
from_obj = from_obj.alias()
if set_base_alias:
select_from_alias = from_obj
fa.append(from_obj)
self._from_obj = tuple(fa)
if set_base_alias and \
len(self._from_obj) == 1 and \
isinstance(select_from_alias, expression.Alias):
equivs = self.__all_equivs()
self._from_obj_alias = sql_util.ColumnAdapter(
self._from_obj[0], equivs)
def _reset_polymorphic_adapter(self, mapper):
for m2 in mapper._with_polymorphic_mappers:
self._polymorphic_adapters.pop(m2, None)
for m in m2.iterate_to_root():
self._polymorphic_adapters.pop(m.local_table, None)
def _adapt_polymorphic_element(self, element):
if "parententity" in element._annotations:
search = element._annotations['parententity']
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
if isinstance(element, expression.FromClause):
search = element
elif hasattr(element, 'table'):
search = element.table
else:
return None
alias = self._polymorphic_adapters.get(search, None)
if alias:
return alias.adapt_clause(element)
def _adapt_col_list(self, cols):
return [
self._adapt_clause(
expression._literal_as_label_reference(o),
True, True)
for o in cols
]
@_generative()
def _adapt_all_clauses(self):
self._orm_only_adapt = False
def _adapt_clause(self, clause, as_filter, orm_only):
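        # collect the applicable adapters (filter aliases, the from-object
        # alias, polymorphic adapters) and apply them to the clause; when
        # orm_only is set, only elements annotated as ORM constructs are
        # adapted.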
adapters = []
orm_only = getattr(self, '_orm_only_adapt', orm_only)
if as_filter and self._filter_aliases:
for fa in self._filter_aliases._visitor_iterator:
adapters.append(
(
orm_only, fa.replace
)
)
if self._from_obj_alias:
adapters.append(
(
getattr(self, '_orm_only_from_obj_alias', orm_only),
self._from_obj_alias.replace
)
)
if self._polymorphic_adapters:
adapters.append(
(
orm_only, self._adapt_polymorphic_element
)
)
if not adapters:
return clause
def replace(elem):
for _orm_only, adapter in adapters:
if not _orm_only or \
'_orm_adapt' in elem._annotations or \
"parententity" in elem._annotations:
e = adapter(elem)
if e is not None:
return e
return visitors.replacement_traverse(
clause,
{},
replace
)
def _entity_zero(self):
return self._entities[0]
def _mapper_zero(self):
return self._select_from_entity or \
self._entity_zero().entity_zero
@property
def _mapper_entities(self):
for ent in self._entities:
if isinstance(ent, _MapperEntity):
yield ent
def _joinpoint_zero(self):
return self._joinpoint.get(
'_joinpoint_entity',
self._mapper_zero()
)
def _mapper_zero_or_none(self):
if self._primary_entity:
return self._primary_entity.mapper
else:
return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._mapper_zero()
def _only_full_mapper_zero(self, methname):
if self._entities != [self._primary_entity]:
raise sa_exc.InvalidRequestError(
"%s() can only be used against "
"a single mapped class." % methname)
return self._primary_entity.entity_zero
def _only_entity_zero(self, rationale=None):
if len(self._entities) > 1:
raise sa_exc.InvalidRequestError(
rationale or
"This operation requires a Query "
"against a single mapper."
)
return self._entity_zero()
def __all_equivs(self):
equivs = {}
for ent in self._mapper_entities:
equivs.update(ent.mapper._equivalent_columns)
return equivs
def _get_condition(self):
return self._no_criterion_condition(
"get", order_by=False, distinct=False)
def _get_existing_condition(self):
self._no_criterion_assertion("get", order_by=False, distinct=False)
def _no_criterion_assertion(self, meth, order_by=True, distinct=True):
if not self._enable_assertions:
return
if self._criterion is not None or \
self._statement is not None or self._from_obj or \
self._limit is not None or self._offset is not None or \
self._group_by or (order_by and self._order_by) or \
(distinct and self._distinct):
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
def _no_criterion_condition(self, meth, order_by=True, distinct=True):
self._no_criterion_assertion(meth, order_by, distinct)
self._from_obj = ()
self._statement = self._criterion = None
self._order_by = self._group_by = self._distinct = False
def _no_clauseelement_condition(self, meth):
if not self._enable_assertions:
return
if self._order_by:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a "
"Query with existing criterion. " % meth)
self._no_criterion_condition(meth)
def _no_statement_condition(self, meth):
if not self._enable_assertions:
return
if self._statement is not None:
raise sa_exc.InvalidRequestError(
("Query.%s() being called on a Query with an existing full "
"statement - can't apply criterion.") % meth)
def _no_limit_offset(self, meth):
if not self._enable_assertions:
return
if self._limit is not None or self._offset is not None:
raise sa_exc.InvalidRequestError(
"Query.%s() being called on a Query which already has LIMIT "
"or OFFSET applied. To modify the row-limited results of a "
" Query, call from_self() first. "
"Otherwise, call %s() before limit() or offset() "
"are applied."
% (meth, meth)
)
def _no_select_modifiers(self, meth):
if not self._enable_assertions:
return
for attr, methname, notset in (
('_limit', 'limit()', None),
('_offset', 'offset()', None),
('_order_by', 'order_by()', False),
('_group_by', 'group_by()', False),
('_distinct', 'distinct()', False),
):
if getattr(self, attr) is not notset:
raise sa_exc.InvalidRequestError(
"Can't call Query.%s() when %s has been called" %
(meth, methname)
)
def _get_options(self, populate_existing=None,
version_check=None,
only_load_props=None,
refresh_state=None):
if populate_existing:
self._populate_existing = populate_existing
if version_check:
self._version_check = version_check
if refresh_state:
self._refresh_state = refresh_state
if only_load_props:
self._only_load_props = set(only_load_props)
return self
def _clone(self):
cls = self.__class__
q = cls.__new__(cls)
q.__dict__ = self.__dict__.copy()
return q
@property
def statement(self):
stmt = self._compile_context(labels=self._with_labels).\
statement
if self._params:
stmt = stmt.params(self._params)
        # TODO: there's no tests covering effects of
        # the annotation not being there
return stmt._annotate({'no_replacement_traverse': True})
def subquery(self, name=None, with_labels=False, reduce_columns=False):
q = self.enable_eagerloads(False)
if with_labels:
q = q.with_labels()
q = q.statement
if reduce_columns:
q = q.reduce_columns()
return q.alias(name=name)
def cte(self, name=None, recursive=False):
return self.enable_eagerloads(False).\
statement.cte(name=name, recursive=recursive)
def label(self, name):
return self.enable_eagerloads(False).statement.label(name)
def as_scalar(self):
return self.enable_eagerloads(False).statement.as_scalar()
@property
def selectable(self):
return self.__clause_element__()
def __clause_element__(self):
return self.enable_eagerloads(False).with_labels().statement
@_generative()
def enable_eagerloads(self, value):
self._enable_eagerloads = value
def _no_yield_per(self, message):
raise sa_exc.InvalidRequestError(
"The yield_per Query option is currently not "
"compatible with %s eager loading. Please "
"specify lazyload('*') or query.enable_eagerloads(False) in "
"order to "
"proceed with query.yield_per()." % message)
@_generative()
def with_labels(self):
self._with_labels = True
@_generative()
def enable_assertions(self, value):
self._enable_assertions = value
@property
def whereclause(self):
return self._criterion
@_generative()
def _with_current_path(self, path):
self._current_path = path
@_generative(_no_clauseelement_condition)
def with_polymorphic(self,
cls_or_mappers,
selectable=None,
polymorphic_on=None):
if not self._primary_entity:
raise sa_exc.InvalidRequestError(
"No primary mapper set up for this Query.")
entity = self._entities[0]._clone()
self._entities = [entity] + self._entities[1:]
entity.set_with_polymorphic(self,
cls_or_mappers,
selectable=selectable,
polymorphic_on=polymorphic_on)
@_generative()
def yield_per(self, count):
self._yield_per = count
self._execution_options = self._execution_options.union(
{"stream_results": True})
def get(self, ident):
# convert composite types to individual args
if hasattr(ident, '__composite_values__'):
ident = ident.__composite_values__()
ident = util.to_list(ident)
mapper = self._only_full_mapper_zero("get")
if len(ident) != len(mapper.primary_key):
raise sa_exc.InvalidRequestError(
"Incorrect number of values in identifier to formulate "
"primary key for query.get(); primary key columns are %s" %
','.join("'%s'" % c for c in mapper.primary_key))
key = mapper.identity_key_from_primary_key(ident)
if not self._populate_existing and \
not mapper.always_refresh and \
self._for_update_arg is None:
instance = loading.get_from_identity(
self.session, key, attributes.PASSIVE_OFF)
if instance is not None:
self._get_existing_condition()
# reject calls for id in identity map but class
# mismatch.
if not issubclass(instance.__class__, mapper.class_):
return None
return instance
return loading.load_on_ident(self, key)
@_generative()
def correlate(self, *args):
self._correlate = self._correlate.union(
_interpret_as_from(s)
if s is not None else None
for s in args)
@_generative()
def autoflush(self, setting):
self._autoflush = setting
@_generative()
def populate_existing(self):
self._populate_existing = True
@_generative()
def _with_invoke_all_eagers(self, value):
self._invoke_all_eagers = value
def with_parent(self, instance, property=None):
if property is None:
mapper = object_mapper(instance)
for prop in mapper.iterate_properties:
if isinstance(prop, properties.RelationshipProperty) and \
prop.mapper is self._mapper_zero():
property = prop
break
else:
raise sa_exc.InvalidRequestError(
"Could not locate a property which relates instances "
"of class '%s' to instances of class '%s'" %
(
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
if alias is not None:
entity = aliased(entity, alias)
self._entities = list(self._entities)
m = _MapperEntity(self, entity)
self._set_entity_selectables([m])
@_generative()
def with_session(self, session):
self.session = session
def from_self(self, *entities):
fromclause = self.with_labels().enable_eagerloads(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
if entities:
q._set_entities(entities)
return q
@_generative()
def _set_enable_single_crit(self, val):
self._enable_single_crit = val
@_generative()
def _from_selectable(self, fromclause):
for attr in (
'_statement', '_criterion',
'_order_by', '_group_by',
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
'_prefixes',
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
# this enables clause adaptation for non-ORM
# expressions.
self._orm_only_from_obj_alias = False
old_entities = self._entities
self._entities = []
for e in old_entities:
e.adapt_to_selectable(self, self._from_obj[0])
def values(self, *columns):
if not columns:
return iter(())
q = self._clone()
q._set_entities(columns, entity_wrapper=_ColumnEntity)
if not q._yield_per:
q._yield_per = 10
return iter(q)
_values = values
def value(self, column):
try:
return next(self.values(column))[0]
except StopIteration:
return None
@_generative()
def with_entities(self, *entities):
self._set_entities(entities)
@_generative()
def add_columns(self, *column):
self._entities = list(self._entities)
l = len(self._entities)
for c in column:
_ColumnEntity(self, c)
# _ColumnEntity may add many entities if the
# given arg is a FROM clause
self._set_entity_selectables(self._entities[l:])
@util.pending_deprecation("0.7",
":meth:`.add_column` is superseded "
"by :meth:`.add_columns`",
False)
def add_column(self, column):
return self.add_columns(column)
def options(self, *args):
return self._options(False, *args)
def _conditional_options(self, *args):
return self._options(True, *args)
@_generative()
def _options(self, conditional, *args):
# most MapperOptions write to the '_attributes' dictionary,
# so copy that as well
self._attributes = self._attributes.copy()
opts = tuple(util.flatten_iterator(args))
self._with_options = self._with_options + opts
if conditional:
for opt in opts:
opt.process_query_conditionally(self)
else:
for opt in opts:
opt.process_query(self)
def with_transformation(self, fn):
return fn(self)
@_generative()
def with_hint(self, selectable, text, dialect_name='*'):
if selectable is not None:
selectable = inspect(selectable).selectable
self._with_hints += ((selectable, text, dialect_name),)
def with_statement_hint(self, text, dialect_name='*'):
return self.with_hint(None, text, dialect_name)
@_generative()
def execution_options(self, **kwargs):
self._execution_options = self._execution_options.union(kwargs)
@_generative()
def with_lockmode(self, mode):
self._for_update_arg = LockmodeArg.parse_legacy_query(mode)
@_generative()
def with_for_update(self, read=False, nowait=False, of=None):
self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of)
@_generative()
def params(self, *args, **kwargs):
if len(args) == 1:
kwargs.update(args[0])
elif len(args) > 0:
raise sa_exc.ArgumentError(
"params() takes zero or one positional argument, "
"which is a dictionary.")
self._params = self._params.copy()
self._params.update(kwargs)
@_generative(_no_statement_condition, _no_limit_offset)
def filter(self, *criterion):
for criterion in list(criterion):
criterion = expression._expression_literal_as_text(criterion)
criterion = self._adapt_clause(criterion, True, True)
if self._criterion is not None:
self._criterion = self._criterion & criterion
else:
self._criterion = criterion
def filter_by(self, **kwargs):
clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value
for key, value in kwargs.items()]
return self.filter(sql.and_(*clauses))
@_generative(_no_statement_condition, _no_limit_offset)
def order_by(self, *criterion):
if len(criterion) == 1:
if criterion[0] is False:
if '_order_by' in self.__dict__:
del self._order_by
return
if criterion[0] is None:
self._order_by = None
return
criterion = self._adapt_col_list(criterion)
if self._order_by is False or self._order_by is None:
self._order_by = criterion
else:
self._order_by = self._order_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def group_by(self, *criterion):
criterion = list(chain(*[_orm_columns(c) for c in criterion]))
criterion = self._adapt_col_list(criterion)
if self._group_by is False:
self._group_by = criterion
else:
self._group_by = self._group_by + criterion
@_generative(_no_statement_condition, _no_limit_offset)
def having(self, criterion):
criterion = expression._expression_literal_as_text(criterion)
if criterion is not None and \
not isinstance(criterion, sql.ClauseElement):
raise sa_exc.ArgumentError(
"having() argument must be of type "
"sqlalchemy.sql.ClauseElement or string")
criterion = self._adapt_clause(criterion, True, True)
if self._having is not None:
self._having = self._having & criterion
else:
self._having = criterion
def union(self, *q):
return self._from_selectable(
expression.union(*([self] + list(q))))
def union_all(self, *q):
return self._from_selectable(
expression.union_all(*([self] + list(q)))
)
def intersect(self, *q):
return self._from_selectable(
expression.intersect(*([self] + list(q)))
)
def intersect_all(self, *q):
return self._from_selectable(
expression.intersect_all(*([self] + list(q)))
)
def except_(self, *q):
return self._from_selectable(
expression.except_(*([self] + list(q)))
)
def except_all(self, *q):
return self._from_selectable(
expression.except_all(*([self] + list(q)))
)
def join(self, *props, **kwargs):
aliased, from_joinpoint, isouter = kwargs.pop('aliased', False),\
kwargs.pop('from_joinpoint', False),\
kwargs.pop('isouter', False)
if kwargs:
            raise TypeError("unknown arguments: %s" %
                            ','.join(kwargs.keys()))
return self._join(props,
outerjoin=isouter, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def outerjoin(self, *props, **kwargs):
aliased, from_joinpoint = kwargs.pop('aliased', False), \
kwargs.pop('from_joinpoint', False)
if kwargs:
raise TypeError("unknown arguments: %s" %
','.join(kwargs))
return self._join(props,
outerjoin=True, create_aliases=aliased,
from_joinpoint=from_joinpoint)
def _update_joinpoint(self, jp):
self._joinpoint = jp
# copy backwards to the root of the _joinpath
# dict, so that no existing dict in the path is mutated
while 'prev' in jp:
f, prev = jp['prev']
prev = prev.copy()
prev[f] = jp
jp['prev'] = (f, prev)
jp = prev
self._joinpath = jp
@_generative(_no_statement_condition, _no_limit_offset)
def _join(self, keys, outerjoin, create_aliases, from_joinpoint):
if not from_joinpoint:
self._reset_joinpoint()
if len(keys) == 2 and \
isinstance(keys[0], (expression.FromClause,
type, AliasedClass)) and \
isinstance(keys[1], (str, expression.ClauseElement,
interfaces.PropComparator)):
# detect 2-arg form of join and
# convert to a tuple.
keys = (keys,)
for arg1 in util.to_list(keys):
if isinstance(arg1, tuple):
# "tuple" form of join, multiple
# tuples are accepted as well. The simpler
# "2-arg" form is preferred. May deprecate
# the "tuple" usage.
arg1, arg2 = arg1
else:
arg2 = None
# determine onclause/right_entity. there
# is a little bit of legacy behavior still at work here
# which means they might be in either order. may possibly
# lock this down to (right_entity, onclause) in 0.6.
if isinstance(
arg1, (interfaces.PropComparator, util.string_types)):
right_entity, onclause = arg2, arg1
else:
right_entity, onclause = arg1, arg2
left_entity = prop = None
if isinstance(onclause, interfaces.PropComparator):
of_type = getattr(onclause, '_of_type', None)
else:
of_type = None
if isinstance(onclause, util.string_types):
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity, onclause)
onclause = descriptor
# check for q.join(Class.propname, from_joinpoint=True)
# and Class is that of the current joinpoint
elif from_joinpoint and \
isinstance(onclause, interfaces.PropComparator):
left_entity = onclause._parententity
info = inspect(self._joinpoint_zero())
left_mapper, left_selectable, left_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', None)
if left_mapper is left_entity:
left_entity = self._joinpoint_zero()
descriptor = _entity_descriptor(left_entity,
onclause.key)
onclause = descriptor
if isinstance(onclause, interfaces.PropComparator):
if right_entity is None:
if of_type:
right_entity = of_type
else:
right_entity = onclause.property.mapper
left_entity = onclause._parententity
prop = onclause.property
if not isinstance(onclause, attributes.QueryableAttribute):
onclause = prop
if not create_aliases:
# check for this path already present.
# don't render in that case.
edge = (left_entity, right_entity, prop.key)
if edge in self._joinpoint:
# it could point to a parent older than the
# current joinpoint. If this is the case,
# then we need to update it and then fix the
# tree's spine with _update_joinpoint. Copy
jp = self._joinpoint[edge].copy()
jp['prev'] = (edge, self._joinpoint)
self._update_joinpoint(jp)
continue
elif onclause is not None and right_entity is None:
raise NotImplementedError("query.join(a==b) not supported.")
self._join_left_to_right(
left_entity,
right_entity, onclause,
outerjoin, create_aliases, prop)
def _join_left_to_right(self, left, right,
onclause, outerjoin, create_aliases, prop):
self._polymorphic_adapters = self._polymorphic_adapters.copy()
if left is None:
if self._from_obj:
left = self._from_obj[0]
elif self._entities:
left = self._entities[0].entity_zero_or_selectable
if left is None:
raise sa_exc.InvalidRequestError(
"Don't know how to join from %s; please use "
"select_from() to establish the left "
"entity/selectable of this join" % self._entities[0])
if left is right and \
not create_aliases:
raise sa_exc.InvalidRequestError(
"Can't construct a join from %s to %s, they "
"are the same entity" %
(left, right))
l_info = inspect(left)
r_info = inspect(right)
overlap = False
if not create_aliases:
right_mapper = getattr(r_info, "mapper", None)
if right_mapper and (
right_mapper.with_polymorphic or
isinstance(right_mapper.mapped_table, expression.Join)
):
for from_obj in self._from_obj or [l_info.selectable]:
if sql_util.selectables_overlap(
l_info.selectable, from_obj) and \
sql_util.selectables_overlap(
from_obj, r_info.selectable):
overlap = True
break
if (overlap or not create_aliases) and \
l_info.selectable is r_info.selectable:
raise sa_exc.InvalidRequestError(
"Can't join table/selectable '%s' to itself" %
l_info.selectable)
right, onclause = self._prepare_right_side(
r_info, right, onclause,
create_aliases,
prop, overlap)
# if joining on a MapperProperty path,
# track the path to prevent redundant joins
if not create_aliases and prop:
self._update_joinpoint({
'_joinpoint_entity': right,
'prev': ((left, right, prop.key), self._joinpoint)
})
else:
self._joinpoint = {'_joinpoint_entity': right}
self._join_to_left(l_info, left, right, onclause, outerjoin)
def _prepare_right_side(self, r_info, right, onclause, create_aliases,
prop, overlap):
info = r_info
right_mapper, right_selectable, right_is_aliased = \
getattr(info, 'mapper', None), \
info.selectable, \
getattr(info, 'is_aliased_class', False)
if right_mapper:
self._join_entities += (info, )
if right_mapper and prop and \
not right_mapper.common_parent(prop.mapper):
raise sa_exc.InvalidRequestError(
"Join target %s does not correspond to "
"the right side of join condition %s" % (right, onclause)
)
if not right_mapper and prop:
right_mapper = prop.mapper
need_adapter = False
if right_mapper and right is right_selectable:
if not right_selectable.is_derived_from(
right_mapper.mapped_table):
raise sa_exc.InvalidRequestError(
"Selectable '%s' is not derived from '%s'" %
(right_selectable.description,
right_mapper.mapped_table.description))
if isinstance(right_selectable, expression.SelectBase):
# TODO: this isn't even covered now!
right_selectable = right_selectable.alias()
need_adapter = True
right = aliased(right_mapper, right_selectable)
aliased_entity = right_mapper and \
not right_is_aliased and \
(
right_mapper.with_polymorphic and isinstance(
right_mapper._with_polymorphic_selectable,
expression.Alias)
or
overlap
)
if not need_adapter and (create_aliases or aliased_entity):
right = aliased(right, flat=True)
need_adapter = True
if need_adapter:
self._filter_aliases = ORMAdapter(
right,
equivalents=right_mapper and
right_mapper._equivalent_columns or {},
chain_to=self._filter_aliases)
if isinstance(onclause, expression.ClauseElement):
onclause = self._adapt_clause(onclause, True, True)
if aliased_entity and not create_aliases:
self._mapper_loads_polymorphically_with(
right_mapper,
ORMAdapter(
right,
equivalents=right_mapper._equivalent_columns
)
)
return right, onclause
def _join_to_left(self, l_info, left, right, onclause, outerjoin):
info = l_info
left_mapper = getattr(info, 'mapper', None)
left_selectable = info.selectable
if self._from_obj:
replace_clause_index, clause = sql_util.find_join_source(
self._from_obj,
left_selectable)
if clause is not None:
try:
clause = orm_join(clause,
right,
onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = \
self._from_obj[:replace_clause_index] + \
(clause, ) + \
self._from_obj[replace_clause_index + 1:]
return
if left_mapper:
for ent in self._entities:
if ent.corresponds_to(left):
clause = ent.selectable
break
else:
clause = left
else:
clause = left_selectable
assert clause is not None
try:
clause = orm_join(clause, right, onclause, isouter=outerjoin)
except sa_exc.ArgumentError as ae:
raise sa_exc.InvalidRequestError(
"Could not find a FROM clause to join from. "
"Tried joining to %s, but got: %s" % (right, ae))
self._from_obj = self._from_obj + (clause,)
def _reset_joinpoint(self):
self._joinpoint = self._joinpath
self._filter_aliases = None
@_generative(_no_statement_condition)
def reset_joinpoint(self):
self._reset_joinpoint()
@_generative(_no_clauseelement_condition)
def select_from(self, *from_obj):
self._set_select_from(from_obj, False)
@_generative(_no_clauseelement_condition)
def select_entity_from(self, from_obj):
self._set_select_from([from_obj], True)
def __getitem__(self, item):
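        # emulate list-style indexing and slicing: non-negative slices are
        # translated to LIMIT/OFFSET via slice(), while negative indexes or
        # steps fall back to fetching and slicing the full result list.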
if isinstance(item, slice):
start, stop, step = util.decode_slice(item)
if isinstance(stop, int) and \
isinstance(start, int) and \
stop - start <= 0:
return []
elif (isinstance(start, int) and start < 0) \
or (isinstance(stop, int) and stop < 0):
return list(self)[item]
res = self.slice(start, stop)
if step is not None:
return list(res)[None:None:item.step]
else:
return list(res)
else:
if item == -1:
return list(self)[-1]
else:
return list(self[item:item + 1])[0]
@_generative(_no_statement_condition)
def slice(self, start, stop):
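        # derive LIMIT/OFFSET from the python-style range, composing the
        # new start with any offset already present on the query.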
if start is not None and stop is not None:
self._offset = (self._offset or 0) + start
self._limit = stop - start
elif start is None and stop is not None:
self._limit = stop
elif start is not None and stop is None:
self._offset = (self._offset or 0) + start
if self._offset == 0:
self._offset = None
@_generative(_no_statement_condition)
def limit(self, limit):
self._limit = limit
@_generative(_no_statement_condition)
def offset(self, offset):
self._offset = offset
@_generative(_no_statement_condition)
def distinct(self, *criterion):
if not criterion:
self._distinct = True
else:
criterion = self._adapt_col_list(criterion)
if isinstance(self._distinct, list):
self._distinct += criterion
else:
self._distinct = criterion
@_generative()
def prefix_with(self, *prefixes):
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
def all(self):
return list(self)
@_generative(_no_clauseelement_condition)
def from_statement(self, statement):
statement = expression._expression_literal_as_text(statement)
if not isinstance(statement,
(expression.TextClause,
expression.SelectBase)):
raise sa_exc.ArgumentError(
"from_statement accepts text(), select(), "
"and union() objects only.")
self._statement = statement
def first(self):
if self._statement is not None:
ret = list(self)[0:1]
else:
ret = list(self[0:1])
if len(ret) > 0:
return ret[0]
else:
return None
def one(self):
ret = list(self)
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
raise orm_exc.NoResultFound("No row was found for one()")
else:
raise orm_exc.MultipleResultsFound(
"Multiple rows were found for one()")
def scalar(self):
try:
ret = self.one()
if not isinstance(ret, tuple):
return ret
return ret[0]
except orm_exc.NoResultFound:
return None
def __iter__(self):
context = self._compile_context()
context.statement.use_labels = True
if self._autoflush and not self._populate_existing:
self.session._autoflush()
return self._execute_and_instances(context)
def _connection_from_session(self, **kw):
conn = self.session.connection(
**kw)
if self._execution_options:
conn = conn.execution_options(**self._execution_options)
return conn
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
mapper=self._mapper_zero_or_none(),
clause=querycontext.statement,
close_with_result=True)
result = conn.execute(querycontext.statement, self._params)
return loading.instances(self, result, querycontext)
@property
def column_descriptions(self):
return [
{
'name': ent._label_name,
'type': ent.type,
'aliased': getattr(ent, 'is_aliased_class', False),
'expr': ent.expr
}
for ent in self._entities
]
def instances(self, cursor, __context=None):
context = __context
if context is None:
context = QueryContext(self)
return loading.instances(self, cursor, context)
def merge_result(self, iterator, load=True):
return loading.merge_result(self, iterator, load)
@property
def _select_args(self):
return {
'limit': self._limit,
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
'group_by': self._group_by or None,
'having': self._having
}
@property
def _should_nest_selectable(self):
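        # a query with LIMIT/OFFSET or DISTINCT must be nested as a
        # subquery before joined eager loading is applied, so that the row
        # limit applies to primary rows rather than to the joined rows.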
kwargs = self._select_args
return (kwargs.get('limit') is not None or
kwargs.get('offset') is not None or
kwargs.get('distinct', False))
def exists(self):
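        # render EXISTS (SELECT 1 FROM ... WHERE ...); the constant '1'
        # column stands in for the full entity column list.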
return sql.exists(self.add_columns('1').with_labels().
statement.with_only_columns([1]))
def count(self):
col = sql.func.count(sql.literal_column('*'))
return self.from_self(col).scalar()
def delete(self, synchronize_session='evaluate'):
delete_op = persistence.BulkDelete.factory(
self, synchronize_session)
delete_op.exec_()
return delete_op.rowcount
def update(self, values, synchronize_session='evaluate'):
update_op = persistence.BulkUpdate.factory(
self, synchronize_session, values)
update_op.exec_()
return update_op.rowcount
def _compile_context(self, labels=True):
context = QueryContext(self)
if context.statement is not None:
return context
context.labels = labels
context._for_update_arg = self._for_update_arg
for entity in self._entities:
entity.setup_context(self, context)
for rec in context.create_eager_joins:
strategy = rec[0]
strategy(*rec[1:])
if context.from_clause:
context.froms = list(context.from_clause)
else:
context.froms = context.froms
if self._enable_single_crit:
self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
raise sa_exc.InvalidRequestError(
"No column-based properties specified for "
"refresh operation. Use session.expire() "
"to reload collections and related items.")
else:
raise sa_exc.InvalidRequestError(
"Query contains no columns with which to "
"SELECT from.")
if context.multi_row_eager_loaders and self._should_nest_selectable:
context.statement = self._compound_eager_statement(context)
else:
context.statement = self._simple_statement(context)
return context
def _compound_eager_statement(self, context):
if context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
else:
context.order_by = None
order_by_col_expr = []
inner = sql.select(
context.primary_columns + order_by_col_expr,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
for hint in self._with_hints:
inner = inner.with_hint(*hint)
if self._correlate:
inner = inner.correlate(*self._correlate)
inner = inner.alias()
equivs = self.__all_equivs()
context.adapter = sql_util.ColumnAdapter(inner, equivs)
statement = sql.select(
[inner] + context.secondary_columns,
use_labels=context.labels)
statement._for_update_arg = context._for_update_arg
from_clause = inner
for eager_join in context.eager_joins.values():
from_clause = sql_util.splice_joins(
from_clause,
eager_join, eager_join.stop_on)
statement.append_from(from_clause)
if context.order_by:
statement.append_order_by(
*context.adapter.copy_and_process(
context.order_by
)
)
statement.append_order_by(*context.eager_order_by)
return statement
def _simple_statement(self, context):
if not context.order_by:
context.order_by = None
if self._distinct and context.order_by:
order_by_col_expr = list(
chain(*[
sql_util.unwrap_order_by(o)
for o in context.order_by
])
)
context.primary_columns += order_by_col_expr
context.froms += tuple(context.eager_joins.values())
statement = sql.select(
context.primary_columns +
context.secondary_columns,
context.whereclause,
from_obj=context.froms,
use_labels=context.labels,
order_by=context.order_by,
**self._select_args
)
statement._for_update_arg = context._for_update_arg
for hint in self._with_hints:
statement = statement.with_hint(*hint)
if self._correlate:
statement = statement.correlate(*self._correlate)
if context.eager_order_by:
statement.append_order_by(*context.eager_order_by)
return statement
def _adjust_for_single_inheritance(self, context):
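        # for each mapped entity that is not the target of an explicit
        # join, AND the mapper's single-table-inheritance criterion
        # (typically a check on the discriminator column) into the
        # statement's WHERE clause, adapted for any aliasing in play.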
for (ext_info, adapter) in set(self._mapper_adapter_map.values()):
if ext_info in self._join_entities:
continue
single_crit = ext_info.mapper._single_table_criterion
if single_crit is not None:
if adapter:
single_crit = adapter.traverse(single_crit)
single_crit = self._adapt_clause(single_crit, False, False)
context.whereclause = sql.and_(
sql.True_._ifnone(context.whereclause),
single_crit)
def __str__(self):
return str(self._compile_context().statement)
from ..sql.selectable import ForUpdateArg
class LockmodeArg(ForUpdateArg):
@classmethod
def parse_legacy_query(self, mode):
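        # translate the legacy with_lockmode() string into the flags used
        # by the newer with_for_update() API: "read" selects FOR SHARE,
        # "update" FOR UPDATE, "update_nowait" FOR UPDATE NOWAIT.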
if mode in (None, False):
return None
if mode == "read":
read = True
nowait = False
elif mode == "update":
read = nowait = False
elif mode == "update_nowait":
nowait = True
read = False
else:
raise sa_exc.ArgumentError(
"Unknown with_lockmode argument: %r" % mode)
return LockmodeArg(read=read, nowait=nowait)
class _QueryEntity(object):
def __new__(cls, *args, **kwargs):
if cls is _QueryEntity:
entity = args[1]
if not isinstance(entity, util.string_types) and \
_is_mapped_class(entity):
cls = _MapperEntity
elif isinstance(entity, Bundle):
cls = _BundleEntity
else:
cls = _ColumnEntity
return object.__new__(cls)
def _clone(self):
q = self.__class__.__new__(self.__class__)
q.__dict__ = self.__dict__.copy()
return q
class _MapperEntity(_QueryEntity):
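    # mapper/class/AliasedClass entity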
def __init__(self, query, entity):
if not query._primary_entity:
query._primary_entity = self
query._entities.append(self)
self.entities = [entity]
self.expr = entity
supports_single_entity = True
def setup_entity(self, ext_info, aliased_adapter):
self.mapper = ext_info.mapper
self.aliased_adapter = aliased_adapter
self.selectable = ext_info.selectable
self.is_aliased_class = ext_info.is_aliased_class
self._with_polymorphic = ext_info.with_polymorphic_mappers
self._polymorphic_discriminator = \
ext_info.polymorphic_on
self.entity_zero = ext_info
if ext_info.is_aliased_class:
self._label_name = self.entity_zero.name
else:
self._label_name = self.mapper.class_.__name__
self.path = self.entity_zero._path_registry
def set_with_polymorphic(self, query, cls_or_mappers,
selectable, polymorphic_on):
if self.is_aliased_class:
raise NotImplementedError(
"Can't use with_polymorphic() against "
"an Aliased object"
)
if cls_or_mappers is None:
query._reset_polymorphic_adapter(self.mapper)
return
mappers, from_obj = self.mapper._with_polymorphic_args(
cls_or_mappers, selectable)
self._with_polymorphic = mappers
self._polymorphic_discriminator = polymorphic_on
self.selectable = from_obj
query._mapper_loads_polymorphically_with(
self.mapper, sql_util.ColumnAdapter(
from_obj, self.mapper._equivalent_columns))
filter_fn = id
@property
def type(self):
return self.mapper.class_
@property
def entity_zero_or_selectable(self):
return self.entity_zero
def corresponds_to(self, entity):
if entity.is_aliased_class:
if self.is_aliased_class:
if entity._base_alias is self.entity_zero._base_alias:
return True
return False
elif self.is_aliased_class:
if self.entity_zero._use_mapper_path:
return entity in self._with_polymorphic
else:
return entity is self.entity_zero
return entity.common_parent(self.entity_zero)
def adapt_to_selectable(self, query, sel):
query._entities.append(self)
def _get_entity_clauses(self, query, context):
adapter = None
if not self.is_aliased_class:
if query._polymorphic_adapters:
adapter = query._polymorphic_adapters.get(self.mapper, None)
else:
adapter = self.aliased_adapter
if adapter:
if query._from_obj_alias:
ret = adapter.wrap(query._from_obj_alias)
else:
ret = adapter
else:
ret = query._from_obj_alias
return ret
def row_processor(self, query, context, result):
adapter = self._get_entity_clauses(query, context)
if context.adapter and adapter:
adapter = adapter.wrap(context.adapter)
elif not adapter:
adapter = context.adapter
# polymorphic mappers which have concrete tables in
# their hierarchy usually
# require row aliasing unconditionally.
if not adapter and self.mapper._requires_row_aliasing:
adapter = sql_util.ColumnAdapter(
self.selectable,
self.mapper._equivalent_columns)
if query._primary_entity is self:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
only_load_props=query._only_load_props,
refresh_state=context.refresh_state,
polymorphic_discriminator=self._polymorphic_discriminator
)
else:
_instance = loading.instance_processor(
self.mapper,
context,
result,
self.path,
adapter,
polymorphic_discriminator=self._polymorphic_discriminator
)
return _instance, self._label_name
def setup_context(self, query, context):
adapter = self._get_entity_clauses(query, context)
# if self._adapted_selectable is None:
context.froms += (self.selectable,)
if context.order_by is False and self.mapper.order_by:
context.order_by = self.mapper.order_by
# apply adaptation to the mapper's order_by if needed.
if adapter:
context.order_by = adapter.adapt_list(
util.to_list(
context.order_by
)
)
if self._with_polymorphic:
poly_properties = self.mapper._iterate_polymorphic_properties(
self._with_polymorphic)
else:
poly_properties = self.mapper._polymorphic_properties
for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
value.setup(
context,
self,
self.path,
adapter,
only_load_props=query._only_load_props,
column_collection=context.primary_columns
)
if self._polymorphic_discriminator is not None and \
self._polymorphic_discriminator \
is not self.mapper.polymorphic_on:
if adapter:
pd = adapter.columns[self._polymorphic_discriminator]
else:
pd = self._polymorphic_discriminator
context.primary_columns.append(pd)
def __str__(self):
return str(self.mapper)
@inspection._self_inspects
class Bundle(object):
single_entity = False
def __init__(self, name, *exprs, **kw):
self.name = self._label = name
self.exprs = exprs
self.c = self.columns = ColumnCollection()
self.columns.update((getattr(col, "key", col._label), col)
for col in exprs)
self.single_entity = kw.pop('single_entity', self.single_entity)
columns = None
c = None
def _clone(self):
cloned = self.__class__.__new__(self.__class__)
cloned.__dict__.update(self.__dict__)
return cloned
def __clause_element__(self):
return expression.ClauseList(group=False, *self.c)
@property
def clauses(self):
return self.__clause_element__().clauses
def label(self, name):
cloned = self._clone()
cloned.name = name
return cloned
def create_row_processor(self, query, procs, labels):
keyed_tuple = util.lightweight_named_tuple('result', labels)
def proc(row):
return keyed_tuple([proc(row) for proc in procs])
return proc
class _BundleEntity(_QueryEntity):
def __init__(self, query, bundle, setup_entities=True):
query._entities.append(self)
self.bundle = self.expr = bundle
self.type = type(bundle)
self._label_name = bundle.name
self._entities = []
if setup_entities:
for expr in bundle.exprs:
if isinstance(expr, Bundle):
_BundleEntity(self, expr)
else:
_ColumnEntity(self, expr, namespace=self)
self.entities = ()
self.filter_fn = lambda item: item
self.supports_single_entity = self.bundle.single_entity
@property
def entity_zero(self):
for ent in self._entities:
ezero = ent.entity_zero
if ezero is not None:
return ezero
else:
return None
def corresponds_to(self, entity):
return False
@property
def entity_zero_or_selectable(self):
for ent in self._entities:
ezero = ent.entity_zero_or_selectable
if ezero is not None:
return ezero
else:
return None
def adapt_to_selectable(self, query, sel):
c = _BundleEntity(query, self.bundle, setup_entities=False)
for ent in self._entities:
ent.adapt_to_selectable(c, sel)
def setup_entity(self, ext_info, aliased_adapter):
for ent in self._entities:
ent.setup_entity(ext_info, aliased_adapter)
def setup_context(self, query, context):
for ent in self._entities:
ent.setup_context(query, context)
def row_processor(self, query, context, result):
procs, labels = zip(
*[ent.row_processor(query, context, result)
for ent in self._entities]
)
proc = self.bundle.create_row_processor(query, procs, labels)
return proc, self._label_name
class _ColumnEntity(_QueryEntity):
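    # Column/expression based entity.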
def __init__(self, query, column, namespace=None):
self.expr = column
self.namespace = namespace
if isinstance(column, util.string_types):
column = sql.literal_column(column)
self._label_name = column.name
elif isinstance(column, (
attributes.QueryableAttribute,
interfaces.PropComparator
)):
self._label_name = column.key
column = column._query_clause_element()
else:
self._label_name = getattr(column, 'key', None)
if not isinstance(column, expression.ColumnElement) and \
hasattr(column, '_select_iterable'):
for c in column._select_iterable:
if c is column:
break
_ColumnEntity(query, c, namespace=column)
else:
return
elif isinstance(column, Bundle):
_BundleEntity(query, column)
return
if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
self.type = type_ = column.type
if type_.hashable:
self.filter_fn = lambda item: item
else:
counter = util.counter()
self.filter_fn = lambda item: counter()
        # If the Column is unnamed, give it a
        # label() so that mutable column expressions
        # can be located in the result even
        # if the expression's identity has been changed
        # due to adaption.
if not column._label and not getattr(column, 'is_literal', False):
column = column.label(self._label_name)
query._entities.append(self)
self.column = column
self.froms = set()
# look for ORM entities represented within the
# given expression. Try to count only entities
# for columns whose FROM object is in the actual list
# of FROMs for the overall expression - this helps
# subqueries which were built from ORM constructs from
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
self.entities = util.OrderedSet(
elem._annotations['parententity']
for elem in visitors.iterate(column, {})
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
self.entity_zero = list(self.entities)[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
self.entity_zero = None
supports_single_entity = False
@property
def entity_zero_or_selectable(self):
if self.entity_zero is not None:
return self.entity_zero
elif self.actual_froms:
return list(self.actual_froms)[0]
else:
return None
def adapt_to_selectable(self, query, sel):
c = _ColumnEntity(query, sel.corresponding_column(self.column))
c._label_name = self._label_name
c.entity_zero = self.entity_zero
c.entities = self.entities
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
# no tests fail
if self.entity_zero is None:
return False
elif _is_aliased_class(entity):
# TODO: polymorphic subclasses ?
return entity is self.entity_zero
else:
return not _is_aliased_class(self.entity_zero) and \
entity.common_parent(self.entity_zero)
def _resolve_expr_against_query_aliases(self, query, expr, context):
return query._adapt_clause(expr, False, True)
def row_processor(self, query, context, result):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
if context.adapter:
column = context.adapter.columns[column]
getter = result._getter(column)
return getter, self._label_name
def setup_context(self, query, context):
column = self._resolve_expr_against_query_aliases(
query, self.column, context)
context.froms += tuple(self.froms)
context.primary_columns.append(column)
def __str__(self):
return str(self.column)
class QueryContext(object):
multi_row_eager_loaders = False
adapter = None
froms = ()
for_update = None
def __init__(self, query):
if query._statement is not None:
if isinstance(query._statement, expression.SelectBase) and \
not query._statement._textual and \
not query._statement.use_labels:
self.statement = query._statement.apply_labels()
else:
self.statement = query._statement
else:
self.statement = None
self.from_clause = query._from_obj
self.whereclause = query._criterion
self.order_by = query._order_by
self.query = query
self.session = query.session
self.populate_existing = query._populate_existing
self.invoke_all_eagers = query._invoke_all_eagers
self.version_check = query._version_check
self.refresh_state = query._refresh_state
self.primary_columns = []
self.secondary_columns = []
self.eager_order_by = []
self.eager_joins = {}
self.create_eager_joins = []
self.propagate_options = set(o for o in query._with_options if
o.propagate_to_loaders)
self.attributes = query._attributes.copy()
class AliasOption(interfaces.MapperOption):
def __init__(self, alias):
self.alias = alias
def process_query(self, query):
if isinstance(self.alias, util.string_types):
alias = query._mapper_zero().mapped_table.alias(self.alias)
else:
alias = self.alias
query._from_obj_alias = sql_util.ColumnAdapter(alias)
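# Illustrative note: AliasOption is the option object produced by
# orm.contains_alias(); a hedged sketch of typical use (the alias and
# statement names below are hypothetical):
#
#     adalias = addresses_table.alias("adalias")
#     q = session.query(Address).from_statement(stmt).options(
#         contains_alias(adalias))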
| true
| true
|
790687207d832c9cf0b4e380a80ecbf4c3d09d9f
| 519
|
py
|
Python
|
src/web/drapo/templatetags/timezones.py
|
werelaxe/drapo
|
5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b
|
[
"MIT"
] | 10
|
2017-04-15T05:00:17.000Z
|
2019-08-27T21:08:48.000Z
|
src/web/drapo/templatetags/timezones.py
|
werelaxe/drapo
|
5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b
|
[
"MIT"
] | 2
|
2017-10-06T12:35:59.000Z
|
2018-12-03T07:17:12.000Z
|
src/web/drapo/templatetags/timezones.py
|
werelaxe/drapo
|
5f78da735819200f0e7efa6a5e6b3b45ba6e0d4b
|
[
"MIT"
] | 4
|
2017-03-08T21:17:21.000Z
|
2019-05-10T16:22:58.000Z
|
from django.template import Library
from django.utils import timezone
import datetime
register = Library()
@register.filter
def utcoffset(value):
    # It looks odd, but the tag syntax is ugly, so instead of using `value`
    # this filter reads the current timezone from django.utils.
tz = timezone.get_current_timezone()
utc_offset = datetime.datetime.now(tz).utcoffset()
minutes = (utc_offset.days * 24 * 60) + (utc_offset.seconds / 60)
if minutes == 0:
return ''
return '(UTC%+03i:%02i)' % divmod(minutes, 60)
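# A quick check of the formatting above (assuming a fixed +05:30 offset,
# i.e. 330 minutes, rather than Django's current timezone):
#
#     >>> '(UTC%+03i:%02i)' % divmod(330, 60)
#     '(UTC+05:30)'
#
# Note that divmod floors the quotient for negative offsets, so -330
# minutes renders as '(UTC-06:30)'; western half-hour zones would need
# special handling.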
| 28.833333
| 112
| 0.695568
|
from django.template import Library
from django.utils import timezone
import datetime
register = Library()
@register.filter
def utcoffset(value):
tz = timezone.get_current_timezone()
utc_offset = datetime.datetime.now(tz).utcoffset()
minutes = (utc_offset.days * 24 * 60) + (utc_offset.seconds / 60)
if minutes == 0:
return ''
return '(UTC%+03i:%02i)' % divmod(minutes, 60)
| true
| true
|
790688e4e6852f24cdcc9753889463eb777daea2
| 948
|
py
|
Python
|
fancytimers/tests/sanity.py
|
LiteralGenie/FancyTimers
|
ce6e0080b6e1450d096efe8bfd6c9c2bfd2f76b9
|
[
"MIT"
] | null | null | null |
fancytimers/tests/sanity.py
|
LiteralGenie/FancyTimers
|
ce6e0080b6e1450d096efe8bfd6c9c2bfd2f76b9
|
[
"MIT"
] | null | null | null |
fancytimers/tests/sanity.py
|
LiteralGenie/FancyTimers
|
ce6e0080b6e1450d096efe8bfd6c9c2bfd2f76b9
|
[
"MIT"
] | null | null | null |
from classes.fixed_scheduler import FixedScheduler
from classes.concretes.sql_mixin import SqlMixin
from sqlalchemy import Column, create_engine, Table
from sqlalchemy.types import Float
from sqlalchemy.orm import registry, Session
import attr
registry = registry()
@registry.mapped
@attr.s(auto_attribs=True)
class MyClass:
__table__ = Table(
"my_class",
registry.metadata,
Column('time', Float, primary_key=True)
)
time: float
class MyScheduler(SqlMixin, FixedScheduler):
def before_write(self, timestamp: float):
return MyClass(time=timestamp)
if __name__ == "__main__":
engine = create_engine("sqlite:///sanity.sqlite")
registry.metadata.create_all(engine)
with Session(engine) as session:
with session.begin():
scheduler = MyScheduler(1000000, MyClass.time, session)
result = scheduler.check_and_insert()
print(result)
pass
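# Hedged usage note: after the run above, inserted rows can be read back
# with ordinary SQLAlchemy 1.4-style queries (the select import and the
# availability of Session.scalars are assumptions about the installed
# SQLAlchemy version):
#
#     from sqlalchemy import select
#     with Session(engine) as session:
#         times = session.scalars(select(MyClass.time)).all()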
| 26.333333
| 67
| 0.704641
|
from classes.fixed_scheduler import FixedScheduler
from classes.concretes.sql_mixin import SqlMixin
from sqlalchemy import Column, create_engine, Table
from sqlalchemy.types import Float
from sqlalchemy.orm import registry, Session
import attr
registry = registry()
@registry.mapped
@attr.s(auto_attribs=True)
class MyClass:
__table__ = Table(
"my_class",
registry.metadata,
Column('time', Float, primary_key=True)
)
time: float
class MyScheduler(SqlMixin, FixedScheduler):
def before_write(self, timestamp: float):
return MyClass(time=timestamp)
if __name__ == "__main__":
engine = create_engine("sqlite:///sanity.sqlite")
registry.metadata.create_all(engine)
with Session(engine) as session:
with session.begin():
scheduler = MyScheduler(1000000, MyClass.time, session)
result = scheduler.check_and_insert()
print(result)
pass
| true
| true
|
790688ebb94366590d04b95d2613fbe1c13105f9
| 4,479
|
py
|
Python
|
GestureVolume/venv/lib/python3.8/site-packages/mediapipe/framework/formats/annotation/rasterization_pb2.py
|
mesquita97/HandTalks
|
cd035dd39ef0acba9e24516bcea9ed7833fde141
|
[
"MIT"
] | null | null | null |
GestureVolume/venv/lib/python3.8/site-packages/mediapipe/framework/formats/annotation/rasterization_pb2.py
|
mesquita97/HandTalks
|
cd035dd39ef0acba9e24516bcea9ed7833fde141
|
[
"MIT"
] | null | null | null |
GestureVolume/venv/lib/python3.8/site-packages/mediapipe/framework/formats/annotation/rasterization_pb2.py
|
mesquita97/HandTalks
|
cd035dd39ef0acba9e24516bcea9ed7833fde141
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mediapipe/framework/formats/annotation/rasterization.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/framework/formats/annotation/rasterization.proto',
package='mediapipe',
syntax='proto2',
serialized_options=_b('\n-com.google.mediapipe.formats.annotation.protoB\022RasterizationProto'),
serialized_pb=_b('\n:mediapipe/framework/formats/annotation/rasterization.proto\x12\tmediapipe\"|\n\rRasterization\x12\x33\n\x08interval\x18\x01 \x03(\x0b\x32!.mediapipe.Rasterization.Interval\x1a\x36\n\x08Interval\x12\t\n\x01y\x18\x01 \x02(\x05\x12\x0e\n\x06left_x\x18\x02 \x02(\x05\x12\x0f\n\x07right_x\x18\x03 \x02(\x05\x42\x43\n-com.google.mediapipe.formats.annotation.protoB\x12RasterizationProto')
)
_RASTERIZATION_INTERVAL = _descriptor.Descriptor(
name='Interval',
full_name='mediapipe.Rasterization.Interval',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='y', full_name='mediapipe.Rasterization.Interval.y', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='left_x', full_name='mediapipe.Rasterization.Interval.left_x', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='right_x', full_name='mediapipe.Rasterization.Interval.right_x', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=197,
)
_RASTERIZATION = _descriptor.Descriptor(
name='Rasterization',
full_name='mediapipe.Rasterization',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='interval', full_name='mediapipe.Rasterization.interval', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_RASTERIZATION_INTERVAL, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=197,
)
_RASTERIZATION_INTERVAL.containing_type = _RASTERIZATION
_RASTERIZATION.fields_by_name['interval'].message_type = _RASTERIZATION_INTERVAL
DESCRIPTOR.message_types_by_name['Rasterization'] = _RASTERIZATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Rasterization = _reflection.GeneratedProtocolMessageType('Rasterization', (_message.Message,), dict(
Interval = _reflection.GeneratedProtocolMessageType('Interval', (_message.Message,), dict(
DESCRIPTOR = _RASTERIZATION_INTERVAL,
__module__ = 'mediapipe.framework.formats.annotation.rasterization_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.Rasterization.Interval)
))
,
DESCRIPTOR = _RASTERIZATION,
__module__ = 'mediapipe.framework.formats.annotation.rasterization_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.Rasterization)
))
_sym_db.RegisterMessage(Rasterization)
_sym_db.RegisterMessage(Rasterization.Interval)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
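# Hedged usage sketch (not generated code; the values are illustrative):
# the module exposes Rasterization and its nested Interval through the
# standard protobuf message API:
#
#     msg = Rasterization()
#     iv = msg.interval.add()          # repeated Interval field
#     iv.y, iv.left_x, iv.right_x = 10, 2, 7
#     data = msg.SerializeToString()
#     parsed = Rasterization.FromString(data)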
| 35.547619
| 405
| 0.763563
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='mediapipe/framework/formats/annotation/rasterization.proto',
package='mediapipe',
syntax='proto2',
serialized_options=_b('\n-com.google.mediapipe.formats.annotation.protoB\022RasterizationProto'),
serialized_pb=_b('\n:mediapipe/framework/formats/annotation/rasterization.proto\x12\tmediapipe\"|\n\rRasterization\x12\x33\n\x08interval\x18\x01 \x03(\x0b\x32!.mediapipe.Rasterization.Interval\x1a\x36\n\x08Interval\x12\t\n\x01y\x18\x01 \x02(\x05\x12\x0e\n\x06left_x\x18\x02 \x02(\x05\x12\x0f\n\x07right_x\x18\x03 \x02(\x05\x42\x43\n-com.google.mediapipe.formats.annotation.protoB\x12RasterizationProto')
)
_RASTERIZATION_INTERVAL = _descriptor.Descriptor(
name='Interval',
full_name='mediapipe.Rasterization.Interval',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='y', full_name='mediapipe.Rasterization.Interval.y', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='left_x', full_name='mediapipe.Rasterization.Interval.left_x', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='right_x', full_name='mediapipe.Rasterization.Interval.right_x', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=143,
serialized_end=197,
)
_RASTERIZATION = _descriptor.Descriptor(
name='Rasterization',
full_name='mediapipe.Rasterization',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='interval', full_name='mediapipe.Rasterization.interval', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_RASTERIZATION_INTERVAL, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=197,
)
_RASTERIZATION_INTERVAL.containing_type = _RASTERIZATION
_RASTERIZATION.fields_by_name['interval'].message_type = _RASTERIZATION_INTERVAL
DESCRIPTOR.message_types_by_name['Rasterization'] = _RASTERIZATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Rasterization = _reflection.GeneratedProtocolMessageType('Rasterization', (_message.Message,), dict(
Interval = _reflection.GeneratedProtocolMessageType('Interval', (_message.Message,), dict(
DESCRIPTOR = _RASTERIZATION_INTERVAL,
__module__ = 'mediapipe.framework.formats.annotation.rasterization_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.Rasterization.Interval)
))
,
DESCRIPTOR = _RASTERIZATION,
__module__ = 'mediapipe.framework.formats.annotation.rasterization_pb2'
# @@protoc_insertion_point(class_scope:mediapipe.Rasterization)
))
_sym_db.RegisterMessage(Rasterization)
_sym_db.RegisterMessage(Rasterization.Interval)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true
| true
|
79068ac053d28692f742e9bed9487b114e1dbb87
| 31,953
|
py
|
Python
|
RayTracingMazeEnem.py
|
FinFetChannel/PytracingMaze
|
6ccb444c76ede7e48ac09a74d550f32884c7c74b
|
[
"MIT"
] | 9
|
2021-05-01T11:37:05.000Z
|
2022-01-14T13:27:28.000Z
|
RayTracingMazeEnem.py
|
FinFetChannel/pytracingMaze
|
6ccb444c76ede7e48ac09a74d550f32884c7c74b
|
[
"MIT"
] | null | null | null |
RayTracingMazeEnem.py
|
FinFetChannel/pytracingMaze
|
6ccb444c76ede7e48ac09a74d550f32884c7c74b
|
[
"MIT"
] | 1
|
2021-05-07T03:11:51.000Z
|
2021-05-07T03:11:51.000Z
|
import numpy as np
import pygame as pg
from numba import njit
def main():
size = np.random.randint(20,60) # size of the map
posx, posy, posz = 1.5, np.random.uniform(1, size -1), 0.5
rot, rot_v = (np.pi/4, 0)
lx, ly, lz = (size*20, size*30, 1000)
mr, mg, mb, maph, mapr, exitx, exity, mapt, maps = maze_generator(int(posx), int(posy), size)
enx, eny, seenx, seeny, lock = np.random.uniform(2, size-3 ), np.random.uniform(2, size-3), 0, 0, 0
maph[int(enx)][int(eny)] = 0
shoot, sx, sy, sdir = 1, -1, -1, rot
res, res_o = 5, [96, 112, 160, 192, 224, 260, 300, 340, 400, 480, 540, 600, 800]
width, height, mod, inc, rr, gg, bb = adjust_resol(24)
running = True
pg.init()
font = pg.font.SysFont("Arial", 18)
font2 = pg.font.SysFont("Impact", 48)
screen = pg.display.set_mode((800, 600))
rr, gg, bb = np.linspace(0,0.8, width*height), np.linspace(0.5,.1, width*height), np.linspace(1,0.1, width*height)
pixels = np.dstack((rr,gg,bb))
pixels = np.reshape(pixels, (height,width,3))
surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))
surf = pg.transform.scale(surf, (750, 550))
screen.blit(surf, (25, 25))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("red")),(45,95))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("blue")),(55,105))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("white")),(50,100))
screen.blit(font2.render(" Loading, please wait... ", 1, pg.Color("black"), pg.Color("grey")),(50,300))
pg.display.update()
clock = pg.time.Clock()
pg.mouse.set_visible(False)
et = 0.1
mplayer = np.zeros([size, size])
enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
sstart, timer, count, autores, smooth = None, 0, -100, 1, 0
pause = 0
pg.mixer.set_num_channels(3)
ambient = pg.mixer.Sound('soundfx/HauntSilentPartner.mp3')
ambient.set_volume(0.5)
runfx = pg.mixer.Sound('soundfx/run.mp3')
shotfx = pg.mixer.Sound('soundfx/slap.mp3')
killfx = pg.mixer.Sound('soundfx/shutdown.mp3')
respawnfx = pg.mixer.Sound('soundfx/respawn.mp3')
successfx = pg.mixer.Sound('soundfx/success.mp3')
failfx = pg.mixer.Sound('soundfx/fail.mp3')
pg.mixer.Channel(0).play(ambient, -1)
pg.mixer.Channel(1).play(respawnfx)
run = 1
score = 0
ticks = pg.time.get_ticks()/100000
while running:
count += 1
for event in pg.event.get():
if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
if not pause:
pause = 1
pg.mixer.Channel(1).play(respawnfx)
endmsg = " Game paused. Current score: " + str(score)
else:
endmsg = " Thanks for playing! Total score: " + str(score)
pg.mixer.Channel(1).play(killfx)
running = False
if sstart == None and(event.type == pg.MOUSEBUTTONDOWN or event.type == pg.MOUSEBUTTONUP):
shoot = 1
if event.type == pg.KEYDOWN:
if event.key == ord('p'): # pause
if not pause:
pause = 1
endmsg = " Game paused. Current score: " + str(score)
elif (int(posx) != exitx or int(posy) != exity):
pause = 0
if pause and event.key == ord('n'): # new game
pause = 0
size = np.random.randint(20,60)
posx, posy, posz = 1.5, np.random.uniform(1, size -1), 0.5
rot, rot_v = (np.pi/4, 0)
mr, mg, mb, maph, mapr, exitx, exity, mapt, maps = maze_generator(int(posx), int(posy), size)
enx, eny, seenx, seeny, lock, run = 0, 0, 0, 0, 0, 1
shoot, sx, sy, sstart = 0, -1, -1, None
mplayer = np.zeros([size, size])
et = 0.1
enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
count = -100
if autores:
width, height, mod, inc, rr, gg, bb = adjust_resol(24)
pg.mixer.Channel(1).play(respawnfx)
if event.key == ord('t'): # toggle auto resolution
autores = not(autores)
                if event.key == ord('y'): # toggle smooth scaling
smooth = not(smooth)
if not autores:
if event.key == ord('q'): # manually change resolution
if res > 0 :
res = res-1
width, height, mod, inc, rr, gg, bb = adjust_resol(res_o[res])
if event.key == ord('e'):
if res < len(res_o)-1 :
res = res+1
width, height, mod, inc, rr, gg, bb = adjust_resol(res_o[res])
if not pause:
rr, gg, bb = super_fast(width, height, mod, inc, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, mplayer, exitx, exity, mapr, mapt, maps, rr, gg, bb, enx, eny, sx, sy, size)
pixels = np.dstack((rr,gg,bb))
pixels = np.reshape(pixels, (height,width,3))
surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))
if shoot or smooth:
surf = pg.transform.smoothscale(surf, (800, 600))
else:
surf = pg.transform.scale(surf, (800, 600))
screen.blit(surf, (0, 0))
            ## fpss = int(clock.get_fps())
            fpss = int(1000/max(1, pg.time.get_ticks() - ticks*100000)) # guard against a zero frame delta
fps = font.render(str(fpss)+' w: '+ str(width) + ' Score: '+str(score), 1, pg.Color("coral"))
screen.blit(fps,(10,0))
if autores and count > 10: #auto adjust render resolution
if fpss < 50 and width > 100:
count = 0
width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*0.8))
if fpss > 65 and width < 728:
count = 0
width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*1.1))
# player's movement
if (int(posx) == exitx and int(posy) == exity):
endmsg = " You escaped safely! "
pg.mixer.Channel(1).play(successfx)
score += 1
pause = 1
pressed_keys = pg.key.get_pressed()
et = clock.tick()/500
if et > 0.5:
et = 0.5
if shoot or sstart != None:
if sstart == None:
pg.mixer.Channel(2).play(shotfx)
if fpss < 60 and autores:
count = -50
width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*0.8))
sstart = pg.time.get_ticks()
elif pg.time.get_ticks() - sstart > 500:
shoot, sx, sy, sstart = 0, -1, -1, None
if enx == 0:
if not run:
pg.mixer.Channel(1).play(killfx)
run = 1
if np.random.uniform() > 0.999:
cos, sin = np.cos(rot), np.sin(rot)
for ee in range(100):
enx = np.clip(np.random.normal(posx, 5), 1, size-2)
eny = np.clip(np.random.normal(posy, 5), 1, size-2)
dtp = (enx-posx)**2 + (eny-posy)**2
if maph[int(enx)][int(eny)] == 0 and dtp > 16 and dtp < 49:
break
if maph[int(enx)][int(eny)] != 0:
enx, eny = 0, 0
else:
seenx, seeny, lock = enx, eny, 0
screen.blit(font2.render(" Enemy Respawning! ", 1, pg.Color("red"), pg.Color("grey")),(300,50))
pg.mixer.Channel(1).play(respawnfx)
else:
dtp = (enx-posx)**2 + (eny-posy)**2
if dtp < 1:
score -= 1
endmsg = " You died! Current score: " + str(score)
pg.mixer.Channel(1).play(failfx)
enx, eny, seenx, seeny, lock = 0, 0, 0, 0, 0
pause = 1
surf = pg.surfarray.make_surface((np.rot90(255-pixels*255)).astype('uint8'))
surf = pg.transform.smoothscale(surf, (800, 600))
screen.blit(surf, (0, 0))
elif dtp > 300:
enx, eny, seenx, seeny, lock = 0, 0, 0, 0, 0
run = 0
ticks = pg.time.get_ticks()/100000
lx = size/2 + 1000*np.cos(ticks)
ly = size/2 + 1000*np.sin(ticks)
posx, posy, rot, rot_v, shoot = movement(pressed_keys,posx, posy, rot, rot_v, maph, et, shoot, sstart)
pg.mouse.set_pos([400, 300])
mplayer = np.zeros([size, size])
enx, eny, mplayer, et, shoot, sx, sy, sdir,seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
if run and (seenx == posx or seeny == posy):
run = False
pg.mixer.Channel(1).play(runfx)
else:
clock.tick(30)
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("red")),(45,45))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("blue")),(55,55))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("white")),(50,50))
screen.blit(font2.render(endmsg, 1, pg.Color("salmon"), (100, 34, 60)),(50,320))
if (int(posx) == exitx and int(posy) == exity):
screen.blit(font2.render(" Your current score is "+str(score), 1, pg.Color("grey"), (80, 34, 80)),(50,390))
else:
screen.blit(font2.render(" Press P to continue ", 1, pg.Color("grey"), (80, 34, 80)),(50,390))
screen.blit(font2.render(" Press N for a new game ", 1, pg.Color("grey"), (45, 34, 100)),(50,460))
screen.blit(font2.render(" Press ESC to leave ", 1, pg.Color("grey"), (13, 34, 139)),(50,530))
pg.display.update()
screen.blit(font2.render(endmsg, 1, pg.Color("salmon"), (100, 34, 60)),(50,320))
pg.mixer.fadeout(1000)
pg.display.update()
print(endmsg)
pg.time.wait(2000)
pg.quit()
def maze_generator(x, y, size):
mr = np.random.uniform(0,1, (size,size))
mg = np.random.uniform(0,1, (size,size))
mb = np.random.uniform(0,1, (size,size))
mapr = np.random.choice([0, 0, 0, 0, 1], (size,size))
maps = np.random.choice([0, 0, 0, 0, 1], (size,size))
mapt = np.random.choice([0, 0, 0, 1, 2], (size,size))
maptemp = np.random.choice([0,0, 1], (size,size))
maph = np.random.uniform(0.25, 0.99, (size,size))
maph[np.where(maptemp == 0)] = 0
maph[0,:], maph[size-1,:], maph[:,0], maph[:,size-1] = (1,1,1,1)
maps[0,:], maps[size-1,:], maps[:,0], maps[:,size-1] = (0,0,0,0)
maph[x][y], mapr[x][y] = (0, 0)
count = 0
while 1:
testx, testy = (x, y)
if np.random.uniform() > 0.5:
testx = testx + np.random.choice([-1, 1])
else:
testy = testy + np.random.choice([-1, 1])
if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:
if maph[testx][testy] == 0 or count > 5:
count = 0
x, y = (testx, testy)
maph[x][y], mapr[x][y] = (0, 0)
if x == size-2:
exitx, exity = (x, y)
break
else:
count = count+1
return mr, mg, mb, maph, mapr, exitx, exity, mapt, maps
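# The generator above carves a drunken-walk corridor from the start cell and
# stops once the walk reaches column size-2, which becomes the exit. A
# minimal standalone sketch of the same idea (0 = floor, 1 = wall; the
# border stays solid because steps are clipped to 1..size-2):
def _walk_demo(size=9):
    grid = np.ones((size, size))
    x, y = 1, 1
    grid[x][y] = 0
    while x != size - 2:
        if np.random.uniform() > 0.5:
            x = int(np.clip(x + np.random.choice([-1, 1]), 1, size - 2))
        else:
            y = int(np.clip(y + np.random.choice([-1, 1]), 1, size - 2))
        grid[x][y] = 0
    return grid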
def movement(pressed_keys,posx, posy, rot, rot_v, maph, et, shoot, sstart):
x, y = (posx, posy)
p_mouse = pg.mouse.get_pos()
rot, rot_v = rot - np.clip((p_mouse[0]-400)/200, -0.2, .2), rot_v -(p_mouse[1]-300)/400
rot_v = np.clip(rot_v, -1, 1)
if pressed_keys[pg.K_UP] or pressed_keys[ord('w')]:
x, y = (x + et*np.cos(rot), y + et*np.sin(rot))
if pressed_keys[pg.K_DOWN] or pressed_keys[ord('s')]:
x, y = (x - et*np.cos(rot), y - et*np.sin(rot))
if pressed_keys[pg.K_LEFT] or pressed_keys[ord('a')]:
x, y = (x - et*np.sin(rot), y + et*np.cos(rot))
if pressed_keys[pg.K_RIGHT] or pressed_keys[ord('d')]:
x, y = (x + et*np.sin(rot), y - et*np.cos(rot))
if maph[int(x)][int(y)] == 0:
posx, posy = (x, y)
if not shoot and sstart == None and pressed_keys[pg.K_SPACE]:
shoot = 1
return posx, posy, rot, rot_v, shoot
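# Note on the strafe math above: left/right movement adds the view vector
# rotated 90 degrees, i.e. (-sin(rot), +cos(rot)) for left and
# (+sin(rot), -cos(rot)) for right, so strafe speed matches forward speed.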
@njit(fastmath=True)
def super_fast(width, height, mod, inc, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, maph, exitx, exity, mapr, mapt, maps, pr, pg, pb, enx, eny, sx, sy, size):
texture=[[ .95, .99, .97, .8], # brick wall
[ .97, .95, .96, .85],
[.8, .85, .8, .8],
[ .93, .8, .98, .96],
[ .99, .8, .97, .95],
[.8, .85, .8, .8]]
idx = 0
for j in range(height): #vertical loop
rot_j = rot_v + np.deg2rad(24 - j/mod)
sinzo = inc*np.sin(rot_j)
coszo = inc*np.sqrt(abs(np.cos(rot_j)))
for i in range(width): #horizontal vision loop
rot_i = rot + np.deg2rad(i/mod - 30)
x, y, z = (posx, posy, posz)
sin, cos, sinz = coszo*np.sin(rot_i), coszo*np.cos(rot_i), sinzo
modr = 1
cx, cy, c1r, c2r, c3r = 1, 1, 1, 1, 1
shot, enem, mapv = 0, 0, 0
dtp = np.random.uniform(0.002,0.01)
while 1:
if (mapv == 0 or (sinz > 0 and (z > mapv or (mapv==6 and (z>0.4 or z <0.2)) or(z > 0.57 and mapv > 1)))): ## LoDev DDA for optimization
norm = np.sqrt(cos**2 + sin**2 + sinz**2)
rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16
mapX, mapY = int(x), int(y)
deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)
if (rayDirX < 0):
stepX, sideDistX = -1, (x - mapX) * deltaDistX
else:
stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX
if (rayDirY < 0):
stepY, sideDistY = -1, (y - mapY) * deltaDistY
else:
stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY
if (rayDirZ < 0):
sideDistZ = z*deltaDistZ;
else:
sideDistZ = (1-z)*deltaDistZ
while (1):
if (sideDistX < sideDistY):
sideDistX += deltaDistX; mapX += stepX
dist = sideDistX; side = 0
if mapX < 1 or mapX > size-2:
break
else:
sideDistY += deltaDistY; mapY += stepY
dist = sideDistY; side = 1
if mapY < 1 or mapY > size-2:
break
if (maph[mapX][mapY] != 0):
break
if (side):
dist = dist - deltaDistY
else:
dist = dist - deltaDistX
if (dist > sideDistZ):
dist = sideDistZ
x = x + rayDirX*dist - cos/2
y = y + rayDirY*dist - sin/2
z = z + rayDirZ*dist - sinz/2
## end of LoDev DDA
x += cos; y += sin; z += sinz
if (z > 1 or z < 0): # check ceiling and floor
break
mapv = maph[int(x)][int(y)]
if mapv > 1 and z < 0.57:
if mapv == 2 or mapv == 8:
if z> 0.45 and (x-posx)**2 + (y-posy)**2 + (z-0.5)**2 < 0.005 :
break
if z < 0.45 and z > 0.3 and (x-posx)**2 + (y-posy)**2 < (z/10 - 0.02):
break
if z < 0.3 and (x-posx)**2 + (y-posy)**2 + (z-0.15)**2 < 0.023 :
break
if mapv == 3 or mapv == 9:
enem = 1
if z> 0.45 and (x-enx)**2 + (y-eny)**2 + (z-0.5)**2 < 0.005 :
break
if z < 0.45 and z > 0.3 and (x-enx)**2 + (y-eny)**2 < (z/10 - 0.02):
break
if z < 0.3 and (x-enx)**2 + (y-eny)**2 + (z-0.15)**2 < 0.023 :
break
if mapv > 5 and z < 0.4 and z > 0.2:
                    if ((x-sx)**2 + (y-sy)**2 + (z-0.3)**2 < dtp): # hit radius dtp is randomized in (0.002, 0.01) above
shot = 1
break
if mapv > z and mapv < 2: # check walls
if maps[int(x)][int(y)]: # check spheres
if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):
if (mapr[int(x)][int(y)]): # spherical mirror
if (modr == 1):
cx, cy = int(x), int(y)
modr = modr*0.7
if (modr < 0.2):
break
if (mapv - z <= abs(sinz) ): ## horizontal surface
sinz = -sinz
else:
nx = (x-int(x)-0.5)/0.5; ny = (y-int(y)-0.5)/0.5; nz =(z-int(z)-0.5)/0.5
dot = 2*(cos*nx + sin*ny + sinz*nz)
cos = (cos - nx*dot); sin = (sin - ny*dot); sinz = (sinz - nz*dot)
x += cos; y += sin; z += sinz
else:
break
elif mapr[int(x)][int(y)]: # check reflections
if modr == 1:
cx, cy = int(x), int(y)
modr = modr*0.7
if modr < 0.2:
break
if abs(z-maph[int(x)][int(y)]) < abs(sinz):
sinz = -sinz
elif maph[int(x+cos)][int(y-sin)] == maph[int(x)][int(y)]:
cos = -cos
else:
sin = -sin
else:
break
if z > 1: # ceiling
deltaDistZ = (lz-z)*deltaDistZ
x += deltaDistZ*rayDirX; y += deltaDistZ*rayDirY; z = lz
dtol = np.sqrt((x-lx)**2+(y-ly)**2)
if dtol < 50: #light source
shot = 1
c1, c2, c3 = 1, 1, 0.5
else:
angle = np.rad2deg(np.arctan((y-ly)/(x-lx)))/np.random.uniform(12,15)
sh = (0.8+ abs(angle - int(angle))/5)/(dtol/1000)
if sh > 1:
sh = 1
if int(angle)%2 == 1:
c1, c2, c3 = 0.8*(1-sh), 0.86*(1-sh/4), (1-sh/10)
else:
c1, c2, c3 = 0.8*(1-sh), 0.9*(1-sh/4), (1-sh/10)
if sx != -1:
c1, c2, c3 = 0.7*c1, 0.7*c2, 0.7*c3
elif z < 0: # floor
z = 0
if int(x*2)%2 == int(y*2)%2:
c1, c2, c3 = .8,.8,.8
else:
if int(x) == exitx and int(y) == exity: #exit
c1, c2, c3 = 0,0,.6
else:
c1, c2, c3 = .1,.1,.1
elif mapv < 2: # walls
c1, c2, c3 = mr[int(x)][int(y)], mg[int(x)][int(y)], mg[int(x)][int(y)]
if mapt[int(x)][int(y)]: # textured walls
if y%1 < 0.05 or y%1 > 0.95:
ww = int((x*3)%1*4)
else:
ww = int((y*3)%1*4)
if x%1 < 0.95 and x%1 > 0.05 and y%1 < 0.95 and y%1 > 0.05:
zz = int(x*5%1*6)
else:
zz = int(z*5%1*6)
text = texture[zz][ww]
c1, c2, c3 = c1*text, c2*text, c3*text
if mapv - z <= abs(sinz):
z = mapv
elif not maps[int(x)][int(y)]:
if int(x-cos) != int(x):
x = max(int(x-cos), int(x))
modr = modr*0.80
else:
y = max(int(y-sin), int(y))
modr = modr*0.9
else:
if shot:
sh = ((x-sx)**2 + (y-sy)**2 + (z-0.3)**2)/0.012
c1, c2, c3 = 1, 0.6*sh+0.2 , 0.2*sh+0.1 # shot
elif z> 0.45:
c1, c2, c3 = 0.6, 0.3, 0.3 # Head
elif z > 0.3:
c1, c2, c3 = 0.3, 0.5, 0.5 # Chest
else:
if enem:
c1, c2, c3 = 1, 0.2, 0.2 # Roller red
else:
c1, c2, c3 = 0.2, 0.2, 1 # Roller blue
if modr <= 0.7 and not shot:
c1r, c2r, c3r = mr[cx][cy], mg[cx][cy], mg[cx][cy]
if not shot and z < 1:
dtp = np.sqrt((x-posx)**2+(y-posy)**2+(z-posz)**2)
if dtp > 7:
modr = modr/np.log((dtp-6)/4+np.e)
if z < 1: # shadows
if sx != -1 and maph[int(sx)][int(sy)] > 1:
shot, c3 = 1, c3 * 0.9
dtol = np.sqrt((x-sx)**2+(y-sy)**2+(z-0.35)**2)
cos, sin, sinz = .01*(sx-x)/dtol, .01*(sy-y)/dtol, .01*(0.35-z)/dtol
else:
dtol = np.sqrt((x-lx)**2+(y-ly)**2+(z-lz)**2)
cos, sin, sinz = .01*(lx-x)/dtol, .01*(ly-y)/dtol, .01*(lz-z)/dtol
x += cos; y += sin; z += sinz
mapv = maph[int(x)][int(y)]
if z < mapv and mapv < 1 and not maps[int(x)][int(y)]:
modr = modr*0.39
while modr > 0.45:
if (mapv == 0) or not shot and ((z > mapv) or (z > 0.57 and mapv > 1)): ## LoDev DDA for optimization
norm = np.sqrt(cos**2 + sin**2 + sinz**2)
rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16
mapX, mapY = int(x), int(y)
deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)
if (rayDirX < 0):
stepX, sideDistX = -1, (x - mapX) * deltaDistX
else:
stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX
if (rayDirY < 0):
stepY, sideDistY = -1, (y - mapY) * deltaDistY
else:
stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY
if (rayDirZ < 0):
sideDistZ = z*deltaDistZ;
else:
sideDistZ = (1-z)*deltaDistZ
while (1):
if (sideDistX < sideDistY):
sideDistX += deltaDistX; mapX += stepX
dist = sideDistX; side = 0
if mapX < 1 or mapX > size-2:
break
else:
sideDistY += deltaDistY; mapY += stepY
dist = sideDistY; side = 1
if mapY < 1 or mapY > size-2:
break
if (maph[mapX][mapY] != 0):
break
if (side):
dist = dist - deltaDistY
else:
dist = dist - deltaDistX
if (dist > sideDistZ):
dist = sideDistZ
x = x + rayDirX*dist - cos/2
y = y + rayDirY*dist - sin/2
z = z + rayDirZ*dist - sinz/2
## end of LoDev DDA
x += cos; y += sin; z += sinz
mapv = maph[int(x)][int(y)]
if shot:
if mapv > 5 or (sinz > 0 and z > 0.35) or (sinz < 0 and z < 0.35):
break
elif z >1:
break
if z < 0.57 and mapv > 1:
if mapv == 3 or mapv == 9:
if z> 0.45 and (x-enx)**2 + (y-eny)**2 + (z-0.5)**2 < 0.005 :
modr = modr*0.67
elif z < 0.45 and z > 0.3 and (x-enx)**2 + (y-eny)**2 < (z/10 - 0.02):
modr = modr*0.67
elif z < 0.3 and (x-enx)**2 + (y-eny)**2 + (z-0.15)**2 < 0.023 :
modr = modr*0.67
elif mapv == 2 or mapv == 8:
if z> 0.45 and (x-posx)**2 + (y-posy)**2 + (z-0.5)**2 < 0.005 :
modr = modr*0.67
elif z < 0.45 and z > 0.3 and (x-posx)**2 + (y-posy)**2 < (z/10 - 0.02):
modr = modr*0.67
elif z < 0.3 and (x-posx)**2 + (y-posy)**2 + (z-0.15)**2 < 0.023 :
modr = modr*0.67
if mapv > 0 and z <= mapv and mapv < 2:
if maps[int(x)][int(y)]: # check spheres
if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):
modr = modr*0.9
else:
modr = modr*0.9
pr[idx] = modr*np.sqrt(c1*c1r)
pg[idx] = modr*np.sqrt(c2*c2r)
pb[idx] = modr*np.sqrt(c3*c3r)
idx += 1
return pr, pg, pb
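# The "LoDev DDA" blocks above advance a ray cell-by-cell instead of in small
# fixed steps. A minimal 2D sketch of the same algorithm (assumes the grid
# border is solid, as maze_generator guarantees, so the loop terminates):
def _dda_2d(maph, x, y, rayDirX, rayDirY):
    mapX, mapY = int(x), int(y)
    deltaDistX = abs(1/(rayDirX + 1e-16))
    deltaDistY = abs(1/(rayDirY + 1e-16))
    stepX = -1 if rayDirX < 0 else 1
    stepY = -1 if rayDirY < 0 else 1
    sideDistX = (x - mapX)*deltaDistX if rayDirX < 0 else (mapX + 1 - x)*deltaDistX
    sideDistY = (y - mapY)*deltaDistY if rayDirY < 0 else (mapY + 1 - y)*deltaDistY
    while maph[mapX][mapY] == 0:               # march until a solid cell
        if sideDistX < sideDistY:
            sideDistX += deltaDistX; mapX += stepX
        else:
            sideDistY += deltaDistY; mapY += stepY
    return mapX, mapY                          # first wall cell hit by the ray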
def adjust_resol(width):
height = int(0.75*width)
mod = width/64
inc = 0.02/mod
rr = np.random.uniform(0,1,width * height)
gg = np.random.uniform(0,1,width * height)
bb = np.random.uniform(0,1,width * height)
## print('Resolution: ', width, height)
return width, height, mod, inc, rr, gg, bb
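# Note: mod = width/64 ties ray angles to pixel columns, so the horizontal
# field of view (from deg2rad(i/mod - 30)) stays fixed at every resolution,
# while inc = 0.02/mod shrinks the march step as the resolution grows.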
@njit(fastmath=True)
def agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock):
if enx != 0:
if not lock or np.random.uniform(0,1) > 0.99:
dtp = np.sqrt((enx-posx)**2 + (eny-posy)**2)
cos, sin = (posx-enx)/dtp, (posy-eny)/dtp
x, y = enx, eny
for i in range(300):
x += 0.04*cos; y += 0.04*sin
if maph[int(x)][int(y)] != 0:
lock = 0
break
if(int(x) == int(posx) and int(y) == int(posy)):
seenx, seeny = posx, posy
lock = 1
break
if int(enx) == int(seenx) and int(eny) == int(seeny):
if not lock:
if shoot:
seenx, seeny = np.random.uniform(enx, posx), np.random.uniform(eny, posy)
else:
seenx, seeny = np.random.normal(enx, 2), np.random.normal(eny, 2)
else:
seenx, seeny = np.random.normal(posx, 2), np.random.normal(posy, 2)
dtp = np.sqrt((enx-seenx)**2 + (eny-seeny)**2)
cos, sin = (seenx-enx)/dtp, (seeny-eny)/dtp
x, y = enx + et*(cos+np.random.normal(0,.5)), eny + et*(sin+np.random.normal(0,.5))
if maph[int(x)][int(y)] == 0:
enx, eny = x, y
else:
if np.random.uniform(0,1) > 0.5:
x, y = enx - et*(sin+np.random.normal(0,.5)), eny + et*(cos+np.random.normal(0,.5))
else:
x, y = enx + et*(sin+np.random.normal(0,.5)), eny - et*(cos+np.random.normal(0,.5))
if maph[int(x)][int(y)] == 0:
enx, eny = x, y
else:
seenx, seeny = enx+np.random.normal(0,3), eny+np.random.normal(0,3)
lock = 0
mplayer[int(enx)][int(eny)] = 3
mplayer[int(posx)][int(posy)] = 2
if shoot:
if sx == -1:
sdir = rot+np.random.uniform(-.1,.1)
sx, sy = posx + .5*np.cos(sdir), posy + .5*np.sin(sdir)
sx, sy = sx + 5*et*np.cos(sdir), sy + 5*et*np.sin(sdir)
if enx != 0 and (sx - enx)**2 + (sy - eny)**2 < 0.02:
shoot, sx, sy, enx, eny, seenx, seeny = 0, -1, -1, 0, 0, 0, 0
if maph[int(sx)][int(sy)] != 0:
shoot, sx, sy = 0, -1, -1
else:
mplayer[int(sx)][int(sy)] += 6
mplayer = maph + mplayer
return(enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock)
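# Note on the AI above: the enemy steers toward (seenx, seeny), which is the
# player's true position while line of sight holds (lock == 1) and a noisy
# guess otherwise; the sight check of 300 steps at 0.04 units each gives a
# range of about 12 map cells.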
if __name__ == '__main__':
main()
| 47.337778
| 192
| 0.393234
|
import numpy as np
import pygame as pg
from numba import njit
def main():
size = np.random.randint(20,60)
posx, posy, posz = 1.5, np.random.uniform(1, size -1), 0.5
rot, rot_v = (np.pi/4, 0)
lx, ly, lz = (size*20, size*30, 1000)
mr, mg, mb, maph, mapr, exitx, exity, mapt, maps = maze_generator(int(posx), int(posy), size)
enx, eny, seenx, seeny, lock = np.random.uniform(2, size-3 ), np.random.uniform(2, size-3), 0, 0, 0
maph[int(enx)][int(eny)] = 0
shoot, sx, sy, sdir = 1, -1, -1, rot
res, res_o = 5, [96, 112, 160, 192, 224, 260, 300, 340, 400, 480, 540, 600, 800]
width, height, mod, inc, rr, gg, bb = adjust_resol(24)
running = True
pg.init()
font = pg.font.SysFont("Arial", 18)
font2 = pg.font.SysFont("Impact", 48)
screen = pg.display.set_mode((800, 600))
rr, gg, bb = np.linspace(0,0.8, width*height), np.linspace(0.5,.1, width*height), np.linspace(1,0.1, width*height)
pixels = np.dstack((rr,gg,bb))
pixels = np.reshape(pixels, (height,width,3))
surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))
surf = pg.transform.scale(surf, (750, 550))
screen.blit(surf, (25, 25))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("red")),(45,95))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("blue")),(55,105))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("white")),(50,100))
screen.blit(font2.render(" Loading, please wait... ", 1, pg.Color("black"), pg.Color("grey")),(50,300))
pg.display.update()
clock = pg.time.Clock()
pg.mouse.set_visible(False)
et = 0.1
mplayer = np.zeros([size, size])
enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
sstart, timer, count, autores, smooth = None, 0, -100, 1, 0
pause = 0
pg.mixer.set_num_channels(3)
ambient = pg.mixer.Sound('soundfx/HauntSilentPartner.mp3')
ambient.set_volume(0.5)
runfx = pg.mixer.Sound('soundfx/run.mp3')
shotfx = pg.mixer.Sound('soundfx/slap.mp3')
killfx = pg.mixer.Sound('soundfx/shutdown.mp3')
respawnfx = pg.mixer.Sound('soundfx/respawn.mp3')
successfx = pg.mixer.Sound('soundfx/success.mp3')
failfx = pg.mixer.Sound('soundfx/fail.mp3')
pg.mixer.Channel(0).play(ambient, -1)
pg.mixer.Channel(1).play(respawnfx)
run = 1
score = 0
ticks = pg.time.get_ticks()/100000
while running:
count += 1
for event in pg.event.get():
if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
if not pause:
pause = 1
pg.mixer.Channel(1).play(respawnfx)
endmsg = " Game paused. Current score: " + str(score)
else:
endmsg = " Thanks for playing! Total score: " + str(score)
pg.mixer.Channel(1).play(killfx)
running = False
if sstart == None and(event.type == pg.MOUSEBUTTONDOWN or event.type == pg.MOUSEBUTTONUP):
shoot = 1
if event.type == pg.KEYDOWN:
if event.key == ord('p'): # pause
if not pause:
pause = 1
endmsg = " Game paused. Current score: " + str(score)
elif (int(posx) != exitx or int(posy) != exity):
pause = 0
if pause and event.key == ord('n'): # new game
pause = 0
size = np.random.randint(20,60)
posx, posy, posz = 1.5, np.random.uniform(1, size -1), 0.5
rot, rot_v = (np.pi/4, 0)
mr, mg, mb, maph, mapr, exitx, exity, mapt, maps = maze_generator(int(posx), int(posy), size)
enx, eny, seenx, seeny, lock, run = 0, 0, 0, 0, 0, 1
shoot, sx, sy, sstart = 0, -1, -1, None
mplayer = np.zeros([size, size])
et = 0.1
enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
count = -100
if autores:
width, height, mod, inc, rr, gg, bb = adjust_resol(24)
pg.mixer.Channel(1).play(respawnfx)
if event.key == ord('t'): # toggle auto resolution
autores = not(autores)
                if event.key == ord('y'): # toggle smooth scaling
smooth = not(smooth)
if not autores:
if event.key == ord('q'): # manually change resolution
if res > 0 :
res = res-1
width, height, mod, inc, rr, gg, bb = adjust_resol(res_o[res])
if event.key == ord('e'):
if res < len(res_o)-1 :
res = res+1
width, height, mod, inc, rr, gg, bb = adjust_resol(res_o[res])
if not pause:
rr, gg, bb = super_fast(width, height, mod, inc, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, mplayer, exitx, exity, mapr, mapt, maps, rr, gg, bb, enx, eny, sx, sy, size)
pixels = np.dstack((rr,gg,bb))
pixels = np.reshape(pixels, (height,width,3))
surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))
if shoot or smooth:
surf = pg.transform.smoothscale(surf, (800, 600))
else:
surf = pg.transform.scale(surf, (800, 600))
screen.blit(surf, (0, 0))
            ## fpss = int(clock.get_fps())
            fpss = int(1000/max(1, pg.time.get_ticks() - ticks*100000)) # guard against a zero frame delta
fps = font.render(str(fpss)+' w: '+ str(width) + ' Score: '+str(score), 1, pg.Color("coral"))
screen.blit(fps,(10,0))
if autores and count > 10: #auto adjust render resolution
if fpss < 50 and width > 100:
count = 0
width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*0.8))
if fpss > 65 and width < 728:
count = 0
width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*1.1))
# player's movement
if (int(posx) == exitx and int(posy) == exity):
endmsg = " You escaped safely! "
pg.mixer.Channel(1).play(successfx)
score += 1
pause = 1
pressed_keys = pg.key.get_pressed()
et = clock.tick()/500
if et > 0.5:
et = 0.5
if shoot or sstart != None:
if sstart == None:
pg.mixer.Channel(2).play(shotfx)
if fpss < 60 and autores:
count = -50
width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*0.8))
sstart = pg.time.get_ticks()
elif pg.time.get_ticks() - sstart > 500:
shoot, sx, sy, sstart = 0, -1, -1, None
if enx == 0:
if not run:
pg.mixer.Channel(1).play(killfx)
run = 1
if np.random.uniform() > 0.999:
cos, sin = np.cos(rot), np.sin(rot)
for ee in range(100):
enx = np.clip(np.random.normal(posx, 5), 1, size-2)
eny = np.clip(np.random.normal(posy, 5), 1, size-2)
dtp = (enx-posx)**2 + (eny-posy)**2
if maph[int(enx)][int(eny)] == 0 and dtp > 16 and dtp < 49:
break
if maph[int(enx)][int(eny)] != 0:
enx, eny = 0, 0
else:
seenx, seeny, lock = enx, eny, 0
screen.blit(font2.render(" Enemy Respawning! ", 1, pg.Color("red"), pg.Color("grey")),(300,50))
pg.mixer.Channel(1).play(respawnfx)
else:
dtp = (enx-posx)**2 + (eny-posy)**2
if dtp < 1:
score -= 1
endmsg = " You died! Current score: " + str(score)
pg.mixer.Channel(1).play(failfx)
enx, eny, seenx, seeny, lock = 0, 0, 0, 0, 0
pause = 1
surf = pg.surfarray.make_surface((np.rot90(255-pixels*255)).astype('uint8'))
surf = pg.transform.smoothscale(surf, (800, 600))
screen.blit(surf, (0, 0))
elif dtp > 300:
enx, eny, seenx, seeny, lock = 0, 0, 0, 0, 0
run = 0
ticks = pg.time.get_ticks()/100000
lx = size/2 + 1000*np.cos(ticks)
ly = size/2 + 1000*np.sin(ticks)
posx, posy, rot, rot_v, shoot = movement(pressed_keys,posx, posy, rot, rot_v, maph, et, shoot, sstart)
pg.mouse.set_pos([400, 300])
mplayer = np.zeros([size, size])
enx, eny, mplayer, et, shoot, sx, sy, sdir,seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
if run and (seenx == posx or seeny == posy):
run = False
pg.mixer.Channel(1).play(runfx)
else:
clock.tick(30)
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("red")),(45,45))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("blue")),(55,55))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("white")),(50,50))
screen.blit(font2.render(endmsg, 1, pg.Color("salmon"), (100, 34, 60)),(50,320))
if (int(posx) == exitx and int(posy) == exity):
screen.blit(font2.render(" Your current score is "+str(score), 1, pg.Color("grey"), (80, 34, 80)),(50,390))
else:
screen.blit(font2.render(" Press P to continue ", 1, pg.Color("grey"), (80, 34, 80)),(50,390))
screen.blit(font2.render(" Press N for a new game ", 1, pg.Color("grey"), (45, 34, 100)),(50,460))
screen.blit(font2.render(" Press ESC to leave ", 1, pg.Color("grey"), (13, 34, 139)),(50,530))
pg.display.update()
screen.blit(font2.render(endmsg, 1, pg.Color("salmon"), (100, 34, 60)),(50,320))
pg.mixer.fadeout(1000)
pg.display.update()
print(endmsg)
pg.time.wait(2000)
pg.quit()
def maze_generator(x, y, size):
mr = np.random.uniform(0,1, (size,size))
mg = np.random.uniform(0,1, (size,size))
mb = np.random.uniform(0,1, (size,size))
mapr = np.random.choice([0, 0, 0, 0, 1], (size,size))
maps = np.random.choice([0, 0, 0, 0, 1], (size,size))
mapt = np.random.choice([0, 0, 0, 1, 2], (size,size))
maptemp = np.random.choice([0,0, 1], (size,size))
maph = np.random.uniform(0.25, 0.99, (size,size))
maph[np.where(maptemp == 0)] = 0
maph[0,:], maph[size-1,:], maph[:,0], maph[:,size-1] = (1,1,1,1)
maps[0,:], maps[size-1,:], maps[:,0], maps[:,size-1] = (0,0,0,0)
maph[x][y], mapr[x][y] = (0, 0)
count = 0
while 1:
testx, testy = (x, y)
if np.random.uniform() > 0.5:
testx = testx + np.random.choice([-1, 1])
else:
testy = testy + np.random.choice([-1, 1])
if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:
if maph[testx][testy] == 0 or count > 5:
count = 0
x, y = (testx, testy)
maph[x][y], mapr[x][y] = (0, 0)
if x == size-2:
exitx, exity = (x, y)
break
else:
count = count+1
return mr, mg, mb, maph, mapr, exitx, exity, mapt, maps
def movement(pressed_keys,posx, posy, rot, rot_v, maph, et, shoot, sstart):
x, y = (posx, posy)
p_mouse = pg.mouse.get_pos()
rot, rot_v = rot - np.clip((p_mouse[0]-400)/200, -0.2, .2), rot_v -(p_mouse[1]-300)/400
rot_v = np.clip(rot_v, -1, 1)
if pressed_keys[pg.K_UP] or pressed_keys[ord('w')]:
x, y = (x + et*np.cos(rot), y + et*np.sin(rot))
if pressed_keys[pg.K_DOWN] or pressed_keys[ord('s')]:
x, y = (x - et*np.cos(rot), y - et*np.sin(rot))
if pressed_keys[pg.K_LEFT] or pressed_keys[ord('a')]:
x, y = (x - et*np.sin(rot), y + et*np.cos(rot))
if pressed_keys[pg.K_RIGHT] or pressed_keys[ord('d')]:
x, y = (x + et*np.sin(rot), y - et*np.cos(rot))
if maph[int(x)][int(y)] == 0:
posx, posy = (x, y)
if not shoot and sstart == None and pressed_keys[pg.K_SPACE]:
shoot = 1
return posx, posy, rot, rot_v, shoot
@njit(fastmath=True)
def super_fast(width, height, mod, inc, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, maph, exitx, exity, mapr, mapt, maps, pr, pg, pb, enx, eny, sx, sy, size):
texture=[[ .95, .99, .97, .8], # brick wall
[ .97, .95, .96, .85],
[.8, .85, .8, .8],
[ .93, .8, .98, .96],
[ .99, .8, .97, .95],
[.8, .85, .8, .8]]
idx = 0
for j in range(height): #vertical loop
rot_j = rot_v + np.deg2rad(24 - j/mod)
sinzo = inc*np.sin(rot_j)
coszo = inc*np.sqrt(abs(np.cos(rot_j)))
for i in range(width): #horizontal vision loop
rot_i = rot + np.deg2rad(i/mod - 30)
x, y, z = (posx, posy, posz)
sin, cos, sinz = coszo*np.sin(rot_i), coszo*np.cos(rot_i), sinzo
modr = 1
cx, cy, c1r, c2r, c3r = 1, 1, 1, 1, 1
shot, enem, mapv = 0, 0, 0
dtp = np.random.uniform(0.002,0.01)
while 1:
if (mapv == 0 or (sinz > 0 and (z > mapv or (mapv==6 and (z>0.4 or z <0.2)) or(z > 0.57 and mapv > 1)))): ## LoDev DDA for optimization
norm = np.sqrt(cos**2 + sin**2 + sinz**2)
rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16
mapX, mapY = int(x), int(y)
deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)
if (rayDirX < 0):
stepX, sideDistX = -1, (x - mapX) * deltaDistX
else:
stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX
if (rayDirY < 0):
stepY, sideDistY = -1, (y - mapY) * deltaDistY
else:
stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY
if (rayDirZ < 0):
sideDistZ = z*deltaDistZ;
else:
sideDistZ = (1-z)*deltaDistZ
while (1):
if (sideDistX < sideDistY):
sideDistX += deltaDistX; mapX += stepX
dist = sideDistX; side = 0
if mapX < 1 or mapX > size-2:
break
else:
sideDistY += deltaDistY; mapY += stepY
dist = sideDistY; side = 1
if mapY < 1 or mapY > size-2:
break
if (maph[mapX][mapY] != 0):
break
if (side):
dist = dist - deltaDistY
else:
dist = dist - deltaDistX
if (dist > sideDistZ):
dist = sideDistZ
x = x + rayDirX*dist - cos/2
y = y + rayDirY*dist - sin/2
z = z + rayDirZ*dist - sinz/2
## end of LoDev DDA
x += cos; y += sin; z += sinz
if (z > 1 or z < 0): # check ceiling and floor
break
mapv = maph[int(x)][int(y)]
if mapv > 1 and z < 0.57:
if mapv == 2 or mapv == 8:
if z> 0.45 and (x-posx)**2 + (y-posy)**2 + (z-0.5)**2 < 0.005 :
break
if z < 0.45 and z > 0.3 and (x-posx)**2 + (y-posy)**2 < (z/10 - 0.02):
break
if z < 0.3 and (x-posx)**2 + (y-posy)**2 + (z-0.15)**2 < 0.023 :
break
if mapv == 3 or mapv == 9:
enem = 1
if z> 0.45 and (x-enx)**2 + (y-eny)**2 + (z-0.5)**2 < 0.005 :
break
if z < 0.45 and z > 0.3 and (x-enx)**2 + (y-eny)**2 < (z/10 - 0.02):
break
if z < 0.3 and (x-enx)**2 + (y-eny)**2 + (z-0.15)**2 < 0.023 :
break
if mapv > 5 and z < 0.4 and z > 0.2:
                    if ((x-sx)**2 + (y-sy)**2 + (z-0.3)**2 < dtp): # hit radius dtp is randomized in (0.002, 0.01) above
shot = 1
break
if mapv > z and mapv < 2: # check walls
if maps[int(x)][int(y)]: # check spheres
if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):
if (mapr[int(x)][int(y)]): # spherical mirror
if (modr == 1):
cx, cy = int(x), int(y)
modr = modr*0.7
if (modr < 0.2):
break
if (mapv - z <= abs(sinz) ): ## horizontal surface
sinz = -sinz
else:
nx = (x-int(x)-0.5)/0.5; ny = (y-int(y)-0.5)/0.5; nz =(z-int(z)-0.5)/0.5
dot = 2*(cos*nx + sin*ny + sinz*nz)
cos = (cos - nx*dot); sin = (sin - ny*dot); sinz = (sinz - nz*dot)
x += cos; y += sin; z += sinz
else:
break
elif mapr[int(x)][int(y)]: # check reflections
if modr == 1:
cx, cy = int(x), int(y)
modr = modr*0.7
if modr < 0.2:
break
if abs(z-maph[int(x)][int(y)]) < abs(sinz):
sinz = -sinz
elif maph[int(x+cos)][int(y-sin)] == maph[int(x)][int(y)]:
cos = -cos
else:
sin = -sin
else:
break
if z > 1: # ceiling
deltaDistZ = (lz-z)*deltaDistZ
x += deltaDistZ*rayDirX; y += deltaDistZ*rayDirY; z = lz
dtol = np.sqrt((x-lx)**2+(y-ly)**2)
if dtol < 50: #light source
shot = 1
c1, c2, c3 = 1, 1, 0.5
else:
angle = np.rad2deg(np.arctan((y-ly)/(x-lx)))/np.random.uniform(12,15)
sh = (0.8+ abs(angle - int(angle))/5)/(dtol/1000)
if sh > 1:
sh = 1
if int(angle)%2 == 1:
c1, c2, c3 = 0.8*(1-sh), 0.86*(1-sh/4), (1-sh/10)
else:
c1, c2, c3 = 0.8*(1-sh), 0.9*(1-sh/4), (1-sh/10)
if sx != -1:
c1, c2, c3 = 0.7*c1, 0.7*c2, 0.7*c3
elif z < 0: # floor
z = 0
if int(x*2)%2 == int(y*2)%2:
c1, c2, c3 = .8,.8,.8
else:
if int(x) == exitx and int(y) == exity: #exit
c1, c2, c3 = 0,0,.6
else:
c1, c2, c3 = .1,.1,.1
elif mapv < 2: # walls
c1, c2, c3 = mr[int(x)][int(y)], mg[int(x)][int(y)], mg[int(x)][int(y)]
if mapt[int(x)][int(y)]: # textured walls
if y%1 < 0.05 or y%1 > 0.95:
ww = int((x*3)%1*4)
else:
ww = int((y*3)%1*4)
if x%1 < 0.95 and x%1 > 0.05 and y%1 < 0.95 and y%1 > 0.05:
zz = int(x*5%1*6)
else:
zz = int(z*5%1*6)
text = texture[zz][ww]
c1, c2, c3 = c1*text, c2*text, c3*text
if mapv - z <= abs(sinz):
z = mapv
elif not maps[int(x)][int(y)]:
if int(x-cos) != int(x):
x = max(int(x-cos), int(x))
modr = modr*0.80
else:
y = max(int(y-sin), int(y))
modr = modr*0.9
else:
if shot:
sh = ((x-sx)**2 + (y-sy)**2 + (z-0.3)**2)/0.012
c1, c2, c3 = 1, 0.6*sh+0.2 , 0.2*sh+0.1 # shot
elif z> 0.45:
c1, c2, c3 = 0.6, 0.3, 0.3 # Head
elif z > 0.3:
c1, c2, c3 = 0.3, 0.5, 0.5 # Chest
else:
if enem:
c1, c2, c3 = 1, 0.2, 0.2 # Roller red
else:
c1, c2, c3 = 0.2, 0.2, 1 # Roller blue
if modr <= 0.7 and not shot:
c1r, c2r, c3r = mr[cx][cy], mg[cx][cy], mg[cx][cy]
if not shot and z < 1:
dtp = np.sqrt((x-posx)**2+(y-posy)**2+(z-posz)**2)
if dtp > 7:
modr = modr/np.log((dtp-6)/4+np.e)
if z < 1: # shadows
if sx != -1 and maph[int(sx)][int(sy)] > 1:
shot, c3 = 1, c3 * 0.9
dtol = np.sqrt((x-sx)**2+(y-sy)**2+(z-0.35)**2)
cos, sin, sinz = .01*(sx-x)/dtol, .01*(sy-y)/dtol, .01*(0.35-z)/dtol
else:
dtol = np.sqrt((x-lx)**2+(y-ly)**2+(z-lz)**2)
cos, sin, sinz = .01*(lx-x)/dtol, .01*(ly-y)/dtol, .01*(lz-z)/dtol
x += cos; y += sin; z += sinz
mapv = maph[int(x)][int(y)]
if z < mapv and mapv < 1 and not maps[int(x)][int(y)]:
modr = modr*0.39
while modr > 0.45:
if (mapv == 0) or not shot and ((z > mapv) or (z > 0.57 and mapv > 1)): ## LoDev DDA for optimization
norm = np.sqrt(cos**2 + sin**2 + sinz**2)
rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16
mapX, mapY = int(x), int(y)
deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)
if (rayDirX < 0):
stepX, sideDistX = -1, (x - mapX) * deltaDistX
else:
stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX
if (rayDirY < 0):
stepY, sideDistY = -1, (y - mapY) * deltaDistY
else:
stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY
if (rayDirZ < 0):
sideDistZ = z*deltaDistZ;
else:
sideDistZ = (1-z)*deltaDistZ
while (1):
if (sideDistX < sideDistY):
sideDistX += deltaDistX; mapX += stepX
dist = sideDistX; side = 0
if mapX < 1 or mapX > size-2:
break
else:
sideDistY += deltaDistY; mapY += stepY
dist = sideDistY; side = 1
if mapY < 1 or mapY > size-2:
break
if (maph[mapX][mapY] != 0):
break
if (side):
dist = dist - deltaDistY
else:
dist = dist - deltaDistX
if (dist > sideDistZ):
dist = sideDistZ
x = x + rayDirX*dist - cos/2
y = y + rayDirY*dist - sin/2
z = z + rayDirZ*dist - sinz/2
## end of LoDev DDA
x += cos; y += sin; z += sinz
mapv = maph[int(x)][int(y)]
if shot:
if mapv > 5 or (sinz > 0 and z > 0.35) or (sinz < 0 and z < 0.35):
break
elif z >1:
break
if z < 0.57 and mapv > 1:
if mapv == 3 or mapv == 9:
if z> 0.45 and (x-enx)**2 + (y-eny)**2 + (z-0.5)**2 < 0.005 :
modr = modr*0.67
elif z < 0.45 and z > 0.3 and (x-enx)**2 + (y-eny)**2 < (z/10 - 0.02):
modr = modr*0.67
elif z < 0.3 and (x-enx)**2 + (y-eny)**2 + (z-0.15)**2 < 0.023 :
modr = modr*0.67
elif mapv == 2 or mapv == 8:
if z> 0.45 and (x-posx)**2 + (y-posy)**2 + (z-0.5)**2 < 0.005 :
modr = modr*0.67
elif z < 0.45 and z > 0.3 and (x-posx)**2 + (y-posy)**2 < (z/10 - 0.02):
modr = modr*0.67
elif z < 0.3 and (x-posx)**2 + (y-posy)**2 + (z-0.15)**2 < 0.023 :
modr = modr*0.67
if mapv > 0 and z <= mapv and mapv < 2:
if maps[int(x)][int(y)]: # check spheres
if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):
modr = modr*0.9
else:
modr = modr*0.9
pr[idx] = modr*np.sqrt(c1*c1r)
pg[idx] = modr*np.sqrt(c2*c2r)
pb[idx] = modr*np.sqrt(c3*c3r)
idx += 1
return pr, pg, pb
def adjust_resol(width):
height = int(0.75*width)
mod = width/64
inc = 0.02/mod
rr = np.random.uniform(0,1,width * height)
gg = np.random.uniform(0,1,width * height)
bb = np.random.uniform(0,1,width * height)
## print('Resolution: ', width, height)
return width, height, mod, inc, rr, gg, bb
@njit(fastmath=True)
def agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock):
if enx != 0:
if not lock or np.random.uniform(0,1) > 0.99:
dtp = np.sqrt((enx-posx)**2 + (eny-posy)**2)
cos, sin = (posx-enx)/dtp, (posy-eny)/dtp
x, y = enx, eny
for i in range(300):
x += 0.04*cos; y += 0.04*sin
if maph[int(x)][int(y)] != 0:
lock = 0
break
if(int(x) == int(posx) and int(y) == int(posy)):
seenx, seeny = posx, posy
lock = 1
break
if int(enx) == int(seenx) and int(eny) == int(seeny):
if not lock:
if shoot:
seenx, seeny = np.random.uniform(enx, posx), np.random.uniform(eny, posy)
else:
seenx, seeny = np.random.normal(enx, 2), np.random.normal(eny, 2)
else:
seenx, seeny = np.random.normal(posx, 2), np.random.normal(posy, 2)
dtp = np.sqrt((enx-seenx)**2 + (eny-seeny)**2)
cos, sin = (seenx-enx)/dtp, (seeny-eny)/dtp
x, y = enx + et*(cos+np.random.normal(0,.5)), eny + et*(sin+np.random.normal(0,.5))
if maph[int(x)][int(y)] == 0:
enx, eny = x, y
else:
if np.random.uniform(0,1) > 0.5:
x, y = enx - et*(sin+np.random.normal(0,.5)), eny + et*(cos+np.random.normal(0,.5))
else:
x, y = enx + et*(sin+np.random.normal(0,.5)), eny - et*(cos+np.random.normal(0,.5))
if maph[int(x)][int(y)] == 0:
enx, eny = x, y
else:
seenx, seeny = enx+np.random.normal(0,3), eny+np.random.normal(0,3)
lock = 0
mplayer[int(enx)][int(eny)] = 3
mplayer[int(posx)][int(posy)] = 2
if shoot:
if sx == -1:
sdir = rot+np.random.uniform(-.1,.1)
sx, sy = posx + .5*np.cos(sdir), posy + .5*np.sin(sdir)
sx, sy = sx + 5*et*np.cos(sdir), sy + 5*et*np.sin(sdir)
if enx != 0 and (sx - enx)**2 + (sy - eny)**2 < 0.02:
shoot, sx, sy, enx, eny, seenx, seeny = 0, -1, -1, 0, 0, 0, 0
if maph[int(sx)][int(sy)] != 0:
shoot, sx, sy = 0, -1, -1
else:
mplayer[int(sx)][int(sy)] += 6
mplayer = maph + mplayer
return(enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock)
if __name__ == '__main__':
main()
| true
| true
|
79068c75f08cb626f4e4651e913afac333b92e6a
| 87,820
|
py
|
Python
|
lib/sqlalchemy/dialects/sqlite/base.py
|
josteinl/sqlalchemy
|
9e7c068d669b209713da62da5748579f92d98129
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/dialects/sqlite/base.py
|
josteinl/sqlalchemy
|
9e7c068d669b209713da62da5748579f92d98129
|
[
"MIT"
] | null | null | null |
lib/sqlalchemy/dialects/sqlite/base.py
|
josteinl/sqlalchemy
|
9e7c068d669b209713da62da5748579f92d98129
|
[
"MIT"
] | null | null | null |
# sqlite/base.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: sqlite
:name: SQLite
:full_support: 3.21, 3.28+
:normal_support: 3.12+
:best_effort: 3.7.16+
.. _sqlite_datetime:
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does
not provide out of the box functionality for translating values between Python
`datetime` objects and a SQLite-supported format. SQLAlchemy's own
:class:`~sqlalchemy.types.DateTime` and related types provide date formatting
and parsing functionality when SQLite is used. The implementation classes are
:class:`_sqlite.DATETIME`, :class:`_sqlite.DATE` and :class:`_sqlite.TIME`.
These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals for
these functions so historical dates are fully supported.
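For example, a generic :class:`~sqlalchemy.types.DateTime` column accepts
and returns Python ``datetime`` objects transparently; a minimal sketch
(the table and in-memory engine are illustrative)::
    import datetime
    from sqlalchemy import (
        create_engine, Column, DateTime, Integer, MetaData, Table)
    metadata = MetaData()
    log = Table("log", metadata,
                Column("id", Integer, primary_key=True),
                Column("created", DateTime))
    engine = create_engine("sqlite://")
    metadata.create_all(engine)
    with engine.begin() as conn:
        conn.execute(log.insert().values(
            created=datetime.datetime(2021, 1, 2, 3, 4, 5)))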
Ensuring Text affinity
^^^^^^^^^^^^^^^^^^^^^^
The DDL rendered for these types is the standard ``DATE``, ``TIME``
and ``DATETIME`` indicators. However, custom storage formats can also be
applied to these types. When the
storage format is detected as containing no alpha characters, the DDL for
these types is rendered as ``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``,
so that the column continues to have textual affinity.
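For example, an alpha-free, slash-delimited storage format can be supplied
through the ``storage_format`` and ``regexp`` parameters of the
SQLite-specific types (a sketch; the exact format shown is arbitrary)::
    from sqlalchemy.dialects.sqlite import DATETIME
    dt = DATETIME(
        storage_format="%(year)04d/%(month)02d/%(day)02d "
                       "%(hour)02d:%(minute)02d:%(second)02d",
        regexp=r"(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)",
    )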
.. seealso::
`Type Affinity <https://www.sqlite.org/datatype3.html#affinity>`_ -
in the SQLite documentation
.. _sqlite_autoincrement:
SQLite Auto Incrementing Behavior
----------------------------------
Background on SQLite's autoincrement is at: https://sqlite.org/autoinc.html
Key concepts:
* SQLite has an implicit "auto increment" feature that takes place for any
non-composite primary-key column that is specifically created using
"INTEGER PRIMARY KEY" for the type + primary key.
* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
equivalent to the implicit autoincrement feature; this keyword is not
recommended for general use. SQLAlchemy does not render this keyword
unless a special SQLite-specific directive is used (see below). However,
it still requires that the column's type is named "INTEGER".
Using the AUTOINCREMENT Keyword
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Allowing autoincrement behavior with SQLAlchemy types other than Integer/INTEGER
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
SQLite's typing model is based on naming conventions. Among other things, this
means that any type name which contains the substring ``"INT"`` will be
determined to be of "integer affinity". A type named ``"BIGINT"``,
``"SPECIAL_INT"`` or even ``"XYZINTQPR"``, will be considered by SQLite to be
of "integer" affinity. However, **the SQLite autoincrement feature, whether
implicitly or explicitly enabled, requires that the name of the column's type
is exactly the string "INTEGER"**. Therefore, if an application uses a type
like :class:`.BigInteger` for a primary key, on SQLite this type will need to
be rendered as the name ``"INTEGER"`` when emitting the initial ``CREATE
TABLE`` statement in order for the autoincrement behavior to be available.
One approach to achieve this is to use :class:`.Integer` on SQLite
only using :meth:`.TypeEngine.with_variant`::
table = Table(
"my_table", metadata,
Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
)
Another is to use a subclass of :class:`.BigInteger` that overrides its DDL
name to be ``INTEGER`` when compiled against SQLite::
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
class SLBigInteger(BigInteger):
pass
@compiles(SLBigInteger, 'sqlite')
def bi_c(element, compiler, **kw):
return "INTEGER"
@compiles(SLBigInteger)
def bi_c(element, compiler, **kw):
return compiler.visit_BIGINT(element, **kw)
table = Table(
"my_table", metadata,
Column("id", SLBigInteger(), primary_key=True)
)
.. seealso::
:meth:`.TypeEngine.with_variant`
:ref:`sqlalchemy.ext.compiler_toplevel`
`Datatypes In SQLite Version 3 <https://sqlite.org/datatype3.html>`_
.. _sqlite_concurrency:
Database Locking Behavior / Concurrency
---------------------------------------
SQLite is not designed for a high level of write concurrency. The database
itself, being a file, is locked completely during write operations within
transactions, meaning exactly one "connection" (in reality a file handle)
has exclusive access to the database during this period - all other
"connections" will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is
always in a transaction; there is no ``connection.begin()`` method,
only ``connection.commit()`` and ``connection.rollback()``, upon which a
new transaction is to be begun immediately. This may seem to imply
that the SQLite driver would in theory allow only a single filehandle on a
particular database file at any time; however, there are several
factors both within SQLite itself as well as within the pysqlite driver
which loosen this restriction significantly.
However, no matter what locking modes are used, SQLite will still always
lock the database file once a transaction is started and DML (e.g. INSERT,
UPDATE, DELETE) has at least been emitted, and this will block
other transactions at least at the point that they also attempt to emit DML.
By default, the length of time on this block is very short before it times out
with an error.
This behavior becomes more critical when used in conjunction with the
SQLAlchemy ORM. SQLAlchemy's :class:`.Session` object by default runs
within a transaction, and with its autoflush model, may emit DML preceding
any SELECT statement. This may lead to a SQLite database that locks
more quickly than is expected. The locking mode of SQLite and the pysqlite
driver can be manipulated to some degree, however it should be noted that
achieving a high degree of write-concurrency with SQLite is a losing battle.
For more information on SQLite's lack of write concurrency by design, please
see
`Situations Where Another RDBMS May Work Better - High Concurrency
<https://www.sqlite.org/whentouse.html>`_ near the bottom of the page.
The following subsections introduce areas that are impacted by SQLite's
file-based architecture and additionally will usually require workarounds to
work when using the pysqlite driver.
.. _sqlite_isolation_level:
Transaction Isolation Level / Autocommit
----------------------------------------
SQLite supports "transaction isolation" in a non-standard way, along two
axes. One is that of the
`PRAGMA read_uncommitted <https://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
instruction. This setting can essentially switch SQLite between its
default mode of ``SERIALIZABLE`` isolation, and a "dirty read" isolation
mode normally referred to as ``READ UNCOMMITTED``.
SQLAlchemy ties into this PRAGMA statement using the
:paramref:`_sa.create_engine.isolation_level` parameter of
:func:`_sa.create_engine`.
Valid values for this parameter when used with SQLite are ``"SERIALIZABLE"``
and ``"READ UNCOMMITTED"`` corresponding to a value of 0 and 1, respectively.
SQLite defaults to ``SERIALIZABLE``, however its behavior is impacted by
the pysqlite driver's default behavior.
When using the pysqlite driver, the ``"AUTOCOMMIT"`` isolation level is also
available, which will alter the pysqlite connection using the ``.isolation_level``
attribute on the DBAPI connection and set it to None for the duration
of the setting.
.. versionadded:: 1.3.16 added support for SQLite AUTOCOMMIT isolation level
when using the pysqlite / sqlite3 SQLite driver.
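A minimal sketch of this parameter in use (the in-memory URL is
illustrative)::
    from sqlalchemy import create_engine
    eng = create_engine("sqlite://", isolation_level="READ UNCOMMITTED")
    # connections from this engine emit PRAGMA read_uncommitted = 1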
The other axis along which SQLite's transactional locking is impacted is
via the nature of the ``BEGIN`` statement used. The three varieties
are "deferred", "immediate", and "exclusive", as described at
`BEGIN TRANSACTION <https://sqlite.org/lang_transaction.html>`_. A straight
``BEGIN`` statement uses the "deferred" mode, where the database file is
not locked until the first read or write operation, and read access remains
open to other transactions until the first write operation. But again,
it is critical to note that the pysqlite driver interferes with this behavior
by *not even emitting BEGIN* until the first write operation.
.. warning::
SQLite's transactional scope is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. seealso::
:ref:`dbapi_autocommit`
SAVEPOINT Support
----------------------------
SQLite supports SAVEPOINTs, which only function once a transaction is
begun. SQLAlchemy's SAVEPOINT support is available using the
:meth:`_engine.Connection.begin_nested` method at the Core level, and
:meth:`.Session.begin_nested` at the ORM level. However, SAVEPOINTs
won't work at all with pysqlite unless workarounds are taken.
.. warning::
SQLite's SAVEPOINT feature is impacted by unresolved
issues in the pysqlite driver, which defers BEGIN statements to a greater
degree than is often feasible. See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
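A minimal Core-level sketch (``engine`` and ``some_table`` are assumed to
already exist, as are the pysqlite workarounds referenced above)::
    with engine.connect() as conn:
        with conn.begin():
            nested = conn.begin_nested()  # emits SAVEPOINT
            conn.execute(some_table.insert(), {"data": "value"})
            nested.rollback()  # rolls back to the SAVEPOINT only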
Transactional DDL
----------------------------
The SQLite database supports transactional :term:`DDL` as well.
In this case, the pysqlite driver is not only failing to start transactions,
it also is ending any existing transaction when DDL is detected, so again,
workarounds are required.
.. warning::
SQLite's transactional DDL is impacted by unresolved issues
in the pysqlite driver, which fails to emit BEGIN and additionally
forces a COMMIT to cancel any transaction when DDL is encountered.
See the section :ref:`pysqlite_serializable`
for techniques to work around this behavior.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation of the
table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all
connections before use -- including the initial call to
:meth:`sqlalchemy.schema.MetaData.create_all`.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for
new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. warning::
When SQLite foreign keys are enabled, it is **not possible**
to emit CREATE or DROP statements for tables that contain
mutually-dependent foreign key constraints;
to emit the DDL for these tables requires that ALTER TABLE be used to
create or drop these constraints separately, for which SQLite has
no support.
.. seealso::
`SQLite Foreign Key Support <https://www.sqlite.org/foreignkeys.html>`_
- on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
:ref:`use_alter` - more information on SQLAlchemy's facilities for handling
mutually-dependent foreign key constraints.
.. _sqlite_on_conflict_ddl:
ON CONFLICT support for constraints
-----------------------------------
.. seealso:: This section describes the :term:`DDL` version of "ON CONFLICT" for
SQLite, which occurs within a CREATE TABLE statement. For "ON CONFLICT" as
applied to an INSERT statement, see :ref:`sqlite_on_conflict_insert`.
SQLite supports a non-standard DDL clause known as ON CONFLICT which can be applied
to primary key, unique, check, and not null constraints. In DDL, it is
rendered either within the "CONSTRAINT" clause or within the column definition
itself depending on the location of the target constraint. To render this
clause within DDL, the extension parameter ``sqlite_on_conflict`` can be
specified with a string conflict resolution algorithm within the
:class:`.PrimaryKeyConstraint`, :class:`.UniqueConstraint`,
:class:`.CheckConstraint` objects. Within the :class:`_schema.Column` object,
there
are individual parameters ``sqlite_on_conflict_not_null``,
``sqlite_on_conflict_primary_key``, ``sqlite_on_conflict_unique`` which each
correspond to the three types of relevant constraint types that can be
indicated from a :class:`_schema.Column` object.
.. seealso::
`ON CONFLICT <https://www.sqlite.org/lang_conflict.html>`_ - in the SQLite
documentation
.. versionadded:: 1.3
The ``sqlite_on_conflict`` parameters accept a string argument which is just
the resolution name to be chosen, which on SQLite can be one of ROLLBACK,
ABORT, FAIL, IGNORE, and REPLACE. For example, to add a UNIQUE constraint
that specifies the IGNORE algorithm::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer),
UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE')
)
The above renders CREATE TABLE DDL as::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (id, data) ON CONFLICT IGNORE
)
When using the :paramref:`_schema.Column.unique`
flag to add a UNIQUE constraint
to a single column, the ``sqlite_on_conflict_unique`` parameter can
be added to the :class:`_schema.Column` as well, which will be added to the
UNIQUE constraint in the DDL::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, unique=True,
sqlite_on_conflict_unique='IGNORE')
)
rendering::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER,
PRIMARY KEY (id),
UNIQUE (data) ON CONFLICT IGNORE
)
To apply the FAIL algorithm for a NOT NULL constraint,
``sqlite_on_conflict_not_null`` is used::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, nullable=False,
sqlite_on_conflict_not_null='FAIL')
)
this renders the column inline ON CONFLICT phrase::
CREATE TABLE some_table (
id INTEGER NOT NULL,
data INTEGER NOT NULL ON CONFLICT FAIL,
PRIMARY KEY (id)
)
Similarly, for an inline primary key, use ``sqlite_on_conflict_primary_key``::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True,
sqlite_on_conflict_primary_key='FAIL')
)
SQLAlchemy renders the PRIMARY KEY constraint separately, so the conflict
resolution algorithm is applied to the constraint itself::
CREATE TABLE some_table (
id INTEGER NOT NULL,
PRIMARY KEY (id) ON CONFLICT FAIL
)
.. _sqlite_on_conflict_insert:
INSERT...ON CONFLICT (Upsert)
-----------------------------------
.. seealso:: This section describes the :term:`DML` version of "ON CONFLICT" for
SQLite, which occurs within an INSERT statement. For "ON CONFLICT" as
applied to a CREATE TABLE statement, see :ref:`sqlite_on_conflict_ddl`.
From version 3.24.0 onwards, SQLite supports "upserts" (update or insert)
of rows into a table via the ``ON CONFLICT`` clause of the ``INSERT``
statement. A candidate row will only be inserted if that row does not violate
any unique or primary key constraints. In the case of a unique constraint violation, a
secondary action can occur which can be either "DO UPDATE", indicating that
the data in the target row should be updated, or "DO NOTHING", which indicates
to silently skip this row.
Conflicts are determined using columns that are part of existing unique
constraints and indexes. These constraints are identified by stating the
columns and conditions that comprise the indexes.
SQLAlchemy provides ``ON CONFLICT`` support via the SQLite-specific
:func:`_sqlite.insert()` function, which provides
the generative methods :meth:`_sqlite.Insert.on_conflict_do_update`
and :meth:`_sqlite.Insert.on_conflict_do_nothing`:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.dialects.sqlite import insert
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
>>> do_update_stmt = insert_stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value')
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?{stop}
>>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
... index_elements=['id']
... )
>>> print(do_nothing_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO NOTHING
.. versionadded:: 1.4
.. seealso::
`Upsert
<https://sqlite.org/lang_UPSERT.html>`_
- in the SQLite documentation.
Specifying the Target
^^^^^^^^^^^^^^^^^^^^^
Both methods supply the "target" of the conflict using column inference:
* The :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements` argument
specifies a sequence containing string column names, :class:`_schema.Column`
objects, and/or SQL expression elements, which would identify a unique index
or unique constraint.
* When using :paramref:`_sqlite.Insert.on_conflict_do_update.index_elements`
to infer an index, a partial index can be inferred by also specifying the
:paramref:`_sqlite.Insert.on_conflict_do_update.index_where` parameter:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(user_email='a@b.com', data='inserted data')
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=[my_table.c.user_email],
... index_where=my_table.c.user_email.like('%@gmail.com'),
... set_=dict(data=stmt.excluded.data)
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (data, user_email) VALUES (?, ?)
ON CONFLICT (user_email)
WHERE user_email LIKE '%@gmail.com'
DO UPDATE SET data = excluded.data
>>>
The SET Clause
^^^^^^^^^^^^^^^
``ON CONFLICT...DO UPDATE`` is used to perform an update of the already
existing row, using any combination of new values as well as values
from the proposed insertion. These values are specified using the
:paramref:`_sqlite.Insert.on_conflict_do_update.set_` parameter. This
parameter accepts a dictionary which consists of direct values
for UPDATE:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value')
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?
.. warning::
The :meth:`_sqlite.Insert.on_conflict_do_update` method does **not** take
into account Python-side default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`. These
values will not be exercised for an ON CONFLICT style of UPDATE, unless
they are manually specified in the
:paramref:`_sqlite.Insert.on_conflict_do_update.set_` dictionary.
Updating using the Excluded INSERT Values
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In order to refer to the proposed insertion row, the special alias
:attr:`~.sqlite.Insert.excluded` is available as an attribute on
the :class:`_sqlite.Insert` object; this object creates an "excluded." prefix
on a column, that informs the DO UPDATE to update the row with the value that
would have been inserted had the constraint not failed:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh'
... )
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value', author=stmt.excluded.author)
... )
>>> print(do_update_stmt)
{opensql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author
Additional WHERE Criteria
^^^^^^^^^^^^^^^^^^^^^^^^^
The :meth:`_sqlite.Insert.on_conflict_do_update` method also accepts
a WHERE clause using the :paramref:`_sqlite.Insert.on_conflict_do_update.where`
parameter, which will limit those rows which receive an UPDATE:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh'
... )
>>> on_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value', author=stmt.excluded.author),
... where=(my_table.c.status == 2)
... )
>>> print(on_update_stmt)
{opensql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?, author = excluded.author
WHERE my_table.status = ?
Skipping Rows with DO NOTHING
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``ON CONFLICT`` may be used to skip inserting a row entirely
if any conflict with a unique constraint occurs; below this is illustrated
using the :meth:`_sqlite.Insert.on_conflict_do_nothing` method:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
>>> print(stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT (id) DO NOTHING
If ``DO NOTHING`` is used without specifying any columns or constraint,
it has the effect of skipping the INSERT for any unique violation which
occurs:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = stmt.on_conflict_do_nothing()
>>> print(stmt)
{opensql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT DO NOTHING
.. _sqlite_type_reflection:
Type Reflection
---------------
SQLite types are unlike those of most other database backends, in that
the string name of the type usually does not correspond to a "type" in a
one-to-one fashion. Instead, SQLite links per-column typing behavior
to one of five so-called "type affinities" based on a string matching
pattern for the type.
SQLAlchemy's reflection process, when inspecting types, uses a simple
lookup table to link the keywords returned to provided SQLAlchemy types.
This lookup table is present within the SQLite dialect as it is for all
other dialects. However, the SQLite dialect has a different "fallback"
routine for when a particular type name is not located in the lookup map;
it instead implements the SQLite "type affinity" scheme located at
https://www.sqlite.org/datatype3.html section 2.1.
The provided typemap will make direct associations from an exact string
name match for the following types:
:class:`_types.BIGINT`, :class:`_types.BLOB`,
:class:`_types.BOOLEAN`, :class:`_types.BOOLEAN`,
:class:`_types.CHAR`, :class:`_types.DATE`,
:class:`_types.DATETIME`, :class:`_types.FLOAT`,
:class:`_types.DECIMAL`, :class:`_types.FLOAT`,
:class:`_types.INTEGER`, :class:`_types.INTEGER`,
:class:`_types.NUMERIC`, :class:`_types.REAL`,
:class:`_types.SMALLINT`, :class:`_types.TEXT`,
:class:`_types.TIME`, :class:`_types.TIMESTAMP`,
:class:`_types.VARCHAR`, :class:`_types.NVARCHAR`,
:class:`_types.NCHAR`
When a type name does not match one of the above types, the "type affinity"
lookup is used instead:
* :class:`_types.INTEGER` is returned if the type name includes the
string ``INT``
* :class:`_types.TEXT` is returned if the type name includes the
string ``CHAR``, ``CLOB`` or ``TEXT``
* :class:`_types.NullType` is returned if the type name includes the
string ``BLOB``
* :class:`_types.REAL` is returned if the type name includes the string
``REAL``, ``FLOA`` or ``DOUB``.
* Otherwise, the :class:`_types.NUMERIC` type is used.
.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
columns.
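A minimal sketch of the affinity fallback during reflection (the table and
type names are illustrative)::
    from sqlalchemy import create_engine, inspect
    eng = create_engine("sqlite://")
    with eng.begin() as conn:
        conn.exec_driver_sql("CREATE TABLE t (x SPECIAL_INT, y BIGTEXTISH)")
    cols = inspect(eng).get_columns("t")
    # "SPECIAL_INT" contains "INT", so x reflects as INTEGER;
    # "BIGTEXTISH" contains "TEXT", so y reflects as TEXT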
.. _sqlite_partial_index:
Partial Indexes
---------------
A partial index, e.g. one which uses a WHERE clause, can be specified
with the DDL system using the argument ``sqlite_where``::
tbl = Table('testtbl', m, Column('data', Integer))
idx = Index('test_idx1', tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
The index will be rendered at create time as::
CREATE INDEX test_idx1 ON testtbl (data)
WHERE data > 5 AND data < 10
.. versionadded:: 0.9.9
.. _sqlite_dotted_column_names:
Dotted Column Names
-------------------
Using table or column names that explicitly have periods in them is
**not recommended**. While this is generally a bad idea for relational
databases, as the dot is a syntactically significant character,
SQLite up until version **3.10.0** has a bug which
requires that SQLAlchemy filter out these dots in result sets.
.. versionchanged:: 1.1
The following SQLite issue has been resolved as of version 3.10.0
of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
workarounds based on detection of this version.
The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("create table x (a integer, b integer)")
cursor.execute("insert into x (a, b) values (1, 1)")
cursor.execute("insert into x (a, b) values (2, 2)")
cursor.execute("select x.a, x.b from x")
assert [c[0] for c in cursor.description] == ['a', 'b']
cursor.execute('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert [c[0] for c in cursor.description] == ['a', 'b'], \
[c[0] for c in cursor.description]
The second assertion fails::
Traceback (most recent call last):
File "test.py", line 19, in <module>
[c[0] for c in cursor.description]
AssertionError: ['x.a', 'x.b']
Where above, the driver incorrectly reports the names of the columns
including the name of the table, which is entirely inconsistent vs.
when the UNION is not present.
SQLAlchemy relies upon column names being predictable in how they match
to the original statement, so the SQLAlchemy dialect has no choice but
to filter these out::
from sqlalchemy import create_engine
eng = create_engine("sqlite://")
conn = eng.connect()
conn.exec_driver_sql("create table x (a integer, b integer)")
conn.exec_driver_sql("insert into x (a, b) values (1, 1)")
conn.exec_driver_sql("insert into x (a, b) values (2, 2)")
result = conn.exec_driver_sql("select x.a, x.b from x")
assert result.keys() == ["a", "b"]
result = conn.exec_driver_sql('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["a", "b"]
Note that above, even though SQLAlchemy filters out the dots, *both
names are still addressable*::
>>> row = result.first()
>>> row["a"]
1
>>> row["x.a"]
1
>>> row["b"]
1
>>> row["x.b"]
1
Therefore, the workaround applied by SQLAlchemy only impacts
:meth:`_engine.CursorResult.keys` and :meth:`.Row.keys()` in the public API. In
the very specific case where an application is forced to use column names that
contain dots, and the functionality of :meth:`_engine.CursorResult.keys` and
:meth:`.Row.keys()` is required to return these dotted names unmodified,
the ``sqlite_raw_colnames`` execution option may be provided, either on a
per-:class:`_engine.Connection` basis::
result = conn.execution_options(sqlite_raw_colnames=True).exec_driver_sql('''
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert result.keys() == ["x.a", "x.b"]
or on a per-:class:`_engine.Engine` basis::
engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
When using the per-:class:`_engine.Engine` execution option, note that
**Core and ORM queries that use UNION may not function properly**.
SQLite-specific table options
-----------------------------
One option for CREATE TABLE is supported directly by the SQLite
dialect in conjunction with the :class:`_schema.Table` construct:
* ``WITHOUT ROWID``::
Table("some_table", metadata, ..., sqlite_with_rowid=False)
.. seealso::
`SQLite CREATE TABLE options
<https://www.sqlite.org/lang_createtable.html>`_
""" # noqa
import datetime
import numbers
import re
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from ... import exc
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import processors
from ...engine import reflection
from ...sql import coercions
from ...sql import ColumnElement
from ...sql import compiler
from ...sql import elements
from ...sql import roles
from ...sql import schema
from ...types import BLOB # noqa
from ...types import BOOLEAN # noqa
from ...types import CHAR # noqa
from ...types import DECIMAL # noqa
from ...types import FLOAT # noqa
from ...types import INTEGER # noqa
from ...types import NUMERIC # noqa
from ...types import REAL # noqa
from ...types import SMALLINT # noqa
from ...types import TEXT # noqa
from ...types import TIMESTAMP # noqa
from ...types import VARCHAR # noqa
class _SQliteJson(JSON):
def result_processor(self, dialect, coltype):
default_processor = super(_SQliteJson, self).result_processor(
dialect, coltype
)
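        # json.loads (the default deserializer) requires a string; for
        # scalar JSON values, SQLite's JSON functions may hand back an
        # already-numeric value, which is passed through unchanged below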
def process(value):
try:
return default_processor(value)
except TypeError:
if isinstance(value, numbers.Number):
return value
else:
raise
return process
class _DateTimeMixin:
_reg = None
_storage_format = None
def __init__(self, storage_format=None, regexp=None, **kw):
super(_DateTimeMixin, self).__init__(**kw)
if regexp is not None:
self._reg = re.compile(regexp)
if storage_format is not None:
self._storage_format = storage_format
@property
def format_is_text_affinity(self):
"""return True if the storage format will automatically imply
a TEXT affinity.
If the storage format contains no non-numeric characters,
it will imply a NUMERIC storage format on SQLite; in this case,
the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
TIME_CHAR.
.. versionadded:: 1.0.0
"""
spec = self._storage_format % {
"year": 0,
"month": 0,
"day": 0,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
return bool(re.search(r"[^0-9]", spec))
def adapt(self, cls, **kw):
if issubclass(cls, _DateTimeMixin):
if self._storage_format:
kw["storage_format"] = self._storage_format
if self._reg:
kw["regexp"] = self._reg
return super(_DateTimeMixin, self).adapt(cls, **kw)
def literal_processor(self, dialect):
bp = self.bind_processor(dialect)
def process(value):
return "'%s'" % bp(value)
return process
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
r"""Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
2021-03-15 12:05:57.105542
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(storage_format="%(year)04d/%(month)02d/%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d",
                  regexp=r"(\d+)/(\d+)/(\d+) (\d+):(\d+):(\d+)"
)
:param storage_format: format string which will be applied to the dict
with keys year, month, day, hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python datetime() constructor as keyword arguments.
Otherwise, if positional groups are used, the datetime() constructor
is called with positional arguments via
``*map(int, match_obj.groups(0))``.
""" # noqa
_storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
)
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(DATETIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d"
)
def bind_processor(self, dialect):
datetime_datetime = datetime.datetime
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_datetime):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
else:
raise TypeError(
"SQLite DateTime type only accepts Python "
"datetime and date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.datetime
)
else:
return processors.str_to_datetime
class DATE(_DateTimeMixin, sqltypes.Date):
r"""Represent a Python date object in SQLite using a string.
The default string storage format is::
"%(year)04d-%(month)02d-%(day)02d"
e.g.::
2011-03-15
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATE
d = DATE(
storage_format="%(month)02d/%(day)02d/%(year)04d",
        regexp=re.compile(r"(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
)
:param storage_format: format string which will be applied to the
dict with keys year, month, and day.
:param regexp: regular expression which will be applied to
incoming result rows. If the regexp contains named groups, the
resulting match dict is applied to the Python date() constructor
as keyword arguments. Otherwise, if positional groups are used, the
date() constructor is called with positional arguments via
``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(year)04d-%(month)02d-%(day)02d"
def bind_processor(self, dialect):
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
}
else:
raise TypeError(
"SQLite Date type only accepts Python "
"date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.date
)
else:
return processors.str_to_date
class TIME(_DateTimeMixin, sqltypes.Time):
r"""Represent a Python time object in SQLite using a string.
The default string storage format is::
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
    12:05:57.105542
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import TIME
t = TIME(storage_format="%(hour)02d-%(minute)02d-"
"%(second)02d-%(microsecond)06d",
             regexp=re.compile(r"(\d+)-(\d+)-(\d+)(?:-(\d+))?")
)
:param storage_format: format string which will be applied to the dict
with keys hour, minute, second, and microsecond.
:param regexp: regular expression which will be applied to incoming result
rows. If the regexp contains named groups, the resulting match dict is
applied to the Python time() constructor as keyword arguments. Otherwise,
if positional groups are used, the time() constructor is called with
positional arguments via ``*map(int, match_obj.groups(0))``.
"""
_storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(TIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
def bind_processor(self, dialect):
datetime_time = datetime.time
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_time):
return format_ % {
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
else:
raise TypeError(
"SQLite Time type only accepts Python "
"time objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.time
)
else:
return processors.str_to_time
colspecs = {
sqltypes.Date: DATE,
sqltypes.DateTime: DATETIME,
sqltypes.JSON: _SQliteJson,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
sqltypes.Time: TIME,
}
ischema_names = {
"BIGINT": sqltypes.BIGINT,
"BLOB": sqltypes.BLOB,
"BOOL": sqltypes.BOOLEAN,
"BOOLEAN": sqltypes.BOOLEAN,
"CHAR": sqltypes.CHAR,
"DATE": sqltypes.DATE,
"DATE_CHAR": sqltypes.DATE,
"DATETIME": sqltypes.DATETIME,
"DATETIME_CHAR": sqltypes.DATETIME,
"DOUBLE": sqltypes.DOUBLE,
"DECIMAL": sqltypes.DECIMAL,
"FLOAT": sqltypes.FLOAT,
"INT": sqltypes.INTEGER,
"INTEGER": sqltypes.INTEGER,
"JSON": JSON,
"NUMERIC": sqltypes.NUMERIC,
"REAL": sqltypes.REAL,
"SMALLINT": sqltypes.SMALLINT,
"TEXT": sqltypes.TEXT,
"TIME": sqltypes.TIME,
"TIME_CHAR": sqltypes.TIME,
"TIMESTAMP": sqltypes.TIMESTAMP,
"VARCHAR": sqltypes.VARCHAR,
"NVARCHAR": sqltypes.NVARCHAR,
"NCHAR": sqltypes.NCHAR,
}
class SQLiteCompiler(compiler.SQLCompiler):
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
"month": "%m",
"day": "%d",
"year": "%Y",
"second": "%S",
"hour": "%H",
"doy": "%j",
"minute": "%M",
"epoch": "%s",
"dow": "%w",
"week": "%W",
},
)
def visit_truediv_binary(self, binary, operator, **kw):
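        # SQLite's "/" performs integer division when both operands are
        # integers; adding 0.0 to the divisor coerces a float result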
return (
self.process(binary.left, **kw)
+ " / "
+ "(%s + 0.0)" % self.process(binary.right, **kw)
)
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_localtimestamp_func(self, func, **kw):
return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def visit_char_length_func(self, fn, **kw):
return "length%s" % self.function_argspec(fn)
def visit_cast(self, cast, **kwargs):
if self.dialect.supports_cast:
return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
else:
return self.process(cast.clause, **kwargs)
def visit_extract(self, extract, **kw):
try:
return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
self.extract_map[extract.field],
self.process(extract.expr, **kw),
)
except KeyError as err:
raise exc.CompileError(
"%s is not a valid extract argument." % extract.field
) from err
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
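                # SQLite requires a LIMIT when OFFSET is present;
                # LIMIT -1 is the conventional "no limit" value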
text += "\n LIMIT " + self.process(sql.literal(-1))
text += " OFFSET " + self.process(select._offset_clause, **kw)
else:
text += " OFFSET " + self.process(sql.literal(0), **kw)
return text
def for_update_clause(self, select, **kw):
# sqlite has no "FOR UPDATE" AFAICT
return ""
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "%s IS NOT %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_is_not_distinct_from_binary(self, binary, operator, **kw):
return "%s IS %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
if binary.type._type_affinity is sqltypes.JSON:
expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
else:
expr = "JSON_EXTRACT(%s, %s)"
return expr % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
if binary.type._type_affinity is sqltypes.JSON:
expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
else:
expr = "JSON_EXTRACT(%s, %s)"
return expr % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_empty_set_op_expr(self, type_, expand_op):
# slightly old SQLite versions don't seem to be able to handle
# the empty set impl
return self.visit_empty_set_expr(type_)
def visit_empty_set_expr(self, element_types):
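        # emit a SELECT that yields the correct number of columns but can
        # never return rows, e.g. "SELECT 1 FROM (SELECT 1) WHERE 1!=1"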
return "SELECT %s FROM (SELECT %s) WHERE 1!=1" % (
", ".join("1" for type_ in element_types or [INTEGER()]),
", ".join("1" for type_ in element_types or [INTEGER()]),
)
def visit_regexp_match_op_binary(self, binary, operator, **kw):
return self._generate_generic_binary(binary, " REGEXP ", **kw)
def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
return self._generate_generic_binary(binary, " NOT REGEXP ", **kw)
def _on_conflict_target(self, clause, **kw):
if clause.constraint_target is not None:
target_text = "(%s)" % clause.constraint_target
elif clause.inferred_target_elements is not None:
target_text = "(%s)" % ", ".join(
(
self.preparer.quote(c)
if isinstance(c, str)
else self.process(c, include_table=False, use_schema=False)
)
for c in clause.inferred_target_elements
)
if clause.inferred_target_whereclause is not None:
target_text += " WHERE %s" % self.process(
clause.inferred_target_whereclause,
include_table=False,
use_schema=False,
literal_binds=True,
)
else:
target_text = ""
return target_text
def visit_on_conflict_do_nothing(self, on_conflict, **kw):
target_text = self._on_conflict_target(on_conflict, **kw)
if target_text:
return "ON CONFLICT %s DO NOTHING" % target_text
else:
return "ON CONFLICT DO NOTHING"
def visit_on_conflict_do_update(self, on_conflict, **kw):
clause = on_conflict
target_text = self._on_conflict_target(on_conflict, **kw)
action_set_ops = []
set_parameters = dict(clause.update_values_to_set)
# create a list of column assignment clauses as tuples
insert_statement = self.stack[-1]["selectable"]
cols = insert_statement.table.c
for c in cols:
col_key = c.key
if col_key in set_parameters:
value = set_parameters.pop(col_key)
elif c in set_parameters:
value = set_parameters.pop(c)
else:
continue
if coercions._is_literal(value):
value = elements.BindParameter(None, value, type_=c.type)
else:
if (
isinstance(value, elements.BindParameter)
and value.type._isnull
):
value = value._clone()
value.type = c.type
value_text = self.process(value.self_group(), use_schema=False)
key_text = self.preparer.quote(col_key)
action_set_ops.append("%s = %s" % (key_text, value_text))
# check for names that don't match columns
if set_parameters:
util.warn(
"Additional column names not matching "
"any column keys in table '%s': %s"
% (
self.current_executable.table.name,
(", ".join("'%s'" % c for c in set_parameters)),
)
)
for k, v in set_parameters.items():
key_text = (
self.preparer.quote(k)
if isinstance(k, str)
else self.process(k, use_schema=False)
)
value_text = self.process(
coercions.expect(roles.ExpressionElementRole, v),
use_schema=False,
)
action_set_ops.append("%s = %s" % (key_text, value_text))
action_text = ", ".join(action_set_ops)
if clause.update_whereclause is not None:
action_text += " WHERE %s" % self.process(
clause.update_whereclause, include_table=True, use_schema=False
)
return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
coltype = self.dialect.type_compiler.process(
column.type, type_expression=column
)
colspec = self.preparer.format_column(column) + " " + coltype
default = self.get_column_default_string(column)
if default is not None:
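            # SQLite requires parenthesis around expression-based
            # (non-literal) column defaults in DDL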
if isinstance(column.server_default.arg, ColumnElement):
default = "(" + default + ")"
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
on_conflict_clause = column.dialect_options["sqlite"][
"on_conflict_not_null"
]
if on_conflict_clause is not None:
colspec += " ON CONFLICT " + on_conflict_clause
if column.primary_key:
if (
column.autoincrement is True
and len(column.table.primary_key.columns) != 1
):
raise exc.CompileError(
"SQLite does not support autoincrement for "
"composite primary keys"
)
if (
column.table.dialect_options["sqlite"]["autoincrement"]
and len(column.table.primary_key.columns) == 1
and issubclass(column.type._type_affinity, sqltypes.Integer)
and not column.foreign_keys
):
colspec += " PRIMARY KEY"
on_conflict_clause = column.dialect_options["sqlite"][
"on_conflict_primary_key"
]
if on_conflict_clause is not None:
colspec += " ON CONFLICT " + on_conflict_clause
colspec += " AUTOINCREMENT"
if column.computed is not None:
colspec += " " + self.process(column.computed)
return colspec
def visit_primary_key_constraint(self, constraint):
# for columns with sqlite_autoincrement=True,
# the PRIMARY KEY constraint can only be inline
# with the column itself.
if len(constraint.columns) == 1:
c = list(constraint)[0]
if (
c.primary_key
and c.table.dialect_options["sqlite"]["autoincrement"]
and issubclass(c.type._type_affinity, sqltypes.Integer)
and not c.foreign_keys
):
return None
text = super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is None and len(constraint.columns) == 1:
on_conflict_clause = list(constraint)[0].dialect_options["sqlite"][
"on_conflict_primary_key"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_unique_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_unique_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is None and len(constraint.columns) == 1:
col1 = list(constraint)[0]
if isinstance(col1, schema.SchemaItem):
on_conflict_clause = list(constraint)[0].dialect_options[
"sqlite"
]["on_conflict_unique"]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_check_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_check_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_column_check_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_column_check_constraint(
constraint
)
if constraint.dialect_options["sqlite"]["on_conflict"] is not None:
raise exc.CompileError(
"SQLite does not support on conflict clause for "
"column check constraint"
)
return text
def visit_foreign_key_constraint(self, constraint):
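        # a FOREIGN KEY that refers to a table in a different schema
        # (i.e. attached database) can't be expressed in SQLite DDL,
        # so skip rendering the constraint in that case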
local_table = constraint.elements[0].parent.table
remote_table = constraint.elements[0].column.table
if local_table.schema != remote_table.schema:
return None
else:
return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(
constraint
)
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table, use_schema=False)
def visit_create_index(
self, create, include_schema=False, include_table_schema=True
):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX "
if create.if_not_exists:
text += "IF NOT EXISTS "
text += "%s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=True),
preparer.format_table(index.table, use_schema=False),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
whereclause = index.dialect_options["sqlite"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
return text
def post_create_table(self, table):
if table.dialect_options["sqlite"]["with_rowid"] is False:
return "\n WITHOUT ROWID"
return ""
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_DATETIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)
else:
return "DATETIME_CHAR"
def visit_DATE(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATE(type_)
else:
return "DATE_CHAR"
def visit_TIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_TIME(type_)
else:
return "TIME_CHAR"
def visit_JSON(self, type_, **kw):
# note this name provides NUMERIC affinity, not TEXT.
# should not be an issue unless the JSON value consists of a single
# numeric value. JSONTEXT can be used if this case is required.
return "JSON"
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set(
[
"add",
"after",
"all",
"alter",
"analyze",
"and",
"as",
"asc",
"attach",
"autoincrement",
"before",
"begin",
"between",
"by",
"cascade",
"case",
"cast",
"check",
"collate",
"column",
"commit",
"conflict",
"constraint",
"create",
"cross",
"current_date",
"current_time",
"current_timestamp",
"database",
"default",
"deferrable",
"deferred",
"delete",
"desc",
"detach",
"distinct",
"drop",
"each",
"else",
"end",
"escape",
"except",
"exclusive",
"exists",
"explain",
"false",
"fail",
"for",
"foreign",
"from",
"full",
"glob",
"group",
"having",
"if",
"ignore",
"immediate",
"in",
"index",
"indexed",
"initially",
"inner",
"insert",
"instead",
"intersect",
"into",
"is",
"isnull",
"join",
"key",
"left",
"like",
"limit",
"match",
"natural",
"not",
"notnull",
"null",
"of",
"offset",
"on",
"or",
"order",
"outer",
"plan",
"pragma",
"primary",
"query",
"raise",
"references",
"reindex",
"rename",
"replace",
"restrict",
"right",
"rollback",
"row",
"select",
"set",
"table",
"temp",
"temporary",
"then",
"to",
"transaction",
"trigger",
"true",
"union",
"unique",
"update",
"using",
"vacuum",
"values",
"view",
"virtual",
"when",
"where",
]
)
class SQLiteExecutionContext(default.DefaultExecutionContext):
@util.memoized_property
def _preserve_raw_colnames(self):
return (
not self.dialect._broken_dotted_colnames
or self.execution_options.get("sqlite_raw_colnames", False)
)
def _translate_colname(self, colname):
# TODO: detect SQLite version 3.10.0 or greater;
# see [ticket:3633]
# adjust for dotted column names. SQLite
# in the case of UNION may store col names as
# "tablename.colname", or if using an attached database,
# "database.tablename.colname", in cursor.description
if not self._preserve_raw_colnames and "." in colname:
return colname.split(".")[-1], colname
else:
return colname, None
class SQLiteDialect(default.DefaultDialect):
name = "sqlite"
supports_alter = False
    # SQLite supports "DEFAULT VALUES" but *does not* support
# "VALUES (DEFAULT)"
supports_default_values = True
supports_default_metavalue = False
supports_empty_insert = False
supports_cast = True
supports_multivalues_insert = True
tuple_in_values = True
supports_statement_cache = True
default_paramstyle = "qmark"
execution_ctx_cls = SQLiteExecutionContext
statement_compiler = SQLiteCompiler
ddl_compiler = SQLiteDDLCompiler
type_compiler = SQLiteTypeCompiler
preparer = SQLiteIdentifierPreparer
ischema_names = ischema_names
colspecs = colspecs
construct_arguments = [
(
sa_schema.Table,
{
"autoincrement": False,
"with_rowid": True,
},
),
(sa_schema.Index, {"where": None}),
(
sa_schema.Column,
{
"on_conflict_primary_key": None,
"on_conflict_not_null": None,
"on_conflict_unique": None,
},
),
(sa_schema.Constraint, {"on_conflict": None}),
]
_broken_fk_pragma_quotes = False
_broken_dotted_colnames = False
@util.deprecated_params(
_json_serializer=(
"1.3.7",
"The _json_serializer argument to the SQLite dialect has "
"been renamed to the correct name of json_serializer. The old "
"argument name will be removed in a future release.",
),
_json_deserializer=(
"1.3.7",
"The _json_deserializer argument to the SQLite dialect has "
"been renamed to the correct name of json_deserializer. The old "
"argument name will be removed in a future release.",
),
)
def __init__(
self,
native_datetime=False,
json_serializer=None,
json_deserializer=None,
_json_serializer=None,
_json_deserializer=None,
**kwargs,
):
default.DefaultDialect.__init__(self, **kwargs)
if _json_serializer:
json_serializer = _json_serializer
if _json_deserializer:
json_deserializer = _json_deserializer
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
# this flag used by pysqlite dialect, and perhaps others in the
# future, to indicate the driver is handling date/timestamp
# conversions (and perhaps datetime/time as well on some hypothetical
# driver ?)
self.native_datetime = native_datetime
if self.dbapi is not None:
if self.dbapi.sqlite_version_info < (3, 7, 16):
util.warn(
"SQLite version %s is older than 3.7.16, and will not "
"support right nested joins, as are sometimes used in "
"more complex ORM scenarios. SQLAlchemy 1.4 and above "
"no longer tries to rewrite these joins."
% (self.dbapi.sqlite_version_info,)
)
# NOTE: python 3.7 on fedora for me has SQLite 3.34.1. These
# version checks are getting very stale.
self._broken_dotted_colnames = self.dbapi.sqlite_version_info < (
3,
10,
0,
)
self.supports_default_values = self.dbapi.sqlite_version_info >= (
3,
3,
8,
)
self.supports_cast = self.dbapi.sqlite_version_info >= (3, 2, 3)
self.supports_multivalues_insert = (
# https://www.sqlite.org/releaselog/3_7_11.html
self.dbapi.sqlite_version_info
>= (3, 7, 11)
)
# see https://www.sqlalchemy.org/trac/ticket/2568
# as well as https://www.sqlite.org/src/info/600482d161
self._broken_fk_pragma_quotes = self.dbapi.sqlite_version_info < (
3,
6,
14,
)
_isolation_lookup = util.immutabledict(
{"READ UNCOMMITTED": 1, "SERIALIZABLE": 0}
)
def get_isolation_level_values(self, dbapi_connection):
return list(self._isolation_lookup)
def set_isolation_level(self, dbapi_connection, level):
isolation_level = self._isolation_lookup[level]
cursor = dbapi_connection.cursor()
cursor.execute(f"PRAGMA read_uncommitted = {isolation_level}")
cursor.close()
def get_isolation_level(self, dbapi_connection):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA read_uncommitted")
res = cursor.fetchone()
if res:
value = res[0]
else:
# https://www.sqlite.org/changes.html#version_3_3_3
# "Optional READ UNCOMMITTED isolation (instead of the
# default isolation level of SERIALIZABLE) and
# table level locking when database connections
# share a common cache.""
# pre-SQLite 3.3.0 default to 0
value = 0
cursor.close()
if value == 0:
return "SERIALIZABLE"
elif value == 1:
return "READ UNCOMMITTED"
else:
assert False, "Unknown isolation level %s" % value
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "PRAGMA database_list"
dl = connection.exec_driver_sql(s)
return [db[1] for db in dl if db[1] != "temp"]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='table' ORDER BY name") % (
master,
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='table' ORDER BY name "
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_view_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='view' ORDER BY name "
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
def has_table(self, connection, table_name, schema=None):
self._ensure_has_table_connection(connection)
info = self._get_table_pragma(
connection, "table_info", table_name, schema=schema
)
return bool(info)
def _get_default_schema_name(self, connection):
return "main"
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='view' ORDER BY name") % (
master,
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
s = ("SELECT sql FROM %s WHERE name = ? AND type='view'") % (
master,
)
rs = connection.exec_driver_sql(s, (view_name,))
else:
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM sqlite_master UNION ALL "
" SELECT * FROM sqlite_temp_master) "
"WHERE name = ? "
"AND type='view'"
)
rs = connection.exec_driver_sql(s, (view_name,))
except exc.DBAPIError:
s = (
"SELECT sql FROM sqlite_master WHERE name = ? "
"AND type='view'"
)
rs = connection.exec_driver_sql(s, (view_name,))
result = rs.fetchall()
if result:
return result[0].sql
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
pragma = "table_info"
        # computed columns are treated as hidden; they require table_xinfo
if self.server_version_info >= (3, 31):
pragma = "table_xinfo"
info = self._get_table_pragma(
connection, pragma, table_name, schema=schema
)
columns = []
tablesql = None
for row in info:
name = row[1]
type_ = row[2].upper()
nullable = not row[3]
default = row[4]
primary_key = row[5]
hidden = row[6] if pragma == "table_xinfo" else 0
# hidden has value 0 for normal columns, 1 for hidden columns,
# 2 for computed virtual columns and 3 for computed stored columns
# https://www.sqlite.org/src/info/069351b85f9a706f60d3e98fbc8aaf40c374356b967c0464aede30ead3d9d18b
if hidden == 1:
continue
generated = bool(hidden)
persisted = hidden == 3
if tablesql is None and generated:
tablesql = self._get_table_sql(
connection, table_name, schema, **kw
)
columns.append(
self._get_column_info(
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
)
)
return columns
def _get_column_info(
self,
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
):
if generated:
# the type of a column "cc INTEGER GENERATED ALWAYS AS (1 + 42)"
# somehow is "INTEGER GENERATED ALWAYS"
type_ = re.sub("generated", "", type_, flags=re.IGNORECASE)
type_ = re.sub("always", "", type_, flags=re.IGNORECASE).strip()
coltype = self._resolve_type_affinity(type_)
if default is not None:
default = str(default)
colspec = {
"name": name,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": "auto",
"primary_key": primary_key,
}
if generated:
sqltext = ""
if tablesql:
pattern = r"[^,]*\s+AS\s+\(([^,]*)\)\s*(?:virtual|stored)?"
match = re.search(
re.escape(name) + pattern, tablesql, re.IGNORECASE
)
if match:
sqltext = match.group(1)
colspec["computed"] = {"sqltext": sqltext, "persisted": persisted}
return colspec
def _resolve_type_affinity(self, type_):
"""Return a data type from a reflected column, using affinity rules.
SQLite's goal for universal compatibility introduces some complexity
during reflection, as a column's defined type might not actually be a
        type that SQLite understands - or indeed, may not be defined *at all*.
Internally, SQLite handles this with a 'data type affinity' for each
column definition, mapping to one of 'TEXT', 'NUMERIC', 'INTEGER',
'REAL', or 'NONE' (raw bits). The algorithm that determines this is
listed in https://www.sqlite.org/datatype3.html section 2.1.
This method allows SQLAlchemy to support that algorithm, while still
providing access to smarter reflection utilities by recognizing
column definitions that SQLite only supports through affinity (like
DATE and DOUBLE).
"""
match = re.match(r"([\w ]+)(\(.*?\))?", type_)
if match:
coltype = match.group(1)
args = match.group(2)
else:
coltype = ""
args = ""
if coltype in self.ischema_names:
coltype = self.ischema_names[coltype]
elif "INT" in coltype:
coltype = sqltypes.INTEGER
elif "CHAR" in coltype or "CLOB" in coltype or "TEXT" in coltype:
coltype = sqltypes.TEXT
elif "BLOB" in coltype or not coltype:
coltype = sqltypes.NullType
elif "REAL" in coltype or "FLOA" in coltype or "DOUB" in coltype:
coltype = sqltypes.REAL
else:
coltype = sqltypes.NUMERIC
if args is not None:
args = re.findall(r"(\d+)", args)
try:
coltype = coltype(*[int(a) for a in args])
except TypeError:
util.warn(
"Could not instantiate type %s with "
"reflected arguments %s; using no arguments."
% (coltype, args)
)
coltype = coltype()
else:
coltype = coltype()
return coltype
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
constraint_name = None
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data:
PK_PATTERN = r"CONSTRAINT (\w+) PRIMARY KEY"
result = re.search(PK_PATTERN, table_data, re.I)
constraint_name = result.group(1) if result else None
cols = self.get_columns(connection, table_name, schema, **kw)
cols.sort(key=lambda col: col.get("primary_key"))
pkeys = []
for col in cols:
if col["primary_key"]:
pkeys.append(col["name"])
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# sqlite makes this *extremely difficult*.
# First, use the pragma to get the actual FKs.
pragma_fks = self._get_table_pragma(
connection, "foreign_key_list", table_name, schema=schema
)
fks = {}
for row in pragma_fks:
(numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
if not rcol:
# no referred column, which means it was not named in the
# original DDL. The referred columns of the foreign key
# constraint are therefore the primary key of the referred
# table.
referred_pk = self.get_pk_constraint(
connection, rtbl, schema=schema, **kw
)
# note that if table doesn't exist, we still get back a record,
# just it has no columns in it
referred_columns = referred_pk["constrained_columns"]
else:
# note we use this list only if this is the first column
# in the constraint. for subsequent columns we ignore the
# list and append "rcol" if present.
referred_columns = []
if self._broken_fk_pragma_quotes:
rtbl = re.sub(r"^[\"\[`\']|[\"\]`\']$", "", rtbl)
if numerical_id in fks:
fk = fks[numerical_id]
else:
fk = fks[numerical_id] = {
"name": None,
"constrained_columns": [],
"referred_schema": schema,
"referred_table": rtbl,
"referred_columns": referred_columns,
"options": {},
}
fks[numerical_id] = fk
fk["constrained_columns"].append(lcol)
if rcol:
fk["referred_columns"].append(rcol)
def fk_sig(constrained_columns, referred_table, referred_columns):
return (
tuple(constrained_columns)
+ (referred_table,)
+ tuple(referred_columns)
)
# then, parse the actual SQL and attempt to find DDL that matches
# the names as well. SQLite saves the DDL in whatever format
# it was typed in as, so need to be liberal here.
keys_by_signature = dict(
(
fk_sig(
fk["constrained_columns"],
fk["referred_table"],
fk["referred_columns"],
),
fk,
)
for fk in fks.values()
)
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data is None:
# system tables, etc.
return []
def parse_fks():
FK_PATTERN = (
r"(?:CONSTRAINT (\w+) +)?"
r"FOREIGN KEY *\( *(.+?) *\) +"
r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *'
r"((?:ON (?:DELETE|UPDATE) "
r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)"
)
for match in re.finditer(FK_PATTERN, table_data, re.I):
(
constraint_name,
constrained_columns,
referred_quoted_name,
referred_name,
referred_columns,
onupdatedelete,
) = match.group(1, 2, 3, 4, 5, 6)
constrained_columns = list(
self._find_cols_in_sig(constrained_columns)
)
if not referred_columns:
referred_columns = constrained_columns
else:
referred_columns = list(
self._find_cols_in_sig(referred_columns)
)
referred_name = referred_quoted_name or referred_name
options = {}
for token in re.split(r" *\bON\b *", onupdatedelete.upper()):
if token.startswith("DELETE"):
ondelete = token[6:].strip()
if ondelete and ondelete != "NO ACTION":
options["ondelete"] = ondelete
elif token.startswith("UPDATE"):
onupdate = token[6:].strip()
if onupdate and onupdate != "NO ACTION":
options["onupdate"] = onupdate
yield (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
)
fkeys = []
for (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
) in parse_fks():
sig = fk_sig(constrained_columns, referred_name, referred_columns)
if sig not in keys_by_signature:
util.warn(
"WARNING: SQL-parsed foreign key constraint "
"'%s' could not be located in PRAGMA "
"foreign_keys for table %s" % (sig, table_name)
)
continue
key = keys_by_signature.pop(sig)
key["name"] = constraint_name
key["options"] = options
fkeys.append(key)
# assume the remainders are the unnamed, inline constraints, just
# use them as is as it's extremely difficult to parse inline
# constraints
fkeys.extend(keys_by_signature.values())
return fkeys
def _find_cols_in_sig(self, sig):
for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
yield match.group(1) or match.group(2)
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
auto_index_by_sig = {}
for idx in self.get_indexes(
connection,
table_name,
schema=schema,
include_auto_indexes=True,
**kw,
):
if not idx["name"].startswith("sqlite_autoindex"):
continue
sig = tuple(idx["column_names"])
auto_index_by_sig[sig] = idx
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
unique_constraints = []
def parse_uqs():
UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
INLINE_UNIQUE_PATTERN = (
r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?) '
r"+[a-z0-9_ ]+? +UNIQUE"
)
for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
name, cols = match.group(1, 2)
yield name, list(self._find_cols_in_sig(cols))
# we need to match inlines as well, as we seek to differentiate
# a UNIQUE constraint from a UNIQUE INDEX, even though these
# are kind of the same thing :)
for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
cols = list(
self._find_cols_in_sig(match.group(1) or match.group(2))
)
yield None, cols
for name, cols in parse_uqs():
sig = tuple(cols)
if sig in auto_index_by_sig:
auto_index_by_sig.pop(sig)
parsed_constraint = {"name": name, "column_names": cols}
unique_constraints.append(parsed_constraint)
# NOTE: auto_index_by_sig might not be empty here,
# the PRIMARY KEY may have an entry.
return unique_constraints
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
CHECK_PATTERN = r"(?:CONSTRAINT (\w+) +)?" r"CHECK *\( *(.+) *\),? *"
check_constraints = []
# NOTE: we aren't using re.S here because we actually are
# taking advantage of each CHECK constraint being all on one
# line in the table definition in order to delineate. This
# necessarily makes assumptions as to how the CREATE TABLE
# was emitted.
for match in re.finditer(CHECK_PATTERN, table_data, re.I):
check_constraints.append(
{"sqltext": match.group(2), "name": match.group(1)}
)
return check_constraints
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
pragma_indexes = self._get_table_pragma(
connection, "index_list", table_name, schema=schema
)
indexes = []
include_auto_indexes = kw.pop("include_auto_indexes", False)
for row in pragma_indexes:
# ignore implicit primary key index.
# https://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
if not include_auto_indexes and row[1].startswith(
"sqlite_autoindex"
):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
# loop thru unique indexes to get the column names.
for idx in list(indexes):
pragma_index = self._get_table_pragma(
connection, "index_info", idx["name"]
)
for row in pragma_index:
if row[2] is None:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s" % idx["name"]
)
indexes.remove(idx)
break
else:
idx["column_names"].append(row[2])
return indexes
@reflection.cache
def _get_table_sql(self, connection, table_name, schema=None, **kw):
if schema:
schema_expr = "%s." % (
self.identifier_preparer.quote_identifier(schema)
)
else:
schema_expr = ""
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM %(schema)ssqlite_master UNION ALL "
" SELECT * FROM %(schema)ssqlite_temp_master) "
"WHERE name = ? "
"AND type = 'table'" % {"schema": schema_expr}
)
rs = connection.exec_driver_sql(s, (table_name,))
except exc.DBAPIError:
s = (
"SELECT sql FROM %(schema)ssqlite_master "
"WHERE name = ? "
"AND type = 'table'" % {"schema": schema_expr}
)
rs = connection.exec_driver_sql(s, (table_name,))
return rs.scalar()
def _get_table_pragma(self, connection, pragma, table_name, schema=None):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
statements = ["PRAGMA %s." % quote(schema)]
else:
# because PRAGMA looks in all attached databases if no schema
# given, need to specify "main" schema, however since we want
# 'temp' tables in the same namespace as 'main', need to run
# the PRAGMA twice
statements = ["PRAGMA main.", "PRAGMA temp."]
qtable = quote(table_name)
for statement in statements:
statement = "%s%s(%s)" % (statement, pragma, qtable)
cursor = connection.exec_driver_sql(statement)
if not cursor._soft_closed:
# work around SQLite issue whereby cursor.description
# is blank when PRAGMA returns no rows:
# https://www.sqlite.org/cvstrac/tktview?tn=1884
result = cursor.fetchall()
else:
result = []
if result:
return result
else:
return []
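# --- Illustrative sketch (not part of the dialect module) ------------------
# All of the reflection methods above funnel through _get_table_pragma.
# A minimal standalone look at the raw rows such a PRAGMA produces, using
# only the stdlib driver; the table and column names here are made up:
import sqlite3 as _sqlite3_pragma_demo

_pc = _sqlite3_pragma_demo.connect(":memory:")
_pc.execute("CREATE TABLE person (id INTEGER PRIMARY KEY, name TEXT NOT NULL)")
# table_info rows are (cid, name, type, notnull, dflt_value, pk)
_rows = _pc.execute("PRAGMA main.table_info('person')").fetchall()
assert [(r[1], r[2], r[3], r[5]) for r in _rows] == [
    ("id", "INTEGER", 0, 1),
    ("name", "TEXT", 1, 0),
]
_pc.close()
# ---------------------------------------------------------------------------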
| 34.656669
| 110
| 0.599601
|
import datetime
import numbers
import re
from .json import JSON
from .json import JSONIndexType
from .json import JSONPathType
from ... import exc
from ... import schema as sa_schema
from ... import sql
from ... import types as sqltypes
from ... import util
from ...engine import default
from ...engine import processors
from ...engine import reflection
from ...sql import coercions
from ...sql import ColumnElement
from ...sql import compiler
from ...sql import elements
from ...sql import roles
from ...sql import schema
from ...types import BLOB
from ...types import BOOLEAN
from ...types import CHAR
from ...types import DECIMAL
from ...types import FLOAT
from ...types import INTEGER
from ...types import NUMERIC
from ...types import REAL
from ...types import SMALLINT
from ...types import TEXT
from ...types import TIMESTAMP
from ...types import VARCHAR
class _SQliteJson(JSON):
def result_processor(self, dialect, coltype):
default_processor = super(_SQliteJson, self).result_processor(
dialect, coltype
)
def process(value):
try:
return default_processor(value)
except TypeError:
if isinstance(value, numbers.Number):
return value
else:
raise
return process
class _DateTimeMixin:
_reg = None
_storage_format = None
def __init__(self, storage_format=None, regexp=None, **kw):
super(_DateTimeMixin, self).__init__(**kw)
if regexp is not None:
self._reg = re.compile(regexp)
if storage_format is not None:
self._storage_format = storage_format
@property
def format_is_text_affinity(self):
spec = self._storage_format % {
"year": 0,
"month": 0,
"day": 0,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
return bool(re.search(r"[^0-9]", spec))
def adapt(self, cls, **kw):
if issubclass(cls, _DateTimeMixin):
if self._storage_format:
kw["storage_format"] = self._storage_format
if self._reg:
kw["regexp"] = self._reg
return super(_DateTimeMixin, self).adapt(cls, **kw)
def literal_processor(self, dialect):
bp = self.bind_processor(dialect)
def process(value):
return "'%s'" % bp(value)
return process
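# --- Illustrative sketch (not part of the dialect module) ------------------
# The _DateTimeMixin subclasses below render temporal values via plain
# %-interpolation over named fields. A standalone rendering of the default
# DATETIME storage format, assuming only the stdlib; the names are made up:
import datetime as _datetime_demo

_DEMO_FORMAT = (
    "%(year)04d-%(month)02d-%(day)02d "
    "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
)

def _demo_render(value):
    # expand the named placeholders the same way DATETIME.bind_processor does
    return _DEMO_FORMAT % {
        "year": value.year, "month": value.month, "day": value.day,
        "hour": value.hour, "minute": value.minute,
        "second": value.second, "microsecond": value.microsecond,
    }

assert (_demo_render(_datetime_demo.datetime(2021, 3, 4, 5, 6, 7, 8))
        == "2021-03-04 05:06:07.000008")
# ---------------------------------------------------------------------------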
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
_storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
)
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(DATETIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = (
"%(year)04d-%(month)02d-%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d"
)
def bind_processor(self, dialect):
datetime_datetime = datetime.datetime
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_datetime):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
"hour": 0,
"minute": 0,
"second": 0,
"microsecond": 0,
}
else:
raise TypeError(
"SQLite DateTime type only accepts Python "
"datetime and date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.datetime
)
else:
return processors.str_to_datetime
class DATE(_DateTimeMixin, sqltypes.Date):
_storage_format = "%(year)04d-%(month)02d-%(day)02d"
def bind_processor(self, dialect):
datetime_date = datetime.date
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_date):
return format_ % {
"year": value.year,
"month": value.month,
"day": value.day,
}
else:
raise TypeError(
"SQLite Date type only accepts Python "
"date objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.date
)
else:
return processors.str_to_date
class TIME(_DateTimeMixin, sqltypes.Time):
_storage_format = "%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
def __init__(self, *args, **kwargs):
truncate_microseconds = kwargs.pop("truncate_microseconds", False)
super(TIME, self).__init__(*args, **kwargs)
if truncate_microseconds:
assert "storage_format" not in kwargs, (
"You can specify only "
"one of truncate_microseconds or storage_format."
)
assert "regexp" not in kwargs, (
"You can specify only one of "
"truncate_microseconds or regexp."
)
self._storage_format = "%(hour)02d:%(minute)02d:%(second)02d"
def bind_processor(self, dialect):
datetime_time = datetime.time
format_ = self._storage_format
def process(value):
if value is None:
return None
elif isinstance(value, datetime_time):
return format_ % {
"hour": value.hour,
"minute": value.minute,
"second": value.second,
"microsecond": value.microsecond,
}
else:
raise TypeError(
"SQLite Time type only accepts Python "
"time objects as input."
)
return process
def result_processor(self, dialect, coltype):
if self._reg:
return processors.str_to_datetime_processor_factory(
self._reg, datetime.time
)
else:
return processors.str_to_time
colspecs = {
sqltypes.Date: DATE,
sqltypes.DateTime: DATETIME,
sqltypes.JSON: _SQliteJson,
sqltypes.JSON.JSONIndexType: JSONIndexType,
sqltypes.JSON.JSONPathType: JSONPathType,
sqltypes.Time: TIME,
}
ischema_names = {
"BIGINT": sqltypes.BIGINT,
"BLOB": sqltypes.BLOB,
"BOOL": sqltypes.BOOLEAN,
"BOOLEAN": sqltypes.BOOLEAN,
"CHAR": sqltypes.CHAR,
"DATE": sqltypes.DATE,
"DATE_CHAR": sqltypes.DATE,
"DATETIME": sqltypes.DATETIME,
"DATETIME_CHAR": sqltypes.DATETIME,
"DOUBLE": sqltypes.DOUBLE,
"DECIMAL": sqltypes.DECIMAL,
"FLOAT": sqltypes.FLOAT,
"INT": sqltypes.INTEGER,
"INTEGER": sqltypes.INTEGER,
"JSON": JSON,
"NUMERIC": sqltypes.NUMERIC,
"REAL": sqltypes.REAL,
"SMALLINT": sqltypes.SMALLINT,
"TEXT": sqltypes.TEXT,
"TIME": sqltypes.TIME,
"TIME_CHAR": sqltypes.TIME,
"TIMESTAMP": sqltypes.TIMESTAMP,
"VARCHAR": sqltypes.VARCHAR,
"NVARCHAR": sqltypes.NVARCHAR,
"NCHAR": sqltypes.NCHAR,
}
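# --- Illustrative sketch (not part of the dialect module) ------------------
# When a declared type is not in ischema_names above, _resolve_type_affinity
# falls back to SQLite's column-affinity rules
# (https://www.sqlite.org/datatype3.html, section 2.1). The same rules in
# isolation, without any SQLAlchemy types; the function name is made up:
def _demo_affinity(declared_type):
    t = declared_type.upper()
    if "INT" in t:
        return "INTEGER"
    if "CHAR" in t or "CLOB" in t or "TEXT" in t:
        return "TEXT"
    if "BLOB" in t or not t:
        return "NONE"
    if "REAL" in t or "FLOA" in t or "DOUB" in t:
        return "REAL"
    return "NUMERIC"

assert _demo_affinity("VARCHAR(30)") == "TEXT"
assert _demo_affinity("FLOATING POINT") == "INTEGER"  # "POINT" contains "INT"
assert _demo_affinity("") == "NONE"
# ---------------------------------------------------------------------------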
class SQLiteCompiler(compiler.SQLCompiler):
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
"month": "%m",
"day": "%d",
"year": "%Y",
"second": "%S",
"hour": "%H",
"doy": "%j",
"minute": "%M",
"epoch": "%s",
"dow": "%w",
"week": "%W",
},
)
def visit_truediv_binary(self, binary, operator, **kw):
return (
self.process(binary.left, **kw)
+ " / "
+ "(%s + 0.0)" % self.process(binary.right, **kw)
)
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_localtimestamp_func(self, func, **kw):
return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
def visit_true(self, expr, **kw):
return "1"
def visit_false(self, expr, **kw):
return "0"
def visit_char_length_func(self, fn, **kw):
return "length%s" % self.function_argspec(fn)
def visit_cast(self, cast, **kwargs):
if self.dialect.supports_cast:
return super(SQLiteCompiler, self).visit_cast(cast, **kwargs)
else:
return self.process(cast.clause, **kwargs)
def visit_extract(self, extract, **kw):
try:
return "CAST(STRFTIME('%s', %s) AS INTEGER)" % (
self.extract_map[extract.field],
self.process(extract.expr, **kw),
)
except KeyError as err:
raise exc.CompileError(
"%s is not a valid extract argument." % extract.field
) from err
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT " + self.process(sql.literal(-1))
text += " OFFSET " + self.process(select._offset_clause, **kw)
else:
text += " OFFSET " + self.process(sql.literal(0), **kw)
return text
def for_update_clause(self, select, **kw):
return ""
def visit_is_distinct_from_binary(self, binary, operator, **kw):
return "%s IS NOT %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_is_not_distinct_from_binary(self, binary, operator, **kw):
return "%s IS %s" % (
self.process(binary.left),
self.process(binary.right),
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
if binary.type._type_affinity is sqltypes.JSON:
expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
else:
expr = "JSON_EXTRACT(%s, %s)"
return expr % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
if binary.type._type_affinity is sqltypes.JSON:
expr = "JSON_QUOTE(JSON_EXTRACT(%s, %s))"
else:
expr = "JSON_EXTRACT(%s, %s)"
return expr % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_empty_set_op_expr(self, type_, expand_op):
# the empty set impl
return self.visit_empty_set_expr(type_)
def visit_empty_set_expr(self, element_types):
return "SELECT %s FROM (SELECT %s) WHERE 1!=1" % (
", ".join("1" for type_ in element_types or [INTEGER()]),
", ".join("1" for type_ in element_types or [INTEGER()]),
)
def visit_regexp_match_op_binary(self, binary, operator, **kw):
return self._generate_generic_binary(binary, " REGEXP ", **kw)
def visit_not_regexp_match_op_binary(self, binary, operator, **kw):
return self._generate_generic_binary(binary, " NOT REGEXP ", **kw)
def _on_conflict_target(self, clause, **kw):
if clause.constraint_target is not None:
target_text = "(%s)" % clause.constraint_target
elif clause.inferred_target_elements is not None:
target_text = "(%s)" % ", ".join(
(
self.preparer.quote(c)
if isinstance(c, str)
else self.process(c, include_table=False, use_schema=False)
)
for c in clause.inferred_target_elements
)
if clause.inferred_target_whereclause is not None:
target_text += " WHERE %s" % self.process(
clause.inferred_target_whereclause,
include_table=False,
use_schema=False,
literal_binds=True,
)
else:
target_text = ""
return target_text
def visit_on_conflict_do_nothing(self, on_conflict, **kw):
target_text = self._on_conflict_target(on_conflict, **kw)
if target_text:
return "ON CONFLICT %s DO NOTHING" % target_text
else:
return "ON CONFLICT DO NOTHING"
def visit_on_conflict_do_update(self, on_conflict, **kw):
clause = on_conflict
target_text = self._on_conflict_target(on_conflict, **kw)
action_set_ops = []
set_parameters = dict(clause.update_values_to_set)
# create a list of column assignment clauses as tuples
insert_statement = self.stack[-1]["selectable"]
cols = insert_statement.table.c
for c in cols:
col_key = c.key
if col_key in set_parameters:
value = set_parameters.pop(col_key)
elif c in set_parameters:
value = set_parameters.pop(c)
else:
continue
if coercions._is_literal(value):
value = elements.BindParameter(None, value, type_=c.type)
else:
if (
isinstance(value, elements.BindParameter)
and value.type._isnull
):
value = value._clone()
value.type = c.type
value_text = self.process(value.self_group(), use_schema=False)
key_text = self.preparer.quote(col_key)
action_set_ops.append("%s = %s" % (key_text, value_text))
# check for names that don't match columns
if set_parameters:
util.warn(
"Additional column names not matching "
"any column keys in table '%s': %s"
% (
self.current_executable.table.name,
(", ".join("'%s'" % c for c in set_parameters)),
)
)
for k, v in set_parameters.items():
key_text = (
self.preparer.quote(k)
if isinstance(k, str)
else self.process(k, use_schema=False)
)
value_text = self.process(
coercions.expect(roles.ExpressionElementRole, v),
use_schema=False,
)
action_set_ops.append("%s = %s" % (key_text, value_text))
action_text = ", ".join(action_set_ops)
if clause.update_whereclause is not None:
action_text += " WHERE %s" % self.process(
clause.update_whereclause, include_table=True, use_schema=False
)
return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
coltype = self.dialect.type_compiler.process(
column.type, type_expression=column
)
colspec = self.preparer.format_column(column) + " " + coltype
default = self.get_column_default_string(column)
if default is not None:
if isinstance(column.server_default.arg, ColumnElement):
default = "(" + default + ")"
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
on_conflict_clause = column.dialect_options["sqlite"][
"on_conflict_not_null"
]
if on_conflict_clause is not None:
colspec += " ON CONFLICT " + on_conflict_clause
if column.primary_key:
if (
column.autoincrement is True
and len(column.table.primary_key.columns) != 1
):
raise exc.CompileError(
"SQLite does not support autoincrement for "
"composite primary keys"
)
if (
column.table.dialect_options["sqlite"]["autoincrement"]
and len(column.table.primary_key.columns) == 1
and issubclass(column.type._type_affinity, sqltypes.Integer)
and not column.foreign_keys
):
colspec += " PRIMARY KEY"
on_conflict_clause = column.dialect_options["sqlite"][
"on_conflict_primary_key"
]
if on_conflict_clause is not None:
colspec += " ON CONFLICT " + on_conflict_clause
colspec += " AUTOINCREMENT"
if column.computed is not None:
colspec += " " + self.process(column.computed)
return colspec
def visit_primary_key_constraint(self, constraint):
if len(constraint.columns) == 1:
c = list(constraint)[0]
if (
c.primary_key
and c.table.dialect_options["sqlite"]["autoincrement"]
and issubclass(c.type._type_affinity, sqltypes.Integer)
and not c.foreign_keys
):
return None
text = super(SQLiteDDLCompiler, self).visit_primary_key_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is None and len(constraint.columns) == 1:
on_conflict_clause = list(constraint)[0].dialect_options["sqlite"][
"on_conflict_primary_key"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_unique_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_unique_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is None and len(constraint.columns) == 1:
col1 = list(constraint)[0]
if isinstance(col1, schema.SchemaItem):
on_conflict_clause = list(constraint)[0].dialect_options[
"sqlite"
]["on_conflict_unique"]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_check_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_check_constraint(
constraint
)
on_conflict_clause = constraint.dialect_options["sqlite"][
"on_conflict"
]
if on_conflict_clause is not None:
text += " ON CONFLICT " + on_conflict_clause
return text
def visit_column_check_constraint(self, constraint):
text = super(SQLiteDDLCompiler, self).visit_column_check_constraint(
constraint
)
if constraint.dialect_options["sqlite"]["on_conflict"] is not None:
raise exc.CompileError(
"SQLite does not support on conflict clause for "
"column check constraint"
)
return text
def visit_foreign_key_constraint(self, constraint):
local_table = constraint.elements[0].parent.table
remote_table = constraint.elements[0].column.table
if local_table.schema != remote_table.schema:
return None
else:
return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(
constraint
)
def define_constraint_remote_table(self, constraint, table, preparer):
return preparer.format_table(table, use_schema=False)
def visit_create_index(
self, create, include_schema=False, include_table_schema=True
):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX "
if create.if_not_exists:
text += "IF NOT EXISTS "
text += "%s ON %s (%s)" % (
self._prepared_index_name(index, include_schema=True),
preparer.format_table(index.table, use_schema=False),
", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
),
)
whereclause = index.dialect_options["sqlite"]["where"]
if whereclause is not None:
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
return text
def post_create_table(self, table):
if table.dialect_options["sqlite"]["with_rowid"] is False:
return "\n WITHOUT ROWID"
return ""
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
def visit_DATETIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)
else:
return "DATETIME_CHAR"
def visit_DATE(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_DATE(type_)
else:
return "DATE_CHAR"
def visit_TIME(self, type_, **kw):
if (
not isinstance(type_, _DateTimeMixin)
or type_.format_is_text_affinity
):
return super(SQLiteTypeCompiler, self).visit_TIME(type_)
else:
return "TIME_CHAR"
def visit_JSON(self, type_, **kw):
return "JSON"
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set(
[
"add",
"after",
"all",
"alter",
"analyze",
"and",
"as",
"asc",
"attach",
"autoincrement",
"before",
"begin",
"between",
"by",
"cascade",
"case",
"cast",
"check",
"collate",
"column",
"commit",
"conflict",
"constraint",
"create",
"cross",
"current_date",
"current_time",
"current_timestamp",
"database",
"default",
"deferrable",
"deferred",
"delete",
"desc",
"detach",
"distinct",
"drop",
"each",
"else",
"end",
"escape",
"except",
"exclusive",
"exists",
"explain",
"false",
"fail",
"for",
"foreign",
"from",
"full",
"glob",
"group",
"having",
"if",
"ignore",
"immediate",
"in",
"index",
"indexed",
"initially",
"inner",
"insert",
"instead",
"intersect",
"into",
"is",
"isnull",
"join",
"key",
"left",
"like",
"limit",
"match",
"natural",
"not",
"notnull",
"null",
"of",
"offset",
"on",
"or",
"order",
"outer",
"plan",
"pragma",
"primary",
"query",
"raise",
"references",
"reindex",
"rename",
"replace",
"restrict",
"right",
"rollback",
"row",
"select",
"set",
"table",
"temp",
"temporary",
"then",
"to",
"transaction",
"trigger",
"true",
"union",
"unique",
"update",
"using",
"vacuum",
"values",
"view",
"virtual",
"when",
"where",
]
)
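# --- Illustrative sketch (not part of the dialect module) ------------------
# Any identifier in the reserved-word set above has to be double-quoted in
# emitted SQL. SQLite itself accepts reserved words once quoted, as a quick
# stdlib check shows (the names below are deliberately reserved words):
import sqlite3 as _sqlite3_quote_demo

_qc = _sqlite3_quote_demo.connect(":memory:")
_qc.execute('CREATE TABLE "order" ("select" INTEGER)')
_qc.execute('INSERT INTO "order" ("select") VALUES (1)')
assert _qc.execute('SELECT "select" FROM "order"').fetchone() == (1,)
_qc.close()
# ---------------------------------------------------------------------------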
class SQLiteExecutionContext(default.DefaultExecutionContext):
@util.memoized_property
def _preserve_raw_colnames(self):
return (
not self.dialect._broken_dotted_colnames
or self.execution_options.get("sqlite_raw_colnames", False)
)
def _translate_colname(self, colname):
if not self._preserve_raw_colnames and "." in colname:
return colname.split(".")[-1], colname
else:
return colname, None
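# --- Illustrative sketch (not part of the dialect module) ------------------
# On SQLite < 3.10.0 a SELECT may report result columns as "table.column";
# _translate_colname above keeps only the trailing component in that case.
# The translation on its own, with a made-up function name:
def _demo_translate(colname, broken_dotted=True):
    if broken_dotted and "." in colname:
        return colname.split(".")[-1], colname
    return colname, None

assert _demo_translate("users.id") == ("id", "users.id")
assert _demo_translate("id") == ("id", None)
# ---------------------------------------------------------------------------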
class SQLiteDialect(default.DefaultDialect):
name = "sqlite"
supports_alter = False
supports_default_values = True
supports_default_metavalue = False
supports_empty_insert = False
supports_cast = True
supports_multivalues_insert = True
tuple_in_values = True
supports_statement_cache = True
default_paramstyle = "qmark"
execution_ctx_cls = SQLiteExecutionContext
statement_compiler = SQLiteCompiler
ddl_compiler = SQLiteDDLCompiler
type_compiler = SQLiteTypeCompiler
preparer = SQLiteIdentifierPreparer
ischema_names = ischema_names
colspecs = colspecs
construct_arguments = [
(
sa_schema.Table,
{
"autoincrement": False,
"with_rowid": True,
},
),
(sa_schema.Index, {"where": None}),
(
sa_schema.Column,
{
"on_conflict_primary_key": None,
"on_conflict_not_null": None,
"on_conflict_unique": None,
},
),
(sa_schema.Constraint, {"on_conflict": None}),
]
_broken_fk_pragma_quotes = False
_broken_dotted_colnames = False
@util.deprecated_params(
_json_serializer=(
"1.3.7",
"The _json_serializer argument to the SQLite dialect has "
"been renamed to the correct name of json_serializer. The old "
"argument name will be removed in a future release.",
),
_json_deserializer=(
"1.3.7",
"The _json_deserializer argument to the SQLite dialect has "
"been renamed to the correct name of json_deserializer. The old "
"argument name will be removed in a future release.",
),
)
def __init__(
self,
native_datetime=False,
json_serializer=None,
json_deserializer=None,
_json_serializer=None,
_json_deserializer=None,
**kwargs,
):
default.DefaultDialect.__init__(self, **kwargs)
if _json_serializer:
json_serializer = _json_serializer
if _json_deserializer:
json_deserializer = _json_deserializer
self._json_serializer = json_serializer
self._json_deserializer = json_deserializer
self.native_datetime = native_datetime
if self.dbapi is not None:
if self.dbapi.sqlite_version_info < (3, 7, 16):
util.warn(
"SQLite version %s is older than 3.7.16, and will not "
"support right nested joins, as are sometimes used in "
"more complex ORM scenarios. SQLAlchemy 1.4 and above "
"no longer tries to rewrite these joins."
% (self.dbapi.sqlite_version_info,)
)
self._broken_dotted_colnames = self.dbapi.sqlite_version_info < (
3,
10,
0,
)
self.supports_default_values = self.dbapi.sqlite_version_info >= (
3,
3,
8,
)
self.supports_cast = self.dbapi.sqlite_version_info >= (3, 2, 3)
self.supports_multivalues_insert = (
self.dbapi.sqlite_version_info
>= (3, 7, 11)
)
self._broken_fk_pragma_quotes = self.dbapi.sqlite_version_info < (
3,
6,
14,
)
_isolation_lookup = util.immutabledict(
{"READ UNCOMMITTED": 1, "SERIALIZABLE": 0}
)
def get_isolation_level_values(self, dbapi_connection):
return list(self._isolation_lookup)
def set_isolation_level(self, dbapi_connection, level):
isolation_level = self._isolation_lookup[level]
cursor = dbapi_connection.cursor()
cursor.execute(f"PRAGMA read_uncommitted = {isolation_level}")
cursor.close()
def get_isolation_level(self, dbapi_connection):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA read_uncommitted")
res = cursor.fetchone()
if res:
value = res[0]
else:
            # https://www.sqlite.org/changes.html#version_3_3_3
            # "Optional READ UNCOMMITTED isolation (instead of the
            # default isolation level of SERIALIZABLE) and
            # table level locking when database connections
            # share a common cache."
            # pre-SQLite 3.3.0 defaulted to 0
value = 0
cursor.close()
if value == 0:
return "SERIALIZABLE"
elif value == 1:
return "READ UNCOMMITTED"
else:
assert False, "Unknown isolation level %s" % value
@reflection.cache
def get_schema_names(self, connection, **kw):
s = "PRAGMA database_list"
dl = connection.exec_driver_sql(s)
return [db[1] for db in dl if db[1] != "temp"]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='table' ORDER BY name") % (
master,
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_table_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='table' ORDER BY name "
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_temp_view_names(self, connection, **kw):
s = (
"SELECT name FROM sqlite_temp_master "
"WHERE type='view' ORDER BY name "
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
def has_table(self, connection, table_name, schema=None):
self._ensure_has_table_connection(connection)
info = self._get_table_pragma(
connection, "table_info", table_name, schema=schema
)
return bool(info)
def _get_default_schema_name(self, connection):
return "main"
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
else:
master = "sqlite_master"
s = ("SELECT name FROM %s " "WHERE type='view' ORDER BY name") % (
master,
)
rs = connection.exec_driver_sql(s)
return [row[0] for row in rs]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
master = "%s.sqlite_master" % qschema
s = ("SELECT sql FROM %s WHERE name = ? AND type='view'") % (
master,
)
rs = connection.exec_driver_sql(s, (view_name,))
else:
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM sqlite_master UNION ALL "
" SELECT * FROM sqlite_temp_master) "
"WHERE name = ? "
"AND type='view'"
)
rs = connection.exec_driver_sql(s, (view_name,))
except exc.DBAPIError:
s = (
"SELECT sql FROM sqlite_master WHERE name = ? "
"AND type='view'"
)
rs = connection.exec_driver_sql(s, (view_name,))
result = rs.fetchall()
if result:
return result[0].sql
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
pragma = "table_info"
        # computed columns are treated as hidden; they require table_xinfo
if self.server_version_info >= (3, 31):
pragma = "table_xinfo"
info = self._get_table_pragma(
connection, pragma, table_name, schema=schema
)
columns = []
tablesql = None
for row in info:
name = row[1]
type_ = row[2].upper()
nullable = not row[3]
default = row[4]
primary_key = row[5]
hidden = row[6] if pragma == "table_xinfo" else 0
# hidden has value 0 for normal columns, 1 for hidden columns,
# 2 for computed virtual columns and 3 for computed stored columns
# https://www.sqlite.org/src/info/069351b85f9a706f60d3e98fbc8aaf40c374356b967c0464aede30ead3d9d18b
if hidden == 1:
continue
generated = bool(hidden)
persisted = hidden == 3
if tablesql is None and generated:
tablesql = self._get_table_sql(
connection, table_name, schema, **kw
)
columns.append(
self._get_column_info(
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
)
)
return columns
def _get_column_info(
self,
name,
type_,
nullable,
default,
primary_key,
generated,
persisted,
tablesql,
):
if generated:
# the type of a column "cc INTEGER GENERATED ALWAYS AS (1 + 42)"
# somehow is "INTEGER GENERATED ALWAYS"
type_ = re.sub("generated", "", type_, flags=re.IGNORECASE)
type_ = re.sub("always", "", type_, flags=re.IGNORECASE).strip()
coltype = self._resolve_type_affinity(type_)
if default is not None:
default = str(default)
colspec = {
"name": name,
"type": coltype,
"nullable": nullable,
"default": default,
"autoincrement": "auto",
"primary_key": primary_key,
}
if generated:
sqltext = ""
if tablesql:
pattern = r"[^,]*\s+AS\s+\(([^,]*)\)\s*(?:virtual|stored)?"
match = re.search(
re.escape(name) + pattern, tablesql, re.IGNORECASE
)
if match:
sqltext = match.group(1)
colspec["computed"] = {"sqltext": sqltext, "persisted": persisted}
return colspec
def _resolve_type_affinity(self, type_):
match = re.match(r"([\w ]+)(\(.*?\))?", type_)
if match:
coltype = match.group(1)
args = match.group(2)
else:
coltype = ""
args = ""
if coltype in self.ischema_names:
coltype = self.ischema_names[coltype]
elif "INT" in coltype:
coltype = sqltypes.INTEGER
elif "CHAR" in coltype or "CLOB" in coltype or "TEXT" in coltype:
coltype = sqltypes.TEXT
elif "BLOB" in coltype or not coltype:
coltype = sqltypes.NullType
elif "REAL" in coltype or "FLOA" in coltype or "DOUB" in coltype:
coltype = sqltypes.REAL
else:
coltype = sqltypes.NUMERIC
if args is not None:
args = re.findall(r"(\d+)", args)
try:
coltype = coltype(*[int(a) for a in args])
except TypeError:
util.warn(
"Could not instantiate type %s with "
"reflected arguments %s; using no arguments."
% (coltype, args)
)
coltype = coltype()
else:
coltype = coltype()
return coltype
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
constraint_name = None
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data:
PK_PATTERN = r"CONSTRAINT (\w+) PRIMARY KEY"
result = re.search(PK_PATTERN, table_data, re.I)
constraint_name = result.group(1) if result else None
cols = self.get_columns(connection, table_name, schema, **kw)
cols.sort(key=lambda col: col.get("primary_key"))
pkeys = []
for col in cols:
if col["primary_key"]:
pkeys.append(col["name"])
return {"constrained_columns": pkeys, "name": constraint_name}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# sqlite makes this *extremely difficult*.
# First, use the pragma to get the actual FKs.
pragma_fks = self._get_table_pragma(
connection, "foreign_key_list", table_name, schema=schema
)
fks = {}
for row in pragma_fks:
(numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
if not rcol:
# no referred column, which means it was not named in the
# original DDL. The referred columns of the foreign key
# constraint are therefore the primary key of the referred
# table.
referred_pk = self.get_pk_constraint(
connection, rtbl, schema=schema, **kw
)
# note that if table doesn't exist, we still get back a record,
# just it has no columns in it
referred_columns = referred_pk["constrained_columns"]
else:
# note we use this list only if this is the first column
# in the constraint. for subsequent columns we ignore the
# list and append "rcol" if present.
referred_columns = []
if self._broken_fk_pragma_quotes:
rtbl = re.sub(r"^[\"\[`\']|[\"\]`\']$", "", rtbl)
if numerical_id in fks:
fk = fks[numerical_id]
else:
fk = fks[numerical_id] = {
"name": None,
"constrained_columns": [],
"referred_schema": schema,
"referred_table": rtbl,
"referred_columns": referred_columns,
"options": {},
}
fks[numerical_id] = fk
fk["constrained_columns"].append(lcol)
if rcol:
fk["referred_columns"].append(rcol)
def fk_sig(constrained_columns, referred_table, referred_columns):
return (
tuple(constrained_columns)
+ (referred_table,)
+ tuple(referred_columns)
)
# then, parse the actual SQL and attempt to find DDL that matches
# the names as well. SQLite saves the DDL in whatever format
# it was typed in as, so need to be liberal here.
keys_by_signature = dict(
(
fk_sig(
fk["constrained_columns"],
fk["referred_table"],
fk["referred_columns"],
),
fk,
)
for fk in fks.values()
)
table_data = self._get_table_sql(connection, table_name, schema=schema)
if table_data is None:
# system tables, etc.
return []
def parse_fks():
FK_PATTERN = (
r"(?:CONSTRAINT (\w+) +)?"
r"FOREIGN KEY *\( *(.+?) *\) +"
r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *'
r"((?:ON (?:DELETE|UPDATE) "
r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)"
)
for match in re.finditer(FK_PATTERN, table_data, re.I):
(
constraint_name,
constrained_columns,
referred_quoted_name,
referred_name,
referred_columns,
onupdatedelete,
) = match.group(1, 2, 3, 4, 5, 6)
constrained_columns = list(
self._find_cols_in_sig(constrained_columns)
)
if not referred_columns:
referred_columns = constrained_columns
else:
referred_columns = list(
self._find_cols_in_sig(referred_columns)
)
referred_name = referred_quoted_name or referred_name
options = {}
for token in re.split(r" *\bON\b *", onupdatedelete.upper()):
if token.startswith("DELETE"):
ondelete = token[6:].strip()
if ondelete and ondelete != "NO ACTION":
options["ondelete"] = ondelete
elif token.startswith("UPDATE"):
onupdate = token[6:].strip()
if onupdate and onupdate != "NO ACTION":
options["onupdate"] = onupdate
yield (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
)
fkeys = []
for (
constraint_name,
constrained_columns,
referred_name,
referred_columns,
options,
) in parse_fks():
sig = fk_sig(constrained_columns, referred_name, referred_columns)
if sig not in keys_by_signature:
util.warn(
"WARNING: SQL-parsed foreign key constraint "
"'%s' could not be located in PRAGMA "
"foreign_keys for table %s" % (sig, table_name)
)
continue
key = keys_by_signature.pop(sig)
key["name"] = constraint_name
key["options"] = options
fkeys.append(key)
# assume the remainders are the unnamed, inline constraints, just
# use them as is as it's extremely difficult to parse inline
# constraints
fkeys.extend(keys_by_signature.values())
return fkeys
def _find_cols_in_sig(self, sig):
for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
yield match.group(1) or match.group(2)
@reflection.cache
def get_unique_constraints(
self, connection, table_name, schema=None, **kw
):
auto_index_by_sig = {}
for idx in self.get_indexes(
connection,
table_name,
schema=schema,
include_auto_indexes=True,
**kw,
):
if not idx["name"].startswith("sqlite_autoindex"):
continue
sig = tuple(idx["column_names"])
auto_index_by_sig[sig] = idx
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
unique_constraints = []
def parse_uqs():
UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
INLINE_UNIQUE_PATTERN = (
r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?) '
r"+[a-z0-9_ ]+? +UNIQUE"
)
for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
name, cols = match.group(1, 2)
yield name, list(self._find_cols_in_sig(cols))
# we need to match inlines as well, as we seek to differentiate
# a UNIQUE constraint from a UNIQUE INDEX, even though these
# are kind of the same thing :)
for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
cols = list(
self._find_cols_in_sig(match.group(1) or match.group(2))
)
yield None, cols
for name, cols in parse_uqs():
sig = tuple(cols)
if sig in auto_index_by_sig:
auto_index_by_sig.pop(sig)
parsed_constraint = {"name": name, "column_names": cols}
unique_constraints.append(parsed_constraint)
# NOTE: auto_index_by_sig might not be empty here,
# the PRIMARY KEY may have an entry.
return unique_constraints
@reflection.cache
def get_check_constraints(self, connection, table_name, schema=None, **kw):
table_data = self._get_table_sql(
connection, table_name, schema=schema, **kw
)
if not table_data:
return []
CHECK_PATTERN = r"(?:CONSTRAINT (\w+) +)?" r"CHECK *\( *(.+) *\),? *"
check_constraints = []
# NOTE: we aren't using re.S here because we actually are
# taking advantage of each CHECK constraint being all on one
# line in the table definition in order to delineate. This
# necessarily makes assumptions as to how the CREATE TABLE
# was emitted.
for match in re.finditer(CHECK_PATTERN, table_data, re.I):
check_constraints.append(
{"sqltext": match.group(2), "name": match.group(1)}
)
return check_constraints
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
pragma_indexes = self._get_table_pragma(
connection, "index_list", table_name, schema=schema
)
indexes = []
include_auto_indexes = kw.pop("include_auto_indexes", False)
for row in pragma_indexes:
# ignore implicit primary key index.
# https://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
if not include_auto_indexes and row[1].startswith(
"sqlite_autoindex"
):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
# loop thru unique indexes to get the column names.
for idx in list(indexes):
pragma_index = self._get_table_pragma(
connection, "index_info", idx["name"]
)
for row in pragma_index:
if row[2] is None:
util.warn(
"Skipped unsupported reflection of "
"expression-based index %s" % idx["name"]
)
indexes.remove(idx)
break
else:
idx["column_names"].append(row[2])
return indexes
@reflection.cache
def _get_table_sql(self, connection, table_name, schema=None, **kw):
if schema:
schema_expr = "%s." % (
self.identifier_preparer.quote_identifier(schema)
)
else:
schema_expr = ""
try:
s = (
"SELECT sql FROM "
" (SELECT * FROM %(schema)ssqlite_master UNION ALL "
" SELECT * FROM %(schema)ssqlite_temp_master) "
"WHERE name = ? "
"AND type = 'table'" % {"schema": schema_expr}
)
rs = connection.exec_driver_sql(s, (table_name,))
except exc.DBAPIError:
s = (
"SELECT sql FROM %(schema)ssqlite_master "
"WHERE name = ? "
"AND type = 'table'" % {"schema": schema_expr}
)
rs = connection.exec_driver_sql(s, (table_name,))
return rs.scalar()
def _get_table_pragma(self, connection, pragma, table_name, schema=None):
quote = self.identifier_preparer.quote_identifier
if schema is not None:
statements = ["PRAGMA %s." % quote(schema)]
else:
# because PRAGMA looks in all attached databases if no schema
# given, need to specify "main" schema, however since we want
# 'temp' tables in the same namespace as 'main', need to run
# the PRAGMA twice
statements = ["PRAGMA main.", "PRAGMA temp."]
qtable = quote(table_name)
for statement in statements:
statement = "%s%s(%s)" % (statement, pragma, qtable)
cursor = connection.exec_driver_sql(statement)
if not cursor._soft_closed:
# work around SQLite issue whereby cursor.description
# is blank when PRAGMA returns no rows:
# https://www.sqlite.org/cvstrac/tktview?tn=1884
result = cursor.fetchall()
else:
result = []
if result:
return result
else:
return []
| true
| true
|
79068c8456a602991ace5e08f967eb9e53479cee
| 509
|
py
|
Python
|
rafry/raytracer/beam.py
|
oasys-kit/rafry
|
f1d83eb7f22cccccfd1babcaaaf64285c0e97aa0
|
[
"MIT"
] | null | null | null |
rafry/raytracer/beam.py
|
oasys-kit/rafry
|
f1d83eb7f22cccccfd1babcaaaf64285c0e97aa0
|
[
"MIT"
] | null | null | null |
rafry/raytracer/beam.py
|
oasys-kit/rafry
|
f1d83eb7f22cccccfd1babcaaaf64285c0e97aa0
|
[
"MIT"
] | null | null | null |
class Beam(object):
def __init__(self):
super().__init__()
def get_number_of_rays(self):
raise NotImplementedError("method is abstract")
def get_rays(self):
raise NotImplementedError("method is abstract")
def get_ray(self, ray_index):
raise NotImplementedError("method is abstract")
def duplicate(self):
raise NotImplementedError("method is abstract")
def merge(self, other_beam):
raise NotImplementedError("method is abstract")
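# --- Illustrative sketch (not part of this module) -------------------------
# Beam is a pure interface: every method raises NotImplementedError. A
# hypothetical minimal subclass, only to show the contract a concrete beam
# is expected to fulfil (the list-backed implementation is made up):
class _ListBeam(Beam):
    def __init__(self, rays):
        super().__init__()
        self._rays = list(rays)
    def get_number_of_rays(self):
        return len(self._rays)
    def get_rays(self):
        return list(self._rays)
    def get_ray(self, ray_index):
        return self._rays[ray_index]
    def duplicate(self):
        return _ListBeam(self._rays)
    def merge(self, other_beam):
        self._rays.extend(other_beam.get_rays())

_demo_beam = _ListBeam([1, 2, 3])
_demo_beam.merge(_demo_beam.duplicate())
assert _demo_beam.get_number_of_rays() == 6
# ---------------------------------------------------------------------------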
| 23.136364
| 55
| 0.675835
|
class Beam(object):
def __init__(self):
super().__init__()
def get_number_of_rays(self):
raise NotImplementedError("method is abstract")
def get_rays(self):
raise NotImplementedError("method is abstract")
def get_ray(self, ray_index):
raise NotImplementedError("method is abstract")
def duplicate(self):
raise NotImplementedError("method is abstract")
def merge(self, other_beam):
raise NotImplementedError("method is abstract")
| true
| true
|
79068cd9b4bc3d41f600987c9187b7dffd3031e7
| 12,477
|
py
|
Python
|
pxr/usd/bin/usddiff/usddiff.py
|
dalgos-adsk/USD
|
65320d266057ae0a68626217f54a0298ac092799
|
[
"AML"
] | 18
|
2017-10-28T22:37:48.000Z
|
2022-01-26T12:00:24.000Z
|
pxr/usd/bin/usddiff/usddiff.py
|
piscees/USD
|
65320d266057ae0a68626217f54a0298ac092799
|
[
"AML"
] | null | null | null |
pxr/usd/bin/usddiff/usddiff.py
|
piscees/USD
|
65320d266057ae0a68626217f54a0298ac092799
|
[
"AML"
] | 3
|
2019-12-03T16:46:14.000Z
|
2021-08-23T14:56:21.000Z
|
#!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import difflib, os, sys
from subprocess import call
import platform
isWindows = (platform.system() == 'Windows')
NO_DIFF_FOUND_EXIT_CODE = 0
DIFF_FOUND_EXIT_CODE = 1
ERROR_EXIT_CODE = 2
def _exit(msg, exitCode):
if msg is not None:
sys.stderr.write(msg + "\n")
sys.exit(exitCode)
# generates a command list representing a call which will generate
# a temporary ascii file used during diffing.
def _generateCatCommand(usdcatCmd, inPath, outPath, flatten=None, fmt=None):
command = [usdcatCmd, inPath, '--out', outPath]
if flatten:
command.append('--flatten')
if fmt and os.path.splitext(outPath)[1] == '.usd':
command.append('--usdFormat')
command.append(fmt)
return command
def _findExe(name):
from distutils.spawn import find_executable
cmd = find_executable(name)
if cmd:
return cmd
if isWindows:
# find_executable under Windows only returns *.EXE files
# so we need to traverse PATH.
for path in os.environ['PATH'].split(os.pathsep):
base = os.path.join(path, name)
# We need to test for name.cmd first because on Windows, the USD
# executables are wrapped due to lack of N*IX style shebang support
# on Windows.
for ext in ['.cmd', '']:
cmd = base + ext
if os.access(cmd, os.X_OK):
return cmd
return None
# looks up a suitable diff tool, and locates usdcat
def _findDiffTools():
usdcatCmd = _findExe("usdcat")
if not usdcatCmd:
_exit("Error: Could not find 'usdcat'. Expected it to be in PATH",
ERROR_EXIT_CODE)
# prefer USD_DIFF, then DIFF, else use the internal unified diff.
diffCmd = (os.environ.get('USD_DIFF') or os.environ.get('DIFF'))
if diffCmd and not _findExe(diffCmd):
_exit("Error: Failed to find diff tool %s." % (diffCmd, ),
ERROR_EXIT_CODE)
return (usdcatCmd, diffCmd)
def _getFileFormat(path):
from pxr import Sdf
# Note that python's os.path.splitext retains the '.' portion
# when obtaining an extension, but Sdf's Fileformat API doesn't
# expect one. We also make sure to prune out any version specifiers.
_, ext = os.path.splitext(path)
if len(ext) <= 1:
fileFormat = Sdf.FileFormat.FindByExtension('usd')
else:
prunedExtension = ext[1:]
versionSpecifierPos = prunedExtension.rfind('#')
if versionSpecifierPos != -1:
prunedExtension = prunedExtension[:versionSpecifierPos]
fileFormat = Sdf.FileFormat.FindByExtension(prunedExtension)
# Allow an empty file.
if fileFormat and (os.stat(path).st_size == 0 or fileFormat.CanRead(path)):
return fileFormat.formatId
return None
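# --- Illustrative sketch (not part of this script) --------------------------
# _getFileFormat above strips the leading '.' and a trailing '#'-style
# version specifier before the Sdf extension lookup. The pruning step in
# isolation, with made-up names and example paths:
import os as _os_demo

def _demo_prune_extension(path):
    _, ext = _os_demo.path.splitext(path)
    ext = ext[1:]                      # drop the leading '.'
    pos = ext.rfind('#')               # drop any version specifier
    return ext[:pos] if pos != -1 else ext

assert _demo_prune_extension("scene.usda") == "usda"
assert _demo_prune_extension("scene.usd#1") == "usd"
# ----------------------------------------------------------------------------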
def _convertTo(inPath, outPath, usdcatCmd, flatten=None, fmt=None):
# Just copy empty files -- we want something to diff against but
# the file isn't valid usd.
if os.stat(inPath).st_size == 0:
import shutil
try:
shutil.copy(inPath, outPath)
return 0
except:
return 1
else:
return call(_generateCatCommand(usdcatCmd, inPath, outPath, flatten, fmt))
def _tryEdit(fileName, tempFileName, usdcatCmd, fileType, flattened):
if flattened:
_exit('Error: Cannot write out flattened result.', ERROR_EXIT_CODE)
if not os.access(fileName, os.W_OK):
_exit('Error: Cannot write to %s, insufficient permissions' % fileName,
ERROR_EXIT_CODE)
return _convertTo(tempFileName, fileName, usdcatCmd, flatten=None, fmt=fileType)
def _runDiff(baseline, comparison, flatten, noeffect):
from pxr import Tf
diffResult = 0
usdcatCmd, diffCmd = _findDiffTools()
baselineFileType = _getFileFormat(baseline)
comparisonFileType = _getFileFormat(comparison)
pluginError = 'Error: Cannot find supported file format plugin for %s'
if baselineFileType is None:
_exit(pluginError % baseline, ERROR_EXIT_CODE)
if comparisonFileType is None:
_exit(pluginError % comparison, ERROR_EXIT_CODE)
# Generate recognizable suffixes for our files in the temp dir
# location of the form /temp/string__originalFileName.usda
# where originalFileName is the basename(no extension) of the original file.
# This allows users to tell which file is which when diffing.
tempBaselineFileName = ("__" +
os.path.splitext(os.path.basename(baseline))[0] + '.usda')
tempComparisonFileName = ("__" +
os.path.splitext(os.path.basename(comparison))[0] + '.usda')
with Tf.NamedTemporaryFile(suffix=tempBaselineFileName) as tempBaseline, \
Tf.NamedTemporaryFile(suffix=tempComparisonFileName) as tempComparison:
# Dump the contents of our files into the temporaries
convertError = 'Error: failed to convert from %s to %s.'
if _convertTo(baseline, tempBaseline.name, usdcatCmd,
flatten, fmt=None) != 0:
_exit(convertError % (baseline, tempBaseline.name),
ERROR_EXIT_CODE)
if _convertTo(comparison, tempComparison.name, usdcatCmd,
flatten, fmt=None) != 0:
_exit(convertError % (comparison, tempComparison.name),
ERROR_EXIT_CODE)
tempBaselineTimestamp = os.path.getmtime(tempBaseline.name)
tempComparisonTimestamp = os.path.getmtime(tempComparison.name)
if diffCmd:
# Run the external diff tool.
diffResult = call([diffCmd, tempBaseline.name, tempComparison.name])
else:
# Read the files.
with open(tempBaseline.name, "r") as f:
baselineData = f.readlines()
with open(tempComparison.name, "r") as f:
comparisonData = f.readlines()
# Generate unified diff and output if there are any differences.
diff = list(difflib.unified_diff(
baselineData, comparisonData,
tempBaseline.name, tempComparison.name, n=0))
if diff:
# Skip the file names.
for line in diff[2:]:
                    sys.stdout.write(line)
diffResult = 1
tempBaselineChanged = (
os.path.getmtime(tempBaseline.name) != tempBaselineTimestamp)
tempComparisonChanged = (
os.path.getmtime(tempComparison.name) != tempComparisonTimestamp)
# If we intend to edit either of the files
if not noeffect:
if tempBaselineChanged:
if _tryEdit(baseline, tempBaseline.name,
usdcatCmd, baselineFileType, flatten) != 0:
_exit(convertError % (baseline, tempBaseline.name),
ERROR_EXIT_CODE)
if tempComparisonChanged:
if _tryEdit(comparison, tempComparison.name,
usdcatCmd, comparisonFileType, flatten) != 0:
_exit(convertError % (comparison, tempComparison.name),
ERROR_EXIT_CODE)
return diffResult
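# --- Illustrative sketch (not part of this script) --------------------------
# The internal fallback above uses difflib.unified_diff with n=0 (no context
# lines) and then skips the two file-name entries. Its output shape on two
# tiny made-up line lists:
import difflib as _difflib_demo

_old = ["a\n", "b\n"]
_new = ["a\n", "c\n"]
_diff = list(_difflib_demo.unified_diff(_old, _new, "base", "comp", n=0))
# entries: '--- base\n', '+++ comp\n', a hunk header, then the changed lines
assert _diff[0].startswith("--- base")
assert "-b\n" in _diff and "+c\n" in _diff
# ----------------------------------------------------------------------------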
def _findFiles(args):
'''Return a 3-tuple of lists: (baseline-only, matching, comparison-only).
baseline-only and comparison-only are lists of individual files, while
matching is a list of corresponding pairs of files.'''
import os
join = os.path.join
basename = os.path.basename
exists = os.path.exists
def listFiles(dirpath):
ret = []
for root, _, files in os.walk(dirpath):
ret += [os.path.relpath(join(root, file), dirpath)
for file in files]
return set(ret)
# Must have FILE FILE, DIR DIR, DIR FILES... or FILES... DIR.
err = ValueError("Error: File arguments must be one of: "
"FILE FILE, DIR DIR, DIR FILES..., or FILES... DIR.")
if len(args) < 2:
raise err
for index, exist in enumerate(map(exists, args)):
if not exist:
raise ValueError("Error: %s does not exist" % args[index])
# DIR FILES...
if os.path.isdir(args[0]) and not any(map(os.path.isdir, args[1:])):
dirpath = args[0]
files = set(map(os.path.relpath, args[1:]))
dirfiles = listFiles(dirpath)
return ([],
[(join(dirpath, p), p) for p in files & dirfiles],
[p for p in files - dirfiles])
# FILES... DIR
elif not any(map(os.path.isdir, args[:-1])) and os.path.isdir(args[-1]):
dirpath = args[-1]
files = set(map(os.path.relpath, args[:-1]))
dirfiles = listFiles(dirpath)
return ([p for p in files - dirfiles],
[(p, join(dirpath, p)) for p in files & dirfiles],
[])
# FILE FILE or DIR DIR
elif len(args) == 2:
# DIR DIR
if all(map(os.path.isdir, args)):
ldir, rdir = args[0], args[1]
lhs, rhs = map(listFiles, args)
return (
# baseline only
sorted([join(ldir, p) for p in lhs - rhs]),
# corresponding
sorted([(join(ldir, p), join(rdir, p)) for p in lhs & rhs]),
# comparison only
sorted([join(rdir, p) for p in rhs - lhs]))
# FILE FILE
elif not any(map(os.path.isdir, args)):
return ([], [(args[0], args[1])], [])
raise err
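# Illustrative sketch (not part of the original tool) of the pairing contract
# described in the docstring above; the paths are hypothetical and must exist
# on disk for a real call to succeed:
#
#   _findFiles(['a.usda', 'b.usda'])                  # FILE FILE
#   # -> ([], [('a.usda', 'b.usda')], [])
#   _findFiles(['baselines', 'results'])              # DIR DIR
#   # -> (files only under baselines,
#   #     [(baselines/p, results/p) for relative paths p found in both],
#   #     files only under results)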
def main():
import argparse
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
description="Compares two usd-readable files using a selected"
" diff program. This is chosen by looking at the"
" $USD_DIFF environment variable. If this is unset,"
" it will consult the $DIFF environment variable. "
" Lastly, if neither of these is set, it will try"
" to use the canonical unix program, diff."
" This will relay the exit code of the selected"
" diff program.")
parser.add_argument('files', nargs='+',
help='The files to compare. These must be of the form '
'DIR DIR, FILE... DIR, DIR FILE... or FILE FILE. ')
parser.add_argument('-n', '--noeffect', action='store_true',
help='Do not edit either file.')
parser.add_argument('-f', '--flatten', action='store_true',
help='Fully compose both layers as Usd Stages and '
'flatten into single layers.')
results = parser.parse_args()
diffResult = NO_DIFF_FOUND_EXIT_CODE
try:
baselineOnly, common, comparisonOnly = _findFiles(results.files)
for (baseline, comparison) in common:
if _runDiff(baseline, comparison,
results.flatten, results.noeffect):
diffResult = DIFF_FOUND_EXIT_CODE
mismatchMsg = 'No corresponding file found for %s, skipping.'
for b in baselineOnly:
print mismatchMsg % b
for c in comparisonOnly:
print mismatchMsg % c
except ValueError as err:
_exit(str(err), ERROR_EXIT_CODE)
_exit(None, diffResult)
if __name__ == "__main__":
main()
| 38.869159
| 84
| 0.612647
|
import difflib, os, sys
from subprocess import call
import platform
isWindows = (platform.system() == 'Windows')
NO_DIFF_FOUND_EXIT_CODE = 0
DIFF_FOUND_EXIT_CODE = 1
ERROR_EXIT_CODE = 2
def _exit(msg, exitCode):
if msg is not None:
sys.stderr.write(msg + "\n")
sys.exit(exitCode)
def _generateCatCommand(usdcatCmd, inPath, outPath, flatten=None, fmt=None):
command = [usdcatCmd, inPath, '--out', outPath]
if flatten:
command.append('--flatten')
if fmt and os.path.splitext(outPath)[1] == '.usd':
command.append('--usdFormat')
command.append(fmt)
return command
def _findExe(name):
from distutils.spawn import find_executable
cmd = find_executable(name)
if cmd:
return cmd
if isWindows:
for path in os.environ['PATH'].split(os.pathsep):
base = os.path.join(path, name)
for ext in ['.cmd', '']:
cmd = base + ext
if os.access(cmd, os.X_OK):
return cmd
return None
def _findDiffTools():
usdcatCmd = _findExe("usdcat")
if not usdcatCmd:
_exit("Error: Could not find 'usdcat'. Expected it to be in PATH",
ERROR_EXIT_CODE)
diffCmd = (os.environ.get('USD_DIFF') or os.environ.get('DIFF'))
if diffCmd and not _findExe(diffCmd):
_exit("Error: Failed to find diff tool %s." % (diffCmd, ),
ERROR_EXIT_CODE)
return (usdcatCmd, diffCmd)
def _getFileFormat(path):
from pxr import Sdf
    # Note that python's os.path.splitext retains the '.' in the extension
    # when obtaining an extension, but Sdf's FileFormat API doesn't
    # expect one. We also make sure to prune out any version specifiers.
_, ext = os.path.splitext(path)
if len(ext) <= 1:
fileFormat = Sdf.FileFormat.FindByExtension('usd')
else:
prunedExtension = ext[1:]
        versionSpecifierPos = prunedExtension.rfind('#')
if versionSpecifierPos != -1:
prunedExtension = prunedExtension[:versionSpecifierPos]
fileFormat = Sdf.FileFormat.FindByExtension(prunedExtension)
# Allow an empty file.
if fileFormat and (os.stat(path).st_size == 0 or fileFormat.CanRead(path)):
return fileFormat.formatId
return None
def _convertTo(inPath, outPath, usdcatCmd, flatten=None, fmt=None):
# Just copy empty files -- we want something to diff against but
# the file isn't valid usd.
if os.stat(inPath).st_size == 0:
import shutil
try:
shutil.copy(inPath, outPath)
return 0
except:
return 1
else:
return call(_generateCatCommand(usdcatCmd, inPath, outPath, flatten, fmt))
def _tryEdit(fileName, tempFileName, usdcatCmd, fileType, flattened):
if flattened:
_exit('Error: Cannot write out flattened result.', ERROR_EXIT_CODE)
if not os.access(fileName, os.W_OK):
_exit('Error: Cannot write to %s, insufficient permissions' % fileName,
ERROR_EXIT_CODE)
return _convertTo(tempFileName, fileName, usdcatCmd, flatten=None, fmt=fileType)
def _runDiff(baseline, comparison, flatten, noeffect):
from pxr import Tf
diffResult = 0
usdcatCmd, diffCmd = _findDiffTools()
baselineFileType = _getFileFormat(baseline)
comparisonFileType = _getFileFormat(comparison)
pluginError = 'Error: Cannot find supported file format plugin for %s'
if baselineFileType is None:
_exit(pluginError % baseline, ERROR_EXIT_CODE)
if comparisonFileType is None:
_exit(pluginError % comparison, ERROR_EXIT_CODE)
tempBaselineFileName = ("__" +
os.path.splitext(os.path.basename(baseline))[0] + '.usda')
tempComparisonFileName = ("__" +
os.path.splitext(os.path.basename(comparison))[0] + '.usda')
with Tf.NamedTemporaryFile(suffix=tempBaselineFileName) as tempBaseline, \
Tf.NamedTemporaryFile(suffix=tempComparisonFileName) as tempComparison:
convertError = 'Error: failed to convert from %s to %s.'
if _convertTo(baseline, tempBaseline.name, usdcatCmd,
flatten, fmt=None) != 0:
_exit(convertError % (baseline, tempBaseline.name),
ERROR_EXIT_CODE)
if _convertTo(comparison, tempComparison.name, usdcatCmd,
flatten, fmt=None) != 0:
_exit(convertError % (comparison, tempComparison.name),
ERROR_EXIT_CODE)
tempBaselineTimestamp = os.path.getmtime(tempBaseline.name)
tempComparisonTimestamp = os.path.getmtime(tempComparison.name)
if diffCmd:
diffResult = call([diffCmd, tempBaseline.name, tempComparison.name])
else:
with open(tempBaseline.name, "r") as f:
baselineData = f.readlines()
with open(tempComparison.name, "r") as f:
comparisonData = f.readlines()
diff = list(difflib.unified_diff(
baselineData, comparisonData,
tempBaseline.name, tempComparison.name, n=0))
if diff:
for line in diff[2:]:
print line,
diffResult = 1
tempBaselineChanged = (
os.path.getmtime(tempBaseline.name) != tempBaselineTimestamp)
tempComparisonChanged = (
os.path.getmtime(tempComparison.name) != tempComparisonTimestamp)
if not noeffect:
if tempBaselineChanged:
if _tryEdit(baseline, tempBaseline.name,
usdcatCmd, baselineFileType, flatten) != 0:
_exit(convertError % (baseline, tempBaseline.name),
ERROR_EXIT_CODE)
if tempComparisonChanged:
if _tryEdit(comparison, tempComparison.name,
usdcatCmd, comparisonFileType, flatten) != 0:
_exit(convertError % (comparison, tempComparison.name),
ERROR_EXIT_CODE)
return diffResult
def _findFiles(args):
'''Return a 3-tuple of lists: (baseline-only, matching, comparison-only).
baseline-only and comparison-only are lists of individual files, while
matching is a list of corresponding pairs of files.'''
import os
join = os.path.join
basename = os.path.basename
exists = os.path.exists
def listFiles(dirpath):
ret = []
for root, _, files in os.walk(dirpath):
ret += [os.path.relpath(join(root, file), dirpath)
for file in files]
return set(ret)
err = ValueError("Error: File arguments must be one of: "
"FILE FILE, DIR DIR, DIR FILES..., or FILES... DIR.")
if len(args) < 2:
raise err
for index, exist in enumerate(map(exists, args)):
if not exist:
raise ValueError("Error: %s does not exist" % args[index])
if os.path.isdir(args[0]) and not any(map(os.path.isdir, args[1:])):
dirpath = args[0]
files = set(map(os.path.relpath, args[1:]))
dirfiles = listFiles(dirpath)
return ([],
[(join(dirpath, p), p) for p in files & dirfiles],
[p for p in files - dirfiles])
elif not any(map(os.path.isdir, args[:-1])) and os.path.isdir(args[-1]):
dirpath = args[-1]
files = set(map(os.path.relpath, args[:-1]))
dirfiles = listFiles(dirpath)
return ([p for p in files - dirfiles],
[(p, join(dirpath, p)) for p in files & dirfiles],
[])
elif len(args) == 2:
if all(map(os.path.isdir, args)):
ldir, rdir = args[0], args[1]
lhs, rhs = map(listFiles, args)
return (
sorted([join(ldir, p) for p in lhs - rhs]),
sorted([(join(ldir, p), join(rdir, p)) for p in lhs & rhs]),
sorted([join(rdir, p) for p in rhs - lhs]))
elif not any(map(os.path.isdir, args)):
return ([], [(args[0], args[1])], [])
raise err
def main():
import argparse
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
description="Compares two usd-readable files using a selected"
" diff program. This is chosen by looking at the"
" $USD_DIFF environment variable. If this is unset,"
" it will consult the $DIFF environment variable. "
" Lastly, if neither of these is set, it will try"
" to use the canonical unix program, diff."
" This will relay the exit code of the selected"
" diff program.")
parser.add_argument('files', nargs='+',
help='The files to compare. These must be of the form '
'DIR DIR, FILE... DIR, DIR FILE... or FILE FILE. ')
parser.add_argument('-n', '--noeffect', action='store_true',
help='Do not edit either file.')
parser.add_argument('-f', '--flatten', action='store_true',
help='Fully compose both layers as Usd Stages and '
'flatten into single layers.')
results = parser.parse_args()
diffResult = NO_DIFF_FOUND_EXIT_CODE
try:
baselineOnly, common, comparisonOnly = _findFiles(results.files)
for (baseline, comparison) in common:
if _runDiff(baseline, comparison,
results.flatten, results.noeffect):
diffResult = DIFF_FOUND_EXIT_CODE
mismatchMsg = 'No corresponding file found for %s, skipping.'
for b in baselineOnly:
print mismatchMsg % b
for c in comparisonOnly:
print mismatchMsg % c
except ValueError as err:
_exit(str(err), ERROR_EXIT_CODE)
_exit(None, diffResult)
if __name__ == "__main__":
main()
| false
| true
|
79068d0ac794773135d67c8a1ee07008a27d37f7
| 4,277
|
py
|
Python
|
tests/unit/models/test_table_source.py
|
duddlf23/amundsendatabuilder
|
3e281373bfa8989c7a489dcf5b8c67a9f1ac38f1
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/models/test_table_source.py
|
duddlf23/amundsendatabuilder
|
3e281373bfa8989c7a489dcf5b8c67a9f1ac38f1
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/models/test_table_source.py
|
duddlf23/amundsendatabuilder
|
3e281373bfa8989c7a489dcf5b8c67a9f1ac38f1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import unittest
from unittest.mock import ANY
from databuilder.models.graph_serializable import (
RELATION_END_KEY, RELATION_END_LABEL, RELATION_REVERSE_TYPE, RELATION_START_KEY, RELATION_START_LABEL,
RELATION_TYPE,
)
from databuilder.models.table_source import TableSource
from databuilder.serializers import neo4_serializer, neptune_serializer
from databuilder.serializers.neptune_serializer import (
NEPTUNE_CREATION_TYPE_JOB, NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT, NEPTUNE_HEADER_ID,
NEPTUNE_HEADER_LABEL, NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT,
NEPTUNE_RELATIONSHIP_HEADER_FROM, NEPTUNE_RELATIONSHIP_HEADER_TO,
)
DB = 'hive'
SCHEMA = 'base'
TABLE = 'test'
CLUSTER = 'default'
SOURCE = '/etl/sql/file.py'
class TestTableSource(unittest.TestCase):
def setUp(self) -> None:
super(TestTableSource, self).setUp()
self.table_source = TableSource(db_name='hive',
schema=SCHEMA,
table_name=TABLE,
cluster=CLUSTER,
source=SOURCE)
self.start_key = f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}/_source'
self.end_key = f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}'
def test_get_source_model_key(self) -> None:
source = self.table_source.get_source_model_key()
self.assertEqual(source, f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}/_source')
def test_get_metadata_model_key(self) -> None:
metadata = self.table_source.get_metadata_model_key()
self.assertEqual(metadata, 'hive://default.base/test')
def test_create_nodes(self) -> None:
nodes = self.table_source.create_nodes()
self.assertEqual(len(nodes), 1)
def test_create_relation(self) -> None:
relations = self.table_source.create_relation()
        self.assertEqual(len(relations), 1)
serialized_relation = neo4_serializer.serialize_relationship(relations[0])
expected_relation = {
RELATION_START_KEY: self.start_key,
RELATION_START_LABEL: TableSource.LABEL,
RELATION_END_KEY: self.end_key,
RELATION_END_LABEL: 'Table',
RELATION_TYPE: TableSource.SOURCE_TABLE_RELATION_TYPE,
RELATION_REVERSE_TYPE: TableSource.TABLE_SOURCE_RELATION_TYPE
}
self.assertDictEqual(expected_relation, serialized_relation)
def test_create_relation_neptune(self) -> None:
relations = self.table_source.create_relation()
serialized_relations = neptune_serializer.convert_relationship(relations[0])
expected = [
{
NEPTUNE_HEADER_ID: "{from_vertex_id}_{to_vertex_id}_{label}".format(
from_vertex_id=self.start_key,
to_vertex_id=self.end_key,
label=TableSource.SOURCE_TABLE_RELATION_TYPE
),
NEPTUNE_RELATIONSHIP_HEADER_FROM: self.start_key,
NEPTUNE_RELATIONSHIP_HEADER_TO: self.end_key,
NEPTUNE_HEADER_LABEL: TableSource.SOURCE_TABLE_RELATION_TYPE,
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
},
{
NEPTUNE_HEADER_ID: "{from_vertex_id}_{to_vertex_id}_{label}".format(
from_vertex_id=self.end_key,
to_vertex_id=self.start_key,
label=TableSource.TABLE_SOURCE_RELATION_TYPE
),
NEPTUNE_RELATIONSHIP_HEADER_FROM: self.end_key,
NEPTUNE_RELATIONSHIP_HEADER_TO: self.start_key,
NEPTUNE_HEADER_LABEL: TableSource.TABLE_SOURCE_RELATION_TYPE,
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
}
]
self.assertListEqual(expected, serialized_relations)
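# Illustrative note (not part of the test module): the node keys exercised
# above follow the pattern
#
#   source_key = f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}/_source'
#   metadata_key = f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}'
#   # e.g. 'hive://default.base/test/_source' and 'hive://default.base/test'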
| 43.20202
| 118
| 0.679448
|
import unittest
from unittest.mock import ANY
from databuilder.models.graph_serializable import (
RELATION_END_KEY, RELATION_END_LABEL, RELATION_REVERSE_TYPE, RELATION_START_KEY, RELATION_START_LABEL,
RELATION_TYPE,
)
from databuilder.models.table_source import TableSource
from databuilder.serializers import neo4_serializer, neptune_serializer
from databuilder.serializers.neptune_serializer import (
NEPTUNE_CREATION_TYPE_JOB, NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT, NEPTUNE_HEADER_ID,
NEPTUNE_HEADER_LABEL, NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT,
NEPTUNE_RELATIONSHIP_HEADER_FROM, NEPTUNE_RELATIONSHIP_HEADER_TO,
)
DB = 'hive'
SCHEMA = 'base'
TABLE = 'test'
CLUSTER = 'default'
SOURCE = '/etl/sql/file.py'
class TestTableSource(unittest.TestCase):
def setUp(self) -> None:
super(TestTableSource, self).setUp()
self.table_source = TableSource(db_name='hive',
schema=SCHEMA,
table_name=TABLE,
cluster=CLUSTER,
source=SOURCE)
self.start_key = f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}/_source'
self.end_key = f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}'
def test_get_source_model_key(self) -> None:
source = self.table_source.get_source_model_key()
self.assertEqual(source, f'{DB}://{CLUSTER}.{SCHEMA}/{TABLE}/_source')
def test_get_metadata_model_key(self) -> None:
metadata = self.table_source.get_metadata_model_key()
self.assertEqual(metadata, 'hive://default.base/test')
def test_create_nodes(self) -> None:
nodes = self.table_source.create_nodes()
self.assertEqual(len(nodes), 1)
def test_create_relation(self) -> None:
relations = self.table_source.create_relation()
        self.assertEqual(len(relations), 1)
serialized_relation = neo4_serializer.serialize_relationship(relations[0])
expected_relation = {
RELATION_START_KEY: self.start_key,
RELATION_START_LABEL: TableSource.LABEL,
RELATION_END_KEY: self.end_key,
RELATION_END_LABEL: 'Table',
RELATION_TYPE: TableSource.SOURCE_TABLE_RELATION_TYPE,
RELATION_REVERSE_TYPE: TableSource.TABLE_SOURCE_RELATION_TYPE
}
self.assertDictEqual(expected_relation, serialized_relation)
def test_create_relation_neptune(self) -> None:
relations = self.table_source.create_relation()
serialized_relations = neptune_serializer.convert_relationship(relations[0])
expected = [
{
NEPTUNE_HEADER_ID: "{from_vertex_id}_{to_vertex_id}_{label}".format(
from_vertex_id=self.start_key,
to_vertex_id=self.end_key,
label=TableSource.SOURCE_TABLE_RELATION_TYPE
),
NEPTUNE_RELATIONSHIP_HEADER_FROM: self.start_key,
NEPTUNE_RELATIONSHIP_HEADER_TO: self.end_key,
NEPTUNE_HEADER_LABEL: TableSource.SOURCE_TABLE_RELATION_TYPE,
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
},
{
NEPTUNE_HEADER_ID: "{from_vertex_id}_{to_vertex_id}_{label}".format(
from_vertex_id=self.end_key,
to_vertex_id=self.start_key,
label=TableSource.TABLE_SOURCE_RELATION_TYPE
),
NEPTUNE_RELATIONSHIP_HEADER_FROM: self.end_key,
NEPTUNE_RELATIONSHIP_HEADER_TO: self.start_key,
NEPTUNE_HEADER_LABEL: TableSource.TABLE_SOURCE_RELATION_TYPE,
NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
}
]
self.assertListEqual(expected, serialized_relations)
| true
| true
|
79068d8ba921fb7381c1dec471a296e0346d4f52
| 15,592
|
py
|
Python
|
train_and_eval/trainer.py
|
alibalapour/HATNet
|
9dc4a2203bf51c1d834e20500153402968bcf54e
|
[
"MIT"
] | 25
|
2020-07-26T12:26:26.000Z
|
2022-01-24T11:06:28.000Z
|
train_and_eval/trainer.py
|
alibalapour/HATNet
|
9dc4a2203bf51c1d834e20500153402968bcf54e
|
[
"MIT"
] | 1
|
2021-11-29T09:54:17.000Z
|
2022-01-08T13:40:40.000Z
|
train_and_eval/trainer.py
|
sacmehta/HATNet
|
c4e50746f68140068bae75a6b07525046255d0b5
|
[
"MIT"
] | 9
|
2020-11-18T18:38:21.000Z
|
2021-12-11T01:31:50.000Z
|
# ============================================
__author__ = "Sachin Mehta and Ximing Lu"
__maintainer__ = "Sachin Mehta and Ximing Lu"
# ============================================
import torch
from utilities.print_utilities import *
import os
from utilities.lr_scheduler import get_lr_scheduler
from metrics.metric_utils import accuracy
from metrics.statistics import Statistics
import gc
from utilities.utils import save_checkpoint, load_checkpoint, save_arguments
from utilities.build_dataloader import get_data_loader
from utilities.build_model import build_model
from utilities.build_optimizer import build_optimizer, update_optimizer, read_lr_from_optimzier
from utilities.build_criteria import build_criteria
import numpy as np
import math
import json
from utilities.save_dict_to_file import DictWriter
from train_and_eval.train_utils import prediction
class Trainer(object):
    '''This class implements the training and validation functionality for training an ML model for medical imaging'''
def __init__(self, opts):
super(Trainer, self).__init__()
self.opts = opts
self.best_acc = 0
self.start_epoch = 0
# maximum batch size for CNN on single GPU
self.max_bsz_cnn_gpu0 = opts.max_bsz_cnn_gpu0
self.resume = self.opts.checkpoint if self.opts.checkpoint is not None and os.path.isdir(
self.opts.checkpoint) else None
self.global_setter()
def global_setter(self):
self.setup_device()
self.setup_directories()
self.setup_logger()
self.setup_lr_scheduler()
self.setup_dataloader()
self.setup_model_optimizer_lossfn()
def setup_directories(self):
if not os.path.isdir(self.opts.savedir):
os.makedirs(self.opts.savedir)
def setup_device(self):
num_gpus = torch.cuda.device_count()
self.num_gpus = num_gpus
if num_gpus > 0:
print_log_message('Using {} GPUs'.format(num_gpus))
else:
print_log_message('Using CPU')
self.device = torch.device("cuda:0" if num_gpus > 0 else "cpu")
self.use_multi_gpu = True if num_gpus > 1 else False
if torch.backends.cudnn.is_available():
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
cudnn.deterministic = True
def setup_logger(self):
# Let's visualize logs on tensorboard. It's awesome
try:
from torch.utils.tensorboard import SummaryWriter
except:
from utilities.summary_writer import SummaryWriter
self.logger = SummaryWriter(log_dir=self.opts.savedir, comment='Training and Validation logs')
def setup_lr_scheduler(self):
# fetch learning rate scheduler
self.lr_scheduler = get_lr_scheduler(self.opts)
def setup_dataloader(self):
from model.base_feature_extractor import BaseFeatureExtractor
base_feature_extractor = BaseFeatureExtractor(opts=self.opts)
base_feature_extractor = base_feature_extractor.to(device=self.device)
# We do not want the base extractor to train, so setting it to eval mode
if self.use_multi_gpu:
base_feature_extractor = torch.nn.DataParallel(base_feature_extractor)
self.base_feature_extractor = base_feature_extractor
self.base_feature_extractor.eval()
# sanity check
if self.base_feature_extractor.training:
print_warning_message('Base feature extractor is in training mode. Moving to evaluation mode')
self.base_feature_extractor.eval()
train_loader, val_loader, diag_classes, class_weights = get_data_loader(opts=self.opts)
self.train_loader = train_loader
self.val_loader = val_loader
self.diag_classes = diag_classes
self.class_weights = torch.from_numpy(class_weights)
def setup_model_optimizer_lossfn(self):
# Build Model
odim = self.base_feature_extractor.module.output_feature_sz if self.use_multi_gpu else self.base_feature_extractor.output_feature_sz
mi_model = build_model(opts=self.opts,
diag_classes=self.diag_classes,
base_feature_odim=odim
)
if self.resume is not None:
resume_ep, resume_model_state, resume_optim_state, resume_perf = load_checkpoint(
checkpoint_dir=self.opts.checkpoint,
device=self.device)
self.start_epoch = resume_ep
self.best_acc = resume_perf
            # load the model weights here; the optimizer state is restored
            # below, once the optimizer has actually been built
            mi_model.load_state_dict(resume_model_state)
            print_log_message('Resuming from checkpoint saved at {}th epoch'.format(self.start_epoch))
mi_model = mi_model.to(device=self.device)
if self.use_multi_gpu:
mi_model = torch.nn.DataParallel(mi_model)
self.mi_model = mi_model
# Build Loss function
criteria = build_criteria(opts=self.opts, class_weights=self.class_weights.float())
self.criteria = criteria.to(device=self.device)
# Build optimizer
        self.optimizer = build_optimizer(model=self.mi_model, opts=self.opts)
        if self.resume is not None:
            # restore the optimizer state now that the optimizer exists
            self.optimizer.load_state_dict(resume_optim_state)
            # move optimizer state to the device
            for state in self.optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(device=self.device)
def training(self, epoch, lr, *args, **kwargs):
train_stats = Statistics()
self.mi_model.train()
self.optimizer.zero_grad()
num_samples = len(self.train_loader)
epoch_start_time = time.time()
for batch_id, batch in enumerate(self.train_loader):
words, true_diag_labels = batch
true_diag_labels = true_diag_labels.to(device=self.device)
# prediction
pred_diag_labels = prediction(
words=words,
cnn_model=self.base_feature_extractor,
mi_model=self.mi_model,
max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0,
num_gpus=self.num_gpus,
device=self.device
)
# compute loss
loss = self.criteria(pred_diag_labels, true_diag_labels)
# compute metrics
top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
loss.backward()
# Gradient accumulation is useful, when batch size is very small say 1
# Gradients will be accumulated for accum_count iterations
# After accum_count iterations, weights are updated and graph is freed.
if (batch_id + 1) % self.opts.accum_count == 0 or batch_id + 1 == len(self.train_loader):
self.optimizer.step()
self.optimizer.zero_grad()
train_stats.update(loss=loss.item(), acc=top1_acc[0].item())
            if batch_id % self.opts.log_interval == 0 and batch_id > 0:  # print after every log_interval batches
train_stats.output(epoch=epoch, batch=batch_id, n_batches=num_samples, start=epoch_start_time, lr=lr)
return train_stats.avg_acc(), train_stats.avg_loss()
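    # Standalone sketch of the gradient-accumulation pattern used in
    # training() above (illustrative names; a common variant also divides the
    # loss by accum_count so the effective step size stays comparable):
    #
    #   optimizer.zero_grad()
    #   for i, (x, y) in enumerate(loader):
    #       criterion(model(x), y).backward()    # gradients accumulate
    #       if (i + 1) % accum_count == 0 or i + 1 == len(loader):
    #           optimizer.step()                 # update once per accum_count
    #           optimizer.zero_grad()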
def warm_up(self, *args, **kwargs):
self.mi_model.train()
num_samples = len(self.train_loader)
warm_up_iterations = int(math.ceil((self.opts.warm_up_iterations * 1.0) / num_samples) * num_samples)
print_info_message('Warming Up')
print_log_message(
'LR will linearly change from {} to {} in about {} steps'.format(self.opts.warm_up_min_lr, self.opts.lr,
warm_up_iterations))
        lr_list = np.linspace(self.opts.warm_up_min_lr, self.opts.lr, warm_up_iterations)
epoch_start_time = time.time()
iteration = -1
while iteration < warm_up_iterations:
warm_up_stats = Statistics()
for batch_id, batch in enumerate(self.train_loader):
if iteration >= warm_up_iterations:
break
iteration += 1
try:
lr_iter = lr_list[iteration]
except:
                    # fall back to the final LR if iteration falls outside the lr_list range
lr_iter = self.opts.lr
# update learning rate at every iteration
self.optimizer = update_optimizer(optimizer=self.optimizer, lr_value=lr_iter)
words, true_diag_labels = batch
true_diag_labels = true_diag_labels.to(device=self.device)
# prediction
pred_diag_labels = prediction(
words=words,
cnn_model=self.base_feature_extractor,
mi_model=self.mi_model,
max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0,
num_gpus=self.num_gpus,
device=self.device
)
# compute loss
loss = self.criteria(pred_diag_labels, true_diag_labels)
# compute metrics
top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
loss.backward()
# Gradient accumulation is useful, when batch size is very small say 1
# Gradients will be accumulated for accum_count iterations
# After accum_count iterations, weights are updated and graph is freed.
if (batch_id + 1) % self.opts.accum_count == 0 or batch_id + 1 == len(self.train_loader):
self.optimizer.step()
self.optimizer.zero_grad()
warm_up_stats.update(loss=loss.item(), acc=top1_acc[0].item())
                if batch_id % self.opts.log_interval == 0 and batch_id > 0:  # print after every log_interval batches
warm_up_stats.output(epoch=-1, batch=iteration, n_batches=warm_up_iterations,
start=epoch_start_time,
lr=lr_iter)
gc.collect()
print_log_message('Warming Up... Done!!!')
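    # Sketch of the linear warm-up schedule built above (the example values
    # are hypothetical):
    #
    #   np.linspace(1e-7, 0.005, 5)
    #   # -> roughly array([1.0e-07, 1.25e-03, 2.5e-03, 3.75e-03, 5.0e-03]);
    #   # one value is consumed per iteration, then training settles at opts.lr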
def validation(self, epoch, lr, *args, **kwargs):
val_stats = Statistics()
self.mi_model.eval()
num_samples = len(self.val_loader)
with torch.no_grad():
epoch_start_time = time.time()
for batch_id, batch in enumerate(self.val_loader):
# bags, bag_hist_arr, words, word_hist_arr, true_diag_labels = batch
words, true_diag_labels = batch
true_diag_labels = true_diag_labels.to(device=self.device)
# prediction
pred_diag_labels = prediction(
words=words,
cnn_model=self.base_feature_extractor,
mi_model=self.mi_model,
max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0,
num_gpus=self.num_gpus,
device=self.device
)
# compute loss
loss = self.criteria(pred_diag_labels, true_diag_labels)
# compute metrics
top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
val_stats.update(loss=loss.item(), acc=top1_acc[0].item())
                if batch_id % self.opts.log_interval == 0 and batch_id > 0:  # print after every log_interval batches
val_stats.output(epoch=epoch, batch=batch_id, n_batches=num_samples, start=epoch_start_time, lr=lr)
gc.collect()
avg_acc = val_stats.avg_acc()
avg_loss = val_stats.avg_loss()
print_log_message('* Validation Stats')
print_log_message('* Loss: {:5.2f}, Mean Acc: {:3.2f}'.format(avg_loss, avg_acc))
return avg_acc, avg_loss
def run(self, *args, **kwargs):
kwargs['need_attn'] = False
if self.opts.warm_up:
self.warm_up(args=args, kwargs=kwargs)
if self.resume is not None:
# find the LR value
for epoch in range(self.start_epoch):
self.lr_scheduler.step(epoch)
eval_stats_dict = dict()
for epoch in range(self.start_epoch, self.opts.epochs):
epoch_lr = self.lr_scheduler.step(epoch)
self.optimizer = update_optimizer(optimizer=self.optimizer, lr_value=epoch_lr)
            # Uncomment the line below to check that the optimizer's LR is updated correctly
# assert read_lr_from_optimzier(self.optimizer) == epoch_lr
train_acc, train_loss = self.training(epoch=epoch, lr=epoch_lr, args=args, kwargs=kwargs)
val_acc, val_loss = self.validation(epoch=epoch, lr=epoch_lr, args=args, kwargs=kwargs)
eval_stats_dict[epoch] = val_acc
gc.collect()
# remember best accuracy and save checkpoint for best model
is_best = val_acc >= self.best_acc
self.best_acc = max(val_acc, self.best_acc)
model_state = self.mi_model.module.state_dict() if isinstance(self.mi_model, torch.nn.DataParallel) \
else self.mi_model.state_dict()
optimizer_state = self.optimizer.state_dict()
save_checkpoint(epoch=epoch,
model_state=model_state,
optimizer_state=optimizer_state,
best_perf=self.best_acc,
save_dir=self.opts.savedir,
is_best=is_best,
keep_best_k_models=self.opts.keep_best_k_models
)
self.logger.add_scalar('LR', round(epoch_lr, 6), epoch)
self.logger.add_scalar('TrainingLoss', train_loss, epoch)
self.logger.add_scalar('TrainingAcc', train_acc, epoch)
self.logger.add_scalar('ValidationLoss', val_loss, epoch)
self.logger.add_scalar('ValidationAcc', val_acc, epoch)
        # dump the validation epoch ids and accuracies so that they can be used for filtering later on
eval_stats_dict_sort = {k: v for k, v in sorted(eval_stats_dict.items(),
key=lambda item: item[1],
reverse=True
)}
eval_stats_fname = '{}/val_stats_bag_{}_word_{}_{}_{}'.format(
self.opts.savedir,
self.opts.bag_size,
self.opts.word_size,
self.opts.attn_fn,
self.opts.attn_type,
)
writer = DictWriter(file_name=eval_stats_fname, format='json')
# if json file does not exist
if not os.path.isfile(eval_stats_fname):
writer.write(data_dict=eval_stats_dict_sort)
else:
with open(eval_stats_fname, 'r') as json_file:
eval_stats_dict_old = json.load(json_file)
eval_stats_dict_old.update(eval_stats_dict_sort)
eval_stats_dict_updated = {k: v for k, v in sorted(eval_stats_dict_old.items(),
key=lambda item: item[1],
reverse=True
)}
writer.write(data_dict=eval_stats_dict_updated)
self.logger.close()
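    # Toy sketch of the merge-and-sort performed above (keys are epoch ids,
    # values are validation accuracies; the numbers are made up):
    #
    #   old = {'0': 71.2, '1': 74.5}
    #   new = {'1': 74.5, '2': 76.1}
    #   old.update(new)
    #   sorted(old.items(), key=lambda kv: kv[1], reverse=True)
    #   # -> [('2', 76.1), ('1', 74.5), ('0', 71.2)]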
| 41.248677
| 140
| 0.602745
|
__author__ = "Sachin Mehta and Ximing Lu"
__maintainer__ = "Sachin Mehta and Ximing Lu"
import torch
from utilities.print_utilities import *
import os
from utilities.lr_scheduler import get_lr_scheduler
from metrics.metric_utils import accuracy
from metrics.statistics import Statistics
import gc
from utilities.utils import save_checkpoint, load_checkpoint, save_arguments
from utilities.build_dataloader import get_data_loader
from utilities.build_model import build_model
from utilities.build_optimizer import build_optimizer, update_optimizer, read_lr_from_optimzier
from utilities.build_criteria import build_criteria
import numpy as np
import math
import json
from utilities.save_dict_to_file import DictWriter
from train_and_eval.train_utils import prediction
class Trainer(object):
def __init__(self, opts):
super(Trainer, self).__init__()
self.opts = opts
self.best_acc = 0
self.start_epoch = 0
self.max_bsz_cnn_gpu0 = opts.max_bsz_cnn_gpu0
self.resume = self.opts.checkpoint if self.opts.checkpoint is not None and os.path.isdir(
self.opts.checkpoint) else None
self.global_setter()
def global_setter(self):
self.setup_device()
self.setup_directories()
self.setup_logger()
self.setup_lr_scheduler()
self.setup_dataloader()
self.setup_model_optimizer_lossfn()
def setup_directories(self):
if not os.path.isdir(self.opts.savedir):
os.makedirs(self.opts.savedir)
def setup_device(self):
num_gpus = torch.cuda.device_count()
self.num_gpus = num_gpus
if num_gpus > 0:
print_log_message('Using {} GPUs'.format(num_gpus))
else:
print_log_message('Using CPU')
self.device = torch.device("cuda:0" if num_gpus > 0 else "cpu")
self.use_multi_gpu = True if num_gpus > 1 else False
if torch.backends.cudnn.is_available():
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
cudnn.deterministic = True
def setup_logger(self):
try:
from torch.utils.tensorboard import SummaryWriter
except:
from utilities.summary_writer import SummaryWriter
self.logger = SummaryWriter(log_dir=self.opts.savedir, comment='Training and Validation logs')
def setup_lr_scheduler(self):
self.lr_scheduler = get_lr_scheduler(self.opts)
def setup_dataloader(self):
from model.base_feature_extractor import BaseFeatureExtractor
base_feature_extractor = BaseFeatureExtractor(opts=self.opts)
base_feature_extractor = base_feature_extractor.to(device=self.device)
if self.use_multi_gpu:
base_feature_extractor = torch.nn.DataParallel(base_feature_extractor)
self.base_feature_extractor = base_feature_extractor
self.base_feature_extractor.eval()
if self.base_feature_extractor.training:
print_warning_message('Base feature extractor is in training mode. Moving to evaluation mode')
self.base_feature_extractor.eval()
train_loader, val_loader, diag_classes, class_weights = get_data_loader(opts=self.opts)
self.train_loader = train_loader
self.val_loader = val_loader
self.diag_classes = diag_classes
self.class_weights = torch.from_numpy(class_weights)
def setup_model_optimizer_lossfn(self):
odim = self.base_feature_extractor.module.output_feature_sz if self.use_multi_gpu else self.base_feature_extractor.output_feature_sz
mi_model = build_model(opts=self.opts,
diag_classes=self.diag_classes,
base_feature_odim=odim
)
if self.resume is not None:
resume_ep, resume_model_state, resume_optim_state, resume_perf = load_checkpoint(
checkpoint_dir=self.opts.checkpoint,
device=self.device)
self.start_epoch = resume_ep
self.best_acc = resume_perf
            mi_model.load_state_dict(resume_model_state)
            print_log_message('Resuming from checkpoint saved at {}th epoch'.format(self.start_epoch))
mi_model = mi_model.to(device=self.device)
if self.use_multi_gpu:
mi_model = torch.nn.DataParallel(mi_model)
self.mi_model = mi_model
criteria = build_criteria(opts=self.opts, class_weights=self.class_weights.float())
self.criteria = criteria.to(device=self.device)
        self.optimizer = build_optimizer(model=self.mi_model, opts=self.opts)
        if self.resume is not None:
            self.optimizer.load_state_dict(resume_optim_state)
            for state in self.optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(device=self.device)
def training(self, epoch, lr, *args, **kwargs):
train_stats = Statistics()
self.mi_model.train()
self.optimizer.zero_grad()
num_samples = len(self.train_loader)
epoch_start_time = time.time()
for batch_id, batch in enumerate(self.train_loader):
words, true_diag_labels = batch
true_diag_labels = true_diag_labels.to(device=self.device)
pred_diag_labels = prediction(
words=words,
cnn_model=self.base_feature_extractor,
mi_model=self.mi_model,
max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0,
num_gpus=self.num_gpus,
device=self.device
)
loss = self.criteria(pred_diag_labels, true_diag_labels)
top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
loss.backward()
if (batch_id + 1) % self.opts.accum_count == 0 or batch_id + 1 == len(self.train_loader):
self.optimizer.step()
self.optimizer.zero_grad()
train_stats.update(loss=loss.item(), acc=top1_acc[0].item())
if batch_id % self.opts.log_interval == 0 and batch_id > 0:
train_stats.output(epoch=epoch, batch=batch_id, n_batches=num_samples, start=epoch_start_time, lr=lr)
return train_stats.avg_acc(), train_stats.avg_loss()
def warm_up(self, *args, **kwargs):
self.mi_model.train()
num_samples = len(self.train_loader)
warm_up_iterations = int(math.ceil((self.opts.warm_up_iterations * 1.0) / num_samples) * num_samples)
print_info_message('Warming Up')
print_log_message(
'LR will linearly change from {} to {} in about {} steps'.format(self.opts.warm_up_min_lr, self.opts.lr,
warm_up_iterations))
        lr_list = np.linspace(self.opts.warm_up_min_lr, self.opts.lr, warm_up_iterations)
epoch_start_time = time.time()
iteration = -1
while iteration < warm_up_iterations:
warm_up_stats = Statistics()
for batch_id, batch in enumerate(self.train_loader):
if iteration >= warm_up_iterations:
break
iteration += 1
try:
lr_iter = lr_list[iteration]
except:
lr_iter = self.opts.lr
self.optimizer = update_optimizer(optimizer=self.optimizer, lr_value=lr_iter)
words, true_diag_labels = batch
true_diag_labels = true_diag_labels.to(device=self.device)
pred_diag_labels = prediction(
words=words,
cnn_model=self.base_feature_extractor,
mi_model=self.mi_model,
max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0,
num_gpus=self.num_gpus,
device=self.device
)
loss = self.criteria(pred_diag_labels, true_diag_labels)
top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
loss.backward()
if (batch_id + 1) % self.opts.accum_count == 0 or batch_id + 1 == len(self.train_loader):
self.optimizer.step()
self.optimizer.zero_grad()
warm_up_stats.update(loss=loss.item(), acc=top1_acc[0].item())
if batch_id % self.opts.log_interval == 0 and batch_id > 0:
warm_up_stats.output(epoch=-1, batch=iteration, n_batches=warm_up_iterations,
start=epoch_start_time,
lr=lr_iter)
gc.collect()
print_log_message('Warming Up... Done!!!')
def validation(self, epoch, lr, *args, **kwargs):
val_stats = Statistics()
self.mi_model.eval()
num_samples = len(self.val_loader)
with torch.no_grad():
epoch_start_time = time.time()
for batch_id, batch in enumerate(self.val_loader):
words, true_diag_labels = batch
true_diag_labels = true_diag_labels.to(device=self.device)
pred_diag_labels = prediction(
words=words,
cnn_model=self.base_feature_extractor,
mi_model=self.mi_model,
max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0,
num_gpus=self.num_gpus,
device=self.device
)
loss = self.criteria(pred_diag_labels, true_diag_labels)
top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
val_stats.update(loss=loss.item(), acc=top1_acc[0].item())
if batch_id % self.opts.log_interval == 0 and batch_id > 0:
val_stats.output(epoch=epoch, batch=batch_id, n_batches=num_samples, start=epoch_start_time, lr=lr)
gc.collect()
avg_acc = val_stats.avg_acc()
avg_loss = val_stats.avg_loss()
print_log_message('* Validation Stats')
print_log_message('* Loss: {:5.2f}, Mean Acc: {:3.2f}'.format(avg_loss, avg_acc))
return avg_acc, avg_loss
def run(self, *args, **kwargs):
kwargs['need_attn'] = False
if self.opts.warm_up:
self.warm_up(args=args, kwargs=kwargs)
if self.resume is not None:
for epoch in range(self.start_epoch):
self.lr_scheduler.step(epoch)
eval_stats_dict = dict()
for epoch in range(self.start_epoch, self.opts.epochs):
epoch_lr = self.lr_scheduler.step(epoch)
self.optimizer = update_optimizer(optimizer=self.optimizer, lr_value=epoch_lr)
# assert read_lr_from_optimzier(self.optimizer) == epoch_lr
train_acc, train_loss = self.training(epoch=epoch, lr=epoch_lr, args=args, kwargs=kwargs)
val_acc, val_loss = self.validation(epoch=epoch, lr=epoch_lr, args=args, kwargs=kwargs)
eval_stats_dict[epoch] = val_acc
gc.collect()
# remember best accuracy and save checkpoint for best model
is_best = val_acc >= self.best_acc
self.best_acc = max(val_acc, self.best_acc)
model_state = self.mi_model.module.state_dict() if isinstance(self.mi_model, torch.nn.DataParallel) \
else self.mi_model.state_dict()
optimizer_state = self.optimizer.state_dict()
save_checkpoint(epoch=epoch,
model_state=model_state,
optimizer_state=optimizer_state,
best_perf=self.best_acc,
save_dir=self.opts.savedir,
is_best=is_best,
keep_best_k_models=self.opts.keep_best_k_models
)
self.logger.add_scalar('LR', round(epoch_lr, 6), epoch)
self.logger.add_scalar('TrainingLoss', train_loss, epoch)
self.logger.add_scalar('TrainingAcc', train_acc, epoch)
self.logger.add_scalar('ValidationLoss', val_loss, epoch)
self.logger.add_scalar('ValidationAcc', val_acc, epoch)
        # dump the validation epoch ids and accuracies so that they can be used for filtering later on
eval_stats_dict_sort = {k: v for k, v in sorted(eval_stats_dict.items(),
key=lambda item: item[1],
reverse=True
)}
eval_stats_fname = '{}/val_stats_bag_{}_word_{}_{}_{}'.format(
self.opts.savedir,
self.opts.bag_size,
self.opts.word_size,
self.opts.attn_fn,
self.opts.attn_type,
)
writer = DictWriter(file_name=eval_stats_fname, format='json')
# if json file does not exist
if not os.path.isfile(eval_stats_fname):
writer.write(data_dict=eval_stats_dict_sort)
else:
with open(eval_stats_fname, 'r') as json_file:
eval_stats_dict_old = json.load(json_file)
eval_stats_dict_old.update(eval_stats_dict_sort)
eval_stats_dict_updated = {k: v for k, v in sorted(eval_stats_dict_old.items(),
key=lambda item: item[1],
reverse=True
)}
writer.write(data_dict=eval_stats_dict_updated)
self.logger.close()
| true
| true
|
79068dabb025f7f64f6fb8acd5f138bc5ff5dda3
| 2,364
|
py
|
Python
|
robopose/evaluation/meters/utils.py
|
lesteve/robopose
|
536e5317c891bf6bb2b8cbdda294533b3f111e09
|
[
"MIT"
] | 43
|
2021-04-19T13:57:00.000Z
|
2022-03-30T02:43:29.000Z
|
robopose/evaluation/meters/utils.py
|
lesteve/robopose
|
536e5317c891bf6bb2b8cbdda294533b3f111e09
|
[
"MIT"
] | 1
|
2021-07-07T04:46:57.000Z
|
2021-07-09T04:48:34.000Z
|
robopose/evaluation/meters/utils.py
|
lesteve/robopose
|
536e5317c891bf6bb2b8cbdda294533b3f111e09
|
[
"MIT"
] | 6
|
2021-05-10T16:14:08.000Z
|
2022-01-13T23:01:34.000Z
|
import numpy as np
import pandas as pd
from collections import OrderedDict
def one_to_one_matching(pred_infos, gt_infos,
keys=('scene_id', 'view_id'),
allow_pred_missing=False):
keys = list(keys)
pred_infos['pred_id'] = np.arange(len(pred_infos))
gt_infos['gt_id'] = np.arange(len(gt_infos))
matches = pred_infos.merge(gt_infos, on=keys)
matches_gb = matches.groupby(keys).groups
assert all([len(v) == 1 for v in matches_gb.values()])
if not allow_pred_missing:
assert len(matches) == len(gt_infos)
return matches
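# Illustrative use with hypothetical frames: merging on ('scene_id', 'view_id')
# must pair every ground-truth row with exactly one prediction.
#
#   pred_infos = pd.DataFrame({'scene_id': [0, 0], 'view_id': [0, 1]})
#   gt_infos = pd.DataFrame({'scene_id': [0, 0], 'view_id': [0, 1]})
#   one_to_one_matching(pred_infos, gt_infos)
#   # -> 2 matched rows, each carrying aligned pred_id and gt_id columns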
def get_candidate_matches(pred_infos, gt_infos,
group_keys=['scene_id', 'view_id', 'label'],
only_valids=True):
pred_infos['pred_id'] = np.arange(len(pred_infos))
gt_infos['gt_id'] = np.arange(len(gt_infos))
group_keys = list(group_keys)
cand_infos = pred_infos.merge(gt_infos, on=group_keys)
if only_valids:
cand_infos = cand_infos[cand_infos['valid']].reset_index(drop=True)
cand_infos['cand_id'] = np.arange(len(cand_infos))
return cand_infos
def match_poses(cand_infos, group_keys=['scene_id', 'view_id', 'label']):
assert 'error' in cand_infos
matches = []
def match_label_preds(group):
gt_ids_matched = set()
group = group.reset_index(drop=True)
gb_pred = group.groupby('pred_id', sort=False)
ids_sorted = gb_pred.first().sort_values('score', ascending=False)
gb_pred_groups = gb_pred.groups
for idx, _ in ids_sorted.iterrows():
pred_group = group.iloc[gb_pred_groups[idx]]
best_error = np.inf
best_match = None
for _, tentative_match in pred_group.iterrows():
if tentative_match['error'] < best_error and \
tentative_match['gt_id'] not in gt_ids_matched:
best_match = tentative_match
best_error = tentative_match['error']
if best_match is not None:
gt_ids_matched.add(best_match['gt_id'])
matches.append(best_match)
if len(cand_infos) > 0:
cand_infos.groupby(group_keys).apply(match_label_preds)
matches = pd.DataFrame(matches).reset_index(drop=True)
else:
matches = cand_infos
return matches
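# Toy walk-through of the greedy matching above (all values are made up):
# predictions are visited in decreasing score order and each one takes the
# lowest-error ground truth that is still unmatched.
#
#   cands = pd.DataFrame({
#       'scene_id': [0, 0, 0], 'view_id': [0, 0, 0], 'label': ['a'] * 3,
#       'pred_id': [0, 0, 1], 'gt_id': [0, 1, 0],
#       'score': [0.9, 0.9, 0.5], 'error': [0.2, 0.1, 0.05]})
#   match_poses(cands)
#   # -> pred 0 (best score) takes gt 1 (error 0.1 < 0.2),
#   #    then pred 1 takes the remaining gt 0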
| 36.369231
| 75
| 0.630711
|
import numpy as np
import pandas as pd
from collections import OrderedDict
def one_to_one_matching(pred_infos, gt_infos,
keys=('scene_id', 'view_id'),
allow_pred_missing=False):
keys = list(keys)
pred_infos['pred_id'] = np.arange(len(pred_infos))
gt_infos['gt_id'] = np.arange(len(gt_infos))
matches = pred_infos.merge(gt_infos, on=keys)
matches_gb = matches.groupby(keys).groups
assert all([len(v) == 1 for v in matches_gb.values()])
if not allow_pred_missing:
assert len(matches) == len(gt_infos)
return matches
def get_candidate_matches(pred_infos, gt_infos,
group_keys=['scene_id', 'view_id', 'label'],
only_valids=True):
pred_infos['pred_id'] = np.arange(len(pred_infos))
gt_infos['gt_id'] = np.arange(len(gt_infos))
group_keys = list(group_keys)
cand_infos = pred_infos.merge(gt_infos, on=group_keys)
if only_valids:
cand_infos = cand_infos[cand_infos['valid']].reset_index(drop=True)
cand_infos['cand_id'] = np.arange(len(cand_infos))
return cand_infos
def match_poses(cand_infos, group_keys=['scene_id', 'view_id', 'label']):
assert 'error' in cand_infos
matches = []
def match_label_preds(group):
gt_ids_matched = set()
group = group.reset_index(drop=True)
gb_pred = group.groupby('pred_id', sort=False)
ids_sorted = gb_pred.first().sort_values('score', ascending=False)
gb_pred_groups = gb_pred.groups
for idx, _ in ids_sorted.iterrows():
pred_group = group.iloc[gb_pred_groups[idx]]
best_error = np.inf
best_match = None
for _, tentative_match in pred_group.iterrows():
if tentative_match['error'] < best_error and \
tentative_match['gt_id'] not in gt_ids_matched:
best_match = tentative_match
best_error = tentative_match['error']
if best_match is not None:
gt_ids_matched.add(best_match['gt_id'])
matches.append(best_match)
if len(cand_infos) > 0:
cand_infos.groupby(group_keys).apply(match_label_preds)
matches = pd.DataFrame(matches).reset_index(drop=True)
else:
matches = cand_infos
return matches
| true
| true
|
79068e9728629a1c3b2e86b809aeda3955c65ac2
| 2,178
|
py
|
Python
|
test_corebio/test_table_io.py
|
javicorvi/weblogo_edited
|
c44f2a4be40318571a8991bcbe4d5cb83e327719
|
[
"MIT"
] | null | null | null |
test_corebio/test_table_io.py
|
javicorvi/weblogo_edited
|
c44f2a4be40318571a8991bcbe4d5cb83e327719
|
[
"MIT"
] | null | null | null |
test_corebio/test_table_io.py
|
javicorvi/weblogo_edited
|
c44f2a4be40318571a8991bcbe4d5cb83e327719
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2005 Gavin E. Crooks <gec@threeplusone.com>
#
# This software is distributed under the MIT Open Source License.
# <http://www.opensource.org/licenses/mit-license.html>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import unittest
from corebio import *
from corebio._py3k import StringIO
from corebio.seq import *
from corebio.seq_io import *
from test_corebio import *
class test_table_io(unittest.TestCase):
def test_read(self):
f = StringIO(table_io.example)
seqs = table_io.read(f)
self.assertEqual(len(seqs), 10)
self.assertEqual(seqs[2].name, "EC0003")
self.assertEqual(len(seqs[1]), 50)
def test_read_fail(self):
f = StringIO(plain_io.example)
# Wrong alphabet
self.assertRaises(ValueError, table_io.read, f)
def test_write_seq(self):
f = StringIO(table_io.example)
seqs = table_io.read(f)
fout = StringIO()
table_io.write(fout, seqs)
fout.seek(0)
seqs2 = table_io.read(fout)
self.assertEqual(seqs, seqs2)
if __name__ == '__main__':
unittest.main()
| 33.507692
| 80
| 0.71258
|
import unittest
from corebio import *
from corebio._py3k import StringIO
from corebio.seq import *
from corebio.seq_io import *
from test_corebio import *
class test_table_io(unittest.TestCase):
def test_read(self):
f = StringIO(table_io.example)
seqs = table_io.read(f)
self.assertEqual(len(seqs), 10)
self.assertEqual(seqs[2].name, "EC0003")
self.assertEqual(len(seqs[1]), 50)
def test_read_fail(self):
f = StringIO(plain_io.example)
self.assertRaises(ValueError, table_io.read, f)
def test_write_seq(self):
f = StringIO(table_io.example)
seqs = table_io.read(f)
fout = StringIO()
table_io.write(fout, seqs)
fout.seek(0)
seqs2 = table_io.read(fout)
self.assertEqual(seqs, seqs2)
if __name__ == '__main__':
unittest.main()
| true
| true
|
79069073bbd1fec00e8ae8096e7d0fda9c0b6a11
| 6,933
|
py
|
Python
|
tests/tape/interfaces/test_qnode_jax.py
|
PritishSehzpaul/pennylane
|
93ff1b2deeaf620db90ec91448fde64709a9fd9f
|
[
"Apache-2.0"
] | 1
|
2021-02-18T02:14:27.000Z
|
2021-02-18T02:14:27.000Z
|
tests/tape/interfaces/test_qnode_jax.py
|
PritishSehzpaul/pennylane
|
93ff1b2deeaf620db90ec91448fde64709a9fd9f
|
[
"Apache-2.0"
] | null | null | null |
tests/tape/interfaces/test_qnode_jax.py
|
PritishSehzpaul/pennylane
|
93ff1b2deeaf620db90ec91448fde64709a9fd9f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the JAX interface"""
import pytest
jax = pytest.importorskip("jax")
jnp = pytest.importorskip("jax.numpy")
import numpy as np
import pennylane as qml
from pennylane.tape import JacobianTape, qnode, QNode, QubitParamShiftTape
def test_qnode_integration():
"""Test a simple use of qnode with a JAX interface and non-JAX device"""
dev = qml.device("default.mixed", wires=2) # A non-JAX device
@qml.qnode(dev, interface="jax")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RZ(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
weights = jnp.array([0.1, 0.2])
val = circuit(weights)
assert "DeviceArray" in val.__repr__()
def test_to_jax():
"""Test the to_jax method"""
dev = qml.device("default.mixed", wires=2)
@qml.qnode(dev, interface="autograd")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RZ(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
circuit.to_jax()
weights = jnp.array([0.1, 0.2])
val = circuit(weights)
assert "DeviceArray" in val.__repr__()
def test_simple_jacobian():
"""Test the use of jax.jaxrev"""
dev = qml.device("default.mixed", wires=2) # A non-JAX device.
@qml.qnode(dev, interface="jax", diff_method="parameter-shift")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
weights = jnp.array([0.1, 0.2])
grads = jax.jacrev(circuit)(weights)
# This is the easiest way to ensure our object is a DeviceArray instead
# of a numpy array.
assert "DeviceArray" in grads.__repr__()
assert grads.shape == (2,)
np.testing.assert_allclose(grads, np.array([-0.09784342, -0.19767685]))
def test_simple_grad():
"""Test the use of jax.grad"""
dev = qml.device("default.mixed", wires=2) # A non-JAX device.
@qml.qnode(dev, interface="jax", diff_method="parameter-shift")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RZ(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
weights = jnp.array([0.1, 0.2])
val = jax.grad(circuit)(weights)
assert "DeviceArray" in val.__repr__()
@pytest.mark.parametrize("diff_method", ['parameter-shift', 'finite-diff'])
def test_differentiable_expand(diff_method):
"""Test that operation and nested tapes expansion
is differentiable"""
class U3(qml.U3):
def expand(self):
theta, phi, lam = self.data
wires = self.wires
with JacobianTape() as tape:
qml.Rot(lam, theta, -lam, wires=wires)
qml.PhaseShift(phi + lam, wires=wires)
return tape
dev = qml.device("default.mixed", wires=1)
a = jnp.array(0.1)
p = jnp.array([0.1, 0.2, 0.3])
@qnode(dev, diff_method=diff_method, interface="jax")
def circuit(a, p):
qml.RX(a, wires=0)
U3(p[0], p[1], p[2], wires=0)
return qml.expval(qml.PauliX(0))
res = circuit(a, p)
expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (
np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])
)
tol = 1e-5
assert np.allclose(res, expected, atol=tol, rtol=0)
res = jax.grad(circuit, argnums=1)(a, p)
expected = np.array(
[
np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),
np.cos(p[1]) * np.cos(p[2]) * np.sin(a)
- np.sin(p[1])
* (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),
np.sin(a)
* (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def qtransform(qnode, a, framework=jnp):
"""Transforms every RY(y) gate in a circuit to RX(-a*cos(y))"""
def construct(self, args, kwargs):
"""New quantum tape construct method, that performs
the transform on the tape in a define-by-run manner"""
t_op = []
QNode.construct(self, args, kwargs)
new_ops = []
for o in self.qtape.operations:
# here, we loop through all tape operations, and make
# the transformation if a RY gate is encountered.
if isinstance(o, qml.RY):
t_op.append(qml.RX(-a * framework.cos(o.data[0]), wires=o.wires))
new_ops.append(t_op[-1])
else:
new_ops.append(o)
self.qtape._ops = new_ops
self.qtape._update()
import copy
new_qnode = copy.deepcopy(qnode)
new_qnode.construct = construct.__get__(new_qnode, QNode)
return new_qnode
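# Minimal sketch of the transform's effect (the angle values are
# illustrative): each RY(y) in the tape becomes RX(-a * cos(y)) on the same
# wires, applied define-by-run each time the transformed QNode is built.
#
#   new_circuit = qtransform(circuit, a=0.5)
#   # a tape op RY(0.3) is replaced by RX(-0.5 * cos(0.3)) ~= RX(-0.4777)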
@pytest.mark.parametrize(
"dev_name,diff_method",
[("default.mixed", "finite-diff"), ("default.qubit.autograd", "parameter-shift")],
)
def test_transform(dev_name, diff_method, monkeypatch, tol):
"""Test an example transform"""
monkeypatch.setattr(qml.operation.Operation, "do_check_domain", False)
dev = qml.device(dev_name, wires=1)
@qnode(dev, interface="jax", diff_method=diff_method)
def circuit(weights):
op1 = qml.RY(weights[0], wires=0)
op2 = qml.RX(weights[1], wires=0)
return qml.expval(qml.PauliZ(wires=0))
weights = np.array([0.32, 0.543])
a = np.array(0.5)
def loss(weights, a):
# transform the circuit QNode with trainable weight 'a'
new_circuit = qtransform(circuit, a)
# evaluate the transformed QNode
res = new_circuit(weights)
# evaluate the original QNode with pre-processed parameters
res2 = circuit(jnp.sin(weights))
# return the sum of the two QNode evaluations
return res + res2
res = loss(weights, a)
grad = jax.grad(loss, argnums=[0, 1])(weights, a)
assert len(grad) == 2
assert grad[0].shape == weights.shape
assert grad[1].shape == a.shape
# compare against the expected values
tol = 1e-5
assert np.allclose(res, 1.8244501889992706, atol=tol, rtol=0)
assert np.allclose(grad[0], [-0.26610258, -0.47053553], atol=tol, rtol=0)
assert np.allclose(grad[1], 0.06486032, atol=tol, rtol=0)
| 33.985294
| 97
| 0.617337
|
import pytest
jax = pytest.importorskip("jax")
jnp = pytest.importorskip("jax.numpy")
import numpy as np
import pennylane as qml
from pennylane.tape import JacobianTape, qnode, QNode, QubitParamShiftTape
def test_qnode_integration():
dev = qml.device("default.mixed", wires=2)
@qml.qnode(dev, interface="jax")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RZ(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
weights = jnp.array([0.1, 0.2])
val = circuit(weights)
assert "DeviceArray" in val.__repr__()
def test_to_jax():
dev = qml.device("default.mixed", wires=2)
@qml.qnode(dev, interface="autograd")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RZ(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
circuit.to_jax()
weights = jnp.array([0.1, 0.2])
val = circuit(weights)
assert "DeviceArray" in val.__repr__()
def test_simple_jacobian():
dev = qml.device("default.mixed", wires=2)
@qml.qnode(dev, interface="jax", diff_method="parameter-shift")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
weights = jnp.array([0.1, 0.2])
grads = jax.jacrev(circuit)(weights)
assert "DeviceArray" in grads.__repr__()
assert grads.shape == (2,)
np.testing.assert_allclose(grads, np.array([-0.09784342, -0.19767685]))
def test_simple_grad():
dev = qml.device("default.mixed", wires=2)
@qml.qnode(dev, interface="jax", diff_method="parameter-shift")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RZ(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
weights = jnp.array([0.1, 0.2])
val = jax.grad(circuit)(weights)
assert "DeviceArray" in val.__repr__()
@pytest.mark.parametrize("diff_method", ['parameter-shift', 'finite-diff'])
def test_differentiable_expand(diff_method):
class U3(qml.U3):
def expand(self):
theta, phi, lam = self.data
wires = self.wires
with JacobianTape() as tape:
qml.Rot(lam, theta, -lam, wires=wires)
qml.PhaseShift(phi + lam, wires=wires)
return tape
dev = qml.device("default.mixed", wires=1)
a = jnp.array(0.1)
p = jnp.array([0.1, 0.2, 0.3])
@qnode(dev, diff_method=diff_method, interface="jax")
def circuit(a, p):
qml.RX(a, wires=0)
U3(p[0], p[1], p[2], wires=0)
return qml.expval(qml.PauliX(0))
res = circuit(a, p)
expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (
np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])
)
tol = 1e-5
assert np.allclose(res, expected, atol=tol, rtol=0)
res = jax.grad(circuit, argnums=1)(a, p)
expected = np.array(
[
np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),
np.cos(p[1]) * np.cos(p[2]) * np.sin(a)
- np.sin(p[1])
* (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),
np.sin(a)
* (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def qtransform(qnode, a, framework=jnp):
def construct(self, args, kwargs):
t_op = []
QNode.construct(self, args, kwargs)
new_ops = []
for o in self.qtape.operations:
if isinstance(o, qml.RY):
t_op.append(qml.RX(-a * framework.cos(o.data[0]), wires=o.wires))
new_ops.append(t_op[-1])
else:
new_ops.append(o)
self.qtape._ops = new_ops
self.qtape._update()
import copy
new_qnode = copy.deepcopy(qnode)
new_qnode.construct = construct.__get__(new_qnode, QNode)
return new_qnode
@pytest.mark.parametrize(
"dev_name,diff_method",
[("default.mixed", "finite-diff"), ("default.qubit.autograd", "parameter-shift")],
)
def test_transform(dev_name, diff_method, monkeypatch, tol):
monkeypatch.setattr(qml.operation.Operation, "do_check_domain", False)
dev = qml.device(dev_name, wires=1)
@qnode(dev, interface="jax", diff_method=diff_method)
def circuit(weights):
op1 = qml.RY(weights[0], wires=0)
op2 = qml.RX(weights[1], wires=0)
return qml.expval(qml.PauliZ(wires=0))
weights = np.array([0.32, 0.543])
a = np.array(0.5)
def loss(weights, a):
new_circuit = qtransform(circuit, a)
res = new_circuit(weights)
res2 = circuit(jnp.sin(weights))
return res + res2
res = loss(weights, a)
grad = jax.grad(loss, argnums=[0, 1])(weights, a)
assert len(grad) == 2
assert grad[0].shape == weights.shape
assert grad[1].shape == a.shape
tol = 1e-5
assert np.allclose(res, 1.8244501889992706, atol=tol, rtol=0)
assert np.allclose(grad[0], [-0.26610258, -0.47053553], atol=tol, rtol=0)
assert np.allclose(grad[1], 0.06486032, atol=tol, rtol=0)
| true
| true
|
7906918058514df117adb2ce11269cef6aba4a6d
| 3,594
|
py
|
Python
|
bsp/stm32/stm32f103-fire-arbitrary/rtconfig.py
|
guisuanzi/rt-thread
|
d8fe7772ddb9550891487f51ad6161218391f5d4
|
[
"Apache-2.0"
] | 4
|
2019-08-25T14:15:33.000Z
|
2020-09-28T13:42:40.000Z
|
bsp/stm32/stm32f103-fire-arbitrary/rtconfig.py
|
LoveCeline/rt-thread
|
538923d34ea9a36f8b967467c60f36c02abaf387
|
[
"Apache-2.0"
] | 2
|
2018-12-11T08:31:42.000Z
|
2020-06-11T06:31:58.000Z
|
bsp/stm32/stm32f103-fire-arbitrary/rtconfig.py
|
LoveCeline/rt-thread
|
538923d34ea9a36f8b967467c60f36c02abaf387
|
[
"Apache-2.0"
] | 3
|
2019-11-10T23:04:21.000Z
|
2020-03-07T20:45:16.000Z
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -std=c99 -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M3 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
    LFLAGS = DEVICE + ' --scatter "board\\linker_scripts\\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
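# Usage sketch (assumption, not part of the original file): RT-Thread's SCons
# build imports this module and reads the variables above. The toolchain is
# normally selected through environment variables before invoking scons, e.g.:
#   set RTT_CC=gcc
#   set RTT_EXEC_PATH=C:\gcc-arm-none-eabi\bin
#   scons -j8
# The compiler path above is a placeholder.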
| 27.435115
| 152
| 0.571508
|
import os
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -std=c99 -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M3 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
    LFLAGS = DEVICE + ' --scatter "board\\linker_scripts\\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
| true
| true
|
7906926887ccd244b0431457ba90d0dd5740add8
| 6,179
|
py
|
Python
|
docs/source/conf.py
|
agaszmurlo/bdg-sequila
|
ccc65fab604a068784115277d13c692a4ca46a15
|
[
"Apache-2.0"
] | 1
|
2020-06-11T12:27:45.000Z
|
2020-06-11T12:27:45.000Z
|
docs/source/conf.py
|
agaszmurlo/bdg-sequila
|
ccc65fab604a068784115277d13c692a4ca46a15
|
[
"Apache-2.0"
] | 2
|
2022-03-06T09:35:38.000Z
|
2022-03-07T09:01:05.000Z
|
docs/source/conf.py
|
mikolajroszak/bdg-sequila
|
ccc65fab604a068784115277d13c692a4ca46a15
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'bdg-sequila'
copyright = u'2019, biodatageeks.org'
author = u'biodatageeks.org'
# The short X.Y version
version = u'|version|'
# The full version, including alpha/beta/rc tags
release = u'|version|'
project_name = u'bdg-sequila'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinxcontrib.github_ribbon',
# 'sphinx.ext.ifconfig',
'sphinxcontrib.bibtex',
'sphinx.ext.autosectionlabel',
'rst2pdf.pdfbuilder'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
#html_logo='sequila.png'
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'bdg-sequiladoc'
#--- Options for PDF ------------------------
pdf_documents = [('index', u'rst2pdf', u'SeQuiLa documentation', u'biodatageeks.org'),]
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'bdg-sequila.tex', u'SeQuiLa Documentation',
u'biodatageeks.org', 'howto'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bdg-sequila', u'bdg-spark-granges Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'bdg-spark-granges', u'bdg-spark-granges Documentation',
author, 'bdg-spark-granges', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
##github
#pip install sphinxcontrib-github_ribbon
github_ribbon_repo = 'ZSI-Bio/bdg-sequila'
github_ribbon_position = "right"
github_ribbon_color ="red"
#latexpdf
text_add_secnumbers = False
#latex_logo = "sequila.png"
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
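# Usage sketch (assumption, not part of the original conf.py): with this file
# in docs/source, the HTML docs would typically be built from the repo root via
#   sphinx-build -b html docs/source docs/build/html
# and the rst2pdf builder enabled above is invoked with -b pdf instead.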
| 29.146226
| 87
| 0.653666
|
project = u'bdg-sequila'
copyright = u'2019, biodatageeks.org'
author = u'biodatageeks.org'
version = u'|version|'
release = u'|version|'
project_name = u'bdg-sequila'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinxcontrib.github_ribbon',
'sphinxcontrib.bibtex',
'sphinx.ext.autosectionlabel',
'rst2pdf.pdfbuilder'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = "sphinx_rtd_theme"
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
#html_logo='sequila.png'
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'bdg-sequiladoc'
#--- Options for PDF ------------------------
pdf_documents = [('index', u'rst2pdf', u'SeQuiLa documentation', u'biodatageeks.org'),]
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'bdg-sequila.tex', u'SeQuiLa Documentation',
u'biodatageeks.org', 'howto'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bdg-sequila', u'bdg-spark-granges Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'bdg-spark-granges', u'bdg-spark-granges Documentation',
author, 'bdg-spark-granges', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
##github
#pip install sphinxcontrib-github_ribbon
github_ribbon_repo = 'ZSI-Bio/bdg-sequila'
github_ribbon_position = "right"
github_ribbon_color ="red"
#latexpdf
text_add_secnumbers = False
#latex_logo = "sequila.png"
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| true
| true
|
790693ae018bc2b2c882ccede257bf8d888a969c
| 3,299
|
py
|
Python
|
py12306/log/order_log.py
|
juinjonn/py12306
|
47555694e03b30aaa174080c18a0222092b8c22a
|
[
"Apache-2.0"
] | 12,397
|
2019-01-08T02:02:22.000Z
|
2022-03-31T06:47:06.000Z
|
py12306/log/order_log.py
|
xawi2000/py12306
|
0316e52e90a21aca7aeef0042a1aa1d6fba2ee2c
|
[
"Apache-2.0"
] | 384
|
2019-01-08T09:09:52.000Z
|
2022-01-24T06:53:40.000Z
|
py12306/log/order_log.py
|
jiangjunlu/12306
|
6ae91fa07f2c1655f63e62353dd4472c27ce838a
|
[
"Apache-2.0"
] | 3,397
|
2019-01-08T05:43:10.000Z
|
2022-03-22T10:14:38.000Z
|
from py12306.log.base import BaseLog
from py12306.helpers.func import *
@singleton
class OrderLog(BaseLog):
    # If these are not re-declared here, log lines get printed twice; the root cause is currently unknown
logs = []
thread_logs = {}
quick_log = []
MESSAGE_REQUEST_INIT_DC_PAGE_FAIL = '请求初始化订单页面失败'
MESSAGE_SUBMIT_ORDER_REQUEST_FAIL = '提交订单失败,错误原因 {} \n'
MESSAGE_SUBMIT_ORDER_REQUEST_SUCCESS = '提交订单成功'
MESSAGE_CHECK_ORDER_INFO_FAIL = '检查订单失败,错误原因 {} \n'
MESSAGE_CHECK_ORDER_INFO_SUCCESS = '检查订单成功'
MESSAGE_GET_QUEUE_INFO_SUCCESS = '获取排队信息成功,目前排队人数 {}, 余票还剩余 {} 张'
MESSAGE_GET_QUEUE_INFO_NO_SEAT = '接口返回实际为无票,跳过本次排队'
MESSAGE_GET_QUEUE_COUNT_SUCCESS = '排队成功,你当前排在第 {} 位, 余票还剩余 {} 张'
MESSAGE_GET_QUEUE_LESS_TICKET = '排队失败,目前排队人数已经超过余票张数'
MESSAGE_GET_QUEUE_COUNT_FAIL = '排队失败,错误原因 {}'
MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_SUCCESS = '# 提交订单成功!#'
MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_ERROR = '出票失败,错误原因 {}'
MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_FAIL = '提交订单失败,错误原因 {}'
MESSAGE_QUERY_ORDER_WAIT_TIME_WAITING = '排队等待中,排队人数 {},预计还需要 {} 秒'
MESSAGE_QUERY_ORDER_WAIT_TIME_FAIL = '排队失败,错误原因 {}'
MESSAGE_QUERY_ORDER_WAIT_TIME_INFO = '第 {} 次排队,请耐心等待'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_TITLE = '车票购买成功!'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_CONTENT = '请及时登录12306账号[{}],打开 \'未完成订单\',在30分钟内完成支付!'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_INFO = '\t\t车次信息: {} {}[{}] -> {}[{}],乘车日期 {},席位:{},乘车人:{}'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_VOICE_CODE_START_SEND = '正在发送语音通知...'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_VOICE_CODE_CONTENT = '你的车票 {} 到 {} 购买成功,请登录 12306 进行支付'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_EMAIL_CONTENT = '订单号 {},请及时登录12306账号[{}],打开 \'未完成订单\',在30分钟内完成支付!'
MESSAGE_JOB_CLOSED = '当前任务已结束'
@classmethod
def print_passenger_did_deleted(cls, passengers):
self = cls()
result = [passenger.get('name') + '(' + passenger.get('type_text') + ')' for passenger in passengers]
self.add_quick_log('# 删减后的乘客列表 {} #'.format(', '.join(result)))
self.flush()
return self
@classmethod
def print_ticket_did_ordered(cls, order_id):
self = cls()
self.add_quick_log('# 车票购买成功,订单号 {} #'.format(order_id))
self.flush()
return self
@classmethod
def get_order_success_notification_info(cls, query):
from py12306.query.job import Job
assert isinstance(query, Job)
passengers = [passenger.get(
'name') + '(' + passenger.get('type_text') + ')' for passenger in query.passengers]
return cls.MESSAGE_ORDER_SUCCESS_NOTIFICATION_INFO.format(query.get_info_of_train_number(),
query.get_info_of_left_station(),
query.get_info_of_train_left_time(),
query.get_info_of_arrive_station(),
query.get_info_of_train_arrive_time(),
query.get_info_of_left_date(),
query.current_seat_name,
','.join(passengers))
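# Usage sketch (assumption, not part of the original module): since OrderLog is
# a @singleton, the classmethods can be called from anywhere in the order flow:
#   OrderLog.print_ticket_did_ordered('E123456789')  # invented order id
#   info = OrderLog.get_order_success_notification_info(job)  # job: a py12306.query.job.Job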
| 45.191781
| 109
| 0.614429
|
from py12306.log.base import BaseLog
from py12306.helpers.func import *
@singleton
class OrderLog(BaseLog):
logs = []
thread_logs = {}
quick_log = []
MESSAGE_REQUEST_INIT_DC_PAGE_FAIL = '请求初始化订单页面失败'
MESSAGE_SUBMIT_ORDER_REQUEST_FAIL = '提交订单失败,错误原因 {} \n'
MESSAGE_SUBMIT_ORDER_REQUEST_SUCCESS = '提交订单成功'
MESSAGE_CHECK_ORDER_INFO_FAIL = '检查订单失败,错误原因 {} \n'
MESSAGE_CHECK_ORDER_INFO_SUCCESS = '检查订单成功'
MESSAGE_GET_QUEUE_INFO_SUCCESS = '获取排队信息成功,目前排队人数 {}, 余票还剩余 {} 张'
MESSAGE_GET_QUEUE_INFO_NO_SEAT = '接口返回实际为无票,跳过本次排队'
MESSAGE_GET_QUEUE_COUNT_SUCCESS = '排队成功,你当前排在第 {} 位, 余票还剩余 {} 张'
MESSAGE_GET_QUEUE_LESS_TICKET = '排队失败,目前排队人数已经超过余票张数'
MESSAGE_GET_QUEUE_COUNT_FAIL = '排队失败,错误原因 {}'
MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_SUCCESS = '# 提交订单成功!#'
MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_ERROR = '出票失败,错误原因 {}'
MESSAGE_CONFIRM_SINGLE_FOR_QUEUE_FAIL = '提交订单失败,错误原因 {}'
MESSAGE_QUERY_ORDER_WAIT_TIME_WAITING = '排队等待中,排队人数 {},预计还需要 {} 秒'
MESSAGE_QUERY_ORDER_WAIT_TIME_FAIL = '排队失败,错误原因 {}'
MESSAGE_QUERY_ORDER_WAIT_TIME_INFO = '第 {} 次排队,请耐心等待'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_TITLE = '车票购买成功!'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_CONTENT = '请及时登录12306账号[{}],打开 \'未完成订单\',在30分钟内完成支付!'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_INFO = '\t\t车次信息: {} {}[{}] -> {}[{}],乘车日期 {},席位:{},乘车人:{}'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_VOICE_CODE_START_SEND = '正在发送语音通知...'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_VOICE_CODE_CONTENT = '你的车票 {} 到 {} 购买成功,请登录 12306 进行支付'
MESSAGE_ORDER_SUCCESS_NOTIFICATION_OF_EMAIL_CONTENT = '订单号 {},请及时登录12306账号[{}],打开 \'未完成订单\',在30分钟内完成支付!'
MESSAGE_JOB_CLOSED = '当前任务已结束'
@classmethod
def print_passenger_did_deleted(cls, passengers):
self = cls()
result = [passenger.get('name') + '(' + passenger.get('type_text') + ')' for passenger in passengers]
self.add_quick_log('# 删减后的乘客列表 {} #'.format(', '.join(result)))
self.flush()
return self
@classmethod
def print_ticket_did_ordered(cls, order_id):
self = cls()
self.add_quick_log('# 车票购买成功,订单号 {} #'.format(order_id))
self.flush()
return self
@classmethod
def get_order_success_notification_info(cls, query):
from py12306.query.job import Job
assert isinstance(query, Job)
passengers = [passenger.get(
'name') + '(' + passenger.get('type_text') + ')' for passenger in query.passengers]
return cls.MESSAGE_ORDER_SUCCESS_NOTIFICATION_INFO.format(query.get_info_of_train_number(),
query.get_info_of_left_station(),
query.get_info_of_train_left_time(),
query.get_info_of_arrive_station(),
query.get_info_of_train_arrive_time(),
query.get_info_of_left_date(),
query.current_seat_name,
','.join(passengers))
| true
| true
|
79069488a3ed7b05b315f80f4b0008e0fd3b95ea
| 2,429
|
py
|
Python
|
src/m2_sounds.py
|
schochla/06-IntroductionToRobots-201930
|
3053a23062f2cb9714057777a3a93dfa3f7473f7
|
[
"MIT"
] | null | null | null |
src/m2_sounds.py
|
schochla/06-IntroductionToRobots-201930
|
3053a23062f2cb9714057777a3a93dfa3f7473f7
|
[
"MIT"
] | null | null | null |
src/m2_sounds.py
|
schochla/06-IntroductionToRobots-201930
|
3053a23062f2cb9714057777a3a93dfa3f7473f7
|
[
"MIT"
] | null | null | null |
"""
An opportunity to explore how to make an EV3 Robot make sounds.
Authors: Dave Fisher, David Mutchler, Vibha Alangar,
their colleagues, and Leo Schoch-Spana.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import simple_rosebotics as rb
# ------------------------------------------------------------------------------
# DONE: 2. This is an ** OPTIONAL ** exercise.
# Using the DOT trick, add code to make_sounds to make the robot
# make sounds in various ways, using the instance variables of the robot's
# SOUND system.
# ------------------------------------------------------------------------------
def main():
tone_player = rb.ToneMaker()
tone_player.play_tone_sequence([(392, 350, 100), (392, 350, 100), (392, 350, 100),
(311.1, 250, 100), (466.2, 25, 100), (392, 350, 100),
(311.1, 250, 100), (466.2, 25, 100), (392, 700, 100),
(587.32, 350, 100), (587.32, 350, 100), (587.32, 350, 100),
(622.26, 250, 100), (466.2, 25, 100), (369.99, 350, 100),
(311.1, 250, 100), (466.2, 25, 100), (392, 700, 100),
(784, 350, 100), (392, 250, 100), (392, 25, 100),
(784, 350, 100), (739.98, 250, 100), (698.46, 25, 100),
(659.26, 25, 100), (622.26, 25, 100), (659.26, 50, 400),
(415.3, 25, 200), (554.36, 350, 100), (523.25, 250, 100),
(493.88, 25, 100), (466.16, 25, 100), (440, 25, 100),
(466.16, 50, 400), (311.13, 25, 200), (369.99, 350, 100),
(311.13, 250, 100), (392, 25, 100), (466.16, 350, 100),
(392, 250, 100), (466.16, 25, 100), (587.32, 700, 100),
(784, 350, 100), (392, 250, 100), (392, 25, 100),
(784, 350, 100), (739.98, 250, 100), (698.46, 25, 100),
(659.26, 25, 100), (622.26, 25, 100), (659.26, 50, 400),
(415.3, 25, 200), (554.36, 350, 100), (523.25, 250, 100),
(493.88, 25, 100), (466.16, 25, 100), (440, 25, 100),
(466.16, 50, 400), (311.13, 25, 200), (392, 350, 100),
(311.13, 250, 100), (466.16, 25, 100), (392.00, 300, 150),
(311.13, 250, 100), (466.16, 25, 100), (392, 700)])
def speak():
speech_player = rb.SpeechMaker()
speech_player.speak('hello I am a robot')
def beep():
beeper = rb.Beeper()
beeper.beep()
speak()
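# Usage sketch (assumption, not part of the original file): as written, only
# speak() runs at import time; the tone sequence (which appears to spell out
# the Imperial March) is played by calling
#   main()
# from a runner on the EV3 robot.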
| 46.711538
| 86
| 0.46727
|
import simple_rosebotics as rb
# SOUND system.
# ------------------------------------------------------------------------------
def main():
tone_player = rb.ToneMaker()
tone_player.play_tone_sequence([(392, 350, 100), (392, 350, 100), (392, 350, 100),
(311.1, 250, 100), (466.2, 25, 100), (392, 350, 100),
(311.1, 250, 100), (466.2, 25, 100), (392, 700, 100),
(587.32, 350, 100), (587.32, 350, 100), (587.32, 350, 100),
(622.26, 250, 100), (466.2, 25, 100), (369.99, 350, 100),
(311.1, 250, 100), (466.2, 25, 100), (392, 700, 100),
(784, 350, 100), (392, 250, 100), (392, 25, 100),
(784, 350, 100), (739.98, 250, 100), (698.46, 25, 100),
(659.26, 25, 100), (622.26, 25, 100), (659.26, 50, 400),
(415.3, 25, 200), (554.36, 350, 100), (523.25, 250, 100),
(493.88, 25, 100), (466.16, 25, 100), (440, 25, 100),
(466.16, 50, 400), (311.13, 25, 200), (369.99, 350, 100),
(311.13, 250, 100), (392, 25, 100), (466.16, 350, 100),
(392, 250, 100), (466.16, 25, 100), (587.32, 700, 100),
(784, 350, 100), (392, 250, 100), (392, 25, 100),
(784, 350, 100), (739.98, 250, 100), (698.46, 25, 100),
(659.26, 25, 100), (622.26, 25, 100), (659.26, 50, 400),
(415.3, 25, 200), (554.36, 350, 100), (523.25, 250, 100),
(493.88, 25, 100), (466.16, 25, 100), (440, 25, 100),
(466.16, 50, 400), (311.13, 25, 200), (392, 350, 100),
(311.13, 250, 100), (466.16, 25, 100), (392.00, 300, 150),
(311.13, 250, 100), (466.16, 25, 100), (392, 700)])
def speak():
speech_player = rb.SpeechMaker()
speech_player.speak('hello I am a robot')
def beep():
beeper = rb.Beeper()
beeper.beep()
speak()
| true
| true
|
7906949b6bf55fc6345aadfa45e65a63b4744f5c
| 1,494
|
py
|
Python
|
users/models.py
|
MaryzangelaBessa/ElRoyale
|
4788ec912fde95230b52d481c27577b020678a02
|
[
"MIT"
] | 2
|
2019-09-19T13:28:07.000Z
|
2019-09-19T19:00:20.000Z
|
users/models.py
|
MaryzangelaBessa/ElRoyale
|
4788ec912fde95230b52d481c27577b020678a02
|
[
"MIT"
] | 4
|
2021-03-19T01:53:26.000Z
|
2021-06-10T18:59:34.000Z
|
users/models.py
|
MaryzangelaBessa/ElRoyale
|
4788ec912fde95230b52d481c27577b020678a02
|
[
"MIT"
] | 1
|
2019-11-01T01:37:45.000Z
|
2019-11-01T01:37:45.000Z
|
from uuid import uuid4
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
from django.core.validators import EmailValidator
from django.db import models
class UserManager(BaseUserManager):
def create_user(self, name, email, password=None):
if not email:
raise ValueError('O email é obrigatório.')
email = self.normalize_email(email)
user = self.model(name=name, email=email)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, name, email, password):
user = self.create_user(name, email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
public_id = models.UUIDField(default=uuid4, editable=False)
name = models.CharField('Nome', max_length=255)
email = models.EmailField('Email', max_length=255,
unique=True, validators=[EmailValidator(message='Email inválido.'), ])
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name', ]
def get_full_name(self):
return self.name
    def get_short_name(self):
return self.name
def __str__(self):
return self.name
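# Usage sketch (assumption, not part of the original module): with
#   AUTH_USER_MODEL = 'users.User'
# in settings.py, accounts are created through the custom manager:
#   user = User.objects.create_user(name='Ana', email='ana@example.com', password='s3cret')
# The name/email/password values are placeholders.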
| 28.730769
| 100
| 0.657965
|
from uuid import uuid4
from django.contrib.auth.models import (AbstractBaseUser, BaseUserManager,
PermissionsMixin)
from django.core.validators import EmailValidator
from django.db import models
class UserManager(BaseUserManager):
def create_user(self, name, email, password=None):
if not email:
raise ValueError('O email é obrigatório.')
email = self.normalize_email(email)
user = self.model(name=name, email=email)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, name, email, password):
user = self.create_user(name, email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
public_id = models.UUIDField(default=uuid4, editable=False)
name = models.CharField('Nome', max_length=255)
email = models.EmailField('Email', max_length=255,
unique=True, validators=[EmailValidator(message='Email inválido.'), ])
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name', ]
def get_full_name(self):
return self.name
    def get_short_name(self):
return self.name
def __str__(self):
return self.name
| true
| true
|
790694aac8eb16f528a4059bc11ab5e98ba4289d
| 1,919
|
py
|
Python
|
Code/PreProcessing/Regex/annotation_crawling.py
|
avsharma96/Named-Entity-Recognition
|
5dfdbbc0b4181a40b1981ad7ff01ed2600c0221d
|
[
"MIT"
] | null | null | null |
Code/PreProcessing/Regex/annotation_crawling.py
|
avsharma96/Named-Entity-Recognition
|
5dfdbbc0b4181a40b1981ad7ff01ed2600c0221d
|
[
"MIT"
] | null | null | null |
Code/PreProcessing/Regex/annotation_crawling.py
|
avsharma96/Named-Entity-Recognition
|
5dfdbbc0b4181a40b1981ad7ff01ed2600c0221d
|
[
"MIT"
] | null | null | null |
"""
The code below crawls the annotations of the MADE 1.0 Train Data and stores them
as Corpus ID, Annotation ID, Type, Length, Offset, Text in the
CSV_Annotations.csv file.
Input Files:
All xml files in the annotations folder in the made_train_data folder
Output Files:
CSV_Annotations.csv
Note: Make sure to delete the CSV_Annotations.csv file if already existing in
the folder as this code appends to the existing file.
"""
# Importing required Files
import os
import xml.etree.ElementTree as ET
import csv
final =list()
final.append(["Content ID", "Annotation ID", "Type", "Length", "Offset", "Text"])
# Reading required files
path ="C:\\Project_NLP_Final\\Project Dataset\\made_train_data\\annotations\\"
dirListing = os.listdir(path)
for item in dirListing:
tree = ET.parse(path + '\\' + item)
root = tree.getroot()
for i in root.findall('./document/passage'):
flag = 0
for doc in i.findall('./annotation'):
annot=list()
annot.append(item[0:-9])
annot.append(doc.get('id'))
for typ in doc:
if typ.tag =='infon':
annot.append(typ.text)
elif typ.tag =='location':
annot.append(typ.get('length'))
annot.append(typ.get('offset'))
elif typ.tag == 'text':
annot.append(typ.text)
final.append(annot)
flag = 1
if flag == 0:
annot = [item[0:-9], None, None, None, None, None]
final.append(annot)
# Writing the required files
with open("C:\\Project_NLP_Final\\Project Dataset\\PreProcessing\\Regex\\CSV_Annotations.csv",'a', encoding = 'utf8', newline='') as outcsv:
writer = csv.writer(outcsv, delimiter=',',quotechar = '"')
for row in final:
writer.writerow(row)
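# Usage sketch (assumption, not part of the original script): after a run,
# CSV_Annotations.csv contains one row per annotation, e.g.
#   Content ID,Annotation ID,Type,Length,Offset,Text
#   10001,5,ADE,9,1042,dizziness
# The sample row is invented for illustration.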
| 34.890909
| 143
| 0.600313
|
import os
import xml.etree.ElementTree as ET
import csv
final =list()
final.append(["Content ID", "Annotation ID", "Type", "Length", "Offset", "Text"])
path ="C:\\Project_NLP_Final\\Project Dataset\\made_train_data\\annotations\\"
dirListing = os.listdir(path)
for item in dirListing:
tree = ET.parse(path + '\\' + item)
root = tree.getroot()
for i in root.findall('./document/passage'):
flag = 0
for doc in i.findall('./annotation'):
annot=list()
annot.append(item[0:-9])
annot.append(doc.get('id'))
for typ in doc:
if typ.tag =='infon':
annot.append(typ.text)
elif typ.tag =='location':
annot.append(typ.get('length'))
annot.append(typ.get('offset'))
elif typ.tag == 'text':
annot.append(typ.text)
final.append(annot)
flag = 1
if flag == 0:
annot = [item[0:-9], None, None, None, None, None]
final.append(annot)
with open("C:\\Project_NLP_Final\\Project Dataset\\PreProcessing\\Regex\\CSV_Annotations.csv",'a', encoding = 'utf8', newline='') as outcsv:
writer = csv.writer(outcsv, delimiter=',',quotechar = '"')
for row in final:
writer.writerow(row)
| true
| true
|
790694b980290a435bbc6d1016665683115086c8
| 7,651
|
py
|
Python
|
plugins/rapid7_intsights/icon_rapid7_intsights/util/api.py
|
blaxminarayan-r7/insightconnect-plugins
|
a3963eb3d3d7432d07bd46a5641700bd0ba6e11e
|
[
"MIT"
] | null | null | null |
plugins/rapid7_intsights/icon_rapid7_intsights/util/api.py
|
blaxminarayan-r7/insightconnect-plugins
|
a3963eb3d3d7432d07bd46a5641700bd0ba6e11e
|
[
"MIT"
] | null | null | null |
plugins/rapid7_intsights/icon_rapid7_intsights/util/api.py
|
blaxminarayan-r7/insightconnect-plugins
|
a3963eb3d3d7432d07bd46a5641700bd0ba6e11e
|
[
"MIT"
] | null | null | null |
import json
import time
from dataclasses import dataclass
from logging import Logger
import requests
from insightconnect_plugin_runtime.exceptions import PluginException
from insightconnect_plugin_runtime.helper import clean
from requests.auth import HTTPBasicAuth
@dataclass
class AlertParams:
alert_type: [str]
severity: [str]
source_type: [str]
network_type: [str]
matched_asset_value: str
remediation_status: [str]
source_date_from: str
source_date_to: str
found_date_from: str
found_date_to: str
assigned: str
is_flagged: str
is_closed: str
has_ioc: bool
def to_dict(self) -> dict:
return clean(
{
"alertType": ",".join(self.alert_type) if self.alert_type else None,
"severity": ",".join(self.severity) if self.severity else None,
"sourceType": ",".join(self.source_type) if self.source_type else None,
"networkType": ",".join(self.network_type) if self.network_type else None,
"matchedAssetValue": ",".join(self.matched_asset_value) if self.matched_asset_value else None,
"remediationStatus": ",".join(self.remediation_status) if self.remediation_status else None,
"sourceDateFrom": int(self.source_date_from) if self.source_date_from else None,
"sourceDateTo": int(self.source_date_to) if self.source_date_to else None,
"foundDateFrom": int(self.found_date_from) if self.found_date_from else None,
"foundDateTo": int(self.found_date_to) if self.found_date_to else None,
"assigned": self.assigned == "Assigned" if self.assigned else None,
"isFlagged": self.is_flagged == "Flagged" if self.is_flagged else None,
"isClosed": self.is_closed == "Closed" if self.is_closed else None,
"hasIoc": self.has_ioc,
}
)
@dataclass
class Image:
type: str
data: str
@dataclass
class ManualAlertParams:
title: str
found_date: str
description: str
type: str
sub_type: str
severity: str
source_type: int
source_network_type: int
source_url: int
source_date: int
images: [Image]
def to_dict(self) -> dict:
images = []
if self.images:
for image in self.images:
if not image:
continue
try:
images.append({"Type": image["type"], "Data": image["data"]})
except KeyError as e:
raise PluginException(cause="Wrong input parameter.", assistance=f"Wrong image: {e}.")
return clean(
{
"FoundDate": self.found_date,
"Details": {
"Title": self.title,
"Description": self.description,
"Type": self.type,
"SubType": self.sub_type,
"Severity": self.severity,
"Source": {
"Type": self.source_type,
"NetworkType": self.source_network_type,
"URL": self.source_url,
"Date": self.source_date,
},
"Images": images,
},
}
)
class IntSightsAPI:
def __init__(self, account_id: str, api_key: str, logger: Logger):
self.account_id = account_id
self.api_key = api_key
self.url = "https://api.intsights.com"
self.logger = logger
def get_indicator_by_value(self, ioc_value: str) -> dict:
return self.make_json_request("GET", f"public/v2/iocs/ioc-by-value?iocValue={ioc_value}")
def enrich_indicator(self, ioc_value: str) -> dict:
response = {}
for _ in range(0, 9999):
response = self.make_json_request("GET", f"public/v1/iocs/enrich/{ioc_value}")
if response.get("Status", "InProgress") in ["Done", "Failed"]:
break
time.sleep(5)
return response
def rescan_indicator(self, indicator_file_hash: str) -> dict:
return self.make_json_request("POST", "public/v1/iocs/rescan", json_data={"IocValue": indicator_file_hash})
def get_scan_status(self, task_id: str) -> dict:
return self.make_json_request("GET", f"public/v1/iocs/rescan/status/{task_id}")
def get_complete_alert_by_id(self, alert_id: str) -> dict:
return self.make_json_request("GET", f"public/v1/data/alerts/get-complete-alert/{alert_id}")
def takedown_request(self, alert_id: str, target: str) -> dict:
return self.make_json_request(
"PATCH", f"public/v1/data/alerts/takedown-request/{alert_id}", json_data={"Target": target}
)
def get_alerts(self, alert_params: AlertParams) -> list:
return self.make_request("GET", "public/v1/data/alerts/alerts-list", params=alert_params.to_dict()).json()
def add_manual_alert(self, manual_alert_params: ManualAlertParams) -> str:
return self.make_request("PUT", "public/v1/data/alerts/add-alert", json_data=manual_alert_params.to_dict()).text
def test_credentials(self) -> bool:
return self.make_request("HEAD", "public/v1/test-credentials").status_code == 200
def make_json_request(self, method: str, path: str, json_data: dict = None, params: dict = None) -> dict:
try:
response = self.make_request(method=method, path=path, json_data=json_data, params=params)
if response.status_code == 204:
return {}
json_response = response.json()
if json_response.get("Status") == "Invalid":
raise PluginException(
cause="IntSights returned an error response: ", assistance=f"{json_response.get('FailedReason')}."
)
return json_response
except json.decoder.JSONDecodeError as e:
raise PluginException(preset=PluginException.Preset.INVALID_JSON, data=e)
def make_request(self, method: str, path: str, json_data: dict = None, params: dict = None) -> requests.Response:
try:
response = requests.request(
method=method,
url=f"{self.url}/{path}",
headers={"Content-Type": "application/json"},
verify=True,
params=params,
json=json_data,
auth=HTTPBasicAuth(self.account_id, self.api_key),
)
if response.status_code == 401:
raise PluginException(preset=PluginException.Preset.USERNAME_PASSWORD, data=response.text)
if response.status_code == 403:
raise PluginException(preset=PluginException.Preset.API_KEY, data=response.text)
if response.status_code == 404:
raise PluginException(preset=PluginException.Preset.NOT_FOUND, data=response.text)
if 400 <= response.status_code < 500:
raise PluginException(
preset=PluginException.Preset.UNKNOWN,
data=response.text,
)
if response.status_code >= 500:
raise PluginException(preset=PluginException.Preset.SERVER_ERROR, data=response.text)
if 200 <= response.status_code < 300:
return response
raise PluginException(preset=PluginException.Preset.UNKNOWN, data=response.text)
except requests.exceptions.HTTPError as e:
raise PluginException(preset=PluginException.Preset.UNKNOWN, data=e)
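# Usage sketch (assumption, not part of the original module): the account id
# and API key below are placeholders.
#   import logging
#   api = IntSightsAPI('my-account-id', 'my-api-key', logging.getLogger(__name__))
#   if api.test_credentials():
#       params = AlertParams(
#           alert_type=['Phishing'], severity=['High'], source_type=None,
#           network_type=None, matched_asset_value=None, remediation_status=None,
#           source_date_from=None, source_date_to=None, found_date_from=None,
#           found_date_to=None, assigned=None, is_flagged=None, is_closed=None,
#           has_ioc=None)
#       alerts = api.get_alerts(params)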
| 39.642487
| 120
| 0.606065
|
import json
import time
from dataclasses import dataclass
from logging import Logger
import requests
from insightconnect_plugin_runtime.exceptions import PluginException
from insightconnect_plugin_runtime.helper import clean
from requests.auth import HTTPBasicAuth
@dataclass
class AlertParams:
alert_type: [str]
severity: [str]
source_type: [str]
network_type: [str]
matched_asset_value: str
remediation_status: [str]
source_date_from: str
source_date_to: str
found_date_from: str
found_date_to: str
assigned: str
is_flagged: str
is_closed: str
has_ioc: bool
def to_dict(self) -> dict:
return clean(
{
"alertType": ",".join(self.alert_type) if self.alert_type else None,
"severity": ",".join(self.severity) if self.severity else None,
"sourceType": ",".join(self.source_type) if self.source_type else None,
"networkType": ",".join(self.network_type) if self.network_type else None,
"matchedAssetValue": ",".join(self.matched_asset_value) if self.matched_asset_value else None,
"remediationStatus": ",".join(self.remediation_status) if self.remediation_status else None,
"sourceDateFrom": int(self.source_date_from) if self.source_date_from else None,
"sourceDateTo": int(self.source_date_to) if self.source_date_to else None,
"foundDateFrom": int(self.found_date_from) if self.found_date_from else None,
"foundDateTo": int(self.found_date_to) if self.found_date_to else None,
"assigned": self.assigned == "Assigned" if self.assigned else None,
"isFlagged": self.is_flagged == "Flagged" if self.is_flagged else None,
"isClosed": self.is_closed == "Closed" if self.is_closed else None,
"hasIoc": self.has_ioc,
}
)
@dataclass
class Image:
type: str
data: str
@dataclass
class ManualAlertParams:
title: str
found_date: str
description: str
type: str
sub_type: str
severity: str
source_type: int
source_network_type: int
source_url: int
source_date: int
images: [Image]
def to_dict(self) -> dict:
images = []
if self.images:
for image in self.images:
if not image:
continue
try:
images.append({"Type": image["type"], "Data": image["data"]})
except KeyError as e:
raise PluginException(cause="Wrong input parameter.", assistance=f"Wrong image: {e}.")
return clean(
{
"FoundDate": self.found_date,
"Details": {
"Title": self.title,
"Description": self.description,
"Type": self.type,
"SubType": self.sub_type,
"Severity": self.severity,
"Source": {
"Type": self.source_type,
"NetworkType": self.source_network_type,
"URL": self.source_url,
"Date": self.source_date,
},
"Images": images,
},
}
)
class IntSightsAPI:
def __init__(self, account_id: str, api_key: str, logger: Logger):
self.account_id = account_id
self.api_key = api_key
self.url = "https://api.intsights.com"
self.logger = logger
def get_indicator_by_value(self, ioc_value: str) -> dict:
return self.make_json_request("GET", f"public/v2/iocs/ioc-by-value?iocValue={ioc_value}")
def enrich_indicator(self, ioc_value: str) -> dict:
response = {}
for _ in range(0, 9999):
response = self.make_json_request("GET", f"public/v1/iocs/enrich/{ioc_value}")
if response.get("Status", "InProgress") in ["Done", "Failed"]:
break
time.sleep(5)
return response
def rescan_indicator(self, indicator_file_hash: str) -> dict:
return self.make_json_request("POST", "public/v1/iocs/rescan", json_data={"IocValue": indicator_file_hash})
def get_scan_status(self, task_id: str) -> dict:
return self.make_json_request("GET", f"public/v1/iocs/rescan/status/{task_id}")
def get_complete_alert_by_id(self, alert_id: str) -> dict:
return self.make_json_request("GET", f"public/v1/data/alerts/get-complete-alert/{alert_id}")
def takedown_request(self, alert_id: str, target: str) -> dict:
return self.make_json_request(
"PATCH", f"public/v1/data/alerts/takedown-request/{alert_id}", json_data={"Target": target}
)
def get_alerts(self, alert_params: AlertParams) -> list:
return self.make_request("GET", "public/v1/data/alerts/alerts-list", params=alert_params.to_dict()).json()
def add_manual_alert(self, manual_alert_params: ManualAlertParams) -> str:
return self.make_request("PUT", "public/v1/data/alerts/add-alert", json_data=manual_alert_params.to_dict()).text
def test_credentials(self) -> bool:
return self.make_request("HEAD", "public/v1/test-credentials").status_code == 200
def make_json_request(self, method: str, path: str, json_data: dict = None, params: dict = None) -> dict:
try:
response = self.make_request(method=method, path=path, json_data=json_data, params=params)
if response.status_code == 204:
return {}
json_response = response.json()
if json_response.get("Status") == "Invalid":
raise PluginException(
cause="IntSights returned an error response: ", assistance=f"{json_response.get('FailedReason')}."
)
return json_response
except json.decoder.JSONDecodeError as e:
raise PluginException(preset=PluginException.Preset.INVALID_JSON, data=e)
def make_request(self, method: str, path: str, json_data: dict = None, params: dict = None) -> requests.Response:
try:
response = requests.request(
method=method,
url=f"{self.url}/{path}",
headers={"Content-Type": "application/json"},
verify=True,
params=params,
json=json_data,
auth=HTTPBasicAuth(self.account_id, self.api_key),
)
if response.status_code == 401:
raise PluginException(preset=PluginException.Preset.USERNAME_PASSWORD, data=response.text)
if response.status_code == 403:
raise PluginException(preset=PluginException.Preset.API_KEY, data=response.text)
if response.status_code == 404:
raise PluginException(preset=PluginException.Preset.NOT_FOUND, data=response.text)
if 400 <= response.status_code < 500:
raise PluginException(
preset=PluginException.Preset.UNKNOWN,
data=response.text,
)
if response.status_code >= 500:
raise PluginException(preset=PluginException.Preset.SERVER_ERROR, data=response.text)
if 200 <= response.status_code < 300:
return response
raise PluginException(preset=PluginException.Preset.UNKNOWN, data=response.text)
except requests.exceptions.HTTPError as e:
raise PluginException(preset=PluginException.Preset.UNKNOWN, data=e)
| true
| true
|
790694eccfba29d3848b4013a9df93dfe29c7ce6
| 478
|
py
|
Python
|
pokus1/urls.py
|
zvolsky/example_translation
|
37a8e71df6bdc12dda96b9e84b5c1bb7a16087dc
|
[
"MIT"
] | null | null | null |
pokus1/urls.py
|
zvolsky/example_translation
|
37a8e71df6bdc12dda96b9e84b5c1bb7a16087dc
|
[
"MIT"
] | null | null | null |
pokus1/urls.py
|
zvolsky/example_translation
|
37a8e71df6bdc12dda96b9e84b5c1bb7a16087dc
|
[
"MIT"
] | null | null | null |
from django.urls import path
from django.views.i18n import JavaScriptCatalog
from .views import HelloWorldView
app_name = 'pokus1'  # makes it possible to use {% url 'pokus1:...' %}
# however, this may be deprecated; the same can be achieved in include(), see the project-level urls.py
urlpatterns = [
path('jsi18n/pokus1/', JavaScriptCatalog.as_view(), name='javascript-catalog'), # /pokus1/: unique app url, probably important
path('', HelloWorldView.as_view(), name='hello')
]
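# Usage sketch (assumption, not part of the original file): templates reverse
# these routes through the 'pokus1' namespace, e.g.
#   {% url 'pokus1:hello' %}
# and the JavaScript translation catalog is served from the jsi18n/pokus1/
# route (the final URL depends on how the project-level urls.py mounts this app).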
| 34.142857
| 131
| 0.730126
|
from django.urls import path
from django.views.i18n import JavaScriptCatalog
from .views import HelloWorldView
app_name = 'pokus1'
urlpatterns = [
path('jsi18n/pokus1/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
path('', HelloWorldView.as_view(), name='hello')
]
| true
| true
|
79069520ba8499a61f76c4f589920c516a30c1db
| 5,288
|
py
|
Python
|
utils/utils.py
|
suhasgupta791/mids-w251-final-project
|
aa1ef80685c6d9b5fc8a444e438078150cc0d96c
|
[
"Apache-2.0"
] | null | null | null |
utils/utils.py
|
suhasgupta791/mids-w251-final-project
|
aa1ef80685c6d9b5fc8a444e438078150cc0d96c
|
[
"Apache-2.0"
] | null | null | null |
utils/utils.py
|
suhasgupta791/mids-w251-final-project
|
aa1ef80685c6d9b5fc8a444e438078150cc0d96c
|
[
"Apache-2.0"
] | 1
|
2020-02-14T01:10:43.000Z
|
2020-02-14T01:10:43.000Z
|
#!/usr/bin/env python
# coding: utf-8
import random
import numpy as np
import sys, os
import pandas as pd
import torch
from torchsummary import summary
from torchtext import data
import torch.nn as nn
import torch.utils.data
from torch.utils.data import Dataset, TensorDataset,DataLoader, RandomSampler
from torch.utils.tensorboard import SummaryWriter
import torchvision
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
import pickle
import shutil
from sklearn.model_selection import train_test_split
def tokenize(tokenizer,text_array,max_seq_len=64,pad_to_max_length=True,add_special_tokens=True):
''' Returns tokenized IDs and attention mask
The transformers encode_plus method returns the following:
{
input_ids: list[int],
token_type_ids: list[int] if return_token_type_ids is True (default)
attention_mask: list[int] if return_attention_mask is True (default)
overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
}'''
all_tokens=[]
all_attention_mask=[]
for i,text in enumerate(tqdm(text_array)):
encoded = tokenizer.encode_plus(
text,
add_special_tokens=add_special_tokens,
max_length=max_seq_len,
pad_to_max_length=pad_to_max_length)
tokens = torch.tensor(encoded['input_ids'])
attention_mask = torch.tensor(encoded['attention_mask'])
all_tokens.append(tokens)
all_attention_mask.append(attention_mask)
return all_tokens,all_attention_mask
class CreateDataset(Dataset):
def __init__(self,data,atten_mask,labels,num_excl):
self._dataset = [[data[i],atten_mask[i],labels.values[i],num_excl.values[i]] for i in range(0,len(data))]
def __len__(self):
return len(self._dataset)
def __getitem__(self,idx):
return self._dataset[idx]
def createTestTrainSplit(all_train_df,test_size=0.2,seed=1234):
# Create train, validation dataset splits
    train_df, valid_df = train_test_split(all_train_df, test_size=test_size, random_state=seed)
train_data = train_df.text.fillna("DUMMY_VALUE")
train_labels = train_df.label
train_num_excl = train_df.num_exclamation_marks
valid_data = valid_df.text.fillna("DUMMY_VALUE")
valid_labels = valid_df.label
    valid_num_excl = valid_df.num_exclamation_marks
return train_data,train_labels,train_num_excl,valid_data,valid_labels,valid_num_excl
def saveTokensToFiles(TOKEN_DATA_PATH,
train_data_tokenized,train_attention_mask,
valid_data_tokenized,valid_attention_mask,
test_data_tokenized,test_attention_mask):
# save to files for later use
with open(TOKEN_DATA_PATH+'/train_data_tokenized.txt', 'wb') as fp:
pickle.dump(train_data_tokenized, fp)
with open(TOKEN_DATA_PATH+'/train_attention_mask.txt', 'wb') as fp:
pickle.dump(train_attention_mask, fp)
with open(TOKEN_DATA_PATH+'/valid_data_tokenized.txt', 'wb') as fp:
pickle.dump(valid_data_tokenized, fp)
with open(TOKEN_DATA_PATH+'/valid_attention_mask.txt', 'wb') as fp:
pickle.dump(valid_attention_mask, fp)
with open(TOKEN_DATA_PATH+'/test_data_tokenized.txt', 'wb') as fp:
pickle.dump(test_data_tokenized, fp)
with open(TOKEN_DATA_PATH+'/test_attention_mask.txt', 'wb') as fp:
pickle.dump(test_attention_mask, fp)
def loadTokensFromFiles(TOKEN_DATA_PATH,
train_data_tokenized,train_attention_mask,
valid_data_tokenized,valid_attention_mask,
test_data_tokenized,test_attention_mask):
# read back tokenized data
with open(TOKEN_DATA_PATH+'train_data_tokenized.txt', 'rb') as fp:
train_data_tokenized=pickle.load(fp)
with open(TOKEN_DATA_PATH+'train_attention_mask.txt', 'rb') as fp:
train_attention_mask=pickle.load(fp)
with open(TOKEN_DATA_PATH+'valid_data_tokenized.txt', 'rb') as fp:
valid_data_tokenized=pickle.load(fp)
with open(TOKEN_DATA_PATH+'valid_attention_mask.txt', 'rb') as fp:
valid_attention_mask=pickle.load(fp)
with open(TOKEN_DATA_PATH+'test_data_tokenized.txt', 'rb') as fp:
test_data_tokenized=pickle.load(fp)
with open(TOKEN_DATA_PATH+'test_attention_mask.txt', 'rb') as fp:
test_attention_mask=pickle.load(fp)
def generateDataLoader(dataset,batch_size,shuffle=False,num_workers=16,pin_memory=False,drop_last=True):
# print("Expected number of batches:", int(len(train_data_tokenized)/params['batch_size']))
sampler = RandomSampler(dataset)
dataLoader = torch.utils.data.DataLoader(dataset=dataset,
sampler=sampler,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers)
return dataLoader
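# Usage sketch (assumption, not part of the original module): wiring the helpers
# together with a HuggingFace tokenizer; the model name is a placeholder, and the
# dataframe columns (text, label, num_exclamation_marks) follow the code above.
#   from transformers import BertTokenizer
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   tr_x, tr_y, tr_excl, va_x, va_y, va_excl = createTestTrainSplit(all_train_df)
#   tokens, masks = tokenize(tokenizer, tr_x, max_seq_len=64)
#   train_ds = CreateDataset(tokens, masks, tr_y, tr_excl)
#   train_loader = generateDataLoader(train_ds, batch_size=32)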
| 45.982609
| 118
| 0.703101
|
import random
import numpy as np
import sys, os
import pandas as pd
import torch
from torchsummary import summary
from torchtext import data
import torch.nn as nn
import torch.utils.data
from torch.utils.data import Dataset, TensorDataset,DataLoader, RandomSampler
from torch.utils.tensorboard import SummaryWriter
import torchvision
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
import pickle
import shutil
from sklearn.model_selection import train_test_split
def tokenize(tokenizer,text_array,max_seq_len=64,pad_to_max_length=True,add_special_tokens=True):
all_tokens=[]
all_attention_mask=[]
for i,text in enumerate(tqdm(text_array)):
encoded = tokenizer.encode_plus(
text,
add_special_tokens=add_special_tokens,
max_length=max_seq_len,
pad_to_max_length=pad_to_max_length)
tokens = torch.tensor(encoded['input_ids'])
attention_mask = torch.tensor(encoded['attention_mask'])
all_tokens.append(tokens)
all_attention_mask.append(attention_mask)
return all_tokens,all_attention_mask
class CreateDataset(Dataset):
def __init__(self,data,atten_mask,labels,num_excl):
self._dataset = [[data[i],atten_mask[i],labels.values[i],num_excl.values[i]] for i in range(0,len(data))]
def __len__(self):
return len(self._dataset)
def __getitem__(self,idx):
return self._dataset[idx]
def createTestTrainSplit(all_train_df,test_size=0.2,seed=1234):
    train_df, valid_df = train_test_split(all_train_df, test_size=test_size, random_state=seed)
train_data = train_df.text.fillna("DUMMY_VALUE")
train_labels = train_df.label
train_num_excl = train_df.num_exclamation_marks
valid_data = valid_df.text.fillna("DUMMY_VALUE")
valid_labels = valid_df.label
    valid_num_excl = valid_df.num_exclamation_marks
return train_data,train_labels,train_num_excl,valid_data,valid_labels,valid_num_excl
def saveTokensToFiles(TOKEN_DATA_PATH,
train_data_tokenized,train_attention_mask,
valid_data_tokenized,valid_attention_mask,
test_data_tokenized,test_attention_mask):
with open(TOKEN_DATA_PATH+'/train_data_tokenized.txt', 'wb') as fp:
pickle.dump(train_data_tokenized, fp)
with open(TOKEN_DATA_PATH+'/train_attention_mask.txt', 'wb') as fp:
pickle.dump(train_attention_mask, fp)
with open(TOKEN_DATA_PATH+'/valid_data_tokenized.txt', 'wb') as fp:
pickle.dump(valid_data_tokenized, fp)
with open(TOKEN_DATA_PATH+'/valid_attention_mask.txt', 'wb') as fp:
pickle.dump(valid_attention_mask, fp)
with open(TOKEN_DATA_PATH+'/test_data_tokenized.txt', 'wb') as fp:
pickle.dump(test_data_tokenized, fp)
with open(TOKEN_DATA_PATH+'/test_attention_mask.txt', 'wb') as fp:
pickle.dump(test_attention_mask, fp)
def loadTokensFromFiles(TOKEN_DATA_PATH,
train_data_tokenized,train_attention_mask,
valid_data_tokenized,valid_attention_mask,
test_data_tokenized,test_attention_mask):
with open(TOKEN_DATA_PATH+'train_data_tokenized.txt', 'rb') as fp:
train_data_tokenized=pickle.load(fp)
with open(TOKEN_DATA_PATH+'train_attention_mask.txt', 'rb') as fp:
train_attention_mask=pickle.load(fp)
with open(TOKEN_DATA_PATH+'valid_data_tokenized.txt', 'rb') as fp:
valid_data_tokenized=pickle.load(fp)
with open(TOKEN_DATA_PATH+'valid_attention_mask.txt', 'rb') as fp:
valid_attention_mask=pickle.load(fp)
with open(TOKEN_DATA_PATH+'test_data_tokenized.txt', 'rb') as fp:
test_data_tokenized=pickle.load(fp)
with open(TOKEN_DATA_PATH+'test_attention_mask.txt', 'rb') as fp:
test_attention_mask=pickle.load(fp)
def generateDataLoader(dataset,batch_size,shuffle=False,num_workers=16,pin_memory=False,drop_last=True):
    sampler = RandomSampler(dataset) if shuffle else None
    dataLoader = torch.utils.data.DataLoader(dataset=dataset,
                                             sampler=sampler,
                                             batch_size=batch_size,
                                             num_workers=num_workers,
                                             pin_memory=pin_memory,
                                             drop_last=drop_last)
    return dataLoader
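# A minimal end-to-end sketch of how the helpers above compose, assuming a
# Hugging Face transformers tokenizer and a DataFrame with the text, label
# and num_exclamation_marks columns referenced in createTestTrainSplit().
# The package, file name and batch size are assumptions, not original code.
if __name__ == '__main__':
    from transformers import BertTokenizer  # assumed tokenizer source
    all_train_df = pd.read_csv('train.csv')  # assumed input file
    (train_data, train_labels, train_num_excl,
     valid_data, valid_labels, valid_num_excl) = createTestTrainSplit(all_train_df)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    train_tokens, train_mask = tokenize(tokenizer, train_data.values)
    train_dataset = CreateDataset(train_tokens, train_mask,
                                  train_labels, train_num_excl)
    train_loader = generateDataLoader(train_dataset, batch_size=32, shuffle=True)
    for tokens, attention_mask, labels, num_excl in train_loader:
        pass  # the training step (forward/backward) would go here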
| true
| true
|
790695b2230e378c5f81c23793b761a3a832279d
| 10,523
|
py
|
Python
|
python/papaya_i2chttpinst.py
|
papaya-iot/papaya-examples
|
ff52997170fbf8975b0b027169fb047762f2b94d
|
[
"BSD-3-Clause"
] | 1
|
2022-02-21T16:08:55.000Z
|
2022-02-21T16:08:55.000Z
|
python/papaya_i2chttpinst.py
|
papaya-iot/papaya-examples
|
ff52997170fbf8975b0b027169fb047762f2b94d
|
[
"BSD-3-Clause"
] | null | null | null |
python/papaya_i2chttpinst.py
|
papaya-iot/papaya-examples
|
ff52997170fbf8975b0b027169fb047762f2b94d
|
[
"BSD-3-Clause"
] | 2
|
2021-07-16T23:27:58.000Z
|
2022-01-10T12:30:27.000Z
|
"""
Copyright (C) 2020 Piek Solutions LLC
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import requests
import binascii
from codecs import getencoder
import time
def enforce_hex(addr):
    # note: str.lstrip('0x') would also strip leading zero digits
    # (e.g. '0a' -> 'a'), so remove the '0x' prefix explicitly
    if isinstance(addr, int) and addr < 256:
        return format(addr, 'x')
    elif isinstance(addr, str):
        return addr[2:] if addr.startswith('0x') else addr
    else:
        raise ValueError('addr must be hex string or int < 256')
def scanI2c(ip):
"""
scans devices on i2c bus
:return: list of hex string addresses present on i2c bus
"""
    try:
        req_url = 'http://' + ip + '/i2c/scan'
        resp = requests.get(url=req_url)
        return resp.content.decode('utf-8')
    except (requests.RequestException, ValueError):
        print("i2c failed scan")
class I2cHttpDevice:
def __init__(self, ip, dev_addr):
# device address should be hex string
self.url = 'http://' + ip + '/i2c/'
self.dev_addr = enforce_hex(dev_addr)
def read(self, reg_addr, len_read):
"""
read len_read bytes starting from register reg_addr
:param reg_addr: (str) register address to read in hex
:param len_read: (int) number of bytes to read
:return: bytestring of data
"""
assert len_read < 256, "num of bytes to read cannot exceed 255"
hex_reg_addr = enforce_hex(reg_addr)
        try:
            req_url = '%sread/%s/%s/%d' % (self.url, self.dev_addr, hex_reg_addr, len_read)
            resp = requests.get(url=req_url)
            return binascii.a2b_hex(resp.content)
        except (requests.RequestException, ValueError, binascii.Error):
            print("i2c failed read")
def write(self, reg_addr, data, len_data=0):
"""
:param reg_addr: (str) register address to write to in hex
:param data: (str or bytes) hex-encoded bytes, ie: '014ce8'
:param len_data: (optional int) dummy variable to support code portability
:return: None
"""
hex_reg_addr = enforce_hex(reg_addr)
if type(data) == bytes:
# to work across python 2+3:
# https://izziswift.com/whats-the-correct-way-to-convert-bytes-to-a-hex-string-in-python-3/
data = getencoder('hex')(data)[0].decode('ascii')
        try:
            req_url = '%swrite/%s/%s/%s' % (self.url, self.dev_addr, hex_reg_addr, data)
            requests.get(url=req_url)
        except (requests.RequestException, ValueError):
            print("i2c device 0x%s failed write" % self.dev_addr)
class BME280(I2cHttpDevice):
"""
Bosch BME280
https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bme280-ds002.pdf
code adapted from BME280.py, http://abyz.me.uk/rpi/pigpio/examples.html (2016-08-05)
This example shows that porting the original code to use the Wifi
Papaya Controller is straightforward and minimal
"""
_calib00 = 0x88
_T1 = 0x88 - _calib00
_T2 = 0x8A - _calib00
_T3 = 0x8C - _calib00
_P1 = 0x8E - _calib00
_P2 = 0x90 - _calib00
_P3 = 0x92 - _calib00
_P4 = 0x94 - _calib00
_P5 = 0x96 - _calib00
_P6 = 0x98 - _calib00
_P7 = 0x9A - _calib00
_P8 = 0x9C - _calib00
_P9 = 0x9E - _calib00
_H1 = 0xA1 - _calib00
_chip_id = 0xD0
_reset = 0xE0
_calib26 = 0xE1
_H2 = 0xE1 - _calib26
_H3 = 0xE3 - _calib26
_xE4 = 0xE4 - _calib26
_xE5 = 0xE5 - _calib26
_xE6 = 0xE6 - _calib26
_H6 = 0xE7 - _calib26
_ctrl_hum = 0xF2
_status = 0xF3
_ctrl_meas = 0xF4
_config = 0xF5
_rawdata = 0xF7
_press = 0xF7
_temp = 0xFA
_humid = 0xFD
_p_msb = 0xF7 - _rawdata
_p_lsb = 0xF8 - _rawdata
_p_xlsb = 0xF9 - _rawdata
_t_msb = 0xFA - _rawdata
_t_lsb = 0xFB - _rawdata
_t_xlsb = 0xFC - _rawdata
_h_msb = 0xFD - _rawdata
_h_lsb = 0xFE - _rawdata
_os_ms = [0, 1, 2, 4, 8, 16]
    def __init__(self, ip, dev_addr, sampling):
        super().__init__(ip, dev_addr)
# additional initialization procedure
self.sampling = sampling
self._load_calibration()
self.measure_delay = self._measurement_time(sampling, sampling, sampling)
self.t_fine = 0.0
def _s16(self, _calib, off):
v = self._u16(_calib, off)
if v > 32767:
v -= 65536
return v
def _u16(self, _calib, off):
return _calib[off] | (_calib[off + 1] << 8)
def _u8(self, _calib, off):
return _calib[off]
def _s8(self, _calib, off):
v = self._u8(_calib, off)
if v > 127:
v -= 256
return v
def _measurement_time(self, os_temp, os_press, os_hum):
ms = ((1.25 + 2.3 * self._os_ms[os_temp]) +
(0.575 + 2.3 * self._os_ms[os_press]) +
(0.575 + 2.3 * self._os_ms[os_hum]))
return ms / 1000.0
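        # Worked example: with x1 oversampling on all three channels this
        # is (1.25 + 2.3) + (0.575 + 2.3) + (0.575 + 2.3) ms = 9.3 ms,
        # returned as ~0.0093 s of settling time.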
def _load_calibration(self):
d1 = self.read(self._calib00, 26)
self.T1 = self._u16(d1, self._T1)
self.T2 = self._s16(d1, self._T2)
self.T3 = self._s16(d1, self._T3)
self.P1 = self._u16(d1, self._P1)
self.P2 = self._s16(d1, self._P2)
self.P3 = self._s16(d1, self._P3)
self.P4 = self._s16(d1, self._P4)
self.P5 = self._s16(d1, self._P5)
self.P6 = self._s16(d1, self._P6)
self.P7 = self._s16(d1, self._P7)
self.P8 = self._s16(d1, self._P8)
self.P9 = self._s16(d1, self._P9)
self.H1 = self._u8(d1, self._H1)
d2 = self.read(self._calib26, 7)
self.H2 = self._s16(d2, self._H2)
self.H3 = self._u8(d2, self._H3)
t = self._u8(d2, self._xE5)
t_l = t & 15
t_h = (t >> 4) & 15
self.H4 = (self._u8(d2, self._xE4) << 4) | t_l
if self.H4 > 2047:
self.H4 -= 4096
self.H5 = (self._u8(d2, self._xE6) << 4) | t_h
if self.H5 > 2047:
self.H5 -= 4096
self.H6 = self._s8(d2, self._H6)
def _read_raw_data(self):
# write control bytes for oversampling config
self.write(self._ctrl_hum, bytes([self.sampling]), 1)
self.write(self._ctrl_meas, bytes([self.sampling << 5 | self.sampling << 2 | 1]), 1)
time.sleep(self.measure_delay)
# read 8 bytes starting from register self._rawdata
d = self.read(self._rawdata, 8)
# print(''.join(format(x, '02x') for x in d))
msb = d[self._t_msb]
lsb = d[self._t_lsb]
xlsb = d[self._t_xlsb]
raw_t = ((msb << 16) | (lsb << 8) | xlsb) >> 4
msb = d[self._p_msb]
lsb = d[self._p_lsb]
xlsb = d[self._p_xlsb]
raw_p = ((msb << 16) | (lsb << 8) | xlsb) >> 4
msb = d[self._h_msb]
lsb = d[self._h_lsb]
raw_h = (msb << 8) | lsb
return raw_t, raw_p, raw_h
def read_temp(self):
# write measurement control byte
self.write(self._ctrl_meas, bytes([self.sampling << 5 | self.sampling << 2 | 1]), 1)
time.sleep(self.measure_delay)
# read 3 bytes starting from register self._temp
d = self.read(self._temp, 3)
# print(''.join(format(x, '02x') for x in d))
msb, lsb, xlsb = d
raw_t = ((msb << 16) | (lsb << 8) | xlsb) >> 4
var1 = (raw_t / 16384.0 - (self.T1) / 1024.0) * float(self.T2)
var2 = (((raw_t) / 131072.0 - (self.T1) / 8192.0) *
((raw_t) / 131072.0 - (self.T1) / 8192.0)) * (self.T3)
self.t_fine = var1 + var2
t = (var1 + var2) / 5120.0
return t
def read_data(self):
raw_t, raw_p, raw_h = self._read_raw_data()
var1 = (raw_t / 16384.0 - (self.T1) / 1024.0) * float(self.T2)
var2 = (((raw_t) / 131072.0 - (self.T1) / 8192.0) *
((raw_t) / 131072.0 - (self.T1) / 8192.0)) * (self.T3)
self.t_fine = var1 + var2
t = (var1 + var2) / 5120.0
var1 = (self.t_fine / 2.0) - 64000.0
var2 = var1 * var1 * self.P6 / 32768.0
var2 = var2 + (var1 * self.P5 * 2.0)
var2 = (var2 / 4.0) + (self.P4 * 65536.0)
var1 = ((self.P3 * var1 * var1 / 524288.0) + (self.P2 * var1)) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self.P1
if var1 != 0.0:
p = 1048576.0 - raw_p
p = (p - (var2 / 4096.0)) * 6250.0 / var1
var1 = self.P9 * p * p / 2147483648.0
var2 = p * self.P8 / 32768.0
p = p + (var1 + var2 + self.P7) / 16.0
else:
p = 0
h = self.t_fine - 76800.0
h = ((raw_h - ((self.H4) * 64.0 + (self.H5) / 16384.0 * h)) *
((self.H2) / 65536.0 * (1.0 + (self.H6) / 67108864.0 * h *
(1.0 + (self.H3) / 67108864.0 * h))))
h = h * (1.0 - self.H1 * h / 524288.0)
if h > 100.0:
h = 100.0
elif h < 0.0:
h = 0.0
return t, p, h
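# Short usage sketch. The controller IP and the 0x76 sensor address are
# illustrative assumptions, not values taken from this file.
if __name__ == '__main__':
    ip = '192.168.1.100'          # assumed Papaya controller address
    print(scanI2c(ip))            # list device addresses on the bus
    sensor = BME280(ip, '76', 1)  # x1 oversampling
    t, p, h = sensor.read_data()
    print('%.2f C, %.1f Pa, %.1f %%RH' % (t, p, h))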
| 33.512739
| 756
| 0.566949
|
import requests
import binascii
from codecs import getencoder
import time
def enforce_hex(addr):
    if isinstance(addr, int) and addr < 256:
        return format(addr, 'x')
    elif isinstance(addr, str):
        return addr[2:] if addr.startswith('0x') else addr
    else:
        raise ValueError('addr must be hex string or int < 256')
def scanI2c(ip):
    try:
        req_url = 'http://' + ip + '/i2c/scan'
        resp = requests.get(url=req_url)
        return resp.content.decode('utf-8')
    except (requests.RequestException, ValueError):
        print("i2c failed scan")
class I2cHttpDevice:
def __init__(self, ip, dev_addr):
self.url = 'http://' + ip + '/i2c/'
self.dev_addr = enforce_hex(dev_addr)
def read(self, reg_addr, len_read):
assert len_read < 256, "num of bytes to read cannot exceed 255"
hex_reg_addr = enforce_hex(reg_addr)
        try:
            req_url = '%sread/%s/%s/%d' % (self.url, self.dev_addr, hex_reg_addr, len_read)
            resp = requests.get(url=req_url)
            return binascii.a2b_hex(resp.content)
        except (requests.RequestException, ValueError, binascii.Error):
            print("i2c failed read")
def write(self, reg_addr, data, len_data=0):
hex_reg_addr = enforce_hex(reg_addr)
if type(data) == bytes:
data = getencoder('hex')(data)[0].decode('ascii')
        try:
            req_url = '%swrite/%s/%s/%s' % (self.url, self.dev_addr, hex_reg_addr, data)
            requests.get(url=req_url)
        except (requests.RequestException, ValueError):
            print("i2c device 0x%s failed write" % self.dev_addr)
class BME280(I2cHttpDevice):
_calib00 = 0x88
_T1 = 0x88 - _calib00
_T2 = 0x8A - _calib00
_T3 = 0x8C - _calib00
_P1 = 0x8E - _calib00
_P2 = 0x90 - _calib00
_P3 = 0x92 - _calib00
_P4 = 0x94 - _calib00
_P5 = 0x96 - _calib00
_P6 = 0x98 - _calib00
_P7 = 0x9A - _calib00
_P8 = 0x9C - _calib00
_P9 = 0x9E - _calib00
_H1 = 0xA1 - _calib00
_chip_id = 0xD0
_reset = 0xE0
_calib26 = 0xE1
_H2 = 0xE1 - _calib26
_H3 = 0xE3 - _calib26
_xE4 = 0xE4 - _calib26
_xE5 = 0xE5 - _calib26
_xE6 = 0xE6 - _calib26
_H6 = 0xE7 - _calib26
_ctrl_hum = 0xF2
_status = 0xF3
_ctrl_meas = 0xF4
_config = 0xF5
_rawdata = 0xF7
_press = 0xF7
_temp = 0xFA
_humid = 0xFD
_p_msb = 0xF7 - _rawdata
_p_lsb = 0xF8 - _rawdata
_p_xlsb = 0xF9 - _rawdata
_t_msb = 0xFA - _rawdata
_t_lsb = 0xFB - _rawdata
_t_xlsb = 0xFC - _rawdata
_h_msb = 0xFD - _rawdata
_h_lsb = 0xFE - _rawdata
_os_ms = [0, 1, 2, 4, 8, 16]
    def __init__(self, ip, dev_addr, sampling):
        super().__init__(ip, dev_addr)
self.sampling = sampling
self._load_calibration()
self.measure_delay = self._measurement_time(sampling, sampling, sampling)
self.t_fine = 0.0
def _s16(self, _calib, off):
v = self._u16(_calib, off)
if v > 32767:
v -= 65536
return v
def _u16(self, _calib, off):
return _calib[off] | (_calib[off + 1] << 8)
def _u8(self, _calib, off):
return _calib[off]
def _s8(self, _calib, off):
v = self._u8(_calib, off)
if v > 127:
v -= 256
return v
def _measurement_time(self, os_temp, os_press, os_hum):
ms = ((1.25 + 2.3 * self._os_ms[os_temp]) +
(0.575 + 2.3 * self._os_ms[os_press]) +
(0.575 + 2.3 * self._os_ms[os_hum]))
return ms / 1000.0
def _load_calibration(self):
d1 = self.read(self._calib00, 26)
self.T1 = self._u16(d1, self._T1)
self.T2 = self._s16(d1, self._T2)
self.T3 = self._s16(d1, self._T3)
self.P1 = self._u16(d1, self._P1)
self.P2 = self._s16(d1, self._P2)
self.P3 = self._s16(d1, self._P3)
self.P4 = self._s16(d1, self._P4)
self.P5 = self._s16(d1, self._P5)
self.P6 = self._s16(d1, self._P6)
self.P7 = self._s16(d1, self._P7)
self.P8 = self._s16(d1, self._P8)
self.P9 = self._s16(d1, self._P9)
self.H1 = self._u8(d1, self._H1)
d2 = self.read(self._calib26, 7)
self.H2 = self._s16(d2, self._H2)
self.H3 = self._u8(d2, self._H3)
t = self._u8(d2, self._xE5)
t_l = t & 15
t_h = (t >> 4) & 15
self.H4 = (self._u8(d2, self._xE4) << 4) | t_l
if self.H4 > 2047:
self.H4 -= 4096
self.H5 = (self._u8(d2, self._xE6) << 4) | t_h
if self.H5 > 2047:
self.H5 -= 4096
self.H6 = self._s8(d2, self._H6)
def _read_raw_data(self):
self.write(self._ctrl_hum, bytes([self.sampling]), 1)
self.write(self._ctrl_meas, bytes([self.sampling << 5 | self.sampling << 2 | 1]), 1)
time.sleep(self.measure_delay)
d = self.read(self._rawdata, 8)
msb = d[self._t_msb]
lsb = d[self._t_lsb]
xlsb = d[self._t_xlsb]
raw_t = ((msb << 16) | (lsb << 8) | xlsb) >> 4
msb = d[self._p_msb]
lsb = d[self._p_lsb]
xlsb = d[self._p_xlsb]
raw_p = ((msb << 16) | (lsb << 8) | xlsb) >> 4
msb = d[self._h_msb]
lsb = d[self._h_lsb]
raw_h = (msb << 8) | lsb
return raw_t, raw_p, raw_h
def read_temp(self):
self.write(self._ctrl_meas, bytes([self.sampling << 5 | self.sampling << 2 | 1]), 1)
time.sleep(self.measure_delay)
d = self.read(self._temp, 3)
msb, lsb, xlsb = d
raw_t = ((msb << 16) | (lsb << 8) | xlsb) >> 4
var1 = (raw_t / 16384.0 - (self.T1) / 1024.0) * float(self.T2)
var2 = (((raw_t) / 131072.0 - (self.T1) / 8192.0) *
((raw_t) / 131072.0 - (self.T1) / 8192.0)) * (self.T3)
self.t_fine = var1 + var2
t = (var1 + var2) / 5120.0
return t
def read_data(self):
raw_t, raw_p, raw_h = self._read_raw_data()
var1 = (raw_t / 16384.0 - (self.T1) / 1024.0) * float(self.T2)
var2 = (((raw_t) / 131072.0 - (self.T1) / 8192.0) *
((raw_t) / 131072.0 - (self.T1) / 8192.0)) * (self.T3)
self.t_fine = var1 + var2
t = (var1 + var2) / 5120.0
var1 = (self.t_fine / 2.0) - 64000.0
var2 = var1 * var1 * self.P6 / 32768.0
var2 = var2 + (var1 * self.P5 * 2.0)
var2 = (var2 / 4.0) + (self.P4 * 65536.0)
var1 = ((self.P3 * var1 * var1 / 524288.0) + (self.P2 * var1)) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self.P1
if var1 != 0.0:
p = 1048576.0 - raw_p
p = (p - (var2 / 4096.0)) * 6250.0 / var1
var1 = self.P9 * p * p / 2147483648.0
var2 = p * self.P8 / 32768.0
p = p + (var1 + var2 + self.P7) / 16.0
else:
p = 0
h = self.t_fine - 76800.0
h = ((raw_h - ((self.H4) * 64.0 + (self.H5) / 16384.0 * h)) *
((self.H2) / 65536.0 * (1.0 + (self.H6) / 67108864.0 * h *
(1.0 + (self.H3) / 67108864.0 * h))))
h = h * (1.0 - self.H1 * h / 524288.0)
if h > 100.0:
h = 100.0
elif h < 0.0:
h = 0.0
return t, p, h
| true
| true
|
790696471fbebd66373d6da188317cd690d56dce
| 662
|
py
|
Python
|
pucadmin/schools/migrations/0006_alter_school_courses_offered.py
|
JobDoesburg/PUC-admin
|
ab61478cbf1cb0ddb57661a7508e70b23642810b
|
[
"MIT"
] | null | null | null |
pucadmin/schools/migrations/0006_alter_school_courses_offered.py
|
JobDoesburg/PUC-admin
|
ab61478cbf1cb0ddb57661a7508e70b23642810b
|
[
"MIT"
] | null | null | null |
pucadmin/schools/migrations/0006_alter_school_courses_offered.py
|
JobDoesburg/PUC-admin
|
ab61478cbf1cb0ddb57661a7508e70b23642810b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-11-29 19:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("organisations", "0004_auto_20210718_1147"),
("schools", "0005_alter_school_courses_offered"),
]
operations = [
migrations.AlterField(
model_name="school",
name="courses_offered",
field=models.ManyToManyField(
blank=True,
related_name="schools",
related_query_name="schools",
to="organisations.Course",
verbose_name="courses",
),
),
]
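# For context, a sketch of the School model field this migration leaves in
# place. The field options mirror the AlterField above; the surrounding
# model definition is assumed.
#
# class School(models.Model):
#     courses_offered = models.ManyToManyField(
#         'organisations.Course',
#         blank=True,
#         related_name='schools',
#         related_query_name='schools',
#         verbose_name='courses',
#     )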
| 25.461538
| 57
| 0.563444
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("organisations", "0004_auto_20210718_1147"),
("schools", "0005_alter_school_courses_offered"),
]
operations = [
migrations.AlterField(
model_name="school",
name="courses_offered",
field=models.ManyToManyField(
blank=True,
related_name="schools",
related_query_name="schools",
to="organisations.Course",
verbose_name="courses",
),
),
]
| true
| true
|
790698c6e82d67533cbc201fa4f1d367a1d8c25f
| 1,495
|
py
|
Python
|
setup.py
|
greyside/django-floppyforms
|
4116f4dbfef631fa79c5b475444a111237939d40
|
[
"BSD-3-Clause"
] | 1
|
2020-10-05T21:51:21.000Z
|
2020-10-05T21:51:21.000Z
|
setup.py
|
greyside/django-floppyforms
|
4116f4dbfef631fa79c5b475444a111237939d40
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
greyside/django-floppyforms
|
4116f4dbfef631fa79c5b475444a111237939d40
|
[
"BSD-3-Clause"
] | 1
|
2020-10-05T21:51:27.000Z
|
2020-10-05T21:51:27.000Z
|
# -*- coding: utf-8 -*-
import codecs
import re
from os import path
from setuptools import setup, find_packages
def read(*parts):
return codecs.open(path.join(path.dirname(__file__), *parts),
encoding='utf-8').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-floppyforms',
version=find_version('floppyforms', '__init__.py'),
author='Bruno Renié',
author_email='bruno@renie.fr',
packages=find_packages(exclude=["tests.*", "tests"]),
include_package_data=True,
url='https://github.com/gregmuellegger/django-floppyforms',
license='BSD licence, see LICENSE file',
description='Full control of form rendering in the templates',
long_description='\n\n'.join((
read('README.rst'),
read('CHANGES.rst'))),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
zip_safe=False,
)
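# Quick check of the version regex used by find_version(), run against a
# made-up module body (the sample string is an assumption):
#
#     sample = "__version__ = '1.9.0'\n"
#     match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", sample, re.M)
#     match.group(1)  # -> '1.9.0'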
| 31.145833
| 68
| 0.628094
|
import codecs
import re
from os import path
from setuptools import setup, find_packages
def read(*parts):
return codecs.open(path.join(path.dirname(__file__), *parts),
encoding='utf-8').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-floppyforms',
version=find_version('floppyforms', '__init__.py'),
author='Bruno Renié',
author_email='bruno@renie.fr',
packages=find_packages(exclude=["tests.*", "tests"]),
include_package_data=True,
url='https://github.com/gregmuellegger/django-floppyforms',
license='BSD licence, see LICENSE file',
description='Full control of form rendering in the templates',
long_description='\n\n'.join((
read('README.rst'),
read('CHANGES.rst'))),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
],
zip_safe=False,
)
| true
| true
|
79069ace969f4177408f757c77d45f495281853c
| 2,282
|
py
|
Python
|
convertmask/utils/mask2json_script.py
|
wwdok/mask2json
|
403c6b3df677185d5951239d13187b55bda6465a
|
[
"Apache-2.0"
] | 27
|
2020-06-11T01:47:57.000Z
|
2022-03-18T01:47:59.000Z
|
convertmask/utils/mask2json_script.py
|
wwdok/mask2json
|
403c6b3df677185d5951239d13187b55bda6465a
|
[
"Apache-2.0"
] | 14
|
2020-06-11T01:50:41.000Z
|
2022-03-18T13:11:04.000Z
|
convertmask/utils/mask2json_script.py
|
wwdok/mask2json
|
403c6b3df677185d5951239d13187b55bda6465a
|
[
"Apache-2.0"
] | 5
|
2020-10-21T01:51:59.000Z
|
2022-02-28T08:27:32.000Z
|
'''
language: python
Description:
version: beta
Author: xiaoshuyui
Date: 2020-07-10 10:33:39
LastEditors: xiaoshuyui
LastEditTime: 2021-01-05 10:21:49
'''
import glob
import os
from tqdm import tqdm
from convertmask.utils.methods import getMultiShapes
from convertmask.utils.methods.logger import logger
def getJsons(imgPath, maskPath, savePath, yamlPath=''):
"""
imgPath: origin image path \n
maskPath : mask image path \n
savePath : json file save path \n
>>> getJsons(path-to-your-imgs,path-to-your-maskimgs,path-to-your-jsonfiles)
"""
logger.info("currently, only *.jpg supported")
if os.path.isfile(imgPath):
getMultiShapes.getMultiShapes(imgPath, maskPath, savePath, yamlPath)
elif os.path.isdir(imgPath):
oriImgs = glob.glob(imgPath + os.sep + '*.jpg')
maskImgs = glob.glob(maskPath + os.sep + '*.jpg')
for i in tqdm(oriImgs):
i_mask = i.replace(imgPath, maskPath)
if os.path.exists(i_mask):
# print(i)
getMultiShapes.getMultiShapes(i, i_mask, savePath, yamlPath)
else:
logger.warning('corresponding mask image not found!')
continue
else:
        logger.error('input error. got [{},{},{},{}]. a file may be missing.'.format(
            imgPath, maskPath, savePath, yamlPath))
logger.info('Done! See here. {}'.format(savePath))
def getXmls(imgPath, maskPath, savePath):
logger.info("currently, only *.jpg supported")
if os.path.isfile(imgPath):
getMultiShapes.getMultiObjs_voc(imgPath, maskPath, savePath)
elif os.path.isdir(imgPath):
oriImgs = glob.glob(imgPath + os.sep + '*.jpg')
maskImgs = glob.glob(maskPath + os.sep + '*.jpg')
for i in tqdm(oriImgs):
i_mask = i.replace(imgPath, maskPath)
# print(i)
if os.path.exists(i_mask):
getMultiShapes.getMultiObjs_voc(i, i_mask, savePath)
else:
logger.warning('corresponding mask image not found!')
continue
else:
        logger.error('input error. got [{},{},{}]. a file may be missing.'.format(
            imgPath, maskPath, savePath))
logger.info('Done! See here. {}'.format(savePath))
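# Hypothetical invocation, assuming parallel folders of original .jpg images
# and mask .jpg images plus output folders (all paths are illustrative).
# Calls are left commented so the module stays side-effect free on import:
#
#     getJsons('data/imgs', 'data/masks', 'data/json')    # labelme-style json
#     getXmls('data/imgs', 'data/masks', 'data/voc_xml')  # Pascal VOC xml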
| 31.694444
| 82
| 0.620508
|
import glob
import os
from tqdm import tqdm
from convertmask.utils.methods import getMultiShapes
from convertmask.utils.methods.logger import logger
def getJsons(imgPath, maskPath, savePath, yamlPath=''):
logger.info("currently, only *.jpg supported")
if os.path.isfile(imgPath):
getMultiShapes.getMultiShapes(imgPath, maskPath, savePath, yamlPath)
elif os.path.isdir(imgPath):
oriImgs = glob.glob(imgPath + os.sep + '*.jpg')
maskImgs = glob.glob(maskPath + os.sep + '*.jpg')
for i in tqdm(oriImgs):
i_mask = i.replace(imgPath, maskPath)
if os.path.exists(i_mask):
getMultiShapes.getMultiShapes(i, i_mask, savePath, yamlPath)
else:
logger.warning('corresponding mask image not found!')
continue
else:
        logger.error('input error. got [{},{},{},{}]. a file may be missing.'.format(
            imgPath, maskPath, savePath, yamlPath))
logger.info('Done! See here. {}'.format(savePath))
def getXmls(imgPath, maskPath, savePath):
logger.info("currently, only *.jpg supported")
if os.path.isfile(imgPath):
getMultiShapes.getMultiObjs_voc(imgPath, maskPath, savePath)
elif os.path.isdir(imgPath):
oriImgs = glob.glob(imgPath + os.sep + '*.jpg')
maskImgs = glob.glob(maskPath + os.sep + '*.jpg')
for i in tqdm(oriImgs):
i_mask = i.replace(imgPath, maskPath)
if os.path.exists(i_mask):
getMultiShapes.getMultiObjs_voc(i, i_mask, savePath)
else:
logger.warning('corresponding mask image not found!')
continue
else:
        logger.error('input error. got [{},{},{}]. a file may be missing.'.format(
            imgPath, maskPath, savePath))
logger.info('Done! See here. {}'.format(savePath))
| true
| true
|
79069b575b0203071f15c22e0efcb3861cada834
| 40,919
|
py
|
Python
|
scripts/linters/js_ts_linter_test.py
|
EishaMazhar/oppia
|
ab4f3cf20764b27f567798e4b1184471aaf7f73b
|
[
"Apache-2.0"
] | 1
|
2020-10-11T07:56:09.000Z
|
2020-10-11T07:56:09.000Z
|
scripts/linters/js_ts_linter_test.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2020-05-27T06:08:17.000Z
|
2020-05-27T06:08:17.000Z
|
scripts/linters/js_ts_linter_test.py
|
gitter-badger/oppia
|
7d8e659264582d7ce74bc6c139e597b82bca0e04
|
[
"Apache-2.0"
] | 1
|
2020-11-05T12:26:10.000Z
|
2020-11-05T12:26:10.000Z
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/linters/js_ts_linter.py."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import multiprocessing
import os
import shutil
import subprocess
import sys
from core.tests import test_utils
from . import js_ts_linter
from . import pre_commit_linter
from .. import common
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, os.pardir, 'oppia_tools')
ESPRIMA_PATH = os.path.join(
OPPIA_TOOLS_DIR, 'esprima-%s' % common.ESPRIMA_VERSION)
sys.path.insert(1, ESPRIMA_PATH)
import esprima # isort:skip pylint: disable=wrong-import-order, wrong-import-position
NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = pre_commit_linter.FileCache()
FILE_CACHE = NAME_SPACE.files
LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
VALID_JS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.js')
VALID_TS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.ts')
VALID_APP_CONSTANTS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_app.constants.ts')
VALID_APP_CONSTANTS_AJS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_app.constants.ajs.ts')
VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_constant_outside_class.constants.ts')
VALID_CONSTANT_OUTSIDE_CLASS_AJS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_constant_outside_class.constants.ajs.ts')
VALID_BACKEND_API_SERVICE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid-backend-api.service.ts')
EXTRA_JS_FILEPATH = os.path.join('core', 'templates', 'demo.js')
INVALID_COMPONENT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_two_component.ts')
INVALID_SCOPE_TRUE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_scope_true.ts')
INVALID_SCOPE_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'invalid_scope.ts')
INVALID_SORTED_DEPENDENCIES_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_sorted_dependencies.ts')
INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_line_breaks_in_controller_dependencies.ts')
INVALID_CONSTANT_IN_TS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_constant_in_ts_file.ts')
INVALID_CONSTANT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_duplicate.constants.ts')
INVALID_CONSTANT_AJS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_duplicate.constants.ajs.ts')
INVALID_AS_CONST_CONSTANTS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_as_const.constants.ts')
INVALID_HTTP_CLIENT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_http_client_used.ts')
INVALID_FORMATTED_COMMENT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_comments.ts')
INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK = os.path.join(
LINTER_TESTS_DIR, 'invalid_directive_without_return.ts')
INVALID_TS_IGNORE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_ts_ignore.ts')
VALID_TS_IGNORE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_ts_ignore.ts')
INVALID_TS_EXPECT_ERROR_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_ts_expect_error.ts')
VALID_TS_EXPECT_ERROR_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_ts_expect_error.spec.ts')
VALID_IGNORED_SERVICE_PATH = os.path.join(
LINTER_TESTS_DIR, 'valid_ignored.service.ts')
VALID_UNLISTED_SERVICE_PATH = os.path.join(
LINTER_TESTS_DIR, 'valid_unlisted.service.ts')
# Note: Almost all test functions have a subprocess call. This call is to mock
# the compile function used in js_ts_linter. The tests require fewer files to
# be compiled instead of all files as done in js_ts_linter. Mocking the
# compile method reduces the compile time as fewer files are compiled
# thereby making the tests run faster.
class JsTsLintTests(test_utils.LinterTestBase):
"""Tests for js_ts_linter file."""
def validate(self, lint_task_report, expected_messages, failed_count):
"""Assert linter output messages with expected messages."""
for stdout in lint_task_report:
if stdout.failed:
for message in expected_messages:
self.assert_same_list_elements(
[message], stdout.trimmed_messages)
self.assert_failed_messages_count(
stdout.get_report(), failed_count)
else:
continue
def test_validate_and_parse_js_and_ts_files_with_exception(self):
def mock_parse_script(unused_file_content, comment): # pylint: disable=unused-argument
raise Exception('Exception raised from parse_script()')
esprima_swap = self.swap(esprima, 'parseScript', mock_parse_script)
with esprima_swap, self.assertRaisesRegexp(
Exception, r'Exception raised from parse_script\(\)'):
js_ts_linter.JsTsLintChecksManager(
[], [VALID_JS_FILEPATH], FILE_CACHE).perform_all_lint_checks()
def test_check_extra_js_file_found(self):
def mock_readlines(unused_self, unused_filepath):
return ('var a = 10;\n',)
def mock_read(unused_self, unused_filepath):
return 'var a = 10;\n'
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
read_swap = self.swap(
pre_commit_linter.FileCache, 'read', mock_read)
with readlines_swap, read_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[EXTRA_JS_FILEPATH], [], FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['Found extra .js file']
expected_messages.extend([
'If you want the above files to be present as js files, add '
'them to the list JS_FILEPATHS_NOT_TO_BUILD in build.py. '
'Otherwise, rename them to .ts'])
self.validate(lint_task_report, expected_messages, 1)
def test_check_js_and_ts_component_name_and_count_with_two_component(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_COMPONENT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_COMPONENT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that there is exactly one component '
'in the file.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_directive_scope_with_true_value(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_SCOPE_TRUE_FILEPATH,
INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[],
[INVALID_SCOPE_TRUE_FILEPATH,
INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that baseContent directive in ',
' file does not have scope set to true.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_directive_scope_with_no_scope(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_SCOPE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_SCOPE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that baseContent directive in ',
' file has a scope: {}.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_sorted_dependencies_with_unsorted_dependencies(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_SORTED_DEPENDENCIES_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_SORTED_DEPENDENCIES_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that in SuggestionModalForCreatorViewController'
' in file', 'the injected dependencies should be in the '
'following manner: dollar imports, regular imports and '
'constant imports, all in sorted order.']
expected_messages.extend([
'Please ensure that in SuggestionModalForCreatorViewController'
' in file ', 'the stringfied dependencies should be in the '
'following manner: dollar imports, regular imports and '
'constant imports, all in sorted order.'])
self.validate(lint_task_report, expected_messages, 1)
def test_match_line_breaks_in_controller_dependencies(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH],
FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that in file',
'the line breaks pattern between the dependencies mentioned as'
' strings:\n[$rootScope,$window,BackgroundMaskService,\n'
'SidebarStatusService,UrlService]\nand the dependencies '
'mentioned as function parameters: \n($rootScope,$window,\n'
'BackgroundMaskService,\nSidebarStatusService,UrlService)\n'
'for the corresponding controller should exactly match.'
]
self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_AJS_FILEPATH,
INVALID_CONSTANT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['Duplicate constant declaration found.']
expected_messages.extend([
'Please ensure that the constant ADMIN_TABS is initialized '
'from the value from the corresponding Angular constants file '
'(the *.constants.ts file). Please create one in the Angular '
'constants file if it does not exist there.'
])
self.validate(lint_task_report, expected_messages, 1)
def test_check_duplicate_constant_declaration_in_separate_files(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_IN_TS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_IN_TS_FILEPATH,
INVALID_CONSTANT_IN_TS_FILEPATH],
FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'The constant \'ADMIN_ROLE_HANDLER_URL\' is already declared '
'in', 'Please import the file where the constant is declared '
'or rename the constant.']
self.validate(lint_task_report, expected_messages, 1)
def test_duplicate_constants_in_ajs_file(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_AJS_FILEPATH,
INVALID_CONSTANT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['Duplicate constant declaration found.']
self.validate(lint_task_report, expected_messages, 1)
def test_as_const_in_constant_files(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_AS_CONST_CONSTANTS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_AS_CONST_CONSTANTS_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'This constants file doesn\'t have \'as const\' at the end.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration_outside_class(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_CONSTANT_OUTSIDE_CLASS_AJS_FILEPATH,
VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS Constants declaration check passed']
self.validate(lint_task_report, expected_messages, 1)
def test_check_app_constants_declaration(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_APP_CONSTANTS_AJS_FILEPATH,
VALID_APP_CONSTANTS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_APP_CONSTANTS_FILEPATH], FILE_CACHE,
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS Constants declaration check passed']
self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration_in_non_constant_file(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_IN_TS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_IN_TS_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Constant declaration found at line 19. Please declare the '
'constants in a separate constants file.']
self.validate(lint_task_report, expected_messages, 1)
def test_third_party_linter(self):
lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
[INVALID_SORTED_DEPENDENCIES_FILEPATH]
).perform_all_lint_checks()
expected_messages = ['Unused injected value IMPORT_STATEMENT']
self.validate(lint_task_report, expected_messages, 1)
def test_third_party_linter_with_stderr(self):
with self.assertRaisesRegexp(SystemExit, '1'):
js_ts_linter.ThirdPartyJsTsLintChecksManager(
INVALID_SORTED_DEPENDENCIES_FILEPATH
).perform_all_lint_checks()
def test_third_party_linter_with_invalid_eslint_path(self):
def mock_exists(unused_path):
return False
exists_swap = self.swap(os.path, 'exists', mock_exists)
with exists_swap, self.assertRaisesRegexp(SystemExit, '1'):
js_ts_linter.ThirdPartyJsTsLintChecksManager(
[INVALID_SORTED_DEPENDENCIES_FILEPATH]
).perform_all_lint_checks()
def test_third_party_linter_with_success_message(self):
lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
[VALID_TS_FILEPATH]).perform_all_lint_checks()
expected_messages = (
['SUCCESS ESLint check passed'])
self.validate(lint_task_report, expected_messages, 0)
def test_custom_linter_with_no_files(self):
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [], FILE_CACHE).perform_all_lint_checks()
self.assertEqual(
[
'There are no JavaScript or Typescript files to lint.',
'SUCCESS JS TS lint check passed'],
lint_task_report[0].get_report())
self.assertEqual('JS TS lint', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_third_party_linter_with_no_files(self):
lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
[]).perform_all_lint_checks()
self.assertEqual(
[
'There are no JavaScript or Typescript files to lint.',
'SUCCESS JS TS lint check passed'],
lint_task_report[0].get_report())
self.assertEqual('JS TS lint', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_http_client_used_with_excluded_file(self):
excluded_file = (
'core/templates/services/request-interceptor.service.spec.ts')
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'core/templates/services/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
excluded_file)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [excluded_file], FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS HTTP requests check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_http_client_used_in_backend_api_service_file(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_BACKEND_API_SERVICE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_BACKEND_API_SERVICE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS HTTP requests check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_http_client_used_with_error_message(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_HTTP_CLIENT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_HTTP_CLIENT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'An instance of HttpClient is found in this file. You are not '
'allowed to create http requests from files that are not '
'backend api services.']
self.validate(lint_task_report, expected_messages, 1)
def test_ts_ignore_found_error(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_TS_IGNORE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
ts_ignore_exceptions_swap = self.swap(
js_ts_linter, 'TS_IGNORE_EXCEPTIONS', {})
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap, ts_ignore_exceptions_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_TS_IGNORE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['@ts-ignore found at line 25.']
expected_messages.extend(['@ts-ignore found at line 31.'])
expected_messages.extend([
'Please add a comment above the @ts-ignore '
'explaining the @ts-ignore at line 25. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
expected_messages.extend([
'Please add a comment above the @ts-ignore '
'explaining the @ts-ignore at line 31. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
self.validate(lint_task_report, expected_messages, 1)
def test_ts_ignore_found_success(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_TS_IGNORE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
ts_ignore_exceptions_swap = self.swap(
js_ts_linter, 'TS_IGNORE_EXCEPTIONS', {
VALID_TS_IGNORE_FILEPATH: ['let b: number = c;']
})
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap, ts_ignore_exceptions_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_TS_IGNORE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS TS ignore check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_ts_expect_error_error(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_TS_EXPECT_ERROR_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_TS_EXPECT_ERROR_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['@ts-expect-error found at line 24.']
expected_messages.extend(['@ts-expect-error found at line 30.'])
expected_messages.extend([
'Please add a comment above the '
'@ts-expect-error explaining the '
'@ts-expect-error at line 24. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
expected_messages.extend([
'Please add a comment above the '
'@ts-expect-error explaining the '
'@ts-expect-error at line 30. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
self.validate(lint_task_report, expected_messages, 1)
def test_ts_expect_error_success(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_TS_EXPECT_ERROR_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_TS_EXPECT_ERROR_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS TS expect error check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_missing_punctuation_at_end_of_comment(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_FORMATTED_COMMENT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_FORMATTED_COMMENT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Line 39: Invalid punctuation used at '
'the end of the comment.']
self.validate(lint_task_report, expected_messages, 1)
def test_angular_services_index_error(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_UNLISTED_SERVICE_PATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_UNLISTED_SERVICE_PATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
angular_services_index_path = (
'./core/templates/services/angular-services.index.ts')
class_name = 'UnlistedService'
service_name_type_pair = (
'[\'%s\', %s]' % (class_name, class_name))
expected_messages = [
'Please import %s to Angular Services Index file in %s'
'from %s'
% (
class_name,
angular_services_index_path,
VALID_UNLISTED_SERVICE_PATH),
'Please add the pair %s to the angularServices in %s'
% (service_name_type_pair, angular_services_index_path)
]
self.validate(lint_task_report, expected_messages, 1)
def test_angular_services_index_success(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_IGNORED_SERVICE_PATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_IGNORED_SERVICE_PATH], FILE_CACHE,
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'SUCCESS Angular Services Index file check passed'
]
self.validate(lint_task_report, expected_messages, 0)
def test_get_linters_with_success(self):
custom_linter, third_party = js_ts_linter.get_linters(
[VALID_JS_FILEPATH], [VALID_TS_FILEPATH], FILE_CACHE)
self.assertTrue(
isinstance(custom_linter, js_ts_linter.JsTsLintChecksManager))
self.assertTrue(
isinstance(
third_party,
js_ts_linter.ThirdPartyJsTsLintChecksManager))
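    # Illustrative sketch (not part of the original suite): every test above
    # follows the same swap-in-a-mock pattern -- replace a collaborator via
    # self.swap, run the lint checks, then assert on the report. A hypothetical
    # minimal test using that pattern could look like:
    #
    #     def test_example_swap_pattern(self):
    #         def mock_compile_all_ts_files():
    #             pass  # skip real TypeScript compilation in this sketch
    #         swap = self.swap(
    #             js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
    #         with swap:
    #             report = js_ts_linter.JsTsLintChecksManager(
    #                 [], [VALID_TS_FILEPATH], FILE_CACHE).perform_all_lint_checks()
    #         self.validate(report, ['SUCCESS'], 0)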
| 48.482227
| 95
| 0.641951
|
from __future__ import absolute_import
from __future__ import unicode_literals
import multiprocessing
import os
import shutil
import subprocess
import sys
from core.tests import test_utils
from . import js_ts_linter
from . import pre_commit_linter
from .. import common
CURR_DIR = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR = os.path.join(CURR_DIR, os.pardir, 'oppia_tools')
ESPRIMA_PATH = os.path.join(
OPPIA_TOOLS_DIR, 'esprima-%s' % common.ESPRIMA_VERSION)
sys.path.insert(1, ESPRIMA_PATH)
import esprima
NAME_SPACE = multiprocessing.Manager().Namespace()
PROCESSES = multiprocessing.Manager().dict()
NAME_SPACE.files = pre_commit_linter.FileCache()
FILE_CACHE = NAME_SPACE.files
LINTER_TESTS_DIR = os.path.join(os.getcwd(), 'scripts', 'linters', 'test_files')
VALID_JS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.js')
VALID_TS_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'valid.ts')
VALID_APP_CONSTANTS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_app.constants.ts')
VALID_APP_CONSTANTS_AJS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_app.constants.ajs.ts')
VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_constant_outside_class.constants.ts')
VALID_CONSTANT_OUTSIDE_CLASS_AJS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_constant_outside_class.constants.ajs.ts')
VALID_BACKEND_API_SERVICE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid-backend-api.service.ts')
EXTRA_JS_FILEPATH = os.path.join('core', 'templates', 'demo.js')
INVALID_COMPONENT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_two_component.ts')
INVALID_SCOPE_TRUE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_scope_true.ts')
INVALID_SCOPE_FILEPATH = os.path.join(LINTER_TESTS_DIR, 'invalid_scope.ts')
INVALID_SORTED_DEPENDENCIES_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_sorted_dependencies.ts')
INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_line_breaks_in_controller_dependencies.ts')
INVALID_CONSTANT_IN_TS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_constant_in_ts_file.ts')
INVALID_CONSTANT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_duplicate.constants.ts')
INVALID_CONSTANT_AJS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_duplicate.constants.ajs.ts')
INVALID_AS_CONST_CONSTANTS_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_as_const.constants.ts')
INVALID_HTTP_CLIENT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_http_client_used.ts')
INVALID_FORMATTED_COMMENT_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_comments.ts')
INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK = os.path.join(
LINTER_TESTS_DIR, 'invalid_directive_without_return.ts')
INVALID_TS_IGNORE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_ts_ignore.ts')
VALID_TS_IGNORE_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_ts_ignore.ts')
INVALID_TS_EXPECT_ERROR_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'invalid_ts_expect_error.ts')
VALID_TS_EXPECT_ERROR_FILEPATH = os.path.join(
LINTER_TESTS_DIR, 'valid_ts_expect_error.spec.ts')
VALID_IGNORED_SERVICE_PATH = os.path.join(
LINTER_TESTS_DIR, 'valid_ignored.service.ts')
VALID_UNLISTED_SERVICE_PATH = os.path.join(
LINTER_TESTS_DIR, 'valid_unlisted.service.ts')
class JsTsLintTests(test_utils.LinterTestBase):
def validate(self, lint_task_report, expected_messages, failed_count):
for stdout in lint_task_report:
if stdout.failed:
for message in expected_messages:
self.assert_same_list_elements(
[message], stdout.trimmed_messages)
self.assert_failed_messages_count(
stdout.get_report(), failed_count)
else:
continue
def test_validate_and_parse_js_and_ts_files_with_exception(self):
def mock_parse_script(unused_file_content, comment):
raise Exception('Exception raised from parse_script()')
esprima_swap = self.swap(esprima, 'parseScript', mock_parse_script)
with esprima_swap, self.assertRaisesRegexp(
Exception, r'Exception raised from parse_script\(\)'):
js_ts_linter.JsTsLintChecksManager(
[], [VALID_JS_FILEPATH], FILE_CACHE).perform_all_lint_checks()
def test_check_extra_js_file_found(self):
def mock_readlines(unused_self, unused_filepath):
return ('var a = 10;\n',)
def mock_read(unused_self, unused_filepath):
return 'var a = 10;\n'
readlines_swap = self.swap(
pre_commit_linter.FileCache, 'readlines', mock_readlines)
read_swap = self.swap(
pre_commit_linter.FileCache, 'read', mock_read)
with readlines_swap, read_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[EXTRA_JS_FILEPATH], [], FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['Found extra .js file']
expected_messages.extend([
'If you want the above files to be present as js files, add '
'them to the list JS_FILEPATHS_NOT_TO_BUILD in build.py. '
'Otherwise, rename them to .ts'])
self.validate(lint_task_report, expected_messages, 1)
def test_check_js_and_ts_component_name_and_count_with_two_component(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_COMPONENT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_COMPONENT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that there is exactly one component '
'in the file.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_directive_scope_with_true_value(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_SCOPE_TRUE_FILEPATH,
INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[],
[INVALID_SCOPE_TRUE_FILEPATH,
INVALID_DIRECTIVE_WITH_NO_RETURN_BLOCK], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that baseContent directive in ',
' file does not have scope set to true.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_directive_scope_with_no_scope(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_SCOPE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_SCOPE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that baseContent directive in ',
' file has a scope: {}.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_sorted_dependencies_with_unsorted_dependencies(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_SORTED_DEPENDENCIES_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_SORTED_DEPENDENCIES_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that in SuggestionModalForCreatorViewController'
' in file', 'the injected dependencies should be in the '
'following manner: dollar imports, regular imports and '
'constant imports, all in sorted order.']
expected_messages.extend([
'Please ensure that in SuggestionModalForCreatorViewController'
' in file ', 'the stringfied dependencies should be in the '
'following manner: dollar imports, regular imports and '
'constant imports, all in sorted order.'])
self.validate(lint_task_report, expected_messages, 1)
def test_match_line_breaks_in_controller_dependencies(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_LINE_BREAK_IN_CONTROLLER_DEPENDENCIES_FILEPATH],
FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Please ensure that in file',
'the line breaks pattern between the dependencies mentioned as'
' strings:\n[$rootScope,$window,BackgroundMaskService,\n'
'SidebarStatusService,UrlService]\nand the dependencies '
'mentioned as function parameters: \n($rootScope,$window,\n'
'BackgroundMaskService,\nSidebarStatusService,UrlService)\n'
'for the corresponding controller should exactly match.'
]
self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_AJS_FILEPATH,
INVALID_CONSTANT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['Duplicate constant declaration found.']
expected_messages.extend([
'Please ensure that the constant ADMIN_TABS is initialized '
'from the value from the corresponding Angular constants file '
'(the *.constants.ts file). Please create one in the Angular '
'constants file if it does not exist there.'
])
self.validate(lint_task_report, expected_messages, 1)
def test_check_duplicate_constant_declaration_in_separate_files(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_IN_TS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_IN_TS_FILEPATH,
INVALID_CONSTANT_IN_TS_FILEPATH],
FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'The constant \'ADMIN_ROLE_HANDLER_URL\' is already declared '
'in', 'Please import the file where the constant is declared '
'or rename the constant.']
self.validate(lint_task_report, expected_messages, 1)
def test_duplicate_constants_in_ajs_file(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_AJS_FILEPATH,
INVALID_CONSTANT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['Duplicate constant declaration found.']
self.validate(lint_task_report, expected_messages, 1)
def test_as_const_in_constant_files(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_AS_CONST_CONSTANTS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_AS_CONST_CONSTANTS_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'This constants file doesn\'t have \'as const\' at the end.']
self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration_outside_class(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_CONSTANT_OUTSIDE_CLASS_AJS_FILEPATH,
VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_CONSTANT_OUTSIDE_CLASS_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS Constants declaration check passed']
self.validate(lint_task_report, expected_messages, 1)
def test_check_app_constants_declaration(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_APP_CONSTANTS_AJS_FILEPATH,
VALID_APP_CONSTANTS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_APP_CONSTANTS_FILEPATH], FILE_CACHE,
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS Constants declaration check passed']
self.validate(lint_task_report, expected_messages, 1)
def test_check_constants_declaration_in_non_constant_file(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_CONSTANT_IN_TS_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_CONSTANT_IN_TS_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Constant declaration found at line 19. Please declare the '
'constants in a separate constants file.']
self.validate(lint_task_report, expected_messages, 1)
def test_third_party_linter(self):
lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
[INVALID_SORTED_DEPENDENCIES_FILEPATH]
).perform_all_lint_checks()
expected_messages = ['Unused injected value IMPORT_STATEMENT']
self.validate(lint_task_report, expected_messages, 1)
def test_third_party_linter_with_stderr(self):
with self.assertRaisesRegexp(SystemExit, '1'):
js_ts_linter.ThirdPartyJsTsLintChecksManager(
INVALID_SORTED_DEPENDENCIES_FILEPATH
).perform_all_lint_checks()
def test_third_party_linter_with_invalid_eslint_path(self):
def mock_exists(unused_path):
return False
exists_swap = self.swap(os.path, 'exists', mock_exists)
with exists_swap, self.assertRaisesRegexp(SystemExit, '1'):
js_ts_linter.ThirdPartyJsTsLintChecksManager(
[INVALID_SORTED_DEPENDENCIES_FILEPATH]
).perform_all_lint_checks()
def test_third_party_linter_with_success_message(self):
lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
[VALID_TS_FILEPATH]).perform_all_lint_checks()
expected_messages = (
['SUCCESS ESLint check passed'])
self.validate(lint_task_report, expected_messages, 0)
def test_custom_linter_with_no_files(self):
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [], FILE_CACHE).perform_all_lint_checks()
self.assertEqual(
[
'There are no JavaScript or Typescript files to lint.',
'SUCCESS JS TS lint check passed'],
lint_task_report[0].get_report())
self.assertEqual('JS TS lint', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_third_party_linter_with_no_files(self):
lint_task_report = js_ts_linter.ThirdPartyJsTsLintChecksManager(
[]).perform_all_lint_checks()
self.assertEqual(
[
'There are no JavaScript or Typescript files to lint.',
'SUCCESS JS TS lint check passed'],
lint_task_report[0].get_report())
self.assertEqual('JS TS lint', lint_task_report[0].name)
self.assertFalse(lint_task_report[0].failed)
def test_http_client_used_with_excluded_file(self):
excluded_file = (
'core/templates/services/request-interceptor.service.spec.ts')
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'core/templates/services/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
excluded_file)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [excluded_file], FILE_CACHE).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS HTTP requests check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_http_client_used_in_backend_api_service_file(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_BACKEND_API_SERVICE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_BACKEND_API_SERVICE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS HTTP requests check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_http_client_used_with_error_message(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_HTTP_CLIENT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_HTTP_CLIENT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'An instance of HttpClient is found in this file. You are not '
'allowed to create http requests from files that are not '
'backend api services.']
self.validate(lint_task_report, expected_messages, 1)
def test_ts_ignore_found_error(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_TS_IGNORE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
ts_ignore_exceptions_swap = self.swap(
js_ts_linter, 'TS_IGNORE_EXCEPTIONS', {})
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap, ts_ignore_exceptions_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_TS_IGNORE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['@ts-ignore found at line 25.']
expected_messages.extend(['@ts-ignore found at line 31.'])
expected_messages.extend([
'Please add a comment above the @ts-ignore '
'explaining the @ts-ignore at line 25. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
expected_messages.extend([
'Please add a comment above the @ts-ignore '
'explaining the @ts-ignore at line 31. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
self.validate(lint_task_report, expected_messages, 1)
def test_ts_ignore_found_success(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_TS_IGNORE_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
ts_ignore_exceptions_swap = self.swap(
js_ts_linter, 'TS_IGNORE_EXCEPTIONS', {
VALID_TS_IGNORE_FILEPATH: ['let b: number = c;']
})
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap, ts_ignore_exceptions_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_TS_IGNORE_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS TS ignore check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_ts_expect_error_error(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_TS_EXPECT_ERROR_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_TS_EXPECT_ERROR_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['@ts-expect-error found at line 24.']
expected_messages.extend(['@ts-expect-error found at line 30.'])
expected_messages.extend([
'Please add a comment above the '
'@ts-expect-error explaining the '
'@ts-expect-error at line 24. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
expected_messages.extend([
'Please add a comment above the '
'@ts-expect-error explaining the '
'@ts-expect-error at line 30. The format '
'of comment should be -> This throws "...". '
'This needs to be suppressed because ...'])
self.validate(lint_task_report, expected_messages, 1)
def test_ts_expect_error_success(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_TS_EXPECT_ERROR_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_TS_EXPECT_ERROR_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = ['SUCCESS TS expect error check passed']
self.validate(lint_task_report, expected_messages, 0)
def test_missing_punctuation_at_end_of_comment(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
INVALID_FORMATTED_COMMENT_FILEPATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [INVALID_FORMATTED_COMMENT_FILEPATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'Line 39: Invalid punctuation used at '
'the end of the comment.']
self.validate(lint_task_report, expected_messages, 1)
def test_angular_services_index_error(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_UNLISTED_SERVICE_PATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_UNLISTED_SERVICE_PATH], FILE_CACHE
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
angular_services_index_path = (
'./core/templates/services/angular-services.index.ts')
class_name = 'UnlistedService'
service_name_type_pair = (
'[\'%s\', %s]' % (class_name, class_name))
expected_messages = [
'Please import %s to Angular Services Index file in %s'
'from %s'
% (
class_name,
angular_services_index_path,
VALID_UNLISTED_SERVICE_PATH),
'Please add the pair %s to the angularServices in %s'
% (service_name_type_pair, angular_services_index_path)
]
self.validate(lint_task_report, expected_messages, 1)
def test_angular_services_index_success(self):
def mock_compile_all_ts_files():
cmd = (
'./node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
'-lib %s -noImplicitUseStrict %s -skipLibCheck '
'%s -target %s -typeRoots %s %s typings/*') % (
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH +
'scripts/linters/test_files/', 'true', 'es2017,dom', 'true',
'true', 'es5', './node_modules/@types',
VALID_IGNORED_SERVICE_PATH)
subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
compile_all_ts_files_swap = self.swap(
js_ts_linter, 'compile_all_ts_files', mock_compile_all_ts_files)
with compile_all_ts_files_swap:
lint_task_report = js_ts_linter.JsTsLintChecksManager(
[], [VALID_IGNORED_SERVICE_PATH], FILE_CACHE,
).perform_all_lint_checks()
shutil.rmtree(
js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH, ignore_errors=True)
expected_messages = [
'SUCCESS Angular Services Index file check passed'
]
self.validate(lint_task_report, expected_messages, 0)
def test_get_linters_with_success(self):
custom_linter, third_party = js_ts_linter.get_linters(
[VALID_JS_FILEPATH], [VALID_TS_FILEPATH], FILE_CACHE)
self.assertTrue(
isinstance(custom_linter, js_ts_linter.JsTsLintChecksManager))
self.assertTrue(
isinstance(
third_party,
js_ts_linter.ThirdPartyJsTsLintChecksManager))
| true
| true
|
79069c6a94506c5d4f3560444aefaf40cd08111d
| 428
|
py
|
Python
|
monero_glue/messages/LiskTransactionType.py
|
ph4r05/monero-agent
|
0bac0e6f33142b2bb885565bfd1ef8ac04559280
|
[
"MIT"
] | 20
|
2018-04-05T22:06:10.000Z
|
2021-09-18T10:43:44.000Z
|
monero_glue/messages/LiskTransactionType.py
|
ph4r05/monero-agent
|
0bac0e6f33142b2bb885565bfd1ef8ac04559280
|
[
"MIT"
] | null | null | null |
monero_glue/messages/LiskTransactionType.py
|
ph4r05/monero-agent
|
0bac0e6f33142b2bb885565bfd1ef8ac04559280
|
[
"MIT"
] | 5
|
2018-08-06T15:06:04.000Z
|
2021-07-16T01:58:43.000Z
|
# Automatically generated by pb2py
# fmt: off
if False:
from typing_extensions import Literal
Transfer = 0 # type: Literal[0]
RegisterSecondPassphrase = 1 # type: Literal[1]
RegisterDelegate = 2 # type: Literal[2]
CastVotes = 3 # type: Literal[3]
RegisterMultisignatureAccount = 4 # type: Literal[4]
CreateDapp = 5 # type: Literal[5]
TransferIntoDapp = 6 # type: Literal[6]
TransferOutOfDapp = 7 # type: Literal[7]
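# Illustrative sketch (not produced by pb2py): the module-level integers can be
# mapped back to readable names for debugging, e.g. via a hypothetical table:
#
#     TYPE_NAMES = {
#         Transfer: "Transfer",
#         CastVotes: "CastVotes",
#         TransferOutOfDapp: "TransferOutOfDapp",
#     }
#     assert TYPE_NAMES[3] == "CastVotes"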
| 30.571429
| 53
| 0.721963
|
if False:
from typing_extensions import Literal
Transfer = 0
RegisterSecondPassphrase = 1
RegisterDelegate = 2
CastVotes = 3
RegisterMultisignatureAccount = 4
CreateDapp = 5
TransferIntoDapp = 6
TransferOutOfDapp = 7
| true
| true
|
79069cd57bb0e7deabdb4b8fcba5ce272e3fa6da
| 21,548
|
py
|
Python
|
c8ylp/cli/core.py
|
SoftwareAG/cumulocity-remote-access-local-proxy
|
5314b80365c82626a17561acc4f57d209c757b80
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-03-19T12:40:29.000Z
|
2021-12-23T11:08:43.000Z
|
c8ylp/cli/core.py
|
SoftwareAG/cumulocity-remote-access-local-proxy
|
5314b80365c82626a17561acc4f57d209c757b80
|
[
"ECL-2.0",
"Apache-2.0"
] | 30
|
2021-04-09T07:53:50.000Z
|
2022-03-31T20:20:41.000Z
|
c8ylp/cli/core.py
|
SoftwareAG/cumulocity-remote-access-local-proxy
|
5314b80365c82626a17561acc4f57d209c757b80
|
[
"ECL-2.0",
"Apache-2.0"
] | 6
|
2021-03-11T14:16:27.000Z
|
2021-12-22T11:47:19.000Z
|
#
# Copyright (c) 2021 Software AG, Darmstadt, Germany and/or its licensors
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exit codes"""
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
"""Exit codes"""
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
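# Illustrative sketch (not part of the original file): because ExitCodes is an
# IntEnum, its members compare equal to plain integers and can be handed
# straight to sys.exit() or click's ctx.exit(), e.g.:
#
#     assert ExitCodes.OK == 0
#     assert int(ExitCodes.TIMEOUT_WAIT_FOR_PORT) == 11
#     sys.exit(ExitCodes.TERMINATE)  # process exits with status 100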
@dataclasses.dataclass
class ProxyContext:
"""Local proxy context"""
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
"""Get the port used by the local proxy
Returns:
int: Port number
"""
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
"""Store the port used by the local proxy for later reference
Args:
value (int): Port number
"""
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
"""Exit with a server not ready error
Returns:
NoReturn: The function does not return
"""
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
"""Load proxy settings from a dictionary
Args:
            src_dict (Dict[str, Any]): Dictionary of proxy settings to copy
                onto this context
Returns:
ProxyContext: Proxy options after the values have been set
via the dictionary
"""
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
"""Start the local proxy in the background
Returns:
ProxyContext: Reference to the proxy context so it can be chained
with other commands or used after the initialization of the class.
"""
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
"""Show an message to the user and log it
Args:
msg (str): User message to print on the console
"""
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
"""Show an error to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
"""Show an info message to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
"""Show a warning to the user and log it
Args:
msg (str): User message to print on the console
"""
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
"""Set environment variables so information about the proxy can
be access by plugins
"""
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
        # Support WSL environments by exposing the variables to WSL as well
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
"""Remote access connection data"""
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
"""CLI Logger"""
# pylint: disable=too-few-public-methods
@classmethod
def log_path(cls) -> pathlib.Path:
"""Get the log path"""
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
"""Configure logger
Args:
path (pathlib.Path): Path where the persistent logger should write to.
verbose (bool, optional): Use verbose logging. Defaults to False.
Returns:
logging.Logger: Created logger
"""
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
# Set default log format
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
# ignore console log messages
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
# Remove default console logging and only use file logging
logger.handlers = []
# Max 5 log files each 10 MB.
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
# Log to Rotating File
logger.addHandler(rotate_handler)
return logger
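# Illustrative usage sketch (assumption, not from the original file):
#
#     logger = configure_logger(CliLogger.log_path(), verbose=False)
#     logger.info("goes to ~/.c8ylp/localproxy.log only; the console stays quiet")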
def signal_handler(_signal, _frame):
"""Signal handler"""
sys.exit(ExitCodes.TERMINATE)
def register_signals():
"""Register signal handlers"""
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
    # Retry logging in so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
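# Illustrative sketch (assumption, not from the original file): create_client
# is typically paired with a managed object lookup, as in pre_start_checks:
#
#     client = create_client(ctx, opts)  # prompts for any missing credentials
#     mor = client.get_managed_object(opts.device, opts.external_type)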
def store_credentials(opts: ProxyContext, client: CumulocityClient):
"""Store credentials to the environment file. It creates
the file if it does not already exist.
The file will only be written to if it has changed.
Args:
opts (ProxyContext): Proxy options
client (CumulocityClient): Cumulocity client containing valid
credentials
"""
changed = save_env(
opts.env_file,
{
# Note: Don't save password!
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
"""Get the remote access configuration id matching a specific type
from a device managed object
Args:
mor (Dict[str, Any]): Device managed object
config (str): Expected configuration type
Returns:
str: Remote access configuration id
"""
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
            'Provided config name "%s" for device "%s" was not found, or none '
            'of the configs has protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
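# Illustrative sketch (assumption, not from the original file): a minimal
# managed object that satisfies get_config_id looks like:
#
#     mor = {
#         "name": "device01",
#         "c8y_RemoteAccessList": [
#             {"id": "1", "name": "ssh", "protocol": "PASSTHROUGH", "port": 22},
#         ],
#     }
#     config_id = get_config_id(ctx, mor, "ssh")  # -> "1"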
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
"""Run the proxy in a background thread
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
        connection_data (RemoteAccessConnectionData): Remote access connection data
        ready_signal (threading.Event, optional): Event that is set once the
            proxy is ready to accept connections
"""
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
    # Start the proxy in a background thread so the user can run a
    # subcommand against it while it keeps serving connections
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
    # Only set the ready signal once the whole environment, including the
    # env variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
"""Run prestart checks before starting the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
Optional[RemoteAccessConnectionData]: Remote access connection data
"""
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
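# Illustrative sketch (assumption, not from the original file): the two halves
# are normally chained exactly as ProxyContext.start does:
#
#     connection_data = pre_start_checks(ctx, opts)
#     start_proxy(ctx, opts, connection_data=connection_data)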
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
"""Start the local proxy
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
"""
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
    tcp_server = None
    background = None
    # Initialise exit_code before the try block so the finally clause can
    # always reference it, even if server setup fails before it is assigned
    exit_code = ExitCodes.OK
    try:
        tcp_server = TCPProxyServer(
            opts.port,
            WebsocketClient(**client_opts),
            opts.tcp_size,
            opts.tcp_timeout,
        )
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
# store the used port for reference to later
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
# Plugins start in a background thread so don't display it
# as the plugins should do their own thing
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
# loop, waiting for server to stop
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
# propagate exit code
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
| 30.094972
| 118
| 0.622424
|
import dataclasses
import logging
import os
import pathlib
import signal
import threading
import time
import sys
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from typing import Any, Dict, NoReturn, Optional
import click
from ..timer import CommandTimer
from ..banner import BANNER1
from ..env import save_env
from ..rest_client.c8yclient import CumulocityClient, CumulocityMissingTFAToken
from ..tcp_socket import TCPProxyServer
from ..websocket_client import WebsocketClient
class ExitCodes(IntEnum):
OK = 0
NO_SESSION = 2
NOT_AUTHORIZED = 3
DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT = 5
DEVICE_NO_PASSTHROUGH_CONFIG = 6
DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG = 7
MISSING_ROLE_REMOTE_ACCESS_ADMIN = 8
UNKNOWN = 9
SSH_NOT_FOUND = 10
TIMEOUT_WAIT_FOR_PORT = 11
COMMAND_NOT_FOUND = 12
PLUGIN_EXECUTION_ERROR = 20
PLUGIN_INVALID_FORMAT = 21
PLUGIN_NOT_FOUND = 22
TERMINATE = 100
@dataclasses.dataclass
class ProxyContext:
host = ""
device = ""
external_type = ""
config = ""
tenant = ""
user = ""
token = ""
password = ""
tfa_code = ""
port = 0
ping_interval = 0
kill = False
tcp_size = 0
tcp_timeout = 0
verbose = False
ignore_ssl_validate = False
reconnects = 0
ssh_user = ""
additional_args = None
disable_prompts = False
env_file = None
store_token = False
wait_port_timeout = 60.0
def __init__(self, ctx: click.Context, src_dict: Dict[str, Any] = None) -> None:
self._ctx = ctx
if src_dict is not None:
self.fromdict(src_dict)
configure_logger(CliLogger.log_path(), self.verbose)
@property
def _root_context(self) -> click.Context:
return self._ctx.find_root().ensure_object(dict)
@property
def used_port(self) -> int:
return self._root_context.get("used_port", self.port)
@used_port.setter
def used_port(self, value: int):
self._root_context["used_port"] = value
def exit_server_not_ready(self) -> NoReturn:
self.show_error(
"Timed out waiting for local port to open: "
f"port={self.used_port}, timeout={self.wait_port_timeout}s"
)
self._ctx.exit(ExitCodes.TIMEOUT_WAIT_FOR_PORT)
def fromdict(self, src_dict: Dict[str, Any]) -> "ProxyContext":
logging.info("Loading from dictionary")
assert isinstance(src_dict, dict)
for key, value in src_dict.items():
logging.info("reading key: %s=%s", key, value)
if hasattr(self, key):
setattr(self, key, value)
return self
def start_background(self, ctx: click.Context = None) -> "ProxyContext":
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
ready_signal = threading.Event()
run_proxy_in_background(
cur_ctx, self, connection_data=connection_data, ready_signal=ready_signal
)
if not ready_signal.wait(self.wait_port_timeout):
self.exit_server_not_ready()
return self
def start(self, ctx: click.Context = None) -> None:
cur_ctx = ctx or self._ctx
connection_data = pre_start_checks(cur_ctx, self)
start_proxy(cur_ctx, self, connection_data=connection_data)
@classmethod
def show_message(cls, msg: str, *args, **kwargs):
click.secho(msg, fg="green")
logging.info(msg, *args, **kwargs)
def show_error(self, msg: str, *args, **kwargs):
if not self.verbose:
click.secho(msg, fg="red")
logging.warning(msg, *args, **kwargs)
def show_info(self, msg: str, *args, **kwargs):
if not self.verbose:
click.secho(msg)
logging.warning(msg, *args, **kwargs)
def show_warning(self, msg: str, *args, **kwargs):
if not self.verbose:
click.secho(msg, fg="yellow")
logging.warning(msg, *args, **kwargs)
def set_env(self):
os.environ["C8Y_HOST"] = str(self.host)
os.environ["PORT"] = str(self.used_port)
os.environ["DEVICE"] = self.device
os.environ["WSLENV"] = "PORT/u:DEVICE/u:C8Y_HOST/u"
@dataclasses.dataclass
class RemoteAccessConnectionData:
client: CumulocityClient
managed_object_id: str
remote_config_id: str
PASSTHROUGH = "PASSTHROUGH"
REMOTE_ACCESS_FRAGMENT = "c8y_RemoteAccessList"
class CliLogger:
@classmethod
def log_path(cls) -> pathlib.Path:
return (
pathlib.Path(os.getenv("C8YLP_LOG_DIR", "~/.c8ylp/")).expanduser()
/ "localproxy.log"
)
def configure_logger(path: pathlib.Path, verbose: bool = False) -> logging.Logger:
path.parent.mkdir(parents=True, exist_ok=True)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_formatter = logging.Formatter(
"%(asctime)s %(threadName)s %(levelname)s %(name)s %(message)s"
)
if verbose:
log_console_formatter = logging.Formatter(
"[c8ylp] %(levelname)-5s %(message)s"
)
console_loglevel = logging.INFO
if len(logger.handlers) == 0:
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_console_formatter)
console_handler.setLevel(console_loglevel)
logger.addHandler(console_handler)
else:
handler = logger.handlers[0]
handler.setLevel(console_loglevel)
handler.setFormatter(log_console_formatter)
else:
logger.handlers = []
rotate_handler = RotatingFileHandler(
filename=str(path), maxBytes=10000000, backupCount=5
)
rotate_handler.setFormatter(log_file_formatter)
rotate_handler.setLevel(logging.INFO)
logger.addHandler(rotate_handler)
return logger
def signal_handler(_signal, _frame):
sys.exit(ExitCodes.TERMINATE)
def register_signals():
signal.signal(signal.SIGINT, signal_handler)
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client
def store_credentials(opts: ProxyContext, client: CumulocityClient):
changed = save_env(
opts.env_file,
{
"C8Y_HOST": client.url,
"C8Y_USER": client.user,
"C8Y_TENANT": client.tenant,
"C8Y_TOKEN": client.token,
},
)
if changed:
opts.show_message(f"Env file was updated: {opts.env_file}")
else:
opts.show_info(f"Env file is already up to date: {opts.env_file}")
def get_config_id(ctx: click.Context, mor: Dict[str, Any], config: str) -> str:
device_name = mor.get("name", "<<empty_name>>")
if REMOTE_ACCESS_FRAGMENT not in mor:
logging.error(
'No Remote Access Configuration has been found for device "%s"', device_name
)
ctx.exit(ExitCodes.DEVICE_MISSING_REMOTE_ACCESS_FRAGMENT)
valid_configs = [
item
for item in mor.get(REMOTE_ACCESS_FRAGMENT, [])
if item.get("protocol") == PASSTHROUGH
]
if not valid_configs:
logging.error(
'No config with protocol set to "%s" has been found for device "%s"',
PASSTHROUGH,
device_name,
)
ctx.exit(ExitCodes.DEVICE_NO_PASSTHROUGH_CONFIG)
def extract_config_id(matching_config):
logging.info(
'Using Configuration with Name "%s" and Remote Port %s',
matching_config.get("name"),
matching_config.get("port"),
)
return matching_config.get("id")
if not config:
# use first config
return extract_config_id(valid_configs[0])
# find config matching name
matches = [
item
for item in valid_configs
if item.get("name", "").casefold() == config.casefold()
]
if not matches:
logging.error(
            'Provided config name "%s" for device "%s" was not found, or none has protocol set to "%s"',
config,
device_name,
PASSTHROUGH,
)
ctx.exit(ExitCodes.DEVICE_NO_MATCHING_PASSTHROUGH_CONFIG)
return extract_config_id(matches[0])
def run_proxy_in_background(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
ready_signal: threading.Event = None,
):
stop_signal = threading.Event()
_local_ready_signal = threading.Event()
# register signals as the proxy will be starting in a background thread
# to enable the proxy to run as a subcommand
register_signals()
    # Start the proxy in a background thread so the user can run the
    # subcommand in the foreground while the proxy keeps serving connections
background = threading.Thread(
target=start_proxy,
args=(ctx, opts),
kwargs=dict(
connection_data=connection_data,
stop_signal=stop_signal,
ready_signal=_local_ready_signal,
),
daemon=True,
)
background.start()
# Block until the local proxy is ready to accept connections
if not _local_ready_signal.wait(opts.wait_port_timeout):
opts.exit_server_not_ready()
# Inject custom env variables for use within the script
opts.set_env()
# The subcommand is called after this
timer = CommandTimer("Duration", on_exit=click.echo).start()
# Shutdown the server once the plugin has been run
@ctx.call_on_close
def _shutdown_server_thread():
stop_signal.set()
background.join()
timer.stop_with_message()
    # Only set the ready signal once the whole environment, including the
    # injected env variables, has been set up
if ready_signal:
ready_signal.set()
def pre_start_checks(
ctx: click.Context, opts: ProxyContext
) -> Optional[RemoteAccessConnectionData]:
try:
client = create_client(ctx, opts)
mor = client.get_managed_object(opts.device, opts.external_type)
config_id = get_config_id(ctx, mor, opts.config)
device_id = mor.get("id")
is_authorized = client.validate_remote_access_role()
if not is_authorized:
opts.show_error(
"The user is not authorized to use Cloud Remote Access. "
f"Contact your Cumulocity Admin. user={opts.user}",
)
ctx.exit(ExitCodes.MISSING_ROLE_REMOTE_ACCESS_ADMIN)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
opts.show_error(f"Could not retrieve device information. reason={ex}")
# re-raise existing exit
raise
error_context = ""
extra_details = []
if opts.host and opts.host not in str(ex):
extra_details.append(f"host={opts.host or ''}")
if opts.user and opts.user not in str(ex):
extra_details.append(f"user={opts.user or ''}")
if extra_details:
error_context = ". settings: " + ", ".join(extra_details)
opts.show_error(
"Unexpected error when retrieving device information from Cumulocity. "
f"error_details={ex}{error_context}"
)
ctx.exit(ExitCodes.NOT_AUTHORIZED)
return RemoteAccessConnectionData(
client=client, managed_object_id=device_id, remote_config_id=config_id
)
def start_proxy(
ctx: click.Context,
opts: ProxyContext,
connection_data: RemoteAccessConnectionData,
stop_signal: threading.Event = None,
ready_signal: threading.Event = None,
) -> NoReturn:
# pylint: disable=too-many-branches,too-many-statements
is_main_thread = threading.current_thread() is threading.main_thread()
if is_main_thread:
register_signals()
client_opts = {
"host": opts.host,
"config_id": connection_data.remote_config_id,
"device_id": connection_data.managed_object_id,
"session": connection_data.client.session,
"token": opts.token,
"ignore_ssl_validate": opts.ignore_ssl_validate,
"ping_interval": opts.ping_interval,
"max_retries": 2,
}
    tcp_server = None
    background = None
    exit_code = ExitCodes.OK  # default so the finally block never sees it unbound
try:
tcp_server = TCPProxyServer(
opts.port,
WebsocketClient(**client_opts),
opts.tcp_size,
opts.tcp_timeout,
)
click.secho(BANNER1)
logging.info("Starting tcp server")
background = threading.Thread(target=tcp_server.serve_forever, daemon=True)
background.start()
# Block until the local proxy is ready to accept connections
if not tcp_server.wait_for_running(opts.wait_port_timeout):
opts.exit_server_not_ready()
        # store the used port for later reference
if tcp_server.server.socket:
opts.used_port = tcp_server.server.socket.getsockname()[1]
        # Plugins run the proxy in a background thread, so don't display the hints
if is_main_thread:
opts.show_info(
f"\nc8ylp is listening for device (ext_id) {opts.device} ({opts.host}) on localhost:{opts.used_port}",
)
ssh_username = opts.ssh_user or "<device_username>"
opts.show_message(
f"\nFor example, if you are running a ssh proxy, you connect to {opts.device} by executing the "
"following in a new tab/console:\n\n"
f"\tssh -p {opts.used_port} {ssh_username}@localhost",
)
opts.show_info("\nPress ctrl-c to shutdown the server")
if ready_signal:
ready_signal.set()
while background.is_alive():
if stop_signal and stop_signal.is_set():
break
time.sleep(1)
logging.debug(
"Waiting in background: alive=%s",
background.is_alive(),
)
except Exception as ex:
if isinstance(ex, click.exceptions.Exit):
exit_code = getattr(ex, "exit_code")
raise
if str(ex):
opts.show_error(
"The local proxy TCP Server experienced an unexpected error. "
f"port={opts.port}, error={ex}"
)
exit_code = ExitCodes.UNKNOWN
finally:
if tcp_server:
tcp_server.shutdown()
if background:
background.join()
if is_main_thread:
if int(exit_code) == 0:
opts.show_message(f"Exiting: {str(exit_code)} ({int(exit_code)})")
else:
opts.show_error(f"Exiting: {str(exit_code)} ({int(exit_code)})")
ctx.exit(exit_code)
else:
opts.show_info("Exiting")
| true
| true
|
79069d5d2c833b9a9cc91dab4c3f8fd6bba40a22
| 913
|
py
|
Python
|
setup.py
|
geoffjukes/nameko-couchbase
|
113a1ab7b72fd049d56e927dcaa6755885c17546
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
geoffjukes/nameko-couchbase
|
113a1ab7b72fd049d56e927dcaa6755885c17546
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
geoffjukes/nameko-couchbase
|
113a1ab7b72fd049d56e927dcaa6755885c17546
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='nameko-couchbase',
version='0.1.5',
description='Nameko dependency for Couchbase',
url='https://github.com/geoffjukes/nameko-couchbase',
author='Geoff Jukes',
license="Apache License, Version 2.0",
classifiers=[
"Programming Language :: Python",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers",
],
keywords='nameko dependency couchbase',
py_modules=['nameko_couchbase'],
install_requires=['couchbase==2.5.9'],
)
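# Hedged usage sketch (not part of setup.py): once the package is installed,
# the metadata declared above is queryable at runtime. pkg_resources ships
# with setuptools, so it is available on the Python 3.5-3.7 range listed in
# the classifiers.
import pkg_resources

_dist = pkg_resources.get_distribution("nameko-couchbase")
print(_dist.version)       # -> "0.1.5"
print(_dist.project_name)  # -> "nameko-couchbase"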
| 33.814815
| 71
| 0.638554
|
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='nameko-couchbase',
version='0.1.5',
description='Nameko dependency for Couchbase',
url='https://github.com/geoffjukes/nameko-couchbase',
author='Geoff Jukes',
license="Apache License, Version 2.0",
classifiers=[
"Programming Language :: Python",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers",
],
keywords='nameko dependency couchbase',
py_modules=['nameko_couchbase'],
install_requires=['couchbase==2.5.9'],
)
| true
| true
|
79069d65d07e3095beb924880e1da9c6b68f5513
| 1,006
|
py
|
Python
|
babyshop_app/babyshop/urls.py
|
MET-DEV/Django-E-Commerce
|
9a1a50fe56a888946eec28caeb2c735a60065f19
|
[
"MIT"
] | null | null | null |
babyshop_app/babyshop/urls.py
|
MET-DEV/Django-E-Commerce
|
9a1a50fe56a888946eec28caeb2c735a60065f19
|
[
"MIT"
] | null | null | null |
babyshop_app/babyshop/urls.py
|
MET-DEV/Django-E-Commerce
|
9a1a50fe56a888946eec28caeb2c735a60065f19
|
[
"MIT"
] | null | null | null |
"""babyshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('products.urls')),
path('users/',include('users.urls')),
]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
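# Hedged sketch (hypothetical, not from this repo): the general shape of the
# products/urls.py module that include('products.urls') above resolves to.
# The view names below are assumptions for illustration only.
from django.urls import path
from . import views

app_name = "products"
urlpatterns = [
    path("", views.index, name="index"),
    path("<int:pk>/", views.detail, name="detail"),
]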
| 37.259259
| 77
| 0.724652
|
from django.urls import path, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('products.urls')),
path('users/',include('users.urls')),
]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| true
| true
|
79069d7a7e92d22d0979b30d01150684f877a556
| 2,489
|
py
|
Python
|
scripts/csv2html/csv2html.py
|
sjkelly/openlane
|
0fec8c8fb2382d3d487127face5109ec7d2baa51
|
[
"Apache-2.0"
] | 2
|
2021-09-23T01:55:01.000Z
|
2022-03-10T04:06:44.000Z
|
scripts/csv2html/csv2html.py
|
sjkelly/openlane
|
0fec8c8fb2382d3d487127face5109ec7d2baa51
|
[
"Apache-2.0"
] | 1
|
2021-02-10T10:32:45.000Z
|
2021-02-11T03:56:36.000Z
|
scripts/csv2html/csv2html.py
|
sjkelly/openlane
|
0fec8c8fb2382d3d487127face5109ec7d2baa51
|
[
"Apache-2.0"
] | 2
|
2021-09-23T01:55:04.000Z
|
2022-01-30T10:01:33.000Z
|
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import argparse
import pathlib
import pandas as pd
from jinja2 import Environment, PackageLoader, select_autoescape
parser = argparse.ArgumentParser(
description='Takes an input csv report from the run_designs.py script and creates an html summary for it')
parser.add_argument('--csv_file', '-i',required=True,
help='The input csv file')
parser.add_argument('--html_file', '-o', required=True,
help='The output html file')
args = parser.parse_args()
csv_file = args.csv_file
html_file = args.html_file
env = Environment(
loader=PackageLoader('csv2html', 'templates'),
    autoescape=select_autoescape(["html"])  # pass a list; a bare string is treated per-character
)
template = env.get_template('main.html')
def get_static_folder(file_name):
p = pathlib.Path('.')
return pathlib.PosixPath(str(p) +'/scripts/csv2html/static/'+str(file_name))
def read_csv(csv_file):
csv_file_opener = open(csv_file, 'r')
csv_data = csv.reader(csv_file_opener)
csv_headers = next(csv_data)
return csv_headers, csv_data
def create_output_html(csv_file, html_file):
colms = ['design','config','runtime','DIEAREA_mm^2','OpenDP_Util','cell_count','tritonRoute_violations',
'Short_violations', 'Magic_violations', 'antenna_violations', 'wns', 'CLOCK_PERIOD']
allData = pd.read_csv(csv_file, error_bad_lines=False)
dataFrame = pd.DataFrame(data=allData)
usedData = dataFrame[colms]
usedData.to_csv(csv_file.split(".csv")[0]+"_tmp_report.csv")
headers, data = read_csv(csv_file.split(".csv")[0]+"_tmp_report.csv")
with open(html_file, 'w') as output:
static_file = 'style.css'
output.write(template.render(headers=headers, rows=data, style_url=get_static_folder(static_file).resolve()))
os.remove(csv_file.split(".csv")[0]+"_tmp_report.csv")
if __name__ == '__main__':
create_output_html(csv_file, html_file)
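# Hedged, self-contained sketch of the render step above: a tiny inline
# template taking the same variables (headers, rows) as templates/main.html,
# loaded from a dict so the snippet runs without the package's template folder.
from jinja2 import Environment, DictLoader, select_autoescape

_demo_env = Environment(
    loader=DictLoader({"main.html":
        "<table><tr>{% for h in headers %}<th>{{ h }}</th>{% endfor %}</tr>"
        "{% for row in rows %}<tr>{% for cell in row %}"
        "<td>{{ cell }}</td>{% endfor %}</tr>{% endfor %}</table>"}),
    autoescape=select_autoescape(["html"]),
)
print(_demo_env.get_template("main.html").render(headers=["a", "b"], rows=[[1, 2]]))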
| 32.75
| 117
| 0.723584
|
import csv
import os
import argparse
import pathlib
import pandas as pd
from jinja2 import Environment, PackageLoader, select_autoescape
parser = argparse.ArgumentParser(
description='Takes an input csv report from the run_designs.py script and creates an html summary for it')
parser.add_argument('--csv_file', '-i',required=True,
help='The input csv file')
parser.add_argument('--html_file', '-o', required=True,
help='The output html file')
args = parser.parse_args()
csv_file = args.csv_file
html_file = args.html_file
env = Environment(
loader=PackageLoader('csv2html', 'templates'),
    autoescape=select_autoescape(["html"])
)
template = env.get_template('main.html')
def get_static_folder(file_name):
p = pathlib.Path('.')
return pathlib.PosixPath(str(p) +'/scripts/csv2html/static/'+str(file_name))
def read_csv(csv_file):
csv_file_opener = open(csv_file, 'r')
csv_data = csv.reader(csv_file_opener)
csv_headers = next(csv_data)
return csv_headers, csv_data
def create_output_html(csv_file, html_file):
colms = ['design','config','runtime','DIEAREA_mm^2','OpenDP_Util','cell_count','tritonRoute_violations',
'Short_violations', 'Magic_violations', 'antenna_violations', 'wns', 'CLOCK_PERIOD']
allData = pd.read_csv(csv_file, error_bad_lines=False)
dataFrame = pd.DataFrame(data=allData)
usedData = dataFrame[colms]
usedData.to_csv(csv_file.split(".csv")[0]+"_tmp_report.csv")
headers, data = read_csv(csv_file.split(".csv")[0]+"_tmp_report.csv")
with open(html_file, 'w') as output:
static_file = 'style.css'
output.write(template.render(headers=headers, rows=data, style_url=get_static_folder(static_file).resolve()))
os.remove(csv_file.split(".csv")[0]+"_tmp_report.csv")
if __name__ == '__main__':
create_output_html(csv_file, html_file)
| true
| true
|
79069d949da4560a5e300cd95474e25a8e5db399
| 3,930
|
py
|
Python
|
nuts.py
|
samrobbins85/CMHSS-Coursework
|
88affe95ff410d6b9533403e0f866687d5a79432
|
[
"MIT"
] | 2
|
2021-04-12T08:26:13.000Z
|
2021-07-13T15:26:58.000Z
|
nuts.py
|
samrobbins85/CMHSS-Coursework
|
88affe95ff410d6b9533403e0f866687d5a79432
|
[
"MIT"
] | 9
|
2019-11-07T13:24:16.000Z
|
2022-01-24T09:41:51.000Z
|
nuts.py
|
samrobbins85/CMHSS-Coursework
|
88affe95ff410d6b9533403e0f866687d5a79432
|
[
"MIT"
] | 3
|
2020-02-26T12:09:54.000Z
|
2021-05-06T15:45:49.000Z
|
"""
nuts_finder
-----------
You give it a point, it tells you all the EU NUTS regions
"""
import geojson
import requests
import re
from io import BytesIO
from zipfile import ZipFile
from shapely import geometry
from functools import lru_cache
import logging
YEAR_REGEX = "NUTS ([0-9]+)"
SCALE_REGEX = "1:([0-9]+) Million"
TOP_URL = "https://ec.europa.eu/eurostat/cache/" "GISCO/distribution/v2/nuts/download"
ZIP_URL = f"{TOP_URL}/" "ref-nuts-{year}-{scale}m.geojson.zip"
NESTED_FILE = "NUTS_RG_{scale}M_{year}_4326.geojson"
def _middle(values):
"""Lower bound of median, without using numpy (heavy reqs)"""
n = len(values)
is_odd = n % 2
middle_idx = int((n + is_odd) / 2) - 1
return sorted(values)[middle_idx]
def _setattr(obj, value, value_name, regex, selector):
"""Either apply setattr on `obj` with value `value`, if `value` is not None, otherwise
select a `value` from the available range of allowed values, selected by a custom `selector`
function.
Args:
obj: An object on which to run setattr
value: A value which if not None will be set as an attribute of object
value_name (str): The name of the new attribute
regex (str): regex string by which to find allowed values on the NUTS website.
selector (function): Function which takes an iterable and selects a value.
"""
allowed_values = _get_available(regex)
if value is None:
value = selector(allowed_values)
if value not in allowed_values:
raise ValueError(f"'{value_name}' must be one of {allowed_values}")
setattr(obj, value_name, value)
@lru_cache()
def _get_available(regex):
"""Use the provided regex to find allowed values on the NUTS website."""
r = requests.get(TOP_URL, verify=True)
values = set(int(yr) for yr in re.findall(regex, r.text))
return values
class NutsFinder:
"""
Object for holding onto NUTS data and exposing to the user, also
providing a lat, lon lookup
"""
def __init__(self, year=None, scale=None):
"""
Args:
year (int): If provided, NUTS regions for this year will be used (if available)
scale (int): If provided, NUTS regions at this resolution will be used (if available)
"""
self.years = list(_get_available(YEAR_REGEX))
self.year_selector = max
_setattr(self, year, "year", YEAR_REGEX, self.year_selector)
_setattr(self, scale, "scale", SCALE_REGEX, _middle) # Take the middle scale
self.shapes = self._get_shapes()
def _get_shapes(self):
"""Load the shape files for the given year and scale"""
scale = str(self.scale).zfill(2)
filename = NESTED_FILE.format(year=self.year, scale=scale)
url = ZIP_URL.format(year=self.year, scale=scale)
r = requests.get(url, verify=True)
r.raise_for_status()
try:
with ZipFile(BytesIO(r.content)) as zipfile:
with zipfile.open(filename) as f:
shapes = geojson.load(f)
# For some reason this year/scale isn't available
except KeyError:
logging.warning(
f"No match for this year ({self.year}) and scale ({self.scale})"
)
# Remove this year from the sample and try another year
self.years.remove(self.year)
self.year = self.year_selector(self.years)
logging.warning(f"Retrying with year ({self.year})")
return self._get_shapes()
return shapes
def find(self, lat, lon):
"""Find every NUTS region for this lat, lon"""
p = geometry.Point(lon, lat)
nuts = []
for region in self.shapes["features"]:
s = geometry.shape(region["geometry"])
if s.contains(p):
nuts.append(region["properties"])
return sorted(nuts, key=lambda row: row["LEVL_CODE"])
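# Hedged usage sketch: finding every NUTS region that contains a point. The
# coordinates below (Durham, UK) are an assumption for illustration, and note
# that constructing a NutsFinder downloads shape files from the GISCO service.
if __name__ == "__main__":
    nf = NutsFinder()
    for region in nf.find(lat=54.7753, lon=-1.5849):
        print(region["NUTS_ID"], region["LEVL_CODE"])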
| 36.055046
| 97
| 0.637913
|
import geojson
import requests
import re
from io import BytesIO
from zipfile import ZipFile
from shapely import geometry
from functools import lru_cache
import logging
YEAR_REGEX = "NUTS ([0-9]+)"
SCALE_REGEX = "1:([0-9]+) Million"
TOP_URL = "https://ec.europa.eu/eurostat/cache/" "GISCO/distribution/v2/nuts/download"
ZIP_URL = f"{TOP_URL}/" "ref-nuts-{year}-{scale}m.geojson.zip"
NESTED_FILE = "NUTS_RG_{scale}M_{year}_4326.geojson"
def _middle(values):
n = len(values)
is_odd = n % 2
middle_idx = int((n + is_odd) / 2) - 1
return sorted(values)[middle_idx]
def _setattr(obj, value, value_name, regex, selector):
allowed_values = _get_available(regex)
if value is None:
value = selector(allowed_values)
if value not in allowed_values:
raise ValueError(f"'{value_name}' must be one of {allowed_values}")
setattr(obj, value_name, value)
@lru_cache()
def _get_available(regex):
r = requests.get(TOP_URL, verify=True)
values = set(int(yr) for yr in re.findall(regex, r.text))
return values
class NutsFinder:
def __init__(self, year=None, scale=None):
self.years = list(_get_available(YEAR_REGEX))
self.year_selector = max
_setattr(self, year, "year", YEAR_REGEX, self.year_selector)
_setattr(self, scale, "scale", SCALE_REGEX, _middle)
self.shapes = self._get_shapes()
def _get_shapes(self):
scale = str(self.scale).zfill(2)
filename = NESTED_FILE.format(year=self.year, scale=scale)
url = ZIP_URL.format(year=self.year, scale=scale)
r = requests.get(url, verify=True)
r.raise_for_status()
try:
with ZipFile(BytesIO(r.content)) as zipfile:
with zipfile.open(filename) as f:
shapes = geojson.load(f)
except KeyError:
logging.warning(
f"No match for this year ({self.year}) and scale ({self.scale})"
)
# Remove this year from the sample and try another year
self.years.remove(self.year)
self.year = self.year_selector(self.years)
logging.warning(f"Retrying with year ({self.year})")
return self._get_shapes()
return shapes
def find(self, lat, lon):
p = geometry.Point(lon, lat)
nuts = []
for region in self.shapes["features"]:
s = geometry.shape(region["geometry"])
if s.contains(p):
nuts.append(region["properties"])
return sorted(nuts, key=lambda row: row["LEVL_CODE"])
| true
| true
|
79069e13b6dc710ab9ad1ff91f884709c8529f0a
| 1,635
|
py
|
Python
|
contrib/runners/winrm_runner/winrm_runner/winrm_ps_command_runner.py
|
nickbaum/st2
|
21c01c7c8c0f511ee75e3b2a3a03502472281058
|
[
"Apache-2.0"
] | 1
|
2020-11-09T21:05:33.000Z
|
2020-11-09T21:05:33.000Z
|
contrib/runners/winrm_runner/winrm_runner/winrm_ps_command_runner.py
|
ellerbrock/st2
|
b3a0d9f82053c1fd5adb616dc8331bad427cd11f
|
[
"Apache-2.0"
] | 3
|
2021-03-26T00:29:52.000Z
|
2021-03-26T00:34:45.000Z
|
contrib/runners/winrm_runner/winrm_runner/winrm_ps_command_runner.py
|
ellerbrock/st2
|
b3a0d9f82053c1fd5adb616dc8331bad427cd11f
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import uuid
from st2common import log as logging
from st2common.runners.base import get_metadata as get_runner_metadata
from winrm_runner.winrm_base import WinRmBaseRunner
__all__ = [
'WinRmPsCommandRunner',
'get_runner',
'get_metadata'
]
LOG = logging.getLogger(__name__)
RUNNER_COMMAND = 'cmd'
class WinRmPsCommandRunner(WinRmBaseRunner):
def run(self, action_parameters):
powershell_command = self.runner_parameters[RUNNER_COMMAND]
# execute
return self.run_ps(powershell_command)
def get_runner():
return WinRmPsCommandRunner(str(uuid.uuid4()))
def get_metadata():
metadata = get_runner_metadata('winrm_runner')
metadata = [runner for runner in metadata if
runner['runner_module'] == __name__.split('.')[-1]][0]
return metadata
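# Hedged, self-contained illustration of the lookup inside get_metadata()
# above: filtering a metadata list by the bare module name taken from a
# dotted module path. The records below are dummy data, not real st2 output.
_records = [
    {"runner_module": "winrm_command_runner"},
    {"runner_module": "winrm_ps_command_runner"},
]
_module_name = "winrm_runner.winrm_ps_command_runner".split(".")[-1]
print([r for r in _records if r["runner_module"] == _module_name][0])
# -> {'runner_module': 'winrm_ps_command_runner'}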
| 31.442308
| 74
| 0.752905
|
from __future__ import absolute_import
import uuid
from st2common import log as logging
from st2common.runners.base import get_metadata as get_runner_metadata
from winrm_runner.winrm_base import WinRmBaseRunner
__all__ = [
'WinRmPsCommandRunner',
'get_runner',
'get_metadata'
]
LOG = logging.getLogger(__name__)
RUNNER_COMMAND = 'cmd'
class WinRmPsCommandRunner(WinRmBaseRunner):
def run(self, action_parameters):
powershell_command = self.runner_parameters[RUNNER_COMMAND]
return self.run_ps(powershell_command)
def get_runner():
return WinRmPsCommandRunner(str(uuid.uuid4()))
def get_metadata():
metadata = get_runner_metadata('winrm_runner')
metadata = [runner for runner in metadata if
runner['runner_module'] == __name__.split('.')[-1]][0]
return metadata
| true
| true
|
79069f2ad045a3a35831b27298ef5aa566ab2c4c
| 5,772
|
py
|
Python
|
src/image-gallery/azext_image_gallery/custom.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/image-gallery/azext_image_gallery/custom.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/image-gallery/azext_image_gallery/custom.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=unused-argument
from knack.log import get_logger
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.azclierror import RequiredArgumentMissingError
from azure.cli.core.util import sdk_no_wait
from ._client_factory import _compute_client_factory
logger = get_logger(__name__)
def sig_community_image_definition_list(client, location, public_gallery_name, marker=None, show_next_marker=None):
generator = client.list(location=location, public_gallery_name=public_gallery_name)
return get_page_result(generator, marker, show_next_marker)
def sig_community_image_version_list(client, location, public_gallery_name, gallery_image_name, marker=None,
show_next_marker=None):
generator = client.list(location=location, public_gallery_name=public_gallery_name,
gallery_image_name=gallery_image_name)
return get_page_result(generator, marker, show_next_marker)
def get_page_result(generator, marker, show_next_marker=None):
    pages = generator.by_page(continuation_token=marker)  # iterator over pages of results
result = list_generator(pages=pages)
if show_next_marker:
next_marker = {"nextMarker": pages.continuation_token}
result.append(next_marker)
else:
if pages.continuation_token:
logger.warning('Next Marker:')
logger.warning(pages.continuation_token)
return result
# The REST service takes 50 items as a page by default
def list_generator(pages, num_results=50):
result = []
# get first page items
page = list(next(pages))
result += page
while True:
if not pages.continuation_token:
break
# handle num results
if num_results is not None:
if num_results == len(result):
break
page = list(next(pages))
result += page
return result
def _get_resource_group_location(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
# pylint: disable=no-member
return client.resource_groups.get(resource_group_name).location
def create_image_gallery(cmd, resource_group_name, gallery_name, description=None,
location=None, no_wait=False, tags=None, permissions=None, soft_delete=None,
publisher_uri=None, publisher_contact=None, eula=None, public_name_prefix=None):
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import Gallery
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
gallery = Gallery(description=description, location=location, tags=(tags or {}))
if soft_delete is not None:
gallery.soft_delete_policy = {'is_soft_delete_enabled': soft_delete}
client = _compute_client_factory(cmd.cli_ctx)
if permissions:
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import SharingProfile
gallery.sharing_profile = SharingProfile(permissions=permissions)
if permissions == 'Community':
if publisher_uri is None or publisher_contact is None or eula is None or public_name_prefix is None:
raise RequiredArgumentMissingError('If you want to share to the community, '
'you need to fill in all the following parameters:'
' --publisher-uri, --publisher-email, --eula, --public-name-prefix.')
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import CommunityGalleryInfo
gallery.sharing_profile.community_gallery_info = CommunityGalleryInfo(publisher_uri=publisher_uri,
publisher_contact=publisher_contact,
eula=eula,
public_name_prefix=public_name_prefix)
return sdk_no_wait(no_wait, client.galleries.begin_create_or_update, resource_group_name, gallery_name, gallery)
def sig_share_update(cmd, client, resource_group_name, gallery_name, subscription_ids=None, tenant_ids=None,
op_type=None):
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import SharingProfileGroup, SharingUpdate, SharingProfileGroupTypes
if op_type != 'EnableCommunity':
if subscription_ids is None and tenant_ids is None:
raise RequiredArgumentMissingError('At least one of subscription ids or tenant ids must be provided')
groups = []
if subscription_ids:
groups.append(SharingProfileGroup(type=SharingProfileGroupTypes.SUBSCRIPTIONS, ids=subscription_ids))
if tenant_ids:
groups.append(SharingProfileGroup(type=SharingProfileGroupTypes.AAD_TENANTS, ids=tenant_ids))
sharing_update = SharingUpdate(operation_type=op_type, groups=groups)
return client.begin_update(resource_group_name=resource_group_name,
gallery_name=gallery_name,
sharing_update=sharing_update)
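# Hedged, self-contained illustration of the continuation-token paging that
# get_page_result()/list_generator() above implement. FakePages is a dummy
# stand-in for the SDK's page iterator, not a real Azure SDK type.
class FakePages:
    def __init__(self, pages):
        self._pages = iter(pages)
        self.continuation_token = None
    def __next__(self):
        page = next(self._pages)
        self.continuation_token = page["next"]  # None once exhausted
        return iter(page["items"])

_demo = FakePages([{"items": [1, 2], "next": "marker-1"},
                   {"items": [3], "next": None}])
print(list_generator(_demo))  # -> [1, 2, 3]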
| 49.333333
| 129
| 0.675676
|
from knack.log import get_logger
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.azclierror import RequiredArgumentMissingError
from azure.cli.core.util import sdk_no_wait
from ._client_factory import _compute_client_factory
logger = get_logger(__name__)
def sig_community_image_definition_list(client, location, public_gallery_name, marker=None, show_next_marker=None):
generator = client.list(location=location, public_gallery_name=public_gallery_name)
return get_page_result(generator, marker, show_next_marker)
def sig_community_image_version_list(client, location, public_gallery_name, gallery_image_name, marker=None,
show_next_marker=None):
generator = client.list(location=location, public_gallery_name=public_gallery_name,
gallery_image_name=gallery_image_name)
return get_page_result(generator, marker, show_next_marker)
def get_page_result(generator, marker, show_next_marker=None):
pages = generator.by_page(continuation_token=marker)
result = list_generator(pages=pages)
if show_next_marker:
next_marker = {"nextMarker": pages.continuation_token}
result.append(next_marker)
else:
if pages.continuation_token:
logger.warning('Next Marker:')
logger.warning(pages.continuation_token)
return result
def list_generator(pages, num_results=50):
result = []
page = list(next(pages))
result += page
while True:
if not pages.continuation_token:
break
if num_results is not None:
if num_results == len(result):
break
page = list(next(pages))
result += page
return result
def _get_resource_group_location(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
return client.resource_groups.get(resource_group_name).location
def create_image_gallery(cmd, resource_group_name, gallery_name, description=None,
location=None, no_wait=False, tags=None, permissions=None, soft_delete=None,
publisher_uri=None, publisher_contact=None, eula=None, public_name_prefix=None):
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import Gallery
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
gallery = Gallery(description=description, location=location, tags=(tags or {}))
if soft_delete is not None:
gallery.soft_delete_policy = {'is_soft_delete_enabled': soft_delete}
client = _compute_client_factory(cmd.cli_ctx)
if permissions:
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import SharingProfile
gallery.sharing_profile = SharingProfile(permissions=permissions)
if permissions == 'Community':
if publisher_uri is None or publisher_contact is None or eula is None or public_name_prefix is None:
raise RequiredArgumentMissingError('If you want to share to the community, '
'you need to fill in all the following parameters:'
' --publisher-uri, --publisher-email, --eula, --public-name-prefix.')
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import CommunityGalleryInfo
gallery.sharing_profile.community_gallery_info = CommunityGalleryInfo(publisher_uri=publisher_uri,
publisher_contact=publisher_contact,
eula=eula,
public_name_prefix=public_name_prefix)
return sdk_no_wait(no_wait, client.galleries.begin_create_or_update, resource_group_name, gallery_name, gallery)
def sig_share_update(cmd, client, resource_group_name, gallery_name, subscription_ids=None, tenant_ids=None,
op_type=None):
from .vendored_sdks.azure_mgmt_compute.models._models_py3 import SharingProfileGroup, SharingUpdate, SharingProfileGroupTypes
if op_type != 'EnableCommunity':
if subscription_ids is None and tenant_ids is None:
raise RequiredArgumentMissingError('At least one of subscription ids or tenant ids must be provided')
groups = []
if subscription_ids:
groups.append(SharingProfileGroup(type=SharingProfileGroupTypes.SUBSCRIPTIONS, ids=subscription_ids))
if tenant_ids:
groups.append(SharingProfileGroup(type=SharingProfileGroupTypes.AAD_TENANTS, ids=tenant_ids))
sharing_update = SharingUpdate(operation_type=op_type, groups=groups)
return client.begin_update(resource_group_name=resource_group_name,
gallery_name=gallery_name,
sharing_update=sharing_update)
| true
| true
|
79069f6a16a159b300b61a629f67a60cee85474b
| 174
|
py
|
Python
|
codegen/generator/__init__.py
|
fangyi-zhou/TypeScript-Multiparty-Sessions
|
216ae5806fc9d901086e8e773ef241fc9848c544
|
[
"MIT"
] | 4
|
2020-09-09T12:40:17.000Z
|
2021-12-05T21:43:49.000Z
|
codegen/generator/__init__.py
|
fangyi-zhou/TypeScript-Multiparty-Sessions
|
216ae5806fc9d901086e8e773ef241fc9848c544
|
[
"MIT"
] | 19
|
2020-03-31T08:11:47.000Z
|
2022-02-27T11:13:47.000Z
|
codegen/generator/__init__.py
|
fangyi-zhou/TypeScript-Multiparty-Sessions
|
216ae5806fc9d901086e8e773ef241fc9848c544
|
[
"MIT"
] | 2
|
2020-04-15T14:45:12.000Z
|
2021-01-13T13:54:15.000Z
|
from codegen.generator.browser import BrowserCodegenStrategy
from codegen.generator.code_generator import CodeGenerator
from codegen.generator.node import NodeCodegenStrategy
| 58
| 60
| 0.902299
|
from codegen.generator.browser import BrowserCodegenStrategy
from codegen.generator.code_generator import CodeGenerator
from codegen.generator.node import NodeCodegenStrategy
| true
| true
|
7906a102057891ed5023dad9d193ef47a1fcff1d
| 1,528
|
py
|
Python
|
flowtron_logger.py
|
hit-thusz-RookieCJ/flowtron
|
3822bd0ed3226b001dd4ec1653809449f889b520
|
[
"Apache-2.0"
] | null | null | null |
flowtron_logger.py
|
hit-thusz-RookieCJ/flowtron
|
3822bd0ed3226b001dd4ec1653809449f889b520
|
[
"Apache-2.0"
] | null | null | null |
flowtron_logger.py
|
hit-thusz-RookieCJ/flowtron
|
3822bd0ed3226b001dd4ec1653809449f889b520
|
[
"Apache-2.0"
] | null | null | null |
import random
import torch
from torch.utils.tensorboard import SummaryWriter
from flowtron_plotting_utils import plot_alignment_to_numpy
from flowtron_plotting_utils import plot_gate_outputs_to_numpy
class FlowtronLogger(SummaryWriter):
def __init__(self, logdir):
super(FlowtronLogger, self).__init__(logdir)
def log_training(self, loss, learning_rate, iteration):
self.add_scalar("training/loss", loss, iteration)
self.add_scalar("learning_rate", learning_rate, iteration)
def log_validation(self, loss, loss_nll, loss_gate, attns, gate_pred,
gate_out, iteration):
self.add_scalar("validation/loss", loss, iteration)
self.add_scalar("validation/loss_nll", loss_nll, iteration)
self.add_scalar("validation/loss_gate", loss_gate, iteration)
        # randomly pick one example from the batch to inspect the outputs
idx = random.randint(0, len(gate_out) - 1)
for i in range(len(attns)):
self.add_image(
'attention_weights_{}'.format(i),
plot_alignment_to_numpy(attns[i][idx].data.cpu().numpy().T),
iteration,
dataformats='HWC')
if gate_pred is not None:
gate_pred = gate_pred.transpose(0, 1)[:, :, 0]
self.add_image(
"gate",
plot_gate_outputs_to_numpy(
gate_out[idx].data.cpu().numpy(),
torch.sigmoid(gate_pred[idx]).data.cpu().numpy()),
iteration, dataformats='HWC')
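# Hedged usage sketch: pointing the logger at a TensorBoard log directory and
# recording one training step (all values below are dummies for illustration).
if __name__ == "__main__":
    flogger = FlowtronLogger("logs/flowtron_demo")
    flogger.log_training(loss=1.23, learning_rate=1e-4, iteration=100)
    flogger.close()  # SummaryWriter.close() flushes pending events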
| 39.179487
| 76
| 0.630236
|
import random
import torch
from torch.utils.tensorboard import SummaryWriter
from flowtron_plotting_utils import plot_alignment_to_numpy
from flowtron_plotting_utils import plot_gate_outputs_to_numpy
class FlowtronLogger(SummaryWriter):
def __init__(self, logdir):
super(FlowtronLogger, self).__init__(logdir)
def log_training(self, loss, learning_rate, iteration):
self.add_scalar("training/loss", loss, iteration)
self.add_scalar("learning_rate", learning_rate, iteration)
def log_validation(self, loss, loss_nll, loss_gate, attns, gate_pred,
gate_out, iteration):
self.add_scalar("validation/loss", loss, iteration)
self.add_scalar("validation/loss_nll", loss_nll, iteration)
self.add_scalar("validation/loss_gate", loss_gate, iteration)
idx = random.randint(0, len(gate_out) - 1)
for i in range(len(attns)):
self.add_image(
'attention_weights_{}'.format(i),
plot_alignment_to_numpy(attns[i][idx].data.cpu().numpy().T),
iteration,
dataformats='HWC')
if gate_pred is not None:
gate_pred = gate_pred.transpose(0, 1)[:, :, 0]
self.add_image(
"gate",
plot_gate_outputs_to_numpy(
gate_out[idx].data.cpu().numpy(),
torch.sigmoid(gate_pred[idx]).data.cpu().numpy()),
iteration, dataformats='HWC')
| true
| true
|
7906a196647f476d08db56808d5d95dd375d5933
| 18,575
|
py
|
Python
|
pypureclient/flashblade/FB_2_3/api/link_aggregation_groups_api.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flashblade/FB_2_3/api/link_aggregation_groups_api.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flashblade/FB_2_3/api/link_aggregation_groups_api.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.3, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class LinkAggregationGroupsApi(object):
def __init__(self, api_client):
self.api_client = api_client
def api23_link_aggregation_groups_delete_with_http_info(
self,
ids=None, # type: List[str]
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""DELETE link-aggregation-groups
Remove a link aggregation group to unbind the ports.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_link_aggregation_groups_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api23_link_aggregation_groups_get_with_http_info(
self,
continuation_token=None, # type: str
filter=None, # type: str
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.LinkAggregationGroupGetResponse
"""GET link-aggregation-groups
List the status and attributes of the Ethernet ports in the configured link aggregation groups.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_link_aggregation_groups_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str continuation_token: An opaque token used to iterate over a collection. The token to use on the next request is returned in the `continuation_token` field of the result.
:param str filter: Exclude resources that don't match the specified criteria.
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param int limit: Limit the size of the response to the specified number of resources. A `limit` of `0` can be used to get the number of resources without getting all of the resources. It will be returned in the `total_item_count` field. If a client asks for a page size larger than the maximum number, the request is still valid. In that case the server just returns the maximum number of items, disregarding the client's page size request.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param int offset: The offset of the first resource to return from a collection.
:param list[str] sort: Sort the response by the specified fields (in descending order if '-' is appended to the field name). NOTE: If you provide a sort you will not get a `continuation_token` in the response.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: LinkAggregationGroupGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api23_link_aggregation_groups_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api23_link_aggregation_groups_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LinkAggregationGroupGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api23_link_aggregation_groups_patch_with_http_info(
self,
link_aggregation_group=None, # type: models.Linkaggregationgroup
ids=None, # type: List[str]
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.LinkAggregationGroupResponse
"""PATCH link-aggregation-groups
Modify link aggregation groups by adding and removing Ethernet ports.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_link_aggregation_groups_patch_with_http_info(link_aggregation_group, async_req=True)
>>> result = thread.get()
:param Linkaggregationgroup link_aggregation_group: (required)
:param list[str] ids: A comma-separated list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters.
:param list[str] names: A comma-separated list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: LinkAggregationGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'link_aggregation_group' is set
if link_aggregation_group is None:
raise TypeError("Missing the required parameter `link_aggregation_group` when calling `api23_link_aggregation_groups_patch`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'link_aggregation_group' in params:
body_params = params['link_aggregation_group']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LinkAggregationGroupResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api23_link_aggregation_groups_post_with_http_info(
self,
link_aggregation_group=None, # type: models.LinkAggregationGroup
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.LinkAggregationGroupResponse
"""POST link-aggregation-groups
Create a link aggregation group of Ethernet ports on the array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api23_link_aggregation_groups_post_with_http_info(link_aggregation_group, names, async_req=True)
>>> result = thread.get()
:param LinkAggregationGroup link_aggregation_group: (required)
:param list[str] names: A comma-separated list of resource names. (required)
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: LinkAggregationGroupResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'link_aggregation_group' is set
if link_aggregation_group is None:
raise TypeError("Missing the required parameter `link_aggregation_group` when calling `api23_link_aggregation_groups_post`")
# verify the required parameter 'names' is set
if names is None:
raise TypeError("Missing the required parameter `names` when calling `api23_link_aggregation_groups_post`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'link_aggregation_group' in params:
body_params = params['link_aggregation_group']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LinkAggregationGroupResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
| 44.544365
| 449
| 0.638331
|
from __future__ import absolute_import
import re
import six
from typing import List, Optional
from .. import models
class LinkAggregationGroupsApi(object):
def __init__(self, api_client):
self.api_client = api_client
def api23_link_aggregation_groups_delete_with_http_info(
self,
ids=None,
names=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api23_link_aggregation_groups_get_with_http_info(
self,
continuation_token=None,
filter=None,
ids=None,
limit=None,
names=None,
offset=None,
sort=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api23_link_aggregation_groups_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api23_link_aggregation_groups_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LinkAggregationGroupGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api23_link_aggregation_groups_patch_with_http_info(
self,
link_aggregation_group=None,
ids=None,
names=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if link_aggregation_group is None:
raise TypeError("Missing the required parameter `link_aggregation_group` when calling `api23_link_aggregation_groups_patch`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'link_aggregation_group' in params:
body_params = params['link_aggregation_group']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LinkAggregationGroupResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api23_link_aggregation_groups_post_with_http_info(
self,
link_aggregation_group=None,
names=None,
async_req=False,
_return_http_data_only=False,
_preload_content=True,
_request_timeout=None,
):
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if link_aggregation_group is None:
raise TypeError("Missing the required parameter `link_aggregation_group` when calling `api23_link_aggregation_groups_post`")
if names is None:
raise TypeError("Missing the required parameter `names` when calling `api23_link_aggregation_groups_post`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'link_aggregation_group' in params:
body_params = params['link_aggregation_group']
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.3/link-aggregation-groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LinkAggregationGroupResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
| true
| true
|
7906a1af0aa434a1018f229e752f0645fd08511f
| 5,281
|
py
|
Python
|
SK_Clustering_WIP.py
|
NatalieBarbosa/hida-datathon-ufz
|
6c3272e07696993523fd97e61c38b981852248f4
|
[
"MIT"
] | null | null | null |
SK_Clustering_WIP.py
|
NatalieBarbosa/hida-datathon-ufz
|
6c3272e07696993523fd97e61c38b981852248f4
|
[
"MIT"
] | null | null | null |
SK_Clustering_WIP.py
|
NatalieBarbosa/hida-datathon-ufz
|
6c3272e07696993523fd97e61c38b981852248f4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os.path as osp
#import netCDF4
#from netcdf_helpers.reader import say_hello, get_time_series_from_location
#from plot.plot import plot_time_series_for_locations
#from sklearn.model_selection import train_test_split
#from sklearn.preprocessing import MinMaxScaler
import numpy as np
import xarray as xr
import pandas as pd
#import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import mglearn
#say_hello()
# set a path to the directory containing the data
directory = "/Users/houben/phd/hackathons/hida_datathon/data/MyChallengePaleo"
# set the file names
filename_temp_data_r1 = "T2m_R1_ym_1stMill.nc"
filename_temp_data_r2 = "T2m_R2_ym_1stMill.nc"
filename_solar_data = "Solar_forcing_1st_mill.nc"
filename_volc_data = "Volc_Forc_AOD_1st_mill.nc"
# load netCDF
#temp_data_r1 = netCDF4.Dataset(osp.join(directory, filename_temp_data_r1), "r")
#temp_data_r2 = netCDF4.Dataset(osp.join(directory, filename_temp_data_r2), "r")
temp_data_r1 = xr.open_dataset(osp.join(directory, filename_temp_data_r1))
temp_data_r2 = xr.open_dataset(osp.join(directory, filename_temp_data_r2))
#Understand the data more and see the levels of each column
df = temp_data_r1.to_dataframe()["T2m"]
print(df.index.get_level_values('time'))
timelist = df.index.get_level_values('time')
latlist = df.index.get_level_values('lat')
lonlist = df.index.get_level_values('lon')
#Reset the indices (I find it easier to work this way)
df_r1 = temp_data_r1.to_dataframe().reset_index(level=['lat', 'lon', 'time'])#["T2m"]
#Calculate a global annual mean temperature time series
Globalmeantemp = df_r1.groupby('time').mean()
#Calculate the mean of the time series to focus on the variation from the mean
mean = np.mean(Globalmeantemp["T2m"])
Var_frommean = Globalmeantemp["T2m"] - mean
plt.plot(Var_frommean)
from sklearn.cluster import KMeans
#Initialize the algorithm and fit it with the data
kmeans = KMeans(n_clusters = 5)
X = Var_frommean.to_numpy().reshape(-1,1)
kmeans.fit(X)
kmeans.cluster_centers_
print("Cluster memberships:\n{}".format(kmeans.labels_))
#Assign classes to each data point based on the model
classes = kmeans.predict(X)
#Inspect the centroids of the clusters
print(kmeans.cluster_centers_)
#Shortcut to see/visualize the datapoints and range of each cluster
mglearn.discrete_scatter(X, X, kmeans.labels_, markers='o')
#Volcanic activity is expected to have the maximum impact out of all forcings so look for the time points which are in the cluster associated with the lowest centroid
dip = np.argwhere(classes==np.argmin(kmeans.cluster_centers_))
#look for the years which have the biggest dips
dipinyear = list(int(timelist[i][0]/10000) for i in dip)
len(dipinyear)
# -----------------------------------------------------------------------------
# Apply a filter to the
# -----------------------------------------------------------------------------
# from ...  # unfinished import left in the WIP script; commented out so the file parses
# -----------------------------------------------------------------------------
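# --- added sketch (not in the original) ---------------------------------------
# The import above was left unfinished, so the intended filter is unknown. One
# plausible reading, assuming a smoothing of the global anomaly series was
# meant, is a centered rolling mean; the 5-step window is an assumption:
Var_frommean_smooth = Var_frommean.rolling(window=5, center=True).mean()
# -----------------------------------------------------------------------------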
shortlistedtimeseries = list(timelist[i][0] for i in dip)
#fourth column to group locations:
#df_r1['latlon'] = df_r1[['lat', 'lon']].apply(lambda x: ','.join(x.astype(str)), axis=1)
#the above step takes too long; look for alternatives. Alternatively, go for the original dataset
#locationmean = df_r1.groupby('latlon').mean()
locationmean = df_r1.groupby(['lat','lon']).mean() #testing alternative to above, much shorter
locationmean["mean"] = locationmean["T2m"]
df_r1_locmean = pd.merge(df_r1, locationmean[['T2m']], on = ['lat','lon']).rename(columns={'T2m_y':'mean'}) #merging the two dataframes
df_r1_locmean["Var"] = df_r1_locmean["T2m_x"] - df_r1_locmean["mean"] #calculating variation from mean of time series at respective location
#Filter the dataset and look at only the years which have the biggest dips for the data analysis/image analysis
#Also divide it into 6 zones as previously discussed: tropical, temperate and polar in northern and southern hemispheres
df_r1_time = df_r1_locmean[df_r1_locmean.time.isin(shortlistedtimeseries)]
df_North_trop = df_r1[(df_r1.lat>=0) & (df_r1.lat<30)]
df_North_temp = df_r1[(df_r1.lat>=30) & (df_r1.lat<60)]
df_North_polar = df_r1[df_r1.lat>=60]
df_South_trop = df_r1[(df_r1.lat>=-30) & (df_r1.lat<0)]
df_South_temp = df_r1[(df_r1.lat>=-60) & (df_r1.lat<-30)]
df_South_polar = df_r1[df_r1.lat<-60]
#Taking snapshots of years of interest: this needs to be broadened to consider the 5 year rolling window I think
kmeans = KMeans(n_clusters = 3)
for t in shortlistedtimeseries[:5]:
    Y = df_r1_time[df_r1_time['time']==t].copy()  # .copy() so the column assignments below do not write into a view
series = Y["Var"]
X = series.to_numpy().reshape(-1,1)
# X = Var_frommean.to_numpy().reshape(-1,1)
kmeans.fit(X)
# print("Cluster memberships:\n{}".format(kmeans.labels_))
#Assign classes to each data point based on the model
classes = kmeans.predict(X)
Y["labels"] = classes
Y["plotlabels"] = kmeans.cluster_centers_[Y["labels"]] #To label the location with the corresponding cluster centroid
# print(kmeans.cluster_centers_)
plt.figure()
mglearn.discrete_scatter(Y['lon'], Y['lat'], Y["plotlabels"], markers='o')
plt.title("Year: "+str(int(t/10000)))
plt.legend()
| 46.734513
| 167
| 0.709903
|
import os.path as osp
import numpy as np
import xarray as xr
import pandas as pd
import matplotlib.pyplot as plt
import mglearn
directory = "/Users/houben/phd/hackathons/hida_datathon/data/MyChallengePaleo"
filename_temp_data_r1 = "T2m_R1_ym_1stMill.nc"
filename_temp_data_r2 = "T2m_R2_ym_1stMill.nc"
filename_solar_data = "Solar_forcing_1st_mill.nc"
filename_volc_data = "Volc_Forc_AOD_1st_mill.nc"
temp_data_r1 = xr.open_dataset(osp.join(directory, filename_temp_data_r1))
temp_data_r2 = xr.open_dataset(osp.join(directory, filename_temp_data_r2))
df = temp_data_r1.to_dataframe()["T2m"]
print(df.index.get_level_values('time'))
timelist = df.index.get_level_values('time')
latlist = df.index.get_level_values('lat')
lonlist = df.index.get_level_values('lon')
df_r1 = temp_data_r1.to_dataframe().reset_index(level=['lat', 'lon', 'time'])
Globalmeantemp = df_r1.groupby('time').mean()
mean = np.mean(Globalmeantemp["T2m"])
Var_frommean = Globalmeantemp["T2m"] - mean
plt.plot(Var_frommean)
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 5)
X = Var_frommean.to_numpy().reshape(-1,1)
kmeans.fit(X)
kmeans.cluster_centers_
print("Cluster memberships:\n{}".format(kmeans.labels_))
classes = kmeans.predict(X)
print(kmeans.cluster_centers_)
mglearn.discrete_scatter(X, X, kmeans.labels_, markers='o')
dip = np.argwhere(classes==np.argmin(kmeans.cluster_centers_))
dipinyear = list(int(timelist[i][0]/10000) for i in dip)
len(dipinyear)
shortlistedtimeseries = list(timelist[i][0] for i in dip)
locationmean = df_r1.groupby(['lat','lon']).mean()
locationmean["mean"] = locationmean["T2m"]
df_r1_locmean = pd.merge(df_r1, locationmean[['T2m']], on = ['lat','lon']).rename(columns={'T2m_y':'mean'})
df_r1_locmean["Var"] = df_r1_locmean["T2m_x"] - df_r1_locmean["mean"]
df_r1_time = df_r1_locmean[df_r1_locmean.time.isin(shortlistedtimeseries)]
df_North_trop = df_r1[(df_r1.lat>=0) & (df_r1.lat<30)]
df_North_temp = df_r1[(df_r1.lat>=30) & (df_r1.lat<60)]
df_North_polar = df_r1[df_r1.lat>=60]
df_South_trop = df_r1[(df_r1.lat>=-30) & (df_r1.lat<0)]
df_South_temp = df_r1[(df_r1.lat>=-60) & (df_r1.lat<-30)]
df_South_polar = df_r1[df_r1.lat<-60]
kmeans = KMeans(n_clusters = 3)
for t in shortlistedtimeseries[:5]:
    Y = df_r1_time[df_r1_time['time']==t].copy()
series = Y["Var"]
X = series.to_numpy().reshape(-1,1)
kmeans.fit(X)
classes = kmeans.predict(X)
Y["labels"] = classes
Y["plotlabels"] = kmeans.cluster_centers_[Y["labels"]]
plt.figure()
mglearn.discrete_scatter(Y['lon'], Y['lat'], Y["plotlabels"], markers='o')
plt.title("Year: "+str(int(t/10000)))
plt.legend()
| false
| true
|
7906a1fe76d3a37bcac2d9bcc255d1e9cc8a2724
| 4,064
|
py
|
Python
|
garage/datastore.py
|
gurumitts/garage-butler
|
f136d78f17b55c91acda0ad4e1b7afeda28769eb
|
[
"Apache-2.0"
] | 11
|
2016-08-17T05:46:34.000Z
|
2021-12-02T12:09:13.000Z
|
garage/datastore.py
|
gurumitts/garage-butler
|
f136d78f17b55c91acda0ad4e1b7afeda28769eb
|
[
"Apache-2.0"
] | null | null | null |
garage/datastore.py
|
gurumitts/garage-butler
|
f136d78f17b55c91acda0ad4e1b7afeda28769eb
|
[
"Apache-2.0"
] | null | null | null |
import sqlite3
import logging
DOOR_OPENED = 'door opened'
DOOR_CLOSED = 'door closed'
class DataStore:
def __init__(self, setup=False):
self.connection = sqlite3.connect('db/app.sqlite3.db')
self.connection.row_factory = sqlite3.Row
if setup:
self.setup()
def record_door_opened(self):
self.add_event(DOOR_OPENED)
def record_door_closed(self):
self.add_event(DOOR_CLOSED)
def add_event(self, event):
params = [event]
cursor = self.connection.cursor()
cursor.execute("""insert into events (EVENT)
values(?);""", params)
self.connection.commit()
cursor.close()
def get_events(self):
cursor = self.connection.cursor()
cursor.execute("""select datetime(dt,'localtime') as dt, event from events order by dt desc limit 15""")
rows = cursor.fetchall()
events = []
if rows is not None:
for row in rows:
event = {}
for key in row.keys():
event[key.lower()] = row[key]
events.append(event)
cursor.close()
return events
def get_last_event(self):
cursor = self.connection.cursor()
cursor.execute("""select datetime(dt,'localtime') as dt, event from events order by dt desc limit 1""")
        row = cursor.fetchone()
        event = {}
        if row is not None:
            # fetchone() returns a single sqlite3.Row (or None), not a list of
            # rows, so read its keys directly instead of iterating values
            for key in row.keys():
                event[key.lower()] = row[key]
cursor.close()
return event
def get_status(self):
cursor = self.connection.cursor()
cursor.execute("""select datetime(dt,'localtime') as dt, event,
(strftime('%s','now') - strftime('%s',dt))/60 as
elapsed_minutes from events order by dt desc limit 1""")
row = cursor.fetchone()
status = {}
if row is not None:
for key in row.keys():
status[key.lower()] = row[key]
cursor.close()
return status
def get_settings(self):
cursor = self.connection.cursor()
cursor.execute("""select * from settings limit 1""")
row = cursor.fetchone()
settings = {}
if row is not None:
for key in row.keys():
settings[key.lower()] = row[key]
cursor.close()
return settings
def shutdown(self):
self.connection.commit()
self.connection.close()
def setup(self):
cursor = self.connection.cursor()
try:
cursor.execute('select count(*) from events')
# print cursor.fetchone()
except Exception as e:
logging.getLogger('garage').info('Required table not found... creating events table...')
cursor.execute("""create table events(
ID INTEGER PRIMARY KEY AUTOINCREMENT,
DT DATETIME DEFAULT CURRENT_TIMESTAMP,
EVENT TEXT);""")
logging.info('done!')
finally:
cursor.close()
self.connection.commit()
cursor = self.connection.cursor()
try:
cursor.execute('select count(*) from settings')
# print cursor.fetchone()
except Exception as e:
logging.getLogger('garage').info('Required table not found... creating settings table...')
cursor.execute("""CREATE TABLE "settings" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"check_interval_mins" INTEGER DEFAULT (2),
"notify_interval_mins" INTEGER DEFAULT (30),
"warning_threshold_mins" INTEGER DEFAULT (15),
"sentry_mode" INTEGER DEFAULT (0))""")
self.connection.commit()
cursor.execute('insert into settings (id) values (1)')
logging.info('done!')
finally:
cursor.close()
self.connection.commit()
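if __name__ == '__main__':
    # Added usage sketch (not part of the original module): exercises the store
    # end to end. Assumes a writable db/ directory already exists, since
    # sqlite3.connect('db/app.sqlite3.db') will not create it.
    store = DataStore(setup=True)
    store.record_door_opened()
    print(store.get_status())
    store.shutdown()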
| 33.866667
| 112
| 0.546752
|
import sqlite3
import logging
DOOR_OPENED = 'door opened'
DOOR_CLOSED = 'door closed'
class DataStore:
def __init__(self, setup=False):
self.connection = sqlite3.connect('db/app.sqlite3.db')
self.connection.row_factory = sqlite3.Row
if setup:
self.setup()
def record_door_opened(self):
self.add_event(DOOR_OPENED)
def record_door_closed(self):
self.add_event(DOOR_CLOSED)
def add_event(self, event):
params = [event]
cursor = self.connection.cursor()
cursor.execute("""insert into events (EVENT)
values(?);""", params)
self.connection.commit()
cursor.close()
def get_events(self):
cursor = self.connection.cursor()
cursor.execute("""select datetime(dt,'localtime') as dt, event from events order by dt desc limit 15""")
rows = cursor.fetchall()
events = []
if rows is not None:
for row in rows:
event = {}
for key in row.keys():
event[key.lower()] = row[key]
events.append(event)
cursor.close()
return events
def get_last_event(self):
cursor = self.connection.cursor()
cursor.execute("""select datetime(dt,'localtime') as dt, event from events order by dt desc limit 1""")
        row = cursor.fetchone()
        event = {}
        if row is not None:
            for key in row.keys():
                event[key.lower()] = row[key]
cursor.close()
return event
def get_status(self):
cursor = self.connection.cursor()
cursor.execute("""select datetime(dt,'localtime') as dt, event,
(strftime('%s','now') - strftime('%s',dt))/60 as
elapsed_minutes from events order by dt desc limit 1""")
row = cursor.fetchone()
status = {}
if row is not None:
for key in row.keys():
status[key.lower()] = row[key]
cursor.close()
return status
def get_settings(self):
cursor = self.connection.cursor()
cursor.execute("""select * from settings limit 1""")
row = cursor.fetchone()
settings = {}
if row is not None:
for key in row.keys():
settings[key.lower()] = row[key]
cursor.close()
return settings
def shutdown(self):
self.connection.commit()
self.connection.close()
def setup(self):
cursor = self.connection.cursor()
try:
cursor.execute('select count(*) from events')
except Exception as e:
logging.getLogger('garage').info('Required table not found... creating events table...')
cursor.execute("""create table events(
ID INTEGER PRIMARY KEY AUTOINCREMENT,
DT DATETIME DEFAULT CURRENT_TIMESTAMP,
EVENT TEXT);""")
logging.info('done!')
finally:
cursor.close()
self.connection.commit()
cursor = self.connection.cursor()
try:
cursor.execute('select count(*) from settings')
except Exception as e:
logging.getLogger('garage').info('Required table not found... creating settings table...')
cursor.execute("""CREATE TABLE "settings" (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"check_interval_mins" INTEGER DEFAULT (2),
"notify_interval_mins" INTEGER DEFAULT (30),
"warning_threshold_mins" INTEGER DEFAULT (15),
"sentry_mode" INTEGER DEFAULT (0))""")
self.connection.commit()
cursor.execute('insert into settings (id) values (1)')
logging.info('done!')
finally:
cursor.close()
self.connection.commit()
| true
| true
|
7906a23f9520296d25e3b4189a8b8cd4d982db33
| 2,212
|
py
|
Python
|
vb2py/test/testdotnet.py
|
ceprio/xl_vb2py
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
[
"BSD-3-Clause"
] | null | null | null |
vb2py/test/testdotnet.py
|
ceprio/xl_vb2py
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
[
"BSD-3-Clause"
] | null | null | null |
vb2py/test/testdotnet.py
|
ceprio/xl_vb2py
|
899fec0301140fd8bd313e8c80b3fa839b3f5ee4
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: latin-1 -*-
#
# Turn off logging in extensions (too loud!)
from vb2py.test.testframework import *
import vb2py.extensions
import vb2py.utils
vb2py.extensions.disableLogging()
from vb2py.vbparser import buildParseTree, VBParserError
#
# Set some config options which are appropriate for testing
import vb2py.config
Config = vb2py.config.VB2PYConfig()
Config.setLocalOveride("General", "ReportPartialConversion", "No")
tests = []
# String methods
tests.extend([
'a = "hello".Length',
'a = ("hello").Length',
'a = ("hello" + "world").Length',
'a = ("hello" + "world").Length + 2',
])
# Expression calls
tests.extend([
'a = (a + b).Truncate(2)',
'(a + b).SendToDestination("email.com")',
'(a + b).SendToDestination',
])
tests.append(
"""
Function B()
Return 12
End Function
"""
)
tests.append((
"""
Public Class MyObject
Public Property A As Integer
Get
Return 10
End Get
Set(Value as Integer)
X = Value
End Set
End Property
End Class
"""
))
# VB.NET
tests.append("""
Class MyClass
A = 1
End Class
""")
# Decorated Class
tests.append("""
<Decorator.Thing()> Class MyClass
A = 1
End Class
""")
tests.append("""
<Decorator.Thing()> _
Class MyClass
A = 1
End Class
""")
# Handlers
tests.append("""
Class MyClass
Public Sub DoIt() Handles Button.Click
End Sub
End Class
""")
# Shared methods
tests.append("""
Class MyClass
Public Shared Sub DoIt()
End Sub
Public Shared Function DoIt()
End Function
End Class
""")
tests.append("""
Module Digests
Public Const a = ""
End Module
""")
class ParsingTest(unittest.TestCase):
"""Holder class which gets built into a whole test case"""
def getTestMethod(vb):
"""Create a test method"""
def testMethod(self):
try:
buildParseTree(vb, dialect='vb.net')
except VBParserError:
raise Exception("Unable to parse ...\n%s" % vb)
return testMethod
# Add tests to main test class
for idx in range(len(tests)):
setattr(ParsingTest, "test%d" % idx, getTestMethod(tests[idx]))
if __name__ == "__main__":
main()
| 16.757576
| 67
| 0.623418
|
from vb2py.test.testframework import *
import vb2py.extensions
import vb2py.utils
vb2py.extensions.disableLogging()
from vb2py.vbparser import buildParseTree, VBParserError
import vb2py.config
Config = vb2py.config.VB2PYConfig()
Config.setLocalOveride("General", "ReportPartialConversion", "No")
tests = []
tests.extend([
'a = "hello".Length',
'a = ("hello").Length',
'a = ("hello" + "world").Length',
'a = ("hello" + "world").Length + 2',
])
tests.extend([
'a = (a + b).Truncate(2)',
'(a + b).SendToDestination("email.com")',
'(a + b).SendToDestination',
])
tests.append(
"""
Function B()
Return 12
End Function
"""
)
tests.append((
"""
Public Class MyObject
Public Property A As Integer
Get
Return 10
End Get
Set(Value as Integer)
X = Value
End Set
End Property
End Class
"""
))
tests.append("""
Class MyClass
A = 1
End Class
""")
tests.append("""
<Decorator.Thing()> Class MyClass
A = 1
End Class
""")
tests.append("""
<Decorator.Thing()> _
Class MyClass
A = 1
End Class
""")
tests.append("""
Class MyClass
Public Sub DoIt() Handles Button.Click
End Sub
End Class
""")
tests.append("""
Class MyClass
Public Shared Sub DoIt()
End Sub
Public Shared Function DoIt()
End Function
End Class
""")
tests.append("""
Module Digests
Public Const a = ""
End Module
""")
class ParsingTest(unittest.TestCase):
def getTestMethod(vb):
def testMethod(self):
try:
buildParseTree(vb, dialect='vb.net')
except VBParserError:
raise Exception("Unable to parse ...\n%s" % vb)
return testMethod
for idx in range(len(tests)):
setattr(ParsingTest, "test%d" % idx, getTestMethod(tests[idx]))
if __name__ == "__main__":
main()
| true
| true
|
7906a248511b8ead8f4b3cfe1b4c732077b4a6a1
| 77
|
py
|
Python
|
debug.py
|
HeqetLabs/pipeline
|
325169eead2b08a08e9960fce6a9e8a94e69c298
|
[
"Apache-2.0"
] | null | null | null |
debug.py
|
HeqetLabs/pipeline
|
325169eead2b08a08e9960fce6a9e8a94e69c298
|
[
"Apache-2.0"
] | null | null | null |
debug.py
|
HeqetLabs/pipeline
|
325169eead2b08a08e9960fce6a9e8a94e69c298
|
[
"Apache-2.0"
] | null | null | null |
from pipeline import app
if __name__ == "__main__":
app.run(debug=True)
| 15.4
| 26
| 0.701299
|
from pipeline import app
if __name__ == "__main__":
app.run(debug=True)
| true
| true
|
7906a2ba0f788ea34abb0c4d0389afc879b650b4
| 1,451
|
py
|
Python
|
explorind_project/explorind_project/urls.py
|
Reinaldowijaya/explorind
|
f174b505b857c8ad56848bc29cc11724894df567
|
[
"MIT"
] | 1
|
2018-11-03T23:55:52.000Z
|
2018-11-03T23:55:52.000Z
|
explorind_project/explorind_project/urls.py
|
Reinaldowijaya/explorind
|
f174b505b857c8ad56848bc29cc11724894df567
|
[
"MIT"
] | null | null | null |
explorind_project/explorind_project/urls.py
|
Reinaldowijaya/explorind
|
f174b505b857c8ad56848bc29cc11724894df567
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
import settings.base
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'profiles.views.index', name='index'),
url(r'^accounts/', include('allauth.urls')),
# Examples:
# url(r'^$', 'explorind_project.views.home', name='home'),
# url(r'^explorind_project/', include('explorind_project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^locations/', include('locations.urls')),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.base.STATIC_ROOT,
}),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.base.MEDIA_ROOT}),
url(r'^login$', 'profiles.views.login_view'), # login
url(r'^logout$', 'profiles.views.logout_view'), # logout
url(r'^signup$', 'profiles.views.signup'), # signup
url(r'^submit$', 'profiles.views.submit'),
url(r'^reviews$', 'profiles.views.public'),
url(r'^users/$', 'profiles.views.users'),
url(r'^users/(?P<username>.{0,30})/$', 'profiles.views.users'),
url(r'^follow$', 'profiles.views.follow'),
)
| 40.305556
| 73
| 0.669194
|
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
import settings.base
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'profiles.views.index', name='index'),
url(r'^accounts/', include('allauth.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^locations/', include('locations.urls')),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.base.STATIC_ROOT,
}),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.base.MEDIA_ROOT}),
url(r'^login$', 'profiles.views.login_view'),
url(r'^logout$', 'profiles.views.logout_view'),
url(r'^signup$', 'profiles.views.signup'),
url(r'^submit$', 'profiles.views.submit'),
url(r'^reviews$', 'profiles.views.public'),
url(r'^users/$', 'profiles.views.users'),
url(r'^users/(?P<username>.{0,30})/$', 'profiles.views.users'),
url(r'^follow$', 'profiles.views.follow'),
)
| true
| true
|
7906a3221aa5e6d21e6628ad09562743a9ac31aa
| 259
|
py
|
Python
|
shipments/shipments/doctype/port_of_loading/port_of_loading.py
|
umar567/shipment-repo
|
b6f5830310ae5c9bd45bef252effa489534517bb
|
[
"MIT"
] | null | null | null |
shipments/shipments/doctype/port_of_loading/port_of_loading.py
|
umar567/shipment-repo
|
b6f5830310ae5c9bd45bef252effa489534517bb
|
[
"MIT"
] | null | null | null |
shipments/shipments/doctype/port_of_loading/port_of_loading.py
|
umar567/shipment-repo
|
b6f5830310ae5c9bd45bef252effa489534517bb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Havenir and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class PortOfLoading(Document):
pass
| 23.545455
| 49
| 0.776062
|
from __future__ import unicode_literals
from frappe.model.document import Document
class PortOfLoading(Document):
pass
| true
| true
|
7906a37308a4dce48e3db627baa57958812a5d2f
| 89
|
py
|
Python
|
output/models/saxon_data/all/all001_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/saxon_data/all/all001_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/saxon_data/all/all001_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.saxon_data.all.all001_xsd.all001 import Doc
__all__ = [
"Doc",
]
| 14.833333
| 62
| 0.719101
|
from output.models.saxon_data.all.all001_xsd.all001 import Doc
__all__ = [
"Doc",
]
| true
| true
|
7906a41e203cdf69a82c8aa7b1b5529e404046cf
| 652
|
py
|
Python
|
cereal/messaging_arne/demo.py
|
919bot/Tessa
|
9b48ff9020e8fb6992fc78271f2720fd19e01093
|
[
"MIT"
] | 85
|
2019-06-14T17:51:31.000Z
|
2022-02-09T22:18:20.000Z
|
cereal/messaging_arne/demo.py
|
919bot/Tessa
|
9b48ff9020e8fb6992fc78271f2720fd19e01093
|
[
"MIT"
] | 4
|
2018-12-08T19:02:06.000Z
|
2019-09-01T13:54:26.000Z
|
cereal/messaging_arne/demo.py
|
919bot/Tessa
|
9b48ff9020e8fb6992fc78271f2720fd19e01093
|
[
"MIT"
] | 73
|
2018-12-03T19:34:42.000Z
|
2020-07-27T05:10:23.000Z
|
import time
from messaging_pyx import Context, Poller, SubSocket, PubSocket # pylint: disable=no-name-in-module, import-error
MSGS = 1e5
if __name__ == "__main__":
c = Context()
sub_sock = SubSocket()
pub_sock = PubSocket()
sub_sock.connect(c, "controlsState")
pub_sock.connect(c, "controlsState")
poller = Poller()
poller.registerSocket(sub_sock)
t = time.time()
for i in range(int(MSGS)):
bts = i.to_bytes(4, 'little')
pub_sock.send(bts)
for s in poller.poll(100):
dat = s.receive()
ii = int.from_bytes(dat, 'little')
assert(i == ii)
dt = time.time() - t
print("%.1f msg/s" % (MSGS / dt))
| 21.032258
| 113
| 0.642638
|
import time
from messaging_pyx import Context, Poller, SubSocket, PubSocket
MSGS = 1e5
if __name__ == "__main__":
c = Context()
sub_sock = SubSocket()
pub_sock = PubSocket()
sub_sock.connect(c, "controlsState")
pub_sock.connect(c, "controlsState")
poller = Poller()
poller.registerSocket(sub_sock)
t = time.time()
for i in range(int(MSGS)):
bts = i.to_bytes(4, 'little')
pub_sock.send(bts)
for s in poller.poll(100):
dat = s.receive()
ii = int.from_bytes(dat, 'little')
assert(i == ii)
dt = time.time() - t
print("%.1f msg/s" % (MSGS / dt))
| true
| true
|
7906a4c57fc6f2068b03b13b4ea23881acebec75
| 85
|
py
|
Python
|
src/config.py
|
exthrempty/vkbottle-bot-template
|
491e1bd142371eca42ece8085cc3f8657f091ad3
|
[
"MIT"
] | 4
|
2021-11-02T18:21:37.000Z
|
2021-11-27T08:59:23.000Z
|
src/config.py
|
exthrempty/vkbottle-bot-template
|
491e1bd142371eca42ece8085cc3f8657f091ad3
|
[
"MIT"
] | null | null | null |
src/config.py
|
exthrempty/vkbottle-bot-template
|
491e1bd142371eca42ece8085cc3f8657f091ad3
|
[
"MIT"
] | 1
|
2021-11-02T18:24:44.000Z
|
2021-11-02T18:24:44.000Z
|
from envparse import env
env.read_envfile(".env")
BOT_TOKEN = env.str("BOT_TOKEN")
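# Added note (not part of the original): env.read_envfile(".env") expects a
# file in the working directory containing a line such as
#   BOT_TOKEN=1234567890:example-token
# env.str("BOT_TOKEN") raises if the variable is set neither there nor in the
# process environment.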
| 14.166667
| 32
| 0.741176
|
from envparse import env
env.read_envfile(".env")
BOT_TOKEN = env.str("BOT_TOKEN")
| true
| true
|
7906a52cd4fe6b29e321fd090c89f09a8e79b0a2
| 1,150
|
py
|
Python
|
python/Django/django1/urls.py
|
ShiroDevC/portfolio
|
f31ce2bc4a5a2725d4de40a22997dca2b361d367
|
[
"MIT"
] | null | null | null |
python/Django/django1/urls.py
|
ShiroDevC/portfolio
|
f31ce2bc4a5a2725d4de40a22997dca2b361d367
|
[
"MIT"
] | null | null | null |
python/Django/django1/urls.py
|
ShiroDevC/portfolio
|
f31ce2bc4a5a2725d4de40a22997dca2b361d367
|
[
"MIT"
] | null | null | null |
"""django1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('blog/', include('blog.urls')),
path('polls/', include('polls.urls')),
path('portfolio', include('portfolio.urls')),
path('admin/', admin.site.urls)
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 38.333333
| 82
| 0.721739
|
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('blog/', include('blog.urls')),
path('polls/', include('polls.urls')),
path('portfolio', include('portfolio.urls')),
path('admin/', admin.site.urls)
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true
| true
|
7906a5878687fea08eea316d9302d9a2b35677ae
| 470
|
py
|
Python
|
casepy/eulerRuO2/nNoh512x1/chars.py
|
will-iam/Variant
|
5b6732134fd51cf6c2b90b51b7976be0693ba28d
|
[
"MIT"
] | 8
|
2017-05-04T07:50:02.000Z
|
2019-05-17T02:27:20.000Z
|
casepy/eulerRuO2/nNoh512x1/chars.py
|
will-iam/Variant
|
5b6732134fd51cf6c2b90b51b7976be0693ba28d
|
[
"MIT"
] | null | null | null |
casepy/eulerRuO2/nNoh512x1/chars.py
|
will-iam/Variant
|
5b6732134fd51cf6c2b90b51b7976be0693ba28d
|
[
"MIT"
] | null | null | null |
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '../../../'))
import script.rio as io
import script.initial_condition.noh1D as noh1D
# Domain properties
lx = 1.0
ly = 1.0
Nx = 512
Ny = 1
# Scheme execution options
T = 0.6
CFL = 0.5
gamma = 5./3.
BClayer = 1
quantityList = ['rho', 'rhou_x', 'rhou_y', 'rhoE']
def buildme(quantityDict, coords_to_uid, coords_to_bc):
noh1D.build(quantityDict, coords_to_uid, coords_to_bc, Nx, Ny, lx, ly, BClayer)
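# Added usage sketch (not part of the case file): only buildme's signature and
# the constants above come from this file; the array shapes are assumptions
# about how the driver allocates per-quantity grids.
#
#   import numpy as np
#   quantityDict = {q: np.zeros((Ny + 2 * BClayer, Nx + 2 * BClayer)) for q in quantityList}
#   buildme(quantityDict, coords_to_uid={}, coords_to_bc={})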
| 18.8
| 83
| 0.680851
|
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '../../../'))
import script.rio as io
import script.initial_condition.noh1D as noh1D
lx = 1.0
ly = 1.0
Nx = 512
Ny = 1
T = 0.6
CFL = 0.5
gamma = 5./3.
BClayer = 1
quantityList = ['rho', 'rhou_x', 'rhou_y', 'rhoE']
def buildme(quantityDict, coords_to_uid, coords_to_bc):
noh1D.build(quantityDict, coords_to_uid, coords_to_bc, Nx, Ny, lx, ly, BClayer)
| true
| true
|
7906a5a7369970e020fbb91590c06bea93d2fdfc
| 1,897
|
py
|
Python
|
examples/views/ephemeral.py
|
NextChai/discord.py
|
1bb8ea97db38594cd2f1bbc727b81e0bba81efc3
|
[
"MIT"
] | 8
|
2021-08-28T03:10:57.000Z
|
2021-10-31T07:49:18.000Z
|
examples/views/ephemeral.py
|
NextChai/discord.py
|
1bb8ea97db38594cd2f1bbc727b81e0bba81efc3
|
[
"MIT"
] | null | null | null |
examples/views/ephemeral.py
|
NextChai/discord.py
|
1bb8ea97db38594cd2f1bbc727b81e0bba81efc3
|
[
"MIT"
] | 1
|
2021-09-30T13:49:45.000Z
|
2021-09-30T13:49:45.000Z
|
from discord.ext import commands
import discord
class EphemeralCounterBot(commands.Bot):
def __init__(self):
super().__init__()
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
# Define a simple View that gives us a counter button
class Counter(discord.ui.View):
# Define the actual button
# When pressed, this increments the number displayed until it hits 5.
# When it hits 5, the counter button is disabled and it turns green.
# note: The name of the function does not matter to the library
@discord.ui.button(label='0', style=discord.ButtonStyle.red)
async def count(self, button: discord.ui.Button, interaction: discord.Interaction):
number = int(button.label) if button.label else 0
if number + 1 >= 5:
button.style = discord.ButtonStyle.green
button.disabled = True
button.label = str(number + 1)
# Make sure to update the message with our updated selves
await interaction.response.edit_message(view=self)
# Define a View that will give us our own personal counter button
class EphemeralCounter(discord.ui.View):
# When this button is pressed, it will respond with a Counter view that will
# give the button presser their own personal button they can press 5 times.
@discord.ui.button(label='Click', style=discord.ButtonStyle.blurple)
async def receive(self, button: discord.ui.Button, interaction: discord.Interaction):
# ephemeral=True makes the message hidden from everyone except the button presser
await interaction.response.send_message('Enjoy!', view=Counter(), ephemeral=True)
bot = EphemeralCounterBot()
@bot.slash()
async def counter(ctx: commands.Context):
"""Starts a counter for pressing."""
await ctx.send('Press!', view=EphemeralCounter())
bot.run('token')
| 39.520833
| 89
| 0.702688
|
from discord.ext import commands
import discord
class EphemeralCounterBot(commands.Bot):
def __init__(self):
super().__init__()
async def on_ready(self):
print(f'Logged in as {self.user} (ID: {self.user.id})')
print('------')
class Counter(discord.ui.View):
@discord.ui.button(label='0', style=discord.ButtonStyle.red)
async def count(self, button: discord.ui.Button, interaction: discord.Interaction):
number = int(button.label) if button.label else 0
if number + 1 >= 5:
button.style = discord.ButtonStyle.green
button.disabled = True
button.label = str(number + 1)
await interaction.response.edit_message(view=self)
class EphemeralCounter(discord.ui.View):
@discord.ui.button(label='Click', style=discord.ButtonStyle.blurple)
async def receive(self, button: discord.ui.Button, interaction: discord.Interaction):
await interaction.response.send_message('Enjoy!', view=Counter(), ephemeral=True)
bot = EphemeralCounterBot()
@bot.slash()
async def counter(ctx: commands.Context):
await ctx.send('Press!', view=EphemeralCounter())
bot.run('token')
| true
| true
|
7906a670a76b8501c8a9810318daa888510802ec
| 1,799
|
py
|
Python
|
examples/example-5.py
|
abarreal/coopy
|
af2c42ab20e534d7790d7f591d39ea9e6c727c35
|
[
"MIT"
] | null | null | null |
examples/example-5.py
|
abarreal/coopy
|
af2c42ab20e534d7790d7f591d39ea9e6c727c35
|
[
"MIT"
] | null | null | null |
examples/example-5.py
|
abarreal/coopy
|
af2c42ab20e534d7790d7f591d39ea9e6c727c35
|
[
"MIT"
] | null | null | null |
#
# This example is again a graph coloring problem. In this case, however,
# a stronger object-oriented approach is adopted to show how Coopy is
# indeed compatible with such practices.
#
import coopy
import random
class Node:
def __init__(self):
self._color = coopy.symbolic_int('c')
self._neighbors = set()
@property
def color(self):
return self._color
@property
def has_valid_connections(self):
return coopy.all([self.color != n.color for n in self._neighbors])
def direct_edge_towards(self, other):
self._neighbors.add(other)
def __repr__(self):
return str(self.color)
def construct_k_colored_graph(k, n, p):
"""
    Constructs a k-colored graph of n nodes in which a pair
of nodes shares an edge with probability 0 <= p <= 1.
Note: this code is for demonstrative purposes only; the
solution for such a problem will not necessarily exist,
in which case the concretization process will throw
an exception.
"""
with coopy.scope():
# Instantiate n nodes.
nodes = [Node() for i in range(n)]
# Connect nodes with probability p.
for i in range(n-1):
for j in range(i+1,n):
a = nodes[i]
b = nodes[j]
if random.uniform(0,1) < p:
a.direct_edge_towards(b)
b.direct_edge_towards(a)
# Impose restrictions over the nodes.
for node in nodes:
coopy.any([node.color == i for i in range(k)]).require()
node.has_valid_connections.require()
# Concretize the graph and return it as a list of nodes.
coopy.concretize()
return nodes
graph = construct_k_colored_graph(3, 10, 0.2)
print(graph)
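# Added note (not part of the original): as the docstring warns, a valid
# colouring need not exist for a random instance, in which case concretize()
# raises. A more defensive call pattern (the exact exception type is not shown
# in this file, so a broad except is used here):
#
#   try:
#       graph = construct_k_colored_graph(3, 10, 0.2)
#   except Exception:
#       graph = None  # no 3-colouring found for this random graph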
| 28.109375
| 74
| 0.617009
|
import coopy
import random
class Node:
def __init__(self):
self._color = coopy.symbolic_int('c')
self._neighbors = set()
@property
def color(self):
return self._color
@property
def has_valid_connections(self):
return coopy.all([self.color != n.color for n in self._neighbors])
def direct_edge_towards(self, other):
self._neighbors.add(other)
def __repr__(self):
return str(self.color)
def construct_k_colored_graph(k, n, p):
with coopy.scope():
nodes = [Node() for i in range(n)]
for i in range(n-1):
for j in range(i+1,n):
a = nodes[i]
b = nodes[j]
if random.uniform(0,1) < p:
a.direct_edge_towards(b)
b.direct_edge_towards(a)
for node in nodes:
coopy.any([node.color == i for i in range(k)]).require()
node.has_valid_connections.require()
coopy.concretize()
return nodes
graph = construct_k_colored_graph(3, 10, 0.2)
print(graph)
| true
| true
|
7906a6f5f47fab50ffebdffe61744dcd8129dab9
| 771
|
py
|
Python
|
tests/test_expiration.py
|
vera1118/async_lru
|
e292b73e2a655dd5fb9a42a17a6170c66efb52a8
|
[
"MIT"
] | null | null | null |
tests/test_expiration.py
|
vera1118/async_lru
|
e292b73e2a655dd5fb9a42a17a6170c66efb52a8
|
[
"MIT"
] | null | null | null |
tests/test_expiration.py
|
vera1118/async_lru
|
e292b73e2a655dd5fb9a42a17a6170c66efb52a8
|
[
"MIT"
] | null | null | null |
import asyncio
import time
import pytest
from async_lru import alru_cache
pytestmark = pytest.mark.asyncio
async def test_expiration(check_lru, loop):
@alru_cache(maxsize=4, expiration_time=2, loop=loop)
async def coro(val):
return val
inputs = [1, 2, 3]
coros = [coro(v) for v in inputs]
ret = await asyncio.gather(*coros, loop=loop)
assert ret == inputs
check_lru(coro, hits=0, misses=3, cache=3, tasks=0, maxsize=4)
time.sleep(1)
inputs = 1
ret = await coro(inputs)
assert ret == inputs
check_lru(coro, hits=1, misses=3, cache=3, tasks=0, maxsize=4)
time.sleep(3)
inputs = 1
ret = await coro(inputs)
assert ret == inputs
check_lru(coro, hits=1, misses=4, cache=3, tasks=0, maxsize=4)
| 23.363636
| 66
| 0.658885
|
import asyncio
import time
import pytest
from async_lru import alru_cache
pytestmark = pytest.mark.asyncio
async def test_expiration(check_lru, loop):
@alru_cache(maxsize=4, expiration_time=2, loop=loop)
async def coro(val):
return val
inputs = [1, 2, 3]
coros = [coro(v) for v in inputs]
ret = await asyncio.gather(*coros, loop=loop)
assert ret == inputs
check_lru(coro, hits=0, misses=3, cache=3, tasks=0, maxsize=4)
time.sleep(1)
inputs = 1
ret = await coro(inputs)
assert ret == inputs
check_lru(coro, hits=1, misses=3, cache=3, tasks=0, maxsize=4)
time.sleep(3)
inputs = 1
ret = await coro(inputs)
assert ret == inputs
check_lru(coro, hits=1, misses=4, cache=3, tasks=0, maxsize=4)
| true
| true
|
7906a6fde0ca63bcd157e24f3181b2fae1b27ca5
| 5,245
|
py
|
Python
|
spark_fhir_schemas/stu3/complex_types/testreport_teardown.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/stu3/complex_types/testreport_teardown.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/stu3/complex_types/testreport_teardown.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class TestReport_TeardownSchema:
"""
A summary of information based on the results of executing a TestScript.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
A summary of information based on the results of executing a TestScript.
id: unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
action: The teardown action will only contain an operation.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.testreport_action2 import (
TestReport_Action2Schema,
)
if (
max_recursion_limit
and nesting_list.count("TestReport_Teardown") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["TestReport_Teardown"]
schema = StructType(
[
# unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The teardown action will only contain an operation.
StructField(
"action",
ArrayType(
TestReport_Action2Schema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
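# Added usage sketch (not part of the generated module): the typical call
# pattern, commented out because it needs pyspark and the spark_fhir_schemas
# package importable at runtime:
#
#   schema = TestReport_TeardownSchema.get_schema()
#   print([f.name for f in schema.fields])  # -> ['id', 'extension', 'action']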
| 42.642276
| 96
| 0.563203
|
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
class TestReport_TeardownSchema:
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.testreport_action2 import (
TestReport_Action2Schema,
)
if (
max_recursion_limit
and nesting_list.count("TestReport_Teardown") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
my_nesting_list: List[str] = nesting_list + ["TestReport_Teardown"]
schema = StructType(
[
StructField("id", StringType(), True),
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
StructField(
"action",
ArrayType(
TestReport_Action2Schema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
| true
| true
|
7906a7b370537741e3d8794e81c7da71067d6607
| 75,859
|
py
|
Python
|
python/ccxt/base/exchange.py
|
newdime/ccxt
|
baf9302b7bb760f0779b38521acd69d5fce78806
|
[
"MIT"
] | null | null | null |
python/ccxt/base/exchange.py
|
newdime/ccxt
|
baf9302b7bb760f0779b38521acd69d5fce78806
|
[
"MIT"
] | null | null | null |
python/ccxt/base/exchange.py
|
newdime/ccxt
|
baf9302b7bb760f0779b38521acd69d5fce78806
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Base exchange class"""
# -----------------------------------------------------------------------------
__version__ = '1.18.575'
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NetworkError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
# -----------------------------------------------------------------------------
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES, TRUNCATE, ROUND
from ccxt.base.decimal_to_precision import number_to_string
# -----------------------------------------------------------------------------
__all__ = [
'Exchange',
]
# -----------------------------------------------------------------------------
# Python 2 & 3
import types
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException
# import socket
from ssl import SSLError
# import sys
import time
import uuid
import zlib
from decimal import Decimal
# -----------------------------------------------------------------------------
try:
basestring # basestring was removed in Python 3
except NameError:
basestring = str
try:
long # long integer was removed in Python 3
except NameError:
long = int
# -----------------------------------------------------------------------------
try:
import urllib.parse as _urlencode # Python 3
except ImportError:
import urllib as _urlencode # Python 2
# -----------------------------------------------------------------------------
# web3/0x imports
try:
# from web3.auto import w3
from web3 import Web3, HTTPProvider
from web3.utils.encoding import hex_encode_abi_type
except ImportError:
Web3 = HTTPProvider = None # web3/0x not supported in Python 2
# -----------------------------------------------------------------------------
class Exchange(object):
"""Base exchange class"""
id = None
version = None
certified = False
# rate limiter settings
enableRateLimit = False
rateLimit = 2000 # milliseconds = seconds * 1000
timeout = 10000 # milliseconds = seconds * 1000
asyncio_loop = None
aiohttp_proxy = None
aiohttp_trust_env = False
session = None # Session () by default
logger = None # logging.getLogger(__name__) by default
userAgent = None
userAgents = {
'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
}
verbose = False
markets = None
symbols = None
fees = {
'trading': {
'percentage': True, # subclasses should rarely have to redefine this
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
loaded_fees = {
'trading': {
'percentage': True,
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
ids = None
tickers = None
api = None
parseJsonResponse = True
proxy = ''
origin = '*' # CORS origin
proxies = None
hostname = None # in case of inaccessibility of the "main" domain
apiKey = ''
secret = ''
password = ''
uid = ''
privateKey = '' # a "0x"-prefixed hexstring private key for a wallet
walletAddress = '' # the wallet address "0x"-prefixed hexstring
token = '' # reserved for HTTP auth in some cases
twofa = None
marketsById = None
markets_by_id = None
currencies_by_id = None
precision = None
exceptions = None
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
httpExceptions = {
'422': ExchangeError,
'418': DDoSProtection,
'429': DDoSProtection,
'404': ExchangeNotAvailable,
'409': ExchangeNotAvailable,
'500': ExchangeNotAvailable,
'501': ExchangeNotAvailable,
'502': ExchangeNotAvailable,
'520': ExchangeNotAvailable,
'521': ExchangeNotAvailable,
'522': ExchangeNotAvailable,
'525': ExchangeNotAvailable,
'526': ExchangeNotAvailable,
'400': ExchangeNotAvailable,
'403': ExchangeNotAvailable,
'405': ExchangeNotAvailable,
'503': ExchangeNotAvailable,
'530': ExchangeNotAvailable,
'408': RequestTimeout,
'504': RequestTimeout,
'401': AuthenticationError,
'511': AuthenticationError,
}
headers = None
balance = None
orderbooks = None
orders = None
trades = None
transactions = None
currencies = None
    options = None  # Python does not allow defining properties at runtime with setattr
accounts = None
requiredCredentials = {
'apiKey': True,
'secret': True,
'uid': False,
'login': False,
'password': False,
'twofa': False, # 2-factor authentication (one-time password key)
'privateKey': False, # a "0x"-prefixed hexstring private key for a wallet
'walletAddress': False, # the wallet address "0x"-prefixed hexstring
'token': False, # reserved for HTTP auth in some cases
}
# API method metainfo
has = {
'cancelAllOrders': False,
'cancelOrder': True,
'cancelOrders': False,
'CORS': False,
'createDepositAddress': False,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'deposit': False,
'editOrder': 'emulated',
'fetchBalance': True,
'fetchClosedOrders': False,
'fetchCurrencies': False,
'fetchDepositAddress': False,
'fetchDeposits': False,
'fetchFundingFees': False,
'fetchL2OrderBook': True,
'fetchLedger': False,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOHLCV': 'emulated',
'fetchOpenOrders': False,
'fetchOrder': False,
'fetchOrderBook': True,
'fetchOrderBooks': False,
'fetchOrders': False,
'fetchTicker': True,
'fetchTickers': False,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchFundingFee': False,
'fetchTradingLimits': False,
'fetchTransactions': False,
'fetchWithdrawals': False,
'privateAPI': True,
'publicAPI': True,
'withdraw': False,
}
precisionMode = DECIMAL_PLACES
minFundingAddressLength = 1 # used in check_address
substituteCommonCurrencyCodes = True
lastRestRequestTimestamp = 0
lastRestPollTimestamp = 0
restRequestQueue = None
restPollerLoopIsRunning = False
rateLimitTokens = 16
rateLimitMaxTokens = 16
rateLimitUpdateTime = 0
enableLastHttpResponse = True
enableLastJsonResponse = True
enableLastResponseHeaders = True
last_http_response = None
last_json_response = None
last_response_headers = None
requiresWeb3 = False
web3 = None
commonCurrencies = {
'XBT': 'BTC',
'BCC': 'BCH',
'DRK': 'DASH',
'BCHABC': 'BCH',
'BCHSV': 'BSV',
}
def __init__(self, config={}):
self.precision = dict() if self.precision is None else self.precision
self.limits = dict() if self.limits is None else self.limits
self.exceptions = dict() if self.exceptions is None else self.exceptions
self.headers = dict() if self.headers is None else self.headers
self.balance = dict() if self.balance is None else self.balance
self.orderbooks = dict() if self.orderbooks is None else self.orderbooks
self.orders = dict() if self.orders is None else self.orders
self.trades = dict() if self.trades is None else self.trades
self.transactions = dict() if self.transactions is None else self.transactions
self.currencies = dict() if self.currencies is None else self.currencies
        self.options = dict() if self.options is None else self.options  # Python does not allow defining properties at run-time with setattr
self.decimal_to_precision = decimal_to_precision
self.number_to_string = number_to_string
# version = '.'.join(map(str, sys.version_info[:3]))
# self.userAgent = {
# 'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version
# }
self.userAgent = default_user_agent()
settings = self.deep_extend(self.describe(), config)
for key in settings:
if hasattr(self, key) and isinstance(getattr(self, key), dict):
setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
else:
setattr(self, key, settings[key])
if self.api:
self.define_rest_api(self.api, 'request')
if self.markets:
self.set_markets(self.markets)
# convert all properties from underscore notation foo_bar to camelcase notation fooBar
cls = type(self)
for name in dir(self):
if name[0] != '_' and name[-1] != '_' and '_' in name:
parts = name.split('_')
camelcase = parts[0] + ''.join(self.capitalize(i) for i in parts[1:])
attr = getattr(self, name)
if isinstance(attr, types.MethodType):
setattr(cls, camelcase, getattr(cls, name))
else:
setattr(self, camelcase, attr)
self.tokenBucket = self.extend({
'refillRate': 1.0 / self.rateLimit,
'delay': 0.001,
'capacity': 1.0,
'defaultCost': 1.0,
}, getattr(self, 'tokenBucket') if hasattr(self, 'tokenBucket') else {})
self.session = self.session if self.session else Session()
self.logger = self.logger if self.logger else logging.getLogger(__name__)
if self.requiresWeb3 and Web3 and not self.web3:
# self.web3 = w3 if w3 else Web3(HTTPProvider())
self.web3 = Web3(HTTPProvider())
def __del__(self):
if self.session:
self.session.close()
def describe(self):
return {}
def set_sandbox_mode(self, enabled):
if enabled:
if 'test' in self.urls:
self.urls['api_backup'] = self.urls['api']
self.urls['api'] = self.urls['test']
else:
raise NotSupported(self.id + ' does not have a sandbox URL')
elif 'api_backup' in self.urls:
self.urls['api'] = self.urls['api_backup']
del self.urls['api_backup']
@classmethod
def define_rest_api(cls, api, method_name, options={}):
delimiters = re.compile('[^a-zA-Z0-9]')
entry = getattr(cls, method_name) # returns a function (instead of a bound method)
for api_type, methods in api.items():
for http_method, urls in methods.items():
for url in urls:
url = url.strip()
split_path = delimiters.split(url)
uppercase_method = http_method.upper()
lowercase_method = http_method.lower()
camelcase_method = lowercase_method.capitalize()
camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
lowercase_path = [x.strip().lower() for x in split_path]
underscore_suffix = '_'.join([k for k in lowercase_path if len(k)])
camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix)
underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower()
if 'suffixes' in options:
if 'camelcase' in options['suffixes']:
camelcase += options['suffixes']['camelcase']
if 'underscore' in options['suffixes']:
underscore += options['suffixes']['underscore']
def partialer():
outer_kwargs = {'path': url, 'api': api_type, 'method': uppercase_method}
@functools.wraps(entry)
def inner(_self, params=None):
"""
Inner is called when a generated method (publicGetX) is called.
_self is a reference to self created by function.__get__(exchange, type(exchange))
https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial
"""
inner_kwargs = dict(outer_kwargs) # avoid mutation
if params is not None:
inner_kwargs['params'] = params
return entry(_self, **inner_kwargs)
return inner
to_bind = partialer()
setattr(cls, camelcase, to_bind)
setattr(cls, underscore, to_bind)
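    # Illustrative sketch (added commentary, not part of the original source):
    # given a hypothetical endpoint map, define_rest_api attaches one camelCase
    # and one underscore alias per URL to the class:
    #
    #     api = {'public': {'get': ['ticker/{symbol}']}}
    #     # after define_rest_api(api, 'request') instances gain
    #     #     exchange.publicGetTickerSymbol(params)
    #     #     exchange.public_get_ticker_symbol(params)
    #     # both forwarding to request('ticker/{symbol}', 'public', 'GET', params)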
def raise_error(self, exception_type, url=None, method=None, error=None, details=None):
if error:
error = str(error)
output = ' '.join([self.id] + [var for var in (url, method, error, details) if var is not None])
raise exception_type(output)
def throttle(self):
now = float(self.milliseconds())
elapsed = now - self.lastRestRequestTimestamp
if elapsed < self.rateLimit:
delay = self.rateLimit - elapsed
time.sleep(delay / 1000.0)
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""Exchange.request is the entry point for all generated methods"""
return self.fetch2(path, api, method, params, headers, body)
@staticmethod
def gzip_deflate(response, text):
encoding = response.info().get('Content-Encoding')
if encoding in ('gzip', 'x-gzip', 'deflate'):
if encoding == 'deflate':
return zlib.decompress(text, -zlib.MAX_WBITS)
else:
return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
return text
def find_broadly_matched_key(self, broad, string):
"""A helper method for matching error strings exactly vs broadly"""
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if string.find(key) >= 0:
return key
return None
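    # Illustrative sketch (added commentary, not part of the original source):
    # the broad dict maps substrings to exception classes, and the first key
    # contained in the error message wins:
    #
    #     broad = {'Invalid nonce': ExchangeError}
    #     exchange.find_broadly_matched_key(broad, 'Error: Invalid nonce provided')  # -> 'Invalid nonce'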
def handle_errors(self, code, reason, url, method, headers, body, response):
pass
def prepare_request_headers(self, headers=None):
headers = headers or {}
headers.update(self.headers)
if self.userAgent:
if type(self.userAgent) is str:
headers.update({'User-Agent': self.userAgent})
elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent):
headers.update(self.userAgent)
if self.proxy:
headers.update({'Origin': self.origin})
headers.update({'Accept-Encoding': 'gzip, deflate'})
return headers
def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, request_headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
if body:
body = body.encode()
self.session.cookies.clear()
response = None
http_response = None
json_response = None
try:
response = self.session.request(
method,
url,
data=body,
headers=request_headers,
timeout=int(self.timeout / 1000),
proxies=self.proxies
)
http_response = response.text
json_response = self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None
headers = response.headers
# FIXME remove last_x_responses from subclasses
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.verbose:
print("\nResponse:", method, url, response.status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status_code, headers, http_response)
response.raise_for_status()
except Timeout as e:
            self.raise_error(RequestTimeout, url, method, e)
except TooManyRedirects as e:
self.raise_error(ExchangeError, url, method, e)
except SSLError as e:
self.raise_error(ExchangeError, url, method, e)
except HTTPError as e:
self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response)
self.handle_rest_errors(e, response.status_code, http_response, url, method)
self.raise_error(ExchangeError, url, method, e, http_response)
except RequestException as e: # base exception class
error_string = str(e)
if ('ECONNRESET' in error_string) or ('Connection aborted.' in error_string):
self.raise_error(NetworkError, url, method, e)
else:
self.raise_error(ExchangeError, url, method, e)
self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response)
self.handle_rest_response(http_response, json_response, url, method, headers, body)
if json_response is not None:
return json_response
return http_response
def handle_rest_errors(self, exception, http_status_code, response, url, method='GET'):
error = None
string_code = str(http_status_code)
if string_code in self.httpExceptions:
error = self.httpExceptions[string_code]
if error == ExchangeNotAvailable:
if re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE):
error = DDoSProtection
if error:
self.raise_error(error, url, method, exception if exception else http_status_code, response)
def handle_rest_response(self, response, json_response, url, method='GET', headers=None, body=None):
if self.is_json_encoded_object(response) and json_response is None:
ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE)
exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE)
if ddos_protection:
                self.raise_error(DDoSProtection, url, method, None, response)
if exchange_not_available:
message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect'
                self.raise_error(ExchangeNotAvailable, url, method, None, message)
            self.raise_error(ExchangeError, url, method, ValueError('failed to decode json'), response)
def parse_json(self, http_response):
try:
if Exchange.is_json_encoded_object(http_response):
return json.loads(http_response)
        except ValueError:  # superclass of json.JSONDecodeError (py3); the error raised directly in py2
pass
@staticmethod
def safe_float(dictionary, key, default_value=None):
value = default_value
try:
if isinstance(dictionary, list) and isinstance(key, int) and len(dictionary) > key:
value = float(dictionary[key])
else:
value = float(dictionary[key]) if (key is not None) and (key in dictionary) and (dictionary[key] is not None) else default_value
except ValueError as e:
value = default_value
return value
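    # Illustrative sketch (added commentary, not part of the original source):
    # the safe_* family reads possibly-missing keys without raising and falls
    # back to a default value:
    #
    #     Exchange.safe_float({'price': '1.25'}, 'price')      # -> 1.25
    #     Exchange.safe_float({'price': 'n/a'}, 'price', 0.0)  # -> 0.0
    #     Exchange.safe_float({}, 'price')                     # -> None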
@staticmethod
def safe_string(dictionary, key, default_value=None):
return str(dictionary[key]) if key is not None and (key in dictionary) and dictionary[key] is not None else default_value
@staticmethod
def safe_integer(dictionary, key, default_value=None):
if key is None or (key not in dictionary):
return default_value
value = dictionary[key]
if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()):
return int(value)
return default_value
@staticmethod
def safe_value(dictionary, key, default_value=None):
return dictionary[key] if key is not None and (key in dictionary) and dictionary[key] is not None else default_value
# we're not using safe_floats with a list argument as we're trying to save some cycles here
# we're not using safe_float_3 either because those cases are too rare to deserve their own optimization
@staticmethod
def safe_float_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value)
@staticmethod
def safe_string_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value)
@staticmethod
def safe_integer_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value)
@staticmethod
def safe_value_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value)
@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
"""A helper-wrapper for the safe_value_2() family."""
value = method(dictionary, key1)
return value if value is not None else method(dictionary, key2, default_value)
@staticmethod
def truncate(num, precision=0):
"""Deprecated, use decimal_to_precision instead"""
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(num * decimal_precision) / decimal_precision
return int(Exchange.truncate_to_string(num, precision))
@staticmethod
def truncate_to_string(num, precision=0):
"""Deprecated, todo: remove references from subclasses"""
if precision > 0:
parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = decimal_digits if len(decimal_digits) else '0'
return parts[0] + '.' + decimal_digits
return ('%d' % num)
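    # Illustrative sketch (added commentary, not part of the original source):
    # both deprecated helpers trim a number to a fixed count of decimal digits:
    #
    #     Exchange.truncate(3.14159, 2)            # -> 3.14
    #     Exchange.truncate_to_string(3.14159, 2)  # -> '3.14'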
@staticmethod
def uuid():
return str(uuid.uuid4())
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
@staticmethod
def keysort(dictionary):
return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
@staticmethod
def deep_extend(*args):
result = None
for arg in args:
if isinstance(arg, dict):
if not isinstance(result, dict):
result = {}
for key in arg:
result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
else:
result = arg
return result
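    # Illustrative sketch (added commentary, not part of the original source):
    # deep_extend merges nested dicts recursively, later arguments winning:
    #
    #     Exchange.deep_extend({'a': {'b': 1}}, {'a': {'c': 2}})  # -> {'a': {'b': 1, 'c': 2}}
    #     Exchange.deep_extend({'a': 1}, {'a': {'b': 2}})         # -> {'a': {'b': 2}}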
@staticmethod
def filter_by(array, key, value=None):
if value:
grouped = Exchange.group_by(array, key)
if value in grouped:
return grouped[value]
return []
return array
@staticmethod
    def filterBy(array, key, value=None):
return Exchange.filter_by(array, key, value)
@staticmethod
def group_by(array, key):
result = {}
array = Exchange.to_array(array)
array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
for entry in array:
if entry[key] not in result:
result[entry[key]] = []
result[entry[key]].append(entry)
return result
@staticmethod
def groupBy(array, key):
return Exchange.group_by(array, key)
@staticmethod
def index_by(array, key):
result = {}
if type(array) is dict:
array = Exchange.keysort(array).values()
for element in array:
if (key in element) and (element[key] is not None):
k = element[key]
result[k] = element
return result
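    # Illustrative sketch (added commentary, not part of the original source):
    # group_by buckets entries by a field, index_by keys them by a unique field:
    #
    #     trades = [{'side': 'buy', 'id': '1'}, {'side': 'sell', 'id': '2'}]
    #     Exchange.group_by(trades, 'side')  # -> {'buy': [{...'id': '1'}], 'sell': [{...'id': '2'}]}
    #     Exchange.index_by(trades, 'id')    # -> {'1': {...'side': 'buy'}, '2': {...'side': 'sell'}}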
@staticmethod
def sort_by(array, key, descending=False):
return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
@staticmethod
def array_concat(a, b):
return a + b
@staticmethod
def in_array(needle, haystack):
return needle in haystack
@staticmethod
def is_empty(object):
return not object
@staticmethod
def extract_params(string):
return re.findall(r'{([\w-]+)}', string)
@staticmethod
def implode_params(string, params):
for key in params:
string = string.replace('{' + key + '}', str(params[key]))
return string
@staticmethod
def url(path, params={}):
result = Exchange.implode_params(path, params)
query = Exchange.omit(params, Exchange.extract_params(path))
if query:
result += '?' + _urlencode.urlencode(query)
return result
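    # Illustrative sketch (added commentary, not part of the original source):
    # URL templating interpolates {placeholders} and appends any leftover
    # params as a query string:
    #
    #     Exchange.implode_params('orders/{id}', {'id': '42'})    # -> 'orders/42'
    #     Exchange.url('orders/{id}', {'id': '42', 'limit': 10})  # -> 'orders/42?limit=10'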
@staticmethod
def urlencode(params={}):
if (type(params) is dict) or isinstance(params, collections.OrderedDict):
return _urlencode.urlencode(params)
return params
@staticmethod
def rawencode(params={}):
return _urlencode.unquote(Exchange.urlencode(params))
@staticmethod
def encode_uri_component(uri):
return _urlencode.quote(uri, safe="~()*!.'")
@staticmethod
def omit(d, *args):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
@staticmethod
def unique(array):
return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
@staticmethod
def sum(*args):
return sum([arg for arg in args if isinstance(arg, (float, int))])
@staticmethod
def ordered(array):
return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
ordered = Exchange.ordered({})
for [price, volume] in bidasks:
if volume > 0:
ordered[price] = (ordered[price] if price in ordered else 0) + volume
result = []
items = list(ordered.items())
for price, volume in items:
result.append([price, volume])
return result
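    # Illustrative sketch (added commentary, not part of the original source):
    # aggregate sums the volumes of orderbook entries sharing a price level:
    #
    #     Exchange.aggregate([[100.0, 1.0], [100.0, 2.0], [99.5, 3.0]])
    #     # -> [[100.0, 3.0], [99.5, 3.0]]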
@staticmethod
def sec():
return Exchange.seconds()
@staticmethod
def msec():
return Exchange.milliseconds()
@staticmethod
def usec():
return Exchange.microseconds()
@staticmethod
def seconds():
return int(time.time())
@staticmethod
def milliseconds():
return int(time.time() * 1000)
@staticmethod
def microseconds():
return int(time.time() * 1000000)
@staticmethod
def iso8601(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, (int, long)):
return None
if int(timestamp) < 0:
return None
try:
utc = datetime.datetime.utcfromtimestamp(timestamp // 1000)
return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
except (TypeError, OverflowError, OSError):
return None
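    # Illustrative sketch (added commentary, not part of the original source):
    # timestamps are integer milliseconds since the Unix epoch throughout:
    #
    #     Exchange.iso8601(1556712000000)  # -> '2019-05-01T12:00:00.000Z'
    #     Exchange.iso8601(None)           # -> None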
@staticmethod
def dmy(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')
@staticmethod
def ymd(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d')
@staticmethod
def ymdhms(timestamp, infix=' '):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
@staticmethod
def parse_date(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, str):
return None
if 'GMT' in timestamp:
try:
string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
return calendar.timegm(dt.utctimetuple()) * 1000
except (TypeError, OverflowError, OSError):
return None
else:
return Exchange.parse8601(timestamp)
@staticmethod
def parse8601(timestamp=None):
if timestamp is None:
return timestamp
yyyy = '([0-9]{4})-?'
mm = '([0-9]{2})-?'
dd = '([0-9]{2})(?:T|[\\s])?'
h = '([0-9]{2}):?'
m = '([0-9]{2}):?'
s = '([0-9]{2})'
ms = '(\\.[0-9]{1,3})?'
tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
try:
match = re.search(regex, timestamp, re.IGNORECASE)
if match is None:
return None
yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
ms = ms or '.000'
msint = int(ms[1:])
sign = sign or ''
sign = int(sign + '1') * -1
hours = int(hours or 0) * sign
minutes = int(minutes or 0) * sign
offset = datetime.timedelta(hours=hours, minutes=minutes)
string = yyyy + mm + dd + h + m + s + ms + 'Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
dt = dt + offset
return calendar.timegm(dt.utctimetuple()) * 1000 + msint
except (TypeError, OverflowError, OSError, ValueError):
return None
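    # Illustrative sketch (added commentary, not part of the original source):
    # parse8601 inverts iso8601 and tolerates missing separators as well as
    # timezone offsets:
    #
    #     Exchange.parse8601('2019-05-01T12:00:00.000Z')   # -> 1556712000000
    #     Exchange.parse8601('2019-05-01 14:00:00+02:00')  # -> 1556712000000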
@staticmethod
def hash(request, algorithm='md5', digest='hex'):
h = hashlib.new(algorithm, request)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
@staticmethod
def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
h = hmac.new(secret, request, algorithm)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
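    # Illustrative sketch (added commentary, not part of the original source):
    # both digest helpers take bytes and return hex digests by default:
    #
    #     Exchange.hash(Exchange.encode(''), 'md5')  # -> 'd41d8cd98f00b7204e9800998ecf8427e'
    #     Exchange.hmac(Exchange.encode('msg'), Exchange.encode('key'))  # -> a 64-char sha256 hex digest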
@staticmethod
def binary_concat(*args):
result = bytes()
for arg in args:
result = result + arg
return result
@staticmethod
def binary_to_string(s):
return s.decode('ascii')
@staticmethod
def base64urlencode(s):
return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')
@staticmethod
def jwt(request, secret, algorithm=hashlib.sha256, alg='HS256'):
header = Exchange.encode(Exchange.json({
'alg': alg,
'typ': 'JWT',
}))
encodedHeader = Exchange.base64urlencode(header)
encodedData = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
token = encodedHeader + '.' + encodedData
hmac = Exchange.hmac(Exchange.encode(token), Exchange.encode(secret), algorithm, 'binary')
signature = Exchange.base64urlencode(hmac)
return token + '.' + signature
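    # Illustrative sketch (added commentary, not part of the original source):
    # jwt builds a standard three-part header.payload.signature token signed
    # with the given secret (the values below are placeholders):
    #
    #     Exchange.jwt({'sub': 'user'}, 'topsecret')
    #     # -> 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.<payload>.<signature>'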
@staticmethod
def unjson(input):
return json.loads(input)
@staticmethod
def json(data, params=None):
return json.dumps(data, separators=(',', ':'))
@staticmethod
def is_json_encoded_object(input):
return (isinstance(input, basestring) and
(len(input) >= 2) and
((input[0] == '{') or (input[0] == '[')))
@staticmethod
def encode(string):
return string.encode()
@staticmethod
def decode(string):
return string.decode()
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
def nonce(self):
return Exchange.seconds()
def check_required_credentials(self, error=True):
keys = list(self.requiredCredentials.keys())
for key in keys:
if self.requiredCredentials[key] and not getattr(self, key):
if error:
self.raise_error(AuthenticationError, details='requires `' + key + '`')
else:
return error
def check_address(self, address):
"""Checks an address is not the same character repeated or an empty sequence"""
if address is None:
self.raise_error(InvalidAddress, details='address is None')
if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
return address
def account(self):
return {
'free': 0.0,
'used': 0.0,
'total': 0.0,
}
def common_currency_code(self, currency):
if not self.substituteCommonCurrencyCodes:
return currency
return self.safe_string(self.commonCurrencies, currency, currency)
def currency_id(self, commonCode):
if self.currencies:
if commonCode in self.currencies:
return self.currencies[commonCode]['id']
currencyIds = {v: k for k, v in self.commonCurrencies.items()}
return self.safe_string(currencyIds, commonCode, commonCode)
def precision_from_string(self, string):
parts = re.sub(r'0+$', '', string).split('.')
return len(parts[1]) if len(parts) > 1 else 0
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def price_to_precision(self, symbol, price):
return self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def amount_to_precision(self, symbol, amount):
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], self.precisionMode)
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode)
def set_markets(self, markets, currencies=None):
values = list(markets.values()) if type(markets) is dict else markets
for i in range(0, len(values)):
values[i] = self.extend(
self.fees['trading'],
{'precision': self.precision, 'limits': self.limits},
values[i]
)
self.markets = self.index_by(values, 'symbol')
self.markets_by_id = self.index_by(values, 'id')
self.marketsById = self.markets_by_id
self.symbols = sorted(list(self.markets.keys()))
self.ids = sorted(list(self.markets_by_id.keys()))
if currencies:
self.currencies = self.deep_extend(currencies, self.currencies)
else:
base_currencies = [{
'id': market['baseId'] if 'baseId' in market else market['base'],
'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
'code': market['base'],
'precision': (
market['precision']['base'] if 'base' in market['precision'] else (
market['precision']['amount'] if 'amount' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'base' in market]
quote_currencies = [{
'id': market['quoteId'] if 'quoteId' in market else market['quote'],
'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
'code': market['quote'],
'precision': (
market['precision']['quote'] if 'quote' in market['precision'] else (
market['precision']['price'] if 'price' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'quote' in market]
currencies = self.sort_by(base_currencies + quote_currencies, 'code')
self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies)
self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
return self.markets
def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = self.fetch_currencies()
markets = self.fetch_markets(params)
return self.set_markets(markets, currencies)
def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees())
return self.loaded_fees
def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.raise_error(NotSupported, details='create_order() not supported yet')
def cancel_order(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='cancel_order() not supported yet')
def fetch_bids_asks(self, symbols=None, params={}):
        self.raise_error(NotSupported, details='API does not allow fetching all prices at once with a single call to fetch_bids_asks() for now')
def fetch_tickers(self, symbols=None, params={}):
        self.raise_error(NotSupported, details='API does not allow fetching all tickers at once with a single call to fetch_tickers() for now')
def fetch_order_status(self, id, symbol=None, params={}):
order = self.fetch_order(id, symbol, params)
return order['status']
def purge_cached_orders(self, before):
orders = self.to_array(self.orders)
orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)]
self.orders = self.index_by(orders, 'id')
return self.orders
def fetch_order(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='fetch_order() is not supported yet')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_orders() is not supported yet')
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_open_orders() is not supported yet')
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_closed_orders() is not supported yet')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_my_trades() is not supported yet')
def fetch_order_trades(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='fetch_order_trades() is not supported yet')
def fetch_transactions(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_transactions() is not supported yet')
def fetch_deposits(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_deposits() is not supported yet')
def fetch_withdrawals(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_withdrawals() is not supported yet')
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv
def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
ohlcvs = self.to_array(ohlcvs)
num_ohlcvs = len(ohlcvs)
result = []
i = 0
while i < num_ohlcvs:
if limit and (len(result) >= limit):
break
ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit)
i = i + 1
if since and (ohlcv[0] < since):
continue
result.append(ohlcv)
return self.sort_by(result, 0)
    def parse_bid_ask(self, bidask, price_key=0, amount_key=1):
return [float(bidask[price_key]), float(bidask[amount_key])]
def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
result = []
if len(bidasks):
if type(bidasks[0]) is list:
for bidask in bidasks:
if bidask[price_key] and bidask[amount_key]:
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
elif type(bidasks[0]) is dict:
for bidask in bidasks:
if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
else:
self.raise_error(ExchangeError, details='unrecognized bidask format: ' + str(bidasks[0]))
return result
def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
return {
'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp) if timestamp is not None else None,
'nonce': None,
}
def parse_balance(self, balance):
currencies = self.omit(balance, 'info').keys()
for account in ['free', 'used', 'total']:
balance[account] = {}
for currency in currencies:
balance[account][currency] = balance[currency][account]
return balance
def fetch_partial_balance(self, part, params={}):
balance = self.fetch_balance(params)
return balance[part]
def fetch_free_balance(self, params={}):
return self.fetch_partial_balance('free', params)
def fetch_used_balance(self, params={}):
return self.fetch_partial_balance('used', params)
def fetch_total_balance(self, params={}):
return self.fetch_partial_balance('total', params)
    def fetch_trading_fees(self, params={}):
self.raise_error(NotSupported, details='fetch_trading_fees() not supported yet')
def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
self.raise_error(NotSupported, details='fetch_trading_fee() not supported yet')
return self.fetch_trading_fees(params)
def fetch_funding_fees(self, params={}):
self.raise_error(NotSupported, details='fetch_funding_fees() not supported yet')
def fetch_funding_fee(self, code, params={}):
if not self.has['fetchFundingFees']:
self.raise_error(NotSupported, details='fetch_funding_fee() not supported yet')
return self.fetch_funding_fees(params)
def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
            if reload or ('limitsLoaded' not in self.options):
response = self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
self.raise_error(NotSupported, details='fetch_ohlcv() not supported yet')
self.load_markets()
trades = self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return self.fetch_ohlcv(symbol, timeframe, since, limit, params)
def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
result = self.convert_trading_view_to_ohlcv(ohlcvs)
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs):
result = []
for i in range(0, len(ohlcvs['t'])):
result.append([
ohlcvs['t'][i] * 1000,
ohlcvs['o'][i],
ohlcvs['h'][i],
ohlcvs['l'][i],
ohlcvs['c'][i],
ohlcvs['v'][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs):
result = {
't': [],
'o': [],
'h': [],
'l': [],
'c': [],
'v': [],
}
for i in range(0, len(ohlcvs)):
result['t'].append(int(ohlcvs[i][0] / 1000))
result['o'].append(ohlcvs[i][1])
result['h'].append(ohlcvs[i][2])
result['l'].append(ohlcvs[i][3])
result['c'].append(ohlcvs[i][4])
result['v'].append(ohlcvs[i][5])
return result
def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None):
ms = self.parse_timeframe(timeframe) * 1000
ohlcvs = []
(high, low, close, volume) = (2, 3, 4, 5)
num_trades = len(trades)
        oldest = num_trades if limit is None else min(num_trades, limit)  # include the most recent trade
for i in range(0, oldest):
trade = trades[i]
if (since is not None) and (trade['timestamp'] < since):
continue
            opening_time = int(math.floor(trade['timestamp'] / ms) * ms)  # snap to the edge of the m/h/d timeframe (does not work for months)
j = len(ohlcvs)
if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms:
# moved to a new timeframe -> create a new candle from opening trade
ohlcvs.append([
opening_time,
trade['price'],
trade['price'],
trade['price'],
trade['price'],
trade['amount'],
])
else:
                # still within the same timeframe -> update the current candle
ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price'])
ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price'])
ohlcvs[j - 1][close] = trade['price']
ohlcvs[j - 1][volume] += trade['amount']
return ohlcvs
@staticmethod
def parse_timeframe(timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' in unit:
scale = 60 * 60 * 24 * 365
elif 'M' in unit:
scale = 60 * 60 * 24 * 30
elif 'w' in unit:
scale = 60 * 60 * 24 * 7
elif 'd' in unit:
scale = 60 * 60 * 24
elif 'h' in unit:
scale = 60 * 60
else:
scale = 60 # 1m by default
return amount * scale
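    # Illustrative sketch (added commentary, not part of the original source):
    # timeframes are converted to their duration in seconds:
    #
    #     Exchange.parse_timeframe('1m')  # -> 60
    #     Exchange.parse_timeframe('5m')  # -> 300
    #     Exchange.parse_timeframe('1h')  # -> 3600
    #     Exchange.parse_timeframe('1d')  # -> 86400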
def parse_trades(self, trades, market=None, since=None, limit=None):
array = self.to_array(trades)
array = [self.parse_trade(trade, market) for trade in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def parse_ledger(self, data, currency=None, since=None, limit=None):
array = self.to_array(data)
array = [self.parse_ledger_entry(item, currency) for item in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
return self.filter_by_currency_since_limit(array, code, since, limit)
def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}):
array = self.to_array(transactions)
array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
return self.filter_by_currency_since_limit(array, code, since, limit)
def parse_orders(self, orders, market=None, since=None, limit=None):
array = self.to_array(orders)
array = [self.parse_order(order, market) for order in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def safe_currency_code(self, data, key, currency=None):
code = None
currency_id = self.safe_string(data, key)
if currency_id in self.currencies_by_id:
currency = self.currencies_by_id[currency_id]
else:
code = self.common_currency_code(currency_id)
if currency is not None:
code = currency['code']
return code
def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None):
array = self.to_array(array)
if value:
array = [entry for entry in array if entry[field] == value]
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None):
return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit)
def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None):
return self.filter_by_value_since_limit(array, 'currency', code, since, limit)
def filter_by_since_limit(self, array, since=None, limit=None):
array = self.to_array(array)
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_symbol(self, array, symbol=None):
array = self.to_array(array)
if symbol:
return [entry for entry in array if entry['symbol'] == symbol]
return array
def filter_by_array(self, objects, key, values=None, indexed=True):
objects = self.to_array(objects)
# return all of them if no values were passed in
if values is None:
return self.index_by(objects, key) if indexed else objects
result = []
for i in range(0, len(objects)):
value = objects[i][key] if key in objects[i] else None
if value in values:
result.append(objects[i])
return self.index_by(result, key) if indexed else result
def currency(self, code):
if not self.currencies:
self.raise_error(ExchangeError, details='Currencies not loaded')
if isinstance(code, basestring) and (code in self.currencies):
return self.currencies[code]
self.raise_error(ExchangeError, details='Does not have currency code ' + str(code))
def find_market(self, string):
if not self.markets:
self.raise_error(ExchangeError, details='Markets not loaded')
if isinstance(string, basestring):
if string in self.markets_by_id:
return self.markets_by_id[string]
if string in self.markets:
return self.markets[string]
return string
def find_symbol(self, string, market=None):
if market is None:
market = self.find_market(string)
if isinstance(market, dict):
return market['symbol']
return string
def market(self, symbol):
if not self.markets:
self.raise_error(ExchangeError, details='Markets not loaded')
if isinstance(symbol, basestring) and (symbol in self.markets):
return self.markets[symbol]
self.raise_error(ExchangeError, details='No market symbol ' + str(symbol))
def market_ids(self, symbols):
return [self.market_id(symbol) for symbol in symbols]
def market_id(self, symbol):
market = self.market(symbol)
return market['id'] if type(market) is dict else symbol
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * price))
return {
'rate': rate,
'type': takerOrMaker,
'currency': market['quote'],
'cost': float(self.fee_to_precision(symbol, rate * cost)),
}
def edit_limit_buy_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'buy', *args)
def edit_limit_sell_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'sell', *args)
def edit_limit_order(self, id, symbol, *args):
return self.edit_order(id, symbol, 'limit', *args)
def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
self.raise_error(ExchangeError, details='edit_order() requires enableRateLimit = true')
self.cancel_order(id, symbol)
return self.create_order(symbol, *args)
def create_limit_order(self, symbol, *args):
return self.create_order(symbol, 'limit', *args)
def create_market_order(self, symbol, *args):
return self.create_order(symbol, 'market', *args)
def create_limit_buy_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'buy', *args)
def create_limit_sell_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'sell', *args)
def create_market_buy_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'buy', amount, None, params)
def create_market_sell_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'sell', amount, None, params)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        raise NotSupported(self.id + ' sign() is an abstract method and must be redefined in derived classes')
# -------------------------------------------------------------------------
# web3 / 0x methods
@staticmethod
def has_web3():
return Web3 is not None
def check_required_dependencies(self):
if not Exchange.has_web3():
raise NotSupported("Web3 functionality requires Python3 and web3 package installed: https://github.com/ethereum/web3.py")
def eth_decimals(self, unit='ether'):
units = {
'wei': 0, # 1
'kwei': 3, # 1000
'babbage': 3, # 1000
'femtoether': 3, # 1000
'mwei': 6, # 1000000
'lovelace': 6, # 1000000
'picoether': 6, # 1000000
'gwei': 9, # 1000000000
'shannon': 9, # 1000000000
'nanoether': 9, # 1000000000
'nano': 9, # 1000000000
'szabo': 12, # 1000000000000
'microether': 12, # 1000000000000
'micro': 12, # 1000000000000
'finney': 15, # 1000000000000000
'milliether': 15, # 1000000000000000
'milli': 15, # 1000000000000000
'ether': 18, # 1000000000000000000
'kether': 21, # 1000000000000000000000
'grand': 21, # 1000000000000000000000
'mether': 24, # 1000000000000000000000000
'gether': 27, # 1000000000000000000000000000
'tether': 30, # 1000000000000000000000000000000
}
return self.safe_value(units, unit)
def eth_unit(self, decimals=18):
units = {
0: 'wei', # 1000000000000000000
3: 'kwei', # 1000000000000000
6: 'mwei', # 1000000000000
9: 'gwei', # 1000000000
12: 'szabo', # 1000000
15: 'finney', # 1000
18: 'ether', # 1
21: 'kether', # 0.001
24: 'mether', # 0.000001
27: 'gether', # 0.000000001
30: 'tether', # 0.000000000001
}
return self.safe_value(units, decimals)
def fromWei(self, amount, unit='ether', decimals=18):
if Web3 is None:
self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org")
if amount is None:
return amount
if decimals != 18:
if decimals % 3:
amount = int(amount) * (10 ** (18 - decimals))
else:
unit = self.eth_unit(decimals)
return float(Web3.fromWei(int(amount), unit))
def toWei(self, amount, unit='ether', decimals=18):
if Web3 is None:
self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org")
if amount is None:
return amount
if decimals != 18:
if decimals % 3:
# this case has known yet unsolved problems:
# toWei(1.999, 'ether', 17) == '199900000000000011'
# toWei(1.999, 'ether', 19) == '19989999999999999991'
# the best solution should not involve additional dependencies
amount = Decimal(amount) / Decimal(10 ** (18 - decimals))
else:
unit = self.eth_unit(decimals)
return str(Web3.toWei(amount, unit))
def decryptAccountFromJSON(self, value, password):
return self.decryptAccount(json.loads(value) if isinstance(value, basestring) else value, password)
def decryptAccount(self, key, password):
return self.web3.eth.accounts.decrypt(key, password)
def decryptAccountFromPrivateKey(self, privateKey):
return self.web3.eth.accounts.privateKeyToAccount(privateKey)
def soliditySha3(self, array):
values = self.solidityValues(array)
types = self.solidityTypes(values)
return self.web3.soliditySha3(types, values).hex()
def soliditySha256(self, values):
types = self.solidityTypes(values)
solidity_values = self.solidityValues(values)
encoded_values = [hex_encode_abi_type(abi_type, value)[2:] for abi_type, value in zip(types, solidity_values)]
hex_string = '0x' + ''.join(encoded_values)
return '0x' + self.hash(self.encode(self.web3.toText(hex_string)), 'sha256')
def solidityTypes(self, array):
return ['address' if self.web3.isAddress(value) else 'uint256' for value in array]
def solidityValues(self, array):
return [self.web3.toChecksumAddress(value) if self.web3.isAddress(value) else int(value) for value in array]
def getZeroExOrderHash2(self, order):
return self.soliditySha3([
order['exchangeContractAddress'], # address
order['maker'], # address
order['taker'], # address
order['makerTokenAddress'], # address
order['takerTokenAddress'], # address
order['feeRecipient'], # address
order['makerTokenAmount'], # uint256
order['takerTokenAmount'], # uint256
order['makerFee'], # uint256
order['takerFee'], # uint256
order['expirationUnixTimestampSec'], # uint256
order['salt'], # uint256
])
def getZeroExOrderHash(self, order):
unpacked = [
self.web3.toChecksumAddress(order['exchangeContractAddress']), # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['maker']), # { value: order.maker, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['taker']), # { value: order.taker, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['makerTokenAddress']), # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['takerTokenAddress']), # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['feeRecipient']), # { value: order.feeRecipient, type: types_1.SolidityTypes.Address },
int(order['makerTokenAmount']), # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, },
int(order['takerTokenAmount']), # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, },
int(order['makerFee']), # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, },
int(order['takerFee']), # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, },
int(order['expirationUnixTimestampSec']), # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, },
int(order['salt']), # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 },
]
types = [
'address', # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address },
'address', # { value: order.maker, type: types_1.SolidityTypes.Address },
'address', # { value: order.taker, type: types_1.SolidityTypes.Address },
'address', # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address },
'address', # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address },
'address', # { value: order.feeRecipient, type: types_1.SolidityTypes.Address },
'uint256', # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 },
]
return self.web3.soliditySha3(types, unpacked).hex()
def remove_0x_prefix(self, value):
if value[:2] == '0x':
return value[2:]
return value
def getZeroExOrderHashV2(self, order):
# https://github.com/0xProject/0x-monorepo/blob/development/python-packages/order_utils/src/zero_ex/order_utils/__init__.py
def pad_20_bytes_to_32(twenty_bytes):
return bytes(12) + twenty_bytes
def int_to_32_big_endian_bytes(i):
return i.to_bytes(32, byteorder="big")
def to_bytes(value):
if not isinstance(value, str):
raise TypeError("Value must be an instance of str")
if len(value) % 2:
value = "0x0" + self.remove_0x_prefix(value)
return base64.b16decode(self.remove_0x_prefix(value), casefold=True)
domain_struct_header = b"\x91\xab=\x17\xe3\xa5\n\x9d\x89\xe6?\xd3\x0b\x92\xbe\x7fS6\xb0;({\xb9Fxz\x83\xa9\xd6*'f\xf0\xf2F\x18\xf4\xc4\xbe\x1eb\xe0&\xfb\x03\x9a \xef\x96\xf4IR\x94\x81}\x10'\xff\xaam\x1fp\xe6\x1e\xad|[\xef\x02x\x16\xa8\x00\xda\x176DO\xb5\x8a\x80~\xf4\xc9`;xHg?~:h\xeb\x14\xa5"
order_schema_hash = b'w\x05\x01\xf8\x8a&\xed\xe5\xc0J \xef\x87yi\xe9a\xeb\x11\xfc\x13\xb7\x8a\xafAKc=\xa0\xd4\xf8o'
header = b"\x19\x01"
domain_struct_hash = self.web3.sha3(
domain_struct_header +
pad_20_bytes_to_32(to_bytes(order["exchangeAddress"]))
)
order_struct_hash = self.web3.sha3(
order_schema_hash +
pad_20_bytes_to_32(to_bytes(order["makerAddress"])) +
pad_20_bytes_to_32(to_bytes(order["takerAddress"])) +
pad_20_bytes_to_32(to_bytes(order["feeRecipientAddress"])) +
pad_20_bytes_to_32(to_bytes(order["senderAddress"])) +
int_to_32_big_endian_bytes(int(order["makerAssetAmount"])) +
int_to_32_big_endian_bytes(int(order["takerAssetAmount"])) +
int_to_32_big_endian_bytes(int(order["makerFee"])) +
int_to_32_big_endian_bytes(int(order["takerFee"])) +
int_to_32_big_endian_bytes(int(order["expirationTimeSeconds"])) +
int_to_32_big_endian_bytes(int(order["salt"])) +
self.web3.sha3(to_bytes(order["makerAssetData"])) +
self.web3.sha3(to_bytes(order["takerAssetData"]))
)
sha3 = self.web3.sha3(
header +
domain_struct_hash +
order_struct_hash
)
return '0x' + base64.b16encode(sha3).decode('ascii').lower()
def signZeroExOrder(self, order, privateKey):
orderHash = self.getZeroExOrderHash(order)
signature = self.signMessage(orderHash[-64:], privateKey)
return self.extend(order, {
'orderHash': orderHash,
'ecSignature': signature, # todo fix v if needed
})
def signZeroExOrderV2(self, order, privateKey):
orderHash = self.getZeroExOrderHashV2(order)
signature = self.signMessage(orderHash[-64:], privateKey)
return self.extend(order, {
'orderHash': orderHash,
'signature': self._convertECSignatureToSignatureHex(signature),
})
def _convertECSignatureToSignatureHex(self, signature):
# https://github.com/0xProject/0x-monorepo/blob/development/packages/order-utils/src/signature_utils.ts
v = signature["v"]
if v != 27 and v != 28:
v = v + 27
return (
"0x" +
self.remove_0x_prefix(hex(v)) +
self.remove_0x_prefix(signature["r"]) +
self.remove_0x_prefix(signature["s"]) +
"03"
)
def hashMessage(self, message):
message_bytes = bytes.fromhex(message)
return self.web3.sha3(b"\x19Ethereum Signed Message:\n" + str(len(message_bytes)).encode() + message_bytes).hex()
def signHash(self, hash, privateKey):
signature = self.web3.eth.account.signHash(hash[-64:], private_key=privateKey[-64:])
return {
'v': signature.v, # integer
'r': self.web3.toHex(signature.r), # '0x'-prefixed hex string
's': self.web3.toHex(signature.s), # '0x'-prefixed hex string
}
def signMessage(self, message, privateKey):
#
        # The following examples are related to MetaMask; we use the first (upper) type of signature prefix:
#
# z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
# '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
# prefixType: 'ETH_SIGN',
# shouldAddPrefixBeforeCallingEthSign: true
# }).then ((e, r) => console.log (e,r))
#
# { ↓
# v: 28,
# r: "0xea7a68268b47c48d5d7a4c900e6f9af0015bf70951b3db2f1d835c5d544aaec2",
# s: "0x5d1db2a060c955c1fde4c967237b995c2361097405407b33c6046c8aeb3ccbdf"
# }
#
# --------------------------------------------------------------------
#
# z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
# '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
# prefixType: 'NONE',
# shouldAddPrefixBeforeCallingEthSign: true
# }).then ((e, r) => console.log (e,r))
#
# { ↓
# v: 27,
# r: "0xc8c710022c57de4f529d448e9b40517dd9bfb49ff1eb245f5856664b865d14a6",
# s: "0x0740bb21f4f094fbbdbafa903bb8f057f82e0c6e4fe65d19a1daed4ed97cd394"
# }
#
message_hash = self.hashMessage(message)
signature = self.signHash(message_hash[-64:], privateKey[-64:])
return signature
def oath(self):
if self.twofa is not None:
return self.totp(self.twofa)
else:
raise ExchangeError(self.id + ' set .twofa to use this feature')
@staticmethod
def totp(key):
def dec_to_bytes(n):
if n > 0:
return dec_to_bytes(n // 256) + bytes([n % 256])
else:
return b''
def hex_to_dec(n):
return int(n, base=16)
def base32_to_bytes(n):
missing_padding = len(n) % 8
padding = 8 - missing_padding if missing_padding > 0 else 0
padded = n.upper() + ('=' * padding)
return base64.b32decode(padded) # throws an error if the key is invalid
epoch = int(time.time()) // 30
hmac_res = Exchange.hmac(dec_to_bytes(epoch).rjust(8, b'\x00'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex')
offset = hex_to_dec(hmac_res[-1]) * 2
otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff)
return otp[-6:]
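    # Illustrative sketch (added commentary, not part of the original source):
    # totp implements RFC 6238 time-based one-time passwords; given a base32
    # secret (the one below is a placeholder) it returns the current 6-digit
    # code, which changes every 30 seconds:
    #
    #     Exchange.totp('JBSWY3DPEHPK3PXP')  # -> e.g. '492039'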
__version__ = '1.18.575'
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NetworkError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES, TRUNCATE, ROUND
from ccxt.base.decimal_to_precision import number_to_string
__all__ = [
'Exchange',
]
import types
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException
from ssl import SSLError
import time
import uuid
import zlib
from decimal import Decimal
try:
basestring
except NameError:
basestring = str
try:
long
except NameError:
long = int
try:
import urllib.parse as _urlencode
except ImportError:
import urllib as _urlencode
try:
from web3 import Web3, HTTPProvider
from web3.utils.encoding import hex_encode_abi_type
except ImportError:
Web3 = HTTPProvider = None
class Exchange(object):
id = None
version = None
certified = False
enableRateLimit = False
rateLimit = 2000
timeout = 10000
asyncio_loop = None
aiohttp_proxy = None
aiohttp_trust_env = False
session = None
logger = None
userAgent = None
userAgents = {
'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
}
verbose = False
markets = None
symbols = None
fees = {
'trading': {
'percentage': True,
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
loaded_fees = {
'trading': {
'percentage': True,
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
ids = None
tickers = None
api = None
parseJsonResponse = True
proxy = ''
origin = '*'
proxies = None
hostname = None
apiKey = ''
secret = ''
password = ''
uid = ''
privateKey = ''
walletAddress = ''
token = ''
twofa = None
marketsById = None
markets_by_id = None
currencies_by_id = None
precision = None
exceptions = None
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
httpExceptions = {
'422': ExchangeError,
'418': DDoSProtection,
'429': DDoSProtection,
'404': ExchangeNotAvailable,
'409': ExchangeNotAvailable,
'500': ExchangeNotAvailable,
'501': ExchangeNotAvailable,
'502': ExchangeNotAvailable,
'520': ExchangeNotAvailable,
'521': ExchangeNotAvailable,
'522': ExchangeNotAvailable,
'525': ExchangeNotAvailable,
'526': ExchangeNotAvailable,
'400': ExchangeNotAvailable,
'403': ExchangeNotAvailable,
'405': ExchangeNotAvailable,
'503': ExchangeNotAvailable,
'530': ExchangeNotAvailable,
'408': RequestTimeout,
'504': RequestTimeout,
'401': AuthenticationError,
'511': AuthenticationError,
}
headers = None
balance = None
orderbooks = None
orders = None
trades = None
transactions = None
currencies = None
options = None
accounts = None
requiredCredentials = {
'apiKey': True,
'secret': True,
'uid': False,
'login': False,
'password': False,
'twofa': False,
'privateKey': False,
'walletAddress': False,
'token': False,
}
has = {
'cancelAllOrders': False,
'cancelOrder': True,
'cancelOrders': False,
'CORS': False,
'createDepositAddress': False,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'deposit': False,
'editOrder': 'emulated',
'fetchBalance': True,
'fetchClosedOrders': False,
'fetchCurrencies': False,
'fetchDepositAddress': False,
'fetchDeposits': False,
'fetchFundingFees': False,
'fetchL2OrderBook': True,
'fetchLedger': False,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOHLCV': 'emulated',
'fetchOpenOrders': False,
'fetchOrder': False,
'fetchOrderBook': True,
'fetchOrderBooks': False,
'fetchOrders': False,
'fetchTicker': True,
'fetchTickers': False,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
        'fetchFundingFee': False,
'fetchTradingLimits': False,
'fetchTransactions': False,
'fetchWithdrawals': False,
'privateAPI': True,
'publicAPI': True,
'withdraw': False,
}
precisionMode = DECIMAL_PLACES
minFundingAddressLength = 1
substituteCommonCurrencyCodes = True
lastRestRequestTimestamp = 0
lastRestPollTimestamp = 0
restRequestQueue = None
restPollerLoopIsRunning = False
rateLimitTokens = 16
rateLimitMaxTokens = 16
rateLimitUpdateTime = 0
enableLastHttpResponse = True
enableLastJsonResponse = True
enableLastResponseHeaders = True
last_http_response = None
last_json_response = None
last_response_headers = None
requiresWeb3 = False
web3 = None
commonCurrencies = {
'XBT': 'BTC',
'BCC': 'BCH',
'DRK': 'DASH',
'BCHABC': 'BCH',
'BCHSV': 'BSV',
}
def __init__(self, config={}):
self.precision = dict() if self.precision is None else self.precision
self.limits = dict() if self.limits is None else self.limits
self.exceptions = dict() if self.exceptions is None else self.exceptions
self.headers = dict() if self.headers is None else self.headers
self.balance = dict() if self.balance is None else self.balance
self.orderbooks = dict() if self.orderbooks is None else self.orderbooks
self.orders = dict() if self.orders is None else self.orders
self.trades = dict() if self.trades is None else self.trades
self.transactions = dict() if self.transactions is None else self.transactions
self.currencies = dict() if self.currencies is None else self.currencies
self.options = dict() if self.options is None else self.options
self.decimal_to_precision = decimal_to_precision
self.number_to_string = number_to_string
self.userAgent = default_user_agent()
settings = self.deep_extend(self.describe(), config)
for key in settings:
if hasattr(self, key) and isinstance(getattr(self, key), dict):
setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
else:
setattr(self, key, settings[key])
if self.api:
self.define_rest_api(self.api, 'request')
if self.markets:
self.set_markets(self.markets)
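        # Mirror every snake_case method and attribute under a camelCase alias, so
        # that exchange.fetch_order_book and exchange.fetchOrderBook resolve to the
        # same code: methods are aliased on the class, plain attributes on the
        # instance.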
cls = type(self)
for name in dir(self):
if name[0] != '_' and name[-1] != '_' and '_' in name:
parts = name.split('_')
camelcase = parts[0] + ''.join(self.capitalize(i) for i in parts[1:])
attr = getattr(self, name)
if isinstance(attr, types.MethodType):
setattr(cls, camelcase, getattr(cls, name))
else:
setattr(self, camelcase, attr)
self.tokenBucket = self.extend({
'refillRate': 1.0 / self.rateLimit,
'delay': 0.001,
'capacity': 1.0,
'defaultCost': 1.0,
}, getattr(self, 'tokenBucket') if hasattr(self, 'tokenBucket') else {})
self.session = self.session if self.session else Session()
self.logger = self.logger if self.logger else logging.getLogger(__name__)
if self.requiresWeb3 and Web3 and not self.web3:
self.web3 = Web3(HTTPProvider())
def __del__(self):
if self.session:
self.session.close()
def describe(self):
return {}
def set_sandbox_mode(self, enabled):
if enabled:
if 'test' in self.urls:
self.urls['api_backup'] = self.urls['api']
self.urls['api'] = self.urls['test']
else:
raise NotSupported(self.id + ' does not have a sandbox URL')
elif 'api_backup' in self.urls:
self.urls['api'] = self.urls['api_backup']
del self.urls['api_backup']
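    # define_rest_api() expands the declarative self.api map into concrete request
    # methods. A hypothetical illustration (the endpoint is an example, not a real
    # exchange's):
    #
    #     api = {'public': {'get': ['ticker/{symbol}']}}
    #
    # yields both self.publicGetTickerSymbol(params) and
    # self.public_get_ticker_symbol(params), each delegating to self.request()
    # with path='ticker/{symbol}', api='public', method='GET'.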
@classmethod
def define_rest_api(cls, api, method_name, options={}):
delimiters = re.compile('[^a-zA-Z0-9]')
entry = getattr(cls, method_name)
for api_type, methods in api.items():
for http_method, urls in methods.items():
for url in urls:
url = url.strip()
split_path = delimiters.split(url)
uppercase_method = http_method.upper()
lowercase_method = http_method.lower()
camelcase_method = lowercase_method.capitalize()
camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
lowercase_path = [x.strip().lower() for x in split_path]
underscore_suffix = '_'.join([k for k in lowercase_path if len(k)])
camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix)
underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower()
if 'suffixes' in options:
if 'camelcase' in options['suffixes']:
camelcase += options['suffixes']['camelcase']
if 'underscore' in options['suffixes']:
underscore += options['suffixes']['underscore']
def partialer():
outer_kwargs = {'path': url, 'api': api_type, 'method': uppercase_method}
@functools.wraps(entry)
def inner(_self, params=None):
inner_kwargs = dict(outer_kwargs)
if params is not None:
inner_kwargs['params'] = params
return entry(_self, **inner_kwargs)
return inner
to_bind = partialer()
setattr(cls, camelcase, to_bind)
setattr(cls, underscore, to_bind)
def raise_error(self, exception_type, url=None, method=None, error=None, details=None):
if error:
error = str(error)
output = ' '.join([self.id] + [var for var in (url, method, error, details) if var is not None])
raise exception_type(output)
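    # throttle() enforces self.rateLimit (in milliseconds) between REST calls by
    # sleeping away the remainder of the window; fetch2() below only invokes it
    # when enableRateLimit is True.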
def throttle(self):
now = float(self.milliseconds())
elapsed = now - self.lastRestRequestTimestamp
if elapsed < self.rateLimit:
delay = self.rateLimit - elapsed
time.sleep(delay / 1000.0)
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
return self.fetch2(path, api, method, params, headers, body)
@staticmethod
def gzip_deflate(response, text):
encoding = response.info().get('Content-Encoding')
if encoding in ('gzip', 'x-gzip', 'deflate'):
if encoding == 'deflate':
return zlib.decompress(text, -zlib.MAX_WBITS)
else:
return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
return text
def find_broadly_matched_key(self, broad, string):
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if string.find(key) >= 0:
return key
return None
def handle_errors(self, code, reason, url, method, headers, body, response):
pass
def prepare_request_headers(self, headers=None):
headers = headers or {}
headers.update(self.headers)
if self.userAgent:
if type(self.userAgent) is str:
headers.update({'User-Agent': self.userAgent})
elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent):
headers.update(self.userAgent)
if self.proxy:
headers.update({'Origin': self.origin})
headers.update({'Accept-Encoding': 'gzip, deflate'})
return headers
def fetch(self, url, method='GET', headers=None, body=None):
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, request_headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
if body:
body = body.encode()
self.session.cookies.clear()
response = None
http_response = None
json_response = None
try:
response = self.session.request(
method,
url,
data=body,
headers=request_headers,
timeout=int(self.timeout / 1000),
proxies=self.proxies
)
http_response = response.text
json_response = self.parse_json(http_response) if self.is_json_encoded_object(http_response) else None
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.verbose:
print("\nResponse:", method, url, response.status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status_code, headers, http_response)
response.raise_for_status()
except Timeout as e:
self.raise_error(RequestTimeout, method, url, e)
except TooManyRedirects as e:
self.raise_error(ExchangeError, url, method, e)
except SSLError as e:
self.raise_error(ExchangeError, url, method, e)
except HTTPError as e:
self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response)
self.handle_rest_errors(e, response.status_code, http_response, url, method)
self.raise_error(ExchangeError, url, method, e, http_response)
except RequestException as e:
error_string = str(e)
if ('ECONNRESET' in error_string) or ('Connection aborted.' in error_string):
self.raise_error(NetworkError, url, method, e)
else:
self.raise_error(ExchangeError, url, method, e)
self.handle_errors(response.status_code, response.reason, url, method, headers, http_response, json_response)
self.handle_rest_response(http_response, json_response, url, method, headers, body)
if json_response is not None:
return json_response
return http_response
def handle_rest_errors(self, exception, http_status_code, response, url, method='GET'):
error = None
string_code = str(http_status_code)
if string_code in self.httpExceptions:
error = self.httpExceptions[string_code]
if error == ExchangeNotAvailable:
if re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE):
error = DDoSProtection
if error:
self.raise_error(error, url, method, exception if exception else http_status_code, response)
def handle_rest_response(self, response, json_response, url, method='GET', headers=None, body=None):
if self.is_json_encoded_object(response) and json_response is None:
ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE)
exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE)
if ddos_protection:
self.raise_error(DDoSProtection, method, url, None, response)
if exchange_not_available:
message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect'
self.raise_error(ExchangeNotAvailable, method, url, None, message)
self.raise_error(ExchangeError, method, url, ValueError('failed to decode json'), response)
def parse_json(self, http_response):
try:
if Exchange.is_json_encoded_object(http_response):
return json.loads(http_response)
except ValueError:
pass
@staticmethod
def safe_float(dictionary, key, default_value=None):
value = default_value
try:
if isinstance(dictionary, list) and isinstance(key, int) and len(dictionary) > key:
value = float(dictionary[key])
else:
value = float(dictionary[key]) if (key is not None) and (key in dictionary) and (dictionary[key] is not None) else default_value
except ValueError as e:
value = default_value
return value
@staticmethod
def safe_string(dictionary, key, default_value=None):
return str(dictionary[key]) if key is not None and (key in dictionary) and dictionary[key] is not None else default_value
@staticmethod
def safe_integer(dictionary, key, default_value=None):
if key is None or (key not in dictionary):
return default_value
value = dictionary[key]
if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()):
return int(value)
return default_value
@staticmethod
def safe_value(dictionary, key, default_value=None):
return dictionary[key] if key is not None and (key in dictionary) and dictionary[key] is not None else default_value
@staticmethod
def safe_float_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value)
@staticmethod
def safe_string_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value)
@staticmethod
def safe_integer_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value)
@staticmethod
def safe_value_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value)
@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
value = method(dictionary, key1)
return value if value is not None else method(dictionary, key2, default_value)
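    # Illustrative behaviour of the safe_* getters on partial payloads (the
    # values are made up): safe_float({'price': '1.5'}, 'price') == 1.5,
    # safe_integer({}, 'qty', 0) == 0, and
    # safe_string_2({'a': None, 'b': 'x'}, 'a', 'b') == 'x'.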
@staticmethod
def truncate(num, precision=0):
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(num * decimal_precision) / decimal_precision
return int(Exchange.truncate_to_string(num, precision))
@staticmethod
def truncate_to_string(num, precision=0):
if precision > 0:
parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = decimal_digits if len(decimal_digits) else '0'
return parts[0] + '.' + decimal_digits
return ('%d' % num)
@staticmethod
def uuid():
return str(uuid.uuid4())
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
@staticmethod
def keysort(dictionary):
return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
@staticmethod
def deep_extend(*args):
result = None
for arg in args:
if isinstance(arg, dict):
if not isinstance(result, dict):
result = {}
for key in arg:
result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
else:
result = arg
return result
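    # e.g. deep_extend({'a': {'x': 1}}, {'a': {'y': 2}}) == {'a': {'x': 1, 'y': 2}},
    # whereas the shallow extend() above would replace the nested dict wholesale.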
@staticmethod
def filter_by(array, key, value=None):
if value:
grouped = Exchange.group_by(array, key)
if value in grouped:
return grouped[value]
return []
return array
@staticmethod
    def filterBy(array, key, value=None):
        return Exchange.filter_by(array, key, value)
@staticmethod
def group_by(array, key):
result = {}
array = Exchange.to_array(array)
array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
for entry in array:
if entry[key] not in result:
result[entry[key]] = []
result[entry[key]].append(entry)
return result
@staticmethod
def groupBy(array, key):
return Exchange.group_by(array, key)
@staticmethod
def index_by(array, key):
result = {}
if type(array) is dict:
array = Exchange.keysort(array).values()
for element in array:
if (key in element) and (element[key] is not None):
k = element[key]
result[k] = element
return result
@staticmethod
def sort_by(array, key, descending=False):
return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
@staticmethod
def array_concat(a, b):
return a + b
@staticmethod
def in_array(needle, haystack):
return needle in haystack
@staticmethod
def is_empty(object):
return not object
@staticmethod
def extract_params(string):
return re.findall(r'{([\w-]+)}', string)
@staticmethod
def implode_params(string, params):
for key in params:
string = string.replace('{' + key + '}', str(params[key]))
return string
@staticmethod
def url(path, params={}):
result = Exchange.implode_params(path, params)
query = Exchange.omit(params, Exchange.extract_params(path))
if query:
result += '?' + _urlencode.urlencode(query)
return result
@staticmethod
def urlencode(params={}):
if (type(params) is dict) or isinstance(params, collections.OrderedDict):
return _urlencode.urlencode(params)
return params
@staticmethod
def rawencode(params={}):
return _urlencode.unquote(Exchange.urlencode(params))
@staticmethod
def encode_uri_component(uri):
return _urlencode.quote(uri, safe="~()*!.'")
@staticmethod
def omit(d, *args):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
@staticmethod
def unique(array):
return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
@staticmethod
def sum(*args):
return sum([arg for arg in args if isinstance(arg, (float, int))])
@staticmethod
def ordered(array):
return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
ordered = Exchange.ordered({})
for [price, volume] in bidasks:
if volume > 0:
ordered[price] = (ordered[price] if price in ordered else 0) + volume
result = []
items = list(ordered.items())
for price, volume in items:
result.append([price, volume])
return result
@staticmethod
def sec():
return Exchange.seconds()
@staticmethod
def msec():
return Exchange.milliseconds()
@staticmethod
def usec():
return Exchange.microseconds()
@staticmethod
def seconds():
return int(time.time())
@staticmethod
def milliseconds():
return int(time.time() * 1000)
@staticmethod
def microseconds():
return int(time.time() * 1000000)
@staticmethod
def iso8601(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, (int, long)):
return None
if int(timestamp) < 0:
return None
try:
utc = datetime.datetime.utcfromtimestamp(timestamp // 1000)
return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
except (TypeError, OverflowError, OSError):
return None
@staticmethod
def dmy(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')
@staticmethod
def ymd(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d')
@staticmethod
def ymdhms(timestamp, infix=' '):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
@staticmethod
def parse_date(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, str):
return None
if 'GMT' in timestamp:
try:
string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
return calendar.timegm(dt.utctimetuple()) * 1000
except (TypeError, OverflowError, OSError):
return None
else:
return Exchange.parse8601(timestamp)
@staticmethod
def parse8601(timestamp=None):
if timestamp is None:
return timestamp
yyyy = '([0-9]{4})-?'
mm = '([0-9]{2})-?'
dd = '([0-9]{2})(?:T|[\\s])?'
h = '([0-9]{2}):?'
m = '([0-9]{2}):?'
s = '([0-9]{2})'
ms = '(\\.[0-9]{1,3})?'
tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
try:
match = re.search(regex, timestamp, re.IGNORECASE)
if match is None:
return None
yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
ms = ms or '.000'
msint = int(ms[1:])
sign = sign or ''
sign = int(sign + '1') * -1
hours = int(hours or 0) * sign
minutes = int(minutes or 0) * sign
offset = datetime.timedelta(hours=hours, minutes=minutes)
string = yyyy + mm + dd + h + m + s + ms + 'Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
dt = dt + offset
return calendar.timegm(dt.utctimetuple()) * 1000 + msint
except (TypeError, OverflowError, OSError, ValueError):
return None
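    # Round-trip example: Exchange.parse8601('2019-04-18T12:30:00.123Z') returns
    # 1555590600123, and Exchange.iso8601(1555590600123) gives back
    # '2019-04-18T12:30:00.123Z'.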
@staticmethod
def hash(request, algorithm='md5', digest='hex'):
h = hashlib.new(algorithm, request)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
@staticmethod
def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
h = hmac.new(secret, request, algorithm)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
@staticmethod
def binary_concat(*args):
result = bytes()
for arg in args:
result = result + arg
return result
@staticmethod
def binary_to_string(s):
return s.decode('ascii')
@staticmethod
def base64urlencode(s):
return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')
@staticmethod
def jwt(request, secret, algorithm=hashlib.sha256, alg='HS256'):
header = Exchange.encode(Exchange.json({
'alg': alg,
'typ': 'JWT',
}))
encodedHeader = Exchange.base64urlencode(header)
encodedData = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
token = encodedHeader + '.' + encodedData
hmac = Exchange.hmac(Exchange.encode(token), Exchange.encode(secret), algorithm, 'binary')
signature = Exchange.base64urlencode(hmac)
return token + '.' + signature
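    # A minimal sketch (the secret is a placeholder): Exchange.jwt({'sub': 'user'},
    # 'my-secret') produces the usual three-part 'header.payload.signature' token,
    # HS256-signed, with base64url-encoded segments and '=' padding stripped.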
@staticmethod
def unjson(input):
return json.loads(input)
@staticmethod
def json(data, params=None):
return json.dumps(data, separators=(',', ':'))
@staticmethod
def is_json_encoded_object(input):
return (isinstance(input, basestring) and
(len(input) >= 2) and
((input[0] == '{') or (input[0] == '[')))
@staticmethod
def encode(string):
return string.encode()
@staticmethod
def decode(string):
return string.decode()
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
def nonce(self):
return Exchange.seconds()
def check_required_credentials(self, error=True):
keys = list(self.requiredCredentials.keys())
for key in keys:
if self.requiredCredentials[key] and not getattr(self, key):
if error:
self.raise_error(AuthenticationError, details='requires `' + key + '`')
else:
return error
def check_address(self, address):
if address is None:
self.raise_error(InvalidAddress, details='address is None')
if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
self.raise_error(InvalidAddress, details='address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
return address
def account(self):
return {
'free': 0.0,
'used': 0.0,
'total': 0.0,
}
def common_currency_code(self, currency):
if not self.substituteCommonCurrencyCodes:
return currency
return self.safe_string(self.commonCurrencies, currency, currency)
def currency_id(self, commonCode):
if self.currencies:
if commonCode in self.currencies:
return self.currencies[commonCode]['id']
currencyIds = {v: k for k, v in self.commonCurrencies.items()}
return self.safe_string(currencyIds, commonCode, commonCode)
def precision_from_string(self, string):
parts = re.sub(r'0+$', '', string).split('.')
return len(parts[1]) if len(parts) > 1 else 0
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def price_to_precision(self, symbol, price):
return self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def amount_to_precision(self, symbol, amount):
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], self.precisionMode)
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode)
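    # e.g. for a market with precision {'amount': 3, 'price': 2} (hypothetical
    # values), amount_to_precision(symbol, 1.23456) truncates to '1.234' while
    # price_to_precision(symbol, 1.23456) rounds to '1.23'.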
def set_markets(self, markets, currencies=None):
values = list(markets.values()) if type(markets) is dict else markets
for i in range(0, len(values)):
values[i] = self.extend(
self.fees['trading'],
{'precision': self.precision, 'limits': self.limits},
values[i]
)
self.markets = self.index_by(values, 'symbol')
self.markets_by_id = self.index_by(values, 'id')
self.marketsById = self.markets_by_id
self.symbols = sorted(list(self.markets.keys()))
self.ids = sorted(list(self.markets_by_id.keys()))
if currencies:
self.currencies = self.deep_extend(currencies, self.currencies)
else:
base_currencies = [{
'id': market['baseId'] if 'baseId' in market else market['base'],
'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
'code': market['base'],
'precision': (
market['precision']['base'] if 'base' in market['precision'] else (
market['precision']['amount'] if 'amount' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'base' in market]
quote_currencies = [{
'id': market['quoteId'] if 'quoteId' in market else market['quote'],
'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
'code': market['quote'],
'precision': (
market['precision']['quote'] if 'quote' in market['precision'] else (
market['precision']['price'] if 'price' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'quote' in market]
currencies = self.sort_by(base_currencies + quote_currencies, 'code')
self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies)
self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
return self.markets
def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = self.fetch_currencies()
markets = self.fetch_markets(params)
return self.set_markets(markets, currencies)
def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees())
return self.loaded_fees
def fetch_markets(self, params={}):
return self.to_array(self.markets)
def fetch_currencies(self, params={}):
return self.currencies
def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.raise_error(NotSupported, details='create_order() not supported yet')
def cancel_order(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='cancel_order() not supported yet')
def fetch_bids_asks(self, symbols=None, params={}):
self.raise_error(NotSupported, details='API does not allow to fetch all prices at once with a single call to fetch_bids_asks() for now')
def fetch_tickers(self, symbols=None, params={}):
self.raise_error(NotSupported, details='API does not allow to fetch all tickers at once with a single call to fetch_tickers() for now')
def fetch_order_status(self, id, symbol=None, params={}):
order = self.fetch_order(id, symbol, params)
return order['status']
def purge_cached_orders(self, before):
orders = self.to_array(self.orders)
orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)]
self.orders = self.index_by(orders, 'id')
return self.orders
def fetch_order(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='fetch_order() is not supported yet')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_orders() is not supported yet')
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_open_orders() is not supported yet')
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_closed_orders() is not supported yet')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_my_trades() is not supported yet')
def fetch_order_trades(self, id, symbol=None, params={}):
self.raise_error(NotSupported, details='fetch_order_trades() is not supported yet')
def fetch_transactions(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_transactions() is not supported yet')
def fetch_deposits(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_deposits() is not supported yet')
def fetch_withdrawals(self, symbol=None, since=None, limit=None, params={}):
self.raise_error(NotSupported, details='fetch_withdrawals() is not supported yet')
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv
def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
ohlcvs = self.to_array(ohlcvs)
num_ohlcvs = len(ohlcvs)
result = []
i = 0
while i < num_ohlcvs:
if limit and (len(result) >= limit):
break
ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit)
i = i + 1
if since and (ohlcv[0] < since):
continue
result.append(ohlcv)
return self.sort_by(result, 0)
    def parse_bid_ask(self, bidask, price_key=0, amount_key=1):
return [float(bidask[price_key]), float(bidask[amount_key])]
def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
result = []
if len(bidasks):
if type(bidasks[0]) is list:
for bidask in bidasks:
if bidask[price_key] and bidask[amount_key]:
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
elif type(bidasks[0]) is dict:
for bidask in bidasks:
if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
else:
self.raise_error(ExchangeError, details='unrecognized bidask format: ' + str(bidasks[0]))
return result
def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
return {
'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp) if timestamp is not None else None,
'nonce': None,
}
def parse_balance(self, balance):
currencies = self.omit(balance, 'info').keys()
for account in ['free', 'used', 'total']:
balance[account] = {}
for currency in currencies:
balance[account][currency] = balance[currency][account]
return balance
def fetch_partial_balance(self, part, params={}):
balance = self.fetch_balance(params)
return balance[part]
def fetch_free_balance(self, params={}):
return self.fetch_partial_balance('free', params)
def fetch_used_balance(self, params={}):
return self.fetch_partial_balance('used', params)
def fetch_total_balance(self, params={}):
return self.fetch_partial_balance('total', params)
def fetch_trading_fees(self, symbol, params={}):
self.raise_error(NotSupported, details='fetch_trading_fees() not supported yet')
def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
self.raise_error(NotSupported, details='fetch_trading_fee() not supported yet')
return self.fetch_trading_fees(params)
def fetch_funding_fees(self, params={}):
self.raise_error(NotSupported, details='fetch_funding_fees() not supported yet')
def fetch_funding_fee(self, code, params={}):
if not self.has['fetchFundingFees']:
self.raise_error(NotSupported, details='fetch_funding_fee() not supported yet')
return self.fetch_funding_fees(params)
def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
            if reload or ('limitsLoaded' not in self.options):
response = self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
self.raise_error(NotSupported, details='fetch_ohlcv() not supported yet')
self.load_markets()
trades = self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return self.fetch_ohlcv(symbol, timeframe, since, limit, params)
def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
result = self.convert_trading_view_to_ohlcv(ohlcvs)
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs):
result = []
for i in range(0, len(ohlcvs['t'])):
result.append([
ohlcvs['t'][i] * 1000,
ohlcvs['o'][i],
ohlcvs['h'][i],
ohlcvs['l'][i],
ohlcvs['c'][i],
ohlcvs['v'][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs):
result = {
't': [],
'o': [],
'h': [],
'l': [],
'c': [],
'v': [],
}
for i in range(0, len(ohlcvs)):
result['t'].append(int(ohlcvs[i][0] / 1000))
result['o'].append(ohlcvs[i][1])
result['h'].append(ohlcvs[i][2])
result['l'].append(ohlcvs[i][3])
result['c'].append(ohlcvs[i][4])
result['v'].append(ohlcvs[i][5])
return result
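    # build_ohlcv() backs the 'emulated' fetchOHLCV mode: it buckets raw trades
    # into fixed-width candles. An illustrative example (trades made up): two
    # trades at prices 10 then 12 with amounts 1 and 2 in the same 1m bucket
    # collapse to [bucket_start_ms, 10, 12, 10, 12, 3].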
def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None):
ms = self.parse_timeframe(timeframe) * 1000
ohlcvs = []
(high, low, close, volume) = (2, 3, 4, 5)
num_trades = len(trades)
oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit)
for i in range(0, oldest):
trade = trades[i]
if (since is not None) and (trade['timestamp'] < since):
continue
opening_time = int(math.floor(trade['timestamp'] / ms) * ms)
j = len(ohlcvs)
if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms:
ohlcvs.append([
opening_time,
trade['price'],
trade['price'],
trade['price'],
trade['price'],
trade['amount'],
])
else:
ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price'])
ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price'])
ohlcvs[j - 1][close] = trade['price']
ohlcvs[j - 1][volume] += trade['amount']
return ohlcvs
@staticmethod
def parse_timeframe(timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' in unit:
scale = 60 * 60 * 24 * 365
elif 'M' in unit:
scale = 60 * 60 * 24 * 30
elif 'w' in unit:
scale = 60 * 60 * 24 * 7
elif 'd' in unit:
scale = 60 * 60 * 24
elif 'h' in unit:
scale = 60 * 60
else:
scale = 60
return amount * scale
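    # e.g. parse_timeframe('5m') == 300, parse_timeframe('4h') == 14400 and
    # parse_timeframe('1d') == 86400 (candle width in seconds).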
def parse_trades(self, trades, market=None, since=None, limit=None):
array = self.to_array(trades)
array = [self.parse_trade(trade, market) for trade in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def parse_ledger(self, data, currency=None, since=None, limit=None):
array = self.to_array(data)
array = [self.parse_ledger_entry(item, currency) for item in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
return self.filter_by_currency_since_limit(array, code, since, limit)
def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}):
array = self.to_array(transactions)
array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
return self.filter_by_currency_since_limit(array, code, since, limit)
def parse_orders(self, orders, market=None, since=None, limit=None):
array = self.to_array(orders)
array = [self.parse_order(order, market) for order in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def safe_currency_code(self, data, key, currency=None):
code = None
currency_id = self.safe_string(data, key)
if currency_id in self.currencies_by_id:
currency = self.currencies_by_id[currency_id]
else:
code = self.common_currency_code(currency_id)
if currency is not None:
code = currency['code']
return code
def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None):
array = self.to_array(array)
if value:
array = [entry for entry in array if entry[field] == value]
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None):
return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit)
def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None):
return self.filter_by_value_since_limit(array, 'currency', code, since, limit)
def filter_by_since_limit(self, array, since=None, limit=None):
array = self.to_array(array)
if since:
array = [entry for entry in array if entry['timestamp'] >= since]
if limit:
array = array[0:limit]
return array
def filter_by_symbol(self, array, symbol=None):
array = self.to_array(array)
if symbol:
return [entry for entry in array if entry['symbol'] == symbol]
return array
def filter_by_array(self, objects, key, values=None, indexed=True):
objects = self.to_array(objects)
if values is None:
return self.index_by(objects, key) if indexed else objects
result = []
for i in range(0, len(objects)):
value = objects[i][key] if key in objects[i] else None
if value in values:
result.append(objects[i])
return self.index_by(result, key) if indexed else result
def currency(self, code):
if not self.currencies:
self.raise_error(ExchangeError, details='Currencies not loaded')
if isinstance(code, basestring) and (code in self.currencies):
return self.currencies[code]
self.raise_error(ExchangeError, details='Does not have currency code ' + str(code))
def find_market(self, string):
if not self.markets:
self.raise_error(ExchangeError, details='Markets not loaded')
if isinstance(string, basestring):
if string in self.markets_by_id:
return self.markets_by_id[string]
if string in self.markets:
return self.markets[string]
return string
def find_symbol(self, string, market=None):
if market is None:
market = self.find_market(string)
if isinstance(market, dict):
return market['symbol']
return string
def market(self, symbol):
if not self.markets:
self.raise_error(ExchangeError, details='Markets not loaded')
if isinstance(symbol, basestring) and (symbol in self.markets):
return self.markets[symbol]
self.raise_error(ExchangeError, details='No market symbol ' + str(symbol))
def market_ids(self, symbols):
return [self.market_id(symbol) for symbol in symbols]
def market_id(self, symbol):
market = self.market(symbol)
return market['id'] if type(market) is dict else symbol
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * price))
return {
'rate': rate,
'type': takerOrMaker,
'currency': market['quote'],
'cost': float(self.fee_to_precision(symbol, rate * cost)),
}
def edit_limit_buy_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'buy', *args)
def edit_limit_sell_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'sell', *args)
def edit_limit_order(self, id, symbol, *args):
return self.edit_order(id, symbol, 'limit', *args)
def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
self.raise_error(ExchangeError, details='edit_order() requires enableRateLimit = true')
self.cancel_order(id, symbol)
return self.create_order(symbol, *args)
def create_limit_order(self, symbol, *args):
return self.create_order(symbol, 'limit', *args)
def create_market_order(self, symbol, *args):
return self.create_order(symbol, 'market', *args)
def create_limit_buy_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'buy', *args)
def create_limit_sell_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'sell', *args)
def create_market_buy_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'buy', amount, None, params)
def create_market_sell_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'sell', amount, None, params)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
@staticmethod
def has_web3():
return Web3 is not None
def check_required_dependencies(self):
if not Exchange.has_web3():
raise NotSupported("Web3 functionality requires Python3 and web3 package installed: https://github.com/ethereum/web3.py")
def eth_decimals(self, unit='ether'):
units = {
'wei': 0,
'kwei': 3,
'babbage': 3,
'femtoether': 3,
'mwei': 6,
'lovelace': 6,
'picoether': 6,
'gwei': 9,
'shannon': 9,
'nanoether': 9,
'nano': 9,
'szabo': 12,
'microether': 12,
'micro': 12,
'finney': 15,
'milliether': 15,
'milli': 15,
'ether': 18,
'kether': 21,
'grand': 21,
'mether': 24,
'gether': 27,
'tether': 30,
}
return self.safe_value(units, unit)
def eth_unit(self, decimals=18):
units = {
0: 'wei',
3: 'kwei',
6: 'mwei',
9: 'gwei',
12: 'szabo',
15: 'finney',
18: 'ether',
21: 'kether',
24: 'mether',
27: 'gether',
30: 'tether',
}
return self.safe_value(units, decimals)
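    # The two tables above are inverses over the standard denominations, e.g.
    # eth_decimals('gwei') == 9 and eth_unit(9) == 'gwei'; eth_unit() lets
    # fromWei()/toWei() below map a plain decimals count onto a unit name that
    # web3 understands.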
def fromWei(self, amount, unit='ether', decimals=18):
if Web3 is None:
self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org")
if amount is None:
return amount
if decimals != 18:
if decimals % 3:
amount = int(amount) * (10 ** (18 - decimals))
else:
unit = self.eth_unit(decimals)
return float(Web3.fromWei(int(amount), unit))
def toWei(self, amount, unit='ether', decimals=18):
if Web3 is None:
self.raise_error(NotSupported, details="ethereum web3 methods require Python 3: https://pythonclock.org")
if amount is None:
return amount
if decimals != 18:
if decimals % 3:
amount = Decimal(amount) / Decimal(10 ** (18 - decimals))
else:
unit = self.eth_unit(decimals)
return str(Web3.toWei(amount, unit))
def decryptAccountFromJSON(self, value, password):
return self.decryptAccount(json.loads(value) if isinstance(value, basestring) else value, password)
def decryptAccount(self, key, password):
return self.web3.eth.accounts.decrypt(key, password)
def decryptAccountFromPrivateKey(self, privateKey):
return self.web3.eth.accounts.privateKeyToAccount(privateKey)
def soliditySha3(self, array):
values = self.solidityValues(array)
types = self.solidityTypes(values)
return self.web3.soliditySha3(types, values).hex()
def soliditySha256(self, values):
types = self.solidityTypes(values)
solidity_values = self.solidityValues(values)
encoded_values = [hex_encode_abi_type(abi_type, value)[2:] for abi_type, value in zip(types, solidity_values)]
hex_string = '0x' + ''.join(encoded_values)
return '0x' + self.hash(self.encode(self.web3.toText(hex_string)), 'sha256')
def solidityTypes(self, array):
return ['address' if self.web3.isAddress(value) else 'uint256' for value in array]
def solidityValues(self, array):
return [self.web3.toChecksumAddress(value) if self.web3.isAddress(value) else int(value) for value in array]
def getZeroExOrderHash2(self, order):
return self.soliditySha3([
order['exchangeContractAddress'],
order['maker'],
order['taker'],
order['makerTokenAddress'],
order['takerTokenAddress'],
order['feeRecipient'],
order['makerTokenAmount'],
order['takerTokenAmount'],
order['makerFee'],
order['takerFee'],
order['expirationUnixTimestampSec'],
order['salt'],
])
def getZeroExOrderHash(self, order):
unpacked = [
self.web3.toChecksumAddress(order['exchangeContractAddress']),
self.web3.toChecksumAddress(order['maker']),
self.web3.toChecksumAddress(order['taker']),
self.web3.toChecksumAddress(order['makerTokenAddress']),
self.web3.toChecksumAddress(order['takerTokenAddress']),
self.web3.toChecksumAddress(order['feeRecipient']),
int(order['makerTokenAmount']),
int(order['takerTokenAmount']),
int(order['makerFee']),
int(order['takerFee']),
int(order['expirationUnixTimestampSec']),
int(order['salt']),
]
types = [
'address',
'address',
'address',
'address',
'address',
'address',
'uint256',
'uint256',
'uint256',
'uint256',
'uint256',
'uint256',
]
return self.web3.soliditySha3(types, unpacked).hex()
def remove_0x_prefix(self, value):
if value[:2] == '0x':
return value[2:]
return value
def getZeroExOrderHashV2(self, order):
def pad_20_bytes_to_32(twenty_bytes):
return bytes(12) + twenty_bytes
def int_to_32_big_endian_bytes(i):
return i.to_bytes(32, byteorder="big")
def to_bytes(value):
if not isinstance(value, str):
raise TypeError("Value must be an instance of str")
if len(value) % 2:
value = "0x0" + self.remove_0x_prefix(value)
return base64.b16decode(self.remove_0x_prefix(value), casefold=True)
domain_struct_header = b"\x91\xab=\x17\xe3\xa5\n\x9d\x89\xe6?\xd3\x0b\x92\xbe\x7fS6\xb0;({\xb9Fxz\x83\xa9\xd6*'f\xf0\xf2F\x18\xf4\xc4\xbe\x1eb\xe0&\xfb\x03\x9a \xef\x96\xf4IR\x94\x81}\x10'\xff\xaam\x1fp\xe6\x1e\xad|[\xef\x02x\x16\xa8\x00\xda\x176DO\xb5\x8a\x80~\xf4\xc9`;xHg?~:h\xeb\x14\xa5"
order_schema_hash = b'w\x05\x01\xf8\x8a&\xed\xe5\xc0J \xef\x87yi\xe9a\xeb\x11\xfc\x13\xb7\x8a\xafAKc=\xa0\xd4\xf8o'
header = b"\x19\x01"
domain_struct_hash = self.web3.sha3(
domain_struct_header +
pad_20_bytes_to_32(to_bytes(order["exchangeAddress"]))
)
order_struct_hash = self.web3.sha3(
order_schema_hash +
pad_20_bytes_to_32(to_bytes(order["makerAddress"])) +
pad_20_bytes_to_32(to_bytes(order["takerAddress"])) +
pad_20_bytes_to_32(to_bytes(order["feeRecipientAddress"])) +
pad_20_bytes_to_32(to_bytes(order["senderAddress"])) +
int_to_32_big_endian_bytes(int(order["makerAssetAmount"])) +
int_to_32_big_endian_bytes(int(order["takerAssetAmount"])) +
int_to_32_big_endian_bytes(int(order["makerFee"])) +
int_to_32_big_endian_bytes(int(order["takerFee"])) +
int_to_32_big_endian_bytes(int(order["expirationTimeSeconds"])) +
int_to_32_big_endian_bytes(int(order["salt"])) +
self.web3.sha3(to_bytes(order["makerAssetData"])) +
self.web3.sha3(to_bytes(order["takerAssetData"]))
)
sha3 = self.web3.sha3(
header +
domain_struct_hash +
order_struct_hash
)
return '0x' + base64.b16encode(sha3).decode('ascii').lower()
def signZeroExOrder(self, order, privateKey):
orderHash = self.getZeroExOrderHash(order)
signature = self.signMessage(orderHash[-64:], privateKey)
return self.extend(order, {
'orderHash': orderHash,
'ecSignature': signature,
})
def signZeroExOrderV2(self, order, privateKey):
orderHash = self.getZeroExOrderHashV2(order)
signature = self.signMessage(orderHash[-64:], privateKey)
return self.extend(order, {
'orderHash': orderHash,
'signature': self._convertECSignatureToSignatureHex(signature),
})
def _convertECSignatureToSignatureHex(self, signature):
v = signature["v"]
if v != 27 and v != 28:
v = v + 27
return (
"0x" +
self.remove_0x_prefix(hex(v)) +
self.remove_0x_prefix(signature["r"]) +
self.remove_0x_prefix(signature["s"]) +
"03"
)
def hashMessage(self, message):
message_bytes = bytes.fromhex(message)
return self.web3.sha3(b"\x19Ethereum Signed Message:\n" + str(len(message_bytes)).encode() + message_bytes).hex()
def signHash(self, hash, privateKey):
signature = self.web3.eth.account.signHash(hash[-64:], private_key=privateKey[-64:])
return {
'v': signature.v,
'r': self.web3.toHex(signature.r),
's': self.web3.toHex(signature.s),
}
def signMessage(self, message, privateKey):
message_hash = self.hashMessage(message)
signature = self.signHash(message_hash[-64:], privateKey[-64:])
return signature
def oath(self):
if self.twofa is not None:
return self.totp(self.twofa)
else:
raise ExchangeError(self.id + ' set .twofa to use this feature')
@staticmethod
def totp(key):
def dec_to_bytes(n):
if n > 0:
return dec_to_bytes(n // 256) + bytes([n % 256])
else:
return b''
def hex_to_dec(n):
return int(n, base=16)
def base32_to_bytes(n):
missing_padding = len(n) % 8
padding = 8 - missing_padding if missing_padding > 0 else 0
padded = n.upper() + ('=' * padding)
return base64.b32decode(padded)
epoch = int(time.time()) // 30
hmac_res = Exchange.hmac(dec_to_bytes(epoch).rjust(8, b'\x00'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex')
offset = hex_to_dec(hmac_res[-1]) * 2
otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff)
return otp[-6:]
| true
| true
|
7906a8c3f5fdac8427b4722b6aa4f5168db10968
| 9411
|
py
|
Python
|
climatespider/climatespider/spiders/AO_wugspider.py
|
burnman108/climateSpider
|
434ba25b6a30fe6d07231b7758cbc64d8243bf4c
|
[
"Apache-2.0"
] | null | null | null |
climatespider/climatespider/spiders/AO_wugspider.py
|
burnman108/climateSpider
|
434ba25b6a30fe6d07231b7758cbc64d8243bf4c
|
[
"Apache-2.0"
] | null | null | null |
climatespider/climatespider/spiders/AO_wugspider.py
|
burnman108/climateSpider
|
434ba25b6a30fe6d07231b7758cbc64d8243bf4c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from climatespider.items import ClimatespiderItem
from scrapy.selector import Selector
from dateutil.parser import parse
import re
import datetime
from scrapy.exceptions import CloseSpider
def getyesterdaty():
today_date = datetime.date.today()
yesterday_date = today_date - datetime.timedelta(days=1)
return yesterday_date.strftime('%Y/%m/%d')
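# e.g. run on 2017-06-02 this returns '2017/06/01', the date segment expected by
# the wunderground DailyHistory URLs below.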
class wugSpider(CrawlSpider):
name = "WUGCrawlSpider_AO"
#today_date = datetime.now().strftime('%Y/%m/%d')
allowed_domains = ['www.wunderground.com']
start_urls = [
'https://www.wunderground.com/history/airport/ZBAA/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/54618/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZBTJ/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZBYN/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZSSS/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/50888/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/50136/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZYHB/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/50854/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZSOF/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZLXY/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/54602/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/VMMC/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/54401/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/58506/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZGHA/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZSHC/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZHHH/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/58606/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZGGG/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZGSZ/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/53798/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZYTL/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/airport/ZUUU/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/50774/{0}/DailyHistory.html'.format(getyesterdaty()),
'https://www.wunderground.com/history/station/50949/{0}/DailyHistory.html'.format(getyesterdaty())
]
def parse(self, response):
sel = Selector(response)
indexlist = list(map(lambda x: x.replace(' ','').replace('.',''),sel.xpath('//table[@id="obsTable"]/thead/tr/th/text()').extract()))
date = re.match(r'.*(\d{4}\/\d{1,2}\/\d{1,2}).*', response.url).group(1)
datatable = sel.xpath('//tr[@class="no-metars"]')
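        # The observation table comes in two layouts: 13 columns (including a
        # wind-chill/heat-index column) or 12 columns (without it); the two
        # branches below shift the cell indices accordingly.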
# items = []
for each in datatable:
item = ClimatespiderItem()
item['area'] = re.match(r'.*history/(.*)/2\d{3}/.*', response.url).group(1)
# item['date'] = date
if len(indexlist) == 13:
item['the_date'] = date
item['the_time'] = parse(each.xpath('td[1]/text()').extract()[0]).strftime('%H:%M')
item['qx_Humidity'] = each.xpath('td[5]/text()').extract()[0]
item['qx_WindDir'] = each.xpath('td[8]/text()').extract()[0]
item['qx_Precip'] = each.xpath('td[11]/text()').extract()[0]
item['qx_Events'] = each.xpath('td[12]/text()').extract()[0].strip()
try:
item['qx_Condition'] = each.xpath('td[13]/text()').extract()[0]
except Exception as e:
item['qx_Condition'] = ''
try:
item['qx_Temp'] = each.xpath('td[2]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_Temp'] = each.xpath('td[2]/text()').extract()[0].strip().replace('-','')
try:
item['qx_WindChill_HeatIndex'] = each.xpath('td[3]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_WindChill_HeatIndex'] = each.xpath('td[3]/text()').extract()[0].strip().replace('-','')
try:
item['qx_DewPoint'] = each.xpath('td[4]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_DewPoint'] = each.xpath('td[4]/text()').extract()[0].strip().replace('-','')
try:
item['qx_Pressure'] = each.xpath('td[6]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_Pressure'] = each.xpath('td[6]/text()').extract()[0].strip().replace('-','')
try:
item['qx_Visibility'] = each.xpath('td[7]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_Visibility'] = each.xpath('td[7]/text()').extract()[0].strip().replace('-','')
try:
item['qx_WindSpeed'] = each.xpath('td[9]/span[1]/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_WindSpeed'] = each.xpath('td[9]/text()').extract()[0].strip().replace('-','')
try:
item['qx_GustSpeed'] = each.xpath('td[10]/span[1]/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_GustSpeed'] = each.xpath('td[10]/text()').extract()[0].strip().replace('-','')
yield item
else:
item['the_date'] = date
item['the_time'] = parse(each.xpath('td[1]/text()').extract()[0]).strftime('%H:%M')
item['qx_Humidity'] = each.xpath('td[4]/text()').extract()[0]
item['qx_WindDir'] = each.xpath('td[7]/text()').extract()[0]
item['qx_Precip'] = each.xpath('td[10]/text()').extract()[0]
item['qx_Events'] = each.xpath('td[11]/text()').extract()[0].strip()
try:
item['qx_Condition'] = each.xpath('td[12]/text()').extract()[0]
except Exception as e:
item['qx_Condition'] = ''
try:
item['qx_Temp'] = each.xpath('td[2]/span/span[@class="wx-value"]/text()').extract()[0]
except Exception as e:
item['qx_Temp'] = each.xpath('td[2]/text()').extract()[0].strip().replace('-','')
# try:
# item['WindChill_HeatIndex'] = each.xpath('td[3]/span/span[@class="wx-value"]/text()').extract()[0]
# except Exception as e:
# item['WindChill_HeatIndex'] = each.xpath('td[3]/text()').extract()[0].strip().replace('-', '')
                try:
                    item['qx_DewPoint'] = each.xpath('td[3]/span/span[@class="wx-value"]/text()').extract()[0]
                except Exception:
                    item['qx_DewPoint'] = each.xpath('td[3]/text()').extract()[0].strip().replace('-', '')
                try:
                    item['qx_Pressure'] = each.xpath('td[5]/span/span[@class="wx-value"]/text()').extract()[0]
                except Exception:
                    item['qx_Pressure'] = each.xpath('td[5]/text()').extract()[0].strip().replace('-', '')
                try:
                    item['qx_Visibility'] = each.xpath('td[6]/span/span[@class="wx-value"]/text()').extract()[0]
                except Exception:
                    item['qx_Visibility'] = each.xpath('td[6]/text()').extract()[0].strip().replace('-', '')
                try:
                    item['qx_WindSpeed'] = each.xpath('td[8]/span[1]/span[@class="wx-value"]/text()').extract()[0]
                except Exception:
                    item['qx_WindSpeed'] = each.xpath('td[8]/text()').extract()[0].strip().replace('-', '')
                try:
                    item['qx_GustSpeed'] = each.xpath('td[9]/span[1]/span[@class="wx-value"]/text()').extract()[0]
                except Exception:
                    item['qx_GustSpeed'] = each.xpath('td[9]/text()').extract()[0].strip().replace('-', '')
                yield item
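# --- Added illustration (not part of the original spider) --------------------
# The repeated try/except blocks above all implement the same fallback: prefer
# the span.wx-value text of a cell, otherwise use the raw cell text with the
# '-' placeholder stripped. A minimal helper sketch of that pattern; the name
# `cell_value` and the unified `//span` XPath are assumptions, not code from
# the original file:
def cell_value(row, col):
    """Extract column `col` of an observation row with the wx-value fallback."""
    value = row.xpath('td[%d]//span[@class="wx-value"]/text()' % col).extract()
    if value:
        return value[0]
    raw = row.xpath('td[%d]/text()' % col).extract()
    return raw[0].strip().replace('-', '') if raw else ''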
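# --- Usage sketch (illustrative; assumes a standard Scrapy project layout) ---
# Within the project, the spider would normally be run from the CLI:
#
#   scrapy crawl WUGCrawlSpider_AO -o yesterday.csv
#
# A standalone run is also possible via CrawlerProcess; the USER_AGENT value
# below is an assumption, not a setting taken from the original project:
#
#   from scrapy.crawler import CrawlerProcess
#
#   process = CrawlerProcess(settings={"USER_AGENT": "climatespider (example)"})
#   process.crawl(wugSpider)
#   process.start()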
avg_line_length: 66.274648
max_line_length: 140
alphanum_fraction: 0.568165
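A minimal sketch of how such per-file statistics could be computed (assumed
definitions; the dataset's exact recipe is not shown in this dump):

def file_stats(text):
    # Average and maximum line length plus the alphanumeric character fraction.
    lines = text.splitlines()
    return {
        'avg_line_length': sum(len(l) for l in lines) / max(len(lines), 1),
        'max_line_length': max((len(l) for l in lines), default=0),
        'alphanum_fraction': sum(c.isalnum() for c in text) / max(len(text), 1),
    }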
is_comment_constant_removed: true
is_sharp_comment_removed: true