hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f736716d567c76b349b3ec264bc0736f0ffae941 | 12,320 | py | Python | trafficmonitor/gui/filter.py | Sumanth007/Traffic-Monitor | 2623f5c03a362b14415620528f05a91aba960374 | [
"MIT"
] | null | null | null | trafficmonitor/gui/filter.py | Sumanth007/Traffic-Monitor | 2623f5c03a362b14415620528f05a91aba960374 | [
"MIT"
] | 1 | 2022-03-22T21:21:19.000Z | 2022-03-22T21:21:19.000Z | trafficmonitor/gui/filter.py | SumanthTirumale/Traffic-Monitor | 2623f5c03a362b14415620528f05a91aba960374 | [
"MIT"
] | null | null | null | from pathlib import Path
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from PyQt5.Qt import Qt
from PyQt5.QtCore import QDateTime
class Filter(QDialog):
def __init__(self, *args, **kwargs):
    """Build the filter dialog and its widgets.

    Creates checkboxes for HTTP methods, response-type hiding,
    URL/status-code substring filters and a start/end datetime range,
    plus Set / Clear All buttons.  The chosen state is collected into
    ``self.filter_dict`` by :meth:`evt_btn_set`.
    """
    super().__init__(*args, **kwargs)
    self.setWindowTitle("Traffic Monitor")
    # Window icon lives in <package root>/images/logo.ico.
    self.image_path = str(Path(__file__).absolute().parent.parent / "images")
    self.setWindowIcon(QIcon(str(Path(self.image_path)/"logo.ico")))
    self.resize(500, 500)
    # HTTP method filters ("show" semantics).
    self.check_box_http_get = QCheckBox("Show GET")
    self.check_box_http_post = QCheckBox("Show POST")
    self.check_box_http_put = QCheckBox("Show PUT")
    self.check_box_http_delete = QCheckBox("Show DELETE")
    self.check_box_http_options = QCheckBox("Show OPTIONS")
    self.check_box_http_trace = QCheckBox("Show TRACE")
    self.check_box_http_connect = QCheckBox("Show CONNECT")
    self.check_box_http_head = QCheckBox("Show HEAD")
    # Response-type filters ("hide" semantics).
    self.check_box_image_extn = QCheckBox("Hide Image File Extensions")
    self.check_box_font_extn = QCheckBox("Hide Font File Extensions")
    self.check_box_css_extn = QCheckBox("Hide CSS Files")
    self.check_box_javascript_extn = QCheckBox("Hide Javascript Files")
    # Substring filters; the line edits stay disabled until their
    # checkbox is ticked (see evt_url_state / evt_status_code_state).
    self.check_box_url_contains = QCheckBox("Show if URL Contains")
    self.check_box_status_code_contains = QCheckBox("Show if Status Code Contains")
    self.edit_url_contains = QLineEdit()
    self.edit_url_contains.setDisabled(True)
    self.edit_status_code_contains = QLineEdit()
    self.edit_status_code_contains.setDisabled(True)
    # Datetime range filter; start defaults to two hours before now.
    self.check_box_start_date_time = QCheckBox("Start Datetime")
    self.check_box_end_date_time = QCheckBox("End Datetime")
    self.start_date_time_picker = QDateTimeEdit()
    self.start_date_time_picker.setCalendarPopup(True)
    self.start_date_time_picker.setDisabled(True)
    self.start_date_time_picker.setDateTime(QDateTime.currentDateTime().addSecs(-3600*2))
    self.end_date_time_picker = QDateTimeEdit()
    self.end_date_time_picker.setCalendarPopup(True)
    self.end_date_time_picker.setDisabled(True)
    self.end_date_time_picker.setDateTime(QDateTime.currentDateTime())
    self.button_set = QPushButton("Set")
    self.button_clear = QPushButton("Clear All")
    # Dictionary holding the currently applied filters; booleans for
    # checkbox filters, strings for the substring/datetime filters.
    self.filter_dict = {
        "GET": False,
        "POST": False,
        "PUT": False,
        "DELETE": False,
        "OPTIONS": False,
        "TRACE": False,
        "CONNECT": False,
        "HEAD": False,
        "IMAGE_FILE_EXTENSIONS": False,
        "FONT_FILE_EXTENSIONS": False,
        "CSS_FILES": False,
        "JAVASCRIPT_FILES": False,
        "FILTER_URL": "",
        "FILTER_STATUS_CODES": "",
        "START_DATE_TIME": "",
        "END_DATE_TIME": "",
    }
    self.center()
    self.init_signals()
    self.init_ui()
def center(self):
    """Centre the dialog on the screen currently under the mouse cursor."""
    desktop = QApplication.desktop()
    screen_idx = desktop.screenNumber(desktop.cursor().pos())
    screen_centre = desktop.screenGeometry(screen_idx).center()
    geometry = self.frameGeometry()
    geometry.moveCenter(screen_centre)
    self.move(geometry.topLeft())
def init_ui(self):
    """Lay out the dialog: four stacked rows separated by stretch space.

    Row 1: HTTP-method grid next to response-type checkboxes.
    Row 2: URL substring filter.  Row 3: status-code substring filter.
    Row 4: datetime range pickers next to the Set / Clear All buttons.
    Shown application-modal at the end.
    """
    vertical_box_main = QVBoxLayout()
    horizontal_box1 = QHBoxLayout()
    horizontal_box2 = QHBoxLayout()
    horizontal_box3 = QHBoxLayout()
    horizontal_box4 = QHBoxLayout()
    response_type_vertical_box = QVBoxLayout()
    grid_box_http_methods = QGridLayout()
    # 4x2 grid of HTTP-method checkboxes.
    group_box_http_methods = QGroupBox("HTTP Methods")
    grid_box_http_methods.addWidget(self.check_box_http_get, 0, 0)
    grid_box_http_methods.addWidget(self.check_box_http_options, 0, 1)
    grid_box_http_methods.addWidget(self.check_box_http_post, 1, 0)
    grid_box_http_methods.addWidget(self.check_box_http_trace, 1, 1)
    grid_box_http_methods.addWidget(self.check_box_http_put, 2, 0)
    grid_box_http_methods.addWidget(self.check_box_http_connect, 2, 1)
    grid_box_http_methods.addWidget(self.check_box_http_delete, 3, 0)
    grid_box_http_methods.addWidget(self.check_box_http_head, 3, 1)
    group_box_http_methods.setLayout(grid_box_http_methods)
    # Response-type ("hide ...") checkboxes stacked vertically.
    group_box_response_type = QGroupBox("Response Type")
    response_type_vertical_box.addWidget(self.check_box_image_extn)
    response_type_vertical_box.addWidget(self.check_box_font_extn)
    response_type_vertical_box.addWidget(self.check_box_css_extn)
    response_type_vertical_box.addWidget(self.check_box_javascript_extn)
    group_box_response_type.setLayout(response_type_vertical_box)
    horizontal_box1.addWidget(group_box_http_methods)
    horizontal_box1.addWidget(group_box_response_type)
    # Checkbox + line-edit pair for the URL substring filter.
    group_box_filter_url = QGroupBox("Filter Urls")
    form_box_filter_url = QFormLayout()
    form_box_filter_url.addRow(self.check_box_url_contains, self.edit_url_contains)
    group_box_filter_url.setLayout(form_box_filter_url)
    horizontal_box2.addWidget(group_box_filter_url)
    # Checkbox + line-edit pair for the status-code substring filter.
    group_box_filter_status_code = QGroupBox("Filter Status Code")
    form_box_filter_status_code = QFormLayout()
    form_box_filter_status_code.addRow(self.check_box_status_code_contains, self.edit_status_code_contains)
    group_box_filter_status_code.setLayout(form_box_filter_status_code)
    horizontal_box3.addWidget(group_box_filter_status_code)
    # Datetime range pickers.
    group_box_date_time = QGroupBox("DateTime")
    form_box_date_time = QFormLayout()
    form_box_date_time.addRow(self.check_box_start_date_time, self.start_date_time_picker)
    form_box_date_time.addRow(self.check_box_end_date_time, self.end_date_time_picker)
    group_box_date_time.setLayout(form_box_date_time)
    # Set / Clear All buttons stacked beside the datetime group.
    group_box_buttons = QGroupBox()
    vertical_buttons = QVBoxLayout()
    vertical_buttons.addWidget(self.button_set)
    vertical_buttons.addWidget(self.button_clear)
    group_box_buttons.setLayout(vertical_buttons)
    horizontal_box4.addWidget(group_box_date_time)
    horizontal_box4.addWidget(group_box_buttons)
    vertical_box_main.addStretch()
    vertical_box_main.addLayout(horizontal_box1)
    vertical_box_main.addStretch()
    vertical_box_main.addLayout(horizontal_box2)
    vertical_box_main.addStretch()
    vertical_box_main.addLayout(horizontal_box3)
    vertical_box_main.addStretch()
    vertical_box_main.addLayout(horizontal_box4)
    vertical_box_main.addStretch()
    self.setLayout(vertical_box_main)
    # Block the rest of the application while the dialog is open.
    self.setWindowModality(Qt.ApplicationModal)
    self.show()
def init_signals(self):
    """Wire every checkbox state change and button click to its handler."""
    connections = (
        (self.check_box_url_contains.stateChanged, self.evt_url_state),
        (self.check_box_status_code_contains.stateChanged, self.evt_status_code_state),
        (self.check_box_start_date_time.stateChanged, self.evt_start_date_time_state),
        (self.check_box_end_date_time.stateChanged, self.evt_end_date_time_state),
        (self.button_set.clicked, self.evt_btn_set),
        (self.button_clear.clicked, self.evt_btn_clear_all),
    )
    for signal, slot in connections:
        signal.connect(slot)
def evt_url_state(self):
    """Enable the URL text field only while its checkbox is ticked."""
    checked = self.check_box_url_contains.isChecked()
    self.edit_url_contains.setDisabled(not checked)
def evt_status_code_state(self):
    """Enable the status-code text field only while its checkbox is ticked."""
    checked = self.check_box_status_code_contains.isChecked()
    self.edit_status_code_contains.setDisabled(not checked)
def evt_start_date_time_state(self):
    """Enable the start-datetime picker only while its checkbox is ticked."""
    checked = self.check_box_start_date_time.isChecked()
    self.start_date_time_picker.setDisabled(not checked)
def evt_end_date_time_state(self):
    """Enable the end-datetime picker only while its checkbox is ticked."""
    checked = self.check_box_end_date_time.isChecked()
    self.end_date_time_picker.setDisabled(not checked)
def evt_btn_set(self):
    """Collect every widget state into ``self.filter_dict`` and close the dialog.

    Checkbox filters are stored as booleans; the URL/status-code
    substrings and the datetime bounds are stored as strings (empty
    string means "filter inactive").  When both datetime checkboxes are
    ticked, the range is only accepted if start <= end; otherwise a
    warning is shown and the previous START/END values are kept.
    """
    dt_format = "yyyy-MM-dd hh:mm:ss"
    self.filter_dict['GET'] = self.check_box_http_get.isChecked()
    self.filter_dict['OPTIONS'] = self.check_box_http_options.isChecked()
    self.filter_dict['POST'] = self.check_box_http_post.isChecked()
    self.filter_dict['TRACE'] = self.check_box_http_trace.isChecked()
    self.filter_dict['PUT'] = self.check_box_http_put.isChecked()
    self.filter_dict['CONNECT'] = self.check_box_http_connect.isChecked()
    self.filter_dict['DELETE'] = self.check_box_http_delete.isChecked()
    self.filter_dict['HEAD'] = self.check_box_http_head.isChecked()
    self.filter_dict['IMAGE_FILE_EXTENSIONS'] = self.check_box_image_extn.isChecked()
    self.filter_dict['FONT_FILE_EXTENSIONS'] = self.check_box_font_extn.isChecked()
    self.filter_dict['CSS_FILES'] = self.check_box_css_extn.isChecked()
    self.filter_dict['JAVASCRIPT_FILES'] = self.check_box_javascript_extn.isChecked()
    if self.check_box_url_contains.isChecked():
        self.filter_dict['FILTER_URL'] = self.edit_url_contains.text()
    else:
        self.filter_dict['FILTER_URL'] = ''
    if self.check_box_status_code_contains.isChecked():
        self.filter_dict['FILTER_STATUS_CODES'] = self.edit_status_code_contains.text()
    else:
        self.filter_dict['FILTER_STATUS_CODES'] = ''
    if self.check_box_start_date_time.isChecked() and self.check_box_end_date_time.isChecked():
        start_date_time = self.start_date_time_picker.dateTime()
        end_date_time = self.end_date_time_picker.dateTime()
        if start_date_time <= end_date_time:
            self.filter_dict['START_DATE_TIME'] = start_date_time.toString(dt_format)
            self.filter_dict['END_DATE_TIME'] = end_date_time.toString(dt_format)
        else:
            # BUG FIX: the old message said "lesser than current Date time"
            # although the comparison is against the END picker, not "now".
            # NOTE(review): stale START/END values from a previous Set are
            # kept in this branch and the dialog still closes — confirm
            # that this is the intended UX.
            QMessageBox.information(self, "Warning", "Start Datetime should be earlier than End Datetime")
    else:
        # Only one (or neither) bound is active; store each independently.
        if self.check_box_start_date_time.isChecked():
            start_date_time = self.start_date_time_picker.dateTime().toString(dt_format)
            self.filter_dict['START_DATE_TIME'] = start_date_time
        else:
            self.filter_dict['START_DATE_TIME'] = ''
        if self.check_box_end_date_time.isChecked():
            end_date_time = self.end_date_time_picker.dateTime().toString(dt_format)
            self.filter_dict['END_DATE_TIME'] = end_date_time
        else:
            self.filter_dict['END_DATE_TIME'] = ''
    self.close()
def evt_btn_clear_all(self):
self.check_box_http_get.setChecked(False)
self.check_box_http_options.setChecked(False)
self.check_box_http_post.setChecked(False)
self.check_box_http_trace.setChecked(False)
self.check_box_http_put.setChecked(False)
self.check_box_http_connect.setChecked(False)
self.check_box_http_delete.setChecked(False)
self.check_box_http_head.setChecked(False)
self.check_box_image_extn.setChecked(False)
self.check_box_font_extn.setChecked(False)
self.check_box_css_extn.setChecked(False)
self.check_box_javascript_extn.setChecked(False)
self.check_box_url_contains.setChecked(False)
self.edit_url_contains.setDisabled(True)
self.check_box_status_code_contains.setChecked(False)
self.edit_status_code_contains.setDisabled(True)
self.check_box_start_date_time.setChecked(False)
self.start_date_time_picker.setDisabled(True)
self.check_box_end_date_time.setChecked(False)
self.end_date_time_picker.setDisabled(True) | 43.076923 | 116 | 0.688149 | from pathlib import Path
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from PyQt5.Qt import Qt
from PyQt5.QtCore import QDateTime
class Filter(QDialog):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setWindowTitle("Traffic Monitor")
self.image_path = str(Path(__file__).absolute().parent.parent / "images")
self.setWindowIcon(QIcon(str(Path(self.image_path)/"logo.ico")))
self.resize(500, 500)
self.check_box_http_get = QCheckBox("Show GET")
self.check_box_http_post = QCheckBox("Show POST")
self.check_box_http_put = QCheckBox("Show PUT")
self.check_box_http_delete = QCheckBox("Show DELETE")
self.check_box_http_options = QCheckBox("Show OPTIONS")
self.check_box_http_trace = QCheckBox("Show TRACE")
self.check_box_http_connect = QCheckBox("Show CONNECT")
self.check_box_http_head = QCheckBox("Show HEAD")
self.check_box_image_extn = QCheckBox("Hide Image File Extensions")
self.check_box_font_extn = QCheckBox("Hide Font File Extensions")
self.check_box_css_extn = QCheckBox("Hide CSS Files")
self.check_box_javascript_extn = QCheckBox("Hide Javascript Files")
self.check_box_url_contains = QCheckBox("Show if URL Contains")
self.check_box_status_code_contains = QCheckBox("Show if Status Code Contains")
self.edit_url_contains = QLineEdit()
self.edit_url_contains.setDisabled(True)
self.edit_status_code_contains = QLineEdit()
self.edit_status_code_contains.setDisabled(True)
self.check_box_start_date_time = QCheckBox("Start Datetime")
self.check_box_end_date_time = QCheckBox("End Datetime")
self.start_date_time_picker = QDateTimeEdit()
self.start_date_time_picker.setCalendarPopup(True)
self.start_date_time_picker.setDisabled(True)
self.start_date_time_picker.setDateTime(QDateTime.currentDateTime().addSecs(-3600*2))
self.end_date_time_picker = QDateTimeEdit()
self.end_date_time_picker.setCalendarPopup(True)
self.end_date_time_picker.setDisabled(True)
self.end_date_time_picker.setDateTime(QDateTime.currentDateTime())
self.button_set = QPushButton("Set")
self.button_clear = QPushButton("Clear All")
self.filter_dict = {
"GET": False,
"POST": False,
"PUT": False,
"DELETE": False,
"OPTIONS": False,
"TRACE": False,
"CONNECT": False,
"HEAD": False,
"IMAGE_FILE_EXTENSIONS": False,
"FONT_FILE_EXTENSIONS": False,
"CSS_FILES": False,
"JAVASCRIPT_FILES": False,
"FILTER_URL": "",
"FILTER_STATUS_CODES": "",
"START_DATE_TIME": "",
"END_DATE_TIME": "",
}
self.center()
self.init_signals()
self.init_ui()
def center(self):
frame_gm = self.frameGeometry()
screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())
center_point = QApplication.desktop().screenGeometry(screen).center()
frame_gm.moveCenter(center_point)
self.move(frame_gm.topLeft())
def init_ui(self):
vertical_box_main = QVBoxLayout()
horizontal_box1 = QHBoxLayout()
horizontal_box2 = QHBoxLayout()
horizontal_box3 = QHBoxLayout()
horizontal_box4 = QHBoxLayout()
response_type_vertical_box = QVBoxLayout()
grid_box_http_methods = QGridLayout()
group_box_http_methods = QGroupBox("HTTP Methods")
grid_box_http_methods.addWidget(self.check_box_http_get, 0, 0)
grid_box_http_methods.addWidget(self.check_box_http_options, 0, 1)
grid_box_http_methods.addWidget(self.check_box_http_post, 1, 0)
grid_box_http_methods.addWidget(self.check_box_http_trace, 1, 1)
grid_box_http_methods.addWidget(self.check_box_http_put, 2, 0)
grid_box_http_methods.addWidget(self.check_box_http_connect, 2, 1)
grid_box_http_methods.addWidget(self.check_box_http_delete, 3, 0)
grid_box_http_methods.addWidget(self.check_box_http_head, 3, 1)
group_box_http_methods.setLayout(grid_box_http_methods)
group_box_response_type = QGroupBox("Response Type")
response_type_vertical_box.addWidget(self.check_box_image_extn)
response_type_vertical_box.addWidget(self.check_box_font_extn)
response_type_vertical_box.addWidget(self.check_box_css_extn)
response_type_vertical_box.addWidget(self.check_box_javascript_extn)
group_box_response_type.setLayout(response_type_vertical_box)
horizontal_box1.addWidget(group_box_http_methods)
horizontal_box1.addWidget(group_box_response_type)
group_box_filter_url = QGroupBox("Filter Urls")
form_box_filter_url = QFormLayout()
form_box_filter_url.addRow(self.check_box_url_contains, self.edit_url_contains)
group_box_filter_url.setLayout(form_box_filter_url)
horizontal_box2.addWidget(group_box_filter_url)
group_box_filter_status_code = QGroupBox("Filter Status Code")
form_box_filter_status_code = QFormLayout()
form_box_filter_status_code.addRow(self.check_box_status_code_contains, self.edit_status_code_contains)
group_box_filter_status_code.setLayout(form_box_filter_status_code)
horizontal_box3.addWidget(group_box_filter_status_code)
group_box_date_time = QGroupBox("DateTime")
form_box_date_time = QFormLayout()
form_box_date_time.addRow(self.check_box_start_date_time, self.start_date_time_picker)
form_box_date_time.addRow(self.check_box_end_date_time, self.end_date_time_picker)
group_box_date_time.setLayout(form_box_date_time)
group_box_buttons = QGroupBox()
vertical_buttons = QVBoxLayout()
vertical_buttons.addWidget(self.button_set)
vertical_buttons.addWidget(self.button_clear)
group_box_buttons.setLayout(vertical_buttons)
horizontal_box4.addWidget(group_box_date_time)
horizontal_box4.addWidget(group_box_buttons)
vertical_box_main.addStretch()
vertical_box_main.addLayout(horizontal_box1)
vertical_box_main.addStretch()
vertical_box_main.addLayout(horizontal_box2)
vertical_box_main.addStretch()
vertical_box_main.addLayout(horizontal_box3)
vertical_box_main.addStretch()
vertical_box_main.addLayout(horizontal_box4)
vertical_box_main.addStretch()
self.setLayout(vertical_box_main)
self.setWindowModality(Qt.ApplicationModal)
self.show()
def init_signals(self):
self.check_box_url_contains.stateChanged.connect(self.evt_url_state)
self.check_box_status_code_contains.stateChanged.connect(self.evt_status_code_state)
self.check_box_start_date_time.stateChanged.connect(self.evt_start_date_time_state)
self.check_box_end_date_time.stateChanged.connect(self.evt_end_date_time_state)
self.button_set.clicked.connect(self.evt_btn_set)
self.button_clear.clicked.connect(self.evt_btn_clear_all)
def evt_url_state(self):
if self.check_box_url_contains.isChecked():
self.edit_url_contains.setDisabled(False)
else:
self.edit_url_contains.setDisabled(True)
def evt_status_code_state(self):
if self.check_box_status_code_contains.isChecked():
self.edit_status_code_contains.setDisabled(False)
else:
self.edit_status_code_contains.setDisabled(True)
def evt_start_date_time_state(self):
if self.check_box_start_date_time.isChecked():
self.start_date_time_picker.setDisabled(False)
else:
self.start_date_time_picker.setDisabled(True)
def evt_end_date_time_state(self):
if self.check_box_end_date_time.isChecked():
self.end_date_time_picker.setDisabled(False)
else:
self.end_date_time_picker.setDisabled(True)
def evt_btn_set(self):
self.filter_dict['GET'] = self.check_box_http_get.isChecked()
self.filter_dict['OPTIONS'] = self.check_box_http_options.isChecked()
self.filter_dict['POST'] = self.check_box_http_post.isChecked()
self.filter_dict['TRACE'] = self.check_box_http_trace.isChecked()
self.filter_dict['PUT'] = self.check_box_http_put.isChecked()
self.filter_dict['CONNECT'] = self.check_box_http_connect.isChecked()
self.filter_dict['DELETE'] = self.check_box_http_delete.isChecked()
self.filter_dict['HEAD'] = self.check_box_http_head.isChecked()
self.filter_dict['IMAGE_FILE_EXTENSIONS'] = self.check_box_image_extn.isChecked()
self.filter_dict['FONT_FILE_EXTENSIONS'] = self.check_box_font_extn.isChecked()
self.filter_dict['CSS_FILES'] = self.check_box_css_extn.isChecked()
self.filter_dict['JAVASCRIPT_FILES'] = self.check_box_javascript_extn.isChecked()
if self.check_box_url_contains.isChecked():
self.filter_dict['FILTER_URL'] = self.edit_url_contains.text()
else:
self.filter_dict['FILTER_URL'] = ''
if self.check_box_status_code_contains.isChecked():
self.filter_dict['FILTER_STATUS_CODES'] = self.edit_status_code_contains.text()
else:
self.filter_dict['FILTER_STATUS_CODES'] = ''
if self.check_box_start_date_time.isChecked() and self.check_box_end_date_time.isChecked():
start_date_time = self.start_date_time_picker.dateTime()
end_date_time = self.end_date_time_picker.dateTime()
if start_date_time <= end_date_time:
self.filter_dict['START_DATE_TIME'] = start_date_time.toString("yyyy-MM-dd hh:mm:ss")
self.filter_dict['END_DATE_TIME'] = end_date_time.toString("yyyy-MM-dd hh:mm:ss")
else:
QMessageBox.information(self, "Warning", "Start Date time should be lesser than current Date time")
else:
if self.check_box_start_date_time.isChecked():
start_date_time = self.start_date_time_picker.dateTime().toString("yyyy-MM-dd hh:mm:ss")
self.filter_dict['START_DATE_TIME'] = start_date_time
else:
self.filter_dict['START_DATE_TIME'] = ''
if self.check_box_end_date_time.isChecked():
end_date_time = self.end_date_time_picker.dateTime().toString("yyyy-MM-dd hh:mm:ss")
self.filter_dict['END_DATE_TIME'] = end_date_time
else:
self.filter_dict['END_DATE_TIME'] = ''
self.close()
def evt_btn_clear_all(self):
self.check_box_http_get.setChecked(False)
self.check_box_http_options.setChecked(False)
self.check_box_http_post.setChecked(False)
self.check_box_http_trace.setChecked(False)
self.check_box_http_put.setChecked(False)
self.check_box_http_connect.setChecked(False)
self.check_box_http_delete.setChecked(False)
self.check_box_http_head.setChecked(False)
self.check_box_image_extn.setChecked(False)
self.check_box_font_extn.setChecked(False)
self.check_box_css_extn.setChecked(False)
self.check_box_javascript_extn.setChecked(False)
self.check_box_url_contains.setChecked(False)
self.edit_url_contains.setDisabled(True)
self.check_box_status_code_contains.setChecked(False)
self.edit_status_code_contains.setDisabled(True)
self.check_box_start_date_time.setChecked(False)
self.start_date_time_picker.setDisabled(True)
self.check_box_end_date_time.setChecked(False)
self.end_date_time_picker.setDisabled(True) | true | true |
f73671817556bbbd670b8a6f4c8d5928a731b5df | 26,965 | py | Python | envs/simulation/robot.py | cww97/visual-language-grasping | f96404c9997ef55ede07293ce319ca19a39ae5ec | [
"BSD-2-Clause"
] | 3 | 2020-05-08T11:14:21.000Z | 2021-07-09T15:30:01.000Z | envs/simulation/robot.py | cww97/visual-language-grasping | f96404c9997ef55ede07293ce319ca19a39ae5ec | [
"BSD-2-Clause"
] | 4 | 2020-03-10T13:24:43.000Z | 2021-07-13T06:09:03.000Z | envs/simulation/robot.py | cww97/visual-language-grasping | f96404c9997ef55ede07293ce319ca19a39ae5ec | [
"BSD-2-Clause"
] | 1 | 2021-04-23T01:38:46.000Z | 2021-04-23T01:38:46.000Z | import os
import time
import numpy as np
import yaml
import utils
from . import vrep
from ..robot import Robot as BaseRobot
from ..robot import Reward
from ..data import Data as TextData
import random
from bisect import bisect_right
import cv2
import os
class SimRobot(BaseRobot):
def __init__(self, obj_mesh_dir, num_obj, *args):
    """Simulated robot backed by a V-REP remote-API session.

    Args:
        obj_mesh_dir: directory containing the ``.obj`` meshes and a
            ``blocks.yml`` describing mesh names and groups.
        num_obj: number of objects to drop into the workspace.
        *args: forwarded to ``BaseRobot.__init__``.

    Exits the process if the mesh metadata cannot be read or the
    V-REP remote API server is unreachable.
    """
    BaseRobot.__init__(self, *args)
    self.text_data = TextData()
    # Colors for object meshes (Tableau palette); color_space[i] (RGB,
    # 0..1 floats) corresponds to color_name[i].
    self.color_name = ['blue', 'green', 'brown', 'orange', 'yellow', 'gray', 'red', 'purple', 'cyan', 'pink']
    self.color_space = np.asarray([[78.0, 121.0, 167.0],  # blue
                                   [89.0, 161.0, 79.0],  # green
                                   [156, 117, 95],  # brown
                                   [242, 142, 43],  # orange
                                   [237.0, 201.0, 72.0],  # yellow
                                   [186, 176, 172],  # gray
                                   [255.0, 87.0, 89.0],  # red
                                   [176, 122, 161],  # purple
                                   [118, 183, 178],  # cyan
                                   [255, 157, 167]]) / 255.0  # pink
    # Collect the .obj meshes available in the object mesh directory.
    self.obj_mesh_dir = obj_mesh_dir
    self.num_obj = num_obj
    self.mesh_list = list(filter(lambda x: x.endswith('.obj'), os.listdir(self.obj_mesh_dir)))
    # blocks.yml must provide a name for every mesh file plus the
    # grouping used when sampling objects in add_objects().
    try:
        with open(os.path.join(obj_mesh_dir, 'blocks.yml')) as f:
            yaml_dict = yaml.safe_load(f)
            self.groups = yaml_dict['groups']
            self.mesh_name = yaml_dict['names']
            for obj in self.mesh_list:
                if obj not in self.mesh_name.keys():
                    raise Exception
    except Exception:
        print('Failed to read block names/groups')
        exit(1)
    # Make sure to have the server side running in V-REP:
    # in a child script of a V-REP scene, add following command
    # to be executed just once, at simulation start:
    #
    # simExtRemoteApiStart(19999)
    #
    # then start simulation, and run this program.
    #
    # IMPORTANT: for each successful call to simxStart, there
    # should be a corresponding call to simxFinish at the end!
    # MODIFY remoteApiConnections.txt
    # Connect to simulator
    vrep.simxFinish(-1)  # Just in case, close all opened connections
    # Connect to V-REP on port 19997
    self.sim_client = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
    if self.sim_client == -1:
        print('Failed to connect to simulation (V-REP remote API server). Exiting.')
        exit()
    else:
        print('Connected to simulation.')
        # self.restart_sim()
    self.MODE = vrep.simx_opmode_blocking
    # Setup virtual camera in simulation
    self.setup_sim_camera()
    self.object_handles = []
    self.object_left_handles = []
    self.target_handle = None
    # Add objects to simulation environment
    # self.add_objects()
def setup_sim_camera(self):
    """Fetch camera handles from V-REP and compute the camera pose.

    Reads the perspective and orthographic vision-sensor handles,
    builds ``self.cam_pose`` (4x4 camera-to-world transform) from the
    sensor's position/orientation, fixes the intrinsics, and captures a
    background color/depth image pair.
    """
    # Get handles to the two vision sensors in the scene.
    sim_ret, self.cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_persp', self.MODE)
    _, self.up_cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_ortho', self.MODE)
    # Get camera pose and intrinsics in simulation.
    sim_ret, cam_position = vrep.simxGetObjectPosition(self.sim_client, self.cam_handle, -1, self.MODE)
    sim_ret, cam_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.cam_handle, -1, self.MODE)
    cam_trans = np.eye(4, 4)
    cam_trans[0:3, 3] = np.asarray(cam_position)
    # Euler angles are negated before converting to a rotation matrix,
    # then inverted — yielding the camera-to-world rotation.
    cam_orientation = [-cam_orientation[0], -cam_orientation[1], -cam_orientation[2]]
    cam_rotm = np.eye(4, 4)
    cam_rotm[0:3, 0:3] = np.linalg.inv(utils.euler2rotm(cam_orientation))
    # Compute rigid transformation representing the camera pose.
    self.cam_pose = np.dot(cam_trans, cam_rotm)
    # Hard-coded pinhole intrinsics (fx = fy = 618.62, cx = 320, cy = 240).
    self.cam_intrinsics = np.asarray([[618.62, 0, 320], [0, 618.62, 240], [0, 0, 1]])
    self.cam_depth_scale = 1
    # Get background image of the empty scene (used as reference).
    self.bg_color_img, self.bg_depth_img = self.get_camera_data()
    self.bg_depth_img = self.bg_depth_img * self.cam_depth_scale
def add_objects(self, mesh_idx=-1, mesh_color=-1):
    """Drop objects into the workspace via the V-REP ``importShape`` script.

    Args:
        mesh_idx: index into ``self.mesh_list`` of a single mesh to add;
            -1 (default) samples ``self.num_obj`` meshes, one per group,
            each with a distinct color from ``self.color_space``.
        mesh_color: RGB color used when ``mesh_idx`` is given explicitly.

    Side effects: populates ``self.object_handles`` /
    ``self.object_left_handles`` and calls ``self.get_instruction()``.
    Exits the process if the remote script call fails.
    """
    # TODO
    # handle <-> ind <-> obj -> name
    # Just for debug
    # print([self.mesh_list[ind] for ind in self.obj_mesh_ind])
    # self.obj_mesh_ind = np.array(range(len(self.mesh_list)))
    # self.obj_mesh_color = self.color_space[np.asarray(range(self.num_obj)) % 10, :]
    # Randomly choose objects to add to scene (one mesh per group so no
    # two objects come from the same shape family).
    if mesh_idx == -1:
        group_chosen = np.random.choice(self.groups, size=self.num_obj, replace=False)
        self.obj_mesh_ind = np.array([self.mesh_list.index(np.random.choice(obj)) for obj in group_chosen])
        self.obj_mesh_color = self.color_space[np.random.choice(np.arange(self.color_space.shape[0]), size=self.num_obj, replace=False)]
    else:
        self.obj_mesh_ind = np.array([mesh_idx])
        self.obj_mesh_color = np.array([mesh_color])
    # import pdb; pdb.set_trace()
    # Add each object to robot workspace at a random x,y location and
    # orientation, keeping a 0.1 margin from the workspace borders.
    self.object_handles = []
    for object_idx in range(len(self.obj_mesh_ind)):
        curr_mesh_file = os.path.join(self.obj_mesh_dir, self.mesh_list[self.obj_mesh_ind[object_idx]])
        curr_shape_name = 'shape_%02d' % object_idx
        drop_x = (self.workspace_limits[0][1] - self.workspace_limits[0][0] - 0.2) * np.random.random_sample() + self.workspace_limits[0][0] + 0.1
        drop_y = (self.workspace_limits[1][1] - self.workspace_limits[1][0] - 0.2) * np.random.random_sample() + self.workspace_limits[1][0] + 0.1
        object_position = [drop_x, drop_y, 0.15]
        object_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample()]
        object_color = [self.obj_mesh_color[object_idx][0], self.obj_mesh_color[object_idx][1], self.obj_mesh_color[object_idx][2]]
        # Remote child-script call that imports the mesh into the scene
        # and returns the new shape's handle in ret_ints[0].
        ret_resp, ret_ints, ret_floats, ret_strings, ret_buffer = vrep.simxCallScriptFunction(self.sim_client, 'remoteApiCommandServer', vrep.sim_scripttype_childscript, 'importShape', [0, 0, 255, 0], object_position + object_orientation + object_color, [curr_mesh_file, curr_shape_name], bytearray(), vrep.simx_opmode_blocking)
        if ret_resp == 8:
            print('Failed to add new objects to simulation. Please restart.')
            exit()
        # print(ret_ints, ret_ints[0])
        curr_shape_handle = ret_ints[0]
        self.object_handles.append(curr_shape_handle)
        # Let the object settle before dropping the next one.
        time.sleep(2)
    self.object_left_handles = self.object_handles.copy()
    self.prev_obj_positions = []
    self.obj_positions = []
    self.get_instruction()  # nb
    # import pdb; pdb.set_trace()
def restart_sim(self):
    """Park the gripper target and restart the V-REP simulation.

    Stops and starts the simulation, then repeats the cycle until the
    gripper tip settles below z = 0.4 (a V-REP bug sometimes requires
    multiple stop/start cycles before the scene is properly reset).
    """
    _, self.UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target', vrep.simx_opmode_blocking)
    vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (-0.5, 0, 0.3), vrep.simx_opmode_blocking)
    vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
    vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
    time.sleep(1)
    _, self.RG2_tip_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_tip', vrep.simx_opmode_blocking)
    _, tip_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
    while tip_position[2] > 0.4:
        vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
        vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
        time.sleep(1)
        _, tip_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
def is_stable(self):
    """Return True while the gripper tip stays inside the workspace.

    x/y bounds are relaxed by a 0.1 margin; z is checked strictly.
    Prints a notice when the check fails.
    """
    _, tip = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
    limits = self.workspace_limits
    ok = (limits[0][0] - 0.1 < tip[0] < limits[0][1] + 0.1
          and limits[1][0] - 0.1 < tip[1] < limits[1][1] + 0.1
          and limits[2][0] < tip[2] < limits[2][1])
    if not ok:
        print('Simulation unstable, Reset.')
    return ok
def reset(self):
    """Restart the simulation and drop a fresh set of objects."""
    self.restart_sim()
    self.add_objects()
# def stop_sim(self):objects/blocks
# if self.is_sim:
# # Now send some data to V-REP in a non-blocking fashion:
# # vrep.simxAddStatusbarMessage(sim_client,'Hello V-REP!',vrep.simx_opmode_oneshot)
# # # Start the simulation
# # vrep.simxStartSimulation(sim_client,vrep.simx_opmode_oneshot_wait)
# # # Stop simulation:
# # vrep.simxStopSimulation(sim_client,vrep.simx_opmode_oneshot_wait)
# # Before closing the connection to V-REP, make sure that the last command sent out had time to arrive. You can guarantee this with (for example):
# vrep.simxGetPingTime(self.sim_client)
# # Now close the connection to V-REP:
# vrep.simxFinish(self.sim_client)
def get_task_score(self):
    """Count objects whose nearest key position matches their slot.

    Object i is assigned key slot ``i % 4``; the score is the number of
    objects whose closest key position equals their assigned slot.
    """
    key_positions = np.asarray([[-0.625, 0.125, 0.0],    # red
                                [-0.625, -0.125, 0.0],   # blue
                                [-0.375, 0.125, 0.0],    # green
                                [-0.375, -0.125, 0.0]])  # yellow
    obj_positions = np.asarray(self.get_obj_positions())
    # Pairwise Euclidean distances, shape (num_keys, num_objects),
    # computed by broadcasting instead of reshape/tile.
    diffs = key_positions[:, np.newaxis, :] - obj_positions[np.newaxis, :, :]
    key_dist = np.sqrt(np.sum(np.power(diffs, 2), axis=2))
    nearest_key = np.argmin(key_dist, axis=0)
    return np.sum(nearest_key == np.asarray(range(self.num_obj)) % 4)
def check_goal_reached(self, handle):
    """Return True when ``handle`` is the handle of the target object."""
    # goal_reached = self.get_task_score() == self.num_obj
    return handle == self.target_handle
def get_obj_positions(self):
    """Query V-REP for the world-frame position of every tracked object."""
    return [
        vrep.simxGetObjectPosition(self.sim_client, handle, -1, vrep.simx_opmode_blocking)[1]
        for handle in self.object_handles
    ]
def get_obj_positions_and_orientations(self):
obj_positions = []
obj_orientations = []
for object_handle in self.object_handles:
sim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, self.MODE)
sim_ret, object_orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, self.MODE)
obj_positions.append(object_position)
obj_orientations.append(object_orientation)
return obj_positions, obj_orientations
def reposition_objects(self, workspace_limits):
# Move gripper out of the way
self.move_to([-0.1, 0, 0.3], None)
# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target', self.MODE)
# vrep.simxSetObjectPosition(self.sim_client, UR5_target_handle, -1, (-0.5,0,0.3), self.MODE)
# time.sleep(1)
for object_handle in self.object_handles:
# Drop object at random x,y location and random orientation in robot workspace
drop_x = (workspace_limits[0][1] - workspace_limits[0][0] - 0.2) * np.random.random_sample() + workspace_limits[0][0] + 0.1
drop_y = (workspace_limits[1][1] - workspace_limits[1][0] - 0.2) * np.random.random_sample() + workspace_limits[1][0] + 0.1
object_position = [drop_x, drop_y, 0.15]
object_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample()]
vrep.simxSetObjectPosition(self.sim_client, object_handle, -1, object_position, self.MODE)
vrep.simxSetObjectOrientation(self.sim_client, object_handle, -1, object_orientation, self.MODE)
time.sleep(2)
def get_camera_data(self, handle=-1):
if handle == -1:
handle = self.cam_handle
# Get color image from simulation
sim_ret, resolution, raw_image = vrep.simxGetVisionSensorImage(self.sim_client, handle, 0, self.MODE)
color_img = np.asarray(raw_image)
color_img.shape = (resolution[1], resolution[0], 3)
color_img = color_img.astype(np.float) / 255
color_img[color_img < 0] += 1
color_img *= 255
color_img = np.fliplr(color_img)
color_img = color_img.astype(np.uint8)
# Get depth image from simulation
sim_ret, resolution, depth_buffer = vrep.simxGetVisionSensorDepthBuffer(self.sim_client, handle, self.MODE)
depth_img = np.asarray(depth_buffer)
depth_img.shape = (resolution[1], resolution[0])
depth_img = np.fliplr(depth_img)
zNear = 0.01
zFar = 10
depth_img = depth_img * (zFar - zNear) + zNear
return color_img, depth_img
def get_instruction(self):
# TODO
# add more template
instruction_template = "pick up the {color} {shape}."
ind = np.random.randint(0, self.num_obj)
color = utils.get_mush_color_name(self.obj_mesh_color[ind])
shape = np.random.choice(self.mesh_name[self.mesh_list[self.obj_mesh_ind[ind]]])
self.target_handle = self.object_handles[ind]
self.instruction_str = instruction_template.format(color=color, shape=shape) # nb
self.instruction = self.text_data.get_tensor(self.instruction_str)
return self.instruction
    def close_gripper(self, _async=False):
        """Close the RG2 gripper; return True iff it closed fully (i.e. grasped nothing).

        Blocks until the finger joint either reaches the fully-closed angle or
        stops moving (an object is blocking the fingers). `_async` is unused here.
        """
        gripper_motor_velocity = -0.5
        gripper_motor_force = 100
        sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)
        sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
        vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
        vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)
        gripper_fully_closed = False
        while gripper_joint_position > -0.047:  # Block until gripper is fully closed
            sim_ret, new_gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
            # print(gripper_joint_position)
            # Joint stopped closing before the threshold -> fingers blocked by an object.
            if new_gripper_joint_position >= gripper_joint_position:
                return gripper_fully_closed
            gripper_joint_position = new_gripper_joint_position
        gripper_fully_closed = True
        return gripper_fully_closed
def open_gripper(self, _async=False):
gripper_motor_velocity = 0.5
gripper_motor_force = 20
sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)
while gripper_joint_position < 0.0536: # Block until gripper is fully open
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
def move_to(self, tool_position, tool_orientation):
# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.02 * move_direction / move_magnitude
num_move_steps = int(np.floor(move_magnitude / 0.02))
for step_iter in range(num_move_steps):
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0], UR5_target_position[1] + move_step[1], UR5_target_position[2] + move_step[2]), vrep.simx_opmode_blocking)
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
# Primitives ----------------------------------------------------------
def random_grasp_action(self):
'''
angles = []
for i in range(8):
angle = np.deg2rad(i * (360.0 / 16))
tool_rotation_angle = (angle % np.pi) - np.pi / 2
angles.append(tool_rotation_angle)
print(angles)
'''
# assert len(self.object_left_handles) > 0
object_handle = random.sample(self.object_left_handles, 1)[0]
_, orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, self.MODE)
all_angles = [-1.5708, -1.1781, -0.7854, -0.3927, 0.0, 0.3927, 0.7854, 1.1781]
possible_angles = [orientation[1], orientation[1] - np.pi/2.0]
anegle = random.sample(possible_angles, 1)[0]
angle = max(0, bisect_right(all_angles, orientation[1]) - 1)
_, position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, self.MODE)
action_x = (position[1] - self.workspace_limits[1][0]) / self.heightmap_resolution
action_y = (position[0] - self.workspace_limits[0][0]) / self.heightmap_resolution
action_x = min(action_x, 223)
action_y = min(action_y, 223)
action = (angle, int(action_x), int(action_y))
# print(object_handle, action)
# import pdb; pdb.set_trace()
return action
def step(self, action, valid_depth_heightmap, num_rotations, heightmap_resolution):
# Compute 3D position of pixel
angle = np.deg2rad(action[0] * (360.0 / num_rotations))
best_pix_x = action[2]
best_pix_y = action[1]
primitive_position = [
best_pix_x * heightmap_resolution + self.workspace_limits[0][0],
best_pix_y * heightmap_resolution + self.workspace_limits[1][0],
valid_depth_heightmap[best_pix_y][best_pix_x] + self.workspace_limits[2][0]
]
reward = self.grasp(primitive_position, angle)
done = (reward == Reward.SUCCESS)
# print(reward, done)
return reward.value, done
    def grasp(self, position, heightmap_rotation_angle):
        """Attempt a top-down grasp at *position* with the given heightmap rotation.

        Returns Reward.SUCCESS if the target object was grasped, Reward.WRONG if
        some other object was grasped, Reward.FAIL if nothing was grasped.
        A grasped object is teleported out of the workspace.
        """
        # print('Executing: grasp at (%f, %f, %f)' % (position[0], position[1], position[2]))
        # Compute tool orientation from heightmap rotation angle
        tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2
        # Avoid collision with floor
        position = np.asarray(position).copy()
        position[2] = max(position[2] - 0.04, self.workspace_limits[2][0] + 0.02)
        # Move gripper to location above grasp target
        grasp_location_margin = 0.15
        # sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
        location_above_grasp_target = (position[0], position[1], position[2] + grasp_location_margin)
        # Compute gripper position and linear movement increments
        tool_position = location_above_grasp_target
        sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
        move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
        move_magnitude = np.linalg.norm(move_direction)
        move_step = 0.05 * move_direction / move_magnitude
        # if np.floor(move_direction[0] / move_step[0]) == np.nan or move_step[0] == 0: import pdb; pdb.set_trace()
        num_move_steps = int(np.floor(move_direction[0] / move_step[0])) if move_step[0] != 0 else 1
        # Compute gripper orientation and rotation increments
        sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
        rotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3
        num_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))
        # Simultaneously move and rotate gripper
        for step_iter in range(max(num_move_steps, num_rotation_steps)):
            vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps), UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps), UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)
            vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2), vrep.simx_opmode_blocking)
        vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
        vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)
        # Ensure gripper is open
        self.open_gripper()
        # Approach grasp target
        self.move_to(position, None)
        # Close gripper to grasp target
        gripper_full_closed = self.close_gripper()
        # Move gripper to location above grasp target
        self.move_to(location_above_grasp_target, None)
        # Check if grasp is successful (fully-closed fingers mean nothing was held)
        gripper_full_closed = self.close_gripper()
        grasp_sth = not gripper_full_closed
        # Move the grasped object elsewhere
        if grasp_sth:
            # The held object is the highest one; park it outside the workspace.
            object_positions = np.asarray(self.get_obj_positions())
            object_positions = object_positions[:, 2]
            grasped_object_ind = np.argmax(object_positions)
            grasped_object_handle = self.object_handles[grasped_object_ind]
            vrep.simxSetObjectPosition(self.sim_client, grasped_object_handle, -1, (-0.5, 0.5 + 0.05 * float(grasped_object_ind), 0.1), self.MODE)
            self.object_left_handles.remove(grasped_object_handle)
            if grasped_object_handle == self.target_handle:
                return Reward.SUCCESS
            else:
                return Reward.WRONG
        else:
            return Reward.FAIL
def push(self, position, heightmap_rotation_angle, workspace_limits):
# print('Executing: push at (%f, %f, %f)' % (position[0], position[1], position[2]))
# Compute tool orientation from heightmap rotation angle
tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2
# Adjust pushing point to be on tip of finger
position[2] = position[2] + 0.026
# Compute pushing direction
push_orientation = [1.0, 0.0]
push_direction = np.asarray([push_orientation[0] * np.cos(heightmap_rotation_angle) - push_orientation[1] * np.sin(heightmap_rotation_angle), push_orientation[0] * np.sin(heightmap_rotation_angle) + push_orientation[1] * np.cos(heightmap_rotation_angle)])
# Move gripper to location above pushing point
pushing_point_margin = 0.1
location_above_pushing_point = (position[0], position[1], position[2] + pushing_point_margin)
# Compute gripper position and linear movement increments
tool_position = location_above_pushing_point
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.05 * move_direction / move_magnitude
num_move_steps = int(np.floor(move_direction[0] / move_step[0]))
# Compute gripper orientation and rotation increments
sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
rotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3
num_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))
# Simultaneously move and rotate gripper
for step_iter in range(max(num_move_steps, num_rotation_steps)):
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps), UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps), UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2), vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)
# Ensure gripper is closed
self.close_gripper()
# Approach pushing point
self.move_to(position, None)
# Compute target location (push to the right)
push_length = 0.1
target_x = min(max(position[0] + push_direction[0] * push_length, workspace_limits[0][0]), workspace_limits[0][1])
target_y = min(max(position[1] + push_direction[1] * push_length, workspace_limits[1][0]), workspace_limits[1][1])
push_length = np.sqrt(np.power(target_x - position[0], 2) + np.power(target_y - position[1], 2))
# Move in pushing direction towards target location
self.move_to([target_x, target_y, position[2]], None)
# Move gripper to location above grasp target
self.move_to([target_x, target_y, location_above_pushing_point[2]], None)
push_success = True
return push_success
# def place(self, position, heightmap_rotation_angle, workspace_limits):
# print('Executing: place at (%f, %f, %f)' % (position[0], position[1], position[2]))
# # Compute tool orientation from heightmap rotation angle
# tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi/2
# # Avoid collision with floor
# position[2] = max(position[2] + 0.04 + 0.02, workspace_limits[2][0] + 0.02)
# # Move gripper to location above place target
# place_location_margin = 0.1
# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
# location_above_place_target = (position[0], position[1], position[2] + place_location_margin)
# self.move_to(location_above_place_target, None)
# sim_ret,gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1, vrep.simx_opmode_blocking)
# if tool_rotation_angle - gripper_orientation[1] > 0:
# increment = 0.2
# else:
# increment = -0.2
# while abs(tool_rotation_angle - gripper_orientation[1]) >= 0.2:
# vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1, (np.pi/2, gripper_orientation[1] + increment, np.pi/2), vrep.simx_opmode_blocking)
# time.sleep(0.01)
# sim_ret,gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1, vrep.simx_opmode_blocking)
# vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1, (np.pi/2, tool_rotation_angle, np.pi/2), vrep.simx_opmode_blocking)
# # Approach place target
# self.move_to(position, None)
# # Ensure gripper is open
# self.open_gripper()
# # Move gripper to location above place target
# self.move_to(location_above_place_target, None)
import os
import time
import numpy as np
import yaml
import utils
from . import vrep
from ..robot import Robot as BaseRobot
from ..robot import Reward
from ..data import Data as TextData
import random
from bisect import bisect_right
import cv2
import os
class SimRobot(BaseRobot):
    def __init__(self, obj_mesh_dir, num_obj, *args):
        """Connect to the V-REP remote API server and prepare object metadata.

        Args:
            obj_mesh_dir: directory containing the .obj block meshes and a
                blocks.yml file with 'groups' and 'names' entries.
            num_obj: number of objects to spawn per episode.
            *args: forwarded to BaseRobot.__init__.

        Exits the process if blocks.yml is unreadable or the simulator at
        127.0.0.1:19997 cannot be reached.
        """
        BaseRobot.__init__(self, *args)
        self.text_data = TextData()
        # Human-readable names for the rows of color_space below (RGB in [0, 1]).
        self.color_name = ['blue', 'green', 'brown', 'orange', 'yellow', 'gray', 'red', 'purple', 'cyan', 'pink']
        self.color_space = np.asarray([[78.0, 121.0, 167.0],
                                       [89.0, 161.0, 79.0],
                                       [156, 117, 95],
                                       [242, 142, 43],
                                       [237.0, 201.0, 72.0],
                                       [186, 176, 172],
                                       [255.0, 87.0, 89.0],
                                       [176, 122, 161],
                                       [118, 183, 178],
                                       [255, 157, 167]]) / 255.0
        self.obj_mesh_dir = obj_mesh_dir
        self.num_obj = num_obj
        self.mesh_list = list(filter(lambda x: x.endswith('.obj'), os.listdir(self.obj_mesh_dir)))
        # blocks.yml must name every mesh file found in the directory.
        try:
            with open(os.path.join(obj_mesh_dir, 'blocks.yml')) as f:
                yaml_dict = yaml.safe_load(f)
                self.groups = yaml_dict['groups']
                self.mesh_name = yaml_dict['names']
                for obj in self.mesh_list:
                    if obj not in self.mesh_name.keys():
                        raise Exception
        except Exception:
            print('Failed to read block names/groups')
            exit(1)
        # Connect to the simulator (legacy remote API, blocking mode).
        vrep.simxFinish(-1)
        self.sim_client = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
        if self.sim_client == -1:
            print('Failed to connect to simulation (V-REP remote API server). Exiting.')
            exit()
        else:
            print('Connected to simulation.')
        self.MODE = vrep.simx_opmode_blocking
        self.setup_sim_camera()
        self.object_handles = []
        self.object_left_handles = []  # handles of objects not yet removed by a grasp
        self.target_handle = None      # handle of the current instruction's target
    def setup_sim_camera(self):
        """Fetch camera handles, build the camera pose/intrinsics, and capture a background RGB-D frame."""
        sim_ret, self.cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_persp', self.MODE)
        _, self.up_cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_ortho', self.MODE)
        sim_ret, cam_position = vrep.simxGetObjectPosition(self.sim_client, self.cam_handle, -1, self.MODE)
        sim_ret, cam_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.cam_handle, -1, self.MODE)
        # Compose a 4x4 world-from-camera pose from translation and (negated) Euler angles.
        cam_trans = np.eye(4, 4)
        cam_trans[0:3, 3] = np.asarray(cam_position)
        cam_orientation = [-cam_orientation[0], -cam_orientation[1], -cam_orientation[2]]
        cam_rotm = np.eye(4, 4)
        cam_rotm[0:3, 0:3] = np.linalg.inv(utils.euler2rotm(cam_orientation))
        self.cam_pose = np.dot(cam_trans, cam_rotm)
        # Fixed pinhole intrinsics for the simulated sensor.
        self.cam_intrinsics = np.asarray([[618.62, 0, 320], [0, 618.62, 240], [0, 0, 1]])
        self.cam_depth_scale = 1
        self.bg_color_img, self.bg_depth_img = self.get_camera_data()
        self.bg_depth_img = self.bg_depth_img * self.cam_depth_scale
    def add_objects(self, mesh_idx=-1, mesh_color=-1):
        """Spawn objects at random workspace poses via the simulator-side importShape script.

        With the defaults, num_obj meshes are sampled (one per group) with
        distinct random colors; passing mesh_idx/mesh_color spawns exactly that
        single mesh/color. Exits the process if the import script fails.
        Ends by picking a new target instruction.
        """
        if mesh_idx == -1:
            group_chosen = np.random.choice(self.groups, size=self.num_obj, replace=False)
            self.obj_mesh_ind = np.array([self.mesh_list.index(np.random.choice(obj)) for obj in group_chosen])
            self.obj_mesh_color = self.color_space[np.random.choice(np.arange(self.color_space.shape[0]), size=self.num_obj, replace=False)]
        else:
            self.obj_mesh_ind = np.array([mesh_idx])
            self.obj_mesh_color = np.array([mesh_color])
        self.object_handles = []
        for object_idx in range(len(self.obj_mesh_ind)):
            curr_mesh_file = os.path.join(self.obj_mesh_dir, self.mesh_list[self.obj_mesh_ind[object_idx]])
            curr_shape_name = 'shape_%02d' % object_idx
            # Random drop point with a 0.1 m margin inside the workspace.
            drop_x = (self.workspace_limits[0][1] - self.workspace_limits[0][0] - 0.2) * np.random.random_sample() + self.workspace_limits[0][0] + 0.1
            drop_y = (self.workspace_limits[1][1] - self.workspace_limits[1][0] - 0.2) * np.random.random_sample() + self.workspace_limits[1][0] + 0.1
            object_position = [drop_x, drop_y, 0.15]
            object_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample()]
            object_color = [self.obj_mesh_color[object_idx][0], self.obj_mesh_color[object_idx][1], self.obj_mesh_color[object_idx][2]]
            # Ask the scene's child script to import and place the mesh.
            ret_resp, ret_ints, ret_floats, ret_strings, ret_buffer = vrep.simxCallScriptFunction(self.sim_client, 'remoteApiCommandServer', vrep.sim_scripttype_childscript, 'importShape', [0, 0, 255, 0], object_position + object_orientation + object_color, [curr_mesh_file, curr_shape_name], bytearray(), vrep.simx_opmode_blocking)
            if ret_resp == 8:
                print('Failed to add new objects to simulation. Please restart.')
                exit()
            curr_shape_handle = ret_ints[0]
            self.object_handles.append(curr_shape_handle)
            time.sleep(2)
        self.object_left_handles = self.object_handles.copy()
        self.prev_obj_positions = []
        self.obj_positions = []
        self.get_instruction()
    def restart_sim(self):
        """Stop and restart the simulation, retrying until the gripper tip settles below z = 0.4 m."""
        sim_ret, self.UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target', vrep.simx_opmode_blocking)
        vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (-0.5, 0, 0.3), vrep.simx_opmode_blocking)
        vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
        vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
        time.sleep(1)
        sim_ret, self.RG2_tip_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_tip', vrep.simx_opmode_blocking)
        sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
        # Tip above 0.4 m means the arm did not settle; restart until it does.
        while gripper_position[2] > 0.4:
            vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
            vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
            time.sleep(1)
            sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
def is_stable(self):
sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
sim_is_ok = gripper_position[0] > self.workspace_limits[0][0] - 0.1 and \
gripper_position[0] < self.workspace_limits[0][1] + 0.1 and \
gripper_position[1] > self.workspace_limits[1][0] - 0.1 and \
gripper_position[1] < self.workspace_limits[1][1] + 0.1 and \
gripper_position[2] > self.workspace_limits[2][0] and \
gripper_position[2] < self.workspace_limits[2][1]
if not sim_is_ok:
print('Simulation unstable, Reset.')
return sim_is_ok
    def reset(self):
        """Restart the simulation and spawn a fresh set of objects."""
        self.restart_sim()
        self.add_objects()
_positions.shape[1], 1))
key_dist = np.sqrt(np.sum(np.power(obj_positions - key_positions, 2), axis=2))
key_nn_idx = np.argmin(key_dist, axis=0)
return np.sum(key_nn_idx == np.asarray(range(self.num_obj)) % 4)
def check_goal_reached(self, handle):
goal_reached = self.target_handle == handle
return goal_reached
    def get_obj_positions(self):
        """Return the world-frame position of every tracked object (blocking sim queries)."""
        obj_positions = []
        for object_handle in self.object_handles:
            sim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, vrep.simx_opmode_blocking)
            obj_positions.append(object_position)
        return obj_positions
    def get_obj_positions_and_orientations(self):
        """Return ([positions], [orientations]) for every tracked object, world frame."""
        obj_positions = []
        obj_orientations = []
        for object_handle in self.object_handles:
            sim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, self.MODE)
            sim_ret, object_orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, self.MODE)
            obj_positions.append(object_position)
            obj_orientations.append(object_orientation)
        return obj_positions, obj_orientations
    def reposition_objects(self, workspace_limits):
        """Scatter every object back into the workspace at a random pose (gripper parked first)."""
        self.move_to([-0.1, 0, 0.3], None)
        for object_handle in self.object_handles:
            # Random drop point with a 0.1 m margin inside the workspace.
            drop_x = (workspace_limits[0][1] - workspace_limits[0][0] - 0.2) * np.random.random_sample() + workspace_limits[0][0] + 0.1
            drop_y = (workspace_limits[1][1] - workspace_limits[1][0] - 0.2) * np.random.random_sample() + workspace_limits[1][0] + 0.1
            object_position = [drop_x, drop_y, 0.15]
            object_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample()]
            vrep.simxSetObjectPosition(self.sim_client, object_handle, -1, object_position, self.MODE)
            vrep.simxSetObjectOrientation(self.sim_client, object_handle, -1, object_orientation, self.MODE)
            time.sleep(2)
def get_camera_data(self, handle=-1):
if handle == -1:
handle = self.cam_handle
sim_ret, resolution, raw_image = vrep.simxGetVisionSensorImage(self.sim_client, handle, 0, self.MODE)
color_img = np.asarray(raw_image)
color_img.shape = (resolution[1], resolution[0], 3)
color_img = color_img.astype(np.float) / 255
color_img[color_img < 0] += 1
color_img *= 255
color_img = np.fliplr(color_img)
color_img = color_img.astype(np.uint8)
sim_ret, resolution, depth_buffer = vrep.simxGetVisionSensorDepthBuffer(self.sim_client, handle, self.MODE)
depth_img = np.asarray(depth_buffer)
depth_img.shape = (resolution[1], resolution[0])
depth_img = np.fliplr(depth_img)
zNear = 0.01
zFar = 10
depth_img = depth_img * (zFar - zNear) + zNear
return color_img, depth_img
    def get_instruction(self):
        """Pick a random object as the grasp target and build its language-instruction tensor.

        Side effects: sets self.target_handle, self.instruction_str and
        self.instruction. Returns the instruction tensor.
        """
        instruction_template = "pick up the {color} {shape}."
        ind = np.random.randint(0, self.num_obj)
        color = utils.get_mush_color_name(self.obj_mesh_color[ind])
        shape = np.random.choice(self.mesh_name[self.mesh_list[self.obj_mesh_ind[ind]]])
        self.target_handle = self.object_handles[ind]
        self.instruction_str = instruction_template.format(color=color, shape=shape)
        self.instruction = self.text_data.get_tensor(self.instruction_str)
        return self.instruction
    def close_gripper(self, _async=False):
        """Close the RG2 gripper; return True iff it closed fully (i.e. grasped nothing).

        Blocks until the finger joint reaches the fully-closed angle or stops
        moving (fingers blocked by an object). `_async` is unused here.
        """
        gripper_motor_velocity = -0.5
        gripper_motor_force = 100
        sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)
        sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
        vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
        vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)
        gripper_fully_closed = False
        while gripper_joint_position > -0.047:  # block until fully closed
            sim_ret, new_gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
            # Joint no longer closing -> fingers blocked by an object.
            if new_gripper_joint_position >= gripper_joint_position:
                return gripper_fully_closed
            gripper_joint_position = new_gripper_joint_position
        gripper_fully_closed = True
        return gripper_fully_closed
    def open_gripper(self, _async=False):
        """Open the RG2 gripper and block until the finger joint is fully open."""
        gripper_motor_velocity = 0.5
        gripper_motor_force = 20
        sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)
        sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
        vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
        vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)
        # Busy-wait on the joint angle until the fingers are fully apart.
        while gripper_joint_position < 0.0536:
            sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
    def move_to(self, tool_position, tool_orientation):
        """Move the UR5 IK target linearly to *tool_position* in 2 cm increments.

        `tool_orientation` is accepted but ignored (orientation unchanged).
        NOTE(review): a zero-length move divides by zero when normalizing — confirm
        callers never pass the current position.
        """
        sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
        move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
        move_magnitude = np.linalg.norm(move_direction)
        move_step = 0.02 * move_direction / move_magnitude
        num_move_steps = int(np.floor(move_magnitude / 0.02))
        for step_iter in range(num_move_steps):
            vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0], UR5_target_position[1] + move_step[1], UR5_target_position[2] + move_step[2]), vrep.simx_opmode_blocking)
            sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
        # Snap exactly onto the requested position.
        vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
    def random_grasp_action(self):
        """Sample a heuristic grasp action aimed at a random remaining object.

        Returns:
            (angle_idx, pixel_x, pixel_y): index into the 8 discretized gripper
            rotation angles and the object's heightmap pixel, clamped to 224x224.
        """
        object_handle = random.sample(self.object_left_handles, 1)[0]
        _, orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, self.MODE)
        # The 8 discretized tool-rotation angles, i.e. (i * 22.5 deg) % 180 - 90.
        all_angles = [-1.5708, -1.1781, -0.7854, -0.3927, 0.0, 0.3927, 0.7854, 1.1781]
        possible_angles = [orientation[1], orientation[1] - np.pi/2.0]
        # NOTE(review): 'anegle' (sic) is computed but never used — dead code.
        anegle = random.sample(possible_angles, 1)[0]
        # Nearest discretized bin at or below the object's yaw.
        angle = max(0, bisect_right(all_angles, orientation[1]) - 1)
        _, position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, self.MODE)
        # World (x, y) -> heightmap pixel, clamped into the map.
        action_x = (position[1] - self.workspace_limits[1][0]) / self.heightmap_resolution
        action_y = (position[0] - self.workspace_limits[0][0]) / self.heightmap_resolution
        action_x = min(action_x, 223)
        action_y = min(action_y, 223)
        action = (angle, int(action_x), int(action_y))
        return action
    def step(self, action, valid_depth_heightmap, num_rotations, heightmap_resolution):
        """Execute one grasp given as (rotation_idx, pix_y, pix_x) on the heightmap.

        Returns (reward_value, done); done is True only on a successful target grasp.
        """
        # Heightmap rotation bin -> radians.
        angle = np.deg2rad(action[0] * (360.0 / num_rotations))
        best_pix_x = action[2]
        best_pix_y = action[1]
        # Heightmap pixel -> 3-D workspace position of the grasp point.
        primitive_position = [
            best_pix_x * heightmap_resolution + self.workspace_limits[0][0],
            best_pix_y * heightmap_resolution + self.workspace_limits[1][0],
            valid_depth_heightmap[best_pix_y][best_pix_x] + self.workspace_limits[2][0]
        ]
        reward = self.grasp(primitive_position, angle)
        done = (reward == Reward.SUCCESS)
        return reward.value, done
    def grasp(self, position, heightmap_rotation_angle):
        """Attempt a top-down grasp at *position* with the given heightmap rotation.

        Returns Reward.SUCCESS if the target object was grasped, Reward.WRONG if
        another object was grasped, Reward.FAIL if nothing was grasped. A grasped
        object is teleported out of the workspace.
        """
        # Tool orientation from heightmap rotation angle.
        tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2
        # Lower slightly but avoid colliding with the floor.
        position = np.asarray(position).copy()
        position[2] = max(position[2] - 0.04, self.workspace_limits[2][0] + 0.02)
        grasp_location_margin = 0.15
        location_above_grasp_target = (position[0], position[1], position[2] + grasp_location_margin)
        # Linear movement increments towards the approach point.
        tool_position = location_above_grasp_target
        sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
        move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
        move_magnitude = np.linalg.norm(move_direction)
        move_step = 0.05 * move_direction / move_magnitude
        num_move_steps = int(np.floor(move_direction[0] / move_step[0])) if move_step[0] != 0 else 1
        # Rotation increments towards the tool angle.
        sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
        rotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3
        num_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))
        # Simultaneously move and rotate the gripper.
        for step_iter in range(max(num_move_steps, num_rotation_steps)):
            vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps), UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps), UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)
            vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2), vrep.simx_opmode_blocking)
        vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
        vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)
        # Open, descend, close, lift.
        self.open_gripper()
        self.move_to(position, None)
        gripper_full_closed = self.close_gripper()
        self.move_to(location_above_grasp_target, None)
        # Fully-closed fingers after lifting means nothing is held.
        gripper_full_closed = self.close_gripper()
        grasp_sth = not gripper_full_closed
        if grasp_sth:
            # The held object is the highest one; park it outside the workspace.
            object_positions = np.asarray(self.get_obj_positions())
            object_positions = object_positions[:, 2]
            grasped_object_ind = np.argmax(object_positions)
            grasped_object_handle = self.object_handles[grasped_object_ind]
            vrep.simxSetObjectPosition(self.sim_client, grasped_object_handle, -1, (-0.5, 0.5 + 0.05 * float(grasped_object_ind), 0.1), self.MODE)
            self.object_left_handles.remove(grasped_object_handle)
            if grasped_object_handle == self.target_handle:
                return Reward.SUCCESS
            else:
                return Reward.WRONG
        else:
            return Reward.FAIL
def push(self, position, heightmap_rotation_angle, workspace_limits):
tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2
position[2] = position[2] + 0.026
push_orientation = [1.0, 0.0]
push_direction = np.asarray([push_orientation[0] * np.cos(heightmap_rotation_angle) - push_orientation[1] * np.sin(heightmap_rotation_angle), push_orientation[0] * np.sin(heightmap_rotation_angle) + push_orientation[1] * np.cos(heightmap_rotation_angle)])
pushing_point_margin = 0.1
location_above_pushing_point = (position[0], position[1], position[2] + pushing_point_margin)
tool_position = location_above_pushing_point
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.05 * move_direction / move_magnitude
num_move_steps = int(np.floor(move_direction[0] / move_step[0]))
sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
rotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3
num_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))
for step_iter in range(max(num_move_steps, num_rotation_steps)):
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps), UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps), UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2), vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)
self.close_gripper()
self.move_to(position, None)
push_length = 0.1
target_x = min(max(position[0] + push_direction[0] * push_length, workspace_limits[0][0]), workspace_limits[0][1])
target_y = min(max(position[1] + push_direction[1] * push_length, workspace_limits[1][0]), workspace_limits[1][1])
push_length = np.sqrt(np.power(target_x - position[0], 2) + np.power(target_y - position[1], 2))
self.move_to([target_x, target_y, position[2]], None)
self.move_to([target_x, target_y, location_above_pushing_point[2]], None)
push_success = True
return push_success
| true | true |
f7367350088cb16efe08739ab429a004915de773 | 3,613 | py | Python | nwnodedetector/nwocrsounds.py | wesh92/nwnodetool | 1c79418d9ad0f1f22dfbc6aab390ca9603fb1596 | [
"Apache-2.0"
] | null | null | null | nwnodedetector/nwocrsounds.py | wesh92/nwnodetool | 1c79418d9ad0f1f22dfbc6aab390ca9603fb1596 | [
"Apache-2.0"
] | null | null | null | nwnodedetector/nwocrsounds.py | wesh92/nwnodetool | 1c79418d9ad0f1f22dfbc6aab390ca9603fb1596 | [
"Apache-2.0"
] | null | null | null | """
New World OCR Node Detector
Created: 2021-10-07
Dev: Wes H.
Uses OCR to get coordinates from top right of the NW game window
and imposes that against a list of possible nodes.
When you're close to one it will play a bell noise!
"""
import winsound
from PIL import ImageGrab, ImageOps, Image
import pytesseract
import psutil
from time import sleep
import pathlib
import iron_markers as imark
import essence_markers as emark
import chest_essence as ce
import numpy as np
# Screen-grab bbox reference for a 3440*1440 display: (3182,19,3416,39)
localpath = str(pathlib.Path(__file__).parent.resolve())
# Point pytesseract at the Tesseract binary bundled next to this script.
pytesseract.pytesseract.tesseract_cmd = rf"{localpath}\Tesseract-OCR\tesseract.exe"
# Marker list to scan for; an explicit coordinate list also works, e.g.:
# node = [['7831', '1673'], ['9341', '2725']]
node = ce.chest_essence
def _strip_ocr_noise(raw):
    """Collapse raw tesseract output to a plain digit string.

    Removes newlines, the form-feed terminator tesseract appends, and the
    bracket characters it sometimes hallucinates around digits.
    NOTE(review): ')' is deliberately left untouched to mirror the original
    cleanup — confirm whether it should be stripped too.
    """
    return str(raw).replace('\n', '').replace('\x0c', '').replace('(', '').replace(']', '').replace('[', '')


def screen_loc_check(items, screen_img):
    """Return True if the OCR'd on-screen position is near any marker.

    Parameters
    ----------
    items : iterable of [x, y] pairs (numeric strings or numbers)
        Marker coordinates; a hit is any marker within +/-15 of the OCR'd
        position on both axes.
    screen_img : PIL.Image.Image
        Cropped screenshot of the in-game coordinate readout.
    """
    # OCR the z readout first: when z >= 100 the readout is wider, which
    # shifts where the x and y fields sit inside the screenshot.
    z = ImageOps.crop(screen_img, (173,0,50,0))
    zpos = pytesseract.image_to_string(z, config="--psm 13 outputbase digits")
    zpos = _strip_ocr_noise(zpos)
    if zpos.isdigit() and int(zpos) >= 100:
        xcrop = (0,0,220,0)
        ycrop = (82,0,128,0)
    else:
        xcrop = (0,0,210,0)
        ycrop = (88,0,120,0)
    # Crop out the x and y fields and upscale them for better OCR accuracy.
    x = ImageOps.crop(screen_img, xcrop)
    y = ImageOps.crop(screen_img, ycrop)
    x = x.resize((150, 100))
    y = y.resize((150, 100))
    # Threshold: blank out pixels darker than (110, 105, 70) or brighter than
    # (235, 235, 165), leaving only the mid-tone glyph pixels.
    datax = np.array(x)
    datay = np.array(y)
    r1, g1, b1 = 235, 235, 165      # upper bound (bright pixels -> black)
    r1x, g1x, b1x = 110, 105, 70    # lower bound (dark pixels -> black)
    r2, g2, b2 = 0, 0, 0            # replacement colour
    redx, greenx, bluex = datax[:,:,0], datax[:,:,1], datax[:,:,2]
    redy, greeny, bluey = datay[:,:,0], datay[:,:,1], datay[:,:,2]
    mask1x = (redx <= r1x) & (greenx <= g1x) & (bluex <= b1x)
    mask2x = (redx >= r1) & (greenx >= g1) & (bluex >= b1)
    mask1y = (redy <= r1x) & (greeny <= g1x) & (bluey <= b1x)
    mask2y = (redy >= r1) & (greeny >= g1) & (bluey >= b1)
    datax[:,:,:3][mask1x] = [r2, g2, b2]
    datax[:,:,:3][mask2x] = [r2, g2, b2]
    datay[:,:,:3][mask1y] = [r2, g2, b2]
    datay[:,:,:3][mask2y] = [r2, g2, b2]
    x = Image.fromarray(datax)
    y = Image.fromarray(datay)
    # BUGFIX: Image.convert returns a NEW image; the results were previously
    # discarded, so OCR silently ran on the RGB images instead of grayscale.
    x = x.convert("L")
    y = y.convert("L")
    xpos = pytesseract.image_to_string(x, config="--psm 13 outputbase digits")
    ypos = pytesseract.image_to_string(y, config="--psm 13 outputbase digits")
    pos = [_strip_ocr_noise(xpos), _strip_ocr_noise(ypos)]
    confirms = []
    for element in items:
        # Accept a hit anywhere within a +/-15 box around the marker.
        min_x = int(float(element[0])) - 15
        max_x = int(float(element[0])) + 15
        min_y = int(float(element[1])) - 15
        max_y = int(float(element[1])) + 15
        if pos[0].isdigit() and pos[1].isdigit():
            confirms.append(min_x <= int(pos[0]) <= max_x and min_y <= int(pos[1]) <= max_y)
    if any(confirms):
        print("All Match\n ---------")
        print(pos[0], pos[1])
        return True
    else:
        print("Miss\n ---------")
        print(pos[0], pos[1])
        return False
# Main loop: keep polling while the New World client process is running.
while "NewWorld.exe" in (p.name() for p in psutil.process_iter()):
    # Grab the coordinate readout region from the top-right of the screen
    # (bbox values assume a 3440x1440 display).
    screen = ImageGrab.grab(bbox=(3191, 19, 3440, 39))
    remote_image = screen.convert('RGBA')
    remote_image.save('grabbed.png')  # debug snapshot of the OCR input
    if screen_loc_check(node, remote_image) is True:
        # Near a marker: play a short 880 Hz beep.
        duration = 333
        freq = 880
        winsound.Beep(freq, duration)
    sleep(1)  # throttle to roughly one OCR pass per second
| 34.740385 | 139 | 0.575699 | import winsound
from PIL import ImageGrab, ImageOps, Image
import pytesseract
import psutil
from time import sleep
import pathlib
import iron_markers as imark
import essence_markers as emark
import chest_essence as ce
import numpy as np
localpath = str(pathlib.Path(__file__).parent.resolve())
# Point pytesseract at the Tesseract binary bundled next to this script.
pytesseract.pytesseract.tesseract_cmd = rf"{localpath}\Tesseract-OCR\tesseract.exe"
# Marker set to alert on (chest-essence coordinates).
node = ce.chest_essence
def screen_loc_check(items, screen_img):
    """Return True if the OCR'd on-screen position is near any marker in *items*.

    *items* is an iterable of [x, y] pairs (numeric strings/numbers); a hit is
    any marker within +/-15 of the OCR'd position on both axes.  *screen_img*
    is a PIL image of the in-game coordinate readout.
    """
    # OCR the z readout first: when z >= 100 the readout is wider, which
    # shifts where the x and y fields sit inside the screenshot.
    z = ImageOps.crop(screen_img, (173,0,50,0))
    zpos = pytesseract.image_to_string(z, config="--psm 13 outputbase digits")
    zpos = str(zpos).replace('\n', '').replace('\x0c', '').replace('(', '').replace(']', '').replace('[', '')
    if zpos.isdigit() and int(float(zpos)) >= 100:
        xcrop = (0,0,220,0)
        ycrop = (82,0,128,0)
    else:
        xcrop = (0,0,210,0)
        ycrop = (88,0,120,0)
    # Crop out the x and y fields and upscale them for better OCR accuracy.
    x = ImageOps.crop(screen_img, xcrop)
    y = ImageOps.crop(screen_img, ycrop)
    x = x.resize((150, 100))
    y = y.resize((150, 100))
    # Threshold: blank out pixels darker than (110, 105, 70) or brighter than
    # (235, 235, 165), leaving only mid-tone glyph pixels.
    datax = np.array(x)
    datay = np.array(y)
    r1, g1, b1 = 235, 235, 165
    r1x, g1x, b1x = 110, 105, 70
    r2, g2, b2 = 0, 0, 0
    redx, greenx, bluex = datax[:,:,0], datax[:,:,1], datax[:,:,2]
    redy, greeny, bluey = datay[:,:,0], datay[:,:,1], datay[:,:,2]
    mask1x = (redx <= r1x) & (greenx <= g1x) & (bluex <= b1x)
    mask2x = (redx >= r1) & (greenx >= g1) & (bluex >= b1)
    mask1y = (redy <= r1x) & (greeny <= g1x) & (bluey <= b1x)
    mask2y = (redy >= r1) & (greeny >= g1) & (bluey >= b1)
    datax[:,:,:3][mask1x] = [r2, g2, b2]
    datax[:,:,:3][mask2x] = [r2, g2, b2]
    datay[:,:,:3][mask1y] = [r2, g2, b2]
    datay[:,:,:3][mask2y] = [r2, g2, b2]
    x = Image.fromarray(datax)
    y = Image.fromarray(datay)
    # NOTE(review): Image.convert returns a new image; these two results are
    # discarded, so the conversion to grayscale is a no-op — confirm intent.
    x.convert("L")
    y.convert("L")
    xpos = pytesseract.image_to_string(x, config="--psm 13 outputbase digits")
    ypos = pytesseract.image_to_string(y, config="--psm 13 outputbase digits")
    # Strip newlines, form feeds and stray bracket characters from the OCR text.
    xpos = str(xpos).replace('\n', '').replace('\x0c', '').replace('(', '').replace(']', '').replace('[', '')
    ypos = str(ypos).replace('\n', '').replace('\x0c', '').replace('(', '').replace(']', '').replace('[', '')
    pos = [xpos, ypos]
    confirms = []
    for element in items:
        # Accept a hit anywhere within a +/-15 box around the marker.
        min_x = int(float(element[0]))-15
        max_x = int(float(element[0]))+15
        min_y = int(float(element[1]))-15
        max_y = int(float(element[1]))+15
        if pos[0].isdigit() and pos[1].isdigit():
            if int(float(pos[0])) >= min_x and int(float(pos[0])) <= max_x and int(float(pos[1])) >= min_y and int(float(pos[1])) <= max_y:
                confirms.append(True)
            else:
                confirms.append(False)
        else:
            pass
    if any(confirms):
        print("All Match\n ---------")
        print(pos[0], pos[1])
        return True
    else:
        print("Miss\n ---------")
        print(pos[0], pos[1])
        return False
# Main loop: keep polling while the New World client process is running.
while "NewWorld.exe" in (p.name() for p in psutil.process_iter()):
    # Grab the coordinate readout region (bbox assumes a 3440x1440 display).
    screen = ImageGrab.grab(bbox=(3191, 19, 3440, 39))
    remote_image = screen.convert('RGBA')
    remote_image.save('grabbed.png')  # debug snapshot of the OCR input
    if screen_loc_check(node, remote_image) is True:
        # Near a marker: play a short 880 Hz beep.
        duration = 333
        freq = 880
        winsound.Beep(freq, duration)
    sleep(1)  # throttle to roughly one OCR pass per second
| true | true |
f73673a5069a8351aaff5f959c4efb2b38cc83ca | 5,325 | py | Python | models/Conversation.py | amstee/NeoRestAPI | 9c0e28a82ae0d23bab03993340df73383a9454af | [
"Apache-2.0"
] | 3 | 2018-09-20T16:27:39.000Z | 2019-10-29T23:35:14.000Z | models/Conversation.py | amstee/NeoRestAPI | 9c0e28a82ae0d23bab03993340df73383a9454af | [
"Apache-2.0"
] | 1 | 2021-02-08T20:23:30.000Z | 2021-02-08T20:23:30.000Z | models/Conversation.py | amstee/NeoRestAPI | 9c0e28a82ae0d23bab03993340df73383a9454af | [
"Apache-2.0"
] | null | null | null | from config.database import db
from dateutil import parser as DateParser
from flask_socketio import emit
from utils.log import logger_set
import datetime
from config.log import LOG_DATABASE_FILE
from mobile import push_ios as ios
from mobile import push_android as android
logger = logger_set(module=__name__, file=LOG_DATABASE_FILE)
class Conversation(db.Model):
    """ORM model: a chat conversation attached to a circle.

    Holds the user links, the ordered messages, and a flag controlling
    whether the circle's device may take part in the conversation.
    """
    __tablename__ = "conversations"
    id = db.Column(db.Integer, primary_key=True)
    circle_id = db.Column(db.Integer, db.ForeignKey("circles.id"))
    name = db.Column(db.String(120))
    created = db.Column(db.DateTime)
    updated = db.Column(db.DateTime)
    device_access = db.Column(db.Boolean)
    circle = db.relationship("Circle", back_populates="conversations")
    links = db.relationship("UserToConversation", back_populates="conversation")
    messages = db.relationship("Message", back_populates="conversation", order_by="Message.id",
                               cascade="all, delete-orphan")

    def __repr__(self):
        return "<Conversation(id='%d' name='%s' created='%s' updated='%s')>" % (self.id, self.name,
                                                                                str(self.created), str(self.updated))

    def __init__(self, name=None, created=None, updated=None,
                 device_access=False, circle=None):
        """Create the conversation and add+flush it to the session.

        ``created``/``updated`` accept datetime objects or parseable date
        strings.  BUGFIX: they previously defaulted to
        ``datetime.datetime.now()`` evaluated once at import time, so every
        conversation created without explicit timestamps shared the same
        stale value; they now default to the time of the call.
        """
        now = datetime.datetime.now()
        if created is None:
            created = now
        if updated is None:
            updated = now
        self.created = DateParser.parse(created) if isinstance(created, str) else created
        self.updated = DateParser.parse(updated) if isinstance(updated, str) else updated
        self.name = name if (name is not None and name != "") else "Conversation"
        if circle is not None:
            self.circle = circle
        self.device_access = device_access
        db.session.add(self)
        db.session.flush()
        logger.debug("Database add: conversations%s", {"id": self.id,
                                                       "name": self.name,
                                                       "circle_id": self.circle_id,
                                                       "device_access": self.device_access})

    def mobile_notification(self, title, body):
        """Push a mobile notification to every user in the conversation.

        NOTE(review): only the Android push path is wired up; the iOS push
        (mobile.push_ios) is imported at module level but not used here.
        """
        for link in self.links:
            android.send_notification(link.user, title=title, body=body)

    def has_members(self, *args):
        """Return True if every given user is linked to this conversation."""
        linked_ids = {link.user_id for link in self.links}
        return all(member.id in linked_ids for member in args)

    def update_content(self, created=None, updated=None, name=None, device_access=None):
        """Update the provided fields, refresh ``updated`` and commit.

        BUGFIX: ``updated`` previously defaulted to
        ``datetime.datetime.now()`` evaluated once at import time; it now
        defaults to the time of the call.
        """
        if updated is None:
            updated = datetime.datetime.now()
        if created is not None:
            self.created = DateParser.parse(created) if isinstance(created, str) else created
        self.updated = DateParser.parse(updated) if isinstance(updated, str) else updated
        if name is not None and name != "":
            self.name = name
        if device_access is not None:
            self.device_access = device_access
        db.session.commit()
        db.session.flush()
        logger.debug("Database update: conversations%s", {"id": self.id,
                                                          "name": self.name,
                                                          "circle_id": self.circle_id,
                                                          "device_access": self.device_access})

    def notify_users(self, p1='conversation', p2=None):
        """Emit a socket.io event to the conversation's room.

        ``p1`` is the event name, ``p2`` the payload dict; the conversation
        id is always injected into the payload.
        """
        if p2 is None:
            p2 = {}
        p2['conversation_id'] = self.id
        emit(p1, p2, room='conversation_' + str(self.id), namespace='/')

    def set_other_admin(self):
        """Promote every remaining linked user to ADMIN privilege."""
        for link in self.links:
            link.update_content(privilege="ADMIN")
        return True

    def check_validity(self):
        """Delete the conversation if it has at most one participant.

        The device counts as a participant when ``device_access`` is set.
        Returns True if the conversation is still valid, False if it was
        deleted (the caller is expected to commit).
        """
        if (len(self.links) + (1 if self.device_access else 0)) <= 1:
            for link in self.links:
                db.session.delete(link)
            db.session.delete(self)
            return False
        return True

    def get_content(self):
        """Return the full JSON-serializable representation (links + messages)."""
        return {
            "id": self.id,
            "name": self.name,
            "created": self.created,
            "updated": self.updated,
            "circle": self.circle.get_simple_content(),
            "links": [link.get_content() for link in self.links],
            "messages": [message.get_simple_content() for message in self.messages],
            "device_access": self.device_access
        }

    def get_simple_content(self):
        """Return a lightweight JSON-serializable representation (no relations)."""
        return {
            "id": self.id,
            "name": self.name,
            "created": self.created,
            "updated": self.updated,
            "circle_id": self.circle_id,
            "device_access": self.device_access
        }
| 39.738806 | 118 | 0.53277 | from config.database import db
from dateutil import parser as DateParser
from flask_socketio import emit
from utils.log import logger_set
import datetime
from config.log import LOG_DATABASE_FILE
from mobile import push_ios as ios
from mobile import push_android as android
logger = logger_set(module=__name__, file=LOG_DATABASE_FILE)
class Conversation(db.Model):
    """ORM model: a chat conversation attached to a circle."""
    __tablename__ = "conversations"
    id = db.Column(db.Integer, primary_key=True)
    circle_id = db.Column(db.Integer, db.ForeignKey("circles.id"))
    name = db.Column(db.String(120))
    created = db.Column(db.DateTime)
    updated = db.Column(db.DateTime)
    device_access = db.Column(db.Boolean)
    circle = db.relationship("Circle", back_populates="conversations")
    links = db.relationship("UserToConversation", back_populates="conversation")
    messages = db.relationship("Message", back_populates="conversation", order_by="Message.id",
                               cascade="all, delete-orphan")
    def __repr__(self):
        return "<Conversation(id='%d' name='%s' created='%s' updated='%s')>" % (self.id, self.name,
                                                                                str(self.created), str(self.updated))
    # NOTE(review): both datetime.datetime.now() defaults below are evaluated
    # once at import time, so omitted timestamps share a stale value; prefer
    # None defaults resolved inside the body.
    def __init__(self, name=None, created=datetime.datetime.now(), updated=datetime.datetime.now(),
                 device_access=False, circle=None):
        """Create the conversation and add+flush it to the session.

        ``created``/``updated`` accept datetime objects or parseable strings.
        """
        if created is not None:
            if type(created) is str:
                self.created = DateParser.parse(created)
            else:
                self.created = created
        if updated is not None:
            if type(updated) is str:
                self.updated = DateParser.parse(updated)
            else:
                self.updated = updated
        if name is not None and name != "":
            self.name = name
        else:
            self.name = "Conversation"
        if circle is not None:
            self.circle = circle
        self.device_access = device_access
        db.session.add(self)
        db.session.flush()
        logger.debug("Database add: conversations%s", {"id": self.id,
                                                       "name": self.name,
                                                       "circle_id": self.circle_id,
                                                       "device_access": self.device_access})
    def mobile_notification(self, title, body):
        """Push an Android notification to every linked user."""
        for link in self.links:
            android.send_notification(link.user, title=title, body=body)
    def has_members(self, *args):
        """Return True if every given user is linked to this conversation."""
        for member in args:
            tf = False
            for link in self.links:
                if link.user_id == member.id:
                    tf = True
            if tf is False:
                return False
        return True
    def update_content(self, created=None, updated=datetime.datetime.now(), name=None, device_access=None):
        """Update the provided fields and commit."""
        if created is not None:
            if type(created) is str:
                self.created = DateParser.parse(created)
            else:
                self.created = created
        if updated is not None:
            if type(updated) is str:
                self.updated = DateParser.parse(updated)
            else:
                self.updated = updated
        if name is not None and name != "":
            self.name = name
        if device_access is not None:
            self.device_access = device_access
        db.session.commit()
        db.session.flush()
        logger.debug("Database update: conversations%s", {"id": self.id,
                                                          "name": self.name,
                                                          "circle_id": self.circle_id,
                                                          "device_access": self.device_access})
    def notify_users(self, p1='conversation', p2=None):
        """Emit socket.io event *p1* with payload *p2* to the conversation room."""
        if p2 is None:
            p2 = {}
        p2['conversation_id'] = self.id
        emit(p1, p2, room='conversation_' + str(self.id), namespace='/')
    def set_other_admin(self):
        """Promote every remaining linked user to ADMIN privilege."""
        for link in self.links:
            link.update_content(privilege="ADMIN")
        return True
    def check_validity(self):
        """Delete the conversation if it has at most one participant.

        The device counts as a participant when ``device_access`` is set.
        """
        if (len(self.links) + (1 if self.device_access else 0)) <= 1:
            for link in self.links:
                db.session.delete(link)
            db.session.delete(self)
            return False
        return True
    def get_content(self):
        """Return the full JSON-serializable representation (links + messages)."""
        return {
            "id": self.id,
            "name": self.name,
            "created": self.created,
            "updated": self.updated,
            "circle": self.circle.get_simple_content(),
            "links": [link.get_content() for link in self.links],
            "messages": [message.get_simple_content() for message in self.messages],
            "device_access": self.device_access
        }
    def get_simple_content(self):
        """Return a lightweight JSON-serializable representation (no relations)."""
        return {
            "id": self.id,
            "name": self.name,
            "created": self.created,
            "updated": self.updated,
            "circle_id": self.circle_id,
            "device_access": self.device_access
        }
| true | true |
f73674fecfcfecd4c0550bfc2dbcbb507f581464 | 9,561 | py | Python | test/comparison/v1/REF_M_30806+30807+30808_Specular_all_set_1.py | jenest/reflectivity_ui | 9fc746e1729d57268c621e6a655b5022fba3af8b | [
"Apache-2.0"
] | 1 | 2017-11-19T22:23:13.000Z | 2017-11-19T22:23:13.000Z | test/comparison/v1/REF_M_30806+30807+30808_Specular_all_set_1.py | mdoucet/reflectivity_ui | aa646e6f8ad91eaedf70ec7b9230e79970e3cdf0 | [
"Apache-2.0"
] | 3 | 2020-05-28T13:29:53.000Z | 2021-07-14T21:37:32.000Z | test/comparison/v1/REF_M_30806+30807+30808_Specular_all_set_1.py | mdoucet/reflectivity_ui | aa646e6f8ad91eaedf70ec7b9230e79970e3cdf0 | [
"Apache-2.0"
] | 1 | 2021-06-09T15:12:07.000Z | 2021-06-09T15:12:07.000Z | # Cross-section: Off_Off
# Run:30806
######################################################################
#Python Script Generated by GeneratePythonScript Algorithm
######################################################################
LoadEventNexus(Filename='/SNS/REF_M/IPTS-21391/nexus/REF_M_30794.nxs.h5',
OutputWorkspace='raw_events')
FilterByLogValue(InputWorkspace='raw_events',
OutputWorkspace='30794_entry-Off_Off',
LogName='BL4A:SF:ICP:getDI',
MinimumValue=15,
MaximumValue=15,
TimeTolerance=0.10000000000000001,
LogBoundary='Left')
GroupWorkspaces(InputWorkspaces='30794_entry-Off_Off',
OutputWorkspace='wsg')
LoadEventNexus(Filename='/SNS/REF_M/IPTS-21391/nexus/REF_M_30806.nxs.h5',
OutputWorkspace='raw_events')
FilterByLogValue(InputWorkspace='raw_events',
OutputWorkspace='30806_entry-Off_Off',
LogName='BL4A:SF:ICP:getDI',
MinimumValue=15,
MaximumValue=15,
TimeTolerance=0.10000000000000001,
LogBoundary='Left')
GroupWorkspaces(InputWorkspaces='30806_entry-Off_Off',
OutputWorkspace='wsg')
MagnetismReflectometryReduction(InputWorkspace='wsg',
NormalizationWorkspace='30794_entry-Off_Off',
SignalPeakPixelRange='181,195',
SubtractSignalBackground=False,
SignalBackgroundPixelRange='49,88',
NormPeakPixelRange='202,216',
SubtractNormBackground=False,
NormBackgroundPixelRange='94,104',
LowResDataAxisPixelRange='69,172',
LowResNormAxisPixelRange='71,175',
TimeAxisRange='11761.6,44977.7',
RoundUpPixel=False,
SpecularPixel=187.30000000000001,
FinalRebin=False,
QMin=0.001,
QStep=-0.02,
TimeAxisStep=400,
ConstQTrim=0.10000000000000001,
OutputWorkspace='r30806')
Scale(InputWorkspace='r30806',
OutputWorkspace='r30806',
Factor=1.6068516739750773)
Scale(InputWorkspace='r30806',
OutputWorkspace='r30806_scaled',
Factor=2.4266100000000002)
AddSampleLog(Workspace='r30806_scaled',
LogName='scaling_factor',
LogText='2.42661',
LogType='Number')
# Run:30807
######################################################################
#Python Script Generated by GeneratePythonScript Algorithm
######################################################################
LoadEventNexus(Filename='/SNS/REF_M/IPTS-21391/nexus/REF_M_30794.nxs.h5',
OutputWorkspace='raw_events')
FilterByLogValue(InputWorkspace='raw_events',
OutputWorkspace='30794_entry-Off_Off',
LogName='BL4A:SF:ICP:getDI',
MinimumValue=15,
MaximumValue=15,
TimeTolerance=0.10000000000000001,
LogBoundary='Left')
GroupWorkspaces(InputWorkspaces='30794_entry-Off_Off',
OutputWorkspace='wsg')
LoadEventNexus(Filename='/SNS/REF_M/IPTS-21391/nexus/REF_M_30807.nxs.h5',
OutputWorkspace='raw_events')
FilterByLogValue(InputWorkspace='raw_events',
OutputWorkspace='30807_entry-Off_Off',
LogName='BL4A:SF:ICP:getDI',
MinimumValue=15,
MaximumValue=15,
TimeTolerance=0.10000000000000001,
LogBoundary='Left')
GroupWorkspaces(InputWorkspaces='30807_entry-Off_Off',
OutputWorkspace='wsg')
GroupWorkspaces(InputWorkspaces='30807_entry-Off_Off',
OutputWorkspace='wsg')
MagnetismReflectometryReduction(InputWorkspace='wsg',
NormalizationWorkspace='30794_entry-Off_Off',
SignalPeakPixelRange='181,195',
SubtractSignalBackground=False,
SignalBackgroundPixelRange='49,88',
NormPeakPixelRange='202,216',
SubtractNormBackground=False,
NormBackgroundPixelRange='94,104',
LowResDataAxisPixelRange='69,172',
LowResNormAxisPixelRange='71,175',
TimeAxisRange='11768.3,45065.6',
RoundUpPixel=False,
SpecularPixel=187.80000000000001,
FinalRebin=False,
QMin=0.001,
QStep=-0.02,
TimeAxisStep=400,
ConstQTrim=0.10000000000000001,
OutputWorkspace='r30807')
Scale(InputWorkspace='r30807',
OutputWorkspace='r30807',
Factor=0.57943431658725553)
Scale(InputWorkspace='r30807',
OutputWorkspace='r30807_scaled',
Factor=0.61799999999999999)
AddSampleLog(Workspace='r30807_scaled',
LogName='scaling_factor',
LogText='0.618',
LogType='Number')
# Run:30808
######################################################################
#Python Script Generated by GeneratePythonScript Algorithm
######################################################################
LoadEventNexus(Filename='/SNS/REF_M/IPTS-21391/nexus/REF_M_30796.nxs.h5',
OutputWorkspace='raw_events')
FilterByLogValue(InputWorkspace='raw_events',
OutputWorkspace='30796_entry-Off_Off',
LogName='BL4A:SF:ICP:getDI',
MinimumValue=15,
MaximumValue=15,
TimeTolerance=0.10000000000000001,
LogBoundary='Left')
GroupWorkspaces(InputWorkspaces='30796_entry-Off_Off',
OutputWorkspace='wsg')
LoadEventNexus(Filename='/SNS/REF_M/IPTS-21391/nexus/REF_M_30808.nxs.h5',
OutputWorkspace='raw_events')
FilterByLogValue(InputWorkspace='raw_events',
OutputWorkspace='30808_entry-Off_Off',
LogName='BL4A:SF:ICP:getDI',
MinimumValue=15,
MaximumValue=15,
TimeTolerance=0.10000000000000001,
LogBoundary='Left')
GroupWorkspaces(InputWorkspaces='30808_entry-Off_Off',
OutputWorkspace='wsg')
MagnetismReflectometryReduction(InputWorkspace='wsg',
NormalizationWorkspace='30796_entry-Off_Off',
SignalPeakPixelRange='183,197',
SubtractSignalBackground=False,
SignalBackgroundPixelRange='49,88',
NormPeakPixelRange='202,216',
SubtractNormBackground=False,
NormBackgroundPixelRange='94,104',
LowResDataAxisPixelRange='69,172',
LowResNormAxisPixelRange='82,175',
TimeAxisRange='11733.5,45058.3',
RoundUpPixel=False,
SpecularPixel=189,
FinalRebin=False,
QMin=0.001,
QStep=-0.02,
TimeAxisStep=400,
ConstQTrim=0.10000000000000001,
OutputWorkspace='r30808')
Scale(InputWorkspace='r30808',
OutputWorkspace='r30808',
Factor=0.19100143649212772)
Scale(InputWorkspace='r30808',
OutputWorkspace='r30808_scaled',
Factor=0.72276980360217025)
AddSampleLog(Workspace='r30808_scaled',
LogName='scaling_factor',
LogText='0.722769803602',
LogType='Number')
| 56.573964 | 77 | 0.448279 | true | true | |
f7367526b246e8930080e23b35f47d510ab893d1 | 5,922 | py | Python | Python/venv/lib/python3.7/site-packages/IPython/terminal/ptutils.py | HenriqueBuzin/TCC | 5fb9db42e97e28131bff97da3252a9ee33b3684e | [
"Unlicense"
] | null | null | null | Python/venv/lib/python3.7/site-packages/IPython/terminal/ptutils.py | HenriqueBuzin/TCC | 5fb9db42e97e28131bff97da3252a9ee33b3684e | [
"Unlicense"
] | 1 | 2020-03-16T07:10:32.000Z | 2020-03-16T07:10:32.000Z | Python/venv/lib/python3.7/site-packages/IPython/terminal/ptutils.py | HenriqueBuzin/TCC | 5fb9db42e97e28131bff97da3252a9ee33b3684e | [
"Unlicense"
] | null | null | null | """prompt-toolkit utilities
Everything in this module is a private API,
not to be used outside IPython.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import unicodedata
from wcwidth import wcwidth
from IPython.core.completer import (
provisionalcompleter, cursor_to_position,
_deduplicate_completions)
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.lexers import Lexer
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.patch_stdout import patch_stdout
import pygments.lexers as pygments_lexers
_completion_sentinel = object()
def _elide(string, *, min_elide=30):
"""
If a string is long enough, and has at least 2 dots,
replace the middle part with ellipses.
If three consecutive dots, or two consecutive dots are encountered these are
replaced by the equivalents HORIZONTAL ELLIPSIS or TWO DOT LEADER unicode
equivalents
"""
string = string.replace('...','\N{HORIZONTAL ELLIPSIS}')
string = string.replace('..','\N{TWO DOT LEADER}')
if len(string) < min_elide:
return string
parts = string.split('.')
if len(parts) <= 3:
return string
return '{}.{}\N{HORIZONTAL ELLIPSIS}{}.{}'.format(parts[0], parts[1][0], parts[-2][-1], parts[-1])
def _adjust_completion_text_based_on_context(text, body, offset):
if text.endswith('=') and len(body) > offset and body[offset] is '=':
return text[:-1]
else:
return text
class IPythonPTCompleter(Completer):
    """Adaptor to provide IPython completions to prompt_toolkit"""
    def __init__(self, ipy_completer=None, shell=None):
        # Either an explicit completer or a shell (whose Completer is used
        # lazily) must be supplied.
        if shell is None and ipy_completer is None:
            raise TypeError("Please pass shell=an InteractiveShell instance.")
        self._ipy_completer = ipy_completer
        self.shell = shell
    @property
    def ipy_completer(self):
        # Prefer the explicitly provided completer; otherwise resolve the
        # shell's completer at access time.
        if self._ipy_completer:
            return self._ipy_completer
        else:
            return self.shell.Completer
    def get_completions(self, document, complete_event):
        """Yield prompt_toolkit Completion objects for *document*."""
        if not document.current_line.strip():
            return
        # Some bits of our completion system may print stuff (e.g. if a module
        # is imported). This context manager ensures that doesn't interfere with
        # the prompt.
        with patch_stdout(), provisionalcompleter():
            body = document.text
            cursor_row = document.cursor_position_row
            cursor_col = document.cursor_position_col
            cursor_position = document.cursor_position
            offset = cursor_to_position(body, cursor_row, cursor_col)
            yield from self._get_completions(body, offset, cursor_position, self.ipy_completer)
    @staticmethod
    def _get_completions(body, offset, cursor_position, ipyc):
        """
        Private equivalent of get_completions(); used only for unit testing.
        """
        debug = getattr(ipyc, 'debug', False)  # NOTE(review): currently unused
        completions = _deduplicate_completions(
            body, ipyc.completions(body, offset))
        for c in completions:
            if not c.text:
                # Guard against completion machinery giving us an empty string.
                continue
            text = unicodedata.normalize('NFC', c.text)
            # When the first character of the completion has a zero length,
            # then it's probably a decomposed unicode character. E.g. caused by
            # the "\dot" completion. Try to compose again with the previous
            # character.
            if wcwidth(text[0]) == 0:
                if cursor_position + c.start > 0:
                    char_before = body[c.start - 1]
                    fixed_text = unicodedata.normalize(
                        'NFC', char_before + text)
                    # Yield the modified completion instead, if this worked.
                    # NOTE(review): the outer branch requires wcwidth(text[0]) == 0,
                    # so this looks like it was meant to test fixed_text — confirm
                    # against upstream IPython.
                    if wcwidth(text[0:1]) == 1:
                        yield Completion(fixed_text, start_position=c.start - offset - 1)
                        continue
            # TODO: Use Jedi to determine meta_text
            # (Jedi currently has a bug that results in incorrect information.)
            # meta_text = ''
            # yield Completion(m, start_position=start_pos,
            #                  display_meta=meta_text)
            display_text = c.text
            adjusted_text = _adjust_completion_text_based_on_context(c.text, body, offset)
            if c.type == 'function':
                yield Completion(adjusted_text, start_position=c.start - offset, display=_elide(display_text+'()'), display_meta=c.type+c.signature)
            else:
                yield Completion(adjusted_text, start_position=c.start - offset, display=_elide(display_text), display_meta=c.type)
class IPythonPTLexer(Lexer):
    """
    Wrapper around PythonLexer and BashLexer.
    """
    def __init__(self):
        l = pygments_lexers
        self.python_lexer = PygmentsLexer(l.Python3Lexer)
        self.shell_lexer = PygmentsLexer(l.BashLexer)
        # Lexers for cell magics that embed another language.
        self.magic_lexers = {
            'HTML': PygmentsLexer(l.HtmlLexer),
            'html': PygmentsLexer(l.HtmlLexer),
            'javascript': PygmentsLexer(l.JavascriptLexer),
            'js': PygmentsLexer(l.JavascriptLexer),
            'perl': PygmentsLexer(l.PerlLexer),
            'ruby': PygmentsLexer(l.RubyLexer),
            'latex': PygmentsLexer(l.TexLexer),
        }
    def lex_document(self, document):
        """Pick a lexer by inspecting the leading shell/magic syntax."""
        text = document.text.lstrip()
        lexer = self.python_lexer
        # '!' shell escapes and %%bash cells are highlighted as shell code.
        if text.startswith('!') or text.startswith('%%bash'):
            lexer = self.shell_lexer
        elif text.startswith('%%'):
            # Other cell magics: use the language-specific lexer if known.
            for magic, l in self.magic_lexers.items():
                if text.startswith('%%' + magic):
                    lexer = l
                    break
        return lexer.lex_document(document)
| 37.245283 | 148 | 0.631712 |
import unicodedata
from wcwidth import wcwidth
from IPython.core.completer import (
provisionalcompleter, cursor_to_position,
_deduplicate_completions)
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.lexers import Lexer
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.patch_stdout import patch_stdout
import pygments.lexers as pygments_lexers
_completion_sentinel = object()
def _elide(string, *, min_elide=30):
    """Shorten a long dotted string by replacing its middle with an ellipsis.

    Runs of three (or two) consecutive dots are first collapsed into the
    single Unicode characters HORIZONTAL ELLIPSIS / TWO DOT LEADER; strings
    shorter than *min_elide* or with at most three dotted segments are then
    returned unchanged.
    """
    string = string.replace('...','\N{HORIZONTAL ELLIPSIS}')
    string = string.replace('..','\N{TWO DOT LEADER}')
    if len(string) < min_elide:
        return string
    parts = string.split('.')
    if len(parts) <= 3:
        return string
    # first segment, first char of second, last char of second-to-last, last segment
    return '{}.{}\N{HORIZONTAL ELLIPSIS}{}.{}'.format(parts[0], parts[1][0], parts[-2][-1], parts[-1])
def _adjust_completion_text_based_on_context(text, body, offset):
if text.endswith('=') and len(body) > offset and body[offset] is '=':
return text[:-1]
else:
return text
class IPythonPTCompleter(Completer):
    """Adaptor to provide IPython completions to prompt_toolkit."""
    def __init__(self, ipy_completer=None, shell=None):
        # Either an explicit completer or a shell (whose Completer is used
        # lazily) must be supplied.
        if shell is None and ipy_completer is None:
            raise TypeError("Please pass shell=an InteractiveShell instance.")
        self._ipy_completer = ipy_completer
        self.shell = shell
    @property
    def ipy_completer(self):
        if self._ipy_completer:
            return self._ipy_completer
        else:
            return self.shell.Completer
    def get_completions(self, document, complete_event):
        """Yield prompt_toolkit Completion objects for *document*."""
        if not document.current_line.strip():
            return
        # patch_stdout keeps anything printed by the completion machinery
        # from interfering with the prompt.
        with patch_stdout(), provisionalcompleter():
            body = document.text
            cursor_row = document.cursor_position_row
            cursor_col = document.cursor_position_col
            cursor_position = document.cursor_position
            offset = cursor_to_position(body, cursor_row, cursor_col)
            yield from self._get_completions(body, offset, cursor_position, self.ipy_completer)
    @staticmethod
    def _get_completions(body, offset, cursor_position, ipyc):
        """Private equivalent of get_completions(); used only for unit testing."""
        debug = getattr(ipyc, 'debug', False)  # NOTE(review): currently unused
        completions = _deduplicate_completions(
            body, ipyc.completions(body, offset))
        for c in completions:
            if not c.text:
                # Guard against completion machinery giving us an empty string.
                continue
            text = unicodedata.normalize('NFC', c.text)
            # When the first character of the completion has a zero length,
            # then it's probably a decomposed unicode character; try to
            # compose it with the preceding buffer character.
            if wcwidth(text[0]) == 0:
                if cursor_position + c.start > 0:
                    char_before = body[c.start - 1]
                    fixed_text = unicodedata.normalize(
                        'NFC', char_before + text)
                    # Yield the recomposed completion instead, if it worked.
                    if wcwidth(text[0:1]) == 1:
                        yield Completion(fixed_text, start_position=c.start - offset - 1)
                        continue
            display_text = c.text
            adjusted_text = _adjust_completion_text_based_on_context(c.text, body, offset)
            if c.type == 'function':
                yield Completion(adjusted_text, start_position=c.start - offset, display=_elide(display_text+'()'), display_meta=c.type+c.signature)
            else:
                yield Completion(adjusted_text, start_position=c.start - offset, display=_elide(display_text), display_meta=c.type)
class IPythonPTLexer(Lexer):
    """Route IPython input to the appropriate Pygments lexer.

    Shell syntax is used for ``!`` escapes and ``%%bash`` cells, a
    language-specific lexer for recognised cell magics, and Python for
    everything else.
    """
    def __init__(self):
        lexers = pygments_lexers
        self.python_lexer = PygmentsLexer(lexers.Python3Lexer)
        self.shell_lexer = PygmentsLexer(lexers.BashLexer)
        # Map each recognised cell-magic name to a dedicated lexer.
        self.magic_lexers = {
            name: PygmentsLexer(cls)
            for name, cls in (
                ('HTML', lexers.HtmlLexer),
                ('html', lexers.HtmlLexer),
                ('javascript', lexers.JavascriptLexer),
                ('js', lexers.JavascriptLexer),
                ('perl', lexers.PerlLexer),
                ('ruby', lexers.RubyLexer),
                ('latex', lexers.TexLexer),
            )
        }
    def lex_document(self, document):
        stripped = document.text.lstrip()
        if stripped.startswith(('!', '%%bash')):
            chosen = self.shell_lexer
        elif stripped.startswith('%%'):
            # First magic whose name matches wins; default to Python.
            chosen = next(
                (lexer for magic, lexer in self.magic_lexers.items()
                 if stripped.startswith('%%' + magic)),
                self.python_lexer,
            )
        else:
            chosen = self.python_lexer
        return chosen.lex_document(document)
| true | true |
f73675bb454e512b33aeba15ea4bcbf27706c67c | 3,113 | py | Python | Scripts/read_msp.py | ljocha/DeepEI | 96aee49192ac805dda7971041c01e16c62cd3cbc | [
"Apache-2.0"
] | null | null | null | Scripts/read_msp.py | ljocha/DeepEI | 96aee49192ac805dda7971041c01e16c62cd3cbc | [
"Apache-2.0"
] | null | null | null | Scripts/read_msp.py | ljocha/DeepEI | 96aee49192ac805dda7971041c01e16c62cd3cbc | [
"Apache-2.0"
] | 1 | 2022-01-12T10:52:30.000Z | 2022-01-12T10:52:30.000Z | from DeepEI.utils import ms2vec, get_cdk_fingerprints, get_cdk_descriptors
from matchms.importing import load_from_msp
import json
import numpy as np
from scipy.sparse import csr_matrix, save_npz
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.rdMolDescriptors import CalcExactMolWt
# incompatible with DeepEI/utils.py
#from pycdk.pycdk import MolFromSmiles, parser_formula, MolToFormula
from concurrent.futures import ProcessPoolExecutor
import os
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('--ncores','-n',type=int,help='number of cores',default=1)
p.add_argument('--dest','-d',type=str,help='destination directory',default='.')
p.add_argument('infile',type=str,help='input file')
args = p.parse_args()
file_msp = args.infile
ncores = args.ncores
dest = args.dest
if not os.path.isdir(dest):
print(f"{dest} does not exist")
exit(1)
def process_mol(nm):
    """Featurize a single (index, spectrum) pair.

    Parameters:
        nm: tuple of (int index, matchms Spectrum) as produced by
            ``enumerate(all_mol)``; the index is only used for logging.

    Returns:
        dict with keys 'smiles' (canonical SMILES), 'name', 'peak_vec',
        'morgan_fp', 'cdk_fp', 'cdk_des' and 'molwt', or an empty dict when
        the molecule is skipped or fails to process (callers filter empty
        dicts out).
    """
    n, m = nm
    osmiles = None  # bound before the try so the except handler can always log it
    try:
        osmiles = m.get('smiles')
        mol = Chem.MolFromSmiles(osmiles)
        if mol is None:
            # RDKit signals an unparsable SMILES by returning None rather
            # than raising; skip explicitly instead of failing downstream.
            print(f"{n}:{osmiles}: unparsable SMILES, skipping")
            return {}
        name = m.get('name')
        peakindex = m.peaks.mz
        peakintensity = m.peaks.intensities
        molwt = CalcExactMolWt(mol)
        if molwt > 2000:
            # Very heavy molecules are out of scope.
            return {}
        smiles = Chem.MolToSmiles(mol)  # canonical form
        morgan_fp = np.array(AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=4096))
        cdk_fp = get_cdk_fingerprints(smiles)
        cdk_des = np.array(get_cdk_descriptors(smiles))
        peak_vec = ms2vec(peakindex, peakintensity)
        print(f"{n}:{osmiles}: done")
        return {
            'smiles': smiles,
            'name': name,
            'peak_vec': peak_vec,
            'morgan_fp': morgan_fp,
            'cdk_fp': cdk_fp,
            'cdk_des': cdk_des,
            'molwt': molwt,
        }
    except Exception as e:
        # Was ``except BaseException``, which also swallowed KeyboardInterrupt
        # and SystemExit inside worker processes.  Keep the best-effort
        # "log and skip" semantics, but only for ordinary errors.
        print(f"{osmiles}: {e}")
        return {}
# --- main pipeline ----------------------------------------------------------
print(f"Loading {file_msp}...")
# load_from_msp yields spectrum objects from the MSP file.
all_mol = load_from_msp(file_msp)
print("done")
# Featurize every spectrum in parallel worker processes.
with ProcessPoolExecutor(max_workers=ncores) as pool:
    all_output = pool.map(process_mol, enumerate(all_mol))
# filter out empty entries (skipped/failed molecules return {}); consuming
# the map iterator after the `with` block is safe because the executor's
# shutdown waits for all submitted tasks to finish.
all_output = list(filter(lambda x: x, all_output))
# Column-wise extraction of each feature across all processed molecules.
all_smiles = list(map(lambda x: x['smiles'], all_output))
Peak_data = np.array(list(map(lambda x: x['peak_vec'], all_output)))
# RI_data = map(lambda x: x['smiles'], all_output)
Morgan_fp = np.array(list(map(lambda x: x['morgan_fp'], all_output)))
CDK_fp = np.array(list(map(lambda x: x['cdk_fp'], all_output)))
CDK_des = np.array(list(map(lambda x: x['cdk_des'], all_output)))
MolWt = np.array(list(map(lambda x: x['molwt'], all_output)))
print("writing output ...")
# All result files are written into the destination directory.
os.chdir(dest)
# np.save('retention.npy', np.array(RI_data))
np.save('descriptor.npy', CDK_des)
np.save('molwt.npy', MolWt)
# Peak vectors and fingerprints are mostly zeros — store them sparse.
Peak_data = csr_matrix(Peak_data)
Morgan_fp = csr_matrix(Morgan_fp)
CDK_fp = csr_matrix(CDK_fp)
save_npz('peakvec.npz', Peak_data)
save_npz('morgan.npz', Morgan_fp)
save_npz('fingerprints.npz', CDK_fp)
with open('all_smiles.json', 'w') as t:
    json.dump(all_smiles, t)
print("done")
| 28.559633 | 81 | 0.712817 | from DeepEI.utils import ms2vec, get_cdk_fingerprints, get_cdk_descriptors
from matchms.importing import load_from_msp
import json
import numpy as np
from scipy.sparse import csr_matrix, save_npz
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.rdMolDescriptors import CalcExactMolWt
from concurrent.futures import ProcessPoolExecutor
import os
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('--ncores','-n',type=int,help='number of cores',default=1)
p.add_argument('--dest','-d',type=str,help='destination directory',default='.')
p.add_argument('infile',type=str,help='input file')
args = p.parse_args()
file_msp = args.infile
ncores = args.ncores
dest = args.dest
if not os.path.isdir(dest):
print(f"{dest} does not exist")
exit(1)
def process_mol(nm):
n,m = nm
try:
osmiles = m.get('smiles')
mol = Chem.MolFromSmiles(osmiles)
name = m.get('name')
peakindex = m.peaks.mz
peakintensity = m.peaks.intensities
molwt = CalcExactMolWt(mol)
if molwt > 2000:
return {}
smiles = Chem.MolToSmiles(mol)
morgan_fp = np.array(AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=4096))
cdk_fp = get_cdk_fingerprints(smiles)
cdk_des = np.array(get_cdk_descriptors(smiles))
peak_vec = ms2vec(peakindex,peakintensity)
print(f"{n}:{osmiles}: done")
return {
'smiles': smiles,
'name': name,
'peak_vec': peak_vec,
'morgan_fp': morgan_fp,
'cdk_fp': cdk_fp,
'cdk_des': cdk_des,
'molwt': molwt,
}
except BaseException as e:
print(f"{osmiles}: {e}")
return {}
print(f"Loading {file_msp}...")
all_mol = load_from_msp(file_msp)
print("done")
with ProcessPoolExecutor(max_workers=ncores) as pool:
all_output = pool.map(process_mol, enumerate(all_mol))
all_output = list(filter(lambda x: x,all_output))
all_smiles = list(map(lambda x: x['smiles'], all_output))
Peak_data = np.array(list(map(lambda x: x['peak_vec'], all_output)))
Morgan_fp = np.array(list(map(lambda x: x['morgan_fp'], all_output)))
CDK_fp = np.array(list(map(lambda x: x['cdk_fp'], all_output)))
CDK_des = np.array(list(map(lambda x: x['cdk_des'], all_output)))
MolWt = np.array(list(map(lambda x: x['molwt'], all_output)))
print("writing output ...")
os.chdir(dest)
np.save('descriptor.npy', CDK_des)
np.save('molwt.npy', MolWt)
Peak_data = csr_matrix(Peak_data)
Morgan_fp = csr_matrix(Morgan_fp)
CDK_fp = csr_matrix(CDK_fp)
save_npz('peakvec.npz', Peak_data)
save_npz('morgan.npz', Morgan_fp)
save_npz('fingerprints.npz', CDK_fp)
with open('all_smiles.json', 'w') as t:
json.dump(all_smiles, t)
print("done")
| true | true |
f73675de7d4f1ed4ca073bbdca3fc505c9bee00a | 1,629 | py | Python | ish_parser/Components/SkyCoverComponent.py | vtoupet/ish_parser | 121b553937c89f14ab7af62f7daaf301f18c0aa8 | [
"MIT"
] | 52 | 2015-02-02T21:52:58.000Z | 2022-03-03T12:38:44.000Z | ish_parser/Components/SkyCoverComponent.py | vtoupet/ish_parser | 121b553937c89f14ab7af62f7daaf301f18c0aa8 | [
"MIT"
] | 19 | 2015-07-27T13:34:01.000Z | 2021-07-23T14:01:41.000Z | ish_parser/Components/SkyCoverComponent.py | vtoupet/ish_parser | 121b553937c89f14ab7af62f7daaf301f18c0aa8 | [
"MIT"
] | 31 | 2015-05-13T03:24:55.000Z | 2021-07-20T12:53:44.000Z | from ..Distance import Distance
from ..CloudCoverage import CloudCoverage
from ..Constant import Constant
from .BaseComponent import BaseComponent
class SkyCoverComponent(BaseComponent):
    """Handle GA1..GA8 sky-cover-layer component types.

    Each GA section describes one cloud layer: fractional coverage (oktas),
    layer base height (metres) and a two-digit cloud genus code, each with
    its own quality flag.
    """

    # Two-digit cloud genus code -> human-readable name.
    CLOUD_TYPES = {
        "00": "Cirrus (Ci)",
        "01": "Cirrocumulus (Cc)",
        "02": "Cirrostratus (Cs)",
        "03": "Altocumulus (Ac)",
        "04": "Altostratus (As)",
        "05": "Nimbostratus (Ns)",
        "06": "Stratocumulus (Sc)",
        "07": "Stratus (St)",
        "08": "Cumulus (Cu)",
        "09": "Cumulonimbus (Cb)",
        "10": """Cloud not visible owing to darkness, fog,
duststorm, sandstorm, or other analogous phenomena / sky obscured""",
        "11": "Not used",
        "12": "Towering Cumulus (Tcu)",
        "13": "Stratus fractus (Stfra)",
        "14": "Stratocumulus Lenticular (Scsl)",
        "15": "Cumulus Fractus (Cufra)",
        "16": "Cumulonimbus Mammatus (Cbmam)",
        "17": "Altocumulus Lenticular (Acsl)",
        "18": "Altocumulus Castellanus (Accas)",
        "19": "Altocumulus Mammatus (Acmam)",
        "20": "Cirrocumulus Lenticular (Ccsl)",
        "21": "Cirrus and/or Cirrocumulus",
        "22": "Stratus and/or Fracto-stratus",
        "23": "Cumulus and/or Fracto-cumulus",
    }

    def loads(self, string):
        """Populate ``self.sky_cover`` from the raw GA section body.

        NOTE(review): the slices below overlap — ``string[9:10]`` is used as
        the base-height quality flag while ``string[9:11]`` is read as the
        cloud-type code; confirm these offsets against the ISD/ISH format
        specification before changing them.
        """
        coverage = CloudCoverage(string[0:2], CloudCoverage.OKTA, string[2:3])
        base_height = Distance(int(string[4:9]), Distance.METERS, string[9:10])
        cloud_type = Constant(string[9:11], None, string[11:12], self.CLOUD_TYPES)
        self.sky_cover = {
            'coverage': coverage,
            'base_height': base_height,
            'cloud_type': cloud_type,
        }

    def __repr__(self):
        return str(self.sky_cover)

    def __str__(self):
        return str(self.sky_cover)
| 32.58 | 80 | 0.622468 | from ..Distance import Distance
from ..CloudCoverage import CloudCoverage
from ..Constant import Constant
from .BaseComponent import BaseComponent
class SkyCoverComponent(BaseComponent):
CLOUD_TYPES = {
"00": "Cirrus (Ci)",
"01": "Cirrocumulus (Cc)",
"02": "Cirrostratus (Cs)",
"03": "Altocumulus (Ac)",
"04": "Altostratus (As)",
"05": "Nimbostratus (Ns)",
"06": "Stratocumulus (Sc)",
"07": "Stratus (St)",
"08": "Cumulus (Cu)",
"09": "Cumulonimbus (Cb)",
"10": """Cloud not visible owing to darkness, fog,
duststorm, sandstorm, or other analogous phenomena / sky obscured""",
"11": "Not used",
"12": "Towering Cumulus (Tcu)",
"13": "Stratus fractus (Stfra)",
"14": "Stratocumulus Lenticular (Scsl)",
"15": "Cumulus Fractus (Cufra)",
"16": "Cumulonimbus Mammatus (Cbmam)",
"17": "Altocumulus Lenticular (Acsl)",
"18": "Altocumulus Castellanus (Accas)",
"19": "Altocumulus Mammatus (Acmam)",
"20": "Cirrocumulus Lenticular (Ccsl)",
"21": "Cirrus and/or Cirrocumulus",
"22": "Stratus and/or Fracto-stratus",
"23": "Cumulus and/or Fracto-cumulus"}
def loads(self, string):
self.sky_cover = {
'coverage': CloudCoverage(string[0:2],
CloudCoverage.OKTA, string[2:3]),
'base_height': Distance(int(string[4:9]),
Distance.METERS, string[9:10]),
'cloud_type': Constant(string[9:11], None,
string[11:12], self.CLOUD_TYPES)}
def __repr__(self):
return str(self.sky_cover)
def __str__(self):
return str(self.sky_cover)
| true | true |
f736780580822402f201e833c99571db43c8978d | 17,290 | py | Python | src/waldur_mastermind/marketplace/tests/test_order_items.py | virtengine/ve-waldur-v2 | c9d90bc659171de678bd552e92cfc59cffb6fc3a | [
"MIT"
] | 2 | 2018-08-16T14:42:24.000Z | 2019-07-20T03:36:59.000Z | src/waldur_mastermind/marketplace/tests/test_order_items.py | virtengine/ve-waldur-v2 | c9d90bc659171de678bd552e92cfc59cffb6fc3a | [
"MIT"
] | null | null | null | src/waldur_mastermind/marketplace/tests/test_order_items.py | virtengine/ve-waldur-v2 | c9d90bc659171de678bd552e92cfc59cffb6fc3a | [
"MIT"
] | 3 | 2019-02-27T19:17:49.000Z | 2019-07-25T21:40:01.000Z | import unittest
from ddt import data, ddt
from django.core.exceptions import ValidationError
from rest_framework import status, test
from waldur_core.quotas import signals as quota_signals
from waldur_core.structure.tests import fixtures as structure_fixtures
from waldur_mastermind.marketplace import models
from waldur_mastermind.marketplace.tests import factories, fixtures
@ddt
class OrderItemFilterTest(test.APITransactionTestCase):
    """Visibility and filtering rules for the order-item list endpoint."""

    def setUp(self):
        self.fixture = structure_fixtures.ProjectFixture()
        self.project = self.fixture.project
        self.manager = self.fixture.manager
        self.order = factories.OrderFactory(
            project=self.project, created_by=self.manager
        )
        self.order_item = factories.OrderItemFactory(order=self.order)
        self.url = factories.OrderItemFactory.get_list_url()

    @data('staff', 'owner', 'admin', 'manager')
    def test_items_should_be_visible_to_colleagues_and_staff(self, user):
        self.client.force_authenticate(getattr(self.fixture, user))
        response = self.client.get(self.url)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(1, len(response.json()))

    @data('user')
    def test_items_should_be_invisible_to_other_users(self, user):
        # Unrelated users get an empty list rather than an error.
        self.client.force_authenticate(getattr(self.fixture, user))
        response = self.client.get(self.url)
        self.assertEqual(status.HTTP_200_OK, response.status_code)
        self.assertEqual(0, len(response.json()))

    def test_items_should_be_invisible_to_unauthenticated_users(self):
        response = self.client.get(self.url)
        self.assertEqual(status.HTTP_401_UNAUTHORIZED, response.status_code)

    def test_filter_order_items_for_service_manager(self):
        # A user attached to the offering acts as its service manager and can
        # be used as a filter key.
        managed_offering = factories.OfferingFactory(customer=self.fixture.customer)
        managed_offering.add_user(self.fixture.user)
        managed_item = factories.OrderItemFactory(
            offering=managed_offering, order=self.order
        )
        self.client.force_authenticate(self.fixture.owner)
        response = self.client.get(
            self.url, {'service_manager_uuid': self.fixture.user.uuid.hex}
        )
        self.assertEqual(1, len(response.data))
        self.assertEqual(managed_item.uuid.hex, response.data[0]['uuid'])
@unittest.skip('OrderItem creation is irrelevant now.')
@ddt
class ItemCreateTest(test.APITransactionTestCase):
    """Permission and state checks for creating order items over REST.

    NOTE: the whole class is skipped — order items are no longer created
    directly through this endpoint.
    """
    def setUp(self):
        # Project fixture with users in every role, a draft order created by
        # the project manager, and an offering to order.
        self.fixture = structure_fixtures.ProjectFixture()
        self.project = self.fixture.project
        self.manager = self.fixture.manager
        self.order = factories.OrderFactory(
            project=self.project, created_by=self.manager
        )
        self.offering = factories.OfferingFactory()
    @data('staff', 'owner', 'admin', 'manager')
    def test_user_can_create_item_with_relation_project(self, user):
        # Roles connected to the project may add items to its order.
        response = self.create_item(user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertTrue(models.OrderItem.objects.filter(order=self.order).exists())
    @data('user')
    def test_user_can_not_create_item_with_not_relation_project(self, user):
        # An unrelated user's request fails validation.
        response = self.create_item(user)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_unauthorized_user_can_not_create_item(self):
        url = factories.OrderItemFactory.get_list_url()
        payload = {
            'offering': factories.OfferingFactory.get_url(self.offering),
            'order': factories.OrderFactory.get_url(self.order),
        }
        response = self.client.post(url, payload)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    def test_user_can_not_create_item_if_order_state_is_not_draft(self):
        # Items may only be added while the order is still a draft.
        self.order.state = models.Order.States.DONE
        self.order.save()
        response = self.create_item('staff')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(models.OrderItem.objects.filter(order=self.order).exists())
    def test_user_can_not_create_item_if_offering_is_not_available(self):
        # Inactive offerings cannot be ordered.
        self.offering.is_active = False
        self.offering.save()
        response = self.create_item('staff')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertFalse(models.OrderItem.objects.filter(order=self.order).exists())
    def create_item(self, user):
        """POST a new order item as the given fixture role; return the response."""
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.OrderItemFactory.get_list_url()
        payload = {
            'offering': factories.OfferingFactory.get_url(self.offering),
            'order': factories.OrderFactory.get_url(self.order),
        }
        return self.client.post(url, payload)
@unittest.skip('OrderItem update is irrelevant now.')
@ddt
class ItemUpdateTest(test.APITransactionTestCase):
    """Permission and state checks for updating order items over REST.

    NOTE: the whole class is skipped — order items are no longer updated
    directly through this endpoint.
    """
    def setUp(self):
        self.fixture = structure_fixtures.ProjectFixture()
        self.project = self.fixture.project
        self.manager = self.fixture.manager
        self.order = factories.OrderFactory(
            project=self.project, created_by=self.manager
        )
        self.order_item = factories.OrderItemFactory(order=self.order)
    @data('staff', 'owner')
    def test_staff_and_owner_can_update_item(self, user):
        response = self.update_item(user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    @data('user')
    def test_other_user_can_not_update_item(self, user):
        # Unrelated users cannot even see the item.
        response = self.update_item(user)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
    @data('admin', 'manager')
    def test_admin_and_manager_can_not_update_item(self, user):
        # Project-level roles can see the item but may not modify it.
        response = self.update_item(user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    @data('staff', 'owner')
    def test_can_not_update_item_if_order_state_is_not_draft(self, user):
        # Items may only be changed while the order is still a draft.
        self.order.state = models.Order.States.DONE
        self.order.save()
        response = self.update_item(user)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def update_item(self, user):
        """PATCH the order item as the given fixture role; return the response."""
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.OrderItemFactory.get_url(self.order_item)
        payload = {
            'offering': factories.OfferingFactory.get_url(self.order_item.offering),
            'plan': factories.PlanFactory.get_url(self.order_item.plan),
        }
        response = self.client.patch(url, payload)
        self.order_item.refresh_from_db()
        return response
@ddt
class ItemDeleteTest(test.APITransactionTestCase):
    """Permission and state checks for deleting order items over REST."""

    def setUp(self):
        self.fixture = structure_fixtures.ProjectFixture()
        self.project = self.fixture.project
        self.manager = self.fixture.manager
        self.order = factories.OrderFactory(
            project=self.project, created_by=self.manager
        )
        self.order_item = factories.OrderItemFactory(order=self.order)

    @data('staff', 'owner', 'admin', 'manager')
    def test_authorized_user_can_delete_item(self, user):
        response = self.delete_item(user)
        self.assertEqual(
            response.status_code, status.HTTP_204_NO_CONTENT, response.data
        )
        self.assertFalse(models.OrderItem.objects.filter(order=self.order).exists())

    @data('user')
    def test_other_user_can_not_delete_item(self, user):
        # Unrelated users cannot even see the item, and it must survive.
        response = self.delete_item(user)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertTrue(models.OrderItem.objects.filter(order=self.order).exists())

    def test_unauthorized_user_can_not_delete_item(self):
        url = factories.OrderItemFactory.get_url(self.order_item)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    @data('staff', 'owner')
    def test_can_not_delete_item_if_order_state_is_not_draft(self, user):
        # Renamed from test_can_not_update_item_if_order_state_is_not_draft
        # (copy-paste error): this test exercises DELETE, not update.
        self.order.state = models.Order.States.DONE
        self.order.save()
        response = self.delete_item(user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def delete_item(self, user):
        """Issue DELETE on the order item as the given fixture role."""
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.OrderItemFactory.get_url(self.order_item)
        response = self.client.delete(url)
        return response
@ddt
class ItemTerminateTest(test.APITransactionTestCase):
    """Behaviour of the order-item ``terminate`` action."""

    def setUp(self):
        self.fixture = structure_fixtures.ProjectFixture()
        self.project = self.fixture.project
        self.manager = self.fixture.manager
        self.offering = factories.OfferingFactory(type='Support.OfferingTemplate')
        self.order = factories.OrderFactory(
            project=self.project, created_by=self.manager
        )
        self.order_item = factories.OrderItemFactory(
            order=self.order, offering=self.offering
        )

    def terminate_item(self, user):
        """POST the terminate action as the given fixture role."""
        self.client.force_authenticate(getattr(self.fixture, user))
        url = factories.OrderItemFactory.get_url(self.order_item, 'terminate')
        return self.client.post(url)

    @data('staff', 'owner', 'admin', 'manager')
    def test_authorized_user_can_terminate_item(self, user):
        response = self.terminate_item(user)
        self.assertEqual(status.HTTP_202_ACCEPTED, response.status_code)
        self.order_item.refresh_from_db()
        self.assertEqual(models.OrderItem.States.TERMINATING, self.order_item.state)

    @data(
        models.OrderItem.States.DONE,
        models.OrderItem.States.ERRED,
        models.OrderItem.States.TERMINATED,
    )
    def test_order_item_cannot_be_terminated_if_it_is_in_terminal_state(self, state):
        self.order_item.state = state
        self.order_item.save()
        response = self.terminate_item('staff')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)

    def test_cannot_terminate_order_if_it_is_not_supported_by_offering(self):
        # Only some offering backends implement termination.
        self.offering.type = 'Packages.Template'
        self.offering.save()
        response = self.terminate_item('staff')
        self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)
class AggregateResourceCountTest(test.APITransactionTestCase):
    """AggregateResourceCount counters must track marketplace resources."""

    def setUp(self):
        self.fixture = structure_fixtures.ServiceFixture()
        self.project = self.fixture.project
        self.customer = self.fixture.customer
        self.plan = factories.PlanFactory()
        self.resource = models.Resource.objects.create(
            project=self.project, offering=self.plan.offering, plan=self.plan,
        )
        self.category = self.plan.offering.category

    def _count_for(self, scope):
        # Read the aggregated counter for the given scope and test category.
        return models.AggregateResourceCount.objects.get(
            scope=scope, category=self.category
        ).count

    def test_when_resource_scope_is_updated_resource_count_is_increased(self):
        self.resource.scope = self.fixture.resource
        self.resource.save()
        self.assertEqual(1, self._count_for(self.project))
        self.assertEqual(1, self._count_for(self.customer))

    def test_when_resource_scope_is_updated_resource_count_is_decreased(self):
        self.resource.scope = self.fixture.resource
        self.resource.save()
        self.resource.state = models.Resource.States.TERMINATED
        self.resource.save()
        self.assertEqual(0, self._count_for(self.project))
        self.assertEqual(0, self._count_for(self.customer))

    def test_recalculate_count(self):
        # Counters wiped from the database must be rebuilt by the
        # recalculate_quotas signal.
        self.resource.scope = self.fixture.resource
        self.resource.save()
        models.AggregateResourceCount.objects.all().delete()
        quota_signals.recalculate_quotas.send(sender=self)
        self.assertEqual(1, self._count_for(self.project))
        self.assertEqual(1, self._count_for(self.customer))
class ItemValidateTest(test.APITransactionTestCase):
    """Model-level validation rules for order items."""

    def setUp(self):
        self.fixture = fixtures.MarketplaceFixture()

    def test_types_of_items_in_one_order_must_be_the_same(self):
        # An UPDATE item added to an order that already contains an item of a
        # different type must fail model validation.
        mismatched_item = factories.OrderItemFactory(
            order=self.fixture.order,
            offering=self.fixture.offering,
            type=models.RequestTypeMixin.Types.UPDATE,
        )
        with self.assertRaises(ValidationError):
            mismatched_item.clean()
@ddt
class ItemRejectTest(test.APITransactionTestCase):
    """Behaviour of the order-item ``reject`` action, including its effect
    on the linked resource for basic (non-provisioning) offerings."""
    def setUp(self):
        # One EXECUTING order item backed by a resource of a support offering
        # owned by the fixture customer.
        self.fixture = structure_fixtures.ProjectFixture()
        self.project = self.fixture.project
        self.manager = self.fixture.manager
        self.offering = factories.OfferingFactory(
            type='Support.OfferingTemplate', customer=self.fixture.customer
        )
        self.order = factories.OrderFactory(
            project=self.project, created_by=self.manager
        )
        resource = factories.ResourceFactory(offering=self.offering)
        self.order_item = factories.OrderItemFactory(
            resource=resource,
            order=self.order,
            offering=self.offering,
            state=models.OrderItem.States.EXECUTING,
        )
    @data(
        'staff', 'owner',
    )
    def test_authorized_user_can_reject_item(self, user):
        # Staff and customer owner may reject; the item becomes TERMINATED.
        response = self.reject_item(user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.order_item.refresh_from_db()
        self.assertEqual(self.order_item.state, models.OrderItem.States.TERMINATED)
    @data(
        'admin', 'manager',
    )
    def test_user_cannot_reject_item(self, user):
        # Project-level roles are not allowed to reject.
        response = self.reject_item(user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    @data(models.OrderItem.States.TERMINATED,)
    def test_order_item_cannot_be_rejected_if_it_is_in_terminated_state(self, state):
        self.order_item.state = state
        self.order_item.save()
        response = self.reject_item('staff')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    @data(
        models.OrderItem.States.DONE,
        models.OrderItem.States.ERRED,
        models.OrderItem.States.TERMINATING,
        models.OrderItem.States.EXECUTING,
        models.OrderItem.States.PENDING,
    )
    def test_order_item_can_be_rejected_if_it_is_not_in_terminated_state(self, state):
        # Every non-TERMINATED state (including DONE/ERRED) accepts rejection.
        self.order_item.state = state
        self.order_item.save()
        response = self.reject_item('staff')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_when_create_order_item_with_basic_offering_is_rejected_resource_is_marked_as_terminated(
        self,
    ):
        # For a basic offering, rejecting a CREATE item terminates the resource.
        self.offering.type = 'Marketplace.Basic'
        self.offering.save()
        self.reject_item('owner')
        self.order_item.refresh_from_db()
        self.assertEqual(
            models.Resource.States.TERMINATED, self.order_item.resource.state
        )
    def test_when_update_order_item_with_basic_offering_is_rejected_resource_is_marked_as_erred(
        self,
    ):
        self.offering.type = 'Marketplace.Basic'
        self.offering.save()
        self.order_item.type = models.OrderItem.Types.UPDATE
        self.order_item.save()
        # Give the resource an active plan period so the rejection has a
        # previous plan/limits to roll back to.
        plan_period = factories.ResourcePlanPeriodFactory()
        old_plan = plan_period.plan
        old_plan.offering = self.offering
        old_plan.save()
        old_limits = {'unit': 50}
        resource = self.order_item.resource
        resource.plan = old_plan
        resource.limits = old_limits
        resource.save()
        plan_period.resource = resource
        plan_period.save()
        self.reject_item('owner')
        self.order_item.refresh_from_db()
        # The resource goes to ERRED but keeps its previous plan and limits.
        self.assertEqual(models.Resource.States.ERRED, self.order_item.resource.state)
        self.assertEqual(old_plan, self.order_item.resource.plan)
        self.assertEqual(old_limits, self.order_item.resource.limits)
    def test_when_terminate_order_item_with_basic_offering_is_rejected_resource_is_marked_as_erred(
        self,
    ):
        self.offering.type = 'Marketplace.Basic'
        self.offering.save()
        self.order_item.type = models.OrderItem.Types.TERMINATE
        self.order_item.save()
        self.reject_item('owner')
        self.order_item.refresh_from_db()
        self.assertEqual(models.Resource.States.ERRED, self.order_item.resource.state)
    def reject_item(self, user):
        """POST the reject action as the given fixture role; return the response."""
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.OrderItemFactory.get_url(self.order_item, 'reject')
        return self.client.post(url)
| 38.59375 | 101 | 0.688664 | import unittest
from ddt import data, ddt
from django.core.exceptions import ValidationError
from rest_framework import status, test
from waldur_core.quotas import signals as quota_signals
from waldur_core.structure.tests import fixtures as structure_fixtures
from waldur_mastermind.marketplace import models
from waldur_mastermind.marketplace.tests import factories, fixtures
@ddt
class OrderItemFilterTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = structure_fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
self.order_item = factories.OrderItemFactory(order=self.order)
self.url = factories.OrderItemFactory.get_list_url()
@data('staff', 'owner', 'admin', 'manager')
def test_items_should_be_visible_to_colleagues_and_staff(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()), 1)
@data('user')
def test_items_should_be_invisible_to_other_users(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()), 0)
def test_items_should_be_invisible_to_unauthenticated_users(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_filter_order_items_for_service_manager(self):
offering = factories.OfferingFactory(customer=self.fixture.customer)
offering.add_user(self.fixture.user)
order_item = factories.OrderItemFactory(offering=offering, order=self.order)
self.client.force_authenticate(self.fixture.owner)
response = self.client.get(
self.url, {'service_manager_uuid': self.fixture.user.uuid.hex}
)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['uuid'], order_item.uuid.hex)
@unittest.skip('OrderItem creation is irrelevant now.')
@ddt
class ItemCreateTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = structure_fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
self.offering = factories.OfferingFactory()
@data('staff', 'owner', 'admin', 'manager')
def test_user_can_create_item_with_relation_project(self, user):
response = self.create_item(user)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.OrderItem.objects.filter(order=self.order).exists())
@data('user')
def test_user_can_not_create_item_with_not_relation_project(self, user):
response = self.create_item(user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_unauthorized_user_can_not_create_item(self):
url = factories.OrderItemFactory.get_list_url()
payload = {
'offering': factories.OfferingFactory.get_url(self.offering),
'order': factories.OrderFactory.get_url(self.order),
}
response = self.client.post(url, payload)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_user_can_not_create_item_if_order_state_is_not_draft(self):
self.order.state = models.Order.States.DONE
self.order.save()
response = self.create_item('staff')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(models.OrderItem.objects.filter(order=self.order).exists())
def test_user_can_not_create_item_if_offering_is_not_available(self):
self.offering.is_active = False
self.offering.save()
response = self.create_item('staff')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(models.OrderItem.objects.filter(order=self.order).exists())
def create_item(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
url = factories.OrderItemFactory.get_list_url()
payload = {
'offering': factories.OfferingFactory.get_url(self.offering),
'order': factories.OrderFactory.get_url(self.order),
}
return self.client.post(url, payload)
@unittest.skip('OrderItem update is irrelevant now.')
@ddt
class ItemUpdateTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = structure_fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
self.order_item = factories.OrderItemFactory(order=self.order)
@data('staff', 'owner')
def test_staff_and_owner_can_update_item(self, user):
response = self.update_item(user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
@data('user')
def test_other_user_can_not_update_item(self, user):
response = self.update_item(user)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
@data('admin', 'manager')
def test_admin_and_manager_can_not_update_item(self, user):
response = self.update_item(user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@data('staff', 'owner')
def test_can_not_update_item_if_order_state_is_not_draft(self, user):
self.order.state = models.Order.States.DONE
self.order.save()
response = self.update_item(user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def update_item(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
url = factories.OrderItemFactory.get_url(self.order_item)
payload = {
'offering': factories.OfferingFactory.get_url(self.order_item.offering),
'plan': factories.PlanFactory.get_url(self.order_item.plan),
}
response = self.client.patch(url, payload)
self.order_item.refresh_from_db()
return response
@ddt
class ItemDeleteTest(test.APITransactionTestCase):
    """Permission and order-state checks for deleting marketplace order items."""

    def setUp(self):
        self.fixture = structure_fixtures.ProjectFixture()
        self.project = self.fixture.project
        self.manager = self.fixture.manager
        self.order = factories.OrderFactory(
            project=self.project, created_by=self.manager
        )
        self.order_item = factories.OrderItemFactory(order=self.order)

    @data('staff', 'owner', 'admin', 'manager')
    def test_authorized_user_can_delete_item(self, user):
        response = self.delete_item(user)
        self.assertEqual(
            response.status_code, status.HTTP_204_NO_CONTENT, response.data
        )
        self.assertFalse(models.OrderItem.objects.filter(order=self.order).exists())

    @data('user')
    def test_other_user_can_not_delete_item(self, user):
        response = self.delete_item(user)
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertTrue(models.OrderItem.objects.filter(order=self.order).exists())

    def test_unauthorized_user_can_not_delete_item(self):
        url = factories.OrderItemFactory.get_url(self.order_item)
        response = self.client.delete(url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    @data('staff', 'owner')
    def test_can_not_delete_item_if_order_state_is_not_draft(self, user):
        # Renamed from "...update..." — copy-paste from the update-test class;
        # this class exercises deletion.
        self.order.state = models.Order.States.DONE
        self.order.save()
        response = self.delete_item(user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def delete_item(self, user):
        """Authenticate as the named fixture user and DELETE the order item."""
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.OrderItemFactory.get_url(self.order_item)
        response = self.client.delete(url)
        return response
@ddt
class ItemTerminateTest(test.APITransactionTestCase):
    """Checks who may terminate an order item and in which states it is allowed."""

    def setUp(self):
        self.fixture = structure_fixtures.ProjectFixture()
        self.project = self.fixture.project
        self.manager = self.fixture.manager
        # An offering type for which termination is supported (see the
        # "not supported by offering" test below, which switches the type).
        self.offering = factories.OfferingFactory(type='Support.OfferingTemplate')
        self.order = factories.OrderFactory(
            project=self.project, created_by=self.manager
        )
        self.order_item = factories.OrderItemFactory(
            order=self.order, offering=self.offering
        )

    @data('staff', 'owner', 'admin', 'manager')
    def test_authorized_user_can_terminate_item(self, user):
        """Project/staff users get 202 and the item moves to TERMINATING."""
        response = self.terminate_item(user)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        self.order_item.refresh_from_db()
        self.assertEqual(self.order_item.state, models.OrderItem.States.TERMINATING)

    @data(
        models.OrderItem.States.DONE,
        models.OrderItem.States.ERRED,
        models.OrderItem.States.TERMINATED,
    )
    def test_order_item_cannot_be_terminated_if_it_is_in_terminal_state(self, state):
        self.order_item.state = state
        self.order_item.save()
        response = self.terminate_item('staff')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_cannot_terminate_order_if_it_is_not_supported_by_offering(self):
        self.offering.type = 'Packages.Template'
        self.offering.save()
        response = self.terminate_item('staff')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def terminate_item(self, user):
        """Authenticate as the named fixture user and POST the terminate action."""
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.OrderItemFactory.get_url(self.order_item, 'terminate')
        return self.client.post(url)
class AggregateResourceCountTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = structure_fixtures.ServiceFixture()
self.project = self.fixture.project
self.customer = self.fixture.customer
self.plan = factories.PlanFactory()
self.resource = models.Resource.objects.create(
project=self.project, offering=self.plan.offering, plan=self.plan,
)
self.category = self.plan.offering.category
def test_when_resource_scope_is_updated_resource_count_is_increased(self):
self.resource.scope = self.fixture.resource
self.resource.save()
self.assertEqual(
models.AggregateResourceCount.objects.get(
scope=self.project, category=self.category
).count,
1,
)
self.assertEqual(
models.AggregateResourceCount.objects.get(
scope=self.customer, category=self.category
).count,
1,
)
def test_when_resource_scope_is_updated_resource_count_is_decreased(self):
self.resource.scope = self.fixture.resource
self.resource.save()
self.resource.state = models.Resource.States.TERMINATED
self.resource.save()
self.assertEqual(
models.AggregateResourceCount.objects.get(
scope=self.project, category=self.category
).count,
0,
)
self.assertEqual(
models.AggregateResourceCount.objects.get(
scope=self.customer, category=self.category
).count,
0,
)
def test_recalculate_count(self):
self.resource.scope = self.fixture.resource
self.resource.save()
models.AggregateResourceCount.objects.all().delete()
quota_signals.recalculate_quotas.send(sender=self)
self.assertEqual(
models.AggregateResourceCount.objects.get(
scope=self.project, category=self.category
).count,
1,
)
self.assertEqual(
models.AggregateResourceCount.objects.get(
scope=self.customer, category=self.category
).count,
1,
)
class ItemValidateTest(test.APITransactionTestCase):
    """Model-level validation of order items."""

    def setUp(self):
        self.fixture = fixtures.MarketplaceFixture()

    def test_types_of_items_in_one_order_must_be_the_same(self):
        """Adding an UPDATE item to an order with other item types fails clean()."""
        new_item = factories.OrderItemFactory(
            order=self.fixture.order,
            offering=self.fixture.offering,
            type=models.RequestTypeMixin.Types.UPDATE,
        )
        self.assertRaises(ValidationError, new_item.clean)
@ddt
class ItemRejectTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = structure_fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.offering = factories.OfferingFactory(
type='Support.OfferingTemplate', customer=self.fixture.customer
)
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
resource = factories.ResourceFactory(offering=self.offering)
self.order_item = factories.OrderItemFactory(
resource=resource,
order=self.order,
offering=self.offering,
state=models.OrderItem.States.EXECUTING,
)
@data(
'staff', 'owner',
)
def test_authorized_user_can_reject_item(self, user):
response = self.reject_item(user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.order_item.refresh_from_db()
self.assertEqual(self.order_item.state, models.OrderItem.States.TERMINATED)
@data(
'admin', 'manager',
)
def test_user_cannot_reject_item(self, user):
response = self.reject_item(user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@data(models.OrderItem.States.TERMINATED,)
def test_order_item_cannot_be_rejected_if_it_is_in_terminated_state(self, state):
self.order_item.state = state
self.order_item.save()
response = self.reject_item('staff')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@data(
models.OrderItem.States.DONE,
models.OrderItem.States.ERRED,
models.OrderItem.States.TERMINATING,
models.OrderItem.States.EXECUTING,
models.OrderItem.States.PENDING,
)
def test_order_item_can_be_rejected_if_it_is_not_in_terminated_state(self, state):
self.order_item.state = state
self.order_item.save()
response = self.reject_item('staff')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_when_create_order_item_with_basic_offering_is_rejected_resource_is_marked_as_terminated(
self,
):
self.offering.type = 'Marketplace.Basic'
self.offering.save()
self.reject_item('owner')
self.order_item.refresh_from_db()
self.assertEqual(
models.Resource.States.TERMINATED, self.order_item.resource.state
)
def test_when_update_order_item_with_basic_offering_is_rejected_resource_is_marked_as_erred(
self,
):
self.offering.type = 'Marketplace.Basic'
self.offering.save()
self.order_item.type = models.OrderItem.Types.UPDATE
self.order_item.save()
plan_period = factories.ResourcePlanPeriodFactory()
old_plan = plan_period.plan
old_plan.offering = self.offering
old_plan.save()
old_limits = {'unit': 50}
resource = self.order_item.resource
resource.plan = old_plan
resource.limits = old_limits
resource.save()
plan_period.resource = resource
plan_period.save()
self.reject_item('owner')
self.order_item.refresh_from_db()
self.assertEqual(models.Resource.States.ERRED, self.order_item.resource.state)
self.assertEqual(old_plan, self.order_item.resource.plan)
self.assertEqual(old_limits, self.order_item.resource.limits)
def test_when_terminate_order_item_with_basic_offering_is_rejected_resource_is_marked_as_erred(
self,
):
self.offering.type = 'Marketplace.Basic'
self.offering.save()
self.order_item.type = models.OrderItem.Types.TERMINATE
self.order_item.save()
self.reject_item('owner')
self.order_item.refresh_from_db()
self.assertEqual(models.Resource.States.ERRED, self.order_item.resource.state)
def reject_item(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
url = factories.OrderItemFactory.get_url(self.order_item, 'reject')
return self.client.post(url)
| true | true |
f7367865c9e793d8304154e17a20b6e94a107b97 | 3,087 | py | Python | readthedocs/rtd_tests/tests/test_version_commit_name.py | mforbes/readthedocs.org | 92f6224a67648a6d27e7a295973c2718d07cee11 | [
"MIT"
] | 4,054 | 2015-01-01T00:58:07.000Z | 2019-06-28T05:50:49.000Z | readthedocs/rtd_tests/tests/test_version_commit_name.py | mforbes/readthedocs.org | 92f6224a67648a6d27e7a295973c2718d07cee11 | [
"MIT"
] | 4,282 | 2015-01-01T21:38:49.000Z | 2019-06-28T15:41:00.000Z | readthedocs/rtd_tests/tests/test_version_commit_name.py | mforbes/readthedocs.org | 92f6224a67648a6d27e7a295973c2718d07cee11 | [
"MIT"
] | 3,224 | 2015-01-01T07:38:45.000Z | 2019-06-28T09:19:10.000Z | # -*- coding: utf-8 -*-
from django.test import TestCase
from django_dynamic_fixture import get, new
from readthedocs.builds.constants import (
BRANCH,
LATEST,
STABLE,
TAG,
EXTERNAL,
)
from readthedocs.builds.models import Version
from readthedocs.projects.constants import REPO_TYPE_GIT, REPO_TYPE_HG
from readthedocs.projects.models import Project
class VersionCommitNameTests(TestCase):
    """Checks how Version derives identifier_friendly and commit_name
    from its identifier/slug/verbose_name for branches, tags and
    external (PR) versions across git and mercurial projects."""

    def test_branch_name_unicode_non_ascii(self):
        """Non-ASCII branch identifiers pass through unchanged."""
        unicode_name = b'abc_\xd1\x84_\xe2\x99\x98'.decode('utf-8')
        version = new(Version, identifier=unicode_name, type=BRANCH)
        self.assertEqual(version.identifier_friendly, unicode_name)

    def test_branch_name_made_friendly_when_sha(self):
        commit_hash = '3d92b728b7d7b842259ac2020c2fa389f13aff0d'
        version = new(
            Version, identifier=commit_hash,
            slug=STABLE, verbose_name=STABLE, type=TAG,
        )
        # we shorten commit hashes to keep things readable
        self.assertEqual(version.identifier_friendly, '3d92b728')

    def test_branch_name(self):
        version = new(
            Version, identifier='release-2.5.x',
            slug='release-2.5.x', verbose_name='release-2.5.x',
            type=BRANCH,
        )
        self.assertEqual(version.commit_name, 'release-2.5.x')

    def test_tag_name(self):
        """For tags the commit name is the human-readable verbose name."""
        version = new(
            Version, identifier='10f1b29a2bd2', slug='release-2.5.0',
            verbose_name='release-2.5.0', type=TAG,
        )
        self.assertEqual(version.commit_name, 'release-2.5.0')

    def test_branch_with_name_stable(self):
        """The remote prefix ("origin/") is stripped from branch names."""
        version = new(
            Version, identifier='origin/stable', slug=STABLE,
            verbose_name='stable', type=BRANCH,
        )
        self.assertEqual(version.commit_name, 'stable')

    def test_stable_version_tag(self):
        """A SHA-identified stable tag keeps the full hash as commit name."""
        version = new(
            Version,
            identifier='3d92b728b7d7b842259ac2020c2fa389f13aff0d',
            slug=STABLE, verbose_name=STABLE, type=TAG,
        )
        self.assertEqual(
            version.commit_name,
            '3d92b728b7d7b842259ac2020c2fa389f13aff0d',
        )

    def test_hg_latest_branch(self):
        """Mercurial's LATEST maps to the 'default' branch."""
        hg_project = get(Project, repo_type=REPO_TYPE_HG)
        version = new(
            Version, identifier='default', slug=LATEST,
            verbose_name=LATEST, type=BRANCH, project=hg_project,
        )
        self.assertEqual(version.commit_name, 'default')

    def test_git_latest_branch(self):
        """Git's LATEST ('origin/master') maps to 'master'."""
        git_project = get(Project, repo_type=REPO_TYPE_GIT)
        version = new(
            Version, project=git_project,
            identifier='origin/master', slug=LATEST,
            verbose_name=LATEST, type=BRANCH,
        )
        self.assertEqual(version.commit_name, 'master')

    def test_external_version(self):
        """External (PR) versions use the raw identifier as commit name."""
        identifier = 'ec26de721c3235aad62de7213c562f8c821'
        version = new(
            Version, identifier=identifier,
            slug='11', verbose_name='11',
            type=EXTERNAL,
        )
        self.assertEqual(version.commit_name, identifier)
| 34.3 | 70 | 0.649822 |
from django.test import TestCase
from django_dynamic_fixture import get, new
from readthedocs.builds.constants import (
BRANCH,
LATEST,
STABLE,
TAG,
EXTERNAL,
)
from readthedocs.builds.models import Version
from readthedocs.projects.constants import REPO_TYPE_GIT, REPO_TYPE_HG
from readthedocs.projects.models import Project
class VersionCommitNameTests(TestCase):
def test_branch_name_unicode_non_ascii(self):
unicode_name = b'abc_\xd1\x84_\xe2\x99\x98'.decode('utf-8')
version = new(Version, identifier=unicode_name, type=BRANCH)
self.assertEqual(version.identifier_friendly, unicode_name)
def test_branch_name_made_friendly_when_sha(self):
commit_hash = '3d92b728b7d7b842259ac2020c2fa389f13aff0d'
version = new(
Version, identifier=commit_hash,
slug=STABLE, verbose_name=STABLE, type=TAG,
)
self.assertEqual(version.identifier_friendly, '3d92b728')
def test_branch_name(self):
version = new(
Version, identifier='release-2.5.x',
slug='release-2.5.x', verbose_name='release-2.5.x',
type=BRANCH,
)
self.assertEqual(version.commit_name, 'release-2.5.x')
def test_tag_name(self):
version = new(
Version, identifier='10f1b29a2bd2', slug='release-2.5.0',
verbose_name='release-2.5.0', type=TAG,
)
self.assertEqual(version.commit_name, 'release-2.5.0')
def test_branch_with_name_stable(self):
version = new(
Version, identifier='origin/stable', slug=STABLE,
verbose_name='stable', type=BRANCH,
)
self.assertEqual(version.commit_name, 'stable')
def test_stable_version_tag(self):
version = new(
Version,
identifier='3d92b728b7d7b842259ac2020c2fa389f13aff0d',
slug=STABLE, verbose_name=STABLE, type=TAG,
)
self.assertEqual(
version.commit_name,
'3d92b728b7d7b842259ac2020c2fa389f13aff0d',
)
def test_hg_latest_branch(self):
hg_project = get(Project, repo_type=REPO_TYPE_HG)
version = new(
Version, identifier='default', slug=LATEST,
verbose_name=LATEST, type=BRANCH, project=hg_project,
)
self.assertEqual(version.commit_name, 'default')
def test_git_latest_branch(self):
git_project = get(Project, repo_type=REPO_TYPE_GIT)
version = new(
Version, project=git_project,
identifier='origin/master', slug=LATEST,
verbose_name=LATEST, type=BRANCH,
)
self.assertEqual(version.commit_name, 'master')
def test_external_version(self):
identifier = 'ec26de721c3235aad62de7213c562f8c821'
version = new(
Version, identifier=identifier,
slug='11', verbose_name='11',
type=EXTERNAL,
)
self.assertEqual(version.commit_name, identifier)
| true | true |
f736799f40ea5a3700c14af04a10d7f61032ff9a | 877 | py | Python | src/util/SetInterval.py | gekkyo/dendai_ict | 87fd9b9ba028d81d6f87b0fa4af864584e9eb5e2 | [
"CC0-1.0"
] | null | null | null | src/util/SetInterval.py | gekkyo/dendai_ict | 87fd9b9ba028d81d6f87b0fa4af864584e9eb5e2 | [
"CC0-1.0"
] | 1 | 2022-03-12T01:02:07.000Z | 2022-03-12T01:02:07.000Z | src/util/SetInterval.py | gekkyo/dendai_ict | 87fd9b9ba028d81d6f87b0fa4af864584e9eb5e2 | [
"CC0-1.0"
] | null | null | null | import logging
import threading
import time
from typing import Any
class SetInterval:
    """Invoke *action* every *interval* seconds on a background thread.

    The schedule is drift-free: each tick time is computed from the previous
    target time rather than from "now", so a slow action does not shift
    subsequent ticks.
    """

    def __init__(self, interval: float, action: Any) -> None:
        """Start the timer thread immediately.

        Args:
            interval (float): seconds between calls
            action (Any): zero-argument callable invoked on each tick
        """
        logging.info("init")
        self.interval = interval
        self.action = action
        self.stopEvent = threading.Event()
        self.thread = threading.Thread(target=self.__set_interval)
        self.thread.start()

    def __set_interval(self) -> None:
        """Thread body: sleep until the next tick or until cancelled."""
        next_time = time.time() + self.interval
        # Event.wait() returns True once the event is set, which ends the loop.
        while not self.stopEvent.wait(next_time - time.time()):
            next_time += self.interval
            self.action()
        # NOTE(review): the worker thread is non-daemon, so the process will
        # not exit until cancel() is called — presumably intentional; confirm.

    def cancel(self) -> None:
        """Stop the periodic calls by signalling the worker thread to exit."""
        logging.info("cancel")
        self.stopEvent.set()
| 25.057143 | 66 | 0.573546 | import logging
import threading
import time
from typing import Any
class SetInterval:
def __init__(self, interval: float, action: Any) -> None:
logging.info("init")
self.interval = interval
self.action = action
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.__set_interval)
self.thread.start()
def __set_interval(self) -> None:
next_time = time.time() + self.interval
while not self.stopEvent.wait(next_time - time.time()):
next_time += self.interval
self.action()
def cancel(self) -> None:
logging.info("cancel")
self.stopEvent.set()
| true | true |
f73679f388fc6181a9a6b17c30d5729b89190c20 | 147 | py | Python | pinax/cart/models/cart.py | pinax/pinax-cart | 5a99c47ba8ce314f3a760717833cd5f4f5cf532f | [
"MIT"
] | 2 | 2015-12-15T22:49:54.000Z | 2016-09-01T12:50:07.000Z | pinax/cart/models/cart.py | pinax/pinax-cart | 5a99c47ba8ce314f3a760717833cd5f4f5cf532f | [
"MIT"
] | 3 | 2019-03-01T17:33:40.000Z | 2019-03-31T00:38:52.000Z | pinax/cart/models/cart.py | pinax/pinax-cart | 5a99c47ba8ce314f3a760717833cd5f4f5cf532f | [
"MIT"
] | 2 | 2019-03-31T00:28:17.000Z | 2021-03-17T16:13:30.000Z | from django.conf import settings
from django.db import models
class Cart(models.Model):
    """A shopping cart owned by a single user."""

    # on_delete is a required ForeignKey argument since Django 2.0; CASCADE
    # (the pre-2.0 implicit default) deletes the cart with the user account.
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
| 18.375 | 55 | 0.789116 | from django.conf import settings
from django.db import models
class Cart(models.Model):
owner = models.ForeignKey(settings.AUTH_USER_MODEL)
| true | true |
f7367b513652b4c598452dcba5f0e55aea742d5d | 630 | py | Python | p2p_python/serializer.py | namuyan/p2p-python | d67982f10d7e2506fbdacce933ce8ce3c8771909 | [
"MIT"
] | 63 | 2018-02-06T08:04:35.000Z | 2022-01-07T15:09:23.000Z | p2p_python/serializer.py | namuyan/p2p-python | d67982f10d7e2506fbdacce933ce8ce3c8771909 | [
"MIT"
] | 4 | 2018-04-30T14:42:33.000Z | 2020-07-20T05:40:50.000Z | p2p_python/serializer.py | namuyan/p2p-python | d67982f10d7e2506fbdacce933ce8ce3c8771909 | [
"MIT"
] | 13 | 2018-08-23T09:11:41.000Z | 2022-02-24T17:29:16.000Z | import msgpack
def dump(obj, fp, default=None):
    """Serialize *obj* as msgpack to the writable binary stream *fp*."""
    msgpack.pack(obj, fp, use_bin_type=True, default=default)


def dumps(obj, default=None):
    """Serialize *obj* and return the msgpack-encoded bytes."""
    return msgpack.packb(obj, use_bin_type=True, default=default)


def load(fp, object_hook=None):
    """Deserialize one msgpack object from the readable binary stream *fp*.

    ``raw=False`` decodes msgpack string types to ``str``; the previously
    used ``encoding='utf8'`` keyword was deprecated in msgpack 0.5 and
    removed in msgpack 1.0.
    """
    return msgpack.unpack(fp, object_hook=object_hook, raw=False)


def loads(b, object_hook=None):
    """Deserialize one msgpack object from the bytes-like *b*."""
    return msgpack.unpackb(b, object_hook=object_hook, raw=False)


def stream_unpacker(fp, object_hook=None):
    """Return an Unpacker yielding successive msgpack objects read from *fp*."""
    return msgpack.Unpacker(fp, object_hook=object_hook, raw=False)


__all__ = [
    "dump",
    "dumps",
    "load",
    "loads",
    "stream_unpacker",
]
| 20.322581 | 73 | 0.706349 | import msgpack
def dump(obj, fp, default=None):
msgpack.pack(obj, fp, use_bin_type=True, default=default)
def dumps(obj, default=None):
return msgpack.packb(obj, use_bin_type=True, default=default)
def load(fp, object_hook=None):
return msgpack.unpack(fp, object_hook=object_hook, encoding='utf8')
def loads(b, object_hook=None):
return msgpack.unpackb(b, object_hook=object_hook, encoding='utf8')
def stream_unpacker(fp, object_hook=None):
return msgpack.Unpacker(fp, object_hook=object_hook, encoding='utf8')
__all__ = [
"dump",
"dumps",
"load",
"loads",
"stream_unpacker",
]
| true | true |
f7367b85ef33529c5c360e68d214cb8e6a80a38f | 4,752 | py | Python | dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py | njalloul90/Genomics_Oncology_Platform | 9bf6d0edca5df783f4e371fa1bc46b7b1576fe70 | [
"MIT"
] | 6 | 2021-07-26T14:21:25.000Z | 2021-07-26T14:32:01.000Z | dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py | njalloul90/Genomics_Oncology_Platform | 9bf6d0edca5df783f4e371fa1bc46b7b1576fe70 | [
"MIT"
] | 9 | 2021-03-18T23:10:27.000Z | 2022-03-11T23:43:55.000Z | dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py | njalloul90/Genomics_Oncology_Platform | 9bf6d0edca5df783f4e371fa1bc46b7b1576fe70 | [
"MIT"
] | 2 | 2019-03-11T05:06:49.000Z | 2019-03-22T21:48:49.000Z | """
PyColourChooser
Copyright (C) 2002 Michael Gilfix <mgilfix@eecs.tufts.edu>
This file is part of PyColourChooser.
This version of PyColourChooser is open source; you can redistribute it
and/or modify it under the licensed terms.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
# 12/14/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 compatibility update.
#
# 12/21/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o wxPyColorChooser -> PyColorChooser
# o wxPyColourChooser -> PyColourChooser
#
# Tags: phoenix-port
import wx
class BitmapBuffer(wx.MemoryDC):
"""A screen buffer class.
This class implements a screen output buffer. Data is meant to
be drawn in the buffer class and then blitted directly to the
output device, or on-screen window.
"""
def __init__(self, width, height, colour):
"""Initialize the empty buffer object."""
wx.MemoryDC.__init__(self)
self.width = width
self.height = height
self.colour = colour
self.bitmap = wx.Bitmap(self.width, self.height)
self.SelectObject(self.bitmap)
# Initialize the buffer to the background colour
self.SetBackground(wx.Brush(self.colour, wx.BRUSHSTYLE_SOLID))
self.Clear()
# Make each logical unit of the buffer equal to 1 pixel
self.SetMapMode(wx.MM_TEXT)
def GetBitmap(self):
"""Returns the internal bitmap for direct drawing."""
return self.bitmap
# GetPixel seems to always return (-1, -1, -1, 255)
# on OS X so this is a workaround for that issue.
def GetPixelColour(self, x, y):
"""Gets the color value of the pixel at the given
cords.
"""
img = self.GetAsBitmap().ConvertToImage()
red = img.GetRed(x, y)
green = img.GetGreen(x, y)
blue = img.GetBlue(x, y)
return wx.Colour(red, green, blue)
class Canvas(wx.Window):
"""A canvas class for arbitrary drawing.
The Canvas class implements a window that allows for drawing
arbitrary graphics. It implements a double buffer scheme and
blits the off-screen buffer to the window during paint calls
by the windowing system for speed.
Some other methods for determining the canvas colour and size
are also provided.
"""
def __init__(self, parent, id,
pos=wx.DefaultPosition,
style=wx.SIMPLE_BORDER,
forceClientSize=None):
"""Creates a canvas instance and initializes the off-screen
buffer. Also sets the handler for rendering the canvas
automatically via size and paint calls from the windowing
system."""
wx.Window.__init__(self, parent, id, pos, style=style)
if forceClientSize:
self.SetMaxClientSize(forceClientSize)
self.SetMinClientSize(forceClientSize)
# Perform an intial sizing
self.ReDraw()
# Register event handlers
self.Bind(wx.EVT_SIZE, self.onSize)
self.Bind(wx.EVT_PAINT, self.onPaint)
def MakeNewBuffer(self):
size = self.GetClientSize()
self.buffer = BitmapBuffer(size[0], size[1],
self.GetBackgroundColour())
def onSize(self, event):
"""Perform actual redraw to off-screen buffer only when the
size of the canvas has changed. This saves a lot of computation
since the same image can be re-used, provided the canvas size
hasn't changed."""
self.MakeNewBuffer()
self.DrawBuffer()
self.Refresh()
def ReDraw(self):
"""Explicitly tells the canvas to redraw it's contents."""
self.onSize(None)
def Refresh(self):
"""Re-draws the buffer contents on-screen."""
dc = wx.ClientDC(self)
self.Blit(dc)
def onPaint(self, event):
"""Renders the off-screen buffer on-screen."""
dc = wx.PaintDC(self)
self.Blit(dc)
def Blit(self, dc):
"""Performs the blit of the buffer contents on-screen."""
width, height = self.buffer.GetSize()
dc.Blit(0, 0, width, height, self.buffer, 0, 0)
def GetBoundingRect(self):
"""Returns a tuple that contains the co-ordinates of the
top-left and bottom-right corners of the canvas."""
x, y = self.GetPosition()
w, h = self.GetSize()
return(x, y + h, x + w, y)
def DrawBuffer(self):
"""Actual drawing function for drawing into the off-screen
buffer. To be overrideen in the implementing class. Do nothing
by default."""
pass
| 32.547945 | 71 | 0.643729 |
import wx
class BitmapBuffer(wx.MemoryDC):
def __init__(self, width, height, colour):
wx.MemoryDC.__init__(self)
self.width = width
self.height = height
self.colour = colour
self.bitmap = wx.Bitmap(self.width, self.height)
self.SelectObject(self.bitmap)
self.SetBackground(wx.Brush(self.colour, wx.BRUSHSTYLE_SOLID))
self.Clear()
self.SetMapMode(wx.MM_TEXT)
def GetBitmap(self):
return self.bitmap
def GetPixelColour(self, x, y):
img = self.GetAsBitmap().ConvertToImage()
red = img.GetRed(x, y)
green = img.GetGreen(x, y)
blue = img.GetBlue(x, y)
return wx.Colour(red, green, blue)
class Canvas(wx.Window):
def __init__(self, parent, id,
pos=wx.DefaultPosition,
style=wx.SIMPLE_BORDER,
forceClientSize=None):
wx.Window.__init__(self, parent, id, pos, style=style)
if forceClientSize:
self.SetMaxClientSize(forceClientSize)
self.SetMinClientSize(forceClientSize)
self.ReDraw()
self.Bind(wx.EVT_SIZE, self.onSize)
self.Bind(wx.EVT_PAINT, self.onPaint)
def MakeNewBuffer(self):
size = self.GetClientSize()
self.buffer = BitmapBuffer(size[0], size[1],
self.GetBackgroundColour())
def onSize(self, event):
self.MakeNewBuffer()
self.DrawBuffer()
self.Refresh()
def ReDraw(self):
self.onSize(None)
def Refresh(self):
dc = wx.ClientDC(self)
self.Blit(dc)
def onPaint(self, event):
dc = wx.PaintDC(self)
self.Blit(dc)
def Blit(self, dc):
width, height = self.buffer.GetSize()
dc.Blit(0, 0, width, height, self.buffer, 0, 0)
def GetBoundingRect(self):
x, y = self.GetPosition()
w, h = self.GetSize()
return(x, y + h, x + w, y)
def DrawBuffer(self):
pass
| true | true |
f7367bf515d1485d5cfca2aca9bb5f63d46f501e | 1,013 | py | Python | input.py | aragaer/pa-client | a2d2cc0300e931aecf1fe86f3a34b05b427c5fa3 | [
"MIT"
] | null | null | null | input.py | aragaer/pa-client | a2d2cc0300e931aecf1fe86f3a34b05b427c5fa3 | [
"MIT"
] | null | null | null | input.py | aragaer/pa-client | a2d2cc0300e931aecf1fe86f3a34b05b427c5fa3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import os
import subprocess
def naive_config_read(cfg):
    """Extract the input-pipe path from a minimal ``key: value`` config.

    *cfg* is any iterable of lines.  Returns the value of the first ``pipe``
    key.  If no such key exists, reports the problem and exits with status 0
    (preserving the original behaviour).
    """
    for line in cfg:
        # partition() tolerates values containing ':' and lines without any
        # colon, where the old split(':') raised ValueError on unpacking.
        key, sep, val = line.strip().partition(':')
        if sep and key.strip() == 'pipe':
            return val.strip()
    print("Input pipe not found in config")
    exit(0)
def main(config_file):
    """Prompt for a message via dmenu and forward it to the input pipe.

    *config_file* is a minimal ``key: value`` file parsed by
    naive_config_read() to find the pipe path.
    """
    # If the config path does not exist relative to the current working
    # directory, retry relative to the script's own directory.
    if not os.path.exists(config_file):
        os.chdir(os.path.dirname(__file__))
    with open(config_file) as cfg:
        input_pipe = naive_config_read(cfg)
    # dmenu with no stdin shows an empty menu; stdout is the typed text.
    result = subprocess.run("dmenu",
                            stdin=subprocess.DEVNULL,
                            stdout=subprocess.PIPE)
    text = result.stdout.decode().strip()
    if not text:
        return
    print("message", text, sep=':')
    # Write the same "message:<text>" line into the named pipe.
    with open(input_pipe, 'w') as pipe:
        print("message", text, sep=':', file=pipe)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default="config.yml")
args = parser.parse_args()
main(args.config)
| 24.119048 | 63 | 0.591313 |
import argparse
import os
import subprocess
def naive_config_read(cfg):
for line in cfg:
key, val = line.strip().split(':')
if key == 'pipe':
return val.strip()
else:
print("Input pipe not found in config")
exit(0)
def main(config_file):
if not os.path.exists(config_file):
os.chdir(os.path.dirname(__file__))
with open(config_file) as cfg:
input_pipe = naive_config_read(cfg)
result = subprocess.run("dmenu",
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE)
text = result.stdout.decode().strip()
if not text:
return
print("message", text, sep=':')
with open(input_pipe, 'w') as pipe:
print("message", text, sep=':', file=pipe)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default="config.yml")
args = parser.parse_args()
main(args.config)
| true | true |
f7367c74e0c12e658709e53a56ee8a06dbd8413b | 10,204 | py | Python | cogs/extras.py | HeyItsIconic/Kurisu | 4e203bc17a55bd0bd087f9e1ce0b891da8bfa916 | [
"Apache-2.0"
] | null | null | null | cogs/extras.py | HeyItsIconic/Kurisu | 4e203bc17a55bd0bd087f9e1ce0b891da8bfa916 | [
"Apache-2.0"
] | null | null | null | cogs/extras.py | HeyItsIconic/Kurisu | 4e203bc17a55bd0bd087f9e1ce0b891da8bfa916 | [
"Apache-2.0"
] | null | null | null | import datetime
import discord
import os
import random
import re
import string
from utils.checks import is_staff
from discord.ext import commands
from discord import TextChannel
class Extras(commands.Cog):
"""
Extra things.
"""
    def __init__(self, bot):
        # Running bot instance; used for channel/role lookups and state flags.
        self.bot = bot
        # Nicknames must start with at least two ASCII letters (case-insensitive).
        self.nick_pattern = re.compile("^[a-z]{2,}.*$", re.RegexFlag.IGNORECASE)

    # Confirmation key required by .prune30; regenerated on each attempt.
    prune_key = "nokey"
def check_nickname(self, nickname):
if match := self.nick_pattern.fullmatch(nickname):
return len(nickname) <= 32
else:
return match
@commands.command(aliases=['about'])
async def kurisu(self, ctx):
"""About Kurisu"""
embed = discord.Embed(title="Kurisu", color=discord.Color.green())
embed.set_author(name="Started by 916253, maintained by ihaveahax")
embed.set_thumbnail(url="http://i.imgur.com/hjVY4Et.jpg")
embed.url = "https://github.com/nh-server/Kurisu"
embed.description = "Kurisu, the Nintendo Homebrew Discord bot!"
await ctx.send(embed=embed)
@commands.guild_only()
@commands.command()
async def membercount(self, ctx):
"""Prints the member count of the server."""
await ctx.send(f"{ctx.guild.name} has {ctx.guild.member_count:,} members!")
@commands.command()
async def uptime(self, ctx):
"""Print total uptime of the bot."""
await ctx.send(f"Uptime: {datetime.datetime.now() - self.bot.startup}")
@commands.guild_only()
@is_staff("SuperOP")
@commands.command(hidden=True)
async def copyrole(self, ctx, role: discord.Role, src_channel: discord.TextChannel, des_channels: commands.Greedy[discord.TextChannel]):
"""Copy role permission from a channel to channels"""
perms = src_channel.overwrites_for(role)
for c in des_channels:
await c.set_permissions(role, overwrite=perms)
await ctx.send("Changed permissions successfully")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command(hidden=True)
async def userroles(self, ctx, u: discord.Member = None):
"""Gets user roles and their id. Staff only."""
if not u:
u = ctx.author
msg = f"{u}'s Roles:\n\n"
for role in u.roles:
if role.is_default():
continue
msg += f"{role} = {role.id}\n"
await ctx.author.send(msg)
@is_staff("HalfOP")
@commands.guild_only()
@commands.command(hidden=True)
async def serverroles(self, ctx, exp: str):
"""Gets the server roles and their id by regex. Staff only."""
msg = f"Server roles matching `{exp}`:\n\n"
for role in ctx.guild.roles:
if bool(re.search(exp, role.name, re.IGNORECASE)):
msg += f"{role.name} = {role.id}\n"
await ctx.author.send(msg)
@is_staff("OP")
@commands.command(hidden=True)
async def embedtext(self, ctx, *, text):
"""Embed content."""
await ctx.send(embed=discord.Embed(description=text))
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def estprune(self, ctx, days=30):
"""Estimate count of members that would be pruned based on the amount of days. Staff only."""
if days > 30:
await ctx.send("Maximum 30 days")
return
if days < 1:
await ctx.send("Minimum 1 day")
return
msg = await ctx.send("I'm figuring this out!")
async with ctx.channel.typing():
count = await ctx.guild.estimate_pruned_members(days=days)
await msg.edit(content=f"{count:,} members inactive for {days} day(s) would be kicked from {ctx.guild.name}!")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def activecount(self, ctx, days=30):
"""Shows the number of members active in the past amount of days. Staff only."""
if days > 30:
await ctx.send("Maximum 30 days")
return
if days < 1:
await ctx.send("Minimum 1 day")
return
msg = await ctx.send("I'm figuring this out!")
async with ctx.channel.typing():
count = await ctx.guild.estimate_pruned_members(days=days)
if days == 1:
await msg.edit(content=f"{ctx.guild.member_count - count:,} members were active today in {ctx.guild.name}!")
else:
await msg.edit(content=f"{ctx.guild.member_count - count:,} members were active in the past {days} days in {ctx.guild.name}!")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def prune30(self, ctx, key=""):
"""Prune members that are inactive for 30 days. Staff only."""
if self.bot.pruning > 0:
await ctx.send("Pruning is already in progress.")
return
if key != self.prune_key:
if key != "":
await ctx.send("That's not the correct key.")
self.prune_key = ''.join(random.sample(string.ascii_letters, 8))
await ctx.send(
f"Are you sure you want to prune members inactive for 30 days?\nTo see how many members get kicked, use `.estprune`.\nTo confirm the prune, use the command `.prune30 {self.prune_key}`.")
return
self.prune_key = ''.join(random.sample(string.ascii_letters, 8))
await ctx.send("Starting pruning!")
count = await ctx.guild.prune_members(days=30)
self.bot.pruning = count
await self.bot.channels['mods'].send(f"{count:,} are currently being kicked from {ctx.guild.name}!")
msg = f"👢 **Prune**: {ctx.author.mention} pruned {count:,} members"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.command()
async def disableleavelogs(self, ctx):
"""DEBUG COMMAND"""
self.bot.pruning = True
await ctx.send("disable")
@is_staff("HalfOP")
@commands.command()
async def enableleavelogs(self, ctx):
"""DEBUG COMMAND"""
self.bot.pruning = False
await ctx.send("enable")
@commands.command(name="32c3")
async def _32c3(self, ctx):
"""Console Hacking 2015"""
await ctx.send("https://www.youtube.com/watch?v=bZczf57HSag")
@commands.command(name="33c3")
async def _33c3(self, ctx):
"""Nintendo Hacking 2016"""
await ctx.send("https://www.youtube.com/watch?v=8C5cn_Qj0G8")
@commands.command(name="34c3")
async def _34c3(self, ctx):
"""Console Security - Switch"""
await ctx.send("https://www.youtube.com/watch?v=Ec4NgWRE8ik")
@is_staff("Owner")
@commands.guild_only()
@commands.command(hidden=True)
async def dumpchannel(self, ctx, channel: TextChannel, limit=100):
"""Dump 100 messages from a channel to a file."""
await ctx.send(f"Dumping {limit} messages from {channel.mention}")
os.makedirs(f"#{channel.name}-{channel.id}", exist_ok=True)
async for message in channel.history(limit=limit):
with open(f"#{channel.name}-{channel.id}/{message.id}.txt", "w") as f:
f.write(message.content)
await ctx.send("Done!")
@commands.guild_only()
@commands.command(hidden=True)
async def togglechannel(self, ctx, channelname):
"""Enable or disable access to specific channels."""
await ctx.message.delete()
author = ctx.author
if ctx.channel != self.bot.channels['bot-cmds']:
return await ctx.send(f"{ctx.author.mention}: .togglechannel can only be used in <#261581918653513729>.", delete_after=10)
try:
if channelname == "elsewhere":
if self.bot.roles['#elsewhere'] in author.roles:
await author.remove_roles(self.bot.roles['#elsewhere'])
await author.send("Access to #elsewhere removed.")
elif self.bot.roles['No-elsewhere'] not in author.roles:
await author.add_roles(self.bot.roles['#elsewhere'])
await author.send("Access to #elsewhere granted.")
else:
await author.send("Your access to elsewhere is restricted, contact staff to remove it.")
elif channelname == "artswhere":
if self.bot.roles['#art-discussion'] in author.roles:
await author.remove_roles(self.bot.roles['#art-discussion'])
await author.send("Access to #art-discussion removed.")
elif self.bot.roles['No-art'] not in author.roles:
await author.add_roles(self.bot.roles['#art-discussion'])
await author.send("Access to #art-discussion granted.")
else:
await author.send("Your access to #art-discussion is restricted, contact staff to remove it.")
else:
await author.send(f"{channelname} is not a valid toggleable channel.")
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@commands.dm_only()
@commands.cooldown(rate=1, per=21600.0, type=commands.BucketType.member)
@commands.command()
async def nickme(self, ctx, *, nickname):
"""Change your nickname. Nitro Booster and crc only. 6 Hours Cooldown."""
member = self.bot.guild.get_member(ctx.author.id)
if self.bot.roles['crc'] not in member.roles and self.bot.roles['Nitro Booster'] not in member.roles:
return await ctx.send("This command can only be used by Nitro Boosters and members of crc!")
if self.check_nickname(nickname):
try:
await member.edit(nick=nickname)
await ctx.send(f"Your nickname is now `{nickname}`!")
except discord.errors.Forbidden:
await ctx.send("💢 I can't change your nickname! (Permission Error)")
else:
await ctx.send("The nickname doesn't comply with our nickname policy or it's too long!")
ctx.command.reset_cooldown(ctx)
def setup(bot):
bot.add_cog(Extras(bot))
| 41.819672 | 202 | 0.609369 | import datetime
import discord
import os
import random
import re
import string
from utils.checks import is_staff
from discord.ext import commands
from discord import TextChannel
class Extras(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.nick_pattern = re.compile("^[a-z]{2,}.*$", re.RegexFlag.IGNORECASE)
prune_key = "nokey"
def check_nickname(self, nickname):
if match := self.nick_pattern.fullmatch(nickname):
return len(nickname) <= 32
else:
return match
@commands.command(aliases=['about'])
async def kurisu(self, ctx):
embed = discord.Embed(title="Kurisu", color=discord.Color.green())
embed.set_author(name="Started by 916253, maintained by ihaveahax")
embed.set_thumbnail(url="http://i.imgur.com/hjVY4Et.jpg")
embed.url = "https://github.com/nh-server/Kurisu"
embed.description = "Kurisu, the Nintendo Homebrew Discord bot!"
await ctx.send(embed=embed)
@commands.guild_only()
@commands.command()
async def membercount(self, ctx):
await ctx.send(f"{ctx.guild.name} has {ctx.guild.member_count:,} members!")
@commands.command()
async def uptime(self, ctx):
await ctx.send(f"Uptime: {datetime.datetime.now() - self.bot.startup}")
@commands.guild_only()
@is_staff("SuperOP")
@commands.command(hidden=True)
async def copyrole(self, ctx, role: discord.Role, src_channel: discord.TextChannel, des_channels: commands.Greedy[discord.TextChannel]):
perms = src_channel.overwrites_for(role)
for c in des_channels:
await c.set_permissions(role, overwrite=perms)
await ctx.send("Changed permissions successfully")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command(hidden=True)
async def userroles(self, ctx, u: discord.Member = None):
if not u:
u = ctx.author
msg = f"{u}'s Roles:\n\n"
for role in u.roles:
if role.is_default():
continue
msg += f"{role} = {role.id}\n"
await ctx.author.send(msg)
@is_staff("HalfOP")
@commands.guild_only()
@commands.command(hidden=True)
async def serverroles(self, ctx, exp: str):
msg = f"Server roles matching `{exp}`:\n\n"
for role in ctx.guild.roles:
if bool(re.search(exp, role.name, re.IGNORECASE)):
msg += f"{role.name} = {role.id}\n"
await ctx.author.send(msg)
@is_staff("OP")
@commands.command(hidden=True)
async def embedtext(self, ctx, *, text):
await ctx.send(embed=discord.Embed(description=text))
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def estprune(self, ctx, days=30):
if days > 30:
await ctx.send("Maximum 30 days")
return
if days < 1:
await ctx.send("Minimum 1 day")
return
msg = await ctx.send("I'm figuring this out!")
async with ctx.channel.typing():
count = await ctx.guild.estimate_pruned_members(days=days)
await msg.edit(content=f"{count:,} members inactive for {days} day(s) would be kicked from {ctx.guild.name}!")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def activecount(self, ctx, days=30):
if days > 30:
await ctx.send("Maximum 30 days")
return
if days < 1:
await ctx.send("Minimum 1 day")
return
msg = await ctx.send("I'm figuring this out!")
async with ctx.channel.typing():
count = await ctx.guild.estimate_pruned_members(days=days)
if days == 1:
await msg.edit(content=f"{ctx.guild.member_count - count:,} members were active today in {ctx.guild.name}!")
else:
await msg.edit(content=f"{ctx.guild.member_count - count:,} members were active in the past {days} days in {ctx.guild.name}!")
@is_staff("HalfOP")
@commands.guild_only()
@commands.command()
async def prune30(self, ctx, key=""):
if self.bot.pruning > 0:
await ctx.send("Pruning is already in progress.")
return
if key != self.prune_key:
if key != "":
await ctx.send("That's not the correct key.")
self.prune_key = ''.join(random.sample(string.ascii_letters, 8))
await ctx.send(
f"Are you sure you want to prune members inactive for 30 days?\nTo see how many members get kicked, use `.estprune`.\nTo confirm the prune, use the command `.prune30 {self.prune_key}`.")
return
self.prune_key = ''.join(random.sample(string.ascii_letters, 8))
await ctx.send("Starting pruning!")
count = await ctx.guild.prune_members(days=30)
self.bot.pruning = count
await self.bot.channels['mods'].send(f"{count:,} are currently being kicked from {ctx.guild.name}!")
msg = f"👢 **Prune**: {ctx.author.mention} pruned {count:,} members"
await self.bot.channels['mod-logs'].send(msg)
@is_staff("HalfOP")
@commands.command()
async def disableleavelogs(self, ctx):
self.bot.pruning = True
await ctx.send("disable")
@is_staff("HalfOP")
@commands.command()
async def enableleavelogs(self, ctx):
self.bot.pruning = False
await ctx.send("enable")
@commands.command(name="32c3")
async def _32c3(self, ctx):
await ctx.send("https://www.youtube.com/watch?v=bZczf57HSag")
@commands.command(name="33c3")
async def _33c3(self, ctx):
await ctx.send("https://www.youtube.com/watch?v=8C5cn_Qj0G8")
@commands.command(name="34c3")
async def _34c3(self, ctx):
await ctx.send("https://www.youtube.com/watch?v=Ec4NgWRE8ik")
@is_staff("Owner")
@commands.guild_only()
@commands.command(hidden=True)
async def dumpchannel(self, ctx, channel: TextChannel, limit=100):
await ctx.send(f"Dumping {limit} messages from {channel.mention}")
os.makedirs(f"#{channel.name}-{channel.id}", exist_ok=True)
async for message in channel.history(limit=limit):
with open(f"#{channel.name}-{channel.id}/{message.id}.txt", "w") as f:
f.write(message.content)
await ctx.send("Done!")
@commands.guild_only()
@commands.command(hidden=True)
async def togglechannel(self, ctx, channelname):
await ctx.message.delete()
author = ctx.author
if ctx.channel != self.bot.channels['bot-cmds']:
return await ctx.send(f"{ctx.author.mention}: .togglechannel can only be used in <#261581918653513729>.", delete_after=10)
try:
if channelname == "elsewhere":
if self.bot.roles['#elsewhere'] in author.roles:
await author.remove_roles(self.bot.roles['#elsewhere'])
await author.send("Access to #elsewhere removed.")
elif self.bot.roles['No-elsewhere'] not in author.roles:
await author.add_roles(self.bot.roles['#elsewhere'])
await author.send("Access to #elsewhere granted.")
else:
await author.send("Your access to elsewhere is restricted, contact staff to remove it.")
elif channelname == "artswhere":
if self.bot.roles['#art-discussion'] in author.roles:
await author.remove_roles(self.bot.roles['#art-discussion'])
await author.send("Access to #art-discussion removed.")
elif self.bot.roles['No-art'] not in author.roles:
await author.add_roles(self.bot.roles['#art-discussion'])
await author.send("Access to #art-discussion granted.")
else:
await author.send("Your access to #art-discussion is restricted, contact staff to remove it.")
else:
await author.send(f"{channelname} is not a valid toggleable channel.")
except discord.errors.Forbidden:
await ctx.send("💢 I don't have permission to do this.")
@commands.dm_only()
@commands.cooldown(rate=1, per=21600.0, type=commands.BucketType.member)
@commands.command()
async def nickme(self, ctx, *, nickname):
member = self.bot.guild.get_member(ctx.author.id)
if self.bot.roles['crc'] not in member.roles and self.bot.roles['Nitro Booster'] not in member.roles:
return await ctx.send("This command can only be used by Nitro Boosters and members of crc!")
if self.check_nickname(nickname):
try:
await member.edit(nick=nickname)
await ctx.send(f"Your nickname is now `{nickname}`!")
except discord.errors.Forbidden:
await ctx.send("💢 I can't change your nickname! (Permission Error)")
else:
await ctx.send("The nickname doesn't comply with our nickname policy or it's too long!")
ctx.command.reset_cooldown(ctx)
def setup(bot):
bot.add_cog(Extras(bot))
| true | true |
f7367caedc1d4edd7e13f64309d55eeb44425b99 | 92,901 | py | Python | scripts/parameter_validation_generator.py | ziga-lunarg/Vulkan-ValidationLayers | d23bc291bad0f606515d368e51018a5cb30e2ba9 | [
"Apache-2.0"
] | null | null | null | scripts/parameter_validation_generator.py | ziga-lunarg/Vulkan-ValidationLayers | d23bc291bad0f606515d368e51018a5cb30e2ba9 | [
"Apache-2.0"
] | null | null | null | scripts/parameter_validation_generator.py | ziga-lunarg/Vulkan-ValidationLayers | d23bc291bad0f606515d368e51018a5cb30e2ba9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3 -i
#
# Copyright (c) 2015-2021 The Khronos Group Inc.
# Copyright (c) 2015-2021 Valve Corporation
# Copyright (c) 2015-2021 LunarG, Inc.
# Copyright (c) 2015-2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Dustin Graves <dustin@lunarg.com>
# Author: Mark Lobodzinski <mark@lunarg.com>
# Author: Dave Houlton <daveh@lunarg.com>
import os,re,sys,string,json
import xml.etree.ElementTree as etree
from generator import *
from collections import namedtuple
from common_codegen import *
# Helper for iterating over a list where each element is possibly a single element or another 1-dimensional list
def multi_string_iter(lines):
    """Yield (setter, deleter, element) for each leaf element of `lines`.

    `lines` is a list whose entries are either single elements or
    one-dimensional lists of elements.  For each leaf element:
      - element  = the next element in the (flattened) list
      - setter(x) = replaces that entry in `lines` with x
      - deleter() = removes that entry from `lines`

    The target indices are bound at yield time via default arguments, so a
    setter/deleter saved by the caller keeps pointing at the entry it was
    produced for (a bare closure over the loop variables would rebind to the
    final indices).  Note that calling deleter() while iteration continues
    shifts the indices of later entries.
    """
    for i, entry in enumerate(lines):
        if isinstance(entry, list):
            for j, leaf in enumerate(entry):
                def setter(x, i=i, j=j): lines[i][j] = x
                def deleter(i=i, j=j): del(lines[i][j])
                yield (setter, deleter, leaf)
        else:
            def setter(x, i=i): lines[i] = x
            def deleter(i=i): del(lines[i])
            yield (setter, deleter, entry)
# ParameterValidationGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by ParameterValidationOutputGenerator object during Parameter validation layer generation.
#
# Additional members
# protectFile - True if multiple inclusion protection should be
# generated (based on the filename) around the entire header.
# protectFeature - True if #ifndef..#endif protection should be
# generated around a feature interface in the header file.
# genFuncPointers - True if function pointer typedefs should be
# generated
# protectProto - If conditional protection should be generated
# around prototype declarations, set to either '#ifdef'
# to require opt-in (#ifdef protectProtoStr) or '#ifndef'
# to require opt-out (#ifndef protectProtoStr). Otherwise
# set to None.
# protectProtoStr - #ifdef/#ifndef symbol to use around prototype
# declarations, if protectProto is set
# apicall - string to use for the function declaration prefix,
# such as APICALL on Windows.
# apientry - string to use for the calling convention macro,
# in typedefs, such as APIENTRY.
# apientryp - string to use for the calling convention macro
# in function pointer typedefs, such as APIENTRYP.
# indentFuncProto - True if prototype declarations should put each
# parameter on a separate line
# indentFuncPointer - True if typedefed function pointers should put each
# parameter on a separate line
# alignFuncParam - if nonzero and parameters are being put on a
# separate line, align parameter names at the specified column
class ParameterValidationGeneratorOptions(GeneratorOptions):
    """Generator options for the parameter-validation layer.

    Extends GeneratorOptions with C-declaration formatting controls
    (calling-convention macros, prototype indentation and parameter
    alignment) plus the directory used to locate validusage.json.
    """
    def __init__(self,
                 conventions = None,
                 filename = None,
                 directory = '.',
                 genpath = None,
                 apiname = 'vulkan',
                 profile = None,
                 versions = '.*',
                 emitversions = '.*',
                 defaultExtensions = 'vulkan',
                 addExtensions = None,
                 removeExtensions = None,
                 emitExtensions = None,
                 emitSpirv = None,
                 sortProcedure = regSortFeatures,
                 apicall = 'VKAPI_ATTR ',
                 apientry = 'VKAPI_CALL ',
                 apientryp = 'VKAPI_PTR *',
                 indentFuncProto = True,
                 indentFuncPointer = False,
                 alignFuncParam = 48,
                 expandEnumerants = False,
                 valid_usage_path = ''):
        # Registry-selection arguments are handled entirely by the base class.
        super().__init__(conventions=conventions, filename=filename,
                         directory=directory, genpath=genpath,
                         apiname=apiname, profile=profile,
                         versions=versions, emitversions=emitversions,
                         defaultExtensions=defaultExtensions,
                         addExtensions=addExtensions,
                         removeExtensions=removeExtensions,
                         emitExtensions=emitExtensions,
                         emitSpirv=emitSpirv,
                         sortProcedure=sortProcedure)
        # Formatting of emitted C declarations.
        self.apicall = apicall
        self.apientry = apientry
        self.apientryp = apientryp
        self.indentFuncProto = indentFuncProto
        self.indentFuncPointer = indentFuncPointer
        self.alignFuncParam = alignFuncParam
        self.expandEnumerants = expandEnumerants
        # Directory containing validusage.json.
        self.valid_usage_path = valid_usage_path
# ParameterValidationOutputGenerator - subclass of OutputGenerator.
# Generates param checker layer code.
#
# ---- methods ----
# ParamCheckerOutputGenerator(errFile, warnFile, diagFile) - args as for
# OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class ParameterValidationOutputGenerator(OutputGenerator):
"""Generate Parameter Validation code based on XML element attributes"""
# This is an ordered list of sections in the header file.
ALL_SECTIONS = ['command']
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
self.INDENT_SPACES = 4
self.declarations = []
inline_custom_source_preamble = """
"""
# These functions have additional, custom-written checks in the utils cpp file. CodeGen will automatically add a call
# to those functions of the form 'bool manual_PreCallValidateAPIName', where the 'vk' is dropped.
# see 'manual_PreCallValidateCreateGraphicsPipelines' as an example.
self.functions_with_manual_checks = [
'vkCreateInstance',
'vkCreateDevice',
'vkCreateQueryPool',
'vkCreateRenderPass',
'vkCreateRenderPass2',
'vkCreateRenderPass2KHR',
'vkCreateBuffer',
'vkCreateImage',
'vkCreatePipelineLayout',
'vkCreateGraphicsPipelines',
'vkCreateComputePipelines',
'vkCreateRayTracingPipelinesNV',
'vkCreateRayTracingPipelinesKHR',
'vkCreateSampler',
'vkCreateDescriptorSetLayout',
'vkFreeDescriptorSets',
'vkUpdateDescriptorSets',
'vkBeginCommandBuffer',
'vkCmdSetViewport',
'vkCmdSetScissor',
'vkCmdSetLineWidth',
'vkCmdDrawIndirect',
'vkCmdDrawIndexedIndirect',
'vkCmdDrawMultiEXT',
'vkCmdDrawMultiIndexedEXT',
'vkCmdClearAttachments',
'vkCmdBindIndexBuffer',
'vkCmdCopyBuffer',
'vkCmdUpdateBuffer',
'vkCmdFillBuffer',
'vkCreateSwapchainKHR',
'vkCreateSharedSwapchainsKHR',
'vkQueuePresentKHR',
'vkCreateDescriptorPool',
'vkCmdDispatch',
'vkCmdDispatchIndirect',
'vkCmdDispatchBaseKHR',
'vkCmdPushDescriptorSetKHR',
'vkCmdSetExclusiveScissorNV',
'vkCmdSetViewportShadingRatePaletteNV',
'vkCmdSetCoarseSampleOrderNV',
'vkCmdDrawMeshTasksNV',
'vkCmdDrawMeshTasksIndirectNV',
'vkCmdDrawMeshTasksIndirectCountNV',
'vkAllocateMemory',
'vkCreateAccelerationStructureNV',
'vkCreateAccelerationStructureKHR',
'vkGetAccelerationStructureHandleNV',
'vkGetPhysicalDeviceImageFormatProperties',
'vkGetPhysicalDeviceImageFormatProperties2',
'vkGetPhysicalDeviceImageFormatProperties2KHR',
'vkCmdBuildAccelerationStructureNV',
'vkCreateFramebuffer',
'vkCmdSetLineStippleEXT',
'vkSetDebugUtilsObjectNameEXT',
'vkSetDebugUtilsObjectTagEXT',
'vkCmdSetViewportWScalingNV',
'vkAcquireNextImageKHR',
'vkAcquireNextImage2KHR',
'vkCmdBindTransformFeedbackBuffersEXT',
'vkCmdBeginTransformFeedbackEXT',
'vkCmdEndTransformFeedbackEXT',
'vkCmdDrawIndirectByteCountEXT',
'vkCreateSamplerYcbcrConversion',
'vkCreateSamplerYcbcrConversionKHR',
'vkImportSemaphoreFdKHR',
'vkCmdBindVertexBuffers',
'vkCreateImageView',
'vkCopyAccelerationStructureToMemoryKHR',
'vkCmdCopyAccelerationStructureToMemoryKHR',
'vkCopyAccelerationStructureKHR',
'vkCmdCopyAccelerationStructureKHR',
'vkCopyMemoryToAccelerationStructureKHR',
'vkCmdCopyMemoryToAccelerationStructureKHR',
'vkCmdDrawIndirectCount',
'vkCmdDrawIndirectCountKHR',
'vkCmdDrawIndexedIndirectCount',
'vkCmdDrawIndexedIndirectCountKHR',
'vkCmdWriteAccelerationStructuresPropertiesKHR',
'vkWriteAccelerationStructuresPropertiesKHR',
'vkGetRayTracingCaptureReplayShaderGroupHandlesKHR',
'vkCmdTraceRaysKHR',
'vkCmdTraceRaysNV',
'vkCmdTraceRaysIndirectKHR',
'vkCmdBuildAccelerationStructureIndirectKHR',
'vkGetDeviceAccelerationStructureCompatibilityKHR',
'vkCmdSetViewportWithCountEXT',
'vkCmdSetScissorWithCountEXT',
'vkCmdBindVertexBuffers2EXT',
'vkCmdCopyBuffer2KHR',
'vkCmdBuildAccelerationStructuresKHR',
'vkCmdBuildAccelerationStructuresIndirectKHR',
'vkBuildAccelerationStructuresKHR',
'vkGetAccelerationStructureBuildSizesKHR',
'vkCmdWriteAccelerationStructuresPropertiesNV',
'vkCreateDisplayModeKHR',
'vkCreatePrivateDataSlotEXT',
'vkCmdSetVertexInputEXT',
'vkCmdPushConstants',
'vkMergePipelineCaches',
'vkGetPhysicalDeviceVideoFormatPropertiesKHR',
'vkCmdClearColorImage',
'vkCmdBeginRenderPass',
'vkCmdBeginRenderPass2KHR',
'vkCmdBeginRenderPass2',
'vkCmdSetDiscardRectangleEXT',
'vkGetQueryPoolResults',
'vkCmdBeginConditionalRenderingEXT',
'vkCreateWin32SurfaceKHR'
]
# Commands to ignore
self.blacklist = [
'vkGetInstanceProcAddr',
'vkGetDeviceProcAddr',
'vkEnumerateInstanceVersion',
'vkEnumerateInstanceLayerProperties',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumerateDeviceExtensionProperties',
'vkGetDeviceGroupSurfacePresentModes2EXT'
]
# Structure fields to ignore
self.structMemberBlacklist = { 'VkWriteDescriptorSet' : ['dstSet'], 'VkAccelerationStructureGeometryKHR' :['geometry'] }
# Validation conditions for some special case struct members that are conditionally validated
self.structMemberValidationConditions = { 'VkPipelineColorBlendStateCreateInfo' : { 'logicOp' : '{}logicOpEnable == VK_TRUE' } }
# Header version
self.headerVersion = None
# Internal state - accumulators for different inner block text
self.validation = [] # Text comprising the main per-api parameter validation routines
self.stypes = [] # Values from the VkStructureType enumeration
self.structTypes = dict() # Map of Vulkan struct typename to required VkStructureType
self.handleTypes = set() # Set of handle type names
self.commands = [] # List of CommandData records for all Vulkan commands
self.structMembers = [] # List of StructMemberData records for all Vulkan structs
self.validatedStructs = dict() # Map of structs type names to generated validation code for that struct type
self.enumRanges = set() # Set of enum names
self.enum_values_definitions = dict() # [enum, string] containing enumerated type map definitions
self.flag_values_definitions = dict() # [flag, string] containing flag type map definitions
self.stype_version_dict = dict() # String containing structtype to version map data
self.flags = set() # Map of flags typenames
self.flagBits = dict() # Map of flag bits typename to list of values
self.newFlags = set() # Map of flags typenames /defined in the current feature/
self.required_extensions = dict() # Dictionary of required extensions for each item in the current extension
self.extension_type = '' # Type of active feature (extension), device or instance
self.extension_names = dict() # Dictionary of extension names to extension name defines
self.structextends_list = [] # List of extensions which extend another struct
self.struct_feature_protect = dict() # Dictionary of structnames and FeatureExtraProtect strings
self.valid_vuids = set() # Set of all valid VUIDs
self.vuid_dict = dict() # VUID dictionary (from JSON)
self.alias_dict = dict() # Dict of cmd|struct aliases
self.header_file = False # Header file generation flag
self.source_file = False # Source file generation flag
self.instance_extension_list = '' # List of instance extension name defines
self.device_extension_list = '' # List of device extension name defines
self.returnedonly_structs = [] # List of structs with 'returnonly' attribute
self.called_types = set() # Set of types called via function/struct - not in list == app never passes in to validate
# Named tuples to store struct and command data
self.CommandParam = namedtuple('CommandParam', ['type', 'name', 'ispointer', 'isstaticarray', 'isbool', 'israngedenum',
'isconst', 'isoptional', 'iscount', 'noautovalidity',
'len', 'extstructs', 'condition', 'cdecl'])
self.CommandData = namedtuple('CommandData', ['name', 'params', 'cdecl', 'extension_type', 'result', 'promotion_info'])
self.StructMemberData = namedtuple('StructMemberData', ['name', 'members'])
#
# Generate Copyright comment block for file
def GenerateCopyright(self):
copyright = '/* *** THIS FILE IS GENERATED - DO NOT EDIT! ***\n'
copyright += ' * See parameter_validation_generator.py for modifications\n'
copyright += ' *\n'
copyright += ' * Copyright (c) 2015-2021 The Khronos Group Inc.\n'
copyright += ' * Copyright (c) 2015-2021 LunarG, Inc.\n'
copyright += ' * Copyright (C) 2015-2021 Google Inc.\n'
copyright += ' *\n'
copyright += ' * Licensed under the Apache License, Version 2.0 (the "License");\n'
copyright += ' * you may not use this file except in compliance with the License.\n'
copyright += ' * Copyright (c) 2015-2017 Valve Corporation\n'
copyright += ' * You may obtain a copy of the License at\n'
copyright += ' *\n'
copyright += ' * http://www.apache.org/licenses/LICENSE-2.0\n'
copyright += ' *\n'
copyright += ' * Unless required by applicable law or agreed to in writing, software\n'
copyright += ' * distributed under the License is distributed on an "AS IS" BASIS,\n'
copyright += ' * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
copyright += ' * See the License for the specific language governing permissions and\n'
copyright += ' * limitations under the License.\n'
copyright += ' *\n'
copyright += ' * Author: Mark Lobodzinski <mark@LunarG.com>\n'
copyright += ' * Author: Dave Houlton <daveh@LunarG.com>\n'
copyright += ' */\n\n'
return copyright
#
# Increases the global indent variable
def incIndent(self, indent):
inc = ' ' * self.INDENT_SPACES
if indent:
return indent + inc
return inc
#
# Decreases the global indent variable
def decIndent(self, indent):
if indent and (len(indent) > self.INDENT_SPACES):
return indent[:-self.INDENT_SPACES]
return ''
#
# Walk the JSON-derived dict and find all "vuid" key values
def ExtractVUIDs(self, d):
if hasattr(d, 'items'):
for k, v in d.items():
if k == "vuid":
yield v
elif isinstance(v, dict):
for s in self.ExtractVUIDs(v):
yield s
elif isinstance (v, list):
for l in v:
for s in self.ExtractVUIDs(l):
yield s
#
# Called at file creation time
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
self.header_file = (genOpts.filename == 'parameter_validation.h')
self.source_file = (genOpts.filename == 'parameter_validation.cpp')
if not self.header_file and not self.source_file:
print("Error: Output Filenames have changed, update generator source.\n")
sys.exit(1)
if self.source_file or self.header_file:
# Output Copyright text
s = self.GenerateCopyright()
write(s, file=self.outFile)
if self.header_file:
return
stype_map = ''
stype_version_dict = dict()
# Create contents of Structs->API version unordered map
root = self.registry.reg
for node in root.findall('feature'):
version_name = node.get('name')
version_name = version_name.replace('VK_', 'VK_API_')
for enum_item in node.iter('enum'):
if enum_item.get('extends') == "VkStructureType":
struct_type_id = enum_item.get('name')
self.stype_version_dict[struct_type_id] = version_name
for extensions in root.findall('extensions'):
for extension in extensions.findall('extension'):
for entry in extension.iterfind('require/enum[@extends="VkStructureType"]'):
alias = entry.get('alias')
if alias is not None and (entry.get('comment') is None or 'typo' not in entry.get('comment')):
self.stype_version_dict[alias] = extension.get('name')
# Build map of structure type names to VkStructureType enum values
# Find all types of category "struct"
for struct in self.registry.tree.iterfind('types/type[@category="struct"]'):
# Check if struct has member named "sType" of type "VkStructureType" which has values defined
stype = struct.find('member[name="sType"][type="VkStructureType"][@values]')
if stype is not None:
# Store VkStructureType value for this type
self.structTypes[struct.get('name')] = stype.get('values')
self.valid_usage_path = genOpts.valid_usage_path
vu_json_filename = os.path.join(self.valid_usage_path + os.sep, 'validusage.json')
if os.path.isfile(vu_json_filename):
json_file = open(vu_json_filename, 'r', encoding='utf-8')
self.vuid_dict = json.load(json_file)
json_file.close()
if len(self.vuid_dict) == 0:
print("Error: Could not find, or error loading %s/validusage.json\n", vu_json_filename)
sys.exit(1)
#
# Build a set of all vuid text strings found in validusage.json
for json_vuid_string in self.ExtractVUIDs(self.vuid_dict):
self.valid_vuids.add(json_vuid_string)
#
# Headers
write('#include "chassis.h"', file=self.outFile)
self.newline()
write('#include "stateless_validation.h"', file=self.outFile)
self.newline()
#
# Called at end-time for final content output
def endFile(self):
if self.source_file:
# C-specific
self.newline()
# Don't need flag/enum lists if app can never call it to be validated
# But need to save everything as not all information is known until endFile()
for flag, string in self.flag_values_definitions.items():
if flag == 'VkGeometryInstanceFlagsKHR':
# only called in VkAccelerationStructureInstanceKHR which is never called anywhere explicitly
continue
flagBits = flag.replace('Flags', 'FlagBits')
if flag in self.called_types or flagBits in self.called_types:
write(string, file=self.outFile)
for enum, string in self.enum_values_definitions.items():
if enum in self.called_types:
write(string, file=self.outFile)
self.newline()
self.newline()
api_func = 'bool StatelessValidation::CheckPromotedApiAgainstVulkanVersion(VkInstance instance, const char *api_name, const uint32_t promoted_version) const {\n'
api_func += ' bool skip = false;\n'
api_func += ' if (api_version < promoted_version) {\n'
api_func += ' skip = LogError(instance,\n'
api_func += ' kVUID_PVError_ApiVersionViolation, "Attemped to call %s() with an effective API version of %s"\n'
api_func += ' "but this API was not promoted until version %s.", api_name, StringAPIVersion(api_version).c_str(),\n'
api_func += ' StringAPIVersion(promoted_version).c_str());\n'
api_func += ' }\n'
api_func += ' return skip;\n'
api_func += '}\n\n'
api_func += 'bool StatelessValidation::CheckPromotedApiAgainstVulkanVersion(VkPhysicalDevice pdev, const char *api_name, const uint32_t promoted_version) const {\n'
api_func += ' bool skip = false;\n'
api_func += ' const auto &target_pdev = physical_device_properties_map.find(pdev);\n'
api_func += ' if (target_pdev != physical_device_properties_map.end()) {\n'
api_func += ' auto effective_api_version = std::min(target_pdev->second->apiVersion, api_version);\n'
api_func += ' if (effective_api_version < promoted_version) {\n'
api_func += ' skip = LogError(instance,\n'
api_func += ' kVUID_PVError_ApiVersionViolation, "Attemped to call %s() with an effective API version of %s, "\n'
api_func += ' "which is the minimum of version requested in pApplicationInfo (%s) and supported by this physical device (%s), "\n'
api_func += ' "but this API was not promoted until version %s.", api_name, StringAPIVersion(effective_api_version).c_str(),\n'
api_func += ' StringAPIVersion(api_version).c_str(), StringAPIVersion(target_pdev->second->apiVersion).c_str(),\n'
api_func += ' StringAPIVersion(promoted_version).c_str());\n'
api_func += ' }\n'
api_func += ' }\n'
api_func += ' return skip;\n'
api_func += '}\n'
write(api_func, file=self.outFile)
pnext_handler = 'bool StatelessValidation::ValidatePnextStructContents(const char *api_name, const ParameterName ¶meter_name,\n'
pnext_handler += ' const VkBaseOutStructure* header, const char *pnext_vuid, bool is_physdev_api, bool is_const_param) const {\n'
pnext_handler += ' bool skip = false;\n'
pnext_handler += ' switch(header->sType) {\n'
# Do some processing here to extract data from validatedstructs...
for item in self.structextends_list:
postProcSpec = {}
postProcSpec['ppp'] = '' if not item else '{postProcPrefix}'
postProcSpec['pps'] = '' if not item else '{postProcSuffix}'
postProcSpec['ppi'] = '' if not item else '{postProcInsert}'
pnext_case = '\n'
pnext_check = ''
protect = ''
# Guard struct cases with feature ifdefs, if necessary
if item in self.struct_feature_protect.keys():
protect = self.struct_feature_protect[item]
pnext_case += '#ifdef %s\n' % protect
pnext_case += ' // Validation code for %s structure members\n' % item
pnext_case += ' case %s: { // Covers VUID-%s-sType-sType\n' % (self.structTypes[item], item)
# pNext version/extension-enabled checks
ver_info = ''
struct_type = self.structTypes[item]
if struct_type in self.stype_version_dict.keys():
ver_info = self.stype_version_dict[struct_type]
else:
struct_type[:-4]
if struct_type[:-4] in self.stype_version_dict.values():
ver_info = self.stype_version_dict[struct_type[:-4]]
else:
ver_info = None
api_check = False
if ver_info is not None:
if 'VK_API_VERSION_' in ver_info:
api_check = True
api_version = ver_info;
pnext_check += ' if (api_version < %s) {\n' % ver_info
pnext_check += ' skip |= LogError(\n'
pnext_check += ' instance, pnext_vuid,\n'
pnext_check += ' "%%s: Includes a pNext pointer (%%s) to a VkStructureType (%s) which was added in %s but the "\n' % (struct_type, ver_info)
pnext_check += ' "current effective API version is %s.",\n'
pnext_check += ' api_name, parameter_name.get_name().c_str(), StringAPIVersion(api_version).c_str());\n'
pnext_check += ' }\n'
else:
# Dependent on enabled extension
ext_name = ver_info
ext_name_define = self.extension_names[ver_info]
table_type = ''
if ext_name_define in self.instance_extension_list:
table_type = 'instance'
elif ext_name_define in self.device_extension_list:
table_type = 'device'
else:
print("Error in parameter_validation_generator.py CodeGen.")
pnext_check += ' if (is_const_param) {\n'
if table_type == 'device':
pnext_check += f' if ((is_physdev_api && !SupportedByPdev(physical_device, {ext_name_define})) || (!is_physdev_api && !IsExtEnabled({table_type}_extensions.{ext_name.lower()}))) {{\n'
else:
pnext_check += ' if (!%s_extensions.%s) {\n' % (table_type, ext_name.lower())
pnext_check += ' skip |= LogError(\n'
pnext_check += ' instance, pnext_vuid,\n'
pnext_check += ' "%%s: Includes a pNext pointer (%%s) to a VkStructureType (%s), but its parent extension "\n' % struct_type
pnext_check += ' "%s has not been enabled.",\n' % ext_name
pnext_check += ' api_name, parameter_name.get_name().c_str());\n'
pnext_check += ' }\n'
pnext_check += ' }\n'
pnext_check += '\n'
expr = self.expandStructCode(item, item, 'structure->', '', ' ', [], postProcSpec)
struct_validation_source = self.ScrubStructCode(expr)
if struct_validation_source != '':
pnext_check += ' if (is_const_param) {\n'
struct_validation_source = ' %s *structure = (%s *) header;\n' % (item, item) + struct_validation_source
struct_validation_source += ' }\n'
pnext_case += '%s%s' % (pnext_check, struct_validation_source)
pnext_case += ' } break;\n'
if protect:
pnext_case += '#endif // %s\n' % protect
# Skip functions containing no validation
if struct_validation_source or pnext_check != '':
pnext_handler += pnext_case;
else:
pnext_handler += '\n // No Validation code for %s structure members -- Covers VUID-%s-sType-sType\n' % (item, item)
pnext_handler += ' default:\n'
pnext_handler += ' skip = false;\n'
pnext_handler += ' }\n'
pnext_handler += ' return skip;\n'
pnext_handler += '}\n'
write(pnext_handler, file=self.outFile)
self.newline()
ext_template = 'bool StatelessValidation::OutputExtensionError(const std::string &api_name, const std::string &extension_name) const {\n'
ext_template += ' return LogError(instance,\n'
ext_template += ' kVUID_PVError_ExtensionNotEnabled, "Attemped to call %s() but its required extension %s has not been enabled\\n",\n'
ext_template += ' api_name.c_str(), extension_name.c_str());\n'
ext_template += '}\n'
write(ext_template, file=self.outFile)
self.newline()
commands_text = '\n'.join(self.validation)
write(commands_text, file=self.outFile)
self.newline()
if self.header_file:
# Output declarations and record intercepted procedures
write('\n'.join(self.declarations), file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFile(self)
#
# Processing at beginning of each feature or extension
    def beginFeature(self, interface, emit):
        """Begin processing a core version <feature> or an <extension> block.

        Resets the per-feature accumulators, records the feature's platform
        protection macro, and builds the per-item extension-dependency map
        used later when emitting extension-enabled checks.
        """
        # Start processing in superclass
        OutputGenerator.beginFeature(self, interface, emit)
        # C-specific
        # Accumulate includes, defines, types, enums, function pointer typedefs, end function prototypes separately for this
        # feature. They're only printed in endFeature().
        self.headerVersion = None
        self.stypes = []
        self.commands = []
        self.structMembers = []
        self.newFlags = set()
        self.featureExtraProtect = GetFeatureProtect(interface)
        # Get base list of extension dependencies for all items in this extension
        base_required_extensions = []
        # Core version features (VK_VERSION_1_x) carry no extension dependency.
        if "VK_VERSION_1" not in self.featureName:
            # NOTE(review): assumes the name-define enum is the second child of the
            # first <require> block -- matches current vk.xml layout; confirm on update.
            nameElem = interface[0][1]
            name = nameElem.get('name')
            # Save Name Define to get correct enable name later
            self.extension_names[self.featureName] = name
            # This extension is the first dependency for this command
            base_required_extensions.append(self.featureName)
        # Add any defined extension dependencies to the base dependency list for this extension
        requires = interface.get('requires')
        if requires is not None:
            base_required_extensions.extend(requires.split(','))
        # Build dictionary of extension dependencies for each item in this extension
        self.required_extensions = dict()
        for require_element in interface.findall('require'):
            # Copy base extension dependency list
            required_extensions = list(base_required_extensions)
            # Add any additional extension dependencies specified in this require block
            additional_extensions = require_element.get('extension')
            if additional_extensions:
                required_extensions.extend(additional_extensions.split(','))
            # Save full extension list for all named items
            for element in require_element.findall('*[@name]'):
                self.required_extensions[element.get('name')] = required_extensions
        # And note if this is an Instance or Device extension
        self.extension_type = interface.get('type')
        if interface.tag == 'extension':
            if interface.get('type') == 'instance':
                self.instance_extension_list += '%s, ' % GetNameDefine(interface)
            else:
                self.device_extension_list += '%s, ' % GetNameDefine(interface)
#
# Called at the end of each extension (feature)
    def endFeature(self):
        """Finish a feature: generate and record its validation code.

        The header-file pass does nothing here.  For the source file, emits
        the header-version constant and the All*FlagBits definitions and
        appends the generated struct/command checks to self.validation,
        wrapped in the feature's platform #ifdef guard when one applies.
        """
        if self.header_file:
            return
        # C-specific
        # Actually write the interface to the output file.
        if (self.emit):
            # If type declarations are needed by other features based on this one, it may be necessary to suppress the ExtraProtect,
            # or move it below the 'for section...' loop.
            ifdef = ''
            if (self.featureExtraProtect is not None):
                ifdef = '#ifdef %s\n' % self.featureExtraProtect
            self.validation.append(ifdef)
            # Generate the struct member checking code from the captured data
            self.processStructMemberData()
            # Generate the command parameter checking code from the captured data
            self.processCmdData()
            # Write the declaration for the HeaderVersion
            if self.headerVersion:
                write('const uint32_t GeneratedVulkanHeaderVersion = {};'.format(self.headerVersion), file=self.outFile)
            # Write the declarations for the VkFlags values combining all flag bits
            for flag in sorted(self.newFlags):
                flagBits = flag.replace('Flags', 'FlagBits')
                if flagBits in self.flagBits:
                    bits = self.flagBits[flagBits]
                    decl = 'const {} All{} = {}'.format(flag, flagBits, bits[0])
                    for bit in bits[1:]:
                        decl += '|' + bit
                    decl += ';'
                    # Saved (not written) so endFile() can filter by called_types.
                    self.flag_values_definitions[flag] = Guarded(self.featureExtraProtect, decl)
            endif = '\n'
            if (self.featureExtraProtect is not None):
                endif = '#endif // %s\n' % self.featureExtraProtect
            self.validation.append(endif)
        # Finish processing in superclass
        OutputGenerator.endFeature(self)
#
# Type generation
def genType(self, typeinfo, name, alias):
# record the name/alias pair
if alias is not None:
self.alias_dict[name]=alias
OutputGenerator.genType(self, typeinfo, name, alias)
typeElem = typeinfo.elem
# If the type is a struct type, traverse the embedded <member> tags generating a structure. Otherwise, emit the tag text.
category = typeElem.get('category')
if (category == 'struct' or category == 'union'):
self.genStruct(typeinfo, name, alias)
elif (category == 'handle'):
self.handleTypes.add(name)
elif (category == 'bitmask'):
self.flags.add(name)
self.newFlags.add(name)
elif (category == 'define'):
if name == 'VK_HEADER_VERSION':
nameElem = typeElem.find('name')
self.headerVersion = noneStr(nameElem.tail).strip()
#
# Struct parameter check generation.
# This is a special case of the <type> tag where the contents are interpreted as a set of <member> tags instead of freeform C
# type declarations. The <member> tags are just like <param> tags - they are a declaration of a struct or union member.
# Only simple member declarations are supported (no nested structs etc.)
def genStruct(self, typeinfo, typeName, alias):
if not self.source_file:
return
# alias has already been recorded in genType, above
OutputGenerator.genStruct(self, typeinfo, typeName, alias)
conditions = self.structMemberValidationConditions[typeName] if typeName in self.structMemberValidationConditions else None
members = typeinfo.elem.findall('.//member')
if self.featureExtraProtect is not None:
self.struct_feature_protect[typeName] = self.featureExtraProtect
#
# Iterate over members once to get length parameters for arrays
lens = set()
for member in members:
len = self.getLen(member)
if len:
lens.add(len)
#
# Generate member info
membersInfo = []
returned_only = typeinfo.elem.attrib.get('returnedonly') is not None
for member in members:
# Get the member's type and name
info = self.getTypeNameTuple(member)
type = info[0]
name = info[1]
stypeValue = ''
cdecl = self.makeCParamDecl(member, 0)
ispointer = self.paramIsPointer(member)
isconst = True if 'const' in cdecl else False
# Store pointer/array/string info -- Check for parameter name in lens set
iscount = False
if name in lens:
iscount = True
# The pNext members are not tagged as optional, but are treated as optional for parameter NULL checks. Static array
# members are also treated as optional to skip NULL pointer validation, as they won't be NULL.
isstaticarray = self.paramIsStaticArray(member)
isoptional = False
if self.paramIsOptional(member) or (name == 'pNext') or (isstaticarray):
isoptional = True
# Determine if value should be ignored by code generation.
noautovalidity = False
if (member.attrib.get('noautovalidity') is not None) or ((typeName in self.structMemberBlacklist) and (name in self.structMemberBlacklist[typeName])):
noautovalidity = True
# Some types are marked as noautovalidity, but stateless_validation.h will still want them for manual validation
noautovalidity_type_exceptions = [
"VkQueryPipelineStatisticFlags",
"VkBorderColor"
]
# Store all types that are from incoming calls if auto validity
# non-const pointers don't have auto gen code as used for return values
if (noautovalidity == False) or (type in noautovalidity_type_exceptions):
if not returned_only and (not ispointer or isconst):
self.called_types.add(type)
structextends = False
membersInfo.append(self.CommandParam(type=type, name=name,
ispointer=ispointer,
isstaticarray=isstaticarray,
isbool=True if type == 'VkBool32' else False,
israngedenum=True if type in self.enumRanges else False,
isconst=isconst,
isoptional=isoptional,
iscount=iscount,
noautovalidity=noautovalidity,
len=self.getLen(member),
extstructs=self.registry.validextensionstructs[typeName] if name == 'pNext' else None,
condition=conditions[name] if conditions and name in conditions else None,
cdecl=cdecl))
# If this struct extends another, keep its name in list for further processing
if typeinfo.elem.attrib.get('structextends') is not None:
self.structextends_list.append(typeName)
# Returnedonly structs should have most of their members ignored -- on entry, we only care about validating the sType and
# pNext members. Everything else will be overwritten by the callee.
if returned_only:
self.returnedonly_structs.append(typeName)
membersInfo = [m for m in membersInfo if m.name in ('sType', 'pNext')]
self.structMembers.append(self.StructMemberData(name=typeName, members=membersInfo))
#
# Capture group (e.g. C "enum" type) info to be used for param check code generation.
# These are concatenated together with other types.
    def genGroup(self, groupinfo, groupName, alias):
        """Capture enum/bitmask group info for parameter check generation.

        Records sType values, the members of each FlagBits type, and for
        regular enums builds the AllXxxEnums vector definition used for
        ranged-enum validation.  Source-file only.
        """
        if not self.source_file:
            return
        # record the name/alias pair
        if alias is not None:
            self.alias_dict[groupName]=alias
        OutputGenerator.genGroup(self, groupinfo, groupName, alias)
        groupElem = groupinfo.elem
        # Store the sType values
        if groupName == 'VkStructureType':
            for elem in groupElem.findall('enum'):
                self.stypes.append(elem.get('name'))
        elif 'FlagBits' in groupName:
            bits = []
            for elem in groupElem.findall('enum'):
                # Skip bits belonging to disabled (unsupported) extensions.
                if elem.get('supported') != 'disabled':
                    bits.append(elem.get('name'))
            if bits:
                self.flagBits[groupName] = bits
        else:
            # Determine if begin/end ranges are needed (we don't do this for VkStructureType, which has a more finely grained check)
            # Convert the mixed-case type name to UPPER_SNAKE_CASE.
            expandName = re.sub(r'([0-9a-z_])([A-Z0-9][^A-Z0-9]?)',r'\1_\2',groupName).upper()
            expandPrefix = expandName
            expandSuffix = ''
            # A trailing run of 2+ capitals is a vendor/author suffix (KHR, EXT, ...).
            expandSuffixMatch = re.search(r'[A-Z][A-Z]+$',groupName)
            if expandSuffixMatch:
                expandSuffix = '_' + expandSuffixMatch.group()
                # Strip off the suffix from the prefix
                expandPrefix = expandName.rsplit(expandSuffix, 1)[0]
            isEnum = ('FLAG_BITS' not in expandPrefix)
            if isEnum:
                self.enumRanges.add(groupName)
                # Create definition for a list containing valid enum values for this enumerated type
                if self.featureExtraProtect is not None:
                    enum_entry = '#ifdef %s\n' % self.featureExtraProtect
                else:
                    enum_entry = ''
                enum_entry += 'const std::vector<%s> All%sEnums = {' % (groupName, groupName)
                for enum in groupElem:
                    name = enum.get('name')
                    if name is not None and enum.get('supported') != 'disabled':
                        enum_entry += '%s, ' % name
                enum_entry += '};'
                if self.featureExtraProtect is not None:
                    enum_entry += '\n#endif // %s' % self.featureExtraProtect
                # Saved (not written) so endFile() can filter by called_types.
                self.enum_values_definitions[groupName] = enum_entry
#
# Capture command parameter info to be used for param check code generation.
def genCmd(self, cmdinfo, name, alias):
# record the name/alias pair
if alias is not None:
self.alias_dict[name]=alias
OutputGenerator.genCmd(self, cmdinfo, name, alias)
decls = self.makeCDecls(cmdinfo.elem)
typedef = decls[1]
typedef = typedef.split(')',1)[1]
if self.header_file:
if name not in self.blacklist:
if (self.featureExtraProtect is not None):
self.declarations += [ '#ifdef %s' % self.featureExtraProtect ]
# Strip off 'vk' from API name
decl = '%s%s' % ('bool PreCallValidate', decls[0].split("VKAPI_CALL vk")[1])
decl_terminator = ' const override;'
if 'ValidationCache' in name:
decl_terminator = ' const;'
decl = str(decl).replace(';', decl_terminator)
self.declarations += [ decl ]
if (self.featureExtraProtect is not None):
self.declarations += [ '#endif' ]
if self.source_file:
if name not in self.blacklist:
params = cmdinfo.elem.findall('param')
# Get list of array lengths
lens = set()
for param in params:
len = self.getLen(param)
if len:
lens.add(len)
# Get param info
paramsInfo = []
for param in params:
paramInfo = self.getTypeNameTuple(param)
cdecl = self.makeCParamDecl(param, 0)
ispointer = self.paramIsPointer(param)
isconst = True if 'const' in cdecl else False
# non-const pointers don't have auto gen code as used for return values
if not ispointer or isconst:
self.called_types.add(paramInfo[0])
# Check for parameter name in lens set
iscount = False
if paramInfo[1] in lens:
iscount = True
paramsInfo.append(self.CommandParam(type=paramInfo[0], name=paramInfo[1],
ispointer=ispointer,
isstaticarray=self.paramIsStaticArray(param),
isbool=True if paramInfo[0] == 'VkBool32' else False,
israngedenum=True if paramInfo[0] in self.enumRanges else False,
isconst=isconst,
isoptional=self.paramIsOptional(param),
iscount=iscount,
noautovalidity=True if param.attrib.get('noautovalidity') is not None else False,
len=self.getLen(param),
extstructs=None,
condition=None,
cdecl=cdecl))
# Save return value information, if any
result_type = ''
promotion_info = ''
resultinfo = cmdinfo.elem.find('proto/type')
if (resultinfo is not None and resultinfo.text != 'void'):
result_type = resultinfo.text
if "VK_VERSION" in self.featureName and "VK_VERSION_1_0" != self.featureName:
if ('VkInstance' == paramsInfo[0].type or 'VkPhysicalDevice' == paramsInfo[0].type):
promotion_info = [paramsInfo[0].name, self.featureName]
self.commands.append(self.CommandData(name=name, params=paramsInfo, cdecl=self.makeCDecls(cmdinfo.elem)[0], extension_type=self.extension_type, result=result_type, promotion_info=promotion_info))
#
# Check if the parameter passed in is a pointer
def paramIsPointer(self, param):
ispointer = 0
paramtype = param.find('type')
if (paramtype.tail is not None) and ('*' in paramtype.tail):
ispointer = paramtype.tail.count('*')
elif paramtype.text[:4] == 'PFN_':
# Treat function pointer typedefs as a pointer to a single value
ispointer = 1
return ispointer
#
# Check if the parameter passed in is a static array
def paramIsStaticArray(self, param):
isstaticarray = 0
paramname = param.find('name')
if (paramname.tail is not None) and ('[' in paramname.tail):
isstaticarray = paramname.tail.count('[')
return isstaticarray
#
# Check if the parameter passed in is optional
# Returns a list of Boolean values for comma separated len attributes (len='false,true')
def paramIsOptional(self, param):
# See if the handle is optional
isoptional = False
# Simple, if it's optional, return true
optString = param.attrib.get('optional')
if optString:
if optString == 'true':
isoptional = True
elif ',' in optString:
opts = []
for opt in optString.split(','):
val = opt.strip()
if val == 'true':
opts.append(True)
elif val == 'false':
opts.append(False)
else:
print('Unrecognized len attribute value',val)
isoptional = opts
return isoptional
#
# Check if the handle passed in is optional
# Uses the same logic as ValidityOutputGenerator.isHandleOptional
def isHandleOptional(self, param, lenParam):
# Simple, if it's optional, return true
if param.isoptional:
return True
# If no validity is being generated, it usually means that validity is complex and not absolute, so let's say yes.
if param.noautovalidity:
return True
# If the parameter is an array and we haven't already returned, find out if any of the len parameters are optional
if lenParam and lenParam.isoptional:
return True
return False
#
# Retrieve the value of the len tag
def getLen(self, param):
result = None
# Default to altlen when available to avoid LaTeX markup
if 'altlen' in param.attrib:
len = param.attrib.get('altlen')
else:
len = param.attrib.get('len')
if len and len != 'null-terminated':
# Only first level is supported for multidimensional arrays. Conveniently, this also strips the trailing
# 'null-terminated' from arrays of strings
len = len.split(',')[0]
# Convert scope notation to pointer access
result = str(len).replace('::', '->')
elif self.paramIsStaticArray(param):
# For static arrays get length from inside []
array_match = re.search(r'\[(\d+)\]', param.find('name').tail)
if array_match:
result = array_match.group(1)
return result
#
# Retrieve the type and name for a parameter
def getTypeNameTuple(self, param):
type = ''
name = ''
for elem in param:
if elem.tag == 'type':
type = noneStr(elem.text)
elif elem.tag == 'name':
name = noneStr(elem.text)
return (type, name)
#
# Find a named parameter in a parameter list
def getParamByName(self, params, name):
for param in params:
if param.name == name:
return param
return None
#
# Get the length paramater record for the specified length expression
def getLenParam(self, params, length):
# First check if any element of params matches length exactly
lenParam = self.getParamByName(params, length)
if not lenParam:
# Otherwise, look for any elements of params that appear within length
len_candidates = [p for p in params if re.search(r'\b{}\b'.format(p.name), length)]
# 0 or 1 matches are expected, >1 would require a special case and/or explicit validation
if len(len_candidates) == 0:
lenParam = None
elif len(len_candidates) == 1:
lenParam = len_candidates[0]
else:
raise Exception('Cannot determine length parameter for len attribute value {}'.format(length))
return lenParam
#
# Convert a vulkan.h command declaration into a parameter_validation.h definition
def getCmdDef(self, cmd):
# Strip the trailing ';' and split into individual lines
lines = cmd.cdecl[:-1].split('\n')
cmd_hdr = '\n'.join(lines)
return cmd_hdr
#
# Generate the code to check for a NULL dereference before calling the
# validation function
def genCheckedLengthCall(self, name, exprs):
count = name.count('->')
if count:
checkedExpr = []
localIndent = ''
elements = name.split('->')
# Open the if expression blocks
for i in range(0, count):
checkedExpr.append(localIndent + 'if ({} != NULL) {{\n'.format('->'.join(elements[0:i+1])))
localIndent = self.incIndent(localIndent)
# Add the validation expression
for expr in exprs:
checkedExpr.append(localIndent + expr)
# Close the if blocks
for i in range(0, count):
localIndent = self.decIndent(localIndent)
checkedExpr.append(localIndent + '}\n')
return [checkedExpr]
# No if statements were required
return exprs
#
# Generate code to check for a specific condition before executing validation code
def genConditionalCall(self, prefix, condition, exprs):
checkedExpr = []
localIndent = ''
formattedCondition = condition.format(prefix)
checkedExpr.append(localIndent + 'if ({})\n'.format(formattedCondition))
checkedExpr.append(localIndent + '{\n')
localIndent = self.incIndent(localIndent)
for expr in exprs:
checkedExpr.append(localIndent + expr)
localIndent = self.decIndent(localIndent)
checkedExpr.append(localIndent + '}\n')
return [checkedExpr]
#
# Get VUID identifier from implicit VUID tag
def GetVuid(self, name, suffix):
vuid_string = 'VUID-%s-%s' % (name, suffix)
vuid = "kVUIDUndefined"
if '->' in vuid_string:
return vuid
if vuid_string in self.valid_vuids:
vuid = "\"%s\"" % vuid_string
else:
if name in self.alias_dict:
alias_string = 'VUID-%s-%s' % (self.alias_dict[name], suffix)
if alias_string in self.valid_vuids:
vuid = "\"%s\"" % alias_string
return vuid
#
# Generate the sType check string
    def makeStructTypeCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, lenPtrRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec, struct_type_name):
        """Generate the validate_struct_type* call for a struct-typed value.

        Chooses between the array-of-struct-pointers, counted-array (pointer
        or integer count), and single-struct forms based on the value's
        pointer depth and the length parameter's shape.
        """
        checkExpr = []
        stype = self.structTypes[value.type]
        vuid_name = struct_type_name if struct_type_name is not None else funcPrintName
        stype_vuid = self.GetVuid(value.type, "sType-sType")
        param_vuid = self.GetVuid(vuid_name, "%s-parameter" % value.name)
        if lenValue:
            count_required_vuid = self.GetVuid(vuid_name, "%s-arraylength" % value.len)
            # This is an array of struct pointers
            if value.ispointer == 2:
                checkExpr.append('skip |= validate_struct_pointer_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {});\n'.format(
                    funcPrintName, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=lenValue.name, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec))
            # This is an array with a pointer to a count value
            elif lenValue.ispointer:
                # When the length parameter is a pointer, there is an extra Boolean parameter in the function call to indicate if it is required
                checkExpr.append('skip |= validate_struct_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {}, {});\n'.format(
                    funcPrintName, lenPtrRequired, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec))
            # This is an array with an integer count value
            else:
                checkExpr.append('skip |= validate_struct_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {});\n'.format(
                    funcPrintName, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec))
        # This is an individual struct
        else:
            # NOTE(review): the 'vt' keyword below is never referenced by the
            # format string; harmless dead argument.
            checkExpr.append('skip |= validate_struct_type("{}", {ppp}"{}"{pps}, "{sv}", {}{vn}, {sv}, {}, {}, {});\n'.format(
                funcPrintName, valuePrintName, prefix, valueRequired, param_vuid, stype_vuid, vn=value.name, sv=stype, vt=value.type, **postProcSpec))
        return checkExpr
#
# Generate the handle check string
def makeHandleCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec):
checkExpr = []
if lenValue:
if lenValue.ispointer:
# This is assumed to be an output array with a pointer to a count value
raise('Unsupported parameter validation case: Output handle array elements are not NULL checked')
else:
count_required_vuid = self.GetVuid(funcPrintName, "%s-arraylength" % (value.len))
# This is an array with an integer count value
checkExpr.append('skip |= validate_handle_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, {pf}{vn}, {}, {}, {});\n'.format(
funcPrintName, lenValueRequired, valueRequired, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
else:
# This is assumed to be an output handle pointer
raise('Unsupported parameter validation case: Output handles are not NULL checked')
return checkExpr
#
# Generate check string for an array of VkFlags values
def makeFlagsArrayCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec):
checkExpr = []
flagBitsName = value.type.replace('Flags', 'FlagBits')
if not flagBitsName in self.flagBits:
raise('Unsupported parameter validation case: array of reserved VkFlags')
else:
allFlags = 'All' + flagBitsName
checkExpr.append('skip |= validate_flags_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, "{}", {}, {pf}{}, {pf}{}, {}, {});\n'.format(funcPrintName, lenPrintName, valuePrintName, flagBitsName, allFlags, value.len, value.name, lenValueRequired, valueRequired, pf=prefix, **postProcSpec))
return checkExpr
#
# Generate pNext check string
    def makeStructNextCheck(self, prefix, value, funcPrintName, valuePrintName, postProcSpec, struct_type_name):
        """Generate the validate_struct_pnext call for a pNext chain member.

        When the struct declares allowed extension structs, also emits the
        allowed_structs_* VkStructureType array used by the check; otherwise
        NULL/0 placeholders are passed.
        """
        checkExpr = []
        # Generate an array of acceptable VkStructureType values for pNext
        extStructCount = 0
        extStructVar = 'NULL'
        extStructNames = 'NULL'
        pNextVuid = self.GetVuid(struct_type_name, "pNext-pNext")
        sTypeVuid = self.GetVuid(struct_type_name, "sType-unique")
        if value.extstructs:
            extStructVar = 'allowed_structs_{}'.format(struct_type_name)
            extStructCount = 'ARRAY_SIZE({})'.format(extStructVar)
            extStructNames = '"' + ', '.join(value.extstructs) + '"'
            # Emit the array definition itself ahead of the check call.
            checkExpr.append('const VkStructureType {}[] = {{ {} }};\n'.format(extStructVar, ', '.join([self.structTypes[s] for s in value.extstructs])))
        checkExpr.append('skip |= validate_struct_pnext("{}", {ppp}"{}"{pps}, {}, {}{}, {}, {}, GeneratedVulkanHeaderVersion, {}, {});\n'.format(
            funcPrintName, valuePrintName, extStructNames, prefix, value.name, extStructCount, extStructVar, pNextVuid, sTypeVuid, **postProcSpec))
        return checkExpr
#
# Generate the pointer check string
    def makePointerCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, lenPtrRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec, struct_type_name):
        """Generate the pointer/array validation statements for a single pointer parameter.

        Returns a list of C++ source lines.  For array parameters a
        validate_array / validate_string_array call covering both the count and
        the array pointer is emitted; lone non-optional pointers get a
        validate_required_pointer call; the optional internal-allocation
        callbacks get a complement-must-be-set check instead.
        """
        checkExpr = []
        # VUIDs are tagged with the struct name when validating members, else the command name
        vuid_tag_name = struct_type_name if struct_type_name is not None else funcPrintName
        if lenValue:
            length_deref = '->' in value.len
            count_required_vuid = self.GetVuid(vuid_tag_name, "%s-arraylength" % (value.len))
            array_required_vuid = self.GetVuid(vuid_tag_name, "%s-parameter" % (value.name))
            # TODO: Remove workaround for missing optional tag in vk.xml
            if array_required_vuid == '"VUID-VkFramebufferCreateInfo-pAttachments-parameter"':
                return []
            # This is an array with a pointer to a count value
            if lenValue.ispointer and not length_deref:
                # If count and array parameters are optional, there will be no validation
                if valueRequired == 'true' or lenPtrRequired == 'true' or lenValueRequired == 'true':
                    # When the length parameter is a pointer, there is an extra Boolean parameter in the function call to indicate if it is required
                    checkExpr.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, &{pf}{vn}, {}, {}, {}, {}, {});\n'.format(
                        funcPrintName, lenPtrRequired, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
            # This is an array with an integer count value
            else:
                # If count and array parameters are optional, there will be no validation
                if valueRequired == 'true' or lenValueRequired == 'true':
                    if value.type != 'char':
                        # A valid VU can't use '->' in the middle so the generated VUID from the spec uses '::' instead
                        count_required_vuid = self.GetVuid(vuid_tag_name, "%s-arraylength" % (value.len.replace('->', '::')))
                        checkExpr.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, &{pf}{vn}, {}, {}, {}, {});\n'.format(
                            funcPrintName, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
                    else:
                        # Arrays of strings receive special processing
                        checkExpr.append('skip |= validate_string_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, {pf}{vn}, {}, {}, {}, {});\n'.format(
                            funcPrintName, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
            if checkExpr:
                if lenValue and length_deref:
                    # Add checks to ensure the validation call does not dereference a NULL pointer to obtain the count
                    checkExpr = self.genCheckedLengthCall(value.len, checkExpr)
        # This is an individual struct that is not allowed to be NULL
        elif not value.isoptional:
            # Function pointers need a reinterpret_cast to void*
            ptr_required_vuid = self.GetVuid(vuid_tag_name, "%s-parameter" % (value.name))
            if value.type[:4] == 'PFN_':
                # The three standard allocator callbacks have dedicated VUIDs
                allocator_dict = {'pfnAllocation': '"VUID-VkAllocationCallbacks-pfnAllocation-00632"',
                                  'pfnReallocation': '"VUID-VkAllocationCallbacks-pfnReallocation-00633"',
                                  'pfnFree': '"VUID-VkAllocationCallbacks-pfnFree-00634"',
                                 }
                vuid = allocator_dict.get(value.name)
                if vuid is not None:
                    ptr_required_vuid = vuid
                checkExpr.append('skip |= validate_required_pointer("{}", {ppp}"{}"{pps}, reinterpret_cast<const void*>({}{}), {});\n'.format(funcPrintName, valuePrintName, prefix, value.name, ptr_required_vuid, **postProcSpec))
            else:
                checkExpr.append('skip |= validate_required_pointer("{}", {ppp}"{}"{pps}, {}{}, {});\n'.format(funcPrintName, valuePrintName, prefix, value.name, ptr_required_vuid, **postProcSpec))
        else:
            # Special case for optional internal allocation function pointers.
            if (value.type, value.name) == ('PFN_vkInternalAllocationNotification', 'pfnInternalAllocation'):
                checkExpr.extend(self.internalAllocationCheck(funcPrintName, prefix, value.name, 'pfnInternalFree', postProcSpec))
            elif (value.type, value.name) == ('PFN_vkInternalFreeNotification', 'pfnInternalFree'):
                checkExpr.extend(self.internalAllocationCheck(funcPrintName, prefix, value.name, 'pfnInternalAllocation', postProcSpec))
        return checkExpr
#
# Generate internal allocation function pointer check.
def internalAllocationCheck(self, funcPrintName, prefix, name, complementaryName, postProcSpec):
checkExpr = []
vuid = '"VUID-VkAllocationCallbacks-pfnInternalAllocation-00635"'
checkExpr.append('if ({}{} != NULL)'.format(prefix, name))
checkExpr.append('{')
local_indent = self.incIndent('')
# Function pointers need a reinterpret_cast to void*
checkExpr.append(local_indent + 'skip |= validate_required_pointer("{}", {ppp}"{}{}"{pps}, reinterpret_cast<const void*>({}{}), {});\n'.format(funcPrintName, prefix, complementaryName, prefix, complementaryName, vuid, **postProcSpec))
checkExpr.append('}\n')
return checkExpr
#
# Process struct member validation code, performing name substitution if required
def processStructMemberCode(self, line, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec):
# Build format specifier list
kwargs = {}
if '{postProcPrefix}' in line:
# If we have a tuple that includes a format string and format parameters, need to use ParameterName class
if type(memberDisplayNamePrefix) is tuple:
kwargs['postProcPrefix'] = 'ParameterName('
else:
kwargs['postProcPrefix'] = postProcSpec['ppp']
if '{postProcSuffix}' in line:
# If we have a tuple that includes a format string and format parameters, need to use ParameterName class
if type(memberDisplayNamePrefix) is tuple:
kwargs['postProcSuffix'] = ', ParameterName::IndexVector{{ {}{} }})'.format(postProcSpec['ppi'], memberDisplayNamePrefix[1])
else:
kwargs['postProcSuffix'] = postProcSpec['pps']
if '{postProcInsert}' in line:
# If we have a tuple that includes a format string and format parameters, need to use ParameterName class
if type(memberDisplayNamePrefix) is tuple:
kwargs['postProcInsert'] = '{}{}, '.format(postProcSpec['ppi'], memberDisplayNamePrefix[1])
else:
kwargs['postProcInsert'] = postProcSpec['ppi']
if '{funcName}' in line:
kwargs['funcName'] = funcName
if '{valuePrefix}' in line:
kwargs['valuePrefix'] = memberNamePrefix
if '{displayNamePrefix}' in line:
# Check for a tuple that includes a format string and format parameters to be used with the ParameterName class
if type(memberDisplayNamePrefix) is tuple:
kwargs['displayNamePrefix'] = memberDisplayNamePrefix[0]
else:
kwargs['displayNamePrefix'] = memberDisplayNamePrefix
if kwargs:
# Need to escape the C++ curly braces
if 'IndexVector' in line:
line = line.replace('IndexVector{ ', 'IndexVector{{ ')
line = line.replace(' }),', ' }}),')
return line.format(**kwargs)
return line
#
# Process struct member validation code, stripping metadata
def ScrubStructCode(self, code):
scrubbed_lines = ''
for line in code:
if 'validate_struct_pnext' in line:
continue
if 'allowed_structs' in line:
continue
if 'xml-driven validation' in line:
continue
line = line.replace('{postProcPrefix}', '')
line = line.replace('{postProcSuffix}', '')
line = line.replace('{postProcInsert}', '')
line = line.replace('{funcName}', '')
line = line.replace('{valuePrefix}', '')
line = line.replace('{displayNamePrefix}', '')
line = line.replace('{IndexVector}', '')
line = line.replace('local_data->', '')
scrubbed_lines += line
return scrubbed_lines
#
# Process struct validation code for inclusion in function or parent struct validation code
def expandStructCode(self, item_type, funcName, memberNamePrefix, memberDisplayNamePrefix, indent, output, postProcSpec):
lines = self.validatedStructs[item_type]
for line in lines:
if output:
output[-1] += '\n'
if type(line) is list:
for sub in line:
output.append(self.processStructMemberCode(indent + sub, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec))
else:
output.append(self.processStructMemberCode(indent + line, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec))
return output
#
# Process struct pointer/array validation code, performing name substitution if required
    def expandStructPointerCode(self, prefix, value, lenValue, funcName, valueDisplayName, postProcSpec):
        """Expand cached struct validation code for a struct pointer/array parameter.

        Wraps the member checks in an `if (ptr != NULL)` guard and, for arrays,
        in a per-element for loop; the member name prefixes are adjusted so the
        generated messages include the element index.
        """
        expr = []
        expr.append('if ({}{} != NULL)\n'.format(prefix, value.name))
        expr.append('{')
        indent = self.incIndent(None)
        if lenValue:
            # Need to process all elements in the array
            indexName = value.len.replace('Count', 'Index')
            expr[-1] += '\n'
            if lenValue.ispointer:
                # If the length value is a pointer, de-reference it for the count.
                expr.append(indent + 'for (uint32_t {iname} = 0; {iname} < *{}{}; ++{iname})\n'.format(prefix, value.len, iname=indexName))
            else:
                expr.append(indent + 'for (uint32_t {iname} = 0; {iname} < {}{}; ++{iname})\n'.format(prefix, value.len, iname=indexName))
            expr.append(indent + '{')
            indent = self.incIndent(indent)
            # Prefix for value name to display in error message
            if value.ispointer == 2:
                # Array of pointers to structs: element access uses ->
                memberNamePrefix = '{}{}[{}]->'.format(prefix, value.name, indexName)
                memberDisplayNamePrefix = ('{}[%i]->'.format(valueDisplayName), indexName)
            else:
                memberNamePrefix = '{}{}[{}].'.format(prefix, value.name, indexName)
                memberDisplayNamePrefix = ('{}[%i].'.format(valueDisplayName), indexName)
        else:
            memberNamePrefix = '{}{}->'.format(prefix, value.name)
            memberDisplayNamePrefix = '{}->'.format(valueDisplayName)
        # Expand the struct validation lines
        expr = self.expandStructCode(value.type, funcName, memberNamePrefix, memberDisplayNamePrefix, indent, expr, postProcSpec)
        if lenValue:
            # Close if and for scopes
            indent = self.decIndent(indent)
            expr.append(indent + '}\n')
        expr.append('}\n')
        return expr
#
# Generate the parameter checking code
def genFuncBody(self, funcName, values, valuePrefix, displayNamePrefix, structTypeName, is_phys_device = False):
lines = [] # Generated lines of code
unused = [] # Unused variable names
duplicateCountVuid = [] # prevent duplicate VUs being generated
# TODO Using a regex in this context is not ideal. Would be nicer if usedLines were a list of objects with "settings" (such as "is_phys_device")
validate_pnext_rx = re.compile(r'(.*validate_struct_pnext\(.*)(\).*\n*)', re.M)
for value in values:
usedLines = []
lenParam = None
#
# Prefix and suffix for post processing of parameter names for struct members. Arrays of structures need special processing to include the array index in the full parameter name.
postProcSpec = {}
postProcSpec['ppp'] = '' if not structTypeName else '{postProcPrefix}'
postProcSpec['pps'] = '' if not structTypeName else '{postProcSuffix}'
postProcSpec['ppi'] = '' if not structTypeName else '{postProcInsert}'
#
# Generate the full name of the value, which will be printed in the error message, by adding the variable prefix to the value name
valueDisplayName = '{}{}'.format(displayNamePrefix, value.name)
#
# Check for NULL pointers, ignore the in-out count parameters that
# will be validated with their associated array
if (value.ispointer or value.isstaticarray) and not value.iscount:
# Parameters for function argument generation
req = 'true' # Parameter cannot be NULL
cpReq = 'true' # Count pointer cannot be NULL
cvReq = 'true' # Count value cannot be 0
lenDisplayName = None # Name of length parameter to print with validation messages; parameter name with prefix applied
countRequiredVuid = None # If there is a count required VUID to check
# Generate required/optional parameter strings for the pointer and count values
if value.isoptional:
req = 'false'
if value.len:
# The parameter is an array with an explicit count parameter
lenParam = self.getLenParam(values, value.len)
if lenParam:
lenDisplayName = value.len.replace(lenParam.name, displayNamePrefix + lenParam.name)
if lenParam.ispointer:
# Count parameters that are pointers are inout
if type(lenParam.isoptional) is list:
if lenParam.isoptional[0]:
cpReq = 'false'
if lenParam.isoptional[1]:
cvReq = 'false'
else:
if lenParam.isoptional:
cpReq = 'false'
# In case of count as field in another struct, look up field to see if count is optional.
len_deref = value.len.split('->')
if len(len_deref) == 2:
struct_fields = next((struct.members for struct in self.structMembers if struct.name == lenParam.type), None)
if struct_fields:
len_field_name = len_deref[1]
struct_field = next((field for field in struct_fields if field.name == len_field_name), None)
if struct_field and struct_field.isoptional:
cvReq = 'false'
else:
if lenParam.isoptional:
cvReq = 'false'
elif value.noautovalidity:
# Handle edge case where XML expresses a non-optional non-pointer value length with noautovalidity
# ex: <param noautovalidity="true"len="commandBufferCount">
vuidNameTag = structTypeName if structTypeName is not None else funcName
countRequiredVuid = self.GetVuid(vuidNameTag, "%s-arraylength" % (lenParam.name))
if countRequiredVuid in duplicateCountVuid:
countRequiredVuid = None
else:
duplicateCountVuid.append(countRequiredVuid)
else:
# Do not generate length checks for constant sized arrays
cpReq = 'false'
cvReq = 'false'
#
# The parameter will not be processed when tagged as 'noautovalidity'
# For the pointer to struct case, the struct pointer will not be validated, but any
# members not tagged as 'noautovalidity' will be validated
# We special-case the custom allocator checks, as they are explicit but can be auto-generated.
AllocatorFunctions = ['PFN_vkAllocationFunction', 'PFN_vkReallocationFunction', 'PFN_vkFreeFunction', 'PFN_vkInternalAllocationNotification', 'PFN_vkInternalFreeNotification']
if value.noautovalidity and value.type not in AllocatorFunctions and not countRequiredVuid:
# Log a diagnostic message when validation cannot be automatically generated and must be implemented manually
self.logMsg('diag', 'ParameterValidation: No validation for {} {}'.format(structTypeName if structTypeName else funcName, value.name))
elif countRequiredVuid:
usedLines.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, "", {pf}{ln}, &{pf}{vn}, true, false, {}, kVUIDUndefined);\n'.format(
funcName, countRequiredVuid, pf=valuePrefix, ldn=lenDisplayName, ln=value.len, vn=value.name, **postProcSpec))
else:
if value.type in self.structTypes:
# If this is a pointer to a struct with an sType field, verify the type
usedLines += self.makeStructTypeCheck(valuePrefix, value, lenParam, req, cvReq, cpReq, funcName, lenDisplayName, valueDisplayName, postProcSpec, structTypeName)
# If this is an input handle array that is not allowed to contain NULL handles, verify that none of the handles are VK_NULL_HANDLE
elif value.type in self.handleTypes and value.isconst and not self.isHandleOptional(value, lenParam):
usedLines += self.makeHandleCheck(valuePrefix, value, lenParam, req, cvReq, funcName, lenDisplayName, valueDisplayName, postProcSpec)
elif value.type in self.flags and value.isconst:
usedLines += self.makeFlagsArrayCheck(valuePrefix, value, lenParam, req, cvReq, funcName, lenDisplayName, valueDisplayName, postProcSpec)
elif value.isbool and value.isconst:
usedLines.append('skip |= validate_bool32_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, {pf}{}, {pf}{}, {}, {});\n'.format(funcName, lenDisplayName, valueDisplayName, value.len, value.name, cvReq, req, pf=valuePrefix, **postProcSpec))
elif value.israngedenum and value.isconst:
enum_value_list = 'All%sEnums' % value.type
usedLines.append('skip |= validate_ranged_enum_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, "{}", {}, {pf}{}, {pf}{}, {}, {});\n'.format(funcName, lenDisplayName, valueDisplayName, value.type, enum_value_list, value.len, value.name, cvReq, req, pf=valuePrefix, **postProcSpec))
elif value.name == 'pNext':
usedLines += self.makeStructNextCheck(valuePrefix, value, funcName, valueDisplayName, postProcSpec, structTypeName)
else:
usedLines += self.makePointerCheck(valuePrefix, value, lenParam, req, cvReq, cpReq, funcName, lenDisplayName, valueDisplayName, postProcSpec, structTypeName)
# If this is a pointer to a struct (input), see if it contains members that need to be checked
if value.type in self.validatedStructs:
if value.isconst: # or value.type in self.returnedonly_structs:
usedLines.append(self.expandStructPointerCode(valuePrefix, value, lenParam, funcName, valueDisplayName, postProcSpec))
elif value.type in self.returnedonly_structs:
usedLines.append(self.expandStructPointerCode(valuePrefix, value, lenParam, funcName, valueDisplayName, postProcSpec))
is_const_str = 'true' if value.isconst else 'false'
is_phys_device_str = 'true' if is_phys_device else 'false'
for setter, _, elem in multi_string_iter(usedLines):
elem = re.sub(r', (true|false)', '', elem)
m = validate_pnext_rx.match(elem)
if m is not None:
setter(f'{m.group(1)}, {is_phys_device_str}, {is_const_str}{m.group(2)}')
# Non-pointer types
else:
# The parameter will not be processes when tagged as 'noautovalidity'
# For the struct case, the struct type will not be validated, but any
# members not tagged as 'noautovalidity' will be validated
if value.noautovalidity:
# Log a diagnostic message when validation cannot be automatically generated and must be implemented manually
self.logMsg('diag', 'ParameterValidation: No validation for {} {}'.format(structTypeName if structTypeName else funcName, value.name))
else:
vuid_name_tag = structTypeName if structTypeName is not None else funcName
if value.type in self.structTypes:
stype = self.structTypes[value.type]
vuid = self.GetVuid(value.type, "sType-sType")
undefined_vuid = '"kVUIDUndefined"'
usedLines.append('skip |= validate_struct_type("{}", {ppp}"{}"{pps}, "{sv}", &({}{vn}), {sv}, false, kVUIDUndefined, {});\n'.format(
funcName, valueDisplayName, valuePrefix, vuid, vn=value.name, sv=stype, vt=value.type, **postProcSpec))
elif value.type in self.handleTypes:
if not self.isHandleOptional(value, None):
usedLines.append('skip |= validate_required_handle("{}", {ppp}"{}"{pps}, {}{});\n'.format(funcName, valueDisplayName, valuePrefix, value.name, **postProcSpec))
elif value.type in self.flags and value.type.replace('Flags', 'FlagBits') not in self.flagBits:
vuid = self.GetVuid(vuid_name_tag, "%s-zerobitmask" % (value.name))
usedLines.append('skip |= validate_reserved_flags("{}", {ppp}"{}"{pps}, {pf}{}, {});\n'.format(funcName, valueDisplayName, value.name, vuid, pf=valuePrefix, **postProcSpec))
elif value.type in self.flags or value.type in self.flagBits:
if value.type in self.flags:
flagBitsName = value.type.replace('Flags', 'FlagBits')
flagsType = 'kOptionalFlags' if value.isoptional else 'kRequiredFlags'
invalidVuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
zeroVuid = self.GetVuid(vuid_name_tag, "%s-requiredbitmask" % (value.name))
elif value.type in self.flagBits:
flagBitsName = value.type
flagsType = 'kOptionalSingleBit' if value.isoptional else 'kRequiredSingleBit'
invalidVuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
zeroVuid = invalidVuid
allFlagsName = 'All' + flagBitsName
invalid_vuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
allFlagsName = 'All' + flagBitsName
zeroVuidArg = '' if value.isoptional else ', ' + zeroVuid
usedLines.append('skip |= validate_flags("{}", {ppp}"{}"{pps}, "{}", {}, {pf}{}, {}, {}{});\n'.format(funcName, valueDisplayName, flagBitsName, allFlagsName, value.name, flagsType, invalidVuid, zeroVuidArg, pf=valuePrefix, **postProcSpec))
elif value.isbool:
usedLines.append('skip |= validate_bool32("{}", {ppp}"{}"{pps}, {}{});\n'.format(funcName, valueDisplayName, valuePrefix, value.name, **postProcSpec))
elif value.israngedenum:
vuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
enum_value_list = 'All%sEnums' % value.type
usedLines.append('skip |= validate_ranged_enum("{}", {ppp}"{}"{pps}, "{}", {}, {}{}, {});\n'.format(funcName, valueDisplayName, value.type, enum_value_list, valuePrefix, value.name, vuid, **postProcSpec))
# If this is a struct, see if it contains members that need to be checked
if value.type in self.validatedStructs:
memberNamePrefix = '{}{}.'.format(valuePrefix, value.name)
memberDisplayNamePrefix = '{}.'.format(valueDisplayName)
usedLines.append(self.expandStructCode(value.type, funcName, memberNamePrefix, memberDisplayNamePrefix, '', [], postProcSpec))
# Append the parameter check to the function body for the current command
if usedLines:
# Apply special conditional checks
if value.condition:
usedLines = self.genConditionalCall(valuePrefix, value.condition, usedLines)
lines += usedLines
elif not value.iscount:
# If no expression was generated for this value, it is unreferenced by the validation function, unless
# it is an array count, which is indirectly referenced for array valiadation.
unused.append(value.name)
if not lines:
lines.append('// No xml-driven validation\n')
return lines, unused
#
# Generate the struct member check code from the captured data
def processStructMemberData(self):
indent = self.incIndent(None)
for struct in self.structMembers:
#
# The string returned by genFuncBody will be nested in an if check for a NULL pointer, so needs its indent incremented
lines, unused = self.genFuncBody('{funcName}', struct.members, '{valuePrefix}', '{displayNamePrefix}', struct.name)
if lines:
self.validatedStructs[struct.name] = lines
#
# Generate the command param check code from the captured data
def processCmdData(self):
indent = self.incIndent(None)
for command in self.commands:
# Skip first parameter if it is a dispatch handle (everything except vkCreateInstance)
startIndex = 0 if command.name == 'vkCreateInstance' else 1
lines, unused = self.genFuncBody(command.name, command.params[startIndex:], '', '', None, is_phys_device = command.params[0].type == 'VkPhysicalDevice')
# Cannot validate extension dependencies for device extension APIs having a physical device as their dispatchable object
if (command.name in self.required_extensions) and (self.extension_type != 'device' or command.params[0].type != 'VkPhysicalDevice'):
for ext in self.required_extensions[command.name]:
ext_name_define = ''
for extension in self.registry.extensions:
if extension.attrib['name'] == ext:
ext_name_define = GetNameDefine(extension)
break
ext_test = ''
if command.params[0].type in ["VkInstance", "VkPhysicalDevice"] or command.name == 'vkCreateInstance':
ext_test = 'if (!instance_extensions.%s) skip |= OutputExtensionError("%s", %s);\n' % (ext.lower(), command.name, ext_name_define)
else:
ext_test = 'if (!IsExtEnabled(device_extensions.%s)) skip |= OutputExtensionError("%s", %s);\n' % (ext.lower(), command.name, ext_name_define)
lines.insert(0, ext_test)
if lines:
func_sig = self.getCmdDef(command) + ' const {\n'
func_sig = func_sig.split('VKAPI_CALL vk')[1]
cmdDef = 'bool StatelessValidation::PreCallValidate' + func_sig
cmdDef += '%sbool skip = false;\n' % indent
if isinstance(command.promotion_info, list):
version_flag = command.promotion_info[1]
version_id = version_flag.replace('VK_VERSION', 'VK_API_VERSION')
cmdDef += '%s if (CheckPromotedApiAgainstVulkanVersion(%s, "%s", %s)) return true;\n' % (indent, command.promotion_info[0], command.name, version_id)
for line in lines:
if type(line) is list:
for sub in line:
cmdDef += indent + sub
else:
cmdDef += indent + line
# Insert call to custom-written function if present
if command.name in self.functions_with_manual_checks:
# Generate parameter list for manual fcn and down-chain calls
params_text = ''
for param in command.params:
params_text += '%s, ' % param.name
params_text = params_text[:-2] + ');\n'
cmdDef += ' if (!skip) skip |= manual_PreCallValidate'+ command.name[2:] + '(' + params_text
cmdDef += '%sreturn skip;\n' % indent
cmdDef += '}\n'
self.validation.append(cmdDef)
| 58.686671 | 300 | 0.582674 |
import os,re,sys,string,json
import xml.etree.ElementTree as etree
from generator import *
from collections import namedtuple
from common_codegen import *
def multi_string_iter(lines):
    """Iterate a list whose items are strings or nested lists of strings.

    Yields (setter, deleter, element) triples so callers can read every string
    and replace or delete it in place regardless of nesting level.

    Fix: the index variables are bound as default arguments so the yielded
    callbacks stay valid even if they are stored and invoked after the loop has
    advanced (avoids the classic late-binding-closure pitfall).
    """
    for i, entry in enumerate(lines):
        if not isinstance(entry, list):
            def setter(x, i=i):
                lines[i] = x
            def deleter(i=i):
                del lines[i]
            yield (setter, deleter, entry)
        else:
            for j, sub in enumerate(entry):
                def setter(x, i=i, j=j):
                    lines[i][j] = x
                def deleter(i=i, j=j):
                    del lines[i][j]
                yield (setter, deleter, sub)
genpath = None,
apiname = 'vulkan',
profile = None,
versions = '.*',
emitversions = '.*',
defaultExtensions = 'vulkan',
addExtensions = None,
removeExtensions = None,
emitExtensions = None,
emitSpirv = None,
sortProcedure = regSortFeatures,
apicall = 'VKAPI_ATTR ',
apientry = 'VKAPI_CALL ',
apientryp = 'VKAPI_PTR *',
indentFuncProto = True,
indentFuncPointer = False,
alignFuncParam = 48,
expandEnumerants = False,
valid_usage_path = ''):
GeneratorOptions.__init__(self,
conventions = conventions,
filename = filename,
directory = directory,
genpath = genpath,
apiname = apiname,
profile = profile,
versions = versions,
emitversions = emitversions,
defaultExtensions = defaultExtensions,
addExtensions = addExtensions,
removeExtensions = removeExtensions,
emitExtensions = emitExtensions,
emitSpirv = emitSpirv,
sortProcedure = sortProcedure)
self.apicall = apicall
self.apientry = apientry
self.apientryp = apientryp
self.indentFuncProto = indentFuncProto
self.indentFuncPointer = indentFuncPointer
self.alignFuncParam = alignFuncParam
self.expandEnumerants = expandEnumerants
self.valid_usage_path = valid_usage_path
class ParameterValidationOutputGenerator(OutputGenerator):
ALL_SECTIONS = ['command']
def __init__(self,
errFile = sys.stderr,
warnFile = sys.stderr,
diagFile = sys.stdout):
OutputGenerator.__init__(self, errFile, warnFile, diagFile)
self.INDENT_SPACES = 4
self.declarations = []
inline_custom_source_preamble = """
"""
self.functions_with_manual_checks = [
'vkCreateInstance',
'vkCreateDevice',
'vkCreateQueryPool',
'vkCreateRenderPass',
'vkCreateRenderPass2',
'vkCreateRenderPass2KHR',
'vkCreateBuffer',
'vkCreateImage',
'vkCreatePipelineLayout',
'vkCreateGraphicsPipelines',
'vkCreateComputePipelines',
'vkCreateRayTracingPipelinesNV',
'vkCreateRayTracingPipelinesKHR',
'vkCreateSampler',
'vkCreateDescriptorSetLayout',
'vkFreeDescriptorSets',
'vkUpdateDescriptorSets',
'vkBeginCommandBuffer',
'vkCmdSetViewport',
'vkCmdSetScissor',
'vkCmdSetLineWidth',
'vkCmdDrawIndirect',
'vkCmdDrawIndexedIndirect',
'vkCmdDrawMultiEXT',
'vkCmdDrawMultiIndexedEXT',
'vkCmdClearAttachments',
'vkCmdBindIndexBuffer',
'vkCmdCopyBuffer',
'vkCmdUpdateBuffer',
'vkCmdFillBuffer',
'vkCreateSwapchainKHR',
'vkCreateSharedSwapchainsKHR',
'vkQueuePresentKHR',
'vkCreateDescriptorPool',
'vkCmdDispatch',
'vkCmdDispatchIndirect',
'vkCmdDispatchBaseKHR',
'vkCmdPushDescriptorSetKHR',
'vkCmdSetExclusiveScissorNV',
'vkCmdSetViewportShadingRatePaletteNV',
'vkCmdSetCoarseSampleOrderNV',
'vkCmdDrawMeshTasksNV',
'vkCmdDrawMeshTasksIndirectNV',
'vkCmdDrawMeshTasksIndirectCountNV',
'vkAllocateMemory',
'vkCreateAccelerationStructureNV',
'vkCreateAccelerationStructureKHR',
'vkGetAccelerationStructureHandleNV',
'vkGetPhysicalDeviceImageFormatProperties',
'vkGetPhysicalDeviceImageFormatProperties2',
'vkGetPhysicalDeviceImageFormatProperties2KHR',
'vkCmdBuildAccelerationStructureNV',
'vkCreateFramebuffer',
'vkCmdSetLineStippleEXT',
'vkSetDebugUtilsObjectNameEXT',
'vkSetDebugUtilsObjectTagEXT',
'vkCmdSetViewportWScalingNV',
'vkAcquireNextImageKHR',
'vkAcquireNextImage2KHR',
'vkCmdBindTransformFeedbackBuffersEXT',
'vkCmdBeginTransformFeedbackEXT',
'vkCmdEndTransformFeedbackEXT',
'vkCmdDrawIndirectByteCountEXT',
'vkCreateSamplerYcbcrConversion',
'vkCreateSamplerYcbcrConversionKHR',
'vkImportSemaphoreFdKHR',
'vkCmdBindVertexBuffers',
'vkCreateImageView',
'vkCopyAccelerationStructureToMemoryKHR',
'vkCmdCopyAccelerationStructureToMemoryKHR',
'vkCopyAccelerationStructureKHR',
'vkCmdCopyAccelerationStructureKHR',
'vkCopyMemoryToAccelerationStructureKHR',
'vkCmdCopyMemoryToAccelerationStructureKHR',
'vkCmdDrawIndirectCount',
'vkCmdDrawIndirectCountKHR',
'vkCmdDrawIndexedIndirectCount',
'vkCmdDrawIndexedIndirectCountKHR',
'vkCmdWriteAccelerationStructuresPropertiesKHR',
'vkWriteAccelerationStructuresPropertiesKHR',
'vkGetRayTracingCaptureReplayShaderGroupHandlesKHR',
'vkCmdTraceRaysKHR',
'vkCmdTraceRaysNV',
'vkCmdTraceRaysIndirectKHR',
'vkCmdBuildAccelerationStructureIndirectKHR',
'vkGetDeviceAccelerationStructureCompatibilityKHR',
'vkCmdSetViewportWithCountEXT',
'vkCmdSetScissorWithCountEXT',
'vkCmdBindVertexBuffers2EXT',
'vkCmdCopyBuffer2KHR',
'vkCmdBuildAccelerationStructuresKHR',
'vkCmdBuildAccelerationStructuresIndirectKHR',
'vkBuildAccelerationStructuresKHR',
'vkGetAccelerationStructureBuildSizesKHR',
'vkCmdWriteAccelerationStructuresPropertiesNV',
'vkCreateDisplayModeKHR',
'vkCreatePrivateDataSlotEXT',
'vkCmdSetVertexInputEXT',
'vkCmdPushConstants',
'vkMergePipelineCaches',
'vkGetPhysicalDeviceVideoFormatPropertiesKHR',
'vkCmdClearColorImage',
'vkCmdBeginRenderPass',
'vkCmdBeginRenderPass2KHR',
'vkCmdBeginRenderPass2',
'vkCmdSetDiscardRectangleEXT',
'vkGetQueryPoolResults',
'vkCmdBeginConditionalRenderingEXT',
'vkCreateWin32SurfaceKHR'
]
self.blacklist = [
'vkGetInstanceProcAddr',
'vkGetDeviceProcAddr',
'vkEnumerateInstanceVersion',
'vkEnumerateInstanceLayerProperties',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateDeviceLayerProperties',
'vkEnumerateDeviceExtensionProperties',
'vkGetDeviceGroupSurfacePresentModes2EXT'
]
self.structMemberBlacklist = { 'VkWriteDescriptorSet' : ['dstSet'], 'VkAccelerationStructureGeometryKHR' :['geometry'] }
self.structMemberValidationConditions = { 'VkPipelineColorBlendStateCreateInfo' : { 'logicOp' : '{}logicOpEnable == VK_TRUE' } }
self.headerVersion = None
self.validation = []
self.stypes = []
self.structTypes = dict()
self.handleTypes = set()
self.commands = []
self.structMembers = []
self.validatedStructs = dict()
self.enumRanges = set()
self.enum_values_definitions = dict()
self.flag_values_definitions = dict()
self.stype_version_dict = dict()
self.flags = set()
self.flagBits = dict()
self.newFlags = set()
self.required_extensions = dict()
self.extension_type = ''
self.extension_names = dict()
self.structextends_list = []
self.struct_feature_protect = dict()
self.valid_vuids = set()
self.vuid_dict = dict()
self.alias_dict = dict()
self.header_file = False
self.source_file = False
self.instance_extension_list = ''
self.device_extension_list = ''
self.returnedonly_structs = []
self.called_types = set()
self.CommandParam = namedtuple('CommandParam', ['type', 'name', 'ispointer', 'isstaticarray', 'isbool', 'israngedenum',
'isconst', 'isoptional', 'iscount', 'noautovalidity',
'len', 'extstructs', 'condition', 'cdecl'])
self.CommandData = namedtuple('CommandData', ['name', 'params', 'cdecl', 'extension_type', 'result', 'promotion_info'])
self.StructMemberData = namedtuple('StructMemberData', ['name', 'members'])
def GenerateCopyright(self):
copyright = '/* *** THIS FILE IS GENERATED - DO NOT EDIT! ***\n'
copyright += ' * See parameter_validation_generator.py for modifications\n'
copyright += ' *\n'
copyright += ' * Copyright (c) 2015-2021 The Khronos Group Inc.\n'
copyright += ' * Copyright (c) 2015-2021 LunarG, Inc.\n'
copyright += ' * Copyright (C) 2015-2021 Google Inc.\n'
copyright += ' *\n'
copyright += ' * Licensed under the Apache License, Version 2.0 (the "License");\n'
copyright += ' * you may not use this file except in compliance with the License.\n'
copyright += ' * Copyright (c) 2015-2017 Valve Corporation\n'
copyright += ' * You may obtain a copy of the License at\n'
copyright += ' *\n'
copyright += ' * http://www.apache.org/licenses/LICENSE-2.0\n'
copyright += ' *\n'
copyright += ' * Unless required by applicable law or agreed to in writing, software\n'
copyright += ' * distributed under the License is distributed on an "AS IS" BASIS,\n'
copyright += ' * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
copyright += ' * See the License for the specific language governing permissions and\n'
copyright += ' * limitations under the License.\n'
copyright += ' *\n'
copyright += ' * Author: Mark Lobodzinski <mark@LunarG.com>\n'
copyright += ' * Author: Dave Houlton <daveh@LunarG.com>\n'
copyright += ' */\n\n'
return copyright
def incIndent(self, indent):
inc = ' ' * self.INDENT_SPACES
if indent:
return indent + inc
return inc
def decIndent(self, indent):
if indent and (len(indent) > self.INDENT_SPACES):
return indent[:-self.INDENT_SPACES]
return ''
def ExtractVUIDs(self, d):
if hasattr(d, 'items'):
for k, v in d.items():
if k == "vuid":
yield v
elif isinstance(v, dict):
for s in self.ExtractVUIDs(v):
yield s
elif isinstance (v, list):
for l in v:
for s in self.ExtractVUIDs(l):
yield s
def beginFile(self, genOpts):
OutputGenerator.beginFile(self, genOpts)
self.header_file = (genOpts.filename == 'parameter_validation.h')
self.source_file = (genOpts.filename == 'parameter_validation.cpp')
if not self.header_file and not self.source_file:
print("Error: Output Filenames have changed, update generator source.\n")
sys.exit(1)
if self.source_file or self.header_file:
s = self.GenerateCopyright()
write(s, file=self.outFile)
if self.header_file:
return
stype_map = ''
stype_version_dict = dict()
root = self.registry.reg
for node in root.findall('feature'):
version_name = node.get('name')
version_name = version_name.replace('VK_', 'VK_API_')
for enum_item in node.iter('enum'):
if enum_item.get('extends') == "VkStructureType":
struct_type_id = enum_item.get('name')
self.stype_version_dict[struct_type_id] = version_name
for extensions in root.findall('extensions'):
for extension in extensions.findall('extension'):
for entry in extension.iterfind('require/enum[@extends="VkStructureType"]'):
alias = entry.get('alias')
if alias is not None and (entry.get('comment') is None or 'typo' not in entry.get('comment')):
self.stype_version_dict[alias] = extension.get('name')
for struct in self.registry.tree.iterfind('types/type[@category="struct"]'):
stype = struct.find('member[name="sType"][type="VkStructureType"][@values]')
if stype is not None:
self.structTypes[struct.get('name')] = stype.get('values')
self.valid_usage_path = genOpts.valid_usage_path
vu_json_filename = os.path.join(self.valid_usage_path + os.sep, 'validusage.json')
if os.path.isfile(vu_json_filename):
json_file = open(vu_json_filename, 'r', encoding='utf-8')
self.vuid_dict = json.load(json_file)
json_file.close()
if len(self.vuid_dict) == 0:
print("Error: Could not find, or error loading %s/validusage.json\n", vu_json_filename)
sys.exit(1)
for json_vuid_string in self.ExtractVUIDs(self.vuid_dict):
self.valid_vuids.add(json_vuid_string)
write('#include "chassis.h"', file=self.outFile)
self.newline()
write('#include "stateless_validation.h"', file=self.outFile)
self.newline()
def endFile(self):
if self.source_file:
self.newline()
# But need to save everything as not all information is known until endFile()
for flag, string in self.flag_values_definitions.items():
if flag == 'VkGeometryInstanceFlagsKHR':
# only called in VkAccelerationStructureInstanceKHR which is never called anywhere explicitly
continue
flagBits = flag.replace('Flags', 'FlagBits')
if flag in self.called_types or flagBits in self.called_types:
write(string, file=self.outFile)
for enum, string in self.enum_values_definitions.items():
if enum in self.called_types:
write(string, file=self.outFile)
self.newline()
self.newline()
api_func = 'bool StatelessValidation::CheckPromotedApiAgainstVulkanVersion(VkInstance instance, const char *api_name, const uint32_t promoted_version) const {\n'
api_func += ' bool skip = false;\n'
api_func += ' if (api_version < promoted_version) {\n'
api_func += ' skip = LogError(instance,\n'
api_func += ' kVUID_PVError_ApiVersionViolation, "Attemped to call %s() with an effective API version of %s"\n'
api_func += ' "but this API was not promoted until version %s.", api_name, StringAPIVersion(api_version).c_str(),\n'
api_func += ' StringAPIVersion(promoted_version).c_str());\n'
api_func += ' }\n'
api_func += ' return skip;\n'
api_func += '}\n\n'
api_func += 'bool StatelessValidation::CheckPromotedApiAgainstVulkanVersion(VkPhysicalDevice pdev, const char *api_name, const uint32_t promoted_version) const {\n'
api_func += ' bool skip = false;\n'
api_func += ' const auto &target_pdev = physical_device_properties_map.find(pdev);\n'
api_func += ' if (target_pdev != physical_device_properties_map.end()) {\n'
api_func += ' auto effective_api_version = std::min(target_pdev->second->apiVersion, api_version);\n'
api_func += ' if (effective_api_version < promoted_version) {\n'
api_func += ' skip = LogError(instance,\n'
api_func += ' kVUID_PVError_ApiVersionViolation, "Attemped to call %s() with an effective API version of %s, "\n'
api_func += ' "which is the minimum of version requested in pApplicationInfo (%s) and supported by this physical device (%s), "\n'
api_func += ' "but this API was not promoted until version %s.", api_name, StringAPIVersion(effective_api_version).c_str(),\n'
api_func += ' StringAPIVersion(api_version).c_str(), StringAPIVersion(target_pdev->second->apiVersion).c_str(),\n'
api_func += ' StringAPIVersion(promoted_version).c_str());\n'
api_func += ' }\n'
api_func += ' }\n'
api_func += ' return skip;\n'
api_func += '}\n'
write(api_func, file=self.outFile)
pnext_handler = 'bool StatelessValidation::ValidatePnextStructContents(const char *api_name, const ParameterName ¶meter_name,\n'
pnext_handler += ' const VkBaseOutStructure* header, const char *pnext_vuid, bool is_physdev_api, bool is_const_param) const {\n'
pnext_handler += ' bool skip = false;\n'
pnext_handler += ' switch(header->sType) {\n'
# Do some processing here to extract data from validatedstructs...
for item in self.structextends_list:
postProcSpec = {}
postProcSpec['ppp'] = '' if not item else '{postProcPrefix}'
postProcSpec['pps'] = '' if not item else '{postProcSuffix}'
postProcSpec['ppi'] = '' if not item else '{postProcInsert}'
pnext_case = '\n'
pnext_check = ''
protect = ''
# Guard struct cases with feature ifdefs, if necessary
if item in self.struct_feature_protect.keys():
protect = self.struct_feature_protect[item]
pnext_case += '
pnext_case += ' // Validation code for %s structure members\n' % item
pnext_case += ' case %s: { // Covers VUID-%s-sType-sType\n' % (self.structTypes[item], item)
# pNext version/extension-enabled checks
ver_info = ''
struct_type = self.structTypes[item]
if struct_type in self.stype_version_dict.keys():
ver_info = self.stype_version_dict[struct_type]
else:
struct_type[:-4]
if struct_type[:-4] in self.stype_version_dict.values():
ver_info = self.stype_version_dict[struct_type[:-4]]
else:
ver_info = None
api_check = False
if ver_info is not None:
if 'VK_API_VERSION_' in ver_info:
api_check = True
api_version = ver_info;
pnext_check += ' if (api_version < %s) {\n' % ver_info
pnext_check += ' skip |= LogError(\n'
pnext_check += ' instance, pnext_vuid,\n'
pnext_check += ' "%%s: Includes a pNext pointer (%%s) to a VkStructureType (%s) which was added in %s but the "\n' % (struct_type, ver_info)
pnext_check += ' "current effective API version is %s.",\n'
pnext_check += ' api_name, parameter_name.get_name().c_str(), StringAPIVersion(api_version).c_str());\n'
pnext_check += ' }\n'
else:
# Dependent on enabled extension
ext_name = ver_info
ext_name_define = self.extension_names[ver_info]
table_type = ''
if ext_name_define in self.instance_extension_list:
table_type = 'instance'
elif ext_name_define in self.device_extension_list:
table_type = 'device'
else:
print("Error in parameter_validation_generator.py CodeGen.")
pnext_check += ' if (is_const_param) {\n'
if table_type == 'device':
pnext_check += f' if ((is_physdev_api && !SupportedByPdev(physical_device, {ext_name_define})) || (!is_physdev_api && !IsExtEnabled({table_type}_extensions.{ext_name.lower()}))) {{\n'
else:
pnext_check += ' if (!%s_extensions.%s) {\n' % (table_type, ext_name.lower())
pnext_check += ' skip |= LogError(\n'
pnext_check += ' instance, pnext_vuid,\n'
pnext_check += ' "%%s: Includes a pNext pointer (%%s) to a VkStructureType (%s), but its parent extension "\n' % struct_type
pnext_check += ' "%s has not been enabled.",\n' % ext_name
pnext_check += ' api_name, parameter_name.get_name().c_str());\n'
pnext_check += ' }\n'
pnext_check += ' }\n'
pnext_check += '\n'
expr = self.expandStructCode(item, item, 'structure->', '', ' ', [], postProcSpec)
struct_validation_source = self.ScrubStructCode(expr)
if struct_validation_source != '':
pnext_check += ' if (is_const_param) {\n'
struct_validation_source = ' %s *structure = (%s *) header;\n' % (item, item) + struct_validation_source
struct_validation_source += ' }\n'
pnext_case += '%s%s' % (pnext_check, struct_validation_source)
pnext_case += ' } break;\n'
if protect:
pnext_case += '
# Skip functions containing no validation
if struct_validation_source or pnext_check != '':
pnext_handler += pnext_case;
else:
pnext_handler += '\n // No Validation code for %s structure members -- Covers VUID-%s-sType-sType\n' % (item, item)
pnext_handler += ' default:\n'
pnext_handler += ' skip = false;\n'
pnext_handler += ' }\n'
pnext_handler += ' return skip;\n'
pnext_handler += '}\n'
write(pnext_handler, file=self.outFile)
self.newline()
ext_template = 'bool StatelessValidation::OutputExtensionError(const std::string &api_name, const std::string &extension_name) const {\n'
ext_template += ' return LogError(instance,\n'
ext_template += ' kVUID_PVError_ExtensionNotEnabled, "Attemped to call %s() but its required extension %s has not been enabled\\n",\n'
ext_template += ' api_name.c_str(), extension_name.c_str());\n'
ext_template += '}\n'
write(ext_template, file=self.outFile)
self.newline()
commands_text = '\n'.join(self.validation)
write(commands_text, file=self.outFile)
self.newline()
if self.header_file:
# Output declarations and record intercepted procedures
write('\n'.join(self.declarations), file=self.outFile)
# Finish processing in superclass
OutputGenerator.endFile(self)
#
# Processing at beginning of each feature or extension
    def beginFeature(self, interface, emit):
        """Start processing a feature (core version or extension) element.

        Resets per-feature accumulators, records the extension's name define,
        its required-extension dependencies per <require> block, and whether
        it is an instance or device extension.
        """
        # Start processing in superclass
        OutputGenerator.beginFeature(self, interface, emit)
        # C-specific
        # Accumulate includes, defines, types, enums, function pointer typedefs, end function prototypes separately for this
        # feature. They're only printed in endFeature().
        self.headerVersion = None
        self.stypes = []
        self.commands = []
        self.structMembers = []
        self.newFlags = set()
        # Platform #ifdef guard (e.g. VK_USE_PLATFORM_*) protecting this feature, if any
        self.featureExtraProtect = GetFeatureProtect(interface)
        # Every item in an extension implicitly requires the extension itself
        base_required_extensions = []
        if "VK_VERSION_1" not in self.featureName:
            # NOTE(review): interface[0][1] assumes the extension name define is the
            # second child of the first <require> block -- relies on vk.xml layout;
            # verify against the registry schema if this ever breaks.
            nameElem = interface[0][1]
            name = nameElem.get('name')
            self.extension_names[self.featureName] = name
            base_required_extensions.append(self.featureName)
        # Add extension-level dependencies declared via the 'requires' attribute
        requires = interface.get('requires')
        if requires is not None:
            base_required_extensions.extend(requires.split(','))
        # Build the per-item dependency map for everything this feature defines
        self.required_extensions = dict()
        for require_element in interface.findall('require'):
            # Start from a copy of the base dependency list
            required_extensions = list(base_required_extensions)
            # A <require> block may add further extension dependencies
            additional_extensions = require_element.get('extension')
            if additional_extensions:
                required_extensions.extend(additional_extensions.split(','))
            # Record the full dependency list for every named item in the block
            for element in require_element.findall('*[@name]'):
                self.required_extensions[element.get('name')] = required_extensions
        # Note whether this is an instance or device extension
        self.extension_type = interface.get('type')
        if interface.tag == 'extension':
            if interface.get('type') == 'instance':
                self.instance_extension_list += '%s, ' % GetNameDefine(interface)
            else:
                self.device_extension_list += '%s, ' % GetNameDefine(interface)
    def endFeature(self):
        """Finish processing a feature: emit its accumulated validation code.

        Source output only.  Wraps the feature's code in its platform #ifdef
        guard (when one applies), generates struct-member and command checking
        code, and records the All<FlagBits> constants for emission in endFile().
        """
        if self.header_file:
            return
        if (self.emit):
            # Open the platform protect guard, if needed ('' otherwise)
            ifdef = ''
            if (self.featureExtraProtect is not None):
                ifdef = '#ifdef %s\n' % self.featureExtraProtect
            self.validation.append(ifdef)
            # Generate the struct member checking code from the captured data
            self.processStructMemberData()
            # Generate the command parameter checking code from the captured data
            self.processCmdData()
            # Emit the VK_HEADER_VERSION declaration once it has been seen
            if self.headerVersion:
                write('const uint32_t GeneratedVulkanHeaderVersion = {};'.format(self.headerVersion), file=self.outFile)
            # Record an All<FlagBits> OR-of-all-bits constant for each new flags type
            for flag in sorted(self.newFlags):
                flagBits = flag.replace('Flags', 'FlagBits')
                if flagBits in self.flagBits:
                    bits = self.flagBits[flagBits]
                    decl = 'const {} All{} = {}'.format(flag, flagBits, bits[0])
                    for bit in bits[1:]:
                        decl += '|' + bit
                    decl += ';'
                    self.flag_values_definitions[flag] = Guarded(self.featureExtraProtect, decl)
            # Close the platform protect guard ('\n' separator otherwise)
            endif = '\n'
            if (self.featureExtraProtect is not None):
                endif = '#endif // %s\n' % self.featureExtraProtect
            self.validation.append(endif)
        # Finish processing in superclass
        OutputGenerator.endFeature(self)
def genType(self, typeinfo, name, alias):
if alias is not None:
self.alias_dict[name]=alias
OutputGenerator.genType(self, typeinfo, name, alias)
typeElem = typeinfo.elem
category = typeElem.get('category')
if (category == 'struct' or category == 'union'):
self.genStruct(typeinfo, name, alias)
elif (category == 'handle'):
self.handleTypes.add(name)
elif (category == 'bitmask'):
self.flags.add(name)
self.newFlags.add(name)
elif (category == 'define'):
if name == 'VK_HEADER_VERSION':
nameElem = typeElem.find('name')
self.headerVersion = noneStr(nameElem.tail).strip()
    def genStruct(self, typeinfo, typeName, alias):
        """Capture struct/union member data for validation code generation.

        Records per-member metadata (pointer/count/optional/len information)
        as CommandParam tuples.  Returned-only structs keep only their sType
        and pNext members, since everything else is output-only.
        """
        if not self.source_file:
            return
        # alias was already recorded in genType; pass through to the superclass
        OutputGenerator.genStruct(self, typeinfo, typeName, alias)
        conditions = self.structMemberValidationConditions[typeName] if typeName in self.structMemberValidationConditions else None
        members = typeinfo.elem.findall('.//member')
        if self.featureExtraProtect is not None:
            self.struct_feature_protect[typeName] = self.featureExtraProtect
        # Collect length expressions so count members can be flagged below
        lens = set()
        for member in members:
            len = self.getLen(member)  # NOTE: shadows builtin len() inside this loop
            if len:
                lens.add(len)
        membersInfo = []
        # Returned-only structs are output parameters: members are overwritten by the callee
        returned_only = typeinfo.elem.attrib.get('returnedonly') is not None
        for member in members:
            # Extract (type, name) for this member
            info = self.getTypeNameTuple(member)
            type = info[0]
            name = info[1]
            stypeValue = ''
            cdecl = self.makeCParamDecl(member, 0)
            ispointer = self.paramIsPointer(member)
            isconst = True if 'const' in cdecl else False
            # Store pointer/array/string info -- Check for parameter name in lens set
            iscount = False
            if name in lens:
                iscount = True
            # The pNext members are not tagged as optional, but are treated as optional for parameter NULL checks. Static array
            # members are also treated as optional to skip NULL pointer validation, as they won't be NULL.
            isstaticarray = self.paramIsStaticArray(member)
            isoptional = False
            if self.paramIsOptional(member) or (name == 'pNext') or (isstaticarray):
                isoptional = True
            # noautovalidity (or a blacklist entry) suppresses generated checks for this member
            noautovalidity = False
            if (member.attrib.get('noautovalidity') is not None) or ((typeName in self.structMemberBlacklist) and (name in self.structMemberBlacklist[typeName])):
                noautovalidity = True
            # These types still need their value tables even though marked noautovalidity
            noautovalidity_type_exceptions = [
                "VkQueryPipelineStatisticFlags",
                "VkBorderColor"
            ]
            # Track input types the generated validation code will reference
            if (noautovalidity == False) or (type in noautovalidity_type_exceptions):
                if not returned_only and (not ispointer or isconst):
                    self.called_types.add(type)
            structextends = False
            membersInfo.append(self.CommandParam(type=type, name=name,
                                                 ispointer=ispointer,
                                                 isstaticarray=isstaticarray,
                                                 isbool=True if type == 'VkBool32' else False,
                                                 israngedenum=True if type in self.enumRanges else False,
                                                 isconst=isconst,
                                                 isoptional=isoptional,
                                                 iscount=iscount,
                                                 noautovalidity=noautovalidity,
                                                 len=self.getLen(member),
                                                 extstructs=self.registry.validextensionstructs[typeName] if name == 'pNext' else None,
                                                 condition=conditions[name] if conditions and name in conditions else None,
                                                 cdecl=cdecl))
        # If this struct extends another, keep its name in list for further processing
        if typeinfo.elem.attrib.get('structextends') is not None:
            self.structextends_list.append(typeName)
        # Returnedonly structs should have most of their members ignored -- on entry, we only care about validating the sType and
        # pNext members. Everything else will be overwritten by the callee.
        if returned_only:
            self.returnedonly_structs.append(typeName)
            membersInfo = [m for m in membersInfo if m.name in ('sType', 'pNext')]
        self.structMembers.append(self.StructMemberData(name=typeName, members=membersInfo))
#
# Capture group (e.g. C "enum" type) info to be used for param check code generation.
# These are concatenated together with other types.
    def genGroup(self, groupinfo, groupName, alias):
        """Capture group (C enum type) info for param check code generation.

        Collects sType values, FlagBits members, and for plain enums builds
        the All<Enum>Enums value list used by range validation.
        """
        if not self.source_file:
            return
        # record the name/alias pair
        if alias is not None:
            self.alias_dict[groupName]=alias
        OutputGenerator.genGroup(self, groupinfo, groupName, alias)
        groupElem = groupinfo.elem
        # Store the sType values
        if groupName == 'VkStructureType':
            for elem in groupElem.findall('enum'):
                self.stypes.append(elem.get('name'))
        elif 'FlagBits' in groupName:
            # Collect the enabled bits of each FlagBits type
            bits = []
            for elem in groupElem.findall('enum'):
                if elem.get('supported') != 'disabled':
                    bits.append(elem.get('name'))
            if bits:
                self.flagBits[groupName] = bits
        else:
            # Determine if begin/end ranges are needed (we don't do this for VkStructureType, which has a more finely grained check)
            expandName = re.sub(r'([0-9a-z_])([A-Z0-9][^A-Z0-9]?)',r'\1_\2',groupName).upper()
            expandPrefix = expandName
            expandSuffix = ''
            expandSuffixMatch = re.search(r'[A-Z][A-Z]+$',groupName)
            if expandSuffixMatch:
                expandSuffix = '_' + expandSuffixMatch.group()
                # Strip off the suffix from the prefix
                expandPrefix = expandName.rsplit(expandSuffix, 1)[0]
            isEnum = ('FLAG_BITS' not in expandPrefix)
            if isEnum:
                self.enumRanges.add(groupName)
                # Create definition for the All<Enum>Enums value vector, guarded if necessary
                if self.featureExtraProtect is not None:
                    enum_entry = '#ifdef %s\n' % self.featureExtraProtect
                else:
                    enum_entry = ''
                enum_entry += 'const std::vector<%s> All%sEnums = {' % (groupName, groupName)
                for enum in groupElem:
                    name = enum.get('name')
                    if name is not None and enum.get('supported') != 'disabled':
                        enum_entry += '%s, ' % name
                enum_entry += '};'
                if self.featureExtraProtect is not None:
                    enum_entry += '\n#endif // %s' % self.featureExtraProtect
                self.enum_values_definitions[groupName] = enum_entry
def genCmd(self, cmdinfo, name, alias):
if alias is not None:
self.alias_dict[name]=alias
OutputGenerator.genCmd(self, cmdinfo, name, alias)
decls = self.makeCDecls(cmdinfo.elem)
typedef = decls[1]
typedef = typedef.split(')',1)[1]
if self.header_file:
if name not in self.blacklist:
if (self.featureExtraProtect is not None):
self.declarations += [ '#ifdef %s' % self.featureExtraProtect ]
decl = '%s%s' % ('bool PreCallValidate', decls[0].split("VKAPI_CALL vk")[1])
decl_terminator = ' const override;'
if 'ValidationCache' in name:
decl_terminator = ' const;'
decl = str(decl).replace(';', decl_terminator)
self.declarations += [ decl ]
if (self.featureExtraProtect is not None):
self.declarations += [ '#endif' ]
if self.source_file:
if name not in self.blacklist:
params = cmdinfo.elem.findall('param')
lens = set()
for param in params:
len = self.getLen(param)
if len:
lens.add(len)
paramsInfo = []
for param in params:
paramInfo = self.getTypeNameTuple(param)
cdecl = self.makeCParamDecl(param, 0)
ispointer = self.paramIsPointer(param)
isconst = True if 'const' in cdecl else False
if not ispointer or isconst:
self.called_types.add(paramInfo[0])
# Check for parameter name in lens set
iscount = False
if paramInfo[1] in lens:
iscount = True
paramsInfo.append(self.CommandParam(type=paramInfo[0], name=paramInfo[1],
ispointer=ispointer,
isstaticarray=self.paramIsStaticArray(param),
isbool=True if paramInfo[0] == 'VkBool32' else False,
israngedenum=True if paramInfo[0] in self.enumRanges else False,
isconst=isconst,
isoptional=self.paramIsOptional(param),
iscount=iscount,
noautovalidity=True if param.attrib.get('noautovalidity') is not None else False,
len=self.getLen(param),
extstructs=None,
condition=None,
cdecl=cdecl))
# Save return value information, if any
result_type = ''
promotion_info = ''
resultinfo = cmdinfo.elem.find('proto/type')
if (resultinfo is not None and resultinfo.text != 'void'):
result_type = resultinfo.text
if "VK_VERSION" in self.featureName and "VK_VERSION_1_0" != self.featureName:
if ('VkInstance' == paramsInfo[0].type or 'VkPhysicalDevice' == paramsInfo[0].type):
promotion_info = [paramsInfo[0].name, self.featureName]
self.commands.append(self.CommandData(name=name, params=paramsInfo, cdecl=self.makeCDecls(cmdinfo.elem)[0], extension_type=self.extension_type, result=result_type, promotion_info=promotion_info))
#
# Check if the parameter passed in is a pointer
def paramIsPointer(self, param):
ispointer = 0
paramtype = param.find('type')
if (paramtype.tail is not None) and ('*' in paramtype.tail):
ispointer = paramtype.tail.count('*')
elif paramtype.text[:4] == 'PFN_':
# Treat function pointer typedefs as a pointer to a single value
ispointer = 1
return ispointer
#
# Check if the parameter passed in is a static array
def paramIsStaticArray(self, param):
isstaticarray = 0
paramname = param.find('name')
if (paramname.tail is not None) and ('[' in paramname.tail):
isstaticarray = paramname.tail.count('[')
return isstaticarray
#
# Check if the parameter passed in is optional
# Returns a list of Boolean values for comma separated len attributes (len='false,true')
def paramIsOptional(self, param):
# See if the handle is optional
isoptional = False
# Simple, if it's optional, return true
optString = param.attrib.get('optional')
if optString:
if optString == 'true':
isoptional = True
elif ',' in optString:
opts = []
for opt in optString.split(','):
val = opt.strip()
if val == 'true':
opts.append(True)
elif val == 'false':
opts.append(False)
else:
print('Unrecognized len attribute value',val)
isoptional = opts
return isoptional
def isHandleOptional(self, param, lenParam):
if param.isoptional:
return True
# If no validity is being generated, it usually means that validity is complex and not absolute, so let's say yes.
if param.noautovalidity:
return True
if lenParam and lenParam.isoptional:
return True
return False
#
# Retrieve the value of the len tag
def getLen(self, param):
result = None
# Default to altlen when available to avoid LaTeX markup
if 'altlen' in param.attrib:
len = param.attrib.get('altlen')
else:
len = param.attrib.get('len')
if len and len != 'null-terminated':
# Only first level is supported for multidimensional arrays. Conveniently, this also strips the trailing
# 'null-terminated' from arrays of strings
len = len.split(',')[0]
# Convert scope notation to pointer access
result = str(len).replace('::', '->')
elif self.paramIsStaticArray(param):
# For static arrays get length from inside []
array_match = re.search(r'\[(\d+)\]', param.find('name').tail)
if array_match:
result = array_match.group(1)
return result
#
# Retrieve the type and name for a parameter
def getTypeNameTuple(self, param):
type = ''
name = ''
for elem in param:
if elem.tag == 'type':
type = noneStr(elem.text)
elif elem.tag == 'name':
name = noneStr(elem.text)
return (type, name)
#
# Find a named parameter in a parameter list
def getParamByName(self, params, name):
for param in params:
if param.name == name:
return param
return None
#
    # Get the length parameter record for the specified length expression
def getLenParam(self, params, length):
# First check if any element of params matches length exactly
lenParam = self.getParamByName(params, length)
if not lenParam:
# Otherwise, look for any elements of params that appear within length
len_candidates = [p for p in params if re.search(r'\b{}\b'.format(p.name), length)]
# 0 or 1 matches are expected, >1 would require a special case and/or explicit validation
if len(len_candidates) == 0:
lenParam = None
elif len(len_candidates) == 1:
lenParam = len_candidates[0]
else:
raise Exception('Cannot determine length parameter for len attribute value {}'.format(length))
return lenParam
#
# Convert a vulkan.h command declaration into a parameter_validation.h definition
def getCmdDef(self, cmd):
# Strip the trailing ';' and split into individual lines
lines = cmd.cdecl[:-1].split('\n')
cmd_hdr = '\n'.join(lines)
return cmd_hdr
#
# Generate the code to check for a NULL dereference before calling the
# validation function
def genCheckedLengthCall(self, name, exprs):
count = name.count('->')
if count:
checkedExpr = []
localIndent = ''
elements = name.split('->')
# Open the if expression blocks
for i in range(0, count):
checkedExpr.append(localIndent + 'if ({} != NULL) {{\n'.format('->'.join(elements[0:i+1])))
localIndent = self.incIndent(localIndent)
# Add the validation expression
for expr in exprs:
checkedExpr.append(localIndent + expr)
# Close the if blocks
for i in range(0, count):
localIndent = self.decIndent(localIndent)
checkedExpr.append(localIndent + '}\n')
return [checkedExpr]
# No if statements were required
return exprs
#
# Generate code to check for a specific condition before executing validation code
def genConditionalCall(self, prefix, condition, exprs):
checkedExpr = []
localIndent = ''
formattedCondition = condition.format(prefix)
checkedExpr.append(localIndent + 'if ({})\n'.format(formattedCondition))
checkedExpr.append(localIndent + '{\n')
localIndent = self.incIndent(localIndent)
for expr in exprs:
checkedExpr.append(localIndent + expr)
localIndent = self.decIndent(localIndent)
checkedExpr.append(localIndent + '}\n')
return [checkedExpr]
#
# Get VUID identifier from implicit VUID tag
def GetVuid(self, name, suffix):
vuid_string = 'VUID-%s-%s' % (name, suffix)
vuid = "kVUIDUndefined"
if '->' in vuid_string:
return vuid
if vuid_string in self.valid_vuids:
vuid = "\"%s\"" % vuid_string
else:
if name in self.alias_dict:
alias_string = 'VUID-%s-%s' % (self.alias_dict[name], suffix)
if alias_string in self.valid_vuids:
vuid = "\"%s\"" % alias_string
return vuid
#
# Generate the sType check string
    def makeStructTypeCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, lenPtrRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec, struct_type_name):
        """Generate the sType check call for a struct parameter/member.

        Chooses among validate_struct_pointer_type_array (array of struct
        pointers), validate_struct_type_array (array with pointer or integer
        count), and validate_struct_type (single struct), threading the
        implicit sType/parameter/arraylength VUIDs into the generated call.
        """
        checkExpr = []
        stype = self.structTypes[value.type]
        # VUIDs are looked up against the struct name when inside a struct, else the API name
        vuid_name = struct_type_name if struct_type_name is not None else funcPrintName
        stype_vuid = self.GetVuid(value.type, "sType-sType")
        param_vuid = self.GetVuid(vuid_name, "%s-parameter" % value.name)
        if lenValue:
            count_required_vuid = self.GetVuid(vuid_name, "%s-arraylength" % value.len)
            # This is an array of struct pointers
            if value.ispointer == 2:
                checkExpr.append('skip |= validate_struct_pointer_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {});\n'.format(
                    funcPrintName, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=lenValue.name, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec))
            # This is an array with a pointer to a count value
            elif lenValue.ispointer:
                # When the length parameter is a pointer, there is an extra Boolean parameter in the function call to indicate if it is required
                checkExpr.append('skip |= validate_struct_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {}, {});\n'.format(
                    funcPrintName, lenPtrRequired, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec))
            # This is an array with an integer count value
            else:
                checkExpr.append('skip |= validate_struct_type_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, "{sv}", {pf}{ln}, {pf}{vn}, {sv}, {}, {}, {}, {}, {});\n'.format(
                    funcPrintName, lenValueRequired, valueRequired, stype_vuid, param_vuid, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, sv=stype, pf=prefix, **postProcSpec))
        # This is an individual struct
        else:
            checkExpr.append('skip |= validate_struct_type("{}", {ppp}"{}"{pps}, "{sv}", {}{vn}, {sv}, {}, {}, {});\n'.format(
                funcPrintName, valuePrintName, prefix, valueRequired, param_vuid, stype_vuid, vn=value.name, sv=stype, vt=value.type, **postProcSpec))
        return checkExpr
#
# Generate the handle check string
def makeHandleCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec):
checkExpr = []
if lenValue:
if lenValue.ispointer:
# This is assumed to be an output array with a pointer to a count value
raise('Unsupported parameter validation case: Output handle array elements are not NULL checked')
else:
count_required_vuid = self.GetVuid(funcPrintName, "%s-arraylength" % (value.len))
# This is an array with an integer count value
checkExpr.append('skip |= validate_handle_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, {pf}{vn}, {}, {}, {});\n'.format(
funcPrintName, lenValueRequired, valueRequired, count_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
else:
# This is assumed to be an output handle pointer
raise('Unsupported parameter validation case: Output handles are not NULL checked')
return checkExpr
#
# Generate check string for an array of VkFlags values
def makeFlagsArrayCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec):
checkExpr = []
flagBitsName = value.type.replace('Flags', 'FlagBits')
if not flagBitsName in self.flagBits:
raise('Unsupported parameter validation case: array of reserved VkFlags')
else:
allFlags = 'All' + flagBitsName
checkExpr.append('skip |= validate_flags_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, "{}", {}, {pf}{}, {pf}{}, {}, {});\n'.format(funcPrintName, lenPrintName, valuePrintName, flagBitsName, allFlags, value.len, value.name, lenValueRequired, valueRequired, pf=prefix, **postProcSpec))
return checkExpr
#
# Generate pNext check string
    def makeStructNextCheck(self, prefix, value, funcPrintName, valuePrintName, postProcSpec, struct_type_name):
        """Generate the pNext chain check call for a struct's pNext member.

        When the struct accepts extension structs, first emits an array of
        their acceptable VkStructureType values; the validate_struct_pnext
        call then verifies the chain against that allow-list.
        """
        checkExpr = []
        # Generate an array of acceptable VkStructureType values for pNext
        extStructCount = 0
        extStructVar = 'NULL'
        extStructNames = 'NULL'
        pNextVuid = self.GetVuid(struct_type_name, "pNext-pNext")
        sTypeVuid = self.GetVuid(struct_type_name, "sType-unique")
        if value.extstructs:
            extStructVar = 'allowed_structs_{}'.format(struct_type_name)
            extStructCount = 'ARRAY_SIZE({})'.format(extStructVar)
            extStructNames = '"' + ', '.join(value.extstructs) + '"'
            checkExpr.append('const VkStructureType {}[] = {{ {} }};\n'.format(extStructVar, ', '.join([self.structTypes[s] for s in value.extstructs])))
        checkExpr.append('skip |= validate_struct_pnext("{}", {ppp}"{}"{pps}, {}, {}{}, {}, {}, GeneratedVulkanHeaderVersion, {}, {});\n'.format(
            funcPrintName, valuePrintName, extStructNames, prefix, value.name, extStructCount, extStructVar, pNextVuid, sTypeVuid, **postProcSpec))
        return checkExpr
#
# Generate the pointer check string
    def makePointerCheck(self, prefix, value, lenValue, valueRequired, lenValueRequired, lenPtrRequired, funcPrintName, lenPrintName, valuePrintName, postProcSpec, struct_type_name):
        """Generate the NULL/size check call for a pointer parameter/member.

        Arrays dispatch to validate_array/validate_string_array (guarded by a
        NULL-dereference check when the length expression itself contains
        '->'); non-optional single pointers dispatch to
        validate_required_pointer; the pfnInternalAllocation/Free pair gets a
        paired-presence check instead.
        """
        checkExpr = []
        # VUIDs are looked up against the struct name when inside a struct, else the API name
        vuid_tag_name = struct_type_name if struct_type_name is not None else funcPrintName
        if lenValue:
            # A length expression that dereferences requires a NULL guard (added at the end)
            length_deref = '->' in value.len
            count_required_vuid = self.GetVuid(vuid_tag_name, "%s-arraylength" % (value.len))
            array_required_vuid = self.GetVuid(vuid_tag_name, "%s-parameter" % (value.name))
            # TODO: Remove workaround for missing optional tag in vk.xml
            if array_required_vuid == '"VUID-VkFramebufferCreateInfo-pAttachments-parameter"':
                return []
            # This is an array with a pointer to a count value
            if lenValue.ispointer and not length_deref:
                # If count and array parameters are optional, there will be no validation
                if valueRequired == 'true' or lenPtrRequired == 'true' or lenValueRequired == 'true':
                    # When the length parameter is a pointer, there is an extra Boolean parameter in the function call to indicate if it is required
                    checkExpr.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, &{pf}{vn}, {}, {}, {}, {}, {});\n'.format(
                        funcPrintName, lenPtrRequired, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
            # This is an array with an integer count value
            else:
                # If count and array parameters are optional, there will be no validation
                if valueRequired == 'true' or lenValueRequired == 'true':
                    if value.type != 'char':
                        # A valid VU can't use '->' in the middle so the generated VUID from the spec uses '::' instead
                        count_required_vuid = self.GetVuid(vuid_tag_name, "%s-arraylength" % (value.len.replace('->', '::')))
                        checkExpr.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, &{pf}{vn}, {}, {}, {}, {});\n'.format(
                            funcPrintName, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
                    else:
                        # Arrays of strings get the string-array variant
                        checkExpr.append('skip |= validate_string_array("{}", {ppp}"{ldn}"{pps}, {ppp}"{dn}"{pps}, {pf}{ln}, {pf}{vn}, {}, {}, {}, {});\n'.format(
                            funcPrintName, lenValueRequired, valueRequired, count_required_vuid, array_required_vuid, ln=value.len, ldn=lenPrintName, dn=valuePrintName, vn=value.name, pf=prefix, **postProcSpec))
            if checkExpr:
                if lenValue and length_deref:
                    # Add checks to ensure the validation call does not dereference a NULL pointer to obtain the count
                    checkExpr = self.genCheckedLengthCall(value.len, checkExpr)
        # This is an individual pointer that is not allowed to be NULL
        elif not value.isoptional:
            ptr_required_vuid = self.GetVuid(vuid_tag_name, "%s-parameter" % (value.name))
            # Function pointers need a reinterpret_cast to void*, and the allocator
            # callbacks have their own explicit VUIDs
            if value.type[:4] == 'PFN_':
                allocator_dict = {'pfnAllocation': '"VUID-VkAllocationCallbacks-pfnAllocation-00632"',
                                  'pfnReallocation': '"VUID-VkAllocationCallbacks-pfnReallocation-00633"',
                                  'pfnFree': '"VUID-VkAllocationCallbacks-pfnFree-00634"',
                                 }
                vuid = allocator_dict.get(value.name)
                if vuid is not None:
                    ptr_required_vuid = vuid
                checkExpr.append('skip |= validate_required_pointer("{}", {ppp}"{}"{pps}, reinterpret_cast<const void*>({}{}), {});\n'.format(funcPrintName, valuePrintName, prefix, value.name, ptr_required_vuid, **postProcSpec))
            else:
                checkExpr.append('skip |= validate_required_pointer("{}", {ppp}"{}"{pps}, {}{}, {});\n'.format(funcPrintName, valuePrintName, prefix, value.name, ptr_required_vuid, **postProcSpec))
        else:
            # The internal allocation callbacks are optional individually, but must be set as a pair
            if (value.type, value.name) == ('PFN_vkInternalAllocationNotification', 'pfnInternalAllocation'):
                checkExpr.extend(self.internalAllocationCheck(funcPrintName, prefix, value.name, 'pfnInternalFree', postProcSpec))
            elif (value.type, value.name) == ('PFN_vkInternalFreeNotification', 'pfnInternalFree'):
                checkExpr.extend(self.internalAllocationCheck(funcPrintName, prefix, value.name, 'pfnInternalAllocation', postProcSpec))
        return checkExpr
def internalAllocationCheck(self, funcPrintName, prefix, name, complementaryName, postProcSpec):
checkExpr = []
vuid = '"VUID-VkAllocationCallbacks-pfnInternalAllocation-00635"'
checkExpr.append('if ({}{} != NULL)'.format(prefix, name))
checkExpr.append('{')
local_indent = self.incIndent('')
checkExpr.append(local_indent + 'skip |= validate_required_pointer("{}", {ppp}"{}{}"{pps}, reinterpret_cast<const void*>({}{}), {});\n'.format(funcPrintName, prefix, complementaryName, prefix, complementaryName, vuid, **postProcSpec))
checkExpr.append('}\n')
return checkExpr
def processStructMemberCode(self, line, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec):
kwargs = {}
if '{postProcPrefix}' in line:
if type(memberDisplayNamePrefix) is tuple:
kwargs['postProcPrefix'] = 'ParameterName('
else:
kwargs['postProcPrefix'] = postProcSpec['ppp']
if '{postProcSuffix}' in line:
if type(memberDisplayNamePrefix) is tuple:
kwargs['postProcSuffix'] = ', ParameterName::IndexVector{{ {}{} }})'.format(postProcSpec['ppi'], memberDisplayNamePrefix[1])
else:
kwargs['postProcSuffix'] = postProcSpec['pps']
if '{postProcInsert}' in line:
if type(memberDisplayNamePrefix) is tuple:
kwargs['postProcInsert'] = '{}{}, '.format(postProcSpec['ppi'], memberDisplayNamePrefix[1])
else:
kwargs['postProcInsert'] = postProcSpec['ppi']
if '{funcName}' in line:
kwargs['funcName'] = funcName
if '{valuePrefix}' in line:
kwargs['valuePrefix'] = memberNamePrefix
if '{displayNamePrefix}' in line:
if type(memberDisplayNamePrefix) is tuple:
kwargs['displayNamePrefix'] = memberDisplayNamePrefix[0]
else:
kwargs['displayNamePrefix'] = memberDisplayNamePrefix
if kwargs:
if 'IndexVector' in line:
line = line.replace('IndexVector{ ', 'IndexVector{{ ')
line = line.replace(' }),', ' }}),')
return line.format(**kwargs)
return line
def ScrubStructCode(self, code):
scrubbed_lines = ''
for line in code:
if 'validate_struct_pnext' in line:
continue
if 'allowed_structs' in line:
continue
if 'xml-driven validation' in line:
continue
line = line.replace('{postProcPrefix}', '')
line = line.replace('{postProcSuffix}', '')
line = line.replace('{postProcInsert}', '')
line = line.replace('{funcName}', '')
line = line.replace('{valuePrefix}', '')
line = line.replace('{displayNamePrefix}', '')
line = line.replace('{IndexVector}', '')
line = line.replace('local_data->', '')
scrubbed_lines += line
return scrubbed_lines
    def expandStructCode(self, item_type, funcName, memberNamePrefix, memberDisplayNamePrefix, indent, output, postProcSpec):
        """Append the cached validation lines for struct type `item_type` to `output`.

        Each cached line is a template (built by processStructMemberData); its
        placeholders are substituted here via processStructMemberCode with the
        given prefixes.  `output` is mutated in place and also returned.
        """
        lines = self.validatedStructs[item_type]
        for line in lines:
            # Re-terminate the previously emitted line before appending more.
            if output:
                output[-1] += '\n'
            # Cached entries may be a single template line or a nested list of them.
            if type(line) is list:
                for sub in line:
                    output.append(self.processStructMemberCode(indent + sub, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec))
            else:
                output.append(self.processStructMemberCode(indent + line, funcName, memberNamePrefix, memberDisplayNamePrefix, postProcSpec))
        return output
    def expandStructPointerCode(self, prefix, value, lenValue, funcName, valueDisplayName, postProcSpec):
        """Generate NULL-guarded validation code for a pointer-to-struct value.

        Emits `if (ptr != NULL) { ... }` around the struct's cached member
        checks; when `lenValue` is set the pointer is an array, so the checks
        are additionally wrapped in a for-loop over the count and each element
        is indexed (`->` access for arrays of pointers, `.` otherwise).
        """
        expr = []
        expr.append('if ({}{} != NULL)\n'.format(prefix, value.name))
        expr.append('{')
        indent = self.incIndent(None)
        if lenValue:
            # Array case: loop over every element.
            indexName = value.len.replace('Count', 'Index')
            expr[-1] += '\n'
            if lenValue.ispointer:
                # Pointer count parameters must be dereferenced for the bound.
                expr.append(indent + 'for (uint32_t {iname} = 0; {iname} < *{}{}; ++{iname})\n'.format(prefix, value.len, iname=indexName))
            else:
                expr.append(indent + 'for (uint32_t {iname} = 0; {iname} < {}{}; ++{iname})\n'.format(prefix, value.len, iname=indexName))
            expr.append(indent + '{')
            indent = self.incIndent(indent)
            # ispointer == 2 means an array of pointers, hence '->' member access.
            if value.ispointer == 2:
                memberNamePrefix = '{}{}[{}]->'.format(prefix, value.name, indexName)
                memberDisplayNamePrefix = ('{}[%i]->'.format(valueDisplayName), indexName)
            else:
                memberNamePrefix = '{}{}[{}].'.format(prefix, value.name, indexName)
                memberDisplayNamePrefix = ('{}[%i].'.format(valueDisplayName), indexName)
        else:
            # Single struct pointer.
            memberNamePrefix = '{}{}->'.format(prefix, value.name)
            memberDisplayNamePrefix = '{}->'.format(valueDisplayName)
        # Expand the struct's cached member checks with the prefixes built above.
        expr = self.expandStructCode(value.type, funcName, memberNamePrefix, memberDisplayNamePrefix, indent, expr, postProcSpec)
        if lenValue:
            # Close the per-element loop body.
            indent = self.decIndent(indent)
            expr.append(indent + '}\n')
        expr.append('}\n')
        return expr
    def genFuncBody(self, funcName, values, valuePrefix, displayNamePrefix, structTypeName, is_phys_device = False):
        """Generate the stateless-validation source lines for one command or struct.

        Args:
            funcName: API function name, or the '{funcName}' placeholder token
                when pre-generating struct-member templates.
            values: Parameter/member descriptors to validate.
            valuePrefix: C++ expression prefix used to reach each value.
            displayNamePrefix: Prefix used when naming the value in messages.
            structTypeName: Struct name when generating for a struct, else None;
                it also selects whether post-processing placeholder tokens are
                emitted (structs) or collapse to empty strings (commands).
            is_phys_device: True when the first command parameter is a
                VkPhysicalDevice; spliced into validate_struct_pnext calls below.

        Returns:
            (lines, unused): generated source lines, plus the names of values
            for which no check could be generated.
        """
        lines = []
        unused = []
        duplicateCountVuid = []
        # Matches a generated validate_struct_pnext(...) call so extra arguments
        # can be spliced in just before its closing parenthesis.
        validate_pnext_rx = re.compile(r'(.*validate_struct_pnext\(.*)(\).*\n*)', re.M)
        for value in values:
            usedLines = []
            lenParam = None
            postProcSpec = {}
            postProcSpec['ppp'] = '' if not structTypeName else '{postProcPrefix}'
            postProcSpec['pps'] = '' if not structTypeName else '{postProcSuffix}'
            postProcSpec['ppi'] = '' if not structTypeName else '{postProcInsert}'
            valueDisplayName = '{}{}'.format(displayNamePrefix, value.name)
            # --- Pointer / static-array values (excluding count params) ------
            if (value.ispointer or value.isstaticarray) and not value.iscount:
                # req/cpReq/cvReq are 'true'/'false' strings emitted into the
                # generated checks (value required, count-pointer required,
                # count-value required).
                req = 'true'
                cpReq = 'true'
                cvReq = 'true'
                lenDisplayName = None
                countRequiredVuid = None
                if value.isoptional:
                    req = 'false'
                if value.len:
                    # The value is an array with an explicit count parameter.
                    lenParam = self.getLenParam(values, value.len)
                    if lenParam:
                        lenDisplayName = value.len.replace(lenParam.name, displayNamePrefix + lenParam.name)
                        if lenParam.ispointer:
                            # A list isoptional splits optionality between the
                            # count pointer and the pointed-to count value.
                            if type(lenParam.isoptional) is list:
                                if lenParam.isoptional[0]:
                                    cpReq = 'false'
                                if lenParam.isoptional[1]:
                                    cvReq = 'false'
                            else:
                                if lenParam.isoptional:
                                    cpReq = 'false'
                            # Count expressed as a field of another struct: look
                            # up that field to see whether it is optional.
                            len_deref = value.len.split('->')
                            if len(len_deref) == 2:
                                struct_fields = next((struct.members for struct in self.structMembers if struct.name == lenParam.type), None)
                                if struct_fields:
                                    len_field_name = len_deref[1]
                                    struct_field = next((field for field in struct_fields if field.name == len_field_name), None)
                                    if struct_field and struct_field.isoptional:
                                        cvReq = 'false'
                        else:
                            if lenParam.isoptional:
                                cvReq = 'false'
                            elif value.noautovalidity:
                                # noautovalidity on the array still allows an
                                # arraylength check; skip VUIDs already emitted.
                                vuidNameTag = structTypeName if structTypeName is not None else funcName
                                countRequiredVuid = self.GetVuid(vuidNameTag, "%s-arraylength" % (lenParam.name))
                                if countRequiredVuid in duplicateCountVuid:
                                    countRequiredVuid = None
                                else:
                                    duplicateCountVuid.append(countRequiredVuid)
                else:
                    # No count parameter: no count checks are generated.
                    cpReq = 'false'
                    cvReq = 'false'
                AllocatorFunctions = ['PFN_vkAllocationFunction', 'PFN_vkReallocationFunction', 'PFN_vkFreeFunction', 'PFN_vkInternalAllocationNotification', 'PFN_vkInternalFreeNotification']
                if value.noautovalidity and value.type not in AllocatorFunctions and not countRequiredVuid:
                    # Cannot be generated automatically; implemented manually.
                    self.logMsg('diag', 'ParameterValidation: No validation for {} {}'.format(structTypeName if structTypeName else funcName, value.name))
                elif countRequiredVuid:
                    usedLines.append('skip |= validate_array("{}", {ppp}"{ldn}"{pps}, "", {pf}{ln}, &{pf}{vn}, true, false, {}, kVUIDUndefined);\n'.format(
                        funcName, countRequiredVuid, pf=valuePrefix, ldn=lenDisplayName, ln=value.len, vn=value.name, **postProcSpec))
                else:
                    # Dispatch on the pointed-to type.
                    if value.type in self.structTypes:
                        usedLines += self.makeStructTypeCheck(valuePrefix, value, lenParam, req, cvReq, cpReq, funcName, lenDisplayName, valueDisplayName, postProcSpec, structTypeName)
                    elif value.type in self.handleTypes and value.isconst and not self.isHandleOptional(value, lenParam):
                        usedLines += self.makeHandleCheck(valuePrefix, value, lenParam, req, cvReq, funcName, lenDisplayName, valueDisplayName, postProcSpec)
                    elif value.type in self.flags and value.isconst:
                        usedLines += self.makeFlagsArrayCheck(valuePrefix, value, lenParam, req, cvReq, funcName, lenDisplayName, valueDisplayName, postProcSpec)
                    elif value.isbool and value.isconst:
                        usedLines.append('skip |= validate_bool32_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, {pf}{}, {pf}{}, {}, {});\n'.format(funcName, lenDisplayName, valueDisplayName, value.len, value.name, cvReq, req, pf=valuePrefix, **postProcSpec))
                    elif value.israngedenum and value.isconst:
                        enum_value_list = 'All%sEnums' % value.type
                        usedLines.append('skip |= validate_ranged_enum_array("{}", {ppp}"{}"{pps}, {ppp}"{}"{pps}, "{}", {}, {pf}{}, {pf}{}, {}, {});\n'.format(funcName, lenDisplayName, valueDisplayName, value.type, enum_value_list, value.len, value.name, cvReq, req, pf=valuePrefix, **postProcSpec))
                    elif value.name == 'pNext':
                        usedLines += self.makeStructNextCheck(valuePrefix, value, funcName, valueDisplayName, postProcSpec, structTypeName)
                    else:
                        usedLines += self.makePointerCheck(valuePrefix, value, lenParam, req, cvReq, cpReq, funcName, lenDisplayName, valueDisplayName, postProcSpec, structTypeName)
                    # Pointers to structs with validated members expand recursively.
                    if value.type in self.validatedStructs:
                        if value.isconst:
                            usedLines.append(self.expandStructPointerCode(valuePrefix, value, lenParam, funcName, valueDisplayName, postProcSpec))
                        elif value.type in self.returnedonly_structs:
                            usedLines.append(self.expandStructPointerCode(valuePrefix, value, lenParam, funcName, valueDisplayName, postProcSpec))
                    # Rewrite any generated validate_struct_pnext(...) call: strip
                    # its ', true'/', false' literals and splice in the
                    # phys-device and const flags before the closing paren.
                    is_const_str = 'true' if value.isconst else 'false'
                    is_phys_device_str = 'true' if is_phys_device else 'false'
                    for setter, _, elem in multi_string_iter(usedLines):
                        elem = re.sub(r', (true|false)', '', elem)
                        m = validate_pnext_rx.match(elem)
                        if m is not None:
                            setter(f'{m.group(1)}, {is_phys_device_str}, {is_const_str}{m.group(2)}')
            # --- Scalar (non-pointer) values ---------------------------------
            else:
                if value.noautovalidity:
                    # Cannot be generated automatically; implemented manually.
                    self.logMsg('diag', 'ParameterValidation: No validation for {} {}'.format(structTypeName if structTypeName else funcName, value.name))
                else:
                    vuid_name_tag = structTypeName if structTypeName is not None else funcName
                    if value.type in self.structTypes:
                        stype = self.structTypes[value.type]
                        vuid = self.GetVuid(value.type, "sType-sType")
                        undefined_vuid = '"kVUIDUndefined"'
                        usedLines.append('skip |= validate_struct_type("{}", {ppp}"{}"{pps}, "{sv}", &({}{vn}), {sv}, false, kVUIDUndefined, {});\n'.format(
                            funcName, valueDisplayName, valuePrefix, vuid, vn=value.name, sv=stype, vt=value.type, **postProcSpec))
                    elif value.type in self.handleTypes:
                        if not self.isHandleOptional(value, None):
                            usedLines.append('skip |= validate_required_handle("{}", {ppp}"{}"{pps}, {}{});\n'.format(funcName, valueDisplayName, valuePrefix, value.name, **postProcSpec))
                    elif value.type in self.flags and value.type.replace('Flags', 'FlagBits') not in self.flagBits:
                        # Flags type with no corresponding FlagBits: reserved,
                        # must be zero.
                        vuid = self.GetVuid(vuid_name_tag, "%s-zerobitmask" % (value.name))
                        usedLines.append('skip |= validate_reserved_flags("{}", {ppp}"{}"{pps}, {pf}{}, {});\n'.format(funcName, valueDisplayName, value.name, vuid, pf=valuePrefix, **postProcSpec))
                    elif value.type in self.flags or value.type in self.flagBits:
                        if value.type in self.flags:
                            flagBitsName = value.type.replace('Flags', 'FlagBits')
                            flagsType = 'kOptionalFlags' if value.isoptional else 'kRequiredFlags'
                            invalidVuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
                            zeroVuid = self.GetVuid(vuid_name_tag, "%s-requiredbitmask" % (value.name))
                        elif value.type in self.flagBits:
                            flagBitsName = value.type
                            flagsType = 'kOptionalSingleBit' if value.isoptional else 'kRequiredSingleBit'
                            invalidVuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
                            zeroVuid = invalidVuid
                        allFlagsName = 'All' + flagBitsName
                        # NOTE(review): the next two lines are redundant --
                        # invalid_vuid is never used and allFlagsName is simply
                        # reassigned the same value; kept as-is (doc-only pass).
                        invalid_vuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
                        allFlagsName = 'All' + flagBitsName
                        zeroVuidArg = '' if value.isoptional else ', ' + zeroVuid
                        usedLines.append('skip |= validate_flags("{}", {ppp}"{}"{pps}, "{}", {}, {pf}{}, {}, {}{});\n'.format(funcName, valueDisplayName, flagBitsName, allFlagsName, value.name, flagsType, invalidVuid, zeroVuidArg, pf=valuePrefix, **postProcSpec))
                    elif value.isbool:
                        usedLines.append('skip |= validate_bool32("{}", {ppp}"{}"{pps}, {}{});\n'.format(funcName, valueDisplayName, valuePrefix, value.name, **postProcSpec))
                    elif value.israngedenum:
                        vuid = self.GetVuid(vuid_name_tag, "%s-parameter" % (value.name))
                        enum_value_list = 'All%sEnums' % value.type
                        usedLines.append('skip |= validate_ranged_enum("{}", {ppp}"{}"{pps}, "{}", {}, {}{}, {});\n'.format(funcName, valueDisplayName, value.type, enum_value_list, valuePrefix, value.name, vuid, **postProcSpec))
                    # Embedded structs with validated members expand inline.
                    if value.type in self.validatedStructs:
                        memberNamePrefix = '{}{}.'.format(valuePrefix, value.name)
                        memberDisplayNamePrefix = '{}.'.format(valueDisplayName)
                        usedLines.append(self.expandStructCode(value.type, funcName, memberNamePrefix, memberDisplayNamePrefix, '', [], postProcSpec))
            if usedLines:
                # Wrap in a condition guard when the descriptor requires one.
                if value.condition:
                    usedLines = self.genConditionalCall(valuePrefix, value.condition, usedLines)
                lines += usedLines
            elif not value.iscount:
                # Counts are referenced indirectly by their array's checks, so
                # only non-count values are reported as unused.
                unused.append(value.name)
        if not lines:
            lines.append('// No xml-driven validation\n')
        return lines, unused
def processStructMemberData(self):
indent = self.incIndent(None)
for struct in self.structMembers:
lines, unused = self.genFuncBody('{funcName}', struct.members, '{valuePrefix}', '{displayNamePrefix}', struct.name)
if lines:
self.validatedStructs[struct.name] = lines
    def processCmdData(self):
        """Generate a StatelessValidation::PreCallValidate* definition per command.

        For each command: runs genFuncBody over its parameters (skipping the
        first parameter except for vkCreateInstance), prepends
        extension-enablement checks, optionally guards promoted APIs by Vulkan
        version, appends the manual-check hook when one is registered, and
        stores the finished function text in self.validation.
        """
        indent = self.incIndent(None)
        for command in self.commands:
            # Skip the first parameter for everything but vkCreateInstance.
            startIndex = 0 if command.name == 'vkCreateInstance' else 1
            lines, unused = self.genFuncBody(command.name, command.params[startIndex:], '', '', None, is_phys_device = command.params[0].type == 'VkPhysicalDevice')
            # Insert extension checks for commands introduced by an extension.
            if (command.name in self.required_extensions) and (self.extension_type != 'device' or command.params[0].type != 'VkPhysicalDevice'):
                for ext in self.required_extensions[command.name]:
                    # Resolve the extension's name #define from the registry.
                    ext_name_define = ''
                    for extension in self.registry.extensions:
                        if extension.attrib['name'] == ext:
                            ext_name_define = GetNameDefine(extension)
                            break
                    ext_test = ''
                    # Instance-level objects check instance_extensions; device
                    # objects check device_extensions via IsExtEnabled.
                    if command.params[0].type in ["VkInstance", "VkPhysicalDevice"] or command.name == 'vkCreateInstance':
                        ext_test = 'if (!instance_extensions.%s) skip |= OutputExtensionError("%s", %s);\n' % (ext.lower(), command.name, ext_name_define)
                    else:
                        ext_test = 'if (!IsExtEnabled(device_extensions.%s)) skip |= OutputExtensionError("%s", %s);\n' % (ext.lower(), command.name, ext_name_define)
                    lines.insert(0, ext_test)
            if lines:
                # Build the method signature from the C prototype text.
                func_sig = self.getCmdDef(command) + ' const {\n'
                func_sig = func_sig.split('VKAPI_CALL vk')[1]
                cmdDef = 'bool StatelessValidation::PreCallValidate' + func_sig
                cmdDef += '%sbool skip = false;\n' % indent
                # Promoted APIs bail out early when the effective API version
                # predates the promotion.
                if isinstance(command.promotion_info, list):
                    version_flag = command.promotion_info[1]
                    version_id = version_flag.replace('VK_VERSION', 'VK_API_VERSION')
                    cmdDef += '%s if (CheckPromotedApiAgainstVulkanVersion(%s, "%s", %s)) return true;\n' % (indent, command.promotion_info[0], command.name, version_id)
                # Emit the generated check lines (entries may be nested lists).
                for line in lines:
                    if type(line) is list:
                        for sub in line:
                            cmdDef += indent + sub
                    else:
                        cmdDef += indent + line
                # Chain into the hand-written check when one is registered.
                if command.name in self.functions_with_manual_checks:
                    params_text = ''
                    for param in command.params:
                        params_text += '%s, ' % param.name
                    params_text = params_text[:-2] + ');\n'
                    cmdDef += '    if (!skip) skip |= manual_PreCallValidate'+ command.name[2:] + '(' + params_text
                cmdDef += '%sreturn skip;\n' % indent
                cmdDef += '}\n'
                self.validation.append(cmdDef)
| true | true |
f7367d3e91a11abf0fcf39706c137ba09ea539be | 921 | py | Python | back/popo/__init__.py | supercilium/popoeshnick | 4f2111e7841fbbb5b43a61b2e9011ccac192b12f | [
"MIT"
] | null | null | null | back/popo/__init__.py | supercilium/popoeshnick | 4f2111e7841fbbb5b43a61b2e9011ccac192b12f | [
"MIT"
] | 8 | 2019-03-14T15:48:49.000Z | 2020-08-30T16:30:52.000Z | back/popo/__init__.py | supercilium/popoeshnick | 4f2111e7841fbbb5b43a61b2e9011ccac192b12f | [
"MIT"
] | 1 | 2020-07-04T06:33:29.000Z | 2020-07-04T06:33:29.000Z | from flask import Flask
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_session import Session
from datetime import timedelta
app = Flask(__name__)
###################### for testing #########################
@app.route('/')
def hello_world():
    """Trivial root endpoint used for testing (see banner above)."""
    return 'Hello, World!'
@app.route('/alive')
def live():
    """Liveness-check endpoint; returns a fixed string when the app is up."""
    return "it's alive"
# Pick the config class from the FLASK_ENV environment variable
# (export FLASK_ENV=production for ProdConfig; anything else uses DevConfig).
if app.config["ENV"] == "production":
    app.config.from_object("config.ProdConfig")
else:
    app.config.from_object("config.DevConfig")
# Wire up the Flask extensions: REST API, SQLAlchemy ORM, migrations, sessions.
api = Api(app)
db = SQLAlchemy(app)
Migrate(app, db)
# Permanent sessions expire after 10 minutes.
app.permanent_session_lifetime = timedelta(minutes=10)
Session(app)
# NOTE(review): resources are imported only after `api`/`db` exist --
# presumably to avoid a circular import with popo.resources; confirm
# against popo/resources.py before reordering.
from popo.resources import Users
api.add_resource(Users, '/api/user/registration/', endpoint='users')
from popo.resources import UserID
api.add_resource(UserID, '/api/user/login/', endpoint = 'user_id')
# /user?id=123&
| 19.595745 | 68 | 0.703583 | from flask import Flask
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_session import Session
from datetime import timedelta
app = Flask(__name__)
| true | true |
f7367ec0b1c08ca138247592c249252349d7aef0 | 12,392 | py | Python | src/preprocessing/02_extractlogtraces/03_join_responses_with_ip.py | wikimedia/research-reader-survey-analysis | fbf4d71eebaf5ac5205713b0271f4ea51ab388f8 | [
"MIT"
] | null | null | null | src/preprocessing/02_extractlogtraces/03_join_responses_with_ip.py | wikimedia/research-reader-survey-analysis | fbf4d71eebaf5ac5205713b0271f4ea51ab388f8 | [
"MIT"
] | null | null | null | src/preprocessing/02_extractlogtraces/03_join_responses_with_ip.py | wikimedia/research-reader-survey-analysis | fbf4d71eebaf5ac5205713b0271f4ea51ab388f8 | [
"MIT"
] | null | null | null | """
This script joins:
* the EventLogging (EL) data based on webrequest beacons (in my experience, most complete / simplest)
* Google Forms survey responses
* EditAttemptStep data based on hive tables
There are two outputs for each language:
* CSV w/ survey responses + EL details (e.g., datetime, pageID) + webrequest details (e.g., client-IP, user-agent)
* CSV w/ all approximate userhashes for matching against webrequest logs
"""
import argparse
import csv
import os
from geopy.distance import distance
import pandas as pd
# hacky way to make sure utils is visible
import sys
sys.path.append(os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../../..'))
from src.utils import config
def main():
    """Join survey responses with their EventLogging/webrequest traces.

    Pipeline: load and deduplicate the survey webrequests (keeping the
    highest-priority response_type per pageview token), annotate them with a
    population estimate for the request's IP geolocation, then for each
    language left-join the recoded Google Forms responses against the
    requests on their shared index (response survey_id <-> request
    pageview_token), write the merged table and the per-language userhash
    list, and finally dump all userhashes to one combined CSV.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--el_logs_fn",
                        default=config.quicksurvey_el_tsv,
                        help="TSV with EventLogging data")
    parser.add_argument("--survey_req_fn",
                        default=config.quicksurvey_requests_tsv,
                        help="TSV with survey webrequests.")
    parser.add_argument("--editattempt_fn",
                        default=config.edit_el_tsv,
                        help="TSV filename for edit attempt data")
    parser.add_argument("--ids_dir",
                        default=config.ids_folder,
                        help="Folder to store survey respondent UserIDs")
    parser.add_argument("--languages",
                        default=config.languages,
                        nargs="*",
                        help="List of languages to process")
    parser.add_argument("--responses_dir",
                        default=config.responses_folder,
                        help="Folder to hold survey responses + associated webrequest")
    parser.add_argument("--dist_threshold",
                        default=config.ip_dist_threshold,
                        help="Max distance in km between Geonames point and IP point for match.")
    parser.add_argument("--geonames_tsv",
                        default=config.geonames_tsv,
                        help="Geonames TSV file w/ place and population information.")
    args = parser.parse_args()
    requests = pd.read_csv(args.survey_req_fn, sep="\t")
    print("{0} total requests.".format(len(requests)))
    requests.drop_duplicates(inplace=True)
    # Sort so the preferred response_type comes first; the later
    # drop-duplicated-index step keeps that first row per token.
    requests.sort_values(by=['response_type'], ascending=False, inplace=True)
    requests.set_index('pageview_token', inplace=True)
    print("{0} requests from {1} unique users after removing duplicates.".format(len(requests),
                                                                                 len(requests['userhash'].unique())))
    # Adds a 'population' column to `requests` in place.
    map_ip_to_population(requests, args.geonames_tsv, args.dist_threshold)
    # edit_attempts = pd.read_csv(args.editattempt_fn, sep="\t")
    # print("{0} edit actions across {1} users.".format(len(edit_attempts), len(edit_attempts['userhash'].unique())))
    # edit_attempts = edit_attempts.groupby('userhash').apply(group_edit_actions)
    if not os.path.isdir(args.ids_dir):
        print("Creating directory: {0}".format(os.path.abspath(args.ids_dir)))
        os.mkdir(args.ids_dir)
    if not os.path.isdir(args.responses_dir):
        print("Creating directory: {0}".format(os.path.abspath(args.responses_dir)))
        os.mkdir(args.responses_dir)
    all_ids = []
    for lang in args.languages:
        recoded_fn = os.path.join(config.data_folder, "recoded", "responses_{0}_recoded.csv".format(lang))
        surv_responses = pd.read_csv(recoded_fn, sep = '\t')
        surv_responses.set_index('survey_id', inplace=True)
        print("**********")
        print("Google Responses in {0}: {1}".format(lang, len(surv_responses)))
        # merge in quicksurveys eventlogging -- priority to yes to take survey, no to take survey, initiation
        srv_el_req = pd.merge(surv_responses, requests, how="left", left_index=True, right_index=True)
        srv_el_req = srv_el_req[~srv_el_req.index.duplicated(keep='first')]
        print("Breakdown of ability to match up Google responses with EL: (w/o initiation)")
        print(srv_el_req['response_type'].value_counts(dropna=False))
        print("Breakdown of ability to match up Google responses with EL (w/ initiation):")
        print(srv_el_req['country'].value_counts(dropna=False))
        # merge in edit attempt data
        # srv_el_req = srv_el_req.join(edit_attempts, how="left", on="userhash")
        # print("Responses w/ associated edit data (is anon):")
        # print(srv_el_req['is_anon'].value_counts(dropna=False))
        # Write responses+EL+webrequest data to TSV
        output_merged_data = os.path.join(args.responses_dir, "responses_with_el_{0}.csv".format(lang))
        srv_el_req.to_csv(output_merged_data, sep='\t')
        # Write userIDs associated with completed surveys to file
        output_respondent_ids = os.path.join(args.ids_dir, "ids_{0}.csv".format(lang))
        ids = srv_el_req["userhash"]
        ids = ids.dropna()
        ids.to_csv(output_respondent_ids, index=False, header=False)
        print("Complete IDs:", len(ids))
        all_ids.extend(list(ids.values))
    # Combined userhash list across all languages, one hash per row.
    if all_ids:
        with open(config.all_ids_csv, 'w') as fout:
            csvwriter = csv.writer(fout)
            for ip_ua in all_ids:
                csvwriter.writerow([ip_ua])
def group_edit_actions(user_data):
    """Collapse one user's edit-attempt rows into a single summary Series.

    Returns: whether any of the user's actions was anonymous, plus the most
    frequent edit count and editor interface across the rows.
    """
    def most_common(column):
        return user_data[column].value_counts().index[0]
    return pd.Series({
        'is_anon': any(user_data['anon']),
        'edit_count': most_common('user_edit'),
        'editor_interface': most_common('editor_interface'),
    })
def map_ip_to_population(df, geonames_tsv, dist_threshold):
    """Annotate each request row with the population of its geolocated place.

    Mutates `df` in place, adding a 'population' column computed per row by
    lookup_row (negative sentinel values encode the failure modes), prints
    match statistics, and best-effort dumps the geo columns to a side TSV
    next to the Geonames file for inspection.
    """
    print("Loading geonames lookup")
    geonames = get_geonames_map(geonames_tsv)
    print("Calculating populations")
    df['population'] = df.apply(lambda x: lookup_row(x, geonames, dist_threshold=dist_threshold), axis=1)
    print("Success rate:", (df['population'] >= 1).sum() / df['population'].count())
    # Collapse all positive populations to 1 so the breakdown shows match vs.
    # the individual negative sentinels.
    print("Breakdown of matches:", df['population'].apply(lambda x: 1 if x > 0 else x).value_counts(dropna=False))
    try:
        ipdump_fn = geonames_tsv.replace('.txt', '_ipmatch.tsv')
        df[['city', 'country_code', 'lat', 'lon', 'population']].to_csv(ipdump_fn, header=True, index=False, sep='\t')
        print("Dumped IP->population data to:", ipdump_fn)
    except Exception:
        # Best-effort debug dump; failure here should not abort the pipeline.
        print("Failed to dump IP->population data.")
def calc_dist(pt1, pt2):
    """Distance in kilometers between two (lat, lon) points, via geopy."""
    return distance(pt1, pt2).kilometers
def get_geonames_map(allcountries):
    """Build a nested place lookup from a Geonames allCountries-style dump.

    Returns a dict with two kinds of keys:
      * country code -> {place name -> {(lat, lon) -> population}}, covering
        every name/alternate name of feature-class 'P' rows and of 'A' rows
        that carry a population (population is -1 when Geonames gives none);
      * (int(lat), int(lon)) -> set of (country code, name) pairs -- a coarse
        1-degree spatial index used for point-only lookups.
    """
    geonames_header = ['geonameid', 'name', 'asciiname', 'alternatenames',
                       'latitude', 'longitude', 'feature class', 'feature code',
                       'country code', 'cc2', 'admin1 code', 'admin2 code', 'admin3 code', 'admin4 code',
                       'population', 'elevation', 'dem', 'timezone', 'modification date']
    country_idx = geonames_header.index('country code')
    pop_idx = geonames_header.index('population')
    lat_idx = geonames_header.index('latitude')
    lon_idx = geonames_header.index('longitude')
    name_idx = geonames_header.index('name')
    altname_idx = geonames_header.index('alternatenames')
    feature_idx = geonames_header.index('feature class')
    lookup = {}
    num_countries = 0
    num_places = 0
    num_pops = 0
    # NOTE(review): despite its name, nonzero_pops counts entries whose
    # population is exactly 0 (matching the "w/ pop 0" label printed below).
    nonzero_pops = 0
    duplicates = 0
    with open(allcountries, 'r') as fin:
        tsvreader = csv.reader(fin, delimiter='\t')
        for line in tsvreader:
            feature = line[feature_idx]
            try:
                population = int(line[pop_idx])
            except ValueError:
                # Missing/unparseable population is encoded as -1.
                population = -1
            # Keep populated administrative areas ('A') and all places ('P').
            if (feature == 'A' and population >= 0) or feature == 'P':
                pt = (float(line[lat_idx]), float(line[lon_idx]))
                # Index under the primary name plus every alternate name.
                names = [line[name_idx]]
                if line[altname_idx]:
                    names.extend(line[altname_idx].split(','))
                country = line[country_idx]
                if country not in lookup:
                    num_countries += 1
                    lookup[country] = {}
                for n in names:
                    if n in lookup[country]:
                        if pt in lookup[country][n]:
                            # Same name + same point seen before: keep the
                            # first real population, upgrade a 0, else count
                            # it as a conflicting duplicate.
                            existing_pop = lookup[country][n][pt]
                            if not population:
                                continue
                            elif existing_pop == population:
                                continue
                            elif not existing_pop:
                                lookup[country][n][pt] = population
                                num_pops += 1
                            else:
                                duplicates += 1
                        else:
                            lookup[country][n][pt] = population
                            num_places += 1
                            if num_places % 500000 == 0:
                                print(num_places, "added.")
                            if population >= 0:
                                num_pops += 1
                            if population == 0:
                                nonzero_pops += 1
                    else:
                        lookup[country][n] = {pt:population}
                        num_places += 1
                        if num_places % 500000 == 0:
                            print(num_places, "added.")
                        if population >= 0:
                            num_pops += 1
                        if population == 0:
                            nonzero_pops += 1
    print("{0} countries. {1} places. {2} places w/ population. {3} w/ pop 0. {4} duplicates".format(
        num_countries, num_places, num_pops, nonzero_pops, duplicates))
    # add location-based lookup index for places w/ unknown cities but that still have points
    locs_to_add = {}
    for cc in lookup:
        for n in lookup[cc]:
            for loc in lookup[cc][n]:
                simple_loc = (int(loc[0]), int(loc[1]))
                if simple_loc not in locs_to_add:
                    locs_to_add[simple_loc] = set()
                locs_to_add[simple_loc].add((cc, n))
    for l in locs_to_add:
        lookup[l] = locs_to_add[l]
    return lookup
def lookup_row(x, geonames, dist_threshold):
    """Resolve one request row to a population estimate.

    Geocodes by (country_code, city) name first, then filters the name
    matches by distance from the row's (lat, lon); falls back to a raw
    point lookup when the city is unknown or unmatched.  Returns a
    population (>0), 0 / -1 for matched Geonames entries without a usable
    count, -2 when the named city matched but nothing was within range,
    or the lookup_pt sentinels for point-only lookups.
    """
    country = x['country_code']
    city = x['city']
    pt = (float(x['lat']), float(x['lon']))
    # no city info, use lat-lon as backup
    if city.lower() == "unknown":
        return lookup_pt(pt, geonames, dist_threshold)
    # use city to geocode and then lat-lon to filter
    else:
        try:
            candidates = geonames[country][city]
            within_thres = []
            # find all potential place matches
            for cpt, cpop in candidates.items():
                if calc_dist(pt, cpt) < dist_threshold:
                    within_thres.append(cpop)
            # return potential match with highest population (arbitrary choice but empirically seems to matter little)
            if within_thres:
                # Success: found a matching place w/i distance threshold
                # Possibilities:
                #     >0 == have a real population
                #     0 if geonames listed that
                #     -1 population if geonames didn't provide a number
                return max(within_thres)
            else:
                # found a matching name but was not close enough
                backup = lookup_pt(pt, geonames, dist_threshold)
                if backup > 0:
                    return backup
                else:
                    return -2
        except KeyError:
            # did not find a matching name
            return lookup_pt(pt, geonames, dist_threshold)
def lookup_pt(pt, geonames, dist_threshold):
    """Reverse-geocode a raw (lat, lon) point to a population estimate.

    Uses the integer-degree spatial index built by get_geonames_map to find
    candidate places near `pt`, then returns the population of the closest
    candidate with a known (>0) population within `dist_threshold` km.

    Returns -3 when no candidate qualifies.
    """
    simple_pt = (int(pt[0]), int(pt[1]))
    closest_with_pop = float('inf')
    pop = -3
    for cc, name in geonames.get(simple_pt, []):
        # BUG FIX: geonames[cc][name] maps (lat, lon) -> population, so iterate
        # .items() (as lookup_row does).  Iterating the dict directly unpacked
        # each coordinate key into (cpt, cpop), making cpop the longitude and
        # passing a bare float to calc_dist.
        for cpt, cpop in geonames[cc][name].items():
            if cpop > 0:
                cand_dist = calc_dist(pt, cpt)
                if cand_dist < dist_threshold and cand_dist < closest_with_pop:
                    closest_with_pop = cand_dist
                    pop = cpop
    return pop
if __name__ == "__main__":
main() | 44.736462 | 118 | 0.577873 |
import argparse
import csv
import os
from geopy.distance import distance
import pandas as pd
import sys
sys.path.append(os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../../..'))
from src.utils import config
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--el_logs_fn",
default=config.quicksurvey_el_tsv,
help="TSV with EventLogging data")
parser.add_argument("--survey_req_fn",
default=config.quicksurvey_requests_tsv,
help="TSV with survey webrequests.")
parser.add_argument("--editattempt_fn",
default=config.edit_el_tsv,
help="TSV filename for edit attempt data")
parser.add_argument("--ids_dir",
default=config.ids_folder,
help="Folder to store survey respondent UserIDs")
parser.add_argument("--languages",
default=config.languages,
nargs="*",
help="List of languages to process")
parser.add_argument("--responses_dir",
default=config.responses_folder,
help="Folder to hold survey responses + associated webrequest")
parser.add_argument("--dist_threshold",
default=config.ip_dist_threshold,
help="Max distance in km between Geonames point and IP point for match.")
parser.add_argument("--geonames_tsv",
default=config.geonames_tsv,
help="Geonames TSV file w/ place and population information.")
args = parser.parse_args()
requests = pd.read_csv(args.survey_req_fn, sep="\t")
print("{0} total requests.".format(len(requests)))
requests.drop_duplicates(inplace=True)
requests.sort_values(by=['response_type'], ascending=False, inplace=True)
requests.set_index('pageview_token', inplace=True)
print("{0} requests from {1} unique users after removing duplicates.".format(len(requests),
len(requests['userhash'].unique())))
map_ip_to_population(requests, args.geonames_tsv, args.dist_threshold)
if not os.path.isdir(args.ids_dir):
print("Creating directory: {0}".format(os.path.abspath(args.ids_dir)))
os.mkdir(args.ids_dir)
if not os.path.isdir(args.responses_dir):
print("Creating directory: {0}".format(os.path.abspath(args.responses_dir)))
os.mkdir(args.responses_dir)
all_ids = []
for lang in args.languages:
recoded_fn = os.path.join(config.data_folder, "recoded", "responses_{0}_recoded.csv".format(lang))
surv_responses = pd.read_csv(recoded_fn, sep = '\t')
surv_responses.set_index('survey_id', inplace=True)
print("**********")
print("Google Responses in {0}: {1}".format(lang, len(surv_responses)))
srv_el_req = pd.merge(surv_responses, requests, how="left", left_index=True, right_index=True)
srv_el_req = srv_el_req[~srv_el_req.index.duplicated(keep='first')]
print("Breakdown of ability to match up Google responses with EL: (w/o initiation)")
print(srv_el_req['response_type'].value_counts(dropna=False))
print("Breakdown of ability to match up Google responses with EL (w/ initiation):")
print(srv_el_req['country'].value_counts(dropna=False))
output_merged_data = os.path.join(args.responses_dir, "responses_with_el_{0}.csv".format(lang))
srv_el_req.to_csv(output_merged_data, sep='\t')
output_respondent_ids = os.path.join(args.ids_dir, "ids_{0}.csv".format(lang))
ids = srv_el_req["userhash"]
ids = ids.dropna()
ids.to_csv(output_respondent_ids, index=False, header=False)
print("Complete IDs:", len(ids))
all_ids.extend(list(ids.values))
if all_ids:
with open(config.all_ids_csv, 'w') as fout:
csvwriter = csv.writer(fout)
for ip_ua in all_ids:
csvwriter.writerow([ip_ua])
def group_edit_actions(user_data):
is_anon = any(user_data['anon'])
edit_count = user_data['user_edit'].value_counts().index[0]
editor_interface = user_data['editor_interface'].value_counts().index[0]
return pd.Series({'is_anon': is_anon,
'edit_count': edit_count,
'editor_interface':editor_interface})
def map_ip_to_population(df, geonames_tsv, dist_threshold):
print("Loading geonames lookup")
geonames = get_geonames_map(geonames_tsv)
print("Calculating populations")
df['population'] = df.apply(lambda x: lookup_row(x, geonames, dist_threshold=dist_threshold), axis=1)
print("Success rate:", (df['population'] >= 1).sum() / df['population'].count())
print("Breakdown of matches:", df['population'].apply(lambda x: 1 if x > 0 else x).value_counts(dropna=False))
try:
ipdump_fn = geonames_tsv.replace('.txt', '_ipmatch.tsv')
df[['city', 'country_code', 'lat', 'lon', 'population']].to_csv(ipdump_fn, header=True, index=False, sep='\t')
print("Dumped IP->population data to:", ipdump_fn)
except Exception:
print("Failed to dump IP->population data.")
def calc_dist(pt1, pt2):
return distance(pt1, pt2).kilometers
def get_geonames_map(allcountries):
    """Build a place-name / grid-cell lookup from a geonames allCountries dump.

    Returns a dict with two kinds of keys living side by side:
      * country code (str) -> {place name -> {(lat, lon) -> population}}
      * (int(lat), int(lon)) grid cell (tuple) -> set of (country, name)
        pairs located in that one-degree cell (used for coordinate lookups).
    Population is -1 when the dump has no parseable value.
    """
    # Column layout of the geonames allCountries TSV.
    geonames_header = ['geonameid', 'name', 'asciiname', 'alternatenames',
                       'latitude', 'longitude', 'feature class', 'feature code',
                       'country code', 'cc2', 'admin1 code', 'admin2 code', 'admin3 code', 'admin4 code',
                       'population', 'elevation', 'dem', 'timezone', 'modification date']
    country_idx = geonames_header.index('country code')
    pop_idx = geonames_header.index('population')
    lat_idx = geonames_header.index('latitude')
    lon_idx = geonames_header.index('longitude')
    name_idx = geonames_header.index('name')
    altname_idx = geonames_header.index('alternatenames')
    feature_idx = geonames_header.index('feature class')
    lookup = {}
    num_countries = 0
    num_places = 0
    num_pops = 0
    # NOTE: despite the name, this counts places whose population is exactly
    # 0 (it is reported below as "w/ pop 0").
    nonzero_pops = 0
    duplicates = 0
    with open(allcountries, 'r') as fin:
        tsvreader = csv.reader(fin, delimiter='\t')
        for line in tsvreader:
            feature = line[feature_idx]
            try:
                population = int(line[pop_idx])
            except ValueError:
                # -1 marks "population unknown".
                population = -1
            # Keep administrative areas (A) only if they carry a population,
            # and all populated places (P).
            if (feature == 'A' and population >= 0) or feature == 'P':
                pt = (float(line[lat_idx]), float(line[lon_idx]))
                # Index the place under its primary name and all alternates.
                names = [line[name_idx]]
                if line[altname_idx]:
                    names.extend(line[altname_idx].split(','))
                country = line[country_idx]
                if country not in lookup:
                    num_countries += 1
                    lookup[country] = {}
                for n in names:
                    if n in lookup[country]:
                        if pt in lookup[country][n]:
                            # Same name at the same point seen before:
                            # prefer a real population over 0/unknown.
                            existing_pop = lookup[country][n][pt]
                            if not population:
                                continue
                            elif existing_pop == population:
                                continue
                            elif not existing_pop:
                                lookup[country][n][pt] = population
                                num_pops += 1
                            else:
                                duplicates += 1
                        else:
                            # Same name, new coordinates.
                            lookup[country][n][pt] = population
                            num_places += 1
                            if num_places % 500000 == 0:
                                print(num_places, "added.")
                            if population >= 0:
                                num_pops += 1
                                if population == 0:
                                    nonzero_pops += 1
                    else:
                        # First occurrence of this name in this country.
                        lookup[country][n] = {pt:population}
                        num_places += 1
                        if num_places % 500000 == 0:
                            print(num_places, "added.")
                        if population >= 0:
                            num_pops += 1
                            if population == 0:
                                nonzero_pops += 1
    print("{0} countries. {1} places. {2} places w/ population. {3} w/ pop 0. {4} duplicates".format(
        num_countries, num_places, num_pops, nonzero_pops, duplicates))
    # Second pass: index every (country, name) pair by its truncated
    # one-degree grid cell so lookup_pt can search by coordinates alone.
    locs_to_add = {}
    for cc in lookup:
        for n in lookup[cc]:
            for loc in lookup[cc][n]:
                simple_loc = (int(loc[0]), int(loc[1]))
                if simple_loc not in locs_to_add:
                    locs_to_add[simple_loc] = set()
                locs_to_add[simple_loc].add((cc, n))
    # Tuple keys coexist with the string country-code keys in one dict.
    for l in locs_to_add:
        lookup[l] = locs_to_add[l]
    return lookup
def lookup_row(x, geonames, dist_threshold):
    """Resolve one IP-geolocation row to a population count.

    Returns the best population found, or a negative sentinel: -2 when a
    place with a matching name exists but none is within range and the
    coordinate fallback also fails (lookup_pt supplies its own sentinels
    for the pure-coordinate path).
    """
    country = x['country_code']
    city = x['city']
    point = (float(x['lat']), float(x['lon']))
    if city.lower() == "unknown":
        # No usable city name: fall back to a pure coordinate search.
        return lookup_pt(point, geonames, dist_threshold)
    try:
        candidates = geonames[country][city]
    except KeyError:
        # Name not present in the gazetteer at all.
        return lookup_pt(point, geonames, dist_threshold)
    in_range = [pop for cand_pt, pop in candidates.items()
                if calc_dist(point, cand_pt) < dist_threshold]
    if in_range:
        return max(in_range)
    # Matching name, but nothing close enough: try coordinates instead.
    fallback = lookup_pt(point, geonames, dist_threshold)
    return fallback if fallback > 0 else -2
def lookup_pt(pt, geonames, dist_threshold):
    """Find the population of the closest populated place to pt.

    pt is a (lat, lon) tuple.  Candidates come from the one-degree grid
    cell that get_geonames_map indexes under the truncated coordinates
    (the same int() truncation is used on both sides, so the cells line
    up).  Returns the population of the nearest candidate with a positive
    population within dist_threshold km, or -3 if none qualifies.
    """
    simple_pt = (int(pt[0]), int(pt[1]))
    closest_with_pop = float('inf')
    pop = -3
    for cc, name in geonames.get(simple_pt, []):
        # BUG FIX: iterate .items() -- iterating the dict directly yielded
        # its (lat, lon) keys, which then unpacked into cpt/cpop as two
        # floats (latitude and longitude) instead of a point and a
        # population.  lookup_row already uses .items() on the same data.
        for cpt, cpop in geonames[cc][name].items():
            if cpop > 0:
                cand_dist = calc_dist(pt, cpt)
                if cand_dist < dist_threshold and cand_dist < closest_with_pop:
                    closest_with_pop = cand_dist
                    pop = cpop
    return pop
# Script entry point (no-op when imported as a module).
if __name__ == "__main__":
    main()
f7367fbd890732a0926b6aa0105cd9b4bbca9b3f | 4,326 | py | Python | contrib/seeds/generate-seeds.py | Satoshi-Nosamoto/NOS-Coin | e542087d45e53640f2f608553c2377b95a3095e6 | [
"MIT"
] | 1 | 2018-05-29T12:49:19.000Z | 2018-05-29T12:49:19.000Z | contrib/seeds/generate-seeds.py | Satoshi-Nosamoto/NOS-Coin | e542087d45e53640f2f608553c2377b95a3095e6 | [
"MIT"
] | null | null | null | contrib/seeds/generate-seeds.py | Satoshi-Nosamoto/NOS-Coin | e542087d45e53640f2f608553c2377b95a3095e6 | [
"MIT"
] | 5 | 2018-05-28T12:19:00.000Z | 2018-09-26T14:16:52.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# IPv4-mapped IPv6 prefix (::ffff:0:0/96); an IPv4 address occupies the
# final 4 bytes after this 12-byte prefix.
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# OnionCat prefix (fd87:d87e:eb43::/48) used to embed Tor .onion
# addresses in IPv6.
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6 representation.

    Accepts .onion names (OnionCat mapping), dotted IPv4, IPv6, and the
    legacy 0xDDBBCCAA little-endian IPv4 form.  Raises ValueError for
    anything it cannot parse.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # BUG FIX: the message referenced an undefined name 's',
            # turning this parse error into a NameError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix (around a '::' gap)
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # '::' separator: switch to filling the suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split an address spec into (16-byte host, port).

    Understands '[ipv6]:port', bare ipv6, and 'host:port'; the port
    falls back to defaultport when absent.
    """
    # FIX: raw string -- '\[' in a normal literal is an invalid escape
    # sequence (DeprecationWarning today, SyntaxError in future Pythons).
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # ipv6
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1: # ipv6, no port
        host = s
        port = ''
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    """Emit a C array of SeedSpec6 entries for every address listed in f."""
    entries = []
    for raw in f:
        # Strip trailing '#' comments and whitespace; skip blank lines.
        spec = raw.split('#', 1)[0].strip()
        if not spec:
            continue
        host, port = parse_spec(spec, defaultport)
        host_bytes = ','.join('0x%02x' % b for b in host)
        entries.append(' {{%s}, %i}' % (host_bytes, port))
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    """Generate chainparamsseeds.h content on stdout.

    argv[1] names a directory containing nodes_main.txt and nodes_test.txt.
    """
    if len(sys.argv) < 2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    indir = sys.argv[1]
    out = sys.stdout
    header = [
        '#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n',
        '#define BITCOIN_CHAINPARAMSSEEDS_H\n',
        '/**\n',
        ' * List of fixed seed nodes for the bitcoin network\n',
        ' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n',
        ' *\n',
        ' * Each line contains a 16-byte IPv6 address and a port.\n',
        ' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n',
        ' */\n',
    ]
    out.write(''.join(header))
    with open(os.path.join(indir, 'nodes_main.txt'), 'r') as nodes:
        process_nodes(out, nodes, 'pnSeed6_main', 2727)
    out.write('\n')
    with open(os.path.join(indir, 'nodes_test.txt'), 'r') as nodes:
        process_nodes(out, nodes, 'pnSeed6_test', 1717)
    out.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
# Script entry point (no-op when imported as a module).
if __name__ == '__main__':
    main()
| 31.347826 | 98 | 0.581368 |
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# IPv4-mapped IPv6 prefix (::ffff:0:0/96).
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# OnionCat prefix (fd87:d87e:eb43::/48) for embedding Tor .onion addresses.
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6 representation.

    Accepts .onion names (OnionCat mapping), dotted IPv4, IPv6, and the
    legacy 0xDDBBCCAA little-endian IPv4 form.  Raises ValueError for
    anything it cannot parse.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # BUG FIX: the message referenced an undefined name 's',
            # turning this parse error into a NameError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix (around a '::' gap)
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # empty component at either end
                    continue
                x += 1 # '::' separator: switch to filling the suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4 in little-endian hex
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split an address spec into (16-byte host, port).

    Understands '[ipv6]:port', bare ipv6, and 'host:port'; the port
    falls back to defaultport when absent.
    """
    # FIX: raw string -- '\[' in a normal literal is an invalid escape
    # sequence (DeprecationWarning today, SyntaxError in future Pythons).
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match: # bracketed ipv6
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1: # bare ipv6, no port
        host = s
        port = ''
    else:
        (host,_,port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host,port)
def process_nodes(g, f, structname, defaultport):
    """Emit a C array of SeedSpec6 entries for every address listed in f."""
    entries = []
    for raw in f:
        # Strip trailing '#' comments and whitespace; skip blank lines.
        spec = raw.split('#', 1)[0].strip()
        if not spec:
            continue
        host, port = parse_spec(spec, defaultport)
        host_bytes = ','.join('0x%02x' % b for b in host)
        entries.append(' {{%s}, %i}' % (host_bytes, port))
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    """Generate chainparamsseeds.h content on stdout.

    argv[1] names a directory containing nodes_main.txt and nodes_test.txt.
    """
    if len(sys.argv) < 2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    indir = sys.argv[1]
    out = sys.stdout
    header = [
        '#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n',
        '#define BITCOIN_CHAINPARAMSSEEDS_H\n',
        '/**\n',
        ' * List of fixed seed nodes for the bitcoin network\n',
        ' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n',
        ' *\n',
        ' * Each line contains a 16-byte IPv6 address and a port.\n',
        ' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n',
        ' */\n',
    ]
    out.write(''.join(header))
    with open(os.path.join(indir, 'nodes_main.txt'), 'r') as nodes:
        process_nodes(out, nodes, 'pnSeed6_main', 2727)
    out.write('\n')
    with open(os.path.join(indir, 'nodes_test.txt'), 'r') as nodes:
        process_nodes(out, nodes, 'pnSeed6_test', 1717)
    out.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
# Script entry point (no-op when imported as a module).
if __name__ == '__main__':
    main()
| true | true |
f7368159615fc34ed4019158803029ce0739af7b | 3,493 | py | Python | sdk/core/azure-core/tests/azure_core_asynctests/test_stream_generator.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/core/azure-core/tests/azure_core_asynctests/test_stream_generator.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/core/azure-core/tests/azure_core_asynctests/test_stream_generator.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure.core.pipeline.transport import (
HttpRequest,
AsyncHttpResponse,
AsyncHttpTransport,
)
from azure.core.pipeline import AsyncPipeline
from azure.core.pipeline.transport._aiohttp import AioHttpStreamDownloadGenerator
from unittest import mock
import pytest
@pytest.mark.asyncio
async def test_connection_error_response():
    """A ConnectionError on the first body read of a 200 response must end
    the download stream with StopAsyncIteration rather than surface the
    error.  asyncio.sleep is patched so any retry wait completes instantly."""
    class MockTransport(AsyncHttpTransport):
        # Transport stub: every send() returns a fresh 200 response.
        def __init__(self):
            self._count = 0
        async def __aexit__(self, exc_type, exc_val, exc_tb):
            pass
        async def close(self):
            pass
        async def open(self):
            pass
        async def send(self, request, **kwargs):
            request = HttpRequest('GET', 'http://127.0.0.1/')
            response = AsyncHttpResponse(request, None)
            response.status_code = 200
            return response
    class MockContent():
        # First read raises ConnectionError; subsequent reads return None
        # (i.e. the stream is exhausted).
        def __init__(self):
            self._first = True
        async def read(self, block_size):
            if self._first:
                self._first = False
                raise ConnectionError
            return None
    class MockInternalResponse():
        # Minimal aiohttp-style response: headers plus a content reader.
        def __init__(self):
            self.headers = {}
            self.content = MockContent()
        async def close(self):
            pass
    class AsyncMock(mock.MagicMock):
        # MagicMock whose call is awaitable, suitable for patching coroutines.
        async def __call__(self, *args, **kwargs):
            return super(AsyncMock, self).__call__(*args, **kwargs)
    http_request = HttpRequest('GET', 'http://127.0.0.1/')
    pipeline = AsyncPipeline(MockTransport())
    http_response = AsyncHttpResponse(http_request, None)
    http_response.internal_response = MockInternalResponse()
    stream = AioHttpStreamDownloadGenerator(pipeline, http_response)
    # Patch asyncio.sleep with an awaitable no-op so the test stays fast.
    with mock.patch('asyncio.sleep', new_callable=AsyncMock):
        with pytest.raises(StopAsyncIteration):
            await stream.__anext__()
@pytest.mark.asyncio
async def test_connection_error_416():
    """On a 416 response, a ConnectionError raised while reading the body is
    propagated to the caller instead of being suppressed."""
    class MockTransport(AsyncHttpTransport):
        # Transport stub: every send() returns a fresh 416 response.
        def __init__(self):
            self._count = 0
        async def __aexit__(self, exc_type, exc_val, exc_tb):
            pass
        async def close(self):
            pass
        async def open(self):
            pass
        async def send(self, request, **kwargs):
            request = HttpRequest('GET', 'http://127.0.0.1/')
            response = AsyncHttpResponse(request, None)
            response.status_code = 416
            return response
    class MockContent():
        # Body reads always fail.
        async def read(self, block_size):
            raise ConnectionError
    class MockInternalResponse():
        # Minimal aiohttp-style response: headers plus a content reader.
        def __init__(self):
            self.headers = {}
            self.content = MockContent()
        async def close(self):
            pass
    class AsyncMock(mock.MagicMock):
        # MagicMock whose call is awaitable, suitable for patching coroutines.
        async def __call__(self, *args, **kwargs):
            return super(AsyncMock, self).__call__(*args, **kwargs)
    http_request = HttpRequest('GET', 'http://127.0.0.1/')
    pipeline = AsyncPipeline(MockTransport())
    http_response = AsyncHttpResponse(http_request, None)
    http_response.internal_response = MockInternalResponse()
    stream = AioHttpStreamDownloadGenerator(pipeline, http_response)
    # Patch asyncio.sleep with an awaitable no-op so the test stays fast.
    with mock.patch('asyncio.sleep', new_callable=AsyncMock):
        with pytest.raises(ConnectionError):
            await stream.__anext__()
| 32.342593 | 81 | 0.621815 |
from azure.core.pipeline.transport import (
HttpRequest,
AsyncHttpResponse,
AsyncHttpTransport,
)
from azure.core.pipeline import AsyncPipeline
from azure.core.pipeline.transport._aiohttp import AioHttpStreamDownloadGenerator
from unittest import mock
import pytest
@pytest.mark.asyncio
async def test_connection_error_response():
    """A ConnectionError on the first body read of a 200 response must end
    the download stream with StopAsyncIteration rather than surface the
    error.  asyncio.sleep is patched so any retry wait completes instantly."""
    class MockTransport(AsyncHttpTransport):
        # Transport stub: every send() returns a fresh 200 response.
        def __init__(self):
            self._count = 0
        async def __aexit__(self, exc_type, exc_val, exc_tb):
            pass
        async def close(self):
            pass
        async def open(self):
            pass
        async def send(self, request, **kwargs):
            request = HttpRequest('GET', 'http://127.0.0.1/')
            response = AsyncHttpResponse(request, None)
            response.status_code = 200
            return response
    class MockContent():
        # First read raises ConnectionError; subsequent reads return None
        # (i.e. the stream is exhausted).
        def __init__(self):
            self._first = True
        async def read(self, block_size):
            if self._first:
                self._first = False
                raise ConnectionError
            return None
    class MockInternalResponse():
        # Minimal aiohttp-style response: headers plus a content reader.
        def __init__(self):
            self.headers = {}
            self.content = MockContent()
        async def close(self):
            pass
    class AsyncMock(mock.MagicMock):
        # MagicMock whose call is awaitable, suitable for patching coroutines.
        async def __call__(self, *args, **kwargs):
            return super(AsyncMock, self).__call__(*args, **kwargs)
    http_request = HttpRequest('GET', 'http://127.0.0.1/')
    pipeline = AsyncPipeline(MockTransport())
    http_response = AsyncHttpResponse(http_request, None)
    http_response.internal_response = MockInternalResponse()
    stream = AioHttpStreamDownloadGenerator(pipeline, http_response)
    # Patch asyncio.sleep with an awaitable no-op so the test stays fast.
    with mock.patch('asyncio.sleep', new_callable=AsyncMock):
        with pytest.raises(StopAsyncIteration):
            await stream.__anext__()
@pytest.mark.asyncio
async def test_connection_error_416():
    """On a 416 response, a ConnectionError raised while reading the body is
    propagated to the caller instead of being suppressed."""
    class MockTransport(AsyncHttpTransport):
        # Transport stub: every send() returns a fresh 416 response.
        def __init__(self):
            self._count = 0
        async def __aexit__(self, exc_type, exc_val, exc_tb):
            pass
        async def close(self):
            pass
        async def open(self):
            pass
        async def send(self, request, **kwargs):
            request = HttpRequest('GET', 'http://127.0.0.1/')
            response = AsyncHttpResponse(request, None)
            response.status_code = 416
            return response
    class MockContent():
        # Body reads always fail.
        async def read(self, block_size):
            raise ConnectionError
    class MockInternalResponse():
        # Minimal aiohttp-style response: headers plus a content reader.
        def __init__(self):
            self.headers = {}
            self.content = MockContent()
        async def close(self):
            pass
    class AsyncMock(mock.MagicMock):
        # MagicMock whose call is awaitable, suitable for patching coroutines.
        async def __call__(self, *args, **kwargs):
            return super(AsyncMock, self).__call__(*args, **kwargs)
    http_request = HttpRequest('GET', 'http://127.0.0.1/')
    pipeline = AsyncPipeline(MockTransport())
    http_response = AsyncHttpResponse(http_request, None)
    http_response.internal_response = MockInternalResponse()
    stream = AioHttpStreamDownloadGenerator(pipeline, http_response)
    # Patch asyncio.sleep with an awaitable no-op so the test stays fast.
    with mock.patch('asyncio.sleep', new_callable=AsyncMock):
        with pytest.raises(ConnectionError):
            await stream.__anext__()
| true | true |
f73681c2212c77dcbc3c9aa8cf6c020c06b27702 | 223 | py | Python | app/api/business/buyer_dashboard_business.py | ArenaNetworks/dto-digitalmarketplace-api | d0d58924719d889503ed112b0d5801b528b0398c | [
"MIT"
] | 6 | 2017-06-09T03:38:53.000Z | 2021-12-22T02:42:15.000Z | app/api/business/buyer_dashboard_business.py | ArenaNetworks/dto-digitalmarketplace-api | d0d58924719d889503ed112b0d5801b528b0398c | [
"MIT"
] | 47 | 2016-08-02T05:21:31.000Z | 2022-03-28T01:14:17.000Z | app/api/business/buyer_dashboard_business.py | AusDTO/dto-digitalmarketplace-api | 937843c9c01a71518cf4688b4daa55bbe7df1965 | [
"MIT"
] | 7 | 2016-09-13T13:07:18.000Z | 2021-02-17T10:16:21.000Z | from app.api.services import (
briefs
)
def get_briefs(user_id, status=None):
    """Return the buyer-dashboard briefs for user_id, optionally filtered by status."""
    return briefs.get_buyer_dashboard_briefs(user_id, status)
def get_brief_counts(user_id):
    """Return brief counts for user_id (delegates to the briefs service)."""
    return briefs.get_brief_counts(user_id)
| 18.583333 | 61 | 0.7713 | from app.api.services import (
briefs
)
def get_briefs(user_id, status=None):
return briefs.get_buyer_dashboard_briefs(user_id, status)
def get_brief_counts(user_id):
return briefs.get_brief_counts(user_id)
| true | true |
f73682241a967aacbd505aac5535efe0276e9f2d | 2,167 | py | Python | doit/cmd_forget.py | samuelsinayoko/doit | e7aa4f5f399c65bf9567ced9fb2673d52d9d6c92 | [
"MIT"
] | 1,390 | 2015-01-01T21:11:47.000Z | 2022-03-31T11:35:44.000Z | doit/cmd_forget.py | samuelsinayoko/doit | e7aa4f5f399c65bf9567ced9fb2673d52d9d6c92 | [
"MIT"
] | 393 | 2015-01-05T11:18:29.000Z | 2022-03-20T11:46:46.000Z | doit/cmd_forget.py | samuelsinayoko/doit | e7aa4f5f399c65bf9567ced9fb2673d52d9d6c92 | [
"MIT"
] | 176 | 2015-01-07T16:58:56.000Z | 2022-03-28T12:12:11.000Z | from .cmd_base import DoitCmdBase, check_tasks_exist
from .cmd_base import tasks_and_deps_iter, subtasks_iter
opt_forget_taskdep = {
'name': 'forget_sub',
'short': 's',
'long': 'follow-sub',
'type': bool,
'default': False,
'help': 'forget task dependencies too',
}
opt_disable_default = {
'name': 'forget_disable_default',
'long': 'disable-default',
'inverse': 'enable-default',
'type': bool,
'default': False,
'help': 'disable forgetting default tasks (when no arguments are passed)',
}
opt_forget_all = {
'name': 'forget_all',
'short': 'a',
'long': 'all',
'type': bool,
'default': False,
'help': 'forget all tasks',
}
class Forget(DoitCmdBase):
    """doit `forget` command: clear successful run status from the internal DB."""
    doc_purpose = "clear successful run status from internal DB"
    doc_usage = "[TASK ...]"
    doc_description = None

    cmd_options = (opt_forget_taskdep, opt_disable_default, opt_forget_all)

    def _execute(self, forget_sub, forget_disable_default, forget_all):
        """Remove saved data of successful runs from the dependency DB.

        forget_sub: also forget the selected tasks' dependencies.
        forget_disable_default: refuse to act when no task was named.
        forget_all: wipe status for every task.
        """
        if forget_all:
            self.dep_manager.remove_all()
            self.outstream.write("forgetting all tasks\n")
        elif self.sel_default_tasks and forget_disable_default:
            # Nothing named on the command line and defaults are disabled.
            self.outstream.write(
                "no tasks specified, pass task name, --enable-default or --all\n")
        else:
            # Forget the explicitly selected tasks (idiom: dict comprehension
            # instead of dict() over a list comprehension).
            tasks = {task.name: task for task in self.task_list}
            check_tasks_exist(tasks, self.sel_tasks)
            forget_list = self.sel_tasks
            if forget_sub:
                to_forget = list(tasks_and_deps_iter(tasks, forget_list, True))
            else:
                to_forget = []
                for name in forget_list:
                    task = tasks[name]
                    to_forget.append(task)
                    # Sub-tasks are always forgotten with their parent.
                    to_forget.extend(subtasks_iter(tasks, task))
            for task in to_forget:
                # Remove the task's entry from the dependency file.
                self.dep_manager.remove(task.name)
                self.outstream.write("forgetting %s\n" % task.name)
        self.dep_manager.close()
| 30.097222 | 83 | 0.599908 | from .cmd_base import DoitCmdBase, check_tasks_exist
from .cmd_base import tasks_and_deps_iter, subtasks_iter
opt_forget_taskdep = {
'name': 'forget_sub',
'short': 's',
'long': 'follow-sub',
'type': bool,
'default': False,
'help': 'forget task dependencies too',
}
opt_disable_default = {
'name': 'forget_disable_default',
'long': 'disable-default',
'inverse': 'enable-default',
'type': bool,
'default': False,
'help': 'disable forgetting default tasks (when no arguments are passed)',
}
opt_forget_all = {
'name': 'forget_all',
'short': 'a',
'long': 'all',
'type': bool,
'default': False,
'help': 'forget all tasks',
}
class Forget(DoitCmdBase):
    """doit `forget` command: clear successful run status from the internal DB."""
    doc_purpose = "clear successful run status from internal DB"
    doc_usage = "[TASK ...]"
    doc_description = None

    cmd_options = (opt_forget_taskdep, opt_disable_default, opt_forget_all)

    def _execute(self, forget_sub, forget_disable_default, forget_all):
        """Remove saved data of successful runs from the dependency DB.

        forget_sub: also forget the selected tasks' dependencies.
        forget_disable_default: refuse to act when no task was named.
        forget_all: wipe status for every task.
        """
        if forget_all:
            self.dep_manager.remove_all()
            self.outstream.write("forgetting all tasks\n")
        elif self.sel_default_tasks and forget_disable_default:
            # Nothing named on the command line and defaults are disabled.
            self.outstream.write(
                "no tasks specified, pass task name, --enable-default or --all\n")
        else:
            # Forget the explicitly selected tasks (idiom: dict comprehension
            # instead of dict() over a list comprehension).
            tasks = {task.name: task for task in self.task_list}
            check_tasks_exist(tasks, self.sel_tasks)
            forget_list = self.sel_tasks
            if forget_sub:
                to_forget = list(tasks_and_deps_iter(tasks, forget_list, True))
            else:
                to_forget = []
                for name in forget_list:
                    task = tasks[name]
                    to_forget.append(task)
                    # Sub-tasks are always forgotten with their parent.
                    to_forget.extend(subtasks_iter(tasks, task))
            for task in to_forget:
                # Remove the task's entry from the dependency file.
                self.dep_manager.remove(task.name)
                self.outstream.write("forgetting %s\n" % task.name)
        self.dep_manager.close()
| true | true |
f736843c3e9a412e98dcc760a9f33e7f532cd580 | 5,462 | py | Python | scraper.py | Mastermind497/DogDataFinder | 6192db5c7556c4fa1d593725a1d5b4dc0bb26c58 | [
"MIT"
] | 1 | 2021-01-11T14:07:14.000Z | 2021-01-11T14:07:14.000Z | scraper.py | Mastermind497/DogDataFinder | 6192db5c7556c4fa1d593725a1d5b4dc0bb26c58 | [
"MIT"
] | null | null | null | scraper.py | Mastermind497/DogDataFinder | 6192db5c7556c4fa1d593725a1d5b4dc0bb26c58 | [
"MIT"
] | null | null | null | import requests
import urllib.request
import time
import xlwt
from bs4 import BeautifulSoup
def addHeadersToSheet(worksheet):
#Add Style for the Headers
style_text_wrap_font_bold_black_color = xlwt.easyxf('align:wrap on; font: bold on, color-index black')
col_width = 128*30
worksheet.write(0, 0, "BREED", style_text_wrap_font_bold_black_color)
worksheet.write(0, 1, "HEIGHT", style_text_wrap_font_bold_black_color)
worksheet.write(0, 2, "WEIGHT", style_text_wrap_font_bold_black_color)
worksheet.write(0, 3, "LIFE EXPECTANCY", style_text_wrap_font_bold_black_color)
worksheet.write(0, 4, "CHARACTERISTICS", style_text_wrap_font_bold_black_color)
worksheet.write(0, 5, "GROOMING FREQUENCY", style_text_wrap_font_bold_black_color)
worksheet.write(0, 6, "SHEDDING LEVEL", style_text_wrap_font_bold_black_color)
worksheet.write(0, 7, "ENERGY LEVEL", style_text_wrap_font_bold_black_color)
worksheet.write(0, 8, "TRAINABILITY", style_text_wrap_font_bold_black_color)
worksheet.write(0, 9, "TEMPERAMENT/DEMEANOR", style_text_wrap_font_bold_black_color)
worksheet.col(0).width = col_width
worksheet.col(1).width = col_width
worksheet.col(2).width = col_width
worksheet.col(3).width = col_width
worksheet.col(4).width = col_width
worksheet.col(5).width = col_width
worksheet.col(6).width = col_width
worksheet.col(7).width = col_width
worksheet.col(8).width = col_width
worksheet.col(9).width = col_width
def _attribute_text(container, index):
    """Text of the index-th attribute-list entry, or "NA" when absent.

    AttributeError also covers container being None (section not found on
    the page), matching the original per-field try/except behavior.
    """
    try:
        item = container.find_all("li")[index]
        return item.find("span", {"class": "attribute-list__description"}).string
    except (IndexError, AttributeError):
        return "NA"


def _graph_text(container, index, missing="NA"):
    """Bar-graph text of the index-th graph section of a tab.

    Returns `missing` when the section does not exist (IndexError) and
    "NA" when the tab or text node is absent (AttributeError) -- this
    mirrors the original per-field fallback values exactly.
    """
    try:
        section = container.find_all("div", {"class": "graph-section__inner"})[index]
        return section.find("div", {"class": "bar-graph__text"}).string
    except IndexError:
        return missing
    except AttributeError:
        return "NA"


def insertDataInSheet(worksheet, currentDogCounter, dog):
    """Scrape one breed page (BeautifulSoup doc `dog`) into row currentDogCounter."""
    breed = dog.find("div", {"id": "page-title"}).select('h1')[0].text.strip()
    print(str(currentDogCounter) + " " + breed)

    # Ten near-identical try/except blocks deduplicated into two helpers.
    attribute_list = dog.find("ul", {"class": "attribute-list"})
    characteristics = _attribute_text(attribute_list, 0)
    height = _attribute_text(attribute_list, 2)
    weight = _attribute_text(attribute_list, 3)
    lifeExpancy = _attribute_text(attribute_list, 4)

    grooming_tab = dog.find("div", {"id": "panel-GROOMING"})
    groomingFrequency = _graph_text(grooming_tab, 0)
    shedding = _graph_text(grooming_tab, 1)

    energy_tab = dog.find("div", {"id": "panel-EXERCISE"})
    energyLevel = _graph_text(energy_tab, 0, missing="DOUBLE CHECK")

    training_tab = dog.find("div", {"id": "panel-TRAINING"})
    trainability = _graph_text(training_tab, 0, missing="DOUBLE CHECK")
    temperament = _graph_text(training_tab, 1, missing="DOUBLE CHECK")

    # Column order must match addHeadersToSheet.
    row = (breed, height, weight, lifeExpancy, characteristics,
           groomingFrequency, shedding, energyLevel, trainability, temperament)
    for col, value in enumerate(row):
        worksheet.write(currentDogCounter, col, value)
#Set Up the Excel File
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet("Dogs")
excel_file_path = "./Dog Options.xls"
addHeadersToSheet(worksheet)
# Row 0 holds the headers; data rows start at 1.
currentDogCounter = 1
# Walk the first 24 pages (hard-coded) of the AKC breed listing.
for i in range(24):
    url = "https://www.akc.org/dog-breeds/page/" + str(i + 1)
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "lxml")
    topDiv = soup.find("div", {"class": "contents-grid-group"})
    secondDiv = topDiv.find("div")
    dogChoices = secondDiv.find_all("div", {"class": "grid-col"})
    for dog in dogChoices:
        # Follow each breed card to its detail page and scrape one row.
        href = dog.find("a").get("href")
        nextResponse = requests.get(href)
        dog = BeautifulSoup(nextResponse.text, "lxml")
        insertDataInSheet(worksheet, currentDogCounter, dog)
        currentDogCounter += 1
workbook.save(excel_file_path)
| 38.464789 | 142 | 0.685646 | import requests
import urllib.request
import time
import xlwt
from bs4 import BeautifulSoup
def addHeadersToSheet(worksheet):
    """Write the bold header row into row 0 and widen every column.

    The column order here must match the write order in insertDataInSheet.
    """
    style_text_wrap_font_bold_black_color = xlwt.easyxf(
        'align:wrap on; font: bold on, color-index black')
    col_width = 128 * 30
    headers = (
        "BREED", "HEIGHT", "WEIGHT", "LIFE EXPECTANCY", "CHARACTERISTICS",
        "GROOMING FREQUENCY", "SHEDDING LEVEL", "ENERGY LEVEL",
        "TRAINABILITY", "TEMPERAMENT/DEMEANOR",
    )
    # One loop instead of ten copy-pasted write/width pairs.
    for col, title in enumerate(headers):
        worksheet.write(0, col, title, style_text_wrap_font_bold_black_color)
        worksheet.col(col).width = col_width
def _attribute_text(container, index):
    """Text of the index-th attribute-list entry, or "NA" when absent.

    AttributeError also covers container being None (section not found on
    the page), matching the original per-field try/except behavior.
    """
    try:
        item = container.find_all("li")[index]
        return item.find("span", {"class": "attribute-list__description"}).string
    except (IndexError, AttributeError):
        return "NA"


def _graph_text(container, index, missing="NA"):
    """Bar-graph text of the index-th graph section of a tab.

    Returns `missing` when the section does not exist (IndexError) and
    "NA" when the tab or text node is absent (AttributeError) -- this
    mirrors the original per-field fallback values exactly.
    """
    try:
        section = container.find_all("div", {"class": "graph-section__inner"})[index]
        return section.find("div", {"class": "bar-graph__text"}).string
    except IndexError:
        return missing
    except AttributeError:
        return "NA"


def insertDataInSheet(worksheet, currentDogCounter, dog):
    """Scrape one breed page (BeautifulSoup doc `dog`) into row currentDogCounter."""
    breed = dog.find("div", {"id": "page-title"}).select('h1')[0].text.strip()
    print(str(currentDogCounter) + " " + breed)

    # Ten near-identical try/except blocks deduplicated into two helpers.
    attribute_list = dog.find("ul", {"class": "attribute-list"})
    characteristics = _attribute_text(attribute_list, 0)
    height = _attribute_text(attribute_list, 2)
    weight = _attribute_text(attribute_list, 3)
    lifeExpancy = _attribute_text(attribute_list, 4)

    grooming_tab = dog.find("div", {"id": "panel-GROOMING"})
    groomingFrequency = _graph_text(grooming_tab, 0)
    shedding = _graph_text(grooming_tab, 1)

    energy_tab = dog.find("div", {"id": "panel-EXERCISE"})
    energyLevel = _graph_text(energy_tab, 0, missing="DOUBLE CHECK")

    training_tab = dog.find("div", {"id": "panel-TRAINING"})
    trainability = _graph_text(training_tab, 0, missing="DOUBLE CHECK")
    temperament = _graph_text(training_tab, 1, missing="DOUBLE CHECK")

    # Column order must match addHeadersToSheet.
    row = (breed, height, weight, lifeExpancy, characteristics,
           groomingFrequency, shedding, energyLevel, trainability, temperament)
    for col, value in enumerate(row):
        worksheet.write(currentDogCounter, col, value)
# Set up the Excel workbook and output path.
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet("Dogs")
excel_file_path = "./Dog Options.xls"
addHeadersToSheet(worksheet)
# Row 0 holds the headers; data rows start at 1.
currentDogCounter = 1
# Walk the first 24 pages (hard-coded) of the AKC breed listing.
for i in range(24):
    url = "https://www.akc.org/dog-breeds/page/" + str(i + 1)
    response = requests.get(url)
    soup = BeautifulSoup(response.text, "lxml")
    topDiv = soup.find("div", {"class": "contents-grid-group"})
    secondDiv = topDiv.find("div")
    dogChoices = secondDiv.find_all("div", {"class": "grid-col"})
    for dog in dogChoices:
        # Follow each breed card to its detail page and scrape one row.
        href = dog.find("a").get("href")
        nextResponse = requests.get(href)
        dog = BeautifulSoup(nextResponse.text, "lxml")
        insertDataInSheet(worksheet, currentDogCounter, dog)
        currentDogCounter += 1
workbook.save(excel_file_path)
| true | true |
f7368465fe7af359a0e682f88039ce381df409ff | 2,676 | py | Python | algos/prediction/transformers.py | gcba/IATos | d42cffea313170bb249edcadb0776f7a6d368654 | [
"MIT"
] | 3 | 2022-01-21T02:50:16.000Z | 2022-02-22T13:00:00.000Z | algos/prediction/transformers.py | gcba/IATos | d42cffea313170bb249edcadb0776f7a6d368654 | [
"MIT"
] | null | null | null | algos/prediction/transformers.py | gcba/IATos | d42cffea313170bb249edcadb0776f7a6d368654 | [
"MIT"
] | 2 | 2021-09-07T03:19:35.000Z | 2021-09-08T13:37:48.000Z | import librosa
import numpy as np
from PIL import Image
from typing import Optional
from sklearn.base import BaseEstimator, TransformerMixin
from matplotlib.cm import ScalarMappable
__all__ = [
"Denoising",
"MelSpectogram",
"ColoredSpectogram",
]
class BaseTransformer(BaseEstimator, TransformerMixin):
    """Common scaffolding for the stateless transformers in this module.

    Subclasses only implement ``transform``; fitting is a no-op.
    """

    def __init__(self):
        pass

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn, return self for chaining.
        return self

    @classmethod
    def read_params(cls, params):
        """Build an instance from a dict of constructor keyword arguments."""
        instance = cls(**params)
        return instance
class Denoising(BaseTransformer):
    """Placeholder for the "denoising" stage, currently implemented in MATLAB."""

    def transform(self, X: np.array, y: Optional[np.array] = None) -> np.array:
        """Identity pass-through until the MATLAB port lands here."""
        return X
class MelSpectogram(BaseTransformer):
    """Turn a 1-D signal into a Mel-scaled spectrogram (in decibels) via librosa.

    Parameters
    ----------
    The constructor parameters mirror those of ``librosa.feature.melspectrogram``
    and ``librosa.power_to_db``.  ``T`` transposes the result and ``as_ratio``
    reinterprets ``hop_length`` as a divisor of the signal length (both kept
    for backward compatibility).

    Returns
    -------
    np.array : spectrogram with values in decibels.
    """

    def __init__(
        self,
        sr: int,
        n_fft: int,
        hop_length: int,
        n_mels: int,
        fmin: int,
        fmax: int,
        ref: str,
        T: bool,
        as_ratio: bool,
    ):
        self.sr = sr
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.n_mels = n_mels
        self.fmin = fmin
        self.fmax = fmax
        self.ref = ref
        self.T = T
        self.as_ratio = as_ratio

    def transform(self, X: np.array, y: Optional[np.array] = None) -> np.array:
        spectrogram = self._mel_spec(X)
        if self.T:  # backward compatibility
            spectrogram = spectrogram.T
        reference = getattr(np, self.ref)  # e.g. "max" -> np.max
        return librosa.power_to_db(spectrogram, ref=reference)

    def _mel_spec(self, X: np.array) -> np.array:
        # When as_ratio is set, hop_length counts hops instead of samples,
        # so the effective hop is the signal length divided by it.
        hop = X.size // self.hop_length if self.as_ratio else self.hop_length
        return librosa.feature.melspectrogram(
            y=X, sr=self.sr, hop_length=hop, n_mels=self.n_mels
        )
class ColoredSpectogram(BaseTransformer):
    """Map a matrix of values onto a color scale and return an RGB image.

    Parameters
    ----------
    cmap : str
        Name of a colormap resolvable through ``matplotlib.cm.get_cmap``.

    Returns
    -------
    PIL.Image : image in RGB mode.
    """

    def __init__(self, cmap: str):
        self.cmap = cmap

    def transform(self, X: np.array, y: Optional[np.array] = None) -> Image:
        mapper = ScalarMappable(cmap=self.cmap)
        rgba = mapper.to_rgba(X, bytes=True)  # uint8 RGBA array
        return Image.fromarray(rgba).convert("RGB")
| 24.550459 | 89 | 0.611734 | import librosa
import numpy as np
from PIL import Image
from typing import Optional
from sklearn.base import BaseEstimator, TransformerMixin
from matplotlib.cm import ScalarMappable
__all__ = [
"Denoising",
"MelSpectogram",
"ColoredSpectogram",
]
class BaseTransformer(BaseEstimator, TransformerMixin):
    """Shared scaffolding for the transformers below; fitting is a no-op."""
    def __init__(self):
        pass
    def fit(self, X, y=None):
        # Stateless: nothing to learn, return self so sklearn pipelines can chain.
        return self
    @classmethod
    def read_params(cls, params):
        """Build an instance from a dict of constructor keyword arguments."""
        return cls(**params)
class Denoising(BaseTransformer):
    """Placeholder for the denoising stage (original implementation is in MATLAB)."""
    def transform(self, X: np.array, y: Optional[np.array] = None) -> np.array:
        # Identity pass-through until the denoising port is implemented.
        return X
class MelSpectogram(BaseTransformer):
    """Convert a 1-D signal into a Mel-scaled spectrogram in decibels (librosa)."""
    def __init__(
        self,
        sr: int,
        n_fft: int,
        hop_length: int,
        n_mels: int,
        fmin: int,
        fmax: int,
        ref: str,
        T: bool,
        as_ratio: bool,
    ):
        # Parameters mirror librosa.feature.melspectrogram / librosa.power_to_db.
        # NOTE(review): n_fft, fmin and fmax are stored but never forwarded below.
        self.sr = sr
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.n_mels = n_mels
        self.fmin = fmin
        self.fmax = fmax
        self.ref = ref
        self.T = T
        self.as_ratio = as_ratio
    def transform(self, X: np.array, y: Optional[np.array] = None) -> np.array:
        X_ = self._mel_spec(X)
        if self.T:
            # Transpose for consumers that expect the flipped layout.
            X_ = X_.T
        # self.ref names a numpy reduction, e.g. "max" -> np.max.
        return librosa.power_to_db(X_, ref=getattr(np, self.ref))
    def _mel_spec(self, X: np.array) -> np.array:
        hop = self.hop_length
        if self.as_ratio:
            # Reinterpret hop_length as a divisor of the signal length.
            hop = X.size // self.hop_length
        return librosa.feature.melspectrogram(
            y=X, sr=self.sr, hop_length=hop, n_mels=self.n_mels
        )
class ColoredSpectogram(BaseTransformer):
    """Map a value matrix through a matplotlib colormap and return an RGB image."""
    def __init__(self, cmap: str):
        # cmap: name of a colormap known to matplotlib.
        self.cmap = cmap
    def transform(self, X: np.array, y: Optional[np.array] = None) -> Image:
        # to_rgba(..., bytes=True) yields a uint8 RGBA array.
        X_ = ScalarMappable(cmap=self.cmap).to_rgba(X, bytes=True)
        return Image.fromarray(X_).convert("RGB")
| true | true |
f73684fe31a19ed4f0c1ff760d55d461573b2eae | 6,024 | py | Python | test/client/test_request_upload.py | sgnes/python-udsoncan | 0906911a50e8e4e64feaf4d29c0ac884ec0df61d | [
"MIT"
] | 1 | 2019-07-02T10:15:03.000Z | 2019-07-02T10:15:03.000Z | test/client/test_request_upload.py | sgnes/python-udsoncan | 0906911a50e8e4e64feaf4d29c0ac884ec0df61d | [
"MIT"
] | null | null | null | test/client/test_request_upload.py | sgnes/python-udsoncan | 0906911a50e8e4e64feaf4d29c0ac884ec0df61d | [
"MIT"
] | null | null | null | from udsoncan.client import Client
from udsoncan import services, MemoryLocation, DataFormatIdentifier
from udsoncan.exceptions import *
from test.ClientServerTest import ClientServerTest
class TestRequestUpload(ClientServerTest):
    """Tests for the UDS RequestUpload (0x35) client service.

    Methods come in pairs: each ``test_*`` method plays the server side
    (reads the raw request frame from ``conn.touserqueue``, asserts its bytes,
    and pushes a canned response), while the matching ``_test_*`` method
    drives the client API — presumably ClientServerTest runs the two halves
    concurrently (confirm against ClientServerTest).
    Request frames are 0x35 + dataFormatIdentifier + addressAndLengthFormat +
    address + size; positive responses start with 0x75.
    """
    def __init__(self, *args, **kwargs):
        ClientServerTest.__init__(self, *args, **kwargs)
    def test_request_upload_success(self):
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x35\x00\x12\x12\x34\xFF")
        self.conn.fromuserqueue.put(b"\x75\x20\xab\xcd")  # Positive response
    def _test_request_upload_success(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertEqual(response.service_data.max_length,0xabcd)
    def test_request_upload_success_spr_no_effect(self):
        # suppressPositiveResponse must NOT be set for RequestUpload - same frame as above.
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x35\x00\x12\x12\x34\xFF")
        self.conn.fromuserqueue.put(b"\x75\x20\xab\xcd")  # Positive response
    def _test_request_upload_success_spr_no_effect(self):
        with self.udsclient.suppress_positive_response:
            memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
            response = self.udsclient.request_upload(memory_location=memloc)
            self.assertEqual(response.service_data.max_length,0xabcd)
    def test_request_upload_config_format(self):
        # Address/size formats come from the client config (32/16 bits) here.
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x35\x00\x24\x00\x00\x12\x34\x00\xFF")  # dfi = 24 and 0 padding
        self.conn.fromuserqueue.put(b"\x75\x20\xab\xcd")  # Positive response
    def _test_request_upload_config_format(self):
        self.udsclient.set_configs({'server_address_format':32, 'server_memorysize_format':16})
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertEqual(response.service_data.max_length,0xabcd)
    def test_request_upload_success_lfid(self):
        # Response advertises a different lengthFormatIdentifier (0x23) - still parsed.
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x35\x00\x12\x12\x34\xFF")
        self.conn.fromuserqueue.put(b"\x75\x23\xab\xcd")  # Positive response
    def _test_request_upload_success_lfid(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertEqual(response.service_data.max_length,0xabcd)
    def test_request_upload_success_dfi(self):
        # dataFormatIdentifier byte 0x52 = compression 5, encryption 2.
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x35\x52\x12\x12\x34\xFF")
        self.conn.fromuserqueue.put(b"\x75\x20\xab\xcd")  # Positive response
    def _test_request_upload_success_dfi(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        dfi =DataFormatIdentifier(compression=5, encryption=2)
        response = self.udsclient.request_upload(memory_location=memloc, dfi=dfi)
        self.assertEqual(response.service_data.max_length,0xabcd)
    def test_incomplete_nblock_response_exception(self):
        self.wait_request_and_respond(b"\x75\x40\xab\xcd")  # Missing 2 bytes to complete number of block
    def _test_incomplete_nblock_response_exception(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        with self.assertRaises(InvalidResponseException):
            self.udsclient.request_upload(memory_location=memloc)
    def test_incomplete_nblock_response_no_exception(self):
        self.wait_request_and_respond(b"\x75\x40\xab\xcd")  # Missing 2 bytes to complete number of block
    def _test_incomplete_nblock_response_no_exception(self):
        # With exceptions disabled, the invalid response is returned with valid=False.
        self.udsclient.config['exception_on_invalid_response'] = False
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertFalse(response.valid)
    def test_request_upload_invalidservice_exception(self):
        self.wait_request_and_respond(b"\x00\x20\x12\x34")  #Inexistent Service
    def _test_request_upload_invalidservice_exception(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        with self.assertRaises(InvalidResponseException) as handle:
            self.udsclient.request_upload(memory_location=memloc)
    def test_request_upload_invalidservice_no_exception(self):
        self.wait_request_and_respond(b"\x00\x20\x12\x34")  #Inexistent Service
    def _test_request_upload_invalidservice_no_exception(self):
        self.udsclient.config['exception_on_invalid_response'] = False
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertFalse(response.valid)
    def test_request_upload_wrongservice_exception(self):
        self.wait_request_and_respond(b"\x7E\x20\x12\x34")  # Valid but wrong service (Tester Present)
    def _test_request_upload_wrongservice_exception(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        with self.assertRaises(UnexpectedResponseException) as handle:
            self.udsclient.request_upload(memory_location=memloc)
    def test_request_upload_wrongservice_no_exception(self):
        self.wait_request_and_respond(b"\x7E\x20\x12\x34")  # Valid but wrong service (Tester Present)
    def _test_request_upload_wrongservice_no_exception(self):
        # Wrong-service response is structurally valid but flagged as unexpected.
        self.udsclient.config['exception_on_unexpected_response'] = False
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertTrue(response.valid)
        self.assertTrue(response.unexpected)
    def test_bad_params(self):
        # No server-side half: parameter validation fails before any request is sent.
        pass
    def _test_bad_params(self):
        with self.assertRaises(ValueError) as handle:
            self.udsclient.request_upload(memory_location=1)
        with self.assertRaises(ValueError) as handle:
            self.udsclient.request_upload(memory_location="asd")
| 48.192 | 99 | 0.813413 | from udsoncan.client import Client
from udsoncan import services, MemoryLocation, DataFormatIdentifier
from udsoncan.exceptions import *
from test.ClientServerTest import ClientServerTest
class TestRequestUpload(ClientServerTest):
    """Tests for the UDS RequestUpload (0x35) client service.

    Methods come in pairs: each ``test_*`` method plays the server side
    (reads the raw request frame from ``conn.touserqueue``, asserts its bytes,
    and pushes a canned response), while the matching ``_test_*`` method
    drives the client API — presumably ClientServerTest runs the two halves
    concurrently (confirm against ClientServerTest).
    Positive responses start with 0x75.
    """
    def __init__(self, *args, **kwargs):
        ClientServerTest.__init__(self, *args, **kwargs)
    def test_request_upload_success(self):
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x35\x00\x12\x12\x34\xFF")
        self.conn.fromuserqueue.put(b"\x75\x20\xab\xcd")  # positive response
    def _test_request_upload_success(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertEqual(response.service_data.max_length,0xabcd)
    def test_request_upload_success_spr_no_effect(self):
        # suppressPositiveResponse must not change the RequestUpload frame.
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x35\x00\x12\x12\x34\xFF")
        self.conn.fromuserqueue.put(b"\x75\x20\xab\xcd")  # positive response
    def _test_request_upload_success_spr_no_effect(self):
        with self.udsclient.suppress_positive_response:
            memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
            response = self.udsclient.request_upload(memory_location=memloc)
            self.assertEqual(response.service_data.max_length,0xabcd)
    def test_request_upload_config_format(self):
        # 32-bit address / 16-bit size from client config -> format byte 0x24, zero padding.
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x35\x00\x24\x00\x00\x12\x34\x00\xFF")
        self.conn.fromuserqueue.put(b"\x75\x20\xab\xcd")  # positive response
    def _test_request_upload_config_format(self):
        self.udsclient.set_configs({'server_address_format':32, 'server_memorysize_format':16})
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertEqual(response.service_data.max_length,0xabcd)
    def test_request_upload_success_lfid(self):
        # Response with a different lengthFormatIdentifier (0x23) is still parsed.
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x35\x00\x12\x12\x34\xFF")
        self.conn.fromuserqueue.put(b"\x75\x23\xab\xcd")  # positive response
    def _test_request_upload_success_lfid(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertEqual(response.service_data.max_length,0xabcd)
    def test_request_upload_success_dfi(self):
        # dataFormatIdentifier byte 0x52 = compression 5, encryption 2.
        request = self.conn.touserqueue.get(timeout=0.2)
        self.assertEqual(request, b"\x35\x52\x12\x12\x34\xFF")
        self.conn.fromuserqueue.put(b"\x75\x20\xab\xcd")  # positive response
    def _test_request_upload_success_dfi(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        dfi =DataFormatIdentifier(compression=5, encryption=2)
        response = self.udsclient.request_upload(memory_location=memloc, dfi=dfi)
        self.assertEqual(response.service_data.max_length,0xabcd)
    def test_incomplete_nblock_response_exception(self):
        self.wait_request_and_respond(b"\x75\x40\xab\xcd")  # truncated number-of-block payload
    def _test_incomplete_nblock_response_exception(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        with self.assertRaises(InvalidResponseException):
            self.udsclient.request_upload(memory_location=memloc)
    def test_incomplete_nblock_response_no_exception(self):
        self.wait_request_and_respond(b"\x75\x40\xab\xcd")  # truncated number-of-block payload
    def _test_incomplete_nblock_response_no_exception(self):
        # With exceptions disabled the invalid response is returned with valid=False.
        self.udsclient.config['exception_on_invalid_response'] = False
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertFalse(response.valid)
    def test_request_upload_invalidservice_exception(self):
        self.wait_request_and_respond(b"\x00\x20\x12\x34")  # inexistent service id 0x00
    def _test_request_upload_invalidservice_exception(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        with self.assertRaises(InvalidResponseException) as handle:
            self.udsclient.request_upload(memory_location=memloc)
    def test_request_upload_invalidservice_no_exception(self):
        self.wait_request_and_respond(b"\x00\x20\x12\x34")  # inexistent service id 0x00
    def _test_request_upload_invalidservice_no_exception(self):
        self.udsclient.config['exception_on_invalid_response'] = False
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertFalse(response.valid)
    def test_request_upload_wrongservice_exception(self):
        self.wait_request_and_respond(b"\x7E\x20\x12\x34")  # valid frame, but wrong service (TesterPresent)
    def _test_request_upload_wrongservice_exception(self):
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        with self.assertRaises(UnexpectedResponseException) as handle:
            self.udsclient.request_upload(memory_location=memloc)
    def test_request_upload_wrongservice_no_exception(self):
        self.wait_request_and_respond(b"\x7E\x20\x12\x34")  # valid frame, but wrong service (TesterPresent)
    def _test_request_upload_wrongservice_no_exception(self):
        # Wrong-service response is structurally valid but flagged as unexpected.
        self.udsclient.config['exception_on_unexpected_response'] = False
        memloc = MemoryLocation(address=0x1234, memorysize=0xFF, address_format=16, memorysize_format=8)
        response = self.udsclient.request_upload(memory_location=memloc)
        self.assertTrue(response.valid)
        self.assertTrue(response.unexpected)
    def test_bad_params(self):
        # No server-side half: validation fails before any request is sent.
        pass
    def _test_bad_params(self):
        with self.assertRaises(ValueError) as handle:
            self.udsclient.request_upload(memory_location=1)
        with self.assertRaises(ValueError) as handle:
            self.udsclient.request_upload(memory_location="asd")
| true | true |
f736852b4871bb5c2d35bf29484950631a1b5694 | 1,098 | py | Python | solution/143. reorder-list.py | sundaycat/Leetcode-Practice | 65c3ab0f967331a095fd8a6eb2f3d7765cbf7d5a | [
"MIT"
] | null | null | null | solution/143. reorder-list.py | sundaycat/Leetcode-Practice | 65c3ab0f967331a095fd8a6eb2f3d7765cbf7d5a | [
"MIT"
] | null | null | null | solution/143. reorder-list.py | sundaycat/Leetcode-Practice | 65c3ab0f967331a095fd8a6eb2f3d7765cbf7d5a | [
"MIT"
] | null | null | null | from LinkedList import *
class Solution(LinkedList):
    """LeetCode 143. Reorder List: rewire L0->L1->...->Ln-1->Ln into
    L0->Ln->L1->Ln-1->L2->Ln-2->... in place (no new nodes)."""

    def reverse(self, head):
        """Reverse a singly linked list in place and return the new head."""
        pre = None
        while head:
            nxt = head.next  # stash the remainder first (renamed: 'next' shadowed the builtin)
            head.next = pre
            pre = head
            head = nxt
        return pre

    def reorderList(self, head: ListNode) -> None:
        """Reorder the list in place; returns its (unchanged) head.

        Handles empty, single-element, odd- and even-length lists.
        """
        if head is None:
            return head  # empty list: nothing to do (previously crashed with AttributeError)
        # 1) find the middle node of the linked list with slow/fast pointers
        slow, fast = head, head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        # 2) reverse the second half; this also terminates the first half,
        #    because the middle node's 'next' becomes None
        tail = self.reverse(slow)
        # 3) interleave: alternately take one node from the front and one from the back
        rs = head
        while tail.next:
            head_next = head.next
            tail_next = tail.next
            tail.next = head.next
            head.next = tail
            tail = tail_next
            head = head_next
        return rs
# Manual smoke test: build the list 1->2->3->4->5 and reorder it in place.
values = [1, 2, 3, 4, 5]
s = Solution(values)
s.reorderList(s.head)
s.print_list() | 23.361702 | 50 | 0.510929 | from LinkedList import *
class Solution(LinkedList):
    """LeetCode 143: reorder L0->L1->...->Ln into L0->Ln->L1->Ln-1->... in place."""
    def reverse(self, head):
        """Reverse a singly linked list in place; returns the new head."""
        pre = None
        while head:
            # NOTE(review): 'next' shadows the builtin of the same name.
            next = head.next
            head.next = pre
            pre = head
            head = next
        return pre
    def reorderList(self, head: ListNode) -> None:
        """Interleave the first half of the list with the reversed second half."""
        # Locate the middle node with slow/fast pointers.
        slow, fast = head, head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        # Reverse the second half; this also terminates the first half,
        # because the middle node's 'next' becomes None.
        tail = self.reverse(slow)
        # Weave: take one node from the front, then one from the back.
        rs = head
        while tail.next:
            head_next = head.next
            tail_next = tail.next
            tail.next = head.next
            head.next = tail
            tail = tail_next
            head = head_next
        return rs
# Manual smoke test: for 1..5 the expected in-place reordering is 1->5->2->4->3.
values = [1, 2, 3, 4, 5]
s = Solution(values)
s.reorderList(s.head)
s.print_list() | true | true |
f73686f1dffadac2a4e983ba72a126c53b2ed4b4 | 305 | py | Python | HttpMessageParser/http1/utils/FileUtil.py | Patr1ck97/HttpMessageParser | 3cc22679744a68b4cea1d701b834480b1d96fb04 | [
"Apache-2.0"
] | null | null | null | HttpMessageParser/http1/utils/FileUtil.py | Patr1ck97/HttpMessageParser | 3cc22679744a68b4cea1d701b834480b1d96fb04 | [
"Apache-2.0"
] | null | null | null | HttpMessageParser/http1/utils/FileUtil.py | Patr1ck97/HttpMessageParser | 3cc22679744a68b4cea1d701b834480b1d96fb04 | [
"Apache-2.0"
] | null | null | null | import json
class FileUtil:
    """Static helpers for reading UTF-8 text files, optionally as JSON."""
    @classmethod
    def read_as_dict(cls, file) -> dict:
        """Read *file* as UTF-8 text and parse its content as JSON."""
        return json.loads(cls.read_as_str(file))
    @classmethod
    def read_as_str(cls, file) -> str:
        """Return the entire UTF-8 decoded content of *file*."""
        with open(file, mode="r", encoding="utf-8") as handle:
            content = handle.read()
        return content
| 20.333333 | 57 | 0.6 | import json
class FileUtil:
    """Helpers for reading UTF-8 text files, optionally parsed as JSON."""
    @classmethod
    def read_as_dict(cls, file) -> dict:
        """Read *file* and parse its text content as JSON."""
        string = cls.read_as_str(file)
        return json.loads(string)
    @classmethod
    def read_as_str(cls, file) -> str:
        """Return the entire content of *file* decoded as UTF-8."""
        with open(file, mode="r", encoding="utf-8") as f:
            return f.read()
| true | true |
f73687942aaf768e1eb3db6df96f377f6b399bab | 59 | py | Python | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/M/muon-proton mag. mom. ratio.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/M/muon-proton mag. mom. ratio.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/M/muon-proton mag. mom. ratio.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | 1 | 2021-02-04T04:51:48.000Z | 2021-02-04T04:51:48.000Z | constants.physical_constants["muon-proton mag. mom. ratio"] | 59 | 59 | 0.813559 | constants.physical_constants["muon-proton mag. mom. ratio"] | true | true |
f73687b79dcf99809b548c00bc6d3a21444a082c | 35,850 | py | Python | python/ccxt/async_support/huobipro.py | vankiaio/ccxt | 44f69b681c2772c5f585db6d55c5d10cb6d64017 | [
"MIT"
] | 1 | 2018-12-11T12:42:34.000Z | 2018-12-11T12:42:34.000Z | python/ccxt/async_support/huobipro.py | vankiaio/ccxt | 44f69b681c2772c5f585db6d55c5d10cb6d64017 | [
"MIT"
] | null | null | null | python/ccxt/async_support/huobipro.py | vankiaio/ccxt | 44f69b681c2772c5f585db6d55c5d10cb6d64017 | [
"MIT"
] | 1 | 2022-03-15T22:41:20.000Z | 2022-03-15T22:41:20.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
class huobipro (Exchange):
    def describe(self):
        """Return the exchange metadata: ids, URLs, API endpoints, fees, error map and option defaults."""
        return self.deep_extend(super(huobipro, self).describe(), {
            'id': 'huobipro',
            'name': 'Huobi Pro',
            'countries': ['CN'],
            'rateLimit': 2000,
            'userAgent': self.userAgents['chrome39'],
            'version': 'v1',
            'accounts': None,
            'accountsById': None,
            'hostname': 'api.huobi.pro',
            'has': {
                'CORS': False,
                'fetchTickers': True,
                'fetchDepositAddress': True,
                'fetchOHLCV': True,
                'fetchOrder': True,
                'fetchOrders': True,
                'fetchOpenOrders': True,
                'fetchClosedOrders': True,
                'fetchTradingLimits': True,
                'fetchMyTrades': True,
                'withdraw': True,
                'fetchCurrencies': True,
            },
            'timeframes': {
                '1m': '1min',
                '5m': '5min',
                '15m': '15min',
                '30m': '30min',
                '1h': '60min',
                '1d': '1day',
                '1w': '1week',
                '1M': '1mon',
                '1y': '1year',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766569-15aa7b9a-5edd-11e7-9e7f-44791f4ee49c.jpg',
                'api': {
                    'market': 'https://api.huobi.pro',
                    'public': 'https://api.huobi.pro',
                    'private': 'https://api.huobi.pro',
                    'zendesk': 'https://huobiglobal.zendesk.com/hc/en-us/articles',
                },
                'www': 'https://www.huobi.pro',
                'referral': 'https://www.huobi.br.com/en-us/topic/invited/?invite_code=rwrd3',
                'doc': 'https://github.com/huobiapi/API_Docs/wiki/REST_api_reference',
                'fees': 'https://www.huobi.pro/about/fee/',
            },
            'api': {
                'zendesk': {
                    'get': [
                        '360000400491-Trade-Limits',
                    ],
                },
                'market': {
                    'get': [
                        'history/kline',  # get kline/candlestick data
                        'detail/merged',  # get the aggregated ticker
                        'depth',  # get market depth data
                        'trade',  # get trade detail data
                        'history/trade',  # fetch a batch of the most recent trades
                        'detail',  # get market detail, 24h volume data
                        'tickers',
                    ],
                },
                'public': {
                    'get': [
                        'common/symbols',  # list all trading pairs supported by the system
                        'common/currencys',  # list all currencies supported by the system
                        'common/timestamp',  # query the current system time
                        'common/exchange',  # order limits
                        'settings/currencys',  # ?language=en-US
                    ],
                },
                'private': {
                    'get': [
                        'account/accounts',  # list all accounts of the current user (i.e. account-id)
                        'account/accounts/{id}/balance',  # query the balance of the specified account
                        'order/orders/{id}',  # query the details of an order
                        'order/orders/{id}/matchresults',  # query the fills of an order
                        'order/orders',  # query open and historical orders
                        'order/matchresults',  # query current and historical fills
                        'dw/withdraw-virtual/addresses',  # query crypto withdrawal addresses
                        'dw/deposit-virtual/addresses',
                        'query/deposit-withdraw',
                        'margin/loan-orders',  # margin loan orders
                        'margin/accounts/balance',  # margin account details
                        'points/actions',
                        'points/orders',
                        'subuser/aggregate-balance',
                    ],
                    'post': [
                        'order/orders/place',  # create and execute a new order in one step (recommended)
                        'order/orders',  # create a new order request (create only, do not execute)
                        'order/orders/{id}/place',  # execute an order (only executes a previously created order)
                        'order/orders/{id}/submitcancel',  # submit a cancel request for an order
                        'order/orders/batchcancel',  # cancel a batch of orders
                        'dw/balance/transfer',  # transfer assets between accounts
                        'dw/withdraw/api/create',  # request a crypto withdrawal
                        'dw/withdraw-virtual/create',  # request a crypto withdrawal
                        'dw/withdraw-virtual/{id}/place',  # confirm a crypto withdrawal request
                        'dw/withdraw-virtual/{id}/cancel',  # cancel a crypto withdrawal request
                        'dw/transfer-in/margin',  # transfer from the spot account into the margin account
                        'dw/transfer-out/margin',  # transfer from the margin account out to the spot account
                        'margin/orders',  # request a margin loan
                        'margin/orders/{id}/repay',  # repay a margin loan
                        'subuser/transfer',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': 0.002,
                    'taker': 0.002,
                },
            },
            'exceptions': {
                # maps exchange err-code values to unified ccxt exception classes
                'account-frozen-balance-insufficient-error': InsufficientFunds,  # {"status":"error","err-code":"account-frozen-balance-insufficient-error","err-msg":"trade account balance is not enough, left: `0.0027`","data":null}
                'invalid-amount': InvalidOrder,  # eg "Paramemter `amount` is invalid."
                'order-limitorder-amount-min-error': InvalidOrder,  # limit order amount error, min: `0.001`
                'order-marketorder-amount-min-error': InvalidOrder,  # market order amount error, min: `0.01`
                'order-limitorder-price-min-error': InvalidOrder,  # limit order price error
                'order-limitorder-price-max-error': InvalidOrder,  # limit order price error
                'order-orderstate-error': OrderNotFound,  # canceling an already canceled order
                'order-queryorder-invalid': OrderNotFound,  # querying a non-existent order
                'order-update-error': ExchangeNotAvailable,  # undocumented error
                'api-signature-check-failed': AuthenticationError,
                'api-signature-not-valid': AuthenticationError,  # {"status":"error","err-code":"api-signature-not-valid","err-msg":"Signature not valid: Incorrect Access key [Access key错误]","data":null}
            },
            'options': {
                'createMarketBuyOrderRequiresPrice': True,
                'fetchMarketsMethod': 'publicGetCommonSymbols',
                'fetchBalanceMethod': 'privateGetAccountAccountsIdBalance',
                'createOrderMethod': 'privatePostOrderOrdersPlace',
                'language': 'en-US',
            },
        })
async def fetch_trading_limits(self, symbols=None, params={}):
# self method should not be called directly, use loadTradingLimits() instead
# by default it will try load withdrawal fees of all currencies(with separate requests)
# however if you define symbols = ['ETH/BTC', 'LTC/BTC'] in args it will only load those
await self.load_markets()
if symbols is None:
symbols = self.symbols
result = {}
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = await self.fetch_trading_limits_by_id(self.market_id(symbol), params)
return result
async def fetch_trading_limits_by_id(self, id, params={}):
request = {
'symbol': id,
}
response = await self.publicGetCommonExchange(self.extend(request, params))
#
# {status: "ok",
# data: { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }}
#
return self.parse_trading_limits(self.safe_value(response, 'data', {}))
def parse_trading_limits(self, limits, symbol=None, params={}):
#
# { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }
#
return {
'info': limits,
'limits': {
'amount': {
'min': self.safe_float(limits, 'limit-order-must-greater-than'),
'max': self.safe_float(limits, 'limit-order-must-less-than'),
},
},
}
    async def fetch_markets(self, params={}):
        """Fetch all markets and build unified ccxt market structures.

        The endpoint is taken from self.options['fetchMarketsMethod']
        ('publicGetCommonSymbols' by default, see describe()).
        """
        method = self.options['fetchMarketsMethod']
        response = await getattr(self, method)()
        markets = response['data']
        numMarkets = len(markets)
        if numMarkets < 1:
            raise ExchangeError(self.id + ' publicGetCommonSymbols returned empty response: ' + self.json(markets))
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            baseId = market['base-currency']
            quoteId = market['quote-currency']
            base = baseId.upper()
            quote = quoteId.upper()
            # exchange-side market id is the plain concatenation of the currency ids
            id = baseId + quoteId
            base = self.common_currency_code(base)
            quote = self.common_currency_code(quote)
            symbol = base + '/' + quote
            precision = {
                'amount': market['amount-precision'],
                'price': market['price-precision'],
            }
            # zero fees for OMG pairs - presumably a promotion; confirm against the fee page
            maker = 0 if (base == 'OMG') else 0.2 / 100
            taker = 0 if (base == 'OMG') else 0.2 / 100
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'active': True,
                'precision': precision,
                'taker': taker,
                'maker': maker,
                'limits': {
                    # amount/price bounds derived from the advertised precision
                    'amount': {
                        'min': math.pow(10, -precision['amount']),
                        'max': math.pow(10, precision['amount']),
                    },
                    'price': {
                        'min': math.pow(10, -precision['price']),
                        'max': None,
                    },
                    'cost': {
                        'min': 0,
                        'max': None,
                    },
                },
                'info': market,
            })
        return result
def parse_ticker(self, ticker, market=None):
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(ticker, 'ts')
bid = None
ask = None
bidVolume = None
askVolume = None
if 'bid' in ticker:
if isinstance(ticker['bid'], list):
bid = self.safe_float(ticker['bid'], 0)
bidVolume = self.safe_float(ticker['bid'], 1)
if 'ask' in ticker:
if isinstance(ticker['ask'], list):
ask = self.safe_float(ticker['ask'], 0)
askVolume = self.safe_float(ticker['ask'], 1)
open = self.safe_float(ticker, 'open')
close = self.safe_float(ticker, 'close')
change = None
percentage = None
average = None
if (open is not None) and(close is not None):
change = close - open
average = self.sum(open, close) / 2
if (close is not None) and(close > 0):
percentage = (change / open) * 100
baseVolume = self.safe_float(ticker, 'amount')
quoteVolume = self.safe_float(ticker, 'vol')
vwap = None
if baseVolume is not None and quoteVolume is not None and baseVolume > 0:
vwap = quoteVolume / baseVolume
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.marketGetDepth(self.extend({
'symbol': market['id'],
'type': 'step0',
}, params))
if 'tick' in response:
if not response['tick']:
raise ExchangeError(self.id + ' fetchOrderBook() returned empty response: ' + self.json(response))
orderbook = response['tick']
result = self.parse_order_book(orderbook, orderbook['ts'])
result['nonce'] = orderbook['version']
return result
raise ExchangeError(self.id + ' fetchOrderBook() returned unrecognized response: ' + self.json(response))
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.marketGetDetailMerged(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response['tick'], market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.marketGetTickers(params)
tickers = response['data']
timestamp = self.safe_integer(response, 'ts')
result = {}
for i in range(0, len(tickers)):
marketId = self.safe_string(tickers[i], 'symbol')
market = self.safe_value(self.markets_by_id, marketId)
symbol = marketId
if market is not None:
symbol = market['symbol']
ticker = self.parse_ticker(tickers[i], market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
result[symbol] = ticker
return result
    def parse_trade(self, trade, market=None):
        """Convert a raw trade/fill into the unified ccxt trade structure.

        Handles both public trades and private fills (which carry
        'filled-amount' / 'filled-fees' / 'filled-points' fields).
        """
        symbol = None
        if market is None:
            marketId = self.safe_string(trade, 'symbol')
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
        if market is not None:
            symbol = market['symbol']
        timestamp = self.safe_integer_2(trade, 'ts', 'created-at')
        order = self.safe_string(trade, 'order-id')
        side = self.safe_string(trade, 'direction')
        type = self.safe_string(trade, 'type')
        if type is not None:
            # 'type' is e.g. 'buy-limit': first part is the side, second the order type
            typeParts = type.split('-')
            side = typeParts[0]
            type = typeParts[1]
        price = self.safe_float(trade, 'price')
        amount = self.safe_float_2(trade, 'filled-amount', 'amount')
        cost = None
        if price is not None:
            if amount is not None:
                cost = amount * price
        fee = None
        feeCost = self.safe_float(trade, 'filled-fees')
        feeCurrency = None
        if market is not None:
            # fee is charged in the base currency on buys, in the quote currency on sells
            feeCurrency = market['base'] if (side == 'buy') else market['quote']
        filledPoints = self.safe_float(trade, 'filled-points')
        if filledPoints is not None:
            # a fee settled with exchange points replaces a zero/absent cash fee
            if (feeCost is None) or (feeCost == 0.0):
                feeCost = filledPoints
                feeCurrency = self.common_currency_code('HBPOINT')
        if feeCost is not None:
            fee = {
                'cost': feeCost,
                'currency': feeCurrency,
            }
        return {
            'info': trade,
            'id': self.safe_string(trade, 'id'),
            'order': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
response = await self.privateGetOrderMatchresults(params)
trades = self.parse_trades(response['data'], None, since, limit)
if symbol is not None:
market = self.market(symbol)
trades = self.filter_by_symbol(trades, market['symbol'])
return trades
async def fetch_trades(self, symbol, since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['size'] = limit
response = await self.marketGetHistoryTrade(self.extend(request, params))
data = response['data']
result = []
for i in range(0, len(data)):
trades = data[i]['data']
for j in range(0, len(trades)):
trade = self.parse_trade(trades[j], market)
result.append(trade)
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv['id'] * 1000,
ohlcv['open'],
ohlcv['high'],
ohlcv['low'],
ohlcv['close'],
ohlcv['amount'],
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = limit
response = await self.marketGetHistoryKline(self.extend(request, params))
return self.parse_ohlcvs(response['data'], market, timeframe, since, limit)
async def load_accounts(self, reload=False):
if reload:
self.accounts = await self.fetch_accounts()
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts()
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_accounts(self):
await self.load_markets()
response = await self.privateGetAccountAccounts()
return response['data']
    async def fetch_currencies(self, params={}):
        """Fetch currency metadata (precision, limits, active flags) from the settings endpoint.

        NOTE(review): assumes every entry carries a non-null 'name' and
        'withdraw-precision' — a null value here would crash the upper()/pow
        calls below; confirm against live payloads.
        """
        response = await self.publicGetSettingsCurrencys(self.extend({
            'language': self.options['language'],
        }, params))
        currencies = response['data']
        result = {}
        for i in range(0, len(currencies)):
            currency = currencies[i]
            #
            # { name: "ctxc",
            #   'display-name': "CTXC",
            #   'withdraw-precision': 8,
            #   'currency-type': "eth",
            #   'currency-partition': "pro",
            #   'support-sites': null,
            #   'otc-enable': 0,
            #   'deposit-min-amount': "2",
            #   'withdraw-min-amount': "4",
            #   'show-precision': "8",
            #   weight: "2988",
            #   visible: True,
            #   'deposit-desc': "Please don’t deposit any other digital assets except CTXC t…",
            #   'withdraw-desc': "Minimum withdrawal amount: 4 CTXC. not >_<not For security reason…",
            #   'deposit-enabled': True,
            #   'withdraw-enabled': True,
            #   'currency-addr-with-tag': False,
            #   'fast-confirms': 15,
            #   'safe-confirms': 30 }
            #
            id = self.safe_value(currency, 'name')
            precision = self.safe_integer(currency, 'withdraw-precision')
            code = self.common_currency_code(id.upper())
            # a currency is usable only if listed AND both transfer directions are enabled
            active = currency['visible'] and currency['deposit-enabled'] and currency['withdraw-enabled']
            result[code] = {
                'id': id,
                'code': code,
                'type': 'crypto',
                # 'payin': currency['deposit-enabled'],
                # 'payout': currency['withdraw-enabled'],
                # 'transfer': None,
                'name': currency['display-name'],
                'active': active,
                'fee': None,  # todo need to fetch from fee endpoint
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'price': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                    'deposit': {
                        'min': self.safe_float(currency, 'deposit-min-amount'),
                        'max': math.pow(10, precision),
                    },
                    'withdraw': {
                        'min': self.safe_float(currency, 'withdraw-min-amount'),
                        'max': math.pow(10, precision),
                    },
                },
                'info': currency,
            }
        return result
async def fetch_balance(self, params={}):
await self.load_markets()
await self.load_accounts()
method = self.options['fetchBalanceMethod']
response = await getattr(self, method)(self.extend({
'id': self.accounts[0]['id'],
}, params))
balances = response['data']['list']
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
uppercase = balance['currency'].upper()
currency = self.common_currency_code(uppercase)
account = None
if currency in result:
account = result[currency]
else:
account = self.account()
if balance['type'] == 'trade':
account['free'] = float(balance['balance'])
if balance['type'] == 'frozen':
account['used'] = float(balance['balance'])
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
async def fetch_orders_by_states(self, states, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'states': states,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = await self.privateGetOrderOrders(self.extend(request, params))
return self.parse_orders(response['data'], market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('pre-submitted,submitted,partial-filled,filled,partial-canceled,canceled', symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('pre-submitted,submitted,partial-filled', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('filled,partial-canceled,canceled', symbol, since, limit, params)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privateGetOrderOrdersId(self.extend({
'id': id,
}, params))
return self.parse_order(response['data'])
def parse_order_status(self, status):
statuses = {
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'submitted': 'open',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """Convert a raw order payload into the unified order structure.

        Note the exchange's misspelled keys 'field-amount'/'field-cash-amount',
        which actually mean filled amount and filled cost.
        """
        id = self.safe_string(order, 'id')
        side = None
        type = None
        status = None
        if 'type' in order:
            # raw 'type' is 'side-ordertype', e.g. 'sell-limit'
            orderType = order['type'].split('-')
            side = orderType[0]
            type = orderType[1]
            status = self.parse_order_status(self.safe_string(order, 'state'))
        symbol = None
        if market is None:
            if 'symbol' in order:
                if order['symbol'] in self.markets_by_id:
                    marketId = order['symbol']
                    market = self.markets_by_id[marketId]
        if market is not None:
            symbol = market['symbol']
        timestamp = self.safe_integer(order, 'created-at')
        amount = self.safe_float(order, 'amount')
        filled = self.safe_float(order, 'field-amount')  # typo in their API, filled amount
        price = self.safe_float(order, 'price')
        cost = self.safe_float(order, 'field-cash-amount')  # same typo
        remaining = None
        average = None
        if filled is not None:
            # average defaults to 0 (not None) whenever a fill figure is present
            average = 0
            if amount is not None:
                remaining = amount - filled
            # if cost is defined and filled is not zero
            if (cost is not None) and(filled > 0):
                average = cost / filled
        result = {
            'info': order,
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'average': average,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'status': status,
            'fee': None,
        }
        return result
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order on the first account.

        For market buys the exchange expects *amount* to be the total quote-currency
        cost, so when createMarketBuyOrderRequiresPrice is set the request amount is
        rewritten to amount * price (or an InvalidOrder is raised if price is missing).
        """
        await self.load_markets()
        await self.load_accounts()
        market = self.market(symbol)
        request = {
            'account-id': self.accounts[0]['id'],
            'amount': self.amount_to_precision(symbol, amount),
            'symbol': market['id'],
            # exchange order type is the 'side-type' pair, e.g. 'buy-limit'
            'type': side + '-' + type,
        }
        if self.options['createMarketBuyOrderRequiresPrice']:
            if (type == 'market') and(side == 'buy'):
                if price is None:
                    raise InvalidOrder(self.id + " market buy order requires price argument to calculate cost(total amount of quote currency to spend for buying, amount * price). To switch off self warning exception and specify cost in the amount argument, set .options['createMarketBuyOrderRequiresPrice'] = False. Make sure you know what you're doing.")
                else:
                    # market buys are quoted in quote currency: send amount * price
                    request['amount'] = self.price_to_precision(symbol, float(amount) * float(price))
        if type == 'limit':
            request['price'] = self.price_to_precision(symbol, price)
        method = self.options['createOrderMethod']
        response = await getattr(self, method)(self.extend(request, params))
        timestamp = self.milliseconds()
        # the endpoint returns only the new order id; synthesize the rest locally
        return {
            'info': response,
            'id': response['data'],
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': None,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'filled': None,
            'remaining': None,
            'cost': None,
            'trades': None,
            'fee': None,
        }
async def cancel_order(self, id, symbol=None, params={}):
response = await self.privatePostOrderOrdersIdSubmitcancel({'id': id})
#
# response = {
# 'status': 'ok',
# 'data': '10138899000',
# }
#
return self.extend(self.parse_order(response), {
'id': id,
'status': 'canceled',
})
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
response = await self.privateGetDwDepositVirtualAddresses(self.extend({
'currency': currency['id'].lower(),
}, params))
address = self.safe_string(response, 'data')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': None,
'info': response,
}
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, 0, self.currencies[currency]['precision'])
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = amount * rate
key = 'quote'
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.currency_to_precision(market[key], cost)),
}
async def withdraw(self, code, amount, address, tag=None, params={}):
await self.load_markets()
self.check_address(address)
currency = self.currency(code)
request = {
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'currency': currency['id'].lower(),
}
if tag is not None:
request['addr-tag'] = tag # only for XRP?
response = await self.privatePostDwWithdrawApiCreate(self.extend(request, params))
id = None
if 'data' in response:
id = response['data']
return {
'info': response,
'id': id,
}
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed URL, headers and body for a request.

        Private endpoints are signed per Huobi's Signature v2 scheme: the
        query string of the sorted auth params is HMAC-SHA256'd together with
        the HTTP method, hostname and path, then base64-encoded.
        """
        url = '/'
        if api == 'market':
            url += api
        elif (api == 'public') or (api == 'private'):
            url += self.version
        url += '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'private':
            self.check_required_credentials()
            timestamp = self.ymdhms(self.milliseconds(), 'T')
            # params must be signed in sorted key order
            request = self.keysort(self.extend({
                'SignatureMethod': 'HmacSHA256',
                'SignatureVersion': '2',
                'AccessKeyId': self.apiKey,
                'Timestamp': timestamp,
            }, query))
            auth = self.urlencode(request)
            # unfortunately, PHP demands double quotes for the escaped newline symbol
            # eslint-disable-next-line quotes
            payload = "\n".join([method, self.hostname, url, auth])
            signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
            auth += '&' + self.urlencode({'Signature': signature})
            url += '?' + auth
            if method == 'POST':
                # POST bodies are JSON; the signature already covers the query string
                body = self.json(query)
                headers = {
                    'Content-Type': 'application/json',
                }
            else:
                headers = {
                    'Content-Type': 'application/x-www-form-urlencoded',
                }
        else:
            if params:
                url += '?' + self.urlencode(params)
        url = self.urls['api'][api] + url
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, httpCode, reason, url, method, headers, body, response=None):
        """Inspect an error response body and raise a mapped exception when possible.

        Returning None falls through to the default error handler.
        """
        if not isinstance(body, basestring):
            return  # fallback to default error handler
        if len(body) < 2:
            return  # fallback to default error handler
        # only attempt to parse bodies that look like JSON
        if (body[0] == '{') or (body[0] == '['):
            response = json.loads(body)
            if 'status' in response:
                #
                #     {"status":"error","err-code":"order-limitorder-amount-min-error","err-msg":"limit order amount error, min: `0.001`","data":null}
                #
                status = self.safe_string(response, 'status')
                if status == 'error':
                    code = self.safe_string(response, 'err-code')
                    feedback = self.id + ' ' + self.json(response)
                    exceptions = self.exceptions
                    # raise the specific mapped exception when known,
                    # otherwise a generic ExchangeError with the full payload
                    if code in exceptions:
                        raise exceptions[code](feedback)
                    raise ExchangeError(feedback)
| 42.226148 | 355 | 0.501199 |
rt.base.exchange import Exchange
try:
basestring
except NameError:
basestring = str
import hashlib
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import ExchangeNotAvailable
class huobipro (Exchange):
def describe(self):
return self.deep_extend(super(huobipro, self).describe(), {
'id': 'huobipro',
'name': 'Huobi Pro',
'countries': ['CN'],
'rateLimit': 2000,
'userAgent': self.userAgents['chrome39'],
'version': 'v1',
'accounts': None,
'accountsById': None,
'hostname': 'api.huobi.pro',
'has': {
'CORS': False,
'fetchTickers': True,
'fetchDepositAddress': True,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchTradingLimits': True,
'fetchMyTrades': True,
'withdraw': True,
'fetchCurrencies': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'1d': '1day',
'1w': '1week',
'1M': '1mon',
'1y': '1year',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766569-15aa7b9a-5edd-11e7-9e7f-44791f4ee49c.jpg',
'api': {
'market': 'https://api.huobi.pro',
'public': 'https://api.huobi.pro',
'private': 'https://api.huobi.pro',
'zendesk': 'https://huobiglobal.zendesk.com/hc/en-us/articles',
},
'www': 'https://www.huobi.pro',
'referral': 'https://www.huobi.br.com/en-us/topic/invited/?invite_code=rwrd3',
'doc': 'https://github.com/huobiapi/API_Docs/wiki/REST_api_reference',
'fees': 'https://www.huobi.pro/about/fee/',
},
'api': {
'zendesk': {
'get': [
'360000400491-Trade-Limits',
],
},
'market': {
'get': [
'history/kline',
'detail/merged',
'depth',
'trade',
'history/trade',
'detail',
'tickers',
],
},
'public': {
'get': [
'common/symbols',
'common/currencys',
'common/timestamp',
'common/exchange',
'settings/currencys',
],
},
'private': {
'get': [
'account/accounts',
'account/accounts/{id}/balance',
'order/orders/{id}',
'order/orders/{id}/matchresults',
'order/orders',
'order/matchresults',
'dw/withdraw-virtual/addresses',
'dw/deposit-virtual/addresses',
'query/deposit-withdraw',
'margin/loan-orders',
'margin/accounts/balance',
'points/actions',
'points/orders',
'subuser/aggregate-balance',
],
'post': [
'order/orders/place',
'order/orders',
'order/orders/{id}/place',
'order/orders/{id}/submitcancel',
'order/orders/batchcancel',
'dw/balance/transfer',
'dw/withdraw/api/create',
'dw/withdraw-virtual/create',
'dw/withdraw-virtual/{id}/place',
'dw/withdraw-virtual/{id}/cancel',
'dw/transfer-in/margin',
'dw/transfer-out/margin',
'margin/orders',
'margin/orders/{id}/repay',
'subuser/transfer',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.002,
'taker': 0.002,
},
},
'exceptions': {
'account-frozen-balance-insufficient-error': InsufficientFunds,
'invalid-amount': InvalidOrder,
'order-limitorder-amount-min-error': InvalidOrder,
'order-marketorder-amount-min-error': InvalidOrder,
'order-limitorder-price-min-error': InvalidOrder,
'order-limitorder-price-max-error': InvalidOrder,
'order-orderstate-error': OrderNotFound,
'order-queryorder-invalid': OrderNotFound,
'order-update-error': ExchangeNotAvailable,
'api-signature-check-failed': AuthenticationError,
'api-signature-not-valid': AuthenticationError,
},
'options': {
'createMarketBuyOrderRequiresPrice': True,
'fetchMarketsMethod': 'publicGetCommonSymbols',
'fetchBalanceMethod': 'privateGetAccountAccountsIdBalance',
'createOrderMethod': 'privatePostOrderOrdersPlace',
'language': 'en-US',
},
})
async def fetch_trading_limits(self, symbols=None, params={}):
await self.load_markets()
if symbols is None:
symbols = self.symbols
result = {}
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = await self.fetch_trading_limits_by_id(self.market_id(symbol), params)
return result
async def fetch_trading_limits_by_id(self, id, params={}):
request = {
'symbol': id,
}
response = await self.publicGetCommonExchange(self.extend(request, params))
return self.parse_trading_limits(self.safe_value(response, 'data', {}))
def parse_trading_limits(self, limits, symbol=None, params={}):
return {
'info': limits,
'limits': {
'amount': {
'min': self.safe_float(limits, 'limit-order-must-greater-than'),
'max': self.safe_float(limits, 'limit-order-must-less-than'),
},
},
}
async def fetch_markets(self, params={}):
method = self.options['fetchMarketsMethod']
response = await getattr(self, method)()
markets = response['data']
numMarkets = len(markets)
if numMarkets < 1:
raise ExchangeError(self.id + ' publicGetCommonSymbols returned empty response: ' + self.json(markets))
result = []
for i in range(0, len(markets)):
market = markets[i]
baseId = market['base-currency']
quoteId = market['quote-currency']
base = baseId.upper()
quote = quoteId.upper()
id = baseId + quoteId
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': market['amount-precision'],
'price': market['price-precision'],
}
maker = 0 if (base == 'OMG') else 0.2 / 100
taker = 0 if (base == 'OMG') else 0.2 / 100
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'taker': taker,
'maker': maker,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': 0,
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(ticker, 'ts')
bid = None
ask = None
bidVolume = None
askVolume = None
if 'bid' in ticker:
if isinstance(ticker['bid'], list):
bid = self.safe_float(ticker['bid'], 0)
bidVolume = self.safe_float(ticker['bid'], 1)
if 'ask' in ticker:
if isinstance(ticker['ask'], list):
ask = self.safe_float(ticker['ask'], 0)
askVolume = self.safe_float(ticker['ask'], 1)
open = self.safe_float(ticker, 'open')
close = self.safe_float(ticker, 'close')
change = None
percentage = None
average = None
if (open is not None) and(close is not None):
change = close - open
average = self.sum(open, close) / 2
if (close is not None) and(close > 0):
percentage = (change / open) * 100
baseVolume = self.safe_float(ticker, 'amount')
quoteVolume = self.safe_float(ticker, 'vol')
vwap = None
if baseVolume is not None and quoteVolume is not None and baseVolume > 0:
vwap = quoteVolume / baseVolume
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.marketGetDepth(self.extend({
'symbol': market['id'],
'type': 'step0',
}, params))
if 'tick' in response:
if not response['tick']:
raise ExchangeError(self.id + ' fetchOrderBook() returned empty response: ' + self.json(response))
orderbook = response['tick']
result = self.parse_order_book(orderbook, orderbook['ts'])
result['nonce'] = orderbook['version']
return result
raise ExchangeError(self.id + ' fetchOrderBook() returned unrecognized response: ' + self.json(response))
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.marketGetDetailMerged(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response['tick'], market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.marketGetTickers(params)
tickers = response['data']
timestamp = self.safe_integer(response, 'ts')
result = {}
for i in range(0, len(tickers)):
marketId = self.safe_string(tickers[i], 'symbol')
market = self.safe_value(self.markets_by_id, marketId)
symbol = marketId
if market is not None:
symbol = market['symbol']
ticker = self.parse_ticker(tickers[i], market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
result[symbol] = ticker
return result
def parse_trade(self, trade, market=None):
symbol = None
if market is None:
marketId = self.safe_string(trade, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer_2(trade, 'ts', 'created-at')
order = self.safe_string(trade, 'order-id')
side = self.safe_string(trade, 'direction')
type = self.safe_string(trade, 'type')
if type is not None:
typeParts = type.split('-')
side = typeParts[0]
type = typeParts[1]
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'filled-amount', 'amount')
cost = None
if price is not None:
if amount is not None:
cost = amount * price
fee = None
feeCost = self.safe_float(trade, 'filled-fees')
feeCurrency = None
if market is not None:
feeCurrency = market['base'] if (side == 'buy') else market['quote']
filledPoints = self.safe_float(trade, 'filled-points')
if filledPoints is not None:
if (feeCost is None) or (feeCost == 0.0):
feeCost = filledPoints
feeCurrency = self.common_currency_code('HBPOINT')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
return {
'info': trade,
'id': self.safe_string(trade, 'id'),
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
response = await self.privateGetOrderMatchresults(params)
trades = self.parse_trades(response['data'], None, since, limit)
if symbol is not None:
market = self.market(symbol)
trades = self.filter_by_symbol(trades, market['symbol'])
return trades
async def fetch_trades(self, symbol, since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['size'] = limit
response = await self.marketGetHistoryTrade(self.extend(request, params))
data = response['data']
result = []
for i in range(0, len(data)):
trades = data[i]['data']
for j in range(0, len(trades)):
trade = self.parse_trade(trades[j], market)
result.append(trade)
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv['id'] * 1000,
ohlcv['open'],
ohlcv['high'],
ohlcv['low'],
ohlcv['close'],
ohlcv['amount'],
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = limit
response = await self.marketGetHistoryKline(self.extend(request, params))
return self.parse_ohlcvs(response['data'], market, timeframe, since, limit)
async def load_accounts(self, reload=False):
if reload:
self.accounts = await self.fetch_accounts()
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts()
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_accounts(self):
await self.load_markets()
response = await self.privateGetAccountAccounts()
return response['data']
async def fetch_currencies(self, params={}):
response = await self.publicGetSettingsCurrencys(self.extend({
'language': self.options['language'],
}, params))
currencies = response['data']
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_value(currency, 'name')
precision = self.safe_integer(currency, 'withdraw-precision')
code = self.common_currency_code(id.upper())
active = currency['visible'] and currency['deposit-enabled'] and currency['withdraw-enabled']
result[code] = {
'id': id,
'code': code,
'type': 'crypto',
'name': currency['display-name'],
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'deposit': {
'min': self.safe_float(currency, 'deposit-min-amount'),
'max': math.pow(10, precision),
},
'withdraw': {
'min': self.safe_float(currency, 'withdraw-min-amount'),
'max': math.pow(10, precision),
},
},
'info': currency,
}
return result
async def fetch_balance(self, params={}):
await self.load_markets()
await self.load_accounts()
method = self.options['fetchBalanceMethod']
response = await getattr(self, method)(self.extend({
'id': self.accounts[0]['id'],
}, params))
balances = response['data']['list']
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
uppercase = balance['currency'].upper()
currency = self.common_currency_code(uppercase)
account = None
if currency in result:
account = result[currency]
else:
account = self.account()
if balance['type'] == 'trade':
account['free'] = float(balance['balance'])
if balance['type'] == 'frozen':
account['used'] = float(balance['balance'])
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
async def fetch_orders_by_states(self, states, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'states': states,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = await self.privateGetOrderOrders(self.extend(request, params))
return self.parse_orders(response['data'], market, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('pre-submitted,submitted,partial-filled,filled,partial-canceled,canceled', symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('pre-submitted,submitted,partial-filled', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('filled,partial-canceled,canceled', symbol, since, limit, params)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privateGetOrderOrdersId(self.extend({
'id': id,
}, params))
return self.parse_order(response['data'])
def parse_order_status(self, status):
statuses = {
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'submitted': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'id')
side = None
type = None
status = None
if 'type' in order:
orderType = order['type'].split('-')
side = orderType[0]
type = orderType[1]
status = self.parse_order_status(self.safe_string(order, 'state'))
symbol = None
if market is None:
if 'symbol' in order:
if order['symbol'] in self.markets_by_id:
marketId = order['symbol']
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(order, 'created-at')
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'field-amount')
price = self.safe_float(order, 'price')
cost = self.safe_float(order, 'field-cash-amount')
remaining = None
average = None
if filled is not None:
average = 0
if amount is not None:
remaining = amount - filled
if (cost is not None) and(filled > 0):
average = cost / filled
result = {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
}
return result
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
await self.load_accounts()
market = self.market(symbol)
request = {
'account-id': self.accounts[0]['id'],
'amount': self.amount_to_precision(symbol, amount),
'symbol': market['id'],
'type': side + '-' + type,
}
if self.options['createMarketBuyOrderRequiresPrice']:
if (type == 'market') and(side == 'buy'):
if price is None:
raise InvalidOrder(self.id + " market buy order requires price argument to calculate cost(total amount of quote currency to spend for buying, amount * price). To switch off self warning exception and specify cost in the amount argument, set .options['createMarketBuyOrderRequiresPrice'] = False. Make sure you know what you're doing.")
else:
request['amount'] = self.price_to_precision(symbol, float(amount) * float(price))
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
method = self.options['createOrderMethod']
response = await getattr(self, method)(self.extend(request, params))
timestamp = self.milliseconds()
return {
'info': response,
'id': response['data'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
}
async def cancel_order(self, id, symbol=None, params={}):
response = await self.privatePostOrderOrdersIdSubmitcancel({'id': id})
#
# response = {
# 'status': 'ok',
# 'data': '10138899000',
# }
#
return self.extend(self.parse_order(response), {
'id': id,
'status': 'canceled',
})
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
response = await self.privateGetDwDepositVirtualAddresses(self.extend({
'currency': currency['id'].lower(),
}, params))
address = self.safe_string(response, 'data')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': None,
'info': response,
}
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, 0, self.currencies[currency]['precision'])
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = amount * rate
key = 'quote'
if side == 'sell':
cost *= price
else:
key = 'base'
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(self.currency_to_precision(market[key], cost)),
}
async def withdraw(self, code, amount, address, tag=None, params={}):
await self.load_markets()
self.check_address(address)
currency = self.currency(code)
request = {
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'currency': currency['id'].lower(),
}
if tag is not None:
request['addr-tag'] = tag # only for XRP?
response = await self.privatePostDwWithdrawApiCreate(self.extend(request, params))
id = None
if 'data' in response:
id = response['data']
return {
'info': response,
'id': id,
}
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the final URL, headers and body for a request.

        Public/market requests only get their params urlencoded; private
        requests are signed with HMAC-SHA256 over the canonical string
        "METHOD\\nhost\\npath\\nsorted-query" per the exchange's v2 signature
        scheme, and the signature is appended to the query string.
        """
        url = '/'
        if api == 'market':
            url += api
        elif (api == 'public') or (api == 'private'):
            url += self.version
        url += '/' + self.implode_params(path, params)
        # query = params minus the ones consumed by the path template
        query = self.omit(params, self.extract_params(path))
        if api == 'private':
            self.check_required_credentials()
            timestamp = self.ymdhms(self.milliseconds(), 'T')
            # keysort is required: the signature is computed over the
            # alphabetically sorted, urlencoded parameters
            request = self.keysort(self.extend({
                'SignatureMethod': 'HmacSHA256',
                'SignatureVersion': '2',
                'AccessKeyId': self.apiKey,
                'Timestamp': timestamp,
            }, query))
            auth = self.urlencode(request)
            # unfortunately, PHP demands double quotes for the escaped newline symbol
            # eslint-disable-next-line quotes
            payload = "\n".join([method, self.hostname, url, auth])
            # base64-encoded HMAC-SHA256 of the canonical payload
            signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
            auth += '&' + self.urlencode({'Signature': signature})
            url += '?' + auth
            if method == 'POST':
                # POST bodies carry the (unsigned) query as JSON
                body = self.json(query)
                headers = {
                    'Content-Type': 'application/json',
                }
            else:
                headers = {
                    'Content-Type': 'application/x-www-form-urlencoded',
                }
        else:
            if params:
                url += '?' + self.urlencode(params)
        url = self.urls['api'][api] + url
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, httpCode, reason, url, method, headers, body, response=None):
        """Raise a mapped ccxt exception when the exchange reports status 'error'.

        Returns None (falling through to the default error handler) when the
        body is not a string, is too short, or is not a JSON object/array.
        NOTE(review): `basestring` is presumably supplied by the module's
        py2/py3 compatibility shim -- confirm at the top of the file.
        """
        if not isinstance(body, basestring):
            return  # fallback to default error handler
        if len(body) < 2:
            return  # fallback to default error handler
        if (body[0] == '{') or (body[0] == '['):
            response = json.loads(body)
            if 'status' in response:
                #
                # {"status":"error","err-code":"order-limitorder-amount-min-error","err-msg":"limit order amount error, min: `0.001`","data":null}
                #
                status = self.safe_string(response, 'status')
                if status == 'error':
                    code = self.safe_string(response, 'err-code')
                    feedback = self.id + ' ' + self.json(response)
                    exceptions = self.exceptions
                    # map the exchange's err-code to a specific exception class,
                    # falling back to a generic ExchangeError
                    if code in exceptions:
                        raise exceptions[code](feedback)
                    raise ExchangeError(feedback)
| true | true |
f73687d75ddedbad5f641cebbaab293e9fc2d28b | 7,393 | py | Python | mxts/strategy/utils.py | kevingivens/mxts | e0a9810ae7e696ffa537019a1ae2582495b40473 | [
"Apache-2.0"
] | null | null | null | mxts/strategy/utils.py | kevingivens/mxts | e0a9810ae7e696ffa537019a1ae2582495b40473 | [
"Apache-2.0"
] | null | null | null | mxts/strategy/utils.py | kevingivens/mxts | e0a9810ae7e696ffa537019a1ae2582495b40473 | [
"Apache-2.0"
] | null | null | null | import asyncio
from datetime import datetime
from typing import Union, Callable, Optional, List, TYPE_CHECKING
from ..config import Side, TradingType, ExitRoutine, InstrumentType
from ..core import Trade, Instrument, ExchangeType, Order, OrderBook
from ..exchange import Exchange
from ..engine.managers import Periodic
if TYPE_CHECKING:
from mxts.engine import StrategyManager
class StrategyUtilsMixin(object):
    """Utility mixin for strategies.

    Exposes order/trade queries and engine services (clock, event loop,
    scheduling, instrument lookup) by delegating to the owning
    ``StrategyManager`` stored on ``_manager``.
    """

    _manager: "StrategyManager"

    def orders(
        self,
        instrument: Optional[Instrument] = None,
        exchange: Optional[ExchangeType] = None,
        side: Optional[Side] = None,
    ) -> List[Order]:
        """select all open orders
        Args:
            instrument (Optional[Instrument]): filter open orders by instrument
            exchange (Optional[ExchangeType]): filter open orders by exchange
            side (Optional[Side]): filter open orders by side
        Returns:
            list (Order): list of open orders
        """
        return self._manager.orders(self, instrument, exchange, side)  # type: ignore # mixin

    def pastOrders(
        self,
        instrument: Optional[Instrument] = None,
        exchange: Optional[ExchangeType] = None,
        side: Optional[Side] = None,
    ) -> List[Order]:
        """select all past orders
        Args:
            instrument (Optional[Instrument]): filter past orders by instrument
            exchange (Optional[ExchangeType]): filter past orders by exchange
            side (Optional[Side]): filter past orders by side
        Returns:
            list (Order): list of past orders
        """
        return self._manager.pastOrders(self, instrument, exchange, side)  # type: ignore # mixin

    def trades(
        self,
        instrument: Optional[Instrument] = None,
        exchange: Optional[ExchangeType] = None,
        side: Optional[Side] = None,
    ) -> List[Trade]:
        """select all past trades
        Args:
            instrument (Optional[Instrument]): filter trades by instrument
            exchange (Optional[ExchangeType]): filter trades by exchange
            side (Optional[Side]): filter trades by side
        Returns:
            list (Trade): list of trades
        """
        return self._manager.trades(self, instrument, exchange, side)  # type: ignore # mixin

    #################
    # Other Methods #
    #################
    def tradingType(self) -> TradingType:
        """Return the trading type, from TradingType enum"""
        return self._manager.tradingType()

    def loop(self) -> asyncio.AbstractEventLoop:
        """Return the event loop"""
        return self._manager.loop()

    def now(self) -> datetime:
        """Return the current datetime. Useful to avoid code changes between
        live trading and backtesting. Defaults to `datetime.now`"""
        return self._manager.now()

    def instruments(
        self, type: Optional[InstrumentType] = None, exchange: Optional[ExchangeType] = None
    ) -> List[Instrument]:
        """Return list of all available instruments"""
        return Instrument._instrumentdb.instruments(type=type, exchange=exchange)

    def exchanges(self, instrument_type: Optional[InstrumentType] = None) -> List[Exchange]:
        """Return list of all available exchanges"""
        # set comprehension with descriptive names (replaces the old nested
        # generator over `_`/`__`, which abused throwaway-name conventions)
        return list(
            {
                exchange
                for instrument in Instrument._instrumentdb.instruments(type=instrument_type)
                for exchange in instrument.exchanges
            }
        )

    def accounts(
        self, type: Optional[InstrumentType] = None, exchange: Optional[ExchangeType] = None
    ) -> None:  # TODO
        """Return list of all accounts"""
        raise NotImplementedError()

    async def subscribe(self, instrument: Instrument) -> None:
        """Subscribe to market data for the given instrument"""
        return await self._manager.subscribe(instrument=instrument, strategy=self)  # type: ignore # mixin

    async def lookup(
        self, instrument: Optional[Instrument], exchange: Optional[ExchangeType] = None
    ) -> List[Instrument]:
        """Return list of all available instruments that match the instrument given"""
        return await self._manager.lookup(instrument, exchange=exchange)

    async def book(self, instrument: Instrument) -> Optional[OrderBook]:
        """Return the order book for the given instrument, if available"""
        return await self._manager.book(instrument)

    def periodic(
        self,
        function: Callable,
        second: Union[int, str] = 0,
        minute: Union[int, str] = "*",
        hour: Union[int, str] = "*",
    ) -> Periodic:
        """periodically run a given async function.
        NOTE: precise timing is NOT guaranteed due to event loop scheduling.
        Args:
            function (callable); function to call periodically
            second (Union[int, str]); second to align periodic to, or '*' for every second
            minute (Union[int, str]); minute to align periodic to, or '*' for every minute
            hour (Union[int, str]); hour to align periodic to, or '*' for every hour
        NOTE: this is a rudimentary scheme but should be sufficient. For more
        complicated scheduling, just install multiple instances of the same periodic
        e.g. for running on :00, :15, :30, and :45 install
        periodic(0, 0, '*')
        periodic(0, 15, '*')
        periodic(0, 30, '*')
        periodic(0, 45, '*')
        """
        return self._manager.periodic(function, second, minute, hour)

    def restrictTradingHours(
        self,
        start_second: Optional[int] = None,
        start_minute: Optional[int] = None,
        start_hour: Optional[int] = None,
        end_second: Optional[int] = None,
        end_minute: Optional[int] = None,
        end_hour: Optional[int] = None,
        on_end_of_day: ExitRoutine = ExitRoutine.NONE,
    ) -> None:
        """Restrict a strategy's trading hours to
        [start_hour:start_minute:start_second, end_hour:end_minute:end_second]
        NOTE: precise timing is NOT guaranteed due to event loop scheduling.
        Args:
            start_second (Optional[int]); starting second
            start_minute (Optional[int]); starting minute
            start_hour (Optional[int]); starting hour
            end_second (Optional[int]); ending second
            end_minute (Optional[int]); ending minute
            end_hour (Optional[int]); ending hour
            on_end_of_day (ExitRoutine); what to do when you hit the end time
        """
        self._manager.restrictTradingHours(
            self,  # type: ignore # mixin
            start_second=start_second,
            start_minute=start_minute,
            start_hour=start_hour,
            end_second=end_second,
            end_minute=end_minute,
            end_hour=end_hour,
            on_end_of_day=on_end_of_day,
        )

    def slippage(self, trade: Trade) -> None:
        """hook to inject slippage when backtesting; default is a no-op.
        Subclasses may modify the trade in place.
        Args:
            trade (Trade): the completed trade to adjust
        """
        pass

    def transactionCost(self, trade: Trade) -> None:
        """hook to inject transaction costs when backtesting; default is a no-op.
        Subclasses may modify the trade in place.
        Args:
            trade (Trade): the completed trade to adjust
        """
        pass
| 36.418719 | 106 | 0.608008 | import asyncio
from datetime import datetime
from typing import Union, Callable, Optional, List, TYPE_CHECKING
from ..config import Side, TradingType, ExitRoutine, InstrumentType
from ..core import Trade, Instrument, ExchangeType, Order, OrderBook
from ..exchange import Exchange
from ..engine.managers import Periodic
if TYPE_CHECKING:
from mxts.engine import StrategyManager
class StrategyUtilsMixin(object):
_manager: "StrategyManager"
def orders(
self,
instrument: Instrument = None,
exchange: ExchangeType = None,
side: Side = None,
) -> List[Order]:
return self._manager.orders(self, instrument, exchange, side) ef pastOrders(
self,
instrument: Instrument = None,
exchange: ExchangeType = None,
side: Side = None,
) -> List[Order]:
return self._manager.pastOrders(self, instrument, exchange, side) ef trades(
self,
instrument: Instrument = None,
exchange: ExchangeType = None,
side: Side = None,
) -> List[Trade]:
return self._manager.trades(self, instrument, exchange, side) self, type: InstrumentType = None, exchange: ExchangeType = None
) -> List[Instrument]:
return Instrument._instrumentdb.instruments(type=type, exchange=exchange)
def exchanges(self, instrument_type: InstrumentType = None) -> List[Exchange]:
return list(
set(
__
for _ in Instrument._instrumentdb.instruments(type=instrument_type)
for __ in _.exchanges
)
)
def accounts(
self, type: InstrumentType = None, exchange: ExchangeType = None
) -> None:
raise NotImplementedError()
async def subscribe(self, instrument: Instrument) -> None:
return await self._manager.subscribe(instrument=instrument, strategy=self) sync def lookup(
self, instrument: Optional[Instrument], exchange: ExchangeType = None
) -> List[Instrument]:
return await self._manager.lookup(instrument, exchange=exchange)
async def book(self, instrument: Instrument) -> Optional[OrderBook]:
return await self._manager.book(instrument)
def periodic(
self,
function: Callable,
second: Union[int, str] = 0,
minute: Union[int, str] = "*",
hour: Union[int, str] = "*",
) -> Periodic:
return self._manager.periodic(function, second, minute, hour)
def restrictTradingHours(
self,
start_second: Optional[int] = None,
start_minute: Optional[int] = None,
start_hour: Optional[int] = None,
end_second: Optional[int] = None,
end_minute: Optional[int] = None,
end_hour: Optional[int] = None,
on_end_of_day: ExitRoutine = ExitRoutine.NONE,
) -> None:
self._manager.restrictTradingHours(
self, start_second=start_second,
start_minute=start_minute,
start_hour=start_hour,
end_second=end_second,
end_minute=end_minute,
end_hour=end_hour,
on_end_of_day=on_end_of_day,
)
def slippage(self, trade: Trade) -> None:
pass
def transactionCost(self, trade: Trade) -> None:
pass
| true | true |
f73689676c7e34072751132b67e88ebf427c1287 | 842 | py | Python | setup.py | yupingso/randomproto | a36c0e13be893d9fd3a63e3390fc11fc0de250b9 | [
"MIT"
] | 6 | 2019-03-25T03:45:43.000Z | 2019-07-03T06:31:31.000Z | setup.py | yupingso/randomproto | a36c0e13be893d9fd3a63e3390fc11fc0de250b9 | [
"MIT"
] | 1 | 2019-06-03T09:13:44.000Z | 2019-06-03T09:13:44.000Z | setup.py | yupingso/randomproto | a36c0e13be893d9fd3a63e3390fc11fc0de250b9 | [
"MIT"
import setuptools

# Read the long description up front with a context manager so the file
# handle is closed deterministically (the inline open().read() leaked it).
with open('README.md') as readme:
    long_description = readme.read()

setuptools.setup(
    name='randomproto',
    version='0.0.1',
    py_modules=('randomproto',),
    install_requires=[
        'protobuf>=3.6.0',
    ],
    setup_requires=[
        'pytest-runner',
    ],
    tests_require=[
        'pytest==4.2.1',
        'pytest-cov==2.6.1',
        'pytest-mock==1.10.0',
    ],
    author='Yu-Ping Wu',
    author_email='yupingso@gmail.com',
    description='Random protobuf object generator',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='protobuf proto message random generate generator',
    url='https://github.com/yupingso/randomproto',
    classifiers=[
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| 26.3125 | 64 | 0.608076 | import setuptools
setuptools.setup(
name='randomproto',
version='0.0.1',
py_modules=('randomproto',),
install_requires=[
'protobuf>=3.6.0',
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest==4.2.1',
'pytest-cov==2.6.1',
'pytest-mock==1.10.0',
],
author='Yu-Ping Wu',
author_email='yupingso@gmail.com',
description='Random protobuf object generator',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
keywords='protobuf proto message random generate generator',
url='https://github.com/yupingso/randomproto',
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| true | true |
f7368a1ac02d29ee3a35858f88c6ce9c101d00b8 | 993 | py | Python | lib/pycoin/pycoin/tx/pay_to/ScriptNulldata.py | AYCHDO/Dominus | d7065816febafb6cf0fb1142ff7dc7e454c835ad | [
"MIT"
] | 68 | 2015-08-30T13:23:12.000Z | 2022-02-26T06:59:15.000Z | lib/pycoin/pycoin/tx/pay_to/ScriptNulldata.py | AYCHDO/Dominus | d7065816febafb6cf0fb1142ff7dc7e454c835ad | [
"MIT"
] | 44 | 2016-02-14T02:08:00.000Z | 2020-11-11T09:03:55.000Z | lib/pycoin/pycoin/tx/pay_to/ScriptNulldata.py | AYCHDO/Dominus | d7065816febafb6cf0fb1142ff7dc7e454c835ad | [
"MIT"
] | 56 | 2015-08-26T12:49:20.000Z | 2022-02-17T19:06:36.000Z | from ..script import tools
from ...serialize import b2h
from .ScriptType import ScriptType
class ScriptNulldata(ScriptType):
    """An OP_RETURN output script carrying arbitrary (null) data."""
    TEMPLATE = tools.compile("OP_RETURN OP_NULLDATA")

    def __init__(self, nulldata):
        # raw payload embedded after OP_RETURN
        self.nulldata = nulldata
        # lazily-built compiled script cache (see script())
        self._script = None

    @classmethod
    def from_script(cls, script):
        """Parse *script* into a ScriptNulldata, or raise ValueError."""
        r = cls.match(script)
        if r:
            nulldata = r["NULLDATA_LIST"][0]
            s = cls(nulldata)
            return s
        raise ValueError("bad script")

    def script(self):
        """Return the compiled script, building and caching it on first use."""
        if self._script is None:
            # create the script
            STANDARD_SCRIPT_OUT = "OP_RETURN %s"
            script_text = STANDARD_SCRIPT_OUT % b2h(self.nulldata)
            self._script = tools.compile(script_text)
        return self._script

    def info(self, netcode="BTC"):
        # fix: go through script() so the script is built on demand;
        # previously this returned script=None unless script() had
        # already been called
        return dict(type="nulldata", script=self.script(), summary=self.nulldata)

    def __repr__(self):
        return "<Script: nulldata %s>" % self.nulldata
| 26.837838 | 80 | 0.621349 | from ..script import tools
from ...serialize import b2h
from .ScriptType import ScriptType
class ScriptNulldata(ScriptType):
TEMPLATE = tools.compile("OP_RETURN OP_NULLDATA")
def __init__(self, nulldata):
self.nulldata = nulldata
self._script = None
@classmethod
def from_script(cls, script):
r = cls.match(script)
if r:
nulldata = r["NULLDATA_LIST"][0]
s = cls(nulldata)
return s
raise ValueError("bad script")
def script(self):
if self._script is None:
STANDARD_SCRIPT_OUT = "OP_RETURN %s"
script_text = STANDARD_SCRIPT_OUT % b2h(self.nulldata)
self._script = tools.compile(script_text)
return self._script
def info(self, netcode="BTC"):
return dict(type="nulldata", script=self._script, summary=self.nulldata)
def __repr__(self):
return "<Script: nulldata %s>" % self.nulldata
| true | true |
f7368ab478d23ff790912d01221ac59d2a174d83 | 799 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLES2/OVR/multiview_multisampled_render_to_texture.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLES2/OVR/multiview_multisampled_render_to_texture.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GLES2/OVR/multiview_multisampled_render_to_texture.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_OVR_multiview_multisampled_render_to_texture'
# Bind *function* to the GLES2 platform implementation of this extension,
# attaching the standard GLES2 error checker. (Autogenerated file -- the
# unusual spacing is preserved deliberately.)
def _f( function ):
    return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_OVR_multiview_multisampled_render_to_texture',error_checker=_errors._error_checker)
# Stub entry point for the extension function; the decorators replace the
# body with the real platform binding (autogenerated -- do not edit by hand).
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint,_cs.GLsizei,_cs.GLint,_cs.GLsizei)
def glFramebufferTextureMultisampleMultiviewOVR(target,attachment,texture,level,samples,baseViewIndex,numViews):pass
| 44.388889 | 148 | 0.809762 | from OpenGL import platform as _p, arrays
from OpenGL.raw.GLES2 import _types as _cs
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_OVR_multiview_multisampled_render_to_texture'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_OVR_multiview_multisampled_render_to_texture',error_checker=_errors._error_checker)
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLuint,_cs.GLint,_cs.GLsizei,_cs.GLint,_cs.GLsizei)
def glFramebufferTextureMultisampleMultiviewOVR(target,attachment,texture,level,samples,baseViewIndex,numViews):pass
| true | true |
f7368ae3c387c509bf7bb279b519d759d46d4dc4 | 1,059 | py | Python | parse_html.py | BeyondHeaven/parse_html | 5b59549de73f461706bb4bad9ebb989fe66adc8f | [
"MIT"
] | null | null | null | parse_html.py | BeyondHeaven/parse_html | 5b59549de73f461706bb4bad9ebb989fe66adc8f | [
"MIT"
] | null | null | null | parse_html.py | BeyondHeaven/parse_html | 5b59549de73f461706bb4bad9ebb989fe66adc8f | [
"MIT"
] | null | null | null | import argparse
import requests
import time
import random
def get_arguments(argv=None):
    """Parse command-line options.

    :param argv: optional list of argument strings; when None (the default,
                 preserving the old no-argument call) argparse falls back to
                 sys.argv. Accepting an explicit list makes the parser testable.
    :returns: argparse.Namespace with `url_file` and `output_dir`
    """
    parser = argparse.ArgumentParser(description="path")
    parser.add_argument("--url-file", type=str, default='',
                        help="url file path",
                        required=True)
    parser.add_argument("--output-dir", type=str, default='',
                        help="output file path",
                        required=True)
    return parser.parse_args(argv)
def get_source(url):
    """Fetch *url* with a browser-like User-Agent and return the body as text."""
    user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
    response = requests.get(url, headers={'User-Agent': user_agent})
    return response.content.decode('utf-8')
def main():
    """Download each URL listed in --url-file into numbered .html files.

    Output files are written as <output-dir>/1.html, 2.html, ... in input order.
    """
    args = get_arguments()
    # context managers close both files even if a download fails
    with open(args.url_file, 'r') as url_file:
        for i, url in enumerate(url_file, start=1):
            # polite random delay between requests
            time.sleep(random.randint(3, 5))
            html = get_source(url)
            with open(args.output_dir + '/' + str(i) + '.html', 'w') as out:
                out.write(html)


if __name__ == '__main__':
    main()
import requests
import time
import random
def get_arguments():
parser = argparse.ArgumentParser(description="path")
parser.add_argument("--url-file", type=str, default='',
help="url file path",
required=True)
parser.add_argument("--output-dir", type=str, default='',
help="output file path",
required=True)
return parser.parse_args()
def get_source(url):
headers={'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
response = requests.get(url,headers = headers)
return (response.content).decode('utf-8')
def main():
i=0
args = get_arguments()
print(dir(args))
f=open(args.url_file,'r')
for url in f.readlines():
i+=1
time.sleep(random.randint(3,5))
html=get_source(url)
w=open(args.output_dir+'/'+str(i)+'.html','w')
w.write(html)
w.close()
f.close()
if __name__ == '__main__':
main() | true | true |
f7368b970bb59efe26b74439b895d7e2efff05cb | 4,118 | py | Python | api/routers/odin.py | KAIYO-OSS/titan | 0a4e296ded466785334279f7106d390c1dd4c30b | [
"Apache-2.0"
] | 55 | 2021-01-27T18:39:39.000Z | 2022-03-16T10:56:46.000Z | api/routers/odin.py | KAIYO-OSS/titan | 0a4e296ded466785334279f7106d390c1dd4c30b | [
"Apache-2.0"
] | 17 | 2021-06-05T12:28:35.000Z | 2022-02-14T13:11:26.000Z | api/routers/odin.py | KAIYO-OSS/titan | 0a4e296ded466785334279f7106d390c1dd4c30b | [
"Apache-2.0"
] | null | null | null | import json
from fastapi import APIRouter, HTTPException
from util.helm import Helm
from models.deployRequest import DeployRequest
from models.rollbackRequest import RollbackRequest
from util.utilityHelpers import Utils
from util.kubernetes import Kubernentes
import etcd3
router = APIRouter()
# module-level etcd client, created at import time and shared by all handlers
etcd = etcd3.client()
@router.post("/odin/service/", tags=["odin"])
async def deploy_service(deploy_request: DeployRequest):
try:
Helm.odinHelmSetup()
output = Utils.getJson(Helm.deployService(deploy_request.service_name,
deploy_request.chart_name, deploy_request.values))
service_list = Utils.getJson(Helm.listAllServices())
etcd.put('service_list', json.dumps(service_list))
return {
"status": "200",
"metadata": output,
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Service deployment failed: " + str(ex))
@router.delete("/odin/service/{service_name}", tags=["odin"])
async def delete_service(service_name):
try:
Helm.deleteService(service_name)
service_list = Utils.getJson(Helm.listAllServices())
etcd.put('service_list', json.dumps(service_list))
return {
"status": "200",
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Service delete failed: " + str(ex))
@router.get("/odin/service/{service_name}/status", tags=["odin"])
async def get_status(service_name):
try:
status = Utils.getJson(Helm.getServiceStatus(service_name))
values = Utils.getJson(Helm.getServiceValues(service_name))
revisions = Utils.getJson(Helm.getServiceRevisions(service_name))
return {
"status": "200",
"metadata": status,
"values": values,
"revisions": revisions,
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Failed to fetch Service Status: " + str(ex))
@router.get("/odin/service/{service_name}/revisions", tags=["odin"])
async def get_revisions(service_name):
try:
revisions = Utils.getJson(Helm.getServiceRevisions(service_name))
return {
"status": "200",
"revisions": revisions,
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Failed to fetch Service Revisions: " + str(ex))
@router.post("/odin/service/rollback", tags=["odin"])
async def rollback_service(rollback_request: RollbackRequest):
try:
Helm.odinHelmSetup()
Helm.rollbackService(
rollback_request.service_name, rollback_request.revision)
return {
"status": "200",
"metadata": "Rolled back successfully",
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Service deployment failed: " + str(ex))
@router.get("/odin/services/", tags=["odin"])
async def get_all_services():
try:
# service_list = Utils.getJsonValue(etcd, 'service_list')
service_list = Utils.getJson(Helm.listAllServices())
return {
"status": "200",
"metadata": service_list,
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Failed to fetch all services: " + str(ex))
@router.get("/odin/metrics/{pod_name}", tags=["odin"])
async def get_pod_metrics(pod_name):
try:
metrics_params = Kubernentes.getPodMetrics(podName=pod_name).split()
return {
"status": "200",
"metadata": {
"name": metrics_params[3],
"cpu": metrics_params[4],
"memory": metrics_params[5]
}
}
except Exception as ex:
raise HTTPException(
status_code=500, details="Error getting metrics: " + str(ex)
) | 32.68254 | 100 | 0.607576 | import json
from fastapi import APIRouter, HTTPException
from util.helm import Helm
from models.deployRequest import DeployRequest
from models.rollbackRequest import RollbackRequest
from util.utilityHelpers import Utils
from util.kubernetes import Kubernentes
import etcd3
router = APIRouter()
etcd = etcd3.client()
@router.post("/odin/service/", tags=["odin"])
async def deploy_service(deploy_request: DeployRequest):
try:
Helm.odinHelmSetup()
output = Utils.getJson(Helm.deployService(deploy_request.service_name,
deploy_request.chart_name, deploy_request.values))
service_list = Utils.getJson(Helm.listAllServices())
etcd.put('service_list', json.dumps(service_list))
return {
"status": "200",
"metadata": output,
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Service deployment failed: " + str(ex))
@router.delete("/odin/service/{service_name}", tags=["odin"])
async def delete_service(service_name):
try:
Helm.deleteService(service_name)
service_list = Utils.getJson(Helm.listAllServices())
etcd.put('service_list', json.dumps(service_list))
return {
"status": "200",
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Service delete failed: " + str(ex))
@router.get("/odin/service/{service_name}/status", tags=["odin"])
async def get_status(service_name):
try:
status = Utils.getJson(Helm.getServiceStatus(service_name))
values = Utils.getJson(Helm.getServiceValues(service_name))
revisions = Utils.getJson(Helm.getServiceRevisions(service_name))
return {
"status": "200",
"metadata": status,
"values": values,
"revisions": revisions,
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Failed to fetch Service Status: " + str(ex))
@router.get("/odin/service/{service_name}/revisions", tags=["odin"])
async def get_revisions(service_name):
try:
revisions = Utils.getJson(Helm.getServiceRevisions(service_name))
return {
"status": "200",
"revisions": revisions,
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Failed to fetch Service Revisions: " + str(ex))
@router.post("/odin/service/rollback", tags=["odin"])
async def rollback_service(rollback_request: RollbackRequest):
try:
Helm.odinHelmSetup()
Helm.rollbackService(
rollback_request.service_name, rollback_request.revision)
return {
"status": "200",
"metadata": "Rolled back successfully",
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Service deployment failed: " + str(ex))
@router.get("/odin/services/", tags=["odin"])
async def get_all_services():
try:
service_list = Utils.getJson(Helm.listAllServices())
return {
"status": "200",
"metadata": service_list,
"error": ""
}
except Exception as ex:
raise HTTPException(
status_code=500, detail="Failed to fetch all services: " + str(ex))
@router.get("/odin/metrics/{pod_name}", tags=["odin"])
async def get_pod_metrics(pod_name):
try:
metrics_params = Kubernentes.getPodMetrics(podName=pod_name).split()
return {
"status": "200",
"metadata": {
"name": metrics_params[3],
"cpu": metrics_params[4],
"memory": metrics_params[5]
}
}
except Exception as ex:
raise HTTPException(
status_code=500, details="Error getting metrics: " + str(ex)
) | true | true |
f7368bbc050950d0d9166ce07494a26c550aff3f | 3,604 | py | Python | sdk/python/pulumi_azure_native/apimanagement/v20191201/get_tag_by_product.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/v20191201/get_tag_by_product.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/apimanagement/v20191201/get_tag_by_product.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetTagByProductResult',
'AwaitableGetTagByProductResult',
'get_tag_by_product',
]
@pulumi.output_type
class GetTagByProductResult:
    """
    Tag Contract details.

    Plain result object for the getTagByProduct invoke; each constructor
    argument is validated to be a str (when truthy) and stored via pulumi.set.
    (Autogenerated file -- do not edit by hand.)
    """
    def __init__(__self__, display_name=None, id=None, name=None, type=None):
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        Tag name.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetTagByProductResult(GetTagByProductResult):
    """Awaitable shim: lets the result be used with `await` in async code
    while resolving immediately (the `if False: yield` makes this a
    generator-based awaitable that returns at once)."""
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetTagByProductResult(
            display_name=self.display_name,
            id=self.id,
            name=self.name,
            type=self.type)
def get_tag_by_product(product_id: Optional[str] = None,
                       resource_group_name: Optional[str] = None,
                       service_name: Optional[str] = None,
                       tag_id: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTagByProductResult:
    """
    Tag Contract details.

    :param str product_id: Product identifier. Must be unique in the current API Management service instance.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the API Management service.
    :param str tag_id: Tag identifier. Must be unique in the current API Management service instance.
    """
    __args__ = dict()
    __args__['productId'] = product_id
    __args__['resourceGroupName'] = resource_group_name
    __args__['serviceName'] = service_name
    __args__['tagId'] = tag_id
    # default the invoke options and pin the provider SDK version
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20191201:getTagByProduct', __args__, opts=opts, typ=GetTagByProductResult).value
    return AwaitableGetTagByProductResult(
        display_name=__ret__.display_name,
        id=__ret__.id,
        name=__ret__.name,
        type=__ret__.type)
| 32.468468 | 145 | 0.636515 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetTagByProductResult',
'AwaitableGetTagByProductResult',
'get_tag_by_product',
]
@pulumi.output_type
class GetTagByProductResult:
def __init__(__self__, display_name=None, id=None, name=None, type=None):
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetTagByProductResult(GetTagByProductResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTagByProductResult(
display_name=self.display_name,
id=self.id,
name=self.name,
type=self.type)
def get_tag_by_product(product_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
tag_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTagByProductResult:
__args__ = dict()
__args__['productId'] = product_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['tagId'] = tag_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20191201:getTagByProduct', __args__, opts=opts, typ=GetTagByProductResult).value
return AwaitableGetTagByProductResult(
display_name=__ret__.display_name,
id=__ret__.id,
name=__ret__.name,
type=__ret__.type)
| true | true |
f7368d7d9df588d75941fb2cac77d3eec5ade79d | 2,257 | py | Python | ap_third_semester/compact_objects/figures/roche-lobe-radius.py | jacopok/notes | 805ebe1be49bbd14c6b46b24055f9fc7d1cd2586 | [
"Apache-2.0"
] | 6 | 2019-10-10T13:10:57.000Z | 2022-01-13T14:52:50.000Z | ap_third_semester/compact_objects/figures/roche-lobe-radius.py | jacopok/notes | 805ebe1be49bbd14c6b46b24055f9fc7d1cd2586 | [
"Apache-2.0"
] | null | null | null | ap_third_semester/compact_objects/figures/roche-lobe-radius.py | jacopok/notes | 805ebe1be49bbd14c6b46b24055f9fc7d1cd2586 | [
"Apache-2.0"
] | 3 | 2019-10-03T16:20:19.000Z | 2021-08-06T16:11:07.000Z | #%%
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
rc('text.latex', preamble=r'''\usepackage{amsmath}
\usepackage{physics}
\usepackage{siunitx}
''')
THR = .5
WIDTH = 0
# def weight(q):
# if WIDTH>0:
# offset = 1/2 - THR / WIDTH
# return (np.piecewise(q,
# condlist=[
# q < THR - WIDTH / 2,
# q > THR - WIDTH / 2 and q < THR + WIDTH / 2 ,
# q > THR + WIDTH / 2,
# ],
# funclist=[
# 0,
# lambda x: x / WIDTH + offset,
# 1
# ]
# ))
# else:
# return (np.piecewise(q,
# condlist=[q < THR, q >= THR],
# funclist=[0, 1]
# ))
def f1(q):
return (.46224 * (q / (1 + q))**(1 / 3))
def f2(q):
return (.38 + .2 * np.log10(q))
def f(q):
if q < 0.5:
return (f1(q))
else:
return(f2(q))
f = np.vectorize(f, signature='()->()')
#%%
qs = np.linspace(0, 8, num=1000)
f_q = f(qs)
# plt.plot(qs, f(qs))
# plt.xlabel('$q = M_2 / M_1$')
# plt.ylabel('$R_{\\text{{lobe}}} / a$')
# plt.savefig('roche-lobe-radius.pdf', format = 'pdf')
#%%
def a(q):
return((1+q)**4 / q**2)
a_q = a(qs)
plt.plot(qs, np.abs(np.gradient(f_q, qs) / f_q), label='$\\abs{\\Delta \\log f}$')
plt.plot(qs, np.abs(np.gradient(a_q, qs) / a_q), label='$\\abs{\\Delta \\log a}$')
plt.plot(qs, np.gradient(a_q, qs) / a_q + np.gradient(f_q, qs) / f_q, label='$\\Delta \\log a + \\Delta \\log f$', ls='--')
plt.axvline(1, label='$q = 1$', ls=':', c='black')
plt.xlabel('$q = M_2 / M_1$')
plt.ylabel('relative variation')
plt.legend()
plt.yscale('log')
plt.savefig('roche-lobe-relative-corrections.pdf')
plt.show()
#%%
qs = np.linspace(0, 5/4, num=200)
def circ(q):
return((.5 - .227 * np.log10(q))**4 * (1+q))
plt.plot(qs, f(1 / qs), label='Roche Lobe radius')
plt.plot(qs, circ(qs), label='Circularization radius')
plt.xlabel('$q$')
plt.ylim(0,1)
plt.legend()
plt.savefig('roche-vs-circularization.pdf')
# %%
| 23.030612 | 123 | 0.51883 |
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
rc('text.latex', preamble=r'''\usepackage{amsmath}
\usepackage{physics}
\usepackage{siunitx}
''')
THR = .5
WIDTH = 0
def f1(q):
return (.46224 * (q / (1 + q))**(1 / 3))
def f2(q):
return (.38 + .2 * np.log10(q))
def f(q):
if q < 0.5:
return (f1(q))
else:
return(f2(q))
f = np.vectorize(f, signature='()->()')
qs = np.linspace(0, 8, num=1000)
f_q = f(qs)
def a(q):
return((1+q)**4 / q**2)
a_q = a(qs)
plt.plot(qs, np.abs(np.gradient(f_q, qs) / f_q), label='$\\abs{\\Delta \\log f}$')
plt.plot(qs, np.abs(np.gradient(a_q, qs) / a_q), label='$\\abs{\\Delta \\log a}$')
plt.plot(qs, np.gradient(a_q, qs) / a_q + np.gradient(f_q, qs) / f_q, label='$\\Delta \\log a + \\Delta \\log f$', ls='--')
plt.axvline(1, label='$q = 1$', ls=':', c='black')
plt.xlabel('$q = M_2 / M_1$')
plt.ylabel('relative variation')
plt.legend()
plt.yscale('log')
plt.savefig('roche-lobe-relative-corrections.pdf')
plt.show()
qs = np.linspace(0, 5/4, num=200)
def circ(q):
return((.5 - .227 * np.log10(q))**4 * (1+q))
plt.plot(qs, f(1 / qs), label='Roche Lobe radius')
plt.plot(qs, circ(qs), label='Circularization radius')
plt.xlabel('$q$')
plt.ylim(0,1)
plt.legend()
plt.savefig('roche-vs-circularization.pdf')
| true | true |
f7368e09b14dc6684b57afb09e9f7492ed3db638 | 169 | py | Python | teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/userauthority/urls.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | 6 | 2018-11-26T08:42:52.000Z | 2020-06-01T08:33:48.000Z | teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/userauthority/urls.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | null | null | null | teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/userauthority/urls.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | 1 | 2019-01-22T06:45:36.000Z | 2019-01-22T06:45:36.000Z | #coding=utf-8
# coding=utf-8
'''
Created on 2014-1-5
@author: ETHAN
'''
from django.conf.urls import patterns,url
urlpatterns = patterns(
)
| 10.5625 | 41 | 0.591716 |
from django.conf.urls import patterns,url
urlpatterns = patterns(
)
| true | true |
f7368e7258b8fca34fe7355cadbf4b4365596283 | 1,203 | py | Python | lambda/lambda_function.py | kykasper/konomania-bot | 0649d4f9c7ee47717e79f96558ba914b81ea5ec2 | [
"MIT"
] | null | null | null | lambda/lambda_function.py | kykasper/konomania-bot | 0649d4f9c7ee47717e79f96558ba914b81ea5ec2 | [
"MIT"
] | null | null | null | lambda/lambda_function.py | kykasper/konomania-bot | 0649d4f9c7ee47717e79f96558ba914b81ea5ec2 | [
"MIT"
] | null | null | null | import random
import boto3
import tweepy
import twitter_config
# 先ほど取得した各種キーを代入する
CK=twitter_config.CONSUMER_KEY
CS=twitter_config.CONSUMER_SECRET
AT=twitter_config.ACCESS_TOKEN
AS=twitter_config.ACCESS_TOKEN_SECRET
# Twitterオブジェクトの生成
auth = tweepy.OAuthHandler(CK, CS)
auth.set_access_token(AT, AS)
api = tweepy.API(auth)
# dynamodb
TABLENAME = 'konomania-tweet'
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(TABLENAME)
def lambda_handler(event, context):
# dynamodbをscanし、ランダムにitem_idを作成
count_data = table.scan(Select='COUNT')
cnt = count_data['Count']
item_id = random.randrange(cnt)
#getItemメソッドの呼び出し(主キー検索)
response = table.get_item(
#パラメーターとして主キー情報(辞書型)を渡す
#Keyという変数名?は固定(違う名前だとエラーになる)
Key={
#主キー情報を設定
#今回はテーブルにid(プライマリーキー)・sex(ソートキー)を定義した
'id': item_id,
}
)
#結果の取得
item = response['Item']
#辞書型オブジェクトとして取得できる(テーブルのカラムが定義されている)
#キーに一致するものがない場合、エラーとなる
print(item)
# tweet
tweet = response['Item']["tweet"].replace('\\n', '\n')
api.update_status(tweet)
if __name__ == "__main__":
event = {}
context = ""
lambda_handler(event, context) | 22.277778 | 58 | 0.686617 | import random
import boto3
import tweepy
import twitter_config
CK=twitter_config.CONSUMER_KEY
CS=twitter_config.CONSUMER_SECRET
AT=twitter_config.ACCESS_TOKEN
AS=twitter_config.ACCESS_TOKEN_SECRET
auth = tweepy.OAuthHandler(CK, CS)
auth.set_access_token(AT, AS)
api = tweepy.API(auth)
TABLENAME = 'konomania-tweet'
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(TABLENAME)
def lambda_handler(event, context):
count_data = table.scan(Select='COUNT')
cnt = count_data['Count']
item_id = random.randrange(cnt)
response = table.get_item(
Key={
'id': item_id,
}
)
item = response['Item']
print(item)
tweet = response['Item']["tweet"].replace('\\n', '\n')
api.update_status(tweet)
if __name__ == "__main__":
event = {}
context = ""
lambda_handler(event, context) | true | true |
f7368ec2b4775aaf8bb0ac3b38ac65be9ce467a6 | 4,135 | py | Python | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/patchreader_unittest.py | viewdy/phantomjs | eddb0db1d253fd0c546060a4555554c8ee08c13c | [
"BSD-3-Clause"
] | 1 | 2015-05-27T13:52:20.000Z | 2015-05-27T13:52:20.000Z | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/patchreader_unittest.py | mrampersad/phantomjs | dca6f77a36699eb4e1c46f7600cca618f01b0ac3 | [
"BSD-3-Clause"
] | null | null | null | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/patchreader_unittest.py | mrampersad/phantomjs | dca6f77a36699eb4e1c46f7600cca618f01b0ac3 | [
"BSD-3-Clause"
] | 1 | 2017-03-19T13:03:23.000Z | 2017-03-19T13:03:23.000Z | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2009 Torch Mobile Inc.
# Copyright (C) 2009 Apple Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.style.patchreader import PatchReader
class PatchReaderTest(unittest.TestCase):
"""Test the PatchReader class."""
class MockTextFileReader(object):
def __init__(self):
self.passed_to_process_file = []
"""A list of (file_path, line_numbers) pairs."""
self.delete_only_file_count = 0
"""A number of times count_delete_only_file() called"""
def process_file(self, file_path, line_numbers):
self.passed_to_process_file.append((file_path, line_numbers))
def count_delete_only_file(self):
self.delete_only_file_count += 1
def setUp(self):
file_reader = self.MockTextFileReader()
self._file_reader = file_reader
self._patch_checker = PatchReader(file_reader)
def _call_check_patch(self, patch_string):
self._patch_checker.check(patch_string)
def _assert_checked(self, passed_to_process_file, delete_only_file_count):
self.assertEqual(self._file_reader.passed_to_process_file,
passed_to_process_file)
self.assertEqual(self._file_reader.delete_only_file_count,
delete_only_file_count)
def test_check_patch(self):
# The modified line_numbers array for this patch is: [2].
self._call_check_patch("""diff --git a/__init__.py b/__init__.py
index ef65bee..e3db70e 100644
--- a/__init__.py
+++ b/__init__.py
@@ -1,1 +1,2 @@
# Required for Python to search this directory for module files
+# New line
""")
self._assert_checked([("__init__.py", [2])], 0)
def test_check_patch_with_deletion(self):
self._call_check_patch("""Index: __init__.py
===================================================================
--- __init__.py (revision 3593)
+++ __init__.py (working copy)
@@ -1 +0,0 @@
-foobar
""")
# _mock_check_file should not be called for the deletion patch.
self._assert_checked([], 1)
def test_check_patch_with_png_deletion(self):
fs = MockFileSystem()
diff_text = """Index: LayoutTests/platform/mac/foo-expected.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = image/png
"""
self._patch_checker.check(diff_text, fs)
self._assert_checked([], 1)
| 40.539216 | 78 | 0.698912 |
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.style.patchreader import PatchReader
class PatchReaderTest(unittest.TestCase):
class MockTextFileReader(object):
def __init__(self):
self.passed_to_process_file = []
self.delete_only_file_count = 0
def process_file(self, file_path, line_numbers):
self.passed_to_process_file.append((file_path, line_numbers))
def count_delete_only_file(self):
self.delete_only_file_count += 1
def setUp(self):
file_reader = self.MockTextFileReader()
self._file_reader = file_reader
self._patch_checker = PatchReader(file_reader)
def _call_check_patch(self, patch_string):
self._patch_checker.check(patch_string)
def _assert_checked(self, passed_to_process_file, delete_only_file_count):
self.assertEqual(self._file_reader.passed_to_process_file,
passed_to_process_file)
self.assertEqual(self._file_reader.delete_only_file_count,
delete_only_file_count)
def test_check_patch(self):
self._call_check_patch("""diff --git a/__init__.py b/__init__.py
index ef65bee..e3db70e 100644
--- a/__init__.py
+++ b/__init__.py
@@ -1,1 +1,2 @@
# Required for Python to search this directory for module files
+# New line
""")
self._assert_checked([("__init__.py", [2])], 0)
def test_check_patch_with_deletion(self):
self._call_check_patch("""Index: __init__.py
===================================================================
--- __init__.py (revision 3593)
+++ __init__.py (working copy)
@@ -1 +0,0 @@
-foobar
""")
self._assert_checked([], 1)
def test_check_patch_with_png_deletion(self):
fs = MockFileSystem()
diff_text = """Index: LayoutTests/platform/mac/foo-expected.png
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = image/png
"""
self._patch_checker.check(diff_text, fs)
self._assert_checked([], 1)
| true | true |
f7368f0142b75a58a4a5ca792952e50e33b0190d | 1,180 | py | Python | tests/test.py | RickBarretto/toCase | 13720f3bcef017951c76dc41ba0dbe0fb355b66a | [
"MIT"
] | 3 | 2021-04-20T00:36:09.000Z | 2022-01-10T06:34:59.000Z | tests/test.py | RickBarretto/toCase | 13720f3bcef017951c76dc41ba0dbe0fb355b66a | [
"MIT"
] | 3 | 2022-01-10T07:27:25.000Z | 2022-01-11T19:03:49.000Z | tests/test.py | RickBarretto/toCase | 13720f3bcef017951c76dc41ba0dbe0fb355b66a | [
"MIT"
] | null | null | null | from rb_tocase import Case
STRINGS = [
"Hello World Haha",
"hello world haha",
"HELLO WORLD HAHA",
"Hello-World-Haha",
"hello-world-haha",
"HELLO-WORLD-HAHA",
"Hello_World_Haha",
"hello_world_haha",
"HELLO_WORLD_HAHA",
"HelloWorldHaha",
"helloWorldHaha"
]
for s in STRINGS:
try:
print(
s, "=>\tCamel\t=>", Case.to_camel(s)
)
except:
print("Can't change case for", s)
try:
print(
s, "=>\tKebab\t=>", Case.to_kebab(s)
)
except:
print("Can't change case for", s)
try:
print(
s, "=>\tPascal\t=>", Case.to_pascal(s)
)
except:
print("Can't change case for", s)
try:
print(
s, "=>\tSentence\t=>", Case.to_sentence(s)
)
except:
print("Can't change case for", s)
try:
print(
s, "=>\tSnake\t=>", Case.to_snake(s)
)
except:
print("Can't change case for", s)
try:
print(
s, "=>\tUpperSnake\t=>", Case.to_upper_snake(s)
)
except:
print("UpperSnake =>Can't change case for", s) | 20.344828 | 59 | 0.491525 | from rb_tocase import Case
STRINGS = [
"Hello World Haha",
"hello world haha",
"HELLO WORLD HAHA",
"Hello-World-Haha",
"hello-world-haha",
"HELLO-WORLD-HAHA",
"Hello_World_Haha",
"hello_world_haha",
"HELLO_WORLD_HAHA",
"HelloWorldHaha",
"helloWorldHaha"
]
for s in STRINGS:
try:
print(
s, "=>\tCamel\t=>", Case.to_camel(s)
)
except:
print("Can't change case for", s)
try:
print(
s, "=>\tKebab\t=>", Case.to_kebab(s)
)
except:
print("Can't change case for", s)
try:
print(
s, "=>\tPascal\t=>", Case.to_pascal(s)
)
except:
print("Can't change case for", s)
try:
print(
s, "=>\tSentence\t=>", Case.to_sentence(s)
)
except:
print("Can't change case for", s)
try:
print(
s, "=>\tSnake\t=>", Case.to_snake(s)
)
except:
print("Can't change case for", s)
try:
print(
s, "=>\tUpperSnake\t=>", Case.to_upper_snake(s)
)
except:
print("UpperSnake =>Can't change case for", s) | true | true |
f7368fc63beb6a039e40aa6591c00659166ee1c4 | 100,108 | py | Python | sdk/python/pulumi_azure/compute/scale_set.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
] | 109 | 2018-06-18T00:19:44.000Z | 2022-02-20T05:32:57.000Z | sdk/python/pulumi_azure/compute/scale_set.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
] | 663 | 2018-06-18T21:08:46.000Z | 2022-03-31T20:10:11.000Z | sdk/python/pulumi_azure/compute/scale_set.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | [
"ECL-2.0",
"Apache-2.0"
] | 41 | 2018-07-19T22:37:38.000Z | 2022-03-14T10:56:26.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ScaleSetArgs', 'ScaleSet']
@pulumi.input_type
class ScaleSetArgs:
def __init__(__self__, *,
network_profiles: pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]],
os_profile: pulumi.Input['ScaleSetOsProfileArgs'],
resource_group_name: pulumi.Input[str],
sku: pulumi.Input['ScaleSetSkuArgs'],
storage_profile_os_disk: pulumi.Input['ScaleSetStorageProfileOsDiskArgs'],
upgrade_policy_mode: pulumi.Input[str],
automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
boot_diagnostics: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
extensions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]] = None,
health_probe_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ScaleSetIdentityArgs']] = None,
license_type: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
os_profile_linux_config: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']] = None,
os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]] = None,
os_profile_windows_config: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']] = None,
overprovision: Optional[pulumi.Input[bool]] = None,
plan: Optional[pulumi.Input['ScaleSetPlanArgs']] = None,
priority: Optional[pulumi.Input[str]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
rolling_upgrade_policy: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']] = None,
single_placement_group: Optional[pulumi.Input[bool]] = None,
storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]] = None,
storage_profile_image_reference: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ScaleSet resource.
:param pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]] network_profiles: A collection of network profile block as documented below.
:param pulumi.Input['ScaleSetOsProfileArgs'] os_profile: A Virtual Machine OS Profile block as documented below.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
:param pulumi.Input['ScaleSetSkuArgs'] sku: A sku block as documented below.
:param pulumi.Input['ScaleSetStorageProfileOsDiskArgs'] storage_profile_os_disk: A storage profile os disk block as documented below
:param pulumi.Input[str] upgrade_policy_mode: Specifies the mode of an upgrade to virtual machines in the scale set. Possible values, `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
:param pulumi.Input[bool] automatic_os_upgrade: Automatic OS patches can be applied by Azure to your scaleset. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
:param pulumi.Input['ScaleSetBootDiagnosticsArgs'] boot_diagnostics: A boot diagnostics profile block as referenced below.
:param pulumi.Input[str] eviction_policy: Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
:param pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]] extensions: Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
:param pulumi.Input[str] health_probe_id: Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
:param pulumi.Input[str] license_type: Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
:param pulumi.Input['ScaleSetOsProfileLinuxConfigArgs'] os_profile_linux_config: A Linux config block as documented below.
:param pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]] os_profile_secrets: A collection of Secret blocks as documented below.
:param pulumi.Input['ScaleSetOsProfileWindowsConfigArgs'] os_profile_windows_config: A Windows config block as documented below.
:param pulumi.Input[bool] overprovision: Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
:param pulumi.Input['ScaleSetPlanArgs'] plan: A plan block as documented below.
:param pulumi.Input[str] priority: Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
:param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created
:param pulumi.Input['ScaleSetRollingUpgradePolicyArgs'] rolling_upgrade_policy: A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
:param pulumi.Input[bool] single_placement_group: Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
:param pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]] storage_profile_data_disks: A storage profile data disk block as documented below
:param pulumi.Input['ScaleSetStorageProfileImageReferenceArgs'] storage_profile_image_reference: A storage profile image reference block as documented below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A collection of availability zones to spread the Virtual Machines over.
"""
pulumi.set(__self__, "network_profiles", network_profiles)
pulumi.set(__self__, "os_profile", os_profile)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "sku", sku)
pulumi.set(__self__, "storage_profile_os_disk", storage_profile_os_disk)
pulumi.set(__self__, "upgrade_policy_mode", upgrade_policy_mode)
if automatic_os_upgrade is not None:
pulumi.set(__self__, "automatic_os_upgrade", automatic_os_upgrade)
if boot_diagnostics is not None:
pulumi.set(__self__, "boot_diagnostics", boot_diagnostics)
if eviction_policy is not None:
pulumi.set(__self__, "eviction_policy", eviction_policy)
if extensions is not None:
pulumi.set(__self__, "extensions", extensions)
if health_probe_id is not None:
pulumi.set(__self__, "health_probe_id", health_probe_id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if license_type is not None:
pulumi.set(__self__, "license_type", license_type)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if os_profile_linux_config is not None:
pulumi.set(__self__, "os_profile_linux_config", os_profile_linux_config)
if os_profile_secrets is not None:
pulumi.set(__self__, "os_profile_secrets", os_profile_secrets)
if os_profile_windows_config is not None:
pulumi.set(__self__, "os_profile_windows_config", os_profile_windows_config)
if overprovision is not None:
pulumi.set(__self__, "overprovision", overprovision)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if proximity_placement_group_id is not None:
pulumi.set(__self__, "proximity_placement_group_id", proximity_placement_group_id)
if rolling_upgrade_policy is not None:
pulumi.set(__self__, "rolling_upgrade_policy", rolling_upgrade_policy)
if single_placement_group is not None:
pulumi.set(__self__, "single_placement_group", single_placement_group)
if storage_profile_data_disks is not None:
pulumi.set(__self__, "storage_profile_data_disks", storage_profile_data_disks)
if storage_profile_image_reference is not None:
pulumi.set(__self__, "storage_profile_image_reference", storage_profile_image_reference)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if zones is not None:
pulumi.set(__self__, "zones", zones)
    @property
    @pulumi.getter(name="networkProfiles")
    def network_profiles(self) -> pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]:
        """
        A collection of network profile block as documented below.
        """
        return pulumi.get(self, "network_profiles")
    @network_profiles.setter
    def network_profiles(self, value: pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]):
        """Replace the stored `network_profiles` value."""
        pulumi.set(self, "network_profiles", value)
    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> pulumi.Input['ScaleSetOsProfileArgs']:
        """
        A Virtual Machine OS Profile block as documented below.
        """
        return pulumi.get(self, "os_profile")
    @os_profile.setter
    def os_profile(self, value: pulumi.Input['ScaleSetOsProfileArgs']):
        """Replace the stored `os_profile` value."""
        pulumi.set(self, "os_profile", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        """Replace the stored `resource_group_name` value."""
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Input['ScaleSetSkuArgs']:
        """
        A sku block as documented below.
        """
        return pulumi.get(self, "sku")
    @sku.setter
    def sku(self, value: pulumi.Input['ScaleSetSkuArgs']):
        """Replace the stored `sku` value."""
        pulumi.set(self, "sku", value)
    @property
    @pulumi.getter(name="storageProfileOsDisk")
    def storage_profile_os_disk(self) -> pulumi.Input['ScaleSetStorageProfileOsDiskArgs']:
        """
        A storage profile os disk block as documented below.
        """
        return pulumi.get(self, "storage_profile_os_disk")
    @storage_profile_os_disk.setter
    def storage_profile_os_disk(self, value: pulumi.Input['ScaleSetStorageProfileOsDiskArgs']):
        """Replace the stored `storage_profile_os_disk` value."""
        pulumi.set(self, "storage_profile_os_disk", value)
    @property
    @pulumi.getter(name="upgradePolicyMode")
    def upgrade_policy_mode(self) -> pulumi.Input[str]:
        """
        Specifies the mode of an upgrade to virtual machines in the scale set. Possible values, `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
        """
        return pulumi.get(self, "upgrade_policy_mode")
    @upgrade_policy_mode.setter
    def upgrade_policy_mode(self, value: pulumi.Input[str]):
        """Replace the stored `upgrade_policy_mode` value."""
        pulumi.set(self, "upgrade_policy_mode", value)
    @property
    @pulumi.getter(name="automaticOsUpgrade")
    def automatic_os_upgrade(self) -> Optional[pulumi.Input[bool]]:
        """
        Automatic OS patches can be applied by Azure to your scaleset. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
        """
        return pulumi.get(self, "automatic_os_upgrade")
    @automatic_os_upgrade.setter
    def automatic_os_upgrade(self, value: Optional[pulumi.Input[bool]]):
        """Replace the stored `automatic_os_upgrade` value."""
        pulumi.set(self, "automatic_os_upgrade", value)
    @property
    @pulumi.getter(name="bootDiagnostics")
    def boot_diagnostics(self) -> Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]:
        """
        A boot diagnostics profile block as referenced below.
        """
        return pulumi.get(self, "boot_diagnostics")
    @boot_diagnostics.setter
    def boot_diagnostics(self, value: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]):
        """Replace the stored `boot_diagnostics` value."""
        pulumi.set(self, "boot_diagnostics", value)
    @property
    @pulumi.getter(name="evictionPolicy")
    def eviction_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
        """
        return pulumi.get(self, "eviction_policy")
    @eviction_policy.setter
    def eviction_policy(self, value: Optional[pulumi.Input[str]]):
        """Replace the stored `eviction_policy` value."""
        pulumi.set(self, "eviction_policy", value)
    @property
    @pulumi.getter
    def extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]:
        """
        Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
        """
        return pulumi.get(self, "extensions")
    @extensions.setter
    def extensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]):
        """Replace the stored `extensions` value."""
        pulumi.set(self, "extensions", value)
    @property
    @pulumi.getter(name="healthProbeId")
    def health_probe_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
        """
        return pulumi.get(self, "health_probe_id")
    @health_probe_id.setter
    def health_probe_id(self, value: Optional[pulumi.Input[str]]):
        """Replace the stored `health_probe_id` value."""
        pulumi.set(self, "health_probe_id", value)
    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['ScaleSetIdentityArgs']]:
        """
        An `identity` block as documented below.
        """
        return pulumi.get(self, "identity")
    @identity.setter
    def identity(self, value: Optional[pulumi.Input['ScaleSetIdentityArgs']]):
        """Replace the stored `identity` value."""
        pulumi.set(self, "identity", value)
    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
        """
        return pulumi.get(self, "license_type")
    @license_type.setter
    def license_type(self, value: Optional[pulumi.Input[str]]):
        """Replace the stored `license_type` value."""
        pulumi.set(self, "license_type", value)
    # Provider property name is the same as the attribute name.
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    # Provider property name is the same as the attribute name.
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    # Maps to provider property "osProfileLinuxConfig".
    @property
    @pulumi.getter(name="osProfileLinuxConfig")
    def os_profile_linux_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]:
        """
        A Linux config block as documented below.
        """
        return pulumi.get(self, "os_profile_linux_config")
    @os_profile_linux_config.setter
    def os_profile_linux_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]):
        pulumi.set(self, "os_profile_linux_config", value)
    # Maps to provider property "osProfileSecrets".
    @property
    @pulumi.getter(name="osProfileSecrets")
    def os_profile_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]:
        """
        A collection of Secret blocks as documented below.
        """
        return pulumi.get(self, "os_profile_secrets")
    @os_profile_secrets.setter
    def os_profile_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]):
        pulumi.set(self, "os_profile_secrets", value)
    # Maps to provider property "osProfileWindowsConfig".
    @property
    @pulumi.getter(name="osProfileWindowsConfig")
    def os_profile_windows_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]:
        """
        A Windows config block as documented below.
        """
        return pulumi.get(self, "os_profile_windows_config")
    @os_profile_windows_config.setter
    def os_profile_windows_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]):
        pulumi.set(self, "os_profile_windows_config", value)
    # Provider property name is the same as the attribute name.
    @property
    @pulumi.getter
    def overprovision(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
        """
        return pulumi.get(self, "overprovision")
    @overprovision.setter
    def overprovision(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "overprovision", value)
    # Provider property name is the same as the attribute name.
    @property
    @pulumi.getter
    def plan(self) -> Optional[pulumi.Input['ScaleSetPlanArgs']]:
        """
        A plan block as documented below.
        """
        return pulumi.get(self, "plan")
    @plan.setter
    def plan(self, value: Optional[pulumi.Input['ScaleSetPlanArgs']]):
        pulumi.set(self, "plan", value)
    # Provider property name is the same as the attribute name.
    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
        """
        return pulumi.get(self, "priority")
    @priority.setter
    def priority(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "priority", value)
    # Maps to provider property "proximityPlacementGroupId".
    @property
    @pulumi.getter(name="proximityPlacementGroupId")
    def proximity_placement_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "proximity_placement_group_id")
    @proximity_placement_group_id.setter
    def proximity_placement_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "proximity_placement_group_id", value)
    # Maps to provider property "rollingUpgradePolicy".
    @property
    @pulumi.getter(name="rollingUpgradePolicy")
    def rolling_upgrade_policy(self) -> Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]:
        """
        A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
        """
        return pulumi.get(self, "rolling_upgrade_policy")
    @rolling_upgrade_policy.setter
    def rolling_upgrade_policy(self, value: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]):
        pulumi.set(self, "rolling_upgrade_policy", value)
    # Maps to provider property "singlePlacementGroup".
    @property
    @pulumi.getter(name="singlePlacementGroup")
    def single_placement_group(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
        """
        return pulumi.get(self, "single_placement_group")
    @single_placement_group.setter
    def single_placement_group(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "single_placement_group", value)
    # Maps to provider property "storageProfileDataDisks".
    @property
    @pulumi.getter(name="storageProfileDataDisks")
    def storage_profile_data_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]:
        """
        A storage profile data disk block as documented below.
        """
        return pulumi.get(self, "storage_profile_data_disks")
    @storage_profile_data_disks.setter
    def storage_profile_data_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]):
        pulumi.set(self, "storage_profile_data_disks", value)
    # Maps to provider property "storageProfileImageReference".
    @property
    @pulumi.getter(name="storageProfileImageReference")
    def storage_profile_image_reference(self) -> Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]:
        """
        A storage profile image reference block as documented below.
        """
        return pulumi.get(self, "storage_profile_image_reference")
    @storage_profile_image_reference.setter
    def storage_profile_image_reference(self, value: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]):
        pulumi.set(self, "storage_profile_image_reference", value)
    # Provider property name is the same as the attribute name.
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    # Provider property name is the same as the attribute name.
    @property
    @pulumi.getter
    def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A collection of availability zones to spread the Virtual Machines over.
        """
        return pulumi.get(self, "zones")
    @zones.setter
    def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "zones", value)
@pulumi.input_type
class _ScaleSetState:
    def __init__(__self__, *,
                 automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
                 boot_diagnostics: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']] = None,
                 eviction_policy: Optional[pulumi.Input[str]] = None,
                 extensions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]] = None,
                 health_probe_id: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input['ScaleSetIdentityArgs']] = None,
                 license_type: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]] = None,
                 os_profile: Optional[pulumi.Input['ScaleSetOsProfileArgs']] = None,
                 os_profile_linux_config: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']] = None,
                 os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]] = None,
                 os_profile_windows_config: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']] = None,
                 overprovision: Optional[pulumi.Input[bool]] = None,
                 plan: Optional[pulumi.Input['ScaleSetPlanArgs']] = None,
                 priority: Optional[pulumi.Input[str]] = None,
                 proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 rolling_upgrade_policy: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']] = None,
                 single_placement_group: Optional[pulumi.Input[bool]] = None,
                 sku: Optional[pulumi.Input['ScaleSetSkuArgs']] = None,
                 storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]] = None,
                 storage_profile_image_reference: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']] = None,
                 storage_profile_os_disk: Optional[pulumi.Input['ScaleSetStorageProfileOsDiskArgs']] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
                 zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        Input properties used for looking up and filtering ScaleSet resources.
        :param pulumi.Input[bool] automatic_os_upgrade: Automatic OS patches can be applied by Azure to your scaleset. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
        :param pulumi.Input['ScaleSetBootDiagnosticsArgs'] boot_diagnostics: A boot diagnostics profile block as referenced below.
        :param pulumi.Input[str] eviction_policy: Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
        :param pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]] extensions: Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
        :param pulumi.Input[str] health_probe_id: Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
        :param pulumi.Input['ScaleSetIdentityArgs'] identity: An `identity` block; see `ScaleSetIdentityArgs` for the supported fields.
        :param pulumi.Input[str] license_type: Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
        :param pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]] network_profiles: A collection of network profile block as documented below.
        :param pulumi.Input['ScaleSetOsProfileArgs'] os_profile: A Virtual Machine OS Profile block as documented below.
        :param pulumi.Input['ScaleSetOsProfileLinuxConfigArgs'] os_profile_linux_config: A Linux config block as documented below.
        :param pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]] os_profile_secrets: A collection of Secret blocks as documented below.
        :param pulumi.Input['ScaleSetOsProfileWindowsConfigArgs'] os_profile_windows_config: A Windows config block as documented below.
        :param pulumi.Input[bool] overprovision: Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
        :param pulumi.Input['ScaleSetPlanArgs'] plan: A plan block as documented below.
        :param pulumi.Input[str] priority: Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
        :param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
        :param pulumi.Input['ScaleSetRollingUpgradePolicyArgs'] rolling_upgrade_policy: A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
        :param pulumi.Input[bool] single_placement_group: Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
        :param pulumi.Input['ScaleSetSkuArgs'] sku: A sku block as documented below.
        :param pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]] storage_profile_data_disks: A storage profile data disk block as documented below.
        :param pulumi.Input['ScaleSetStorageProfileImageReferenceArgs'] storage_profile_image_reference: A storage profile image reference block as documented below.
        :param pulumi.Input['ScaleSetStorageProfileOsDiskArgs'] storage_profile_os_disk: A storage profile os disk block as documented below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        :param pulumi.Input[str] upgrade_policy_mode: Specifies the mode of an upgrade to virtual machines in the scale set. Possible values, `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A collection of availability zones to spread the Virtual Machines over.
        """
        # Only explicitly supplied values are recorded; a None argument leaves the
        # corresponding state field unset.
        if automatic_os_upgrade is not None:
            pulumi.set(__self__, "automatic_os_upgrade", automatic_os_upgrade)
        if boot_diagnostics is not None:
            pulumi.set(__self__, "boot_diagnostics", boot_diagnostics)
        if eviction_policy is not None:
            pulumi.set(__self__, "eviction_policy", eviction_policy)
        if extensions is not None:
            pulumi.set(__self__, "extensions", extensions)
        if health_probe_id is not None:
            pulumi.set(__self__, "health_probe_id", health_probe_id)
        if identity is not None:
            pulumi.set(__self__, "identity", identity)
        if license_type is not None:
            pulumi.set(__self__, "license_type", license_type)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if network_profiles is not None:
            pulumi.set(__self__, "network_profiles", network_profiles)
        if os_profile is not None:
            pulumi.set(__self__, "os_profile", os_profile)
        if os_profile_linux_config is not None:
            pulumi.set(__self__, "os_profile_linux_config", os_profile_linux_config)
        if os_profile_secrets is not None:
            pulumi.set(__self__, "os_profile_secrets", os_profile_secrets)
        if os_profile_windows_config is not None:
            pulumi.set(__self__, "os_profile_windows_config", os_profile_windows_config)
        if overprovision is not None:
            pulumi.set(__self__, "overprovision", overprovision)
        if plan is not None:
            pulumi.set(__self__, "plan", plan)
        if priority is not None:
            pulumi.set(__self__, "priority", priority)
        if proximity_placement_group_id is not None:
            pulumi.set(__self__, "proximity_placement_group_id", proximity_placement_group_id)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)
        if rolling_upgrade_policy is not None:
            pulumi.set(__self__, "rolling_upgrade_policy", rolling_upgrade_policy)
        if single_placement_group is not None:
            pulumi.set(__self__, "single_placement_group", single_placement_group)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if storage_profile_data_disks is not None:
            pulumi.set(__self__, "storage_profile_data_disks", storage_profile_data_disks)
        if storage_profile_image_reference is not None:
            pulumi.set(__self__, "storage_profile_image_reference", storage_profile_image_reference)
        if storage_profile_os_disk is not None:
            pulumi.set(__self__, "storage_profile_os_disk", storage_profile_os_disk)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if upgrade_policy_mode is not None:
            pulumi.set(__self__, "upgrade_policy_mode", upgrade_policy_mode)
        if zones is not None:
            pulumi.set(__self__, "zones", zones)
    @property
    @pulumi.getter(name="automaticOsUpgrade")
    def automatic_os_upgrade(self) -> Optional[pulumi.Input[bool]]:
        """
        Automatic OS patches can be applied by Azure to your scaleset. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
        """
        return pulumi.get(self, "automatic_os_upgrade")
    @automatic_os_upgrade.setter
    def automatic_os_upgrade(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "automatic_os_upgrade", value)
    @property
    @pulumi.getter(name="bootDiagnostics")
    def boot_diagnostics(self) -> Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]:
        """
        A boot diagnostics profile block as referenced below.
        """
        return pulumi.get(self, "boot_diagnostics")
    @boot_diagnostics.setter
    def boot_diagnostics(self, value: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]):
        pulumi.set(self, "boot_diagnostics", value)
    @property
    @pulumi.getter(name="evictionPolicy")
    def eviction_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
        """
        return pulumi.get(self, "eviction_policy")
    @eviction_policy.setter
    def eviction_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "eviction_policy", value)
    @property
    @pulumi.getter
    def extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]:
        """
        Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
        """
        return pulumi.get(self, "extensions")
    @extensions.setter
    def extensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]):
        pulumi.set(self, "extensions", value)
    @property
    @pulumi.getter(name="healthProbeId")
    def health_probe_id(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
        """
        return pulumi.get(self, "health_probe_id")
    @health_probe_id.setter
    def health_probe_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "health_probe_id", value)
    @property
    @pulumi.getter
    def identity(self) -> Optional[pulumi.Input['ScaleSetIdentityArgs']]:
        """
        An `identity` block; see `ScaleSetIdentityArgs` for the supported fields.
        """
        return pulumi.get(self, "identity")
    @identity.setter
    def identity(self, value: Optional[pulumi.Input['ScaleSetIdentityArgs']]):
        pulumi.set(self, "identity", value)
    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
        """
        return pulumi.get(self, "license_type")
    @license_type.setter
    def license_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "license_type", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="networkProfiles")
    def network_profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]]:
        """
        A collection of network profile block as documented below.
        """
        return pulumi.get(self, "network_profiles")
    @network_profiles.setter
    def network_profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]]):
        pulumi.set(self, "network_profiles", value)
    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional[pulumi.Input['ScaleSetOsProfileArgs']]:
        """
        A Virtual Machine OS Profile block as documented below.
        """
        return pulumi.get(self, "os_profile")
    @os_profile.setter
    def os_profile(self, value: Optional[pulumi.Input['ScaleSetOsProfileArgs']]):
        pulumi.set(self, "os_profile", value)
    @property
    @pulumi.getter(name="osProfileLinuxConfig")
    def os_profile_linux_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]:
        """
        A Linux config block as documented below.
        """
        return pulumi.get(self, "os_profile_linux_config")
    @os_profile_linux_config.setter
    def os_profile_linux_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]):
        pulumi.set(self, "os_profile_linux_config", value)
    @property
    @pulumi.getter(name="osProfileSecrets")
    def os_profile_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]:
        """
        A collection of Secret blocks as documented below.
        """
        return pulumi.get(self, "os_profile_secrets")
    @os_profile_secrets.setter
    def os_profile_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]):
        pulumi.set(self, "os_profile_secrets", value)
    @property
    @pulumi.getter(name="osProfileWindowsConfig")
    def os_profile_windows_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]:
        """
        A Windows config block as documented below.
        """
        return pulumi.get(self, "os_profile_windows_config")
    @os_profile_windows_config.setter
    def os_profile_windows_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]):
        pulumi.set(self, "os_profile_windows_config", value)
    @property
    @pulumi.getter
    def overprovision(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
        """
        return pulumi.get(self, "overprovision")
    @overprovision.setter
    def overprovision(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "overprovision", value)
    @property
    @pulumi.getter
    def plan(self) -> Optional[pulumi.Input['ScaleSetPlanArgs']]:
        """
        A plan block as documented below.
        """
        return pulumi.get(self, "plan")
    @plan.setter
    def plan(self, value: Optional[pulumi.Input['ScaleSetPlanArgs']]):
        pulumi.set(self, "plan", value)
    @property
    @pulumi.getter
    def priority(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
        """
        return pulumi.get(self, "priority")
    @priority.setter
    def priority(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "priority", value)
    @property
    @pulumi.getter(name="proximityPlacementGroupId")
    def proximity_placement_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "proximity_placement_group_id")
    @proximity_placement_group_id.setter
    def proximity_placement_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "proximity_placement_group_id", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="rollingUpgradePolicy")
    def rolling_upgrade_policy(self) -> Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]:
        """
        A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
        """
        return pulumi.get(self, "rolling_upgrade_policy")
    @rolling_upgrade_policy.setter
    def rolling_upgrade_policy(self, value: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]):
        pulumi.set(self, "rolling_upgrade_policy", value)
    @property
    @pulumi.getter(name="singlePlacementGroup")
    def single_placement_group(self) -> Optional[pulumi.Input[bool]]:
        """
        Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
        """
        return pulumi.get(self, "single_placement_group")
    @single_placement_group.setter
    def single_placement_group(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "single_placement_group", value)
    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input['ScaleSetSkuArgs']]:
        """
        A sku block as documented below.
        """
        return pulumi.get(self, "sku")
    @sku.setter
    def sku(self, value: Optional[pulumi.Input['ScaleSetSkuArgs']]):
        pulumi.set(self, "sku", value)
    @property
    @pulumi.getter(name="storageProfileDataDisks")
    def storage_profile_data_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]:
        """
        A storage profile data disk block as documented below.
        """
        return pulumi.get(self, "storage_profile_data_disks")
    @storage_profile_data_disks.setter
    def storage_profile_data_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]):
        pulumi.set(self, "storage_profile_data_disks", value)
    @property
    @pulumi.getter(name="storageProfileImageReference")
    def storage_profile_image_reference(self) -> Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]:
        """
        A storage profile image reference block as documented below.
        """
        return pulumi.get(self, "storage_profile_image_reference")
    @storage_profile_image_reference.setter
    def storage_profile_image_reference(self, value: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]):
        pulumi.set(self, "storage_profile_image_reference", value)
    @property
    @pulumi.getter(name="storageProfileOsDisk")
    def storage_profile_os_disk(self) -> Optional[pulumi.Input['ScaleSetStorageProfileOsDiskArgs']]:
        """
        A storage profile os disk block as documented below.
        """
        return pulumi.get(self, "storage_profile_os_disk")
    @storage_profile_os_disk.setter
    def storage_profile_os_disk(self, value: Optional[pulumi.Input['ScaleSetStorageProfileOsDiskArgs']]):
        pulumi.set(self, "storage_profile_os_disk", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="upgradePolicyMode")
    def upgrade_policy_mode(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the mode of an upgrade to virtual machines in the scale set. Possible values, `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
        """
        return pulumi.get(self, "upgrade_policy_mode")
    @upgrade_policy_mode.setter
    def upgrade_policy_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "upgrade_policy_mode", value)
    @property
    @pulumi.getter
    def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        A collection of availability zones to spread the Virtual Machines over.
        """
        return pulumi.get(self, "zones")
    @zones.setter
    def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "zones", value)
class ScaleSet(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
boot_diagnostics: Optional[pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']]] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]]] = None,
health_probe_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ScaleSetIdentityArgs']]] = None,
license_type: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]]] = None,
os_profile: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']]] = None,
os_profile_linux_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']]] = None,
os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]]] = None,
os_profile_windows_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']]] = None,
overprovision: Optional[pulumi.Input[bool]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']]] = None,
priority: Optional[pulumi.Input[str]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rolling_upgrade_policy: Optional[pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']]] = None,
single_placement_group: Optional[pulumi.Input[bool]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']]] = None,
storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]]] = None,
storage_profile_image_reference: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']]] = None,
storage_profile_os_disk: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a virtual machine scale set.
## Disclaimers
> **Note:** The `compute.ScaleSet` resource has been superseded by the `compute.LinuxVirtualMachineScaleSet`](linux_virtual_machine_scale_set.html) and `compute.WindowsVirtualMachineScaleSet` resources. The existing `compute.ScaleSet` resource will continue to be available throughout the 2.x releases however is in a feature-frozen state to maintain compatibility - new functionality will instead be added to the `compute.LinuxVirtualMachineScaleSet` and `compute.WindowsVirtualMachineScaleSet` resources.
## Example Usage
### With Managed Disks (Recommended)
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
address_spaces=["10.0.0.0/16"],
location=example_resource_group.location,
resource_group_name=example_resource_group.name)
example_subnet = azure.network.Subnet("exampleSubnet",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.2.0/24"])
example_public_ip = azure.network.PublicIp("examplePublicIp",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
allocation_method="Static",
domain_name_label=example_resource_group.name,
tags={
"environment": "staging",
})
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
bpepool = azure.lb.BackendAddressPool("bpepool",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id)
lbnatpool = azure.lb.NatPool("lbnatpool",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id,
protocol="Tcp",
frontend_port_start=50000,
frontend_port_end=50119,
backend_port=22,
frontend_ip_configuration_name="PublicIPAddress")
example_probe = azure.lb.Probe("exampleProbe",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id,
protocol="Http",
request_path="/health",
port=8080)
example_scale_set = azure.compute.ScaleSet("exampleScaleSet",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
automatic_os_upgrade=True,
upgrade_policy_mode="Rolling",
rolling_upgrade_policy=azure.compute.ScaleSetRollingUpgradePolicyArgs(
max_batch_instance_percent=20,
max_unhealthy_instance_percent=20,
max_unhealthy_upgraded_instance_percent=5,
pause_time_between_batches="PT0S",
),
health_probe_id=example_probe.id,
sku=azure.compute.ScaleSetSkuArgs(
name="Standard_F2",
tier="Standard",
capacity=2,
),
storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
publisher="Canonical",
offer="UbuntuServer",
sku="16.04-LTS",
version="latest",
),
storage_profile_os_disk=azure.compute.ScaleSetStorageProfileOsDiskArgs(
name="",
caching="ReadWrite",
create_option="FromImage",
managed_disk_type="Standard_LRS",
),
storage_profile_data_disks=[azure.compute.ScaleSetStorageProfileDataDiskArgs(
lun=0,
caching="ReadWrite",
create_option="Empty",
disk_size_gb=10,
)],
os_profile=azure.compute.ScaleSetOsProfileArgs(
computer_name_prefix="testvm",
admin_username="myadmin",
),
os_profile_linux_config=azure.compute.ScaleSetOsProfileLinuxConfigArgs(
disable_password_authentication=True,
ssh_keys=[azure.compute.ScaleSetOsProfileLinuxConfigSshKeyArgs(
path="/home/myadmin/.ssh/authorized_keys",
key_data=(lambda path: open(path).read())("~/.ssh/demo_key.pub"),
)],
),
network_profiles=[azure.compute.ScaleSetNetworkProfileArgs(
name="mynetworkprofile",
primary=True,
ip_configurations=[azure.compute.ScaleSetNetworkProfileIpConfigurationArgs(
name="TestIPConfiguration",
primary=True,
subnet_id=example_subnet.id,
load_balancer_backend_address_pool_ids=[bpepool.id],
load_balancer_inbound_nat_rules_ids=[lbnatpool.id],
)],
)],
tags={
"environment": "staging",
})
```
### With Unmanaged Disks
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
address_spaces=["10.0.0.0/16"],
location="West US",
resource_group_name=example_resource_group.name)
example_subnet = azure.network.Subnet("exampleSubnet",
resource_group_name=example_resource_group.name,
virtual_network_name=example_virtual_network.name,
address_prefixes=["10.0.2.0/24"])
example_account = azure.storage.Account("exampleAccount",
resource_group_name=example_resource_group.name,
location="westus",
account_tier="Standard",
account_replication_type="LRS",
tags={
"environment": "staging",
})
example_container = azure.storage.Container("exampleContainer",
storage_account_name=example_account.name,
container_access_type="private")
example_scale_set = azure.compute.ScaleSet("exampleScaleSet",
location="West US",
resource_group_name=example_resource_group.name,
upgrade_policy_mode="Manual",
sku=azure.compute.ScaleSetSkuArgs(
name="Standard_F2",
tier="Standard",
capacity=2,
),
os_profile=azure.compute.ScaleSetOsProfileArgs(
computer_name_prefix="testvm",
admin_username="myadmin",
),
os_profile_linux_config=azure.compute.ScaleSetOsProfileLinuxConfigArgs(
disable_password_authentication=True,
ssh_keys=[azure.compute.ScaleSetOsProfileLinuxConfigSshKeyArgs(
path="/home/myadmin/.ssh/authorized_keys",
key_data=(lambda path: open(path).read())("~/.ssh/demo_key.pub"),
)],
),
network_profiles=[azure.compute.ScaleSetNetworkProfileArgs(
name="TestNetworkProfile",
primary=True,
ip_configurations=[azure.compute.ScaleSetNetworkProfileIpConfigurationArgs(
name="TestIPConfiguration",
primary=True,
subnet_id=example_subnet.id,
)],
)],
storage_profile_os_disk=azure.compute.ScaleSetStorageProfileOsDiskArgs(
name="osDiskProfile",
caching="ReadWrite",
create_option="FromImage",
vhd_containers=[pulumi.Output.all(example_account.primary_blob_endpoint, example_container.name).apply(lambda primary_blob_endpoint, name: f"{primary_blob_endpoint}{name}")],
),
storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
publisher="Canonical",
offer="UbuntuServer",
sku="16.04-LTS",
version="latest",
))
```
## Example of storage_profile_image_reference with id
```python
import pulumi
import pulumi_azure as azure
example_image = azure.compute.Image("exampleImage")
# ...
example_scale_set = azure.compute.ScaleSet("exampleScaleSet", storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
id=example_image.id,
))
# ...
```
## Import
Virtual Machine Scale Sets can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:compute/scaleSet:ScaleSet scaleset1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/virtualMachineScaleSets/scaleset1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] automatic_os_upgrade: Automatic OS patches can be applied by Azure to your scaleset. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
:param pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']] boot_diagnostics: A boot diagnostics profile block as referenced below.
:param pulumi.Input[str] eviction_policy: Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]] extensions: Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
:param pulumi.Input[str] health_probe_id: Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
:param pulumi.Input[str] license_type: Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]] network_profiles: A collection of network profile block as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']] os_profile: A Virtual Machine OS Profile block as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']] os_profile_linux_config: A Linux config block as documented below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]] os_profile_secrets: A collection of Secret blocks as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']] os_profile_windows_config: A Windows config block as documented below.
:param pulumi.Input[bool] overprovision: Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
:param pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']] plan: A plan block as documented below.
:param pulumi.Input[str] priority: Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
:param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']] rolling_upgrade_policy: A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
:param pulumi.Input[bool] single_placement_group: Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
:param pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']] sku: A sku block as documented below.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]] storage_profile_data_disks: A storage profile data disk block as documented below
:param pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']] storage_profile_image_reference: A storage profile image reference block as documented below.
:param pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']] storage_profile_os_disk: A storage profile os disk block as documented below
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] upgrade_policy_mode: Specifies the mode of an upgrade to virtual machines in the scale set. Possible values, `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A collection of availability zones to spread the Virtual Machines over.
"""
...
@overload
def __init__(__self__,
             resource_name: str,
             args: ScaleSetArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Manages a virtual machine scale set.

    ## Disclaimers

    > **Note:** The `compute.ScaleSet` resource has been superseded by the [`compute.LinuxVirtualMachineScaleSet`](linux_virtual_machine_scale_set.html) and `compute.WindowsVirtualMachineScaleSet` resources. The existing `compute.ScaleSet` resource will continue to be available throughout the 2.x releases however is in a feature-frozen state to maintain compatibility - new functionality will instead be added to the `compute.LinuxVirtualMachineScaleSet` and `compute.WindowsVirtualMachineScaleSet` resources.

    ## Example Usage
    ### With Managed Disks (Recommended)

    ```python
    import pulumi
    import pulumi_azure as azure

    example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
    example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
        address_spaces=["10.0.0.0/16"],
        location=example_resource_group.location,
        resource_group_name=example_resource_group.name)
    example_subnet = azure.network.Subnet("exampleSubnet",
        resource_group_name=example_resource_group.name,
        virtual_network_name=example_virtual_network.name,
        address_prefixes=["10.0.2.0/24"])
    example_public_ip = azure.network.PublicIp("examplePublicIp",
        location=example_resource_group.location,
        resource_group_name=example_resource_group.name,
        allocation_method="Static",
        domain_name_label=example_resource_group.name,
        tags={
            "environment": "staging",
        })
    example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
        location=example_resource_group.location,
        resource_group_name=example_resource_group.name,
        frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
            name="PublicIPAddress",
            public_ip_address_id=example_public_ip.id,
        )])
    bpepool = azure.lb.BackendAddressPool("bpepool",
        resource_group_name=example_resource_group.name,
        loadbalancer_id=example_load_balancer.id)
    lbnatpool = azure.lb.NatPool("lbnatpool",
        resource_group_name=example_resource_group.name,
        loadbalancer_id=example_load_balancer.id,
        protocol="Tcp",
        frontend_port_start=50000,
        frontend_port_end=50119,
        backend_port=22,
        frontend_ip_configuration_name="PublicIPAddress")
    example_probe = azure.lb.Probe("exampleProbe",
        resource_group_name=example_resource_group.name,
        loadbalancer_id=example_load_balancer.id,
        protocol="Http",
        request_path="/health",
        port=8080)
    example_scale_set = azure.compute.ScaleSet("exampleScaleSet",
        location=example_resource_group.location,
        resource_group_name=example_resource_group.name,
        automatic_os_upgrade=True,
        upgrade_policy_mode="Rolling",
        rolling_upgrade_policy=azure.compute.ScaleSetRollingUpgradePolicyArgs(
            max_batch_instance_percent=20,
            max_unhealthy_instance_percent=20,
            max_unhealthy_upgraded_instance_percent=5,
            pause_time_between_batches="PT0S",
        ),
        health_probe_id=example_probe.id,
        sku=azure.compute.ScaleSetSkuArgs(
            name="Standard_F2",
            tier="Standard",
            capacity=2,
        ),
        storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
            publisher="Canonical",
            offer="UbuntuServer",
            sku="16.04-LTS",
            version="latest",
        ),
        storage_profile_os_disk=azure.compute.ScaleSetStorageProfileOsDiskArgs(
            name="",
            caching="ReadWrite",
            create_option="FromImage",
            managed_disk_type="Standard_LRS",
        ),
        storage_profile_data_disks=[azure.compute.ScaleSetStorageProfileDataDiskArgs(
            lun=0,
            caching="ReadWrite",
            create_option="Empty",
            disk_size_gb=10,
        )],
        os_profile=azure.compute.ScaleSetOsProfileArgs(
            computer_name_prefix="testvm",
            admin_username="myadmin",
        ),
        os_profile_linux_config=azure.compute.ScaleSetOsProfileLinuxConfigArgs(
            disable_password_authentication=True,
            ssh_keys=[azure.compute.ScaleSetOsProfileLinuxConfigSshKeyArgs(
                path="/home/myadmin/.ssh/authorized_keys",
                key_data=(lambda path: open(path).read())("~/.ssh/demo_key.pub"),
            )],
        ),
        network_profiles=[azure.compute.ScaleSetNetworkProfileArgs(
            name="mynetworkprofile",
            primary=True,
            ip_configurations=[azure.compute.ScaleSetNetworkProfileIpConfigurationArgs(
                name="TestIPConfiguration",
                primary=True,
                subnet_id=example_subnet.id,
                load_balancer_backend_address_pool_ids=[bpepool.id],
                load_balancer_inbound_nat_rules_ids=[lbnatpool.id],
            )],
        )],
        tags={
            "environment": "staging",
        })
    ```
    ### With Unmanaged Disks

    ```python
    import pulumi
    import pulumi_azure as azure

    example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
    example_virtual_network = azure.network.VirtualNetwork("exampleVirtualNetwork",
        address_spaces=["10.0.0.0/16"],
        location="West US",
        resource_group_name=example_resource_group.name)
    example_subnet = azure.network.Subnet("exampleSubnet",
        resource_group_name=example_resource_group.name,
        virtual_network_name=example_virtual_network.name,
        address_prefixes=["10.0.2.0/24"])
    example_account = azure.storage.Account("exampleAccount",
        resource_group_name=example_resource_group.name,
        location="westus",
        account_tier="Standard",
        account_replication_type="LRS",
        tags={
            "environment": "staging",
        })
    example_container = azure.storage.Container("exampleContainer",
        storage_account_name=example_account.name,
        container_access_type="private")
    example_scale_set = azure.compute.ScaleSet("exampleScaleSet",
        location="West US",
        resource_group_name=example_resource_group.name,
        upgrade_policy_mode="Manual",
        sku=azure.compute.ScaleSetSkuArgs(
            name="Standard_F2",
            tier="Standard",
            capacity=2,
        ),
        os_profile=azure.compute.ScaleSetOsProfileArgs(
            computer_name_prefix="testvm",
            admin_username="myadmin",
        ),
        os_profile_linux_config=azure.compute.ScaleSetOsProfileLinuxConfigArgs(
            disable_password_authentication=True,
            ssh_keys=[azure.compute.ScaleSetOsProfileLinuxConfigSshKeyArgs(
                path="/home/myadmin/.ssh/authorized_keys",
                key_data=(lambda path: open(path).read())("~/.ssh/demo_key.pub"),
            )],
        ),
        network_profiles=[azure.compute.ScaleSetNetworkProfileArgs(
            name="TestNetworkProfile",
            primary=True,
            ip_configurations=[azure.compute.ScaleSetNetworkProfileIpConfigurationArgs(
                name="TestIPConfiguration",
                primary=True,
                subnet_id=example_subnet.id,
            )],
        )],
        storage_profile_os_disk=azure.compute.ScaleSetStorageProfileOsDiskArgs(
            name="osDiskProfile",
            caching="ReadWrite",
            create_option="FromImage",
            vhd_containers=[pulumi.Output.all(example_account.primary_blob_endpoint, example_container.name).apply(lambda primary_blob_endpoint, name: f"{primary_blob_endpoint}{name}")],
        ),
        storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
            publisher="Canonical",
            offer="UbuntuServer",
            sku="16.04-LTS",
            version="latest",
        ))
    ```
    ## Example of storage_profile_image_reference with id

    ```python
    import pulumi
    import pulumi_azure as azure

    example_image = azure.compute.Image("exampleImage")
    # ...
    example_scale_set = azure.compute.ScaleSet("exampleScaleSet", storage_profile_image_reference=azure.compute.ScaleSetStorageProfileImageReferenceArgs(
        id=example_image.id,
    ))
    # ...
    ```
    ## Import

    Virtual Machine Scale Sets can be imported using the `resource id`, e.g.

    ```sh
    $ pulumi import azure:compute/scaleSet:ScaleSet scaleset1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/virtualMachineScaleSets/scaleset1
    ```

    :param str resource_name: The name of the resource.
    :param ScaleSetArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """
    Dispatch construction to ``_internal_init``.

    Accepts either a single ``ScaleSetArgs`` bundle (plus optional
    ``ResourceOptions``) or the flat keyword-argument form, and normalises
    both to the same internal call.
    """
    parsed_args, parsed_opts = _utilities.get_resource_args_opts(
        ScaleSetArgs, pulumi.ResourceOptions, *args, **kwargs)
    if parsed_args is None:
        # Keyword-argument form: forward everything untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object form: unpack the bundle into keyword arguments.
        __self__._internal_init(resource_name, parsed_opts, **parsed_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
                   boot_diagnostics: Optional[pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']]] = None,
                   eviction_policy: Optional[pulumi.Input[str]] = None,
                   extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]]] = None,
                   health_probe_id: Optional[pulumi.Input[str]] = None,
                   identity: Optional[pulumi.Input[pulumi.InputType['ScaleSetIdentityArgs']]] = None,
                   license_type: Optional[pulumi.Input[str]] = None,
                   location: Optional[pulumi.Input[str]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]]] = None,
                   os_profile: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']]] = None,
                   os_profile_linux_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']]] = None,
                   os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]]] = None,
                   os_profile_windows_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']]] = None,
                   overprovision: Optional[pulumi.Input[bool]] = None,
                   plan: Optional[pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']]] = None,
                   priority: Optional[pulumi.Input[str]] = None,
                   proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
                   resource_group_name: Optional[pulumi.Input[str]] = None,
                   rolling_upgrade_policy: Optional[pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']]] = None,
                   single_placement_group: Optional[pulumi.Input[bool]] = None,
                   sku: Optional[pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']]] = None,
                   storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]]] = None,
                   storage_profile_image_reference: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']]] = None,
                   storage_profile_os_disk: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']]] = None,
                   tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                   upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
                   zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   __props__=None):
    """
    Shared implementation behind both ``__init__`` overloads.

    Normalises resource options, enforces the required properties
    (``network_profiles``, ``os_profile``, ``resource_group_name``, ``sku``,
    ``storage_profile_os_disk`` and ``upgrade_policy_mode``) and registers
    the resource with the Pulumi engine via ``super().__init__``.

    :raises TypeError: if ``opts`` is not a ``ResourceOptions``, if
        ``__props__`` is supplied without ``opts.id``, or if a required
        property is missing when creating a new resource.
    """
    # Normalise and validate the resource options before use.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource: build the property bag from the arguments.
        # ``__props__`` is reserved for the get() path (lookup by opts.id).
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = ScaleSetArgs.__new__(ScaleSetArgs)

        __props__.__dict__["automatic_os_upgrade"] = automatic_os_upgrade
        __props__.__dict__["boot_diagnostics"] = boot_diagnostics
        __props__.__dict__["eviction_policy"] = eviction_policy
        __props__.__dict__["extensions"] = extensions
        __props__.__dict__["health_probe_id"] = health_probe_id
        __props__.__dict__["identity"] = identity
        __props__.__dict__["license_type"] = license_type
        __props__.__dict__["location"] = location
        __props__.__dict__["name"] = name
        # Required properties are only enforced for new resources (opts.urn
        # unset); URN-based lookups may legitimately omit them.
        if network_profiles is None and not opts.urn:
            raise TypeError("Missing required property 'network_profiles'")
        __props__.__dict__["network_profiles"] = network_profiles
        if os_profile is None and not opts.urn:
            raise TypeError("Missing required property 'os_profile'")
        __props__.__dict__["os_profile"] = os_profile
        __props__.__dict__["os_profile_linux_config"] = os_profile_linux_config
        __props__.__dict__["os_profile_secrets"] = os_profile_secrets
        __props__.__dict__["os_profile_windows_config"] = os_profile_windows_config
        __props__.__dict__["overprovision"] = overprovision
        __props__.__dict__["plan"] = plan
        __props__.__dict__["priority"] = priority
        __props__.__dict__["proximity_placement_group_id"] = proximity_placement_group_id
        if resource_group_name is None and not opts.urn:
            raise TypeError("Missing required property 'resource_group_name'")
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["rolling_upgrade_policy"] = rolling_upgrade_policy
        __props__.__dict__["single_placement_group"] = single_placement_group
        if sku is None and not opts.urn:
            raise TypeError("Missing required property 'sku'")
        __props__.__dict__["sku"] = sku
        __props__.__dict__["storage_profile_data_disks"] = storage_profile_data_disks
        __props__.__dict__["storage_profile_image_reference"] = storage_profile_image_reference
        if storage_profile_os_disk is None and not opts.urn:
            raise TypeError("Missing required property 'storage_profile_os_disk'")
        __props__.__dict__["storage_profile_os_disk"] = storage_profile_os_disk
        __props__.__dict__["tags"] = tags
        if upgrade_policy_mode is None and not opts.urn:
            raise TypeError("Missing required property 'upgrade_policy_mode'")
        __props__.__dict__["upgrade_policy_mode"] = upgrade_policy_mode
        __props__.__dict__["zones"] = zones
    # Register the resource with the engine under its Pulumi type token.
    super(ScaleSet, __self__).__init__(
        'azure:compute/scaleSet:ScaleSet',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
        boot_diagnostics: Optional[pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']]] = None,
        eviction_policy: Optional[pulumi.Input[str]] = None,
        extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]]] = None,
        health_probe_id: Optional[pulumi.Input[str]] = None,
        identity: Optional[pulumi.Input[pulumi.InputType['ScaleSetIdentityArgs']]] = None,
        license_type: Optional[pulumi.Input[str]] = None,
        location: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]]] = None,
        os_profile: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']]] = None,
        os_profile_linux_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']]] = None,
        os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]]] = None,
        os_profile_windows_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']]] = None,
        overprovision: Optional[pulumi.Input[bool]] = None,
        plan: Optional[pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']]] = None,
        priority: Optional[pulumi.Input[str]] = None,
        proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
        resource_group_name: Optional[pulumi.Input[str]] = None,
        rolling_upgrade_policy: Optional[pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']]] = None,
        single_placement_group: Optional[pulumi.Input[bool]] = None,
        sku: Optional[pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']]] = None,
        storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]]] = None,
        storage_profile_image_reference: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']]] = None,
        storage_profile_os_disk: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']]] = None,
        tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
        upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
        zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'ScaleSet':
    """
    Look up an existing ScaleSet resource by name and provider ``id``,
    optionally seeding its state with the extra properties given here.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[bool] automatic_os_upgrade: Automatic OS patches can be applied by Azure to your scaleset. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
    :param pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']] boot_diagnostics: A boot diagnostics profile block as referenced below.
    :param pulumi.Input[str] eviction_policy: Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]] extensions: Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
    :param pulumi.Input[str] health_probe_id: Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
    :param pulumi.Input[str] license_type: Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
    :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
    :param pulumi.Input[str] name: Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]] network_profiles: A collection of network profile block as documented below.
    :param pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']] os_profile: A Virtual Machine OS Profile block as documented below.
    :param pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']] os_profile_linux_config: A Linux config block as documented below.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]] os_profile_secrets: A collection of Secret blocks as documented below.
    :param pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']] os_profile_windows_config: A Windows config block as documented below.
    :param pulumi.Input[bool] overprovision: Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
    :param pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']] plan: A plan block as documented below.
    :param pulumi.Input[str] priority: Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
    :param pulumi.Input[str] proximity_placement_group_id: The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created
    :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
    :param pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']] rolling_upgrade_policy: A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
    :param pulumi.Input[bool] single_placement_group: Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
    :param pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']] sku: A sku block as documented below.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]] storage_profile_data_disks: A storage profile data disk block as documented below
    :param pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']] storage_profile_image_reference: A storage profile image reference block as documented below.
    :param pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']] storage_profile_os_disk: A storage profile os disk block as documented below
    :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
    :param pulumi.Input[str] upgrade_policy_mode: Specifies the mode of an upgrade to virtual machines in the scale set. Possible values, `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
    :param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A collection of availability zones to spread the Virtual Machines over.
    """
    # Attach the provider id so the engine performs a lookup, not a create.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # Populate the state bag in a single update instead of one assignment
    # per field; bypass __init__ exactly as the generated code does.
    state = _ScaleSetState.__new__(_ScaleSetState)
    state.__dict__.update(
        automatic_os_upgrade=automatic_os_upgrade,
        boot_diagnostics=boot_diagnostics,
        eviction_policy=eviction_policy,
        extensions=extensions,
        health_probe_id=health_probe_id,
        identity=identity,
        license_type=license_type,
        location=location,
        name=name,
        network_profiles=network_profiles,
        os_profile=os_profile,
        os_profile_linux_config=os_profile_linux_config,
        os_profile_secrets=os_profile_secrets,
        os_profile_windows_config=os_profile_windows_config,
        overprovision=overprovision,
        plan=plan,
        priority=priority,
        proximity_placement_group_id=proximity_placement_group_id,
        resource_group_name=resource_group_name,
        rolling_upgrade_policy=rolling_upgrade_policy,
        single_placement_group=single_placement_group,
        sku=sku,
        storage_profile_data_disks=storage_profile_data_disks,
        storage_profile_image_reference=storage_profile_image_reference,
        storage_profile_os_disk=storage_profile_os_disk,
        tags=tags,
        upgrade_policy_mode=upgrade_policy_mode,
        zones=zones,
    )
    return ScaleSet(resource_name, opts=opts, __props__=state)
@property
@pulumi.getter(name="automaticOsUpgrade")
def automatic_os_upgrade(self) -> pulumi.Output[Optional[bool]]:
"""
Automatic OS patches can be applied by Azure to your scaleset. This is particularly useful when `upgrade_policy_mode` is set to `Rolling`. Defaults to `false`.
"""
return pulumi.get(self, "automatic_os_upgrade")
@property
@pulumi.getter(name="bootDiagnostics")
def boot_diagnostics(self) -> pulumi.Output[Optional['outputs.ScaleSetBootDiagnostics']]:
"""
A boot diagnostics profile block as referenced below.
"""
return pulumi.get(self, "boot_diagnostics")
@property
@pulumi.getter(name="evictionPolicy")
def eviction_policy(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the eviction policy for Virtual Machines in this Scale Set. Possible values are `Deallocate` and `Delete`.
"""
return pulumi.get(self, "eviction_policy")
@property
@pulumi.getter
def extensions(self) -> pulumi.Output[Optional[Sequence['outputs.ScaleSetExtension']]]:
"""
Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
"""
return pulumi.get(self, "extensions")
@property
@pulumi.getter(name="healthProbeId")
def health_probe_id(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the identifier for the load balancer health probe. Required when using `Rolling` as your `upgrade_policy_mode`.
"""
return pulumi.get(self, "health_probe_id")
@property
@pulumi.getter
def identity(self) -> pulumi.Output['outputs.ScaleSetIdentity']:
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> pulumi.Output[str]:
"""
Specifies the Windows OS license type. If supplied, the only allowed values are `Windows_Client` and `Windows_Server`.
"""
return pulumi.get(self, "license_type")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkProfiles")
def network_profiles(self) -> pulumi.Output[Sequence['outputs.ScaleSetNetworkProfile']]:
"""
A collection of network profile block as documented below.
"""
return pulumi.get(self, "network_profiles")
@property
@pulumi.getter(name="osProfile")
def os_profile(self) -> pulumi.Output['outputs.ScaleSetOsProfile']:
"""
A Virtual Machine OS Profile block as documented below.
"""
return pulumi.get(self, "os_profile")
@property
@pulumi.getter(name="osProfileLinuxConfig")
def os_profile_linux_config(self) -> pulumi.Output['outputs.ScaleSetOsProfileLinuxConfig']:
"""
A Linux config block as documented below.
"""
return pulumi.get(self, "os_profile_linux_config")
@property
@pulumi.getter(name="osProfileSecrets")
def os_profile_secrets(self) -> pulumi.Output[Optional[Sequence['outputs.ScaleSetOsProfileSecret']]]:
"""
A collection of Secret blocks as documented below.
"""
return pulumi.get(self, "os_profile_secrets")
@property
@pulumi.getter(name="osProfileWindowsConfig")
def os_profile_windows_config(self) -> pulumi.Output[Optional['outputs.ScaleSetOsProfileWindowsConfig']]:
"""
A Windows config block as documented below.
"""
return pulumi.get(self, "os_profile_windows_config")
@property
@pulumi.getter
def overprovision(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies whether the virtual machine scale set should be overprovisioned. Defaults to `true`.
"""
return pulumi.get(self, "overprovision")
@property
@pulumi.getter
def plan(self) -> pulumi.Output[Optional['outputs.ScaleSetPlan']]:
"""
A plan block as documented below.
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the priority for the Virtual Machines in the Scale Set. Defaults to `Regular`. Possible values are `Low` and `Regular`.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter(name="proximityPlacementGroupId")
def proximity_placement_group_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID of the Proximity Placement Group to which this Virtual Machine should be assigned. Changing this forces a new resource to be created
"""
return pulumi.get(self, "proximity_placement_group_id")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="rollingUpgradePolicy")
def rolling_upgrade_policy(self) -> pulumi.Output[Optional['outputs.ScaleSetRollingUpgradePolicy']]:
"""
A `rolling_upgrade_policy` block as defined below. This is only applicable when the `upgrade_policy_mode` is `Rolling`.
"""
return pulumi.get(self, "rolling_upgrade_policy")
@property
@pulumi.getter(name="singlePlacementGroup")
def single_placement_group(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
"""
return pulumi.get(self, "single_placement_group")
@property
@pulumi.getter
def sku(self) -> pulumi.Output['outputs.ScaleSetSku']:
"""
A sku block as documented below.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="storageProfileDataDisks")
def storage_profile_data_disks(self) -> pulumi.Output[Optional[Sequence['outputs.ScaleSetStorageProfileDataDisk']]]:
"""
A storage profile data disk block as documented below
"""
return pulumi.get(self, "storage_profile_data_disks")
@property
@pulumi.getter(name="storageProfileImageReference")
def storage_profile_image_reference(self) -> pulumi.Output['outputs.ScaleSetStorageProfileImageReference']:
"""
A storage profile image reference block as documented below.
"""
return pulumi.get(self, "storage_profile_image_reference")
@property
@pulumi.getter(name="storageProfileOsDisk")
def storage_profile_os_disk(self) -> pulumi.Output['outputs.ScaleSetStorageProfileOsDisk']:
"""
A storage profile os disk block as documented below
"""
return pulumi.get(self, "storage_profile_os_disk")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="upgradePolicyMode")
def upgrade_policy_mode(self) -> pulumi.Output[str]:
"""
Specifies the mode of an upgrade to virtual machines in the scale set. Possible values, `Rolling`, `Manual`, or `Automatic`. When choosing `Rolling`, you will need to set a health probe.
"""
return pulumi.get(self, "upgrade_policy_mode")
@property
@pulumi.getter
def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A collection of availability zones to spread the Virtual Machines over.
"""
return pulumi.get(self, "zones")
| 55.064906 | 514 | 0.6852 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ScaleSetArgs', 'ScaleSet']
@pulumi.input_type
class ScaleSetArgs:
def __init__(__self__, *,
network_profiles: pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]],
os_profile: pulumi.Input['ScaleSetOsProfileArgs'],
resource_group_name: pulumi.Input[str],
sku: pulumi.Input['ScaleSetSkuArgs'],
storage_profile_os_disk: pulumi.Input['ScaleSetStorageProfileOsDiskArgs'],
upgrade_policy_mode: pulumi.Input[str],
automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
boot_diagnostics: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
extensions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]] = None,
health_probe_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ScaleSetIdentityArgs']] = None,
license_type: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
os_profile_linux_config: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']] = None,
os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]] = None,
os_profile_windows_config: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']] = None,
overprovision: Optional[pulumi.Input[bool]] = None,
plan: Optional[pulumi.Input['ScaleSetPlanArgs']] = None,
priority: Optional[pulumi.Input[str]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
rolling_upgrade_policy: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']] = None,
single_placement_group: Optional[pulumi.Input[bool]] = None,
storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]] = None,
storage_profile_image_reference: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
pulumi.set(__self__, "network_profiles", network_profiles)
pulumi.set(__self__, "os_profile", os_profile)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "sku", sku)
pulumi.set(__self__, "storage_profile_os_disk", storage_profile_os_disk)
pulumi.set(__self__, "upgrade_policy_mode", upgrade_policy_mode)
if automatic_os_upgrade is not None:
pulumi.set(__self__, "automatic_os_upgrade", automatic_os_upgrade)
if boot_diagnostics is not None:
pulumi.set(__self__, "boot_diagnostics", boot_diagnostics)
if eviction_policy is not None:
pulumi.set(__self__, "eviction_policy", eviction_policy)
if extensions is not None:
pulumi.set(__self__, "extensions", extensions)
if health_probe_id is not None:
pulumi.set(__self__, "health_probe_id", health_probe_id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if license_type is not None:
pulumi.set(__self__, "license_type", license_type)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if os_profile_linux_config is not None:
pulumi.set(__self__, "os_profile_linux_config", os_profile_linux_config)
if os_profile_secrets is not None:
pulumi.set(__self__, "os_profile_secrets", os_profile_secrets)
if os_profile_windows_config is not None:
pulumi.set(__self__, "os_profile_windows_config", os_profile_windows_config)
if overprovision is not None:
pulumi.set(__self__, "overprovision", overprovision)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if proximity_placement_group_id is not None:
pulumi.set(__self__, "proximity_placement_group_id", proximity_placement_group_id)
if rolling_upgrade_policy is not None:
pulumi.set(__self__, "rolling_upgrade_policy", rolling_upgrade_policy)
if single_placement_group is not None:
pulumi.set(__self__, "single_placement_group", single_placement_group)
if storage_profile_data_disks is not None:
pulumi.set(__self__, "storage_profile_data_disks", storage_profile_data_disks)
if storage_profile_image_reference is not None:
pulumi.set(__self__, "storage_profile_image_reference", storage_profile_image_reference)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if zones is not None:
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="networkProfiles")
def network_profiles(self) -> pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]:
return pulumi.get(self, "network_profiles")
@network_profiles.setter
def network_profiles(self, value: pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]):
pulumi.set(self, "network_profiles", value)
@property
@pulumi.getter(name="osProfile")
def os_profile(self) -> pulumi.Input['ScaleSetOsProfileArgs']:
return pulumi.get(self, "os_profile")
@os_profile.setter
def os_profile(self, value: pulumi.Input['ScaleSetOsProfileArgs']):
pulumi.set(self, "os_profile", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def sku(self) -> pulumi.Input['ScaleSetSkuArgs']:
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: pulumi.Input['ScaleSetSkuArgs']):
pulumi.set(self, "sku", value)
@property
@pulumi.getter(name="storageProfileOsDisk")
def storage_profile_os_disk(self) -> pulumi.Input['ScaleSetStorageProfileOsDiskArgs']:
return pulumi.get(self, "storage_profile_os_disk")
@storage_profile_os_disk.setter
def storage_profile_os_disk(self, value: pulumi.Input['ScaleSetStorageProfileOsDiskArgs']):
pulumi.set(self, "storage_profile_os_disk", value)
@property
@pulumi.getter(name="upgradePolicyMode")
def upgrade_policy_mode(self) -> pulumi.Input[str]:
return pulumi.get(self, "upgrade_policy_mode")
@upgrade_policy_mode.setter
def upgrade_policy_mode(self, value: pulumi.Input[str]):
pulumi.set(self, "upgrade_policy_mode", value)
@property
@pulumi.getter(name="automaticOsUpgrade")
def automatic_os_upgrade(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "automatic_os_upgrade")
@automatic_os_upgrade.setter
def automatic_os_upgrade(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automatic_os_upgrade", value)
@property
@pulumi.getter(name="bootDiagnostics")
def boot_diagnostics(self) -> Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]:
return pulumi.get(self, "boot_diagnostics")
@boot_diagnostics.setter
def boot_diagnostics(self, value: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]):
pulumi.set(self, "boot_diagnostics", value)
@property
@pulumi.getter(name="evictionPolicy")
def eviction_policy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "eviction_policy")
@eviction_policy.setter
def eviction_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "eviction_policy", value)
@property
@pulumi.getter
def extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]:
return pulumi.get(self, "extensions")
@extensions.setter
def extensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]):
pulumi.set(self, "extensions", value)
@property
@pulumi.getter(name="healthProbeId")
def health_probe_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "health_probe_id")
@health_probe_id.setter
def health_probe_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "health_probe_id", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ScaleSetIdentityArgs']]:
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ScaleSetIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "license_type")
@license_type.setter
def license_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "license_type", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="osProfileLinuxConfig")
def os_profile_linux_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]:
return pulumi.get(self, "os_profile_linux_config")
@os_profile_linux_config.setter
def os_profile_linux_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]):
pulumi.set(self, "os_profile_linux_config", value)
@property
@pulumi.getter(name="osProfileSecrets")
def os_profile_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]:
return pulumi.get(self, "os_profile_secrets")
@os_profile_secrets.setter
def os_profile_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]):
pulumi.set(self, "os_profile_secrets", value)
@property
@pulumi.getter(name="osProfileWindowsConfig")
def os_profile_windows_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]:
return pulumi.get(self, "os_profile_windows_config")
@os_profile_windows_config.setter
def os_profile_windows_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]):
pulumi.set(self, "os_profile_windows_config", value)
@property
@pulumi.getter
def overprovision(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "overprovision")
@overprovision.setter
def overprovision(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "overprovision", value)
@property
@pulumi.getter
def plan(self) -> Optional[pulumi.Input['ScaleSetPlanArgs']]:
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: Optional[pulumi.Input['ScaleSetPlanArgs']]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="proximityPlacementGroupId")
def proximity_placement_group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "proximity_placement_group_id")
@proximity_placement_group_id.setter
def proximity_placement_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proximity_placement_group_id", value)
@property
@pulumi.getter(name="rollingUpgradePolicy")
def rolling_upgrade_policy(self) -> Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]:
return pulumi.get(self, "rolling_upgrade_policy")
@rolling_upgrade_policy.setter
def rolling_upgrade_policy(self, value: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]):
pulumi.set(self, "rolling_upgrade_policy", value)
@property
@pulumi.getter(name="singlePlacementGroup")
def single_placement_group(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "single_placement_group")
@single_placement_group.setter
def single_placement_group(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "single_placement_group", value)
@property
@pulumi.getter(name="storageProfileDataDisks")
def storage_profile_data_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]:
return pulumi.get(self, "storage_profile_data_disks")
@storage_profile_data_disks.setter
def storage_profile_data_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]):
pulumi.set(self, "storage_profile_data_disks", value)
@property
@pulumi.getter(name="storageProfileImageReference")
def storage_profile_image_reference(self) -> Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]:
return pulumi.get(self, "storage_profile_image_reference")
@storage_profile_image_reference.setter
def storage_profile_image_reference(self, value: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]):
pulumi.set(self, "storage_profile_image_reference", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "zones")
@zones.setter
def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "zones", value)
@pulumi.input_type
class _ScaleSetState:
def __init__(__self__, *,
automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
boot_diagnostics: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']] = None,
eviction_policy: Optional[pulumi.Input[str]] = None,
extensions: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]] = None,
health_probe_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ScaleSetIdentityArgs']] = None,
license_type: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]] = None,
os_profile: Optional[pulumi.Input['ScaleSetOsProfileArgs']] = None,
os_profile_linux_config: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']] = None,
os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]] = None,
os_profile_windows_config: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']] = None,
overprovision: Optional[pulumi.Input[bool]] = None,
plan: Optional[pulumi.Input['ScaleSetPlanArgs']] = None,
priority: Optional[pulumi.Input[str]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rolling_upgrade_policy: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']] = None,
single_placement_group: Optional[pulumi.Input[bool]] = None,
sku: Optional[pulumi.Input['ScaleSetSkuArgs']] = None,
storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]] = None,
storage_profile_image_reference: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']] = None,
storage_profile_os_disk: Optional[pulumi.Input['ScaleSetStorageProfileOsDiskArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
if automatic_os_upgrade is not None:
pulumi.set(__self__, "automatic_os_upgrade", automatic_os_upgrade)
if boot_diagnostics is not None:
pulumi.set(__self__, "boot_diagnostics", boot_diagnostics)
if eviction_policy is not None:
pulumi.set(__self__, "eviction_policy", eviction_policy)
if extensions is not None:
pulumi.set(__self__, "extensions", extensions)
if health_probe_id is not None:
pulumi.set(__self__, "health_probe_id", health_probe_id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if license_type is not None:
pulumi.set(__self__, "license_type", license_type)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if network_profiles is not None:
pulumi.set(__self__, "network_profiles", network_profiles)
if os_profile is not None:
pulumi.set(__self__, "os_profile", os_profile)
if os_profile_linux_config is not None:
pulumi.set(__self__, "os_profile_linux_config", os_profile_linux_config)
if os_profile_secrets is not None:
pulumi.set(__self__, "os_profile_secrets", os_profile_secrets)
if os_profile_windows_config is not None:
pulumi.set(__self__, "os_profile_windows_config", os_profile_windows_config)
if overprovision is not None:
pulumi.set(__self__, "overprovision", overprovision)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if proximity_placement_group_id is not None:
pulumi.set(__self__, "proximity_placement_group_id", proximity_placement_group_id)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if rolling_upgrade_policy is not None:
pulumi.set(__self__, "rolling_upgrade_policy", rolling_upgrade_policy)
if single_placement_group is not None:
pulumi.set(__self__, "single_placement_group", single_placement_group)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if storage_profile_data_disks is not None:
pulumi.set(__self__, "storage_profile_data_disks", storage_profile_data_disks)
if storage_profile_image_reference is not None:
pulumi.set(__self__, "storage_profile_image_reference", storage_profile_image_reference)
if storage_profile_os_disk is not None:
pulumi.set(__self__, "storage_profile_os_disk", storage_profile_os_disk)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if upgrade_policy_mode is not None:
pulumi.set(__self__, "upgrade_policy_mode", upgrade_policy_mode)
if zones is not None:
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="automaticOsUpgrade")
def automatic_os_upgrade(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "automatic_os_upgrade")
@automatic_os_upgrade.setter
def automatic_os_upgrade(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "automatic_os_upgrade", value)
@property
@pulumi.getter(name="bootDiagnostics")
def boot_diagnostics(self) -> Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]:
return pulumi.get(self, "boot_diagnostics")
@boot_diagnostics.setter
def boot_diagnostics(self, value: Optional[pulumi.Input['ScaleSetBootDiagnosticsArgs']]):
pulumi.set(self, "boot_diagnostics", value)
@property
@pulumi.getter(name="evictionPolicy")
def eviction_policy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "eviction_policy")
@eviction_policy.setter
def eviction_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "eviction_policy", value)
@property
@pulumi.getter
def extensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]:
return pulumi.get(self, "extensions")
@extensions.setter
def extensions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetExtensionArgs']]]]):
pulumi.set(self, "extensions", value)
@property
@pulumi.getter(name="healthProbeId")
def health_probe_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "health_probe_id")
@health_probe_id.setter
def health_probe_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "health_probe_id", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ScaleSetIdentityArgs']]:
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ScaleSetIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "license_type")
@license_type.setter
def license_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "license_type", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkProfiles")
def network_profiles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]]:
return pulumi.get(self, "network_profiles")
@network_profiles.setter
def network_profiles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetNetworkProfileArgs']]]]):
pulumi.set(self, "network_profiles", value)
@property
@pulumi.getter(name="osProfile")
def os_profile(self) -> Optional[pulumi.Input['ScaleSetOsProfileArgs']]:
return pulumi.get(self, "os_profile")
@os_profile.setter
def os_profile(self, value: Optional[pulumi.Input['ScaleSetOsProfileArgs']]):
pulumi.set(self, "os_profile", value)
@property
@pulumi.getter(name="osProfileLinuxConfig")
def os_profile_linux_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]:
return pulumi.get(self, "os_profile_linux_config")
@os_profile_linux_config.setter
def os_profile_linux_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileLinuxConfigArgs']]):
pulumi.set(self, "os_profile_linux_config", value)
@property
@pulumi.getter(name="osProfileSecrets")
def os_profile_secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]:
return pulumi.get(self, "os_profile_secrets")
@os_profile_secrets.setter
def os_profile_secrets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetOsProfileSecretArgs']]]]):
pulumi.set(self, "os_profile_secrets", value)
@property
@pulumi.getter(name="osProfileWindowsConfig")
def os_profile_windows_config(self) -> Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]:
return pulumi.get(self, "os_profile_windows_config")
@os_profile_windows_config.setter
def os_profile_windows_config(self, value: Optional[pulumi.Input['ScaleSetOsProfileWindowsConfigArgs']]):
pulumi.set(self, "os_profile_windows_config", value)
@property
@pulumi.getter
def overprovision(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "overprovision")
@overprovision.setter
def overprovision(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "overprovision", value)
@property
@pulumi.getter
def plan(self) -> Optional[pulumi.Input['ScaleSetPlanArgs']]:
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: Optional[pulumi.Input['ScaleSetPlanArgs']]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter(name="proximityPlacementGroupId")
def proximity_placement_group_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "proximity_placement_group_id")
@proximity_placement_group_id.setter
def proximity_placement_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proximity_placement_group_id", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="rollingUpgradePolicy")
def rolling_upgrade_policy(self) -> Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]:
return pulumi.get(self, "rolling_upgrade_policy")
@rolling_upgrade_policy.setter
def rolling_upgrade_policy(self, value: Optional[pulumi.Input['ScaleSetRollingUpgradePolicyArgs']]):
pulumi.set(self, "rolling_upgrade_policy", value)
@property
@pulumi.getter(name="singlePlacementGroup")
def single_placement_group(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "single_placement_group")
@single_placement_group.setter
def single_placement_group(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "single_placement_group", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['ScaleSetSkuArgs']]:
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['ScaleSetSkuArgs']]):
pulumi.set(self, "sku", value)
    @property
    @pulumi.getter(name="storageProfileDataDisks")
    def storage_profile_data_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]:
        """Input state for the `storageProfileDataDisks` wire property."""
        return pulumi.get(self, "storage_profile_data_disks")
    @storage_profile_data_disks.setter
    def storage_profile_data_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScaleSetStorageProfileDataDiskArgs']]]]):
        """Set the `storage_profile_data_disks` state value."""
        pulumi.set(self, "storage_profile_data_disks", value)
    @property
    @pulumi.getter(name="storageProfileImageReference")
    def storage_profile_image_reference(self) -> Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]:
        """Input state for the `storageProfileImageReference` wire property."""
        return pulumi.get(self, "storage_profile_image_reference")
    @storage_profile_image_reference.setter
    def storage_profile_image_reference(self, value: Optional[pulumi.Input['ScaleSetStorageProfileImageReferenceArgs']]):
        """Set the `storage_profile_image_reference` state value."""
        pulumi.set(self, "storage_profile_image_reference", value)
    @property
    @pulumi.getter(name="storageProfileOsDisk")
    def storage_profile_os_disk(self) -> Optional[pulumi.Input['ScaleSetStorageProfileOsDiskArgs']]:
        """Input state for the `storageProfileOsDisk` wire property."""
        return pulumi.get(self, "storage_profile_os_disk")
    @storage_profile_os_disk.setter
    def storage_profile_os_disk(self, value: Optional[pulumi.Input['ScaleSetStorageProfileOsDiskArgs']]):
        """Set the `storage_profile_os_disk` state value."""
        pulumi.set(self, "storage_profile_os_disk", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """Input state for the `tags` property (string-to-string mapping)."""
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        """Set the `tags` state value."""
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="upgradePolicyMode")
    def upgrade_policy_mode(self) -> Optional[pulumi.Input[str]]:
        """Input state for the `upgradePolicyMode` wire property."""
        return pulumi.get(self, "upgrade_policy_mode")
    @upgrade_policy_mode.setter
    def upgrade_policy_mode(self, value: Optional[pulumi.Input[str]]):
        """Set the `upgrade_policy_mode` state value."""
        pulumi.set(self, "upgrade_policy_mode", value)
    @property
    @pulumi.getter
    def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """Input state for the `zones` property (sequence of zone identifiers)."""
        return pulumi.get(self, "zones")
    @zones.setter
    def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        """Set the `zones` state value."""
        pulumi.set(self, "zones", value)
class ScaleSet(pulumi.CustomResource):
    """Pulumi resource wrapping the `azure:compute/scaleSet:ScaleSet` provider type.

    Auto-generated resource class: construction is dispatched through
    `_internal_init`, and existing resources are adopted via the static
    `get` method. Required inputs (enforced in `_internal_init` when
    creating, i.e. when `opts.urn` is unset): `network_profiles`,
    `os_profile`, `resource_group_name`, `sku`, `storage_profile_os_disk`
    and `upgrade_policy_mode`.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
                 boot_diagnostics: Optional[pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']]] = None,
                 eviction_policy: Optional[pulumi.Input[str]] = None,
                 extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]]] = None,
                 health_probe_id: Optional[pulumi.Input[str]] = None,
                 identity: Optional[pulumi.Input[pulumi.InputType['ScaleSetIdentityArgs']]] = None,
                 license_type: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]]] = None,
                 os_profile: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']]] = None,
                 os_profile_linux_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']]] = None,
                 os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]]] = None,
                 os_profile_windows_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']]] = None,
                 overprovision: Optional[pulumi.Input[bool]] = None,
                 plan: Optional[pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']]] = None,
                 priority: Optional[pulumi.Input[str]] = None,
                 proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 rolling_upgrade_policy: Optional[pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']]] = None,
                 single_placement_group: Optional[pulumi.Input[bool]] = None,
                 sku: Optional[pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']]] = None,
                 storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]]] = None,
                 storage_profile_image_reference: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']]] = None,
                 storage_profile_os_disk: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
                 zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 __props__=None):
        """Create a ScaleSet resource from individual keyword arguments (overload only; see `_internal_init`)."""
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ScaleSetArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """Create a ScaleSet resource from a `ScaleSetArgs` bundle (overload only; see `_internal_init`)."""
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        """Dispatch to `_internal_init`, unpacking a `ScaleSetArgs` bundle when one was supplied."""
        # get_resource_args_opts distinguishes the (args, opts) form from the keyword form.
        resource_args, opts = _utilities.get_resource_args_opts(ScaleSetArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
                       boot_diagnostics: Optional[pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']]] = None,
                       eviction_policy: Optional[pulumi.Input[str]] = None,
                       extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]]] = None,
                       health_probe_id: Optional[pulumi.Input[str]] = None,
                       identity: Optional[pulumi.Input[pulumi.InputType['ScaleSetIdentityArgs']]] = None,
                       license_type: Optional[pulumi.Input[str]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]]] = None,
                       os_profile: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']]] = None,
                       os_profile_linux_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']]] = None,
                       os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]]] = None,
                       os_profile_windows_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']]] = None,
                       overprovision: Optional[pulumi.Input[bool]] = None,
                       plan: Optional[pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']]] = None,
                       priority: Optional[pulumi.Input[str]] = None,
                       proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       rolling_upgrade_policy: Optional[pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']]] = None,
                       single_placement_group: Optional[pulumi.Input[bool]] = None,
                       sku: Optional[pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']]] = None,
                       storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]]] = None,
                       storage_profile_image_reference: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']]] = None,
                       storage_profile_os_disk: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
                       zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       __props__=None):
        """Validate options, assemble the property bag and register the resource with the engine.

        Raises TypeError when `opts` is not a ResourceOptions, when
        `__props__` is supplied without `opts.id`, or when a required
        property is missing on creation.
        """
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ must be built here, never passed in.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ScaleSetArgs.__new__(ScaleSetArgs)
            __props__.__dict__["automatic_os_upgrade"] = automatic_os_upgrade
            __props__.__dict__["boot_diagnostics"] = boot_diagnostics
            __props__.__dict__["eviction_policy"] = eviction_policy
            __props__.__dict__["extensions"] = extensions
            __props__.__dict__["health_probe_id"] = health_probe_id
            __props__.__dict__["identity"] = identity
            __props__.__dict__["license_type"] = license_type
            __props__.__dict__["location"] = location
            __props__.__dict__["name"] = name
            # Required properties are only enforced on create (opts.urn unset).
            if network_profiles is None and not opts.urn:
                raise TypeError("Missing required property 'network_profiles'")
            __props__.__dict__["network_profiles"] = network_profiles
            if os_profile is None and not opts.urn:
                raise TypeError("Missing required property 'os_profile'")
            __props__.__dict__["os_profile"] = os_profile
            __props__.__dict__["os_profile_linux_config"] = os_profile_linux_config
            __props__.__dict__["os_profile_secrets"] = os_profile_secrets
            __props__.__dict__["os_profile_windows_config"] = os_profile_windows_config
            __props__.__dict__["overprovision"] = overprovision
            __props__.__dict__["plan"] = plan
            __props__.__dict__["priority"] = priority
            __props__.__dict__["proximity_placement_group_id"] = proximity_placement_group_id
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["rolling_upgrade_policy"] = rolling_upgrade_policy
            __props__.__dict__["single_placement_group"] = single_placement_group
            if sku is None and not opts.urn:
                raise TypeError("Missing required property 'sku'")
            __props__.__dict__["sku"] = sku
            __props__.__dict__["storage_profile_data_disks"] = storage_profile_data_disks
            __props__.__dict__["storage_profile_image_reference"] = storage_profile_image_reference
            if storage_profile_os_disk is None and not opts.urn:
                raise TypeError("Missing required property 'storage_profile_os_disk'")
            __props__.__dict__["storage_profile_os_disk"] = storage_profile_os_disk
            __props__.__dict__["tags"] = tags
            if upgrade_policy_mode is None and not opts.urn:
                raise TypeError("Missing required property 'upgrade_policy_mode'")
            __props__.__dict__["upgrade_policy_mode"] = upgrade_policy_mode
            __props__.__dict__["zones"] = zones
        super(ScaleSet, __self__).__init__(
            'azure:compute/scaleSet:ScaleSet',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            automatic_os_upgrade: Optional[pulumi.Input[bool]] = None,
            boot_diagnostics: Optional[pulumi.Input[pulumi.InputType['ScaleSetBootDiagnosticsArgs']]] = None,
            eviction_policy: Optional[pulumi.Input[str]] = None,
            extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetExtensionArgs']]]]] = None,
            health_probe_id: Optional[pulumi.Input[str]] = None,
            identity: Optional[pulumi.Input[pulumi.InputType['ScaleSetIdentityArgs']]] = None,
            license_type: Optional[pulumi.Input[str]] = None,
            location: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            network_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetNetworkProfileArgs']]]]] = None,
            os_profile: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileArgs']]] = None,
            os_profile_linux_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileLinuxConfigArgs']]] = None,
            os_profile_secrets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetOsProfileSecretArgs']]]]] = None,
            os_profile_windows_config: Optional[pulumi.Input[pulumi.InputType['ScaleSetOsProfileWindowsConfigArgs']]] = None,
            overprovision: Optional[pulumi.Input[bool]] = None,
            plan: Optional[pulumi.Input[pulumi.InputType['ScaleSetPlanArgs']]] = None,
            priority: Optional[pulumi.Input[str]] = None,
            proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None,
            rolling_upgrade_policy: Optional[pulumi.Input[pulumi.InputType['ScaleSetRollingUpgradePolicyArgs']]] = None,
            single_placement_group: Optional[pulumi.Input[bool]] = None,
            sku: Optional[pulumi.Input[pulumi.InputType['ScaleSetSkuArgs']]] = None,
            storage_profile_data_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileDataDiskArgs']]]]] = None,
            storage_profile_image_reference: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileImageReferenceArgs']]] = None,
            storage_profile_os_disk: Optional[pulumi.Input[pulumi.InputType['ScaleSetStorageProfileOsDiskArgs']]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            upgrade_policy_mode: Optional[pulumi.Input[str]] = None,
            zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'ScaleSet':
        """Adopt an existing ScaleSet by `id`, seeding state from the given keyword arguments.

        No required-property validation is performed here; state is taken
        as provided and merged with `opts` carrying the resource id.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _ScaleSetState.__new__(_ScaleSetState)
        __props__.__dict__["automatic_os_upgrade"] = automatic_os_upgrade
        __props__.__dict__["boot_diagnostics"] = boot_diagnostics
        __props__.__dict__["eviction_policy"] = eviction_policy
        __props__.__dict__["extensions"] = extensions
        __props__.__dict__["health_probe_id"] = health_probe_id
        __props__.__dict__["identity"] = identity
        __props__.__dict__["license_type"] = license_type
        __props__.__dict__["location"] = location
        __props__.__dict__["name"] = name
        __props__.__dict__["network_profiles"] = network_profiles
        __props__.__dict__["os_profile"] = os_profile
        __props__.__dict__["os_profile_linux_config"] = os_profile_linux_config
        __props__.__dict__["os_profile_secrets"] = os_profile_secrets
        __props__.__dict__["os_profile_windows_config"] = os_profile_windows_config
        __props__.__dict__["overprovision"] = overprovision
        __props__.__dict__["plan"] = plan
        __props__.__dict__["priority"] = priority
        __props__.__dict__["proximity_placement_group_id"] = proximity_placement_group_id
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["rolling_upgrade_policy"] = rolling_upgrade_policy
        __props__.__dict__["single_placement_group"] = single_placement_group
        __props__.__dict__["sku"] = sku
        __props__.__dict__["storage_profile_data_disks"] = storage_profile_data_disks
        __props__.__dict__["storage_profile_image_reference"] = storage_profile_image_reference
        __props__.__dict__["storage_profile_os_disk"] = storage_profile_os_disk
        __props__.__dict__["tags"] = tags
        __props__.__dict__["upgrade_policy_mode"] = upgrade_policy_mode
        __props__.__dict__["zones"] = zones
        return ScaleSet(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="automaticOsUpgrade")
    def automatic_os_upgrade(self) -> pulumi.Output[Optional[bool]]:
        """Resolved output for the `automaticOsUpgrade` wire property."""
        return pulumi.get(self, "automatic_os_upgrade")
    @property
    @pulumi.getter(name="bootDiagnostics")
    def boot_diagnostics(self) -> pulumi.Output[Optional['outputs.ScaleSetBootDiagnostics']]:
        """Resolved output for the `bootDiagnostics` wire property."""
        return pulumi.get(self, "boot_diagnostics")
    @property
    @pulumi.getter(name="evictionPolicy")
    def eviction_policy(self) -> pulumi.Output[Optional[str]]:
        """Resolved output for the `evictionPolicy` wire property."""
        return pulumi.get(self, "eviction_policy")
    @property
    @pulumi.getter
    def extensions(self) -> pulumi.Output[Optional[Sequence['outputs.ScaleSetExtension']]]:
        """Resolved output for the `extensions` property."""
        return pulumi.get(self, "extensions")
    @property
    @pulumi.getter(name="healthProbeId")
    def health_probe_id(self) -> pulumi.Output[Optional[str]]:
        """Resolved output for the `healthProbeId` wire property."""
        return pulumi.get(self, "health_probe_id")
    @property
    @pulumi.getter
    def identity(self) -> pulumi.Output['outputs.ScaleSetIdentity']:
        """Resolved output for the `identity` property."""
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> pulumi.Output[str]:
        """Resolved output for the `licenseType` wire property."""
        return pulumi.get(self, "license_type")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """Resolved output for the `location` property."""
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """Resolved output for the `name` property."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkProfiles")
    def network_profiles(self) -> pulumi.Output[Sequence['outputs.ScaleSetNetworkProfile']]:
        """Resolved output for the `networkProfiles` wire property."""
        return pulumi.get(self, "network_profiles")
    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> pulumi.Output['outputs.ScaleSetOsProfile']:
        """Resolved output for the `osProfile` wire property."""
        return pulumi.get(self, "os_profile")
    @property
    @pulumi.getter(name="osProfileLinuxConfig")
    def os_profile_linux_config(self) -> pulumi.Output['outputs.ScaleSetOsProfileLinuxConfig']:
        """Resolved output for the `osProfileLinuxConfig` wire property."""
        return pulumi.get(self, "os_profile_linux_config")
    @property
    @pulumi.getter(name="osProfileSecrets")
    def os_profile_secrets(self) -> pulumi.Output[Optional[Sequence['outputs.ScaleSetOsProfileSecret']]]:
        """Resolved output for the `osProfileSecrets` wire property."""
        return pulumi.get(self, "os_profile_secrets")
    @property
    @pulumi.getter(name="osProfileWindowsConfig")
    def os_profile_windows_config(self) -> pulumi.Output[Optional['outputs.ScaleSetOsProfileWindowsConfig']]:
        """Resolved output for the `osProfileWindowsConfig` wire property."""
        return pulumi.get(self, "os_profile_windows_config")
    @property
    @pulumi.getter
    def overprovision(self) -> pulumi.Output[Optional[bool]]:
        """Resolved output for the `overprovision` property."""
        return pulumi.get(self, "overprovision")
    @property
    @pulumi.getter
    def plan(self) -> pulumi.Output[Optional['outputs.ScaleSetPlan']]:
        """Resolved output for the `plan` property."""
        return pulumi.get(self, "plan")
    @property
    @pulumi.getter
    def priority(self) -> pulumi.Output[Optional[str]]:
        """Resolved output for the `priority` property."""
        return pulumi.get(self, "priority")
    @property
    @pulumi.getter(name="proximityPlacementGroupId")
    def proximity_placement_group_id(self) -> pulumi.Output[Optional[str]]:
        """Resolved output for the `proximityPlacementGroupId` wire property."""
        return pulumi.get(self, "proximity_placement_group_id")
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """Resolved output for the `resourceGroupName` wire property."""
        return pulumi.get(self, "resource_group_name")
    @property
    @pulumi.getter(name="rollingUpgradePolicy")
    def rolling_upgrade_policy(self) -> pulumi.Output[Optional['outputs.ScaleSetRollingUpgradePolicy']]:
        """Resolved output for the `rollingUpgradePolicy` wire property."""
        return pulumi.get(self, "rolling_upgrade_policy")
    @property
    @pulumi.getter(name="singlePlacementGroup")
    def single_placement_group(self) -> pulumi.Output[Optional[bool]]:
        """Resolved output for the `singlePlacementGroup` wire property."""
        return pulumi.get(self, "single_placement_group")
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output['outputs.ScaleSetSku']:
        """Resolved output for the `sku` property."""
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter(name="storageProfileDataDisks")
    def storage_profile_data_disks(self) -> pulumi.Output[Optional[Sequence['outputs.ScaleSetStorageProfileDataDisk']]]:
        """Resolved output for the `storageProfileDataDisks` wire property."""
        return pulumi.get(self, "storage_profile_data_disks")
    @property
    @pulumi.getter(name="storageProfileImageReference")
    def storage_profile_image_reference(self) -> pulumi.Output['outputs.ScaleSetStorageProfileImageReference']:
        """Resolved output for the `storageProfileImageReference` wire property."""
        return pulumi.get(self, "storage_profile_image_reference")
    @property
    @pulumi.getter(name="storageProfileOsDisk")
    def storage_profile_os_disk(self) -> pulumi.Output['outputs.ScaleSetStorageProfileOsDisk']:
        """Resolved output for the `storageProfileOsDisk` wire property."""
        return pulumi.get(self, "storage_profile_os_disk")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """Resolved output for the `tags` property."""
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="upgradePolicyMode")
    def upgrade_policy_mode(self) -> pulumi.Output[str]:
        """Resolved output for the `upgradePolicyMode` wire property."""
        return pulumi.get(self, "upgrade_policy_mode")
    @property
    @pulumi.getter
    def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """Resolved output for the `zones` property."""
        return pulumi.get(self, "zones")
| true | true |
f73690586d5e8f1f4408286c3b7cdd7949573d25 | 1,359 | py | Python | nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..histogrammatching import HistogramMatching
def test_HistogramMatching_inputs():
input_map = dict(
args=dict(argstr='%s', ),
environ=dict(
nohash=True,
usedefault=True,
),
inputVolume=dict(
argstr='%s',
position=-3,
),
numberOfHistogramLevels=dict(argstr='--numberOfHistogramLevels %d', ),
numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ),
outputVolume=dict(
argstr='%s',
hash_files=False,
position=-1,
),
referenceVolume=dict(
argstr='%s',
position=-2,
),
threshold=dict(argstr='--threshold ', ),
)
inputs = HistogramMatching.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_HistogramMatching_outputs():
output_map = dict(outputVolume=dict(position=-1, ), )
outputs = HistogramMatching.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 32.357143 | 78 | 0.604121 |
from __future__ import unicode_literals
from ..histogrammatching import HistogramMatching
def test_HistogramMatching_inputs():
input_map = dict(
args=dict(argstr='%s', ),
environ=dict(
nohash=True,
usedefault=True,
),
inputVolume=dict(
argstr='%s',
position=-3,
),
numberOfHistogramLevels=dict(argstr='--numberOfHistogramLevels %d', ),
numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ),
outputVolume=dict(
argstr='%s',
hash_files=False,
position=-1,
),
referenceVolume=dict(
argstr='%s',
position=-2,
),
threshold=dict(argstr='--threshold ', ),
)
inputs = HistogramMatching.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_HistogramMatching_outputs():
output_map = dict(outputVolume=dict(position=-1, ), )
outputs = HistogramMatching.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| true | true |
f73690b1f6595eaa7cdf4b609a365803b2d5007b | 305 | py | Python | api.py | PocketOfWeird/payoff-api | a28ad0e096f7b3678176bbe4a2942ce55e50a38a | [
"MIT"
] | null | null | null | api.py | PocketOfWeird/payoff-api | a28ad0e096f7b3678176bbe4a2942ce55e50a38a | [
"MIT"
] | null | null | null | api.py | PocketOfWeird/payoff-api | a28ad0e096f7b3678176bbe4a2942ce55e50a38a | [
"MIT"
] | null | null | null | # api.py
from dotenv import load_dotenv
load_dotenv()
import flask
from mongo import load_db
app = flask.Flask(__name__)
app.config["DEBUG"] = True
db = load_db()
@app.route("/", methods=["GET"])
def home():
return "<h1>Payoff API</h1><p>Back-end to the Payoff Budget Application</p>"
app.run()
| 16.052632 | 80 | 0.691803 |
from dotenv import load_dotenv
load_dotenv()
import flask
from mongo import load_db
app = flask.Flask(__name__)
app.config["DEBUG"] = True
db = load_db()
@app.route("/", methods=["GET"])
def home():
return "<h1>Payoff API</h1><p>Back-end to the Payoff Budget Application</p>"
app.run()
| true | true |
f7369130a3ae90390c4f9f9366b7cb66ec5a2af2 | 27,920 | py | Python | twilio/rest/api/v2010/account/usage/record/daily.py | angmunpri/twilio-python | d6ed1098f4bc06529d68f965eabdf87642ac441c | [
"MIT"
] | 1 | 2022-03-12T08:56:51.000Z | 2022-03-12T08:56:51.000Z | twilio/rest/api/v2010/account/usage/record/daily.py | angmunpri/twilio-python | d6ed1098f4bc06529d68f965eabdf87642ac441c | [
"MIT"
] | 1 | 2022-03-12T06:00:21.000Z | 2022-03-12T07:07:55.000Z | env/lib/python3.9/site-packages/twilio/rest/api/v2010/account/usage/record/daily.py | giannicrivello/AudioShack_BE | b50ba91b6904ac069fc37c98a691729932297b6a | [
"MIT"
] | null | null | null | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class DailyList(ListResource):
def __init__(self, version, account_sid):
"""
Initialize the DailyList
:param Version version: Version that contains the resource
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyList
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyList
"""
super(DailyList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, }
self._uri = '/Accounts/{account_sid}/Usage/Records/Daily.json'.format(**self._solution)
def stream(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset, limit=None,
page_size=None):
"""
Streams DailyInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.record.daily.DailyInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
category=category,
start_date=start_date,
end_date=end_date,
include_subaccounts=include_subaccounts,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset, limit=None,
page_size=None):
"""
Lists DailyInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.usage.record.daily.DailyInstance]
"""
return list(self.stream(
category=category,
start_date=start_date,
end_date=end_date,
include_subaccounts=include_subaccounts,
limit=limit,
page_size=page_size,
))
def page(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of DailyInstance records from the API.
Request is executed immediately
:param DailyInstance.Category category: The usage category of the UsageRecord resources to read
:param date start_date: Only include usage that has occurred on or after this date
:param date end_date: Only include usage that occurred on or before this date
:param bool include_subaccounts: Whether to include usage from the master account and all its subaccounts
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
"""
data = values.of({
'Category': category,
'StartDate': serialize.iso8601_date(start_date),
'EndDate': serialize.iso8601_date(end_date),
'IncludeSubaccounts': include_subaccounts,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return DailyPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of DailyInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return DailyPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DailyList>'
class DailyPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the DailyPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyPage
"""
super(DailyPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of DailyInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
"""
return DailyInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DailyPage>'
class DailyInstance(InstanceResource):
class Category(object):
A2P_REGISTRATION_FEES = "a2p-registration-fees"
AGENT_CONFERENCE = "agent-conference"
ANSWERING_MACHINE_DETECTION = "answering-machine-detection"
AUTHY_AUTHENTICATIONS = "authy-authentications"
AUTHY_CALLS_OUTBOUND = "authy-calls-outbound"
AUTHY_MONTHLY_FEES = "authy-monthly-fees"
AUTHY_PHONE_INTELLIGENCE = "authy-phone-intelligence"
AUTHY_PHONE_VERIFICATIONS = "authy-phone-verifications"
AUTHY_SMS_OUTBOUND = "authy-sms-outbound"
CALL_PROGESS_EVENTS = "call-progess-events"
CALLERIDLOOKUPS = "calleridlookups"
CALLS = "calls"
CALLS_CLIENT = "calls-client"
CALLS_GLOBALCONFERENCE = "calls-globalconference"
CALLS_INBOUND = "calls-inbound"
CALLS_INBOUND_LOCAL = "calls-inbound-local"
CALLS_INBOUND_MOBILE = "calls-inbound-mobile"
CALLS_INBOUND_TOLLFREE = "calls-inbound-tollfree"
CALLS_OUTBOUND = "calls-outbound"
CALLS_PAY_VERB_TRANSACTIONS = "calls-pay-verb-transactions"
CALLS_RECORDINGS = "calls-recordings"
CALLS_SIP = "calls-sip"
CALLS_SIP_INBOUND = "calls-sip-inbound"
CALLS_SIP_OUTBOUND = "calls-sip-outbound"
CALLS_TRANSFERS = "calls-transfers"
CARRIER_LOOKUPS = "carrier-lookups"
CONVERSATIONS = "conversations"
CONVERSATIONS_API_REQUESTS = "conversations-api-requests"
CONVERSATIONS_CONVERSATION_EVENTS = "conversations-conversation-events"
CONVERSATIONS_ENDPOINT_CONNECTIVITY = "conversations-endpoint-connectivity"
CONVERSATIONS_EVENTS = "conversations-events"
CONVERSATIONS_PARTICIPANT_EVENTS = "conversations-participant-events"
CONVERSATIONS_PARTICIPANTS = "conversations-participants"
CPS = "cps"
FLEX_USAGE = "flex-usage"
FRAUD_LOOKUPS = "fraud-lookups"
GROUP_ROOMS = "group-rooms"
GROUP_ROOMS_DATA_TRACK = "group-rooms-data-track"
GROUP_ROOMS_ENCRYPTED_MEDIA_RECORDED = "group-rooms-encrypted-media-recorded"
GROUP_ROOMS_MEDIA_DOWNLOADED = "group-rooms-media-downloaded"
GROUP_ROOMS_MEDIA_RECORDED = "group-rooms-media-recorded"
GROUP_ROOMS_MEDIA_ROUTED = "group-rooms-media-routed"
GROUP_ROOMS_MEDIA_STORED = "group-rooms-media-stored"
GROUP_ROOMS_PARTICIPANT_MINUTES = "group-rooms-participant-minutes"
GROUP_ROOMS_RECORDED_MINUTES = "group-rooms-recorded-minutes"
IMP_V1_USAGE = "imp-v1-usage"
LOOKUPS = "lookups"
MARKETPLACE = "marketplace"
MARKETPLACE_ALGORITHMIA_NAMED_ENTITY_RECOGNITION = "marketplace-algorithmia-named-entity-recognition"
MARKETPLACE_CADENCE_TRANSCRIPTION = "marketplace-cadence-transcription"
MARKETPLACE_CADENCE_TRANSLATION = "marketplace-cadence-translation"
MARKETPLACE_CAPIO_SPEECH_TO_TEXT = "marketplace-capio-speech-to-text"
MARKETPLACE_CONVRIZA_ABABA = "marketplace-convriza-ababa"
MARKETPLACE_DEEPGRAM_PHRASE_DETECTOR = "marketplace-deepgram-phrase-detector"
MARKETPLACE_DIGITAL_SEGMENT_BUSINESS_INFO = "marketplace-digital-segment-business-info"
MARKETPLACE_FACEBOOK_OFFLINE_CONVERSIONS = "marketplace-facebook-offline-conversions"
MARKETPLACE_GOOGLE_SPEECH_TO_TEXT = "marketplace-google-speech-to-text"
MARKETPLACE_IBM_WATSON_MESSAGE_INSIGHTS = "marketplace-ibm-watson-message-insights"
MARKETPLACE_IBM_WATSON_MESSAGE_SENTIMENT = "marketplace-ibm-watson-message-sentiment"
MARKETPLACE_IBM_WATSON_RECORDING_ANALYSIS = "marketplace-ibm-watson-recording-analysis"
MARKETPLACE_IBM_WATSON_TONE_ANALYZER = "marketplace-ibm-watson-tone-analyzer"
MARKETPLACE_ICEHOOK_SYSTEMS_SCOUT = "marketplace-icehook-systems-scout"
MARKETPLACE_INFOGROUP_DATAAXLE_BIZINFO = "marketplace-infogroup-dataaxle-bizinfo"
MARKETPLACE_KEEN_IO_CONTACT_CENTER_ANALYTICS = "marketplace-keen-io-contact-center-analytics"
MARKETPLACE_MARCHEX_CLEANCALL = "marketplace-marchex-cleancall"
MARKETPLACE_MARCHEX_SENTIMENT_ANALYSIS_FOR_SMS = "marketplace-marchex-sentiment-analysis-for-sms"
MARKETPLACE_MARKETPLACE_NEXTCALLER_SOCIAL_ID = "marketplace-marketplace-nextcaller-social-id"
MARKETPLACE_MOBILE_COMMONS_OPT_OUT_CLASSIFIER = "marketplace-mobile-commons-opt-out-classifier"
MARKETPLACE_NEXIWAVE_VOICEMAIL_TO_TEXT = "marketplace-nexiwave-voicemail-to-text"
MARKETPLACE_NEXTCALLER_ADVANCED_CALLER_IDENTIFICATION = "marketplace-nextcaller-advanced-caller-identification"
MARKETPLACE_NOMOROBO_SPAM_SCORE = "marketplace-nomorobo-spam-score"
MARKETPLACE_PAYFONE_TCPA_COMPLIANCE = "marketplace-payfone-tcpa-compliance"
MARKETPLACE_REMEETING_AUTOMATIC_SPEECH_RECOGNITION = "marketplace-remeeting-automatic-speech-recognition"
MARKETPLACE_TCPA_DEFENSE_SOLUTIONS_BLACKLIST_FEED = "marketplace-tcpa-defense-solutions-blacklist-feed"
MARKETPLACE_TELO_OPENCNAM = "marketplace-telo-opencnam"
MARKETPLACE_TRUECNAM_TRUE_SPAM = "marketplace-truecnam-true-spam"
MARKETPLACE_TWILIO_CALLER_NAME_LOOKUP_US = "marketplace-twilio-caller-name-lookup-us"
MARKETPLACE_TWILIO_CARRIER_INFORMATION_LOOKUP = "marketplace-twilio-carrier-information-lookup"
MARKETPLACE_VOICEBASE_PCI = "marketplace-voicebase-pci"
MARKETPLACE_VOICEBASE_TRANSCRIPTION = "marketplace-voicebase-transcription"
MARKETPLACE_VOICEBASE_TRANSCRIPTION_CUSTOM_VOCABULARY = "marketplace-voicebase-transcription-custom-vocabulary"
MARKETPLACE_WHITEPAGES_PRO_CALLER_IDENTIFICATION = "marketplace-whitepages-pro-caller-identification"
MARKETPLACE_WHITEPAGES_PRO_PHONE_INTELLIGENCE = "marketplace-whitepages-pro-phone-intelligence"
MARKETPLACE_WHITEPAGES_PRO_PHONE_REPUTATION = "marketplace-whitepages-pro-phone-reputation"
MARKETPLACE_WOLFARM_SPOKEN_RESULTS = "marketplace-wolfarm-spoken-results"
MARKETPLACE_WOLFRAM_SHORT_ANSWER = "marketplace-wolfram-short-answer"
MARKETPLACE_YTICA_CONTACT_CENTER_REPORTING_ANALYTICS = "marketplace-ytica-contact-center-reporting-analytics"
MEDIASTORAGE = "mediastorage"
MMS = "mms"
MMS_INBOUND = "mms-inbound"
MMS_INBOUND_LONGCODE = "mms-inbound-longcode"
MMS_INBOUND_SHORTCODE = "mms-inbound-shortcode"
MMS_MESSAGES_CARRIERFEES = "mms-messages-carrierfees"
MMS_OUTBOUND = "mms-outbound"
MMS_OUTBOUND_LONGCODE = "mms-outbound-longcode"
MMS_OUTBOUND_SHORTCODE = "mms-outbound-shortcode"
MONITOR_READS = "monitor-reads"
MONITOR_STORAGE = "monitor-storage"
MONITOR_WRITES = "monitor-writes"
NOTIFY = "notify"
NOTIFY_ACTIONS_ATTEMPTS = "notify-actions-attempts"
NOTIFY_CHANNELS = "notify-channels"
NUMBER_FORMAT_LOOKUPS = "number-format-lookups"
PCHAT = "pchat"
PCHAT_USERS = "pchat-users"
PEER_TO_PEER_ROOMS_PARTICIPANT_MINUTES = "peer-to-peer-rooms-participant-minutes"
PFAX = "pfax"
PFAX_MINUTES = "pfax-minutes"
PFAX_MINUTES_INBOUND = "pfax-minutes-inbound"
PFAX_MINUTES_OUTBOUND = "pfax-minutes-outbound"
PFAX_PAGES = "pfax-pages"
PHONENUMBERS = "phonenumbers"
PHONENUMBERS_CPS = "phonenumbers-cps"
PHONENUMBERS_EMERGENCY = "phonenumbers-emergency"
PHONENUMBERS_LOCAL = "phonenumbers-local"
PHONENUMBERS_MOBILE = "phonenumbers-mobile"
PHONENUMBERS_SETUPS = "phonenumbers-setups"
PHONENUMBERS_TOLLFREE = "phonenumbers-tollfree"
PREMIUMSUPPORT = "premiumsupport"
PROXY = "proxy"
PROXY_ACTIVE_SESSIONS = "proxy-active-sessions"
PSTNCONNECTIVITY = "pstnconnectivity"
PV = "pv"
PV_COMPOSITION_MEDIA_DOWNLOADED = "pv-composition-media-downloaded"
PV_COMPOSITION_MEDIA_ENCRYPTED = "pv-composition-media-encrypted"
PV_COMPOSITION_MEDIA_STORED = "pv-composition-media-stored"
PV_COMPOSITION_MINUTES = "pv-composition-minutes"
PV_RECORDING_COMPOSITIONS = "pv-recording-compositions"
PV_ROOM_PARTICIPANTS = "pv-room-participants"
PV_ROOM_PARTICIPANTS_AU1 = "pv-room-participants-au1"
PV_ROOM_PARTICIPANTS_BR1 = "pv-room-participants-br1"
PV_ROOM_PARTICIPANTS_IE1 = "pv-room-participants-ie1"
PV_ROOM_PARTICIPANTS_JP1 = "pv-room-participants-jp1"
PV_ROOM_PARTICIPANTS_SG1 = "pv-room-participants-sg1"
PV_ROOM_PARTICIPANTS_US1 = "pv-room-participants-us1"
PV_ROOM_PARTICIPANTS_US2 = "pv-room-participants-us2"
PV_ROOMS = "pv-rooms"
PV_SIP_ENDPOINT_REGISTRATIONS = "pv-sip-endpoint-registrations"
RECORDINGS = "recordings"
RECORDINGSTORAGE = "recordingstorage"
ROOMS_GROUP_BANDWIDTH = "rooms-group-bandwidth"
ROOMS_GROUP_MINUTES = "rooms-group-minutes"
ROOMS_PEER_TO_PEER_MINUTES = "rooms-peer-to-peer-minutes"
SHORTCODES = "shortcodes"
SHORTCODES_CUSTOMEROWNED = "shortcodes-customerowned"
SHORTCODES_MMS_ENABLEMENT = "shortcodes-mms-enablement"
SHORTCODES_MPS = "shortcodes-mps"
SHORTCODES_RANDOM = "shortcodes-random"
SHORTCODES_UK = "shortcodes-uk"
SHORTCODES_VANITY = "shortcodes-vanity"
SMALL_GROUP_ROOMS = "small-group-rooms"
SMALL_GROUP_ROOMS_DATA_TRACK = "small-group-rooms-data-track"
SMALL_GROUP_ROOMS_PARTICIPANT_MINUTES = "small-group-rooms-participant-minutes"
SMS = "sms"
SMS_INBOUND = "sms-inbound"
SMS_INBOUND_LONGCODE = "sms-inbound-longcode"
SMS_INBOUND_SHORTCODE = "sms-inbound-shortcode"
SMS_MESSAGES_CARRIERFEES = "sms-messages-carrierfees"
SMS_MESSAGES_FEATURES = "sms-messages-features"
SMS_MESSAGES_FEATURES_SENDERID = "sms-messages-features-senderid"
SMS_OUTBOUND = "sms-outbound"
SMS_OUTBOUND_CONTENT_INSPECTION = "sms-outbound-content-inspection"
SMS_OUTBOUND_LONGCODE = "sms-outbound-longcode"
SMS_OUTBOUND_SHORTCODE = "sms-outbound-shortcode"
SPEECH_RECOGNITION = "speech-recognition"
STUDIO_ENGAGEMENTS = "studio-engagements"
SYNC = "sync"
SYNC_ACTIONS = "sync-actions"
SYNC_ENDPOINT_HOURS = "sync-endpoint-hours"
SYNC_ENDPOINT_HOURS_ABOVE_DAILY_CAP = "sync-endpoint-hours-above-daily-cap"
TASKROUTER_TASKS = "taskrouter-tasks"
TOTALPRICE = "totalprice"
TRANSCRIPTIONS = "transcriptions"
TRUNKING_CPS = "trunking-cps"
TRUNKING_EMERGENCY_CALLS = "trunking-emergency-calls"
TRUNKING_ORIGINATION = "trunking-origination"
TRUNKING_ORIGINATION_LOCAL = "trunking-origination-local"
TRUNKING_ORIGINATION_MOBILE = "trunking-origination-mobile"
TRUNKING_ORIGINATION_TOLLFREE = "trunking-origination-tollfree"
TRUNKING_RECORDINGS = "trunking-recordings"
TRUNKING_SECURE = "trunking-secure"
TRUNKING_TERMINATION = "trunking-termination"
TURNMEGABYTES = "turnmegabytes"
TURNMEGABYTES_AUSTRALIA = "turnmegabytes-australia"
TURNMEGABYTES_BRASIL = "turnmegabytes-brasil"
TURNMEGABYTES_GERMANY = "turnmegabytes-germany"
TURNMEGABYTES_INDIA = "turnmegabytes-india"
TURNMEGABYTES_IRELAND = "turnmegabytes-ireland"
TURNMEGABYTES_JAPAN = "turnmegabytes-japan"
TURNMEGABYTES_SINGAPORE = "turnmegabytes-singapore"
TURNMEGABYTES_USEAST = "turnmegabytes-useast"
TURNMEGABYTES_USWEST = "turnmegabytes-uswest"
TWILIO_INTERCONNECT = "twilio-interconnect"
VERIFY_PUSH = "verify-push"
VIDEO_RECORDINGS = "video-recordings"
VOICE_INSIGHTS = "voice-insights"
VOICE_INSIGHTS_CLIENT_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-client-insights-on-demand-minute"
VOICE_INSIGHTS_PTSN_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-ptsn-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_INTERFACE_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-interface-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_TRUNKING_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-trunking-insights-on-demand-minute"
WIRELESS = "wireless"
WIRELESS_ORDERS = "wireless-orders"
WIRELESS_ORDERS_ARTWORK = "wireless-orders-artwork"
WIRELESS_ORDERS_BULK = "wireless-orders-bulk"
WIRELESS_ORDERS_ESIM = "wireless-orders-esim"
WIRELESS_ORDERS_STARTER = "wireless-orders-starter"
WIRELESS_USAGE = "wireless-usage"
WIRELESS_USAGE_COMMANDS = "wireless-usage-commands"
WIRELESS_USAGE_COMMANDS_AFRICA = "wireless-usage-commands-africa"
WIRELESS_USAGE_COMMANDS_ASIA = "wireless-usage-commands-asia"
WIRELESS_USAGE_COMMANDS_CENTRALANDSOUTHAMERICA = "wireless-usage-commands-centralandsouthamerica"
WIRELESS_USAGE_COMMANDS_EUROPE = "wireless-usage-commands-europe"
WIRELESS_USAGE_COMMANDS_HOME = "wireless-usage-commands-home"
WIRELESS_USAGE_COMMANDS_NORTHAMERICA = "wireless-usage-commands-northamerica"
WIRELESS_USAGE_COMMANDS_OCEANIA = "wireless-usage-commands-oceania"
WIRELESS_USAGE_COMMANDS_ROAMING = "wireless-usage-commands-roaming"
WIRELESS_USAGE_DATA = "wireless-usage-data"
WIRELESS_USAGE_DATA_AFRICA = "wireless-usage-data-africa"
WIRELESS_USAGE_DATA_ASIA = "wireless-usage-data-asia"
WIRELESS_USAGE_DATA_CENTRALANDSOUTHAMERICA = "wireless-usage-data-centralandsouthamerica"
WIRELESS_USAGE_DATA_CUSTOM_ADDITIONALMB = "wireless-usage-data-custom-additionalmb"
WIRELESS_USAGE_DATA_CUSTOM_FIRST5MB = "wireless-usage-data-custom-first5mb"
WIRELESS_USAGE_DATA_DOMESTIC_ROAMING = "wireless-usage-data-domestic-roaming"
WIRELESS_USAGE_DATA_EUROPE = "wireless-usage-data-europe"
WIRELESS_USAGE_DATA_INDIVIDUAL_ADDITIONALGB = "wireless-usage-data-individual-additionalgb"
WIRELESS_USAGE_DATA_INDIVIDUAL_FIRSTGB = "wireless-usage-data-individual-firstgb"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_CANADA = "wireless-usage-data-international-roaming-canada"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_INDIA = "wireless-usage-data-international-roaming-india"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_MEXICO = "wireless-usage-data-international-roaming-mexico"
WIRELESS_USAGE_DATA_NORTHAMERICA = "wireless-usage-data-northamerica"
WIRELESS_USAGE_DATA_OCEANIA = "wireless-usage-data-oceania"
WIRELESS_USAGE_DATA_POOLED = "wireless-usage-data-pooled"
WIRELESS_USAGE_DATA_POOLED_DOWNLINK = "wireless-usage-data-pooled-downlink"
WIRELESS_USAGE_DATA_POOLED_UPLINK = "wireless-usage-data-pooled-uplink"
WIRELESS_USAGE_MRC = "wireless-usage-mrc"
WIRELESS_USAGE_MRC_CUSTOM = "wireless-usage-mrc-custom"
WIRELESS_USAGE_MRC_INDIVIDUAL = "wireless-usage-mrc-individual"
WIRELESS_USAGE_MRC_POOLED = "wireless-usage-mrc-pooled"
WIRELESS_USAGE_MRC_SUSPENDED = "wireless-usage-mrc-suspended"
WIRELESS_USAGE_SMS = "wireless-usage-sms"
WIRELESS_USAGE_VOICE = "wireless-usage-voice"
def __init__(self, version, payload, account_sid):
"""
Initialize the DailyInstance
:returns: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
:rtype: twilio.rest.api.v2010.account.usage.record.daily.DailyInstance
"""
super(DailyInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'api_version': payload.get('api_version'),
'as_of': payload.get('as_of'),
'category': payload.get('category'),
'count': payload.get('count'),
'count_unit': payload.get('count_unit'),
'description': payload.get('description'),
'end_date': deserialize.iso8601_date(payload.get('end_date')),
'price': deserialize.decimal(payload.get('price')),
'price_unit': payload.get('price_unit'),
'start_date': deserialize.iso8601_date(payload.get('start_date')),
'subresource_uris': payload.get('subresource_uris'),
'uri': payload.get('uri'),
'usage': payload.get('usage'),
'usage_unit': payload.get('usage_unit'),
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, }
@property
def account_sid(self):
"""
:returns: The SID of the Account accrued the usage
:rtype: unicode
"""
return self._properties['account_sid']
@property
def api_version(self):
"""
:returns: The API version used to create the resource
:rtype: unicode
"""
return self._properties['api_version']
@property
def as_of(self):
"""
:returns: Usage records up to date as of this timestamp
:rtype: unicode
"""
return self._properties['as_of']
@property
def category(self):
"""
:returns: The category of usage
:rtype: DailyInstance.Category
"""
return self._properties['category']
@property
def count(self):
"""
:returns: The number of usage events
:rtype: unicode
"""
return self._properties['count']
@property
def count_unit(self):
"""
:returns: The units in which count is measured
:rtype: unicode
"""
return self._properties['count_unit']
@property
def description(self):
"""
:returns: A plain-language description of the usage category
:rtype: unicode
"""
return self._properties['description']
@property
def end_date(self):
"""
:returns: The last date for which usage is included in the UsageRecord
:rtype: date
"""
return self._properties['end_date']
@property
def price(self):
"""
:returns: The total price of the usage
:rtype: unicode
"""
return self._properties['price']
@property
def price_unit(self):
"""
:returns: The currency in which `price` is measured
:rtype: unicode
"""
return self._properties['price_unit']
@property
def start_date(self):
"""
:returns: The first date for which usage is included in this UsageRecord
:rtype: date
"""
return self._properties['start_date']
@property
def subresource_uris(self):
"""
:returns: A list of related resources identified by their relative URIs
:rtype: unicode
"""
return self._properties['subresource_uris']
@property
def uri(self):
"""
:returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode
"""
return self._properties['uri']
@property
def usage(self):
"""
:returns: The amount of usage
:rtype: unicode
"""
return self._properties['usage']
@property
def usage_unit(self):
"""
:returns: The units in which usage is measured
:rtype: unicode
"""
return self._properties['usage_unit']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.DailyInstance>'
| 46.14876 | 121 | 0.688574 |
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class DailyList(ListResource):
def __init__(self, version, account_sid):
super(DailyList, self).__init__(version)
self._solution = {'account_sid': account_sid, }
self._uri = '/Accounts/{account_sid}/Usage/Records/Daily.json'.format(**self._solution)
def stream(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset, limit=None,
page_size=None):
limits = self._version.read_limits(limit, page_size)
page = self.page(
category=category,
start_date=start_date,
end_date=end_date,
include_subaccounts=include_subaccounts,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'])
def list(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset, limit=None,
page_size=None):
return list(self.stream(
category=category,
start_date=start_date,
end_date=end_date,
include_subaccounts=include_subaccounts,
limit=limit,
page_size=page_size,
))
def page(self, category=values.unset, start_date=values.unset,
end_date=values.unset, include_subaccounts=values.unset,
page_token=values.unset, page_number=values.unset,
page_size=values.unset):
data = values.of({
'Category': category,
'StartDate': serialize.iso8601_date(start_date),
'EndDate': serialize.iso8601_date(end_date),
'IncludeSubaccounts': include_subaccounts,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(method='GET', uri=self._uri, params=data, )
return DailyPage(self._version, response, self._solution)
def get_page(self, target_url):
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return DailyPage(self._version, response, self._solution)
def __repr__(self):
return '<Twilio.Api.V2010.DailyList>'
class DailyPage(Page):
def __init__(self, version, response, solution):
super(DailyPage, self).__init__(version, response)
self._solution = solution
def get_instance(self, payload):
return DailyInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def __repr__(self):
return '<Twilio.Api.V2010.DailyPage>'
class DailyInstance(InstanceResource):
class Category(object):
A2P_REGISTRATION_FEES = "a2p-registration-fees"
AGENT_CONFERENCE = "agent-conference"
ANSWERING_MACHINE_DETECTION = "answering-machine-detection"
AUTHY_AUTHENTICATIONS = "authy-authentications"
AUTHY_CALLS_OUTBOUND = "authy-calls-outbound"
AUTHY_MONTHLY_FEES = "authy-monthly-fees"
AUTHY_PHONE_INTELLIGENCE = "authy-phone-intelligence"
AUTHY_PHONE_VERIFICATIONS = "authy-phone-verifications"
AUTHY_SMS_OUTBOUND = "authy-sms-outbound"
CALL_PROGESS_EVENTS = "call-progess-events"
CALLERIDLOOKUPS = "calleridlookups"
CALLS = "calls"
CALLS_CLIENT = "calls-client"
CALLS_GLOBALCONFERENCE = "calls-globalconference"
CALLS_INBOUND = "calls-inbound"
CALLS_INBOUND_LOCAL = "calls-inbound-local"
CALLS_INBOUND_MOBILE = "calls-inbound-mobile"
CALLS_INBOUND_TOLLFREE = "calls-inbound-tollfree"
CALLS_OUTBOUND = "calls-outbound"
CALLS_PAY_VERB_TRANSACTIONS = "calls-pay-verb-transactions"
CALLS_RECORDINGS = "calls-recordings"
CALLS_SIP = "calls-sip"
CALLS_SIP_INBOUND = "calls-sip-inbound"
CALLS_SIP_OUTBOUND = "calls-sip-outbound"
CALLS_TRANSFERS = "calls-transfers"
CARRIER_LOOKUPS = "carrier-lookups"
CONVERSATIONS = "conversations"
CONVERSATIONS_API_REQUESTS = "conversations-api-requests"
CONVERSATIONS_CONVERSATION_EVENTS = "conversations-conversation-events"
CONVERSATIONS_ENDPOINT_CONNECTIVITY = "conversations-endpoint-connectivity"
CONVERSATIONS_EVENTS = "conversations-events"
CONVERSATIONS_PARTICIPANT_EVENTS = "conversations-participant-events"
CONVERSATIONS_PARTICIPANTS = "conversations-participants"
CPS = "cps"
FLEX_USAGE = "flex-usage"
FRAUD_LOOKUPS = "fraud-lookups"
GROUP_ROOMS = "group-rooms"
GROUP_ROOMS_DATA_TRACK = "group-rooms-data-track"
GROUP_ROOMS_ENCRYPTED_MEDIA_RECORDED = "group-rooms-encrypted-media-recorded"
GROUP_ROOMS_MEDIA_DOWNLOADED = "group-rooms-media-downloaded"
GROUP_ROOMS_MEDIA_RECORDED = "group-rooms-media-recorded"
GROUP_ROOMS_MEDIA_ROUTED = "group-rooms-media-routed"
GROUP_ROOMS_MEDIA_STORED = "group-rooms-media-stored"
GROUP_ROOMS_PARTICIPANT_MINUTES = "group-rooms-participant-minutes"
GROUP_ROOMS_RECORDED_MINUTES = "group-rooms-recorded-minutes"
IMP_V1_USAGE = "imp-v1-usage"
LOOKUPS = "lookups"
MARKETPLACE = "marketplace"
MARKETPLACE_ALGORITHMIA_NAMED_ENTITY_RECOGNITION = "marketplace-algorithmia-named-entity-recognition"
MARKETPLACE_CADENCE_TRANSCRIPTION = "marketplace-cadence-transcription"
MARKETPLACE_CADENCE_TRANSLATION = "marketplace-cadence-translation"
MARKETPLACE_CAPIO_SPEECH_TO_TEXT = "marketplace-capio-speech-to-text"
MARKETPLACE_CONVRIZA_ABABA = "marketplace-convriza-ababa"
MARKETPLACE_DEEPGRAM_PHRASE_DETECTOR = "marketplace-deepgram-phrase-detector"
MARKETPLACE_DIGITAL_SEGMENT_BUSINESS_INFO = "marketplace-digital-segment-business-info"
MARKETPLACE_FACEBOOK_OFFLINE_CONVERSIONS = "marketplace-facebook-offline-conversions"
MARKETPLACE_GOOGLE_SPEECH_TO_TEXT = "marketplace-google-speech-to-text"
MARKETPLACE_IBM_WATSON_MESSAGE_INSIGHTS = "marketplace-ibm-watson-message-insights"
MARKETPLACE_IBM_WATSON_MESSAGE_SENTIMENT = "marketplace-ibm-watson-message-sentiment"
MARKETPLACE_IBM_WATSON_RECORDING_ANALYSIS = "marketplace-ibm-watson-recording-analysis"
MARKETPLACE_IBM_WATSON_TONE_ANALYZER = "marketplace-ibm-watson-tone-analyzer"
MARKETPLACE_ICEHOOK_SYSTEMS_SCOUT = "marketplace-icehook-systems-scout"
MARKETPLACE_INFOGROUP_DATAAXLE_BIZINFO = "marketplace-infogroup-dataaxle-bizinfo"
MARKETPLACE_KEEN_IO_CONTACT_CENTER_ANALYTICS = "marketplace-keen-io-contact-center-analytics"
MARKETPLACE_MARCHEX_CLEANCALL = "marketplace-marchex-cleancall"
MARKETPLACE_MARCHEX_SENTIMENT_ANALYSIS_FOR_SMS = "marketplace-marchex-sentiment-analysis-for-sms"
MARKETPLACE_MARKETPLACE_NEXTCALLER_SOCIAL_ID = "marketplace-marketplace-nextcaller-social-id"
MARKETPLACE_MOBILE_COMMONS_OPT_OUT_CLASSIFIER = "marketplace-mobile-commons-opt-out-classifier"
MARKETPLACE_NEXIWAVE_VOICEMAIL_TO_TEXT = "marketplace-nexiwave-voicemail-to-text"
MARKETPLACE_NEXTCALLER_ADVANCED_CALLER_IDENTIFICATION = "marketplace-nextcaller-advanced-caller-identification"
MARKETPLACE_NOMOROBO_SPAM_SCORE = "marketplace-nomorobo-spam-score"
MARKETPLACE_PAYFONE_TCPA_COMPLIANCE = "marketplace-payfone-tcpa-compliance"
MARKETPLACE_REMEETING_AUTOMATIC_SPEECH_RECOGNITION = "marketplace-remeeting-automatic-speech-recognition"
MARKETPLACE_TCPA_DEFENSE_SOLUTIONS_BLACKLIST_FEED = "marketplace-tcpa-defense-solutions-blacklist-feed"
MARKETPLACE_TELO_OPENCNAM = "marketplace-telo-opencnam"
MARKETPLACE_TRUECNAM_TRUE_SPAM = "marketplace-truecnam-true-spam"
MARKETPLACE_TWILIO_CALLER_NAME_LOOKUP_US = "marketplace-twilio-caller-name-lookup-us"
MARKETPLACE_TWILIO_CARRIER_INFORMATION_LOOKUP = "marketplace-twilio-carrier-information-lookup"
MARKETPLACE_VOICEBASE_PCI = "marketplace-voicebase-pci"
MARKETPLACE_VOICEBASE_TRANSCRIPTION = "marketplace-voicebase-transcription"
MARKETPLACE_VOICEBASE_TRANSCRIPTION_CUSTOM_VOCABULARY = "marketplace-voicebase-transcription-custom-vocabulary"
MARKETPLACE_WHITEPAGES_PRO_CALLER_IDENTIFICATION = "marketplace-whitepages-pro-caller-identification"
MARKETPLACE_WHITEPAGES_PRO_PHONE_INTELLIGENCE = "marketplace-whitepages-pro-phone-intelligence"
MARKETPLACE_WHITEPAGES_PRO_PHONE_REPUTATION = "marketplace-whitepages-pro-phone-reputation"
MARKETPLACE_WOLFARM_SPOKEN_RESULTS = "marketplace-wolfarm-spoken-results"
MARKETPLACE_WOLFRAM_SHORT_ANSWER = "marketplace-wolfram-short-answer"
MARKETPLACE_YTICA_CONTACT_CENTER_REPORTING_ANALYTICS = "marketplace-ytica-contact-center-reporting-analytics"
MEDIASTORAGE = "mediastorage"
MMS = "mms"
MMS_INBOUND = "mms-inbound"
MMS_INBOUND_LONGCODE = "mms-inbound-longcode"
MMS_INBOUND_SHORTCODE = "mms-inbound-shortcode"
MMS_MESSAGES_CARRIERFEES = "mms-messages-carrierfees"
MMS_OUTBOUND = "mms-outbound"
MMS_OUTBOUND_LONGCODE = "mms-outbound-longcode"
MMS_OUTBOUND_SHORTCODE = "mms-outbound-shortcode"
MONITOR_READS = "monitor-reads"
MONITOR_STORAGE = "monitor-storage"
MONITOR_WRITES = "monitor-writes"
NOTIFY = "notify"
NOTIFY_ACTIONS_ATTEMPTS = "notify-actions-attempts"
NOTIFY_CHANNELS = "notify-channels"
NUMBER_FORMAT_LOOKUPS = "number-format-lookups"
PCHAT = "pchat"
PCHAT_USERS = "pchat-users"
PEER_TO_PEER_ROOMS_PARTICIPANT_MINUTES = "peer-to-peer-rooms-participant-minutes"
PFAX = "pfax"
PFAX_MINUTES = "pfax-minutes"
PFAX_MINUTES_INBOUND = "pfax-minutes-inbound"
PFAX_MINUTES_OUTBOUND = "pfax-minutes-outbound"
PFAX_PAGES = "pfax-pages"
PHONENUMBERS = "phonenumbers"
PHONENUMBERS_CPS = "phonenumbers-cps"
PHONENUMBERS_EMERGENCY = "phonenumbers-emergency"
PHONENUMBERS_LOCAL = "phonenumbers-local"
PHONENUMBERS_MOBILE = "phonenumbers-mobile"
PHONENUMBERS_SETUPS = "phonenumbers-setups"
PHONENUMBERS_TOLLFREE = "phonenumbers-tollfree"
PREMIUMSUPPORT = "premiumsupport"
PROXY = "proxy"
PROXY_ACTIVE_SESSIONS = "proxy-active-sessions"
PSTNCONNECTIVITY = "pstnconnectivity"
PV = "pv"
PV_COMPOSITION_MEDIA_DOWNLOADED = "pv-composition-media-downloaded"
PV_COMPOSITION_MEDIA_ENCRYPTED = "pv-composition-media-encrypted"
PV_COMPOSITION_MEDIA_STORED = "pv-composition-media-stored"
PV_COMPOSITION_MINUTES = "pv-composition-minutes"
PV_RECORDING_COMPOSITIONS = "pv-recording-compositions"
PV_ROOM_PARTICIPANTS = "pv-room-participants"
PV_ROOM_PARTICIPANTS_AU1 = "pv-room-participants-au1"
PV_ROOM_PARTICIPANTS_BR1 = "pv-room-participants-br1"
PV_ROOM_PARTICIPANTS_IE1 = "pv-room-participants-ie1"
PV_ROOM_PARTICIPANTS_JP1 = "pv-room-participants-jp1"
PV_ROOM_PARTICIPANTS_SG1 = "pv-room-participants-sg1"
PV_ROOM_PARTICIPANTS_US1 = "pv-room-participants-us1"
PV_ROOM_PARTICIPANTS_US2 = "pv-room-participants-us2"
PV_ROOMS = "pv-rooms"
PV_SIP_ENDPOINT_REGISTRATIONS = "pv-sip-endpoint-registrations"
RECORDINGS = "recordings"
RECORDINGSTORAGE = "recordingstorage"
ROOMS_GROUP_BANDWIDTH = "rooms-group-bandwidth"
ROOMS_GROUP_MINUTES = "rooms-group-minutes"
ROOMS_PEER_TO_PEER_MINUTES = "rooms-peer-to-peer-minutes"
SHORTCODES = "shortcodes"
SHORTCODES_CUSTOMEROWNED = "shortcodes-customerowned"
SHORTCODES_MMS_ENABLEMENT = "shortcodes-mms-enablement"
SHORTCODES_MPS = "shortcodes-mps"
SHORTCODES_RANDOM = "shortcodes-random"
SHORTCODES_UK = "shortcodes-uk"
SHORTCODES_VANITY = "shortcodes-vanity"
SMALL_GROUP_ROOMS = "small-group-rooms"
SMALL_GROUP_ROOMS_DATA_TRACK = "small-group-rooms-data-track"
SMALL_GROUP_ROOMS_PARTICIPANT_MINUTES = "small-group-rooms-participant-minutes"
SMS = "sms"
SMS_INBOUND = "sms-inbound"
SMS_INBOUND_LONGCODE = "sms-inbound-longcode"
SMS_INBOUND_SHORTCODE = "sms-inbound-shortcode"
SMS_MESSAGES_CARRIERFEES = "sms-messages-carrierfees"
SMS_MESSAGES_FEATURES = "sms-messages-features"
SMS_MESSAGES_FEATURES_SENDERID = "sms-messages-features-senderid"
SMS_OUTBOUND = "sms-outbound"
SMS_OUTBOUND_CONTENT_INSPECTION = "sms-outbound-content-inspection"
SMS_OUTBOUND_LONGCODE = "sms-outbound-longcode"
SMS_OUTBOUND_SHORTCODE = "sms-outbound-shortcode"
SPEECH_RECOGNITION = "speech-recognition"
STUDIO_ENGAGEMENTS = "studio-engagements"
SYNC = "sync"
SYNC_ACTIONS = "sync-actions"
SYNC_ENDPOINT_HOURS = "sync-endpoint-hours"
SYNC_ENDPOINT_HOURS_ABOVE_DAILY_CAP = "sync-endpoint-hours-above-daily-cap"
TASKROUTER_TASKS = "taskrouter-tasks"
TOTALPRICE = "totalprice"
TRANSCRIPTIONS = "transcriptions"
TRUNKING_CPS = "trunking-cps"
TRUNKING_EMERGENCY_CALLS = "trunking-emergency-calls"
TRUNKING_ORIGINATION = "trunking-origination"
TRUNKING_ORIGINATION_LOCAL = "trunking-origination-local"
TRUNKING_ORIGINATION_MOBILE = "trunking-origination-mobile"
TRUNKING_ORIGINATION_TOLLFREE = "trunking-origination-tollfree"
TRUNKING_RECORDINGS = "trunking-recordings"
TRUNKING_SECURE = "trunking-secure"
TRUNKING_TERMINATION = "trunking-termination"
TURNMEGABYTES = "turnmegabytes"
TURNMEGABYTES_AUSTRALIA = "turnmegabytes-australia"
TURNMEGABYTES_BRASIL = "turnmegabytes-brasil"
TURNMEGABYTES_GERMANY = "turnmegabytes-germany"
TURNMEGABYTES_INDIA = "turnmegabytes-india"
TURNMEGABYTES_IRELAND = "turnmegabytes-ireland"
TURNMEGABYTES_JAPAN = "turnmegabytes-japan"
TURNMEGABYTES_SINGAPORE = "turnmegabytes-singapore"
TURNMEGABYTES_USEAST = "turnmegabytes-useast"
TURNMEGABYTES_USWEST = "turnmegabytes-uswest"
TWILIO_INTERCONNECT = "twilio-interconnect"
VERIFY_PUSH = "verify-push"
VIDEO_RECORDINGS = "video-recordings"
VOICE_INSIGHTS = "voice-insights"
VOICE_INSIGHTS_CLIENT_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-client-insights-on-demand-minute"
VOICE_INSIGHTS_PTSN_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-ptsn-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_INTERFACE_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-interface-insights-on-demand-minute"
VOICE_INSIGHTS_SIP_TRUNKING_INSIGHTS_ON_DEMAND_MINUTE = "voice-insights-sip-trunking-insights-on-demand-minute"
WIRELESS = "wireless"
WIRELESS_ORDERS = "wireless-orders"
WIRELESS_ORDERS_ARTWORK = "wireless-orders-artwork"
WIRELESS_ORDERS_BULK = "wireless-orders-bulk"
WIRELESS_ORDERS_ESIM = "wireless-orders-esim"
WIRELESS_ORDERS_STARTER = "wireless-orders-starter"
WIRELESS_USAGE = "wireless-usage"
WIRELESS_USAGE_COMMANDS = "wireless-usage-commands"
WIRELESS_USAGE_COMMANDS_AFRICA = "wireless-usage-commands-africa"
WIRELESS_USAGE_COMMANDS_ASIA = "wireless-usage-commands-asia"
WIRELESS_USAGE_COMMANDS_CENTRALANDSOUTHAMERICA = "wireless-usage-commands-centralandsouthamerica"
WIRELESS_USAGE_COMMANDS_EUROPE = "wireless-usage-commands-europe"
WIRELESS_USAGE_COMMANDS_HOME = "wireless-usage-commands-home"
WIRELESS_USAGE_COMMANDS_NORTHAMERICA = "wireless-usage-commands-northamerica"
WIRELESS_USAGE_COMMANDS_OCEANIA = "wireless-usage-commands-oceania"
WIRELESS_USAGE_COMMANDS_ROAMING = "wireless-usage-commands-roaming"
WIRELESS_USAGE_DATA = "wireless-usage-data"
WIRELESS_USAGE_DATA_AFRICA = "wireless-usage-data-africa"
WIRELESS_USAGE_DATA_ASIA = "wireless-usage-data-asia"
WIRELESS_USAGE_DATA_CENTRALANDSOUTHAMERICA = "wireless-usage-data-centralandsouthamerica"
WIRELESS_USAGE_DATA_CUSTOM_ADDITIONALMB = "wireless-usage-data-custom-additionalmb"
WIRELESS_USAGE_DATA_CUSTOM_FIRST5MB = "wireless-usage-data-custom-first5mb"
WIRELESS_USAGE_DATA_DOMESTIC_ROAMING = "wireless-usage-data-domestic-roaming"
WIRELESS_USAGE_DATA_EUROPE = "wireless-usage-data-europe"
WIRELESS_USAGE_DATA_INDIVIDUAL_ADDITIONALGB = "wireless-usage-data-individual-additionalgb"
WIRELESS_USAGE_DATA_INDIVIDUAL_FIRSTGB = "wireless-usage-data-individual-firstgb"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_CANADA = "wireless-usage-data-international-roaming-canada"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_INDIA = "wireless-usage-data-international-roaming-india"
WIRELESS_USAGE_DATA_INTERNATIONAL_ROAMING_MEXICO = "wireless-usage-data-international-roaming-mexico"
WIRELESS_USAGE_DATA_NORTHAMERICA = "wireless-usage-data-northamerica"
WIRELESS_USAGE_DATA_OCEANIA = "wireless-usage-data-oceania"
WIRELESS_USAGE_DATA_POOLED = "wireless-usage-data-pooled"
WIRELESS_USAGE_DATA_POOLED_DOWNLINK = "wireless-usage-data-pooled-downlink"
WIRELESS_USAGE_DATA_POOLED_UPLINK = "wireless-usage-data-pooled-uplink"
WIRELESS_USAGE_MRC = "wireless-usage-mrc"
WIRELESS_USAGE_MRC_CUSTOM = "wireless-usage-mrc-custom"
WIRELESS_USAGE_MRC_INDIVIDUAL = "wireless-usage-mrc-individual"
WIRELESS_USAGE_MRC_POOLED = "wireless-usage-mrc-pooled"
WIRELESS_USAGE_MRC_SUSPENDED = "wireless-usage-mrc-suspended"
WIRELESS_USAGE_SMS = "wireless-usage-sms"
WIRELESS_USAGE_VOICE = "wireless-usage-voice"
def __init__(self, version, payload, account_sid):
super(DailyInstance, self).__init__(version)
self._properties = {
'account_sid': payload.get('account_sid'),
'api_version': payload.get('api_version'),
'as_of': payload.get('as_of'),
'category': payload.get('category'),
'count': payload.get('count'),
'count_unit': payload.get('count_unit'),
'description': payload.get('description'),
'end_date': deserialize.iso8601_date(payload.get('end_date')),
'price': deserialize.decimal(payload.get('price')),
'price_unit': payload.get('price_unit'),
'start_date': deserialize.iso8601_date(payload.get('start_date')),
'subresource_uris': payload.get('subresource_uris'),
'uri': payload.get('uri'),
'usage': payload.get('usage'),
'usage_unit': payload.get('usage_unit'),
}
self._context = None
self._solution = {'account_sid': account_sid, }
@property
def account_sid(self):
return self._properties['account_sid']
@property
def api_version(self):
return self._properties['api_version']
@property
def as_of(self):
return self._properties['as_of']
@property
def category(self):
return self._properties['category']
@property
def count(self):
return self._properties['count']
@property
def count_unit(self):
return self._properties['count_unit']
@property
def description(self):
return self._properties['description']
@property
def end_date(self):
return self._properties['end_date']
@property
def price(self):
return self._properties['price']
@property
def price_unit(self):
return self._properties['price_unit']
@property
def start_date(self):
return self._properties['start_date']
@property
def subresource_uris(self):
return self._properties['subresource_uris']
@property
def uri(self):
return self._properties['uri']
@property
def usage(self):
return self._properties['usage']
@property
def usage_unit(self):
return self._properties['usage_unit']
def __repr__(self):
return '<Twilio.Api.V2010.DailyInstance>'
| true | true |
f73691c075bf67a53c8250db67cb736af7e0407e | 2,610 | py | Python | plugin/PyDemoPlugin.py | Rucadi/A64DBG_unofficial_git | 10c721912dfe36db2bd8d1f0f3d5c9fec888f86c | [
"MIT"
] | 3 | 2021-07-28T02:53:56.000Z | 2022-03-03T18:11:28.000Z | plugin/PyDemoPlugin.py | Rucadi/A64DBG_unofficial_git | 10c721912dfe36db2bd8d1f0f3d5c9fec888f86c | [
"MIT"
] | null | null | null | plugin/PyDemoPlugin.py | Rucadi/A64DBG_unofficial_git | 10c721912dfe36db2bd8d1f0f3d5c9fec888f86c | [
"MIT"
] | 4 | 2021-07-12T02:37:13.000Z | 2022-02-09T08:43:55.000Z | '''
This is really a very simple a64dbg python adp demo.
'''
# import basic adp definition like error/event code
from adpdef import *
# import adp api entries
from adp import *
import os
# adcpp output handler for api send2py
def adcpp_output(data):
print(data)
# auto attach the Calculator process
def attach_calculator(name):
# execute an lldb command to list the calcular process
procs = command2('plat shell ps -ef|grep %s' % (name))
lines = procs.split('\n')
for l in lines:
if l.find(name) < 0:
continue
print('Checking line: %s' % (l))
items = l.lstrip().split(' ')
if (items[0] == 'root' or items[0] == 'shell'):
continue
for i in range(1, len(items)):
if len(items[i]):
pid = int(items[i])
if pid:
attach(pid)
return True
break
print(procs)
return False
# a64dbg debugengine event for python plugin
def adp_on_event(args):
event = args[adp_inkey_type]
# user clicked the plugin's main menu
if event == adp_event_main_menu:
plat = curPlatform()
print('Current platform is %d.' % (plat))
if plat == adp_local_mac or plat == adp_local_unicornvm:
if not attach_calculator('Calculator'):
# ask user to select a file to debug
debugee = inputPath()
if debugee and os.path.exists(debugee):
start(debugee)
elif plat == adp_remote_ios or plat == adp_remote_unicornvm_ios:
attach_calculator('Calculator')
elif plat == adp_remote_android or plat == adp_remote_unicornvm_android:
attach_calculator('calculator')
return success()
# ask for plugin's menu name
if event == adp_event_menuname:
return success('PyDemoPlugin')
# ask for plugins's version and descripton
if event == adp_event_adpinfo:
return success(('0.1.0', 'This is a simple AD python plugin.'))
# run c/c++ code inside debugee
if event == adp_event_debug_initialized:
# demo for adcpp api
plat = curPlatform()
if plat == adp_local_unicornvm or \
plat == adp_remote_unicornvm_ios or \
plat == adp_remote_unicornvm_android:
runadc(
'''
printf("Hello world from PyDemoPlugin's runadc.\\n");
str2py("adcpp_output", "Hello, ADCpp. My pid in %d.\\n", getpid());
''')
return success()
# print(args)
return failed(adp_err_unimpl)
| 34.8 | 80 | 0.590805 |
from adpdef import *
from adp import *
import os
def adcpp_output(data):
print(data)
def attach_calculator(name):
procs = command2('plat shell ps -ef|grep %s' % (name))
lines = procs.split('\n')
for l in lines:
if l.find(name) < 0:
continue
print('Checking line: %s' % (l))
items = l.lstrip().split(' ')
if (items[0] == 'root' or items[0] == 'shell'):
continue
for i in range(1, len(items)):
if len(items[i]):
pid = int(items[i])
if pid:
attach(pid)
return True
break
print(procs)
return False
def adp_on_event(args):
event = args[adp_inkey_type]
if event == adp_event_main_menu:
plat = curPlatform()
print('Current platform is %d.' % (plat))
if plat == adp_local_mac or plat == adp_local_unicornvm:
if not attach_calculator('Calculator'):
# ask user to select a file to debug
debugee = inputPath()
if debugee and os.path.exists(debugee):
start(debugee)
elif plat == adp_remote_ios or plat == adp_remote_unicornvm_ios:
attach_calculator('Calculator')
elif plat == adp_remote_android or plat == adp_remote_unicornvm_android:
attach_calculator('calculator')
return success()
# ask for plugin's menu name
if event == adp_event_menuname:
return success('PyDemoPlugin')
if event == adp_event_adpinfo:
return success(('0.1.0', 'This is a simple AD python plugin.'))
# run c/c++ code inside debugee
if event == adp_event_debug_initialized:
# demo for adcpp api
plat = curPlatform()
if plat == adp_local_unicornvm or \
plat == adp_remote_unicornvm_ios or \
plat == adp_remote_unicornvm_android:
runadc(
'''
printf("Hello world from PyDemoPlugin's runadc.\\n");
str2py("adcpp_output", "Hello, ADCpp. My pid in %d.\\n", getpid());
''')
return success()
return failed(adp_err_unimpl)
| true | true |
f73691cd9c4d84b3f0a8796e6cae56b20059b9c9 | 521 | py | Python | awwardapp/forms.py | Gakur/awwardsapp1 | 994ed6cdec618c688b2b281e6dbd302a0aab39b1 | [
"MIT"
] | null | null | null | awwardapp/forms.py | Gakur/awwardsapp1 | 994ed6cdec618c688b2b281e6dbd302a0aab39b1 | [
"MIT"
] | null | null | null | awwardapp/forms.py | Gakur/awwardsapp1 | 994ed6cdec618c688b2b281e6dbd302a0aab39b1 | [
"MIT"
] | null | null | null | from .models import Projects,Rates,Comments,Profile
from django import forms
class RateForm(forms.ModelForm):
class Meta:
model=Rates
exclude=['user','project']
class PostForm(forms.ModelForm):
class Meta:
model=Projects
exclude=['user','design','usability','content']
class ReviewForm(forms.ModelForm):
class Meta:
model=Comments
exclude=['user','pro_id']
class UpdateForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['user'] | 20.84 | 55 | 0.652591 | from .models import Projects,Rates,Comments,Profile
from django import forms
class RateForm(forms.ModelForm):
class Meta:
model=Rates
exclude=['user','project']
class PostForm(forms.ModelForm):
class Meta:
model=Projects
exclude=['user','design','usability','content']
class ReviewForm(forms.ModelForm):
class Meta:
model=Comments
exclude=['user','pro_id']
class UpdateForm(forms.ModelForm):
class Meta:
model=Profile
exclude=['user'] | true | true |
f736921b910f40a58969432b92bbceb379a8d101 | 2,290 | py | Python | Examples/Image/Detection/utils/annotations/LabelMeConverter.py | burhandodhy/CNTK | fcdeef63d0192c7b4b7428b14c1f9750d6c1de2e | [
"MIT"
] | 17,702 | 2016-01-25T14:03:01.000Z | 2019-05-06T09:23:41.000Z | Examples/Image/Detection/utils/annotations/LabelMeConverter.py | burhandodhy/CNTK | fcdeef63d0192c7b4b7428b14c1f9750d6c1de2e | [
"MIT"
] | 3,489 | 2016-01-25T13:32:09.000Z | 2019-05-03T11:29:15.000Z | Examples/Image/Detection/utils/annotations/LabelMeConverter.py | burhandodhy/CNTK | fcdeef63d0192c7b4b7428b14c1f9750d6c1de2e | [
"MIT"
] | 5,180 | 2016-01-25T14:02:12.000Z | 2019-05-06T04:24:28.000Z | import os
import xml.etree.ElementTree as ET
import csv
filepath = "C:/Your/Folder/Labelme/Files/" # set path of Labelme XML Files here include slash at end of path
for filename in os.listdir(filepath):
try:
file = filepath + filename
tree = ET.parse(file)
root = tree.getroot()
outputpath = filepath + "Parsed/"
if not os.path.exists(outputpath):
os.makedirs(outputpath)
imagename = os.path.splitext(filename)[0]
## create output files
outputFile_label = outputpath + imagename + ".bboxes.labels.tsv"
outputFile_ROI = outputpath + imagename + ".bboxes.tsv"
labelFile = open(outputFile_label, 'w')
ROIFile = open(outputFile_ROI, 'w')
# loop through to get objects
for child in root:
if str(child.tag) == 'object':
label = ""
xlist = []
ylist = []
# loop through to get name and BBox values from object
for child in child:
if str(child.tag) == 'name':
label = child.text
if str(child.tag) == 'polygon' or str(child.tag) == 'segm':
for child in child:
if str(child.tag) == 'box' or str(child.tag) == 'pt':
for child in child:
if str(child.tag) == 'xmin' or str(child.tag) == 'xmax' or str(child.tag) == 'x':
xlist.append(int(child.text))
if str(child.tag) == 'ymin' or str(child.tag) == 'ymax' or str(child.tag) == 'y':
ylist.append(int(child.text))
xmin = min(xlist)
xmax = max(xlist)
ymin = min(ylist)
ymax = max(ylist)
# output object roi based on cntk format of xmin ymin xmax ymax
obj_ROI = str(xmin) + "\t" + str(ymin) + "\t" +str(xmax) + "\t" + str(ymax)
labelFile.write(label + '\n')
ROIFile.write(obj_ROI + '\n')
labelFile.close()
ROIFile.close()
except Exception:
pass
print("Done")
| 33.676471 | 117 | 0.484279 | import os
import xml.etree.ElementTree as ET
import csv
filepath = "C:/Your/Folder/Labelme/Files/"
for filename in os.listdir(filepath):
try:
file = filepath + filename
tree = ET.parse(file)
root = tree.getroot()
outputpath = filepath + "Parsed/"
if not os.path.exists(outputpath):
os.makedirs(outputpath)
imagename = os.path.splitext(filename)[0]
el = outputpath + imagename + ".bboxes.labels.tsv"
outputFile_ROI = outputpath + imagename + ".bboxes.tsv"
labelFile = open(outputFile_label, 'w')
ROIFile = open(outputFile_ROI, 'w')
for child in root:
if str(child.tag) == 'object':
label = ""
xlist = []
ylist = []
for child in child:
if str(child.tag) == 'name':
label = child.text
if str(child.tag) == 'polygon' or str(child.tag) == 'segm':
for child in child:
if str(child.tag) == 'box' or str(child.tag) == 'pt':
for child in child:
if str(child.tag) == 'xmin' or str(child.tag) == 'xmax' or str(child.tag) == 'x':
xlist.append(int(child.text))
if str(child.tag) == 'ymin' or str(child.tag) == 'ymax' or str(child.tag) == 'y':
ylist.append(int(child.text))
xmin = min(xlist)
xmax = max(xlist)
ymin = min(ylist)
ymax = max(ylist)
obj_ROI = str(xmin) + "\t" + str(ymin) + "\t" +str(xmax) + "\t" + str(ymax)
labelFile.write(label + '\n')
ROIFile.write(obj_ROI + '\n')
labelFile.close()
ROIFile.close()
except Exception:
pass
print("Done")
| true | true |
f736923381dfd439c1e66c2dcea9e5600c2a52c6 | 10,995 | py | Python | ucsmsdk/mometa/mgmt/MgmtEntity.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 78 | 2015-11-30T14:10:05.000Z | 2022-02-13T00:29:08.000Z | ucsmsdk/mometa/mgmt/MgmtEntity.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 113 | 2015-11-20T09:42:46.000Z | 2022-03-16T16:53:29.000Z | ucsmsdk/mometa/mgmt/MgmtEntity.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 86 | 2015-12-12T08:22:18.000Z | 2022-01-23T03:56:34.000Z | """This module contains the general information for MgmtEntity ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class MgmtEntityConsts:
CHASSIS_DEVICE_IO_STATE1_OK = "ok"
CHASSIS_DEVICE_IO_STATE1_OPEN_ERROR = "openError"
CHASSIS_DEVICE_IO_STATE1_READ_ERROR = "readError"
CHASSIS_DEVICE_IO_STATE1_UNKNOWN = "unknown"
CHASSIS_DEVICE_IO_STATE1_WRITE_ERROR = "writeError"
CHASSIS_DEVICE_IO_STATE2_OK = "ok"
CHASSIS_DEVICE_IO_STATE2_OPEN_ERROR = "openError"
CHASSIS_DEVICE_IO_STATE2_READ_ERROR = "readError"
CHASSIS_DEVICE_IO_STATE2_UNKNOWN = "unknown"
CHASSIS_DEVICE_IO_STATE2_WRITE_ERROR = "writeError"
CHASSIS_DEVICE_IO_STATE3_OK = "ok"
CHASSIS_DEVICE_IO_STATE3_OPEN_ERROR = "openError"
CHASSIS_DEVICE_IO_STATE3_READ_ERROR = "readError"
CHASSIS_DEVICE_IO_STATE3_UNKNOWN = "unknown"
CHASSIS_DEVICE_IO_STATE3_WRITE_ERROR = "writeError"
HA_FAILURE_REASON_DB_VERSION_MISMATCH = "DbVersionMismatch"
HA_FAILURE_REASON_PEER_MGMT_SERVICES_UNRESPONSIVE = "PeerMgmtServicesUnresponsive"
HA_FAILURE_REASON_CHASSIS_CONFIG_INCOMPLETE = "chassisConfigIncomplete"
HA_FAILURE_REASON_MGMT_SERVICES_UNRESPONSIVE = "mgmtServicesUnresponsive"
HA_FAILURE_REASON_NETWORK_INTERFACE_DOWN = "networkInterfaceDown"
HA_FAILURE_REASON_NODE_DOWN = "nodeDown"
HA_FAILURE_REASON_NONE = "none"
HA_FAILURE_REASON_PEER_CHASSIS_CONFIG_INCOMPLETE = "peerChassisConfigIncomplete"
HA_FAILURE_REASON_PEER_NODE_DOWN = "peerNodeDown"
HA_READINESS_DOWNGRADED = "downgraded"
HA_READINESS_NOT_READY = "notReady"
HA_READINESS_READY = "ready"
HA_READINESS_UNKNOWN = "unknown"
HA_READY_FALSE = "false"
HA_READY_NO = "no"
HA_READY_TRUE = "true"
HA_READY_YES = "yes"
ID_A = "A"
ID_B = "B"
ID_NONE = "NONE"
LEAD_ID_FOR_AUTO_INSTALL_A = "A"
LEAD_ID_FOR_AUTO_INSTALL_B = "B"
LEAD_ID_FOR_AUTO_INSTALL_NONE = "NONE"
LEADERSHIP_ELECTION_FAILED = "electionFailed"
LEADERSHIP_ELECTION_IN_PROGRESS = "electionInProgress"
LEADERSHIP_INAPPLICABLE = "inapplicable"
LEADERSHIP_PRIMARY = "primary"
LEADERSHIP_SUBORDINATE = "subordinate"
LEADERSHIP_UNKNOWN = "unknown"
MGMT_SERVICES_STATE_DOWN = "down"
MGMT_SERVICES_STATE_SWITCHOVER_IN_PROGRESS = "switchoverInProgress"
MGMT_SERVICES_STATE_UNKNOWN = "unknown"
MGMT_SERVICES_STATE_UNRESPONSIVE = "unresponsive"
MGMT_SERVICES_STATE_UP = "up"
SSH_KEY_STATUS_MATCHED = "matched"
SSH_KEY_STATUS_MISMATCHED = "mismatched"
SSH_KEY_STATUS_NONE = "none"
STATE_DOWN = "down"
STATE_UNKNOWN = "unknown"
STATE_UP = "up"
UMBILICAL_STATE_DEGRADED = "degraded"
UMBILICAL_STATE_FAILED = "failed"
UMBILICAL_STATE_FULL = "full"
UMBILICAL_STATE_UNKNOWN = "unknown"
VERSION_MISMATCH_FALSE = "false"
VERSION_MISMATCH_NO = "no"
VERSION_MISMATCH_TRUE = "true"
VERSION_MISMATCH_YES = "yes"
class MgmtEntity(ManagedObject):
"""This is MgmtEntity class."""
consts = MgmtEntityConsts()
naming_props = set(['id'])
mo_meta = MoMeta("MgmtEntity", "mgmtEntity", "mgmt-entity-[id]", VersionMeta.Version101e, "InputOutput", 0x3f, [], ["read-only"], ['topSystem'], ['faultInst', 'mgmtPmonEntry'], ["Get"])
prop_meta = {
"chassis1": MoPropertyMeta("chassis1", "chassis1", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"chassis2": MoPropertyMeta("chassis2", "chassis2", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"chassis3": MoPropertyMeta("chassis3", "chassis3", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"chassis_device_io_state1": MoPropertyMeta("chassis_device_io_state1", "chassisDeviceIoState1", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ok", "openError", "readError", "unknown", "writeError"], []),
"chassis_device_io_state2": MoPropertyMeta("chassis_device_io_state2", "chassisDeviceIoState2", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ok", "openError", "readError", "unknown", "writeError"], []),
"chassis_device_io_state3": MoPropertyMeta("chassis_device_io_state3", "chassisDeviceIoState3", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ok", "openError", "readError", "unknown", "writeError"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"ha_failure_reason": MoPropertyMeta("ha_failure_reason", "haFailureReason", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["DbVersionMismatch", "PeerMgmtServicesUnresponsive", "chassisConfigIncomplete", "mgmtServicesUnresponsive", "networkInterfaceDown", "nodeDown", "none", "peerChassisConfigIncomplete", "peerNodeDown"], []),
"ha_readiness": MoPropertyMeta("ha_readiness", "haReadiness", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["downgraded", "notReady", "ready", "unknown"], []),
"ha_ready": MoPropertyMeta("ha_ready", "haReady", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x8, None, None, None, ["A", "B", "NONE"], []),
"lead_id_for_auto_install": MoPropertyMeta("lead_id_for_auto_install", "leadIdForAutoInstall", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, None, ["A", "B", "NONE"], []),
"leadership": MoPropertyMeta("leadership", "leadership", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["electionFailed", "electionInProgress", "inapplicable", "primary", "subordinate", "unknown"], []),
"mgmt_services_state": MoPropertyMeta("mgmt_services_state", "mgmtServicesState", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["down", "switchoverInProgress", "unknown", "unresponsive", "up"], []),
"problems": MoPropertyMeta("problems", "problems", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"ssh_auth_keys_csum": MoPropertyMeta("ssh_auth_keys_csum", "sshAuthKeysCsum", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"ssh_auth_keys_size": MoPropertyMeta("ssh_auth_keys_size", "sshAuthKeysSize", "ulong", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"ssh_key_status": MoPropertyMeta("ssh_key_status", "sshKeyStatus", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, ["matched", "mismatched", "none"], ["0-18446744073709551615"]),
"ssh_root_pub_key_csum": MoPropertyMeta("ssh_root_pub_key_csum", "sshRootPubKeyCsum", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"ssh_root_pub_key_size": MoPropertyMeta("ssh_root_pub_key_size", "sshRootPubKeySize", "ulong", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"state": MoPropertyMeta("state", "state", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["down", "unknown", "up"], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"umbilical_state": MoPropertyMeta("umbilical_state", "umbilicalState", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["degraded", "failed", "full", "unknown"], []),
"version_mismatch": MoPropertyMeta("version_mismatch", "versionMismatch", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
}
prop_map = {
"chassis1": "chassis1",
"chassis2": "chassis2",
"chassis3": "chassis3",
"chassisDeviceIoState1": "chassis_device_io_state1",
"chassisDeviceIoState2": "chassis_device_io_state2",
"chassisDeviceIoState3": "chassis_device_io_state3",
"childAction": "child_action",
"dn": "dn",
"haFailureReason": "ha_failure_reason",
"haReadiness": "ha_readiness",
"haReady": "ha_ready",
"id": "id",
"leadIdForAutoInstall": "lead_id_for_auto_install",
"leadership": "leadership",
"mgmtServicesState": "mgmt_services_state",
"problems": "problems",
"rn": "rn",
"sacl": "sacl",
"sshAuthKeysCsum": "ssh_auth_keys_csum",
"sshAuthKeysSize": "ssh_auth_keys_size",
"sshKeyStatus": "ssh_key_status",
"sshRootPubKeyCsum": "ssh_root_pub_key_csum",
"sshRootPubKeySize": "ssh_root_pub_key_size",
"state": "state",
"status": "status",
"umbilicalState": "umbilical_state",
"versionMismatch": "version_mismatch",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.chassis1 = None
self.chassis2 = None
self.chassis3 = None
self.chassis_device_io_state1 = None
self.chassis_device_io_state2 = None
self.chassis_device_io_state3 = None
self.child_action = None
self.ha_failure_reason = None
self.ha_readiness = None
self.ha_ready = None
self.lead_id_for_auto_install = None
self.leadership = None
self.mgmt_services_state = None
self.problems = None
self.sacl = None
self.ssh_auth_keys_csum = None
self.ssh_auth_keys_size = None
self.ssh_key_status = None
self.ssh_root_pub_key_csum = None
self.ssh_root_pub_key_size = None
self.state = None
self.status = None
self.umbilical_state = None
self.version_mismatch = None
ManagedObject.__init__(self, "MgmtEntity", parent_mo_or_dn, **kwargs)
| 64.298246 | 374 | 0.703774 |
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class MgmtEntityConsts:
CHASSIS_DEVICE_IO_STATE1_OK = "ok"
CHASSIS_DEVICE_IO_STATE1_OPEN_ERROR = "openError"
CHASSIS_DEVICE_IO_STATE1_READ_ERROR = "readError"
CHASSIS_DEVICE_IO_STATE1_UNKNOWN = "unknown"
CHASSIS_DEVICE_IO_STATE1_WRITE_ERROR = "writeError"
CHASSIS_DEVICE_IO_STATE2_OK = "ok"
CHASSIS_DEVICE_IO_STATE2_OPEN_ERROR = "openError"
CHASSIS_DEVICE_IO_STATE2_READ_ERROR = "readError"
CHASSIS_DEVICE_IO_STATE2_UNKNOWN = "unknown"
CHASSIS_DEVICE_IO_STATE2_WRITE_ERROR = "writeError"
CHASSIS_DEVICE_IO_STATE3_OK = "ok"
CHASSIS_DEVICE_IO_STATE3_OPEN_ERROR = "openError"
CHASSIS_DEVICE_IO_STATE3_READ_ERROR = "readError"
CHASSIS_DEVICE_IO_STATE3_UNKNOWN = "unknown"
CHASSIS_DEVICE_IO_STATE3_WRITE_ERROR = "writeError"
HA_FAILURE_REASON_DB_VERSION_MISMATCH = "DbVersionMismatch"
HA_FAILURE_REASON_PEER_MGMT_SERVICES_UNRESPONSIVE = "PeerMgmtServicesUnresponsive"
HA_FAILURE_REASON_CHASSIS_CONFIG_INCOMPLETE = "chassisConfigIncomplete"
HA_FAILURE_REASON_MGMT_SERVICES_UNRESPONSIVE = "mgmtServicesUnresponsive"
HA_FAILURE_REASON_NETWORK_INTERFACE_DOWN = "networkInterfaceDown"
HA_FAILURE_REASON_NODE_DOWN = "nodeDown"
HA_FAILURE_REASON_NONE = "none"
HA_FAILURE_REASON_PEER_CHASSIS_CONFIG_INCOMPLETE = "peerChassisConfigIncomplete"
HA_FAILURE_REASON_PEER_NODE_DOWN = "peerNodeDown"
HA_READINESS_DOWNGRADED = "downgraded"
HA_READINESS_NOT_READY = "notReady"
HA_READINESS_READY = "ready"
HA_READINESS_UNKNOWN = "unknown"
HA_READY_FALSE = "false"
HA_READY_NO = "no"
HA_READY_TRUE = "true"
HA_READY_YES = "yes"
ID_A = "A"
ID_B = "B"
ID_NONE = "NONE"
LEAD_ID_FOR_AUTO_INSTALL_A = "A"
LEAD_ID_FOR_AUTO_INSTALL_B = "B"
LEAD_ID_FOR_AUTO_INSTALL_NONE = "NONE"
LEADERSHIP_ELECTION_FAILED = "electionFailed"
LEADERSHIP_ELECTION_IN_PROGRESS = "electionInProgress"
LEADERSHIP_INAPPLICABLE = "inapplicable"
LEADERSHIP_PRIMARY = "primary"
LEADERSHIP_SUBORDINATE = "subordinate"
LEADERSHIP_UNKNOWN = "unknown"
MGMT_SERVICES_STATE_DOWN = "down"
MGMT_SERVICES_STATE_SWITCHOVER_IN_PROGRESS = "switchoverInProgress"
MGMT_SERVICES_STATE_UNKNOWN = "unknown"
MGMT_SERVICES_STATE_UNRESPONSIVE = "unresponsive"
MGMT_SERVICES_STATE_UP = "up"
SSH_KEY_STATUS_MATCHED = "matched"
SSH_KEY_STATUS_MISMATCHED = "mismatched"
SSH_KEY_STATUS_NONE = "none"
STATE_DOWN = "down"
STATE_UNKNOWN = "unknown"
STATE_UP = "up"
UMBILICAL_STATE_DEGRADED = "degraded"
UMBILICAL_STATE_FAILED = "failed"
UMBILICAL_STATE_FULL = "full"
UMBILICAL_STATE_UNKNOWN = "unknown"
VERSION_MISMATCH_FALSE = "false"
VERSION_MISMATCH_NO = "no"
VERSION_MISMATCH_TRUE = "true"
VERSION_MISMATCH_YES = "yes"
class MgmtEntity(ManagedObject):
consts = MgmtEntityConsts()
naming_props = set(['id'])
mo_meta = MoMeta("MgmtEntity", "mgmtEntity", "mgmt-entity-[id]", VersionMeta.Version101e, "InputOutput", 0x3f, [], ["read-only"], ['topSystem'], ['faultInst', 'mgmtPmonEntry'], ["Get"])
prop_meta = {
"chassis1": MoPropertyMeta("chassis1", "chassis1", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"chassis2": MoPropertyMeta("chassis2", "chassis2", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"chassis3": MoPropertyMeta("chassis3", "chassis3", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"chassis_device_io_state1": MoPropertyMeta("chassis_device_io_state1", "chassisDeviceIoState1", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ok", "openError", "readError", "unknown", "writeError"], []),
"chassis_device_io_state2": MoPropertyMeta("chassis_device_io_state2", "chassisDeviceIoState2", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ok", "openError", "readError", "unknown", "writeError"], []),
"chassis_device_io_state3": MoPropertyMeta("chassis_device_io_state3", "chassisDeviceIoState3", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["ok", "openError", "readError", "unknown", "writeError"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"ha_failure_reason": MoPropertyMeta("ha_failure_reason", "haFailureReason", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["DbVersionMismatch", "PeerMgmtServicesUnresponsive", "chassisConfigIncomplete", "mgmtServicesUnresponsive", "networkInterfaceDown", "nodeDown", "none", "peerChassisConfigIncomplete", "peerNodeDown"], []),
"ha_readiness": MoPropertyMeta("ha_readiness", "haReadiness", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["downgraded", "notReady", "ready", "unknown"], []),
"ha_ready": MoPropertyMeta("ha_ready", "haReady", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x8, None, None, None, ["A", "B", "NONE"], []),
"lead_id_for_auto_install": MoPropertyMeta("lead_id_for_auto_install", "leadIdForAutoInstall", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, None, ["A", "B", "NONE"], []),
"leadership": MoPropertyMeta("leadership", "leadership", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["electionFailed", "electionInProgress", "inapplicable", "primary", "subordinate", "unknown"], []),
"mgmt_services_state": MoPropertyMeta("mgmt_services_state", "mgmtServicesState", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["down", "switchoverInProgress", "unknown", "unresponsive", "up"], []),
"problems": MoPropertyMeta("problems", "problems", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"ssh_auth_keys_csum": MoPropertyMeta("ssh_auth_keys_csum", "sshAuthKeysCsum", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"ssh_auth_keys_size": MoPropertyMeta("ssh_auth_keys_size", "sshAuthKeysSize", "ulong", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"ssh_key_status": MoPropertyMeta("ssh_key_status", "sshKeyStatus", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, ["matched", "mismatched", "none"], ["0-18446744073709551615"]),
"ssh_root_pub_key_csum": MoPropertyMeta("ssh_root_pub_key_csum", "sshRootPubKeyCsum", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"ssh_root_pub_key_size": MoPropertyMeta("ssh_root_pub_key_size", "sshRootPubKeySize", "ulong", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"state": MoPropertyMeta("state", "state", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["down", "unknown", "up"], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"umbilical_state": MoPropertyMeta("umbilical_state", "umbilicalState", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["degraded", "failed", "full", "unknown"], []),
"version_mismatch": MoPropertyMeta("version_mismatch", "versionMismatch", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
}
prop_map = {
"chassis1": "chassis1",
"chassis2": "chassis2",
"chassis3": "chassis3",
"chassisDeviceIoState1": "chassis_device_io_state1",
"chassisDeviceIoState2": "chassis_device_io_state2",
"chassisDeviceIoState3": "chassis_device_io_state3",
"childAction": "child_action",
"dn": "dn",
"haFailureReason": "ha_failure_reason",
"haReadiness": "ha_readiness",
"haReady": "ha_ready",
"id": "id",
"leadIdForAutoInstall": "lead_id_for_auto_install",
"leadership": "leadership",
"mgmtServicesState": "mgmt_services_state",
"problems": "problems",
"rn": "rn",
"sacl": "sacl",
"sshAuthKeysCsum": "ssh_auth_keys_csum",
"sshAuthKeysSize": "ssh_auth_keys_size",
"sshKeyStatus": "ssh_key_status",
"sshRootPubKeyCsum": "ssh_root_pub_key_csum",
"sshRootPubKeySize": "ssh_root_pub_key_size",
"state": "state",
"status": "status",
"umbilicalState": "umbilical_state",
"versionMismatch": "version_mismatch",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.chassis1 = None
self.chassis2 = None
self.chassis3 = None
self.chassis_device_io_state1 = None
self.chassis_device_io_state2 = None
self.chassis_device_io_state3 = None
self.child_action = None
self.ha_failure_reason = None
self.ha_readiness = None
self.ha_ready = None
self.lead_id_for_auto_install = None
self.leadership = None
self.mgmt_services_state = None
self.problems = None
self.sacl = None
self.ssh_auth_keys_csum = None
self.ssh_auth_keys_size = None
self.ssh_key_status = None
self.ssh_root_pub_key_csum = None
self.ssh_root_pub_key_size = None
self.state = None
self.status = None
self.umbilical_state = None
self.version_mismatch = None
ManagedObject.__init__(self, "MgmtEntity", parent_mo_or_dn, **kwargs)
| true | true |
f736929e011696afd656cd65172379c9bf511215 | 1,057 | py | Python | tests/test_TestSong.py | fabian-thomas/python-odesli | 1cac20dfa2fc88ea855028124265dbde611e3cff | [
"MIT"
] | null | null | null | tests/test_TestSong.py | fabian-thomas/python-odesli | 1cac20dfa2fc88ea855028124265dbde611e3cff | [
"MIT"
] | null | null | null | tests/test_TestSong.py | fabian-thomas/python-odesli | 1cac20dfa2fc88ea855028124265dbde611e3cff | [
"MIT"
] | null | null | null | import unittest
from odesli.Odesli import Odesli
from odesli.entity.song.Song import Song
# Reference Song entity the Odesli API is expected to return for this track's
# YouTube provider entry: (id, provider, title, artist, thumbnail URL,
# thumbnail width/height, provider -> link map).
EXPECTED_YOUTUBE_SONG = Song('VHb_XIql_gU', 'youtube', 'Kids', 'MGMT - Topic', 'https://i.ytimg.com/vi/VHb_XIql_gU/hqdefault.jpg', 480, 360, { 'youtube': 'https://www.youtube.com/watch?v=VHb_XIql_gU', 'youtubeMusic': 'https://music.youtube.com/watch?v=VHb_XIql_gU' })
class TestSong(unittest.TestCase):
    """Integration tests: resolve one known Spotify track through the Odesli API."""

    def check(self, result):
        """Assert that *result* describes the expected track and its YouTube entry."""
        self.assertEqual(result.songLink, 'https://song.link/s/1jJci4qxiYcOHhQR247rEU')
        self.assertEqual(result.song.getType(), 'song')
        self.assertEqual(result.song.provider, 'spotify')
        self.assertEqual(result.songsByProvider['youtube'], EXPECTED_YOUTUBE_SONG)

    def test_ByUrl(self):
        """Look the track up by its Spotify share URL."""
        client = Odesli()
        result = client.getByUrl('https://open.spotify.com/track/1jJci4qxiYcOHhQR247rEU')
        self.check(result)

    def test_ById(self):
        """Look the track up by provider-specific id, provider and entity type."""
        client = Odesli()
        result = client.getById('1jJci4qxiYcOHhQR247rEU', 'spotify', 'song')
        self.check(result)
# Allow running this test module directly (python test_TestSong.py).
if __name__ == '__main__':
    unittest.main()
| 37.75 | 267 | 0.687796 | import unittest
from odesli.Odesli import Odesli
from odesli.entity.song.Song import Song
EXPECTED_YOUTUBE_SONG = Song('VHb_XIql_gU', 'youtube', 'Kids', 'MGMT - Topic', 'https://i.ytimg.com/vi/VHb_XIql_gU/hqdefault.jpg', 480, 360, { 'youtube': 'https://www.youtube.com/watch?v=VHb_XIql_gU', 'youtubeMusic': 'https://music.youtube.com/watch?v=VHb_XIql_gU' })
class TestSong(unittest.TestCase):
def check(self, result):
self.assertEqual(result.songLink, 'https://song.link/s/1jJci4qxiYcOHhQR247rEU')
self.assertEqual(result.song.getType(), 'song')
self.assertEqual(result.song.provider, 'spotify')
self.assertEqual(result.songsByProvider['youtube'], EXPECTED_YOUTUBE_SONG)
def test_ByUrl(self):
o = Odesli()
song = o.getByUrl('https://open.spotify.com/track/1jJci4qxiYcOHhQR247rEU')
self.check(song)
def test_ById(self):
o = Odesli()
song = o.getById('1jJci4qxiYcOHhQR247rEU', 'spotify', 'song')
self.check(song)
if __name__ == '__main__':
unittest.main()
| true | true |
f736936449f80682b0362945dd83c40eb0e8b0bd | 25,824 | py | Python | examples/distillation/distiller.py | deepbluesea/transformers | 11a2317986aad6e9a72f542e31344cfb7c94cbab | [
"Apache-2.0"
] | 1 | 2020-03-02T14:09:14.000Z | 2020-03-02T14:09:14.000Z | examples/distillation/distiller.py | deepbluesea/transformers | 11a2317986aad6e9a72f542e31344cfb7c94cbab | [
"Apache-2.0"
] | 1 | 2021-06-02T00:34:50.000Z | 2021-06-02T00:34:50.000Z | examples/distillation/distiller.py | deepbluesea/transformers | 11a2317986aad6e9a72f542e31344cfb7c94cbab | [
"Apache-2.0"
] | 1 | 2021-09-21T12:02:15.000Z | 2021-09-21T12:02:15.000Z | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The distiller to distil the student.
Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
"""
import os
import math
import psutil
import time
from tensorboardX import SummaryWriter
from tqdm import trange, tqdm
import numpy as np
import psutil
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import RandomSampler, BatchSampler, DataLoader
from transformers import WarmupLinearSchedule
from utils import logger
from lm_seqs_dataset import LmSeqsDataset
from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups
class Distiller:
    def __init__(self,
                 params: dict,
                 dataset: LmSeqsDataset,
                 token_probs: torch.tensor,
                 student: nn.Module,
                 teacher: nn.Module):
        """
        Set up everything the distillation loop needs: data loading, loss
        coefficients, MLM masking parameters, optimizer + LR schedule, and
        (optionally) fp16/apex and distributed wrappers.

        Input:
        ------
        params: the parsed command-line arguments (argparse namespace).
        dataset: `LmSeqsDataset` - the tokenized training corpus.
        token_probs: `torch.tensor` - per-token weights used to bias MLM masking.
        student: `nn.Module` - the model being trained.
        teacher: `nn.Module` - the model being distilled from.
        """
        logger.info('Initializing Distiller')
        self.params = params
        self.dump_path = params.dump_path
        self.multi_gpu = params.multi_gpu
        self.fp16 = params.fp16

        self.student = student
        self.teacher = teacher

        self.student_config = student.config
        self.vocab_size = student.config.vocab_size

        # Sampling: distributed sampler across GPUs; optionally bucket
        # sequences of similar lengths together to limit padding.
        if params.n_gpu <= 1:
            sampler = RandomSampler(dataset)
        else:
            sampler = DistributedSampler(dataset)

        if params.group_by_size:
            groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size)
            sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size)
        else:
            sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False)

        self.dataloader = DataLoader(dataset=dataset,
                                     batch_sampler=sampler,
                                     collate_fn=dataset.batch_sequences)

        # Loss mixing coefficients: distillation CE, MLM/CLM, MSE, cosine.
        self.temperature = params.temperature
        assert self.temperature > 0.

        self.alpha_ce = params.alpha_ce
        self.alpha_mlm = params.alpha_mlm
        self.alpha_clm = params.alpha_clm
        self.alpha_mse = params.alpha_mse
        self.alpha_cos = params.alpha_cos

        self.mlm = params.mlm
        if self.mlm:
            logger.info(f'Using MLM loss for LM step.')
            self.mlm_mask_prop = params.mlm_mask_prop
            assert 0.0 <= self.mlm_mask_prop <= 1.0
            assert params.word_mask + params.word_keep + params.word_rand == 1.0
            # Probabilities of replacing a selected token by [MASK] /
            # keeping it / substituting a random token (BERT-style split).
            self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand])
            self.pred_probs = self.pred_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else self.pred_probs
            self.token_probs = token_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else token_probs
            if self.fp16:
                self.pred_probs = self.pred_probs.half()
                self.token_probs = self.token_probs.half()
        else:
            logger.info(f'Using CLM loss for LM step.')

        # Running counters and last-seen loss values (for tensorboard).
        self.epoch = 0
        self.n_iter = 0
        self.n_total_iter = 0
        self.n_sequences_epoch = 0
        self.total_loss_epoch = 0
        self.last_loss = 0
        self.last_loss_ce = 0
        self.last_loss_mlm = 0
        self.last_loss_clm = 0
        if self.alpha_mse > 0.: self.last_loss_mse = 0
        if self.alpha_cos > 0.: self.last_loss_cos = 0
        self.last_log = 0

        self.ce_loss_fct = nn.KLDivLoss(reduction='batchmean')
        self.lm_loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
        if self.alpha_mse > 0.:
            self.mse_loss_fct = nn.MSELoss(reduction='sum')
        if self.alpha_cos > 0.:
            self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction='mean')

        logger.info('--- Initializing model optimizer')
        assert params.gradient_accumulation_steps >= 1
        self.num_steps_epoch = len(self.dataloader)
        num_train_optimization_steps = int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1

        # No weight decay on biases and LayerNorm weights.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': params.weight_decay},
            {'params': [p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': 0.0}
        ]
        logger.info("------ Number of trainable parameters (student): %i" % sum([p.numel() for p in self.student.parameters() if p.requires_grad]))
        logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()]))
        self.optimizer = AdamW(optimizer_grouped_parameters,
                               lr=params.learning_rate,
                               eps=params.adam_epsilon,
                               betas=(0.9, 0.98))

        warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop)
        self.scheduler = WarmupLinearSchedule(self.optimizer,
                                              warmup_steps=warmup_steps,
                                              t_total=num_train_optimization_steps)

        if self.fp16:
            try:
                from apex import amp
            except ImportError:
                raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
            logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level")
            # amp.initialize must wrap the model/optimizer BEFORE the
            # DistributedDataParallel wrapping below.
            self.student, self.optimizer = amp.initialize(self.student,
                                                          self.optimizer,
                                                          opt_level=self.params.fp16_opt_level)
            self.teacher = self.teacher.half()

        if self.multi_gpu:
            if self.fp16:
                from apex.parallel import DistributedDataParallel
                logger.info("Using apex.parallel.DistributedDataParallel for distributed training.")
                self.student = DistributedDataParallel(self.student)
            else:
                from torch.nn.parallel import DistributedDataParallel
                logger.info("Using nn.parallel.DistributedDataParallel for distributed training.")
                self.student = DistributedDataParallel(self.student,
                                                       device_ids=[params.local_rank],
                                                       output_device=params.local_rank,
                                                       find_unused_parameters=True)

        self.is_master = params.is_master
        if self.is_master:
            # Only the master process writes logs/checkpoints/tensorboard.
            logger.info('--- Initializing Tensorboard')
            self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, 'log', 'train'))
            self.tensorboard.add_text(tag='config/training', text_string=str(self.params), global_step=0)
            self.tensorboard.add_text(tag='config/student', text_string=str(self.student_config), global_step=0)
    def prepare_batch_mlm(self,
                          batch):
        """
        Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the masked label for MLM.

        Input:
        ------
        batch: `Tuple`
            token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded.
            lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch.

        Output:
        -------
        token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM.
        attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention.
        mlm_labels: `torch.tensor(bs, seq_length)` - The masked languge modeling labels. There is a -1 where there is nothing to predict.
        """
        token_ids, lengths = batch
        token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths)
        assert token_ids.size(0) == lengths.size(0)
        # True on real tokens, False on padding positions.
        attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None])

        bs, max_seq_len = token_ids.size()
        mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids)

        # Sample positions to mask, weighted by `self.token_probs`
        # (indexed by token id), under the overall mlm_mask_prop budget.
        x_prob = self.token_probs[token_ids.flatten()]
        n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item())
        tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False)
        pred_mask = torch.zeros(bs * max_seq_len, dtype=torch.bool, device=token_ids.device)  # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility
        pred_mask[tgt_ids] = 1
        pred_mask = pred_mask.view(bs, max_seq_len)

        # Never predict padding tokens.
        pred_mask[token_ids == self.params.special_tok_ids['pad_token']] = 0

        # mask a number of words == 0 [8] (faster with fp16)
        if self.fp16:
            n1 = pred_mask.sum().item()
            if n1 > 8:
                pred_mask = pred_mask.view(-1)
                n2 = max(n1 % 8, 8 * (n1 // 8))
                if n2 != n1:
                    # Un-select the excess positions so the count is a multiple of 8.
                    pred_mask[torch.nonzero(pred_mask).view(-1)[:n1-n2]] = 0
                pred_mask = pred_mask.view(bs, max_seq_len)
                assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item()

        # For each selected position: replace by [MASK] (probs==0), keep the
        # original token (probs==1), or substitute a random id (probs==2),
        # with probabilities from `self.pred_probs`.
        _token_ids_real = token_ids[pred_mask]
        _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size)
        _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids['mask_token'])
        probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True)
        _token_ids = _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long()
        token_ids = token_ids.masked_scatter(pred_mask, _token_ids)

        mlm_labels[~pred_mask] = -1  # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility

        # sanity checks
        assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size

        return token_ids, attn_mask, mlm_labels
def prepare_batch_clm(self,
batch):
"""
Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the labels for CLM.
Input:
------
batch: `Tuple`
token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded.
lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch.
Output:
-------
token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM.
attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention.
clm_labels: `torch.tensor(bs, seq_length)` - The causal languge modeling labels. There is a -1 where there is nothing to predict.
"""
token_ids, lengths = batch
token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths)
assert token_ids.size(0) == lengths.size(0)
attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None])
clm_labels = token_ids.new(token_ids.size()).copy_(token_ids)
clm_labels[~attn_mask] = -1 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility
# sanity checks
assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size
return token_ids, attn_mask, clm_labels
    def round_batch(self,
                    x: torch.tensor,
                    lengths: torch.tensor):
        """
        For float16 only.
        Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8.
        (Multiple-of-8 dimensions are required to engage NVIDIA tensor cores — TODO confirm this is the motivation.)

        Input:
        ------
        x: `torch.tensor(bs, seq_length)` - The token ids.
        lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch.

        Output:
        -------
        x:  `torch.tensor(new_bs, new_seq_length)` - The updated token ids.
        lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths.
        """
        # No-op outside fp16, or when the batch is too small to round down.
        if not self.fp16 or len(lengths) < 8:
            return x, lengths

        # number of sentences == 0 [8]
        bs1 = len(lengths)
        bs2 = 8 * (bs1 // 8)
        assert bs2 > 0 and bs2 % 8 == 0
        if bs1 != bs2:
            # Randomly drop sentences until the batch size is a multiple of 8.
            idx = torch.randperm(bs1)[:bs2]
            lengths = lengths[idx]
            slen = lengths.max().item()
            x = x[idx, :slen]
        else:
            idx = None

        # sequence length == 0 [8]
        ml1 = x.size(1)
        if ml1 % 8 != 0:
            # Right-pad up to the next multiple of 8; CLM has no pad token,
            # so the unk token is used as filler there.
            pad = 8 - (ml1 % 8)
            ml2 = ml1 + pad
            if self.mlm:
                pad_id = self.params.special_tok_ids['pad_token']
            else:
                pad_id = self.params.special_tok_ids['unk_token']
            padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id)
            x = torch.cat([x, padding_tensor], 1)
            assert x.size() == (bs2, ml2)

        assert x.size(0) % 8 == 0
        assert x.size(1) % 8 == 0
        return x, lengths
    def train(self):
        """
        The real training loop: iterate over epochs and batches, run one
        distillation step per batch, and save a final `pytorch_model.bin`.
        """
        if self.is_master: logger.info('Starting training')
        self.last_log = time.time()
        self.student.train()
        self.teacher.eval()  # teacher is used for inference only (see `step`)

        for _ in range(self.params.n_epoch):
            if self.is_master: logger.info(f'--- Starting epoch {self.epoch}/{self.params.n_epoch-1}')
            if self.multi_gpu:
                # Keep the distributed workers in lock-step at epoch boundaries.
                torch.distributed.barrier()

            iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0])
            for batch in iter_bar:
                if self.params.n_gpu > 0:
                    batch = tuple(t.to(f'cuda:{self.params.local_rank}') for t in batch)

                # Build inputs/labels for the configured LM objective.
                if self.mlm:
                    token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch)
                else:
                    token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch)
                self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels)

                iter_bar.update()
                iter_bar.set_postfix({'Last_loss': f'{self.last_loss:.2f}',
                                      'Avg_cum_loss': f'{self.total_loss_epoch/self.n_iter:.2f}'})
            iter_bar.close()

            if self.is_master: logger.info(f'--- Ending epoch {self.epoch}/{self.params.n_epoch-1}')
            self.end_epoch()

        if self.is_master:
            logger.info(f'Save very last checkpoint as `pytorch_model.bin`.')
            self.save_checkpoint(checkpoint_name=f'pytorch_model.bin')
            logger.info('Training is finished')
    def step(self,
             input_ids: torch.tensor,
             attention_mask: torch.tensor,
             lm_labels: torch.tensor):
        """
        One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation),
        and possibly a parameter update (depending on the gradient accumulation).

        Input:
        ------
        input_ids: `torch.tensor(bs, seq_length)` - The token ids.
        attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention.
        lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM).
        """
        # Teacher runs under no_grad — only the student receives gradients.
        if self.mlm:
            s_logits, s_hidden_states = self.student(input_ids=input_ids, attention_mask=attention_mask)  # (bs, seq_length, voc_size)
            with torch.no_grad():
                t_logits, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=attention_mask)  # (bs, seq_length, voc_size)
        else:
            s_logits, _, s_hidden_states = self.student(input_ids=input_ids, attention_mask=None)  # (bs, seq_length, voc_size)
            with torch.no_grad():
                t_logits, _, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=None)  # (bs, seq_length, voc_size)
        assert s_logits.size() == t_logits.size()

        # Select only the positions that count for the distillation loss:
        # either the masked/labelled positions, or every non-padding position.
        #https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100
        #https://github.com/peterliht/knowledge-distillation-pytorch/issues/2
        if self.params.restrict_ce_to_mask:
            mask = (lm_labels>-1).unsqueeze(-1).expand_as(s_logits)  # (bs, seq_lenth, voc_size)
        else:
            mask = attention_mask.unsqueeze(-1).expand_as(s_logits)  # (bs, seq_lenth, voc_size)
        s_logits_slct = torch.masked_select(s_logits, mask)  # (bs * seq_length * voc_size) modulo the 1s in mask
        s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1))  # (bs * seq_length, voc_size) modulo the 1s in mask
        t_logits_slct = torch.masked_select(t_logits, mask)  # (bs * seq_length * voc_size) modulo the 1s in mask
        t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1))  # (bs * seq_length, voc_size) modulo the 1s in mask
        assert t_logits_slct.size() == s_logits_slct.size()

        # Soft-target KL distillation loss, rescaled by temperature**2.
        loss_ce = self.ce_loss_fct(F.log_softmax(s_logits_slct/self.temperature, dim=-1),
                                   F.softmax(t_logits_slct/self.temperature, dim=-1)) * (self.temperature)**2
        loss = self.alpha_ce*loss_ce
        if self.alpha_mlm > 0.:
            loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1))
            loss += self.alpha_mlm * loss_mlm
        if self.alpha_clm > 0.:
            # Shift so that tokens < n predict token n.
            shift_logits = s_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                                        shift_labels.view(-1))
            loss += self.alpha_clm * loss_clm

        if self.alpha_mse > 0.:
            loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct)/s_logits_slct.size(0)  # Reproducing batchmean reduction
            loss += self.alpha_mse * loss_mse
        if self.alpha_cos > 0.:
            # Cosine alignment between student and teacher last hidden states.
            s_hidden_states = s_hidden_states[-1]  # (bs, seq_length, dim)
            t_hidden_states = t_hidden_states[-1]  # (bs, seq_length, dim)
            mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states)  # (bs, seq_length, dim)
            assert s_hidden_states.size() == t_hidden_states.size()

            dim = s_hidden_states.size(-1)
            s_hidden_states_slct = torch.masked_select(s_hidden_states, mask)  # (bs * seq_length * dim)
            s_hidden_states_slct = s_hidden_states_slct.view(-1, dim)  # (bs * seq_length, dim)
            t_hidden_states_slct = torch.masked_select(t_hidden_states, mask)  # (bs * seq_length * dim)
            t_hidden_states_slct = t_hidden_states_slct.view(-1, dim)  # (bs * seq_length, dim)

            # Target of 1 == "make the pair similar" for CosineEmbeddingLoss.
            target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1)  # (bs * seq_length,)
            loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target)
            loss += self.alpha_cos * loss_cos

        # Bookkeeping for tensorboard, then backward/update.
        self.total_loss_epoch += loss.item()
        self.last_loss = loss.item()
        self.last_loss_ce = loss_ce.item()
        if self.alpha_mlm > 0.:
            self.last_loss_mlm = loss_mlm.item()
        if self.alpha_clm > 0.:
            self.last_loss_clm = loss_clm.item()
        if self.alpha_mse > 0.:
            self.last_loss_mse = loss_mse.item()
        if self.alpha_cos > 0.:
            self.last_loss_cos = loss_cos.item()

        self.optimize(loss)

        self.n_sequences_epoch += input_ids.size(0)
def optimize(self,
loss):
"""
Normalization on the loss (gradient accumulation or distributed training), followed by
backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation).
Also update the metrics for tensorboard.
"""
# Check for NaN
if (loss != loss).data.any():
logger.error('NaN detected')
exit()
if self.multi_gpu:
loss = loss.mean()
if self.params.gradient_accumulation_steps > 1:
loss = loss / self.params.gradient_accumulation_steps
if self.fp16:
from apex import amp
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
self.iter()
if self.n_iter % self.params.gradient_accumulation_steps == 0:
if self.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm)
self.optimizer.step()
self.optimizer.zero_grad()
self.scheduler.step()
def iter(self):
"""
Update global counts, write to tensorboard and save checkpoint.
"""
self.n_iter += 1
self.n_total_iter += 1
if self.n_total_iter % self.params.log_interval == 0:
self.log_tensorboard()
self.last_log = time.time()
if self.n_total_iter % self.params.checkpoint_interval == 0:
self.save_checkpoint()
    def log_tensorboard(self):
        """
        Log into tensorboard. Only by the master process.
        """
        if not self.is_master:
            return

        # Per-parameter statistics (weights, and gradients when present).
        for param_name, param in self.student.named_parameters():
            self.tensorboard.add_scalar(tag='parameter_mean/' + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter)
            self.tensorboard.add_scalar(tag='parameter_std/' + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter)
            if param.grad is None:
                continue
            self.tensorboard.add_scalar(tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(),global_step=self.n_total_iter)
            self.tensorboard.add_scalar(tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter)

        # Loss components — only the enabled ones have been tracked.
        self.tensorboard.add_scalar(tag="losses/cum_avg_loss_epoch", scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.n_total_iter)
        self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter)
        self.tensorboard.add_scalar(tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter)
        if self.alpha_mlm > 0.:
            self.tensorboard.add_scalar(tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter)
        if self.alpha_clm > 0.:
            self.tensorboard.add_scalar(tag="losses/loss_clm", scalar_value=self.last_loss_clm, global_step=self.n_total_iter)
        if self.alpha_mse > 0.:
            self.tensorboard.add_scalar(tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter)
        if self.alpha_cos > 0.:
            self.tensorboard.add_scalar(tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter)

        # Learning rate, host memory usage (MB) and seconds since last log.
        self.tensorboard.add_scalar(tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter)

        self.tensorboard.add_scalar(tag="global/memory_usage", scalar_value=psutil.virtual_memory()._asdict()['used']/1_000_000, global_step=self.n_total_iter)
        self.tensorboard.add_scalar(tag="global/speed", scalar_value=time.time()-self.last_log, global_step=self.n_total_iter)
def end_epoch(self):
"""
Finally arrived at the end of epoch (full pass on dataset).
Do some tensorboard logging and checkpoint saving.
"""
logger.info(f'{self.n_sequences_epoch} sequences have been trained during this epoch.')
if self.is_master:
self.save_checkpoint(checkpoint_name=f'model_epoch_{self.epoch}.pth')
self.tensorboard.add_scalar(tag='epoch/loss', scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.epoch)
self.epoch += 1
self.n_sequences_epoch = 0
self.n_iter = 0
self.total_loss_epoch = 0
def save_checkpoint(self,
checkpoint_name: str = 'checkpoint.pth'):
"""
Save the current state. Only by the master process.
"""
if not self.is_master:
return
mdl_to_save = self.student.module if hasattr(self.student, 'module') else self.student
mdl_to_save.config.save_pretrained(self.dump_path)
state_dict = mdl_to_save.state_dict()
torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))
| 48 | 163 | 0.619114 |
import os
import math
import psutil
import time
from tensorboardX import SummaryWriter
from tqdm import trange, tqdm
import numpy as np
import psutil
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import RandomSampler, BatchSampler, DataLoader
from transformers import WarmupLinearSchedule
from utils import logger
from lm_seqs_dataset import LmSeqsDataset
from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups
class Distiller:
def __init__(self,
params: dict,
dataset: LmSeqsDataset,
token_probs: torch.tensor,
student: nn.Module,
teacher: nn.Module):
logger.info('Initializing Distiller')
self.params = params
self.dump_path = params.dump_path
self.multi_gpu = params.multi_gpu
self.fp16 = params.fp16
self.student = student
self.teacher = teacher
self.student_config = student.config
self.vocab_size = student.config.vocab_size
if params.n_gpu <= 1:
sampler = RandomSampler(dataset)
else:
sampler = DistributedSampler(dataset)
if params.group_by_size:
groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size)
sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size)
else:
sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False)
self.dataloader = DataLoader(dataset=dataset,
batch_sampler=sampler,
collate_fn=dataset.batch_sequences)
self.temperature = params.temperature
assert self.temperature > 0.
self.alpha_ce = params.alpha_ce
self.alpha_mlm = params.alpha_mlm
self.alpha_clm = params.alpha_clm
self.alpha_mse = params.alpha_mse
self.alpha_cos = params.alpha_cos
self.mlm = params.mlm
if self.mlm:
logger.info(f'Using MLM loss for LM step.')
self.mlm_mask_prop = params.mlm_mask_prop
assert 0.0 <= self.mlm_mask_prop <= 1.0
assert params.word_mask + params.word_keep + params.word_rand == 1.0
self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand])
self.pred_probs = self.pred_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else self.pred_probs
self.token_probs = token_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else token_probs
if self.fp16:
self.pred_probs = self.pred_probs.half()
self.token_probs = self.token_probs.half()
else:
logger.info(f'Using CLM loss for LM step.')
self.epoch = 0
self.n_iter = 0
self.n_total_iter = 0
self.n_sequences_epoch = 0
self.total_loss_epoch = 0
self.last_loss = 0
self.last_loss_ce = 0
self.last_loss_mlm = 0
self.last_loss_clm = 0
if self.alpha_mse > 0.: self.last_loss_mse = 0
if self.alpha_cos > 0.: self.last_loss_cos = 0
self.last_log = 0
self.ce_loss_fct = nn.KLDivLoss(reduction='batchmean')
self.lm_loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
if self.alpha_mse > 0.:
self.mse_loss_fct = nn.MSELoss(reduction='sum')
if self.alpha_cos > 0.:
self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction='mean')
logger.info('--- Initializing model optimizer')
assert params.gradient_accumulation_steps >= 1
self.num_steps_epoch = len(self.dataloader)
num_train_optimization_steps = int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': params.weight_decay},
{'params': [p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': 0.0}
]
logger.info("------ Number of trainable parameters (student): %i" % sum([p.numel() for p in self.student.parameters() if p.requires_grad]))
logger.info("------ Number of parameters (student): %i" % sum([p.numel() for p in self.student.parameters()]))
self.optimizer = AdamW(optimizer_grouped_parameters,
lr=params.learning_rate,
eps=params.adam_epsilon,
betas=(0.9, 0.98))
warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop)
self.scheduler = WarmupLinearSchedule(self.optimizer,
warmup_steps=warmup_steps,
t_total=num_train_optimization_steps)
if self.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
logger.info(f"Using fp16 training: {self.params.fp16_opt_level} level")
self.student, self.optimizer = amp.initialize(self.student,
self.optimizer,
opt_level=self.params.fp16_opt_level)
self.teacher = self.teacher.half()
if self.multi_gpu:
if self.fp16:
from apex.parallel import DistributedDataParallel
logger.info("Using apex.parallel.DistributedDataParallel for distributed training.")
self.student = DistributedDataParallel(self.student)
else:
from torch.nn.parallel import DistributedDataParallel
logger.info("Using nn.parallel.DistributedDataParallel for distributed training.")
self.student = DistributedDataParallel(self.student,
device_ids=[params.local_rank],
output_device=params.local_rank,
find_unused_parameters=True)
self.is_master = params.is_master
if self.is_master:
logger.info('--- Initializing Tensorboard')
self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, 'log', 'train'))
self.tensorboard.add_text(tag='config/training', text_string=str(self.params), global_step=0)
self.tensorboard.add_text(tag='config/student', text_string=str(self.student_config), global_step=0)
def prepare_batch_mlm(self,
batch):
token_ids, lengths = batch
token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths)
assert token_ids.size(0) == lengths.size(0)
attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None])
bs, max_seq_len = token_ids.size()
mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids)
x_prob = self.token_probs[token_ids.flatten()]
n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item())
tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False)
pred_mask = torch.zeros(bs * max_seq_len, dtype=torch.bool, device=token_ids.device)
pred_mask[tgt_ids] = 1
pred_mask = pred_mask.view(bs, max_seq_len)
pred_mask[token_ids == self.params.special_tok_ids['pad_token']] = 0
if self.fp16:
n1 = pred_mask.sum().item()
if n1 > 8:
pred_mask = pred_mask.view(-1)
n2 = max(n1 % 8, 8 * (n1 // 8))
if n2 != n1:
pred_mask[torch.nonzero(pred_mask).view(-1)[:n1-n2]] = 0
pred_mask = pred_mask.view(bs, max_seq_len)
assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item()
_token_ids_real = token_ids[pred_mask]
_token_ids_rand = _token_ids_real.clone().random_(self.vocab_size)
_token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids['mask_token'])
probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True)
_token_ids = _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long()
token_ids = token_ids.masked_scatter(pred_mask, _token_ids)
mlm_labels[~pred_mask] = -1
assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size
return token_ids, attn_mask, mlm_labels
def prepare_batch_clm(self,
batch):
token_ids, lengths = batch
token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths)
assert token_ids.size(0) == lengths.size(0)
attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None])
clm_labels = token_ids.new(token_ids.size()).copy_(token_ids)
clm_labels[~attn_mask] = -1
assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size
return token_ids, attn_mask, clm_labels
def round_batch(self,
x: torch.tensor,
lengths: torch.tensor):
if not self.fp16 or len(lengths) < 8:
return x, lengths
bs1 = len(lengths)
bs2 = 8 * (bs1 // 8)
assert bs2 > 0 and bs2 % 8 == 0
if bs1 != bs2:
idx = torch.randperm(bs1)[:bs2]
lengths = lengths[idx]
slen = lengths.max().item()
x = x[idx, :slen]
else:
idx = None
ml1 = x.size(1)
if ml1 % 8 != 0:
pad = 8 - (ml1 % 8)
ml2 = ml1 + pad
if self.mlm:
pad_id = self.params.special_tok_ids['pad_token']
else:
pad_id = self.params.special_tok_ids['unk_token']
padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id)
x = torch.cat([x, padding_tensor], 1)
assert x.size() == (bs2, ml2)
assert x.size(0) % 8 == 0
assert x.size(1) % 8 == 0
return x, lengths
def train(self):
    """
    The main training loop: for each of `params.n_epoch` epochs, build an
    MLM or CLM batch from the dataloader and run one distillation step,
    then save a final `pytorch_model.bin` checkpoint (master process only).
    """
    if self.is_master: logger.info('Starting training')
    self.last_log = time.time()
    # Student learns; teacher is frozen in eval mode.
    self.student.train()
    self.teacher.eval()

    for _ in range(self.params.n_epoch):
        if self.is_master: logger.info(f'--- Starting epoch {self.epoch}/{self.params.n_epoch-1}')
        if self.multi_gpu:
            # Synchronize all ranks before starting the epoch.
            torch.distributed.barrier()

        # Progress bar only on the master / single-process rank.
        iter_bar = tqdm(self.dataloader, desc="-Iter", disable=self.params.local_rank not in [-1, 0])
        for batch in iter_bar:
            if self.params.n_gpu > 0:
                # Move every tensor in the batch to this rank's GPU.
                batch = tuple(t.to(f'cuda:{self.params.local_rank}') for t in batch)

            # Masked-LM vs causal-LM batch preparation.
            if self.mlm:
                token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch)
            else:
                token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch)
            self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels)

            iter_bar.update()
            iter_bar.set_postfix({'Last_loss': f'{self.last_loss:.2f}',
                                  'Avg_cum_loss': f'{self.total_loss_epoch/self.n_iter:.2f}'})
        iter_bar.close()

        if self.is_master: logger.info(f'--- Ending epoch {self.epoch}/{self.params.n_epoch-1}')
        self.end_epoch()

    if self.is_master:
        logger.info(f'Save very last checkpoint as `pytorch_model.bin`.')
        self.save_checkpoint(checkpoint_name=f'pytorch_model.bin')
        logger.info('Training is finished')
def step(self,
         input_ids: torch.tensor,
         attention_mask: torch.tensor,
         lm_labels: torch.tensor):
    """
    One distillation step: forward the student and the (gradient-free)
    teacher, combine the enabled losses weighted by their alpha_* factors,
    and backpropagate through the student via `self.optimize`.

    Losses (each active only when its alpha > 0):
      - loss_ce : soft-target cross entropy between temperature-scaled
                  student/teacher distributions (always computed).
      - loss_mlm / loss_clm : hard-label LM loss on the student logits.
      - loss_mse : MSE between selected student/teacher logits.
      - loss_cos : cosine-embedding loss between last hidden states.
    """
    if self.mlm:
        s_logits, s_hidden_states = self.student(input_ids=input_ids, attention_mask=attention_mask)
        with torch.no_grad():
            t_logits, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=attention_mask)
    else:
        # CLM models return (logits, presents/past, hidden_states); no attention mask is passed.
        s_logits, _, s_hidden_states = self.student(input_ids=input_ids, attention_mask=None)
        with torch.no_grad():
            t_logits, _, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=None)
    assert s_logits.size() == t_logits.size()

    # Select which positions enter the distillation loss: only the positions
    # with a label (restrict_ce_to_mask) or every attended position.
    if self.params.restrict_ce_to_mask:
        mask = (lm_labels>-1).unsqueeze(-1).expand_as(s_logits)
    else:
        mask = attention_mask.unsqueeze(-1).expand_as(s_logits)
    s_logits_slct = torch.masked_select(s_logits, mask)
    s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1))
    t_logits_slct = torch.masked_select(t_logits, mask)
    t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1))
    assert t_logits_slct.size() == s_logits_slct.size()

    # Soft-target cross entropy, scaled by T^2 as in Hinton-style distillation.
    loss_ce = self.ce_loss_fct(F.log_softmax(s_logits_slct/self.temperature, dim=-1),
                               F.softmax(t_logits_slct/self.temperature, dim=-1)) * (self.temperature)**2
    loss = self.alpha_ce*loss_ce
    if self.alpha_mlm > 0.:
        loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1))
        loss += self.alpha_mlm * loss_mlm
    if self.alpha_clm > 0.:
        # Standard causal shift: predict token t+1 from position t.
        shift_logits = s_logits[..., :-1, :].contiguous()
        shift_labels = lm_labels[..., 1:].contiguous()
        loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                                    shift_labels.view(-1))
        loss += self.alpha_clm * loss_clm
    if self.alpha_mse > 0.:
        # Mean over the number of selected positions.
        loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct)/s_logits_slct.size(0)
        loss += self.alpha_mse * loss_mse
    if self.alpha_cos > 0.:
        # Align the last-layer hidden states of student and teacher.
        s_hidden_states = s_hidden_states[-1]
        t_hidden_states = t_hidden_states[-1]
        mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states)
        assert s_hidden_states.size() == t_hidden_states.size()
        dim = s_hidden_states.size(-1)
        s_hidden_states_slct = torch.masked_select(s_hidden_states, mask)
        s_hidden_states_slct = s_hidden_states_slct.view(-1, dim)
        t_hidden_states_slct = torch.masked_select(t_hidden_states, mask)
        t_hidden_states_slct = t_hidden_states_slct.view(-1, dim)
        # Target of +1 => push cosine similarity towards 1.
        target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1)
        loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target)
        loss += self.alpha_cos * loss_cos

    # Bookkeeping for logging.
    self.total_loss_epoch += loss.item()
    self.last_loss = loss.item()
    self.last_loss_ce = loss_ce.item()
    if self.alpha_mlm > 0.:
        self.last_loss_mlm = loss_mlm.item()
    if self.alpha_clm > 0.:
        self.last_loss_clm = loss_clm.item()
    if self.alpha_mse > 0.:
        self.last_loss_mse = loss_mse.item()
    if self.alpha_cos > 0.:
        self.last_loss_cos = loss_cos.item()

    self.optimize(loss)
    self.n_sequences_epoch += input_ids.size(0)
def optimize(self,
             loss):
    """
    Normalize and backpropagate the loss, then — every
    `gradient_accumulation_steps` iterations — clip gradients and apply one
    optimizer step followed by one scheduler step.

    In fp16 mode the backward pass goes through apex amp's loss scaling and
    clipping uses amp's master parameters.
    """
    # NaN != NaN, so this detects NaN losses before they corrupt the weights.
    if (loss != loss).data.any():
        logger.error('NaN detected')
        exit()

    if self.multi_gpu:
        # DataParallel returns one loss per GPU; average them.
        loss = loss.mean()
    if self.params.gradient_accumulation_steps > 1:
        # Scale down so accumulated gradients match a single big batch.
        loss = loss / self.params.gradient_accumulation_steps

    if self.fp16:
        from apex import amp
        with amp.scale_loss(loss, self.optimizer) as scaled_loss:
            scaled_loss.backward()
    else:
        loss.backward()

    # Advance the iteration counters (also triggers periodic logging/checkpoints).
    self.iter()
    if self.n_iter % self.params.gradient_accumulation_steps == 0:
        if self.fp16:
            torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm)
        else:
            torch.nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm)
        self.optimizer.step()
        self.optimizer.zero_grad()
        self.scheduler.step()
def iter(self):
    """
    Increment the per-epoch and global iteration counters, and trigger
    periodic tensorboard logging and checkpointing based on the configured
    intervals.
    """
    self.n_iter += 1
    self.n_total_iter += 1

    if self.n_total_iter % self.params.log_interval == 0:
        self.log_tensorboard()
        # Reset the wall-clock reference used by the "global/speed" metric.
        self.last_log = time.time()
    if self.n_total_iter % self.params.checkpoint_interval == 0:
        self.save_checkpoint()
def log_tensorboard(self):
    """
    Write the current training state to tensorboard (master process only):
    parameter/gradient statistics, every active loss component, the learning
    rate, memory usage, and the time since the last log.
    """
    if not self.is_master:
        return

    for param_name, param in self.student.named_parameters():
        self.tensorboard.add_scalar(tag='parameter_mean/' + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter)
        self.tensorboard.add_scalar(tag='parameter_std/' + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter)
        # Parameters that have not received a gradient yet are skipped.
        if param.grad is None:
            continue
        self.tensorboard.add_scalar(tag="grad_mean/" + param_name, scalar_value=param.grad.data.mean(),global_step=self.n_total_iter)
        self.tensorboard.add_scalar(tag="grad_std/" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter)

    self.tensorboard.add_scalar(tag="losses/cum_avg_loss_epoch", scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.n_total_iter)
    self.tensorboard.add_scalar(tag="losses/loss", scalar_value=self.last_loss, global_step=self.n_total_iter)
    self.tensorboard.add_scalar(tag="losses/loss_ce", scalar_value=self.last_loss_ce, global_step=self.n_total_iter)
    # Only losses that are actually enabled have a last_loss_* attribute.
    if self.alpha_mlm > 0.:
        self.tensorboard.add_scalar(tag="losses/loss_mlm", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter)
    if self.alpha_clm > 0.:
        self.tensorboard.add_scalar(tag="losses/loss_clm", scalar_value=self.last_loss_clm, global_step=self.n_total_iter)
    if self.alpha_mse > 0.:
        self.tensorboard.add_scalar(tag="losses/loss_mse", scalar_value=self.last_loss_mse, global_step=self.n_total_iter)
    if self.alpha_cos > 0.:
        self.tensorboard.add_scalar(tag="losses/loss_cos", scalar_value=self.last_loss_cos, global_step=self.n_total_iter)
    self.tensorboard.add_scalar(tag="learning_rate/lr", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter)

    # Used memory in MB, and seconds elapsed since the previous log.
    self.tensorboard.add_scalar(tag="global/memory_usage", scalar_value=psutil.virtual_memory()._asdict()['used']/1_000_000, global_step=self.n_total_iter)
    self.tensorboard.add_scalar(tag="global/speed", scalar_value=time.time()-self.last_log, global_step=self.n_total_iter)
def end_epoch(self):
    """
    Finalize an epoch: save a per-epoch checkpoint and log the average epoch
    loss (master only), then reset the per-epoch counters for the next epoch.
    """
    logger.info(f'{self.n_sequences_epoch} sequences have been trained during this epoch.')

    if self.is_master:
        self.save_checkpoint(checkpoint_name=f'model_epoch_{self.epoch}.pth')
        self.tensorboard.add_scalar(tag='epoch/loss', scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.epoch)

    # Reset per-epoch state.
    self.epoch += 1
    self.n_sequences_epoch = 0
    self.n_iter = 0
    self.total_loss_epoch = 0
def save_checkpoint(self,
                    checkpoint_name: str = 'checkpoint.pth'):
    """
    Serialize the student model's config and weights into `self.dump_path`
    under `checkpoint_name`. Only the master process writes to disk.
    """
    if not self.is_master:
        return
    # Unwrap DataParallel / DistributedDataParallel wrappers if present.
    if hasattr(self.student, 'module'):
        model = self.student.module
    else:
        model = self.student
    model.config.save_pretrained(self.dump_path)
    target_path = os.path.join(self.dump_path, checkpoint_name)
    torch.save(model.state_dict(), target_path)
| true | true |
f73693a4682bea4ed02bdb055520c7339d597c43 | 26,017 | py | Python | openpharmacophore/pharmacophore/dynophore.py | uibcdf/openpharmacophore | 4f563fa206f6e7c081502acab97bb795d27bdeb9 | [
"MIT"
] | 14 | 2021-11-12T10:09:25.000Z | 2022-03-18T08:24:16.000Z | openpharmacophore/pharmacophore/dynophore.py | uibcdf/openpharmacophore | 4f563fa206f6e7c081502acab97bb795d27bdeb9 | [
"MIT"
] | 7 | 2021-11-05T01:37:57.000Z | 2022-01-18T06:03:39.000Z | openpharmacophore/pharmacophore/dynophore.py | uibcdf/openpharmacophore | 4f563fa206f6e7c081502acab97bb795d27bdeb9 | [
"MIT"
] | 3 | 2021-11-05T01:22:47.000Z | 2021-12-12T03:57:09.000Z | # OpenPharmacophore
from openpharmacophore._private_tools.exceptions import InvalidFileFormat, NoLigandsError, OpenPharmacophoreTypeError
from openpharmacophore.pharmacophore.pharmacophoric_point import UniquePharmacophoricPoint
from openpharmacophore import StructuredBasedPharmacophore
from openpharmacophore import Pharmacophore
from openpharmacophore.utils.conformers import conformer_energy
from openpharmacophore.pharmacophore.color_palettes import get_color_from_palette_for_feature
# Third Party
import matplotlib.pyplot as plt
import MDAnalysis as mda
from MDAnalysis.lib.util import NamedStream
import mdtraj as mdt
import numpy as np
import pandas as pd
import pyunitwizard as puw
from rdkit.Chem.Draw import rdMolDraw2D
from tqdm.auto import tqdm
# Standard Library
from collections import defaultdict
import copy
import bisect
from io import StringIO
import tempfile
from typing import List, Tuple, Optional
class Dynophore():
""" Class to store and compute dynamic pharmacophores
Parameters
----------
trajectory : str or mdtraj.trajectory or MDAnalysis.universe
A str with the file path containing the trajectory, an mdtraj trajectory object,
or an MDAnalysis universe.
Attributes
----------
pharmacophores : list of openpharmacophore.StructuredBasedPharmacophore
List with pharmacophores for each relevant frame in the trajectory.
pharmacophore_indices : list of int
Indices of the frame of the trajectory from which the pharmacophores were extracted.
The index of each element of the list corresponds to the one in pharmacophores list.
n_pharmacophores : int
Number of different pharmacophores in the trajectory.
"""
def __init__(self, trajectory):
    """
    Initialize a Dynophore from a trajectory.

    Parameters
    ----------
    trajectory : str, mdtraj.Trajectory, or MDAnalysis.Universe
        File path of a trajectory, or an already-loaded trajectory object.

    Raises
    ------
    TypeError
        If `trajectory` is none of the supported types.
    """
    self.pharmacophores = []
    self.pharmacophore_indices = []
    self.n_pharmacophores = 0
    self.unique_pharmacophoric_points = []
    # TODO: Load other types of file, including using a topology and tajectory
    if isinstance(trajectory, str):
        self._trajectory = self._load_trajectory_file(trajectory)
        # Bug fix: _n_frames was never set on this branch, so every frame-based
        # method (train of pharmacophores, plots, ...) raised AttributeError
        # when the trajectory came from a file path.
        self._n_frames = self._trajectory.n_frames
    elif isinstance(trajectory, mdt.Trajectory):
        self._trajectory_type = "mdt"
        self._trajectory = trajectory
        self._n_frames = self._trajectory.n_frames
    elif isinstance(trajectory, mda.Universe):
        self._trajectory_type = "mda"
        self._trajectory = trajectory
        self._n_frames = trajectory.trajectory.n_frames
    else:
        raise TypeError("Trajectory must be of type string, mdtraj.Trajectory or MdAnalysis.Universe")
    self._saved_ligand = False
    self._averaged_coords = False
def common_hits_approach(self, frame_list=None):
    """ Get a list of pharmacophore models from a trajectory using the common hits approach
        method.

        Parameters
        ----------
        frame_list : list of int, optional
            Frames to derive pharmacophores from; defaults to every frame.

        Notes
        -----
        This method is based on obtaining a list of representative pharmacophore models from a
        trajectory and then validate and score them using virtual screening. The best performant
        pharmacophore models are then returned.

        WARNING: implementation is incomplete — the representative models are
        computed but the screening/scoring stage is not implemented yet, and
        nothing is returned.

        References
        ----------
        [1] Wieder, Marcus, Arthur Garon, Ugo Perricone, Stefan Boresch, Thomas Seidel, Anna Maria Almerico,
        and Thierry Langer. "Common hits approach: combining pharmacophore modeling and molecular dynamics
        simulations." Journal of chemical information and modeling 57, no. 2 (2017): 365-385
    """
    if frame_list is None:
        frame_list = list(range(0, self._n_frames))
    self.pharmacophores_from_frames(frame_list, load_ligand=True)
    self._get_unique_pharmacophoric_points(avg_coordinates=False)
    rpms = self.representative_pharmacophore_models()
    # TODO: validate and score `rpms` with virtual screening (see reference).
    pass
def draw(self, file_name: str, img_size: Tuple[int, int] = (500,500),
         legend: str = "", freq_threshold: float = 0.2) -> None:
    """ Draw a 2d representation of the dynamic pharmacophore. This is a drawing of the
        ligand with the pharmacophoric features highlighted and the frequency of each
        one.

        Parameters
        ----------
        file_name : str
            Name or path of the file where the drawing will be saved. Must be a png file.
        img_size : 2-tuple of int, optional
            The size of the image (default=(500,500))
        legend : str, optional
            Image legend.
        freq_threshold : double , optional
            The minimum frequency of a pharmacophoric point to be drawn. Number
            between 0.0 and 1.0 (default=0.2).

        Raises
        ------
        ValueError
            If freq_threshold is outside [0, 1].
        InvalidFileFormat
            If file_name is not a png.
        NoLigandsError
            If no ligand was stored with the first pharmacophore.
    """
    if freq_threshold < 0.0 or freq_threshold > 1.0:
        raise ValueError("Freqency threshold must be a value between 0 and 1")
    if not file_name.endswith(".png"):
        raise InvalidFileFormat("File must be a png.")
    # Extract a ligand
    if self.pharmacophores[0].ligand is None:
        raise NoLigandsError("Ligand could not be extracted")
    ligand = copy.deepcopy(self.pharmacophores[0].ligand)
    ligand.RemoveAllConformers()

    atoms = []
    bond_colors = {}
    atom_highlights = defaultdict(list)
    highlight_radius = {}

    for up in self.unique_pharmacophoric_points:
        if up.frequency < freq_threshold:
            continue
        indices = up.atom_indices
        for idx in indices:
            # If an atom has more than one feature keep the higher frequency value.
            # Bug fixes: the decision is made per atom (the flag used to be shared
            # across all atoms of a point, letting one atom suppress the others),
            # and the stored note holds an integer PERCENT ("f=35") so the
            # comparison must also be in percent units, not the 0-1 fraction.
            update_freq = True
            if idx in atoms:
                if ligand.GetAtomWithIdx(idx).HasProp("atomNote"):
                    freq = int(ligand.GetAtomWithIdx(idx).GetProp("atomNote")[2:])
                    if freq > up.frequency * 100:
                        update_freq = False
            atoms.append(idx)

            if "hydrophobicity" in up.feature_name:
                feat_name = "hydrophobicity"
            else:
                feat_name = " ".join(up.feature_name.split()[0:2])
            atom_highlights[idx].append(get_color_from_palette_for_feature(feat_name))
            highlight_radius[idx] = 0.6

            # Draw aromatic rings bonds
            if up.short_name == "R":
                for neighbor in ligand.GetAtomWithIdx(idx).GetNeighbors():
                    nbr_idx = neighbor.GetIdx()
                    if nbr_idx not in indices:
                        continue
                    bond = ligand.GetBondBetweenAtoms(idx, nbr_idx).GetIdx()
                    bond_colors[bond] = [get_color_from_palette_for_feature("aromatic ring")]

            if update_freq:
                frequency = int(up.frequency * 100)
                ligand.GetAtomWithIdx(idx).SetProp("atomNote", f"f={frequency}")

    drawing = rdMolDraw2D.MolDraw2DCairo(img_size[0], img_size[1])
    drawing.DrawMoleculeWithHighlights(ligand, legend, dict(atom_highlights), bond_colors, highlight_radius, {})
    drawing.FinishDrawing()
    drawing.WriteDrawingText(file_name)
def first_and_last_pharmacophore(self) -> None:
    """ Derive a pharmacophore model for the first and last frames of a trajectory.

        Populates `self.pharmacophores` with exactly two models and
        `self.pharmacophore_indices` with their frame indices.

        References
        ----------
        [1] Wieder, Marcus, Ugo Perricone, Thomas Seidel, Stefan Boresch, and Thierry Langer.
        "Comparing pharmacophore models derived from crystal structures and from molecular
        dynamics simulations." Monatshefte für Chemie-Chemical Monthly 147, no. 3 (2016):
        553-563.
    """
    if self._trajectory_type == "mdt":
        get_pharmacophore = self._pharmacophore_from_mdtraj
    elif self._trajectory_type == "mda":
        get_pharmacophore = self._pharmacohore_from_mdanalysis

    initial_pharmacophore = get_pharmacophore(0, True, True)
    end_pharmacophore = get_pharmacophore(-1, True, True)
    # Bug fix: the index of the last frame is n_frames - 1, and
    # `self._trajectory.n_frames` only exists for mdtraj trajectories
    # (MDAnalysis universes crashed here). `self._n_frames` covers both.
    last_frame_index = self._n_frames - 1
    self.pharmacophores = [
        initial_pharmacophore,
        end_pharmacophore
    ]
    self.pharmacophore_indices = [0, last_frame_index]
    self.n_pharmacophores = 2
def pharmacophore_by_frequency(self, threshold: float) -> Pharmacophore:
    """ Derive a unique pharmacophore model with the pharmacophoric points
        that have a frequency >= to threshold.

        Parameters
        ---------
        threshold : float
            The value of frequency from which points are considered part of
            the pharmacophore model. Must be a value between 0 and 1.

        Returns
        -------
        openpharmcophore.Pharmacophore
            Pharmacophore model with the unique pharmacophoric points.

        References
        ----------
        [1] Wieder, Marcus, Ugo Perricone, Thomas Seidel, and Thierry Langer. "Pharmacophore models
        derived from molecular dynamics simulations of protein-ligand complexes: A case study."
        Natural product communications 11, no. 10 (2016): 1934578X1601101019.
    """
    if threshold < 0 or threshold > 1:
        raise ValueError("Threshold must be a number between 0 and 1")
    # Consistency fix: like the other consumers of unique points, recompute
    # them when they exist but were built WITHOUT averaged coordinates —
    # previously stale non-averaged points were silently reused here.
    if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:
        self._get_unique_pharmacophoric_points(avg_coordinates=True)

    points = [p for p in self.unique_pharmacophoric_points if p.frequency >= threshold]
    return Pharmacophore(points)
def pharmacophore_from_unique_points(self, unique_points: List[str]) -> Pharmacophore:
    """ Build a pharmacophore consisting of the named unique pharmacophoric
        points.

        Parameters
        ----------
        unique_points: list of str
            Names of the unique pharmacophoric points to include.

        Returns
        -------
        openpharmcophore.Pharmacophore
            Pharmacophore model with the specified points.
    """
    # (Re)compute unique points with averaged coordinates when needed.
    if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:
        self._get_unique_pharmacophoric_points(avg_coordinates=True)
    selected = []
    for point in self.unique_pharmacophoric_points:
        if point.feature_name in unique_points:
            selected.append(point)
    return Pharmacophore(pharmacophoric_points=selected)
def pharmacophores_from_frames(self, frames: List[int], load_ligand: bool = True) -> None:
    """ Derive one pharmacophore per requested frame of the trajectory,
        replacing any previously stored pharmacophores.

        Parameters
        ----------
        frames : list of int
            Indices of the frames for which pharmacophores will be derived.
        load_ligand : bool, default=True
            Whether to store the ligand inside each pharmacophore.
    """
    # Dispatch on the backing trajectory library.
    if self._trajectory_type == "mdt":
        derive = self._pharmacophore_from_mdtraj
    elif self._trajectory_type == "mda":
        derive = self._pharmacohore_from_mdanalysis

    self.pharmacophores.clear()
    self.pharmacophore_indices.clear()
    for frame_index in tqdm(frames):
        self.pharmacophores.append(derive(frame_index, load_ligand=load_ligand))
        self.pharmacophore_indices.append(frame_index)
    self.n_pharmacophores = len(self.pharmacophores)
def pharmacophoric_point_frequency(self) -> pd.DataFrame:
    """ Tabulate every unique pharmacophoric point with its frequency.

        Returns
        -------
        pandas.DataFrame
            Columns: "Feature Name", "Frequency", "Atoms Indices";
            sorted by descending frequency.
    """
    if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:
        self._get_unique_pharmacophoric_points(avg_coordinates=True)

    unique = self.unique_pharmacophoric_points
    frequency = pd.DataFrame().from_dict({
        "Feature Name": [point.feature_name for point in unique],
        "Frequency": [point.frequency for point in unique],
        "Atoms Indices": [point.atom_indices for point in unique],
    })
    frequency.sort_values(by=["Frequency"], ascending=False, inplace=True)
    # Renumber rows after sorting and drop the old index column.
    frequency.reset_index(inplace=True)
    frequency.drop(columns=["index"], inplace=True)
    return frequency
def point_frequency_plot(self, threshold: float = 0.0, n_bins: int = 10,
                         ax: Optional[plt.Axes] = None):
    """ Plot of pharmacophoric points frequency vs time.

        Each pharmacophoric point will appear as a different line in the plot.

        Parameters
        ----------
        threshold : double, default=0.0
            The value of overall frequency from which points will form part of the
            plot. If there are a lot of points with really low frequency, setting
            the threshold value can help with visualization.

        n_bins : int, default=10
            Number of bins to discretize the timesteps.

        ax : matplotlib.axes._subplots.AxesSubplot, optional.
            An axes object where the plot will be drawn.

        Returns
        -------
        ax : the axes the plot was drawn on.
    """
    if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:
        self._get_unique_pharmacophoric_points(avg_coordinates=True)
    if threshold < 0 or threshold > 1:
        raise ValueError("Threshold must be a number between 0 and 1")
    if ax is None:
        fig, ax = plt.subplots(figsize=(10, 7))

    # Bin edges over the whole trajectory.
    n_timesteps = self._n_frames
    bins = np.arange(0, n_timesteps + 1, n_timesteps/n_bins)
    for point in self.unique_pharmacophoric_points:
        if point.frequency < threshold:
            continue
        # Count how many of this point's timesteps fall into each bin.
        point_timesteps = np.array(point.timesteps)
        discretized_timesteps = np.digitize(point_timesteps, bins)
        counts = np.zeros_like(bins)
        for i in range(bins.shape[0]):
            c = np.count_nonzero(discretized_timesteps == i)
            counts[i] = c
        ax.plot(bins, counts, label=point.feature_name)

    ax.legend()
    ax.set_xlabel("Timesteps")
    ax.set_ylabel("Count")
    plt.show()
    return ax
def representative_pharmacophore_models(self) -> List[StructuredBasedPharmacophore]:
    """ Get all representative pharmacophore models (RPM) in a trajectory.

        RPMs are pharmacophore models that have the same pharmacophoric points.

        Returns
        -------
        rpms : list of openpharmacophore.StructuredBasedPharmacophore
            The representative pharmacophore models

        Note
        -----
        Pharmacophoric points are considered equal based only on feature type and the atoms to
        which this points belong to. Coordinates are not taken into account.

        The coordinates of the pharmacophoric points are those that belong to the median energy of
        the ligand.

        References
        ----------
        [1] Wieder, Marcus, Arthur Garon, Ugo Perricone, Stefan Boresch, Thomas Seidel, Anna Maria Almerico,
        and Thierry Langer. "Common hits approach: combining pharmacophore modeling and molecular dynamics
        simulations." Journal of chemical information and modeling 57, no. 2 (2017): 365-385
    """
    # RPM detection requires NON-averaged coordinates: recompute if the cached
    # unique points are missing or were built with averaging.
    if len(self.unique_pharmacophoric_points) == 0 or self._averaged_coords:
        self._get_unique_pharmacophoric_points(avg_coordinates=False)
        self._averaged_coords = False

    rpms_indices = self._get_rpms_indices()
    return self._pharmacophores_from_ligand_median_energy(rpms_indices)
def _get_rpms_indices(self) -> List[List[int]]:
    """ Get the indices of the representative pharmacophore models.

        If an empty list is returned it means that all pharmacophore models in the trajectory are different.

        Returns
        --------
        rpms_indices : list of list of int
            A list where each sublist contains the indices of each representative pharmacophore
            model. This indices correspond to the attribute pharmacophores of the Dynophore
            class.
    """
    # Compute a matrix where each row represents a feature vector of a pharmacophore:
    # entry (i, j) is 1 iff pharmacophore i contains unique point j.
    n_pharmacophores = self.n_pharmacophores
    n_features = len(self.unique_pharmacophoric_points)
    feature_matrix = np.zeros((n_pharmacophores, n_features), dtype=np.int32)
    for ii, pharmacophore in enumerate(self.pharmacophores):
        for point in pharmacophore:
            for jj, unique_point in enumerate(self.unique_pharmacophoric_points):
                if point.is_equal(unique_point):
                    feature_matrix[ii, jj] = 1
                    break
    # Find similar pharmacophores in the matrix: group rows with identical
    # feature vectors; `skip` avoids assigning a model to two groups.
    rpms_indices = []
    skip = []
    for ii in range(n_pharmacophores):
        rpm = [ii]
        for jj in range(ii + 1, n_pharmacophores):
            if jj in skip:
                continue
            if np.all(feature_matrix[ii, :] == feature_matrix[jj, :]):
                rpm.append(jj)
                skip.append(jj)
        # Keep only models that have a frequency higher than 2
        # NOTE(review): `len(rpm) > 2` keeps groups that occur at least 3
        # times — confirm whether "higher than 2" or ">= 2" was intended.
        if len(rpm) > 2:
            rpms_indices.append(rpm)

    return rpms_indices
def _pharmacophores_from_ligand_median_energy(self, rpms_indices)-> List[List[int]]:
    """ For each group of equivalent models, pick the one whose ligand
        conformer has the median energy.

        Parameters
        ----------
        rpms_indices : list of list of int
            A list where each sublist contains the indices of each representative pharmacophore
            model. This indices correspond to the attribute pharmacophores of the Dynophore
            class.

        Returns
        -------
        rpms : list of openpharmacophore.StructuredBasedPharmacophore
            The representative pharmacophore models
    """
    rpms = []
    for indices in rpms_indices:
        # Pairs (energy, pharmacophore index), kept sorted by energy.
        energies = []
        for index in indices:
            pair = (conformer_energy(self.pharmacophores[index].ligand), index)
            bisect.insort(energies, pair)
        # The middle entry of the sorted list is the median-energy model.
        median_energy_index = energies[len(energies) // 2][1]
        rpms.append(self.pharmacophores[median_energy_index])
    return rpms
def _load_trajectory_file(self, file_name: str) -> mdt.Trajectory:
    """ Load a trajectory file from a MD simulation.

        Sets `self._trajectory_type` and `self._n_frames` as side effects.

        Parameters
        ----------
        file_name : str
            Name of the file containing the trajectory. Only ".h5" files are
            supported for now.

        Returns
        -------
        traj : mdtraj.Trajectory
            The trajectory object.

        Raises
        ------
        NotImplementedError
            For any file format other than h5.
    """
    if file_name.endswith("h5"):
        traj = mdt.load(file_name)
        self._trajectory_type = "mdt"
        # Bug fix: record the frame count here; it was never set when the
        # trajectory came from a file path, so every frame-based method
        # raised AttributeError on self._n_frames.
        self._n_frames = traj.n_frames
    else:
        raise NotImplementedError
    return traj
def _get_unique_pharmacophoric_points(self, avg_coordinates: bool = True) -> None:
    """ Get all unique pharmacophoric points across all the pharmacophore models
        derived from the trajectory, storing them in
        `self.unique_pharmacophoric_points`.

        Parameters
        ----------
        avg_coordinates : bool
            Whether to average the coordinates of the pharmacophoric points.

        Notes
        -----
        Two points are considered equal if they have the same feature type and
        are associated with the same atom in the ligand.
    """
    if avg_coordinates:
        self._averaged_coords = True
    # Derive pharmacophores first if none have been computed yet.
    if self.n_pharmacophores == 0:
        self.pharmacophores_from_frames(list(range(0, self._n_frames)))
    # Flatten all points, tagging each with the index of its source pharmacophore.
    all_points = []
    for ii, pharmacophore in enumerate(self.pharmacophores):
        for pharmacophoric_point in pharmacophore:
            pharmacophoric_point.pharmacophore_index = ii
            all_points.append(pharmacophoric_point)

    self.unique_pharmacophoric_points.clear()
    # Get all unique parmacophoric points while also updating the count,
    # timesteps where they appear and calculating the average centroid.
    for point in all_points:
        is_unique = True
        for unique_p in self.unique_pharmacophoric_points:
            if point.is_equal(unique_p):
                timestep = point.pharmacophore_index
                if not timestep in unique_p.timesteps:
                    unique_p.timesteps.append(timestep)
                unique_p.count += 1
                if avg_coordinates:
                    # Sum now; divided by count once all points are collected.
                    unique_p.center += point.center
                is_unique = False
                break
        if is_unique:
            self.unique_pharmacophoric_points.append(UniquePharmacophoricPoint(point, point.pharmacophore_index))

    names = []
    for point in self.unique_pharmacophoric_points:
        if avg_coordinates:
            # Normalize centroid
            point.center /= point.count
        # Frequency = fraction of pharmacophores in which the point appears.
        point.frequency = point.count / self.n_pharmacophores
        # Get a unique name for each point: append the first free numeric
        # suffix for its feature type (e.g. "hb donor 1", "hb donor 2", ...).
        feat_num = 1
        full_name = point.feature_name + " " + str(feat_num)
        if full_name not in names:
            names.append(full_name)
            point.feature_name = full_name
        else:
            while True:
                feat_num += 1
                full_name = point.feature_name + " " + str(feat_num)
                if full_name not in names:
                    names.append(full_name)
                    point.feature_name = full_name
                    break
def _pharmacophore_from_mdtraj(self, frame_num: int, load_mol_system: bool=False,
                               load_ligand: bool=False) -> StructuredBasedPharmacophore:
    """ Derive a pharmacophore for a single frame of an mdtraj Trajectory object.

        Parameters
        ----------
        frame_num : int
            The index number of the frame from which the pharmacophore will be derived.

        load_mol_system : bool, default=False
            If true the receptor will be stored in the pharmacophore object.

        load_ligand : bool, default=False
            If true the ligand will be stored in the pharmacophore object.

        Returns
        -------
        openpharmacophore.StructuredBasedPharmacophore
            The pharmacophore derived from that frame.
    """
    # mdtraj trajectories cannot be passed to SringIO objects nor saved as string. So with this
    # method, temporary pdb files will be created that can be read by the StructuredBasedPharmacophore
    # class.
    if not isinstance(frame_num, int):
        raise OpenPharmacophoreTypeError("Frame number must be an integer")
    frame = self._trajectory[frame_num]
    with tempfile.NamedTemporaryFile() as original_file:
        frame.save_pdb(original_file.name)
        original_file.seek(0)
        # NamedTemporaryFile is opened in binary mode, hence the b'MODEL' below.
        lines_original = original_file.readlines()

    # The pdb mdtraj generates needs to be edited so that pybel can read it.
    # The third line that contains "MODEL" needs to be removed for the structured
    # based pharmacophore to work.
    with tempfile.NamedTemporaryFile() as modified_file:
        for line in lines_original:
            if not line.startswith(b'MODEL'):
                modified_file.write(line)
        modified_file.truncate()
        modified_file.seek(0)

        pharmacophore = StructuredBasedPharmacophore.from_pdb(modified_file,
                                                              radius=1.0, ligand_id=None, hydrophobics="plip",
                                                              load_mol_system=load_mol_system, load_ligand=load_ligand)

    return pharmacophore
def _pharmacohore_from_mdanalysis(self, frame_num: int, load_mol_system: bool = False,
                                  load_ligand: bool = False) -> StructuredBasedPharmacophore:
    """ Derive a pharmacophore for a single frame of an MdAnalysis Universe object.

        (The method name carries a historical typo — "pharmacohore" — kept
        because it is referenced elsewhere.)

        Parameters
        ----------
        frame_num : int
            The index number of the frame from which the pharmacophore will be derived.

        load_mol_system: bool, default=False
            If true the receptor will be stored in the pharmacophore object.

        load_ligand: bool, default=False
            If true the ligand will be stored in the pharmacophore object.

        Returns
        -------
        openpharmacophore.StructuredBasedPharmacophore
            The pharmacophore derived from that frame.
    """
    if not isinstance(frame_num, int):
        raise OpenPharmacophoreTypeError("Frame number must be an integer")
    # Write the selected frame to an in-memory PDB stream instead of a file.
    stream = StringIO()
    pdb_stream = NamedStream(stream, "output.pdb")
    atoms = self._trajectory.select_atoms("all")
    atoms.write(pdb_stream, frames=self._trajectory.trajectory[[frame_num]])
    pharmacophore = StructuredBasedPharmacophore.from_pdb(pdb_stream,
                                                          radius=1.0, ligand_id=None, hydrophobics="plip",
                                                          load_mol_system=load_mol_system, load_ligand=load_ligand)

    return pharmacophore
def __repr__(self) -> str:
    """Concise summary: class name, pharmacophore count, frame count."""
    cls_name = type(self).__name__
    return (f"{cls_name}(n_pharmacophores={self.n_pharmacophores}; "
            f"n_frames={self._n_frames})")
| 41.827974 | 117 | 0.611523 |
from openpharmacophore._private_tools.exceptions import InvalidFileFormat, NoLigandsError, OpenPharmacophoreTypeError
from openpharmacophore.pharmacophore.pharmacophoric_point import UniquePharmacophoricPoint
from openpharmacophore import StructuredBasedPharmacophore
from openpharmacophore import Pharmacophore
from openpharmacophore.utils.conformers import conformer_energy
from openpharmacophore.pharmacophore.color_palettes import get_color_from_palette_for_feature
import matplotlib.pyplot as plt
import MDAnalysis as mda
from MDAnalysis.lib.util import NamedStream
import mdtraj as mdt
import numpy as np
import pandas as pd
import pyunitwizard as puw
from rdkit.Chem.Draw import rdMolDraw2D
from tqdm.auto import tqdm
from collections import defaultdict
import copy
import bisect
from io import StringIO
import tempfile
from typing import List, Tuple, Optional
class Dynophore():
def __init__(self, trajectory):
    """
    Initialize a Dynophore from a trajectory.

    Parameters
    ----------
    trajectory : str, mdtraj.Trajectory, or MDAnalysis.Universe
        File path of a trajectory, or an already-loaded trajectory object.

    Raises
    ------
    TypeError
        If `trajectory` is none of the supported types.
    """
    self.pharmacophores = []
    self.pharmacophore_indices = []
    self.n_pharmacophores = 0
    self.unique_pharmacophoric_points = []
    if isinstance(trajectory, str):
        # NOTE(review): this branch never sets self._n_frames, unlike the two
        # branches below — frame-based methods would fail; confirm and fix.
        self._trajectory = self._load_trajectory_file(trajectory)
    elif isinstance(trajectory, mdt.Trajectory):
        self._trajectory_type = "mdt"
        self._trajectory = trajectory
        self._n_frames = self._trajectory.n_frames
    elif isinstance(trajectory, mda.Universe):
        self._trajectory_type = "mda"
        self._trajectory = trajectory
        self._n_frames = trajectory.trajectory.n_frames
    else:
        raise TypeError("Trajectory must be of type string, mdtraj.Trajectory or MdAnalysis.Universe")
    # Flags: ligand cached to disk; unique points computed with averaged coords.
    self._saved_ligand = False
    self._averaged_coords = False
def common_hits_approach(self, frame_list=None):
    """
    Derive representative pharmacophore models via the common hits approach
    (Wieder et al., J. Chem. Inf. Model. 2017).

    WARNING: incomplete — the representative models are computed but the
    validation/scoring stage is missing and nothing is returned.

    Parameters
    ----------
    frame_list : list of int, optional
        Frames to derive pharmacophores from; defaults to every frame.
    """
    if frame_list is None:
        frame_list = list(range(0, self._n_frames))
    self.pharmacophores_from_frames(frame_list, load_ligand=True)
    self._get_unique_pharmacophoric_points(avg_coordinates=False)
    rpms = self.representative_pharmacophore_models()
    # TODO: screen and score `rpms` per the common hits approach.
    pass
def draw(self, file_name: str, img_size: Tuple[int, int] = (500,500),
         legend: str = "", freq_threshold: float = 0.2) -> None:
    """
    Save a 2D png of the ligand with its pharmacophoric features highlighted
    and each atom annotated with the feature frequency (as "f=<percent>").

    Parameters
    ----------
    file_name : str
        Output path; must end in ".png".
    img_size : 2-tuple of int, optional
        Image width and height in pixels.
    legend : str, optional
        Image legend.
    freq_threshold : float, optional
        Minimum frequency (0-1) a point needs to be drawn.

    Raises
    ------
    ValueError, InvalidFileFormat, NoLigandsError
    """
    if freq_threshold < 0.0 or freq_threshold > 1.0:
        raise ValueError("Freqency threshold must be a value between 0 and 1")
    if not file_name.endswith(".png"):
        raise InvalidFileFormat("File must be a png.")
    if self.pharmacophores[0].ligand is None:
        raise NoLigandsError("Ligand could not be extracted")
    # Work on a conformer-free copy so a clean 2D depiction is generated.
    ligand = copy.deepcopy(self.pharmacophores[0].ligand)
    ligand.RemoveAllConformers()
    atoms = []
    bond_colors = {}
    atom_highlights = defaultdict(list)
    highlight_radius = {}
    for up in self.unique_pharmacophoric_points:
        if up.frequency < freq_threshold:
            continue
        indices = up.atom_indices
        update_freq = True
        for idx in indices:
            # Keep the higher frequency label when an atom carries several features.
            if idx in atoms:
                if ligand.GetAtomWithIdx(idx).HasProp("atomNote"):
                    # NOTE(review): the note stores an integer percent but
                    # up.frequency is a 0-1 fraction — the units of this
                    # comparison look inconsistent; confirm.
                    freq = int(ligand.GetAtomWithIdx(idx).GetProp("atomNote")[2:])
                    if freq > up.frequency:
                        update_freq = False
            atoms.append(idx)
            if "hydrophobicity" in up.feature_name:
                feat_name = "hydrophobicity"
            else:
                # Drop the numeric suffix, e.g. "hb donor 1" -> "hb donor".
                feat_name = " ".join(up.feature_name.split()[0:2])
            atom_highlights[idx].append(get_color_from_palette_for_feature(feat_name))
            highlight_radius[idx] = 0.6
            # Color the in-ring bonds of aromatic-ring features.
            if up.short_name == "R":
                for neighbor in ligand.GetAtomWithIdx(idx).GetNeighbors():
                    nbr_idx = neighbor.GetIdx()
                    if nbr_idx not in indices:
                        continue
                    bond = ligand.GetBondBetweenAtoms(idx, nbr_idx).GetIdx()
                    bond_colors[bond] = [get_color_from_palette_for_feature("aromatic ring")]
            if update_freq:
                # Annotate the atom with the frequency as an integer percent.
                frequency = int(up.frequency * 100)
                ligand.GetAtomWithIdx(idx).SetProp("atomNote", f"f={frequency}")
    drawing = rdMolDraw2D.MolDraw2DCairo(img_size[0], img_size[1])
    drawing.DrawMoleculeWithHighlights(ligand, legend, dict(atom_highlights), bond_colors, highlight_radius, {})
    drawing.FinishDrawing()
    drawing.WriteDrawingText(file_name)
    def first_and_last_pharmacophore(self) -> None:
        """Extract pharmacophores for only the first and last trajectory frames.

        Overwrites ``self.pharmacophores`` / ``self.pharmacophore_indices`` and
        sets ``self.n_pharmacophores`` to 2. Both molecular system and ligand
        are loaded (the two ``True`` positional arguments).
        """
        # Dispatch on the trajectory backend.
        # NOTE(review): if _trajectory_type is neither "mdt" nor "mda",
        # get_pharmacophore is never bound and the call below raises
        # UnboundLocalError — presumably unreachable; confirm.
        if self._trajectory_type == "mdt":
            get_pharmacophore = self._pharmacophore_from_mdtraj
        elif self._trajectory_type == "mda":
            get_pharmacophore = self._pharmacohore_from_mdanalysis
        initial_pharmacophore = get_pharmacophore(0, True, True)
        end_pharmacophore = get_pharmacophore(-1, True, True)
        # NOTE(review): n_frames is one past the last valid frame index; the
        # stored index for the final frame looks off by one — confirm intent.
        last_frame_index = self._trajectory.n_frames
        self.pharmacophores = [
            initial_pharmacophore,
            end_pharmacophore
        ]
        self.pharmacophore_indices = [0, last_frame_index]
        self.n_pharmacophores = 2
def pharmacophore_by_frequency(self, threshold: float) -> Pharmacophore:
if threshold < 0 or threshold > 1:
raise ValueError("Threshold must be a number between 0 and 1")
if len(self.unique_pharmacophoric_points) == 0:
self._get_unique_pharmacophoric_points(avg_coordinates=True)
points = [p for p in self.unique_pharmacophoric_points if p.frequency >= threshold]
return Pharmacophore(points)
def pharmacophore_from_unique_points(self, unique_points: List[str]) -> Pharmacophore:
if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:
self._get_unique_pharmacophoric_points(avg_coordinates=True)
points = [point for point in self.unique_pharmacophoric_points if point.feature_name in unique_points]
return Pharmacophore(pharmacophoric_points=points)
    def pharmacophores_from_frames(self, frames: List[int], load_ligand: bool = True) -> None:
        """Extract one pharmacophore per requested trajectory frame.

        Previous contents of ``self.pharmacophores`` and
        ``self.pharmacophore_indices`` are discarded, then refilled in the order
        of ``frames``; ``self.n_pharmacophores`` is updated at the end.

        :param frames: trajectory frame indices to process.
        :param load_ligand: forwarded to the per-frame extraction helper.
        """
        # Dispatch on the trajectory backend.
        # NOTE(review): an unknown _trajectory_type leaves get_pharmacophore
        # unbound and the loop raises UnboundLocalError — confirm unreachable.
        if self._trajectory_type == "mdt":
            get_pharmacophore = self._pharmacophore_from_mdtraj
        elif self._trajectory_type == "mda":
            get_pharmacophore = self._pharmacohore_from_mdanalysis
        self.pharmacophores.clear()
        self.pharmacophore_indices.clear()
        # tqdm: progress bar over the (possibly long) per-frame extraction.
        for ii in tqdm(frames):
            self.pharmacophores.append(get_pharmacophore(ii, load_ligand=load_ligand))
            self.pharmacophore_indices.append(ii)
        self.n_pharmacophores = len(self.pharmacophores)
def pharmacophoric_point_frequency(self) -> pd.DataFrame:
if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:
self._get_unique_pharmacophoric_points(avg_coordinates=True)
names = []
frequencies = []
indices = []
for point in self.unique_pharmacophoric_points:
names.append(point.feature_name)
frequencies.append(point.frequency)
indices.append(point.atom_indices)
frequency = pd.DataFrame().from_dict({
"Feature Name": names,
"Frequency": frequencies,
"Atoms Indices": indices
})
frequency.sort_values(by=["Frequency"], ascending=False, inplace=True)
frequency.reset_index(inplace=True)
frequency.drop(columns=["index"], inplace=True)
return frequency
def point_frequency_plot(self, threshold: float = 0.0, n_bins: int = 10,
ax: Optional[plt.Axes] = None):
if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:
self._get_unique_pharmacophoric_points(avg_coordinates=True)
if threshold < 0 or threshold > 1:
raise ValueError("Threshold must be a number between 0 and 1")
if ax is None:
fig, ax = plt.subplots(figsize=(10, 7))
n_timesteps = self._n_frames
bins = np.arange(0, n_timesteps + 1, n_timesteps/n_bins)
for point in self.unique_pharmacophoric_points:
if point.frequency < threshold:
continue
point_timesteps = np.array(point.timesteps)
discretized_timesteps = np.digitize(point_timesteps, bins)
counts = np.zeros_like(bins)
for i in range(bins.shape[0]):
c = np.count_nonzero(discretized_timesteps == i)
counts[i] = c
ax.plot(bins, counts, label=point.feature_name)
ax.legend()
ax.set_xlabel("Timesteps")
ax.set_ylabel("Count")
plt.show()
return ax
    def representative_pharmacophore_models(self) -> List[StructuredBasedPharmacophore]:
        """Return one representative pharmacophore per group of frames sharing
        an identical set of pharmacophoric points.

        Unique points are (re)computed *without* coordinate averaging, because
        grouping relies on exact per-frame point equality.
        """
        if len(self.unique_pharmacophoric_points) == 0 or self._averaged_coords:
            self._get_unique_pharmacophoric_points(avg_coordinates=False)
            self._averaged_coords = False
        rpms_indices = self._get_rpms_indices()
        return self._pharmacophores_from_ligand_median_energy(rpms_indices)
def _get_rpms_indices(self) -> List[List[int]]:
n_pharmacophores = self.n_pharmacophores
n_features = len(self.unique_pharmacophoric_points)
feature_matrix = np.zeros((n_pharmacophores, n_features), dtype=np.int32)
for ii, pharmacophore in enumerate(self.pharmacophores):
for point in pharmacophore:
for jj, unique_point in enumerate(self.unique_pharmacophoric_points):
if point.is_equal(unique_point):
feature_matrix[ii, jj] = 1
break
rpms_indices = []
skip = []
for ii in range(n_pharmacophores):
rpm = [ii]
for jj in range(ii + 1, n_pharmacophores):
if jj in skip:
continue
if np.all(feature_matrix[ii, :] == feature_matrix[jj, :]):
rpm.append(jj)
skip.append(jj)
if len(rpm) > 2:
rpms_indices.append(rpm)
return rpms_indices
    def _pharmacophores_from_ligand_median_energy(self, rpms_indices) -> List[StructuredBasedPharmacophore]:
        """Pick, from each group of equivalent pharmacophores, the member whose
        ligand conformer has the median energy.

        :param rpms_indices: groups of indices into ``self.pharmacophores``
            (as produced by ``_get_rpms_indices``).
        :return: one representative pharmacophore per group.
        """
        rpms = []
        for indices in rpms_indices:
            energies = []
            for index in indices:
                energy = (conformer_energy(self.pharmacophores[index].ligand), index)
                # Insert keeping the list sorted by (energy, index).
                bisect.insort(energies, energy)
            # Upper median for even-sized groups.
            median_energy_index = energies[int(len(energies) / 2)][1]
            rpms.append(self.pharmacophores[median_energy_index])
        return rpms
def _load_trajectory_file(self, file_name: str) -> mdt.Trajectory:
if file_name.endswith("h5"):
traj = mdt.load(file_name)
self._trajectory_type = "mdt"
else:
raise NotImplementedError
return traj
    def _get_unique_pharmacophoric_points(self, avg_coordinates: bool = True) -> None:
        """Deduplicate pharmacophoric points across all pharmacophores.

        Populates ``self.unique_pharmacophoric_points`` with one
        ``UniquePharmacophoricPoint`` per distinct point, tracking in which
        pharmacophores (timesteps) it occurs, its occurrence frequency, and —
        when ``avg_coordinates`` is True — its mean center. Each unique point
        also receives a numbered feature name ("<name> 1", "<name> 2", ...).

        :param avg_coordinates: average the centers of merged points.
        """
        if avg_coordinates:
            self._averaged_coords = True
        if self.n_pharmacophores == 0:
            # Nothing extracted yet: process every frame first.
            self.pharmacophores_from_frames(list(range(0, self._n_frames)))
        all_points = []
        for ii, pharmacophore in enumerate(self.pharmacophores):
            for pharmacophoric_point in pharmacophore:
                # Tag each point with the index of its source pharmacophore.
                pharmacophoric_point.pharmacophore_index = ii
                all_points.append(pharmacophoric_point)
        self.unique_pharmacophoric_points.clear()
        for point in all_points:
            is_unique = True
            for unique_p in self.unique_pharmacophoric_points:
                if point.is_equal(unique_p):
                    # Merge into the existing unique point.
                    timestep = point.pharmacophore_index
                    if not timestep in unique_p.timesteps:
                        unique_p.timesteps.append(timestep)
                        unique_p.count += 1
                        if avg_coordinates:
                            # Accumulate; divided by count below.
                            unique_p.center += point.center
                    is_unique = False
                    break
            if is_unique:
                self.unique_pharmacophoric_points.append(UniquePharmacophoricPoint(point, point.pharmacophore_index))
        names = []
        for point in self.unique_pharmacophoric_points:
            if avg_coordinates:
                point.center /= point.count
            point.frequency = point.count / self.n_pharmacophores
            # Assign the lowest free ordinal suffix for this feature name.
            feat_num = 1
            full_name = point.feature_name + " " + str(feat_num)
            if full_name not in names:
                names.append(full_name)
                point.feature_name = full_name
            else:
                while True:
                    feat_num += 1
                    full_name = point.feature_name + " " + str(feat_num)
                    if full_name not in names:
                        names.append(full_name)
                        point.feature_name = full_name
                        break
    def _pharmacophore_from_mdtraj(self, frame_num: int, load_mol_system: bool=False,
            load_ligand: bool=False) -> StructuredBasedPharmacophore:
        """Extract a structure-based pharmacophore from one mdtraj frame.

        The frame is dumped to a temporary PDB file, MODEL records are stripped
        (the file is read in binary mode, hence the ``b'MODEL'`` prefix test),
        and the cleaned stream is parsed by
        ``StructuredBasedPharmacophore.from_pdb``.

        :param frame_num: frame index (negative indices are accepted).
        :param load_mol_system: forwarded to ``from_pdb``.
        :param load_ligand: forwarded to ``from_pdb``.
        :raises OpenPharmacophoreTypeError: if ``frame_num`` is not an int.
        """
        if not isinstance(frame_num, int):
            raise OpenPharmacophoreTypeError("Frame number must be an integer")
        frame = self._trajectory[frame_num]
        # NOTE(review): reopening a NamedTemporaryFile by name is POSIX-only
        # behavior; this would not work on Windows — confirm target platforms.
        with tempfile.NamedTemporaryFile() as original_file:
            frame.save_pdb(original_file.name)
            original_file.seek(0)
            lines_original = original_file.readlines()
        with tempfile.NamedTemporaryFile() as modified_file:
            for line in lines_original:
                if not line.startswith(b'MODEL'):
                    modified_file.write(line)
            modified_file.truncate()
            modified_file.seek(0)
            pharmacophore = StructuredBasedPharmacophore.from_pdb(modified_file,
                    radius=1.0, ligand_id=None, hydrophobics="plip",
                    load_mol_system=load_mol_system, load_ligand=load_ligand)
        return pharmacophore
    def _pharmacohore_from_mdanalysis(self, frame_num: int, load_mol_system: bool = False,
            load_ligand: bool = False) -> StructuredBasedPharmacophore:
        """Extract a structure-based pharmacophore from one MDAnalysis frame.

        The selected frame is written to an in-memory PDB stream (no disk I/O)
        and parsed by ``StructuredBasedPharmacophore.from_pdb``.

        NOTE(review): the method name is missing a "p" ("pharmacohore"); it is
        called by this exact spelling elsewhere in the class, so renaming
        would require updating those call sites too.

        :param frame_num: index of the frame to process.
        :param load_mol_system: forwarded to ``from_pdb``.
        :param load_ligand: forwarded to ``from_pdb``.
        :raises OpenPharmacophoreTypeError: if ``frame_num`` is not an int.
        """
        if not isinstance(frame_num, int):
            raise OpenPharmacophoreTypeError("Frame number must be an integer")
        stream = StringIO()
        pdb_stream = NamedStream(stream, "output.pdb")
        atoms = self._trajectory.select_atoms("all")
        atoms.write(pdb_stream, frames=self._trajectory.trajectory[[frame_num]])
        pharmacophore = StructuredBasedPharmacophore.from_pdb(pdb_stream,
                radius=1.0, ligand_id=None, hydrophobics="plip",
                load_mol_system=load_mol_system, load_ligand=load_ligand)
        return pharmacophore
def __repr__(self) -> str:
return f"{self.__class__.__name__}(n_pharmacophores={self.n_pharmacophores}; n_frames={self._n_frames})"
| true | true |
f73694082f6555a6ed7e3da03bdda34c33f127d4 | 210 | py | Python | tests/classes/simple_score.py | WiosoftCrafts/jsonclasses-pymongo | c76fdfc072705484b47b09e23c5498aea757dad7 | [
"MIT"
] | 2 | 2021-11-02T02:54:01.000Z | 2021-12-02T10:38:18.000Z | tests/classes/simple_score.py | WiosoftCrafts/jsonclasses-pymongo | c76fdfc072705484b47b09e23c5498aea757dad7 | [
"MIT"
] | 1 | 2021-12-15T13:50:48.000Z | 2021-12-15T13:50:48.000Z | tests/classes/simple_score.py | zhichao-github/jsonclasses-pymongo | eaf08e4342a08f484bf99d06a3bceae447925189 | [
"MIT"
] | 5 | 2021-07-22T06:30:05.000Z | 2021-12-09T02:02:30.000Z | from __future__ import annotations
from jsonclasses import jsonclass, types
from jsonclasses_pymongo import pymongo
@pymongo
@jsonclass(class_graph='simple')
class SimpleScore:
    """Minimal pymongo-backed JSON class — presumably a test fixture (the
    repository path suggests ``tests/classes``); confirm before reuse."""
    name: str  # name of the score entry
    score: float  # numeric score value
| 19.090909 | 40 | 0.8 | from __future__ import annotations
from jsonclasses import jsonclass, types
from jsonclasses_pymongo import pymongo
@pymongo
@jsonclass(class_graph='simple')
class SimpleScore:
name: str
score: float
| true | true |
f736941a68ff8e23204fe23f650e5b8157bc3b80 | 6,047 | py | Python | eod/utils/general/saver_helper.py | scott-mao/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 | [
"Apache-2.0"
] | 1 | 2022-01-12T01:51:39.000Z | 2022-01-12T01:51:39.000Z | eod/utils/general/saver_helper.py | YZW-explorer/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 | [
"Apache-2.0"
] | null | null | null | eod/utils/general/saver_helper.py | YZW-explorer/EOD | f10e64de86c0f356ebf5c7e923f4042eec4207b1 | [
"Apache-2.0"
] | null | null | null | # Standard Library
import json
import os
import shutil
# Import from third library
import torch
# Import from local
from .log_helper import default_logger as logger
from .registry_factory import SAVER_REGISTRY
__all__ = ['Saver']
@SAVER_REGISTRY.register('base')
class Saver(object):
    """Checkpoint manager: saves/loads model checkpoints and run configs.

    :param save_cfg: dict with at least ``save_dir`` and ``results_dir``; may
        also contain ``auto_resume``, ``resume_model`` and ``pretrain_model``.
    :param yml_path: optional experiment yaml; copied into ``save_dir`` for
        reproducibility (``s3://`` ceph paths are skipped).
    :param work_dir: directory prepended to the relative paths in ``save_cfg``.
    """
    def __init__(self, save_cfg, yml_path=None, work_dir='./'):
        # Checkpoint dir: rewrite relative paths against work_dir first.
        self.save_cfg = self.prepend_work_dir(save_cfg, work_dir)
        self.work_dir = work_dir
        self.save_dir = save_cfg['save_dir']
        os.makedirs(self.save_dir, exist_ok=True)
        if yml_path is not None and 's3://' not in yml_path:  # TODO: support saving ceph data
            yml_name = os.path.basename(yml_path)
            dst_path = os.path.join(self.save_dir, yml_name)
            shutil.copy(yml_path, dst_path)
        self.auto_resume = self.save_cfg.get('auto_resume', False)
        self.running_config_file = os.path.join(self.save_dir, 'running_config.json')
    def prepend_work_dir(self, save_cfg, work_dir):
        """Prefix ``save_dir`` and ``results_dir`` in ``save_cfg`` with
        ``work_dir`` (mutates and returns ``save_cfg``)."""
        def osp(path):
            return os.path.join(work_dir, path)
        save_cfg['save_dir'] = osp(save_cfg['save_dir'])
        save_cfg['results_dir'] = osp(save_cfg['results_dir'])
        return save_cfg
    @staticmethod
    def get_model_from_ckpt(ckpt_path):
        """Return only the (prefix-stripped) model state_dict from a checkpoint."""
        return Saver.load_checkpoint(ckpt_path)['model']
    def load_pretrain_or_resume(self):
        """Load weights by priority: auto-resume latest > resume_model >
        pretrain_model (EMA weights preferred when present) > nothing.

        :return: dict with at least a ``model`` key holding a state_dict.
        """
        if self.auto_resume:
            last_checkpoint_path = self.find_last_checkpoint()
            if last_checkpoint_path is not None:
                logger.warning('Load checkpoint from {}'.format(last_checkpoint_path))
                return self.load_checkpoint(last_checkpoint_path)
            else:
                logger.warning('Not found any valid checkpoint yet')
        if 'resume_model' in self.save_cfg:
            logger.warning('Load checkpoint from {}'.format(self.save_cfg['resume_model']))
            state = self.load_checkpoint(self.save_cfg['resume_model'])
            return state
        elif 'pretrain_model' in self.save_cfg:
            state = self.load_checkpoint(self.save_cfg['pretrain_model'])
            logger.warning('Load checkpoint from {}'.format(self.save_cfg['pretrain_model']))
            output = {}
            if 'ema' in state:
                # Prefer the EMA weights for pretraining when available.
                if "ema_state_dict" in state['ema']:
                    logger.info("Load ema pretrain model")
                    st = state['ema']['ema_state_dict']
                else:
                    st = state['model']
            else:
                st = state['model']
            output['model'] = st
            return output
        else:
            # BUGFIX: removed stray '{}' placeholder from the log message.
            logger.warning('Load nothing! No weights provided')
            return {'model': {}}
    @staticmethod
    def load_checkpoint(ckpt_path):
        """Load a checkpoint onto the current CUDA device and normalize it so
        the returned dict always has a ``model`` key with a 'module.'-stripped
        state_dict."""
        def remove_prefix(state_dict, prefix):
            """Old style model is stored with all names of parameters share common prefix 'module.'"""
            f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
            return {f(key): value for key, value in state_dict.items()}
        device = torch.cuda.current_device()
        ckpt_dict = torch.load(ckpt_path, map_location=lambda storage, loc: storage.cuda(device))
        # Accept the three layouts seen in the wild: {'model': ...},
        # {'state_dict': ...}, or a bare state_dict.
        if 'model' in ckpt_dict:
            state_dict = ckpt_dict['model']
        elif 'state_dict' in ckpt_dict:
            state_dict = ckpt_dict['state_dict']
        else:
            state_dict = ckpt_dict
        state_dict = remove_prefix(state_dict, 'module.')
        ckpt_dict['model'] = state_dict
        return ckpt_dict
    def lns_latest_ckpt(self, ckpt_path, new_path):
        """Point the symlink ``new_path`` at ``ckpt_path`` (best effort:
        failures are logged as warnings, never raised)."""
        try:
            pwd = os.getcwd()
            absolute_ckpt_path = os.path.join(pwd, ckpt_path)
            absolute_new_path = os.path.join(pwd, new_path)
            # BUGFIX: use lexists/remove/symlink instead of shelling out to
            # `rm` / `ln -s` via os.system — avoids shell-quoting problems
            # with unusual paths, and os.path.lexists (unlike exists) also
            # detects dangling symlinks so they get replaced too.
            if os.path.lexists(absolute_new_path):
                os.remove(absolute_new_path)
            os.symlink(absolute_ckpt_path, absolute_new_path)
        except Exception as e:
            logger.warning(f'Failed to ln -s {ckpt_path} {new_path}')
            logger.warning(e)
    def save(self, epoch, iter, **kwargs):
        """Save model checkpoint for one epoch.

        Naming: with ``suffix`` -> ``ckpt_e{epoch}-{suffix}.pth``; with
        ``auto_save`` -> ``ckpt_{auto_save}.pth``; otherwise
        ``ckpt_e{epoch}.pth``. Unless ``lns=False`` is passed, a
        ``ckpt_latest.pth`` symlink is refreshed to point at the new file.

        :return: path of the written checkpoint.
        """
        os.makedirs(self.save_dir, exist_ok=True)
        # Assume we warmup for a epochs and training a+b epochs in total,
        # then our checkpoints are named of ckpt_e{-a+1}.pth ~ ckpt_e{b}.pth
        if 'suffix' in kwargs:
            suffix = kwargs['suffix']
            ckpt_path = os.path.join(self.save_dir, 'ckpt_e{}-{}.pth'.format(epoch, suffix))
        elif 'auto_save' in kwargs:
            ckpt_path = os.path.join(self.save_dir, 'ckpt_{}.pth'.format(kwargs['auto_save']))
        else:
            ckpt_path = os.path.join(self.save_dir, 'ckpt_e{}.pth'.format(epoch))
        # epoch/iter are stored inside the checkpoint payload as well.
        kwargs['epoch'] = epoch
        kwargs['iter'] = iter
        kwargs['metric_val'] = kwargs.get('metric_val', -1)
        lns_latest_ckpt = kwargs.pop('lns', True)
        torch.save(kwargs, ckpt_path)
        if lns_latest_ckpt:
            latest_path = os.path.join(self.save_dir, 'ckpt_latest.pth')
            self.lns_latest_ckpt(ckpt_path, latest_path)
        return ckpt_path
    def save_model_arch(self, model):
        """Save model structure (its repr) to ``model_arch.txt``."""
        os.makedirs(self.save_dir, exist_ok=True)
        meta_path = os.path.join(self.save_dir, 'model_arch.txt')
        with open(meta_path, 'w') as fid:
            fid.write(str(model))
    def save_running_config(self, config):
        """Dump the resolved running config as pretty-printed JSON."""
        with open(self.running_config_file, 'w') as rcf:
            json.dump(config, rcf, indent=2)
    def find_last_checkpoint(self):
        """Return the path of ``ckpt_latest.pth`` if present, else None."""
        last_ckpt_path = os.path.join(self.save_dir, "ckpt_latest.pth")
        if os.path.exists(last_ckpt_path):
            return last_ckpt_path
        else:
            return None
| 38.762821 | 102 | 0.616339 |
import json
import os
import shutil
import torch
from .log_helper import default_logger as logger
from .registry_factory import SAVER_REGISTRY
__all__ = ['Saver']
@SAVER_REGISTRY.register('base')
class Saver(object):
def __init__(self, save_cfg, yml_path=None, work_dir='./'):
self.save_cfg = self.prepend_work_dir(save_cfg, work_dir)
self.work_dir = work_dir
self.save_dir = save_cfg['save_dir']
os.makedirs(self.save_dir, exist_ok=True)
if yml_path is not None and 's3://' not in yml_path:
yml_name = os.path.basename(yml_path)
dst_path = os.path.join(self.save_dir, yml_name)
shutil.copy(yml_path, dst_path)
self.auto_resume = self.save_cfg.get('auto_resume', False)
self.running_config_file = os.path.join(self.save_dir, 'running_config.json')
def prepend_work_dir(self, save_cfg, work_dir):
def osp(path):
return os.path.join(work_dir, path)
save_cfg['save_dir'] = osp(save_cfg['save_dir'])
save_cfg['results_dir'] = osp(save_cfg['results_dir'])
return save_cfg
@staticmethod
def get_model_from_ckpt(ckpt_path):
return Saver.load_checkpoint(ckpt_path)['model']
def load_pretrain_or_resume(self):
if self.auto_resume:
last_checkpoint_path = self.find_last_checkpoint()
if last_checkpoint_path is not None:
logger.warning('Load checkpoint from {}'.format(last_checkpoint_path))
return self.load_checkpoint(last_checkpoint_path)
else:
logger.warning('Not found any valid checkpoint yet')
if 'resume_model' in self.save_cfg:
logger.warning('Load checkpoint from {}'.format(self.save_cfg['resume_model']))
state = self.load_checkpoint(self.save_cfg['resume_model'])
return state
elif 'pretrain_model' in self.save_cfg:
state = self.load_checkpoint(self.save_cfg['pretrain_model'])
logger.warning('Load checkpoint from {}'.format(self.save_cfg['pretrain_model']))
output = {}
if 'ema' in state:
if "ema_state_dict" in state['ema']:
logger.info("Load ema pretrain model")
st = state['ema']['ema_state_dict']
else:
st = state['model']
else:
st = state['model']
output['model'] = st
return output
else:
logger.warning('Load nothing! No weights provided {}')
return {'model': {}}
@staticmethod
def load_checkpoint(ckpt_path):
def remove_prefix(state_dict, prefix):
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
device = torch.cuda.current_device()
ckpt_dict = torch.load(ckpt_path, map_location=lambda storage, loc: storage.cuda(device))
if 'model' in ckpt_dict:
state_dict = ckpt_dict['model']
elif 'state_dict' in ckpt_dict:
state_dict = ckpt_dict['state_dict']
else:
state_dict = ckpt_dict
state_dict = remove_prefix(state_dict, 'module.')
ckpt_dict['model'] = state_dict
return ckpt_dict
def lns_latest_ckpt(self, ckpt_path, new_path):
try:
pwd = os.getcwd()
absolute_ckpt_path = os.path.join(pwd, ckpt_path)
absolute_new_path = os.path.join(pwd, new_path)
if os.path.exists(absolute_new_path):
os.system(f'rm {absolute_new_path}')
os.system(f"ln -s {absolute_ckpt_path} {absolute_new_path}")
except Exception as e:
logger.warning(f'Failed to ln -s {ckpt_path} {new_path}')
logger.warning(e)
def save(self, epoch, iter, **kwargs):
os.makedirs(self.save_dir, exist_ok=True)
if 'suffix' in kwargs:
suffix = kwargs['suffix']
ckpt_path = os.path.join(self.save_dir, 'ckpt_e{}-{}.pth'.format(epoch, suffix))
elif 'auto_save' in kwargs:
ckpt_path = os.path.join(self.save_dir, 'ckpt_{}.pth'.format(kwargs['auto_save']))
else:
ckpt_path = os.path.join(self.save_dir, 'ckpt_e{}.pth'.format(epoch))
kwargs['epoch'] = epoch
kwargs['iter'] = iter
kwargs['metric_val'] = kwargs.get('metric_val', -1)
lns_latest_ckpt = kwargs.pop('lns', True)
torch.save(kwargs, ckpt_path)
if lns_latest_ckpt:
latest_path = os.path.join(self.save_dir, 'ckpt_latest.pth')
self.lns_latest_ckpt(ckpt_path, latest_path)
return ckpt_path
def save_model_arch(self, model):
os.makedirs(self.save_dir, exist_ok=True)
meta_path = os.path.join(self.save_dir, 'model_arch.txt')
with open(meta_path, 'w') as fid:
fid.write(str(model))
def save_running_config(self, config):
with open(self.running_config_file, 'w') as rcf:
json.dump(config, rcf, indent=2)
def find_last_checkpoint(self):
last_ckpt_path = os.path.join(self.save_dir, "ckpt_latest.pth")
if os.path.exists(last_ckpt_path):
return last_ckpt_path
else:
return None
| true | true |
f7369443fb51db9f04c2745e818b9e65d0c03c5f | 514 | py | Python | test.py | messense/flake-network-timeout | 53070048fd8b33dcdbf84f73df45c0d1a72bd4b7 | [
"MIT"
] | null | null | null | test.py | messense/flake-network-timeout | 53070048fd8b33dcdbf84f73df45c0d1a72bd4b7 | [
"MIT"
] | null | null | null | test.py | messense/flake-network-timeout | 53070048fd8b33dcdbf84f73df45c0d1a72bd4b7 | [
"MIT"
] | null | null | null | from redis import Redis, StrictRedis
r0 = StrictRedis.from_url('redis://localhost/0')
r1 = StrictRedis.from_url('redis://localhost/0', socket_timeout=10)
r2 = StrictRedis.from_url('redis://localhost/0?socket_timeout=10')
r3 = StrictRedis()
url1 = 'redis://localhost/0'
r4 = StrictRedis.from_url(url1)
r5 = StrictRedis.from_url(url1, socket_timeout=10)
url2 = url1 + '?' + 'socket_timeout=10'
r6 = StrictRedis.from_url(url2)
def t():
url1 = 'redis://localhost/1'
StrictRedis.from_url(url1)
Redis()
| 25.7 | 67 | 0.717899 | from redis import Redis, StrictRedis
r0 = StrictRedis.from_url('redis://localhost/0')
r1 = StrictRedis.from_url('redis://localhost/0', socket_timeout=10)
r2 = StrictRedis.from_url('redis://localhost/0?socket_timeout=10')
r3 = StrictRedis()
url1 = 'redis://localhost/0'
r4 = StrictRedis.from_url(url1)
r5 = StrictRedis.from_url(url1, socket_timeout=10)
url2 = url1 + '?' + 'socket_timeout=10'
r6 = StrictRedis.from_url(url2)
def t():
url1 = 'redis://localhost/1'
StrictRedis.from_url(url1)
Redis()
| true | true |
f736947afa737a0f0a193a290d69538e6c08baa1 | 28,733 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_public_ip_prefixes_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_public_ip_prefixes_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_07_01/operations/_public_ip_prefixes_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PublicIPPrefixesOperations(object):
    """PublicIPPrefixesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2018_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = models
    def __init__(self, client, config, serializer, deserializer):
        # Auto-generated operation group: stores the shared pipeline client,
        # the (de)serializer pair and the client configuration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        public_ip_prefix_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Fire the initial DELETE request for a public IP prefix.
        Internal helper for ``begin_delete``: returns once the service accepts
        the request (200/202/204); the LRO poller tracks actual completion.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        public_ip_prefix_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified public IP prefix.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_prefix_name: The name of the PublicIpPrefix.
        :type public_ip_prefix_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial DELETE request.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                public_ip_prefix_name=public_ip_prefix_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; they must not be
        # forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # DELETE has no body to deserialize; only invoke a custom cls hook.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started LRO from its saved state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        public_ip_prefix_name,  # type: str
        expand=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.PublicIPPrefix"
        """Gets the specified public IP prefix in a specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param public_ip_prefix_name: The name of the Public IP Prefix.
        :type public_ip_prefix_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PublicIPPrefix, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_07_01.models.PublicIPPrefix
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPPrefix"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            # $expand is only sent when explicitly requested.
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        public_ip_prefix_name,  # type: str
        parameters,  # type: "models.PublicIPPrefix"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.PublicIPPrefix"
        """Fire the initial PUT request for create-or-update of a prefix.
        Internal helper for ``begin_create_or_update``: returns the immediate
        service response (200 updated / 201 created); the LRO poller tracks
        completion.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPPrefix"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-07-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PublicIPPrefix')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = updated existing resource, 201 = created new one; the payload
        # shape is the same either way.
        if response.status_code == 200:
            deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    public_ip_prefix_name,  # type: str
    parameters,  # type: "models.PublicIPPrefix"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["models.PublicIPPrefix"]
    """Create or update a static or dynamic public IP prefix.

    Starts the long-running PUT operation and returns a poller that
    resolves to the resulting resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param public_ip_prefix_name: The name of the public IP prefix.
    :type public_ip_prefix_name: str
    :param parameters: Parameters supplied to the create or update public IP prefix operation.
    :type parameters: ~azure.mgmt.network.v2018_07_01.models.PublicIPPrefix
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either PublicIPPrefix or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_07_01.models.PublicIPPrefix]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPPrefix"]
    delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # No saved state: fire the initial PUT and keep the raw pipeline
        # response so the poller can track the operation.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            public_ip_prefix_name=public_ip_prefix_name,
            parameters=parameters,
            cls=lambda x, y, z: x,
            **kwargs
        )

    # These were consumed by the initial call; drop them before handing
    # the remaining kwargs to the polling strategy.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response into the resource model.
        result = self._deserialize('PublicIPPrefix', pipeline_response)
        return cls(pipeline_response, result, {}) if cls else result

    if polling is True:
        polling_method = ARMPolling(delay, **kwargs)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def _update_tags_initial(
    self,
    resource_group_name,  # type: str
    public_ip_prefix_name,  # type: str
    parameters,  # type: "models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> "models.PublicIPPrefix"
    """Issue the raw PATCH request that updates public IP prefix tags.

    Internal helper for :meth:`begin_update_tags`; performs no polling and
    returns the deserialized ``PublicIPPrefix`` from the 200 response.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPPrefix"]
    # Default ARM error mapping; callers may extend it via the error_map kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL from the operation's metadata template.
    url = self._update_tags_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the TagsObject payload as the request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('PublicIPPrefix', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def begin_update_tags(
    self,
    resource_group_name,  # type: str
    public_ip_prefix_name,  # type: str
    parameters,  # type: "models.TagsObject"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["models.PublicIPPrefix"]
    """Update the tags on a public IP prefix.

    Starts the long-running PATCH operation and returns a poller that
    resolves to the updated resource.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param public_ip_prefix_name: The name of the public IP prefix.
    :type public_ip_prefix_name: str
    :param parameters: Parameters supplied to update public IP prefix tags.
    :type parameters: ~azure.mgmt.network.v2018_07_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either PublicIPPrefix or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_07_01.models.PublicIPPrefix]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPPrefix"]
    delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    if cont_token is None:
        # No saved state: issue the initial PATCH and keep the raw
        # pipeline response for the poller.
        raw_result = self._update_tags_initial(
            resource_group_name=resource_group_name,
            public_ip_prefix_name=public_ip_prefix_name,
            parameters=parameters,
            cls=lambda x, y, z: x,
            **kwargs
        )

    # Consumed by the initial call; strip before configuring polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response into the resource model.
        result = self._deserialize('PublicIPPrefix', pipeline_response)
        return cls(pipeline_response, result, {}) if cls else result

    if polling is True:
        polling_method = ARMPolling(delay, **kwargs)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}  # type: ignore
def list_all(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.PublicIPPrefixListResult"]
    """Gets all the public IP prefixes in a subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.PublicIPPrefixListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPPrefixListResult"]
    # Default ARM error mapping; callers may extend it via the error_map kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build either the first-page request or a follow-up page request.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # First page: build the URL from the operation metadata template.
            # Construct URL
            url = self.list_all.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # Subsequent pages: the service returns a complete next-page URL.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (next_link, iterator of elements).
        deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, mapping error status codes to typed exceptions.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'}  # type: ignore
def list(
    self,
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.PublicIPPrefixListResult"]
    """Enumerate all public IP prefixes in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_07_01.models.PublicIPPrefixListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.PublicIPPrefixListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        headers = {}  # type: Dict[str, Any]
        headers['Accept'] = self._serialize.header("accept", accept, 'str')

        if next_link:
            # Follow-up page: the service hands back a fully formed URL.
            return self._client.get(next_link, {}, headers)

        # First page: expand the metadata URL template.
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query = {}  # type: Dict[str, Any]
        query['api-version'] = self._serialize.query("api_version", api_version, 'str')
        return self._client.get(url, query, headers)

    def extract_data(pipeline_response):
        # Turn one response page into (next_link, iterator of elements).
        page = self._deserialize('PublicIPPrefixListResult', pipeline_response)
        elems = page.value
        if cls:
            elems = cls(elems)
        return page.next_link or None, iter(elems)

    def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(get_next, extract_data)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'}  # type: ignore
| 48.128978 | 200 | 0.659312 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PublicIPPrefixesOperations(object):
    """Operations for Microsoft.Network publicIPPrefixes (api-version 2018-07-01).

    Instantiated by the service client; not meant to be constructed directly.
    """

    # Alias so callers can reach the models module through the operations class.
    models = models

    def __init__(self, client, config, serializer, deserializer):
        """Store the pipeline client, configuration and (de)serializers.

        :param client: Pipeline client used to build and send requests.
        :param config: Service client configuration (subscription id, polling interval, ...).
        :param serializer: Serializer used for request bodies, paths, queries and headers.
        :param deserializer: Deserializer used for response bodies.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
def _delete_initial(
    self,
    resource_group_name,
    public_ip_prefix_name,
    **kwargs
):
    """Issue the raw DELETE request for a public IP prefix.

    Internal helper for :meth:`begin_delete`; performs no polling and
    returns ``None`` (or ``cls(...)`` when a custom callback is supplied).
    """
    cls = kwargs.pop('cls', None)
    # Default ARM error mapping; callers may extend it via the error_map kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"

    # Build the request URL from the operation's metadata template.
    url = self._delete_initial.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {}

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202/204 are all acceptable outcomes for DELETE.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_map=error_map) if False else None
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def begin_delete(
    self,
    resource_group_name,
    public_ip_prefix_name,
    **kwargs
):
    """Delete the specified public IP prefix.

    Starts the long-running DELETE operation and returns an
    ``LROPoller`` that resolves to ``None`` on completion.

    :param resource_group_name: The name of the resource group.
    :param public_ip_prefix_name: The name of the public IP prefix.
    :keyword callable cls: A custom type or function that will be passed the direct response.
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a custom polling object.
    :keyword int polling_interval: Default waiting time between two polls when no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response).
    """
    polling = kwargs.pop('polling', True)
    cls = kwargs.pop('cls', None)
    delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)

    if cont_token is None:
        # No saved state: fire the initial DELETE and keep the raw
        # pipeline response for the poller.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            public_ip_prefix_name=public_ip_prefix_name,
            cls=lambda x, y, z: x,
            **kwargs
        )

    # Consumed by the initial call; strip before configuring polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # DELETE has no body; only invoke the custom callback when given.
        return cls(pipeline_response, None, {}) if cls else None

    if polling is True:
        polling_method = ARMPolling(delay, **kwargs)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def get(
    self,
    resource_group_name,
    public_ip_prefix_name,
    expand=None,
    **kwargs
):
    """Retrieve the specified public IP prefix.

    :param resource_group_name: The name of the resource group.
    :param public_ip_prefix_name: The name of the public IP prefix.
    :param expand: Optional ``$expand`` expression applied to the operation.
    :keyword callable cls: A custom type or function that will be passed the direct response.
    :return: PublicIPPrefix, or the result of cls(response).
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    accept = "application/json"

    # Expand the metadata URL template with the encoded path values.
    url = self._client.format_url(
        self.get.metadata['url'],
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        publicIpPrefixName=self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    )

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}
    if expand is not None:
        query['$expand'] = self._serialize.query("expand", expand, 'str')

    headers = {'Accept': self._serialize.header("accept", accept, 'str')}

    request = self._client.get(url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def _create_or_update_initial(
    self,
    resource_group_name,
    public_ip_prefix_name,
    parameters,
    **kwargs
):
    """Issue the raw PUT request for create-or-update of a public IP prefix.

    Internal helper for :meth:`begin_create_or_update`; performs no polling
    and returns the deserialized ``PublicIPPrefix`` from the 200/201 response.
    """
    cls = kwargs.pop('cls', None)
    # Default ARM error mapping; callers may extend it via the error_map kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2018-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Build the request URL from the operation's metadata template.
    url = self._create_or_update_initial.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    header_parameters = {}
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the PublicIPPrefix payload as the request body.
    body_content_kwargs = {}
    body_content = self._serialize.body(parameters, 'PublicIPPrefix')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both 200 (updated) and 201 (created) carry a PublicIPPrefix body.
    if response.status_code == 200:
        deserialized = self._deserialize('PublicIPPrefix', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('PublicIPPrefix', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def begin_create_or_update(
self,
resource_group_name,
public_ip_prefix_name,
parameters,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def _update_tags_initial(
self,
resource_group_name,
public_ip_prefix_name,
parameters,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._update_tags_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def begin_update_tags(
self,
resource_group_name,
public_ip_prefix_name,
parameters,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'}
def list_all(
self,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'}
def list(
self,
resource_group_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'}
| true | true |
f736947fd033510d384b64e5023fa3cb054be305 | 252 | py | Python | src/yafowil/plone/interfaces.py | gogobd/yafowil.plone | 633a28dc3bbf018d84c6e399c31e26b8125399a5 | [
"BSD-3-Clause"
] | 1 | 2019-07-09T12:46:54.000Z | 2019-07-09T12:46:54.000Z | src/yafowil/plone/interfaces.py | gogobd/yafowil.plone | 633a28dc3bbf018d84c6e399c31e26b8125399a5 | [
"BSD-3-Clause"
] | 18 | 2015-10-09T22:39:00.000Z | 2021-09-06T07:01:42.000Z | src/yafowil/plone/interfaces.py | gogobd/yafowil.plone | 633a28dc3bbf018d84c6e399c31e26b8125399a5 | [
"BSD-3-Clause"
] | 3 | 2018-01-20T18:31:55.000Z | 2021-06-10T14:00:01.000Z | from zope.publisher.interfaces.browser import IDefaultBrowserLayer
class IYafowilLayer(IDefaultBrowserLayer):
    """Browser layer marking requests for which YAFOWIL integration is active.
    """
class IYafowilDemoLayer(IYafowilLayer):
    """Browser layer marking requests for which the YAFOWIL demos are active.
    """
| 21 | 66 | 0.753968 | from zope.publisher.interfaces.browser import IDefaultBrowserLayer
class IYafowilLayer(IDefaultBrowserLayer):
class IYafowilDemoLayer(IYafowilLayer):
| true | true |
f73694a4e9e1db43beb7442bfe545c1eaeee88b0 | 181 | py | Python | lib/pylint/test/input/func_noerror_builtin_module_test.py | willemneal/Docky | d3504e1671b4a6557468234c263950bfab461ce4 | [
"MIT"
] | 3 | 2018-11-25T01:09:55.000Z | 2021-08-24T01:56:36.000Z | lib/pylint/test/input/func_noerror_builtin_module_test.py | willemneal/Docky | d3504e1671b4a6557468234c263950bfab461ce4 | [
"MIT"
] | null | null | null | lib/pylint/test/input/func_noerror_builtin_module_test.py | willemneal/Docky | d3504e1671b4a6557468234c263950bfab461ce4 | [
"MIT"
] | 3 | 2018-11-09T03:38:09.000Z | 2020-02-24T06:26:10.000Z | """test import from a builtin module"""
from __future__ import absolute_import
__revision__ = None
from math import log10
def log10_2():
    """Return the base-10 logarithm of two."""
    value = log10(2)
    return value
| 18.1 | 39 | 0.712707 | from __future__ import absolute_import
__revision__ = None
from math import log10
def log10_2():
return log10(2)
| true | true |
f7369619cd01529ee372590cea33f5f73b48f876 | 5,341 | py | Python | spikeforest_analysis/compare_sortings_with_truth.py | tjd2002/spikeforest2 | 2e393564b858b2995aa2ccccd9bd73065681b5de | [
"Apache-2.0"
] | null | null | null | spikeforest_analysis/compare_sortings_with_truth.py | tjd2002/spikeforest2 | 2e393564b858b2995aa2ccccd9bd73065681b5de | [
"Apache-2.0"
] | null | null | null | spikeforest_analysis/compare_sortings_with_truth.py | tjd2002/spikeforest2 | 2e393564b858b2995aa2ccccd9bd73065681b5de | [
"Apache-2.0"
] | null | null | null | import spikeextractors as si
#import spikewidgets as sw
import spiketoolkit as st
import mlprocessors as mlpr
import json
from cairio import client as ca
import numpy as np
from copy import deepcopy
def compare_sortings_with_truth(sortings, compute_resource, num_workers=None):
    """Run ground-truth comparison jobs for a batch of sortings.

    One ``GenSortingComparisonTable`` job is created per sorting dict and all
    jobs are executed as a single batch on the given compute resource.

    Returns deep copies of the input dicts, each extended with a
    ``comparison_with_truth`` entry referencing the job's .json/.html outputs.
    """
    print('>>>>>> compare sortings with truth')
    container = 'sha1://3b26155930cc4a4745c67b702ce297c9c968ac94/02-12-2019/mountaintools_basic.simg'
    jobs = [
        GenSortingComparisonTable.createJob(
            firings=sorting['firings'],
            firings_true=sorting['firings_true'],
            units_true=sorting.get('units_true', []),
            json_out={'ext': '.json', 'upload': True},
            html_out={'ext': '.html', 'upload': True},
            _container=container
        )
        for sorting in sortings
    ]
    mlpr.executeBatch(jobs=jobs, label='Compare sortings with truth',
                      num_workers=num_workers, compute_resource=compute_resource)
    results = []
    for job, sorting in zip(jobs, sortings):
        augmented = deepcopy(sorting)
        augmented['comparison_with_truth'] = {
            'json': job['result']['outputs']['json_out'],
            'html': job['result']['outputs']['html_out'],
        }
        results.append(augmented)
    return results
class GenSortingComparisonTable(mlpr.Processor):
    """Processor comparing a predicted sorting against ground truth and
    writing the per-unit comparison table as .json and .html files."""
    VERSION='0.2.0'
    firings=mlpr.Input('Firings file (sorting)')
    firings_true=mlpr.Input('True firings file')
    units_true=mlpr.IntegerListParameter('List of true units to consider')
    json_out=mlpr.Output('Table as .json file produced from pandas dataframe')
    html_out=mlpr.Output('Table as .html file produced from pandas dataframe')

    def run(self):
        predicted = si.MdaSortingExtractor(firings_file=self.firings)
        ground_truth = si.MdaSortingExtractor(firings_file=self.firings_true)
        if (self.units_true is not None) and (len(self.units_true) > 0):
            # Restrict the ground truth to the requested subset of units.
            ground_truth = si.SubSortingExtractor(parent_sorting=ground_truth, unit_ids=self.units_true)
        comparison = st.comparison.SortingComparison(ground_truth, predicted)
        table = get_comparison_data_frame(comparison=comparison)
        # Write the transposed dict form first, then the HTML rendering.
        _write_json_file(table.transpose().to_dict(), self.json_out)
        _write_json_file(table.to_html(index=False), self.html_out)
def get_comparison_data_frame(*, comparison):
    """Summarize a ground-truth sorting comparison as a pandas DataFrame.

    Parameters
    ----------
    comparison : SortingComparison
        Comparison object whose sorting 1 is the ground truth and whose
        sorting 2 is the predicted sorting.

    Returns
    -------
    pandas.DataFrame
        One row per ground-truth unit with match counts and accuracy /
        false-negative / false-positive rates. The fractional columns
        (``accuracy``, ``f_n``, ``f_p``) are formatted as strings with
        four decimals.
    """
    import pandas as pd

    SC = comparison
    # Additional per-unit properties (e.g. snr) could be listed here; they
    # would need to be properties in the sortings of the comparison.
    unit_properties = []

    sorting1 = SC.getSorting1()
    sorting2 = SC.getSorting2()
    unit1_ids = sorting1.getUnitIds()
    unit2_ids = sorting2.getUnitIds()

    # Count spike events once per unit so spike trains are not re-read below.
    event_counts1 = dict()
    for u1 in unit1_ids:
        event_counts1[u1] = len(sorting1.getUnitSpikeTrain(u1))
    event_counts2 = dict()
    for u2 in unit2_ids:
        event_counts2[u2] = len(sorting2.getUnitSpikeTrain(u2))

    rows = []
    for unit1 in unit1_ids:
        unit2 = SC.getBestUnitMatch1(unit1)
        if unit2 >= 0:
            num_matches = SC.getMatchingEventCount(unit1, unit2)
            num_false_negatives = event_counts1[unit1] - num_matches
            num_false_positives = event_counts2[unit2] - num_matches
        else:
            # No matching predicted unit found: every true event is a miss.
            num_matches = 0
            num_false_negatives = event_counts1[unit1]
            num_false_positives = 0
        row0 = {
            'unit_id': unit1,
            'accuracy': _safe_frac(num_matches, num_false_positives + num_false_negatives + num_matches),
            'best_unit': unit2,
            'matched_unit': SC.getMappedSorting1().getMappedUnitIds(unit1),
            'num_matches': num_matches,
            'num_false_negatives': num_false_negatives,
            'num_false_positives': num_false_positives,
            'f_n': _safe_frac(num_false_negatives, num_false_negatives + num_matches),
            'f_p': _safe_frac(num_false_positives, num_false_positives + num_matches)
        }
        for prop in unit_properties:
            pname = prop['name']
            row0[pname] = sorting1.getUnitProperty(unit_id=int(unit1), property_name=pname)
        rows.append(row0)

    fields = ['unit_id', 'accuracy', 'best_unit', 'matched_unit', 'num_matches',
              'num_false_negatives', 'num_false_positives', 'f_n', 'f_p']
    fields += [prop['name'] for prop in unit_properties]

    # Pass the columns explicitly so an empty comparison yields an empty frame
    # with the expected columns instead of raising KeyError on ``df[fields]``.
    df = pd.DataFrame(rows, columns=fields)
    df['accuracy'] = df['accuracy'].map('{:,.4f}'.format)
    df['f_n'] = df['f_n'].map('{:,.4f}'.format)
    df['f_p'] = df['f_p'].map('{:,.4f}'.format)
    return df
def _safe_frac(numer, denom):
    """Return ``numer / denom`` as a float, or 0 for a zero denominator."""
    return 0 if denom == 0 else float(numer) / denom
def _write_json_file(obj, path):
    """Serialize ``obj`` as JSON into the file at ``path``."""
    with open(path, 'w') as fh:
        json.dump(obj, fh)
import spiketoolkit as st
import mlprocessors as mlpr
import json
from cairio import client as ca
import numpy as np
from copy import deepcopy
def compare_sortings_with_truth(sortings,compute_resource,num_workers=None):
print('>>>>>> compare sortings with truth')
container='sha1://3b26155930cc4a4745c67b702ce297c9c968ac94/02-12-2019/mountaintools_basic.simg'
jobs_gen_table=[]
for sorting in sortings:
units_true=sorting.get('units_true',[])
firings=sorting['firings']
firings_true=sorting['firings_true']
units_true=units_true
job=GenSortingComparisonTable.createJob(
firings=firings,
firings_true=firings_true,
units_true=units_true,
json_out={'ext':'.json','upload':True},
html_out={'ext':'.html','upload':True},
_container=container
)
jobs_gen_table.append(job)
all_jobs=jobs_gen_table
label='Compare sortings with truth'
mlpr.executeBatch(jobs=all_jobs,label=label,num_workers=num_workers,compute_resource=compute_resource)
sortings_out=[]
for i,sorting in enumerate(sortings):
comparison_with_truth=dict()
comparison_with_truth['json']=jobs_gen_table[i]['result']['outputs']['json_out']
comparison_with_truth['html']=jobs_gen_table[i]['result']['outputs']['html_out']
sorting2=deepcopy(sorting)
sorting2['comparison_with_truth']=comparison_with_truth
sortings_out.append(sorting2)
return sortings_out
class GenSortingComparisonTable(mlpr.Processor):
VERSION='0.2.0'
firings=mlpr.Input('Firings file (sorting)')
firings_true=mlpr.Input('True firings file')
units_true=mlpr.IntegerListParameter('List of true units to consider')
json_out=mlpr.Output('Table as .json file produced from pandas dataframe')
html_out=mlpr.Output('Table as .html file produced from pandas dataframe')
def run(self):
sorting=si.MdaSortingExtractor(firings_file=self.firings)
sorting_true=si.MdaSortingExtractor(firings_file=self.firings_true)
if (self.units_true is not None) and (len(self.units_true)>0):
sorting_true=si.SubSortingExtractor(parent_sorting=sorting_true,unit_ids=self.units_true)
SC=st.comparison.SortingComparison(sorting_true,sorting)
df=get_comparison_data_frame(comparison=SC)
json=df.transpose().to_dict()
html=df.to_html(index=False)
_write_json_file(json,self.json_out)
_write_json_file(html,self.html_out)
def get_comparison_data_frame(*,comparison):
import pandas as pd
SC=comparison
unit_properties=[]
sorting1=SC.getSorting1()
sorting2=SC.getSorting2()
unit1_ids = sorting1.getUnitIds()
unit2_ids = sorting2.getUnitIds()
N1 = len(unit1_ids)
N2 = len(unit2_ids)
event_counts1 = dict()
for i1, u1 in enumerate(unit1_ids):
times1 = sorting1.getUnitSpikeTrain(u1)
event_counts1[u1] = len(times1)
event_counts2 = dict()
for i2, u2 in enumerate(unit2_ids):
times2 = sorting2.getUnitSpikeTrain(u2)
event_counts2[u2] = len(times2)
rows = []
for u_1, unit1 in enumerate(unit1_ids):
unit2 = SC.getBestUnitMatch1(unit1)
if unit2>=0:
num_matches=SC.getMatchingEventCount(unit1, unit2)
num_false_negatives=event_counts1[unit1]-num_matches
num_false_positives=event_counts2[unit2]-num_matches
else:
num_matches=0
num_false_negatives=event_counts1[unit1]
num_false_positives=0
row0 = {
'unit_id': unit1,
'accuracy': _safe_frac(num_matches,num_false_positives+num_false_negatives+num_matches),
'best_unit': unit2,
'matched_unit': SC.getMappedSorting1().getMappedUnitIds(unit1),
'num_matches': num_matches,
'num_false_negatives': num_false_negatives,
'num_false_positives': num_false_positives,
'f_n': _safe_frac(num_false_negatives,num_false_negatives+num_matches),
'f_p': _safe_frac(num_false_positives,num_false_positives+num_matches)
}
for prop in unit_properties:
pname = prop['name']
row0[pname] = SC.getSorting1().getUnitProperty(unit_id=int(unit1), property_name=pname)
rows.append(row0)
df = pd.DataFrame(rows)
fields = ['unit_id']
fields = fields + ['accuracy', 'best_unit', 'matched_unit', 'num_matches', 'num_false_negatives', 'num_false_positives', 'f_n', 'f_p']
for prop in unit_properties:
pname = prop['name']
fields.append(pname)
df = df[fields]
df['accuracy'] = df['accuracy'].map('{:,.4f}'.format)
df['f_n'] = df['f_n'].map('{:,.4f}'.format)
df['f_p'] = df['f_p'].map('{:,.4f}'.format)
return df
def _safe_frac(numer, denom):
if denom == 0:
return 0
return float(numer) / denom
def _write_json_file(obj,path):
with open(path,'w') as f:
return json.dump(obj,f)
| true | true |
f7369630d3cb8bb5dfbc51e5029c0582cfc8043a | 40,331 | py | Python | imgaug/augmenters/weather.py | dynamicguy/imgaug | f58c06323eb04416c76de1f18952ca5875caf883 | [
"MIT"
] | 4 | 2018-11-24T15:31:36.000Z | 2020-06-23T02:52:45.000Z | imgaug/augmenters/weather.py | LU-K-Brant/imgaug | f58c06323eb04416c76de1f18952ca5875caf883 | [
"MIT"
] | null | null | null | imgaug/augmenters/weather.py | LU-K-Brant/imgaug | f58c06323eb04416c76de1f18952ca5875caf883 | [
"MIT"
] | null | null | null | """
Augmenters that create weather effects.
Do not import directly from this file, as the categorization is not final.
Use instead::
from imgaug import augmenters as iaa
and then e.g.::
seq = iaa.Sequential([iaa.Snowflakes()])
List of augmenters:
* FastSnowyLandscape
* Clouds
* Fog
* CloudLayer
* Snowflakes
* SnowflakesLayer
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import cv2
from . import meta, arithmetic, blur, contrast
from .. import imgaug as ia
from .. import parameters as iap
class FastSnowyLandscape(meta.Augmenter):
    """
    Augmenter to convert non-snowy landscapes to snowy ones.

    This expects to get an image that roughly shows a landscape. Images are
    processed as RGB and converted internally to HLS colorspace.

    This is based on the method proposed by
    https://medium.freecodecamp.org/image-augmentation-make-it-rain-make-it-snow-how-to-modify-a-photo-with-machine-learning-163c0cb3843f?gi=bca4a13e634c

    Parameters
    ----------
    lightness_threshold : number or tuple of number or list of number\
                          or imgaug.parameters.StochasticParameter, optional
        All pixels with lightness in HLS colorspace below this value will have their lightness increased by
        `lightness_multiplier`.

            * If an int, then that value will be used for all images.
            * If a tuple ``(a, b)``, then a value from the discrete range ``[a .. b]`` will be used.
            * If a list, then a random value will be sampled from that list per image.
            * If a StochasticParameter, then a value will be sampled per image from that parameter.

    lightness_multiplier : number or tuple of number or list of number\
                           or imgaug.parameters.StochasticParameter, optional
        Multiplier for pixel's lightness value in HLS colorspace. Affects all pixels selected via
        `lightness_threshold`.

            * If a number, then that value will be used for all images.
            * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
            * If a list, then a random value will be sampled from that list per image.
            * If a StochasticParameter, then a value will be sampled per image from that parameter.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.FastSnowyLandscape(lightness_threshold=140, lightness_multiplier=2.5)

    Search for all pixels in the image with a lightness value in HLS colorspace of less than 140 and increase their
    lightness by a factor of 2.5. This is the configuration proposed in the original article (see link above).

    >>> aug = iaa.FastSnowyLandscape(lightness_threshold=(100, 255), lightness_multiplier=(1.0, 4.0))

    The lightness threshold is sampled from ``uniform(100, 255)`` (per image) and the multiplier
    from ``uniform(1.0, 4.0)`` (per image).
    """

    def __init__(self, lightness_threshold=(100, 255), lightness_multiplier=(1.0, 4.0), name=None, deterministic=False,
                 random_state=None):
        super(FastSnowyLandscape, self).__init__(name=name, deterministic=deterministic, random_state=random_state)

        # Threshold is restricted to the valid HLS lightness range [0, 255];
        # the multiplier only has to be non-negative.
        self.lightness_threshold = iap.handle_continuous_param(lightness_threshold, "lightness_threshold",
                                                               value_range=(0, 255),
                                                               tuple_to_uniform=True,
                                                               list_to_choice=True)
        self.lightness_multiplier = iap.handle_continuous_param(lightness_multiplier, "lightness_multiplier",
                                                                value_range=(0, None), tuple_to_uniform=True,
                                                                list_to_choice=True)

    def _draw_samples(self, augmentables, random_state):
        # Draw one (threshold, multiplier) pair per augmentable. Note: the
        # threshold uses rss[1] and the multiplier rss[0]; the assignment
        # order is arbitrary but must stay fixed for reproducibility.
        nb_augmentables = len(augmentables)
        rss = ia.derive_random_states(random_state, 2)
        thresh_samples = self.lightness_threshold.draw_samples((nb_augmentables,), rss[1])
        lmul_samples = self.lightness_multiplier.draw_samples((nb_augmentables,), rss[0])
        return thresh_samples, lmul_samples

    def _augment_images(self, images, random_state, parents, hooks):
        # Remember the input dtypes so they can be restored after the
        # float64-based HLS manipulation.
        input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)
        thresh_samples, lmul_samples = self._draw_samples(images, random_state)
        result = images  # images are modified in-place (imgaug convention)

        for i, (image, input_dtype, thresh, lmul) in enumerate(zip(images, input_dtypes, thresh_samples, lmul_samples)):
            # Work in HLS so that only lightness (channel 1) is changed.
            image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float64)
            lightness = image_hls[..., 1]

            # Brighten only the dark pixels (below the sampled threshold).
            lightness[lightness < thresh] *= lmul

            # Clip and restore dtype BEFORE converting back to RGB.
            image_hls = meta.clip_augmented_image_(image_hls, 0, 255)  # TODO make value range more flexible
            image_hls = meta.restore_augmented_image_dtype_(image_hls, input_dtype)
            image_rgb = cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)

            result[i] = image_rgb

        return result

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Lightness change does not affect heatmap values.
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Purely photometric augmentation; keypoints are unchanged.
        return keypoints_on_images

    def get_parameters(self):
        return [self.lightness_threshold, self.lightness_multiplier]
# TODO add vertical gradient alpha to have clouds only at skylevel/groundlevel
# TODO add configurable parameters
def Clouds(name=None, deterministic=False, random_state=None):
    """
    Augmenter to draw clouds in images.

    Wrapper around ``CloudLayer`` executing 1 to 2 layers per image, which
    leads to varying densities and frequency patterns of clouds.

    This augmenter seems to be fairly robust w.r.t. the image size. Tested
    with ``96x128``, ``192x256`` and ``960x1280``.

    Parameters
    ----------
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Clouds()

    Creates an augmenter that adds clouds to images.
    """
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    # Two differently configured layers: the first with a small alpha mask
    # resolution, the second with coarser intensity/alpha frequencies.
    layer1 = CloudLayer(
        intensity_mean=(196, 255), intensity_freq_exponent=(-2.5, -2.0), intensity_coarse_scale=10,
        alpha_min=0, alpha_multiplier=(0.25, 0.75), alpha_size_px_max=(2, 8), alpha_freq_exponent=(-2.5, -2.0),
        sparsity=(0.8, 1.0), density_multiplier=(0.5, 1.0)
    )
    layer2 = CloudLayer(
        intensity_mean=(196, 255), intensity_freq_exponent=(-2.0, -1.0), intensity_coarse_scale=10,
        alpha_min=0, alpha_multiplier=(0.5, 1.0), alpha_size_px_max=(64, 128), alpha_freq_exponent=(-2.0, -1.0),
        sparsity=(1.0, 1.4), density_multiplier=(0.8, 1.5)
    )
    return meta.SomeOf((1, 2), children=[layer1, layer2], random_order=False,
                       name=name, deterministic=deterministic, random_state=random_state)
# TODO add vertical gradient alpha to have fog only at skylevel/groundlevel
# TODO add configurable parameters
def Fog(name=None, deterministic=False, random_state=None):
    """
    Augmenter to draw fog in images.

    Wrapper around ``CloudLayer`` executing a single layer per image with a
    configuration leading to fairly dense clouds with low-frequency patterns.

    This augmenter seems to be fairly robust w.r.t. the image size. Tested
    with ``96x128``, ``192x256`` and ``960x1280``.

    Parameters
    ----------
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Fog()

    Creates an augmenter that adds fog to images.
    """
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    # High minimum alpha + low frequency exponents give the dense,
    # large-scale appearance of fog (compare with ``Clouds``).
    fog_config = dict(
        intensity_mean=(220, 255), intensity_freq_exponent=(-2.0, -1.5), intensity_coarse_scale=2,
        alpha_min=(0.7, 0.9), alpha_multiplier=0.3, alpha_size_px_max=(2, 8), alpha_freq_exponent=(-4.0, -2.0),
        sparsity=0.9, density_multiplier=(0.4, 0.9)
    )
    return CloudLayer(name=name, deterministic=deterministic, random_state=random_state, **fog_config)
# TODO add perspective transform to each cloud layer to make them look more distant?
# TODO alpha_mean and density overlap - remove one of them
class CloudLayer(meta.Augmenter):
    """
    Augmenter to add a single layer of clouds to an image.

    The layer is produced by alpha-blending a per-pixel intensity map (the
    cloud color) with the input image, using a frequency-noise alpha mask.

    Parameters
    ----------
    intensity_mean : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Mean intensity of the clouds (i.e. mean color). Recommended to be around ``(190, 255)``.

    intensity_freq_exponent : number or tuple of number or list of number\
                              or imgaug.parameters.StochasticParameter
        Exponent of the frequency noise used to add fine intensity to the mean intensity.
        Recommended to be somewhere around ``(-2.5, -1.5)``.
        See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.

    intensity_coarse_scale : number or tuple of number or list of number\
                             or imgaug.parameters.StochasticParameter
        Standard deviation of the gaussian distribution used to add more localized intensity to the
        mean intensity. Sampled in low resolution space, i.e. affects final intensity on a coarse
        level. Recommended to be around ``(0, 10)``.

    alpha_min : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Minimum alpha when blending cloud noise with the image. High values will lead to clouds
        being "everywhere". Recommended to usually be at around ``0.0`` for clouds and ``>0`` for fog.

    alpha_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Multiplier for the sampled alpha values. High values will lead to denser clouds wherever
        they are visible. Recommended to be at around ``(0.3, 1.0)``. Note that this parameter
        currently overlaps with `density_multiplier`, which is applied a bit later to the alpha mask.

    alpha_size_px_max : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Controls the image size at which the alpha mask is sampled. Lower values will lead to
        coarser alpha masks and hence larger clouds (and empty areas).
        See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.

    alpha_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Exponent of the frequency noise used to sample the alpha mask. Similarly to
        `alpha_size_max_px`, lower values will lead to coarser alpha patterns.
        Recommended to be somewhere around ``(-4.0, -1.5)``.

    sparsity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Exponent applied late to the alpha mask. Lower values will lead to coarser cloud patterns,
        higher values to finer patterns. Recommended to be somewhere around ``1.0``; do not deviate
        far from that value, otherwise the alpha mask might get weird patterns with sudden
        fall-offs to zero.

    density_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Late multiplier for the alpha mask, similar to `alpha_multiplier`. Set this higher to get
        "denser" clouds wherever they are visible. Recommended to be around ``(0.5, 1.5)``.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    """
    def __init__(self, intensity_mean, intensity_freq_exponent, intensity_coarse_scale,
                 alpha_min, alpha_multiplier, alpha_size_px_max, alpha_freq_exponent,
                 sparsity, density_multiplier,
                 name=None, deterministic=False, random_state=None):
        super(CloudLayer, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        self.intensity_mean = iap.handle_continuous_param(intensity_mean, "intensity_mean")
        self.intensity_freq_exponent = intensity_freq_exponent
        self.intensity_coarse_scale = intensity_coarse_scale
        self.alpha_min = iap.handle_continuous_param(alpha_min, "alpha_min")
        self.alpha_multiplier = iap.handle_continuous_param(alpha_multiplier, "alpha_multiplier")
        self.alpha_size_px_max = alpha_size_px_max
        self.alpha_freq_exponent = alpha_freq_exponent
        self.sparsity = iap.handle_continuous_param(sparsity, "sparsity")
        self.density_multiplier = iap.handle_continuous_param(density_multiplier, "density_multiplier")

    def _augment_images(self, images, random_state, parents, hooks):
        # One derived random state per image so each image gets an
        # independent cloud pattern.
        rss = ia.derive_random_states(random_state, len(images))
        result = images
        for i, (image, rs) in enumerate(zip(images, rss)):
            result[i] = self.draw_on_image(image, rs)
        return result

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # Photometric-only augmentation; heatmaps are unchanged.
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # Photometric-only augmentation; keypoints are unchanged.
        return keypoints_on_images

    def get_parameters(self):
        # BUG FIX: the previous version also returned ``self.density_min``,
        # an attribute that is never set in ``__init__`` and therefore raised
        # an AttributeError whenever get_parameters() was called.
        return [self.intensity_mean, self.alpha_min, self.alpha_multiplier, self.alpha_size_px_max,
                self.alpha_freq_exponent, self.intensity_freq_exponent, self.sparsity,
                self.density_multiplier, self.intensity_coarse_scale]

    def draw_on_image(self, image, random_state):
        """Alpha-blend a sampled cloud layer onto ``image`` (uint8 output)."""
        alpha, intensity = self.generate_maps(image, random_state)
        alpha = alpha[..., np.newaxis]
        intensity = intensity[..., np.newaxis]

        return np.clip(
            (1 - alpha) * image.astype(np.float64) + alpha * intensity.astype(np.float64),
            0,
            255
        ).astype(np.uint8)

    def generate_maps(self, image, random_state):
        """Sample and return ``(alpha, intensity)`` maps of the image's HxW size."""
        intensity_mean_sample = self.intensity_mean.draw_sample(random_state)
        alpha_min_sample = self.alpha_min.draw_sample(random_state)
        alpha_multiplier_sample = self.alpha_multiplier.draw_sample(random_state)
        alpha_size_px_max = self.alpha_size_px_max
        intensity_freq_exponent = self.intensity_freq_exponent
        alpha_freq_exponent = self.alpha_freq_exponent
        sparsity_sample = self.sparsity.draw_sample(random_state)
        density_multiplier_sample = self.density_multiplier.draw_sample(random_state)

        height, width = image.shape[0:2]
        rss_alpha, rss_intensity = ia.derive_random_states(random_state, 2)

        # Intensity = coarse low-resolution base + fine frequency-noise detail.
        intensity_coarse = self._generate_intensity_map_coarse(
            height, width, intensity_mean_sample,
            iap.Normal(0, scale=self.intensity_coarse_scale),
            rss_intensity
        )
        intensity_fine = self._generate_intensity_map_fine(height, width, intensity_mean_sample,
                                                           intensity_freq_exponent, rss_intensity)
        intensity = np.clip(intensity_coarse + intensity_fine, 0, 255)

        alpha = self._generate_alpha_mask(height, width, alpha_min_sample, alpha_multiplier_sample,
                                          alpha_freq_exponent, alpha_size_px_max,
                                          sparsity_sample, density_multiplier_sample, rss_alpha)

        return alpha, intensity

    @classmethod
    def _generate_intensity_map_coarse(cls, height, width, intensity_mean, intensity_local_offset, random_state):
        """Sample an 8x8 gaussian offset map and upscale it to HxW (uint8)."""
        height_intensity, width_intensity = (8, 8)  # TODO this might be too simplistic for some image sizes
        intensity = intensity_mean\
            + intensity_local_offset.draw_samples((height_intensity, width_intensity), random_state)
        intensity = ia.imresize_single_image(np.clip(intensity, 0, 255).astype(np.uint8), (height, width),
                                             interpolation="cubic")
        return intensity

    @classmethod
    def _generate_intensity_map_fine(cls, height, width, intensity_mean, exponent, random_state):
        """Sample fine frequency-noise detail around ``intensity_mean``."""
        intensity_details_generator = iap.FrequencyNoise(
            exponent=exponent,
            size_px_max=max(height, width),
            upscale_method="cubic"
        )
        intensity_details = intensity_details_generator.draw_samples((height, width), random_state)
        # Map noise from [0, 1] to roughly +/- 20% of the mean intensity.
        return intensity_mean * ((2*intensity_details - 1.0)/5.0)

    @classmethod
    def _generate_alpha_mask(cls, height, width, alpha_min, alpha_multiplier, exponent, alpha_size_px_max, sparsity,
                             density_multiplier, random_state):
        """Sample the per-pixel blend mask, clipped to [0.0, 1.0]."""
        alpha_generator = iap.FrequencyNoise(
            exponent=exponent,
            size_px_max=alpha_size_px_max,
            upscale_method="cubic"
        )
        alpha_local = alpha_generator.draw_samples((height, width), random_state)
        alpha = alpha_min + (alpha_multiplier * alpha_local)
        # Sparsity sharpens/softens the pattern, density scales it overall.
        alpha = (alpha ** sparsity) * density_multiplier
        alpha = np.clip(alpha, 0.0, 1.0)
        return alpha
def Snowflakes(density=(0.005, 0.075), density_uniformity=(0.3, 0.9), flake_size=(0.2, 0.7),
               flake_size_uniformity=(0.4, 0.8), angle=(-30, 30), speed=(0.007, 0.03),
               name=None, deterministic=False, random_state=None):
    """
    Augmenter to add falling snowflakes to images.

    Wrapper around ``SnowflakesLayer`` executing 1 to 3 layers per image.

    Parameters
    ----------
    density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Density of the snowflake layer, as a probability of each pixel in low resolution space to
        be a snowflake. Valid value range is ``(0.0, 1.0)``. Recommended to be
        around ``(0.01, 0.075)``.

    density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Size uniformity of the snowflakes. Higher values denote more similarly sized snowflakes.
        Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.

    flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Size of the snowflakes. Controls the resolution at which snowflakes are sampled; higher
        values mean a resolution closer to the input image's and hence smaller flakes.
        Valid value range is ``[0.0, 1.0)``. Recommended values:

            * On ``96x128`` a value of ``(0.1, 0.4)`` worked well.
            * On ``192x256`` a value of ``(0.2, 0.7)`` worked well.
            * On ``960x1280`` a value of ``(0.7, 0.95)`` worked well.

    flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Controls the size uniformity of the snowflakes. Higher values mean more similarly sized
        snowflakes. Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.

    angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Angle in degrees of motion blur applied to the snowflakes, where ``0.0`` is motion blur
        pointing straight upwards. Recommended to be around ``(-30, 30)``.
        See also :func:`imgaug.augmenters.blur.MotionBlur.__init__`.

    speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Perceived falling speed of the snowflakes; controls the motion blur's kernel size,
        roughly ``kernel_size = image_size * speed``. Valid value range is ``(0.0, 1.0)``.
        Recommended values:

            * On ``96x128`` a value of ``(0.01, 0.05)`` worked well.
            * On ``192x256`` a value of ``(0.007, 0.03)`` worked well.
            * On ``960x1280`` a value of ``(0.001, 0.03)`` worked well.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    Examples
    --------
    >>> aug = iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05))

    Adds snowflakes to small images (around ``96x128``).

    >>> aug = iaa.Snowflakes(flake_size=(0.2, 0.7), speed=(0.007, 0.03))

    Adds snowflakes to medium-sized images (around ``192x256``).

    >>> aug = iaa.Snowflakes(flake_size=(0.7, 0.95), speed=(0.001, 0.03))

    Adds snowflakes to large images (around ``960x1280``).
    """
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    template = SnowflakesLayer(
        density=density, density_uniformity=density_uniformity,
        flake_size=flake_size, flake_size_uniformity=flake_size_uniformity,
        angle=angle, speed=speed,
        blur_sigma_fraction=(0.0001, 0.001)
    )
    # Three independent copies of the layer; SomeOf applies 1-3 of them.
    children = []
    for _ in range(3):
        children.append(template.deepcopy())
    return meta.SomeOf((1, 3), children=children, random_order=False,
                       name=name, deterministic=deterministic, random_state=random_state)
# TODO snowflakes are all almost 100% white, add some grayish tones and maybe color to them
class SnowflakesLayer(meta.Augmenter):
    """
    Augmenter to add a single layer of falling snowflakes to images.

    Parameters
    ----------
    density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Density of the snowflake layer, as a probability of each pixel in low resolution space to be a snowflake.
        Valid value range is ``(0.0, 1.0)``. Recommended to be around ``(0.01, 0.075)``.

        * If a number, then that value will be used for all images.
        * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
        * If a list, then a random value will be sampled from that list per image.
        * If a StochasticParameter, then a value will be sampled per image from that parameter.

    density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Size uniformity of the snowflakes. Higher values denote more similarly sized snowflakes.
        Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.

        * If a number, then that value will be used for all images.
        * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
        * If a list, then a random value will be sampled from that list per image.
        * If a StochasticParameter, then a value will be sampled per image from that parameter.

    flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Size of the snowflakes. This parameter controls the resolution at which snowflakes are sampled.
        Higher values mean that the resolution is closer to the input image's resolution and hence each sampled
        snowflake will be smaller (because of the smaller pixel size).
        Valid value range is ``[0.0, 1.0)``. Recommended values:

        * On 96x128 a value of ``(0.1, 0.4)`` worked well.
        * On 192x256 a value of ``(0.2, 0.7)`` worked well.
        * On 960x1280 a value of ``(0.7, 0.95)`` worked well.

        Allowed datatypes:

        * If a number, then that value will be used for all images.
        * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
        * If a list, then a random value will be sampled from that list per image.
        * If a StochasticParameter, then a value will be sampled per image from that parameter.

    flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Controls the size uniformity of the snowflakes. Higher values mean that the snowflakes are more similarly
        sized. Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.

        * If a number, then that value will be used for all images.
        * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
        * If a list, then a random value will be sampled from that list per image.
        * If a StochasticParameter, then a value will be sampled per image from that parameter.

    angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Angle in degrees of motion blur applied to the snowflakes, where ``0.0`` is motion blur that points straight
        upwards. Recommended to be around ``(-30, 30)``.
        See also :func:`imgaug.augmenters.blur.MotionBlur.__init__`.

        * If a number, then that value will be used for all images.
        * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
        * If a list, then a random value will be sampled from that list per image.
        * If a StochasticParameter, then a value will be sampled per image from that parameter.

    speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Perceived falling speed of the snowflakes. This parameter controls the motion blur's kernel size.
        It follows roughly the form ``kernel_size = image_size * speed``. Hence,
        Values around ``1.0`` denote that the motion blur should "stretch" each snowflake over the whole image.
        Valid value range is ``(0.0, 1.0)``. Recommended values:

        * On 96x128 a value of ``(0.01, 0.05)`` worked well.
        * On 192x256 a value of ``(0.007, 0.03)`` worked well.
        * On 960x1280 a value of ``(0.001, 0.03)`` worked well.

        Allowed datatypes:

        * If a number, then that value will be used for all images.
        * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
        * If a list, then a random value will be sampled from that list per image.
        * If a StochasticParameter, then a value will be sampled per image from that parameter.

    blur_sigma_fraction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Standard deviation (as a fraction of the image size) of gaussian blur applied to the snowflakes.
        Valid value range is ``(0.0, 1.0)``. Recommended to be around ``(0.0001, 0.001)``. May still require tinkering
        based on image size.

        * If a number, then that value will be used for all images.
        * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
        * If a list, then a random value will be sampled from that list per image.
        * If a StochasticParameter, then a value will be sampled per image from that parameter.

    blur_sigma_limits : tuple of float, optional
        Controls allows min and max values of `blur_sigma_fraction` after(!) multiplication with the image size.
        First value is the minimum, second value is the maximum. Values outside of that range will be clipped to be
        within that range. This prevents extreme values for very small or large images.

    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    """
    def __init__(self, density, density_uniformity, flake_size, flake_size_uniformity, angle, speed, blur_sigma_fraction,
                 blur_sigma_limits=(0.5, 3.75), name=None, deterministic=False,
                 random_state=None):
        super(SnowflakesLayer, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        # NOTE: `density` is stored as-is (not normalized via handle_continuous_param())
        # because it is handed directly to arithmetic.Salt() in _generate_noise().
        self.density = density
        self.density_uniformity = iap.handle_continuous_param(density_uniformity, "density_uniformity",
                                                              value_range=(0.0, 1.0))
        # lower bound excludes exactly 0.0 (valid range is documented as [0.0, 1.0) exclusive of 0 here)
        self.flake_size = iap.handle_continuous_param(flake_size, "flake_size", value_range=(0.0+1e-4, 1.0))
        self.flake_size_uniformity = iap.handle_continuous_param(flake_size_uniformity, "flake_size_uniformity",
                                                                 value_range=(0.0, 1.0))
        self.angle = iap.handle_continuous_param(angle, "angle")
        self.speed = iap.handle_continuous_param(speed, "speed", value_range=(0.0, 1.0))
        self.blur_sigma_fraction = iap.handle_continuous_param(blur_sigma_fraction, "blur_sigma_fraction",
                                                               value_range=(0.0, 1.0))
        self.blur_sigma_limits = blur_sigma_limits  # (min, max), same for all images
        self.gate_noise_size = (8, 8)  # (height, width), same for all images

    def _augment_images(self, images, random_state, parents, hooks):
        # one derived random state per image, so every image gets independent samples
        rss = ia.derive_random_states(random_state, len(images))
        result = images
        for i, (image, rs) in enumerate(zip(images, rss)):
            result[i] = self.draw_on_image(image, rs)
        return result

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # snowflakes are a purely visual overlay; heatmaps are returned unchanged
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # purely visual overlay; keypoint coordinates are returned unchanged
        return keypoints_on_images

    def get_parameters(self):
        """Return the parameters of this augmenter (see Augmenter.get_parameters)."""
        return [self.density, self.density_uniformity, self.flake_size, self.flake_size_uniformity, self.angle,
                self.speed, self.blur_sigma_fraction, self.blur_sigma_limits, self.gate_noise_size]

    def draw_on_image(self, image, random_state):
        """Sample one snowflake layer and blend it onto a single image."""
        flake_size_sample = self.flake_size.draw_sample(random_state)
        flake_size_uniformity_sample = self.flake_size_uniformity.draw_sample(random_state)
        angle_sample = self.angle.draw_sample(random_state)
        speed_sample = self.speed.draw_sample(random_state)
        blur_sigma_fraction_sample = self.blur_sigma_fraction.draw_sample(random_state)

        height, width = image.shape[0:2]
        # larger flakes are achieved by sampling the salt noise at a lower resolution
        # (larger effective pixels) and upscaling afterwards
        downscale_factor = np.clip(1.0 - flake_size_sample, 0.001, 1.0)
        height_down, width_down = int(height*downscale_factor), int(width*downscale_factor),
        noise = self._generate_noise(
            height_down,
            width_down,
            self.density,
            ia.derive_random_state(random_state)
        )

        # gate the sampled noise via noise in range [0.0, 1.0]
        # this leads to less flakes in some areas of the image and more in other areas
        gate_noise = iap.Beta(1.0, 1.0 - self.density_uniformity)
        noise = self._gate(noise, gate_noise, self.gate_noise_size, ia.derive_random_state(random_state))
        noise = ia.imresize_single_image(noise, (height, width), interpolation="cubic")

        # apply a bit of gaussian blur and then motion blur according to angle and speed
        sigma = max(height, width) * blur_sigma_fraction_sample
        sigma = np.clip(sigma, self.blur_sigma_limits[0], self.blur_sigma_limits[1])
        noise_small_blur = self._blur(noise, sigma, random_state)
        noise_small_blur = self._motion_blur(noise_small_blur, angle=angle_sample, speed=speed_sample,
                                             random_state=random_state)

        # use contrast adjustment of noise to make the flake size a bit less uniform
        # then readjust the noise values to make them more visible again
        gain = 1.0 + 2*(1 - flake_size_uniformity_sample)
        gain_adj = 1.0 + 5*(1 - flake_size_uniformity_sample)
        noise_small_blur = contrast.GammaContrast(gain).augment_image(noise_small_blur)
        noise_small_blur = noise_small_blur.astype(np.float32) * gain_adj
        noise_small_blur_rgb = np.tile(noise_small_blur[..., np.newaxis], (1, 1, 3))

        # blend:
        # sum for a bit of glowy, hardly visible flakes
        # max for the main flakes
        image_f32 = image.astype(np.float32)
        image_f32 = self._blend_by_sum(image_f32, (0.1 + 20*speed_sample) * noise_small_blur_rgb)
        image_f32 = self._blend_by_max(image_f32, (1.0 + 20*speed_sample) * noise_small_blur_rgb)
        # NOTE: despite the variable name, _blend_by_max() already returned uint8 here
        return image_f32

    @classmethod
    def _generate_noise(cls, height, width, density, random_state):
        """Sample the raw snowflake positions as salt noise on a black canvas."""
        noise = arithmetic.Salt(p=density, random_state=random_state)
        return noise.augment_image(np.zeros((height, width), dtype=np.uint8))

    @classmethod
    def _gate(cls, noise, gate_noise, gate_size, random_state):
        """Attenuate `noise` by a coarse, upscaled multiplicative gate map."""
        # the beta distribution here has most of its weight around 1.0 and will only rarely sample values around 0.0
        # the average of the sampled values seems to be at around 0.6-0.75
        gate_noise = gate_noise.draw_samples(gate_size, random_state)
        gate_noise_up = ia.imresize_single_image(gate_noise, noise.shape[0:2], interpolation="cubic")
        gate_noise_up = np.clip(gate_noise_up, 0.0, 1.0)
        return np.clip(noise.astype(np.float32) * gate_noise_up, 0, 255).astype(np.uint8)

    @classmethod
    def _blur(cls, noise, sigma, random_state):
        """Apply gaussian blur with the given sigma to the noise map."""
        blurer = blur.GaussianBlur(sigma, random_state=random_state)
        return blurer.augment_image(noise)

    @classmethod
    def _motion_blur(cls, noise, angle, speed, random_state):
        """Apply motion blur whose kernel size grows with `speed` and image size."""
        size = max(noise.shape[0:2])
        k = int(speed * size)
        if k <= 1:
            return noise

        # we use max(k, 3) here because MotionBlur errors for anything less than 3
        blurer = blur.MotionBlur(k=max(k, 3), angle=angle, direction=1.0, random_state=random_state)
        return blurer.augment_image(noise)

    @classmethod
    def _blend_by_sum(cls, image_f32, noise_small_blur_rgb):
        """Additively blend the flake map onto the image (faint glow)."""
        image_f32 = image_f32 + noise_small_blur_rgb
        return np.clip(image_f32, 0, 255).astype(np.uint8)

    @classmethod
    def _blend_by_max(cls, image_f32, noise_small_blur_rgb):
        """Blend via per-pixel maximum (the clearly visible flakes)."""
        image_f32 = np.maximum(image_f32, noise_small_blur_rgb)
        return np.clip(image_f32, 0, 255).astype(np.uint8)
| 51.972938 | 153 | 0.67008 | from __future__ import print_function, division, absolute_import
import numpy as np
import cv2
from . import meta, arithmetic, blur, contrast
from .. import imgaug as ia
from .. import parameters as iap
class FastSnowyLandscape(meta.Augmenter):
    """
    Augmenter that converts non-snowy landscapes to snowy ones.

    This works by increasing (in HLS colorspace) the lightness ``L`` of all
    pixels whose lightness is *below* a sampled threshold.

    Parameters
    ----------
    lightness_threshold : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Lightness threshold; only pixels below it are brightened.
        Sampled values are restricted to ``[0, 255]``.
    lightness_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Multiplier applied to the lightness of pixels below the threshold.
        Sampled values are restricted to ``>= 0``.
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    """
    def __init__(self, lightness_threshold=(100, 255), lightness_multiplier=(1.0, 4.0), name=None, deterministic=False,
                 random_state=None):
        super(FastSnowyLandscape, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        self.lightness_threshold = iap.handle_continuous_param(lightness_threshold, "lightness_threshold",
                                                               value_range=(0, 255),
                                                               tuple_to_uniform=True,
                                                               list_to_choice=True)
        self.lightness_multiplier = iap.handle_continuous_param(lightness_multiplier, "lightness_multiplier",
                                                                value_range=(0, None), tuple_to_uniform=True,
                                                                list_to_choice=True)

    def _draw_samples(self, augmentables, random_state):
        # Draw one threshold and one multiplier per augmentable.
        # NOTE(review): thresholds are drawn from rss[1] and multipliers from
        # rss[0] (reversed order) -- presumably kept this way for RNG
        # backward compatibility; confirm before "fixing".
        nb_augmentables = len(augmentables)
        rss = ia.derive_random_states(random_state, 2)
        thresh_samples = self.lightness_threshold.draw_samples((nb_augmentables,), rss[1])
        lmul_samples = self.lightness_multiplier.draw_samples((nb_augmentables,), rss[0])
        return thresh_samples, lmul_samples

    def _augment_images(self, images, random_state, parents, hooks):
        input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)
        thresh_samples, lmul_samples = self._draw_samples(images, random_state)
        result = images

        for i, (image, input_dtype, thresh, lmul) in enumerate(zip(images, input_dtypes, thresh_samples, lmul_samples)):
            # work in HLS so lightness can be manipulated in isolation;
            # float64 avoids overflow/rounding during the multiplication
            image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float64)
            lightness = image_hls[..., 1]

            # brighten only the darker-than-threshold pixels (in-place view write)
            lightness[lightness < thresh] *= lmul

            image_hls = meta.clip_augmented_image_(image_hls, 0, 255)  # TODO make value range more flexible
            image_hls = meta.restore_augmented_image_dtype_(image_hls, input_dtype)
            image_rgb = cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)

            result[i] = image_rgb

        return result

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # color-only effect; heatmaps are returned unchanged
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # color-only effect; keypoints are returned unchanged
        return keypoints_on_images

    def get_parameters(self):
        return [self.lightness_threshold, self.lightness_multiplier]
def Clouds(name=None, deterministic=False, random_state=None):
    """
    Add clouds to images.

    Builds two ``CloudLayer`` instances -- one tuned towards small,
    high-frequency structures and one towards large, low-frequency
    structures -- and applies one or both of them per image (in order).

    Parameters
    ----------
    name : None or str, optional
        Name of the augmenter; auto-generated from the caller name if ``None``.
    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    """
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    # small, fine-grained cloud structures
    layer_fine = CloudLayer(
        intensity_mean=(196, 255),
        intensity_freq_exponent=(-2.5, -2.0),
        intensity_coarse_scale=10,
        alpha_min=0,
        alpha_multiplier=(0.25, 0.75),
        alpha_size_px_max=(2, 8),
        alpha_freq_exponent=(-2.5, -2.0),
        sparsity=(0.8, 1.0),
        density_multiplier=(0.5, 1.0),
    )
    # large, coarse cloud structures
    layer_coarse = CloudLayer(
        intensity_mean=(196, 255),
        intensity_freq_exponent=(-2.0, -1.0),
        intensity_coarse_scale=10,
        alpha_min=0,
        alpha_multiplier=(0.5, 1.0),
        alpha_size_px_max=(64, 128),
        alpha_freq_exponent=(-2.0, -1.0),
        sparsity=(1.0, 1.4),
        density_multiplier=(0.8, 1.5),
    )

    return meta.SomeOf(
        (1, 2),
        children=[layer_fine, layer_coarse],
        random_order=False,
        name=name,
        deterministic=deterministic,
        random_state=random_state,
    )
def Fog(name=None, deterministic=False, random_state=None):
    """
    Add fog to images.

    Returns a single ``CloudLayer`` tuned towards bright, large-scale,
    low-frequency structures that cover most of the image.

    Parameters
    ----------
    name : None or str, optional
        Name of the augmenter; auto-generated from the caller name if ``None``.
    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    """
    augmenter_name = name if name is not None else "Unnamed%s" % (ia.caller_name(),)

    fog_params = dict(
        intensity_mean=(220, 255),
        intensity_freq_exponent=(-2.0, -1.5),
        intensity_coarse_scale=2,
        alpha_min=(0.7, 0.9),
        alpha_multiplier=0.3,
        alpha_size_px_max=(2, 8),
        alpha_freq_exponent=(-4.0, -2.0),
        sparsity=0.9,
        density_multiplier=(0.4, 0.9),
    )
    return CloudLayer(name=augmenter_name, deterministic=deterministic,
                      random_state=random_state, **fog_params)
class CloudLayer(meta.Augmenter):
    """
    Augmenter to add a single layer of clouds to an image.

    A per-image intensity map (coarse offsets plus fine frequency noise) is
    alpha-blended onto the image using an alpha mask sampled from frequency
    noise.

    Parameters
    ----------
    intensity_mean : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Mean intensity (brightness) of the clouds.
    intensity_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Frequency noise exponent for the fine intensity map; lower values
        produce larger structures.
    intensity_coarse_scale : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Std-dev of the gaussian offsets sampled for the coarse (8x8,
        upscaled) intensity map.
    alpha_min : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Minimum alpha value of the cloud layer.
    alpha_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Multiplier of the sampled alpha noise (added on top of `alpha_min`).
    alpha_size_px_max : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Maximum size in pixels at which the alpha frequency noise is sampled
        before upscaling.
    alpha_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Frequency noise exponent for the alpha mask; lower values produce
        larger structures.
    sparsity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Exponent applied to the alpha mask; values above 1.0 make the clouds
        sparser, values below 1.0 denser.
    density_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
        Multiplier applied to the alpha mask after the sparsity exponent.
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.

    """
    def __init__(self, intensity_mean, intensity_freq_exponent, intensity_coarse_scale,
                 alpha_min, alpha_multiplier, alpha_size_px_max, alpha_freq_exponent,
                 sparsity, density_multiplier,
                 name=None, deterministic=False, random_state=None):
        super(CloudLayer, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        self.intensity_mean = iap.handle_continuous_param(intensity_mean, "intensity_mean")
        self.intensity_freq_exponent = intensity_freq_exponent
        self.intensity_coarse_scale = intensity_coarse_scale
        self.alpha_min = iap.handle_continuous_param(alpha_min, "alpha_min")
        self.alpha_multiplier = iap.handle_continuous_param(alpha_multiplier, "alpha_multiplier")
        self.alpha_size_px_max = alpha_size_px_max
        self.alpha_freq_exponent = alpha_freq_exponent
        self.sparsity = iap.handle_continuous_param(sparsity, "sparsity")
        self.density_multiplier = iap.handle_continuous_param(density_multiplier, "density_multiplier")

    def _augment_images(self, images, random_state, parents, hooks):
        # one derived random state per image, so every image gets independent samples
        rss = ia.derive_random_states(random_state, len(images))
        result = images
        for i, (image, rs) in enumerate(zip(images, rss)):
            result[i] = self.draw_on_image(image, rs)
        return result

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # purely visual overlay; heatmaps are returned unchanged
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # purely visual overlay; keypoints are returned unchanged
        return keypoints_on_images

    def get_parameters(self):
        """Return the parameters of this augmenter (see Augmenter.get_parameters)."""
        # BUGFIX: previously this list contained ``self.density_min``, an
        # attribute that is never assigned in __init__(), so calling
        # get_parameters() raised AttributeError.
        return [self.intensity_mean, self.alpha_min, self.alpha_multiplier, self.alpha_size_px_max,
                self.alpha_freq_exponent, self.intensity_freq_exponent, self.sparsity,
                self.density_multiplier,
                self.intensity_coarse_scale]

    def draw_on_image(self, image, random_state):
        """Sample one cloud layer and alpha-blend it onto a single image."""
        alpha, intensity = self.generate_maps(image, random_state)
        alpha = alpha[..., np.newaxis]
        intensity = intensity[..., np.newaxis]

        return np.clip(
            (1 - alpha) * image.astype(np.float64) + alpha * intensity.astype(np.float64),
            0,
            255
        ).astype(np.uint8)

    def generate_maps(self, image, random_state):
        """Sample and return ``(alpha_mask, intensity_map)`` for `image`."""
        intensity_mean_sample = self.intensity_mean.draw_sample(random_state)
        alpha_min_sample = self.alpha_min.draw_sample(random_state)
        alpha_multiplier_sample = self.alpha_multiplier.draw_sample(random_state)
        alpha_size_px_max = self.alpha_size_px_max
        intensity_freq_exponent = self.intensity_freq_exponent
        alpha_freq_exponent = self.alpha_freq_exponent
        sparsity_sample = self.sparsity.draw_sample(random_state)
        density_multiplier_sample = self.density_multiplier.draw_sample(random_state)

        height, width = image.shape[0:2]
        rss_alpha, rss_intensity = ia.derive_random_states(random_state, 2)

        # intensity = coarse per-region offsets + fine frequency noise
        intensity_coarse = self._generate_intensity_map_coarse(
            height, width, intensity_mean_sample,
            iap.Normal(0, scale=self.intensity_coarse_scale),
            rss_intensity
        )
        intensity_fine = self._generate_intensity_map_fine(height, width, intensity_mean_sample,
                                                           intensity_freq_exponent, rss_intensity)
        intensity = np.clip(intensity_coarse + intensity_fine, 0, 255)

        alpha = self._generate_alpha_mask(height, width, alpha_min_sample, alpha_multiplier_sample,
                                          alpha_freq_exponent, alpha_size_px_max,
                                          sparsity_sample, density_multiplier_sample, rss_alpha)

        return alpha, intensity

    @classmethod
    def _generate_intensity_map_coarse(cls, height, width, intensity_mean, intensity_local_offset, random_state):
        """Sample an 8x8 offset grid around `intensity_mean` and upscale it."""
        height_intensity, width_intensity = (8, 8)  # TODO this might be too simplistic for some image sizes
        intensity = intensity_mean\
            + intensity_local_offset.draw_samples((height_intensity, width_intensity), random_state)
        intensity = ia.imresize_single_image(np.clip(intensity, 0, 255).astype(np.uint8), (height, width),
                                             interpolation="cubic")

        return intensity

    @classmethod
    def _generate_intensity_map_fine(cls, height, width, intensity_mean, exponent, random_state):
        """Sample full-resolution frequency noise as a signed intensity delta."""
        intensity_details_generator = iap.FrequencyNoise(
            exponent=exponent,
            size_px_max=max(height, width),
            upscale_method="cubic"
        )
        intensity_details = intensity_details_generator.draw_samples((height, width), random_state)
        # map noise from [0, 1] to roughly [-intensity_mean/5, +intensity_mean/5]
        return intensity_mean * ((2*intensity_details - 1.0)/5.0)

    @classmethod
    def _generate_alpha_mask(cls, height, width, alpha_min, alpha_multiplier, exponent, alpha_size_px_max, sparsity,
                             density_multiplier, random_state):
        """Sample the blending mask from frequency noise, shaped by sparsity/density."""
        alpha_generator = iap.FrequencyNoise(
            exponent=exponent,
            size_px_max=alpha_size_px_max,
            upscale_method="cubic"
        )
        alpha_local = alpha_generator.draw_samples((height, width), random_state)
        alpha = alpha_min + (alpha_multiplier * alpha_local)
        alpha = (alpha ** sparsity) * density_multiplier
        alpha = np.clip(alpha, 0.0, 1.0)

        return alpha
def Snowflakes(density=(0.005, 0.075), density_uniformity=(0.3, 0.9), flake_size=(0.2, 0.7),
               flake_size_uniformity=(0.4, 0.8), angle=(-30, 30), speed=(0.007, 0.03),
               name=None, deterministic=False, random_state=None):
    """
    Add falling snowflakes to images.

    Creates three identically configured ``SnowflakesLayer`` instances and
    applies between one and three of them per image (in order).

    Parameters
    ----------
    density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Per-pixel snowflake probability in low-resolution space; see
        ``SnowflakesLayer``.
    density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Spatial uniformity of the flake density; see ``SnowflakesLayer``.
    flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Flake size (controls noise sampling resolution); see ``SnowflakesLayer``.
    flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Uniformity of flake sizes; see ``SnowflakesLayer``.
    angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Motion-blur angle in degrees; see ``SnowflakesLayer``.
    speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter, optional
        Perceived falling speed (controls motion-blur kernel size); see
        ``SnowflakesLayer``.
    name : None or str, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    deterministic : bool, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    random_state : None or int or numpy.random.RandomState, optional
        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
    """
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)

    layer = SnowflakesLayer(
        density=density, density_uniformity=density_uniformity,
        flake_size=flake_size, flake_size_uniformity=flake_size_uniformity,
        angle=angle, speed=speed,
        blur_sigma_fraction=(0.0001, 0.001)
    )

    return meta.SomeOf(
        (1, 3), children=[layer.deepcopy() for _ in range(3)],
        random_order=False, name=name, deterministic=deterministic, random_state=random_state
    )
class SnowflakesLayer(meta.Augmenter):
    """
    Augmenter to add a single layer of falling snowflakes to images.

    Flake positions are sampled as salt noise at a (possibly) reduced
    resolution, spatially gated by coarse beta-distributed noise, upscaled,
    blurred (gaussian, then motion blur by ``angle``/``speed``) and finally
    blended onto the image. See :func:`Snowflakes` for recommended parameter
    values per image size.
    """
    def __init__(self, density, density_uniformity, flake_size, flake_size_uniformity, angle, speed, blur_sigma_fraction,
                 blur_sigma_limits=(0.5, 3.75), name=None, deterministic=False,
                 random_state=None):
        super(SnowflakesLayer, self).__init__(name=name, deterministic=deterministic, random_state=random_state)
        # NOTE: `density` is stored as-is (not normalized via handle_continuous_param())
        # because it is handed directly to arithmetic.Salt() in _generate_noise().
        self.density = density
        self.density_uniformity = iap.handle_continuous_param(density_uniformity, "density_uniformity",
                                                              value_range=(0.0, 1.0))
        # lower bound excludes exactly 0.0
        self.flake_size = iap.handle_continuous_param(flake_size, "flake_size", value_range=(0.0+1e-4, 1.0))
        self.flake_size_uniformity = iap.handle_continuous_param(flake_size_uniformity, "flake_size_uniformity",
                                                                 value_range=(0.0, 1.0))
        self.angle = iap.handle_continuous_param(angle, "angle")
        self.speed = iap.handle_continuous_param(speed, "speed", value_range=(0.0, 1.0))
        self.blur_sigma_fraction = iap.handle_continuous_param(blur_sigma_fraction, "blur_sigma_fraction",
                                                               value_range=(0.0, 1.0))
        self.blur_sigma_limits = blur_sigma_limits  # (min, max), same for all images
        self.gate_noise_size = (8, 8)  # (height, width), same for all images

    def _augment_images(self, images, random_state, parents, hooks):
        # one derived random state per image, so every image gets independent samples
        rss = ia.derive_random_states(random_state, len(images))
        result = images
        for i, (image, rs) in enumerate(zip(images, rss)):
            result[i] = self.draw_on_image(image, rs)
        return result

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        # purely visual overlay; heatmaps are returned unchanged
        return heatmaps

    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):
        # purely visual overlay; keypoints are returned unchanged
        return keypoints_on_images

    def get_parameters(self):
        return [self.density, self.density_uniformity, self.flake_size, self.flake_size_uniformity, self.angle,
                self.speed, self.blur_sigma_fraction, self.blur_sigma_limits, self.gate_noise_size]

    def draw_on_image(self, image, random_state):
        """Sample one snowflake layer and blend it onto a single image."""
        flake_size_sample = self.flake_size.draw_sample(random_state)
        flake_size_uniformity_sample = self.flake_size_uniformity.draw_sample(random_state)
        angle_sample = self.angle.draw_sample(random_state)
        speed_sample = self.speed.draw_sample(random_state)
        blur_sigma_fraction_sample = self.blur_sigma_fraction.draw_sample(random_state)

        height, width = image.shape[0:2]
        # larger flakes are achieved by sampling the salt noise at a lower
        # resolution and upscaling afterwards
        downscale_factor = np.clip(1.0 - flake_size_sample, 0.001, 1.0)
        height_down, width_down = int(height*downscale_factor), int(width*downscale_factor),
        noise = self._generate_noise(
            height_down,
            width_down,
            self.density,
            ia.derive_random_state(random_state)
        )

        # gate the noise with coarse [0, 1] noise so some image regions get
        # fewer flakes and others more
        gate_noise = iap.Beta(1.0, 1.0 - self.density_uniformity)
        noise = self._gate(noise, gate_noise, self.gate_noise_size, ia.derive_random_state(random_state))
        noise = ia.imresize_single_image(noise, (height, width), interpolation="cubic")

        # a bit of gaussian blur, then motion blur according to angle and speed
        sigma = max(height, width) * blur_sigma_fraction_sample
        sigma = np.clip(sigma, self.blur_sigma_limits[0], self.blur_sigma_limits[1])
        noise_small_blur = self._blur(noise, sigma, random_state)
        noise_small_blur = self._motion_blur(noise_small_blur, angle=angle_sample, speed=speed_sample,
                                             random_state=random_state)

        # contrast adjustment makes the flake sizes less uniform; the gain
        # readjustment keeps the flakes visible afterwards
        gain = 1.0 + 2*(1 - flake_size_uniformity_sample)
        gain_adj = 1.0 + 5*(1 - flake_size_uniformity_sample)
        noise_small_blur = contrast.GammaContrast(gain).augment_image(noise_small_blur)
        noise_small_blur = noise_small_blur.astype(np.float32) * gain_adj
        noise_small_blur_rgb = np.tile(noise_small_blur[..., np.newaxis], (1, 1, 3))

        # blend: additive pass for faint glow, max pass for the main flakes
        image_f32 = image.astype(np.float32)
        image_f32 = self._blend_by_sum(image_f32, (0.1 + 20*speed_sample) * noise_small_blur_rgb)
        image_f32 = self._blend_by_max(image_f32, (1.0 + 20*speed_sample) * noise_small_blur_rgb)
        # NOTE: despite the variable name, _blend_by_max() already returned uint8 here
        return image_f32

    @classmethod
    def _generate_noise(cls, height, width, density, random_state):
        """Sample the raw snowflake positions as salt noise on a black canvas."""
        noise = arithmetic.Salt(p=density, random_state=random_state)
        return noise.augment_image(np.zeros((height, width), dtype=np.uint8))

    @classmethod
    def _gate(cls, noise, gate_noise, gate_size, random_state):
        """Attenuate `noise` by a coarse, upscaled multiplicative gate map."""
        gate_noise = gate_noise.draw_samples(gate_size, random_state)
        gate_noise_up = ia.imresize_single_image(gate_noise, noise.shape[0:2], interpolation="cubic")
        gate_noise_up = np.clip(gate_noise_up, 0.0, 1.0)
        return np.clip(noise.astype(np.float32) * gate_noise_up, 0, 255).astype(np.uint8)

    @classmethod
    def _blur(cls, noise, sigma, random_state):
        """Apply gaussian blur with the given sigma to the noise map."""
        blurer = blur.GaussianBlur(sigma, random_state=random_state)
        return blurer.augment_image(noise)

    @classmethod
    def _motion_blur(cls, noise, angle, speed, random_state):
        """Apply motion blur whose kernel size grows with `speed` and image size."""
        size = max(noise.shape[0:2])
        k = int(speed * size)
        if k <= 1:
            return noise

        # max(k, 3) because MotionBlur rejects kernel sizes below 3
        blurer = blur.MotionBlur(k=max(k, 3), angle=angle, direction=1.0, random_state=random_state)
        return blurer.augment_image(noise)

    @classmethod
    def _blend_by_sum(cls, image_f32, noise_small_blur_rgb):
        """Additively blend the flake map onto the image (faint glow)."""
        image_f32 = image_f32 + noise_small_blur_rgb
        return np.clip(image_f32, 0, 255).astype(np.uint8)

    @classmethod
    def _blend_by_max(cls, image_f32, noise_small_blur_rgb):
        """Blend via per-pixel maximum (the clearly visible flakes)."""
        image_f32 = np.maximum(image_f32, noise_small_blur_rgb)
        return np.clip(image_f32, 0, 255).astype(np.uint8)
f736973def1f41cb86774b9f7de381b49169e3ec | 54,706 | py | Python | numpy/core/multiarray.py | mprinc/numpy | 73298871d6cb3f4e9c69cecd71ad1797ddad5c57 | [
"BSD-3-Clause"
] | 2 | 2021-06-11T12:31:32.000Z | 2021-08-17T10:56:54.000Z | numpy/core/multiarray.py | mprinc/numpy | 73298871d6cb3f4e9c69cecd71ad1797ddad5c57 | [
"BSD-3-Clause"
] | null | null | null | numpy/core/multiarray.py | mprinc/numpy | 73298871d6cb3f4e9c69cecd71ad1797ddad5c57 | [
"BSD-3-Clause"
] | 1 | 2021-01-04T06:47:33.000Z | 2021-01-04T06:47:33.000Z | """
Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
the multiarray and umath c-extension modules were merged into a single
_multiarray_umath extension module. So we replicate the old namespace
by importing from the extension module.
"""
import functools
import warnings
from . import overrides
from . import _multiarray_umath
from ._multiarray_umath import * # noqa: F403
# These imports are needed for backward compatibility,
# do not change them. issue gh-15518
# _get_ndarray_c_version is semi-public, on purpose not added to __all__
from ._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
_ARRAY_API, _monotonicity, _get_ndarray_c_version, _set_madvise_hugepage,
)
# Public re-export surface of this compatibility shim; everything here comes
# from the merged _multiarray_umath C-extension module.
__all__ = [
    '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
    'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
    'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
    'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
    '_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity',
    'add_docstring', 'arange', 'array', 'bincount', 'broadcast',
    'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
    'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
    'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
    'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
    'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
    'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner',
    'interp', 'interp_complex', 'is_busday', 'lexsort',
    'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
    'nested_iters', 'normalize_axis_index', 'packbits',
    'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
    'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
    'set_string_function', 'set_typeDict', 'shares_memory',
    'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
    'where', 'zeros']

# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
scalar.__module__ = 'numpy.core.multiarray'

# Present these extension functions as members of the top-level ``numpy``
# namespace (their C implementation would otherwise report the extension
# module as ``__module__``).
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
datetime_data.__module__ = 'numpy'
empty.__module__ = 'numpy'
frombuffer.__module__ = 'numpy'
fromfile.__module__ = 'numpy'
fromiter.__module__ = 'numpy'
frompyfunc.__module__ = 'numpy'
fromstring.__module__ = 'numpy'
geterrobj.__module__ = 'numpy'
may_share_memory.__module__ = 'numpy'
nested_iters.__module__ = 'numpy'
promote_types.__module__ = 'numpy'
set_numeric_ops.__module__ = 'numpy'
seterrobj.__module__ = 'numpy'
zeros.__module__ = 'numpy'


# We can't verify dispatcher signatures because NumPy's C functions don't
# support introspection.
array_function_from_c_func_and_dispatcher = functools.partial(
    overrides.array_function_from_dispatcher,
    module='numpy', docs_from_dispatcher=True, verify=False)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
    """
    empty_like(prototype, dtype=None, order='K', subok=True, shape=None)

    Return a new array with the same shape and type as a given array.

    Parameters
    ----------
    prototype : array_like
        The shape and data-type of `prototype` define these same attributes
        of the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.

        .. versionadded:: 1.6.0
    order : {'C', 'F', 'A', or 'K'}, optional
        Overrides the memory layout of the result. 'C' means C-order,
        'F' means F-order, 'A' means 'F' if `prototype` is Fortran
        contiguous, 'C' otherwise. 'K' means match the layout of `prototype`
        as closely as possible.

        .. versionadded:: 1.6.0
    subok : bool, optional.
        If True, then the newly created array will use the sub-class
        type of `prototype`, otherwise it will be a base-class array. Defaults
        to True.
    shape : int or sequence of ints, optional.
        Overrides the shape of the result. If order='K' and the number of
        dimensions is unchanged, will try to keep order, otherwise,
        order='C' is implied.

        .. versionadded:: 1.17.0

    Returns
    -------
    out : ndarray
        Array of uninitialized (arbitrary) data with the same
        shape and type as `prototype`.

    See Also
    --------
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
    empty : Return a new uninitialized array.

    Notes
    -----
    This function does *not* initialize the returned array; to do that use
    `zeros_like` or `ones_like` instead. It may be marginally faster than
    the functions that do set the array values.

    Examples
    --------
    >>> a = ([1,2,3], [4,5,6])                         # a is array-like
    >>> np.empty_like(a)
    array([[-1073741821, -1073741821,           3],    # uninitialized
           [          0,           0, -1073741821]])
    >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
    >>> np.empty_like(a)
    array([[ -2.00000715e+000,   1.48219694e-323,  -2.00000572e+000], # uninitialized
           [  4.38791518e-305,  -2.00000715e+000,   4.17269252e-309]])

    """
    # This is only the __array_function__ dispatcher: it returns the arguments
    # that may carry an __array_function__ override; the real implementation
    # lives in the _multiarray_umath C extension.
    return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
    """
    concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
    Join a sequence of arrays along an existing axis.
    Parameters
    ----------
    a1, a2, ... : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined.  If axis is None,
        arrays are flattened before use.  Default is 0.
    out : ndarray, optional
        If provided, the destination to place the result. The shape must be
        correct, matching that of what concatenate would have returned if no
        out argument were specified.
    dtype : str or dtype
        If provided, the destination array will have this dtype. Cannot be
        provided together with `out`.
        .. versionadded:: 1.20.0
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'same_kind'.
        .. versionadded:: 1.20.0
    Returns
    -------
    res : ndarray
        The concatenated array.
    See Also
    --------
    ma.concatenate : Concatenate function that preserves input masks.
    array_split : Split an array into multiple sub-arrays of equal or
                  near-equal size.
    split : Split array into a list of multiple sub-arrays of equal size.
    hsplit : Split array into multiple sub-arrays horizontally (column wise).
    vsplit : Split array into multiple sub-arrays vertically (row wise).
    dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
    stack : Stack a sequence of arrays along a new axis.
    block : Assemble arrays from blocks.
    hstack : Stack arrays in sequence horizontally (column wise).
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    Notes
    -----
    When one or more of the arrays to be concatenated is a MaskedArray,
    this function will return a MaskedArray object instead of an ndarray,
    but the input masks are *not* preserved. In cases where a MaskedArray
    is expected as input, use the ma.concatenate function from the masked
    array module instead.
    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> b = np.array([[5, 6]])
    >>> np.concatenate((a, b), axis=0)
    array([[1, 2],
           [3, 4],
           [5, 6]])
    >>> np.concatenate((a, b.T), axis=1)
    array([[1, 2, 5],
           [3, 4, 6]])
    >>> np.concatenate((a, b), axis=None)
    array([1, 2, 3, 4, 5, 6])
    This function will not preserve masking of MaskedArray inputs.
    >>> a = np.ma.arange(3)
    >>> a[1] = np.ma.masked
    >>> b = np.arange(2, 5)
    >>> a
    masked_array(data=[0, --, 2],
                 mask=[False,  True, False],
           fill_value=999999)
    >>> b
    array([2, 3, 4])
    >>> np.concatenate([a, b])
    masked_array(data=[0, 1, 2, 2, 3, 4],
                 mask=False,
           fill_value=999999)
    >>> np.ma.concatenate([a, b])
    masked_array(data=[0, --, 2, 2, 3, 4],
                 mask=[False,  True, False, False, False, False],
           fill_value=999999)
    """
    # Dispatcher body: return every argument that can carry an
    # __array_function__ override.  `axis`, `dtype` and `casting` are plain
    # configuration values and are never dispatched on.
    if out is not None:
        # optimize for the typical case where only arrays is provided
        # (avoid copying `arrays` into a list unless `out` must be appended)
        arrays = list(arrays)
        arrays.append(out)
    return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
def inner(a, b):
    """
    inner(a, b)
    Inner product of two arrays.
    Ordinary inner product of vectors for 1-D arrays (without complex
    conjugation), in higher dimensions a sum product over the last axes.
    Parameters
    ----------
    a, b : array_like
        If `a` and `b` are nonscalar, their last dimensions must match.
    Returns
    -------
    out : ndarray
        `out.shape = a.shape[:-1] + b.shape[:-1]`
    Raises
    ------
    ValueError
        If the last dimension of `a` and `b` has different size.
    See Also
    --------
    tensordot : Sum products over arbitrary axes.
    dot : Generalised matrix product, using second last dimension of `b`.
    einsum : Einstein summation convention.
    Notes
    -----
    For vectors (1-D arrays) it computes the ordinary inner-product::
        np.inner(a, b) = sum(a[:]*b[:])
    More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
        np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
    or explicitly::
        np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
             = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
    In addition `a` or `b` may be scalars, in which case::
       np.inner(a,b) = a*b
    Examples
    --------
    Ordinary inner product for vectors:
    >>> a = np.array([1,2,3])
    >>> b = np.array([0,1,0])
    >>> np.inner(a, b)
    2
    A multidimensional example:
    >>> a = np.arange(24).reshape((2,3,4))
    >>> b = np.arange(4)
    >>> np.inner(a, b)
    array([[ 14,  38,  62],
           [ 86, 110, 134]])
    An example where `b` is a scalar:
    >>> np.inner(np.eye(2), 7)
    array([[7., 0.],
           [0., 7.]])
    """
    # Dispatcher body: both operands are array_like and participate in
    # __array_function__ dispatch.
    return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
def where(condition, x=None, y=None):
    """
    where(condition, [x, y])
    Return elements chosen from `x` or `y` depending on `condition`.
    .. note::
        When only `condition` is provided, this function is a shorthand for
        ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
        preferred, as it behaves correctly for subclasses. The rest of this
        documentation covers only the case where all three arguments are
        provided.
    Parameters
    ----------
    condition : array_like, bool
        Where True, yield `x`, otherwise yield `y`.
    x, y : array_like
        Values from which to choose. `x`, `y` and `condition` need to be
        broadcastable to some shape.
    Returns
    -------
    out : ndarray
        An array with elements from `x` where `condition` is True, and elements
        from `y` elsewhere.
    See Also
    --------
    choose
    nonzero : The function that is called when x and y are omitted
    Notes
    -----
    If all the arrays are 1-D, `where` is equivalent to::
        [xv if c else yv
         for c, xv, yv in zip(condition, x, y)]
    Examples
    --------
    >>> a = np.arange(10)
    >>> a
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> np.where(a < 5, a, 10*a)
    array([ 0,  1,  2,  3,  4, 50, 60, 70, 80, 90])
    This can be used on multidimensional arrays too:
    >>> np.where([[True, False], [True, True]],
    ...          [[1, 2], [3, 4]],
    ...          [[9, 8], [7, 6]])
    array([[1, 8],
           [3, 4]])
    The shapes of x, y, and the condition are broadcast together:
    >>> x, y = np.ogrid[:3, :4]
    >>> np.where(x < y, x, 10 + y)  # both x and 10+y are broadcast
    array([[10,  0,  0,  0],
           [10, 11,  1,  1],
           [10, 11, 12,  2]])
    >>> a = np.array([[0, 1, 2],
    ...               [0, 2, 4],
    ...               [0, 3, 6]])
    >>> np.where(a < 4, a, -1)  # -1 is broadcast
    array([[ 0,  1,  2],
           [ 0,  2, -1],
           [ 0,  3, -1]])
    """
    # Dispatcher body: all three arguments are dispatched on.  In the
    # condition-only form `x` and `y` are None; they are returned regardless
    # (presumably ignored downstream by the override machinery -- they carry
    # no __array_function__ attribute).
    return (condition, x, y)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
def lexsort(keys, axis=None):
    """
    lexsort(keys, axis=-1)
    Perform an indirect stable sort using a sequence of keys.
    Given multiple sorting keys, which can be interpreted as columns in a
    spreadsheet, lexsort returns an array of integer indices that describes
    the sort order by multiple columns. The last key in the sequence is used
    for the primary sort order, the second-to-last key for the secondary sort
    order, and so on. The keys argument must be a sequence of objects that
    can be converted to arrays of the same shape. If a 2D array is provided
    for the keys argument, its rows are interpreted as the sorting keys and
    sorting is according to the last row, second last row etc.
    Parameters
    ----------
    keys : (k, N) array or tuple containing k (N,)-shaped sequences
        The `k` different "columns" to be sorted.  The last column (or row if
        `keys` is a 2D array) is the primary sort key.
    axis : int, optional
        Axis to be indirectly sorted.  By default, sort over the last axis.
    Returns
    -------
    indices : (N,) ndarray of ints
        Array of indices that sort the keys along the specified axis.
    See Also
    --------
    argsort : Indirect sort.
    ndarray.sort : In-place sort.
    sort : Return a sorted copy of an array.
    Examples
    --------
    Sort names: first by surname, then by name.
    >>> surnames =    ('Hertz',    'Galilei', 'Hertz')
    >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
    >>> ind = np.lexsort((first_names, surnames))
    >>> ind
    array([1, 2, 0])
    >>> [surnames[i] + ", " + first_names[i] for i in ind]
    ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
    Sort two columns of numbers:
    >>> a = [1,5,1,4,3,4,4] # First column
    >>> b = [9,4,0,4,0,2,1] # Second column
    >>> ind = np.lexsort((b,a)) # Sort by a, then by b
    >>> ind
    array([2, 0, 4, 6, 5, 3, 1])
    >>> [(a[i],b[i]) for i in ind]
    [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
    Note that sorting is first according to the elements of ``a``.
    Secondary sorting is according to the elements of ``b``.
    A normal ``argsort`` would have yielded:
    >>> [(a[i],b[i]) for i in np.argsort(a)]
    [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
    Structured arrays are sorted lexically by ``argsort``:
    >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
    ...              dtype=np.dtype([('x', int), ('y', int)]))
    >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
    array([2, 0, 4, 6, 5, 3, 1])
    """
    # Dispatcher body: a tuple of keys is dispatched on element-wise, so
    # return it unchanged; any other key object (e.g. a single 2D array) is
    # wrapped in a one-element tuple.
    if isinstance(keys, tuple):
        return keys
    else:
        return (keys,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
def can_cast(from_, to, casting=None):
    """
    can_cast(from_, to, casting='safe')
    Returns True if cast between data types can occur according to the
    casting rule.  If from is a scalar or array scalar, also returns
    True if the scalar value can be cast without overflow or truncation
    to an integer.
    Parameters
    ----------
    from_ : dtype, dtype specifier, scalar, or array
        Data type, scalar, or array to cast from.
    to : dtype or dtype specifier
        Data type to cast to.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur.
          * 'no' means the data types should not be cast at all.
          * 'equiv' means only byte-order changes are allowed.
          * 'safe' means only casts which can preserve values are allowed.
          * 'same_kind' means only safe casts or casts within a kind,
            like float64 to float32, are allowed.
          * 'unsafe' means any data conversions may be done.
    Returns
    -------
    out : bool
        True if cast can occur according to the casting rule.
    Notes
    -----
    .. versionchanged:: 1.17.0
       Casting between a simple data type and a structured one is possible only
       for "unsafe" casting.  Casting to multiple fields is allowed, but
       casting from multiple fields is not.
    .. versionchanged:: 1.9.0
       Casting from numeric to string types in 'safe' casting mode requires
       that the string dtype length is long enough to store the maximum
       integer/float value converted.
    See also
    --------
    dtype, result_type
    Examples
    --------
    Basic examples
    >>> np.can_cast(np.int32, np.int64)
    True
    >>> np.can_cast(np.float64, complex)
    True
    >>> np.can_cast(complex, float)
    False
    >>> np.can_cast('i8', 'f8')
    True
    >>> np.can_cast('i8', 'f4')
    False
    >>> np.can_cast('i4', 'S4')
    False
    Casting scalars
    >>> np.can_cast(100, 'i1')
    True
    >>> np.can_cast(150, 'i1')
    False
    >>> np.can_cast(150, 'u1')
    True
    >>> np.can_cast(3.5e100, np.float32)
    False
    >>> np.can_cast(1000.0, np.float32)
    True
    Array scalar checks the value, array does not
    >>> np.can_cast(np.array(1000.0), np.float32)
    True
    >>> np.can_cast(np.array([1000.0]), np.float32)
    False
    Using the casting rules
    >>> np.can_cast('i8', 'i8', 'no')
    True
    >>> np.can_cast('<i8', '>i8', 'no')
    False
    >>> np.can_cast('<i8', '>i8', 'equiv')
    True
    >>> np.can_cast('<i4', '>i8', 'equiv')
    False
    >>> np.can_cast('<i4', '>i8', 'safe')
    True
    >>> np.can_cast('<i8', '>i4', 'safe')
    False
    >>> np.can_cast('<i8', '>i4', 'same_kind')
    True
    >>> np.can_cast('<i8', '>u4', 'same_kind')
    False
    >>> np.can_cast('<i8', '>u4', 'unsafe')
    True
    """
    # Dispatcher body: only `from_` may be an array; `to` is a dtype
    # specifier and `casting` a plain string, so neither is dispatched on.
    return (from_,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
def min_scalar_type(a):
    """
    min_scalar_type(a)
    For scalar ``a``, returns the data type with the smallest size
    and smallest scalar kind which can hold its value.  For non-scalar
    array ``a``, returns the vector's dtype unmodified.
    Floating point values are not demoted to integers,
    and complex values are not demoted to floats.
    Parameters
    ----------
    a : scalar or array_like
        The value whose minimal data type is to be found.
    Returns
    -------
    out : dtype
        The minimal data type.
    Notes
    -----
    .. versionadded:: 1.6.0
    See Also
    --------
    result_type, promote_types, dtype, can_cast
    Examples
    --------
    >>> np.min_scalar_type(10)
    dtype('uint8')
    >>> np.min_scalar_type(-260)
    dtype('int16')
    >>> np.min_scalar_type(3.1)
    dtype('float16')
    >>> np.min_scalar_type(1e50)
    dtype('float64')
    >>> np.min_scalar_type(np.arange(4,dtype='f8'))
    dtype('float64')
    """
    # Dispatcher body: the single argument is the only dispatch candidate.
    return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
def result_type(*arrays_and_dtypes):
    """
    result_type(*arrays_and_dtypes)
    Returns the type that results from applying the NumPy
    type promotion rules to the arguments.
    Type promotion in NumPy works similarly to the rules in languages
    like C++, with some slight differences.  When both scalars and
    arrays are used, the array's type takes precedence and the actual value
    of the scalar is taken into account.
    For example, calculating 3*a, where a is an array of 32-bit floats,
    intuitively should result in a 32-bit float output.  If the 3 is a
    32-bit integer, the NumPy rules indicate it can't convert losslessly
    into a 32-bit float, so a 64-bit float should be the result type.
    By examining the value of the constant, '3', we see that it fits in
    an 8-bit integer, which can be cast losslessly into the 32-bit float.
    Parameters
    ----------
    arrays_and_dtypes : list of arrays and dtypes
        The operands of some operation whose result type is needed.
    Returns
    -------
    out : dtype
        The result type.
    See also
    --------
    dtype, promote_types, min_scalar_type, can_cast
    Notes
    -----
    .. versionadded:: 1.6.0
    The specific algorithm used is as follows.
    Categories are determined by first checking which of boolean,
    integer (int/uint), or floating point (float/complex) the maximum
    kind of all the arrays and the scalars are.
    If there are only scalars or the maximum category of the scalars
    is higher than the maximum category of the arrays,
    the data types are combined with :func:`promote_types`
    to produce the return value.
    Otherwise, `min_scalar_type` is called on each array, and
    the resulting data types are all combined with :func:`promote_types`
    to produce the return value.
    The set of int values is not a subset of the uint values for types
    with the same number of bits, something not reflected in
    :func:`min_scalar_type`, but handled as a special case in `result_type`.
    Examples
    --------
    >>> np.result_type(3, np.arange(7, dtype='i1'))
    dtype('int8')
    >>> np.result_type('i4', 'c8')
    dtype('complex128')
    >>> np.result_type(3.0, -2)
    dtype('float64')
    """
    # Dispatcher body: every positional argument may be an array, so the
    # whole varargs tuple is returned for dispatch.
    return arrays_and_dtypes
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
def dot(a, b, out=None):
    """
    dot(a, b, out=None)
    Dot product of two arrays. Specifically,
    - If both `a` and `b` are 1-D arrays, it is inner product of vectors
      (without complex conjugation).
    - If both `a` and `b` are 2-D arrays, it is matrix multiplication,
      but using :func:`matmul` or ``a @ b`` is preferred.
    - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
      and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
    - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
      the last axis of `a` and `b`.
    - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
      sum product over the last axis of `a` and the second-to-last axis of `b`::
        dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
    Parameters
    ----------
    a : array_like
        First argument.
    b : array_like
        Second argument.
    out : ndarray, optional
        Output argument. This must have the exact kind that would be returned
        if it was not used. In particular, it must have the right type, must be
        C-contiguous, and its dtype must be the dtype that would be returned
        for `dot(a,b)`. This is a performance feature. Therefore, if these
        conditions are not met, an exception is raised, instead of attempting
        to be flexible.
    Returns
    -------
    output : ndarray
        Returns the dot product of `a` and `b`.  If `a` and `b` are both
        scalars or both 1-D arrays then a scalar is returned; otherwise
        an array is returned.
        If `out` is given, then it is returned.
    Raises
    ------
    ValueError
        If the last dimension of `a` is not the same size as
        the second-to-last dimension of `b`.
    See Also
    --------
    vdot : Complex-conjugating dot product.
    tensordot : Sum products over arbitrary axes.
    einsum : Einstein summation convention.
    matmul : '@' operator as method with out parameter.
    linalg.multi_dot : Chained dot product.
    Examples
    --------
    >>> np.dot(3, 4)
    12
    Neither argument is complex-conjugated:
    >>> np.dot([2j, 3j], [2j, 3j])
    (-13+0j)
    For 2-D arrays it is the matrix product:
    >>> a = [[1, 0], [0, 1]]
    >>> b = [[4, 1], [2, 2]]
    >>> np.dot(a, b)
    array([[4, 1],
           [2, 2]])
    >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
    >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
    >>> np.dot(a, b)[2,3,2,1,2,2]
    499128
    >>> sum(a[2,3,2,:] * b[1,2,:,2])
    499128
    """
    # Dispatcher body: operands and the optional output array are all
    # candidates for __array_function__ dispatch (out may be None).
    return (a, b, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
def vdot(a, b):
    """
    vdot(a, b)
    Return the dot product of two vectors.
    The vdot(`a`, `b`) function handles complex numbers differently than
    dot(`a`, `b`).  If the first argument is complex the complex conjugate
    of the first argument is used for the calculation of the dot product.
    Note that `vdot` handles multidimensional arrays differently than `dot`:
    it does *not* perform a matrix product, but flattens input arguments
    to 1-D vectors first. Consequently, it should only be used for vectors.
    Parameters
    ----------
    a : array_like
        If `a` is complex the complex conjugate is taken before calculation
        of the dot product.
    b : array_like
        Second argument to the dot product.
    Returns
    -------
    output : ndarray
        Dot product of `a` and `b`.  Can be an int, float, or
        complex depending on the types of `a` and `b`.
    See Also
    --------
    dot : Return the dot product without using the complex conjugate of the
          first argument.
    Examples
    --------
    >>> a = np.array([1+2j,3+4j])
    >>> b = np.array([5+6j,7+8j])
    >>> np.vdot(a, b)
    (70-8j)
    >>> np.vdot(b, a)
    (70+8j)
    Note that higher-dimensional arrays are flattened!
    >>> a = np.array([[1, 4], [5, 6]])
    >>> b = np.array([[4, 1], [2, 2]])
    >>> np.vdot(a, b)
    30
    >>> np.vdot(b, a)
    30
    >>> 1*4 + 4*1 + 5*2 + 6*2
    30
    """
    # Dispatcher body: both operands participate in dispatch.
    return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
def bincount(x, weights=None, minlength=None):
    """
    bincount(x, weights=None, minlength=0)
    Count number of occurrences of each value in array of non-negative ints.
    The number of bins (of size 1) is one larger than the largest value in
    `x`. If `minlength` is specified, there will be at least this number
    of bins in the output array (though it will be longer if necessary,
    depending on the contents of `x`).
    Each bin gives the number of occurrences of its index value in `x`.
    If `weights` is specified the input array is weighted by it, i.e. if a
    value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
    of ``out[n] += 1``.
    Parameters
    ----------
    x : array_like, 1 dimension, nonnegative ints
        Input array.
    weights : array_like, optional
        Weights, array of the same shape as `x`.
    minlength : int, optional
        A minimum number of bins for the output array.
        .. versionadded:: 1.6.0
    Returns
    -------
    out : ndarray of ints
        The result of binning the input array.
        The length of `out` is equal to ``np.amax(x)+1``.
    Raises
    ------
    ValueError
        If the input is not 1-dimensional, or contains elements with negative
        values, or if `minlength` is negative.
    TypeError
        If the type of the input is float or complex.
    See Also
    --------
    histogram, digitize, unique
    Examples
    --------
    >>> np.bincount(np.arange(5))
    array([1, 1, 1, 1, 1])
    >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
    array([1, 3, 1, 1, 0, 0, 0, 1])
    >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
    >>> np.bincount(x).size == np.amax(x)+1
    True
    The input array needs to be of integer dtype, otherwise a
    TypeError is raised:
    >>> np.bincount(np.arange(5, dtype=float))
    Traceback (most recent call last):
      ...
    TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
    according to the rule 'safe'
    A possible use of ``bincount`` is to perform sums over
    variable-size chunks of an array, using the ``weights`` keyword.
    >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
    >>> x = np.array([0, 1, 1, 2, 2, 2])
    >>> np.bincount(x,  weights=w)
    array([ 0.3,  0.7,  1.1])
    """
    # Dispatcher body: `x` and the optional `weights` array are dispatched
    # on; `minlength` is a plain integer and is not.
    return (x, weights)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode=None, order=None):
    """
    ravel_multi_index(multi_index, dims, mode='raise', order='C')
    Converts a tuple of index arrays into an array of flat
    indices, applying boundary modes to the multi-index.
    Parameters
    ----------
    multi_index : tuple of array_like
        A tuple of integer arrays, one array for each dimension.
    dims : tuple of ints
        The shape of array into which the indices from ``multi_index`` apply.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices are handled.  Can specify
        either one mode or a tuple of modes, one mode per index.
        * 'raise' -- raise an error (default)
        * 'wrap' -- wrap around
        * 'clip' -- clip to the range
        In 'clip' mode, a negative index which would normally
        wrap will clip to 0 instead.
    order : {'C', 'F'}, optional
        Determines whether the multi-index should be viewed as
        indexing in row-major (C-style) or column-major
        (Fortran-style) order.
    Returns
    -------
    raveled_indices : ndarray
        An array of indices into the flattened version of an array
        of dimensions ``dims``.
    See Also
    --------
    unravel_index
    Notes
    -----
    .. versionadded:: 1.6.0
    Examples
    --------
    >>> arr = np.array([[3,6,6],[4,5,1]])
    >>> np.ravel_multi_index(arr, (7,6))
    array([22, 41, 37])
    >>> np.ravel_multi_index(arr, (7,6), order='F')
    array([31, 41, 13])
    >>> np.ravel_multi_index(arr, (4,6), mode='clip')
    array([22, 23, 19])
    >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
    array([12, 13, 13])
    >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
    1621
    """
    # Dispatcher body: `multi_index` is itself a tuple of index arrays, so
    # it already has the shape the dispatch machinery expects.  `dims`,
    # `mode` and `order` are never arrays.
    return multi_index
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
def unravel_index(indices, shape=None, order=None):
    """
    unravel_index(indices, shape, order='C')
    Converts a flat index or array of flat indices into a tuple
    of coordinate arrays.
    Parameters
    ----------
    indices : array_like
        An integer array whose elements are indices into the flattened
        version of an array of dimensions ``shape``. Before version 1.6.0,
        this function accepted just one index value.
    shape : tuple of ints
        The shape of the array to use for unraveling ``indices``.
        .. versionchanged:: 1.16.0
            Renamed from ``dims`` to ``shape``.
    order : {'C', 'F'}, optional
        Determines whether the indices should be viewed as indexing in
        row-major (C-style) or column-major (Fortran-style) order.
        .. versionadded:: 1.6.0
    Returns
    -------
    unraveled_coords : tuple of ndarray
        Each array in the tuple has the same shape as the ``indices``
        array.
    See Also
    --------
    ravel_multi_index
    Examples
    --------
    >>> np.unravel_index([22, 41, 37], (7,6))
    (array([3, 6, 6]), array([4, 5, 1]))
    >>> np.unravel_index([31, 41, 13], (7,6), order='F')
    (array([3, 6, 6]), array([4, 5, 1]))
    >>> np.unravel_index(1621, (6,7,8,9))
    (3, 1, 4, 1)
    """
    # Dispatcher body: only `indices` can be an array; `shape` and `order`
    # are plain configuration values.
    return (indices,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
def copyto(dst, src, casting=None, where=None):
    """
    copyto(dst, src, casting='same_kind', where=True)
    Copies values from one array to another, broadcasting as necessary.
    Raises a TypeError if the `casting` rule is violated, and if
    `where` is provided, it selects which elements to copy.
    .. versionadded:: 1.7.0
    Parameters
    ----------
    dst : ndarray
        The array into which values are copied.
    src : array_like
        The array from which values are copied.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur when copying.
          * 'no' means the data types should not be cast at all.
          * 'equiv' means only byte-order changes are allowed.
          * 'safe' means only casts which can preserve values are allowed.
          * 'same_kind' means only safe casts or casts within a kind,
            like float64 to float32, are allowed.
          * 'unsafe' means any data conversions may be done.
    where : array_like of bool, optional
        A boolean array which is broadcasted to match the dimensions
        of `dst`, and selects elements to copy from `src` to `dst`
        wherever it contains the value True.
    """
    # Dispatcher body: destination, source and the boolean mask are all
    # array_like and dispatched on; `casting` is a plain string.
    return (dst, src, where)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
def putmask(a, mask, values):
    """
    putmask(a, mask, values)
    Changes elements of an array based on conditional and input values.
    Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
    If `values` is not the same size as `a` and `mask` then it will repeat.
    This gives behavior different from ``a[mask] = values``.
    Parameters
    ----------
    a : ndarray
        Target array.
    mask : array_like
        Boolean mask array. It has to be the same shape as `a`.
    values : array_like
        Values to put into `a` where `mask` is True. If `values` is smaller
        than `a` it will be repeated.
    See Also
    --------
    place, put, take, copyto
    Examples
    --------
    >>> x = np.arange(6).reshape(2, 3)
    >>> np.putmask(x, x>2, x**2)
    >>> x
    array([[ 0,  1,  2],
           [ 9, 16, 25]])
    If `values` is smaller than `a` it is repeated:
    >>> x = np.arange(5)
    >>> np.putmask(x, x>1, [-33, -44])
    >>> x
    array([  0,   1, -33, -44, -33])
    """
    # Dispatcher body: all three arguments are array_like and dispatched on.
    return (a, mask, values)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
def packbits(a, axis=None, bitorder='big'):
    """
    packbits(a, axis=None, bitorder='big')
    Packs the elements of a binary-valued array into bits in a uint8 array.
    The result is padded to full bytes by inserting zero bits at the end.
    Parameters
    ----------
    a : array_like
        An array of integers or booleans whose elements should be packed to
        bits.
    axis : int, optional
        The dimension over which bit-packing is done.
        ``None`` implies packing the flattened array.
    bitorder : {'big', 'little'}, optional
        The order of the input bits. 'big' will mimic bin(val),
        ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
        reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
        Defaults to 'big'.
        .. versionadded:: 1.17.0
    Returns
    -------
    packed : ndarray
        Array of type uint8 whose elements represent bits corresponding to the
        logical (0 or nonzero) value of the input elements. The shape of
        `packed` has the same number of dimensions as the input (unless `axis`
        is None, in which case the output is 1-D).
    See Also
    --------
    unpackbits: Unpacks elements of a uint8 array into a binary-valued output
                array.
    Examples
    --------
    >>> a = np.array([[[1,0,1],
    ...                [0,1,0]],
    ...               [[1,1,0],
    ...                [0,0,1]]])
    >>> b = np.packbits(a, axis=-1)
    >>> b
    array([[[160],
            [ 64]],
           [[192],
            [ 32]]], dtype=uint8)
    Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
    and 32 = 0010 0000.
    """
    # Dispatcher body: only the input array is dispatched on.
    return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(a, axis=None, count=None, bitorder='big'):
    """
    unpackbits(a, axis=None, count=None, bitorder='big')
    Unpacks elements of a uint8 array into a binary-valued output array.
    Each element of `a` represents a bit-field that should be unpacked
    into a binary-valued output array. The shape of the output array is
    either 1-D (if `axis` is ``None``) or the same shape as the input
    array with unpacking done along the axis specified.
    Parameters
    ----------
    a : ndarray, uint8 type
       Input array.
    axis : int, optional
        The dimension over which bit-unpacking is done.
        ``None`` implies unpacking the flattened array.
    count : int or None, optional
        The number of elements to unpack along `axis`, provided as a way
        of undoing the effect of packing a size that is not a multiple
        of eight. A non-negative number means to only unpack `count`
        bits. A negative number means to trim off that many bits from
        the end. ``None`` means to unpack the entire array (the
        default). Counts larger than the available number of bits will
        add zero padding to the output. Negative counts must not
        exceed the available number of bits.
        .. versionadded:: 1.17.0
    bitorder : {'big', 'little'}, optional
        The order of the returned bits. 'big' will mimic bin(val),
        ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse
        the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
        Defaults to 'big'.
        .. versionadded:: 1.17.0
    Returns
    -------
    unpacked : ndarray, uint8 type
       The elements are binary-valued (0 or 1).
    See Also
    --------
    packbits : Packs the elements of a binary-valued array into bits in
               a uint8 array.
    Examples
    --------
    >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
    >>> a
    array([[ 2],
           [ 7],
           [23]], dtype=uint8)
    >>> b = np.unpackbits(a, axis=1)
    >>> b
    array([[0, 0, 0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 1, 1, 1],
           [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
    >>> c = np.unpackbits(a, axis=1, count=-3)
    >>> c
    array([[0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0]], dtype=uint8)
    >>> p = np.packbits(b, axis=0)
    >>> np.unpackbits(p, axis=0)
    array([[0, 0, 0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 1, 1, 1],
           [0, 0, 0, 1, 0, 1, 1, 1],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
    >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0]))
    True
    """
    # Dispatcher body: only the input array is dispatched on; `axis`,
    # `count` and `bitorder` are plain configuration values.
    return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
def shares_memory(a, b, max_work=None):
    """
    shares_memory(a, b, max_work=None)
    Determine if two arrays share memory.
    .. warning::
       This function can be exponentially slow for some inputs, unless
       `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``.
       If in doubt, use `numpy.may_share_memory` instead.
    Parameters
    ----------
    a, b : ndarray
        Input arrays
    max_work : int, optional
        Effort to spend on solving the overlap problem (maximum number
        of candidate solutions to consider). The following special
        values are recognized:
        max_work=MAY_SHARE_EXACT (default)
            The problem is solved exactly. In this case, the function returns
            True only if there is an element shared between the arrays. Finding
            the exact solution may take extremely long in some cases.
        max_work=MAY_SHARE_BOUNDS
            Only the memory bounds of a and b are checked.
    Raises
    ------
    numpy.TooHardError
        Exceeded max_work.
    Returns
    -------
    out : bool
    See Also
    --------
    may_share_memory
    Examples
    --------
    >>> x = np.array([1, 2, 3, 4])
    >>> np.shares_memory(x, np.array([5, 6, 7]))
    False
    >>> np.shares_memory(x[::2], x)
    True
    >>> np.shares_memory(x[::2], x[1::2])
    False
    Checking whether two arrays share memory is NP-complete, and
    runtime may increase exponentially in the number of
    dimensions. Hence, `max_work` should generally be set to a finite
    number, as it is possible to construct examples that take
    extremely long to run:
    >>> from numpy.lib.stride_tricks import as_strided
    >>> x = np.zeros([192163377], dtype=np.int8)
    >>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))
    >>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))
    >>> np.shares_memory(x1, x2, max_work=1000)
    Traceback (most recent call last):
    ...
    numpy.TooHardError: Exceeded max_work
    Running ``np.shares_memory(x1, x2)`` without `max_work` set takes
    around 1 minute for this case. It is possible to find problems
    that take still significantly longer.
    """
    # Dispatcher body: both arrays are dispatched on; `max_work` is a
    # plain integer / sentinel value.
    return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
def may_share_memory(a, b, max_work=None):
    """
    may_share_memory(a, b, max_work=None)
    Determine if two arrays might share memory
    A return of True does not necessarily mean that the two arrays
    share any element.  It just means that they *might*.
    Only the memory bounds of a and b are checked by default.
    Parameters
    ----------
    a, b : ndarray
        Input arrays
    max_work : int, optional
        Effort to spend on solving the overlap problem.  See
        `shares_memory` for details.  Default for ``may_share_memory``
        is to do a bounds check.
    Returns
    -------
    out : bool
    See Also
    --------
    shares_memory
    Examples
    --------
    >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
    False
    >>> x = np.zeros([3, 4])
    >>> np.may_share_memory(x[:,0], x[:,1])
    True
    """
    # Dispatcher body: both arrays are dispatched on; `max_work` is not.
    return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
    """
    is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
    Calculates which of the given dates are valid days, and which are not.
    .. versionadded:: 1.7.0
    Parameters
    ----------
    dates : array_like of datetime64[D]
        The array of dates to process.
    weekmask : str or array_like of bool, optional
        A seven-element array indicating which of Monday through Sunday are
        valid days. May be specified as a length-seven list or array, like
        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
        weekdays, optionally separated by white space. Valid abbreviations
        are: Mon Tue Wed Thu Fri Sat Sun
    holidays : array_like of datetime64[D], optional
        An array of dates to consider as invalid dates.  They may be
        specified in any order, and NaT (not-a-time) dates are ignored.
        This list is saved in a normalized form that is suited for
        fast calculations of valid days.
    busdaycal : busdaycalendar, optional
        A `busdaycalendar` object which specifies the valid days.  If this
        parameter is provided, neither weekmask nor holidays may be
        provided.
    out : array of bool, optional
        If provided, this array is filled with the result.
    Returns
    -------
    out : array of bool
        An array with the same shape as ``dates``, containing True for
        each valid day, and False for each invalid day.
    See Also
    --------
    busdaycalendar: An object that specifies a custom set of valid days.
    busday_offset : Applies an offset counted in valid days.
    busday_count : Counts how many valid days are in a half-open date range.
    Examples
    --------
    >>> # The weekdays are Friday, Saturday, and Monday
    ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
    ...                 holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
    array([False, False,  True])
    """
    # Dispatcher body: `dates`, `weekmask`, `holidays` and `out` are all
    # array_like and dispatched on; `busdaycal` is an opaque calendar
    # object and is deliberately excluded.
    return (dates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
                  busdaycal=None, out=None):
    """
    busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
    First adjusts the date to fall on a valid day according to
    the ``roll`` rule, then applies offsets to the given dates
    counted in valid days.
    .. versionadded:: 1.7.0
    Parameters
    ----------
    dates : array_like of datetime64[D]
        The array of dates to process.
    offsets : array_like of int
        The array of offsets, which is broadcast with ``dates``.
    roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
        How to treat dates that do not fall on a valid day. The default
        is 'raise'.
        * 'raise' means to raise an exception for an invalid day.
        * 'nat' means to return a NaT (not-a-time) for an invalid day.
        * 'forward' and 'following' mean to take the first valid day
          later in time.
        * 'backward' and 'preceding' mean to take the first valid day
          earlier in time.
        * 'modifiedfollowing' means to take the first valid day
          later in time unless it is across a Month boundary, in which
          case to take the first valid day earlier in time.
        * 'modifiedpreceding' means to take the first valid day
          earlier in time unless it is across a Month boundary, in which
          case to take the first valid day later in time.
    weekmask : str or array_like of bool, optional
        A seven-element array indicating which of Monday through Sunday are
        valid days. May be specified as a length-seven list or array, like
        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
        weekdays, optionally separated by white space. Valid abbreviations
        are: Mon Tue Wed Thu Fri Sat Sun
    holidays : array_like of datetime64[D], optional
        An array of dates to consider as invalid dates. They may be
        specified in any order, and NaT (not-a-time) dates are ignored.
        This list is saved in a normalized form that is suited for
        fast calculations of valid days.
    busdaycal : busdaycalendar, optional
        A `busdaycalendar` object which specifies the valid days. If this
        parameter is provided, neither weekmask nor holidays may be
        provided.
    out : array of datetime64[D], optional
        If provided, this array is filled with the result.
    Returns
    -------
    out : array of datetime64[D]
        An array with a shape from broadcasting ``dates`` and ``offsets``
        together, containing the dates with offsets applied.
    See Also
    --------
    busdaycalendar: An object that specifies a custom set of valid days.
    is_busday : Returns a boolean array indicating valid days.
    busday_count : Counts how many valid days are in a half-open date range.
    Examples
    --------
    >>> # First business day in October 2011 (not accounting for holidays)
    ... np.busday_offset('2011-10', 0, roll='forward')
    numpy.datetime64('2011-10-03')
    >>> # Last business day in February 2012 (not accounting for holidays)
    ... np.busday_offset('2012-03', -1, roll='forward')
    numpy.datetime64('2012-02-29')
    >>> # Third Wednesday in January 2011
    ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
    numpy.datetime64('2011-01-19')
    >>> # 2012 Mother's Day in Canada and the U.S.
    ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
    numpy.datetime64('2012-05-13')
    >>> # First business day on or after a date
    ... np.busday_offset('2011-03-20', 0, roll='forward')
    numpy.datetime64('2011-03-21')
    >>> np.busday_offset('2011-03-22', 0, roll='forward')
    numpy.datetime64('2011-03-22')
    >>> # First business day after a date
    ... np.busday_offset('2011-03-20', 1, roll='backward')
    numpy.datetime64('2011-03-21')
    >>> np.busday_offset('2011-03-22', 1, roll='backward')
    numpy.datetime64('2011-03-23')
    """
    # Dispatcher only; `roll` (a string) and `busdaycal` (a busdaycalendar)
    # are not array-likes and therefore do not take part in dispatch.
    return (dates, offsets, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
def busday_count(begindates, enddates, weekmask=None, holidays=None,
                 busdaycal=None, out=None):
    """
    busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
    Counts the number of valid days between `begindates` and
    `enddates`, not including the day of `enddates`.
    If ``enddates`` specifies a date value that is earlier than the
    corresponding ``begindates`` date value, the count will be negative.
    .. versionadded:: 1.7.0
    Parameters
    ----------
    begindates : array_like of datetime64[D]
        The array of the first dates for counting.
    enddates : array_like of datetime64[D]
        The array of the end dates for counting, which are excluded
        from the count themselves.
    weekmask : str or array_like of bool, optional
        A seven-element array indicating which of Monday through Sunday are
        valid days. May be specified as a length-seven list or array, like
        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
        weekdays, optionally separated by white space. Valid abbreviations
        are: Mon Tue Wed Thu Fri Sat Sun
    holidays : array_like of datetime64[D], optional
        An array of dates to consider as invalid dates. They may be
        specified in any order, and NaT (not-a-time) dates are ignored.
        This list is saved in a normalized form that is suited for
        fast calculations of valid days.
    busdaycal : busdaycalendar, optional
        A `busdaycalendar` object which specifies the valid days. If this
        parameter is provided, neither weekmask nor holidays may be
        provided.
    out : array of int, optional
        If provided, this array is filled with the result.
    Returns
    -------
    out : array of int
        An array with a shape from broadcasting ``begindates`` and ``enddates``
        together, containing the number of valid days between
        the begin and end dates.
    See Also
    --------
    busdaycalendar: An object that specifies a custom set of valid days.
    is_busday : Returns a boolean array indicating valid days.
    busday_offset : Applies an offset counted in valid days.
    Examples
    --------
    >>> # Number of weekdays in January 2011
    ... np.busday_count('2011-01', '2011-02')
    21
    >>> # Number of weekdays in 2011
    >>> np.busday_count('2011', '2012')
    260
    >>> # Number of Saturdays in 2011
    ... np.busday_count('2011', '2012', weekmask='Sat')
    53
    """
    # Dispatcher only; `busdaycal` is a busdaycalendar object, not an
    # array-like, so it is excluded from the dispatch arguments.
    return (begindates, enddates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(
    _multiarray_umath.datetime_as_string)
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
    """
    datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
    Convert an array of datetimes into an array of strings.
    Parameters
    ----------
    arr : array_like of datetime64
        The array of UTC timestamps to format.
    unit : str
        One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
    timezone : {'naive', 'UTC', 'local'} or tzinfo
        Timezone information to use when displaying the datetime. If 'UTC', end
        with a Z to indicate UTC time. If 'local', convert to the local timezone
        first, and suffix with a +-#### timezone offset. If a tzinfo object,
        then do as with 'local', but use the specified timezone.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
        Casting to allow when changing between datetime units.
    Returns
    -------
    str_arr : ndarray
        An array of strings the same shape as `arr`.
    Examples
    --------
    >>> import pytz
    >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
    >>> d
    array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
           '2002-10-27T07:30'], dtype='datetime64[m]')
    Setting the timezone to UTC shows the same information, but with a Z suffix
    >>> np.datetime_as_string(d, timezone='UTC')
    array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
           '2002-10-27T07:30Z'], dtype='<U35')
    Note that we picked datetimes that cross a DST boundary. Passing in a
    ``pytz`` timezone object will print the appropriate offset
    >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
    array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
           '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
    Passing in a unit will change the precision
    >>> np.datetime_as_string(d, unit='h')
    array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
          dtype='<U32')
    >>> np.datetime_as_string(d, unit='s')
    array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
           '2002-10-27T07:30:00'], dtype='<U38')
    'casting' can be used to specify whether precision can be changed
    >>> np.datetime_as_string(d, unit='h', casting='safe')
    Traceback (most recent call last):
        ...
    TypeError: Cannot create a datetime string as units 'h' from a NumPy
    datetime with units 'm' according to the rule 'safe'
    """
    # Dispatcher only; `unit`, `timezone` and `casting` are plain options, so
    # the datetime array itself is the sole dispatch argument.
    return (arr,)
| 32.73848 | 128 | 0.616989 |
import functools
import warnings
from . import overrides
from . import _multiarray_umath
from ._multiarray_umath import *
from ._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
_ARRAY_API, _monotonicity, _get_ndarray_c_version, _set_madvise_hugepage,
)
# Public names re-exported from the C extension module `_multiarray_umath`.
__all__ = [
    '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
    'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
    'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
    'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
    '_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity',
    'add_docstring', 'arange', 'array', 'bincount', 'broadcast',
    'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
    'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
    'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
    'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
    'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
    'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner',
    'interp', 'interp_complex', 'is_busday', 'lexsort',
    'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
    'nested_iters', 'normalize_axis_index', 'packbits',
    'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
    'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
    'set_string_function', 'set_typeDict', 'shares_memory',
    'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
    'where', 'zeros']
# Override the C-level __module__ so these functions report a public module
# path instead of the private extension module they are defined in.
_reconstruct.__module__ = 'numpy.core.multiarray'
scalar.__module__ = 'numpy.core.multiarray'
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
datetime_data.__module__ = 'numpy'
empty.__module__ = 'numpy'
frombuffer.__module__ = 'numpy'
fromfile.__module__ = 'numpy'
fromiter.__module__ = 'numpy'
frompyfunc.__module__ = 'numpy'
fromstring.__module__ = 'numpy'
geterrobj.__module__ = 'numpy'
may_share_memory.__module__ = 'numpy'
nested_iters.__module__ = 'numpy'
promote_types.__module__ = 'numpy'
set_numeric_ops.__module__ = 'numpy'
seterrobj.__module__ = 'numpy'
zeros.__module__ = 'numpy'
# Decorator factory: wraps a C-implemented function with an
# __array_function__ dispatcher so the wrapped functions support
# introspection. The dispatcher's docstring is attached to the C function
# (docs_from_dispatcher=True).
array_function_from_c_func_and_dispatcher = functools.partial(
    overrides.array_function_from_dispatcher,
    module='numpy', docs_from_dispatcher=True, verify=False)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
    # Dispatcher for np.empty_like: only the prototype array participates
    # in dispatch; the remaining arguments are plain options.
    return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
    # Typical case: no explicit ``out``, so the input sequence itself is the
    # complete set of dispatch candidates.
    if out is None:
        return arrays
    # ``out`` may also carry an override; splice it in after the inputs.
    return [*arrays, out]
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
def inner(a, b):
    # Dispatcher for np.inner: both operands participate in dispatch.
    return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
def where(condition, x=None, y=None):
    # Dispatcher for np.where: the condition and both branches may be arrays.
    return (condition, x, y)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
def lexsort(keys, axis=None):
    # A tuple of key arrays dispatches on every key; any other argument is
    # treated as a single array-like.
    return keys if isinstance(keys, tuple) else (keys,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
def can_cast(from_, to, casting=None):
    # Dispatcher for np.can_cast: only the source operand may be an array.
    return (from_,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
def min_scalar_type(a):
    # Dispatcher for np.min_scalar_type.
    return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
def result_type(*arrays_and_dtypes):
    # Dispatcher for np.result_type: every positional argument may dispatch.
    return arrays_and_dtypes
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
def dot(a, b, out=None):
    # Dispatcher for np.dot: both operands and the optional output dispatch.
    return (a, b, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
def vdot(a, b):
    # Dispatcher for np.vdot.
    return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
def bincount(x, weights=None, minlength=None):
    # Dispatcher for np.bincount; `minlength` is a plain int and does not
    # participate in dispatch.
    return (x, weights)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode=None, order=None):
    # Dispatcher for np.ravel_multi_index: the sequence of index arrays.
    return multi_index
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
def unravel_index(indices, shape=None, order=None):
    # Dispatcher for np.unravel_index.
    return (indices,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
def copyto(dst, src, casting=None, where=None):
    # Dispatcher for np.copyto: destination, source and mask all dispatch.
    return (dst, src, where)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
def putmask(a, mask, values):
    # Dispatcher for np.putmask.
    return (a, mask, values)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
def packbits(a, axis=None, bitorder='big'):
    # Dispatcher for np.packbits.
    return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(a, axis=None, count=None, bitorder='big'):
    # Dispatcher for np.unpackbits.
    return (a,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
def shares_memory(a, b, max_work=None):
    # Dispatcher for np.shares_memory; `max_work` is a plain scalar option.
    return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
def may_share_memory(a, b, max_work=None):
    # Dispatcher for np.may_share_memory.
    return (a, b)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
    # Dispatcher for np.is_busday; `busdaycal` is not array-like and is
    # excluded from dispatch.
    return (dates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
                  busdaycal=None, out=None):
    # Dispatcher for np.busday_offset; `roll` and `busdaycal` do not dispatch.
    return (dates, offsets, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
def busday_count(begindates, enddates, weekmask=None, holidays=None,
                 busdaycal=None, out=None):
    # Dispatcher for np.busday_count; `busdaycal` does not dispatch.
    return (begindates, enddates, weekmask, holidays, out)
@array_function_from_c_func_and_dispatcher(
    _multiarray_umath.datetime_as_string)
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
    # Dispatcher for np.datetime_as_string; only the datetime array dispatches.
    return (arr,)
| true | true |
f73698e4a7fa3c38471416e94c84cde618bd7112 | 15,979 | py | Python | numpyro/infer/svi.py | vishalbelsare/numpyro | de94469f19388ff09b705754f407461163030fbe | [
"Apache-2.0"
] | 1,394 | 2019-03-19T16:28:45.000Z | 2022-03-31T18:03:26.000Z | numpyro/infer/svi.py | vishalbelsare/numpyro | de94469f19388ff09b705754f407461163030fbe | [
"Apache-2.0"
] | 964 | 2019-03-21T05:02:01.000Z | 2022-03-31T18:27:31.000Z | numpyro/infer/svi.py | vishalbelsare/numpyro | de94469f19388ff09b705754f407461163030fbe | [
"Apache-2.0"
] | 163 | 2019-03-20T17:23:15.000Z | 2022-03-31T13:39:29.000Z | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from functools import namedtuple, partial
import warnings
import tqdm
import jax
from numpyro.util import _versiontuple
if _versiontuple(jax.__version__) >= (0, 2, 25):
from jax.example_libraries import optimizers
else:
from jax.experimental import optimizers
from jax import jit, lax, random
import jax.numpy as jnp
from jax.tree_util import tree_map
from numpyro.distributions import constraints
from numpyro.distributions.transforms import biject_to
from numpyro.handlers import replay, seed, trace
from numpyro.infer.util import helpful_support_errors, transform_fn
from numpyro.optim import _NumPyroOptim
SVIState = namedtuple("SVIState", ["optim_state", "mutable_state", "rng_key"])
"""
A :func:`~collections.namedtuple` consisting of the following fields:
- **optim_state** - current optimizer's state.
- **mutable_state** - extra state to store values of `"mutable"` sites
- **rng_key** - random number generator seed used for the iteration.
"""
SVIRunResult = namedtuple("SVIRunResult", ["params", "state", "losses"])
"""
A :func:`~collections.namedtuple` consisting of the following fields:
- **params** - the optimized parameters.
- **state** - the last :class:`SVIState`
- **losses** - the losses collected at every step.
"""
def _make_loss_fn(
    elbo,
    rng_key,
    constrain_fn,
    model,
    guide,
    args,
    kwargs,
    static_kwargs,
    mutable_state=None,
):
    """
    Build a closure that evaluates the ELBO loss from *unconstrained* params.

    The returned ``loss_fn(params)`` maps ``params`` to the constrained space
    via ``constrain_fn``, merges in ``mutable_state`` (when tracked), and
    evaluates ``elbo`` on the model/guide with the supplied args. It always
    returns a ``(loss, mutable_state)`` pair; the second element is ``None``
    when no mutable state is tracked.
    """
    def loss_fn(params):
        # Map optimizer-space (unconstrained) values back to constrained space.
        params = constrain_fn(params)
        if mutable_state is not None:
            # Mutable sites are passed alongside params and returned updated.
            params.update(mutable_state)
            result = elbo.loss_with_mutable_state(
                rng_key, params, model, guide, *args, **kwargs, **static_kwargs
            )
            return result["loss"], result["mutable_state"]
        else:
            return (
                elbo.loss(
                    rng_key, params, model, guide, *args, **kwargs, **static_kwargs
                ),
                None,
            )
    return loss_fn
class SVI(object):
    """
    Stochastic Variational Inference given an ELBO loss objective.
    **References**
    1. *SVI Part I: An Introduction to Stochastic Variational Inference in Pyro*,
       (http://pyro.ai/examples/svi_part_i.html)
    **Example:**
    .. doctest::
        >>> from jax import random
        >>> import jax.numpy as jnp
        >>> import numpyro
        >>> import numpyro.distributions as dist
        >>> from numpyro.distributions import constraints
        >>> from numpyro.infer import Predictive, SVI, Trace_ELBO
        >>> def model(data):
        ...     f = numpyro.sample("latent_fairness", dist.Beta(10, 10))
        ...     with numpyro.plate("N", data.shape[0]):
        ...         numpyro.sample("obs", dist.Bernoulli(f), obs=data)
        >>> def guide(data):
        ...     alpha_q = numpyro.param("alpha_q", 15., constraint=constraints.positive)
        ...     beta_q = numpyro.param("beta_q", lambda rng_key: random.exponential(rng_key),
        ...                            constraint=constraints.positive)
        ...     numpyro.sample("latent_fairness", dist.Beta(alpha_q, beta_q))
        >>> data = jnp.concatenate([jnp.ones(6), jnp.zeros(4)])
        >>> optimizer = numpyro.optim.Adam(step_size=0.0005)
        >>> svi = SVI(model, guide, optimizer, loss=Trace_ELBO())
        >>> svi_result = svi.run(random.PRNGKey(0), 2000, data)
        >>> params = svi_result.params
        >>> inferred_mean = params["alpha_q"] / (params["alpha_q"] + params["beta_q"])
        >>> # get posterior samples
        >>> predictive = Predictive(guide, params=params, num_samples=1000)
        >>> samples = predictive(random.PRNGKey(1), data)
    :param model: Python callable with Pyro primitives for the model.
    :param guide: Python callable with Pyro primitives for the guide
        (recognition network).
    :param optim: An instance of :class:`~numpyro.optim._NumpyroOptim`, a
        ``jax.example_libraries.optimizers.Optimizer`` or an Optax
        ``GradientTransformation``. If you pass an Optax optimizer it will
        automatically be wrapped using :func:`numpyro.contrib.optim.optax_to_numpyro`.
        >>> from optax import adam, chain, clip
        >>> svi = SVI(model, guide, chain(clip(10.0), adam(1e-3)), loss=Trace_ELBO())
    :param loss: ELBO loss, i.e. negative Evidence Lower Bound, to minimize.
    :param static_kwargs: static arguments for the model / guide, i.e. arguments
        that remain constant during fitting.
    :return: tuple of `(init_fn, update_fn, evaluate)`.
    """
    def __init__(self, model, guide, optim, loss, **static_kwargs):
        self.model = model
        self.guide = guide
        self.loss = loss
        self.static_kwargs = static_kwargs
        self.constrain_fn = None  # set lazily by .init() once transforms are known
        # Normalize `optim` to the _NumPyroOptim interface: accept it as-is,
        # wrap a raw jax optimizer, or wrap an Optax transformation.
        if isinstance(optim, _NumPyroOptim):
            self.optim = optim
        elif isinstance(optim, optimizers.Optimizer):
            self.optim = _NumPyroOptim(lambda *args: args, *optim)
        else:
            try:
                import optax
                from numpyro.contrib.optim import optax_to_numpyro
            except ImportError:
                raise ImportError(
                    "It looks like you tried to use an optimizer that isn't an "
                    "instance of numpyro.optim._NumPyroOptim or "
                    "jax.example_libraries.optimizers.Optimizer. There is experimental "
                    "support for Optax optimizers, but you need to install Optax. "
                    "It can be installed with `pip install optax`."
                )
            if not isinstance(optim, optax.GradientTransformation):
                raise TypeError(
                    "Expected either an instance of numpyro.optim._NumPyroOptim, "
                    "jax.example_libraries.optimizers.Optimizer or "
                    "optax.GradientTransformation. Got {}".format(type(optim))
                )
            self.optim = optax_to_numpyro(optim)
    def init(self, rng_key, *args, **kwargs):
        """
        Gets the initial SVI state.
        :param jax.random.PRNGKey rng_key: random number generator seed.
        :param args: arguments to the model / guide (these can possibly vary during
            the course of fitting).
        :param kwargs: keyword arguments to the model / guide (these can possibly vary
            during the course of fitting).
        :return: the initial :data:`SVIState`
        """
        rng_key, model_seed, guide_seed = random.split(rng_key, 3)
        model_init = seed(self.model, model_seed)
        guide_init = seed(self.guide, guide_seed)
        # Trace the guide first, then replay the model against it so shared
        # sample sites agree.
        guide_trace = trace(guide_init).get_trace(*args, **kwargs, **self.static_kwargs)
        model_trace = trace(replay(model_init, guide_trace)).get_trace(
            *args, **kwargs, **self.static_kwargs
        )
        params = {}
        inv_transforms = {}
        mutable_state = {}
        # NB: params in model_trace will be overwritten by params in guide_trace
        for site in list(model_trace.values()) + list(guide_trace.values()):
            if site["type"] == "param":
                constraint = site["kwargs"].pop("constraint", constraints.real)
                with helpful_support_errors(site):
                    transform = biject_to(constraint)
                inv_transforms[site["name"]] = transform
                # Optimize in the unconstrained space.
                params[site["name"]] = transform.inv(site["value"])
            elif site["type"] == "mutable":
                mutable_state[site["name"]] = site["value"]
            elif (
                site["type"] == "sample"
                and (not site["is_observed"])
                and site["fn"].support.is_discrete
                and not self.loss.can_infer_discrete
            ):
                s_name = type(self.loss).__name__
                warnings.warn(
                    f"Currently, SVI with {s_name} loss does not support models with discrete latent variables"
                )
        if not mutable_state:
            mutable_state = None
        self.constrain_fn = partial(transform_fn, inv_transforms)
        # we convert weak types like float to float32/float64
        # to avoid recompiling body_fn in svi.run
        params, mutable_state = tree_map(
            lambda x: lax.convert_element_type(x, jnp.result_type(x)),
            (params, mutable_state),
        )
        return SVIState(self.optim.init(params), mutable_state, rng_key)
    def get_params(self, svi_state):
        """
        Gets values at `param` sites of the `model` and `guide`.
        :param svi_state: current state of SVI.
        :return: the corresponding parameters
        """
        params = self.constrain_fn(self.optim.get_params(svi_state.optim_state))
        return params
    def update(self, svi_state, *args, **kwargs):
        """
        Take a single step of SVI (possibly on a batch / minibatch of data),
        using the optimizer.
        :param svi_state: current state of SVI.
        :param args: arguments to the model / guide (these can possibly vary during
            the course of fitting).
        :param kwargs: keyword arguments to the model / guide (these can possibly vary
            during the course of fitting).
        :return: tuple of `(svi_state, loss)`.
        """
        # One subkey drives this step; the other is carried in the new state.
        rng_key, rng_key_step = random.split(svi_state.rng_key)
        loss_fn = _make_loss_fn(
            self.loss,
            rng_key_step,
            self.constrain_fn,
            self.model,
            self.guide,
            args,
            kwargs,
            self.static_kwargs,
            mutable_state=svi_state.mutable_state,
        )
        (loss_val, mutable_state), optim_state = self.optim.eval_and_update(
            loss_fn, svi_state.optim_state
        )
        return SVIState(optim_state, mutable_state, rng_key), loss_val
    def stable_update(self, svi_state, *args, **kwargs):
        """
        Similar to :meth:`update` but returns the current state if the
        the loss or the new state contains invalid values.
        :param svi_state: current state of SVI.
        :param args: arguments to the model / guide (these can possibly vary during
            the course of fitting).
        :param kwargs: keyword arguments to the model / guide (these can possibly vary
            during the course of fitting).
        :return: tuple of `(svi_state, loss)`.
        """
        rng_key, rng_key_step = random.split(svi_state.rng_key)
        loss_fn = _make_loss_fn(
            self.loss,
            rng_key_step,
            self.constrain_fn,
            self.model,
            self.guide,
            args,
            kwargs,
            self.static_kwargs,
            mutable_state=svi_state.mutable_state,
        )
        (loss_val, mutable_state), optim_state = self.optim.eval_and_stable_update(
            loss_fn, svi_state.optim_state
        )
        return SVIState(optim_state, mutable_state, rng_key), loss_val
    def run(
        self,
        rng_key,
        num_steps,
        *args,
        progress_bar=True,
        stable_update=False,
        init_state=None,
        **kwargs,
    ):
        """
        (EXPERIMENTAL INTERFACE) Run SVI with `num_steps` iterations, then return
        the optimized parameters and the stacked losses at every step. If `num_steps`
        is large, setting `progress_bar=False` can make the run faster.
        .. note:: For a complex training process (e.g. the one requires early stopping,
            epoch training, varying args/kwargs,...), we recommend to use the more
            flexible methods :meth:`init`, :meth:`update`, :meth:`evaluate` to
            customize your training procedure.
        :param jax.random.PRNGKey rng_key: random number generator seed.
        :param int num_steps: the number of optimization steps.
        :param args: arguments to the model / guide
        :param bool progress_bar: Whether to enable progress bar updates. Defaults to
            ``True``.
        :param bool stable_update: whether to use :meth:`stable_update` to update
            the state. Defaults to False.
        :param SVIState init_state: if not None, begin SVI from the
            final state of previous SVI run. Usage::
                svi = SVI(model, guide, optimizer, loss=Trace_ELBO())
                svi_result = svi.run(random.PRNGKey(0), 2000, data)
                # upon inspection of svi_result the user decides that the model has not converged
                # continue from the end of the previous svi run rather than beginning again from iteration 0
                svi_result = svi.run(random.PRNGKey(1), 2000, data, init_state=svi_result.state)
        :param kwargs: keyword arguments to the model / guide
        :return: a namedtuple with fields `params` and `losses` where `params`
            holds the optimized values at :class:`numpyro.param` sites,
            and `losses` is the collected loss during the process.
        :rtype: SVIRunResult
        """
        if num_steps < 1:
            raise ValueError("num_steps must be a positive integer.")
        def body_fn(svi_state, _):
            if stable_update:
                svi_state, loss = self.stable_update(svi_state, *args, **kwargs)
            else:
                svi_state, loss = self.update(svi_state, *args, **kwargs)
            return svi_state, loss
        if init_state is None:
            svi_state = self.init(rng_key, *args, **kwargs)
        else:
            svi_state = init_state
        if progress_bar:
            losses = []
            with tqdm.trange(1, num_steps + 1) as t:
                # Refresh the progress postfix roughly 20 times per run.
                batch = max(num_steps // 20, 1)
                for i in t:
                    svi_state, loss = jit(body_fn)(svi_state, None)
                    losses.append(loss)
                    if i % batch == 0:
                        if stable_update:
                            # NaN losses (x != x) are skipped when averaging.
                            valid_losses = [x for x in losses[i - batch :] if x == x]
                            num_valid = len(valid_losses)
                            if num_valid == 0:
                                avg_loss = float("nan")
                            else:
                                avg_loss = sum(valid_losses) / num_valid
                        else:
                            avg_loss = sum(losses[i - batch :]) / batch
                        t.set_postfix_str(
                            "init loss: {:.4f}, avg. loss [{}-{}]: {:.4f}".format(
                                losses[0], i - batch + 1, i, avg_loss
                            ),
                            refresh=False,
                        )
            losses = jnp.stack(losses)
        else:
            svi_state, losses = lax.scan(body_fn, svi_state, None, length=num_steps)
        # XXX: we also return the last svi_state for further inspection of both
        # optimizer's state and mutable state.
        return SVIRunResult(self.get_params(svi_state), svi_state, losses)
    def evaluate(self, svi_state, *args, **kwargs):
        """
        Take a single step of SVI (possibly on a batch / minibatch of data).
        :param svi_state: current state of SVI.
        :param args: arguments to the model / guide (these can possibly vary during
            the course of fitting).
        :param kwargs: keyword arguments to the model / guide.
        :return: evaluate ELBO loss given the current parameter values
            (held within `svi_state.optim_state`).
        """
        # we split to have the same seed as `update_fn` given an svi_state
        _, rng_key_eval = random.split(svi_state.rng_key)
        params = self.get_params(svi_state)
        return self.loss.loss(
            rng_key_eval,
            params,
            self.model,
            self.guide,
            *args,
            **kwargs,
            **self.static_kwargs,
        )
| 39.9475 | 111 | 0.592403 |
from functools import namedtuple, partial
import warnings
import tqdm
import jax
from numpyro.util import _versiontuple
if _versiontuple(jax.__version__) >= (0, 2, 25):
from jax.example_libraries import optimizers
else:
from jax.experimental import optimizers
from jax import jit, lax, random
import jax.numpy as jnp
from jax.tree_util import tree_map
from numpyro.distributions import constraints
from numpyro.distributions.transforms import biject_to
from numpyro.handlers import replay, seed, trace
from numpyro.infer.util import helpful_support_errors, transform_fn
from numpyro.optim import _NumPyroOptim
# SVIState fields: optim_state (current optimizer state), mutable_state
# (values of "mutable" sites, or None), rng_key (PRNG key for the next step).
SVIState = namedtuple("SVIState", ["optim_state", "mutable_state", "rng_key"])
# SVIRunResult fields: params (optimized parameters), state (final SVIState),
# losses (loss collected at every step).
SVIRunResult = namedtuple("SVIRunResult", ["params", "state", "losses"])
def _make_loss_fn(
    elbo,
    rng_key,
    constrain_fn,
    model,
    guide,
    args,
    kwargs,
    static_kwargs,
    mutable_state=None,
):
    """
    Build a closure that evaluates the ELBO loss from *unconstrained* params.

    ``loss_fn(params)`` maps ``params`` to the constrained space via
    ``constrain_fn``, merges in ``mutable_state`` (when tracked), evaluates
    ``elbo``, and always returns a ``(loss, mutable_state)`` pair; the second
    element is ``None`` when no mutable state is tracked.
    """
    def loss_fn(params):
        # Map optimizer-space (unconstrained) values back to constrained space.
        params = constrain_fn(params)
        if mutable_state is not None:
            params.update(mutable_state)
            result = elbo.loss_with_mutable_state(
                rng_key, params, model, guide, *args, **kwargs, **static_kwargs
            )
            return result["loss"], result["mutable_state"]
        else:
            return (
                elbo.loss(
                    rng_key, params, model, guide, *args, **kwargs, **static_kwargs
                ),
                None,
            )
    return loss_fn
class SVI(object):
    def __init__(self, model, guide, optim, loss, **static_kwargs):
        """
        Store the model/guide/loss and normalize ``optim`` to the
        ``_NumPyroOptim`` interface.

        Raw ``jax.example_libraries.optimizers`` optimizers are wrapped
        directly; Optax ``GradientTransformation`` objects are wrapped via
        ``optax_to_numpyro`` (requires Optax to be installed).
        """
        self.model = model
        self.guide = guide
        self.loss = loss
        self.static_kwargs = static_kwargs
        self.constrain_fn = None  # set lazily by .init() once transforms are known
        if isinstance(optim, _NumPyroOptim):
            self.optim = optim
        elif isinstance(optim, optimizers.Optimizer):
            self.optim = _NumPyroOptim(lambda *args: args, *optim)
        else:
            try:
                import optax
                from numpyro.contrib.optim import optax_to_numpyro
            except ImportError:
                raise ImportError(
                    "It looks like you tried to use an optimizer that isn't an "
                    "instance of numpyro.optim._NumPyroOptim or "
                    "jax.example_libraries.optimizers.Optimizer. There is experimental "
                    "support for Optax optimizers, but you need to install Optax. "
                    "It can be installed with `pip install optax`."
                )
            if not isinstance(optim, optax.GradientTransformation):
                raise TypeError(
                    "Expected either an instance of numpyro.optim._NumPyroOptim, "
                    "jax.example_libraries.optimizers.Optimizer or "
                    "optax.GradientTransformation. Got {}".format(type(optim))
                )
            self.optim = optax_to_numpyro(optim)
def init(self, rng_key, *args, **kwargs):
rng_key, model_seed, guide_seed = random.split(rng_key, 3)
model_init = seed(self.model, model_seed)
guide_init = seed(self.guide, guide_seed)
guide_trace = trace(guide_init).get_trace(*args, **kwargs, **self.static_kwargs)
model_trace = trace(replay(model_init, guide_trace)).get_trace(
*args, **kwargs, **self.static_kwargs
)
params = {}
inv_transforms = {}
mutable_state = {}
# NB: params in model_trace will be overwritten by params in guide_trace
for site in list(model_trace.values()) + list(guide_trace.values()):
if site["type"] == "param":
constraint = site["kwargs"].pop("constraint", constraints.real)
with helpful_support_errors(site):
transform = biject_to(constraint)
inv_transforms[site["name"]] = transform
params[site["name"]] = transform.inv(site["value"])
elif site["type"] == "mutable":
mutable_state[site["name"]] = site["value"]
elif (
site["type"] == "sample"
and (not site["is_observed"])
and site["fn"].support.is_discrete
and not self.loss.can_infer_discrete
):
s_name = type(self.loss).__name__
warnings.warn(
f"Currently, SVI with {s_name} loss does not support models with discrete latent variables"
)
if not mutable_state:
mutable_state = None
self.constrain_fn = partial(transform_fn, inv_transforms)
# we convert weak types like float to float32/float64
# to avoid recompiling body_fn in svi.run
params, mutable_state = tree_map(
lambda x: lax.convert_element_type(x, jnp.result_type(x)),
(params, mutable_state),
)
return SVIState(self.optim.init(params), mutable_state, rng_key)
def get_params(self, svi_state):
params = self.constrain_fn(self.optim.get_params(svi_state.optim_state))
return params
def update(self, svi_state, *args, **kwargs):
rng_key, rng_key_step = random.split(svi_state.rng_key)
loss_fn = _make_loss_fn(
self.loss,
rng_key_step,
self.constrain_fn,
self.model,
self.guide,
args,
kwargs,
self.static_kwargs,
mutable_state=svi_state.mutable_state,
)
(loss_val, mutable_state), optim_state = self.optim.eval_and_update(
loss_fn, svi_state.optim_state
)
return SVIState(optim_state, mutable_state, rng_key), loss_val
def stable_update(self, svi_state, *args, **kwargs):
rng_key, rng_key_step = random.split(svi_state.rng_key)
loss_fn = _make_loss_fn(
self.loss,
rng_key_step,
self.constrain_fn,
self.model,
self.guide,
args,
kwargs,
self.static_kwargs,
mutable_state=svi_state.mutable_state,
)
(loss_val, mutable_state), optim_state = self.optim.eval_and_stable_update(
loss_fn, svi_state.optim_state
)
return SVIState(optim_state, mutable_state, rng_key), loss_val
def run(
self,
rng_key,
num_steps,
*args,
progress_bar=True,
stable_update=False,
init_state=None,
**kwargs,
):
if num_steps < 1:
raise ValueError("num_steps must be a positive integer.")
def body_fn(svi_state, _):
if stable_update:
svi_state, loss = self.stable_update(svi_state, *args, **kwargs)
else:
svi_state, loss = self.update(svi_state, *args, **kwargs)
return svi_state, loss
if init_state is None:
svi_state = self.init(rng_key, *args, **kwargs)
else:
svi_state = init_state
if progress_bar:
losses = []
with tqdm.trange(1, num_steps + 1) as t:
batch = max(num_steps // 20, 1)
for i in t:
svi_state, loss = jit(body_fn)(svi_state, None)
losses.append(loss)
if i % batch == 0:
if stable_update:
valid_losses = [x for x in losses[i - batch :] if x == x]
num_valid = len(valid_losses)
if num_valid == 0:
avg_loss = float("nan")
else:
avg_loss = sum(valid_losses) / num_valid
else:
avg_loss = sum(losses[i - batch :]) / batch
t.set_postfix_str(
"init loss: {:.4f}, avg. loss [{}-{}]: {:.4f}".format(
losses[0], i - batch + 1, i, avg_loss
),
refresh=False,
)
losses = jnp.stack(losses)
else:
svi_state, losses = lax.scan(body_fn, svi_state, None, length=num_steps)
# XXX: we also return the last svi_state for further inspection of both
# optimizer's state and mutable state.
return SVIRunResult(self.get_params(svi_state), svi_state, losses)
def evaluate(self, svi_state, *args, **kwargs):
_, rng_key_eval = random.split(svi_state.rng_key)
params = self.get_params(svi_state)
return self.loss.loss(
rng_key_eval,
params,
self.model,
self.guide,
*args,
**kwargs,
**self.static_kwargs,
)
| true | true |
f7369a79578fc82ca20769d486ae002da3e44f53 | 16,563 | py | Python | src/optics_numpy.py | computational-imaging/DeepOpticsHDR | 1180749b028dd21f6b7140c0538fe332bd29bb46 | [
"BSD-3-Clause"
] | 33 | 2020-08-19T05:54:02.000Z | 2022-03-15T02:38:30.000Z | src/optics_numpy.py | computational-imaging/DeepOpticsHDR | 1180749b028dd21f6b7140c0538fe332bd29bb46 | [
"BSD-3-Clause"
] | null | null | null | src/optics_numpy.py | computational-imaging/DeepOpticsHDR | 1180749b028dd21f6b7140c0538fe332bd29bb46 | [
"BSD-3-Clause"
] | 4 | 2020-09-16T14:26:47.000Z | 2022-03-04T07:30:15.000Z | #Julie Chang and Chris Metzler 2020
import abc
# import tensorflow as tf
import numpy as np
# import matplotlib as mpl
# mpl.use('TKAgg')
import matplotlib.pyplot as plt
from PIL import Image
from numpy.fft import ifftshift
import fractions
# import layers.optics_no_transpose as optics
#import optics_no_transpose as optics
from skimage.transform import resize
from skimage.measure import block_reduce
from scipy.ndimage import gaussian_filter
# from scipy.interpolate import RectBivariateSpline
import scipy.interpolate as interp
from skimage.io import imsave
def phaseshifts_from_height_map(height_map, wave_lengths, refractive_idcs, dtype=np.complex64):
    """Complex phase shifts induced by a surface height map.

    For each wavelength, a thin element of height ``h`` and refractive index
    ``n`` delays the wave by ``phi = (2*pi/lambda) * (n - 1) * h``; the
    returned array is ``exp(1j*phi)`` broadcast to [batch, channel, H, W].
    ``dtype`` is accepted for API compatibility; the result follows numpy's
    complex promotion rules.
    """
    # Refractive-index contrast relative to air, one entry per channel.
    index_contrast = refractive_idcs.reshape([1, -1, 1, 1]) - 1.
    # Wave number k = 2*pi / lambda, shaped for channel-wise broadcasting.
    wave_numbers = (2. * np.pi / wave_lengths).reshape([1, -1, 1, 1])
    phase_delay = wave_numbers * index_contrast * height_map
    return np.exp(1j * phase_delay)
def get_vanilla_zernike_height_map(zernike_volume, zernike_coeffs, output_resolution=None):
    """Collapse a stack of Zernike basis maps into one height map.

    The basis maps in ``zernike_volume`` are weighted by ``zernike_coeffs``
    and summed over the leading (basis) axis; if ``output_resolution`` is
    given the result is rescaled to that resolution.
    """
    weighted_basis = zernike_coeffs * zernike_volume
    height_map = weighted_basis.sum(axis=0)
    if output_resolution is None:
        return height_map
    return resize(height_map, output_resolution)
class PhasePlate():
    """Thin phase-only optical element defined by a surface height map.

    Calling the instance on a complex input field multiplies it by the
    per-wavelength phase shifts induced by ``height_map``.
    """

    def __init__(self,
                 wave_lengths,
                 height_map,
                 refractive_idcs,
                 height_tolerance=None,
                 lateral_tolerance=None,
                 dtype=np.complex64):
        # wave_lengths / refractive_idcs: 1-D arrays, one entry per channel.
        # height_tolerance: if set, uniform manufacturing noise of this
        # amplitude (same units as height_map) is added on every call.
        # lateral_tolerance is stored but currently unused.
        self.wave_lengths = wave_lengths
        self.height_map = height_map
        self.resolution = np.array(np.shape(height_map))
        self.refractive_idcs = refractive_idcs
        self.height_tolerance = height_tolerance
        self.lateral_tolerance = lateral_tolerance
        self.dtype = dtype

    def __call__(self, input_field):
        height_map = self.height_map
        if self.height_tolerance is not None:
            # Simulate manufacturing error with fresh noise per call. Work on
            # a local copy: the original implementation used ``+=`` on
            # self.height_map, so noise accumulated over repeated calls.
            height_map = height_map + np.random.uniform(
                low=-self.height_tolerance,
                high=self.height_tolerance,
                size=self.height_map.shape)
            print("Phase plate with manufacturing tolerance %0.2e" % self.height_tolerance)
        # Thin-element phase delay phi = k * (n - 1) * h per wavelength
        # (same math as the module-level phaseshifts_from_height_map).
        delta_n = self.refractive_idcs.reshape([1, -1, 1, 1]) - 1.
        wave_nos = (2. * np.pi / self.wave_lengths).reshape([1, -1, 1, 1])
        self.phase_shifts = np.exp(1j * wave_nos * delta_n * height_map)
        input_field = input_field.astype(self.dtype)
        return input_field * self.phase_shifts
def psf2otf(input_filter, output_size):
    """Convert a spatial filter bank into its optical transfer function (FFT).

    Input shape: [in_channels, out_channels, height, width]. The filter is
    zero-padded to ``output_size`` and circularly shifted (ifftshift) so that
    its centre pixel lands at index (0, 0) before the 2D FFT.

    NOTE(review): only ``output_size[0]`` is compared against the filter
    height and the same padding is applied to both axes — assumes square
    filters and square output; confirm callers never pass non-square sizes.
    """
    _, _, fh, fw = np.shape(input_filter)
    if output_size[0] != fh:
        pad = (output_size[0] - fh) / 2
        if (output_size[0] - fh) % 2 != 0:
            # Odd total padding: put the extra row/column on the top/left so
            # the centre pixel ends up at index n//2 of the padded array.
            pad_top = pad_left = int(np.ceil(pad))
            pad_bottom = pad_right = int(np.floor(pad))
        else:
            # Even total padding: split symmetrically. (Bug fix: the original
            # used int(pad) + 1 / int(pad) - 1 here, which shifted the filter
            # by one pixel and introduced a spurious linear phase in the OTF.)
            pad_top = pad_left = int(pad)
            pad_bottom = pad_right = int(pad)
        padded = np.pad(input_filter, ((0, 0), (0, 0), (pad_top, pad_bottom),
                                       (pad_left, pad_right)), mode='constant')
    else:
        padded = input_filter
    # Move the centre pixel to (0, 0): a centred symmetric PSF then yields a
    # zero-phase OTF.
    padded = np.fft.ifftshift(padded, axes=(2, 3))
    return np.fft.fft2(padded)
def propagate_exact(input_field, kernels):
    """Propagate a field by multiplying its spectrum with transfer kernels.

    ``input_field`` has shape [batch, channels, M, N] (M, N >= 2); ``kernels``
    must broadcast against the FFT of the zero-padded field, i.e. against
    shape [..., M + 2*(M//2), N + 2*(N//2)]. Returns the propagated field
    cropped back to the original [batch, channels, M, N].
    """
    _, _, M_orig, N_orig = np.shape(input_field)
    # Pad each spatial axis by half its size to suppress circular-convolution
    # wrap-around from the FFT-based filtering.
    Mpad = M_orig // 2
    Npad = N_orig // 2
    padded_input_field = np.pad(input_field,
                                ((0, 0), (0, 0), (Mpad, Mpad), (Npad, Npad)),
                                mode='constant')
    objFT = np.fft.fft2(padded_input_field)
    out_field = np.fft.ifft2(objFT * kernels)
    # Crop rows by Mpad and columns by Npad. (Bug fix: the original cropped
    # both axes by Npad, which mangled non-square fields.)
    return out_field[:, :, Mpad:-Mpad, Npad:-Npad]
def plano_convex_initializer(focal_length,
                             wave_lengths,
                             wave_resolution,
                             discretization_step,
                             refractive_idx):
    """Quadratic height map approximating a plano-convex lens surface.

    Uses the thin-lens relation R = (n - 1) * f for the convex radius and the
    paraxial (parabolic) approximation of the spherical cap. ``wave_lengths``
    is accepted for interface symmetry but not used here.
    """
    lens_radius = (refractive_idx - 1.) * focal_length
    rows, cols = wave_resolution
    # Pixel-centred coordinate grids in physical units.
    grid_x, grid_y = np.mgrid[-rows // 2:rows // 2,
                              -cols // 2:cols // 2].astype(np.float64)
    grid_x = (grid_x * discretization_step).reshape([rows, cols])
    grid_y = (grid_y * discretization_step).reshape([rows, cols])
    # Parabolic sag: h(r) = -r^2 / (2 R).
    return -(grid_x ** 2 + grid_y ** 2) / 2. * (1. / lens_radius)
def circular_aperture(input_field, r_cutoff=None):
    """Zero out the field at or beyond radius ``r_cutoff`` (in pixels).

    The radius is measured from the centre of the last two axes; when
    ``r_cutoff`` is None it defaults to the maximum x-coordinate of the grid.
    """
    # Bare except kept: some field-like objects may not support np.shape but
    # still expose a .shape attribute.
    try:
        field_shape = np.shape(input_field)
    except:
        field_shape = input_field.shape
    coords = np.mgrid[-field_shape[2] // 2: field_shape[2] // 2,
                      -field_shape[3] // 2: field_shape[3] // 2].astype(np.float64)
    xs, ys = coords
    if r_cutoff is None:
        r_cutoff = np.amax(xs)
    radii = np.sqrt(xs ** 2 + ys ** 2)[None, None, :, :]
    # Strict inequality: pixels exactly at the cutoff radius are blocked.
    mask = (radii < r_cutoff).astype(np.float32)
    return mask * input_field
def get_psfs(optical_element,
             depth_values,
             wave_lengths,
             optical_feature_size,
             sensor_distance,
             propagation_kernel,
             psf_resolution=None,
             sampling_factor=None,
             use_circular_aperture=True,
             r_cutoff=None,
             amplitude_mask=None,
             use_planar_incidence=False,
             dtype=np.complex64,
             sigma=None,
             get_otfs=True,
             otf_resolution=None):
    """Simulate incoherent intensity PSFs (and optionally OTFs) per depth.

    For each depth a spherical wavefront from an on-axis point source
    (phase ~ k * sqrt(x^2 + y^2 + z^2)) is propagated through
    ``optical_element`` and to the sensor via ``propagation_kernel``; the
    intensity |field|^2 is then optionally resized, block-averaged
    (``sampling_factor``), Gaussian-blurred (``sigma``) and normalized to
    unit sum per wavelength.

    Returns ``(psfs, otfs)``: lists over depth of arrays shaped
    [1, n_wavelengths, H, W]; ``otfs`` is empty when ``get_otfs`` is False.

    NOTE(review): ``sensor_distance``, ``use_planar_incidence`` and ``dtype``
    are accepted but unused in this implementation — confirm against callers.
    """
    wave_resolution = optical_element.resolution
    physical_size = wave_resolution[0] * optical_feature_size
    # what about magnification
    N, M = wave_resolution
    [x, y] = np.mgrid[-N//2:N//2,
                      -M//2:M//2].astype(np.float64)
    x = x/N * physical_size
    y = y/M * physical_size
    squared_sum = x**2 + y**2
    squared_sum = squared_sum[None,None,:,:]
    wave_nos = 2. * np.pi / wave_lengths
    wave_nos = wave_nos.reshape([1,-1,1,1])
    # Spherical wavefront phase from a point source at each depth:
    # exp(1j * k * sqrt(x^2 + y^2 + depth^2)).
    input_fields = np.tile(squared_sum, [len(depth_values), len(wave_lengths), 1, 1])
    input_fields = np.sqrt(input_fields + np.array(depth_values).reshape([-1, 1, 1, 1])**2)
    input_fields = np.exp(1.j * wave_nos * input_fields)
    if use_circular_aperture:
        input_fields = circular_aperture(input_fields, r_cutoff)
    if amplitude_mask is not None:
        input_fields = input_fields * amplitude_mask
    psfs = []
    otfs = []
    # calculate PSF for each depth
    for depth_idx in range(len(depth_values)):
        # propagate through optical element
        input_field = input_fields[depth_idx:depth_idx+1,:,:,:]
        field = optical_element(input_field)
        # propagate field to sensor
        sensor_incident_field = propagate_exact(field, propagation_kernel)
        # Incoherent PSF: intensity of the field at the sensor.
        psf = np.square(np.abs(sensor_incident_field))
        psf_edit = []
        for wavelength in range(np.shape(psf)[1]):
            psf_image = np.squeeze(psf[0,wavelength,:,:])
            if psf_resolution is not None:
                psf_image = np.array(Image.fromarray(psf_image).resize((psf_resolution[0], psf_resolution[1]),
                                                                       resample=Image.BILINEAR))
            if sampling_factor is not None:
                psf_image = block_reduce(psf_image, block_size=(sampling_factor,sampling_factor), func=np.mean)
            if sigma is not None:
                psf_image = gaussian_filter(psf_image, sigma)
            # Normalize to unit energy so convolution preserves brightness.
            psf_image /= np.sum(psf_image)
            psf_edit.append(np.expand_dims(np.expand_dims(psf_image, axis=0), axis=0))
        psf = np.concatenate(psf_edit, axis=1)
        psfs.append(psf)
        # calculate OTF as well
        if get_otfs:
            if otf_resolution is None:
                otf_resolution = np.shape(psf)[2:3]
            otf = psf2otf(psf, otf_resolution)
            otfs.append(otf)
    return psfs, otfs
def get_psfs_coherent(optical_element,
                      depth_values,
                      wave_lengths,
                      optical_feature_size,
                      sensor_distance,
                      propagation_kernel,
                      psf_resolution=None,
                      use_circular_aperture=True,
                      r_cutoff=None,
                      use_planar_incidence=False,
                      dtype=np.complex64,
                      get_otfs=True,
                      otf_resolution=None):
    """Coherent counterpart of :func:`get_psfs`.

    Same point-source construction and propagation, but the *complex field*
    at the sensor is returned as the PSF (no |.|^2, no resizing/normalizing),
    and the OTF is its plain FFT (no recentring via psf2otf).

    Returns ``(psfs, otfs)``: lists over depth of complex arrays shaped
    [1, n_wavelengths, H, W].

    NOTE(review): ``sensor_distance``, ``psf_resolution``,
    ``use_planar_incidence``, ``dtype`` and ``otf_resolution`` are accepted
    but unused here — confirm against callers.
    """
    wave_resolution = optical_element.resolution
    physical_size = wave_resolution[0] * optical_feature_size
    # what about magnification
    N, M = wave_resolution
    [x, y] = np.mgrid[-N//2:N//2,
                      -M//2:M//2].astype(np.float64)
    x = x/N * physical_size
    y = y/M * physical_size
    squared_sum = x**2 + y**2
    squared_sum = squared_sum[None,None,:,:]
    wave_nos = 2. * np.pi / wave_lengths
    wave_nos = wave_nos.reshape([1,-1,1,1])
    # Spherical wavefront phase from a point source at each depth.
    input_fields = np.tile(squared_sum, [len(depth_values), len(wave_lengths), 1, 1])
    input_fields = np.sqrt(input_fields + np.array(depth_values).reshape([-1, 1, 1, 1])**2)
    input_fields = np.exp(1.j * wave_nos * input_fields)
    if use_circular_aperture:
        input_fields = circular_aperture(input_fields, r_cutoff)
    psfs = []
    otfs = []
    # calculate PSF for each depth
    for depth_idx in range(len(depth_values)):
        # propagate through optical element
        input_field = input_fields[depth_idx:depth_idx+1,:,:,:]
        field = optical_element(input_field)
        # propagate field to sensor
        sensor_incident_field = propagate_exact(field, propagation_kernel)
        psf = sensor_incident_field
        # psf_edit = []
        # for wavelength in range(np.shape(psf)[1]):
        #     psf_image = np.squeeze(psf[0,wavelength,:,:])
        #     if psf_resolution is not None:
        #         psf_image = np.array(Image.fromarray(psf_image).resize((psf_resolution[0], psf_resolution[1])))
        #     psf_image /= np.sum(np.abs(psf_image))
        #     psf_edit.append(np.expand_dims(np.expand_dims(psf_image, axis=0), axis=0))
        # psf = np.concatenate(psf_edit, axis=1)
        psfs.append(psf)
        # calculate OTF as well
        if get_otfs:
            otf = np.fft.fft2(psf)
            otfs.append(otf)
    return psfs, otfs
def PhaseShiftThinLens_rgb(focal_length, wave_lengths, wave_resolution, optical_feature_size, refractive_idcs):
    """Phase shifts of an ideal thin lens for three colour channels.

    For each of the first three wavelength/index pairs, builds the
    plano-convex height map focused at ``focal_length`` and converts it to
    complex phase shifts. Returns an array of shape
    (1, wave_resolution[0], wave_resolution[1], 3) — channels last.
    """
    per_channel = []
    for ch in range(3):
        height_map = plano_convex_initializer(focal_length,
                                              wave_lengths[ch],
                                              wave_resolution,
                                              optical_feature_size,
                                              refractive_idcs[ch])
        per_channel.append(phaseshifts_from_height_map(height_map,
                                                       wave_lengths[ch],
                                                       refractive_idcs[ch]))
    stacked = np.concatenate(per_channel, axis=1)
    # Reorder from (1, C, H, W) to (1, H, W, C).
    return np.transpose(stacked, [0, 2, 3, 1])
def SaveHeightasTiff(height_map,filename,input_feature_size=4.29e-6,output_feature_size=1e-6,mask_size=5.6e-3,quantization_res=21.16e-9,Interp_Method='Nearest'):
    """Resample a height map onto a fabrication grid and save it as TIFFs.

    ``height_map`` is given in meters on a grid of pitch ``input_feature_size``;
    it is resampled to an ``n x n`` grid of pitch ``output_feature_size``
    covering ``mask_size``, clipped to the input's range, and quantized to
    ``quantization_res``. Two files are written via skimage's ``imsave``:
    ``<filename>_fp32_wrt_um.tiff`` (float32, microns) and
    ``<filename>_integer.tiff`` (int32, units of ``quantization_res``).
    Returns ``[resampled, quantized_fp, quantized_int]``.
    """
    #height_map is given in meters and should be saved as a 32-bit integer where 0=0 nm and 1=21.16 nm (quantization_res)
    #Interpolate the height_map to a higher resolution, then resample at the output_feature_size
    #Nearest neighbor interpolation works by far the best
    assert (np.allclose(np.mod(mask_size, output_feature_size), 0.)), "mask_size must be a common multiple of the output_feature_size"
    height_map = height_map/1e-6#Perform interpolation in um
    # Physical coordinates of the input samples.
    x_input = np.arange(height_map.shape[0]) * input_feature_size
    y_input = np.arange(height_map.shape[1]) * input_feature_size
    if Interp_Method=='Nearest':
        f = interp.RegularGridInterpolator((x_input,y_input), height_map,method='nearest',bounds_error=False,fill_value=0.)
    elif Interp_Method=='Linear':
        f = interp.RegularGridInterpolator((x_input, y_input), height_map, method='linear', bounds_error=False, fill_value=0.)
    else:
        # Any other value falls through to a bicubic spline fit.
        f = interp.RectBivariateSpline(x_input, y_input, height_map, bbox=[None, None, None, None], kx=3, ky=3, s=0)
    n_pixel_out = int(mask_size / output_feature_size)
    if Interp_Method=='Nearest' or Interp_Method=='Linear':
        # RegularGridInterpolator takes a flat list of (x, y) query points.
        grid_x_out, grid_y_out = np.mgrid[0:n_pixel_out, 0:n_pixel_out]*output_feature_size
        grid_x_out=grid_x_out.flatten()
        grid_y_out=grid_y_out.flatten()
        points_out = np.array((grid_x_out,grid_y_out)).T
        resampled_height_map = f(points_out)
        resampled_height_map=np.reshape(resampled_height_map,(n_pixel_out,n_pixel_out))
    else:
        # RectBivariateSpline evaluates directly on a coordinate grid.
        x_output = np.arange(n_pixel_out) * output_feature_size
        y_output = np.arange(n_pixel_out) * output_feature_size
        resampled_height_map = f(x_output,y_output)
    resampled_height_map = np.clip(resampled_height_map,height_map.min(),height_map.max())
    # Quantize the height map to the nearest quantization_res. Save as a fp value in um and as a integer value, where 0 = 0 and 1 = quantization_res
    quantized_resampled_height_map_fp = (np.floor((resampled_height_map)/(quantization_res/1e-6))*(quantization_res/1e-6)).astype(np.float32)
    quantized_resampled_height_map_int = (np.floor((resampled_height_map) / (quantization_res / 1e-6))).astype(np.int32) # In um, quantized to nearest 21.16nm
    # import matplotlib.pyplot as plt
    # plt.subplot(121)
    # imgplot = plt.imshow((height_map))
    # plt.colorbar(imgplot)
    # plt.title('Height Map After Interpolation')
    # plt.subplot(122)
    # imgplot = plt.imshow((resampled_height_map))
    # plt.colorbar(imgplot)
    # plt.title('Height Map After Interpolation')
    # plt.show()
    #
    # import matplotlib.pyplot as plt
    # plt.subplot(121)
    # height_map_slice = height_map[1000,:]
    # imgplot = plt.hist(height_map_slice)
    # plt.title('Height Map Slice After Interpolation')
    # plt.subplot(122)
    # resampled_height_map_slice = resampled_height_map[2500,:]
    # imgplot = plt.hist(resampled_height_map_slice)
    # plt.title('Height Map Slice After Interpolation')
    # plt.show()
    filename_fp=filename + "_fp32_wrt_um.tiff"
    imsave(filename_fp, quantized_resampled_height_map_fp)
    filename_int=filename + "_integer.tiff"
    imsave(filename_int, quantized_resampled_height_map_int)
    return [resampled_height_map,quantized_resampled_height_map_fp,quantized_resampled_height_map_int]
| 41.825758 | 161 | 0.613476 |
import abc
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from numpy.fft import ifftshift
import fractions
from skimage.transform import resize
from skimage.measure import block_reduce
from scipy.ndimage import gaussian_filter
import scipy.interpolate as interp
from skimage.io import imsave
def phaseshifts_from_height_map(height_map, wave_lengths, refractive_idcs, dtype=np.complex64):
delta_N = refractive_idcs.reshape([1,-1,1,1]) - 1.
wave_nos = 2. * np.pi / wave_lengths
wave_nos = wave_nos.reshape([1,-1,1,1])
phi = wave_nos * delta_N * height_map
phase_shifts = np.exp(1j*phi)
return phase_shifts
def get_vanilla_zernike_height_map(zernike_volume, zernike_coeffs, output_resolution=None):
heightmap_zernike = np.sum(zernike_coeffs * zernike_volume, axis=0)
if output_resolution is not None:
heightmap_zernike = resize(heightmap_zernike, output_resolution)
return heightmap_zernike
class PhasePlate():
def __init__(self,
wave_lengths,
height_map,
refractive_idcs,
height_tolerance=None,
lateral_tolerance=None,
dtype=np.complex64):
self.wave_lengths = wave_lengths
self.height_map = height_map
self.resolution = np.array(np.shape(height_map))
self.refractive_idcs=refractive_idcs
self.height_tolerance=height_tolerance
self.lateral_tolerance=lateral_tolerance
self.dtype = dtype
def __call__(self, input_field):
if self.height_tolerance is not None:
self.height_map += np.random.uniform(low=-self.height_tolerance,
high=self.height_tolerance,
size=self.height_map.shape)
print("Phase plate with manufacturing tolerance %0.2e"%self.height_tolerance)
self.phase_shifts = phaseshifts_from_height_map(self.height_map,
self.wave_lengths,
self.refractive_idcs,
dtype=self.dtype)
input_field = input_field.astype(self.dtype)
return input_field * self.phase_shifts
def psf2otf(input_filter, output_size):
_, _, fh, fw = np.shape(input_filter)
if output_size[0] != fh:
pad = (output_size[0] - fh)/2
if (output_size[0] - fh) % 2 != 0:
pad_top = pad_left = int(np.ceil(pad))
pad_bottom = pad_right = int(np.floor(pad))
else:
pad_top = pad_left = int(pad) + 1
pad_bottom = pad_right = int(pad) - 1
padded = np.pad(input_filter, ((0,0), (0,0), (pad_top, pad_bottom),
(pad_left, pad_right)), mode='constant')
else:
padded = input_filter
padded = np.fft.ifftshift(padded, axes=(2,3))
tmp = np.fft.fft2(padded)
return tmp
def propagate_exact(input_field, kernels):
_, _, M_orig, N_orig = np.shape(input_field)
Mpad = M_orig//2
Npad = N_orig//2
M = M_orig + 2*Mpad
N = N_orig + 2*Npad
padded_input_field = np.pad(input_field,
((0,0), (0,0), (Mpad,Mpad), (Npad,Npad)),
mode='constant')
objFT = np.fft.fft2(padded_input_field)
out_field = np.fft.ifft2( objFT * kernels)
out_field = out_field[:,:,Npad:-Npad,Npad:-Npad]
return out_field
def plano_convex_initializer(focal_length,
wave_lengths,
wave_resolution,
discretization_step,
refractive_idx):
convex_radius = (refractive_idx - 1.) * focal_length
N,M = wave_resolution
[x, y] = np.mgrid[-N//2:N//2,
-M//2:M//2].astype(np.float64)
x = x * discretization_step
y = y * discretization_step
x = x.reshape([N,M])
y = y.reshape([N,M])
height_map = -(x ** 2 + y ** 2) / 2. * (1. / convex_radius)
return height_map
def circular_aperture(input_field, r_cutoff=None):
try:
input_shape = np.shape(input_field)
except:
input_shape = input_field.shape
[x, y] = np.mgrid[-input_shape[2] // 2: input_shape[2] // 2,
-input_shape[3] // 2: input_shape[3] // 2].astype(np.float64)
if r_cutoff is None:
r_cutoff = np.amax(x)
r = np.sqrt(x ** 2 + y ** 2)[None,None,:,:]
aperture = (r<r_cutoff).astype(np.float32)
return aperture * input_field
def get_psfs(optical_element,
depth_values,
wave_lengths,
optical_feature_size,
sensor_distance,
propagation_kernel,
psf_resolution=None,
sampling_factor=None,
use_circular_aperture=True,
r_cutoff=None,
amplitude_mask=None,
use_planar_incidence=False,
dtype=np.complex64,
sigma=None,
get_otfs=True,
otf_resolution=None):
wave_resolution = optical_element.resolution
physical_size = wave_resolution[0] * optical_feature_size
N, M = wave_resolution
[x, y] = np.mgrid[-N//2:N//2,
-M//2:M//2].astype(np.float64)
x = x/N * physical_size
y = y/M * physical_size
squared_sum = x**2 + y**2
squared_sum = squared_sum[None,None,:,:]
wave_nos = 2. * np.pi / wave_lengths
wave_nos = wave_nos.reshape([1,-1,1,1])
input_fields = np.tile(squared_sum, [len(depth_values), len(wave_lengths), 1, 1])
input_fields = np.sqrt(input_fields + np.array(depth_values).reshape([-1, 1, 1, 1])**2)
input_fields = np.exp(1.j * wave_nos * input_fields)
if use_circular_aperture:
input_fields = circular_aperture(input_fields, r_cutoff)
if amplitude_mask is not None:
input_fields = input_fields * amplitude_mask
psfs = []
otfs = []
for depth_idx in range(len(depth_values)):
input_field = input_fields[depth_idx:depth_idx+1,:,:,:]
field = optical_element(input_field)
sensor_incident_field = propagate_exact(field, propagation_kernel)
psf = np.square(np.abs(sensor_incident_field))
psf_edit = []
for wavelength in range(np.shape(psf)[1]):
psf_image = np.squeeze(psf[0,wavelength,:,:])
if psf_resolution is not None:
psf_image = np.array(Image.fromarray(psf_image).resize((psf_resolution[0], psf_resolution[1]),
resample=Image.BILINEAR))
if sampling_factor is not None:
psf_image = block_reduce(psf_image, block_size=(sampling_factor,sampling_factor), func=np.mean)
if sigma is not None:
psf_image = gaussian_filter(psf_image, sigma)
psf_image /= np.sum(psf_image)
psf_edit.append(np.expand_dims(np.expand_dims(psf_image, axis=0), axis=0))
psf = np.concatenate(psf_edit, axis=1)
psfs.append(psf)
if get_otfs:
if otf_resolution is None:
otf_resolution = np.shape(psf)[2:3]
otf = psf2otf(psf, otf_resolution)
otfs.append(otf)
return psfs, otfs
def get_psfs_coherent(optical_element,
depth_values,
wave_lengths,
optical_feature_size,
sensor_distance,
propagation_kernel,
psf_resolution=None,
use_circular_aperture=True,
r_cutoff=None,
use_planar_incidence=False,
dtype=np.complex64,
get_otfs=True,
otf_resolution=None):
wave_resolution = optical_element.resolution
physical_size = wave_resolution[0] * optical_feature_size
N, M = wave_resolution
[x, y] = np.mgrid[-N//2:N//2,
-M//2:M//2].astype(np.float64)
x = x/N * physical_size
y = y/M * physical_size
squared_sum = x**2 + y**2
squared_sum = squared_sum[None,None,:,:]
wave_nos = 2. * np.pi / wave_lengths
wave_nos = wave_nos.reshape([1,-1,1,1])
input_fields = np.tile(squared_sum, [len(depth_values), len(wave_lengths), 1, 1])
input_fields = np.sqrt(input_fields + np.array(depth_values).reshape([-1, 1, 1, 1])**2)
input_fields = np.exp(1.j * wave_nos * input_fields)
if use_circular_aperture:
input_fields = circular_aperture(input_fields, r_cutoff)
psfs = []
otfs = []
for depth_idx in range(len(depth_values)):
input_field = input_fields[depth_idx:depth_idx+1,:,:,:]
field = optical_element(input_field)
sensor_incident_field = propagate_exact(field, propagation_kernel)
psf = sensor_incident_field
psfs.append(psf)
if get_otfs:
otf = np.fft.fft2(psf)
otfs.append(otf)
return psfs, otfs
def PhaseShiftThinLens_rgb(focal_length,wave_lengths,wave_resolution,optical_feature_size,refractive_idcs):
height_map_thinlens_0 = plano_convex_initializer(focal_length,
wave_lengths[0],
wave_resolution,
optical_feature_size,
refractive_idcs[0])
PhaseThinLens_0 = phaseshifts_from_height_map(height_map_thinlens_0, wave_lengths[0],
refractive_idcs[0])
height_map_thinlens_1 = plano_convex_initializer(focal_length,
wave_lengths[1],
wave_resolution,
optical_feature_size,
refractive_idcs[1])
PhaseThinLens_1 = phaseshifts_from_height_map(height_map_thinlens_1, wave_lengths[1],
refractive_idcs[1])
height_map_thinlens_2 = plano_convex_initializer(focal_length,
wave_lengths[2],
wave_resolution,
optical_feature_size,
refractive_idcs[2])
PhaseThinLens_2 = phaseshifts_from_height_map(height_map_thinlens_2, wave_lengths[2],
refractive_idcs[2])
PhaseThinLens = np.concatenate((PhaseThinLens_0, PhaseThinLens_1, PhaseThinLens_2), axis=1)
PhaseThinLens = np.transpose(PhaseThinLens, [0, 2, 3, 1])
return PhaseThinLens
def SaveHeightasTiff(height_map,filename,input_feature_size=4.29e-6,output_feature_size=1e-6,mask_size=5.6e-3,quantization_res=21.16e-9,Interp_Method='Nearest'):
assert (np.allclose(np.mod(mask_size, output_feature_size), 0.)), "mask_size must be a common multiple of the output_feature_size"
height_map = height_map/1e-6
x_input = np.arange(height_map.shape[0]) * input_feature_size
y_input = np.arange(height_map.shape[1]) * input_feature_size
if Interp_Method=='Nearest':
f = interp.RegularGridInterpolator((x_input,y_input), height_map,method='nearest',bounds_error=False,fill_value=0.)
elif Interp_Method=='Linear':
f = interp.RegularGridInterpolator((x_input, y_input), height_map, method='linear', bounds_error=False, fill_value=0.)
else:
f = interp.RectBivariateSpline(x_input, y_input, height_map, bbox=[None, None, None, None], kx=3, ky=3, s=0)
n_pixel_out = int(mask_size / output_feature_size)
if Interp_Method=='Nearest' or Interp_Method=='Linear':
grid_x_out, grid_y_out = np.mgrid[0:n_pixel_out, 0:n_pixel_out]*output_feature_size
grid_x_out=grid_x_out.flatten()
grid_y_out=grid_y_out.flatten()
points_out = np.array((grid_x_out,grid_y_out)).T
resampled_height_map = f(points_out)
resampled_height_map=np.reshape(resampled_height_map,(n_pixel_out,n_pixel_out))
else:
x_output = np.arange(n_pixel_out) * output_feature_size
y_output = np.arange(n_pixel_out) * output_feature_size
resampled_height_map = f(x_output,y_output)
resampled_height_map = np.clip(resampled_height_map,height_map.min(),height_map.max())
quantized_resampled_height_map_fp = (np.floor((resampled_height_map)/(quantization_res/1e-6))*(quantization_res/1e-6)).astype(np.float32)
quantized_resampled_height_map_int = (np.floor((resampled_height_map) / (quantization_res / 1e-6))).astype(np.int32)
filename_fp=filename + "_fp32_wrt_um.tiff"
imsave(filename_fp, quantized_resampled_height_map_fp)
filename_int=filename + "_integer.tiff"
imsave(filename_int, quantized_resampled_height_map_int)
return [resampled_height_map,quantized_resampled_height_map_fp,quantized_resampled_height_map_int]
| true | true |
f7369b3bc7c15d50e187bd0fc4166c97b810664c | 6,406 | py | Python | simba/process_data_log_old.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | 172 | 2019-12-18T22:19:42.000Z | 2022-03-29T01:58:25.000Z | simba/process_data_log_old.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | 165 | 2020-01-10T19:05:16.000Z | 2022-03-31T16:08:36.000Z | simba/process_data_log_old.py | justinshenk/simba | a58ccd0ceeda201c1452d186033ce6b25fbab564 | [
"MIT"
] | 80 | 2019-12-20T00:01:43.000Z | 2022-03-29T16:20:10.000Z | import pandas as pd
import os
from configparser import ConfigParser
from datetime import datetime
import numpy as np
def analyze_process_data_log(configini,chosenlist):
    """Aggregate per-video classifier bout statistics into a CSV log.

    Reads the SimBA project config at ``configini``, scans
    ``<csv_path>/machine_results`` for per-video prediction CSVs, detects
    behaviour "bouts" (runs of 1s) for each classifier target, and writes
    event counts, durations, first occurrences and inter-event intervals to
    ``<project_path>/logs/sklearn_<timestamp>.csv``.

    ``chosenlist`` holds metric-name fragments; any output column whose name
    matches one of them (regex filter) is dropped before saving.
    """
    dateTime = datetime.now().strftime('%Y%m%d%H%M%S')
    config = ConfigParser()
    configFile = str(configini)
    config.read(configFile)
    csv_dir = config.get('General settings', 'csv_path')
    csv_dir_in = os.path.join(csv_dir, 'machine_results')
    no_targets = config.getint('SML settings', 'No_targets')
    # Bout-detection state; reset per target below.
    boutEnd = 0
    boutEnd_list = [0]
    boutStart_list = []
    filesFound = []
    target_names = []
    vidInfPath = config.get('General settings', 'project_path')
    vidInfPath = os.path.join(vidInfPath, 'logs')
    vidInfPath = os.path.join(vidInfPath, 'video_info.csv')
    vidinfDf = pd.read_csv(vidInfPath)
    loop = 0
    loopy = 0
    ########### FIND CSV FILES ###########
    for i in os.listdir(csv_dir_in):
        if i.endswith(".csv"):
            file = os.path.join(csv_dir_in, i)
            filesFound.append(file)
    ########### GET TARGET COLUMN NAMES ###########
    for ff in range(no_targets):
        currentModelNames = 'target_name_' + str(ff+1)
        currentModelNames = config.get('SML settings', currentModelNames)
        target_names.append(currentModelNames)
    print('Analyzing ' + str(len(target_names)) + ' classifier result(s) in ' + str(len(filesFound)) + ' video file(s).')
    ########### logfile path ###########
    log_fn = 'sklearn_' + str(dateTime) + '.csv'
    log_path = config.get('General settings', 'project_path')
    log_path = os.path.join(log_path, 'logs')
    log_fn = os.path.join(log_path, log_fn)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    # Seven statistics per classifier target.
    headers = ['Video']
    for i in target_names:
        head1 = str(i) + ' events'
        head2 = str(i) + ' sum duration (s)'
        head3 = str(i) + ' mean duration (s)'
        head4 = str(i) + ' median duration (s)'
        head5 = str(i) + ' first occurance (s)'
        head6 = str(i) + ' mean interval (s)'
        head7 = str(i) + ' median interval (s)'
        headers.extend([head1, head2, head3, head4, head5, head6, head7])
    log_df = pd.DataFrame(columns=headers)
    for i in filesFound:
        boutsDf = pd.DataFrame(columns=['Event', 'Start_frame', 'End_frame'])
        currentFile = i
        currVidName = os.path.basename(currentFile)
        currVidName = currVidName.replace('.csv', '')
        fps = vidinfDf.loc[vidinfDf['Video'] == currVidName]
        try:
            fps = int(fps['fps'])
        except TypeError:
            # NOTE(review): on failure fps stays a DataFrame and the later
            # division by fps will raise — confirm intended error handling.
            print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')
        loopy+=1
        print('Analyzing video ' + str(loopy) + '/' + str(len(filesFound)) + '...')
        dataDf = pd.read_csv(currentFile)
        dataDf['frames'] = np.arange(len(dataDf))
        folderNm = os.path.basename(currentFile)
        logFolderNm = str(folderNm.split('.')[0])
        for bb in target_names:
            currTarget = bb
            # Scan for a frame labeled 1 (bout start), then for the following
            # 0 (bout end), and record the bout.
            # NOTE(review): the breaks appear to exit after the first recorded
            # bout per target — confirm whether multiple bouts per video were
            # intended (this is the "old" version of the module).
            for indexes, rows in dataDf[dataDf['frames'] >= boutEnd].iterrows():
                if rows[currTarget] == 1:
                    boutStart = rows['frames']
                    for index, row in dataDf[dataDf['frames'] >= boutStart].iterrows():
                        if row[currTarget] == 0:
                            boutEnd = row['frames']
                            if boutEnd_list[-1] != boutEnd:
                                boutStart_list.append(boutStart)
                                boutEnd_list.append(boutEnd)
                                values = [currTarget, boutStart, boutEnd]
                                boutsDf.loc[(len(boutsDf))] = values
                                break
                    break
            # Reset detection state for the next target.
            boutStart_list = [0]
            boutEnd_list = [0]
            boutEnd = 0
        #Convert to time
        boutsDf['Start_time'] = boutsDf['Start_frame'] / fps
        boutsDf['End_time'] = boutsDf['End_frame'] / fps
        boutsDf['Bout_time'] = boutsDf['End_time'] - boutsDf['Start_time']
        #record logs
        log_list = []
        log_list.append(logFolderNm)
        for i in target_names:
            currDf = boutsDf.loc[boutsDf['Event'] == i]
            try:
                firstOccur = round(currDf['Start_time'].iloc[0], 4)
            except IndexError:
                # No bouts recorded for this target.
                firstOccur = 0
            eventNOs = len(currDf)
            TotEventDur = round(currDf['Bout_time'].sum(), 4)
            try:
                MeanEventDur = round(TotEventDur / eventNOs, 4)
            except ZeroDivisionError:
                MeanEventDur = 0
            try:
                MedianEventDur = round(currDf['Bout_time'].median(), 10)
            except ZeroDivisionError:
                MedianEventDur = 0
            # Inter-event interval: next bout's start minus this bout's end.
            currDf_shifted = currDf.shift(periods=-1)
            currDf_shifted = currDf_shifted.drop(columns=['Event', 'Start_frame', 'End_frame', 'End_time', 'Bout_time'])
            currDf_shifted = currDf_shifted.rename(columns={'Start_time': 'Start_time_shifted'})
            currDf_combined = pd.concat([currDf, currDf_shifted], axis=1, join='inner')
            currDf_combined['Event_interval'] = currDf_combined['Start_time_shifted'] - currDf_combined['End_time']
            meanEventInterval = currDf_combined["Event_interval"].mean()
            medianEventInterval = currDf_combined['Event_interval'].median()
            log_list.append(eventNOs)
            log_list.append(TotEventDur)
            log_list.append(MeanEventDur)
            log_list.append(MedianEventDur)
            log_list.append(firstOccur)
            log_list.append(meanEventInterval)
            log_list.append(medianEventInterval)
        log_df.loc[loop] = log_list
        loop += 1
        print('File # processed for machine predictions: ' + str(loop) + '/' + str(len(filesFound)))
    log_df.fillna(0, inplace=True)
    # drop columns not chosen
    for i in chosenlist:
        log_df = log_df[log_df.columns.drop(list(log_df.filter(regex=str(i))))]
    log_df.to_csv(log_fn, index=False)
    print('All files processed for machine predictions: data file saved @' + str(log_fn))
| 42.706667 | 146 | 0.568998 | import pandas as pd
import os
from configparser import ConfigParser
from datetime import datetime
import numpy as np
def analyze_process_data_log(configini, chosenlist):
    """Summarise classifier "bout" events per video into a dated log CSV.

    For every machine-results file, the start/end frames of each behavioural
    bout are detected per target column, converted to seconds using the
    video's fps (read from ``project_folder/logs/video_info.csv``), and
    aggregated into one row of ``project_folder/logs/sklearn_<timestamp>.csv``.

    NOTE(review): this copy of the function was truncated — the log-file
    set-up, the header construction and the ``for i in filesFound:`` loop
    header were missing, leaving ``i``, ``log_fn`` and ``log_df`` undefined.
    They are restored below from the intact copy of the body.
    ``filesFound``/``target_names`` are initialised empty and the code that
    populates them is not present in this chunk — TODO confirm against the
    original module.

    :param configini: path to the project configuration ``.ini`` file.
    :param chosenlist: iterable of metric-name regex fragments whose columns
        are dropped from the final log.
    """
    dateTime = datetime.now().strftime('%Y%m%d%H%M%S')
    config = ConfigParser()
    configFile = str(configini)
    config.read(configFile)
    csv_dir = config.get('General settings', 'csv_path')
    csv_dir_in = os.path.join(csv_dir, 'machine_results')  # kept for parity; unused below
    no_targets = config.getint('SML settings', 'No_targets')  # unused below; see NOTE above
    boutEnd = 0
    boutEnd_list = [0]
    boutStart_list = []
    filesFound = []
    target_names = []
    vidInfPath = config.get('General settings', 'project_path')
    vidInfPath = os.path.join(vidInfPath, 'logs')
    vidInfPath = os.path.join(vidInfPath, 'video_info.csv')
    vidinfDf = pd.read_csv(vidInfPath)
    loop = 0
    loopy = 0

    # Restored: output log location and per-target header columns.
    log_fn = 'sklearn_' + str(dateTime) + '.csv'
    log_path = config.get('General settings', 'project_path')
    log_path = os.path.join(log_path, 'logs')
    log_fn = os.path.join(log_path, log_fn)
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    headers = ['Video']
    for i in target_names:
        head1 = str(i) + ' events'
        head2 = str(i) + ' sum duration (s)'
        head3 = str(i) + ' mean duration (s)'
        head4 = str(i) + ' median duration (s)'
        head5 = str(i) + ' first occurance (s)'
        head6 = str(i) + ' mean interval (s)'
        head7 = str(i) + ' median interval (s)'
        headers.extend([head1, head2, head3, head4, head5, head6, head7])
    log_df = pd.DataFrame(columns=headers)

    # Restored loop header: process every machine-results file.
    for i in filesFound:
        boutsDf = pd.DataFrame(columns=['Event', 'Start_frame', 'End_frame'])
        currentFile = i
        currVidName = os.path.basename(currentFile)
        currVidName = currVidName.replace('.csv', '')
        fps = vidinfDf.loc[vidinfDf['Video'] == currVidName]
        try:
            fps = int(fps['fps'])
        except TypeError:
            print('Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file')
        loopy += 1
        print('Analyzing video ' + str(loopy) + '/' + str(len(filesFound)) + '...')
        dataDf = pd.read_csv(currentFile)
        dataDf['frames'] = np.arange(len(dataDf))
        folderNm = os.path.basename(currentFile)
        logFolderNm = str(folderNm.split('.')[0])
        for bb in target_names:
            currTarget = bb
            # Scan for a 1 (behaviour present) followed by a 0 to delimit a bout.
            # NOTE(review): the nesting of the two break statements was
            # reconstructed from a flattened copy — confirm against upstream.
            for indexes, rows in dataDf[dataDf['frames'] >= boutEnd].iterrows():
                if rows[currTarget] == 1:
                    boutStart = rows['frames']
                    for index, row in dataDf[dataDf['frames'] >= boutStart].iterrows():
                        if row[currTarget] == 0:
                            boutEnd = row['frames']
                            if boutEnd_list[-1] != boutEnd:
                                boutStart_list.append(boutStart)
                                boutEnd_list.append(boutEnd)
                                values = [currTarget, boutStart, boutEnd]
                                boutsDf.loc[(len(boutsDf))] = values
                                break
                    break
            boutStart_list = [0]
            boutEnd_list = [0]
            boutEnd = 0
        # Convert frame indices to seconds.
        boutsDf['Start_time'] = boutsDf['Start_frame'] / fps
        boutsDf['End_time'] = boutsDf['End_frame'] / fps
        boutsDf['Bout_time'] = boutsDf['End_time'] - boutsDf['Start_time']
        # Aggregate per-target statistics into a single log row.
        log_list = []
        log_list.append(logFolderNm)
        for i in target_names:
            currDf = boutsDf.loc[boutsDf['Event'] == i]
            try:
                firstOccur = round(currDf['Start_time'].iloc[0], 4)
            except IndexError:
                # No bouts for this target in this video.
                firstOccur = 0
            eventNOs = len(currDf)
            TotEventDur = round(currDf['Bout_time'].sum(), 4)
            try:
                MeanEventDur = round(TotEventDur / eventNOs, 4)
            except ZeroDivisionError:
                MeanEventDur = 0
            try:
                MedianEventDur = round(currDf['Bout_time'].median(), 10)
            except ZeroDivisionError:
                MedianEventDur = 0
            # Interval between consecutive bouts = next bout start - current end.
            currDf_shifted = currDf.shift(periods=-1)
            currDf_shifted = currDf_shifted.drop(columns=['Event', 'Start_frame', 'End_frame', 'End_time', 'Bout_time'])
            currDf_shifted = currDf_shifted.rename(columns={'Start_time': 'Start_time_shifted'})
            currDf_combined = pd.concat([currDf, currDf_shifted], axis=1, join='inner')
            currDf_combined['Event_interval'] = currDf_combined['Start_time_shifted'] - currDf_combined['End_time']
            meanEventInterval = currDf_combined["Event_interval"].mean()
            medianEventInterval = currDf_combined['Event_interval'].median()
            log_list.append(eventNOs)
            log_list.append(TotEventDur)
            log_list.append(MeanEventDur)
            log_list.append(MedianEventDur)
            log_list.append(firstOccur)
            log_list.append(meanEventInterval)
            log_list.append(medianEventInterval)
        log_df.loc[loop] = log_list
        loop += 1
        print('File # processed for machine predictions: ' + str(loop) + '/' + str(len(filesFound)))
    log_df.fillna(0, inplace=True)
    # Drop any metric columns the user de-selected.
    for i in chosenlist:
        log_df = log_df[log_df.columns.drop(list(log_df.filter(regex=str(i))))]
    log_df.to_csv(log_fn, index=False)
    print('All files processed for machine predictions: data file saved @' + str(log_fn))
| true | true |
f7369d706e35c6b3d484b5d59ae9bf2e31b5ddbb | 2,014 | py | Python | picpost/settings.py | kowito/picpost | d710b1a0db988f979aa711c79e1e4cf84eed26a7 | [
"MIT"
] | null | null | null | picpost/settings.py | kowito/picpost | d710b1a0db988f979aa711c79e1e4cf84eed26a7 | [
"MIT"
] | null | null | null | picpost/settings.py | kowito/picpost | d710b1a0db988f979aa711c79e1e4cf84eed26a7 | [
"MIT"
] | null | null | null |
import os
# All filesystem paths are built relative to the repository root
# (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Trust the proxy's X-Forwarded-Proto header to detect HTTPS requests.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in the repository; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'tk$i&&s(v*wu3l1$)p7*2@4w#d0dxev6%8v6$e$e!kf=j7y==_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Extra template search path in addition to the per-app template directories.
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates/'),
)
# NOTE(review): '*' accepts any Host header - acceptable only for development.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'debug_toolbar',
    'imagestore',
    'sorl.thumbnail',
    'tagging',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'picpost.urls'
WSGI_APPLICATION = 'picpost.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Local development database: an SQLite file in the repository root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
import dj_database_url
#DATABASES['default'] = dj_database_url.config()
# NOTE(review): the dj_database_url override above is commented out, so
# SQLite is always used and the import is currently unused.
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
| 24.26506 | 71 | 0.726415 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECRET_KEY = 'tk$i&&s(v*wu3l1$)p7*2@4w#d0dxev6%8v6$e$e!kf=j7y==_'
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates/'),
)
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'imagestore',
'sorl.thumbnail',
'tagging',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'picpost.urls'
WSGI_APPLICATION = 'picpost.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
import dj_database_url
#DATABASES['default'] = dj_database_url.config()
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
| true | true |
f7369d79a83366796cc1c9349b904d4c6c4934ce | 846 | py | Python | ML/learn/intro_convolution.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | 2 | 2017-06-07T03:20:42.000Z | 2020-01-07T09:14:26.000Z | ML/learn/intro_convolution.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | null | null | null | ML/learn/intro_convolution.py | qrsforever/workspace | 53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @file intro_convolution.py
# @brief
# @author QRS
# @blog qrsforever.github.io
# @version 1.0
# @date 2019-06-03 20:52:26
################################ jupyter-vim #######################################
# https://github.com/qrsforever/vim/blob/master/bundle/.configs/jupyter-vim_conf.vim
# %pylab --no-import-all # noqa
#####################################################################################
import numpy as np
import matplotlib.pyplot as plt
#####################################################################################
# <codecell>
#####################################################################################
# Element-wise (Hadamard) product of a constant 2x2 "image" with a kernel.
a = np.array([200, 200])                         # one image row
b = np.stack((a, a))                             # duplicate the row -> 2x2 array
kernel_horizonal = np.array([[2, 2], [-2, 2]])   # same values as the original nested arrays
np.multiply(b, kernel_horizonal)                 # result displayed in the notebook cell, not stored
| 28.2 | 85 | 0.41253 | true | true | |
f7369d912ebf51f29680276faf3ab8e08c6cb647 | 279 | py | Python | docs/guide/tutorial/chatbot/bot.py | teaglebuilt/bocadillo | b2138e77747d3ab9f87e4b352f6b7c1e72520fe1 | [
"MIT"
] | 434 | 2018-11-19T15:16:05.000Z | 2022-02-19T03:18:52.000Z | docs/guide/tutorial/chatbot/bot.py | teaglebuilt/bocadillo | b2138e77747d3ab9f87e4b352f6b7c1e72520fe1 | [
"MIT"
] | 295 | 2018-11-20T15:11:17.000Z | 2020-03-14T19:42:03.000Z | docs/guide/tutorial/chatbot/bot.py | teaglebuilt/bocadillo | b2138e77747d3ab9f87e4b352f6b7c1e72520fe1 | [
"MIT"
] | 62 | 2018-11-17T22:41:06.000Z | 2021-09-11T17:45:59.000Z | # chatbot/bot.py
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
diego = ChatBot("Diego")
trainer = ChatterBotCorpusTrainer(diego)
trainer.train(
"chatterbot.corpus.english.greetings",
"chatterbot.corpus.english.conversations",
)
| 23.25 | 55 | 0.795699 |
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
diego = ChatBot("Diego")
trainer = ChatterBotCorpusTrainer(diego)
trainer.train(
"chatterbot.corpus.english.greetings",
"chatterbot.corpus.english.conversations",
)
| true | true |
f7369da700db4433f77f08fc995b58f9c3d51fe1 | 3,373 | py | Python | setup.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | [
"Apache-2.0",
"BSD-2-Clause"
] | 406 | 2015-01-20T03:08:53.000Z | 2022-03-31T20:59:07.000Z | setup.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | [
"Apache-2.0",
"BSD-2-Clause"
] | 2,031 | 2015-01-05T21:35:45.000Z | 2022-03-29T21:44:36.000Z | setup.py | cloudcomputingabc/volttron | 6495e26e3185a7af8d0d79ad2586bdf8ea83992d | [
"Apache-2.0",
"BSD-2-Clause"
] | 219 | 2015-01-20T14:53:57.000Z | 2022-03-06T00:37:41.000Z | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2020, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import contextlib
from setuptools import setup, find_packages
from requirements import extras_require, install_requires
with open('volttron/platform/__init__.py') as file:
for line in file:
if line.startswith('__version__'):
with contextlib.suppress(IndexError):
exec(line)
break
else:
raise RuntimeError('Unable to find version string in {}.'.format(file.name))
if __name__ == '__main__':
setup(
name = 'volttron',
version = __version__,
description = 'Agent Execution Platform',
author = 'Volttron Team',
author_email = 'volttron@pnnl.gov',
url = 'https://github.com/VOLTTRON/volttron',
packages = find_packages('.'),
install_requires = install_requires,
extras_require = extras_require,
entry_points = {
'console_scripts': [
'volttron = volttron.platform.main:_main',
'volttron-ctl = volttron.platform.control:_main',
'volttron-pkg = volttron.platform.packaging:_main',
'volttron-cfg = volttron.platform.config:_main',
'vctl = volttron.platform.control:_main',
'vpkg = volttron.platform.packaging:_main',
'vcfg = volttron.platform.config:_main',
'volttron-upgrade = volttron.platform.upgrade.upgrade_volttron:_main',
]
},
zip_safe = False,
)
| 43.24359 | 86 | 0.701749 |
import contextlib
from setuptools import setup, find_packages
from requirements import extras_require, install_requires
with open('volttron/platform/__init__.py') as file:
for line in file:
if line.startswith('__version__'):
with contextlib.suppress(IndexError):
exec(line)
break
else:
raise RuntimeError('Unable to find version string in {}.'.format(file.name))
if __name__ == '__main__':
setup(
name = 'volttron',
version = __version__,
description = 'Agent Execution Platform',
author = 'Volttron Team',
author_email = 'volttron@pnnl.gov',
url = 'https://github.com/VOLTTRON/volttron',
packages = find_packages('.'),
install_requires = install_requires,
extras_require = extras_require,
entry_points = {
'console_scripts': [
'volttron = volttron.platform.main:_main',
'volttron-ctl = volttron.platform.control:_main',
'volttron-pkg = volttron.platform.packaging:_main',
'volttron-cfg = volttron.platform.config:_main',
'vctl = volttron.platform.control:_main',
'vpkg = volttron.platform.packaging:_main',
'vcfg = volttron.platform.config:_main',
'volttron-upgrade = volttron.platform.upgrade.upgrade_volttron:_main',
]
},
zip_safe = False,
)
| true | true |
f7369e481ec201e74c733b94b5e71e737efe3f28 | 399 | py | Python | epibox/common/create_folder.py | anascacais/EpiBOX | 23f8963c4a2f5457dd8773e6092b86b0db35e2c7 | [
"MIT"
] | 1 | 2021-07-14T10:03:54.000Z | 2021-07-14T10:03:54.000Z | epibox/common/create_folder.py | anascacais/epibox | 23f8963c4a2f5457dd8773e6092b86b0db35e2c7 | [
"MIT"
] | null | null | null | epibox/common/create_folder.py | anascacais/epibox | 23f8963c4a2f5457dd8773e6092b86b0db35e2c7 | [
"MIT"
] | null | null | null | import os
def create_folder(initial_dir, nb, service):
directory = os.path.join(initial_dir, nb)
directory = os.path.join(directory, service)
if not os.path.exists(directory):
os.makedirs(directory)
print('Created patient directory -- ' + directory)
else:
print('Directory -- {} -- already exists'.format(directory))
return directory
| 24.9375 | 68 | 0.636591 | import os
def create_folder(initial_dir, nb, service):
directory = os.path.join(initial_dir, nb)
directory = os.path.join(directory, service)
if not os.path.exists(directory):
os.makedirs(directory)
print('Created patient directory -- ' + directory)
else:
print('Directory -- {} -- already exists'.format(directory))
return directory
| true | true |
f7369e6db45ae6c0416bb4373eb5fe6cf60fab68 | 4,172 | py | Python | conflowgen/tests/posthoc_analyses/test_inbound_to_outbound_capacity_utilization_analysis.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | 5 | 2022-02-16T11:44:42.000Z | 2022-02-24T20:02:17.000Z | conflowgen/tests/posthoc_analyses/test_inbound_to_outbound_capacity_utilization_analysis.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | 90 | 2021-12-08T14:05:44.000Z | 2022-03-24T08:53:31.000Z | conflowgen/tests/posthoc_analyses/test_inbound_to_outbound_capacity_utilization_analysis.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | 5 | 2021-12-07T16:05:15.000Z | 2022-02-16T08:24:07.000Z | import datetime
import unittest
from conflowgen.domain_models.container import Container
from conflowgen.domain_models.data_types.container_length import ContainerLength
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.data_types.storage_requirement import StorageRequirement
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.distribution_seeders import mode_of_transport_distribution_seeder
from conflowgen.domain_models.large_vehicle_schedule import Schedule, Destination
from conflowgen.domain_models.vehicle import LargeScheduledVehicle, Truck, Feeder
from conflowgen.posthoc_analyses.inbound_to_outbound_vehicle_capacity_utilization_analysis import \
InboundToOutboundVehicleCapacityUtilizationAnalysis
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestInboundToOutboundCapacityUtilizationAnalysis(unittest.TestCase):
def setUp(self) -> None:
"""Create container database in memory"""
self.sqlite_db = setup_sqlite_in_memory_db()
self.sqlite_db.create_tables([
Schedule,
Container,
LargeScheduledVehicle,
Truck,
Feeder,
ModeOfTransportDistribution,
Destination
])
mode_of_transport_distribution_seeder.seed()
self.analysis = InboundToOutboundVehicleCapacityUtilizationAnalysis(
transportation_buffer=0.2
)
def test_with_no_data(self):
"""If no schedules are provided, no capacity is needed"""
empty_capacities = self.analysis.get_inbound_and_outbound_capacity_of_each_vehicle()
self.assertDictEqual({}, empty_capacities)
def test_inbound_with_single_feeder(self):
one_week_later = datetime.datetime.now() + datetime.timedelta(weeks=1)
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=one_week_later.date(),
vehicle_arrives_at_time=one_week_later.time(),
average_vehicle_capacity=300,
average_moved_capacity=250,
vehicle_arrives_every_k_days=-1
)
schedule.save()
feeder_lsv = LargeScheduledVehicle.create(
vehicle_name="TestFeeder1",
capacity_in_teu=schedule.average_vehicle_capacity,
moved_capacity=schedule.average_moved_capacity,
scheduled_arrival=datetime.datetime.now(),
schedule=schedule
)
feeder_lsv.save()
feeder = Feeder.create(
large_scheduled_vehicle=feeder_lsv
)
feeder.save()
Container.create(
weight=20,
length=ContainerLength.twenty_feet,
storage_requirement=StorageRequirement.standard,
delivered_by=ModeOfTransport.truck,
picked_up_by_large_scheduled_vehicle=feeder_lsv,
picked_up_by=ModeOfTransport.feeder,
picked_up_by_initial=ModeOfTransport.truck
)
capacities_with_one_feeder = self.analysis.get_inbound_and_outbound_capacity_of_each_vehicle()
self.assertEqual(len(capacities_with_one_feeder), 1, "There is only one vehicle")
key_of_entry = list(capacities_with_one_feeder.keys())[0]
self.assertEqual(len(key_of_entry), 3, "Key consists of three components")
mode_of_transport, service_name, vehicle_name = key_of_entry
self.assertEqual(mode_of_transport, ModeOfTransport.feeder)
self.assertEqual(service_name, "TestFeederService")
self.assertEqual(vehicle_name, "TestFeeder1")
value_of_entry = list(capacities_with_one_feeder.values())[0]
self.assertEqual(len(value_of_entry), 2, "Value consists of two components")
(used_capacity_on_inbound_journey, used_capacity_on_outbound_journey) = value_of_entry
self.assertEqual(used_capacity_on_inbound_journey, 250)
self.assertEqual(used_capacity_on_outbound_journey, 1, "One 20' is loaded")
| 46.355556 | 115 | 0.736337 | import datetime
import unittest
from conflowgen.domain_models.container import Container
from conflowgen.domain_models.data_types.container_length import ContainerLength
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.data_types.storage_requirement import StorageRequirement
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.distribution_seeders import mode_of_transport_distribution_seeder
from conflowgen.domain_models.large_vehicle_schedule import Schedule, Destination
from conflowgen.domain_models.vehicle import LargeScheduledVehicle, Truck, Feeder
from conflowgen.posthoc_analyses.inbound_to_outbound_vehicle_capacity_utilization_analysis import \
InboundToOutboundVehicleCapacityUtilizationAnalysis
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestInboundToOutboundCapacityUtilizationAnalysis(unittest.TestCase):
def setUp(self) -> None:
self.sqlite_db = setup_sqlite_in_memory_db()
self.sqlite_db.create_tables([
Schedule,
Container,
LargeScheduledVehicle,
Truck,
Feeder,
ModeOfTransportDistribution,
Destination
])
mode_of_transport_distribution_seeder.seed()
self.analysis = InboundToOutboundVehicleCapacityUtilizationAnalysis(
transportation_buffer=0.2
)
def test_with_no_data(self):
empty_capacities = self.analysis.get_inbound_and_outbound_capacity_of_each_vehicle()
self.assertDictEqual({}, empty_capacities)
def test_inbound_with_single_feeder(self):
one_week_later = datetime.datetime.now() + datetime.timedelta(weeks=1)
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=one_week_later.date(),
vehicle_arrives_at_time=one_week_later.time(),
average_vehicle_capacity=300,
average_moved_capacity=250,
vehicle_arrives_every_k_days=-1
)
schedule.save()
feeder_lsv = LargeScheduledVehicle.create(
vehicle_name="TestFeeder1",
capacity_in_teu=schedule.average_vehicle_capacity,
moved_capacity=schedule.average_moved_capacity,
scheduled_arrival=datetime.datetime.now(),
schedule=schedule
)
feeder_lsv.save()
feeder = Feeder.create(
large_scheduled_vehicle=feeder_lsv
)
feeder.save()
Container.create(
weight=20,
length=ContainerLength.twenty_feet,
storage_requirement=StorageRequirement.standard,
delivered_by=ModeOfTransport.truck,
picked_up_by_large_scheduled_vehicle=feeder_lsv,
picked_up_by=ModeOfTransport.feeder,
picked_up_by_initial=ModeOfTransport.truck
)
capacities_with_one_feeder = self.analysis.get_inbound_and_outbound_capacity_of_each_vehicle()
self.assertEqual(len(capacities_with_one_feeder), 1, "There is only one vehicle")
key_of_entry = list(capacities_with_one_feeder.keys())[0]
self.assertEqual(len(key_of_entry), 3, "Key consists of three components")
mode_of_transport, service_name, vehicle_name = key_of_entry
self.assertEqual(mode_of_transport, ModeOfTransport.feeder)
self.assertEqual(service_name, "TestFeederService")
self.assertEqual(vehicle_name, "TestFeeder1")
value_of_entry = list(capacities_with_one_feeder.values())[0]
self.assertEqual(len(value_of_entry), 2, "Value consists of two components")
(used_capacity_on_inbound_journey, used_capacity_on_outbound_journey) = value_of_entry
self.assertEqual(used_capacity_on_inbound_journey, 250)
self.assertEqual(used_capacity_on_outbound_journey, 1, "One 20' is loaded")
| true | true |
f7369ed232cff0673f5390e0bdb8dc9dd0b644fd | 855 | py | Python | conftest.py | atac-bham/c10-tools | 1562de718a92f4c4e0e30b4e6673dd9108f7077c | [
"BSD-3-Clause"
] | 5 | 2021-06-10T01:32:06.000Z | 2021-12-22T23:05:52.000Z | conftest.py | atac-bham/c10-tools | 1562de718a92f4c4e0e30b4e6673dd9108f7077c | [
"BSD-3-Clause"
] | 17 | 2020-08-03T16:35:26.000Z | 2022-03-30T17:29:41.000Z | conftest.py | atac/c10-tools | 278acfaab8bb42dff448fe1fbe08e7b7f75b1752 | [
"BSD-3-Clause"
] | null | null | null |
from unittest.mock import patch
import os
from chapter10 import C10
import pytest
TESTDIR = os.path.join(os.path.dirname(__file__), 'tests')
def pytest_configure():
pytest.SAMPLE = os.path.join(TESTDIR, '1.c10')
pytest.EVENTS = os.path.join(TESTDIR, 'event.c10')
pytest.ETHERNET = os.path.join(TESTDIR, 'ethernet.c10')
pytest.ERR = os.path.join(TESTDIR, 'err.c10')
pytest.BAD = os.path.join(TESTDIR, 'bad.c10')
pytest.PCAP = os.path.join(TESTDIR, 'test.pcap')
pytest.TMATS = os.path.join(TESTDIR, 'test.tmt')
class MockC10(C10):
def __init__(self, packets):
self.packets = packets
def __iter__(self):
return iter(self.packets)
@pytest.fixture
def c10():
return MockC10
@pytest.fixture(scope='session')
def fake_progress():
with patch('c10_tools.common.FileProgress'):
yield
| 21.923077 | 59 | 0.680702 |
from unittest.mock import patch
import os
from chapter10 import C10
import pytest
TESTDIR = os.path.join(os.path.dirname(__file__), 'tests')
def pytest_configure():
pytest.SAMPLE = os.path.join(TESTDIR, '1.c10')
pytest.EVENTS = os.path.join(TESTDIR, 'event.c10')
pytest.ETHERNET = os.path.join(TESTDIR, 'ethernet.c10')
pytest.ERR = os.path.join(TESTDIR, 'err.c10')
pytest.BAD = os.path.join(TESTDIR, 'bad.c10')
pytest.PCAP = os.path.join(TESTDIR, 'test.pcap')
pytest.TMATS = os.path.join(TESTDIR, 'test.tmt')
class MockC10(C10):
def __init__(self, packets):
self.packets = packets
def __iter__(self):
return iter(self.packets)
@pytest.fixture
def c10():
return MockC10
@pytest.fixture(scope='session')
def fake_progress():
with patch('c10_tools.common.FileProgress'):
yield
| true | true |
f736a105ea05b86e1c0fe4433a250e92c347dfc6 | 4,007 | py | Python | checkout/models.py | PiotrWojniak/MS4 | 66e63dcf74b7f80c7d95c1500a9350e59b755104 | [
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2021-08-20T12:19:48.000Z | 2021-11-07T12:42:40.000Z | checkout/models.py | PiotrWojniak/MS4 | 66e63dcf74b7f80c7d95c1500a9350e59b755104 | [
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | checkout/models.py | PiotrWojniak/MS4 | 66e63dcf74b7f80c7d95c1500a9350e59b755104 | [
"CNRI-Python",
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2022-01-28T17:40:34.000Z | 2022-01-29T09:07:33.000Z | """Import uuid to create unique order number"""
import uuid
from django.db import models
from django.db.models import Sum
from django.conf import settings
from django_countries.fields import CountryField
from products.models import Product
from profiles.models import UserProfile
class Order(models.Model):
"""
Create and track orders for anyone who makes a purchase
"""
order_number = models.CharField(max_length=32, null=False, editable=False)
user_profile = models.ForeignKey(
UserProfile, on_delete=models.SET_NULL,
null=True, blank=True, related_name='orders')
full_name = models.CharField(max_length=50, null=False, blank=False)
email = models.EmailField(max_length=254, null=False, blank=False)
phone_number = models.CharField(max_length=20, null=False, blank=False)
town_or_city = models.CharField(max_length=40, null=False, blank=False)
street_address1 = models.CharField(max_length=80, null=False, blank=False)
street_address2 = models.CharField(max_length=80, null=True, blank=True)
county = models.CharField(max_length=80, null=True, blank=True)
postcode = models.CharField(max_length=20, null=True, blank=True)
country = CountryField(blank_label='Country *', null=False, blank=False)
date = models.DateTimeField(auto_now_add=True)
delivery_cost = models.DecimalField(
max_digits=6, decimal_places=2, null=False, default=0)
order_total = models.DecimalField(
max_digits=10, decimal_places=2, null=False, default=0)
grand_total = models.DecimalField(
max_digits=10, decimal_places=2, null=False, default=0)
original_bag = models.TextField(
null=False, blank=False, default='')
stripe_pid = models.CharField(
max_length=254, null=False, blank=False, default='')
def _generate_order_number(self):
"""
Generate a random, unique order number using UUID
"""
return uuid.uuid4().hex.upper()
def update_total(self):
"""
Update grand total each time a line item is added,
accounting for delivery costs.
"""
self.order_total = self.lineitems.aggregate(Sum(
'lineitem_total'))['lineitem_total__sum'] or 0
if self.order_total < settings.FREE_SHIPPING_THRESHOLD:
self.delivery_cost = (
self.order_total * settings.STANDARD_SHIPPING_PERCENTAGE / 100)
else:
self.delivery_cost = 0
self.grand_total = self.order_total + self.delivery_cost
self.save()
def save(self, *args, **kwargs):
"""
Override the original save method to set the order number
if it hasn't been set already.
"""
if not self.order_number:
self.order_number = self._generate_order_number()
super().save(*args, **kwargs)
def __str__(self):
return self.order_number
class OrderLineItem(models.Model):
"""
Create and referencing product itself relating to a specific order
"""
order = models.ForeignKey(Order, null=False, blank=False,
on_delete=models.CASCADE,
related_name='lineitems')
product = models.ForeignKey(Product, null=False, blank=False,
on_delete=models.CASCADE)
product_size = models.CharField(
max_length=2, null=True, blank=True) # XS, S, M, L, XL,
quantity = models.IntegerField(null=False, blank=False, default=0)
lineitem_total = models.DecimalField(
max_digits=6, decimal_places=2, null=False,
blank=False, editable=False)
def save(self, *args, **kwargs):
"""
Override the original save method to set the lineitem total
and update the order total.
"""
self.lineitem_total = self.product.price * self.quantity
super().save(*args, **kwargs)
def __str__(self):
return f'SKU {self.product.sku} on order {self.order.order_number}'
| 38.902913 | 79 | 0.666583 | import uuid
from django.db import models
from django.db.models import Sum
from django.conf import settings
from django_countries.fields import CountryField
from products.models import Product
from profiles.models import UserProfile
class Order(models.Model):
    """A customer order: delivery details plus computed totals."""
    # Generated in save() via _generate_order_number(); never user-edited.
    order_number = models.CharField(max_length=32, null=False, editable=False)
    # Optional link to the buyer's profile; kept NULL if the profile goes away.
    user_profile = models.ForeignKey(
        UserProfile, on_delete=models.SET_NULL,
        null=True, blank=True, related_name='orders')
    full_name = models.CharField(max_length=50, null=False, blank=False)
    email = models.EmailField(max_length=254, null=False, blank=False)
    phone_number = models.CharField(max_length=20, null=False, blank=False)
    town_or_city = models.CharField(max_length=40, null=False, blank=False)
    street_address1 = models.CharField(max_length=80, null=False, blank=False)
    street_address2 = models.CharField(max_length=80, null=True, blank=True)
    county = models.CharField(max_length=80, null=True, blank=True)
    postcode = models.CharField(max_length=20, null=True, blank=True)
    country = CountryField(blank_label='Country *', null=False, blank=False)
    date = models.DateTimeField(auto_now_add=True)
    # Totals are recomputed by update_total(); defaults cover new orders.
    delivery_cost = models.DecimalField(
        max_digits=6, decimal_places=2, null=False, default=0)
    order_total = models.DecimalField(
        max_digits=10, decimal_places=2, null=False, default=0)
    grand_total = models.DecimalField(
        max_digits=10, decimal_places=2, null=False, default=0)
    # Snapshot of the shopping bag contents at checkout time.
    original_bag = models.TextField(
        null=False, blank=False, default='')
    # Stripe payment intent id associated with this order.
    stripe_pid = models.CharField(
        max_length=254, null=False, blank=False, default='')
    def _generate_order_number(self):
        """Return a random 32-character uppercase hex number (UUID4)."""
        return uuid.uuid4().hex.upper()
    def update_total(self):
        """Recompute totals from line items, applying delivery cost below
        the free-shipping threshold, and persist the order."""
        self.order_total = self.lineitems.aggregate(Sum(
            'lineitem_total'))['lineitem_total__sum'] or 0
        if self.order_total < settings.FREE_SHIPPING_THRESHOLD:
            self.delivery_cost = (
                self.order_total * settings.STANDARD_SHIPPING_PERCENTAGE / 100)
        else:
            self.delivery_cost = 0
        self.grand_total = self.order_total + self.delivery_cost
        self.save()
    def save(self, *args, **kwargs):
        """Assign a generated order number on first save, then persist."""
        if not self.order_number:
            self.order_number = self._generate_order_number()
        super().save(*args, **kwargs)
    def __str__(self):
        # Human-readable identity: the unique order number itself.
        return self.order_number
class OrderLineItem(models.Model):
    """A single product entry (size + quantity) belonging to an Order."""
    # related_name='lineitems' enables order.lineitems reverse lookups.
    order = models.ForeignKey(Order, null=False, blank=False,
                              on_delete=models.CASCADE,
                              related_name='lineitems')
    product = models.ForeignKey(Product, null=False, blank=False,
                                on_delete=models.CASCADE)
    product_size = models.CharField(
        max_length=2, null=True, blank=True)  # XS, S, M, L, XL
    quantity = models.IntegerField(null=False, blank=False, default=0)
    # Computed in save(); not editable through forms/admin.
    lineitem_total = models.DecimalField(
        max_digits=6, decimal_places=2, null=False,
        blank=False, editable=False)
    def save(self, *args, **kwargs):
        """Compute lineitem_total (price * quantity) before persisting."""
        self.lineitem_total = self.product.price * self.quantity
        super().save(*args, **kwargs)
    def __str__(self):
        # e.g. "SKU ABC123 on order 1A2B3C..."
        return f'SKU {self.product.sku} on order {self.order.order_number}'
| true | true |
f736a1321831b3ded99642a3274807966618052e | 7,202 | py | Python | micronet/compression/quantization/wbwtab/bn_fuse/bn_fuse.py | wmkai/quantization | 280976311ea7ed95b29662405189f36fa154f85d | [
"MIT"
] | 1 | 2021-07-30T08:34:19.000Z | 2021-07-30T08:34:19.000Z | micronet/compression/quantization/wbwtab/bn_fuse/bn_fuse.py | jay757425789/micronet | 351d184527e9867e0394878cf91b64ffd5c6b109 | [
"MIT"
] | null | null | null | micronet/compression/quantization/wbwtab/bn_fuse/bn_fuse.py | jay757425789/micronet | 351d184527e9867e0394878cf91b64ffd5c6b109 | [
"MIT"
] | null | null | null | import copy
import sys
sys.path.append("..")
sys.path.append("../../../..")
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from models import nin_gc, nin
import quantize
# ******** Whether to print/save complete model parameters ********
# Uncomment to dump tensors in full (no truncation) when saving parameters:
#torch.set_printoptions(precision=8, edgeitems=sys.maxsize, linewidth=200, sci_mode=False)
def bn_fuse(conv, bn):
    """Fuse a Conv2d with its trailing BatchNorm2d into a single conv layer.

    Relies on the module-level globals ``bn_counter`` / ``bin_bn_fuse_num``
    (initialized by the __main__ block) to decide, by BN-layer position,
    whether to apply the fusion tailored to binary activations (A) or the
    ordinary BN fusion.  Also reads the module-level ``args`` namespace for
    the weight bit width W.  Returns a new conv layer carrying the fused
    weight and bias.
    """
    # Positions in [1, bin_bn_fuse_num] are BN layers eligible for the
    # binary-activation (A) fusion.
    global bn_counter, bin_bn_fuse_num
    bn_counter = bn_counter + 1
    # ******************** BN parameters *********************
    mean = bn.running_mean
    std = torch.sqrt(bn.running_var + bn.eps)
    gamma = bn.weight
    beta = bn.bias
    # ******************* conv parameters ********************
    w = conv.weight
    w_fused = w.clone()
    if conv.bias is not None:
        b = conv.bias
    else:
        b = mean.new_zeros(mean.shape)
    b_fused = b.clone()
    # ********* BN fusion tailored to binary activations (A) *********
    if(bn_counter >= 1 and bn_counter <= bin_bn_fuse_num):
        # Per-channel sign of gamma decides whether weights are kept or
        # negated; the bias absorbs the BN shift accordingly.
        mask_positive = gamma.data.gt(0)
        mask_negetive = gamma.data.lt(0)
        w_fused[mask_positive] = w[mask_positive]
        b_fused[mask_positive] = b[mask_positive] - mean[mask_positive] + \
            beta[mask_positive] * (std[mask_positive] / gamma[mask_positive])
        w_fused[mask_negetive] = w[mask_negetive] * -1
        b_fused[mask_negetive] = mean[mask_negetive] - b[mask_negetive] - \
            beta[mask_negetive] * (std[mask_negetive] / gamma[mask_negetive])
    # ******************* ordinary BN fusion *******************
    else:
        w_fused = w * (gamma / std).reshape([conv.out_channels, 1, 1, 1])
        b_fused = beta + (b - mean) * (gamma / std)
    # From the second eligible position onward, emit a quantized conv so
    # inference stays in the quantized pipeline; otherwise a plain Conv2d.
    if(bn_counter >= 2 and bn_counter <= bin_bn_fuse_num):
        bn_fused_conv = quantize.QuantConv2d(conv.in_channels,
                                             conv.out_channels,
                                             conv.kernel_size,
                                             stride=conv.stride,
                                             padding=conv.padding,
                                             dilation=conv.dilation,
                                             groups=conv.groups,
                                             bias=True,
                                             padding_mode=conv.padding_mode,
                                             W=args.W,
                                             quant_inference=True)
    else:
        bn_fused_conv = nn.Conv2d(conv.in_channels,
                                  conv.out_channels,
                                  conv.kernel_size,
                                  stride=conv.stride,
                                  padding=conv.padding,
                                  dilation=conv.dilation,
                                  groups=conv.groups,
                                  bias=True,
                                  padding_mode=conv.padding_mode)
    bn_fused_conv.weight.data = w_fused
    bn_fused_conv.bias.data = b_fused
    return bn_fused_conv
def bn_fuse_module(module):
    """Recursively fuse each Conv2d with the BatchNorm2d that follows it.

    The fused conv replaces the original Conv2d in-place, and the consumed
    BatchNorm2d is swapped for an ``nn.Identity`` placeholder so the module
    structure (and state-dict key order) is preserved.
    """
    for child_name, child in module.named_children():
        if isinstance(child, nn.BatchNorm2d):
            # Fuse with the most recently seen conv and neutralize the BN.
            module._modules[pending_name] = bn_fuse(pending_conv, child)
            module._modules[child_name] = nn.Identity()
        elif isinstance(child, nn.Conv2d):
            # Remember the conv until its BN partner shows up.
            pending_name, pending_conv = child_name, child
        else:
            bn_fuse_module(child)
def model_bn_fuse(model, inplace=False):
    """Return a model with every Conv+BN pair fused.

    Operates on a deep copy unless ``inplace`` is True.
    """
    target = model if inplace else copy.deepcopy(model)
    bn_fuse_module(target)
    return target
if __name__ == '__main__':
    # CLI options: model choice, pruned-model flag, and quantization widths.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_id', action='store', default='',
                        help='gpu_id')
    parser.add_argument('--prune_quant', action='store_true',
                        help='this is prune_quant model')
    parser.add_argument('--model_type', type=int, default=1,
                        help='model type:0-nin,1-nin_gc')
    parser.add_argument('--W', type=int, default=2,
                        help='Wb:2, Wt:3, Wfp:32')
    parser.add_argument('--A', type=int, default=2,
                        help='Ab:2, Afp:32')
    args = parser.parse_args()
    print('==> Options:', args)
    if args.gpu_id:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    # Build the network matching the checkpoint (pruned checkpoints carry
    # their channel configuration in checkpoint['cfg']).
    if args.prune_quant:
        print('******Prune Quant model******')
        if args.model_type == 0:
            checkpoint = torch.load('../models_save/nin.pth')
            quant_model_train = nin.Net(cfg=checkpoint['cfg'])
        else:
            checkpoint = torch.load('../models_save/nin_gc.pth')
            quant_model_train = nin_gc.Net(cfg=checkpoint['cfg'])
    else:
        if args.model_type == 0:
            checkpoint = torch.load('../models_save/nin.pth')
            quant_model_train = nin.Net()
        else:
            checkpoint = torch.load('../models_save/nin_gc.pth')
            quant_model_train = nin_gc.Net()
    # Two copies: one kept in training form, one prepared for fused inference.
    quant_bn_fused_model_inference = copy.deepcopy(quant_model_train)
    quantize.prepare(quant_model_train, inplace=True, A=args.A, W=args.W)
    quantize.prepare(quant_bn_fused_model_inference, inplace=True,
                     A=args.A, W=args.W, quant_inference=True)
    quant_model_train.load_state_dict(checkpoint['state_dict'])
    quant_bn_fused_model_inference.load_state_dict(checkpoint['state_dict'])
    # ********************** quant_model_train ************************
    # Save the training-form model plus human-readable dumps.
    torch.save(quant_model_train, 'models_save/quant_model_train.pth')
    torch.save(quant_model_train.state_dict(), 'models_save/quant_model_train_para.pth')
    model_array = np.array(quant_model_train)
    model_para_array = np.array(quant_model_train.state_dict())
    np.savetxt('models_save/quant_model_train.txt', [model_array], fmt='%s', delimiter=',')
    np.savetxt('models_save/quant_model_train_para.txt', [model_para_array], fmt='%s', delimiter=',')
    # ********************* quant_bn_fused_model_inference **********************
    bn_counter = 0
    bin_bn_fuse_num = 0
    # Count the BN layers eligible for the binary-activation (A) fusion:
    # one per activation quantizer in the prepared model.
    for m in quant_bn_fused_model_inference.modules():
        if isinstance(m, quantize.ActivationQuantizer):
            bin_bn_fuse_num += 1
    model_bn_fuse(quant_bn_fused_model_inference, inplace=True)  # BN fusion
    print('***quant_model_train***\n', quant_model_train)
    print('\n***quant_bn_fused_model_inference***\n', quant_bn_fused_model_inference)
    # Save the fused inference model plus human-readable dumps.
    torch.save(quant_bn_fused_model_inference, 'models_save/quant_bn_fused_model_inference.pth')
    torch.save(quant_bn_fused_model_inference.state_dict(), 'models_save/quant_bn_fused_model_inference_para.pth')
    model_array = np.array(quant_bn_fused_model_inference)
    model_para_array = np.array(quant_bn_fused_model_inference.state_dict())
    np.savetxt('models_save/quant_bn_fused_model_inference.txt', [model_array], fmt='%s', delimiter=',')
    np.savetxt('models_save/quant_bn_fused_model_inference_para.txt', [model_para_array], fmt='%s', delimiter=',')
    print("************* bn_fuse 完成 **************")
    print("************* bn_fused_model 已保存 **************")
| 43.648485 | 114 | 0.574701 | import copy
import sys
sys.path.append("..")
sys.path.append("../../../..")
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
from models import nin_gc, nin
import quantize
def bn_fuse(conv, bn):
    """Fuse a Conv2d with its trailing BatchNorm2d into one conv layer.

    Uses module-level globals ``bn_counter`` / ``bin_bn_fuse_num`` and the
    ``args`` namespace (set in __main__); positions in [1, bin_bn_fuse_num]
    use the fusion tailored to binary activations, the rest the ordinary one.
    """
    global bn_counter, bin_bn_fuse_num
    bn_counter = bn_counter + 1
    # BN statistics / affine parameters.
    mean = bn.running_mean
    std = torch.sqrt(bn.running_var + bn.eps)
    gamma = bn.weight
    beta = bn.bias
    # Conv parameters (bias defaults to zeros when absent).
    w = conv.weight
    w_fused = w.clone()
    if conv.bias is not None:
        b = conv.bias
    else:
        b = mean.new_zeros(mean.shape)
    b_fused = b.clone()
    if(bn_counter >= 1 and bn_counter <= bin_bn_fuse_num):
        # Binary-activation fusion: gamma's sign keeps or negates weights.
        mask_positive = gamma.data.gt(0)
        mask_negetive = gamma.data.lt(0)
        w_fused[mask_positive] = w[mask_positive]
        b_fused[mask_positive] = b[mask_positive] - mean[mask_positive] + \
            beta[mask_positive] * (std[mask_positive] / gamma[mask_positive])
        w_fused[mask_negetive] = w[mask_negetive] * -1
        b_fused[mask_negetive] = mean[mask_negetive] - b[mask_negetive] - \
            beta[mask_negetive] * (std[mask_negetive] / gamma[mask_negetive])
    else:
        # Ordinary BN fusion: scale weights, shift bias.
        w_fused = w * (gamma / std).reshape([conv.out_channels, 1, 1, 1])
        b_fused = beta + (b - mean) * (gamma / std)
    # Quantized conv from the second eligible position on; plain conv otherwise.
    if(bn_counter >= 2 and bn_counter <= bin_bn_fuse_num):
        bn_fused_conv = quantize.QuantConv2d(conv.in_channels,
                                             conv.out_channels,
                                             conv.kernel_size,
                                             stride=conv.stride,
                                             padding=conv.padding,
                                             dilation=conv.dilation,
                                             groups=conv.groups,
                                             bias=True,
                                             padding_mode=conv.padding_mode,
                                             W=args.W,
                                             quant_inference=True)
    else:
        bn_fused_conv = nn.Conv2d(conv.in_channels,
                                  conv.out_channels,
                                  conv.kernel_size,
                                  stride=conv.stride,
                                  padding=conv.padding,
                                  dilation=conv.dilation,
                                  groups=conv.groups,
                                  bias=True,
                                  padding_mode=conv.padding_mode)
    bn_fused_conv.weight.data = w_fused
    bn_fused_conv.bias.data = b_fused
    return bn_fused_conv
def bn_fuse_module(module):
    """Recursively replace each Conv2d+BatchNorm2d pair with a fused conv,
    swapping the BN for an nn.Identity placeholder."""
    for name, child in module.named_children():
        if isinstance(child, nn.Conv2d):
            # Remember the conv until its BN partner shows up.
            conv_name_temp = name
            conv_child_temp = child
        elif isinstance(child, nn.BatchNorm2d):
            bn_fused_conv = bn_fuse(conv_child_temp, child)
            module._modules[conv_name_temp] = bn_fused_conv
            module._modules[name] = nn.Identity()
        else:
            bn_fuse_module(child)
def model_bn_fuse(model, inplace=False):
    """Return a model with every Conv+BN pair fused (deep copy unless inplace)."""
    if not inplace:
        model = copy.deepcopy(model)
    bn_fuse_module(model)
    return model
if __name__ == '__main__':
    # CLI options: model choice, pruned-model flag, quantization widths.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_id', action='store', default='',
                        help='gpu_id')
    parser.add_argument('--prune_quant', action='store_true',
                        help='this is prune_quant model')
    parser.add_argument('--model_type', type=int, default=1,
                        help='model type:0-nin,1-nin_gc')
    parser.add_argument('--W', type=int, default=2,
                        help='Wb:2, Wt:3, Wfp:32')
    parser.add_argument('--A', type=int, default=2,
                        help='Ab:2, Afp:32')
    args = parser.parse_args()
    print('==> Options:', args)
    if args.gpu_id:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    # Build the network matching the checkpoint (pruned checkpoints carry
    # their channel configuration in checkpoint['cfg']).
    if args.prune_quant:
        print('******Prune Quant model******')
        if args.model_type == 0:
            checkpoint = torch.load('../models_save/nin.pth')
            quant_model_train = nin.Net(cfg=checkpoint['cfg'])
        else:
            checkpoint = torch.load('../models_save/nin_gc.pth')
            quant_model_train = nin_gc.Net(cfg=checkpoint['cfg'])
    else:
        if args.model_type == 0:
            checkpoint = torch.load('../models_save/nin.pth')
            quant_model_train = nin.Net()
        else:
            checkpoint = torch.load('../models_save/nin_gc.pth')
            quant_model_train = nin_gc.Net()
    # One copy stays in training form; the other is prepared for fused inference.
    quant_bn_fused_model_inference = copy.deepcopy(quant_model_train)
    quantize.prepare(quant_model_train, inplace=True, A=args.A, W=args.W)
    quantize.prepare(quant_bn_fused_model_inference, inplace=True,
                     A=args.A, W=args.W, quant_inference=True)
    quant_model_train.load_state_dict(checkpoint['state_dict'])
    quant_bn_fused_model_inference.load_state_dict(checkpoint['state_dict'])
    # Save the training-form model plus human-readable dumps.
    torch.save(quant_model_train, 'models_save/quant_model_train.pth')
    torch.save(quant_model_train.state_dict(), 'models_save/quant_model_train_para.pth')
    model_array = np.array(quant_model_train)
    model_para_array = np.array(quant_model_train.state_dict())
    np.savetxt('models_save/quant_model_train.txt', [model_array], fmt='%s', delimiter=',')
    np.savetxt('models_save/quant_model_train_para.txt', [model_para_array], fmt='%s', delimiter=',')
    bn_counter = 0
    bin_bn_fuse_num = 0
    # Count the BN layers eligible for the binary-activation fusion.
    for m in quant_bn_fused_model_inference.modules():
        if isinstance(m, quantize.ActivationQuantizer):
            bin_bn_fuse_num += 1
    model_bn_fuse(quant_bn_fused_model_inference, inplace=True)  # BN fusion
    print('***quant_model_train***\n', quant_model_train)
    print('\n***quant_bn_fused_model_inference***\n', quant_bn_fused_model_inference)
    # Save the fused inference model plus human-readable dumps.
    torch.save(quant_bn_fused_model_inference, 'models_save/quant_bn_fused_model_inference.pth')
    torch.save(quant_bn_fused_model_inference.state_dict(), 'models_save/quant_bn_fused_model_inference_para.pth')
    model_array = np.array(quant_bn_fused_model_inference)
    model_para_array = np.array(quant_bn_fused_model_inference.state_dict())
    np.savetxt('models_save/quant_bn_fused_model_inference.txt', [model_array], fmt='%s', delimiter=',')
    np.savetxt('models_save/quant_bn_fused_model_inference_para.txt', [model_para_array], fmt='%s', delimiter=',')
    print("************* bn_fuse 完成 **************")
    print("************* bn_fused_model 已保存 **************")
| true | true |
f736a1bbdce036a5414adc38804039cbc6539f9a | 879 | py | Python | manage.py | 299hannah/Pitch | 7a8c0f4358c79f5d58d2056a790c05c7476d66bf | [
"MIT"
] | null | null | null | manage.py | 299hannah/Pitch | 7a8c0f4358c79f5d58d2056a790c05c7476d66bf | [
"MIT"
] | null | null | null | manage.py | 299hannah/Pitch | 7a8c0f4358c79f5d58d2056a790c05c7476d66bf | [
"MIT"
] | null | null | null | from app import create_app,db
from flask_script import Manager,Server
from app.models import User, Joke, Commentjoke, Debate, Commentdebate, Pickup, Commentlines
from flask_migrate import Migrate, MigrateCommand
# Create the Flask app instance; switch the config name to change environment.
# app = create_app('development')
app = create_app('production')
# app = create_app('test')
manager = Manager(app)
migrate = Migrate(app,db)
# CLI commands: `python manage.py db ...` and `python manage.py server`.
manager.add_command('db',MigrateCommand)
manager.add_command('server',Server)
@manager.command
def test():
    """Run the unit tests discovered under the tests/ directory."""
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
    """Pre-import common objects into the interactive shell session."""
    # NOTE(review): Debate and Pickup are imported above but not exposed
    # here — confirm whether that is intentional.
    return dict(app = app, db = db, User = User, Joke = Joke, Commentjoke = Commentjoke, Commentdebate = Commentdebate, Commentlines = Commentlines)
if __name__ == '__main__':
    manager.run()
| 29.3 | 148 | 0.74744 | from app import create_app,db
from flask_script import Manager,Server
from app.models import User, Joke, Commentjoke, Debate, Commentdebate, Pickup, Commentlines
from flask_migrate import Migrate, MigrateCommand
# Create the Flask app instance with the production configuration.
app = create_app('production')
manager = Manager(app)
migrate = Migrate(app,db)
# CLI commands: `python manage.py db ...` and `python manage.py server`.
manager.add_command('db',MigrateCommand)
manager.add_command('server',Server)
@manager.command
def test():
    """Run the unit tests discovered under the tests/ directory."""
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell
def make_shell_context():
    """Pre-import common objects into the interactive shell session."""
    return dict(app = app, db = db, User = User, Joke = Joke, Commentjoke = Commentjoke, Commentdebate = Commentdebate, Commentlines = Commentlines)
if __name__ == '__main__':
    manager.run()
| true | true |
f736a229bcdcef49e1e562a0a7b325367968498b | 371 | py | Python | psy/__init__.py | cegfdb/IRT | 20fcde3b385bce1644fecab7cdc8bda5beacda03 | [
"MIT"
] | 8 | 2018-11-25T12:57:53.000Z | 2022-03-28T10:48:16.000Z | psy/__init__.py | glan-wxl/IRT | 1eed348fd3a8b9c27ea72d476ab2844113468eb8 | [
"MIT"
] | null | null | null | psy/__init__.py | glan-wxl/IRT | 1eed348fd3a8b9c27ea72d476ab2844113468eb8 | [
"MIT"
] | 2 | 2019-09-28T09:09:41.000Z | 2020-11-19T02:23:23.000Z | from psy.cdm.irm import McmcHoDina, McmcDina, EmDina, MlDina
from psy.mirt.irm import Irt2PL, Mirt2PL
from psy.mirt.grm import Grm
from psy.cat.tirt import SimAdaptiveTirt
from psy.fa.rotations import GPForth
from psy.fa.factors import Factor
from psy.sem.cfa import cfa
from psy.sem.sem import sem
from psy.sem.ccfa import delta_i_ccfa, get_irt_parameter, get_thresholds | 41.222222 | 72 | 0.824798 | from psy.cdm.irm import McmcHoDina, McmcDina, EmDina, MlDina
from psy.mirt.irm import Irt2PL, Mirt2PL
from psy.mirt.grm import Grm
from psy.cat.tirt import SimAdaptiveTirt
from psy.fa.rotations import GPForth
from psy.fa.factors import Factor
from psy.sem.cfa import cfa
from psy.sem.sem import sem
from psy.sem.ccfa import delta_i_ccfa, get_irt_parameter, get_thresholds | true | true |
f736a27b89edc6e8b2b7656f69ea9bb351e3eda5 | 5,958 | py | Python | src/poliastro/plotting/tisserand.py | havi121/poliastro-AU | 98889b36892622b43cb284f64e6ecf72e3f01c6f | [
"MIT"
] | null | null | null | src/poliastro/plotting/tisserand.py | havi121/poliastro-AU | 98889b36892622b43cb284f64e6ecf72e3f01c6f | [
"MIT"
] | null | null | null | src/poliastro/plotting/tisserand.py | havi121/poliastro-AU | 98889b36892622b43cb284f64e6ecf72e3f01c6f | [
"MIT"
] | null | null | null | """ Generates Tisserand plots """
from enum import Enum
import numpy as np
from astropy import units as u
from matplotlib import pyplot as plt
from poliastro.plotting._base import BODY_COLORS
from poliastro.twobody.mean_elements import get_mean_elements
from poliastro.util import norm
class TisserandKind(Enum):
    """All possible Tisserand kinds (what _build_lines plots on each axis)"""
    APSIS = "apsis"  # apoapsis radius vs periapsis radius
    ENERGY = "energy"  # periapsis radius vs specific orbital energy
    PERIOD = "period"  # periapsis radius vs orbital period
class TisserandPlotter:
    """Generates Tisserand figures"""
    def __init__(self, kind=TisserandKind.APSIS, axes=None):
        """Object initializer
        Parameters
        ----------
        kind: TisserandKind
            Nature for the Tisserand (apsis, energy or period)
        axes: ~matplotlib.pyplot.axes
            Axes for the figure; a new single-axes figure is created if None
        """
        # Assign Tisserand kind
        self.kind = kind
        # Check if axis available
        if not axes:
            _, self.ax = plt.subplots(1, 1)
        else:
            self.ax = axes
        # Force axes scale regarding Tisserand kind
        self.ax.set_xscale("log")
        if self.kind == TisserandKind.APSIS:
            self.ax.set_yscale("log")
    def _solve_tisserand(
        self, body, vinf_span, num_contours, alpha_lim=(0, np.pi), N=100
    ):
        """Solves all possible Tisserand lines with a meshgrid workflow
        Parameters
        ----------
        body: ~poliastro.bodies.Body
            Body to be plotted Tisserand
        vinf_span: tuple of ~astropy.units.Quantity
            Minimum and maximum Vinf for the flyby
        num_contours: int
            Number of contour lines for flyby speed
        alpha_lim: tuple
            Minimum and maximum flyby angles, in radians
        N: int
            Number of points for flyby angle
        Returns
        -------
        RR_P, RR_A, EE, TT: tuple of ~astropy.units.Quantity meshgrids
            Periapsis radius, apoapsis radius, specific energy and period
        Note
        ----
        The algorithm for generating Tisserand plots is the one depicted in
        "Preliminary Trajectory Design of a Mission to Enceladus" by David
        Falcato Fialho Palma, section 3.6
        """
        # Generate mean orbital elements of the flyby body
        body_rv = get_mean_elements(body).to_vectors()
        R_body, V_body = norm(body_rv.r), norm(body_rv.v)
        # Generate non-dimensional velocity and alpha span
        vinf_array = np.linspace(vinf_span[0], vinf_span[-1], num_contours)
        alpha_array = np.linspace(alpha_lim[0], alpha_lim[-1], N)
        vinf_array /= V_body
        # Construct the mesh for any configuration
        V_INF, ALPHA = np.meshgrid(vinf_array, alpha_array)
        # Solving for non-dimensional a_sc and ecc_sc
        A_SC = 1 / np.abs(1 - V_INF ** 2 - 2 * V_INF * np.cos(ALPHA))
        ECC_SC = np.sqrt(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / (2)) ** 2)
        # Compute main Tisserand variables
        RR_P = A_SC * R_body * (1 - ECC_SC)
        RR_A = A_SC * R_body * (1 + ECC_SC)
        TT = 2 * np.pi * np.sqrt((A_SC * R_body) ** 3 / body.parent.k)
        EE = -body.parent.k / (2 * A_SC * R_body)
        # Build color lines to internal canvas
        return RR_P, RR_A, EE, TT
    def _build_lines(self, RR_P, RR_A, EE, TT, color):
        """Plot the Tisserand quantities selected by ``self.kind``
        Parameters
        ----------
        RR_P: ~astropy.units.Quantity
            Periapsis radius meshgrid
        RR_A: ~astropy.units.Quantity
            Apoapsis radius meshgrid
        EE: ~astropy.units.Quantity
            Specific orbital energy meshgrid
        TT: ~astropy.units.Quantity
            Orbital period meshgrid
        color: str
            String representing for the color lines
        Returns
        -------
        lines: list
            Plotting lines for the Tisserand
        """
        # Plot desired kind lines
        if self.kind == TisserandKind.APSIS:
            # Generate apsis lines
            lines = self.ax.plot(RR_A.to(u.AU), RR_P.to(u.AU), color=color)
        elif self.kind == TisserandKind.ENERGY:
            # Generate energy lines
            lines = self.ax.plot(
                RR_P.to(u.AU), EE.to(u.au ** 2 / u.s ** 2), color=color
            )
        elif self.kind == TisserandKind.PERIOD:
            # Generate period lines
            lines = self.ax.plot(RR_P.to(u.AU), TT.to(u.year), color=color)
        return lines
    def plot_line(self, body, vinf, alpha_lim=(0, np.pi), color=None):
        """Plots body Tisserand line within flyby angle
        Parameters
        ----------
        body: ~poliastro.bodies.Body
            Body to be plotted Tisserand
        vinf: ~astropy.units.Quantity
            Vinf velocity line
        alpha_lim: tuple
            Minimum and maximum flyby angles
        color: str
            String representing for the color lines
        Returns
        -------
        self.ax: ~matplotlib.axes.Axes
            Apsis tisserand is the default plotting option
        """
        # HACK: to reuse Tisserand solver, we transform input Vinf into a tuple
        vinf_span = (vinf, vinf)
        # Solve Tisserand parameters
        RR_P, RR_A, EE, TT = self._solve_tisserand(
            body, vinf_span, num_contours=2, alpha_lim=alpha_lim
        )
        # Check if color defined
        if not color:
            color = BODY_COLORS[body.name]
        # Build canvas lines from Tisserand parameters
        self._build_lines(RR_P, RR_A, EE, TT, color)
        return self.ax
    def plot(self, body, vinf_span, num_contours=10, color=None):
        """Plots body Tisserand for given amount of solutions within Vinf span
        Parameters
        ----------
        body: ~poliastro.bodies.Body
            Body to be plotted Tisserand
        vinf_span: tuple
            Minimum and maximum Vinf velocities
        num_contours: int
            Number of points to iterate over previously defined velocities
        color: str
            String representing for the color lines
        Returns
        -------
        self.ax: ~matplotlib.axes.Axes
            Apsis tisserand is the default plotting option
        """
        # Solve Tisserand parameters
        RR_P, RR_A, EE, TT = self._solve_tisserand(body, vinf_span, num_contours)
        # Check if color defined
        if not color:
            color = BODY_COLORS[body.name]
        # Build canvas lines from Tisserand parameters
        self._build_lines(RR_P, RR_A, EE, TT, color)
        return self.ax
| 30.090909 | 81 | 0.588117 | from enum import Enum
import numpy as np
from astropy import units as u
from matplotlib import pyplot as plt
from poliastro.plotting._base import BODY_COLORS
from poliastro.twobody.mean_elements import get_mean_elements
from poliastro.util import norm
class TisserandKind(Enum):
    """All possible Tisserand kinds"""
    APSIS = "apsis"  # apoapsis radius vs periapsis radius
    ENERGY = "energy"  # periapsis radius vs specific orbital energy
    PERIOD = "period"  # periapsis radius vs orbital period
class TisserandPlotter:
    """Generates Tisserand figures"""
    def __init__(self, kind=TisserandKind.APSIS, axes=None):
        """Store the Tisserand kind and set up (or reuse) matplotlib axes."""
        self.kind = kind
        if not axes:
            _, self.ax = plt.subplots(1, 1)
        else:
            self.ax = axes
        # Log x-scale always; log y-scale only for apsis-vs-apsis plots.
        self.ax.set_xscale("log")
        if self.kind == TisserandKind.APSIS:
            self.ax.set_yscale("log")
    def _solve_tisserand(
        self, body, vinf_span, num_contours, alpha_lim=(0, np.pi), N=100
    ):
        """Solve Tisserand quantities over a (Vinf, alpha) meshgrid.

        Returns periapsis radii, apoapsis radii, specific energies and
        periods as meshgrids.
        """
        # Mean orbital elements of the flyby body -> its radius and speed.
        body_rv = get_mean_elements(body).to_vectors()
        R_body, V_body = norm(body_rv.r), norm(body_rv.v)
        # Non-dimensional velocity span and flyby-angle span.
        vinf_array = np.linspace(vinf_span[0], vinf_span[-1], num_contours)
        alpha_array = np.linspace(alpha_lim[0], alpha_lim[-1], N)
        vinf_array /= V_body
        V_INF, ALPHA = np.meshgrid(vinf_array, alpha_array)
        # Non-dimensional semi-major axis and eccentricity of the spacecraft.
        A_SC = 1 / np.abs(1 - V_INF ** 2 - 2 * V_INF * np.cos(ALPHA))
        ECC_SC = np.sqrt(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / (2)) ** 2)
        RR_P = A_SC * R_body * (1 - ECC_SC)
        RR_A = A_SC * R_body * (1 + ECC_SC)
        TT = 2 * np.pi * np.sqrt((A_SC * R_body) ** 3 / body.parent.k)
        EE = -body.parent.k / (2 * A_SC * R_body)
        return RR_P, RR_A, EE, TT
    def _build_lines(self, RR_P, RR_A, EE, TT, color):
        """Plot the quantities selected by ``self.kind`` and return the lines."""
        if self.kind == TisserandKind.APSIS:
            lines = self.ax.plot(RR_A.to(u.AU), RR_P.to(u.AU), color=color)
        elif self.kind == TisserandKind.ENERGY:
            lines = self.ax.plot(
                RR_P.to(u.AU), EE.to(u.au ** 2 / u.s ** 2), color=color
            )
        elif self.kind == TisserandKind.PERIOD:
            lines = self.ax.plot(RR_P.to(u.AU), TT.to(u.year), color=color)
        return lines
    def plot_line(self, body, vinf, alpha_lim=(0, np.pi), color=None):
        """Plot a single Tisserand line for one Vinf value; return the axes."""
        # Reuse the span-based solver by degenerating Vinf into a tuple.
        vinf_span = (vinf, vinf)
        RR_P, RR_A, EE, TT = self._solve_tisserand(
            body, vinf_span, num_contours=2, alpha_lim=alpha_lim
        )
        if not color:
            color = BODY_COLORS[body.name]
        self._build_lines(RR_P, RR_A, EE, TT, color)
        return self.ax
    def plot(self, body, vinf_span, num_contours=10, color=None):
        """Plot a family of Tisserand lines over a Vinf span; return the axes."""
        RR_P, RR_A, EE, TT = self._solve_tisserand(body, vinf_span, num_contours)
        if not color:
            color = BODY_COLORS[body.name]
        self._build_lines(RR_P, RR_A, EE, TT, color)
        return self.ax
| true | true |
f736a3517e3c5f420c40d3bfa794b57a3d8b2046 | 17,362 | py | Python | balance.py | Charterhouse/random_forest | b842f08fee1054dbff78b6fb3afd4006a7f14a6d | [
"MIT"
] | 2 | 2019-10-24T07:22:46.000Z | 2019-11-18T12:32:26.000Z | balance.py | Charterhouse/random_forest | b842f08fee1054dbff78b6fb3afd4006a7f14a6d | [
"MIT"
] | null | null | null | balance.py | Charterhouse/random_forest | b842f08fee1054dbff78b6fb3afd4006a7f14a6d | [
"MIT"
] | 2 | 2020-03-03T18:30:14.000Z | 2021-09-06T13:55:06.000Z | from mpyc.runtime import mpc
from src.dataset import ObliviousDataset, Sample
from src.output import output
from src.secint import secint as s
from src.forest import train_forest
def sample(ins, out):
    """Wrap raw feature values and a label into a secret-shared Sample."""
    secret_features = list(map(s, ins))
    return Sample(secret_features, s(out))
samples = ObliviousDataset.create(
sample([1, 1, 1, 2], 1),
sample([1, 1, 1, 3], 1),
sample([1, 1, 1, 4], 1),
sample([1, 1, 1, 5], 1),
sample([1, 1, 2, 1], 1),
sample([1, 1, 2, 2], 1),
sample([1, 1, 2, 3], 1),
sample([1, 1, 2, 4], 1),
sample([1, 1, 2, 5], 1),
sample([1, 1, 3, 1], 1),
sample([1, 1, 3, 2], 1),
sample([1, 1, 3, 3], 1),
sample([1, 1, 3, 4], 1),
sample([1, 1, 3, 5], 1),
sample([1, 1, 4, 1], 1),
sample([1, 1, 4, 2], 1),
sample([1, 1, 4, 3], 1),
sample([1, 1, 4, 4], 1),
sample([1, 1, 4, 5], 1),
sample([1, 1, 5, 1], 1),
sample([1, 1, 5, 2], 1),
sample([1, 1, 5, 3], 1),
sample([1, 1, 5, 4], 1),
sample([1, 1, 5, 5], 1),
sample([1, 2, 1, 1], 0),
sample([1, 2, 1, 3], 1),
sample([1, 2, 1, 4], 1),
sample([1, 2, 1, 5], 1),
sample([1, 2, 2, 2], 1),
sample([1, 2, 2, 3], 1),
sample([1, 2, 2, 4], 1),
sample([1, 2, 2, 5], 1),
sample([1, 2, 3, 1], 1),
sample([1, 2, 3, 2], 1),
sample([1, 2, 3, 3], 1),
sample([1, 2, 3, 4], 1),
sample([1, 2, 3, 5], 1),
sample([1, 2, 4, 1], 1),
sample([1, 2, 4, 2], 1),
sample([1, 2, 4, 3], 1),
sample([1, 2, 4, 4], 1),
sample([1, 2, 4, 5], 1),
sample([1, 2, 5, 1], 1),
sample([1, 2, 5, 2], 1),
sample([1, 2, 5, 3], 1),
sample([1, 2, 5, 4], 1),
sample([1, 2, 5, 5], 1),
sample([1, 3, 1, 1], 0),
sample([1, 3, 1, 2], 0),
sample([1, 3, 1, 4], 1),
sample([1, 3, 1, 5], 1),
sample([1, 3, 2, 1], 0),
sample([1, 3, 2, 2], 1),
sample([1, 3, 2, 3], 1),
sample([1, 3, 2, 4], 1),
sample([1, 3, 2, 5], 1),
sample([1, 3, 3, 2], 1),
sample([1, 3, 3, 3], 1),
sample([1, 3, 3, 4], 1),
sample([1, 3, 3, 5], 1),
sample([1, 3, 4, 1], 1),
sample([1, 3, 4, 2], 1),
sample([1, 3, 4, 3], 1),
sample([1, 3, 4, 4], 1),
sample([1, 3, 4, 5], 1),
sample([1, 3, 5, 1], 1),
sample([1, 3, 5, 2], 1),
sample([1, 3, 5, 3], 1),
sample([1, 3, 5, 4], 1),
sample([1, 3, 5, 5], 1),
sample([1, 4, 1, 1], 0),
sample([1, 4, 1, 2], 0),
sample([1, 4, 1, 3], 0),
sample([1, 4, 1, 5], 1),
sample([1, 4, 2, 1], 0),
sample([1, 4, 2, 3], 1),
sample([1, 4, 2, 4], 1),
sample([1, 4, 2, 5], 1),
sample([1, 4, 3, 1], 0),
sample([1, 4, 3, 2], 1),
sample([1, 4, 3, 3], 1),
sample([1, 4, 3, 4], 1),
sample([1, 4, 3, 5], 1),
sample([1, 4, 4, 2], 1),
sample([1, 4, 4, 3], 1),
sample([1, 4, 4, 4], 1),
sample([1, 4, 4, 5], 1),
sample([1, 4, 5, 1], 1),
sample([1, 4, 5, 2], 1),
sample([1, 4, 5, 3], 1),
sample([1, 4, 5, 4], 1),
sample([1, 4, 5, 5], 1),
sample([1, 5, 1, 1], 0),
sample([1, 5, 1, 2], 0),
sample([1, 5, 1, 3], 0),
sample([1, 5, 1, 4], 0),
sample([1, 5, 2, 1], 0),
sample([1, 5, 2, 2], 0),
sample([1, 5, 2, 3], 1),
sample([1, 5, 2, 4], 1),
sample([1, 5, 2, 5], 1),
sample([1, 5, 3, 1], 0),
sample([1, 5, 3, 2], 1),
sample([1, 5, 3, 3], 1),
sample([1, 5, 3, 4], 1),
sample([1, 5, 3, 5], 1),
sample([1, 5, 4, 1], 0),
sample([1, 5, 4, 2], 1),
sample([1, 5, 4, 3], 1),
sample([1, 5, 4, 4], 1),
sample([1, 5, 4, 5], 1),
sample([1, 5, 5, 2], 1),
sample([1, 5, 5, 3], 1),
sample([1, 5, 5, 4], 1),
sample([1, 5, 5, 5], 1),
sample([2, 1, 1, 1], 0),
sample([2, 1, 1, 3], 1),
sample([2, 1, 1, 4], 1),
sample([2, 1, 1, 5], 1),
sample([2, 1, 2, 2], 1),
sample([2, 1, 2, 3], 1),
sample([2, 1, 2, 4], 1),
sample([2, 1, 2, 5], 1),
sample([2, 1, 3, 1], 1),
sample([2, 1, 3, 2], 1),
sample([2, 1, 3, 3], 1),
sample([2, 1, 3, 4], 1),
sample([2, 1, 3, 5], 1),
sample([2, 1, 4, 1], 1),
sample([2, 1, 4, 2], 1),
sample([2, 1, 4, 3], 1),
sample([2, 1, 4, 4], 1),
sample([2, 1, 4, 5], 1),
sample([2, 1, 5, 1], 1),
sample([2, 1, 5, 2], 1),
sample([2, 1, 5, 3], 1),
sample([2, 1, 5, 4], 1),
sample([2, 1, 5, 5], 1),
sample([2, 2, 1, 1], 0),
sample([2, 2, 1, 2], 0),
sample([2, 2, 1, 3], 0),
sample([2, 2, 1, 5], 1),
sample([2, 2, 2, 1], 0),
sample([2, 2, 2, 3], 1),
sample([2, 2, 2, 4], 1),
sample([2, 2, 2, 5], 1),
sample([2, 2, 3, 1], 0),
sample([2, 2, 3, 2], 1),
sample([2, 2, 3, 3], 1),
sample([2, 2, 3, 4], 1),
sample([2, 2, 3, 5], 1),
sample([2, 2, 4, 2], 1),
sample([2, 2, 4, 3], 1),
sample([2, 2, 4, 4], 1),
sample([2, 2, 4, 5], 1),
sample([2, 2, 5, 1], 1),
sample([2, 2, 5, 2], 1),
sample([2, 2, 5, 3], 1),
sample([2, 2, 5, 4], 1),
sample([2, 2, 5, 5], 1),
sample([2, 3, 1, 1], 0),
sample([2, 3, 1, 2], 0),
sample([2, 3, 1, 3], 0),
sample([2, 3, 1, 4], 0),
sample([2, 3, 1, 5], 0),
sample([2, 3, 2, 1], 0),
sample([2, 3, 2, 2], 0),
sample([2, 3, 2, 4], 1),
sample([2, 3, 2, 5], 1),
sample([2, 3, 3, 1], 0),
sample([2, 3, 3, 3], 1),
sample([2, 3, 3, 4], 1),
sample([2, 3, 3, 5], 1),
sample([2, 3, 4, 1], 0),
sample([2, 3, 4, 2], 1),
sample([2, 3, 4, 3], 1),
sample([2, 3, 4, 4], 1),
sample([2, 3, 4, 5], 1),
sample([2, 3, 5, 1], 0),
sample([2, 3, 5, 2], 1),
sample([2, 3, 5, 3], 1),
sample([2, 3, 5, 4], 1),
sample([2, 3, 5, 5], 1),
sample([2, 4, 1, 1], 0),
sample([2, 4, 1, 2], 0),
sample([2, 4, 1, 3], 0),
sample([2, 4, 1, 4], 0),
sample([2, 4, 1, 5], 0),
sample([2, 4, 2, 1], 0),
sample([2, 4, 2, 2], 0),
sample([2, 4, 2, 3], 0),
sample([2, 4, 2, 5], 1),
sample([2, 4, 3, 1], 0),
sample([2, 4, 3, 2], 0),
sample([2, 4, 3, 3], 1),
sample([2, 4, 3, 4], 1),
sample([2, 4, 3, 5], 1),
sample([2, 4, 4, 1], 0),
sample([2, 4, 4, 3], 1),
sample([2, 4, 4, 4], 1),
sample([2, 4, 4, 5], 1),
sample([2, 4, 5, 1], 0),
sample([2, 4, 5, 2], 1),
sample([2, 4, 5, 3], 1),
sample([2, 4, 5, 4], 1),
sample([2, 4, 5, 5], 1),
sample([2, 5, 1, 1], 0),
sample([2, 5, 1, 2], 0),
sample([2, 5, 1, 3], 0),
sample([2, 5, 1, 4], 0),
sample([2, 5, 1, 5], 0),
sample([2, 5, 2, 1], 0),
sample([2, 5, 2, 2], 0),
sample([2, 5, 2, 3], 0),
sample([2, 5, 2, 4], 0),
sample([2, 5, 3, 1], 0),
sample([2, 5, 3, 2], 0),
sample([2, 5, 3, 3], 0),
sample([2, 5, 3, 4], 1),
sample([2, 5, 3, 5], 1),
sample([2, 5, 4, 1], 0),
sample([2, 5, 4, 2], 0),
sample([2, 5, 4, 3], 1),
sample([2, 5, 4, 4], 1),
sample([2, 5, 4, 5], 1),
sample([2, 5, 5, 1], 0),
sample([2, 5, 5, 3], 1),
sample([2, 5, 5, 4], 1),
sample([2, 5, 5, 5], 1),
sample([3, 1, 1, 1], 0),
sample([3, 1, 1, 2], 0),
sample([3, 1, 1, 4], 1),
sample([3, 1, 1, 5], 1),
sample([3, 1, 2, 1], 0),
sample([3, 1, 2, 2], 1),
sample([3, 1, 2, 3], 1),
sample([3, 1, 2, 4], 1),
sample([3, 1, 2, 5], 1),
sample([3, 1, 3, 2], 1),
sample([3, 1, 3, 3], 1),
sample([3, 1, 3, 4], 1),
sample([3, 1, 3, 5], 1),
sample([3, 1, 4, 1], 1),
sample([3, 1, 4, 2], 1),
sample([3, 1, 4, 3], 1),
sample([3, 1, 4, 4], 1),
sample([3, 1, 4, 5], 1),
sample([3, 1, 5, 1], 1),
sample([3, 1, 5, 2], 1),
sample([3, 1, 5, 3], 1),
sample([3, 1, 5, 4], 1),
sample([3, 1, 5, 5], 1),
sample([3, 2, 1, 1], 0),
sample([3, 2, 1, 2], 0),
sample([3, 2, 1, 3], 0),
sample([3, 2, 1, 4], 0),
sample([3, 2, 1, 5], 0),
sample([3, 2, 2, 1], 0),
sample([3, 2, 2, 2], 0),
sample([3, 2, 2, 4], 1),
sample([3, 2, 2, 5], 1),
sample([3, 2, 3, 1], 0),
sample([3, 2, 3, 3], 1),
sample([3, 2, 3, 4], 1),
sample([3, 2, 3, 5], 1),
sample([3, 2, 4, 1], 0),
sample([3, 2, 4, 2], 1),
sample([3, 2, 4, 3], 1),
sample([3, 2, 4, 4], 1),
sample([3, 2, 4, 5], 1),
sample([3, 2, 5, 1], 0),
sample([3, 2, 5, 2], 1),
sample([3, 2, 5, 3], 1),
sample([3, 2, 5, 4], 1),
sample([3, 2, 5, 5], 1),
sample([3, 3, 1, 1], 0),
sample([3, 3, 1, 2], 0),
sample([3, 3, 1, 3], 0),
sample([3, 3, 1, 4], 0),
sample([3, 3, 1, 5], 0),
sample([3, 3, 2, 1], 0),
sample([3, 3, 2, 2], 0),
sample([3, 3, 2, 3], 0),
sample([3, 3, 2, 4], 0),
sample([3, 3, 2, 5], 1),
sample([3, 3, 3, 1], 0),
sample([3, 3, 3, 2], 0),
sample([3, 3, 3, 4], 1),
sample([3, 3, 3, 5], 1),
sample([3, 3, 4, 1], 0),
sample([3, 3, 4, 2], 0),
sample([3, 3, 4, 3], 1),
sample([3, 3, 4, 4], 1),
sample([3, 3, 4, 5], 1),
sample([3, 3, 5, 1], 0),
sample([3, 3, 5, 2], 1),
sample([3, 3, 5, 3], 1),
sample([3, 3, 5, 4], 1),
sample([3, 3, 5, 5], 1),
sample([3, 4, 1, 1], 0),
sample([3, 4, 1, 2], 0),
sample([3, 4, 1, 3], 0),
sample([3, 4, 1, 4], 0),
sample([3, 4, 1, 5], 0),
sample([3, 4, 2, 1], 0),
sample([3, 4, 2, 2], 0),
sample([3, 4, 2, 3], 0),
sample([3, 4, 2, 4], 0),
sample([3, 4, 2, 5], 0),
sample([3, 4, 3, 1], 0),
sample([3, 4, 3, 2], 0),
sample([3, 4, 3, 3], 0),
sample([3, 4, 3, 5], 1),
sample([3, 4, 4, 1], 0),
sample([3, 4, 4, 2], 0),
sample([3, 4, 4, 4], 1),
sample([3, 4, 4, 5], 1),
sample([3, 4, 5, 1], 0),
sample([3, 4, 5, 2], 0),
sample([3, 4, 5, 3], 1),
sample([3, 4, 5, 4], 1),
sample([3, 4, 5, 5], 1),
sample([3, 5, 1, 1], 0),
sample([3, 5, 1, 2], 0),
sample([3, 5, 1, 3], 0),
sample([3, 5, 1, 4], 0),
sample([3, 5, 1, 5], 0),
sample([3, 5, 2, 1], 0),
sample([3, 5, 2, 2], 0),
sample([3, 5, 2, 3], 0),
sample([3, 5, 2, 4], 0),
sample([3, 5, 2, 5], 0),
sample([3, 5, 3, 1], 0),
sample([3, 5, 3, 2], 0),
sample([3, 5, 3, 3], 0),
sample([3, 5, 3, 4], 0),
sample([3, 5, 4, 1], 0),
sample([3, 5, 4, 2], 0),
sample([3, 5, 4, 3], 0),
sample([3, 5, 4, 4], 1),
sample([3, 5, 4, 5], 1),
sample([3, 5, 5, 1], 0),
sample([3, 5, 5, 2], 0),
sample([3, 5, 5, 4], 1),
sample([3, 5, 5, 5], 1),
sample([4, 1, 1, 1], 0),
sample([4, 1, 1, 2], 0),
sample([4, 1, 1, 3], 0),
sample([4, 1, 1, 5], 1),
sample([4, 1, 2, 1], 0),
sample([4, 1, 2, 3], 1),
sample([4, 1, 2, 4], 1),
sample([4, 1, 2, 5], 1),
sample([4, 1, 3, 1], 0),
sample([4, 1, 3, 2], 1),
sample([4, 1, 3, 3], 1),
sample([4, 1, 3, 4], 1),
sample([4, 1, 3, 5], 1),
sample([4, 1, 4, 2], 1),
sample([4, 1, 4, 3], 1),
sample([4, 1, 4, 4], 1),
sample([4, 1, 4, 5], 1),
sample([4, 1, 5, 1], 1),
sample([4, 1, 5, 2], 1),
sample([4, 1, 5, 3], 1),
sample([4, 1, 5, 4], 1),
sample([4, 1, 5, 5], 1),
sample([4, 2, 1, 1], 0),
sample([4, 2, 1, 2], 0),
sample([4, 2, 1, 3], 0),
sample([4, 2, 1, 4], 0),
sample([4, 2, 1, 5], 0),
sample([4, 2, 2, 1], 0),
sample([4, 2, 2, 2], 0),
sample([4, 2, 2, 3], 0),
sample([4, 2, 2, 5], 1),
sample([4, 2, 3, 1], 0),
sample([4, 2, 3, 2], 0),
sample([4, 2, 3, 3], 1),
sample([4, 2, 3, 4], 1),
sample([4, 2, 3, 5], 1),
sample([4, 2, 4, 1], 0),
sample([4, 2, 4, 3], 1),
sample([4, 2, 4, 4], 1),
sample([4, 2, 4, 5], 1),
sample([4, 2, 5, 1], 0),
sample([4, 2, 5, 2], 1),
sample([4, 2, 5, 3], 1),
sample([4, 2, 5, 4], 1),
sample([4, 2, 5, 5], 1),
sample([4, 3, 1, 1], 0),
sample([4, 3, 1, 2], 0),
sample([4, 3, 1, 3], 0),
sample([4, 3, 1, 4], 0),
sample([4, 3, 1, 5], 0),
sample([4, 3, 2, 1], 0),
sample([4, 3, 2, 2], 0),
sample([4, 3, 2, 3], 0),
sample([4, 3, 2, 4], 0),
sample([4, 3, 2, 5], 0),
sample([4, 3, 3, 1], 0),
sample([4, 3, 3, 2], 0),
sample([4, 3, 3, 3], 0),
sample([4, 3, 3, 5], 1),
sample([4, 3, 4, 1], 0),
sample([4, 3, 4, 2], 0),
sample([4, 3, 4, 4], 1),
sample([4, 3, 4, 5], 1),
sample([4, 3, 5, 1], 0),
sample([4, 3, 5, 2], 0),
sample([4, 3, 5, 3], 1),
sample([4, 3, 5, 4], 1),
sample([4, 3, 5, 5], 1),
sample([4, 4, 1, 1], 0),
sample([4, 4, 1, 2], 0),
sample([4, 4, 1, 3], 0),
sample([4, 4, 1, 4], 0),
sample([4, 4, 1, 5], 0),
sample([4, 4, 2, 1], 0),
sample([4, 4, 2, 2], 0),
sample([4, 4, 2, 3], 0),
sample([4, 4, 2, 4], 0),
sample([4, 4, 2, 5], 0),
sample([4, 4, 3, 1], 0),
sample([4, 4, 3, 2], 0),
sample([4, 4, 3, 3], 0),
sample([4, 4, 3, 4], 0),
sample([4, 4, 3, 5], 0),
sample([4, 4, 4, 1], 0),
sample([4, 4, 4, 2], 0),
sample([4, 4, 4, 3], 0),
sample([4, 4, 4, 5], 1),
sample([4, 4, 5, 1], 0),
sample([4, 4, 5, 2], 0),
sample([4, 4, 5, 3], 0),
sample([4, 4, 5, 4], 1),
sample([4, 4, 5, 5], 1),
sample([4, 5, 1, 1], 0),
sample([4, 5, 1, 2], 0),
sample([4, 5, 1, 3], 0),
sample([4, 5, 1, 4], 0),
sample([4, 5, 1, 5], 0),
sample([4, 5, 2, 1], 0),
sample([4, 5, 2, 2], 0),
sample([4, 5, 2, 3], 0),
sample([4, 5, 2, 4], 0),
sample([4, 5, 2, 5], 0),
sample([4, 5, 3, 1], 0),
sample([4, 5, 3, 2], 0),
sample([4, 5, 3, 3], 0),
sample([4, 5, 3, 4], 0),
sample([4, 5, 3, 5], 0),
sample([4, 5, 4, 1], 0),
sample([4, 5, 4, 2], 0),
sample([4, 5, 4, 3], 0),
sample([4, 5, 4, 4], 0),
sample([4, 5, 5, 1], 0),
sample([4, 5, 5, 2], 0),
sample([4, 5, 5, 3], 0),
sample([4, 5, 5, 5], 1),
sample([5, 1, 1, 1], 0),
sample([5, 1, 1, 2], 0),
sample([5, 1, 1, 3], 0),
sample([5, 1, 1, 4], 0),
sample([5, 1, 2, 1], 0),
sample([5, 1, 2, 2], 0),
sample([5, 1, 2, 3], 1),
sample([5, 1, 2, 4], 1),
sample([5, 1, 2, 5], 1),
sample([5, 1, 3, 1], 0),
sample([5, 1, 3, 2], 1),
sample([5, 1, 3, 3], 1),
sample([5, 1, 3, 4], 1),
sample([5, 1, 3, 5], 1),
sample([5, 1, 4, 1], 0),
sample([5, 1, 4, 2], 1),
sample([5, 1, 4, 3], 1),
sample([5, 1, 4, 4], 1),
sample([5, 1, 4, 5], 1),
sample([5, 1, 5, 2], 1),
sample([5, 1, 5, 3], 1),
sample([5, 1, 5, 4], 1),
sample([5, 1, 5, 5], 1),
sample([5, 2, 1, 1], 0),
sample([5, 2, 1, 2], 0),
sample([5, 2, 1, 3], 0),
sample([5, 2, 1, 4], 0),
sample([5, 2, 1, 5], 0),
sample([5, 2, 2, 1], 0),
sample([5, 2, 2, 2], 0),
sample([5, 2, 2, 3], 0),
sample([5, 2, 2, 4], 0),
sample([5, 2, 3, 1], 0),
sample([5, 2, 3, 2], 0),
sample([5, 2, 3, 3], 0),
sample([5, 2, 3, 4], 1),
sample([5, 2, 3, 5], 1),
sample([5, 2, 4, 1], 0),
sample([5, 2, 4, 2], 0),
sample([5, 2, 4, 3], 1),
sample([5, 2, 4, 4], 1),
sample([5, 2, 4, 5], 1),
sample([5, 2, 5, 1], 0),
sample([5, 2, 5, 3], 1),
sample([5, 2, 5, 4], 1),
sample([5, 2, 5, 5], 1),
sample([5, 3, 1, 1], 0),
sample([5, 3, 1, 2], 0),
sample([5, 3, 1, 3], 0),
sample([5, 3, 1, 4], 0),
sample([5, 3, 1, 5], 0),
sample([5, 3, 2, 1], 0),
sample([5, 3, 2, 2], 0),
sample([5, 3, 2, 3], 0),
sample([5, 3, 2, 4], 0),
sample([5, 3, 2, 5], 0),
sample([5, 3, 3, 1], 0),
sample([5, 3, 3, 2], 0),
sample([5, 3, 3, 3], 0),
sample([5, 3, 3, 4], 0),
sample([5, 3, 4, 1], 0),
sample([5, 3, 4, 2], 0),
sample([5, 3, 4, 3], 0),
sample([5, 3, 4, 4], 1),
sample([5, 3, 4, 5], 1),
sample([5, 3, 5, 1], 0),
sample([5, 3, 5, 2], 0),
sample([5, 3, 5, 4], 1),
sample([5, 3, 5, 5], 1),
sample([5, 4, 1, 1], 0),
sample([5, 4, 1, 2], 0),
sample([5, 4, 1, 3], 0),
sample([5, 4, 1, 4], 0),
sample([5, 4, 1, 5], 0),
sample([5, 4, 2, 1], 0),
sample([5, 4, 2, 2], 0),
sample([5, 4, 2, 3], 0),
sample([5, 4, 2, 4], 0),
sample([5, 4, 2, 5], 0),
sample([5, 4, 3, 1], 0),
sample([5, 4, 3, 2], 0),
sample([5, 4, 3, 3], 0),
sample([5, 4, 3, 4], 0),
sample([5, 4, 3, 5], 0),
sample([5, 4, 4, 1], 0),
sample([5, 4, 4, 2], 0),
sample([5, 4, 4, 3], 0),
sample([5, 4, 4, 4], 0),
sample([5, 4, 5, 1], 0),
sample([5, 4, 5, 2], 0),
sample([5, 4, 5, 3], 0),
sample([5, 4, 5, 5], 1),
sample([5, 5, 1, 1], 0),
sample([5, 5, 1, 2], 0),
sample([5, 5, 1, 3], 0),
sample([5, 5, 1, 4], 0),
sample([5, 5, 1, 5], 0),
sample([5, 5, 2, 1], 0),
sample([5, 5, 2, 2], 0),
sample([5, 5, 2, 3], 0),
sample([5, 5, 2, 4], 0),
sample([5, 5, 2, 5], 0),
sample([5, 5, 3, 1], 0),
sample([5, 5, 3, 2], 0),
sample([5, 5, 3, 3], 0),
sample([5, 5, 3, 4], 0),
sample([5, 5, 3, 5], 0),
sample([5, 5, 4, 1], 0),
sample([5, 5, 4, 2], 0),
sample([5, 5, 4, 3], 0),
sample([5, 5, 4, 4], 0),
sample([5, 5, 4, 5], 0),
sample([5, 5, 5, 1], 0),
sample([5, 5, 5, 2], 0),
sample([5, 5, 5, 3], 0),
sample([5, 5, 5, 4], 0),
continuous=[True, True, True, True]
)
async def main():
    """Train a two-tree forest of depth 4 on `samples`, reveal it, and print it.

    Runs inside the MPyC runtime context so the secret-shared training and the
    final `output` (revealing the trees) are coordinated across parties.
    """
    async with mpc:
        secret_forest = await train_forest(samples, amount=2, depth=4)
        forest = await output(secret_forest)
        for tree_number, tree in enumerate(forest):
            print(f"Tree #{tree_number}")
            tree.pretty_print()


if __name__ == '__main__':
    mpc.run(main())
| 26.669739 | 77 | 0.388722 | from mpyc.runtime import mpc
from src.dataset import ObliviousDataset, Sample
from src.output import output
from src.secint import secint as s
from src.forest import train_forest
def sample(ins, out):
    """Wrap a row of plain ints as a Sample of secret-shared values.

    `ins` holds the feature values and `out` the class label; every value is
    lifted into a secure integer via `s` before being stored.
    """
    secret_features = [s(value) for value in ins]
    secret_label = s(out)
    return Sample(secret_features, secret_label)
samples = ObliviousDataset.create(
sample([1, 1, 1, 2], 1),
sample([1, 1, 1, 3], 1),
sample([1, 1, 1, 4], 1),
sample([1, 1, 1, 5], 1),
sample([1, 1, 2, 1], 1),
sample([1, 1, 2, 2], 1),
sample([1, 1, 2, 3], 1),
sample([1, 1, 2, 4], 1),
sample([1, 1, 2, 5], 1),
sample([1, 1, 3, 1], 1),
sample([1, 1, 3, 2], 1),
sample([1, 1, 3, 3], 1),
sample([1, 1, 3, 4], 1),
sample([1, 1, 3, 5], 1),
sample([1, 1, 4, 1], 1),
sample([1, 1, 4, 2], 1),
sample([1, 1, 4, 3], 1),
sample([1, 1, 4, 4], 1),
sample([1, 1, 4, 5], 1),
sample([1, 1, 5, 1], 1),
sample([1, 1, 5, 2], 1),
sample([1, 1, 5, 3], 1),
sample([1, 1, 5, 4], 1),
sample([1, 1, 5, 5], 1),
sample([1, 2, 1, 1], 0),
sample([1, 2, 1, 3], 1),
sample([1, 2, 1, 4], 1),
sample([1, 2, 1, 5], 1),
sample([1, 2, 2, 2], 1),
sample([1, 2, 2, 3], 1),
sample([1, 2, 2, 4], 1),
sample([1, 2, 2, 5], 1),
sample([1, 2, 3, 1], 1),
sample([1, 2, 3, 2], 1),
sample([1, 2, 3, 3], 1),
sample([1, 2, 3, 4], 1),
sample([1, 2, 3, 5], 1),
sample([1, 2, 4, 1], 1),
sample([1, 2, 4, 2], 1),
sample([1, 2, 4, 3], 1),
sample([1, 2, 4, 4], 1),
sample([1, 2, 4, 5], 1),
sample([1, 2, 5, 1], 1),
sample([1, 2, 5, 2], 1),
sample([1, 2, 5, 3], 1),
sample([1, 2, 5, 4], 1),
sample([1, 2, 5, 5], 1),
sample([1, 3, 1, 1], 0),
sample([1, 3, 1, 2], 0),
sample([1, 3, 1, 4], 1),
sample([1, 3, 1, 5], 1),
sample([1, 3, 2, 1], 0),
sample([1, 3, 2, 2], 1),
sample([1, 3, 2, 3], 1),
sample([1, 3, 2, 4], 1),
sample([1, 3, 2, 5], 1),
sample([1, 3, 3, 2], 1),
sample([1, 3, 3, 3], 1),
sample([1, 3, 3, 4], 1),
sample([1, 3, 3, 5], 1),
sample([1, 3, 4, 1], 1),
sample([1, 3, 4, 2], 1),
sample([1, 3, 4, 3], 1),
sample([1, 3, 4, 4], 1),
sample([1, 3, 4, 5], 1),
sample([1, 3, 5, 1], 1),
sample([1, 3, 5, 2], 1),
sample([1, 3, 5, 3], 1),
sample([1, 3, 5, 4], 1),
sample([1, 3, 5, 5], 1),
sample([1, 4, 1, 1], 0),
sample([1, 4, 1, 2], 0),
sample([1, 4, 1, 3], 0),
sample([1, 4, 1, 5], 1),
sample([1, 4, 2, 1], 0),
sample([1, 4, 2, 3], 1),
sample([1, 4, 2, 4], 1),
sample([1, 4, 2, 5], 1),
sample([1, 4, 3, 1], 0),
sample([1, 4, 3, 2], 1),
sample([1, 4, 3, 3], 1),
sample([1, 4, 3, 4], 1),
sample([1, 4, 3, 5], 1),
sample([1, 4, 4, 2], 1),
sample([1, 4, 4, 3], 1),
sample([1, 4, 4, 4], 1),
sample([1, 4, 4, 5], 1),
sample([1, 4, 5, 1], 1),
sample([1, 4, 5, 2], 1),
sample([1, 4, 5, 3], 1),
sample([1, 4, 5, 4], 1),
sample([1, 4, 5, 5], 1),
sample([1, 5, 1, 1], 0),
sample([1, 5, 1, 2], 0),
sample([1, 5, 1, 3], 0),
sample([1, 5, 1, 4], 0),
sample([1, 5, 2, 1], 0),
sample([1, 5, 2, 2], 0),
sample([1, 5, 2, 3], 1),
sample([1, 5, 2, 4], 1),
sample([1, 5, 2, 5], 1),
sample([1, 5, 3, 1], 0),
sample([1, 5, 3, 2], 1),
sample([1, 5, 3, 3], 1),
sample([1, 5, 3, 4], 1),
sample([1, 5, 3, 5], 1),
sample([1, 5, 4, 1], 0),
sample([1, 5, 4, 2], 1),
sample([1, 5, 4, 3], 1),
sample([1, 5, 4, 4], 1),
sample([1, 5, 4, 5], 1),
sample([1, 5, 5, 2], 1),
sample([1, 5, 5, 3], 1),
sample([1, 5, 5, 4], 1),
sample([1, 5, 5, 5], 1),
sample([2, 1, 1, 1], 0),
sample([2, 1, 1, 3], 1),
sample([2, 1, 1, 4], 1),
sample([2, 1, 1, 5], 1),
sample([2, 1, 2, 2], 1),
sample([2, 1, 2, 3], 1),
sample([2, 1, 2, 4], 1),
sample([2, 1, 2, 5], 1),
sample([2, 1, 3, 1], 1),
sample([2, 1, 3, 2], 1),
sample([2, 1, 3, 3], 1),
sample([2, 1, 3, 4], 1),
sample([2, 1, 3, 5], 1),
sample([2, 1, 4, 1], 1),
sample([2, 1, 4, 2], 1),
sample([2, 1, 4, 3], 1),
sample([2, 1, 4, 4], 1),
sample([2, 1, 4, 5], 1),
sample([2, 1, 5, 1], 1),
sample([2, 1, 5, 2], 1),
sample([2, 1, 5, 3], 1),
sample([2, 1, 5, 4], 1),
sample([2, 1, 5, 5], 1),
sample([2, 2, 1, 1], 0),
sample([2, 2, 1, 2], 0),
sample([2, 2, 1, 3], 0),
sample([2, 2, 1, 5], 1),
sample([2, 2, 2, 1], 0),
sample([2, 2, 2, 3], 1),
sample([2, 2, 2, 4], 1),
sample([2, 2, 2, 5], 1),
sample([2, 2, 3, 1], 0),
sample([2, 2, 3, 2], 1),
sample([2, 2, 3, 3], 1),
sample([2, 2, 3, 4], 1),
sample([2, 2, 3, 5], 1),
sample([2, 2, 4, 2], 1),
sample([2, 2, 4, 3], 1),
sample([2, 2, 4, 4], 1),
sample([2, 2, 4, 5], 1),
sample([2, 2, 5, 1], 1),
sample([2, 2, 5, 2], 1),
sample([2, 2, 5, 3], 1),
sample([2, 2, 5, 4], 1),
sample([2, 2, 5, 5], 1),
sample([2, 3, 1, 1], 0),
sample([2, 3, 1, 2], 0),
sample([2, 3, 1, 3], 0),
sample([2, 3, 1, 4], 0),
sample([2, 3, 1, 5], 0),
sample([2, 3, 2, 1], 0),
sample([2, 3, 2, 2], 0),
sample([2, 3, 2, 4], 1),
sample([2, 3, 2, 5], 1),
sample([2, 3, 3, 1], 0),
sample([2, 3, 3, 3], 1),
sample([2, 3, 3, 4], 1),
sample([2, 3, 3, 5], 1),
sample([2, 3, 4, 1], 0),
sample([2, 3, 4, 2], 1),
sample([2, 3, 4, 3], 1),
sample([2, 3, 4, 4], 1),
sample([2, 3, 4, 5], 1),
sample([2, 3, 5, 1], 0),
sample([2, 3, 5, 2], 1),
sample([2, 3, 5, 3], 1),
sample([2, 3, 5, 4], 1),
sample([2, 3, 5, 5], 1),
sample([2, 4, 1, 1], 0),
sample([2, 4, 1, 2], 0),
sample([2, 4, 1, 3], 0),
sample([2, 4, 1, 4], 0),
sample([2, 4, 1, 5], 0),
sample([2, 4, 2, 1], 0),
sample([2, 4, 2, 2], 0),
sample([2, 4, 2, 3], 0),
sample([2, 4, 2, 5], 1),
sample([2, 4, 3, 1], 0),
sample([2, 4, 3, 2], 0),
sample([2, 4, 3, 3], 1),
sample([2, 4, 3, 4], 1),
sample([2, 4, 3, 5], 1),
sample([2, 4, 4, 1], 0),
sample([2, 4, 4, 3], 1),
sample([2, 4, 4, 4], 1),
sample([2, 4, 4, 5], 1),
sample([2, 4, 5, 1], 0),
sample([2, 4, 5, 2], 1),
sample([2, 4, 5, 3], 1),
sample([2, 4, 5, 4], 1),
sample([2, 4, 5, 5], 1),
sample([2, 5, 1, 1], 0),
sample([2, 5, 1, 2], 0),
sample([2, 5, 1, 3], 0),
sample([2, 5, 1, 4], 0),
sample([2, 5, 1, 5], 0),
sample([2, 5, 2, 1], 0),
sample([2, 5, 2, 2], 0),
sample([2, 5, 2, 3], 0),
sample([2, 5, 2, 4], 0),
sample([2, 5, 3, 1], 0),
sample([2, 5, 3, 2], 0),
sample([2, 5, 3, 3], 0),
sample([2, 5, 3, 4], 1),
sample([2, 5, 3, 5], 1),
sample([2, 5, 4, 1], 0),
sample([2, 5, 4, 2], 0),
sample([2, 5, 4, 3], 1),
sample([2, 5, 4, 4], 1),
sample([2, 5, 4, 5], 1),
sample([2, 5, 5, 1], 0),
sample([2, 5, 5, 3], 1),
sample([2, 5, 5, 4], 1),
sample([2, 5, 5, 5], 1),
sample([3, 1, 1, 1], 0),
sample([3, 1, 1, 2], 0),
sample([3, 1, 1, 4], 1),
sample([3, 1, 1, 5], 1),
sample([3, 1, 2, 1], 0),
sample([3, 1, 2, 2], 1),
sample([3, 1, 2, 3], 1),
sample([3, 1, 2, 4], 1),
sample([3, 1, 2, 5], 1),
sample([3, 1, 3, 2], 1),
sample([3, 1, 3, 3], 1),
sample([3, 1, 3, 4], 1),
sample([3, 1, 3, 5], 1),
sample([3, 1, 4, 1], 1),
sample([3, 1, 4, 2], 1),
sample([3, 1, 4, 3], 1),
sample([3, 1, 4, 4], 1),
sample([3, 1, 4, 5], 1),
sample([3, 1, 5, 1], 1),
sample([3, 1, 5, 2], 1),
sample([3, 1, 5, 3], 1),
sample([3, 1, 5, 4], 1),
sample([3, 1, 5, 5], 1),
sample([3, 2, 1, 1], 0),
sample([3, 2, 1, 2], 0),
sample([3, 2, 1, 3], 0),
sample([3, 2, 1, 4], 0),
sample([3, 2, 1, 5], 0),
sample([3, 2, 2, 1], 0),
sample([3, 2, 2, 2], 0),
sample([3, 2, 2, 4], 1),
sample([3, 2, 2, 5], 1),
sample([3, 2, 3, 1], 0),
sample([3, 2, 3, 3], 1),
sample([3, 2, 3, 4], 1),
sample([3, 2, 3, 5], 1),
sample([3, 2, 4, 1], 0),
sample([3, 2, 4, 2], 1),
sample([3, 2, 4, 3], 1),
sample([3, 2, 4, 4], 1),
sample([3, 2, 4, 5], 1),
sample([3, 2, 5, 1], 0),
sample([3, 2, 5, 2], 1),
sample([3, 2, 5, 3], 1),
sample([3, 2, 5, 4], 1),
sample([3, 2, 5, 5], 1),
sample([3, 3, 1, 1], 0),
sample([3, 3, 1, 2], 0),
sample([3, 3, 1, 3], 0),
sample([3, 3, 1, 4], 0),
sample([3, 3, 1, 5], 0),
sample([3, 3, 2, 1], 0),
sample([3, 3, 2, 2], 0),
sample([3, 3, 2, 3], 0),
sample([3, 3, 2, 4], 0),
sample([3, 3, 2, 5], 1),
sample([3, 3, 3, 1], 0),
sample([3, 3, 3, 2], 0),
sample([3, 3, 3, 4], 1),
sample([3, 3, 3, 5], 1),
sample([3, 3, 4, 1], 0),
sample([3, 3, 4, 2], 0),
sample([3, 3, 4, 3], 1),
sample([3, 3, 4, 4], 1),
sample([3, 3, 4, 5], 1),
sample([3, 3, 5, 1], 0),
sample([3, 3, 5, 2], 1),
sample([3, 3, 5, 3], 1),
sample([3, 3, 5, 4], 1),
sample([3, 3, 5, 5], 1),
sample([3, 4, 1, 1], 0),
sample([3, 4, 1, 2], 0),
sample([3, 4, 1, 3], 0),
sample([3, 4, 1, 4], 0),
sample([3, 4, 1, 5], 0),
sample([3, 4, 2, 1], 0),
sample([3, 4, 2, 2], 0),
sample([3, 4, 2, 3], 0),
sample([3, 4, 2, 4], 0),
sample([3, 4, 2, 5], 0),
sample([3, 4, 3, 1], 0),
sample([3, 4, 3, 2], 0),
sample([3, 4, 3, 3], 0),
sample([3, 4, 3, 5], 1),
sample([3, 4, 4, 1], 0),
sample([3, 4, 4, 2], 0),
sample([3, 4, 4, 4], 1),
sample([3, 4, 4, 5], 1),
sample([3, 4, 5, 1], 0),
sample([3, 4, 5, 2], 0),
sample([3, 4, 5, 3], 1),
sample([3, 4, 5, 4], 1),
sample([3, 4, 5, 5], 1),
sample([3, 5, 1, 1], 0),
sample([3, 5, 1, 2], 0),
sample([3, 5, 1, 3], 0),
sample([3, 5, 1, 4], 0),
sample([3, 5, 1, 5], 0),
sample([3, 5, 2, 1], 0),
sample([3, 5, 2, 2], 0),
sample([3, 5, 2, 3], 0),
sample([3, 5, 2, 4], 0),
sample([3, 5, 2, 5], 0),
sample([3, 5, 3, 1], 0),
sample([3, 5, 3, 2], 0),
sample([3, 5, 3, 3], 0),
sample([3, 5, 3, 4], 0),
sample([3, 5, 4, 1], 0),
sample([3, 5, 4, 2], 0),
sample([3, 5, 4, 3], 0),
sample([3, 5, 4, 4], 1),
sample([3, 5, 4, 5], 1),
sample([3, 5, 5, 1], 0),
sample([3, 5, 5, 2], 0),
sample([3, 5, 5, 4], 1),
sample([3, 5, 5, 5], 1),
sample([4, 1, 1, 1], 0),
sample([4, 1, 1, 2], 0),
sample([4, 1, 1, 3], 0),
sample([4, 1, 1, 5], 1),
sample([4, 1, 2, 1], 0),
sample([4, 1, 2, 3], 1),
sample([4, 1, 2, 4], 1),
sample([4, 1, 2, 5], 1),
sample([4, 1, 3, 1], 0),
sample([4, 1, 3, 2], 1),
sample([4, 1, 3, 3], 1),
sample([4, 1, 3, 4], 1),
sample([4, 1, 3, 5], 1),
sample([4, 1, 4, 2], 1),
sample([4, 1, 4, 3], 1),
sample([4, 1, 4, 4], 1),
sample([4, 1, 4, 5], 1),
sample([4, 1, 5, 1], 1),
sample([4, 1, 5, 2], 1),
sample([4, 1, 5, 3], 1),
sample([4, 1, 5, 4], 1),
sample([4, 1, 5, 5], 1),
sample([4, 2, 1, 1], 0),
sample([4, 2, 1, 2], 0),
sample([4, 2, 1, 3], 0),
sample([4, 2, 1, 4], 0),
sample([4, 2, 1, 5], 0),
sample([4, 2, 2, 1], 0),
sample([4, 2, 2, 2], 0),
sample([4, 2, 2, 3], 0),
sample([4, 2, 2, 5], 1),
sample([4, 2, 3, 1], 0),
sample([4, 2, 3, 2], 0),
sample([4, 2, 3, 3], 1),
sample([4, 2, 3, 4], 1),
sample([4, 2, 3, 5], 1),
sample([4, 2, 4, 1], 0),
sample([4, 2, 4, 3], 1),
sample([4, 2, 4, 4], 1),
sample([4, 2, 4, 5], 1),
sample([4, 2, 5, 1], 0),
sample([4, 2, 5, 2], 1),
sample([4, 2, 5, 3], 1),
sample([4, 2, 5, 4], 1),
sample([4, 2, 5, 5], 1),
sample([4, 3, 1, 1], 0),
sample([4, 3, 1, 2], 0),
sample([4, 3, 1, 3], 0),
sample([4, 3, 1, 4], 0),
sample([4, 3, 1, 5], 0),
sample([4, 3, 2, 1], 0),
sample([4, 3, 2, 2], 0),
sample([4, 3, 2, 3], 0),
sample([4, 3, 2, 4], 0),
sample([4, 3, 2, 5], 0),
sample([4, 3, 3, 1], 0),
sample([4, 3, 3, 2], 0),
sample([4, 3, 3, 3], 0),
sample([4, 3, 3, 5], 1),
sample([4, 3, 4, 1], 0),
sample([4, 3, 4, 2], 0),
sample([4, 3, 4, 4], 1),
sample([4, 3, 4, 5], 1),
sample([4, 3, 5, 1], 0),
sample([4, 3, 5, 2], 0),
sample([4, 3, 5, 3], 1),
sample([4, 3, 5, 4], 1),
sample([4, 3, 5, 5], 1),
sample([4, 4, 1, 1], 0),
sample([4, 4, 1, 2], 0),
sample([4, 4, 1, 3], 0),
sample([4, 4, 1, 4], 0),
sample([4, 4, 1, 5], 0),
sample([4, 4, 2, 1], 0),
sample([4, 4, 2, 2], 0),
sample([4, 4, 2, 3], 0),
sample([4, 4, 2, 4], 0),
sample([4, 4, 2, 5], 0),
sample([4, 4, 3, 1], 0),
sample([4, 4, 3, 2], 0),
sample([4, 4, 3, 3], 0),
sample([4, 4, 3, 4], 0),
sample([4, 4, 3, 5], 0),
sample([4, 4, 4, 1], 0),
sample([4, 4, 4, 2], 0),
sample([4, 4, 4, 3], 0),
sample([4, 4, 4, 5], 1),
sample([4, 4, 5, 1], 0),
sample([4, 4, 5, 2], 0),
sample([4, 4, 5, 3], 0),
sample([4, 4, 5, 4], 1),
sample([4, 4, 5, 5], 1),
sample([4, 5, 1, 1], 0),
sample([4, 5, 1, 2], 0),
sample([4, 5, 1, 3], 0),
sample([4, 5, 1, 4], 0),
sample([4, 5, 1, 5], 0),
sample([4, 5, 2, 1], 0),
sample([4, 5, 2, 2], 0),
sample([4, 5, 2, 3], 0),
sample([4, 5, 2, 4], 0),
sample([4, 5, 2, 5], 0),
sample([4, 5, 3, 1], 0),
sample([4, 5, 3, 2], 0),
sample([4, 5, 3, 3], 0),
sample([4, 5, 3, 4], 0),
sample([4, 5, 3, 5], 0),
sample([4, 5, 4, 1], 0),
sample([4, 5, 4, 2], 0),
sample([4, 5, 4, 3], 0),
sample([4, 5, 4, 4], 0),
sample([4, 5, 5, 1], 0),
sample([4, 5, 5, 2], 0),
sample([4, 5, 5, 3], 0),
sample([4, 5, 5, 5], 1),
sample([5, 1, 1, 1], 0),
sample([5, 1, 1, 2], 0),
sample([5, 1, 1, 3], 0),
sample([5, 1, 1, 4], 0),
sample([5, 1, 2, 1], 0),
sample([5, 1, 2, 2], 0),
sample([5, 1, 2, 3], 1),
sample([5, 1, 2, 4], 1),
sample([5, 1, 2, 5], 1),
sample([5, 1, 3, 1], 0),
sample([5, 1, 3, 2], 1),
sample([5, 1, 3, 3], 1),
sample([5, 1, 3, 4], 1),
sample([5, 1, 3, 5], 1),
sample([5, 1, 4, 1], 0),
sample([5, 1, 4, 2], 1),
sample([5, 1, 4, 3], 1),
sample([5, 1, 4, 4], 1),
sample([5, 1, 4, 5], 1),
sample([5, 1, 5, 2], 1),
sample([5, 1, 5, 3], 1),
sample([5, 1, 5, 4], 1),
sample([5, 1, 5, 5], 1),
sample([5, 2, 1, 1], 0),
sample([5, 2, 1, 2], 0),
sample([5, 2, 1, 3], 0),
sample([5, 2, 1, 4], 0),
sample([5, 2, 1, 5], 0),
sample([5, 2, 2, 1], 0),
sample([5, 2, 2, 2], 0),
sample([5, 2, 2, 3], 0),
sample([5, 2, 2, 4], 0),
sample([5, 2, 3, 1], 0),
sample([5, 2, 3, 2], 0),
sample([5, 2, 3, 3], 0),
sample([5, 2, 3, 4], 1),
sample([5, 2, 3, 5], 1),
sample([5, 2, 4, 1], 0),
sample([5, 2, 4, 2], 0),
sample([5, 2, 4, 3], 1),
sample([5, 2, 4, 4], 1),
sample([5, 2, 4, 5], 1),
sample([5, 2, 5, 1], 0),
sample([5, 2, 5, 3], 1),
sample([5, 2, 5, 4], 1),
sample([5, 2, 5, 5], 1),
sample([5, 3, 1, 1], 0),
sample([5, 3, 1, 2], 0),
sample([5, 3, 1, 3], 0),
sample([5, 3, 1, 4], 0),
sample([5, 3, 1, 5], 0),
sample([5, 3, 2, 1], 0),
sample([5, 3, 2, 2], 0),
sample([5, 3, 2, 3], 0),
sample([5, 3, 2, 4], 0),
sample([5, 3, 2, 5], 0),
sample([5, 3, 3, 1], 0),
sample([5, 3, 3, 2], 0),
sample([5, 3, 3, 3], 0),
sample([5, 3, 3, 4], 0),
sample([5, 3, 4, 1], 0),
sample([5, 3, 4, 2], 0),
sample([5, 3, 4, 3], 0),
sample([5, 3, 4, 4], 1),
sample([5, 3, 4, 5], 1),
sample([5, 3, 5, 1], 0),
sample([5, 3, 5, 2], 0),
sample([5, 3, 5, 4], 1),
sample([5, 3, 5, 5], 1),
sample([5, 4, 1, 1], 0),
sample([5, 4, 1, 2], 0),
sample([5, 4, 1, 3], 0),
sample([5, 4, 1, 4], 0),
sample([5, 4, 1, 5], 0),
sample([5, 4, 2, 1], 0),
sample([5, 4, 2, 2], 0),
sample([5, 4, 2, 3], 0),
sample([5, 4, 2, 4], 0),
sample([5, 4, 2, 5], 0),
sample([5, 4, 3, 1], 0),
sample([5, 4, 3, 2], 0),
sample([5, 4, 3, 3], 0),
sample([5, 4, 3, 4], 0),
sample([5, 4, 3, 5], 0),
sample([5, 4, 4, 1], 0),
sample([5, 4, 4, 2], 0),
sample([5, 4, 4, 3], 0),
sample([5, 4, 4, 4], 0),
sample([5, 4, 5, 1], 0),
sample([5, 4, 5, 2], 0),
sample([5, 4, 5, 3], 0),
sample([5, 4, 5, 5], 1),
sample([5, 5, 1, 1], 0),
sample([5, 5, 1, 2], 0),
sample([5, 5, 1, 3], 0),
sample([5, 5, 1, 4], 0),
sample([5, 5, 1, 5], 0),
sample([5, 5, 2, 1], 0),
sample([5, 5, 2, 2], 0),
sample([5, 5, 2, 3], 0),
sample([5, 5, 2, 4], 0),
sample([5, 5, 2, 5], 0),
sample([5, 5, 3, 1], 0),
sample([5, 5, 3, 2], 0),
sample([5, 5, 3, 3], 0),
sample([5, 5, 3, 4], 0),
sample([5, 5, 3, 5], 0),
sample([5, 5, 4, 1], 0),
sample([5, 5, 4, 2], 0),
sample([5, 5, 4, 3], 0),
sample([5, 5, 4, 4], 0),
sample([5, 5, 4, 5], 0),
sample([5, 5, 5, 1], 0),
sample([5, 5, 5, 2], 0),
sample([5, 5, 5, 3], 0),
sample([5, 5, 5, 4], 0),
continuous=[True, True, True, True]
)
async def main():
    """Train a two-tree forest of depth 4 on `samples`, reveal it, and print it.

    Runs inside the MPyC runtime context so the secret-shared training and the
    final `output` (revealing the trees) are coordinated across parties.
    """
    async with mpc:
        secret_forest = await train_forest(samples, amount=2, depth=4)
        forest = await output(secret_forest)
        for tree_number, tree in enumerate(forest):
            print(f"Tree #{tree_number}")
            tree.pretty_print()


if __name__ == '__main__':
    mpc.run(main())
| true | true |
f736a38632ad7968c777a3b31c417244b1106c9c | 731 | py | Python | python/readme_prog_change_sub.py | romanstrazanec/ChaosEquations | cff505832b3ef8db2e3dc05e299a30f52b8e6473 | [
"MIT"
] | null | null | null | python/readme_prog_change_sub.py | romanstrazanec/ChaosEquations | cff505832b3ef8db2e3dc05e299a30f52b8e6473 | [
"MIT"
] | null | null | null | python/readme_prog_change_sub.py | romanstrazanec/ChaosEquations | cff505832b3ef8db2e3dc05e299a30f52b8e6473 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
# Iterate the map (x, y) -> (x + y*T, x - y*T) for several values of the
# parameter T and visualise the trajectories in two side-by-side subplots.
n = 5
T = [-1, -.5, 0., .5, 1]
# One point per T value; every trajectory starts at (1, 1).
x = [1] * len(T)
y = [1] * len(T)
# Right subplot: plot all T-points together, one curve per iteration step,
# with arrows linking consecutive T-points at the same step.
plt.subplot(122)
for i in range(1, n+1):
    # Advance every T-point by one application of the map (in place).
    for j in range(len(T)):
        x[j], y[j] = (x[j] + y[j]*T[j], x[j] - y[j]*T[j])
    for j in range(len(T)-1):
        plt.arrow(x[j], y[j], x[j+1]-x[j], y[j+1]-y[j], head_width=.35, head_length=.35, alpha=.3, fc='k')
    plt.plot(x, y, alpha=.7, label=f"{i} i")
# Left subplot: one curve per T value, tracing a single point through all
# n iterations from the common start (1, 1).
plt.subplot(121)
for t in T:
    x, y = (1, 1)
    xs, ys = [x], [y]
    for i in range(1, n+1):
        x, y = (x + y*t, x - y*t)
        xs.append(x)
        ys.append(y)
    plt.plot(xs, ys, '.-', alpha=.5, label=f"T = {t}")
# Attach a legend to each subplot, then save the figure for the README.
plt.legend()
plt.subplot(122)
plt.legend()
plt.savefig("../images/plot4sub.png")
plt.show()
| 22.84375 | 106 | 0.492476 | import matplotlib.pyplot as plt
n = 5
T = [-1, -.5, 0., .5, 1]
x = [1] * len(T)
y = [1] * len(T)
plt.subplot(122)
for i in range(1, n+1):
for j in range(len(T)):
x[j], y[j] = (x[j] + y[j]*T[j], x[j] - y[j]*T[j])
for j in range(len(T)-1):
plt.arrow(x[j], y[j], x[j+1]-x[j], y[j+1]-y[j], head_width=.35, head_length=.35, alpha=.3, fc='k')
plt.plot(x, y, alpha=.7, label=f"{i} i")
plt.subplot(121)
for t in T:
x, y = (1, 1)
xs, ys = [x], [y]
for i in range(1, n+1):
x, y = (x + y*t, x - y*t)
xs.append(x)
ys.append(y)
plt.plot(xs, ys, '.-', alpha=.5, label=f"T = {t}")
plt.legend()
plt.subplot(122)
plt.legend()
plt.savefig("../images/plot4sub.png")
plt.show()
| true | true |
f736a3f7a63e1b1fe8e6efa421cc4735f2835d7d | 636 | py | Python | iac/settings/aws_glue.py | InfrastructureHQ/AWS-CDK-Accelerators-Template | 107f881af741240fe67e687854b79e440d232efb | [
"Apache-2.0"
] | null | null | null | iac/settings/aws_glue.py | InfrastructureHQ/AWS-CDK-Accelerators-Template | 107f881af741240fe67e687854b79e440d232efb | [
"Apache-2.0"
] | null | null | null | iac/settings/aws_glue.py | InfrastructureHQ/AWS-CDK-Accelerators-Template | 107f881af741240fe67e687854b79e440d232efb | [
"Apache-2.0"
] | null | null | null | from typing import Dict, Optional
import pydantic
from settings.globalsettings import GlobalSettings
globalsettings = GlobalSettings()
AWS_ACCOUNT_ID = globalsettings.AWS_ACCOUNT_ID
AWS_REGION = globalsettings.AWS_REGION
class GlueSettings(
    pydantic.BaseSettings
): # pylint: disable=too-few-public-methods
    """Stack configuration loaded from the environment via pydantic BaseSettings."""

    # Stack identity / description.
    stack_name: str = "APIStack"
    description: Optional[str] = "API Stack"
    # NOTE(review): no type annotation here, so pydantic treats this as a
    # plain class attribute rather than a settings field (it will not be
    # populated from the environment) — confirm whether that is intended.
    aws_default_region = "us-east-1"
    stage: str = "production"
    # "<INSERT_VALUE>" placeholders are presumably meant to be overridden
    # through environment variables — verify against deployment config.
    email: Optional[str] = "<INSERT_VALUE>"
    # Optional field without an explicit default; pydantic v1 implicitly
    # defaults Optional fields to None.
    cost_center: Optional[str]
    api_name: Optional[str] = "<INSERT_VALUE>"
    api_version: Optional[str] = "<INSERT_VALUE>"
| 25.44 | 50 | 0.738994 | from typing import Dict, Optional
import pydantic
from settings.globalsettings import GlobalSettings
globalsettings = GlobalSettings()
AWS_ACCOUNT_ID = globalsettings.AWS_ACCOUNT_ID
AWS_REGION = globalsettings.AWS_REGION
class GlueSettings(
pydantic.BaseSettings
):
stack_name: str = "APIStack"
description: Optional[str] = "API Stack"
aws_default_region = "us-east-1"
stage: str = "production"
email: Optional[str] = "<INSERT_VALUE>"
cost_center: Optional[str]
api_name: Optional[str] = "<INSERT_VALUE>"
api_version: Optional[str] = "<INSERT_VALUE>"
| true | true |
f736a462e2911ac70d4200ae6272f14b7e399bed | 865 | py | Python | molecule/default/tests/test_default.py | darkwizard242/ansible-role-logrotate | cc4c61f7fc2c1f26a23db609e41396c90b6d469a | [
"MIT"
] | 1 | 2021-09-18T16:08:27.000Z | 2021-09-18T16:08:27.000Z | molecule/default/tests/test_default.py | darkwizard242/ansible-role-logrotate | cc4c61f7fc2c1f26a23db609e41396c90b6d469a | [
"MIT"
] | 5 | 2020-03-31T17:52:41.000Z | 2022-02-09T18:42:42.000Z | molecule/default/tests/test_default.py | darkwizard242/ansible-role-logrotate | cc4c61f7fc2c1f26a23db609e41396c90b6d469a | [
"MIT"
] | null | null | null | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
PACKAGE = 'logrotate'
PACKAGE_BINARY = '/usr/sbin/logrotate'
def test_logrotate_package_installed(host):
    """
    Tests if logrotate package is installed.
    """
    # Consistency fix: use the module-level PACKAGE constant (it was
    # defined but unused; the package name was hard-coded here).
    assert host.package(PACKAGE).is_installed
def test_logrotate_binary_exists(host):
    """
    Tests if logrotate binary exists.
    """
    binary = host.file(PACKAGE_BINARY)
    assert binary.exists
def test_logrotate_binary_file(host):
    """
    Tests if logrotate binary is a file type.
    """
    binary = host.file(PACKAGE_BINARY)
    assert binary.is_file
def test_logrotate_binary_which(host):
    """
    Tests the output to confirm logrotate's binary location.
    """
    # Consistency fix: build the command from the PACKAGE constant instead
    # of hard-coding the package name (same command, single source of truth).
    assert host.check_output('which ' + PACKAGE) == PACKAGE_BINARY
| 22.179487 | 65 | 0.726012 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
PACKAGE = 'logrotate'
PACKAGE_BINARY = '/usr/sbin/logrotate'
def test_logrotate_package_installed(host):
assert host.package("logrotate").is_installed
def test_logrotate_binary_exists(host):
assert host.file(PACKAGE_BINARY).exists
def test_logrotate_binary_file(host):
assert host.file(PACKAGE_BINARY).is_file
def test_logrotate_binary_which(host):
assert host.check_output('which logrotate') == PACKAGE_BINARY
| true | true |
f736a4d870cd3dd7db742978f719215d4f7f36b4 | 3,091 | py | Python | calibrate.py | josephko91/checkers-ai | e5963ffebf9a64724604a620d2432c1798505c22 | [
"MIT"
] | null | null | null | calibrate.py | josephko91/checkers-ai | e5963ffebf9a64724604a620d2432c1798505c22 | [
"MIT"
] | null | null | null | calibrate.py | josephko91/checkers-ai | e5963ffebf9a64724604a620d2432c1798505c22 | [
"MIT"
] | null | null | null | from board import Board
from algorithm import minimax_alpha_beta
import time
from statistics import mean
from math import sqrt, floor
# Three representative checkers positions used to calibrate search runtime.
# Cell encoding (as consumed by Board): "." empty, "b"/"w" black/white men,
# "B"/"W" presumably kings — confirm against the Board class.
# Opening position: full starting setup.
board_list_start = [[".", "b", ".", "b", ".", "b", ".", "b"],
                    ["b", ".", "b", ".", "b", ".", "b", "."],
                    [".", "b", ".", "b", ".", "b", ".", "b"],
                    [".", ".", ".", ".", ".", ".", ".", "."],
                    [".", ".", ".", ".", ".", ".", ".", "."],
                    ["w", ".", "w", ".", "w", ".", "w", "."],
                    [".", "w", ".", "w", ".", "w", ".", "w"],
                    ["w", ".", "w", ".", "w", ".", "w", "."]]
# Mid-game position: partially developed, some captures already made.
board_list_middle = [[".", "b", ".", ".", ".", ".", ".", "."],
                     ["b", ".", "b", ".", "b", ".", "b", "."],
                     [".", ".", ".", ".", ".", ".", ".", "b"],
                     [".", ".", "w", ".", ".", ".", ".", "."],
                     [".", ".", ".", ".", ".", "b", ".", "w"],
                     [".", ".", ".", ".", "b", ".", ".", "."],
                     [".", "w", ".", "w", ".", "w", ".", "."],
                     [".", ".", "w", ".", ".", ".", "w", "."]]
# End-game position: few pieces left, including kings.
board_list_end = [[".", ".", ".", ".", ".", "W", ".", "."],
                  [".", ".", ".", ".", ".", ".", "b", "."],
                  [".", ".", ".", ".", ".", ".", ".", "b"],
                  [".", "w", ".", ".", ".", ".", ".", "."],
                  [".", ".", ".", ".", ".", ".", ".", "w"],
                  [".", "b", ".", ".", ".", ".", ".", "."],
                  [".", ".", ".", ".", ".", ".", ".", "."],
                  [".", ".", ".", ".", "B", ".", "w", "."]]
# NOTE(review): these two timestamps bracket only Board construction and are
# never reported — they look like leftovers from earlier instrumentation.
start = time.time()
board_start = Board(board_list_start, True)
board_middle = Board(board_list_middle, True)
board_end = Board(board_list_end, True)
end = time.time()
# --- Measure minimax alpha-beta runtime at depths 1-9 for each game phase ---

def measure_runtimes(board, max_depth=9):
    """Return wall-clock runtimes (seconds) of minimax_alpha_beta on *board*.

    Runs one search per depth from 1 through *max_depth* (inclusive),
    resetting the algorithm's node counter before each search, and returns
    the list of elapsed times in depth order.
    """
    runtimes = []
    for depth in range(1, max_depth + 1):
        began = time.time()
        minimax_alpha_beta.count = 0  # reset the search's node counter
        minimax_alpha_beta(board, depth, float("-Inf"), float("Inf"), True)
        runtimes.append(time.time() - began)
    return runtimes

start_runtimes = measure_runtimes(board_start)
middle_runtimes = measure_runtimes(board_middle)
# Bug fix: the original copy-pasted loop timed board_middle again here, so
# the end-game position was never actually measured.
end_runtimes = measure_runtimes(board_end)
# Average the three phase measurements depth by depth (all lists share
# one entry per search depth).
mean_runtimes = [
    mean(phase_times)
    for phase_times in zip(start_runtimes, middle_runtimes, end_runtimes)
]
# write mean runtimes to calibrate.txt
with open('calibrate.txt', 'w') as output:
for i in range(len(mean_runtimes)):
if i == len(mean_runtimes)-1:
print(mean_runtimes[i], file = output, end = "")
else:
print(mean_runtimes[i], file = output, end = ",") | 39.126582 | 79 | 0.421546 | from board import Board
from algorithm import minimax_alpha_beta
import time
from statistics import mean
from math import sqrt, floor
board_list_start = [[".", "b", ".", "b", ".", "b", ".", "b"],
["b", ".", "b", ".", "b", ".", "b", "."],
[".", "b", ".", "b", ".", "b", ".", "b"],
[".", ".", ".", ".", ".", ".", ".", "."],
[".", ".", ".", ".", ".", ".", ".", "."],
["w", ".", "w", ".", "w", ".", "w", "."],
[".", "w", ".", "w", ".", "w", ".", "w"],
["w", ".", "w", ".", "w", ".", "w", "."]]
board_list_middle = [[".", "b", ".", ".", ".", ".", ".", "."],
["b", ".", "b", ".", "b", ".", "b", "."],
[".", ".", ".", ".", ".", ".", ".", "b"],
[".", ".", "w", ".", ".", ".", ".", "."],
[".", ".", ".", ".", ".", "b", ".", "w"],
[".", ".", ".", ".", "b", ".", ".", "."],
[".", "w", ".", "w", ".", "w", ".", "."],
[".", ".", "w", ".", ".", ".", "w", "."]]
board_list_end = [[".", ".", ".", ".", ".", "W", ".", "."],
[".", ".", ".", ".", ".", ".", "b", "."],
[".", ".", ".", ".", ".", ".", ".", "b"],
[".", "w", ".", ".", ".", ".", ".", "."],
[".", ".", ".", ".", ".", ".", ".", "w"],
[".", "b", ".", ".", ".", ".", ".", "."],
[".", ".", ".", ".", ".", ".", ".", "."],
[".", ".", ".", ".", "B", ".", "w", "."]]
start = time.time()
board_start = Board(board_list_start, True)
board_middle = Board(board_list_middle, True)
board_end = Board(board_list_end, True)
end = time.time()
start_runtimes = []
for depth in range(1, 10, 1):
start = time.time()
minimax_alpha_beta.count = 0
minimax_alpha_beta(board_start, depth, float("-Inf"), float("Inf"), True)
end = time.time()
start_runtimes.append(end-start)
middle_runtimes = []
for depth in range(1, 10, 1):
start = time.time()
minimax_alpha_beta.count = 0
minimax_alpha_beta(board_middle, depth, float("-Inf"), float("Inf"), True)
end = time.time()
middle_runtimes.append(end-start)
end_runtimes = []
for depth in range(1, 10, 1):
start = time.time()
minimax_alpha_beta.count = 0
minimax_alpha_beta(board_middle, depth, float("-Inf"), float("Inf"), True)
end = time.time()
end_runtimes.append(end-start)
mean_runtimes = []
for i in range(len(start_runtimes)):
mean_value = mean([start_runtimes[i], middle_runtimes[i], end_runtimes[i]])
mean_runtimes.append(mean_value)
with open('calibrate.txt', 'w') as output:
for i in range(len(mean_runtimes)):
if i == len(mean_runtimes)-1:
print(mean_runtimes[i], file = output, end = "")
else:
print(mean_runtimes[i], file = output, end = ",") | true | true |
f736a58b37452a4a8cfec128efabf5850633138b | 3,425 | py | Python | pmaso_tools.py | Jordan-M-Young/Port_Maso | eb9c137ce79004ae218ef6c721f0c9b54d099f68 | [
"MIT"
] | 3 | 2020-08-15T00:52:43.000Z | 2020-08-29T18:57:04.000Z | pmaso_tools.py | Jordan-M-Young/Pmaso | eb9c137ce79004ae218ef6c721f0c9b54d099f68 | [
"MIT"
] | null | null | null | pmaso_tools.py | Jordan-M-Young/Pmaso | eb9c137ce79004ae218ef6c721f0c9b54d099f68 | [
"MIT"
] | 1 | 2020-07-21T17:56:56.000Z | 2020-07-21T17:56:56.000Z | import csv
def parse_tickers(tickers):
"""accepts tickers string input i.e. 'GOOG,AAPL,MSFT'
and outputs tickers list ['GOOG','AAPL','MSFT']"""
num_commas = tickers.find(',')
tickers = tickers.rsplit(',',num_commas+1)
return tickers
def write_portfolio(portfolio,tickers,file_path):
"""accepts a portfolio name, list of tickers, and a
filepath as arguments and saves the info to a .csv file"""
if '.csv' not in file_path:
file_path = file_path + '.csv'
port = {'Portfolio':portfolio,'Tickers':tickers}
#writes dict to .csv file
with open(file_path,'w', newline='') as csvfile:
fieldnames = ['Portfolio','Tickers']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerow(port)
def load_portfolio(file_path):
"""Loads saved portfolio .csv files"""
with open(file_path,newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
portfolio = row['Portfolio']
tickers = row['Tickers']
return portfolio,tickers
def get_perms(num_assets,bounds):
"""Generates a set of asset weight permutations based on a number
of assets argument and boundary conditions.This function is used
specifically for the results window worker thread
"""
low_bound, up_bound = bounds
x = range(low_bound,up_bound)
perms = itertools.product(x,repeat=num_assets)
return perms
def gen_report(tickers,opt_params,fname,selection):
if '.xlsx' not in fname or '.xls' not in fname:
fname = fname + '.xlsx'
op = opt_params
sheets = ['Selected Portfolios','All Portfolios','Asset']
#Sheet One: Best Parameters
cl = ['Std Deviation','Expected Returns','Sharpe Ratio','ALpha','Beta']
for i in reversed(range(len(tickers))):
cl.insert(5,tickers[i])
sh1 = pd.DataFrame(selection,columns=cl)
#Sheet Two: All Portfolios
alphas = op['Alphas']
betas = op['Betas']
sharpes = op['Portfolio_Sharpe_Ratios']
rets = op['Portfolio_Space_Returns']
stds = op['Portfolio_Space_Stds']
weights = op['Weights']
cl = ['Std Deviation','Expected Returns','Sharpe Ratio','ALpha','Beta']
for i in reversed(range(len(tickers))):
cl.insert(5,tickers[i])
ports = [[float(stds[i]),
float(rets[i]),
float(sharpes[i]),
float(alphas[i]),
float(betas[i])] for i in range(len(alphas))]
#Packs all values into a list of lists
num = len(ports[0])
for j in range(len(ports)):
for i in reversed(range(len(weights[0]))):
ports[j].insert(num,float(weights[j][i]))
sh2 = pd.DataFrame(ports,columns=cl)
#Sheet Three: Assest Parameters
rw = tickers
cl = [key for key in op.keys() if 'Asset' in key]
sh3 = [[op[cl[j]][rw[i]] for i in range(len(rw))] for j in range(len(cl))]
sh3 = pd.DataFrame(sh3,index=cl,columns=rw).T
with pd.ExcelWriter(fname) as writer:
sh1.to_excel(writer,'Selected Portfolios')
sh2.to_excel(writer,'All Portfolios')
sh3.to_excel(writer,'Assets')
| 27.845528 | 78 | 0.588613 | import csv
def parse_tickers(tickers):
num_commas = tickers.find(',')
tickers = tickers.rsplit(',',num_commas+1)
return tickers
def write_portfolio(portfolio,tickers,file_path):
if '.csv' not in file_path:
file_path = file_path + '.csv'
port = {'Portfolio':portfolio,'Tickers':tickers}
with open(file_path,'w', newline='') as csvfile:
fieldnames = ['Portfolio','Tickers']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerow(port)
def load_portfolio(file_path):
with open(file_path,newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
portfolio = row['Portfolio']
tickers = row['Tickers']
return portfolio,tickers
def get_perms(num_assets,bounds):
low_bound, up_bound = bounds
x = range(low_bound,up_bound)
perms = itertools.product(x,repeat=num_assets)
return perms
def gen_report(tickers,opt_params,fname,selection):
if '.xlsx' not in fname or '.xls' not in fname:
fname = fname + '.xlsx'
op = opt_params
sheets = ['Selected Portfolios','All Portfolios','Asset']
cl = ['Std Deviation','Expected Returns','Sharpe Ratio','ALpha','Beta']
for i in reversed(range(len(tickers))):
cl.insert(5,tickers[i])
sh1 = pd.DataFrame(selection,columns=cl)
alphas = op['Alphas']
betas = op['Betas']
sharpes = op['Portfolio_Sharpe_Ratios']
rets = op['Portfolio_Space_Returns']
stds = op['Portfolio_Space_Stds']
weights = op['Weights']
cl = ['Std Deviation','Expected Returns','Sharpe Ratio','ALpha','Beta']
for i in reversed(range(len(tickers))):
cl.insert(5,tickers[i])
ports = [[float(stds[i]),
float(rets[i]),
float(sharpes[i]),
float(alphas[i]),
float(betas[i])] for i in range(len(alphas))]
num = len(ports[0])
for j in range(len(ports)):
for i in reversed(range(len(weights[0]))):
ports[j].insert(num,float(weights[j][i]))
sh2 = pd.DataFrame(ports,columns=cl)
rw = tickers
cl = [key for key in op.keys() if 'Asset' in key]
sh3 = [[op[cl[j]][rw[i]] for i in range(len(rw))] for j in range(len(cl))]
sh3 = pd.DataFrame(sh3,index=cl,columns=rw).T
with pd.ExcelWriter(fname) as writer:
sh1.to_excel(writer,'Selected Portfolios')
sh2.to_excel(writer,'All Portfolios')
sh3.to_excel(writer,'Assets')
| true | true |
f736a64f7969709ed96b686f0de0dd7294ecab2a | 2,169 | py | Python | listings/migrations/0001_initial.py | RodrigoMachado9/django | b7c1b6e05af509f264bdb7c5096db4cbcc8efc5f | [
"MIT"
] | 1 | 2019-06-23T00:49:45.000Z | 2019-06-23T00:49:45.000Z | listings/migrations/0001_initial.py | RodrigoMachado9/django-web | b7c1b6e05af509f264bdb7c5096db4cbcc8efc5f | [
"MIT"
] | 1 | 2019-06-23T00:54:24.000Z | 2019-06-23T00:54:24.000Z | listings/migrations/0001_initial.py | RodrigoMachado9/django | b7c1b6e05af509f264bdb7c5096db4cbcc8efc5f | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-09-28 19:21
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('realtors', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('address', models.CharField(max_length=200)),
('city', models.CharField(max_length=200)),
('state', models.CharField(max_length=200)),
('zipcode', models.CharField(max_length=100)),
('description', models.TextField(blank=True)),
('price', models.IntegerField()),
('bedrooms', models.IntegerField()),
('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
('garage', models.IntegerField(default=0)),
('sqft', models.IntegerField()),
('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('is_published', models.BooleanField(default=True)),
('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.realtor')),
],
),
]
| 47.152174 | 118 | 0.576302 |
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('realtors', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('address', models.CharField(max_length=200)),
('city', models.CharField(max_length=200)),
('state', models.CharField(max_length=200)),
('zipcode', models.CharField(max_length=100)),
('description', models.TextField(blank=True)),
('price', models.IntegerField()),
('bedrooms', models.IntegerField()),
('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
('garage', models.IntegerField(default=0)),
('sqft', models.IntegerField()),
('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('is_published', models.BooleanField(default=True)),
('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.realtor')),
],
),
]
| true | true |
f736a69a2ffc52c26ca8303f21a5e0717f53ab72 | 41,427 | py | Python | project/utils/modules.py | amorehead/Equivariant-GNNs | 4e81136242a4c8905b0e5fc39be5f704a42cc5e1 | [
"Apache-2.0"
] | 2 | 2021-10-07T12:53:51.000Z | 2022-01-04T19:26:08.000Z | project/utils/modules.py | amorehead/Equivariant-GNNs | 4e81136242a4c8905b0e5fc39be5f704a42cc5e1 | [
"Apache-2.0"
] | null | null | null | project/utils/modules.py | amorehead/Equivariant-GNNs | 4e81136242a4c8905b0e5fc39be5f704a42cc5e1 | [
"Apache-2.0"
] | null | null | null | from typing import Dict
import dgl
import dgl.function as fn # for graphs
import numpy as np
import torch
import torch.nn as nn
from dgl.nn.pytorch.glob import AvgPooling, MaxPooling
from dgl.nn.pytorch.softmax import edge_softmax
from einops import rearrange
from packaging import version
from torch import Tensor, einsum, broadcast_tensors, relu, sigmoid
from torch.nn import GELU
from torch.nn.functional import normalize
from torch.nn.parameter import Parameter
from project.utils.fibers import Fiber, fiber2head
from project.utils.from_se3cnn.utils_steerable import _basis_transformation_Q_J, get_spherical_from_cartesian_torch, \
precompute_sh
from project.utils.utils import fourier_encode_dist, batched_index_select
from project.utils.utils_profiling import profile # load before other local modules
# -------------------------------------------------------------------------------------------------------------------------------------
# Following code derived from SE(3)-Transformer (https://github.com/FabianFuchsML/se3-transformer-public/):
# -------------------------------------------------------------------------------------------------------------------------------------
@profile
def get_basis(Y, max_degree):
"""Precompute the SE(3)-equivariant weight basis.
This is called by get_basis_and_r().
Args:
Y: spherical harmonic dict, returned by utils_steerable.precompute_sh()
max_degree: non-negative int for degree of highest feature type
Returns:
dict of equivariant bases, keys are in form '<d_in><d_out>'
"""
device = Y[0].device
# No need to backprop through the basis construction
with torch.no_grad():
basis = {}
for d_in in range(max_degree + 1):
for d_out in range(max_degree + 1):
K_Js = []
for J in range(abs(d_in - d_out), d_in + d_out + 1):
# Get spherical harmonic projection matrices
Q_J = _basis_transformation_Q_J(J, d_in, d_out)
Q_J = Q_J.float().T.to(device)
# Create kernel from spherical harmonics
K_J = torch.matmul(Y[J], Q_J)
K_Js.append(K_J)
# Reshape so can take linear combinations with a dot product
size = (-1, 1, 2 * d_out + 1, 1, 2 * d_in + 1, 2 * min(d_in, d_out) + 1)
basis[f'{d_in},{d_out}'] = torch.stack(K_Js, -1).view(*size)
return basis
def get_basis_and_r(G, max_degree):
"""Return equivariant weight basis (basis) and internodal distances (r).
Call this function *once* at the start of each forward pass of the model.
It computes the equivariant weight basis, W_J^lk(x), and internodal
distances, needed to compute varphi_J^lk(x), of eqn 8 of
https://arxiv.org/pdf/2006.10503.pdf. The return values of this function
can be shared as input across all SE(3)-Transformer layers in a model.
Args:
G: DGL graph instance of type dgl.DGLGraph()
max_degree: non-negative int for degree of highest feature-type
Returns:
dict of equivariant bases, keys are in form '<d_in><d_out>'
vector of relative distances, ordered according to edge ordering of G
"""
# Relative positional encodings (vector)
r_ij = get_spherical_from_cartesian_torch(G.edata['d'])
# Spherical harmonic basis
Y = precompute_sh(r_ij, 2 * max_degree)
# Equivariant basis (dict['d_in><d_out>'])
basis = get_basis(Y, max_degree)
# Relative distances (scalar)
r = torch.sqrt(torch.sum(G.edata['d'] ** 2, -1, keepdim=True))
return basis, r
### SE(3) equivariant operations on graphs in DGL
class GConvSE3(nn.Module):
"""A tensor field network layer as a DGL module.
GConvSE3 stands for a Graph Convolution SE(3)-equivariant layer. It is the
equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph
conv layer in a GCN.
At each node, the activations are split into different "feature types",
indexed by the SE(3) representation type: non-negative integers 0, 1, 2, ..
"""
def __init__(self, f_in, f_out, self_interaction: bool = False, edge_dim: int = 0):
"""SE(3)-equivariant Graph Conv Layer
Args:
f_in: list of tuples [(multiplicities, type),...]
f_out: list of tuples [(multiplicities, type),...]
self_interaction: include self-interaction in convolution
edge_dim: number of dimensions for edge embedding
"""
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.edge_dim = edge_dim
self.self_interaction = self_interaction
# Neighbor -> center weights
self.kernel_unary = nn.ModuleDict()
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim=edge_dim)
# Center -> center weights
self.kernel_self = nn.ParameterDict()
if self_interaction:
for m_in, d_in in self.f_in.structure:
if d_in in self.f_out.degrees:
m_out = self.f_out.structure_dict[d_in]
W = nn.Parameter(torch.randn(1, m_out, m_in) / np.sqrt(m_in))
self.kernel_self[f'{d_in}'] = W
def __repr__(self):
return f'GConvSE3(structure={self.f_out}, self_interaction={self.self_interaction})'
def udf_u_mul_e(self, d_out):
"""Compute the convolution for a single output feature type.
This function is set up as a User Defined Function in DGL.
Args:
d_out: output feature type
Returns:
edge -> node function handle
"""
def fnc(edges):
# Neighbor -> center messages
msg = 0
for m_in, d_in in self.f_in.structure:
src = edges.src[f'{d_in}'].view(-1, m_in * (2 * d_in + 1), 1)
edge = edges.data[f'({d_in},{d_out})']
msg = msg + torch.matmul(edge, src)
msg = msg.view(msg.shape[0], -1, 2 * d_out + 1)
# Center -> center messages
if self.self_interaction:
if f'{d_out}' in self.kernel_self.keys():
dst = edges.dst[f'{d_out}']
W = self.kernel_self[f'{d_out}']
msg = msg + torch.matmul(W, dst)
return {'msg': msg.view(msg.shape[0], -1, 2 * d_out + 1)}
return fnc
@profile
def forward(self, h, G=None, r=None, basis=None, **kwargs):
"""Forward pass of the linear layer
Args:
G: minibatch of (homo)graphs
h: dict of features
r: inter-atomic distances
basis: pre-computed Q * Y
Returns:
tensor with new features [B, n_points, n_features_out]
"""
with G.local_scope():
# Add node features to local graph scope
for k, v in h.items():
G.ndata[k] = v
# Add edge features
if 'w' in G.edata.keys():
w = G.edata['w']
feat = torch.cat([w, r], -1)
else:
feat = torch.cat([r, ], -1)
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
etype = f'({di},{do})'
G.edata[etype] = self.kernel_unary[etype](feat, basis)
# Perform message-passing for each output feature type
for d in self.f_out.degrees:
G.update_all(self.udf_u_mul_e(d), fn.mean('msg', f'out{d}'))
return {f'{d}': G.ndata[f'out{d}'] for d in self.f_out.degrees}
class RadialFunc(nn.Module):
"""NN parameterized radial profile function."""
def __init__(self, num_freq, in_dim, out_dim, edge_dim: int = 0):
"""NN parameterized radial profile function.
Args:
num_freq: number of output frequencies
in_dim: multiplicity of input (num input channels)
out_dim: multiplicity of output (num output channels)
edge_dim: number of dimensions for edge embedding
"""
super().__init__()
self.num_freq = num_freq
self.in_dim = in_dim
self.mid_dim = 32
self.out_dim = out_dim
self.edge_dim = edge_dim
self.net = nn.Sequential(nn.Linear(self.edge_dim + 1, self.mid_dim),
BN(self.mid_dim),
nn.ReLU(),
nn.Linear(self.mid_dim, self.mid_dim),
BN(self.mid_dim),
nn.ReLU(),
nn.Linear(self.mid_dim, self.num_freq * in_dim * out_dim))
nn.init.kaiming_uniform_(self.net[0].weight)
nn.init.kaiming_uniform_(self.net[3].weight)
nn.init.kaiming_uniform_(self.net[6].weight)
def __repr__(self):
return f"RadialFunc(edge_dim={self.edge_dim}, in_dim={self.in_dim}, out_dim={self.out_dim})"
def forward(self, x):
y = self.net(x)
return y.view(-1, self.out_dim, 1, self.in_dim, 1, self.num_freq)
class PairwiseConv(nn.Module):
"""SE(3)-equivariant convolution between two single-type features"""
def __init__(self, degree_in: int, nc_in: int, degree_out: int,
nc_out: int, edge_dim: int = 0):
"""SE(3)-equivariant convolution between a pair of feature types.
This layer performs a convolution from nc_in features of type degree_in
to nc_out features of type degree_out.
Args:
degree_in: degree of input fiber
nc_in: number of channels on input
degree_out: degree of out order
nc_out: number of channels on output
edge_dim: number of dimensions for edge embedding
"""
super().__init__()
# Log settings
self.degree_in = degree_in
self.degree_out = degree_out
self.nc_in = nc_in
self.nc_out = nc_out
# Functions of the degree
self.num_freq = 2 * min(degree_in, degree_out) + 1
self.d_out = 2 * degree_out + 1
self.edge_dim = edge_dim
# Radial profile function
self.rp = RadialFunc(self.num_freq, nc_in, nc_out, self.edge_dim)
@profile
def forward(self, feat, basis):
# Get radial weights
R = self.rp(feat)
kernel = torch.sum(R * basis[f'{self.degree_in},{self.degree_out}'], -1)
return kernel.view(kernel.shape[0], self.d_out * self.nc_out, -1)
class G1x1SE3(nn.Module):
"""Graph Linear SE(3)-equivariant layer, equivalent to a 1x1 convolution.
This is equivalent to a self-interaction layer in TensorField Networks.
"""
def __init__(self, f_in, f_out, learnable=True):
"""SE(3)-equivariant 1x1 convolution.
Args:
f_in: input Fiber() of feature multiplicities and types
f_out: output Fiber() of feature multiplicities and types
"""
super().__init__()
self.f_in = f_in
self.f_out = f_out
# Linear mappings: 1 per output feature type
self.transform = nn.ParameterDict()
for m_out, d_out in self.f_out.structure:
m_in = self.f_in.structure_dict[d_out]
self.transform[str(d_out)] = nn.Parameter(torch.randn(m_out, m_in) / np.sqrt(m_in), requires_grad=learnable)
def __repr__(self):
return f"G1x1SE3(structure={self.f_out})"
def forward(self, features, **kwargs):
output = {}
for k, v in features.items():
if str(k) in self.transform.keys():
output[k] = torch.matmul(self.transform[str(k)], v)
return output
class GNormSE3(nn.Module):
"""Graph Norm-based SE(3)-equivariant nonlinearity.
Nonlinearities are important in SE(3) equivariant GCNs. They are also quite
expensive to compute, so it is convenient for them to share resources with
other layers, such as normalization. The general workflow is as follows:
> for feature type in features:
> norm, phase <- feature
> output = fnc(norm) * phase
where fnc: {R+}^m -> R^m is a learnable map from m norms to m scalars.
"""
def __init__(self, fiber, nonlin=nn.ReLU(inplace=True), num_layers: int = 0):
"""Initializer.
Args:
fiber: Fiber() of feature multiplicities and types
nonlin: nonlinearity to use everywhere
num_layers: non-negative number of linear layers in fnc
"""
super().__init__()
self.fiber = fiber
self.nonlin = nonlin
self.num_layers = num_layers
# Regularization for computing phase: gradients explode otherwise
self.eps = 1e-12
# Norm mappings: 1 per feature type
self.transform = nn.ModuleDict()
for m, d in self.fiber.structure:
self.transform[str(d)] = self._build_net(int(m))
def __repr__(self):
return f"GNormSE3(num_layers={self.num_layers}, nonlin={self.nonlin})"
def _build_net(self, m: int):
net = []
for i in range(self.num_layers):
net.append(BN(int(m)))
net.append(self.nonlin)
# TODO: implement cleaner init
net.append(nn.Linear(m, m, bias=(i == self.num_layers - 1)))
nn.init.kaiming_uniform_(net[-1].weight)
if self.num_layers == 0:
net.append(BN(int(m)))
net.append(self.nonlin)
return nn.Sequential(*net)
@profile
def forward(self, features, **kwargs):
output = {}
for k, v in features.items():
# Compute the norms and normalized features
# v shape: [...,m , 2*k+1]
norm = v.norm(2, -1, keepdim=True).clamp_min(self.eps).expand_as(v)
phase = v / norm
# Transform on norms
transformed = self.transform[str(k)](norm[..., 0]).unsqueeze(-1)
# Nonlinearity on norm
output[k] = (transformed * phase).view(*v.shape)
return output
class BN(nn.Module):
"""SE(3)-equvariant batch/layer normalization"""
def __init__(self, m):
"""SE(3)-equvariant batch/layer normalization
Args:
m: int for number of output channels
"""
super().__init__()
self.bn = nn.LayerNorm(m)
def forward(self, x):
return self.bn(x)
class GConvSE3Partial(nn.Module):
"""Graph SE(3)-equivariant node -> edge layer"""
def __init__(self, f_in, f_out, edge_dim: int = 0):
"""SE(3)-equivariant partial convolution.
A partial convolution computes the inner product between a kernel and
each input channel, without summing over the result from each input
channel. This unfolded structure makes it amenable to be used for
computing the value-embeddings of the attention mechanism.
Args:
f_in: list of tuples [(multiplicities, type),...]
f_out: list of tuples [(multiplicities, type),...]
"""
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.edge_dim = edge_dim
# Node -> edge weights
self.kernel_unary = nn.ModuleDict()
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim=edge_dim)
def __repr__(self):
return f'GConvSE3Partial(structure={self.f_out})'
def udf_u_mul_e(self, d_out):
"""Compute the partial convolution for a single output feature type.
This function is set up as a User Defined Function in DGL.
Args:
d_out: output feature type
Returns:
node -> edge function handle
"""
def fnc(edges):
# Neighbor -> center messages
msg = 0
for m_in, d_in in self.f_in.structure:
src = edges.src[f'{d_in}'].view(-1, m_in * (2 * d_in + 1), 1)
edge = edges.data[f'({d_in},{d_out})']
msg = msg + torch.matmul(edge, src)
msg = msg.view(msg.shape[0], -1, 2 * d_out + 1)
return {f'out{d_out}': msg.view(msg.shape[0], -1, 2 * d_out + 1)}
return fnc
@profile
def forward(self, h, G=None, r=None, basis=None, **kwargs):
"""Forward pass of the linear layer
Args:
h: dict of node-features
G: minibatch of (homo)graphs
r: inter-atomic distances
basis: pre-computed Q * Y
Returns:
tensor with new features [B, n_points, n_features_out]
"""
with G.local_scope():
# Add node features to local graph scope
for k, v in h.items():
G.ndata[k] = v
# Add edge features
if 'w' in G.edata.keys():
w = G.edata['w'] # shape: [#edges_in_batch, #bond_types]
feat = torch.cat([w, r], -1)
else:
feat = torch.cat([r, ], -1)
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
etype = f'({di},{do})'
G.edata[etype] = self.kernel_unary[etype](feat, basis)
# Perform message-passing for each output feature type
for d in self.f_out.degrees:
G.apply_edges(self.udf_u_mul_e(d))
return {f'{d}': G.edata[f'out{d}'] for d in self.f_out.degrees}
class GMABSE3(nn.Module):
"""An SE(3)-equivariant multi-headed self-attention module for DGL graphs."""
def __init__(self, f_value: Fiber, f_key: Fiber, n_heads: int):
"""SE(3)-equivariant MAB (multi-headed attention block) layer.
Args:
f_value: Fiber() object for value-embeddings
f_key: Fiber() object for key-embeddings
n_heads: number of heads
"""
super().__init__()
self.f_value = f_value
self.f_key = f_key
self.n_heads = n_heads
self.new_dgl = version.parse(dgl.__version__) > version.parse('0.4.4')
def __repr__(self):
return f'GMABSE3(n_heads={self.n_heads}, structure={self.f_value})'
def udf_u_mul_e(self, d_out):
"""Compute the weighted sum for a single output feature type.
This function is set up as a User Defined Function in DGL.
Args:
d_out: output feature type
Returns:
edge -> node function handle
"""
def fnc(edges):
# Neighbor -> center messages
attn = edges.data['a']
value = edges.data[f'v{d_out}']
# Apply attention weights
msg = attn.unsqueeze(-1).unsqueeze(-1) * value
return {'m': msg}
return fnc
@profile
def forward(self, v, k: Dict = None, q: Dict = None, G=None, **kwargs):
"""Forward pass of the linear layer
Args:
G: minibatch of (homo)graphs
v: dict of value edge-features
k: dict of key edge-features
q: dict of query node-features
Returns:
tensor with new features [B, n_points, n_features_out]
"""
with G.local_scope():
# Add node features to local graph scope
## We use the stacked tensor representation for attention
for m, d in self.f_value.structure:
G.edata[f'v{d}'] = v[f'{d}'].view(-1, self.n_heads, m // self.n_heads, 2 * d + 1)
G.edata['k'] = fiber2head(k, self.n_heads, self.f_key, squeeze=True)
G.ndata['q'] = fiber2head(q, self.n_heads, self.f_key, squeeze=True)
# Compute attention weights
## Inner product between (key) neighborhood and (query) center
G.apply_edges(fn.e_dot_v('k', 'q', 'e'))
## Apply softmax
e = G.edata.pop('e')
if self.new_dgl:
# in dgl 5.3, e has an extra dimension compared to dgl 4.3
# the following, we get rid of this be reshaping
n_edges = G.edata['k'].shape[0]
e = e.view([n_edges, self.n_heads])
e = e / np.sqrt(self.f_key.n_features)
G.edata['a'] = edge_softmax(G, e)
# Perform attention-weighted message-passing
for d in self.f_value.degrees:
G.update_all(self.udf_u_mul_e(d), fn.sum('m', f'out{d}'))
output = {}
for m, d in self.f_value.structure:
output[f'{d}'] = G.ndata[f'out{d}'].view(-1, m, 2 * d + 1)
return output
class GSE3Res(nn.Module):
"""Graph attention block with SE(3)-equivariance and skip connection"""
def __init__(self, f_in: Fiber, f_out: Fiber, edge_dim: int = 0, div: float = 4,
n_heads: int = 1, learnable_skip=True):
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.div = div
self.n_heads = n_heads
# f_mid_out has same structure as 'f_out' but #channels divided by 'div'
# this will be used for the values
f_mid_out = {k: int(v // div) for k, v in self.f_out.structure_dict.items()}
self.f_mid_out = Fiber(dictionary=f_mid_out)
# f_mid_in has same structure as f_mid_out, but only degrees which are in f_in
# this will be used for keys and queries
# (queries are merely projected, hence degrees have to match input)
f_mid_in = {d: m for d, m in f_mid_out.items() if d in self.f_in.degrees}
self.f_mid_in = Fiber(dictionary=f_mid_in)
self.edge_dim = edge_dim
self.GMAB = nn.ModuleDict()
# Projections
self.GMAB['v'] = GConvSE3Partial(f_in, self.f_mid_out, edge_dim=edge_dim)
self.GMAB['k'] = GConvSE3Partial(f_in, self.f_mid_in, edge_dim=edge_dim)
self.GMAB['q'] = G1x1SE3(f_in, self.f_mid_in)
# Attention
self.GMAB['attn'] = GMABSE3(self.f_mid_out, self.f_mid_in, n_heads=n_heads)
# Skip connections
self.project = G1x1SE3(self.f_mid_out, f_out, learnable=learnable_skip)
self.add = GSum(f_out, f_in)
# the following checks whether the skip connection would change
# the output fibre structure; the reason can be that the input has
# more channels than the output (for at least one degree); this would
# then cause a (hard to debug) error in the next layer
assert self.add.f_out.structure_dict == f_out.structure_dict, \
'skip connection would change output structure'
@profile
def forward(self, features, G, **kwargs):
# Embeddings
v = self.GMAB['v'](features, G=G, **kwargs)
k = self.GMAB['k'](features, G=G, **kwargs)
q = self.GMAB['q'](features, G=G)
# Attention
z = self.GMAB['attn'](v, k=k, q=q, G=G)
# Skip + residual
z = self.project(z)
z = self.add(z, features)
return z
### Helper and wrapper functions
class GSum(nn.Module):
"""SE(3)-equivariant graph residual sum function."""
def __init__(self, f_x: Fiber, f_y: Fiber):
"""SE(3)-equivariant graph residual sum function.
Args:
f_x: Fiber() object for fiber of summands
f_y: Fiber() object for fiber of summands
"""
super().__init__()
self.f_x = f_x
self.f_y = f_y
self.f_out = Fiber.combine_max(f_x, f_y)
def __repr__(self):
return f"GSum(structure={self.f_out})"
def forward(self, x, y):
out = {}
for k in self.f_out.degrees:
k = str(k)
if (k in x) and (k in y):
if x[k].shape[1] > y[k].shape[1]:
diff = x[k].shape[1] - y[k].shape[1]
zeros = torch.zeros(x[k].shape[0], diff, x[k].shape[2]).to(y[k].device)
y[k] = torch.cat([y[k], zeros], 1)
elif x[k].shape[1] < y[k].shape[1]:
diff = y[k].shape[1] - x[k].shape[1]
zeros = torch.zeros(x[k].shape[0], diff, x[k].shape[2]).to(y[k].device)
x[k] = torch.cat([x[k], zeros], 1)
out[k] = x[k] + y[k]
elif k in x:
out[k] = x[k]
elif k in y:
out[k] = y[k]
return out
class GAvgPooling(nn.Module):
    """Graph Average Pooling module.

    Pools node features over each graph in a batch: scalar ('0') features
    are pooled directly; vector ('1') features are pooled per component.
    """

    def __init__(self, type='0'):
        """
        Args:
            type: degree of the features to pool, '0' (scalar) or '1' (vector).
        """
        super().__init__()
        self.pool = AvgPooling()
        self.type = type

    @profile
    def forward(self, features, G, **kwargs):
        """Average node features over each graph.

        Args:
            features: dict mapping degree (as str) to node feature tensors.
            G: (batched) DGL graph.

        Returns:
            Pooled tensor for type '0'; a dict ``{'1': tensor}`` for type '1'.

        Raises:
            NotImplementedError: for any type other than '0' or '1'.
        """
        if self.type == '0':
            # Scalar features: take the trailing component, then pool.
            h = features['0'][..., -1]
            pooled = self.pool(G, h)
        elif self.type == '1':
            # Vector features: pool each of the three spatial components.
            pooled = []
            for i in range(3):
                h_i = features['1'][..., i]
                pooled.append(self.pool(G, h_i).unsqueeze(-1))
            pooled = torch.cat(pooled, dim=-1)
            pooled = {'1': pooled}
        else:
            # Fail loudly; the previous print()+exit() killed the process.
            raise NotImplementedError(
                f"GAvgPooling for type={self.type!r} is not implemented")
        return pooled
class GMaxPooling(nn.Module):
    """Graph Max Pooling module.

    Max-pools the scalar ('0') node features over each graph in a batch.
    """

    def __init__(self):
        super().__init__()
        self.pool = MaxPooling()

    @profile
    def forward(self, features, G, **kwargs):
        """Max-pool scalar node features over each graph.

        Args:
            features: dict mapping degree (as str) to node feature tensors.
            G: (batched) DGL graph.
        """
        scalars = features['0'][..., -1]
        return self.pool(G, scalars)
# -------------------------------------------------------------------------------------------------------------------------------------
# Following code derived from egnn-pytorch (https://github.com/lucidrains/egnn-pytorch/blob/main/egnn_pytorch/egnn_pytorch.py):
# -------------------------------------------------------------------------------------------------------------------------------------
class EnInvGraphConv(nn.Module):
    """A graph neural network layer as a DGL module.

    EnInvGraphConv stands for a Graph Convolution E(n)-invariant layer. It is the
    equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph
    conv layer in a GCN.

    NOTE(review): the coordinate-update machinery (``coords_mlp`` and the two
    norm flags) is constructed for parameter/API parity with ``EnGraphConv``
    but is never used by this invariant layer's ``forward``.
    """

    def __init__(
            self,
            node_feat,
            edge_feat=0,
            coord_feat=16,
            fourier_feat=0,
            norm_rel_coords=False,
            norm_coord_weights=False,
            num_nearest_neighbors=0,
            dropout=0.0,
            init_eps=1e-3
    ):
        """E(n)-invariant Graph Conv Layer

        Parameters
        ----------
        node_feat : int
            Node feature size.
        edge_feat : int
            Edge feature size.
        coord_feat : int
            Output size of the edge-message MLP.
        fourier_feat : int
            Number of Fourier encodings applied to relative distances.
        norm_rel_coords : boolean
            Whether to normalize relative coordinates (unused in this
            invariant variant; kept for parity with EnGraphConv).
        norm_coord_weights : boolean
            Whether to tanh-squash coordinate weights (unused in this
            invariant variant; kept for parity with EnGraphConv).
        num_nearest_neighbors : int
            If > 0, restrict messages to this many nearest neighbors per node.
        dropout : float
            Dropout probability inside the edge and node MLPs.
        init_eps : float
            Std of the normal initialization of linear-layer weights.
        """
        super().__init__()
        # Stored so __repr__ can report the layer structure (previously these
        # were never set, making repr() raise AttributeError).
        self.node_feat = node_feat
        self.edge_feat = edge_feat
        self.coord_feat = coord_feat
        self.fourier_feat = fourier_feat
        edge_input_dim = (fourier_feat * 2) + (node_feat * 2) + edge_feat + 1

        drop = nn.Dropout(dropout) if dropout > 0 else nn.Identity()

        self.edge_mlp = nn.Sequential(
            nn.Linear(edge_input_dim, edge_input_dim * 2),
            drop,
            GELU(),
            nn.Linear(edge_input_dim * 2, coord_feat),
            GELU()
        )

        self.node_mlp = nn.Sequential(
            nn.Linear(node_feat + coord_feat, node_feat * 2),
            drop,
            GELU(),
            nn.Linear(node_feat * 2, node_feat),
        )

        self.norm_coord_weights = norm_coord_weights
        self.norm_rel_coords = norm_rel_coords

        if norm_rel_coords:
            self.rel_coords_scale = nn.Parameter(torch.ones(1))

        # Unused in forward(); retained so checkpoints/state dicts keep the
        # same layout as EnGraphConv.
        self.coords_mlp = nn.Sequential(
            nn.Linear(coord_feat, coord_feat * 4),
            drop,
            GELU(),
            nn.Linear(coord_feat * 4, 1)
        )

        self.num_nearest_neighbors = num_nearest_neighbors

        self.init_eps = init_eps
        self.apply(self.init_)

    def init_(self, module):
        if type(module) in {nn.Linear}:
            # Seems to be needed to keep the network from exploding to NaN with greater depths
            nn.init.normal_(module.weight, std=self.init_eps)

    def forward(self, h, x, e=None, mask=None):
        """Forward pass of the layer.

        Parameters
        ----------
        h : Tensor
            The input node embedding; assumed (batch, nodes, node_feat).
        x : Tensor
            The input coordinates embedding.
        e : Tensor, optional
            The input edge embedding.
        mask : Tensor, optional
            Accepted for API compatibility with EnGraphConv; unused here.

        Returns
        -------
        Tensor
            Updated node embeddings (with a residual connection).
        """
        b, n, d, fourier_features, num_nearest = *h.shape, self.fourier_feat, self.num_nearest_neighbors
        use_nearest = num_nearest > 0

        # Pairwise relative coordinates and squared distances.
        rel_coords = rearrange(x, 'b i d -> b i () d') - rearrange(x, 'b j d -> b () j d')
        rel_dist = (rel_coords ** 2).sum(dim=-1, keepdim=True)

        if use_nearest:
            # Keep only each node's nearest neighbors.
            nbhd_indices = rel_dist[..., 0].topk(num_nearest, dim=-1, largest=False).indices
            rel_coords = batched_index_select(rel_coords, nbhd_indices, dim=2)
            rel_dist = batched_index_select(rel_dist, nbhd_indices, dim=2)

        if fourier_features > 0:
            rel_dist = fourier_encode_dist(rel_dist, num_encodings=fourier_features)
            rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')

        if use_nearest:
            feats_j = batched_index_select(h, nbhd_indices, dim=1)
        else:
            feats_j = rearrange(h, 'b j d -> b () j d')

        feats_i = rearrange(h, 'b i d -> b i () d')
        feats_i, feats_j = broadcast_tensors(feats_i, feats_j)

        # Per-edge messages built from both endpoint features and distance.
        edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1)
        if e is not None:
            edge_input = torch.cat((edge_input, e), dim=-1)

        m_ij = self.edge_mlp(edge_input)
        m_i = m_ij.sum(dim=-2)

        # Node update with a residual connection. (The previous trailing
        # ``tensor.detach()`` calls were discarded-return no-ops and have
        # been removed; they did not free GPU memory.)
        node_mlp_input = torch.cat((h, m_i), dim=-1)
        node_out = self.node_mlp(node_mlp_input) + h
        return node_out

    def __repr__(self):
        return f'EnInvGraphConv(structure=h{self.node_feat}-x{self.coord_feat}-e{self.edge_feat})'
class EnGraphConv(nn.Module):
    """A graph neural network layer.

    EnGraphConv stands for a Graph Convolution E(n)-equivariant layer. It is the
    equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph
    conv layer in a GCN. Unlike EnInvGraphConv it also updates coordinates.
    """

    def __init__(
            self,
            node_feat,
            edge_feat=0,
            coord_feat=16,
            fourier_feat=0,
            norm_rel_coords=False,
            norm_coord_weights=False,
            num_nearest_neighbors=0,
            dropout=0.0,
            init_eps=1e-3
    ):
        """E(n)-equivariant Graph Conv Layer

        Parameters
        ----------
        node_feat : int
            Node feature size.
        edge_feat : int
            Edge feature size.
        coord_feat : int
            Output size of the edge-message MLP.
        fourier_feat : int
            Number of Fourier encodings applied to relative distances.
        norm_rel_coords : boolean
            Whether to normalize relative coordinates, scaled by a learned
            parameter.
        norm_coord_weights : boolean
            Whether to tanh-squash the per-edge coordinate weights.
        num_nearest_neighbors : int
            If > 0, restrict messages to this many nearest neighbors per node.
        dropout : float
            Dropout probability inside the edge/node/coordinate MLPs.
        init_eps : float
            Std of the normal initialization of linear-layer weights.
        """
        super().__init__()
        # Stored so __repr__ can report the layer structure (previously these
        # were never set, making repr() raise AttributeError).
        self.node_feat = node_feat
        self.edge_feat = edge_feat
        self.coord_feat = coord_feat
        self.fourier_feat = fourier_feat
        edge_input_dim = (fourier_feat * 2) + (node_feat * 2) + edge_feat + 1

        drop = nn.Dropout(dropout) if dropout > 0 else nn.Identity()

        self.edge_mlp = nn.Sequential(
            nn.Linear(edge_input_dim, edge_input_dim * 2),
            drop,
            GELU(),
            nn.Linear(edge_input_dim * 2, coord_feat),
            GELU()
        )

        self.node_mlp = nn.Sequential(
            nn.Linear(node_feat + coord_feat, node_feat * 2),
            drop,
            GELU(),
            nn.Linear(node_feat * 2, node_feat),
        )

        self.norm_coord_weights = norm_coord_weights
        self.norm_rel_coords = norm_rel_coords

        if norm_rel_coords:
            self.rel_coords_scale = nn.Parameter(torch.ones(1))

        self.coords_mlp = nn.Sequential(
            nn.Linear(coord_feat, coord_feat * 4),
            drop,
            GELU(),
            nn.Linear(coord_feat * 4, 1)
        )

        self.num_nearest_neighbors = num_nearest_neighbors

        self.init_eps = init_eps
        self.apply(self.init_)

    def init_(self, module):
        if type(module) in {nn.Linear}:
            # Seems to be needed to keep the network from exploding to NaN with greater depths
            nn.init.normal_(module.weight, std=self.init_eps)

    def forward(self, h, x, e=None, mask=None):
        """Forward pass of the layer.

        Parameters
        ----------
        h : Tensor
            The input node embedding; assumed (batch, nodes, node_feat).
        x : Tensor
            The input coordinates embedding.
        e : Tensor, optional
            The input edge embedding.
        mask : Tensor, optional
            Node mask; masked-out pairs get zero coordinate weight.

        Returns
        -------
        tuple of Tensor
            ``(node_out, coords_out)`` — updated node embeddings and
            coordinates, both with residual connections.
        """
        nbhd_indices = None
        b, n, d, fourier_features, num_nearest = *h.shape, self.fourier_feat, self.num_nearest_neighbors
        use_nearest = num_nearest > 0

        # Pairwise relative coordinates and squared distances.
        rel_coords = rearrange(x, 'b i d -> b i () d') - rearrange(x, 'b j d -> b () j d')
        rel_dist = (rel_coords ** 2).sum(dim=-1, keepdim=True)

        if use_nearest:
            # Keep only each node's nearest neighbors.
            nbhd_indices = rel_dist[..., 0].topk(num_nearest, dim=-1, largest=False).indices
            rel_coords = batched_index_select(rel_coords, nbhd_indices, dim=2)
            rel_dist = batched_index_select(rel_dist, nbhd_indices, dim=2)

        if fourier_features > 0:
            rel_dist = fourier_encode_dist(rel_dist, num_encodings=fourier_features)
            rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')

        if use_nearest:
            feats_j = batched_index_select(h, nbhd_indices, dim=1)
        else:
            feats_j = rearrange(h, 'b j d -> b () j d')

        feats_i = rearrange(h, 'b i d -> b i () d')
        feats_i, feats_j = broadcast_tensors(feats_i, feats_j)

        # Per-edge messages built from both endpoint features and distance.
        edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1)
        if e is not None:
            edge_input = torch.cat((edge_input, e), dim=-1)

        m_ij = self.edge_mlp(edge_input)

        # Scalar weight per pair, used to combine relative coordinates.
        coord_weights = self.coords_mlp(m_ij)
        coord_weights = rearrange(coord_weights, 'b i j () -> b i j')

        if self.norm_coord_weights:
            coord_weights = coord_weights.tanh()
        if self.norm_rel_coords:
            rel_coords = normalize(rel_coords, dim=-1) * self.rel_coords_scale

        if mask is not None:
            mask_i = rearrange(mask, 'b i -> b i ()')
            if use_nearest:
                mask_j = batched_index_select(mask, nbhd_indices, dim=1)
            else:
                mask_j = rearrange(mask, 'b j -> b () j')
            mask = mask_i * mask_j
            # Zero the contribution of masked-out node pairs.
            coord_weights.masked_fill_(~mask, 0.)

        # Equivariant coordinate update with a residual connection.
        coords_out = einsum('b i j, b i j c -> b i c', coord_weights, rel_coords) + x

        # Node update with a residual connection. (The previous trailing
        # ``tensor.detach()`` calls were discarded-return no-ops and have
        # been removed; they did not free GPU memory.)
        m_i = m_ij.sum(dim=-2)
        node_mlp_input = torch.cat((h, m_i), dim=-1)
        node_out = self.node_mlp(node_mlp_input) + h
        return node_out, coords_out

    def __repr__(self):
        return f'GConvEn(structure=h{self.node_feat}-x{self.coord_feat}-e{self.edge_feat})'
# -------------------------------------------------------------------------------------------------------------------------------------
# Following code derived from DMLC (https://github.com/dmlc/dgl/blob/master/examples/pytorch/dagnn/main.py):
# -------------------------------------------------------------------------------------------------------------------------------------
class DAGNNConv(nn.Module):
    """Adaptive-depth propagation layer from the DAGNN architecture.

    Propagates node features ``k`` hops with symmetric degree normalization
    and combines all k+1 intermediate representations via a learned,
    sigmoid-gated score vector ``s``.
    """

    def __init__(self, in_dim, k):
        super(DAGNNConv, self).__init__()
        self.s = Parameter(torch.FloatTensor(in_dim, 1))
        self.k = k
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialize the gating projection with sigmoid gain."""
        nn.init.xavier_uniform_(self.s, gain=nn.init.calculate_gain('sigmoid'))

    def forward(self, graph, feats):
        """Propagate ``feats`` over ``graph`` and gate the hop outputs."""
        with graph.local_scope():
            hops = [feats]
            # Symmetric normalization factor D^{-1/2} per node.
            inv_sqrt_deg = torch.pow(graph.in_degrees().float(), -0.5)
            inv_sqrt_deg = inv_sqrt_deg.to(feats.device).unsqueeze(1)
            for _ in range(self.k):
                feats = feats * inv_sqrt_deg
                graph.ndata['h'] = feats
                # One hop of sum-aggregation message passing.
                graph.update_all(fn.copy_u('h', 'm'),
                                 fn.sum('m', 'h'))
                feats = graph.ndata['h'] * inv_sqrt_deg
                hops.append(feats)
            stacked = torch.stack(hops, dim=1)
            # Per-hop gate scores, then a gated combination of all hops.
            scores = sigmoid(torch.matmul(stacked, self.s)).permute(0, 2, 1)
            return torch.matmul(scores, stacked).squeeze()
class MLPLayer(nn.Module):
    """One MLP layer: dropout -> linear -> optional activation."""

    def __init__(self,
                 in_dim,
                 out_dim,
                 bias=True,
                 activation=None,
                 dropout=0):
        super(MLPLayer, self).__init__()
        self.linear = nn.Linear(in_dim, out_dim, bias=bias)
        self.activation = activation
        self.dropout = nn.Dropout(dropout)
        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-init the weight (ReLU gain when applicable); zero the bias."""
        gain = nn.init.calculate_gain('relu') if self.activation is relu else 1.
        nn.init.xavier_uniform_(self.linear.weight, gain=gain)
        if self.linear.bias is not None:
            nn.init.zeros_(self.linear.bias)

    def forward(self, feats):
        """Apply dropout, the linear projection, then the activation if set."""
        projected = self.linear(self.dropout(feats))
        return self.activation(projected) if self.activation else projected
class DAGNN(nn.Module):
    """DAGNN model: a two-layer MLP followed by adaptive-depth propagation."""

    def __init__(self,
                 k,
                 in_dim,
                 hid_dim,
                 out_dim,
                 bias=True,
                 activation=relu,
                 dropout=0, ):
        super(DAGNN, self).__init__()
        # Node-feature transformation: in_dim -> hid_dim -> out_dim.
        self.mlp = nn.ModuleList([
            MLPLayer(in_dim=in_dim, out_dim=hid_dim, bias=bias,
                     activation=activation, dropout=dropout),
            MLPLayer(in_dim=hid_dim, out_dim=out_dim, bias=bias,
                     activation=None, dropout=dropout),
        ])
        # Learned k-hop propagation over the graph.
        self.dagnn = DAGNNConv(in_dim=out_dim, k=k)

    def forward(self, graph, feats):
        """Transform node features, then propagate them over ``graph``."""
        for mlp_layer in self.mlp:
            feats = mlp_layer(feats)
        return self.dagnn(graph, feats)
# -------------------------------------------------------------------------------------------------------------------------------------
# Following code curated for DeepInteract (https://github.com/jianlin-cheng/DeepInteract):
# -------------------------------------------------------------------------------------------------------------------------------------
class SAGEConv(nn.Module):
    """GraphSAGE convolution module used by the GraphSAGE model.

    This variant of the SAGEConv layer is able to infer edges via a soft
    estimation on messages.

    Parameters
    ----------
    in_feat : int
        Input feature size.
    out_feat : int
        Output feature size.
    """

    def __init__(self, in_feat, out_feat):
        super(SAGEConv, self).__init__()
        # Projects the concatenation of a node's own features and its
        # neighborhood mean down to the output size.
        self.linear = nn.Linear(in_feat * 2, out_feat)

    def forward(self, g, h):
        """Forward computation.

        Parameters
        ----------
        g : Graph
            The input graph.
        h : Tensor
            The input node feature.
        """
        with g.local_scope():
            g.ndata['h'] = h
            # Mean-aggregate neighbor features via DGL message passing.
            g.update_all(message_func=fn.copy_u('h', 'm'),
                         reduce_func=fn.mean('m', 'h_N'))
            neighborhood = g.ndata['h_N']
            combined = torch.cat([h, neighborhood], dim=1)
            return self.linear(combined)
| 35.34727 | 135 | 0.556207 | from typing import Dict
import dgl
import dgl.function as fn
import numpy as np
import torch
import torch.nn as nn
from dgl.nn.pytorch.glob import AvgPooling, MaxPooling
from dgl.nn.pytorch.softmax import edge_softmax
from einops import rearrange
from packaging import version
from torch import Tensor, einsum, broadcast_tensors, relu, sigmoid
from torch.nn import GELU
from torch.nn.functional import normalize
from torch.nn.parameter import Parameter
from project.utils.fibers import Fiber, fiber2head
from project.utils.from_se3cnn.utils_steerable import _basis_transformation_Q_J, get_spherical_from_cartesian_torch, \
precompute_sh
from project.utils.utils import fourier_encode_dist, batched_index_select
from project.utils.utils_profiling import profile
@profile
def get_basis(Y, max_degree):
device = Y[0].device
with torch.no_grad():
basis = {}
for d_in in range(max_degree + 1):
for d_out in range(max_degree + 1):
K_Js = []
for J in range(abs(d_in - d_out), d_in + d_out + 1):
Q_J = _basis_transformation_Q_J(J, d_in, d_out)
Q_J = Q_J.float().T.to(device)
K_J = torch.matmul(Y[J], Q_J)
K_Js.append(K_J)
size = (-1, 1, 2 * d_out + 1, 1, 2 * d_in + 1, 2 * min(d_in, d_out) + 1)
basis[f'{d_in},{d_out}'] = torch.stack(K_Js, -1).view(*size)
return basis
def get_basis_and_r(G, max_degree):
r_ij = get_spherical_from_cartesian_torch(G.edata['d'])
Y = precompute_sh(r_ij, 2 * max_degree)
basis = get_basis(Y, max_degree)
r = torch.sqrt(torch.sum(G.edata['d'] ** 2, -1, keepdim=True))
return basis, r
e, edge_dim: int = 0):
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.edge_dim = edge_dim
self.self_interaction = self_interaction
self.kernel_unary = nn.ModuleDict()
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim=edge_dim)
self.kernel_self = nn.ParameterDict()
if self_interaction:
for m_in, d_in in self.f_in.structure:
if d_in in self.f_out.degrees:
m_out = self.f_out.structure_dict[d_in]
W = nn.Parameter(torch.randn(1, m_out, m_in) / np.sqrt(m_in))
self.kernel_self[f'{d_in}'] = W
def __repr__(self):
return f'GConvSE3(structure={self.f_out}, self_interaction={self.self_interaction})'
def udf_u_mul_e(self, d_out):
def fnc(edges):
msg = 0
for m_in, d_in in self.f_in.structure:
src = edges.src[f'{d_in}'].view(-1, m_in * (2 * d_in + 1), 1)
edge = edges.data[f'({d_in},{d_out})']
msg = msg + torch.matmul(edge, src)
msg = msg.view(msg.shape[0], -1, 2 * d_out + 1)
if self.self_interaction:
if f'{d_out}' in self.kernel_self.keys():
dst = edges.dst[f'{d_out}']
W = self.kernel_self[f'{d_out}']
msg = msg + torch.matmul(W, dst)
return {'msg': msg.view(msg.shape[0], -1, 2 * d_out + 1)}
return fnc
@profile
def forward(self, h, G=None, r=None, basis=None, **kwargs):
with G.local_scope():
for k, v in h.items():
G.ndata[k] = v
if 'w' in G.edata.keys():
w = G.edata['w']
feat = torch.cat([w, r], -1)
else:
feat = torch.cat([r, ], -1)
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
etype = f'({di},{do})'
G.edata[etype] = self.kernel_unary[etype](feat, basis)
for d in self.f_out.degrees:
G.update_all(self.udf_u_mul_e(d), fn.mean('msg', f'out{d}'))
return {f'{d}': G.ndata[f'out{d}'] for d in self.f_out.degrees}
class RadialFunc(nn.Module):
def __init__(self, num_freq, in_dim, out_dim, edge_dim: int = 0):
super().__init__()
self.num_freq = num_freq
self.in_dim = in_dim
self.mid_dim = 32
self.out_dim = out_dim
self.edge_dim = edge_dim
self.net = nn.Sequential(nn.Linear(self.edge_dim + 1, self.mid_dim),
BN(self.mid_dim),
nn.ReLU(),
nn.Linear(self.mid_dim, self.mid_dim),
BN(self.mid_dim),
nn.ReLU(),
nn.Linear(self.mid_dim, self.num_freq * in_dim * out_dim))
nn.init.kaiming_uniform_(self.net[0].weight)
nn.init.kaiming_uniform_(self.net[3].weight)
nn.init.kaiming_uniform_(self.net[6].weight)
def __repr__(self):
return f"RadialFunc(edge_dim={self.edge_dim}, in_dim={self.in_dim}, out_dim={self.out_dim})"
def forward(self, x):
y = self.net(x)
return y.view(-1, self.out_dim, 1, self.in_dim, 1, self.num_freq)
class PairwiseConv(nn.Module):
def __init__(self, degree_in: int, nc_in: int, degree_out: int,
nc_out: int, edge_dim: int = 0):
super().__init__()
self.degree_in = degree_in
self.degree_out = degree_out
self.nc_in = nc_in
self.nc_out = nc_out
self.num_freq = 2 * min(degree_in, degree_out) + 1
self.d_out = 2 * degree_out + 1
self.edge_dim = edge_dim
self.rp = RadialFunc(self.num_freq, nc_in, nc_out, self.edge_dim)
@profile
def forward(self, feat, basis):
R = self.rp(feat)
kernel = torch.sum(R * basis[f'{self.degree_in},{self.degree_out}'], -1)
return kernel.view(kernel.shape[0], self.d_out * self.nc_out, -1)
class G1x1SE3(nn.Module):
def __init__(self, f_in, f_out, learnable=True):
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.transform = nn.ParameterDict()
for m_out, d_out in self.f_out.structure:
m_in = self.f_in.structure_dict[d_out]
self.transform[str(d_out)] = nn.Parameter(torch.randn(m_out, m_in) / np.sqrt(m_in), requires_grad=learnable)
def __repr__(self):
return f"G1x1SE3(structure={self.f_out})"
def forward(self, features, **kwargs):
output = {}
for k, v in features.items():
if str(k) in self.transform.keys():
output[k] = torch.matmul(self.transform[str(k)], v)
return output
class GNormSE3(nn.Module):
def __init__(self, fiber, nonlin=nn.ReLU(inplace=True), num_layers: int = 0):
super().__init__()
self.fiber = fiber
self.nonlin = nonlin
self.num_layers = num_layers
self.eps = 1e-12
self.transform = nn.ModuleDict()
for m, d in self.fiber.structure:
self.transform[str(d)] = self._build_net(int(m))
def __repr__(self):
return f"GNormSE3(num_layers={self.num_layers}, nonlin={self.nonlin})"
def _build_net(self, m: int):
net = []
for i in range(self.num_layers):
net.append(BN(int(m)))
net.append(self.nonlin)
net.append(nn.Linear(m, m, bias=(i == self.num_layers - 1)))
nn.init.kaiming_uniform_(net[-1].weight)
if self.num_layers == 0:
net.append(BN(int(m)))
net.append(self.nonlin)
return nn.Sequential(*net)
@profile
def forward(self, features, **kwargs):
output = {}
for k, v in features.items():
norm = v.norm(2, -1, keepdim=True).clamp_min(self.eps).expand_as(v)
phase = v / norm
transformed = self.transform[str(k)](norm[..., 0]).unsqueeze(-1)
output[k] = (transformed * phase).view(*v.shape)
return output
class BN(nn.Module):
def __init__(self, m):
super().__init__()
self.bn = nn.LayerNorm(m)
def forward(self, x):
return self.bn(x)
class GConvSE3Partial(nn.Module):
def __init__(self, f_in, f_out, edge_dim: int = 0):
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.edge_dim = edge_dim
self.kernel_unary = nn.ModuleDict()
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim=edge_dim)
def __repr__(self):
return f'GConvSE3Partial(structure={self.f_out})'
def udf_u_mul_e(self, d_out):
def fnc(edges):
msg = 0
for m_in, d_in in self.f_in.structure:
src = edges.src[f'{d_in}'].view(-1, m_in * (2 * d_in + 1), 1)
edge = edges.data[f'({d_in},{d_out})']
msg = msg + torch.matmul(edge, src)
msg = msg.view(msg.shape[0], -1, 2 * d_out + 1)
return {f'out{d_out}': msg.view(msg.shape[0], -1, 2 * d_out + 1)}
return fnc
@profile
def forward(self, h, G=None, r=None, basis=None, **kwargs):
with G.local_scope():
for k, v in h.items():
G.ndata[k] = v
if 'w' in G.edata.keys():
w = G.edata['w'] -1)
else:
feat = torch.cat([r, ], -1)
for (mi, di) in self.f_in.structure:
for (mo, do) in self.f_out.structure:
etype = f'({di},{do})'
G.edata[etype] = self.kernel_unary[etype](feat, basis)
for d in self.f_out.degrees:
G.apply_edges(self.udf_u_mul_e(d))
return {f'{d}': G.edata[f'out{d}'] for d in self.f_out.degrees}
class GMABSE3(nn.Module):
def __init__(self, f_value: Fiber, f_key: Fiber, n_heads: int):
super().__init__()
self.f_value = f_value
self.f_key = f_key
self.n_heads = n_heads
self.new_dgl = version.parse(dgl.__version__) > version.parse('0.4.4')
def __repr__(self):
return f'GMABSE3(n_heads={self.n_heads}, structure={self.f_value})'
def udf_u_mul_e(self, d_out):
def fnc(edges):
attn = edges.data['a']
value = edges.data[f'v{d_out}']
msg = attn.unsqueeze(-1).unsqueeze(-1) * value
return {'m': msg}
return fnc
@profile
def forward(self, v, k: Dict = None, q: Dict = None, G=None, **kwargs):
with G.local_scope():
G.edata[f'v{d}'] = v[f'{d}'].view(-1, self.n_heads, m // self.n_heads, 2 * d + 1)
G.edata['k'] = fiber2head(k, self.n_heads, self.f_key, squeeze=True)
G.ndata['q'] = fiber2head(q, self.n_heads, self.f_key, squeeze=True)
= G.edata.pop('e')
if self.new_dgl:
n_edges = G.edata['k'].shape[0]
e = e.view([n_edges, self.n_heads])
e = e / np.sqrt(self.f_key.n_features)
G.edata['a'] = edge_softmax(G, e)
for d in self.f_value.degrees:
G.update_all(self.udf_u_mul_e(d), fn.sum('m', f'out{d}'))
output = {}
for m, d in self.f_value.structure:
output[f'{d}'] = G.ndata[f'out{d}'].view(-1, m, 2 * d + 1)
return output
class GSE3Res(nn.Module):
def __init__(self, f_in: Fiber, f_out: Fiber, edge_dim: int = 0, div: float = 4,
n_heads: int = 1, learnable_skip=True):
super().__init__()
self.f_in = f_in
self.f_out = f_out
self.div = div
self.n_heads = n_heads
t = {k: int(v // div) for k, v in self.f_out.structure_dict.items()}
self.f_mid_out = Fiber(dictionary=f_mid_out)
f_mid_in = {d: m for d, m in f_mid_out.items() if d in self.f_in.degrees}
self.f_mid_in = Fiber(dictionary=f_mid_in)
self.edge_dim = edge_dim
self.GMAB = nn.ModuleDict()
self.GMAB['v'] = GConvSE3Partial(f_in, self.f_mid_out, edge_dim=edge_dim)
self.GMAB['k'] = GConvSE3Partial(f_in, self.f_mid_in, edge_dim=edge_dim)
self.GMAB['q'] = G1x1SE3(f_in, self.f_mid_in)
self.GMAB['attn'] = GMABSE3(self.f_mid_out, self.f_mid_in, n_heads=n_heads)
self.project = G1x1SE3(self.f_mid_out, f_out, learnable=learnable_skip)
self.add = GSum(f_out, f_in)
assert self.add.f_out.structure_dict == f_out.structure_dict, \
'skip connection would change output structure'
@profile
def forward(self, features, G, **kwargs):
v = self.GMAB['v'](features, G=G, **kwargs)
k = self.GMAB['k'](features, G=G, **kwargs)
q = self.GMAB['q'](features, G=G)
z = self.GMAB['attn'](v, k=k, q=q, G=G)
z = self.project(z)
z = self.add(z, features)
return z
f_y: Fiber):
super().__init__()
self.f_x = f_x
self.f_y = f_y
self.f_out = Fiber.combine_max(f_x, f_y)
def __repr__(self):
return f"GSum(structure={self.f_out})"
def forward(self, x, y):
out = {}
for k in self.f_out.degrees:
k = str(k)
if (k in x) and (k in y):
if x[k].shape[1] > y[k].shape[1]:
diff = x[k].shape[1] - y[k].shape[1]
zeros = torch.zeros(x[k].shape[0], diff, x[k].shape[2]).to(y[k].device)
y[k] = torch.cat([y[k], zeros], 1)
elif x[k].shape[1] < y[k].shape[1]:
diff = y[k].shape[1] - x[k].shape[1]
zeros = torch.zeros(x[k].shape[0], diff, x[k].shape[2]).to(y[k].device)
x[k] = torch.cat([x[k], zeros], 1)
out[k] = x[k] + y[k]
elif k in x:
out[k] = x[k]
elif k in y:
out[k] = y[k]
return out
class GAvgPooling(nn.Module):
def __init__(self, type='0'):
super().__init__()
self.pool = AvgPooling()
self.type = type
@profile
def forward(self, features, G, **kwargs):
if self.type == '0':
h = features['0'][..., -1]
pooled = self.pool(G, h)
elif self.type == '1':
pooled = []
for i in range(3):
h_i = features['1'][..., i]
pooled.append(self.pool(G, h_i).unsqueeze(-1))
pooled = torch.cat(pooled, axis=-1)
pooled = {'1': pooled}
else:
print('GAvgPooling for type > 0 not implemented')
exit()
return pooled
class GMaxPooling(nn.Module):
def __init__(self):
super().__init__()
self.pool = MaxPooling()
@profile
def forward(self, features, G, **kwargs):
h = features['0'][..., -1]
return self.pool(G, h)
class EnInvGraphConv(nn.Module):
def __init__(
self,
node_feat,
edge_feat=0,
coord_feat=16,
fourier_feat=0,
norm_rel_coords=False,
norm_coord_weights=False,
num_nearest_neighbors=0,
dropout=0.0,
init_eps=1e-3
):
super().__init__()
self.fourier_feat = fourier_feat
edge_input_dim = (fourier_feat * 2) + (node_feat * 2) + edge_feat + 1
dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.edge_mlp = nn.Sequential(
nn.Linear(edge_input_dim, edge_input_dim * 2),
dropout,
GELU(),
nn.Linear(edge_input_dim * 2, coord_feat),
GELU()
)
self.node_mlp = nn.Sequential(
nn.Linear(node_feat + coord_feat, node_feat * 2),
dropout,
GELU(),
nn.Linear(node_feat * 2, node_feat),
)
self.norm_coord_weights = norm_coord_weights
self.norm_rel_coords = norm_rel_coords
if norm_rel_coords:
self.rel_coords_scale = nn.Parameter(torch.ones(1))
self.coords_mlp = nn.Sequential(
nn.Linear(coord_feat, coord_feat * 4),
dropout,
GELU(),
nn.Linear(coord_feat * 4, 1)
)
self.num_nearest_neighbors = num_nearest_neighbors
self.init_eps = init_eps
self.apply(self.init_)
def init_(self, module):
if type(module) in {nn.Linear}:
nn.init.normal_(module.weight, std=self.init_eps)
def forward(self, h, x, e=None, mask=None):
b, n, d, fourier_features, num_nearest = *h.shape, self.fourier_feat, self.num_nearest_neighbors
use_nearest = num_nearest > 0
nbhd_indices = None
rel_coords = rearrange(x, 'b i d -> b i () d') - rearrange(x, 'b j d -> b () j d')
rel_dist = (rel_coords ** 2).sum(dim=-1, keepdim=True)
if use_nearest:
nbhd_indices = rel_dist[..., 0].topk(num_nearest, dim=-1, largest=False).indices
rel_coords = batched_index_select(rel_coords, nbhd_indices, dim=2)
rel_dist = batched_index_select(rel_dist, nbhd_indices, dim=2)
if fourier_features > 0:
rel_dist = fourier_encode_dist(rel_dist, num_encodings=fourier_features)
rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')
if use_nearest:
feats_j = batched_index_select(h, nbhd_indices, dim=1)
else:
feats_j = rearrange(h, 'b j d -> b () j d')
feats_i = rearrange(h, 'b i d -> b i () d')
feats_i, feats_j = broadcast_tensors(feats_i, feats_j)
edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1)
if e is not None:
edge_input = torch.cat((edge_input, e), dim=-1)
m_ij = self.edge_mlp(edge_input)
m_i = m_ij.sum(dim=-2)
node_mlp_input = torch.cat((h, m_i), dim=-1)
node_out = self.node_mlp(node_mlp_input) + h
rel_coords.detach()
rel_dist.detach()
feats_i.detach()
feats_j.detach()
edge_input.detach()
m_i.detach()
m_ij.detach()
node_mlp_input.detach()
if nbhd_indices is not None:
nbhd_indices.detach()
if mask is not None:
mask.detach()
return node_out
def __repr__(self):
return f'EnInvGraphConv(structure=h{self.node_feat}-x{self.coord_feat}-e{self.edge_feat})'
class EnGraphConv(nn.Module):
def __init__(
self,
node_feat,
edge_feat=0,
coord_feat=16,
fourier_feat=0,
norm_rel_coords=False,
norm_coord_weights=False,
num_nearest_neighbors=0,
dropout=0.0,
init_eps=1e-3
):
super().__init__()
self.fourier_feat = fourier_feat
edge_input_dim = (fourier_feat * 2) + (node_feat * 2) + edge_feat + 1
dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.edge_mlp = nn.Sequential(
nn.Linear(edge_input_dim, edge_input_dim * 2),
dropout,
GELU(),
nn.Linear(edge_input_dim * 2, coord_feat),
GELU()
)
self.node_mlp = nn.Sequential(
nn.Linear(node_feat + coord_feat, node_feat * 2),
dropout,
GELU(),
nn.Linear(node_feat * 2, node_feat),
)
self.norm_coord_weights = norm_coord_weights
self.norm_rel_coords = norm_rel_coords
if norm_rel_coords:
self.rel_coords_scale = nn.Parameter(torch.ones(1))
self.coords_mlp = nn.Sequential(
nn.Linear(coord_feat, coord_feat * 4),
dropout,
GELU(),
nn.Linear(coord_feat * 4, 1)
)
self.num_nearest_neighbors = num_nearest_neighbors
self.init_eps = init_eps
self.apply(self.init_)
def init_(self, module):
if type(module) in {nn.Linear}:
nn.init.normal_(module.weight, std=self.init_eps)
def forward(self, h, x, e=None, mask=None):
nbhd_indices = None
b, n, d, fourier_features, num_nearest = *h.shape, self.fourier_feat, self.num_nearest_neighbors
use_nearest = num_nearest > 0
rel_coords = rearrange(x, 'b i d -> b i () d') - rearrange(x, 'b j d -> b () j d')
rel_dist = (rel_coords ** 2).sum(dim=-1, keepdim=True)
if use_nearest:
nbhd_indices = rel_dist[..., 0].topk(num_nearest, dim=-1, largest=False).indices
rel_coords = batched_index_select(rel_coords, nbhd_indices, dim=2)
rel_dist = batched_index_select(rel_dist, nbhd_indices, dim=2)
if fourier_features > 0:
rel_dist = fourier_encode_dist(rel_dist, num_encodings=fourier_features)
rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')
if use_nearest:
feats_j = batched_index_select(h, nbhd_indices, dim=1)
else:
feats_j = rearrange(h, 'b j d -> b () j d')
feats_i = rearrange(h, 'b i d -> b i () d')
feats_i, feats_j = broadcast_tensors(feats_i, feats_j)
edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1)
if e is not None:
edge_input = torch.cat((edge_input, e), dim=-1)
m_ij = self.edge_mlp(edge_input)
coord_weights = self.coords_mlp(m_ij)
coord_weights = rearrange(coord_weights, 'b i j () -> b i j')
if self.norm_coord_weights:
coord_weights = coord_weights.tanh()
if self.norm_rel_coords:
rel_coords = normalize(rel_coords, dim=-1) * self.rel_coords_scale
if mask is not None:
mask_i = rearrange(mask, 'b i -> b i ()')
if use_nearest:
mask_j = batched_index_select(mask, nbhd_indices, dim=1)
else:
mask_j = rearrange(mask, 'b j -> b () j')
mask = mask_i * mask_j
coord_weights.masked_fill_(~mask, 0.)
mask_i.detach()
mask_j.detach()
coords_out = einsum('b i j, b i j c -> b i c', coord_weights, rel_coords) + x
m_i = m_ij.sum(dim=-2)
node_mlp_input = torch.cat((h, m_i), dim=-1)
node_out = self.node_mlp(node_mlp_input) + h
rel_coords.detach()
rel_dist.detach()
feats_i.detach()
feats_j.detach()
edge_input.detach()
m_i.detach()
m_ij.detach()
coord_weights.detach()
node_mlp_input.detach()
if nbhd_indices is not None:
nbhd_indices.detach()
if mask is not None:
mask.detach()
return node_out, coords_out
def __repr__(self):
return f'GConvEn(structure=h{self.node_feat}-x{self.coord_feat}-e{self.edge_feat})'
class DAGNNConv(nn.Module):
def __init__(self,
in_dim,
k):
super(DAGNNConv, self).__init__()
self.s = Parameter(torch.FloatTensor(in_dim, 1))
self.k = k
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain('sigmoid')
nn.init.xavier_uniform_(self.s, gain=gain)
def forward(self, graph, feats):
with graph.local_scope():
results = [feats]
degs = graph.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm = norm.to(feats.device).unsqueeze(1)
for _ in range(self.k):
feats = feats * norm
graph.ndata['h'] = feats
graph.update_all(fn.copy_u('h', 'm'),
fn.sum('m', 'h'))
feats = graph.ndata['h']
feats = feats * norm
results.append(feats)
H = torch.stack(results, dim=1)
S = sigmoid(torch.matmul(H, self.s))
S = S.permute(0, 2, 1)
H = torch.matmul(S, H).squeeze()
return H
class MLPLayer(nn.Module):
def __init__(self,
in_dim,
out_dim,
bias=True,
activation=None,
dropout=0):
super(MLPLayer, self).__init__()
self.linear = nn.Linear(in_dim, out_dim, bias=bias)
self.activation = activation
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
gain = 1.
if self.activation is relu:
gain = nn.init.calculate_gain('relu')
nn.init.xavier_uniform_(self.linear.weight, gain=gain)
if self.linear.bias is not None:
nn.init.zeros_(self.linear.bias)
def forward(self, feats):
feats = self.dropout(feats)
feats = self.linear(feats)
if self.activation:
feats = self.activation(feats)
return feats
class DAGNN(nn.Module):
    """Deep Adaptive Graph Neural Network.

    A two-layer MLP transforms the input features, after which a single
    DAGNNConv propagates them for ``k`` hops and adaptively mixes the
    per-hop representations.
    """

    def __init__(self,
                 k,
                 in_dim,
                 hid_dim,
                 out_dim,
                 bias=True,
                 activation=relu,
                 dropout=0, ):
        super(DAGNN, self).__init__()
        layers = [
            # Hidden layer with the configured activation.
            MLPLayer(in_dim=in_dim, out_dim=hid_dim, bias=bias,
                     activation=activation, dropout=dropout),
            # Output layer is always linear.
            MLPLayer(in_dim=hid_dim, out_dim=out_dim, bias=bias,
                     activation=None, dropout=dropout),
        ]
        self.mlp = nn.ModuleList(layers)
        self.dagnn = DAGNNConv(in_dim=out_dim, k=k)

    def forward(self, graph, feats):
        """Transform node features, then propagate them over the graph."""
        for layer in self.mlp:
            feats = layer(feats)
        return self.dagnn(graph, feats)
class SAGEConv(nn.Module):
    """GraphSAGE convolution with mean aggregation.

    Each node's output is a linear map of its own features concatenated
    with the mean of its neighbours' features.
    """

    def __init__(self, in_feat, out_feat):
        super(SAGEConv, self).__init__()
        # Input is [own features || neighbour mean], hence in_feat * 2.
        self.linear = nn.Linear(in_feat * 2, out_feat)

    def forward(self, g, h):
        with g.local_scope():
            g.ndata['h'] = h
            # Average incoming neighbour features into 'h_N'.
            g.update_all(message_func=fn.copy_u('h', 'm'),
                         reduce_func=fn.mean('m', 'h_N'))
            combined = torch.cat([h, g.ndata['h_N']], dim=1)
            return self.linear(combined)
| true | true |
f736a73c419ca895e86b294fb0a1f7568fe06235 | 5,281 | py | Python | site-packages/pskf/tools/run/pythonmodule.py | jjokella/pyshemkf | 61a6329f7aa5739a38a68504fd6a44568fcd833b | [
"MIT"
] | 5 | 2019-02-06T10:52:52.000Z | 2021-05-21T09:32:45.000Z | site-packages/pskf/tools/run/pythonmodule.py | jjokella/pyshemkf | 61a6329f7aa5739a38a68504fd6a44568fcd833b | [
"MIT"
] | null | null | null | site-packages/pskf/tools/run/pythonmodule.py | jjokella/pyshemkf | 61a6329f7aa5739a38a68504fd6a44568fcd833b | [
"MIT"
] | 1 | 2018-12-04T11:39:10.000Z | 2018-12-04T11:39:10.000Z | """
pythonmodule
============
Provides utility functions for working with SHEMAT-Suite output in Python.
"""
import os
import shutil
###############################################################################
# Directories #
###############################################################################
# Root of the pyshemkf checkout in the user's home directory, plus its
# standard script and output subdirectories.
python_dir = os.environ['HOME']+'/pyshemkf'
python_scripts_dir = python_dir+'/scripts'
python_output_dir = python_dir+'/output'
###############################################################################
# Output #
###############################################################################
def py_output_dir(tag, ending):
    """Return the Python output directory for a tag and file ending.

    Parameters
    ----------
    tag : string
        Subdirectory name in ~/pyshemkf/output
    ending : string
        File ending of output (e.g. npy, png, jpg, eps, pdf).

    Returns
    ----------
    py_output_dir : string
        Designated output directory.
    """
    return "/".join([python_output_dir, tag, ending])
def py_output_filename(tag, filename, spec, ending):
    """Return the absolute output filename including a specifier.

    Parameters
    ----------
    tag : string
        Subdirectory name in ~/pyshemkf/output
    filename : string
        Filename body, without ending.
    spec : string
        Output identifier appended to the filename body.  The convention
        (see pskf.tools.plot.spec.specl()) is of the form
        'model_2018_01_01_b', but any string is accepted.
    ending : string
        File ending of output (e.g. npy, png, jpg, eps, pdf).

    Returns
    ----------
    py_output_filename : string
        Absolute filename for output file.
    """
    directory = py_output_dir(tag, ending)
    return "{}/{}_{}.{}".format(directory, filename, spec, ending)
def py_simple_output_filename(filename, tag, ending):
    """Return the absolute output filename without a specifier.

    Parameters
    ----------
    filename : string
        Filename body, without ending; used verbatim as the name body.
    tag : string
        Subdirectory name in ~/pyshemkf/output
    ending : string
        File ending of output (e.g. npy, png, jpg, eps, pdf).

    Returns
    ----------
    py_simple_output_filename : string
        Absolute filename for output file.
    """
    return "{}/{}.{}".format(py_output_dir(tag, ending), filename, ending)
###############################################################################
# Script Backup #
###############################################################################
def py_backup(python_sub_dir, tag, filename, ending, spec):
    """Copy a python script to a backup directory, adding a specifier.

    Parameters
    ----------
    python_sub_dir : string
        Absolute subdirectory of the script, mostly
        ~/PythonExecDir and ~/pyshemkf/scripts
    tag : string
        Subdirectory name in python_sub_dir.
    filename : string
        Filename body, without ending.
    ending : string
        File ending of the script (e.g. py).
    spec : string
        Output identifier appended to the filename body.  The convention
        (see pskf.tools.plot.spec.specl()) is of the form
        'model_2018_01_01_b', but any string is accepted.

    Notes
    ----------
    Returns nothing; copies the file into <python_sub_dir>/<tag>/backup,
    creating that directory if necessary.  An existing backup with the
    same name is replaced (with a notification on stdout).
    """
    tag_dir = os.path.join(python_sub_dir, tag)

    # Script to be backed up.
    py_file_name = os.path.join(tag_dir, filename + "." + ending)

    # Possibly create backup directory.  exist_ok avoids the race between
    # a separate existence check and mkdir.
    backup_dir = os.path.join(tag_dir, "backup")
    os.makedirs(backup_dir, exist_ok=True)

    # Backup script name, with the specifier inserted before the ending.
    py_backup_file_name = os.path.join(
        backup_dir, filename + "_" + spec + "." + ending)

    # Remove a pre-existing backup explicitly so the user is informed;
    # copyfile would overwrite it silently otherwise.
    if os.path.isfile(py_backup_file_name):
        os.remove(py_backup_file_name)
        print('Removed old file: '+py_backup_file_name)

    shutil.copyfile(py_file_name, py_backup_file_name)
    print('Backup as '+py_backup_file_name)
| 28.392473 | 79 | 0.514675 | import os
import shutil
| true | true |
f736a75f3739344c19eb83880c8470b6c22cdbf3 | 3,490 | py | Python | crtk/mapping.py | hao-n/crtk | e63d70ed2abfc70964618f666c5b2ae422aa4128 | [
"MIT"
] | 1 | 2020-09-23T09:06:52.000Z | 2020-09-23T09:06:52.000Z | crtk/mapping.py | hao-n/crtk | e63d70ed2abfc70964618f666c5b2ae422aa4128 | [
"MIT"
] | null | null | null | crtk/mapping.py | hao-n/crtk | e63d70ed2abfc70964618f666c5b2ae422aa4128 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Smart Contract Reverse Engineering Toolkit: Mapping
#
# Copyright (C) 2019-2020 CRTK Project
# Author: Hao-Nan Zhu <hao-n.zhu@outlook.com>
# URL: <https://github.com/hao-n/crtk>
# For license information, see LICENSE
# EVM opcode table: hex byte (two lowercase digits) -> mnemonic.
opcode_mapping = {
    # Stop and arithmetic operations (0x00 - 0x0b).
    '00': 'STOP',
    '01': 'ADD',
    '02': 'MUL',
    '03': 'SUB',
    '04': 'DIV',
    '05': 'SDIV',
    '06': 'MOD',
    '07': 'SMOD',
    '08': 'ADDMOD',
    '09': 'MULMOD',
    '0a': 'EXP',
    '0b': 'SIGNEXTEND',
    # Comparison and bitwise logic operations (0x10 - 0x1d).
    '10': 'LT',
    '11': 'GT',
    '12': 'SLT',
    '13': 'SGT',
    '14': 'EQ',
    '15': 'ISZERO',
    '16': 'AND',
    '17': 'OR',
    '18': 'XOR',
    '19': 'NOT',
    '1a': 'BYTE',
    '1b': 'SHL',
    '1c': 'SHR',
    '1d': 'SAR',
    '20': 'SHA3',
    # Environmental information (0x30 - 0x3e).
    '30': 'ADDRESS',
    '31': 'BALANCE',
    '32': 'ORIGIN',
    '33': 'CALLER',
    '34': 'CALLVALUE',
    '35': 'CALLDATALOAD',
    '36': 'CALLDATASIZE',
    '37': 'CALLDATACOPY',
    '38': 'CODESIZE',
    '39': 'CODECOPY',
    '3a': 'GASPRICE',
    '3b': 'EXTCODESIZE',
    '3c': 'EXTCODECOPY',
    '3d': 'RETURNDATASIZE',
    '3e': 'RETURNDATACOPY',
    # Block information (0x40 - 0x45).
    '40': 'BLOCKHASH',
    '41': 'COINBASE',
    '42': 'TIMESTAMP',
    '43': 'NUMBER',
    '44': 'DIFFICULTY',
    '45': 'GASLIMIT',
    # Stack, memory, storage and flow operations (0x50 - 0x5b).
    '50': 'POP',
    '51': 'MLOAD',
    '52': 'MSTORE',
    '53': 'MSTORE8',
    '54': 'SLOAD',
    '55': 'SSTORE',
    '56': 'JUMP',
    '57': 'JUMPI',
    '58': 'PC',
    '59': 'MSIZE',
    '5a': 'GAS',
    '5b': 'JUMPDEST',
}
# The regular families PUSH1-PUSH32 (0x60-0x7f), DUP1-DUP16 (0x80-0x8f),
# SWAP1-SWAP16 (0x90-0x9f) and LOG0-LOG4 (0xa0-0xa4) are generated
# instead of being written out by hand.
opcode_mapping.update(('%02x' % (0x60 + i), 'PUSH%d' % (i + 1)) for i in range(32))
opcode_mapping.update(('%02x' % (0x80 + i), 'DUP%d' % (i + 1)) for i in range(16))
opcode_mapping.update(('%02x' % (0x90 + i), 'SWAP%d' % (i + 1)) for i in range(16))
opcode_mapping.update(('%02x' % (0xa0 + i), 'LOG%d' % i) for i in range(5))
# System operations (0xf0 - 0xff).
opcode_mapping.update({
    'f0': 'CREATE',
    'f1': 'CALL',
    'f2': 'CALLCODE',
    'f3': 'RETURN',
    'f4': 'DELEGATECALL',
    'f5': 'CALLBLACKBOX',
    'fa': 'STATICCALL',
    'fd': 'REVERT',
    'fe': 'INVALID',
    'ff': 'SELFDESTRUCT',  # historically named SUICIDE
})
# Number of immediate operand bytes consumed by each PUSH opcode:
# PUSH1 (0x60) carries 1 byte ... PUSH32 (0x7f) carries 32 bytes.
push_mapping = {'%02x' % (0x60 + n - 1): n for n in range(1, 33)}
| 18.763441 | 53 | 0.401146 |
# EVM opcode table: hex byte (two lowercase digits) -> mnemonic.
opcode_mapping = {
    # Stop and arithmetic operations (0x00 - 0x0b).
    '00': 'STOP',
    '01': 'ADD',
    '02': 'MUL',
    '03': 'SUB',
    '04': 'DIV',
    '05': 'SDIV',
    '06': 'MOD',
    '07': 'SMOD',
    '08': 'ADDMOD',
    '09': 'MULMOD',
    '0a': 'EXP',
    '0b': 'SIGNEXTEND',
    # Comparison and bitwise logic operations (0x10 - 0x1d).
    '10': 'LT',
    '11': 'GT',
    '12': 'SLT',
    '13': 'SGT',
    '14': 'EQ',
    '15': 'ISZERO',
    '16': 'AND',
    '17': 'OR',
    '18': 'XOR',
    '19': 'NOT',
    '1a': 'BYTE',
    '1b': 'SHL',
    '1c': 'SHR',
    '1d': 'SAR',
    '20': 'SHA3',
    # Environmental information (0x30 - 0x3e).
    '30': 'ADDRESS',
    '31': 'BALANCE',
    '32': 'ORIGIN',
    '33': 'CALLER',
    '34': 'CALLVALUE',
    '35': 'CALLDATALOAD',
    '36': 'CALLDATASIZE',
    '37': 'CALLDATACOPY',
    '38': 'CODESIZE',
    '39': 'CODECOPY',
    '3a': 'GASPRICE',
    '3b': 'EXTCODESIZE',
    '3c': 'EXTCODECOPY',
    '3d': 'RETURNDATASIZE',
    '3e': 'RETURNDATACOPY',
    # Block information (0x40 - 0x45).
    '40': 'BLOCKHASH',
    '41': 'COINBASE',
    '42': 'TIMESTAMP',
    '43': 'NUMBER',
    '44': 'DIFFICULTY',
    '45': 'GASLIMIT',
    # Stack, memory, storage and flow operations (0x50 - 0x5b).
    '50': 'POP',
    '51': 'MLOAD',
    '52': 'MSTORE',
    '53': 'MSTORE8',
    '54': 'SLOAD',
    '55': 'SSTORE',
    '56': 'JUMP',
    '57': 'JUMPI',
    '58': 'PC',
    '59': 'MSIZE',
    '5a': 'GAS',
    '5b': 'JUMPDEST',
}
# The regular families PUSH1-PUSH32 (0x60-0x7f), DUP1-DUP16 (0x80-0x8f),
# SWAP1-SWAP16 (0x90-0x9f) and LOG0-LOG4 (0xa0-0xa4) are generated
# instead of being written out by hand.
opcode_mapping.update(('%02x' % (0x60 + i), 'PUSH%d' % (i + 1)) for i in range(32))
opcode_mapping.update(('%02x' % (0x80 + i), 'DUP%d' % (i + 1)) for i in range(16))
opcode_mapping.update(('%02x' % (0x90 + i), 'SWAP%d' % (i + 1)) for i in range(16))
opcode_mapping.update(('%02x' % (0xa0 + i), 'LOG%d' % i) for i in range(5))
# System operations (0xf0 - 0xff).
opcode_mapping.update({
    'f0': 'CREATE',
    'f1': 'CALL',
    'f2': 'CALLCODE',
    'f3': 'RETURN',
    'f4': 'DELEGATECALL',
    'f5': 'CALLBLACKBOX',
    'fa': 'STATICCALL',
    'fd': 'REVERT',
    'fe': 'INVALID',
    'ff': 'SELFDESTRUCT',  # historically named SUICIDE
})
# Number of immediate operand bytes consumed by each PUSH opcode:
# PUSH1 (0x60) carries 1 byte ... PUSH32 (0x7f) carries 32 bytes.
push_mapping = {'%02x' % (0x60 + n - 1): n for n in range(1, 33)}
| true | true |
f736a7d8c53160ea30ccc4f917fe829d5743fd83 | 20,814 | py | Python | cinder/tests/unit/test_v7000_fcp.py | UbuntuEvangelist/cinder | cbb55074de48176cbaa3f31a5b1d595b8aad7aa8 | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/test_v7000_fcp.py | UbuntuEvangelist/cinder | cbb55074de48176cbaa3f31a5b1d595b8aad7aa8 | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/test_v7000_fcp.py | UbuntuEvangelist/cinder | cbb55074de48176cbaa3f31a5b1d595b8aad7aa8 | [
"Apache-2.0"
] | 15 | 2017-01-12T10:35:10.000Z | 2019-04-19T08:22:10.000Z | # Copyright 2015 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Violin Memory 7000 Series All-Flash Array Fibrechannel Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_vmem_client as vmemclient
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v7000_common
from cinder.volume.drivers.violin import v7000_fcp
# Fake volume / snapshot / clone-source fixtures passed as arguments to
# the driver calls exercised below.
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
    "name": "volume-" + VOLUME_ID,
    "id": VOLUME_ID,
    "display_name": "fake_volume",
    "size": 2,
    "host": "myhost",
    "volume_type": None,
    "volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
    "name": "snapshot-" + SNAPSHOT_ID,
    "id": SNAPSHOT_ID,
    "volume_id": VOLUME_ID,
    "volume_name": "volume-" + VOLUME_ID,
    "volume_size": 2,
    "display_name": "fake_snapshot",
    "volume": VOLUME,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
    "name": "volume-" + SRC_VOL_ID,
    "id": SRC_VOL_ID,
    "display_name": "fake_src_vol",
    "size": 2,
    "host": "myhost",
    "volume_type": None,
    "volume_type_id": None,
}
# Fake initiator-side connector, as Nova would hand it to the driver.
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
    "initiator": INITIATOR_IQN,
    "host": "irrelevant",
    'wwpns': ['50014380186b3f65', '50014380186b3f67'],
}
# Fibre Channel fabric topology fixtures: target/initiator WWPNs and the
# fabric map a zoning lookup service would return.
FC_TARGET_WWPNS = [
    '31000024ff45fb22', '21000024ff45fb23',
    '51000024ff45f1be', '41000024ff45f1bf'
]
FC_INITIATOR_WWPNS = [
    '50014380186b3f65', '50014380186b3f67'
]
FC_FABRIC_MAP = {
    'fabricA':
    {'target_port_wwn_list': [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
     'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[0]]},
    'fabricB':
    {'target_port_wwn_list': [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]],
     'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[1]]}
}
FC_INITIATOR_TARGET_MAP = {
    FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
    FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
}
# Canned backend reply for the physical-device inventory query used by
# _update_volume_stats (one foreign device plus two pool members).
PHY_DEVICES_RESPONSE = {
    'data':
    {'physical_devices':
     [{'availsize': 1099504287744,
       'availsize_mb': 524284,
       'category': 'Virtual Device',
       'connection_type': 'block',
       'firmware': 'v1.0',
       'guid': '3cc4d6dd-166d-77d2-4967-00005463f597',
       'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN01 v1.0',
       'is_foreign': True,
       'name': 'BKSC:OTHDISK-MFCN01.000',
       'object_id': '84b834fb-1f4d-5d3b-b7ae-5796f9868151',
       'owner': 'example.com',
       'pool': None,
       'product': 'OTHDISK-MFCN01',
       'scsi_address':
       {'adapter': '98',
        'channel': '0',
        'id': '0',
        'lun': '0',
        'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
       'size': 1099504287744,
       'size_mb': 1048569,
       'type': 'Direct-Access',
       'usedsize': 0,
       'usedsize_mb': 0,
       'vendor': 'BKSC',
       'wwid': 'BKSC OTHDISK-MFCN01 v1.0-0-0-00'},
      {'availsize': 1099504287744,
       'availsize_mb': 524284,
       'category': 'Virtual Device',
       'connection_type': 'block',
       'firmware': 'v1.0',
       'guid': '283b2694-192b-4745-6768-00005463f673',
       'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN08 v1.0',
       'is_foreign': False,
       'name': 'BKSC:OTHDISK-MFCN08.000',
       'object_id': '8555b888-bf43-5083-a433-f0c7b0282370',
       'owner': 'example.com',
       'pool':
       {'name': 'mga-pool',
        'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'},
       'product': 'OTHDISK-MFCN08',
       'scsi_address':
       {'adapter': '98',
        'channel': '0',
        'id': '11',
        'lun': '0',
        'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
       'size': 1099504287744,
       'size_mb': 1048569,
       'type': 'Direct-Access',
       'usedsize': 0,
       'usedsize_mb': 0,
       'vendor': 'BKSC',
       'wwid': 'BKSC OTHDISK-MFCN08 v1.0-0-0-00'},
      {'availsize': 1099504287744,
       'availsize_mb': 1048569,
       'category': 'Virtual Device',
       'connection_type': 'block',
       'firmware': 'v1.0',
       'guid': '7f47db19-019c-707d-0df1-00005463f949',
       'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN09 v1.0',
       'is_foreign': False,
       'name': 'BKSC:OTHDISK-MFCN09.000',
       'object_id': '62a98898-f8b8-5837-af2b-764f5a72e291',
       'owner': 'a.b.c.d',
       'pool':
       {'name': 'mga-pool',
        'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'},
       'product': 'OTHDISK-MFCN09',
       'scsi_address':
       {'adapter': '98',
        'channel': '0',
        'id': '12',
        'lun': '0',
        'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
       'size': 1099504287744,
       'size_mb': 524284,
       'type': 'Direct-Access',
       'usedsize': 0,
       'usedsize_mb': 0,
       'vendor': 'BKSC',
       'wwid': 'BKSC OTHDISK-MFCN09 v1.0-0-0-00'}],
     'total_physical_devices': 3},
    'msg': 'Successful',
    'success': True
}
# The FC_INFO dict returned by the backend is keyed on
# object_id of the FC adapter and the values are the
# wwmns
FC_INFO = {
    '1a3cdb6a-383d-5ba6-a50b-4ba598074510': ['2100001b9745e25e'],
    '4a6bc10a-5547-5cc0-94f2-76222a8f8dff': ['2100001b9745e230'],
    'b21bfff5-d89e-51ff-9920-d990a061d722': ['2100001b9745e25f'],
    'b508cc6b-f78a-51f9-81cf-47c1aaf53dd1': ['2100001b9745e231']
}
# SAN client info fixtures: CLIENT_INFO has one exported LUN (lun 8),
# CLIENT_INFO1 has no remaining exported devices.
CLIENT_INFO = {
    'FCPolicy':
    {'AS400enabled': False,
     'VSAenabled': False,
     'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66',
                           '50-01-43-80-18-6b-3f-64']},
    'FibreChannelDevices':
    [{'access': 'ReadWrite',
      'id': 'v0000004',
      'initiatorWWPN': '*',
      'lun': '8',
      'name': 'abcdabcd-1234-abcd-1234-abcdeffedcba',
      'sizeMB': 10240,
      'targetWWPN': '*',
      'type': 'SAN'}]
}
CLIENT_INFO1 = {
    'FCPolicy':
    {'AS400enabled': False,
     'VSAenabled': False,
     'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66',
                           '50-01-43-80-18-6b-3f-64']},
    'FibreChannelDevices': []
}
class V7000FCPDriverTestCase(test.TestCase):
    """Test cases for VMEM FCP driver."""
    def setUp(self):
        # Build a driver instance wired to mock configuration and inject
        # the FC target WWNs the driver would normally discover.
        super(V7000FCPDriverTestCase, self).setUp()
        self.conf = self.setup_configuration()
        self.driver = v7000_fcp.V7000FCPDriver(configuration=self.conf)
        self.driver.common.container = 'myContainer'
        self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
        self.driver.gateway_fc_wwns = FC_TARGET_WWPNS
        self.stats = {}
        self.driver.set_initialized()
    def tearDown(self):
        super(V7000FCPDriverTestCase, self).tearDown()
    def setup_configuration(self):
        """Return a mock driver configuration with canned SAN settings."""
        config = mock.Mock(spec=conf.Configuration)
        config.volume_backend_name = 'v7000_fcp'
        config.san_ip = '8.8.8.8'
        config.san_login = 'admin'
        config.san_password = ''
        config.san_thin_provision = False
        config.san_is_local = False
        config.request_timeout = 300
        config.container = 'myContainer'
        return config
    def setup_mock_concerto(self, m_conf=None):
        """Create a fake Concerto communication object."""
        _m_concerto = mock.Mock(name='Concerto',
                                version='1.1.1',
                                spec=vmemclient.mock_client_conf)
        if m_conf:
            _m_concerto.configure_mock(**m_conf)
        return _m_concerto
    @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
    def test_check_for_setup_error(self, m_setup_func):
        """No setup errors are found."""
        result = self.driver.check_for_setup_error()
        m_setup_func.assert_called_with()
        self.assertIsNone(result)
    @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
    def test_check_for_setup_error_no_wwn_config(self, m_setup_func):
        """No wwns were found during setup."""
        self.driver.gateway_fc_wwns = []
        failure = exception.ViolinInvalidBackendConfig
        self.assertRaises(failure, self.driver.check_for_setup_error)
    def test_create_volume(self):
        """Volume created successfully."""
        self.driver.common._create_lun = mock.Mock()
        result = self.driver.create_volume(VOLUME)
        self.driver.common._create_lun.assert_called_with(VOLUME)
        self.assertIsNone(result)
    def test_create_volume_from_snapshot(self):
        """Volume creation from a snapshot delegates to common code."""
        self.driver.common._create_volume_from_snapshot = mock.Mock()
        result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
        self.driver.common._create_volume_from_snapshot.assert_called_with(
            SNAPSHOT, VOLUME)
        self.assertIsNone(result)
    def test_create_cloned_volume(self):
        """Volume cloning delegates to the common lun-from-lun helper."""
        self.driver.common._create_lun_from_lun = mock.Mock()
        result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)
        self.driver.common._create_lun_from_lun.assert_called_with(
            SRC_VOL, VOLUME)
        self.assertIsNone(result)
    def test_delete_volume(self):
        """Volume deleted successfully."""
        self.driver.common._delete_lun = mock.Mock()
        result = self.driver.delete_volume(VOLUME)
        self.driver.common._delete_lun.assert_called_with(VOLUME)
        self.assertIsNone(result)
    def test_extend_volume(self):
        """Volume extended successfully."""
        new_size = 10
        self.driver.common._extend_lun = mock.Mock()
        result = self.driver.extend_volume(VOLUME, new_size)
        self.driver.common._extend_lun.assert_called_with(VOLUME, new_size)
        self.assertIsNone(result)
    def test_create_snapshot(self):
        """Snapshot creation delegates to common code."""
        self.driver.common._create_lun_snapshot = mock.Mock()
        result = self.driver.create_snapshot(SNAPSHOT)
        self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
        self.assertIsNone(result)
    def test_delete_snapshot(self):
        """Snapshot deletion delegates to common code."""
        self.driver.common._delete_lun_snapshot = mock.Mock()
        result = self.driver.delete_snapshot(SNAPSHOT)
        self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
        self.assertIsNone(result)
    def test_get_volume_stats(self):
        """Stats query triggers a refresh and returns the cached stats."""
        self.driver._update_volume_stats = mock.Mock()
        self.driver._update_volume_stats()
        result = self.driver.get_volume_stats(True)
        self.driver._update_volume_stats.assert_called_with()
        self.assertEqual(self.driver.stats, result)
    @mock.patch('socket.gethostbyaddr')
    def test_update_volume_stats(self, mock_gethost):
        """Test Update Volume Stats.
        Makes a mock query to the backend to collect stats on all physical
        devices.
        """
        # Resolve the configured SAN IP to a known hostname so the driver
        # can match device ownership against this array.
        def gethostbyaddr(addr):
            if addr == '8.8.8.8' or addr == 'example.com':
                return ('example.com', [], ['8.8.8.8'])
            else:
                return ('a.b.c.d', [], addr)
        mock_gethost.side_effect = gethostbyaddr
        backend_name = self.conf.volume_backend_name
        vendor_name = "Violin Memory, Inc."
        tot_gb = 2046
        free_gb = 1022
        phy_devices = "/batch/physicalresource/physicaldevice"
        conf = {
            'basic.get.side_effect': [PHY_DEVICES_RESPONSE, ],
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        result = self.driver._update_volume_stats()
        calls = [mock.call(phy_devices)]
        self.driver.common.vmem_mg.basic.get.assert_has_calls(calls)
        self.assertEqual(tot_gb, self.driver.stats['total_capacity_gb'])
        self.assertEqual(free_gb, self.driver.stats['free_capacity_gb'])
        self.assertEqual(backend_name,
                         self.driver.stats['volume_backend_name'])
        self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
        self.assertIsNone(result)
    def test_get_active_fc_targets(self):
        """Test Get Active FC Targets.
        Makes a mock query to the backend to collect all the physical
        adapters and extract the WWNs.
        """
        conf = {
            'adapter.get_fc_info.return_value': FC_INFO,
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        result = self.driver._get_active_fc_targets()
        self.assertEqual({'2100001b9745e230', '2100001b9745e25f',
                          '2100001b9745e231', '2100001b9745e25e'},
                         set(result))
    def test_initialize_connection(self):
        """FC attach returns the expected connection properties."""
        lun_id = 1
        target_wwns = self.driver.gateway_fc_wwns
        init_targ_map = {}
        conf = {
            'client.create_client.return_value': None,
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._export_lun = mock.Mock(return_value=lun_id)
        self.driver._build_initiator_target_map = mock.Mock(
            return_value=(target_wwns, init_targ_map))
        props = self.driver.initialize_connection(VOLUME, CONNECTOR)
        self.driver.common.vmem_mg.client.create_client.assert_called_with(
            name=CONNECTOR['host'], proto='FC', fc_wwns=CONNECTOR['wwpns'])
        self.driver._export_lun.assert_called_with(VOLUME, CONNECTOR)
        self.driver._build_initiator_target_map.assert_called_with(
            CONNECTOR)
        self.assertEqual("fibre_channel", props['driver_volume_type'])
        self.assertEqual(True, props['data']['target_discovered'])
        self.assertEqual(self.driver.gateway_fc_wwns,
                         props['data']['target_wwn'])
        self.assertEqual(lun_id, props['data']['target_lun'])
    def test_terminate_connection(self):
        """FC detach unexports the LUN and returns the zoning map."""
        target_wwns = self.driver.gateway_fc_wwns
        init_targ_map = {}
        self.driver.common.vmem_mg = self.setup_mock_concerto()
        self.driver._unexport_lun = mock.Mock()
        self.driver._is_initiator_connected_to_array = mock.Mock(
            return_value=False)
        self.driver._build_initiator_target_map = mock.Mock(
            return_value=(target_wwns, init_targ_map))
        props = self.driver.terminate_connection(VOLUME, CONNECTOR)
        self.driver._unexport_lun.assert_called_with(VOLUME, CONNECTOR)
        self.driver._is_initiator_connected_to_array.assert_called_with(
            CONNECTOR)
        self.driver._build_initiator_target_map.assert_called_with(
            CONNECTOR)
        self.assertEqual("fibre_channel", props['driver_volume_type'])
        self.assertEqual(target_wwns, props['data']['target_wwn'])
        self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
    def test_export_lun(self):
        """LUN is assigned to the SAN client and its id returned."""
        lun_id = '1'
        response = {'success': True, 'msg': 'Assign SAN client successfully'}
        conf = {
            'client.get_client_info.return_value': CLIENT_INFO,
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver.common._send_cmd_and_verify = mock.Mock(
            return_value=response)
        self.driver._get_lun_id = mock.Mock(return_value=lun_id)
        result = self.driver._export_lun(VOLUME, CONNECTOR)
        self.driver.common._send_cmd_and_verify.assert_called_with(
            self.driver.common.vmem_mg.lun.assign_lun_to_client,
            self.driver._is_lun_id_ready,
            'Assign SAN client successfully',
            [VOLUME['id'], CONNECTOR['host'], "ReadWrite"],
            [VOLUME['id'], CONNECTOR['host']])
        self.driver._get_lun_id.assert_called_with(
            VOLUME['id'], CONNECTOR['host'])
        self.assertEqual(lun_id, result)
    def test_export_lun_fails_with_exception(self):
        """A backend error during export is propagated to the caller."""
        lun_id = '1'
        response = {'status': False, 'msg': 'Generic error'}
        failure = exception.ViolinBackendErr
        self.driver.common.vmem_mg = self.setup_mock_concerto()
        self.driver.common._send_cmd_and_verify = mock.Mock(
            side_effect=exception.ViolinBackendErr(response['msg']))
        self.driver._get_lun_id = mock.Mock(return_value=lun_id)
        self.assertRaises(failure, self.driver._export_lun, VOLUME, CONNECTOR)
    def test_unexport_lun(self):
        """LUN is unassigned from the SAN client."""
        response = {'success': True, 'msg': 'Unassign SAN client successfully'}
        self.driver.common.vmem_mg = self.setup_mock_concerto()
        self.driver.common._send_cmd = mock.Mock(
            return_value=response)
        result = self.driver._unexport_lun(VOLUME, CONNECTOR)
        self.driver.common._send_cmd.assert_called_with(
            self.driver.common.vmem_mg.lun.unassign_client_lun,
            "Unassign SAN client successfully",
            VOLUME['id'], CONNECTOR['host'], True)
        self.assertIsNone(result)
    def test_get_lun_id(self):
        """LUN id is parsed out of the client info (lun '8' in fixture)."""
        conf = {
            'client.get_client_info.return_value': CLIENT_INFO,
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        result = self.driver._get_lun_id(VOLUME['id'], CONNECTOR['host'])
        self.assertEqual(8, result)
    def test_is_lun_id_ready(self):
        """A resolvable LUN id means the export has completed."""
        lun_id = '1'
        self.driver.common.vmem_mg = self.setup_mock_concerto()
        self.driver._get_lun_id = mock.Mock(return_value=lun_id)
        result = self.driver._is_lun_id_ready(
            VOLUME['id'], CONNECTOR['host'])
        self.assertTrue(result)
    def test_build_initiator_target_map(self):
        """Successfully build a map when zoning is enabled."""
        expected_targ_wwns = FC_TARGET_WWPNS
        self.driver.lookup_service = mock.Mock()
        (self.driver.lookup_service.get_device_mapping_from_network.
         return_value) = FC_FABRIC_MAP
        result = self.driver._build_initiator_target_map(CONNECTOR)
        (targ_wwns, init_targ_map) = result
        (self.driver.lookup_service.get_device_mapping_from_network.
         assert_called_with(CONNECTOR['wwpns'], self.driver.gateway_fc_wwns))
        self.assertEqual(set(expected_targ_wwns), set(targ_wwns))
        i = FC_INITIATOR_WWPNS[0]
        self.assertIn(FC_TARGET_WWPNS[0], init_targ_map[i])
        self.assertIn(FC_TARGET_WWPNS[1], init_targ_map[i])
        self.assertEqual(2, len(init_targ_map[i]))
        i = FC_INITIATOR_WWPNS[1]
        self.assertIn(FC_TARGET_WWPNS[2], init_targ_map[i])
        self.assertIn(FC_TARGET_WWPNS[3], init_targ_map[i])
        self.assertEqual(2, len(init_targ_map[i]))
        self.assertEqual(2, len(init_targ_map))
    def test_build_initiator_target_map_no_lookup_service(self):
        """Successfully build a map when zoning is disabled."""
        expected_targ_wwns = FC_TARGET_WWPNS
        expected_init_targ_map = {
            CONNECTOR['wwpns'][0]: FC_TARGET_WWPNS,
            CONNECTOR['wwpns'][1]: FC_TARGET_WWPNS
        }
        self.driver.lookup_service = None
        targ_wwns, init_targ_map = self.driver._build_initiator_target_map(
            CONNECTOR)
        self.assertEqual(expected_targ_wwns, targ_wwns)
        self.assertEqual(expected_init_targ_map, init_targ_map)
    def test_is_initiator_connected_to_array(self):
        """Successfully finds an initiator with remaining active session."""
        conf = {
            'client.get_client_info.return_value': CLIENT_INFO,
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.assertTrue(self.driver._is_initiator_connected_to_array(
            CONNECTOR))
        self.driver.common.vmem_mg.client.get_client_info.assert_called_with(
            CONNECTOR['host'])
    def test_is_initiator_connected_to_array_empty_response(self):
        """Successfully finds no initiators with remaining active sessions."""
        conf = {
            'client.get_client_info.return_value': CLIENT_INFO1
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.assertFalse(self.driver._is_initiator_connected_to_array(
            CONNECTOR))
| 36.07279 | 79 | 0.642356 |
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_vmem_client as vmemclient
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v7000_common
from cinder.volume.drivers.violin import v7000_fcp
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
"name": "volume-" + VOLUME_ID,
"id": VOLUME_ID,
"display_name": "fake_volume",
"size": 2,
"host": "myhost",
"volume_type": None,
"volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
"name": "snapshot-" + SNAPSHOT_ID,
"id": SNAPSHOT_ID,
"volume_id": VOLUME_ID,
"volume_name": "volume-" + VOLUME_ID,
"volume_size": 2,
"display_name": "fake_snapshot",
"volume": VOLUME,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": "fake_src_vol",
"size": 2,
"host": "myhost",
"volume_type": None,
"volume_type_id": None,
}
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
"initiator": INITIATOR_IQN,
"host": "irrelevant",
'wwpns': ['50014380186b3f65', '50014380186b3f67'],
}
FC_TARGET_WWPNS = [
'31000024ff45fb22', '21000024ff45fb23',
'51000024ff45f1be', '41000024ff45f1bf'
]
FC_INITIATOR_WWPNS = [
'50014380186b3f65', '50014380186b3f67'
]
FC_FABRIC_MAP = {
'fabricA':
{'target_port_wwn_list': [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[0]]},
'fabricB':
{'target_port_wwn_list': [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]],
'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[1]]}
}
FC_INITIATOR_TARGET_MAP = {
FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
}
PHY_DEVICES_RESPONSE = {
'data':
{'physical_devices':
[{'availsize': 1099504287744,
'availsize_mb': 524284,
'category': 'Virtual Device',
'connection_type': 'block',
'firmware': 'v1.0',
'guid': '3cc4d6dd-166d-77d2-4967-00005463f597',
'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN01 v1.0',
'is_foreign': True,
'name': 'BKSC:OTHDISK-MFCN01.000',
'object_id': '84b834fb-1f4d-5d3b-b7ae-5796f9868151',
'owner': 'example.com',
'pool': None,
'product': 'OTHDISK-MFCN01',
'scsi_address':
{'adapter': '98',
'channel': '0',
'id': '0',
'lun': '0',
'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
'size': 1099504287744,
'size_mb': 1048569,
'type': 'Direct-Access',
'usedsize': 0,
'usedsize_mb': 0,
'vendor': 'BKSC',
'wwid': 'BKSC OTHDISK-MFCN01 v1.0-0-0-00'},
{'availsize': 1099504287744,
'availsize_mb': 524284,
'category': 'Virtual Device',
'connection_type': 'block',
'firmware': 'v1.0',
'guid': '283b2694-192b-4745-6768-00005463f673',
'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN08 v1.0',
'is_foreign': False,
'name': 'BKSC:OTHDISK-MFCN08.000',
'object_id': '8555b888-bf43-5083-a433-f0c7b0282370',
'owner': 'example.com',
'pool':
{'name': 'mga-pool',
'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'},
'product': 'OTHDISK-MFCN08',
'scsi_address':
{'adapter': '98',
'channel': '0',
'id': '11',
'lun': '0',
'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
'size': 1099504287744,
'size_mb': 1048569,
'type': 'Direct-Access',
'usedsize': 0,
'usedsize_mb': 0,
'vendor': 'BKSC',
'wwid': 'BKSC OTHDISK-MFCN08 v1.0-0-0-00'},
{'availsize': 1099504287744,
'availsize_mb': 1048569,
'category': 'Virtual Device',
'connection_type': 'block',
'firmware': 'v1.0',
'guid': '7f47db19-019c-707d-0df1-00005463f949',
'inquiry_string': '000002122b000032BKSC OTHDISK-MFCN09 v1.0',
'is_foreign': False,
'name': 'BKSC:OTHDISK-MFCN09.000',
'object_id': '62a98898-f8b8-5837-af2b-764f5a72e291',
'owner': 'a.b.c.d',
'pool':
{'name': 'mga-pool',
'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'},
'product': 'OTHDISK-MFCN09',
'scsi_address':
{'adapter': '98',
'channel': '0',
'id': '12',
'lun': '0',
'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
'size': 1099504287744,
'size_mb': 524284,
'type': 'Direct-Access',
'usedsize': 0,
'usedsize_mb': 0,
'vendor': 'BKSC',
'wwid': 'BKSC OTHDISK-MFCN09 v1.0-0-0-00'}],
'total_physical_devices': 3},
'msg': 'Successful',
'success': True
}
FC_INFO = {
'1a3cdb6a-383d-5ba6-a50b-4ba598074510': ['2100001b9745e25e'],
'4a6bc10a-5547-5cc0-94f2-76222a8f8dff': ['2100001b9745e230'],
'b21bfff5-d89e-51ff-9920-d990a061d722': ['2100001b9745e25f'],
'b508cc6b-f78a-51f9-81cf-47c1aaf53dd1': ['2100001b9745e231']
}
CLIENT_INFO = {
'FCPolicy':
{'AS400enabled': False,
'VSAenabled': False,
'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66',
'50-01-43-80-18-6b-3f-64']},
'FibreChannelDevices':
[{'access': 'ReadWrite',
'id': 'v0000004',
'initiatorWWPN': '*',
'lun': '8',
'name': 'abcdabcd-1234-abcd-1234-abcdeffedcba',
'sizeMB': 10240,
'targetWWPN': '*',
'type': 'SAN'}]
}
CLIENT_INFO1 = {
'FCPolicy':
{'AS400enabled': False,
'VSAenabled': False,
'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66',
'50-01-43-80-18-6b-3f-64']},
'FibreChannelDevices': []
}
class V7000FCPDriverTestCase(test.TestCase):
    """Unit tests for the Violin Memory 7000 FCP volume driver.

    The backend REST client ('Concerto') is always replaced by a mock, so
    these tests only check that the driver issues the expected calls and
    correctly translates the canned replies defined at module level
    (e.g. CLIENT_INFO, FC_INFO, PHY_DEVICES_RESPONSE).
    """
    def setUp(self):
        """Build the driver under test on top of a mocked configuration."""
        super(V7000FCPDriverTestCase, self).setUp()
        self.conf = self.setup_configuration()
        self.driver = v7000_fcp.V7000FCPDriver(configuration=self.conf)
        self.driver.common.container = 'myContainer'
        self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
        self.driver.gateway_fc_wwns = FC_TARGET_WWPNS
        self.stats = {}
        self.driver.set_initialized()
    def tearDown(self):
        super(V7000FCPDriverTestCase, self).tearDown()
    def setup_configuration(self):
        """Return a mock cinder Configuration with typical SAN settings."""
        config = mock.Mock(spec=conf.Configuration)
        config.volume_backend_name = 'v7000_fcp'
        config.san_ip = '8.8.8.8'
        config.san_login = 'admin'
        config.san_password = ''
        config.san_thin_provision = False
        config.san_is_local = False
        config.request_timeout = 300
        config.container = 'myContainer'
        return config
    def setup_mock_concerto(self, m_conf=None):
        """Create a mock Concerto REST client, optionally pre-configured.

        ``m_conf`` is a dict passed to ``configure_mock`` (e.g. canned
        return values for specific client calls).
        """
        _m_concerto = mock.Mock(name='Concerto',
                                version='1.1.1',
                                spec=vmemclient.mock_client_conf)
        if m_conf:
            _m_concerto.configure_mock(**m_conf)
        return _m_concerto
    @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
    def test_check_for_setup_error(self, m_setup_func):
        result = self.driver.check_for_setup_error()
        m_setup_func.assert_called_with()
        self.assertIsNone(result)
    @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
    def test_check_for_setup_error_no_wwn_config(self, m_setup_func):
        """Setup must fail when no gateway FC WWNs are configured."""
        self.driver.gateway_fc_wwns = []
        failure = exception.ViolinInvalidBackendConfig
        self.assertRaises(failure, self.driver.check_for_setup_error)
    def test_create_volume(self):
        self.driver.common._create_lun = mock.Mock()
        result = self.driver.create_volume(VOLUME)
        self.driver.common._create_lun.assert_called_with(VOLUME)
        self.assertIsNone(result)
    def test_create_volume_from_snapshot(self):
        self.driver.common._create_volume_from_snapshot = mock.Mock()
        result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
        self.driver.common._create_volume_from_snapshot.assert_called_with(
            SNAPSHOT, VOLUME)
        self.assertIsNone(result)
    def test_create_cloned_volume(self):
        self.driver.common._create_lun_from_lun = mock.Mock()
        result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)
        self.driver.common._create_lun_from_lun.assert_called_with(
            SRC_VOL, VOLUME)
        self.assertIsNone(result)
    def test_delete_volume(self):
        self.driver.common._delete_lun = mock.Mock()
        result = self.driver.delete_volume(VOLUME)
        self.driver.common._delete_lun.assert_called_with(VOLUME)
        self.assertIsNone(result)
    def test_extend_volume(self):
        new_size = 10
        self.driver.common._extend_lun = mock.Mock()
        result = self.driver.extend_volume(VOLUME, new_size)
        self.driver.common._extend_lun.assert_called_with(VOLUME, new_size)
        self.assertIsNone(result)
    def test_create_snapshot(self):
        self.driver.common._create_lun_snapshot = mock.Mock()
        result = self.driver.create_snapshot(SNAPSHOT)
        self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
        self.assertIsNone(result)
    def test_delete_snapshot(self):
        self.driver.common._delete_lun_snapshot = mock.Mock()
        result = self.driver.delete_snapshot(SNAPSHOT)
        self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
        self.assertIsNone(result)
    def test_get_volume_stats(self):
        self.driver._update_volume_stats = mock.Mock()
        self.driver._update_volume_stats()
        result = self.driver.get_volume_stats(True)
        self.driver._update_volume_stats.assert_called_with()
        self.assertEqual(self.driver.stats, result)
    @mock.patch('socket.gethostbyaddr')
    def test_update_volume_stats(self, mock_gethost):
        """Stats must be computed from the backend's physical device list."""
        def gethostbyaddr(addr):
            # Make the SAN IP resolve to a stable hostname for the test.
            if addr == '8.8.8.8' or addr == 'example.com':
                return ('example.com', [], ['8.8.8.8'])
            else:
                return ('a.b.c.d', [], addr)
        mock_gethost.side_effect = gethostbyaddr
        backend_name = self.conf.volume_backend_name
        vendor_name = "Violin Memory, Inc."
        # Expected totals derived from the PHY_DEVICES_RESPONSE fixture.
        tot_gb = 2046
        free_gb = 1022
        phy_devices = "/batch/physicalresource/physicaldevice"
        conf = {
            'basic.get.side_effect': [PHY_DEVICES_RESPONSE, ],
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        result = self.driver._update_volume_stats()
        calls = [mock.call(phy_devices)]
        self.driver.common.vmem_mg.basic.get.assert_has_calls(calls)
        self.assertEqual(tot_gb, self.driver.stats['total_capacity_gb'])
        self.assertEqual(free_gb, self.driver.stats['free_capacity_gb'])
        self.assertEqual(backend_name,
                         self.driver.stats['volume_backend_name'])
        self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
        self.assertIsNone(result)
    def test_get_active_fc_targets(self):
        conf = {
            'adapter.get_fc_info.return_value': FC_INFO,
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        result = self.driver._get_active_fc_targets()
        self.assertEqual({'2100001b9745e230', '2100001b9745e25f',
                          '2100001b9745e231', '2100001b9745e25e'},
                         set(result))
    def test_initialize_connection(self):
        lun_id = 1
        target_wwns = self.driver.gateway_fc_wwns
        init_targ_map = {}
        conf = {
            'client.create_client.return_value': None,
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver._export_lun = mock.Mock(return_value=lun_id)
        self.driver._build_initiator_target_map = mock.Mock(
            return_value=(target_wwns, init_targ_map))
        props = self.driver.initialize_connection(VOLUME, CONNECTOR)
        self.driver.common.vmem_mg.client.create_client.assert_called_with(
            name=CONNECTOR['host'], proto='FC', fc_wwns=CONNECTOR['wwpns'])
        self.driver._export_lun.assert_called_with(VOLUME, CONNECTOR)
        self.driver._build_initiator_target_map.assert_called_with(
            CONNECTOR)
        self.assertEqual("fibre_channel", props['driver_volume_type'])
        self.assertEqual(True, props['data']['target_discovered'])
        self.assertEqual(self.driver.gateway_fc_wwns,
                         props['data']['target_wwn'])
        self.assertEqual(lun_id, props['data']['target_lun'])
    def test_terminate_connection(self):
        target_wwns = self.driver.gateway_fc_wwns
        init_targ_map = {}
        self.driver.common.vmem_mg = self.setup_mock_concerto()
        self.driver._unexport_lun = mock.Mock()
        self.driver._is_initiator_connected_to_array = mock.Mock(
            return_value=False)
        self.driver._build_initiator_target_map = mock.Mock(
            return_value=(target_wwns, init_targ_map))
        props = self.driver.terminate_connection(VOLUME, CONNECTOR)
        self.driver._unexport_lun.assert_called_with(VOLUME, CONNECTOR)
        self.driver._is_initiator_connected_to_array.assert_called_with(
            CONNECTOR)
        self.driver._build_initiator_target_map.assert_called_with(
            CONNECTOR)
        self.assertEqual("fibre_channel", props['driver_volume_type'])
        self.assertEqual(target_wwns, props['data']['target_wwn'])
        self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
    def test_export_lun(self):
        """_export_lun must assign the LUN and return its backend lun id."""
        lun_id = '1'
        response = {'success': True, 'msg': 'Assign SAN client successfully'}
        conf = {
            'client.get_client_info.return_value': CLIENT_INFO,
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.driver.common._send_cmd_and_verify = mock.Mock(
            return_value=response)
        self.driver._get_lun_id = mock.Mock(return_value=lun_id)
        result = self.driver._export_lun(VOLUME, CONNECTOR)
        self.driver.common._send_cmd_and_verify.assert_called_with(
            self.driver.common.vmem_mg.lun.assign_lun_to_client,
            self.driver._is_lun_id_ready,
            'Assign SAN client successfully',
            [VOLUME['id'], CONNECTOR['host'], "ReadWrite"],
            [VOLUME['id'], CONNECTOR['host']])
        self.driver._get_lun_id.assert_called_with(
            VOLUME['id'], CONNECTOR['host'])
        self.assertEqual(lun_id, result)
    def test_export_lun_fails_with_exception(self):
        """Backend errors raised during export must propagate to the caller."""
        lun_id = '1'
        response = {'status': False, 'msg': 'Generic error'}
        failure = exception.ViolinBackendErr
        self.driver.common.vmem_mg = self.setup_mock_concerto()
        self.driver.common._send_cmd_and_verify = mock.Mock(
            side_effect=exception.ViolinBackendErr(response['msg']))
        self.driver._get_lun_id = mock.Mock(return_value=lun_id)
        self.assertRaises(failure, self.driver._export_lun, VOLUME, CONNECTOR)
    def test_unexport_lun(self):
        response = {'success': True, 'msg': 'Unassign SAN client successfully'}
        self.driver.common.vmem_mg = self.setup_mock_concerto()
        self.driver.common._send_cmd = mock.Mock(
            return_value=response)
        result = self.driver._unexport_lun(VOLUME, CONNECTOR)
        self.driver.common._send_cmd.assert_called_with(
            self.driver.common.vmem_mg.lun.unassign_client_lun,
            "Unassign SAN client successfully",
            VOLUME['id'], CONNECTOR['host'], True)
        self.assertIsNone(result)
    def test_get_lun_id(self):
        # CLIENT_INFO exports one device with lun '8' -> expect int 8.
        conf = {
            'client.get_client_info.return_value': CLIENT_INFO,
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        result = self.driver._get_lun_id(VOLUME['id'], CONNECTOR['host'])
        self.assertEqual(8, result)
    def test_is_lun_id_ready(self):
        lun_id = '1'
        self.driver.common.vmem_mg = self.setup_mock_concerto()
        self.driver._get_lun_id = mock.Mock(return_value=lun_id)
        result = self.driver._is_lun_id_ready(
            VOLUME['id'], CONNECTOR['host'])
        self.assertTrue(result)
    def test_build_initiator_target_map(self):
        """With a lookup service, the map comes from the fabric topology."""
        expected_targ_wwns = FC_TARGET_WWPNS
        self.driver.lookup_service = mock.Mock()
        (self.driver.lookup_service.get_device_mapping_from_network.
         return_value) = FC_FABRIC_MAP
        result = self.driver._build_initiator_target_map(CONNECTOR)
        (targ_wwns, init_targ_map) = result
        (self.driver.lookup_service.get_device_mapping_from_network.
         assert_called_with(CONNECTOR['wwpns'], self.driver.gateway_fc_wwns))
        self.assertEqual(set(expected_targ_wwns), set(targ_wwns))
        # Each initiator should see exactly the two targets on its fabric.
        i = FC_INITIATOR_WWPNS[0]
        self.assertIn(FC_TARGET_WWPNS[0], init_targ_map[i])
        self.assertIn(FC_TARGET_WWPNS[1], init_targ_map[i])
        self.assertEqual(2, len(init_targ_map[i]))
        i = FC_INITIATOR_WWPNS[1]
        self.assertIn(FC_TARGET_WWPNS[2], init_targ_map[i])
        self.assertIn(FC_TARGET_WWPNS[3], init_targ_map[i])
        self.assertEqual(2, len(init_targ_map[i]))
        self.assertEqual(2, len(init_targ_map))
    def test_build_initiator_target_map_no_lookup_service(self):
        """Without a lookup service, every initiator maps to all targets."""
        expected_targ_wwns = FC_TARGET_WWPNS
        expected_init_targ_map = {
            CONNECTOR['wwpns'][0]: FC_TARGET_WWPNS,
            CONNECTOR['wwpns'][1]: FC_TARGET_WWPNS
        }
        self.driver.lookup_service = None
        targ_wwns, init_targ_map = self.driver._build_initiator_target_map(
            CONNECTOR)
        self.assertEqual(expected_targ_wwns, targ_wwns)
        self.assertEqual(expected_init_targ_map, init_targ_map)
    def test_is_initiator_connected_to_array(self):
        conf = {
            'client.get_client_info.return_value': CLIENT_INFO,
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.assertTrue(self.driver._is_initiator_connected_to_array(
            CONNECTOR))
        self.driver.common.vmem_mg.client.get_client_info.assert_called_with(
            CONNECTOR['host'])
    def test_is_initiator_connected_to_array_empty_response(self):
        conf = {
            'client.get_client_info.return_value': CLIENT_INFO1
        }
        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
        self.assertFalse(self.driver._is_initiator_connected_to_array(
            CONNECTOR))
| true | true |
f736a971a77ec4dbef874d6e5e2b4a879b5e0f32 | 965 | py | Python | contents/apis/accounts.py | williamlagos/contents-api | d501d343396af7aec8d5dfa6092136ba689d8565 | [
"MIT"
] | null | null | null | contents/apis/accounts.py | williamlagos/contents-api | d501d343396af7aec8d5dfa6092136ba689d8565 | [
"MIT"
] | 3 | 2021-04-24T20:50:16.000Z | 2021-05-24T17:46:55.000Z | contents/apis/accounts.py | williamlagos/contents-api | d501d343396af7aec8d5dfa6092136ba689d8565 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# This file is part of django-emporio project.
#
# Copyright (C) 2011-2020 William Oliveira de Lagos <william.lagos@icloud.com>
#
# Emporio is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Emporio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Emporio. If not, see <http://www.gnu.org/licenses/>.
#
from socialize.socialize.services import SocialService
from .default import DefaultServiceResource
class AccountResource(DefaultServiceResource):
    """API resource for account endpoints, delegating to SocialService."""
    # Shared service instance used by the DefaultServiceResource machinery.
    service = SocialService()
| 32.166667 | 78 | 0.772021 |
from socialize.socialize.services import SocialService
from .default import DefaultServiceResource
class AccountResource(DefaultServiceResource):
service = SocialService()
| true | true |
f736a98872d1e12416941d26320a083bc986237d | 13,347 | py | Python | bin/node.py | SauravMaheshkar/rtdl | c3f8051210d1cd7fdffc5a63221e3c4e84415ed8 | [
"Apache-2.0"
] | 1 | 2022-01-24T13:35:03.000Z | 2022-01-24T13:35:03.000Z | bin/node.py | SauravMaheshkar/rtdl | c3f8051210d1cd7fdffc5a63221e3c4e84415ed8 | [
"Apache-2.0"
] | null | null | null | bin/node.py | SauravMaheshkar/rtdl | c3f8051210d1cd7fdffc5a63221e3c4e84415ed8 | [
"Apache-2.0"
] | null | null | null | # %%
import gc
import itertools
import math
import typing as ty
from copy import deepcopy
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim.swa_utils as swa_utils
import zero
from torch import Tensor
import wandb
import lib
import lib.node as node
# %%
class NODE(nn.Module):
    """Neural Oblivious Decision Ensembles for tabular data.

    Numerical features pass through unchanged; categorical features (when
    ``categories`` is given) are embedded, flattened, and concatenated with
    the numerical part before entering the dense block of oblivious trees.
    """

    def __init__(
        self,
        *,
        d_in: int,
        num_layers: int,
        layer_dim: int,
        depth: int,
        tree_dim: int,
        choice_function: str,
        bin_function: str,
        d_out: int,
        categories: ty.Optional[ty.List[int]],
        d_embedding: int,
    ) -> None:
        super().__init__()
        if categories is not None:
            # Every categorical feature contributes one embedding vector.
            d_in += len(categories) * d_embedding
            # Offsets shift per-feature indices into one shared table.
            offsets = torch.cumsum(torch.tensor([0] + categories[:-1]), 0)
            self.register_buffer('category_offsets', offsets)
            self.category_embeddings = nn.Embedding(sum(categories), d_embedding)
            nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))
            print(f'{self.category_embeddings.weight.shape=}')
        self.d_out = d_out
        self.block = node.DenseBlock(
            input_dim=d_in,
            num_layers=num_layers,
            layer_dim=layer_dim,
            depth=depth,
            tree_dim=tree_dim,
            bin_function=getattr(node, bin_function),
            choice_function=getattr(node, choice_function),
            flatten_output=False,
        )

    def forward(self, x_num: Tensor, x_cat: Tensor) -> Tensor:
        """Return per-sample outputs (logits or regression values)."""
        if x_cat is None:
            features = x_num
        else:
            embedded = self.category_embeddings(x_cat + self.category_offsets[None])
            features = torch.cat([x_num, embedded.view(embedded.size(0), -1)], dim=-1)
        trees = self.block(features)
        # Average the first d_out outputs of every tree; drop the trailing
        # dimension so scalar targets come out as a 1-D tensor.
        return trees[..., : self.d_out].mean(dim=-2).squeeze(-1)
# %%
# --- Configuration and data preparation -------------------------------------
args, output = lib.load_config()
assert 'weight_decay' not in args, 'NODE architecture performs badly with weight decay'
if 'swa' in args:
    assert args['swa']['n_checkpoints'] > 1
# %%
zero.set_randomness(args['seed'])
dataset_dir = lib.get_path(args['data']['path'])
# Run-level statistics; merged with any stats.json already in the output dir.
stats: ty.Dict[str, ty.Any] = {
    'dataset': dataset_dir.name,
    'algorithm': Path(__file__).stem,
    **lib.load_json(output / 'stats.json'),
}
D = lib.Dataset.from_dir(dataset_dir)
X = D.build_X(
    normalization=args['data'].get('normalization'),
    num_nan_policy='mean',
    cat_nan_policy='new',
    cat_policy=args['data'].get('cat_policy', 'indices'),
    cat_min_frequency=args['data'].get('cat_min_frequency', 0.0),
    seed=args['seed'],
)
# Normalize build_X's result to a (numerical, categorical) pair.
if not isinstance(X, tuple):
    X = (X, None)
zero.set_randomness(args['seed'])
Y, y_info = D.build_y(args['data'].get('y_policy'))
lib.dump_pickle(y_info, output / 'y_info.pickle')
X = tuple(None if x is None else lib.to_tensors(x) for x in X)
Y = lib.to_tensors(Y)
device = lib.get_device()
if device.type != 'cpu':
    X = tuple(None if x is None else {k: v.to(device) for k, v in x.items()} for x in X)
    Y_device = {k: v.to(device) for k, v in Y.items()}
else:
    Y_device = Y
X_num, X_cat = X
if not D.is_multiclass:
    # Regression / binary targets must be float for the losses below.
    Y_device = {k: v.float() for k, v in Y_device.items()}
train_size = D.size(lib.TRAIN)
batch_size, epoch_size = (
    stats['batch_size'],
    stats['epoch_size'],
) = lib.get_epoch_parameters(train_size, args['training'].get('batch_size', 'v3'))
eval_batch_size = args['training']['eval_batch_size']
# chunk_size is the virtual-batch size used when a full batch OOMs.
chunk_size = None
stats['chunk_sizes'] = {}
stats['eval_batch_sizes'] = {}
loss_fn = (
    F.binary_cross_entropy_with_logits
    if D.is_binclass
    else F.cross_entropy
    if D.is_multiclass
    else F.mse_loss
)
# --- Model, optimizer, and bookkeeping --------------------------------------
args['model'].setdefault('d_embedding', None)
model = NODE(
    d_in=0 if X_num is None else X_num['train'].shape[1],
    d_out=D.info['n_classes'] if D.is_multiclass else 1,
    categories=lib.get_categories(X_cat),
    **args['model'],
).to(device)
if torch.cuda.device_count() > 1:  # type: ignore[code]
    print('Using nn.DataParallel')
    model = nn.DataParallel(model)
stats['n_parameters'] = lib.get_n_parameters(model)
optimizer = lib.make_optimizer(
    args['training']['optimizer'],
    model.parameters(),
    args['training']['lr'],
    args['training']['weight_decay'],
)
stream = zero.Stream(lib.IndexLoader(train_size, batch_size, True, device))
progress = zero.ProgressTracker(args['training']['patience'])
training_log = {lib.TRAIN: [], lib.VAL: [], lib.TEST: []}
stage = 0
lr_n_decays = 0
timer = zero.Timer()
swa_stage_first_epoch = None
def print_epoch_info():
    """Print an epoch banner followed by the key run parameters."""
    banner = (
        f'\n>>> Epoch {stream.epoch} | Stage {stage} | {lib.format_seconds(timer())} | {output}'
    )
    print(banner)
    info = {'lr': lib.get_lr(optimizer), 'chunk_size': chunk_size}
    for key in ('batch_size', 'epoch_size', 'n_parameters'):
        info[key] = stats[key]
    print(' | '.join(f'{k} = {v}' for k, v in info.items()))
def get_checkpoint_path(suffix):
    """Return the checkpoint file path for the given stage number or label."""
    return output / f'checkpoint_{suffix}.pt'
def step(batch_idx):
    """Run one training forward pass; return (logits, targets) for the batch."""
    cat_batch = None if X_cat is None else X_cat[lib.TRAIN][batch_idx]
    logits = model(X_num[lib.TRAIN][batch_idx], cat_batch)
    targets = Y_device[lib.TRAIN][batch_idx]  # type: ignore[code]
    if not D.is_multiclass:
        # Non-multiclass losses need float targets matching the logits dtype.
        targets = targets.to(logits.dtype)
    return logits, targets
def _predict(part):
    """Run inference over a whole dataset part; return predictions on CPU."""
    loader = lib.IndexLoader(
        D.size(part),
        args['training']['eval_batch_size'],
        False,
        device,
    )
    chunks = [
        model(
            X_num[part][idx] if X_num is not None else None,
            X_cat[part][idx] if X_cat is not None else None,
        )
        for idx in loader
    ]
    return torch.cat(chunks).cpu()
@torch.no_grad()
def predict(m, part):
    """Predict for a dataset part, halving eval_batch_size on CUDA OOM.

    The RNG state is restored before every retry so results do not depend
    on how many OOM retries happened. The final batch size is recorded in
    ``stats['eval_batch_sizes']``.
    """
    global eval_batch_size
    m.eval()
    random_state = zero.get_random_state()
    while eval_batch_size:
        try:
            zero.set_random_state(random_state)
            return _predict(part)
        except RuntimeError as err:
            if not lib.is_oom_exception(err):
                raise
            zero.free_memory()
            gc.collect()
            eval_batch_size //= 2
            print('New eval batch size:', eval_batch_size)
            stats['eval_batch_sizes'][stream.epoch] = eval_batch_size
    raise RuntimeError('Not enough memory even for eval_batch_size=1')
@torch.no_grad()
def evaluate(m, parts):
    """Compute predictions and metrics for the requested dataset parts."""
    predictions = {}
    metrics = {}
    for part in parts:
        preds = predict(m, part).numpy()
        predictions[part] = preds
        metrics[part] = lib.calculate_metrics(
            D.info['task_type'],
            Y[part].numpy(),  # type: ignore[code]
            preds,
            'logits',
            y_info,
        )
    for part, part_metrics in metrics.items():
        print(f'[{part:<5}]', lib.make_summary(part_metrics))
    return metrics, predictions
# Module-level variables serialized into every checkpoint (and restored via
# globals() on resume) so that an interrupted run continues exactly.
STATE_VARIABLES = [
    'progress',
    'stats',
    'timer',
    'training_log',
    'stage',
    'swa_stage_first_epoch',
    'lr_n_decays',
    'chunk_size',
    'eval_batch_size',
]
def save_checkpoint(suffix):
    """Save model/optimizer/stream state plus resumable globals to disk.

    Also dumps the run statistics, backs up the output directory, and
    uploads the checkpoint file as a Weights & Biases artifact.
    """
    model_artifact = wandb.Artifact('node-artifact', type='model')
    torch.save(
        {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'stream': stream.state_dict(),
            'random_state': zero.get_random_state(),
            **{x: globals()[x] for x in STATE_VARIABLES},
        },
        get_checkpoint_path(suffix),
    )
    lib.dump_stats(stats, output, suffix == 'final')
    lib.backup_output(output)
    model_artifact.add_file(get_checkpoint_path(suffix))
    wandb.run.log_artifact(model_artifact)
# Resume support: look for the newest stage checkpoint (highest stage first)
# and, if one exists, restore model/optimizer/stream state together with the
# module-level variables listed in STATE_VARIABLES.
for stage in list(range(args.get('swa', {}).get('n_checkpoints', 1)))[::-1]:
    if get_checkpoint_path(stage).exists():
        print(f'Loading checkpoint {get_checkpoint_path(stage).name}')
        c = torch.load(get_checkpoint_path(stage))
        model.load_state_dict(c['model'])
        optimizer.load_state_dict(c['optimizer'])
        stream.load_state_dict(c['stream'])
        globals().update({x: c[x] for x in STATE_VARIABLES})
        # Keep a history of previous runs' stats and where they resumed.
        stats.setdefault('old_stats', []).append(deepcopy(stats))
        stats.setdefault('continuations', []).append(stream.epoch)
        zero.set_random_state(c['random_state'])
        break
# %%
timer.run()
with torch.no_grad():
    # NODE-specific initialization: the first forward pass is data-aware,
    # so run one forward on up to 2048 training rows, halving the sample
    # size whenever it triggers an out-of-memory error.
    if stream.epoch == 0:
        model.eval()
        size = 2048
        while True:
            try:
                zero.set_randomness(args['seed'])
                x = step(torch.randperm(train_size)[:size])
                del x
            except RuntimeError as err:
                if not lib.is_oom_exception(err):
                    raise
                size //= 2
            else:
                break
wandb.init(project="RTDL", config=args)
# --- Main training loop with early stopping and optional SWA stages ---------
for epoch in stream.epochs(args['training']['n_epochs']):
    print_epoch_info()
    epoch_losses = []
    for batch_idx in epoch:
        # Falls back to smaller "virtual" chunks when a full batch OOMs.
        loss, new_chunk_size = lib.learn_with_auto_virtual_batch(
            model, optimizer, loss_fn, step, batch_idx, batch_size, chunk_size
        )
        wandb.log({"Training Loss": loss})
        epoch_losses.append(loss.detach())
        if new_chunk_size and new_chunk_size < (chunk_size or batch_size):
            chunk_size = new_chunk_size
            print('New chunk size:', chunk_size)
            stats['chunk_sizes'][stream.iteration] = chunk_size
            zero.free_memory()
            gc.collect()
    epoch_losses = torch.stack(epoch_losses).tolist()
    training_log[lib.TRAIN].extend(epoch_losses)
    print(f'[{lib.TRAIN}] loss = {round(sum(epoch_losses) / len(epoch_losses), 3)}')
    metrics, predictions = evaluate(model, [lib.VAL, lib.TEST])
    wandb.log({"score": metrics[lib.VAL]['score']})
    for k, v in metrics.items():
        training_log[k].append(v)
        wandb.log({k:v})
    progress.update(metrics[lib.VAL]['score'])
    if progress.success:
        print('New best epoch!')
        stats[f'best_epoch_{stage}'] = stream.epoch
        stats[f'metrics_{stage}'] = metrics
        save_checkpoint(stage)
        for k, v in predictions.items():
            np.save(output / f'p_{stage}_{k}.npy', v)
            wandb.log({f"predictions_{k}": v})
    elif progress.fail:
        if stage == 0 and lr_n_decays < args['training']['lr_n_decays']:
            # First react to stagnation by decaying the learning rate.
            print('Reducing lr...')
            stats[f'lr_decay_{lr_n_decays}'] = stream.epoch
            lib.set_lr(optimizer, lib.get_lr(optimizer) * args['training']['lr_decay'])
            lr_n_decays += 1
            progress.forget_bad_updates()
        else:
            # Out of decays: finish this stage; either stop or start the
            # next SWA stage from the best checkpoint so far.
            print(f'Finishing stage {stage}...')
            stats[f'time_{stage}'] = lib.format_seconds(timer())
            if 'swa' not in args or stage + 1 == args['swa']['n_checkpoints']:
                break
            best_stage_checkpoint = torch.load(get_checkpoint_path(stage))
            model.load_state_dict(best_stage_checkpoint['model'])
            optimizer.load_state_dict(best_stage_checkpoint['optimizer'])
            progress = zero.ProgressTracker(args['swa']['patience'])
            lib.set_lr(optimizer, args['training']['lr'] * args['swa']['lr_factor'])
            swa_stage_first_epoch = stream.epoch + 1
            stage += 1
    if stream.epoch == swa_stage_first_epoch:
        # Restore the base learning rate on the first epoch of a SWA stage.
        lib.set_lr(optimizer, args['training']['lr'])
# %%
def load_best_model(stage):
    """Restore the model weights saved for the given stage's best epoch."""
    checkpoint = torch.load(get_checkpoint_path(stage))
    model.load_state_dict(checkpoint['model'])
# --- Optional stochastic weight averaging, then final evaluation ------------
if 'swa' in args:
    print('\nRunning SWA...')
    swa_model = swa_utils.AveragedModel(model)
    swa_progress = zero.ProgressTracker(None)
    best_swa_model = None
    # Average the best checkpoint of every stage, keeping the running
    # average whose validation score is best.
    for stage in range(args['swa']['n_checkpoints']):
        load_best_model(stage)
        swa_model.update_parameters(model)
        if stage > 0 and args['swa']['update_bn_n_epochs']:
            # Recompute BatchNorm statistics for the averaged weights.
            zero.set_randomness(args['seed'])
            with torch.no_grad():
                swa_utils.update_bn(
                    itertools.chain.from_iterable(
                        zero.iter_batches(
                            X[lib.TRAIN], chunk_size or batch_size, shuffle=True
                        )
                        for _ in range(args['swa']['update_bn_n_epochs'])
                    ),
                    swa_model,
                    device,
                )
        swa_progress.update(
            evaluate(swa_model if stage > 0 else model, [lib.VAL])[0][lib.VAL]['score']
        )
        if swa_progress.success:
            print('New best SWA checkpoint!')
            stats['n_swa_checkpoints'] = stage + 1
            if stage > 0:
                best_swa_model = deepcopy(swa_model)
    if best_swa_model is None:
        load_best_model(0)
    else:
        lib.load_swa_state_dict(model, best_swa_model)
else:
    load_best_model(0)
print('\nRunning the final evaluation...')
stats['metrics'], predictions = evaluate(model, lib.PARTS)
for k, v in predictions.items():
    np.save(output / f'p_{k}.npy', v)
    wandb.run.summary[f"final_prediction_{k}"] = v
stats['time_final'] = lib.format_seconds(timer())
save_checkpoint('final')
print(f'Done! Time elapsed: {stats["time_final"]}')
print(
    '\n!!! WARNING !!! The metrics for a single model are stored under the "metrics_0" key.\n'
)
| 31.553191 | 96 | 0.611373 |
import gc
import itertools
import math
import typing as ty
from copy import deepcopy
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim.swa_utils as swa_utils
import zero
from torch import Tensor
import wandb
import lib
import lib.node as node
class NODE(nn.Module):
def __init__(
self,
*,
d_in: int,
num_layers: int,
layer_dim: int,
depth: int,
tree_dim: int,
choice_function: str,
bin_function: str,
d_out: int,
categories: ty.Optional[ty.List[int]],
d_embedding: int,
) -> None:
super().__init__()
if categories is not None:
d_in += len(categories) * d_embedding
category_offsets = torch.tensor([0] + categories[:-1]).cumsum(0)
self.register_buffer('category_offsets', category_offsets)
self.category_embeddings = nn.Embedding(sum(categories), d_embedding)
nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))
print(f'{self.category_embeddings.weight.shape=}')
self.d_out = d_out
self.block = node.DenseBlock(
input_dim=d_in,
num_layers=num_layers,
layer_dim=layer_dim,
depth=depth,
tree_dim=tree_dim,
bin_function=getattr(node, bin_function),
choice_function=getattr(node, choice_function),
flatten_output=False,
)
def forward(self, x_num: Tensor, x_cat: Tensor) -> Tensor:
if x_cat is not None:
x_cat = self.category_embeddings(x_cat + self.category_offsets[None])
x = torch.cat([x_num, x_cat.view(x_cat.size(0), -1)], dim=-1)
else:
x = x_num
x = self.block(x)
x = x[..., : self.d_out].mean(dim=-2)
x = x.squeeze(-1)
return x
args, output = lib.load_config()
assert 'weight_decay' not in args, 'NODE architecture performs badly with weight decay'
if 'swa' in args:
assert args['swa']['n_checkpoints'] > 1
zero.set_randomness(args['seed'])
dataset_dir = lib.get_path(args['data']['path'])
stats: ty.Dict[str, ty.Any] = {
'dataset': dataset_dir.name,
'algorithm': Path(__file__).stem,
**lib.load_json(output / 'stats.json'),
}
D = lib.Dataset.from_dir(dataset_dir)
X = D.build_X(
normalization=args['data'].get('normalization'),
num_nan_policy='mean',
cat_nan_policy='new',
cat_policy=args['data'].get('cat_policy', 'indices'),
cat_min_frequency=args['data'].get('cat_min_frequency', 0.0),
seed=args['seed'],
)
if not isinstance(X, tuple):
X = (X, None)
zero.set_randomness(args['seed'])
Y, y_info = D.build_y(args['data'].get('y_policy'))
lib.dump_pickle(y_info, output / 'y_info.pickle')
X = tuple(None if x is None else lib.to_tensors(x) for x in X)
Y = lib.to_tensors(Y)
device = lib.get_device()
if device.type != 'cpu':
X = tuple(None if x is None else {k: v.to(device) for k, v in x.items()} for x in X)
Y_device = {k: v.to(device) for k, v in Y.items()}
else:
Y_device = Y
X_num, X_cat = X
if not D.is_multiclass:
Y_device = {k: v.float() for k, v in Y_device.items()}
train_size = D.size(lib.TRAIN)
batch_size, epoch_size = (
stats['batch_size'],
stats['epoch_size'],
) = lib.get_epoch_parameters(train_size, args['training'].get('batch_size', 'v3'))
eval_batch_size = args['training']['eval_batch_size']
chunk_size = None
stats['chunk_sizes'] = {}
stats['eval_batch_sizes'] = {}
loss_fn = (
F.binary_cross_entropy_with_logits
if D.is_binclass
else F.cross_entropy
if D.is_multiclass
else F.mse_loss
)
args['model'].setdefault('d_embedding', None)
model = NODE(
d_in=0 if X_num is None else X_num['train'].shape[1],
d_out=D.info['n_classes'] if D.is_multiclass else 1,
categories=lib.get_categories(X_cat),
**args['model'],
).to(device)
if torch.cuda.device_count() > 1:
print('Using nn.DataParallel')
model = nn.DataParallel(model)
stats['n_parameters'] = lib.get_n_parameters(model)
optimizer = lib.make_optimizer(
args['training']['optimizer'],
model.parameters(),
args['training']['lr'],
args['training']['weight_decay'],
)
stream = zero.Stream(lib.IndexLoader(train_size, batch_size, True, device))
progress = zero.ProgressTracker(args['training']['patience'])
training_log = {lib.TRAIN: [], lib.VAL: [], lib.TEST: []}
stage = 0
lr_n_decays = 0
timer = zero.Timer()
swa_stage_first_epoch = None
def print_epoch_info():
print(
f'\n>>> Epoch {stream.epoch} | Stage {stage} | {lib.format_seconds(timer())} | {output}'
)
details = {'lr': lib.get_lr(optimizer), 'chunk_size': chunk_size}
details.update((x, stats[x]) for x in ['batch_size', 'epoch_size', 'n_parameters'])
print(' | '.join(f'{k} = {v}' for k, v in details.items()))
def get_checkpoint_path(suffix):
return output / f'checkpoint_{suffix}.pt'
def step(batch_idx):
logits = model(
X_num[lib.TRAIN][batch_idx],
None if X_cat is None else X_cat[lib.TRAIN][batch_idx],
)
targets = Y_device[lib.TRAIN][batch_idx]
if not D.is_multiclass:
targets = targets.to(logits.dtype)
return logits, targets
def _predict(part):
result = []
for idx in lib.IndexLoader(
D.size(part),
args['training']['eval_batch_size'],
False,
device,
):
result.append(
model(
None if X_num is None else X_num[part][idx],
None if X_cat is None else X_cat[part][idx],
)
)
return torch.cat(result).cpu()
@torch.no_grad()
def predict(m, part):
global eval_batch_size
m.eval()
random_state = zero.get_random_state()
while eval_batch_size:
try:
zero.set_random_state(random_state)
return _predict(part)
except RuntimeError as err:
if not lib.is_oom_exception(err):
raise
zero.free_memory()
gc.collect()
eval_batch_size //= 2
print('New eval batch size:', eval_batch_size)
stats['eval_batch_sizes'][stream.epoch] = eval_batch_size
raise RuntimeError('Not enough memory even for eval_batch_size=1')
@torch.no_grad()
def evaluate(m, parts):
metrics = {}
predictions = {}
for part in parts:
predictions[part] = predict(m, part).numpy()
metrics[part] = lib.calculate_metrics(
D.info['task_type'],
Y[part].numpy(),
predictions[part],
'logits',
y_info,
)
for part, part_metrics in metrics.items():
print(f'[{part:<5}]', lib.make_summary(part_metrics))
return metrics, predictions
STATE_VARIABLES = [
'progress',
'stats',
'timer',
'training_log',
'stage',
'swa_stage_first_epoch',
'lr_n_decays',
'chunk_size',
'eval_batch_size',
]
def save_checkpoint(suffix):
model_artifact = wandb.Artifact('node-artifact', type='model')
torch.save(
{
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'stream': stream.state_dict(),
'random_state': zero.get_random_state(),
**{x: globals()[x] for x in STATE_VARIABLES},
},
get_checkpoint_path(suffix),
)
lib.dump_stats(stats, output, suffix == 'final')
lib.backup_output(output)
model_artifact.add_file(get_checkpoint_path(suffix))
wandb.run.log_artifact(model_artifact)
for stage in list(range(args.get('swa', {}).get('n_checkpoints', 1)))[::-1]:
if get_checkpoint_path(stage).exists():
print(f'Loading checkpoint {get_checkpoint_path(stage).name}')
c = torch.load(get_checkpoint_path(stage))
model.load_state_dict(c['model'])
optimizer.load_state_dict(c['optimizer'])
stream.load_state_dict(c['stream'])
globals().update({x: c[x] for x in STATE_VARIABLES})
stats.setdefault('old_stats', []).append(deepcopy(stats))
stats.setdefault('continuations', []).append(stream.epoch)
zero.set_random_state(c['random_state'])
break
timer.run()
with torch.no_grad():
if stream.epoch == 0:
model.eval()
size = 2048
while True:
try:
zero.set_randomness(args['seed'])
x = step(torch.randperm(train_size)[:size])
del x
except RuntimeError as err:
if not lib.is_oom_exception(err):
raise
size //= 2
else:
break
wandb.init(project="RTDL", config=args)
for epoch in stream.epochs(args['training']['n_epochs']):
print_epoch_info()
epoch_losses = []
for batch_idx in epoch:
loss, new_chunk_size = lib.learn_with_auto_virtual_batch(
model, optimizer, loss_fn, step, batch_idx, batch_size, chunk_size
)
wandb.log({"Training Loss": loss})
epoch_losses.append(loss.detach())
if new_chunk_size and new_chunk_size < (chunk_size or batch_size):
chunk_size = new_chunk_size
print('New chunk size:', chunk_size)
stats['chunk_sizes'][stream.iteration] = chunk_size
zero.free_memory()
gc.collect()
epoch_losses = torch.stack(epoch_losses).tolist()
training_log[lib.TRAIN].extend(epoch_losses)
print(f'[{lib.TRAIN}] loss = {round(sum(epoch_losses) / len(epoch_losses), 3)}')
metrics, predictions = evaluate(model, [lib.VAL, lib.TEST])
wandb.log({"score": metrics[lib.VAL]['score']})
for k, v in metrics.items():
training_log[k].append(v)
wandb.log({k:v})
progress.update(metrics[lib.VAL]['score'])
if progress.success:
print('New best epoch!')
stats[f'best_epoch_{stage}'] = stream.epoch
stats[f'metrics_{stage}'] = metrics
save_checkpoint(stage)
for k, v in predictions.items():
np.save(output / f'p_{stage}_{k}.npy', v)
wandb.log({f"predictions_{k}": v})
elif progress.fail:
if stage == 0 and lr_n_decays < args['training']['lr_n_decays']:
print('Reducing lr...')
stats[f'lr_decay_{lr_n_decays}'] = stream.epoch
lib.set_lr(optimizer, lib.get_lr(optimizer) * args['training']['lr_decay'])
lr_n_decays += 1
progress.forget_bad_updates()
else:
print(f'Finishing stage {stage}...')
stats[f'time_{stage}'] = lib.format_seconds(timer())
if 'swa' not in args or stage + 1 == args['swa']['n_checkpoints']:
break
best_stage_checkpoint = torch.load(get_checkpoint_path(stage))
model.load_state_dict(best_stage_checkpoint['model'])
optimizer.load_state_dict(best_stage_checkpoint['optimizer'])
progress = zero.ProgressTracker(args['swa']['patience'])
lib.set_lr(optimizer, args['training']['lr'] * args['swa']['lr_factor'])
swa_stage_first_epoch = stream.epoch + 1
stage += 1
if stream.epoch == swa_stage_first_epoch:
lib.set_lr(optimizer, args['training']['lr'])
def load_best_model(stage):
    """Restore the model weights saved as the best checkpoint of *stage*."""
    checkpoint = torch.load(get_checkpoint_path(stage))
    model.load_state_dict(checkpoint['model'])
# Optional Stochastic Weight Averaging: average the best checkpoint of each
# stage, optionally re-estimate BatchNorm statistics, and keep the averaged
# model only if it beats the best single checkpoint on validation.
if 'swa' in args:
    print('\nRunning SWA...')
    swa_model = swa_utils.AveragedModel(model)
    # Patience None: just track the best score over all SWA candidates.
    swa_progress = zero.ProgressTracker(None)
    best_swa_model = None
    for stage in range(args['swa']['n_checkpoints']):
        # Fold this stage's best weights into the running average.
        load_best_model(stage)
        swa_model.update_parameters(model)
        if stage > 0 and args['swa']['update_bn_n_epochs']:
            # Recompute BatchNorm running statistics for the averaged weights
            # by streaming training batches through the SWA model.
            zero.set_randomness(args['seed'])
            with torch.no_grad():
                swa_utils.update_bn(
                    itertools.chain.from_iterable(
                        zero.iter_batches(
                            X[lib.TRAIN], chunk_size or batch_size, shuffle=True
                        )
                        for _ in range(args['swa']['update_bn_n_epochs'])
                    ),
                    swa_model,
                    device,
                )
        # At stage 0 the "average" is just the single model, so evaluate it directly.
        swa_progress.update(
            evaluate(swa_model if stage > 0 else model, [lib.VAL])[0][lib.VAL]['score']
        )
        if swa_progress.success:
            print('New best SWA checkpoint!')
            stats['n_swa_checkpoints'] = stage + 1
            if stage > 0:
                best_swa_model = deepcopy(swa_model)
    if best_swa_model is None:
        # Averaging never improved validation: fall back to the best single checkpoint.
        load_best_model(0)
    else:
        lib.load_swa_state_dict(model, best_swa_model)
else:
    load_best_model(0)
print('\nRunning the final evaluation...')
stats['metrics'], predictions = evaluate(model, lib.PARTS)
for k, v in predictions.items():
    # Persist per-part predictions locally and mirror them into the W&B summary.
    np.save(output / f'p_{k}.npy', v)
    wandb.run.summary[f"final_prediction_{k}"] = v
stats['time_final'] = lib.format_seconds(timer())
save_checkpoint('final')
print(f'Done! Time elapsed: {stats["time_final"]}')
print(
    '\n!!! WARNING !!! The metrics for a single model are stored under the "metrics_0" key.\n'
)
| true | true |
f736ac424182660035d1847d61e82b2d381b5bcc | 2,098 | py | Python | dephell_shells/_shells.py | jayvdb/dephell_shells | e3c6bdbd6f97542bfd1224e37a515eba044d616b | [
"MIT"
] | null | null | null | dephell_shells/_shells.py | jayvdb/dephell_shells | e3c6bdbd6f97542bfd1224e37a515eba044d616b | [
"MIT"
] | 2 | 2019-06-03T21:28:31.000Z | 2020-05-29T07:53:26.000Z | dephell_shells/_shells.py | jayvdb/dephell_shells | e3c6bdbd6f97542bfd1224e37a515eba044d616b | [
"MIT"
] | 1 | 2020-01-03T12:40:45.000Z | 2020-01-03T12:40:45.000Z | # built-in
from typing import List, Type
# app
from ._base import BaseShell
from ._manager import Shells
from ._utils import is_windows
def _register_shell(cls: Type[BaseShell]) -> Type[BaseShell]:
    """Class decorator: record *cls* in the global shell registry by name.

    Raises NameError when a shell with the same ``name`` was already added.
    """
    registry = Shells.shells
    if cls.name in registry:
        raise NameError('already registered: ' + cls.name)
    registry[cls.name] = cls
    return cls
@_register_shell
class CmdShell(BaseShell):
    """Windows ``cmd.exe``: non-interactive attach; activation via batch file."""
    name = 'cmd'
    activate = 'activate.bat'
    interactive = False
    @property
    def command(self):
        # /k runs the activation script and keeps the shell session open.
        return [self.executable, '/k', self.entrypoint]
@_register_shell
class PowerShell(BaseShell):
    """PowerShell: non-interactive attach; activation via ``activate.ps1``."""
    name = 'powershell'
    activate = 'activate.ps1'
    interactive = False
    @property
    def command(self):
        # -executionpolicy bypass lets the (unsigned) activation script run;
        # -NoExit keeps the session open after the script finishes.
        return [self.executable, '-executionpolicy', 'bypass', '-NoExit', '-NoLogo', '-File', self.activate]
@_register_shell
class BashShell(BaseShell):
    """GNU bash: interactive attach supported; activation script ``activate``."""
    name = 'bash'
    activate = 'activate'
    interactive = True
@_register_shell
class ShShell(BaseShell):
    """POSIX sh: interactive attach; the venv is entered by sourcing the script."""
    name = 'sh'
    activate = 'activate'
    interactive = True

    @property
    def command(self) -> str:
        # POSIX shells use `.` to source a script into the current session.
        entrypoint = str(self.entrypoint)
        return f'. "{entrypoint}"'
@_register_shell
class FishShell(BaseShell):
    """fish: interactive attach supported; activation script ``activate.fish``."""
    name = 'fish'
    activate = 'activate.fish'
    interactive = True
@_register_shell
class ZshShell(BaseShell):
    """zsh: interactive attach supported; reuses the bash-style ``activate``."""
    name = 'zsh'
    activate = 'activate'
    interactive = True
@_register_shell
class XonShell(BaseShell):
    """xonsh: attached interactively on POSIX, spawned as a subprocess on Windows."""
    name = 'xonsh'
    activate = 'activate'
    interactive = not is_windows()

    @property
    def command(self):
        venv_dir = str(self.bin_path.parent)
        if not self.interactive:
            # Windows: start a fresh interactive xonsh with VIRTUAL_ENV preset.
            return [self.executable, '-i', '-D', 'VIRTUAL_ENV="{}"'.format(venv_dir)]
        # POSIX: inject the venv bin dir at the front of the running shell's $PATH.
        return '$PATH.insert(0, "{}")'.format(venv_dir)

    @property
    def args(self) -> List[str]:
        """Command-line arguments for the non-interactive (subprocess) launch."""
        venv_dir = str(self.bin_path.parent)
        return ['-i', '-D', 'VIRTUAL_ENV=' + venv_dir]
@_register_shell
class TcShell(BaseShell):
    """tcsh: interactive attach supported; activation script ``activate.csh``."""
    name = 'tcsh'
    activate = 'activate.csh'
    interactive = True
@_register_shell
class CShell(BaseShell):
    """csh: interactive attach supported; activation script ``activate.csh``."""
    name = 'csh'
    activate = 'activate.csh'
    interactive = True
| 20.772277 | 108 | 0.64919 |
from typing import List, Type
from ._base import BaseShell
from ._manager import Shells
from ._utils import is_windows
def _register_shell(cls: Type[BaseShell]) -> Type[BaseShell]:
if cls.name in Shells.shells:
raise NameError('already registered: ' + cls.name)
Shells.shells[cls.name] = cls
return cls
@_register_shell
class CmdShell(BaseShell):
name = 'cmd'
activate = 'activate.bat'
interactive = False
@property
def command(self):
return [self.executable, '/k', self.entrypoint]
@_register_shell
class PowerShell(BaseShell):
name = 'powershell'
activate = 'activate.ps1'
interactive = False
@property
def command(self):
return [self.executable, '-executionpolicy', 'bypass', '-NoExit', '-NoLogo', '-File', self.activate]
@_register_shell
class BashShell(BaseShell):
name = 'bash'
activate = 'activate'
interactive = True
@_register_shell
class ShShell(BaseShell):
name = 'sh'
activate = 'activate'
interactive = True
@property
def command(self) -> str:
return '. "{}"'.format(str(self.entrypoint))
@_register_shell
class FishShell(BaseShell):
name = 'fish'
activate = 'activate.fish'
interactive = True
@_register_shell
class ZshShell(BaseShell):
name = 'zsh'
activate = 'activate'
interactive = True
@_register_shell
class XonShell(BaseShell):
name = 'xonsh'
activate = 'activate'
interactive = not is_windows()
@property
def command(self):
path = str(self.bin_path.parent)
if self.interactive:
return '$PATH.insert(0, "{}")'.format(path)
return [self.executable, '-i', '-D', 'VIRTUAL_ENV="{}"'.format(path)]
@property
def args(self) -> List[str]:
return ['-i', '-D', 'VIRTUAL_ENV=' + str(self.bin_path.parent)]
@_register_shell
class TcShell(BaseShell):
name = 'tcsh'
activate = 'activate.csh'
interactive = True
@_register_shell
class CShell(BaseShell):
name = 'csh'
activate = 'activate.csh'
interactive = True
| true | true |
f736ac8b2d77860fe7695f47b5c2ac5c4fabad2e | 104 | py | Python | app/forms.py | vikramjeet007/epochconverter123 | f104d65d4de12c46a60e3c16748714c0c374b05e | [
"bzip2-1.0.6"
] | null | null | null | app/forms.py | vikramjeet007/epochconverter123 | f104d65d4de12c46a60e3c16748714c0c374b05e | [
"bzip2-1.0.6"
] | 7 | 2020-06-05T20:53:51.000Z | 2021-09-22T18:31:25.000Z | app/forms.py | vikramjeet007/epochconverter123 | f104d65d4de12c46a60e3c16748714c0c374b05e | [
"bzip2-1.0.6"
] | null | null | null | from django import forms
class EpochForm(forms.Form):
    """Form with a single required integer field for an epoch timestamp."""
    # presumably seconds since the Unix epoch — confirm against the view
    epochtime = forms.IntegerField(required=True)
class EpochForm(forms.Form):
epochtime = forms.IntegerField(required=True) | true | true |
f736acb9cc6e2195ae0b177fda76b60d0fabe9c4 | 14,218 | py | Python | src/oci/management_agent/models/management_agent_install_key_summary.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/management_agent/models/management_agent_install_key_summary.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/management_agent/models/management_agent_install_key_summary.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
def _plain_property(attr, doc):
    """Build a read/write property backed by the ``_<attr>`` instance slot."""
    def _get(self):
        return getattr(self, '_' + attr)

    def _set(self, value):
        setattr(self, '_' + attr, value)

    return property(_get, _set, doc=doc)


@init_model_state_from_kwargs
class ManagementAgentInstallKeySummary(object):
    """The summary of the Agent Install Key details.

    Instances are normally built from service responses; the
    ``init_model_state_from_kwargs`` decorator maps keyword arguments
    (``id``, ``display_name``, ``created_by_principal_id``,
    ``allowed_key_install_count``, ``current_key_install_count``,
    ``lifecycle_state``, ``lifecycle_details``, ``time_created``,
    ``time_expires``, ``compartment_id``) onto the matching properties.
    """

    # Allowed values of ``lifecycle_state``; any unrecognized value returned
    # by the service is normalized to 'UNKNOWN_ENUM_VALUE' by the setter.
    LIFECYCLE_STATE_CREATING = "CREATING"
    LIFECYCLE_STATE_UPDATING = "UPDATING"
    LIFECYCLE_STATE_ACTIVE = "ACTIVE"
    LIFECYCLE_STATE_INACTIVE = "INACTIVE"
    LIFECYCLE_STATE_TERMINATED = "TERMINATED"
    LIFECYCLE_STATE_DELETING = "DELETING"
    LIFECYCLE_STATE_DELETED = "DELETED"
    LIFECYCLE_STATE_FAILED = "FAILED"

    def __init__(self, **kwargs):
        """Initialize a new summary; see the class docstring for the keywords."""
        # Python type of each model attribute (used by the SDK serializer).
        self.swagger_types = {
            'id': 'str',
            'display_name': 'str',
            'created_by_principal_id': 'str',
            'allowed_key_install_count': 'int',
            'current_key_install_count': 'int',
            'lifecycle_state': 'str',
            'lifecycle_details': 'str',
            'time_created': 'datetime',
            'time_expires': 'datetime',
            'compartment_id': 'str'
        }
        # JSON field name corresponding to each model attribute.
        self.attribute_map = {
            'id': 'id',
            'display_name': 'displayName',
            'created_by_principal_id': 'createdByPrincipalId',
            'allowed_key_install_count': 'allowedKeyInstallCount',
            'current_key_install_count': 'currentKeyInstallCount',
            'lifecycle_state': 'lifecycleState',
            'lifecycle_details': 'lifecycleDetails',
            'time_created': 'timeCreated',
            'time_expires': 'timeExpires',
            'compartment_id': 'compartmentId'
        }
        # Every backing slot starts as None; the decorator fills them from kwargs.
        for attribute in self.swagger_types:
            setattr(self, '_' + attribute, None)

    id = _plain_property('id', "Agent Install Key identifier")
    display_name = _plain_property(
        'display_name', "Management Agent Install Key Name")
    created_by_principal_id = _plain_property(
        'created_by_principal_id',
        "Principal id of user who created the Agent Install key")
    allowed_key_install_count = _plain_property(
        'allowed_key_install_count', "Total number of install for this keys")
    current_key_install_count = _plain_property(
        'current_key_install_count', "Total number of install for this keys")
    lifecycle_details = _plain_property(
        'lifecycle_details',
        "A message describing the current state in more detail")
    time_created = _plain_property(
        'time_created',
        "The time when Management Agent install Key was created (RFC3339)")
    time_expires = _plain_property(
        'time_expires', "date after which key would expire after creation")
    compartment_id = _plain_property('compartment_id', "Compartment Identifier")

    @property
    def lifecycle_state(self):
        """Status of Key (one of the LIFECYCLE_STATE_* constants)."""
        return self._lifecycle_state

    @lifecycle_state.setter
    def lifecycle_state(self, lifecycle_state):
        # Normalize unknown service values so callers never see an
        # out-of-enum string.
        allowed_values = ["CREATING", "UPDATING", "ACTIVE", "INACTIVE", "TERMINATED", "DELETING", "DELETED", "FAILED"]
        if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
            lifecycle_state = 'UNKNOWN_ENUM_VALUE'
        self._lifecycle_state = lifecycle_state

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 36.45641 | 245 | 0.69201 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ManagementAgentInstallKeySummary(object):
LIFECYCLE_STATE_CREATING = "CREATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
LIFECYCLE_STATE_TERMINATED = "TERMINATED"
LIFECYCLE_STATE_DELETING = "DELETING"
LIFECYCLE_STATE_DELETED = "DELETED"
LIFECYCLE_STATE_FAILED = "FAILED"
def __init__(self, **kwargs):
self.swagger_types = {
'id': 'str',
'display_name': 'str',
'created_by_principal_id': 'str',
'allowed_key_install_count': 'int',
'current_key_install_count': 'int',
'lifecycle_state': 'str',
'lifecycle_details': 'str',
'time_created': 'datetime',
'time_expires': 'datetime',
'compartment_id': 'str'
}
self.attribute_map = {
'id': 'id',
'display_name': 'displayName',
'created_by_principal_id': 'createdByPrincipalId',
'allowed_key_install_count': 'allowedKeyInstallCount',
'current_key_install_count': 'currentKeyInstallCount',
'lifecycle_state': 'lifecycleState',
'lifecycle_details': 'lifecycleDetails',
'time_created': 'timeCreated',
'time_expires': 'timeExpires',
'compartment_id': 'compartmentId'
}
self._id = None
self._display_name = None
self._created_by_principal_id = None
self._allowed_key_install_count = None
self._current_key_install_count = None
self._lifecycle_state = None
self._lifecycle_details = None
self._time_created = None
self._time_expires = None
self._compartment_id = None
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def display_name(self):
return self._display_name
@display_name.setter
def display_name(self, display_name):
self._display_name = display_name
@property
def created_by_principal_id(self):
return self._created_by_principal_id
@created_by_principal_id.setter
def created_by_principal_id(self, created_by_principal_id):
self._created_by_principal_id = created_by_principal_id
@property
def allowed_key_install_count(self):
return self._allowed_key_install_count
@allowed_key_install_count.setter
def allowed_key_install_count(self, allowed_key_install_count):
self._allowed_key_install_count = allowed_key_install_count
@property
def current_key_install_count(self):
return self._current_key_install_count
@current_key_install_count.setter
def current_key_install_count(self, current_key_install_count):
self._current_key_install_count = current_key_install_count
@property
def lifecycle_state(self):
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
allowed_values = ["CREATING", "UPDATING", "ACTIVE", "INACTIVE", "TERMINATED", "DELETING", "DELETED", "FAILED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def lifecycle_details(self):
return self._lifecycle_details
@lifecycle_details.setter
def lifecycle_details(self, lifecycle_details):
self._lifecycle_details = lifecycle_details
@property
def time_created(self):
return self._time_created
@time_created.setter
def time_created(self, time_created):
self._time_created = time_created
@property
def time_expires(self):
return self._time_expires
@time_expires.setter
def time_expires(self, time_expires):
self._time_expires = time_expires
@property
def compartment_id(self):
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
self._compartment_id = compartment_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f736adaf616c91406e862b13dd34b5ab14501dad | 771 | py | Python | v1/tnb_faucet/factories/tnb_faucet.py | picko1990/Bank | d35b0a76c1dd6ceb37155b6af32e3043c63d33f5 | [
"MIT"
] | 1 | 2021-07-24T06:54:11.000Z | 2021-07-24T06:54:11.000Z | v1/tnb_faucet/factories/tnb_faucet.py | picko1990/Bank | d35b0a76c1dd6ceb37155b6af32e3043c63d33f5 | [
"MIT"
] | 4 | 2021-10-01T20:23:14.000Z | 2021-10-01T20:57:42.000Z | v1/tnb_faucet/factories/tnb_faucet.py | picko1990/Bank | d35b0a76c1dd6ceb37155b6af32e3043c63d33f5 | [
"MIT"
] | 3 | 2021-02-23T01:09:05.000Z | 2021-09-12T15:52:56.000Z | from factory import Faker
from factory.django import DjangoModelFactory
from ..models.tnb_faucet import FaucetModel, FaucetOption, PostModel
class FaucetOptionFactory(DjangoModelFactory):
    """Factory producing FaucetOption rows with random coin/delay values."""
    # account_number = Faker('pystr', max_chars=VERIFY_KEY_LENGTH)
    # random coin amount in [1, 1500]
    coins = Faker('pyint', max_value=1500, min_value=1)
    # random delay in [1, 30 * 24] — presumably hours (30 days) — TODO confirm
    delay = Faker('pyint', max_value=(30 * 24), min_value=1)
    class Meta:
        model = FaucetOption
class FaucetModelFactory(DjangoModelFactory):
    """Factory for FaucetModel; all fields use the model's own defaults."""
    # Fields intentionally left to model defaults:
    # account
    # social_type
    # social_user_id
    # next_valid_access_time
    # created_at
    class Meta:
        model = FaucetModel
class PostModelFactory(DjangoModelFactory):
    """Factory for PostModel with a random post id."""
    # 281474976710656 == 2**48, i.e. post_id is a random 48-bit integer
    post_id = Faker('pyint', max_value=281474976710656, min_value=1)
    class Meta:
        model = PostModel
| 24.09375 | 68 | 0.723735 | from factory import Faker
from factory.django import DjangoModelFactory
from ..models.tnb_faucet import FaucetModel, FaucetOption, PostModel
class FaucetOptionFactory(DjangoModelFactory):
coins = Faker('pyint', max_value=1500, min_value=1)
delay = Faker('pyint', max_value=(30 * 24), min_value=1)
class Meta:
model = FaucetOption
class FaucetModelFactory(DjangoModelFactory):
class Meta:
model = FaucetModel
class PostModelFactory(DjangoModelFactory):
post_id = Faker('pyint', max_value=281474976710656, min_value=1)
class Meta:
model = PostModel
| true | true |
f736ae56f85e9231991f5384d9498595803c4cdd | 331 | py | Python | okta/models/factor/Verification.py | xmercury-qb/okta-sdk-python | a668a963b13fc61177b36c5438c6ec5fa6f17c4e | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-09-09T12:59:19.000Z | 2020-09-09T12:59:19.000Z | okta/models/factor/Verification.py | xmercury-qb/okta-sdk-python | a668a963b13fc61177b36c5438c6ec5fa6f17c4e | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2018-10-09T22:14:33.000Z | 2018-10-09T23:10:40.000Z | okta/models/factor/Verification.py | xmercury-qb/okta-sdk-python | a668a963b13fc61177b36c5438c6ec5fa6f17c4e | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2018-11-08T19:32:46.000Z | 2021-03-30T06:35:48.000Z | class Verification:
types = {
'activationToken': str,
'answer': str,
'passCode': str,
'nextPassCode': str
}
def __init__(self):
self.activationToken = None # str
self.answer = None # str
self.passCode = None # str
self.nextPassCode = None # str
| 17.421053 | 42 | 0.528701 | class Verification:
types = {
'activationToken': str,
'answer': str,
'passCode': str,
'nextPassCode': str
}
def __init__(self):
self.activationToken = None
self.answer = None
self.passCode = None
self.nextPassCode = None
| true | true |
f736aef1b0fa433e7b740b92d4db0cf32848e82a | 4,159 | py | Python | locations/spiders/pigglywiggly.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | 1 | 2019-08-19T10:00:55.000Z | 2019-08-19T10:00:55.000Z | locations/spiders/pigglywiggly.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | null | null | null | locations/spiders/pigglywiggly.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import json
import re
import logging
from locations.items import GeojsonPointItem
class PigglyWigglySpider(scrapy.Spider):
    '''Scrape Piggly Wiggly store locations from two sources:

    * the shopthepig.com JSON API, which lists the Wisconsin/Illinois stores
      and requires session cookies plus a CSRF token, and
    * the pigglywiggly.com store-locations pages for all other states.
    '''
    name = "pigglywiggly"
    allowed_domains = ["pigglywiggly.com"]
    def start_requests(self):
        url = 'https://www.shopthepig.com/api/m_store_location'
        # Headers and cookies captured from a browser session; the API expects
        # a matching CSRF token / session cookie pair — may go stale over time.
        headers = {
            'x-newrelic-id': 'XQYBWFVVGwAEVFNRBQcP',
            'accept-encoding': 'gzip, deflate, br',
            'x-csrf-token': 'eF2m10r8n51nsRgBSv1xSvhAGtCo8E84BExlmn54Vvc',
            'accept-language': 'en-US,en;q=0.9',
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
            'accept': 'application/json, text/plain, */*',
            'referer': 'https://www.shopthepig.com/stores',
        }
        cookies = {
            '__cfduid': 'db0a53231376d78a40dd7fd728fa896f51512948321',
            'SESSb159e7a0d4a6fad9ba3abc7fadef99ec': 'h3o7xcjnfcERSRrqJVh0soQdUI5IFIBDIQlytOZkhIU',
            'XSRF-TOKEN': 'eF2m10r8n51nsRgBSv1xSvhAGtCo8E84BExlmn54Vvc',
            'has_js': 1,
        }
        # WI/IL stores via the JSON API.
        yield scrapy.http.FormRequest(
            url=url, headers=headers, callback=self.parse_wi, cookies=cookies
        )
        # All remaining states via the HTML store locator.
        yield scrapy.Request(
            'https://www.pigglywiggly.com/store-locations',
            callback=self.parse_nonwi,
        )
    def parse_wi(self, response):
        # One JSON document listing every WI/IL store.
        data = json.loads(response.body_as_unicode())
        stores = data['stores']
        for store in stores:
            unp = {
                'ref': store['storeID'],
                'name': store['storeName'],
                'addr_full': store['normalized_address'],
                'city': store['normalized_city'],
                'state': store['normalized_state'],
                'postcode': store['normalized_zip'],
                'lat': store['latitude'],
                'lon': store['longitude'],
                'phone': store['phone']
            }
            # Drop empty/falsy fields before emitting the item.
            properties = {}
            for key in unp:
                if unp[key]: properties[key] = unp[key]
            yield GeojsonPointItem(**properties)
    def parse_nonwi(self, response):
        # Index page: follow the per-state listing links.
        for state_url in response.xpath('//div[@class="views-field-province-1"]/span[@class="field-content"]/a/@href').extract():
            yield scrapy.Request(
                response.urljoin(state_url),
                callback=self.parse_state,
            )
    def parse_state(self, response):
        # One <li> per store on the state page; no coordinates available here.
        for location in response.xpath('//li[contains(@class, "views-row")]'):
            unp = {
                'addr_full': location.xpath('.//div[@class="street-address"]/text()').extract_first(),
                'city': location.xpath('.//span[@class="locality"]/text()').extract_first(),
                'state': location.xpath('.//span[@class="region"]/text()').extract_first(),
                'postcode': location.xpath('.//span[@class="postal-code"]/text()').extract_first(),
                'phone': location.xpath('.//label[@class="views-label-field-phone-value"]/following-sibling::span[1]/text()').extract_first(),
                'website': location.xpath('.//label[@class="views-label-field-website-value"]/following-sibling::span[1]/a/@href').extract_first(),
            }
            # Google Maps links are not real store websites; discard them.
            if unp['website']:
                if 'google' in unp['website']:
                    unp['website'] = None
            # Normalize dotted phone numbers to dash-separated form.
            if unp['phone']:
                unp['phone'] = unp['phone'].replace('.', '-')
            properties = {}
            for key in unp:
                if unp[key]:
                    properties[key] = unp[key].strip()
            # No stable store id on these pages: synthesize one from address+phone.
            ref = ''
            if 'addr_full' in properties: ref += properties['addr_full']
            if 'phone' in properties: ref += properties['phone']
            properties['ref'] = ref
            yield GeojsonPointItem(**properties)
| 42.010101 | 147 | 0.560231 |
import scrapy
import json
import re
import logging
from locations.items import GeojsonPointItem
class PigglyWigglySpider(scrapy.Spider):
    """Scrape Piggly Wiggly store locations into GeojsonPointItem items.

    Two independent crawls are started:
      * Wisconsin stores from the shopthepig.com JSON API (parse_wi).
      * All other states from the pigglywiggly.com HTML store locator
        (parse_nonwi -> parse_state).
    """
    name = "pigglywiggly"
    allowed_domains = ["pigglywiggly.com"]
    def start_requests(self):
        # NOTE(review): the CSRF token, session cookie and __cfduid below are
        # hard-coded captures from one browser session and will eventually
        # expire — confirm the API still accepts them before relying on
        # parse_wi results.
        url = 'https://www.shopthepig.com/api/m_store_location'
        headers = {
            'x-newrelic-id': 'XQYBWFVVGwAEVFNRBQcP',
            'accept-encoding': 'gzip, deflate, br',
            'x-csrf-token': 'eF2m10r8n51nsRgBSv1xSvhAGtCo8E84BExlmn54Vvc',
            'accept-language': 'en-US,en;q=0.9',
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
            'accept': 'application/json, text/plain, */*',
            'referer': 'https://www.shopthepig.com/stores',
        }
        cookies = {
            '__cfduid': 'db0a53231376d78a40dd7fd728fa896f51512948321',
            'SESSb159e7a0d4a6fad9ba3abc7fadef99ec': 'h3o7xcjnfcERSRrqJVh0soQdUI5IFIBDIQlytOZkhIU',
            'XSRF-TOKEN': 'eF2m10r8n51nsRgBSv1xSvhAGtCo8E84BExlmn54Vvc',
            'has_js': 1,
        }
        # Wisconsin stores: POST to the JSON store-location API.
        yield scrapy.http.FormRequest(
            url=url, headers=headers, callback=self.parse_wi, cookies=cookies
        )
        # Everything else: crawl the HTML store-locations index.
        yield scrapy.Request(
            'https://www.pigglywiggly.com/store-locations',
            callback=self.parse_nonwi,
        )
    def parse_wi(self, response):
        """Parse the shopthepig.com JSON payload (Wisconsin stores)."""
        data = json.loads(response.body_as_unicode())
        stores = data['stores']
        for store in stores:
            unp = {
                'ref': store['storeID'],
                'name': store['storeName'],
                'addr_full': store['normalized_address'],
                'city': store['normalized_city'],
                'state': store['normalized_state'],
                'postcode': store['normalized_zip'],
                'lat': store['latitude'],
                'lon': store['longitude'],
                'phone': store['phone']
            }
            # Keep only truthy fields so items don't carry empty values.
            properties = {}
            for key in unp:
                if unp[key]: properties[key] = unp[key]
            yield GeojsonPointItem(**properties)
    def parse_nonwi(self, response):
        """Follow each per-state page linked from the store-locations index."""
        for state_url in response.xpath('//div[@class="views-field-province-1"]/span[@class="field-content"]/a/@href').extract():
            yield scrapy.Request(
                response.urljoin(state_url),
                callback=self.parse_state,
            )
    def parse_state(self, response):
        """Parse one state page; each views-row <li> is a store location."""
        for location in response.xpath('//li[contains(@class, "views-row")]'):
            unp = {
                'addr_full': location.xpath('.//div[@class="street-address"]/text()').extract_first(),
                'city': location.xpath('.//span[@class="locality"]/text()').extract_first(),
                'state': location.xpath('.//span[@class="region"]/text()').extract_first(),
                'postcode': location.xpath('.//span[@class="postal-code"]/text()').extract_first(),
                'phone': location.xpath('.//label[@class="views-label-field-phone-value"]/following-sibling::span[1]/text()').extract_first(),
                'website': location.xpath('.//label[@class="views-label-field-website-value"]/following-sibling::span[1]/a/@href').extract_first(),
            }
            # Google Maps links are not real store websites; drop them.
            if unp['website']:
                if 'google' in unp['website']:
                    unp['website'] = None
            if unp['phone']:
                unp['phone'] = unp['phone'].replace('.', '-')
            properties = {}
            for key in unp:
                if unp[key]:
                    properties[key] = unp[key].strip()
            # Synthetic unique ref: address + phone (HTML pages expose no id).
            ref = ''
            if 'addr_full' in properties: ref += properties['addr_full']
            if 'phone' in properties: ref += properties['phone']
            properties['ref'] = ref
            yield GeojsonPointItem(**properties)
| true | true |
f736af9878abfb41476fe970b23b2a9fa03030a1 | 3,751 | py | Python | models/custom_models.py | ColdFrenzy/Adaptive_Learning | 02cdd519a7e224fe5f2a49b0c21baa3dac5ce0e1 | [
"MIT"
] | null | null | null | models/custom_models.py | ColdFrenzy/Adaptive_Learning | 02cdd519a7e224fe5f2a49b0c21baa3dac5ce0e1 | [
"MIT"
] | null | null | null | models/custom_models.py | ColdFrenzy/Adaptive_Learning | 02cdd519a7e224fe5f2a49b0c21baa3dac5ce0e1 | [
"MIT"
] | null | null | null | import tensorflow as tf
def dense_model(in_shape, hidden_layer_shapes, num_outputs, name):
    """Build a plain MLP with a policy head ("out") and a value head.

    Args:
        in_shape: int, size of the flat observation vector.
        hidden_layer_shapes: list of int, width of each hidden Dense layer.
        num_outputs: int, size of the policy head.
        name: str, Keras model name.

    Returns:
        tf.keras.Model mapping observations -> [logits, value].

    Fix: previously an empty ``hidden_layer_shapes`` left ``x = None`` and
    the head layers crashed on it; starting from ``inputs`` makes the empty
    case degrade to a linear model while leaving all other cases unchanged.
    """
    inputs = tf.keras.layers.Input(shape=(in_shape,), name="observations")
    x = inputs
    for i, layer_shape in enumerate(hidden_layer_shapes):
        x = tf.keras.layers.Dense(
            layer_shape, name="dense_" + str(i), activation=tf.nn.relu
        )(x)
    out_layer = tf.keras.layers.Dense(num_outputs, name="out", activation=None)(
        x
    )
    value_layer = tf.keras.layers.Dense(1, name="value", activation=None)(x)
    return tf.keras.Model(inputs, [out_layer, value_layer], name=name)
def res_net_model(in_shape, hidden_layer_shapes, num_outputs, name):
    """Residual MLP block with policy ("out") and value heads.

    hidden_layer_shapes : list
        Width of every hidden Dense layer.

    The hidden stack is projected back to ``in_shape`` so it can be added
    to the input (residual connection); ReLU + BatchNorm then feed both
    heads.

    NOTE(review): the projection layer name reuses the loop variable ``i``
    after the loop, so an empty ``hidden_layer_shapes`` raises NameError —
    confirm callers always pass at least one hidden layer.
    """
    x = None
    inputs = tf.keras.layers.Input(shape=(in_shape,), name="observations")
    for i,layer_shape in enumerate(hidden_layer_shapes):
        x = tf.keras.layers.Dense(
            layer_shape, name="dense_"+str(i), activation=tf.nn.relu
        )(x if x is not None else inputs)
    # Project back to the input width so the residual Add is shape-compatible.
    x = tf.keras.layers.Dense(in_shape, name="dense_" + str(i) +".2", activation=tf.nn.relu)(
        x
    )
    x = tf.keras.layers.Add()([inputs, x])
    x = tf.keras.layers.ReLU()(x)
    x = tf.keras.layers.BatchNormalization()(x)
    out_layer = tf.keras.layers.Dense(num_outputs, name="out", activation=None)(
        x
    )
    value_layer = tf.keras.layers.Dense(1, name="value", activation=None)(x)
    return tf.keras.Model(inputs, [out_layer, value_layer], name=name)
def conv_dense_model(in_shape, num_outputs, name):
    """Small CNN trunk (4x4 then two 2x2 convs, 64 filters each) feeding a
    64-unit dense layer, with separate policy ("out") and value heads.
    """
    # Promote a 2-D (H, W) observation shape to single-channel (H, W, 1).
    if len(in_shape) == 2:
        in_shape = in_shape + (1,)
    board = tf.keras.Input(shape=in_shape, name="observations")
    features = tf.keras.layers.Conv2D(64, 4, name="conv_1")(board)
    features = tf.keras.layers.Conv2D(64, 2, name="conv_2")(features)
    features = tf.keras.layers.Conv2D(64, 2, name="conv_3")(features)
    features = tf.keras.layers.Flatten()(features)
    features = tf.keras.layers.Dense(64, name="dense_1", activation=tf.nn.relu)(features)
    policy = tf.keras.layers.Dense(num_outputs, name="out", activation=None)(features)
    value = tf.keras.layers.Dense(1, name="value", activation=None)(features)
    return tf.keras.Model(board, [policy, value], name=name)
def conv_dense_model_connect3(in_shape,num_outputs,name):
    """Like conv_dense_model but sized for a smaller (Connect-3) board:
    only two conv layers (3x3 then 2x2) before the dense trunk and heads.
    """
    # Promote a 2-D (H, W) observation shape to single-channel (H, W, 1).
    if len(in_shape) == 2:
        in_shape = in_shape + (1,)
    inputs = tf.keras.Input(shape=in_shape , name="observations")
    x = tf.keras.layers.Conv2D(64, 3, name="conv_1")(inputs)
    x = tf.keras.layers.Conv2D(64, 2, name="conv_2")(x)
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(64, name="dense_1",activation=tf.nn.relu)(x)
    out_layer = tf.keras.layers.Dense(num_outputs, name="out", activation=None)(x)
    value_layer = tf.keras.layers.Dense(1, name="value", activation=None)(x)
    return tf.keras.Model(inputs, [out_layer, value_layer], name=name)
def dense_q_model(in_shape, hidden_shape, num_outputs, name):
    """Single-hidden-layer MLP producing Q-values (no value head)."""
    obs = tf.keras.layers.Input(shape=(in_shape,), name="observations")
    hidden = tf.keras.layers.Dense(
        hidden_shape, name="layer1", activation=tf.nn.relu
    )(obs)
    q_values = tf.keras.layers.Dense(num_outputs, name="out", activation=None)(hidden)
    return tf.keras.Model(obs, q_values, name=name)
if __name__ == "__main__":
    # Smoke test: build the Connect-4-sized conv model and dump its
    # architecture diagram (plot_model needs pydot + graphviz installed).
    # model = res_net_model(42, [256,128,64], 7, "res_model")
    # model = dense_model(42, [256,128,64], 7, "dense_block")
    # model.summary()
    model = conv_dense_model((7,6,1),7,"conv_dense_model")
    tf.keras.utils.plot_model(model, "conv_dense_model.png", True)
| 42.146067 | 97 | 0.666756 | import tensorflow as tf
def dense_model(in_shape, hidden_layer_shapes, num_outputs, name):
x = None
inputs = tf.keras.layers.Input(shape=(in_shape,), name="observations")
for i,layer_shape in enumerate(hidden_layer_shapes):
x = tf.keras.layers.Dense(
layer_shape, name="dense_" + str(i), activation=tf.nn.relu
)(x if x is not None else inputs)
out_layer = tf.keras.layers.Dense(num_outputs, name="out", activation=None)(
x
)
value_layer = tf.keras.layers.Dense(1, name="value", activation=None)(x)
return tf.keras.Model(inputs, [out_layer, value_layer], name=name)
def res_net_model(in_shape, hidden_layer_shapes, num_outputs, name):
x = None
inputs = tf.keras.layers.Input(shape=(in_shape,), name="observations")
for i,layer_shape in enumerate(hidden_layer_shapes):
x = tf.keras.layers.Dense(
layer_shape, name="dense_"+str(i), activation=tf.nn.relu
)(x if x is not None else inputs)
x = tf.keras.layers.Dense(in_shape, name="dense_" + str(i) +".2", activation=tf.nn.relu)(
x
)
x = tf.keras.layers.Add()([inputs, x])
x = tf.keras.layers.ReLU()(x)
x = tf.keras.layers.BatchNormalization()(x)
out_layer = tf.keras.layers.Dense(num_outputs, name="out", activation=None)(
x
)
value_layer = tf.keras.layers.Dense(1, name="value", activation=None)(x)
return tf.keras.Model(inputs, [out_layer, value_layer], name=name)
def conv_dense_model(in_shape, num_outputs, name):
if len(in_shape) == 2:
in_shape = in_shape + (1,)
inputs = tf.keras.Input(shape=in_shape , name="observations")
x = tf.keras.layers.Conv2D(64, 4, name="conv_1")(inputs)
x = tf.keras.layers.Conv2D(64, 2, name="conv_2")(x)
x = tf.keras.layers.Conv2D(64, 2, name="conv_3")(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(64, name="dense_1",activation=tf.nn.relu)(x)
out_layer = tf.keras.layers.Dense(num_outputs, name="out", activation=None)(x)
value_layer = tf.keras.layers.Dense(1, name="value", activation=None)(x)
return tf.keras.Model(inputs, [out_layer, value_layer], name=name)
def conv_dense_model_connect3(in_shape,num_outputs,name):
if len(in_shape) == 2:
in_shape = in_shape + (1,)
inputs = tf.keras.Input(shape=in_shape , name="observations")
x = tf.keras.layers.Conv2D(64, 3, name="conv_1")(inputs)
x = tf.keras.layers.Conv2D(64, 2, name="conv_2")(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(64, name="dense_1",activation=tf.nn.relu)(x)
out_layer = tf.keras.layers.Dense(num_outputs, name="out", activation=None)(x)
value_layer = tf.keras.layers.Dense(1, name="value", activation=None)(x)
return tf.keras.Model(inputs, [out_layer, value_layer], name=name)
def dense_q_model(in_shape, hidden_shape, num_outputs, name):
inputs = tf.keras.layers.Input(shape=(in_shape,), name="observations")
hidden_layer = tf.keras.layers.Dense(
hidden_shape, name="layer1", activation=tf.nn.relu
)(inputs)
out_layer = tf.keras.layers.Dense(num_outputs, name="out", activation=None)(
hidden_layer
)
return tf.keras.Model(inputs, out_layer, name=name)
if __name__ == "__main__":
model = conv_dense_model((7,6,1),7,"conv_dense_model")
tf.keras.utils.plot_model(model, "conv_dense_model.png", True)
| true | true |
f736b01602f379c31ea80e661458f27572a3c2bd | 23,029 | py | Python | utils_nvidia.py | kcyu2014/nas-landmarkreg | a00c3619bf4042e446e1919087f0b09fe9fa3a65 | [
"MIT"
] | 8 | 2021-04-13T01:52:11.000Z | 2022-03-30T03:53:12.000Z | utils_nvidia.py | kcyu2014/nas-landmarkreg | a00c3619bf4042e446e1919087f0b09fe9fa3a65 | [
"MIT"
] | 4 | 2021-05-29T01:41:00.000Z | 2021-08-24T09:40:43.000Z | utils_nvidia.py | kcyu2014/nas-landmarkreg | a00c3619bf4042e446e1919087f0b09fe9fa3a65 | [
"MIT"
] | null | null | null | import argparse
import os
import shutil
import time
import math
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
except ImportError:
raise ImportError("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")
from nasws.cnn.utils import AverageMeter
from utils import accuracy
# item() is a recent addition, so this helps with backward compatibility.
def to_python_float(t):
    """Convert a 0-dim tensor (or 1-element sequence) to a Python number.

    Tensors with an ``.item()`` method use it; anything else (e.g. an older
    1-element container) is unwrapped by indexing — kept for backward
    compatibility.
    """
    return t.item() if hasattr(t, 'item') else t[0]
class HybridTrainPipe(Pipeline):
    """DALI training pipeline: sharded file reader -> JPEG decode with a
    random crop -> resize -> normalize + random horizontal mirror.

    ``dali_cpu`` selects a pure-CPU variant; otherwise decode runs on the
    "mixed" (CPU+GPU) backend and the rest on GPU.
    """
    def __init__(self, batch_size, num_threads, device_id, data_dir, crop,
                 shard_id, num_shards, dali_cpu=False, args=None,
                 file_list=None
                 ):
        super(HybridTrainPipe, self).__init__(batch_size,
                                              num_threads,
                                              device_id,
                                              seed=12 + device_id)
        # NOTE(review): sharding uses args.apex_local_rank / args.world_size
        # rather than the shard_id / num_shards parameters — confirm that is
        # intentional (the explicit parameters are currently unused).
        self.input = ops.FileReader(file_root=data_dir,
                                    shard_id=args.apex_local_rank,
                                    num_shards=args.world_size,
                                    random_shuffle=True,
                                    pad_last_batch=True,
                                    file_list=file_list)
        # Let the user decide which pipeline variant works best for the
        # network/hardware they run.
        dali_device = 'cpu' if dali_cpu else 'gpu'
        decoder_device = 'cpu' if dali_cpu else 'mixed'
        # This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet
        # without additional reallocations
        device_memory_padding = 211025920 if decoder_device == 'mixed' else 0
        host_memory_padding = 140544512 if decoder_device == 'mixed' else 0
        self.decode = ops.ImageDecoderRandomCrop(device=decoder_device, output_type=types.RGB,
                                                 device_memory_padding=device_memory_padding,
                                                 host_memory_padding=host_memory_padding,
                                                 random_aspect_ratio=[0.8, 1.25],
                                                 random_area=[0.1, 1.0],
                                                 num_attempts=100)
        self.res = ops.Resize(device=dali_device,
                              resize_x=crop,
                              resize_y=crop,
                              interp_type=types.INTERP_TRIANGULAR)
        # ImageNet mean/std expressed on the 0-255 pixel scale.
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            crop=(crop, crop),
                                            image_type=types.RGB,
                                            mean=[0.485 * 255,0.456 * 255,0.406 * 255],
                                            std=[0.229 * 255,0.224 * 255,0.225 * 255])
        # 50/50 coin drives the random horizontal mirror in define_graph.
        self.coin = ops.CoinFlip(probability=0.5)
        logging.info('DALI "{0}" variant'.format(dali_device))
    def define_graph(self):
        """Wire the DALI graph; returns [normalized images, labels]."""
        rng = self.coin()
        self.jpegs, self.labels = self.input(name="Reader")
        images = self.decode(self.jpegs)
        images = self.res(images)
        output = self.cmnp(images.gpu(), mirror=rng)
        return [output, self.labels]
class HybridValPipe(Pipeline):
    """DALI validation pipeline: sharded reader (no shuffle) -> JPEG decode
    -> shorter-side resize to ``size`` -> center crop to ``crop`` +
    normalize. No augmentation.
    """
    def __init__(self, batch_size, num_threads, device_id, data_dir, crop,
                 size, shard_id, num_shards, args=None):
        super(HybridValPipe, self).__init__(batch_size,
                                            num_threads,
                                            device_id,
                                            seed=12 + device_id)
        # NOTE(review): like HybridTrainPipe, sharding comes from args
        # rather than the shard_id / num_shards parameters.
        self.input = ops.FileReader(file_root=data_dir,
                                    shard_id=args.apex_local_rank,
                                    num_shards=args.world_size,
                                    random_shuffle=False,
                                    pad_last_batch=True)
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        self.res = ops.Resize(device="gpu",
                              resize_shorter=size,
                              interp_type=types.INTERP_TRIANGULAR)
        # ImageNet mean/std expressed on the 0-255 pixel scale.
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            crop=(crop, crop),
                                            image_type=types.RGB,
                                            mean=[0.485 * 255,0.456 * 255,0.406 * 255],
                                            std=[0.229 * 255,0.224 * 255,0.225 * 255])
    def define_graph(self):
        """Wire the DALI graph; returns [normalized images, labels]."""
        self.jpegs, self.labels = self.input(name="Reader")
        images = self.decode(self.jpegs)
        images = self.res(images)
        output = self.cmnp(images)
        return [output, self.labels]
def fast_collate(batch, memory_format):
    """Collate (image, label) pairs into a uint8 batch tensor plus labels.

    Avoids per-sample float conversion: images stay uint8 and are
    normalized later (see data_prefetcher). The output batch is laid out
    with the requested ``memory_format``.

    NOTE(review): the spatial dims are taken as w = size()[1], h = size()[2]
    of the first image — for square crops this is moot, verify for
    non-square inputs.
    """
    images = [sample[0] for sample in batch]
    labels = torch.tensor([sample[1] for sample in batch], dtype=torch.int64)
    first = images[0].size()
    w, h = first[1], first[2]
    out = torch.zeros((len(images), 3, h, w), dtype=torch.uint8)
    out = out.contiguous(memory_format=memory_format)
    for idx, image in enumerate(images):
        arr = np.asarray(image, dtype=np.uint8)
        # Promote grayscale (H, W) arrays to (H, W, 1).
        if arr.ndim < 3:
            arr = np.expand_dims(arr, axis=-1)
        out[idx] += torch.from_numpy(arr)
    return out, labels
class data_prefetcher():
    """Prefetch batches from a loader onto the GPU via a side CUDA stream.

    While the model computes on batch N, the next batch's host->device copy
    and mean/std normalization run on ``self.stream``, overlapping transfer
    with compute. Single-pass: ``next()`` returns (None, None) at the end.
    """
    def __init__(self, loader):
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        # ImageNet mean/std on the 0-255 scale, shaped for NCHW broadcast.
        self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
        self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
        # With Amp, it isn't necessary to manually convert data to half.
        # if args.fp16:
        #     self.mean = self.mean.half()
        #     self.std = self.std.half()
        self.preload()
    def preload(self):
        """Fetch the next batch and start its async copy on self.stream."""
        try:
            self.next_input, self.next_target = next(self.loader)
        except StopIteration:
            self.next_input = None
            self.next_target = None
            return
        # if record_stream() doesn't work, another option is to make sure device inputs are created
        # on the main stream.
        # self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')
        # self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')
        # Need to make sure the memory allocated for next_* is not still in use by the main stream
        # at the time we start copying to next_*:
        # self.stream.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(self.stream):
            self.next_input = self.next_input.cuda(non_blocking=True)
            self.next_target = self.next_target.cuda(non_blocking=True)
            # more code for the alternative if record_stream() doesn't work:
            # copy_ will record the use of the pinned source tensor in this side stream.
            # self.next_input_gpu.copy_(self.next_input, non_blocking=True)
            # self.next_target_gpu.copy_(self.next_target, non_blocking=True)
            # self.next_input = self.next_input_gpu
            # self.next_target = self.next_target_gpu
            # With Amp, it isn't necessary to manually convert data to half.
            # if args.fp16:
            #     self.next_input = self.next_input.half()
            # else:
            # Normalize on-device while still on the side stream.
            self.next_input = self.next_input.float()
            self.next_input = self.next_input.sub_(self.mean).div_(self.std)
    def next(self):
        """Return the prefetched (input, target) and start the next preload."""
        # Block the main stream until the side-stream copy has finished.
        torch.cuda.current_stream().wait_stream(self.stream)
        input = self.next_input
        target = self.next_target
        if input is not None:
            # record_stream ties the tensor's lifetime to the main stream so
            # the caching allocator won't reuse it too early.
            input.record_stream(torch.cuda.current_stream())
        if target is not None:
            target.record_stream(torch.cuda.current_stream())
        self.preload()
        return input, target
def reduce_tensor(tensor, world_size):
    """All-reduce (sum) ``tensor`` across the process group and average it.

    Returns a new tensor; the input is left untouched. Requires an
    initialized torch.distributed process group.
    """
    rt = tensor.clone()
    # dist.reduce_op is a deprecated alias; ReduceOp is the supported spelling.
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= world_size
    return rt
def adjust_learning_rate(optimizer, epoch, step, len_epoch, args):
    """Step-decay LR schedule (should yield ~76% converged accuracy with
    batch size 256): x0.1 every 30 epochs, one extra drop from epoch 80,
    with a linear warmup over the first 5 epochs.
    """
    decay_steps = epoch // 30
    if epoch >= 80:
        decay_steps += 1
    lr = args.learning_rate * (0.1 ** decay_steps)
    # Linear warmup across the first 5 epochs of iterations.
    if epoch < 5:
        lr = lr * float(1 + step + epoch * len_epoch) / (5. * len_epoch)
    for group in optimizer.param_groups:
        group['lr'] = lr
# def adjust_learning_rate(optimizer, epoch, args):
# # Smaller slope for the last 5 epochs because lr * 1/250 is relatively large
# if args.epochs - epoch > 5:
# lr = args.learning_rate * (args.epochs - 5 - epoch) / (args.epochs - 5)
# else:
# lr = args.learning_rate * (args.epochs - epoch) / ((args.epochs - 5) * 5)
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
# return lr
def train(train_loader, model, criterion, optimizer, epoch, args):
    """One training epoch using data_prefetcher + apex AMP loss scaling.

    Optionally brackets iterations with NVTX ranges and CUDA profiler
    start/stop (args.apex_profiling >= 0) for nvprof/Nsight captures.
    Returns (top1.avg, losses.avg); note the meters only sample every
    args.report_freq iterations.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    prefetcher = data_prefetcher(train_loader)
    input, target = prefetcher.next()
    i = 0
    while input is not None:
        i += 1
        if args.apex_profiling >= 0 and i == args.apex_profiling:
            print("Profiling begun at iteration {}".format(i))
            torch.cuda.cudart().cudaProfilerStart()
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("Body of iteration {}".format(i))
        adjust_learning_rate(optimizer, epoch, i, len(train_loader), args)
        # compute output
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("forward")
        logits, logtis_aux = model(input)
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()
        loss = criterion(logits, target)
        if args.auxiliary:
            loss_aux = criterion(logtis_aux, target)
            loss += args.auxiliary_weight * loss_aux
        # compute gradient and do SGD step
        optimizer.zero_grad()
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("backward")
        # Scale the loss through amp so fp16 gradients don't underflow.
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()
        # for param in model.parameters():
        #     print(param.data.double().sum().item(), param.grad.data.double().sum().item())
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("optimizer.step()")
        optimizer.step()
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()
        if i%args.report_freq == 0:
            # Every report_freq iterations, check the loss, accuracy, and speed.
            # For best performance, it doesn't make sense to print these metrics every
            # iteration, since they incur an allreduce and some host<->device syncs.
            # Measure accuracy
            prec1, prec5 = accuracy(logits.data, target, topk=(1, 5))
            # Average loss and accuracy across processes for logging
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                prec1 = reduce_tensor(prec1, args.world_size)
                prec5 = reduce_tensor(prec5, args.world_size)
            else:
                reduced_loss = loss.data
            # to_python_float incurs a host<->device sync
            losses.update(to_python_float(reduced_loss), input.size(0))
            top1.update(to_python_float(prec1), input.size(0))
            top5.update(to_python_float(prec5), input.size(0))
            torch.cuda.synchronize()
            batch_time.update((time.time() - end)/args.report_freq)
            end = time.time()
            if args.apex_local_rank == 0:
                logging.info('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Speed {3:.3f} ({4:.3f})\t'
                      'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, len(train_loader),
                      args.world_size*args.batch_size/batch_time.val,
                      args.world_size*args.batch_size/batch_time.avg,
                      batch_time=batch_time,
                      loss=losses, top1=top1, top5=top5))
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("prefetcher.next()")
        input, target = prefetcher.next()
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()
        # Pop range "Body of iteration {}".format(i)
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()
        if args.apex_profiling >= 0 and i == args.apex_profiling + 10:
            print("Profiling ended at iteration {}".format(i))
            torch.cuda.cudart().cudaProfilerStop()
            quit()
    return top1.avg, losses.avg
def validate(val_loader, model, criterion, args):
    """Evaluate over val_loader (via data_prefetcher) without gradients.

    Returns (top1.avg, top5.avg, losses.avg), averaged across processes
    when args.distributed is set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    prefetcher = data_prefetcher(val_loader)
    input, target = prefetcher.next()
    i = 0
    while input is not None:
        i += 1
        # compute output
        with torch.no_grad():
            output, _ = model(input)
            loss = criterion(output, target)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        if args.distributed:
            reduced_loss = reduce_tensor(loss.data, args.world_size)
            prec1 = reduce_tensor(prec1, args.world_size)
            prec5 = reduce_tensor(prec5, args.world_size)
        else:
            reduced_loss = loss.data
        losses.update(to_python_float(reduced_loss), input.size(0))
        top1.update(to_python_float(prec1), input.size(0))
        top5.update(to_python_float(prec5), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # TODO: Change timings to mirror train().
        if args.apex_local_rank == 0 and i % args.report_freq == 0:
            logging.info('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Speed {2:.3f} ({3:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                  i, len(val_loader),
                  args.world_size * args.batch_size / batch_time.val,
                  args.world_size * args.batch_size / batch_time.avg,
                  batch_time=batch_time, loss=losses,
                  top1=top1, top5=top5))
        input, target = prefetcher.next()
    logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    return top1.avg, top5.avg, losses.avg
def dali_apex_train(train_loader, model, criterion, optimizer, epoch, args):
    """One training epoch over a DALI iterator, with optional apex AMP.

    Unlike train(), batches come as DALI dicts (data[0]["data"]/["label"])
    and loss scaling only runs when args.apex_opt_level is set. NVTX /
    CUDA-profiler bracketing is gated on args.dali_profiling. Returns
    (top1.avg, losses.avg), sampled every args.report_freq iterations.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, data in enumerate(train_loader):
        input = data[0]["data"]
        target = data[0]["label"].squeeze().cuda().long()
        train_loader_len = int(math.ceil(train_loader._size / args.batch_size))
        if args.dali_profiling >= 0 and i == args.dali_profiling:
            print("Profiling begun at iteration {}".format(i))
            torch.cuda.cudart().cudaProfilerStart()
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("Body of iteration {}".format(i))
        # adjust_learning_rate(optimizer, epoch, i, train_loader_len, args)
        if args.debug:
            if i > 10:
                logging.info('Break in debug mode after 10 batchs...')
                break
        # compute output
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("forward")
        logits, logtis_aux = model(input)
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()
        loss = criterion(logits, target)
        if args.auxiliary:
            loss_aux = criterion(logtis_aux, target)
            loss += args.auxiliary_weight * loss_aux
        # compute gradient and do SGD step
        optimizer.zero_grad()
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("backward")
        # AMP loss scaling only when an apex opt level is configured.
        if args.apex_opt_level is not None:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("optimizer.step()")
        optimizer.step()
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()
        if i%args.report_freq == 0:
            # Every print_freq iterations, check the loss, accuracy, and speed.
            # For best performance, it doesn't make sense to print these metrics every
            # iteration, since they incur an allreduce and some host<->device syncs.
            # Measure accuracy
            prec1, prec5 = accuracy(logits.data, target, topk=(1, 5))
            # Average loss and accuracy across processes for logging
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                prec1 = reduce_tensor(prec1, args.world_size)
                prec5 = reduce_tensor(prec5, args.world_size)
            else:
                reduced_loss = loss.data
            # to_python_float incurs a host<->device sync
            losses.update(to_python_float(reduced_loss), input.size(0))
            top1.update(to_python_float(prec1), input.size(0))
            top5.update(to_python_float(prec5), input.size(0))
            torch.cuda.synchronize()
            batch_time.update((time.time() - end)/args.report_freq)
            end = time.time()
            if args.apex_local_rank == 0:
                logging.info('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Speed {3:.3f} ({4:.3f})\t'
                      'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, train_loader_len,
                      args.world_size*args.batch_size/batch_time.val,
                      args.world_size*args.batch_size/batch_time.avg,
                      batch_time=batch_time,
                      loss=losses, top1=top1, top5=top5))
        # Pop range "Body of iteration {}".format(i)
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()
        if args.dali_profiling >= 0 and i == args.dali_profiling + 2:
            print("Profiling ended at iteration {}".format(i))
            torch.cuda.cudart().cudaProfilerStop()
            quit()
    return top1.avg, losses.avg
def dali_validate(val_loader, model, criterion, args):
    """Evaluate over a DALI iterator without gradients.

    Batches come as DALI dicts (data[0]["data"]/["label"]). Returns
    (top1.avg, top5.avg, losses.avg), averaged across processes when
    args.distributed is set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, data in enumerate(val_loader):
        input = data[0]["data"]
        target = data[0]["label"].squeeze().cuda().long()
        val_loader_len = int(val_loader._size / args.batch_size)
        if args.debug:
            if i > 10:
                break
        # compute output
        with torch.no_grad():
            output, _ = model(input)
            loss = criterion(output, target)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        if args.distributed:
            reduced_loss = reduce_tensor(loss.data, args.world_size)
            prec1 = reduce_tensor(prec1, args.world_size)
            prec5 = reduce_tensor(prec5, args.world_size)
        else:
            reduced_loss = loss.data
        losses.update(to_python_float(reduced_loss), input.size(0))
        top1.update(to_python_float(prec1), input.size(0))
        top5.update(to_python_float(prec5), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        # TODO: Change timings to mirror train().
        if args.apex_local_rank == 0 and i % args.report_freq == 0:
            logging.info('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Speed {2:.3f} ({3:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                  i, val_loader_len,
                  args.world_size * args.batch_size / batch_time.val,
                  args.world_size * args.batch_size / batch_time.avg,
                  batch_time=batch_time, loss=losses,
                  top1=top1, top5=top5))
    logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    return top1.avg, top5.avg, losses.avg
| 40.544014 | 124 | 0.574146 | import argparse
import os
import shutil
import time
import math
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
except ImportError:
raise ImportError("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")
from nasws.cnn.utils import AverageMeter
from utils import accuracy
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
class HybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop,
shard_id, num_shards, dali_cpu=False, args=None,
file_list=None
):
super(HybridTrainPipe, self).__init__(batch_size,
num_threads,
device_id,
seed=12 + device_id)
self.input = ops.FileReader(file_root=data_dir,
shard_id=args.apex_local_rank,
num_shards=args.world_size,
random_shuffle=True,
pad_last_batch=True,
file_list=file_list)
dali_device = 'cpu' if dali_cpu else 'gpu'
decoder_device = 'cpu' if dali_cpu else 'mixed'
device_memory_padding = 211025920 if decoder_device == 'mixed' else 0
host_memory_padding = 140544512 if decoder_device == 'mixed' else 0
self.decode = ops.ImageDecoderRandomCrop(device=decoder_device, output_type=types.RGB,
device_memory_padding=device_memory_padding,
host_memory_padding=host_memory_padding,
random_aspect_ratio=[0.8, 1.25],
random_area=[0.1, 1.0],
num_attempts=100)
self.res = ops.Resize(device=dali_device,
resize_x=crop,
resize_y=crop,
interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=[0.485 * 255,0.456 * 255,0.406 * 255],
std=[0.229 * 255,0.224 * 255,0.225 * 255])
self.coin = ops.CoinFlip(probability=0.5)
logging.info('DALI "{0}" variant'.format(dali_device))
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror=rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop,
size, shard_id, num_shards, args=None):
super(HybridValPipe, self).__init__(batch_size,
num_threads,
device_id,
seed=12 + device_id)
self.input = ops.FileReader(file_root=data_dir,
shard_id=args.apex_local_rank,
num_shards=args.world_size,
random_shuffle=False,
pad_last_batch=True)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.res = ops.Resize(device="gpu",
resize_shorter=size,
interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=[0.485 * 255,0.456 * 255,0.406 * 255],
std=[0.229 * 255,0.224 * 255,0.225 * 255])
def define_graph(self):
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.labels]
def fast_collate(batch, memory_format):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size()[1]
h = imgs[0].size()[2]
tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8).contiguous(memory_format=memory_format)
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if(nump_array.ndim < 3):
nump_array = np.expand_dims(nump_array, axis=-1)
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
class data_prefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
# if record_stream() doesn't work, another option is to make sure device inputs are created
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
# copy_ will record the use of the pinned source tensor in this side stream.
# self.next_input_gpu.copy_(self.next_input, non_blocking=True)
# self.next_target_gpu.copy_(self.next_target, non_blocking=True)
# self.next_input = self.next_input_gpu
# self.next_target = self.next_target_gpu
# With Amp, it isn't necessary to manually convert data to half.
self.next_input = self.next_input.float()
self.next_input = self.next_input.sub_(self.mean).div_(self.std)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
if input is not None:
input.record_stream(torch.cuda.current_stream())
if target is not None:
target.record_stream(torch.cuda.current_stream())
self.preload()
return input, target
def reduce_tensor(tensor, world_size):
    """Average ``tensor`` element-wise across all distributed processes.

    Args:
        tensor: tensor to average; left unmodified (a clone is reduced).
        world_size: number of participating ranks to divide by.

    Returns:
        A new tensor holding the mean of ``tensor`` over all ranks.
    """
    rt = tensor.clone()
    # ``dist.reduce_op`` is a long-deprecated alias; ``dist.ReduceOp`` is
    # the supported spelling and behaves identically.
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= world_size
    return rt
def adjust_learning_rate(optimizer, epoch, step, len_epoch, args):
    """Step-decay schedule with linear warmup over the first 5 epochs.

    The base rate ``args.learning_rate`` is divided by 10 every 30 epochs,
    with one extra decade of decay once ``epoch >= 80``.  During epochs
    0-4 the rate instead ramps up linearly with the global step.  The
    resulting value is written into every parameter group of ``optimizer``.
    """
    decay_count = epoch // 30
    if epoch >= 80:
        decay_count = decay_count + 1
    lr = args.learning_rate * (0.1 ** decay_count)
    if epoch < 5:
        # Linear warmup: scale by (global_step + 1) / (5 * steps_per_epoch).
        lr = lr * float(1 + step + epoch * len_epoch) / (5. * len_epoch)
    for group in optimizer.param_groups:
        group['lr'] = lr
    # NOTE(review): the enclosing ``def train(...)`` header is not present in
    # this chunk; from the names used it appears to take
    # (train_loader, model, criterion, optimizer, epoch, args) -- confirm.
    # Running meters for iteration time, loss, and top-1/top-5 accuracy.
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Training-mode behavior (dropout active, BN updates running stats).
    model.train()
    end = time.time()
    # The prefetcher overlaps the next batch's host->device copy with compute.
    prefetcher = data_prefetcher(train_loader)
    input, target = prefetcher.next()
    i = 0
    while input is not None:
        i += 1
        # Optionally start CUDA profiling at the requested iteration.
        if args.apex_profiling >= 0 and i == args.apex_profiling:
            print("Profiling begun at iteration {}".format(i))
            torch.cuda.cudart().cudaProfilerStart()
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("Body of iteration {}".format(i))
        adjust_learning_rate(optimizer, epoch, i, len(train_loader), args)
        # Forward pass (model returns main logits plus auxiliary-head logits).
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("forward")
        logits, logtis_aux = model(input)
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()
        loss = criterion(logits, target)
        if args.auxiliary:
            # Weight the auxiliary-classifier loss into the total loss.
            loss_aux = criterion(logtis_aux, target)
            loss += args.auxiliary_weight * loss_aux
        optimizer.zero_grad()
        # Backward pass through Apex AMP's scaled-loss context.
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("backward")
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("optimizer.step()")
        optimizer.step()
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()
        if i%args.report_freq == 0:
            # Metrics are computed only every report_freq-th
            # iteration, since they incur an allreduce and some host<->device syncs.
            # Measure accuracy
            prec1, prec5 = accuracy(logits.data, target, topk=(1, 5))
            # Average loss and accuracy across processes for logging
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                prec1 = reduce_tensor(prec1, args.world_size)
                prec5 = reduce_tensor(prec5, args.world_size)
            else:
                reduced_loss = loss.data
            # to_python_float incurs a host<->device sync
            losses.update(to_python_float(reduced_loss), input.size(0))
            top1.update(to_python_float(prec1), input.size(0))
            top5.update(to_python_float(prec5), input.size(0))
            torch.cuda.synchronize()
            # Per-iteration time averaged over the report window.
            batch_time.update((time.time() - end)/args.report_freq)
            end = time.time()
            if args.apex_local_rank == 0:
                logging.info('Epoch: [{0}][{1}/{2}]\t'
                             'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                             'Speed {3:.3f} ({4:.3f})\t'
                             'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
                             'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                             'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                              epoch, i, len(train_loader),
                              args.world_size*args.batch_size/batch_time.val,
                              args.world_size*args.batch_size/batch_time.avg,
                              batch_time=batch_time,
                              loss=losses, top1=top1, top5=top5))
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("prefetcher.next()")
        input, target = prefetcher.next()
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()
        # Pop range "Body of iteration {}".format(i)
        if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()
        # Stop profiling (and exit the process) 10 iterations after it began.
        if args.apex_profiling >= 0 and i == args.apex_profiling + 10:
            print("Profiling ended at iteration {}".format(i))
            torch.cuda.cudart().cudaProfilerStop()
            quit()
    return top1.avg, losses.avg
def validate(val_loader, model, criterion, args):
    """Evaluate ``model`` over ``val_loader``.

    Runs a full pass in inference mode, optionally averaging loss and
    accuracy across distributed ranks, and logs progress every
    ``args.report_freq`` batches on rank 0.

    Returns:
        (top1.avg, top5.avg, losses.avg) for the whole pass.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # Inference mode: dropout off, BN uses running statistics.
    model.eval()

    tick = time.time()
    prefetcher = data_prefetcher(val_loader)
    step = 0
    while True:
        images, target = prefetcher.next()
        if images is None:
            break
        step += 1

        # Forward pass only; no gradients are needed for evaluation.
        with torch.no_grad():
            output, _ = model(images)
            loss = criterion(output, target)

        # Accuracy on this batch, then (optionally) average across ranks.
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        if args.distributed:
            reduced_loss = reduce_tensor(loss.data, args.world_size)
            prec1 = reduce_tensor(prec1, args.world_size)
            prec5 = reduce_tensor(prec5, args.world_size)
        else:
            reduced_loss = loss.data

        losses.update(to_python_float(reduced_loss), images.size(0))
        top1.update(to_python_float(prec1), images.size(0))
        top5.update(to_python_float(prec5), images.size(0))

        # Wall-clock time for this batch.
        batch_time.update(time.time() - tick)
        tick = time.time()

        # TODO: Change timings to mirror train().
        if args.apex_local_rank == 0 and step % args.report_freq == 0:
            logging.info('Test: [{0}/{1}]\t'
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                         'Speed {2:.3f} ({3:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                         'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          step, len(val_loader),
                          args.world_size * args.batch_size / batch_time.val,
                          args.world_size * args.batch_size / batch_time.avg,
                          batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))

    logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
                 .format(top1=top1, top5=top5))
    return top1.avg, top5.avg, losses.avg
def dali_apex_train(train_loader, model, criterion, optimizer, epoch, args):
    """Train for one epoch from an NVIDIA DALI pipeline, optionally with Apex AMP.

    NVTX ranges are emitted when ``args.dali_profiling >= 0`` so a profiler
    run can be scoped to a few iterations; the process exits via ``quit()``
    two iterations after profiling starts.  Returns (top1.avg, losses.avg).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, data in enumerate(train_loader):
        # DALI yields a list with one dict per pipeline output.
        input = data[0]["data"]
        target = data[0]["label"].squeeze().cuda().long()
        # NOTE(review): loop-invariant and relies on DALI's private ``_size``.
        train_loader_len = int(math.ceil(train_loader._size / args.batch_size))
        # Optionally start CUDA profiling at the requested iteration.
        if args.dali_profiling >= 0 and i == args.dali_profiling:
            print("Profiling begun at iteration {}".format(i))
            torch.cuda.cudart().cudaProfilerStart()
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("Body of iteration {}".format(i))
        # adjust_learning_rate(optimizer, epoch, i, train_loader_len, args)
        # Debug runs stop after a handful of batches.
        if args.debug:
            if i > 10:
                logging.info('Break in debug mode after 10 batchs...')
                break
        # compute output (model returns main logits plus auxiliary-head logits)
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("forward")
        logits, logtis_aux = model(input)
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()
        loss = criterion(logits, target)
        if args.auxiliary:
            # Weight the auxiliary-classifier loss into the total loss.
            loss_aux = criterion(logtis_aux, target)
            loss += args.auxiliary_weight * loss_aux
        # compute gradient and do SGD step
        optimizer.zero_grad()
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("backward")
        if args.apex_opt_level is not None:
            # Apex AMP enabled: scale the loss to avoid fp16 gradient underflow.
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("optimizer.step()")
        optimizer.step()
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()
        if i%args.report_freq == 0:
            # Every print_freq iterations, check the loss, accuracy, and speed.
            # For best performance, it doesn't make sense to print these metrics every
            # iteration (they force allreduces and host<->device syncs).
            prec1, prec5 = accuracy(logits.data, target, topk=(1, 5))
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                prec1 = reduce_tensor(prec1, args.world_size)
                prec5 = reduce_tensor(prec5, args.world_size)
            else:
                reduced_loss = loss.data
            losses.update(to_python_float(reduced_loss), input.size(0))
            top1.update(to_python_float(prec1), input.size(0))
            top5.update(to_python_float(prec5), input.size(0))
            torch.cuda.synchronize()
            # Per-iteration time averaged over the report window.
            batch_time.update((time.time() - end)/args.report_freq)
            end = time.time()
            if args.apex_local_rank == 0:
                logging.info('Epoch: [{0}][{1}/{2}]\t'
                             'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                             'Speed {3:.3f} ({4:.3f})\t'
                             'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
                             'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                             'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                              epoch, i, train_loader_len,
                              args.world_size*args.batch_size/batch_time.val,
                              args.world_size*args.batch_size/batch_time.avg,
                              batch_time=batch_time,
                              loss=losses, top1=top1, top5=top5))
        if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()
        # Stop profiling (and exit the process) 2 iterations after it began.
        if args.dali_profiling >= 0 and i == args.dali_profiling + 2:
            print("Profiling ended at iteration {}".format(i))
            torch.cuda.cudart().cudaProfilerStop()
            quit()
    return top1.avg, losses.avg
def dali_validate(val_loader, model, criterion, args):
    """Evaluate ``model`` on a DALI validation pipeline.

    Mirrors ``validate`` but consumes DALI's list-of-dicts batches.
    Logs every ``args.report_freq`` batches on rank 0 and returns
    (top1.avg, top5.avg, losses.avg).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # Inference mode: dropout off, BN uses running statistics.
    model.eval()

    tick = time.time()
    for batch_idx, batch in enumerate(val_loader):
        # DALI yields a list with one dict per pipeline output.
        images = batch[0]["data"]
        target = batch[0]["label"].squeeze().cuda().long()
        val_loader_len = int(val_loader._size / args.batch_size)

        # Debug runs stop after a handful of batches.
        if args.debug and batch_idx > 10:
            break

        # Forward pass only; no gradients needed for evaluation.
        with torch.no_grad():
            output, _ = model(images)
            loss = criterion(output, target)

        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        if args.distributed:
            reduced_loss = reduce_tensor(loss.data, args.world_size)
            prec1 = reduce_tensor(prec1, args.world_size)
            prec5 = reduce_tensor(prec5, args.world_size)
        else:
            reduced_loss = loss.data

        losses.update(to_python_float(reduced_loss), images.size(0))
        top1.update(to_python_float(prec1), images.size(0))
        top5.update(to_python_float(prec5), images.size(0))

        # Wall-clock time for this batch.
        batch_time.update(time.time() - tick)
        tick = time.time()

        if args.apex_local_rank == 0 and batch_idx % args.report_freq == 0:
            logging.info('Test: [{0}/{1}]\t'
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                         'Speed {2:.3f} ({3:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                         'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          batch_idx, val_loader_len,
                          args.world_size * args.batch_size / batch_time.val,
                          args.world_size * args.batch_size / batch_time.avg,
                          batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))

    logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
                 .format(top1=top1, top5=top5))
    return top1.avg, top5.avg, losses.avg
| true | true |
f736b02876ffd51c2b3784b5eda8b3e828d376ff | 792 | py | Python | Examples/ControlFlowExamples/functions.py | AbdullahNoori/CS-1.1-Intro-to-Programming | 3b3ab4a7ff5c44ce9d74df767a8d5d3cf2a3ee2e | [
"MIT"
] | 13 | 2018-08-27T18:17:13.000Z | 2018-10-30T06:33:45.000Z | Examples/ControlFlowExamples/functions.py | AbdullahNoori/CS-1.1-Intro-to-Programming | 3b3ab4a7ff5c44ce9d74df767a8d5d3cf2a3ee2e | [
"MIT"
] | 3 | 2018-09-14T09:33:26.000Z | 2018-10-01T17:30:25.000Z | Examples/ControlFlowExamples/functions.py | AbdullahNoori/CS-1.1-Intro-to-Programming | 3b3ab4a7ff5c44ce9d74df767a8d5d3cf2a3ee2e | [
"MIT"
] | 83 | 2019-09-02T19:15:00.000Z | 2020-02-29T23:58:16.000Z | """Program to make a peanut butter and jelly sandwich."""
def go_to_store():
    """Go to the store and buy sandwich ingredients."""
    for item in ("Buying bread...",
                 "Buying peanut butter and jelly...\n"):
        print(item)


def prepare_ingredients():
    """Prepare sandwich ingredients."""
    for task in ("Toasting bread...",
                 "Getting a knife...",
                 "Opening peanut butter jar...\n"):
        print(task)


def assemble_sandwich():
    """Assemble a peanut butter and jelly sandwich."""
    for action in ("Stick knife in peanut butter jar...",
                   "Spread peanut butter on sandwich...",
                   "Spread jelly on sandwich...\n"):
        print(action)


def make_sandwich():
    """Make a peanut butter and jelly sandwich: shop, prep, then assemble."""
    for phase in (go_to_store, prepare_ingredients, assemble_sandwich):
        phase()
    print("Done!")


make_sandwich()
| 23.294118 | 57 | 0.656566 |
def go_to_store():
    """Go to the store and buy sandwich ingredients."""
    print("Buying bread...")
    print("Buying peanut butter and jelly...\n")
def prepare_ingredients():
    """Prepare sandwich ingredients."""
    print("Toasting bread...")
    print("Getting a knife...")
    print("Opening peanut butter jar...\n")
def assemble_sandwich():
    """Assemble a peanut butter and jelly sandwich."""
    print("Stick knife in peanut butter jar...")
    print("Spread peanut butter on sandwich...")
    print("Spread jelly on sandwich...\n")
def make_sandwich():
    """Make a peanut butter and jelly sandwich: shop, prep, then assemble."""
    go_to_store()
    prepare_ingredients()
    assemble_sandwich()
    print("Done!")
# Run the full sandwich-making sequence when the module is executed.
make_sandwich()
| true | true |
f736b1152645836d3c818a4e475a399ae85a10a9 | 936 | py | Python | pyro/__init__.py | jrmcornish/pyro | 38914d5eb596dc140e226031534ff4ea7903dc35 | [
"MIT"
] | null | null | null | pyro/__init__.py | jrmcornish/pyro | 38914d5eb596dc140e226031534ff4ea7903dc35 | [
"MIT"
] | null | null | null | pyro/__init__.py | jrmcornish/pyro | 38914d5eb596dc140e226031534ff4ea7903dc35 | [
"MIT"
] | null | null | null | import pyro.poutine as poutine
from pyro.logger import log
from pyro.poutine import condition, do, markov
from pyro.primitives import (clear_param_store, enable_validation, factor, get_param_store, iarange, irange, module,
                             param, plate, plate_stack, random_module, sample, validation_enabled)
from pyro.util import set_rng_seed

version_prefix = '1.0.0'

# Get the __version__ string from the auto-generated _version.py file, if exists.
try:
    from pyro._version import __version__
except ImportError:
    __version__ = version_prefix

# Public names re-exported by ``from pyro import *``.
# Fix: "plate" was listed twice; the duplicate entry has been removed.
__all__ = [
    "__version__",
    "clear_param_store",
    "condition",
    "do",
    "enable_validation",
    "factor",
    "get_param_store",
    "iarange",
    "irange",
    "log",
    "markov",
    "module",
    "param",
    "plate",
    "plate_stack",
    "poutine",
    "random_module",
    "sample",
    "set_rng_seed",
    "validation_enabled",
]
| 24 | 116 | 0.669872 | import pyro.poutine as poutine
from pyro.logger import log
from pyro.poutine import condition, do, markov
from pyro.primitives import (clear_param_store, enable_validation, factor, get_param_store, iarange, irange, module,
                             param, plate, plate_stack, random_module, sample, validation_enabled)
from pyro.util import set_rng_seed

version_prefix = '1.0.0'

# Use the auto-generated _version.py string when present, else the prefix.
try:
    from pyro._version import __version__
except ImportError:
    __version__ = version_prefix

# Public names re-exported by ``from pyro import *``.
# Fix: "plate" was listed twice; the duplicate entry has been removed.
__all__ = [
    "__version__",
    "clear_param_store",
    "condition",
    "do",
    "enable_validation",
    "factor",
    "get_param_store",
    "iarange",
    "irange",
    "log",
    "markov",
    "module",
    "param",
    "plate",
    "plate_stack",
    "poutine",
    "random_module",
    "sample",
    "set_rng_seed",
    "validation_enabled",
]
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.