class TestCAlgorithms(unittest.TestCase):
def test_get_julian_day_from_gregorian(self):
self.assertRaises(ValueError, alg_p.get_julian_day_from_gregorian_date, 2016, 2, 30)
self.assertRaises(ValueError, alg_p.get_julian_day_from_gregorian_date, 2015, 2, 29)
self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, 2016, 2, 30)
self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, 2015, 2, 29)
self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, (- 4713), 2, 30)
self.assertRaises(ValueError, alg_c.get_julian_day_from_gregorian_date, (- 4713), 2, 29)
self.assertEqual(alg_c.get_julian_day_from_gregorian_date((- 4713), 11, 25), alg_p.get_julian_day_from_gregorian_date((- 4713), 11, 25))
for i in range(3000):
self.assertEqual(alg_c.get_julian_day_from_gregorian_date(i, 1, 1), alg_p.get_julian_day_from_gregorian_date(i, 1, 1))
def test_is_leap_year(self):
for i in range(3000):
self.assertEqual(alg_c.is_jalali_leap_year(i), alg_p.is_jalali_leap_year(i))
def test_days_in_year(self):
for i in range(3000):
self.assertEqual(alg_c.get_days_in_jalali_year(i), alg_p.get_days_in_jalali_year(i))
def test_days_in_month(self):
for i in range(3000):
for m in range(1, 13):
c = alg_c.get_days_in_jalali_month(i, m)
p = alg_p.get_days_in_jalali_month(i, m)
self.assertEqual(c, p, ('year: %s, month: %s, results: {c: %s, py: %s}' % (i, m, c, p)))
def test_julian_day_from_jalali_date(self):
for y in range(303):
for m in range(1, 13):
for d in range(1, (alg_c.get_days_in_jalali_month(y, m) + 1)):
self.assertEqual(alg_c.get_julian_day_from_jalali_date(y, m, d), alg_p.get_julian_day_from_jalali_date(y, m, d), ('year: %s, month: %s, day: %s' % (y, m, d)))
def test_jalali_date_from_julian_day(self):
jd = 0
while (jd < (365 * 1000)):
jd += 1
c = alg_c.get_jalali_date_from_julian_day(jd)
p = alg_p.get_jalali_date_from_julian_day(jd)
self.assertEqual(c, p, ('Julian day: %s\t%s <> %s' % (jd, c, p)))
def test_gregorian_date_from_julian_day(self):
jd = 0
self.assertRaises(ValueError, alg_c.get_gregorian_date_from_julian_day, jd)
self.assertRaises(ValueError, alg_p.get_gregorian_date_from_julian_day, jd)
while (jd < (365 * 200)):
jd += 1
self.assertEqual(alg_c.get_gregorian_date_from_julian_day(jd), alg_p.get_gregorian_date_from_julian_day(jd))
def test_jalali_date_from_gregorian_date(self):
jd = 0
while (jd < (365 * 200)):
jd += 1
cd = alg_c.get_gregorian_date_from_julian_day(jd)
pd = alg_p.get_gregorian_date_from_julian_day(jd)
c = alg_c.get_jalali_date_from_gregorian_date(*cd)
p = alg_p.get_jalali_date_from_gregorian_date(*pd)
self.assertEqual(c, p, ('jd: %s c: %s py: %s cdate: %s pydate: %s' % (jd, c, p, cd, pd)))
def test_algorithm_import(self):
from khayyam import algorithms
self.assertTrue(hasattr(algorithms, 'is_jalali_leap_year'))
self.assertTrue(hasattr(algorithms, 'get_days_in_jalali_year'))
self.assertTrue(hasattr(algorithms, 'get_days_in_jalali_month'))
self.assertTrue(hasattr(algorithms, 'get_julian_day_from_gregorian_date'))
self.assertTrue(hasattr(algorithms, 'get_julian_day_from_jalali_date'))
self.assertTrue(hasattr(algorithms, 'get_jalali_date_from_julian_day'))
self.assertTrue(hasattr(algorithms, 'get_jalali_date_from_gregorian_date'))
        self.assertTrue(hasattr(algorithms, 'get_gregorian_date_from_julian_day'))
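
# A round-trip sanity check in the same spirit as the tests above. This is
# a sketch: it assumes, as the equality comparisons suggest, that the
# Jalali <-> Julian-day conversions are exact inverses of each other.
def check_jalali_round_trip(algorithms, max_jd=365 * 100):
    for jd in range(1, max_jd, 97):
        (y, m, d) = algorithms.get_jalali_date_from_julian_day(jd)
        assert algorithms.get_julian_day_from_jalali_date(y, m, d) == jd
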
def test_set_size_custom() -> None:
instance = printer.Dummy()
instance.set_with_default(custom_size=True, width=8, height=7)
expected_sequence = (TXT_SIZE, bytes(((TXT_STYLE['width'][8] + TXT_STYLE['height'][7]),)), TXT_STYLE['flip'][False], TXT_STYLE['smooth'][False], TXT_STYLE['bold'][False], TXT_STYLE['underline'][0], SET_FONT(b'\x00'), TXT_STYLE['align']['left'], TXT_STYLE['invert'][False])
    assert (instance.output == b''.join(expected_sequence))
import logging
import os
import shutil
from pathlib import Path

from jinja2 import Environment

log = logging.getLogger(__name__)


def recursive_render(template_dir: Path, environment: Environment, _root_dir: (str | os.PathLike[str])='.') -> list[str]:
rendered_paths: list[str] = []
    candidate_files = (
        (Path(root), file)
        for root, _, files in os.walk(template_dir)
        for file in files
        if not any(part.startswith('.') for part in Path(root).relative_to(template_dir).parts[1:])
        and not file.startswith('.')
    )
    for (root, file) in candidate_files:
        output_path = (Path(_root_dir) / root.relative_to(template_dir)).resolve()
log.info('Rendering templates from %s to %s', root, output_path)
output_path.mkdir(parents=True, exist_ok=True)
if file.endswith('.j2'):
output_filename = file[:(- 3)]
src_file_path = str((root / file).relative_to(template_dir))
output_file_path = str((output_path / output_filename).resolve())
log.debug('rendering %s to %s', src_file_path, output_file_path)
stream = environment.get_template(src_file_path).stream()
with open(output_file_path, 'wb+') as output_file:
stream.dump(output_file, encoding='utf-8')
rendered_paths.append(output_file_path)
else:
src_file = str((root / file).resolve())
target_file = str((output_path / file).resolve())
log.debug('source file %s is not a template, copying to %s', src_file, target_file)
shutil.copyfile(src_file, target_file)
rendered_paths.append(target_file)
    return rendered_paths
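
# Hypothetical usage sketch for recursive_render: render everything under a
# ./templates directory (the directory name is an assumption for
# illustration) into the current working directory.
if __name__ == '__main__':
    from jinja2 import FileSystemLoader

    env = Environment(loader=FileSystemLoader('templates'), keep_trailing_newline=True)
    print(recursive_render(Path('templates'), env))
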
class Bookmark():
    @classmethod
    def for_widget(cls, description, query_arguments=None, **bookmark_kwargs):
return Bookmark('', '', description, query_arguments=query_arguments, ajax=True, **bookmark_kwargs)
def __init__(self, base_path, relative_path, description, query_arguments=None, ajax=False, detour=False, exact=True, locale=None, read_check=None, write_check=None):
self.base_path = base_path
self.relative_path = relative_path
self.description = description
self.query_arguments = (query_arguments or {})
self.ajax = ajax
self.detour = detour
self.exact = exact
self.locale = locale
self.read_check = read_check
self.write_check = write_check
def with_description(self, description):
return Bookmark(self.base_path, self.relative_path, description, query_arguments=self.query_arguments, ajax=self.ajax, detour=self.detour, read_check=self.read_check, write_check=self.write_check)
    @property
    def href(self):
path = (self.base_path + self.relative_path).replace('//', '/')
url = Url(path)
query_arguments = OrderedDict(sorted(self.query_arguments.items()))
url.set_query_from(query_arguments)
if self.detour:
request = ExecutionContext.get_context().request
if (not url.is_currently_active(exact_path=True)):
query_arguments['returnTo'] = request.url
elif ('returnTo' in request.params):
query_arguments['returnTo'] = request.params['returnTo']
url.make_locale_absolute(locale=self.locale)
url.set_query_from(query_arguments)
return url
    @property
    def is_page_internal(self):
return (self.ajax and (not (self.base_path or self.relative_path)))
def on_view(self, view):
if (view is view.user_interface.current_view):
request = ExecutionContext.get_context().request
query_arguments = request.GET
else:
query_arguments = {}
return (view.as_bookmark(query_arguments=query_arguments) + self)
def combine_checks(self, own_check, other_check):
def combined_check():
own_passes = ((not own_check) or own_check())
other_passes = ((not other_check) or other_check())
return all([own_passes, other_passes])
return combined_check
def __add__(self, other):
if (not other.is_page_internal):
raise ProgrammerError('only page-internal Bookmarks can be added to other bookmarks')
query_arguments = {}
query_arguments.update(self.query_arguments)
query_arguments.update(other.query_arguments)
        return Bookmark(self.base_path, self.relative_path, other.description, query_arguments=query_arguments, ajax=other.ajax, detour=self.detour, read_check=self.combine_checks(self.read_check, other.read_check), write_check=self.combine_checks(self.write_check, other.write_check))
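
# Usage sketch for the __add__ overload above (reahl-style API; the paths
# and query arguments are illustrative assumptions): adding a page-internal
# bookmark keeps the base bookmark's location and merges in the widget's
# query arguments and description.
base = Bookmark('/app', '/orders', 'Orders', query_arguments={'page': '1'})
widget = Bookmark.for_widget('Open orders only', query_arguments={'filter': 'open'})
combined = base + widget  # same path as `base`, query arguments from both
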
class WorkflowEnabledMeta(base.WorkflowEnabledMeta, models.base.ModelBase):
    @classmethod
    def _find_workflows(mcs, attrs):
workflows = {}
for (k, v) in attrs.items():
if isinstance(v, StateField):
workflows[k] = v
return workflows
    @classmethod
    def _add_workflow(mcs, field_name, state_field, attrs):
        pass
class QtHandler(QtHandlerBase):
pin_signal = pyqtSignal(object, object)
def __init__(self, win, pin_matrix_widget_class, device):
super(QtHandler, self).__init__(win, device)
self.pin_signal.connect(self.pin_dialog)
self.pin_matrix_widget_class = pin_matrix_widget_class
def get_pin(self, msg, *, show_strength=True):
self.done.clear()
self.pin_signal.emit(msg, show_strength)
self.done.wait()
return self.response
def pin_dialog(self, msg, show_strength):
self.clear_dialog()
dialog = WindowModalDialog(self.top_level_window(), _('Enter PIN'))
matrix = self.pin_matrix_widget_class(show_strength)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(msg))
vbox.addWidget(matrix)
vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
self.response = str(matrix.get_value())
        self.done.set()
class DialogSelectTrack(SimpleBuilderApp):
def __init__(self, data_path=None, tracks=None, okmethod=None, gpx=None):
logging.debug('>>')
self.okmethod = okmethod
self.tracks = tracks
self.gpx = gpx
SimpleBuilderApp.__init__(self, 'selecttrackdialog.ui')
logging.debug('<<')
def new(self):
logging.debug('>>')
column_names = [_('Track Name'), _('Date')]
self.create_treeview(self.trkpTreeView, column_names)
self.actualize_treeview(self.trkpTreeView, self.tracks)
logging.debug('<<')
def on_ok_clicked(self, widget):
logging.debug('>>')
(selected, iter) = self.trkpTreeView.get_selection().get_selected()
if iter:
trackname = selected.get_value(iter, 0)
logging.debug(('selected track: ' + trackname))
self.okmethod(self.gpx, trackname)
self.closewindow()
logging.debug('<<')
def on_cancel_clicked(self, widget):
logging.debug('--')
self.closewindow()
def closewindow(self):
logging.debug('--')
self.selecttrackdialog.hide()
self.quit()
def create_treeview(self, treeview, column_names):
logging.debug('>>')
i = 0
for (column_index, column_name) in enumerate(column_names):
column = Gtk.TreeViewColumn(column_name, Gtk.CellRendererText(), text=column_index)
column.set_resizable(True)
column.set_sort_column_id(i)
treeview.append_column(column)
i += 1
logging.debug('<<')
def actualize_treeview(self, treeview, record_list):
logging.debug('>>')
iterOne = False
store = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING, object)
for i in record_list:
iter = store.append()
if (not iterOne):
iterOne = iter
store.set(iter, 0, str(i[0]), 1, str(i[1]))
treeview.set_model(store)
if iterOne:
treeview.get_selection().select_iter(iterOne)
        logging.debug('<<')
def HeronFit(DB, Gamedata, Saveddata):
print('Creating Heron - RemoteSebo')
item = DB['gamedata_session'].query(Gamedata['Item']).filter((Gamedata['Item'].name == 'Heron')).first()
ship = Saveddata['Ship'](item)
fit = Saveddata['Fit'](ship, 'Heron - RemoteSebo')
mod = Saveddata['Module'](DB['db'].getItem('Remote Sensor Booster II'))
mod.state = Saveddata['State'].ONLINE
for _ in range(4):
fit.modules.append(mod)
    return fit
def _wait_for_condition(self, condition=None, timeout=None, poll_frequency=0.5, ignored_exceptions=None):
condition = functools.partial((condition or self.visit_condition), self)
timeout = (timeout or self.wait_time)
    return wait.WebDriverWait(self.driver, timeout, poll_frequency=poll_frequency, ignored_exceptions=ignored_exceptions).until((lambda browser: condition()))
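
# For context, a standalone sketch of the same pattern using selenium
# directly; `driver` is an assumed, already-running WebDriver session and
# the condition is an arbitrary example.
from selenium.webdriver.support import wait

def page_has_title(browser):
    return bool(browser.title)

# wait.WebDriverWait(driver, 10, poll_frequency=0.5).until(page_has_title)
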
class Ui_MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(Ui_MainWindow, self).__init__()
def setupUi(self, MainWindow):
MainWindow.setObjectName('MainWindow')
MainWindow.setEnabled(True)
MainWindow.resize(1000, 650)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName('centralwidget')
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName('verticalLayout')
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName('tabWidget')
self.share_tab = QtWidgets.QWidget()
self.share_tab.setObjectName('share_tab')
self.share_vlayout = QtWidgets.QVBoxLayout(self.share_tab)
self.share_vlayout.setObjectName('share_vlayout')
self.share_hlayout_top = QtWidgets.QHBoxLayout()
self.share_hlayout_top.setObjectName('share_hlayout_top')
self.label_share_url = QtWidgets.QLabel(self.share_tab)
self.label_share_url.setObjectName('label_share_url')
self.share_hlayout_top.addWidget(self.label_share_url)
self.line_share_url = QtWidgets.QLineEdit(self.share_tab)
self.line_share_url.setObjectName('line_share_url')
self.line_share_pwd = QtWidgets.QLineEdit(self.share_tab)
self.line_share_pwd.setObjectName('line_share_pwd')
self.share_hlayout_top.addWidget(self.line_share_url)
self.share_hlayout_top.addWidget(self.line_share_pwd)
self.btn_extract = QtWidgets.QPushButton(self.share_tab)
self.btn_extract.setObjectName('btn_extract')
self.share_hlayout_top.addWidget(self.btn_extract)
self.share_vlayout.addLayout(self.share_hlayout_top)
self.table_share = QtWidgets.QTableView(self.share_tab)
self.table_share.setObjectName('table_share')
self.share_vlayout.addWidget(self.table_share)
self.share_hlayout_bottom = QtWidgets.QHBoxLayout()
self.share_hlayout_bottom.setObjectName('share_hlayout_bottom')
self.btn_share_select_all = QtWidgets.QPushButton(self.share_tab)
self.btn_share_select_all.setObjectName('btn_share_select_all')
self.share_hlayout_bottom.addWidget(self.btn_share_select_all)
self.label_dl_path = QtWidgets.QLabel(self.share_tab)
self.label_dl_path.setObjectName('label_dl_path')
self.share_hlayout_bottom.addWidget(self.label_dl_path)
self.share_set_dl_path = MyLineEdit(self.share_tab)
self.share_set_dl_path.setObjectName('share_set_dl_path')
self.share_hlayout_bottom.addWidget(self.share_set_dl_path)
self.btn_share_dl = QtWidgets.QPushButton(self.share_tab)
self.btn_share_dl.setObjectName('btn_share_dl')
self.share_hlayout_bottom.addWidget(self.btn_share_dl)
self.share_vlayout.addLayout(self.share_hlayout_bottom)
self.tabWidget.addTab(self.share_tab, '')
self.disk_tab = QtWidgets.QWidget()
self.disk_tab.setEnabled(True)
self.disk_tab.setMinimumSize(QtCore.QSize(620, 0))
self.disk_tab.setObjectName('disk_tab')
self.disk_vlayout = QtWidgets.QVBoxLayout(self.disk_tab)
self.disk_vlayout.setObjectName('disk_vlayout')
self.disk_hlayout_top = QtWidgets.QHBoxLayout()
self.disk_hlayout_top.setObjectName('disk_hlayout_top')
self.disk_loc_hbox = QtWidgets.QHBoxLayout()
self.disk_loc_hbox.setObjectName('disk_loc_hbox')
self.label_disk_loc = QtWidgets.QLabel(self.disk_tab)
self.label_disk_loc.setObjectName('label_disk_loc')
self.disk_loc_hbox.addWidget(self.label_disk_loc)
self.disk_hlayout_top.addLayout(self.disk_loc_hbox)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.disk_hlayout_top.addItem(spacerItem)
self.btn_disk_mkdir = QtWidgets.QPushButton(self.disk_tab)
self.btn_disk_mkdir.setObjectName('btn_disk_mkdir')
self.disk_hlayout_top.addWidget(self.btn_disk_mkdir)
self.disk_vlayout.addLayout(self.disk_hlayout_top)
self.table_disk = MyTableView(self.disk_tab)
self.table_disk.setObjectName('table_disk')
self.disk_vlayout.addWidget(self.table_disk)
self.disk_hlayout_bottom = QtWidgets.QHBoxLayout()
self.disk_hlayout_bottom.setObjectName('disk_hlayout_bottom')
self.btn_disk_select_all = QtWidgets.QPushButton(self.disk_tab)
self.btn_disk_select_all.setObjectName('btn_disk_select_all')
self.disk_hlayout_bottom.addWidget(self.btn_disk_select_all)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.disk_hlayout_bottom.addItem(spacerItem1)
self.btn_disk_delete = QtWidgets.QPushButton(self.disk_tab)
self.btn_disk_delete.setObjectName('btn_disk_delete')
self.disk_hlayout_bottom.addWidget(self.btn_disk_delete)
self.btn_disk_dl = QtWidgets.QPushButton(self.disk_tab)
self.btn_disk_dl.setObjectName('btn_disk_dl')
self.disk_hlayout_bottom.addWidget(self.btn_disk_dl)
self.disk_vlayout.addLayout(self.disk_hlayout_bottom)
self.tabWidget.addTab(self.disk_tab, '')
self.rec_tab = QtWidgets.QWidget()
self.rec_tab.setObjectName('rec_tab')
self.rec_verticalLayout = QtWidgets.QVBoxLayout(self.rec_tab)
self.rec_verticalLayout.setObjectName('rec_verticalLayout')
self.rec_horizontalLayout = QtWidgets.QHBoxLayout()
self.rec_horizontalLayout.setObjectName('rec_horizontalLayout')
self.btn_rec_select_all = QtWidgets.QPushButton(self.rec_tab)
self.btn_rec_select_all.setObjectName('btn_rec_select_all')
self.rec_horizontalLayout.addWidget(self.btn_rec_select_all)
self.btn_recovery = QtWidgets.QPushButton(self.rec_tab)
self.btn_recovery.setObjectName('btn_recovery')
self.rec_horizontalLayout.addWidget(self.btn_recovery)
self.btn_rec_delete = QtWidgets.QPushButton(self.rec_tab)
self.btn_rec_delete.setObjectName('btn_rec_delete')
self.rec_horizontalLayout.addWidget(self.btn_rec_delete)
self.rec_horizontalLayout.addStretch(1)
self.btn_rec_clean = QtWidgets.QPushButton(self.rec_tab)
self.btn_rec_clean.setObjectName('btn_rec_clean')
self.rec_horizontalLayout.addWidget(self.btn_rec_clean)
self.btn_recovery_all = QtWidgets.QPushButton(self.rec_tab)
self.btn_recovery_all.setObjectName('btn_recovery_all')
self.rec_horizontalLayout.addWidget(self.btn_recovery_all)
self.btn_rec_expire_files = QtWidgets.QPushButton(self.rec_tab)
self.btn_rec_expire_files.setObjectName('btn_rec_expire_files')
self.rec_horizontalLayout.addWidget(self.btn_rec_expire_files)
self.rec_verticalLayout.addLayout(self.rec_horizontalLayout)
self.table_rec = QtWidgets.QTableView(self.rec_tab)
self.table_rec.setObjectName('table_rec')
self.rec_verticalLayout.addWidget(self.table_rec)
self.tabWidget.addTab(self.rec_tab, '')
self.jobs_tab = QtWidgets.QWidget()
self.jobs_tab.setObjectName('jobs_tab')
self.jobs_verticalLayout = QtWidgets.QVBoxLayout(self.jobs_tab)
self.jobs_verticalLayout.setObjectName('jobs_verticalLayout')
self.jobs_horizontalLayout = QtWidgets.QHBoxLayout()
self.jobs_horizontalLayout.setObjectName('jobs_horizontalLayout')
self.btn_jobs_start_all = QtWidgets.QPushButton(self.jobs_tab)
self.btn_jobs_clean_all = QtWidgets.QPushButton(self.jobs_tab)
self.jobs_horizontalLayout.addWidget(self.btn_jobs_start_all)
self.jobs_horizontalLayout.addStretch(1)
self.jobs_horizontalLayout.addWidget(self.btn_jobs_clean_all)
self.table_jobs = MyTableView(self.jobs_tab)
self.table_jobs.setObjectName('table_jobs')
self.jobs_verticalLayout.addLayout(self.jobs_horizontalLayout)
self.jobs_verticalLayout.addWidget(self.table_jobs)
self.tabWidget.addTab(self.jobs_tab, '')
self.verticalLayout.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 640, 30))
self.menubar.setObjectName('menubar')
self.acount = QtWidgets.QMenu(self.menubar)
self.acount.setObjectName('acount')
self.files = QtWidgets.QMenu(self.menubar)
self.files.setObjectName('files')
self.help = QtWidgets.QMenu(self.menubar)
self.help.setObjectName('help')
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName('statusbar')
MainWindow.setStatusBar(self.statusbar)
self.toolbar = QtWidgets.QToolBar(MainWindow)
self.toolbar.setObjectName('toolbar')
MainWindow.addToolBar(QtCore.Qt.ToolBarArea.TopToolBarArea, self.toolbar)
self.login = QtGui.QAction(MainWindow)
self.login.setObjectName('login')
self.logout = QtGui.QAction(MainWindow)
self.logout.setObjectName('logout')
self.upload = QtGui.QAction(MainWindow)
self.upload.setObjectName('upload')
self.download = QtGui.QAction(MainWindow)
self.download.setObjectName('download')
self.delete = QtGui.QAction(MainWindow)
self.delete.setObjectName('delete')
self.show_toolbar = QtGui.QAction(MainWindow)
self.show_toolbar.setObjectName('show_toolbar')
self.merge_file = QtGui.QAction(MainWindow)
self.merge_file.setObjectName('merge_file')
self.setting_menu = QtGui.QAction(MainWindow)
self.setting_menu.setObjectName('setting_menu')
self.how = QtGui.QAction(MainWindow)
self.how.setObjectName('how')
self.about = QtGui.QAction(MainWindow)
self.about.setObjectName('about')
self.acount.addSeparator()
self.acount.addAction(self.login)
self.acount.addAction(self.logout)
self.files.addAction(self.upload)
self.files.addAction(self.download)
self.files.addAction(self.delete)
self.files.addAction(self.show_toolbar)
self.files.addAction(self.merge_file)
self.files.addAction(self.setting_menu)
self.help.addAction(self.how)
self.help.addAction(self.about)
self.menubar.addAction(self.acount.menuAction())
self.menubar.addAction(self.files.menuAction())
self.menubar.addAction(self.help.menuAction())
self.toolbar.addAction(self.login)
self.statusbar_msg_label = QtWidgets.QLabel()
self.statusbar_load_lb = QtWidgets.QLabel()
self.statusbar_load_movie = QtGui.QMovie((SRC_DIR + 'loading_more.gif'))
self.statusbar_load_lb.setMovie(self.statusbar_load_movie)
self.statusbar_msg_label.setObjectName('msg_label')
self.statusbar_load_lb.setObjectName('msg_movie_lb')
self.statusbar.addWidget(self.statusbar_load_lb)
self.statusbar.addWidget(self.statusbar_msg_label)
self.retranslateUi(MainWindow)
self.other_init()
self.set_window_at_center()
self.create_left_menus()
self.init_main_menu()
self.text_add_shadow()
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate('MainWindow', 'MainWindow'))
self.label_share_url.setText(_translate('MainWindow', ''))
self.btn_extract.setText(_translate('MainWindow', ''))
self.btn_share_select_all.setText(_translate('MainWindow', ''))
self.label_dl_path.setText(_translate('MainWindow', ''))
self.btn_share_dl.setText(_translate('MainWindow', ''))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.share_tab), _translate('MainWindow', ''))
self.label_disk_loc.setText(_translate('MainWindow', ''))
self.btn_disk_mkdir.setText(_translate('MainWindow', ''))
self.btn_disk_select_all.setText(_translate('MainWindow', ''))
self.btn_disk_delete.setText(_translate('MainWindow', ''))
self.btn_disk_dl.setText(_translate('MainWindow', ''))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.disk_tab), _translate('MainWindow', ''))
self.btn_rec_select_all.setText(_translate('MainWindow', ''))
self.btn_recovery.setText(_translate('MainWindow', ''))
self.btn_rec_delete.setText(_translate('MainWindow', ''))
self.btn_rec_clean.setText(_translate('MainWindow', ''))
self.btn_recovery_all.setText(_translate('MainWindow', ''))
self.btn_rec_expire_files.setText(_translate('MainWindow', ''))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.rec_tab), _translate('MainWindow', ''))
self.btn_jobs_start_all.setText(_translate('MainWindow', ''))
self.btn_jobs_clean_all.setText(_translate('MainWindow', ''))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.jobs_tab), _translate('MainWindow', ''))
self.acount.setTitle(_translate('MainWindow', ''))
self.files.setTitle(_translate('MainWindow', ''))
self.help.setTitle(_translate('MainWindow', ''))
self.toolbar.setWindowTitle(_translate('MainWindow', ''))
self.login.setText(_translate('MainWindow', ''))
self.logout.setText(_translate('MainWindow', ''))
self.upload.setText(_translate('MainWindow', ''))
self.download.setText(_translate('MainWindow', ''))
self.delete.setText(_translate('MainWindow', ''))
self.show_toolbar.setText(_translate('MainWindow', ''))
self.merge_file.setText(_translate('MainWindow', ''))
self.setting_menu.setText(_translate('MainWindow', ''))
self.how.setText(_translate('MainWindow', ''))
self.about.setText(_translate('MainWindow', ''))
def other_init(self):
self.tabWidget.setCurrentIndex(0)
self.tabWidget.removeTab(3)
self.tabWidget.removeTab(2)
self.tabWidget.removeTab(1)
self.disk_tab.setEnabled(False)
self.rec_tab.setEnabled(False)
self.jobs_tab.setEnabled(False)
def set_window_at_center(self):
pass
def create_left_menus(self):
self.left_menus = QtWidgets.QMenu()
self.left_menu_share_url = self.left_menus.addAction('')
self.left_menu_share_url.setIcon(QtGui.QIcon((SRC_DIR + 'share.ico')))
self.left_menu_rename_set_desc = self.left_menus.addAction('()')
self.left_menu_rename_set_desc.setIcon(QtGui.QIcon((SRC_DIR + 'desc.ico')))
self.left_menu_set_pwd = self.left_menus.addAction('()')
self.left_menu_set_pwd.setIcon(QtGui.QIcon((SRC_DIR + 'password.ico')))
self.left_menu_move = self.left_menus.addAction('()')
self.left_menu_move.setIcon(QtGui.QIcon((SRC_DIR + 'move.ico')))
self.left_menu_copy = self.left_menus.addAction('')
self.left_menu_copy.setIcon(QtGui.QIcon((SRC_DIR + 'count.ico')))
def init_main_menu(self):
self.login.setIcon(QtGui.QIcon((SRC_DIR + 'login.ico')))
self.login.setShortcut('Ctrl+L')
self.logout.setIcon(QtGui.QIcon((SRC_DIR + 'logout.ico')))
self.logout.setShortcut('Ctrl+Q')
self.logout.setEnabled(False)
self.download.setIcon(QtGui.QIcon((SRC_DIR + 'download.ico')))
self.download.setShortcut('Ctrl+J')
self.download.setEnabled(False)
self.delete.setIcon(QtGui.QIcon((SRC_DIR + 'delete.ico')))
self.delete.setShortcut('Ctrl+D')
self.delete.setEnabled(False)
self.show_toolbar.setIcon(QtGui.QIcon((SRC_DIR + 'password.ico')))
self.show_toolbar.setShortcut('Ctrl+T')
self.toolbar.close()
self.merge_file.setIcon(QtGui.QIcon((SRC_DIR + 'folder.gif')))
self.merge_file.setShortcut('Ctrl+M')
self.setting_menu.setIcon(QtGui.QIcon((SRC_DIR + 'settings.ico')))
self.setting_menu.setShortcut('Ctrl+P')
self.how.setIcon(QtGui.QIcon((SRC_DIR + 'help.ico')))
self.how.setShortcut('F1')
self.about.setIcon(QtGui.QIcon((SRC_DIR + 'about.ico')))
self.about.setShortcut('Ctrl+B')
self.upload.setIcon(QtGui.QIcon((SRC_DIR + 'upload.ico')))
self.upload.setShortcut('Ctrl+U')
self.upload.setEnabled(False)
def text_add_shadow(self):
share_url_shadow = QtWidgets.QGraphicsDropShadowEffect()
share_url_shadow.setBlurRadius(4)
share_url_shadow.setColor(QtGui.QColor('red'))
share_url_shadow.setOffset(0)
self.label_share_url.setGraphicsEffect(share_url_shadow)
dl_path_shadow = QtWidgets.QGraphicsDropShadowEffect()
dl_path_shadow.setBlurRadius(4)
dl_path_shadow.setColor(QtGui.QColor('green'))
dl_path_shadow.setOffset(0)
self.label_dl_path.setGraphicsEffect(dl_path_shadow)
disk_loc_shadow = QtWidgets.QGraphicsDropShadowEffect()
disk_loc_shadow.setBlurRadius(5)
disk_loc_shadow.setColor(QtGui.QColor('white'))
disk_loc_shadow.setOffset(0)
        self.label_disk_loc.setGraphicsEffect(disk_loc_shadow)
def format_version(version: ScmVersion) -> str:
log.debug('scm version %s', version)
log.debug('config %s', version.config)
if version.preformatted:
assert isinstance(version.tag, str)
return version.tag
main_version = _entrypoints._call_version_scheme(version, 'setuptools_scm.version_scheme', version.config.version_scheme, None)
log.debug('version %s', main_version)
assert (main_version is not None)
local_version = _entrypoints._call_version_scheme(version, 'setuptools_scm.local_scheme', version.config.local_scheme, '+unknown')
log.debug('local_version %s', local_version)
    return (main_version + local_version)
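
# Illustration of the contract above (the values are hypothetical, not real
# scheme output): format_version simply concatenates the version scheme's
# result with the local scheme's result.
_main = '1.2.3.dev4'   # e.g. a guess-next-dev style version scheme result
_local = '+g1a2b3c4'   # e.g. a node-and-date style local scheme result
assert _main + _local == '1.2.3.dev4+g1a2b3c4'
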
def prepare_class_def(path: str, module_name: str, cdef: ClassDef, errors: Errors, mapper: Mapper) -> None:
ir = mapper.type_to_ir[cdef.info]
info = cdef.info
attrs = get_mypyc_attrs(cdef)
if (attrs.get('allow_interpreted_subclasses') is True):
ir.allow_interpreted_subclasses = True
if (attrs.get('serializable') is True):
ir._serializable = True
for cls in info.mro:
if (cls.fullname == 'builtins.BaseException'):
ir.builtin_base = 'PyBaseExceptionObject'
elif (cls.fullname == 'builtins.dict'):
ir.builtin_base = 'PyDictObject'
elif cls.fullname.startswith('builtins.'):
if (not can_subclass_builtin(cls.fullname)):
errors.error('Inheriting from most builtin types is unimplemented', path, cdef.line)
bases = [mapper.type_to_ir[base.type] for base in info.bases if (base.type in mapper.type_to_ir)]
if ((len(bases) > 1) and any(((not c.is_trait) for c in bases)) and bases[0].is_trait):
errors.error('Non-trait base must appear first in parent list', path, cdef.line)
ir.traits = [c for c in bases if c.is_trait]
mro = []
base_mro = []
for cls in info.mro:
if (cls not in mapper.type_to_ir):
if (cls.fullname != 'builtins.object'):
ir.inherits_python = True
continue
base_ir = mapper.type_to_ir[cls]
if (not base_ir.is_trait):
base_mro.append(base_ir)
mro.append(base_ir)
if (cls.defn.removed_base_type_exprs or (not base_ir.is_ext_class)):
ir.inherits_python = True
base_idx = (1 if (not ir.is_trait) else 0)
if (len(base_mro) > base_idx):
ir.base = base_mro[base_idx]
ir.mro = mro
ir.base_mro = base_mro
prepare_methods_and_attributes(cdef, ir, path, module_name, errors, mapper)
prepare_init_method(cdef, ir, module_name, mapper)
for base in bases:
if (base.children is not None):
base.children.append(ir)
if is_dataclass(cdef):
        ir.is_augmented = True
import torch
from torch import nn
from torch.nn import init


class SpatialGroupEnhance(nn.Module):
def __init__(self, groups):
super().__init__()
self.groups = groups
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.weight = nn.Parameter(torch.zeros(1, groups, 1, 1))
self.bias = nn.Parameter(torch.zeros(1, groups, 1, 1))
self.sig = nn.Sigmoid()
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if (m.bias is not None):
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if (m.bias is not None):
init.constant_(m.bias, 0)
def forward(self, x):
(b, c, h, w) = x.shape
x = x.view((b * self.groups), (- 1), h, w)
xn = (x * self.avg_pool(x))
xn = xn.sum(dim=1, keepdim=True)
t = xn.view((b * self.groups), (- 1))
t = (t - t.mean(dim=1, keepdim=True))
std = (t.std(dim=1, keepdim=True) + 1e-05)
t = (t / std)
t = t.view(b, self.groups, h, w)
t = ((t * self.weight) + self.bias)
t = t.view((b * self.groups), 1, h, w)
x = (x * self.sig(t))
x = x.view(b, c, h, w)
        return x
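
# Quick shape check for SpatialGroupEnhance (a sketch; the sizes are
# arbitrary, the only constraint is a channel count divisible by `groups`):
if __name__ == '__main__':
    sge = SpatialGroupEnhance(groups=8)
    out = sge(torch.randn(2, 64, 32, 32))
    assert out.shape == (2, 64, 32, 32)  # attention rescales values; the shape is unchanged
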
def import_gmsh_mesh(filename, analysis=None):
assert (filename[(- 4):] == u'.geo')
mesh_filename = (filename[:(- 4)] + u'.unv')
cmdlist = [u'gmsh', u'-format', u'unv', filename, u'-o', mesh_filename, u'-']
error = _run_command(cmdlist)
if (not error):
if analysis:
docName = analysis.Document.Name
else:
docName = None
import Fem
        Fem.insert(mesh_filename, docName)
def create_straight_road(road_id, length=100, junction=(- 1), n_lanes=1, lane_offset=3):
warn('create_straight_road should not be used anymore, please use the create_road function instead', DeprecationWarning, 2)
line1 = Line(length)
planview1 = PlanView()
planview1.add_geometry(line1)
lanesec1 = LaneSection(0, standard_lane())
for i in range(1, (n_lanes + 1), 1):
lanesec1.add_right_lane(standard_lane(lane_offset))
lanesec1.add_left_lane(standard_lane(lane_offset))
lanes1 = Lanes()
lanes1.add_lanesection(lanesec1)
    return Road(road_id, planview1, lanes1, road_type=junction)
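
# Usage sketch for the deprecated helper above (the call emits the
# DeprecationWarning and returns a Road with `n_lanes` lanes per side):
# road = create_straight_road(road_id=0, length=200, n_lanes=2)
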
class ResponseProcessingOpener(OpenerDirector):
def open(self, fullurl, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
def bound_open(fullurl, data=None, timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
return OpenerDirector.open(self, fullurl, data, timeout)
return wrapped_open(bound_open, self.process_response_object, fullurl, data, timeout)
def process_response_object(self, response):
        return response
class PascalVocGenerator(Generator):
def __init__(self, data_dir, set_name, classes=voc_classes, image_extension='.jpg', skip_truncated=False, skip_difficult=False, **kwargs):
self.data_dir = data_dir
self.set_name = set_name
self.classes = classes
self.image_names = [l.strip().split(None, 1)[0] for l in open(os.path.join(data_dir, 'ImageSets', 'Main', (set_name + '.txt'))).readlines()]
self.image_extension = image_extension
self.skip_truncated = skip_truncated
self.skip_difficult = skip_difficult
self.labels = {}
for (key, value) in self.classes.items():
self.labels[value] = key
super(PascalVocGenerator, self).__init__(**kwargs)
def size(self):
return len(self.image_names)
def num_classes(self):
return len(self.classes)
def has_label(self, label):
return (label in self.labels)
def has_name(self, name):
return (name in self.classes)
def name_to_label(self, name):
return self.classes[name]
def label_to_name(self, label):
return self.labels[label]
def image_aspect_ratio(self, image_index):
path = os.path.join(self.data_dir, 'JPEGImages', (self.image_names[image_index] + self.image_extension))
image = Image.open(path)
return (float(image.width) / float(image.height))
def load_image(self, image_index):
path = os.path.join(self.data_dir, 'JPEGImages', (self.image_names[image_index] + self.image_extension))
return read_image_bgr(path)
def __parse_annotation(self, element):
truncated = _findNode(element, 'truncated', parse=int)
difficult = _findNode(element, 'difficult', parse=int)
class_name = _findNode(element, 'name').text
if (class_name not in self.classes):
raise ValueError("class name '{}' not found in classes: {}".format(class_name, list(self.classes.keys())))
box = np.zeros((4,))
label = self.name_to_label(class_name)
bndbox = _findNode(element, 'bndbox')
box[0] = (_findNode(bndbox, 'xmin', 'bndbox.xmin', parse=float) - 1)
box[1] = (_findNode(bndbox, 'ymin', 'bndbox.ymin', parse=float) - 1)
box[2] = (_findNode(bndbox, 'xmax', 'bndbox.xmax', parse=float) - 1)
box[3] = (_findNode(bndbox, 'ymax', 'bndbox.ymax', parse=float) - 1)
return (truncated, difficult, box, label)
def __parse_annotations(self, xml_root):
annotations = {'labels': np.empty((0,), dtype=np.int32), 'bboxes': np.empty((0, 4))}
for (i, element) in enumerate(xml_root.iter('object')):
try:
(truncated, difficult, box, label) = self.__parse_annotation(element)
except ValueError as e:
raise_from(ValueError('could not parse object #{}: {}'.format(i, e)), None)
if (truncated and self.skip_truncated):
continue
if (difficult and self.skip_difficult):
continue
annotations['bboxes'] = np.concatenate([annotations['bboxes'], [box]])
annotations['labels'] = np.concatenate([annotations['labels'], [label]])
return annotations
def load_annotations(self, image_index):
filename = (self.image_names[image_index] + '.xml')
try:
tree = ET.parse(os.path.join(self.data_dir, 'Annotations', filename))
return self.__parse_annotations(tree.getroot())
except ET.ParseError as e:
raise_from(ValueError('invalid annotations file: {}: {}'.format(filename, e)), None)
except ValueError as e:
            raise_from(ValueError('invalid annotations file: {}: {}'.format(filename, e)), None)
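
# Hypothetical instantiation sketch (the VOC path is an assumption; extra
# keyword arguments are forwarded to the base Generator):
# generator = PascalVocGenerator('/data/VOCdevkit/VOC2007', 'trainval')
# print(generator.size(), generator.num_classes())
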
class Scheduler(abc.ABC, Generic[T]):
def __init__(self, backend: str, session_name: str) -> None:
self.backend = backend
self.session_name = session_name
def close(self) -> None:
pass
def submit(self, app: AppDef, cfg: T, workspace: Optional[str]=None) -> str:
resolved_cfg = self.run_opts().resolve(cfg)
if workspace:
sched = self
assert isinstance(sched, WorkspaceMixin)
role = app.roles[0]
sched.build_workspace_and_update_role(role, workspace, resolved_cfg)
dryrun_info = self.submit_dryrun(app, resolved_cfg)
return self.schedule(dryrun_info)
def schedule(self, dryrun_info: AppDryRunInfo) -> str:
raise NotImplementedError()
def submit_dryrun(self, app: AppDef, cfg: T) -> AppDryRunInfo:
resolved_cfg = self.run_opts().resolve(cfg)
dryrun_info = self._submit_dryrun(app, resolved_cfg)
for role in app.roles:
dryrun_info = role.pre_proc(self.backend, dryrun_info)
dryrun_info._app = app
dryrun_info._cfg = resolved_cfg
return dryrun_info
def _submit_dryrun(self, app: AppDef, cfg: T) -> AppDryRunInfo:
raise NotImplementedError()
def run_opts(self) -> runopts:
opts = self._run_opts()
if isinstance(self, WorkspaceMixin):
opts.update(self.workspace_opts())
return opts
def _run_opts(self) -> runopts:
return runopts()
def describe(self, app_id: str) -> Optional[DescribeAppResponse]:
raise NotImplementedError()
def list(self) -> List[ListAppResponse]:
raise NotImplementedError()
def exists(self, app_id: str) -> bool:
desc = self.describe(app_id)
return (desc is not None)
def _cancel_existing(self, app_id: str) -> None:
raise NotImplementedError()
def cancel(self, app_id: str) -> None:
if self.exists(app_id):
self._cancel_existing(app_id)
else:
return
def log_iter(self, app_id: str, role_name: str, k: int=0, regex: Optional[str]=None, since: Optional[datetime]=None, until: Optional[datetime]=None, should_tail: bool=False, streams: Optional[Stream]=None) -> Iterable[str]:
raise NotImplementedError(f'{self.__class__.__qualname__} does not support application log iteration')
def _validate(self, app: AppDef, scheduler: str) -> None:
for role in app.roles:
if (role.resource == NULL_RESOURCE):
                raise ValueError(f'No resource for role: {role.image}. Did you forget to attach resource to the role')
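
# A minimal concrete subclass sketch (illustrative only; `EchoScheduler`
# and its behaviour are assumptions, and real torchx schedulers also
# implement describe/list/_cancel_existing):
class EchoScheduler(Scheduler[dict]):
    def _submit_dryrun(self, app: AppDef, cfg: dict) -> AppDryRunInfo:
        return AppDryRunInfo(app, repr)

    def schedule(self, dryrun_info: AppDryRunInfo) -> str:
        # A real scheduler would hand dryrun_info off to its backend here.
        return f'echo://{self.session_name}/{dryrun_info._app.name}'

# sched = EchoScheduler('echo', 'demo-session')
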
def test_expand_multiple_levels(df_expand):
expected = df_expand.pivot_wider('id', ('year', 'gender'), 'percentage', names_expand=True, flatten_levels=False)
actual = df_expand.complete('year', 'gender', 'id').pivot(index='id', columns=('year', 'gender'), values='percentage')
    assert_frame_equal(actual, expected)
class BarthezConverter(SpmConverter):
def unk_id(self, proto):
unk_id = 3
return unk_id
def post_processor(self):
        return processors.TemplateProcessing(single='<s> $A </s>', pair='<s> $A </s> </s> $B </s>', special_tokens=[('<s>', self.original_tokenizer.convert_tokens_to_ids('<s>')), ('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
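
# Standalone illustration of the template above (the ids 0 and 2 for
# <s>/</s> are placeholders here, not BARThez's real vocabulary ids):
_example_post_processor = processors.TemplateProcessing(
    single='<s> $A </s>',
    pair='<s> $A </s> </s> $B </s>',
    special_tokens=[('<s>', 0), ('</s>', 2)],
)
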
def confirm_team_invite(code, user_obj):
found = find_matching_team_invite(code, user_obj)
code_found = False
for invite in find_organization_invites(found.team.organization, user_obj):
try:
code_found = True
add_user_to_team(user_obj, invite.team)
except UserAlreadyInTeam:
pass
invite.delete_instance()
if (not code_found):
if found.user:
message = ('This invite is intended for user "%s".\n Please login to that account and try again.' % found.user.username)
raise DataModelException(message)
else:
message = ('This invite is intended for email "%s".\n Please login to that account and try again.' % found.email)
raise DataModelException(message)
team = found.team
inviter = found.inviter
    return (team, inviter)
class SignUp():
async def sign_up(self: 'pyrogram.Client', phone_number: str, phone_code_hash: str, first_name: str, last_name: str='') -> 'types.User':
phone_number = phone_number.strip(' +')
r = (await self.invoke(raw.functions.auth.SignUp(phone_number=phone_number, first_name=first_name, last_name=last_name, phone_code_hash=phone_code_hash)))
(await self.storage.user_id(r.user.id))
(await self.storage.is_bot(False))
        return types.User._parse(self, r.user)
from typing import List


class Solution():
def checkPossibility(self, nums: List[int]) -> bool:
n = len(nums)
if ((n == 1) or (n == 2)):
return True
i = 0
nums1 = nums[:]
while (i < (n - 1)):
if (nums[i] <= nums[(i + 1)]):
i += 1
continue
else:
nums = (nums[:i] + nums[(i + 1):])
nums1 = (nums1[:(i + 1)] + nums1[(i + 2):])
break
i += 1
if ((nums == sorted(nums)) or (nums1 == sorted(nums1))):
return True
        return False
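
# Quick check of checkPossibility (LeetCode 665 semantics: can the list be
# made non-decreasing by changing at most one element?):
if __name__ == '__main__':
    s = Solution()
    assert s.checkPossibility([4, 2, 3]) is True   # lowering the 4 works
    assert s.checkPossibility([4, 2, 1]) is False  # would need two changes
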
class TestNameCheckVisitor(TestNameCheckVisitorBase):
    @assert_passes()
def test_known_ordered(self):
from typing_extensions import OrderedDict
known_ordered = OrderedDict({1: 2})
bad_ordered = OrderedDict({'a': 'b'})
def capybara(arg: OrderedDict[(int, int)]) -> None:
pass
def caller() -> None:
capybara(known_ordered)
capybara(bad_ordered)
    @assert_passes()
def test_undefined_name(self):
def run():
print(undefined_variable)
    @assert_passes()
def test_undefined_attribute(self):
def run():
lst = []
print(lst.coruro)
def test_undefined_name_with_star_import(self):
self.assert_fails(ErrorCode.undefined_name, '\n from qcore.asserts import *\n def run():\n print(not_in_qcore.asserts)\n ')
    @assert_passes()
def test_undefined_name_in_return(self):
def what_is_it():
return tucotuco
    @assert_passes()
def test_undefined_name_in_class_kwarg(self):
def capybara():
class Capybara(metaclass=Hutia):
pass
    @assert_passes()
def test_no_failure_on_builtin(self):
def run():
print(len)
    @assert_passes()
def test_no_failure_on_global(self):
capybara = 3
def run():
print(capybara)
    @assert_passes()
def test_no_failure_on_global_return(self):
tucotuco = 'a burrowing rodent'
def what_is_it():
return tucotuco
    @assert_passes()
def test_no_failure_on_arg(self):
def double_it(x):
return (x * 2)
    @assert_passes()
def test_class_scope(self):
class Porcupine(object):
def coendou(self):
return 1
sphiggurus = coendou
    @assert_passes()
def test_class_scope_fails_wrong_order(self):
def run():
class Porcupine(object):
sphiggurus = coendou
def coendou(self):
return 1
    @assert_passes()
def test_class_scope_is_not_searched(self):
class Porcupine(object):
sphiggurus = 1
def coendou(self):
return sphiggurus
    @assert_passes()
def test_getter_decorator(self):
class Porcupine(object):
sphiggurus = property()
def sphiggurus(self):
pass
    @assert_passes()
def test_ipython_whitelisting(self):
def run():
print(__IPYTHON__)
    @assert_passes()
def test_mock_attributes(self):
def cavy():
pass
def run():
print(cavy.call_count)
    @assert_passes()
def test_mock_attr(self):
from unittest import mock
class X():
a = mock.MagicMock()
class Y():
def __init__(self):
self.x = X()
def f():
y = Y()
assert_is_value(y.x.a, KnownValue(X.a))
    @assert_passes()
def test_method_mock_attributes(self):
class Capybara(object):
def hutia(self):
pass
def kerodon(self):
print(self.hutia.call_count)
    @assert_passes()
def test_global_assignment(self):
from qcore.asserts import assert_eq
fn = assert_eq
def run():
assert_is_value(fn, KnownValue(assert_eq))
    @assert_passes()
def test_builtin_attribute(self):
def run():
print(True .hutia)
    @assert_passes()
def test_module_reassignment(self):
_std_set = set
def set(key, value):
return _std_set([key, value])
_std_set()
    @assert_passes()
def test_display(self):
def run():
print([1, 2]())
    @assert_passes()
def test_set_display(self):
def run():
print({[]})
print({*[1, 2, 3], 'a', 'b'})
print({*[{}], 'a', 'b'})
    @assert_passes()
def test_multiple_assignment_global(self):
if False:
goes_in_set = []
else:
goes_in_set = 'capybara'
if False:
assert_is_value(goes_in_set, KnownValue('capybara'))
print({goes_in_set})
    @assert_passes()
def test_multiple_assignment_function(self):
def fn(cond):
if cond:
goes_in_set = []
else:
goes_in_set = 'capybara'
assert_is_value(goes_in_set, (KnownValue([]) | KnownValue('capybara')))
print({goes_in_set})
    @assert_passes()
def test_duplicate_dict_key(self):
def run():
print({'capybara': 1, 'capybara': 2})
    @assert_passes()
def test_unhashable_dict_key(self):
def run():
print({[]: 1})
    @assert_passes()
def test_inferred_duplicate_dict_key(self):
key = 'capybara'
def run():
print({'capybara': 1, key: 1})
    @assert_passes()
def test_inferred_unhashable_dict_key(self):
key = []
def run():
print({key: 1})
    @assert_passes()
def test_nested_classes(self):
class Caviids(object):
class Capybaras(object):
if False:
print(neochoerus)
def method(self, cap: Capybaras):
assert_is_value(cap, TypedValue(Caviids.Capybaras))
    @assert_passes()
def test_class_in_function(self):
def get_capybaras(object):
class Capybaras(object):
if False:
print(neochoerus)
    @assert_passes()
def test_cant_del_tuple(self):
tpl = (1, 2, 3)
def run():
del tpl[1]
    @assert_passes()
def test_cant_del_generator(self):
tpl = (x for x in (1, 2, 3))
def run():
del tpl[1]
    @assert_passes()
def test_cant_assign_tuple(self):
tpl = (1, 2, 3)
def run():
tpl[1] = 1
    @assert_passes()
def test_global_sets_value(self):
capybara = None
def set_it():
global capybara
capybara = (0,)
def use_it():
assert_is_value(capybara, (KnownValue((0,)) | KnownValue(None)))
    @assert_fails(ErrorCode.unsupported_operation)
def test_self_type_inference(self):
class Capybara(object):
def get(self, i):
assert_is_value(self, TypedValue(Capybara))
return self[i]
    @assert_passes()
def test_self_is_subscriptable(self):
class Capybara(object):
def get(self, i):
return self[i]
def __getitem__(self, i):
return i
    @assert_passes()
def test_cls_type_inference(self):
class OldStyle():
def __init_subclass__(cls):
assert_is_value(cls, SubclassValue(TypedValue(OldStyle)))
def __new__(cls):
assert_is_value(cls, SubclassValue(TypedValue(OldStyle)))
def capybara(cls):
assert_is_value(cls, SubclassValue(TypedValue(OldStyle)))
    @assert_passes()
def test_display_type_inference(self):
UNANNOTATED = AnyValue(AnySource.unannotated)
def capybara(a, b):
x = [a, b]
assert_is_value(x, make_simple_sequence(list, [UNANNOTATED, UNANNOTATED]))
y = (a, 2)
assert_is_value(y, make_simple_sequence(tuple, [UNANNOTATED, KnownValue(2)]))
s = {a, b}
assert_is_value(s, make_simple_sequence(set, [UNANNOTATED, UNANNOTATED]))
z = {a: b}
assert_is_value(z, DictIncompleteValue(dict, [KVPair(UNANNOTATED, UNANNOTATED)]))
q = {a: 3, b: 4}
assert_is_value(q, DictIncompleteValue(dict, [KVPair(UNANNOTATED, KnownValue(3)), KVPair(UNANNOTATED, KnownValue(4))]))
    @assert_passes()
def test_if_exp(self):
def capybara(x):
y = (3 if x else 4)
assert_is_value(y, MultiValuedValue([KnownValue(3), KnownValue(4)]))
    @assert_passes()
def test_namedtuple(self):
import collections
typ = collections.namedtuple('typ', 'foo bar')
def fn():
t = typ(1, 2)
print(t.baz)
    @assert_passes()
def test_local_namedtuple(self):
import collections
def capybara():
typ = collections.namedtuple('typ', 'foo bar')
print(typ(1, 2))
    @assert_passes()
def test_set_after_get(self):
def fn():
capybara = None
for _ in range(5):
if capybara:
print(capybara[0])
capybara = 'foo'
    @assert_passes()
def test_multiple_anys(self):
def fn(item):
if False:
item = None
assert_is_value(item, (KnownValue(None) | AnyValue(AnySource.unannotated)))
    @assert_passes()
def test_bad_attribute_of_global(self):
import os
path = os.path
def capybara():
print(path.joyn)
    @assert_passes()
def test_double_assignment(self):
from pyanalyze.tests import PropertyObject
def capybara(aid):
answer = PropertyObject(aid)
print(answer)
answer = PropertyObject(aid)
assert_is_value(answer, TypedValue(PropertyObject))
    @assert_passes()
def test_duplicate_method(self):
class Tucotuco(object):
def __init__(self, fn):
pass
def __init__(self, an):
pass
    @assert_passes()
def test_duplicate_attribute(self):
class Hutia():
capromys = 1
capromys = 2
    @assert_passes()
def test_duplicate_attribute_augassign(self):
class Capybara():
x = 1
x += 1
    @assert_passes()
def test_duplicate_property_method(self):
class Capybara(object):
def fur(self):
return 'a lot'
def fur(self, value):
pass
    @assert_passes()
def test_bad_global(self):
global x
    @assert_passes()
def test_undefined_global(self):
def fn():
global x
return x
    @assert_passes()
def test_global_value(self):
x = 3
def capybara():
global x
            assert_is_value(x, KnownValue(3))
@pytest.mark.parametrize('stream', ['stdout', 'stderr'])
def test_exit_successful_output(qtbot, proc, py_proc, stream):
with qtbot.wait_signal(proc.finished, timeout=10000):
        proc.start(*py_proc('\n import sys\n print("test", file=sys.{})\n sys.exit(0)\n '.format(stream)))
def test_custom_css(pytester, css_file_path, expandvar):
result = run(pytester, 'report.html', cmd_flags=['--css', expandvar, '--css', 'two.css'])
result.assert_outcomes(passed=1)
path = pytester.path.joinpath('assets', 'style.css')
with open(str(path)) as f:
css = f.read()
    assert_that(css).contains(('* ' + str(css_file_path))).contains('* two.css')
def average(dj_init=None, img_db=None, djs_file=None, avgs_file=None, pcas_file=None, op=None):
djs = load_dict(op['data_checkpoint'])
avgs = load_dict(op['average']['checkpoint'])
if ((- 1) not in djs):
assert (len(djs) == 0)
djs[(- 1)] = dj_init
AIF.pickle_dump(djs, op['data_checkpoint'])
dj = djs[(- 1)]
for pass_i in range(op['option']['pass_num']):
print('pass_i', pass_i)
if (pass_i in djs):
dj = djs[pass_i]
continue
dj = copy.deepcopy(dj)
c = str(uuid.uuid4())
avg_t = vol_avg(dj=dj, op=op['average'], img_db=img_db)
avgs[c] = avg_t
avgs[c]['pass_i'] = pass_i
avgs[c]['id'] = c
AIF.pickle_dump(avgs, op['average']['checkpoint'])
print('averaging done')
al = align_all_pairs(avgs=avgs, dj=dj, img_db=img_db)
a = align_all_pairs__select_best(al)
for d in dj:
i = d['subtomogram']
d['loc'] = a[i]['loc']
d['angle'] = a[i]['angle']
d['score'] = a[i]['score']
d['template_id'] = a[i]['template_id']
print('re-align done')
djs[pass_i] = dj
        AIF.pickle_dump(djs, op['data_checkpoint'])
class Trainer():
def __init__(self, G, D, latent_size, dataset, device, Gs=None, Gs_beta=(0.5 ** (32 / 10000)), Gs_device=None, batch_size=32, device_batch_size=4, label_size=0, data_workers=4, G_loss='logistic_ns', D_loss='logistic', G_reg='pathreg:2', G_reg_interval=4, G_opt_class='Adam', G_opt_kwargs={'lr': 0.002, 'betas': (0, 0.99)}, G_reg_batch_size=None, G_reg_device_batch_size=None, D_reg='r1:10', D_reg_interval=16, D_opt_class='Adam', D_opt_kwargs={'lr': 0.002, 'betas': (0, 0.99)}, style_mix_prob=0.9, G_iter=1, D_iter=1, pl_avg=0.0, tensorboard_log_dir=None, checkpoint_dir=None, checkpoint_interval=10000, seen=0, half=False, rank=None, world_size=None, master_addr='127.0.0.1', master_port='23456', freezeG=(- 1), freezeD=(- 1), augmentopt=None):
assert ((not isinstance(G, nn.parallel.DistributedDataParallel)) and (not isinstance(D, nn.parallel.DistributedDataParallel))), (('Encountered a model wrapped in `DistributedDataParallel`. ' + 'Distributed parallelism is handled by this class and can ') + 'not be initialized before.')
kwargs = locals()
kwargs.pop('self')
kwargs.pop('G')
kwargs.pop('D')
kwargs.pop('Gs')
kwargs.pop('dataset')
kwargs.update(pl_avg=float(pl_avg))
if isinstance(device, torch.device):
kwargs.update(device=str(device))
if isinstance(Gs_device, torch.device):
kwargs.update(device=str(Gs_device))
self.kwargs = kwargs
if (device or (device == 0)):
if isinstance(device, (tuple, list)):
self.device = torch.device(device[0])
else:
self.device = torch.device(device)
else:
self.device = torch.device('cpu')
if (self.device.index is not None):
torch.cuda.set_device(self.device.index)
else:
assert (not half), ('Mixed precision training only available ' + 'for CUDA devices.')
self.G = G.train().to(self.device)
self.D = D.train().to(self.device)
if (isinstance(device, (tuple, list)) and (len(device) > 1)):
assert all((isinstance(dev, int) for dev in device)), ('Multiple devices have to be specified as a list ' + 'or tuple of integers corresponding to device indices.')
assert ((G_reg is None) and (D_reg is None)), (('Regularization ' + 'currently not supported for multi-gpu training in single process. ') + 'Please use distributed training with one device per process instead.')
device_batch_size *= len(device)
def to_data_parallel(model):
if (not isinstance(model, nn.DataParallel)):
return nn.DataParallel(model, device_ids=device)
return model
self.G = to_data_parallel(self.G)
self.D = to_data_parallel(self.D)
G_reg_batch_size = (G_reg_batch_size or batch_size)
G_reg_device_batch_size = (G_reg_device_batch_size or device_batch_size)
rank = os.environ.get('RANK', rank)
if (rank is not None):
rank = int(rank)
addr = os.environ.get('MASTER_ADDR', master_addr)
port = os.environ.get('MASTER_PORT', master_port)
world_size = os.environ.get('WORLD_SIZE', world_size)
assert (world_size is not None), ('Distributed training ' + 'requires specifying world size.')
world_size = int(world_size)
assert (self.device.index is not None), 'Distributed training is only supported for CUDA.'
assert ((batch_size % world_size) == 0), ('Batch size has to be ' + 'evenly divisible by world size.')
assert ((G_reg_batch_size % world_size) == 0), ('G reg batch size has to be ' + 'evenly divisible by world size.')
batch_size = (batch_size // world_size)
G_reg_batch_size = (G_reg_batch_size // world_size)
init_method = 'tcp://{}:{}'.format(addr, port)
torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=world_size)
else:
world_size = 1
self.rank = rank
self.world_size = world_size
self.pl_avg = torch.tensor(pl_avg, dtype=(torch.float16 if half else torch.float32), device=self.device)
self._sync_distributed(G=self.G, D=self.D, broadcast_weights=True)
if (not self.rank):
self.Gs = Gs
if (not isinstance(Gs, utils.MovingAverageModule)):
self.Gs = utils.MovingAverageModule(from_module=self.G, to_module=Gs, param_beta=Gs_beta, device=(self.device if (Gs_device is None) else Gs_device))
else:
self.Gs = None
self.G_loss = get_loss_fn('G', G_loss)
self.D_loss = get_loss_fn('D', D_loss)
self.G_reg = get_reg_fn('G', G_reg, pl_avg=self.pl_avg)
self.D_reg = get_reg_fn('D', D_reg)
self.G_reg_interval = G_reg_interval
self.D_reg_interval = D_reg_interval
self.G_iter = G_iter
self.D_iter = D_iter
self.G_opt = build_opt(self.G, G_opt_class, G_opt_kwargs, self.G_reg, self.G_reg_interval)
self.D_opt = build_opt(self.D, D_opt_class, D_opt_kwargs, self.D_reg, self.D_reg_interval)
if half:
assert ('apex' in sys.modules), ('Can not run mixed precision ' + 'training (`half=True`) without the apex module.')
((self.G, self.D), (self.G_opt, self.D_opt)) = amp.initialize([self.G, self.D], [self.G_opt, self.D_opt], opt_level='O1')
self.half = half
sampler = None
if (self.rank is not None):
sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True)
self.dataloader = torch.utils.data.DataLoader(dataset, batch_size=device_batch_size, num_workers=data_workers, shuffle=(sampler is None), pin_memory=(self.device.index is not None), drop_last=True, sampler=sampler)
self.dataloader_iter = None
self.prior_generator = utils.PriorGenerator(latent_size=latent_size, label_size=label_size, batch_size=device_batch_size, device=self.device)
assert ((batch_size % device_batch_size) == 0), ('Batch size has to be evenly divisible by the product of ' + 'device batch size and world size.')
self.subdivisions = (batch_size // device_batch_size)
assert ((G_reg_batch_size % G_reg_device_batch_size) == 0), ('G reg batch size has to be evenly divisible by the product of ' + 'G reg device batch size and world size.')
self.G_reg_subdivisions = (G_reg_batch_size // G_reg_device_batch_size)
self.G_reg_device_batch_size = G_reg_device_batch_size
self.tb_writer = None
if (tensorboard_log_dir and (not self.rank)):
self.tb_writer = torch.utils.tensorboard.SummaryWriter(tensorboard_log_dir)
self.label_size = label_size
self.style_mix_prob = style_mix_prob
self.checkpoint_dir = checkpoint_dir
self.checkpoint_interval = checkpoint_interval
self.seen = seen
self.metrics = {}
self.callbacks = []
self.freezeG = freezeG
self.freezeD = freezeD
self.augmentopt = augmentopt
def _get_batch(self):
if (self.dataloader_iter is None):
self.dataloader_iter = iter(self.dataloader)
try:
batch = next(self.dataloader_iter)
except StopIteration:
self.dataloader_iter = None
return self._get_batch()
if isinstance(batch, (tuple, list)):
if (len(batch) > 1):
(data, label) = batch[:2]
else:
(data, label) = (batch[0], None)
else:
(data, label) = (batch, None)
if (not self.label_size):
label = None
if torch.is_tensor(data):
data = data.to(self.device)
if torch.is_tensor(label):
label = label.to(self.device)
return (data, label)
def _sync_distributed(self, G=None, D=None, broadcast_weights=False):
if (self.rank is None):
return
for net in [G, D]:
if (net is None):
continue
for p in net.parameters():
if (p.grad is not None):
torch.distributed.all_reduce(p.grad, async_op=True)
if broadcast_weights:
torch.distributed.broadcast(p.data, src=0, async_op=True)
if (G is not None):
if (G.dlatent_avg is not None):
torch.distributed.broadcast(G.dlatent_avg, src=0, async_op=True)
if (self.pl_avg is not None):
torch.distributed.broadcast(self.pl_avg, src=0, async_op=True)
if ((G is not None) or (D is not None)):
torch.distributed.barrier(async_op=False)
def _backward(self, loss, opt, mul=1, subdivisions=None):
if (loss is None):
return 0
mul /= (subdivisions or self.subdivisions)
mul /= (self.world_size or 1)
if (mul != 1):
loss *= mul
if self.half:
with amp.scale_loss(loss, opt) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
return (loss.item() * (self.world_size or 1))
def train(self, iterations, callbacks=None, verbose=True):
evaluated_metrics = {}
if self.rank:
verbose = False
if verbose:
progress = utils.ProgressWriter(iterations)
value_tracker = utils.ValueTracker()
ada_augment = torch.tensor([0.0, 0.0], device=self.device)
ada_aug_p = (self.augmentopt['augment_p'] if (self.augmentopt['augment_p'] > 0) else 0.0)
ada_aug_step = (self.augmentopt['ada_target'] / self.augmentopt['ada_length'])
self.D.requires_grad_(False)
self.G.requires_grad_(False)
for _ in range(iterations):
G_reg = (self.G_reg is not None)
if (self.G_reg_interval and G_reg):
G_reg = ((self.seen % self.G_reg_interval) == 0)
D_reg = (self.D_reg is not None)
if (self.D_reg_interval and D_reg):
D_reg = ((self.seen % self.D_reg_interval) == 0)
if (self.freezeG < 0):
self.G.requires_grad_(True)
else:
requires_grad_G(self.G, True, self.freezeG)
if (self.freezeD < 0):
self.D.requires_grad_(False)
else:
requires_grad_D(self.D, False, self.freezeD)
for _ in range(self.G_iter):
self.G_opt.zero_grad()
G_loss = 0
for i in range(self.subdivisions):
(latents, latent_labels) = self.prior_generator(multi_latent_prob=self.style_mix_prob)
(loss, _) = self.G_loss(G=self.G, D=self.D, latents=latents, latent_labels=latent_labels, augment=(augment if self.augmentopt['augment'] else None), ada_augment=ada_augment, ada_aug_p=ada_aug_p, ada_aug_step=ada_aug_step)
G_loss += self._backward(loss, self.G_opt)
if G_reg:
if self.G_reg_interval:
self._sync_distributed(G=self.G)
self.G_opt.step()
self.G_opt.zero_grad()
G_reg_loss = 0
for i in range(self.G_reg_subdivisions):
(latents, latent_labels) = self.prior_generator(batch_size=self.G_reg_device_batch_size, multi_latent_prob=self.style_mix_prob)
(_, reg_loss) = self.G_reg(G=self.G, latents=latents, latent_labels=latent_labels)
G_reg_loss += self._backward(reg_loss, self.G_opt, mul=(self.G_reg_interval or 1), subdivisions=self.G_reg_subdivisions)
self._sync_distributed(G=self.G)
self.G_opt.step()
if (self.Gs is not None):
self.Gs.update()
if (self.freezeD < 0):
self.D.requires_grad_(True)
else:
requires_grad_D(self.D, True, self.freezeD)
if (self.freezeG < 0):
self.G.requires_grad_(False)
else:
requires_grad_G(self.G, False, self.freezeG)
for _ in range(self.D_iter):
self.D_opt.zero_grad()
D_loss = 0
for i in range(self.subdivisions):
(latents, latent_labels) = self.prior_generator(multi_latent_prob=self.style_mix_prob)
(reals, real_labels) = self._get_batch()
(loss, _, real_scores) = self.D_loss(G=self.G, D=self.D, latents=latents, latent_labels=latent_labels, reals=reals, real_labels=real_labels, augment=(augment if self.augmentopt['augment'] else None), ada_augment=ada_augment, ada_aug_p=ada_aug_p, ada_aug_step=ada_aug_step, device=self.device)
D_loss += self._backward(loss, self.D_opt)
if ((augment is not None) and (self.augmentopt['augment_p'] == 0)):
ada_augment += torch.tensor((torch.sign(real_scores).sum().item(), real_scores.shape[0]), device=self.device)
ada_augment = reduce_sum(ada_augment)
if (ada_augment[1] > 255):
(pred_signs, n_pred) = ada_augment.tolist()
r_t_stat = (pred_signs / n_pred)
if (r_t_stat > self.augmentopt['ada_target']):
sign = 1
else:
sign = (- 1)
ada_aug_p += ((sign * ada_aug_step) * n_pred)
ada_aug_p = min(1, max(0, ada_aug_p))
ada_augment.mul_(0)
if D_reg:
if self.D_reg_interval:
self._sync_distributed(D=self.D)
self.D_opt.step()
self.D_opt.zero_grad()
D_reg_loss = 0
for i in range(self.subdivisions):
(latents, latent_labels) = self.prior_generator(multi_latent_prob=self.style_mix_prob)
(reals, real_labels) = self._get_batch()
(_, reg_loss) = self.D_reg(G=self.G, D=self.D, latents=latents, latent_labels=latent_labels, reals=reals, real_labels=real_labels)
D_reg_loss += self._backward(reg_loss, self.D_opt, mul=(self.D_reg_interval or 1))
self._sync_distributed(D=self.D)
self.D_opt.step()
if ((self.tb_writer is not None) or verbose):
G_grad_norm = utils.get_grad_norm_from_optimizer(self.G_opt)
D_grad_norm = utils.get_grad_norm_from_optimizer(self.D_opt)
"\n for name, metric in self.metrics.items():\n if not metric['interval'] or self.seen % metric['interval'] == 0:\n evaluated_metrics[name] = metric['eval_fn']()\n "
if (self.tb_writer is not None):
self.tb_writer.add_scalar('Loss/G_loss', G_loss, self.seen)
if G_reg:
self.tb_writer.add_scalar('Loss/G_reg', G_reg_loss, self.seen)
self.tb_writer.add_scalar('Grad_norm/G_reg', G_grad_norm, self.seen)
self.tb_writer.add_scalar('Params/pl_avg', self.pl_avg, self.seen)
else:
self.tb_writer.add_scalar('Grad_norm/G_loss', G_grad_norm, self.seen)
self.tb_writer.add_scalar('Loss/D_loss', D_loss, self.seen)
if D_reg:
self.tb_writer.add_scalar('Loss/D_reg', D_reg_loss, self.seen)
self.tb_writer.add_scalar('Grad_norm/D_reg', D_grad_norm, self.seen)
else:
self.tb_writer.add_scalar('Grad_norm/D_loss', D_grad_norm, self.seen)
for (name, value) in evaluated_metrics.items():
self.tb_writer.add_scalar('Metrics/{}'.format(name), value, self.seen)
if verbose:
value_tracker.add('seen', (self.seen + 1), beta=0)
value_tracker.add('G_lr', self.G_opt.param_groups[0]['lr'], beta=0)
value_tracker.add('G_loss', G_loss)
if G_reg:
value_tracker.add('G_reg', G_reg_loss)
value_tracker.add('G_reg_grad_norm', G_grad_norm)
value_tracker.add('pl_avg', self.pl_avg, beta=0)
else:
value_tracker.add('G_loss_grad_norm', G_grad_norm)
value_tracker.add('D_lr', self.D_opt.param_groups[0]['lr'], beta=0)
value_tracker.add('D_loss', D_loss)
if D_reg:
value_tracker.add('D_reg', D_reg_loss)
value_tracker.add('D_reg_grad_norm', D_grad_norm)
else:
value_tracker.add('D_loss_grad_norm', D_grad_norm)
for (name, value) in evaluated_metrics.items():
value_tracker.add(name, value, beta=0)
progress.write(str(value_tracker))
for callback in (utils.to_list(callbacks) + self.callbacks):
callback(self.seen)
self.seen += 1
torch.cuda.empty_cache()
if ((not self.rank) and self.checkpoint_dir and self.checkpoint_interval):
if ((self.seen % self.checkpoint_interval) == 0):
checkpoint_path = os.path.join(self.checkpoint_dir, '{}_{}'.format(self.seen, time.strftime('%Y-%m-%d_%H-%M-%S')))
self.save_checkpoint(checkpoint_path)
if verbose:
progress.close()
def register_metric(self, name, eval_fn, interval):
self.metrics[name] = {'eval_fn': eval_fn, 'interval': interval}
def remove_metric(self, name):
if (name in self.metrics):
del self.metrics[name]
else:
warnings.warn(('Attempting to remove metric {} '.format(name) + 'which does not exist.'))
def generate_images(self, num_images, seed=None, truncation_psi=None, truncation_cutoff=None, label=None, pixel_min=(- 1), pixel_max=1):
if (seed is None):
seed = int((10000 * time.time()))
(latents, latent_labels) = self.prior_generator(num_images, seed=seed)
if label:
assert (latent_labels is not None), ('Can not specify label when no labels ' + 'are used by this model.')
label = utils.to_list(label)
assert all((isinstance(l, int) for l in label)), ('`label` can only consist of ' + 'one or more python integers.')
assert ((len(label) == 1) or (len(label) == num_images)), ((('`label` can either ' + 'specify one label to use for all images or a list of labels of the ') + 'same length as number of images. Received {} labels '.format(len(label))) + 'but {} images are to be generated.'.format(num_images))
if (len(label) == 1):
latent_labels.fill_(label[0])
else:
latent_labels = torch.tensor(label).to(latent_labels)
self.Gs.set_truncation(truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
with torch.no_grad():
generated = self.Gs(latents=latents, labels=latent_labels)
assert (generated.dim() == 4), ('Can only generate images when using a ' + 'network built for 2-dimensional data.')
return utils.tensor_to_PIL(generated, pixel_min=pixel_min, pixel_max=pixel_max)
def log_images_tensorboard(self, images, name, resize=256):
assert (self.tb_writer is not None), ('No tensorboard log dir was specified ' + 'when constructing this object.')
image = utils.stack_images_PIL(images, individual_img_size=resize)
image = torchvision.transforms.ToTensor()(image)
self.tb_writer.add_image(name, image, self.seen)
def add_tensorboard_image_logging(self, name, interval, num_images, resize=256, seed=None, truncation_psi=None, truncation_cutoff=None, label=None, pixel_min=(- 1), pixel_max=1):
if self.rank:
return
def callback(seen):
if ((seen % interval) == 0):
images = self.generate_images(num_images=num_images, seed=seed, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, label=label, pixel_min=pixel_min, pixel_max=pixel_max)
self.log_images_tensorboard(images=images, name=name, resize=resize)
self.callbacks.append(callback)
def save_checkpoint(self, dir_path):
if (not os.path.exists(dir_path)):
os.makedirs(dir_path)
else:
assert os.path.isdir(dir_path), '`dir_path` points to a file.'
kwargs = self.kwargs.copy()
kwargs.update(seen=self.seen, pl_avg=float(self.pl_avg))
with open(os.path.join(dir_path, 'kwargs.json'), 'w') as fp:
json.dump(kwargs, fp)
torch.save(self.G_opt.state_dict(), os.path.join(dir_path, 'G_opt.pth'))
torch.save(self.D_opt.state_dict(), os.path.join(dir_path, 'D_opt.pth'))
models.save(self.G, os.path.join(dir_path, 'G.pth'))
models.save(self.D, os.path.join(dir_path, 'D.pth'))
if (self.Gs is not None):
models.save(self.Gs, os.path.join(dir_path, 'Gs.pth'))
@classmethod
def load_checkpoint(cls, checkpoint_path, dataset, **kwargs):
checkpoint_path = _find_checkpoint(checkpoint_path)
_is_checkpoint(checkpoint_path, enforce=True)
with open(os.path.join(checkpoint_path, 'kwargs.json'), 'r') as fp:
loaded_kwargs = json.load(fp)
loaded_kwargs.update(**kwargs)
device = torch.device('cpu')
if isinstance(loaded_kwargs['device'], (list, tuple)):
device = torch.device(loaded_kwargs['device'][0])
for name in ['G', 'D']:
fpath = os.path.join(checkpoint_path, (name + '.pth'))
loaded_kwargs[name] = models.load(fpath, map_location=device)
if os.path.exists(os.path.join(checkpoint_path, 'Gs.pth')):
loaded_kwargs['Gs'] = models.load(os.path.join(checkpoint_path, 'Gs.pth'), map_location=(device if (loaded_kwargs['Gs_device'] is None) else torch.device(loaded_kwargs['Gs_device'])))
obj = cls(dataset=dataset, **loaded_kwargs)
for name in ['G_opt', 'D_opt']:
fpath = os.path.join(checkpoint_path, (name + '.pth'))
state_dict = torch.load(fpath, map_location=device)
getattr(obj, name).load_state_dict(state_dict)
return obj |
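# Resume sketch (hedged: the enclosing trainer class name and checkpoint path
# below are hypothetical, not from the source):
# trainer = Trainer.load_checkpoint('checkpoints/10000_2024-01-01_00-00-00', dataset)
# trainer.train(iterations=1000)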
def main(args):
print(args)
split_name = ('dev' if args.dev else 'train')
dataset_break = DatasetBreak(args.qdmr_path, split_name)
dataset_spider = DatasetSpider(args.spider_path, split_name)
if args.input_grounding:
partial_grounding = load_grounding_from_file(args.input_grounding)
else:
partial_grounding = {}
if (args.spider_idx is not None):
qdmr_name = None
for name in dataset_break.names:
idx = dataset_break.get_index_from_name(name)
if (idx == args.spider_idx):
qdmr_name = name
break
assert (qdmr_name is not None), f'Could not find QDMR with index {args.spider_idx}'
qdmr = dataset_break.qdmrs[qdmr_name]
qdmr_grounding = (partial_grounding[qdmr_name] if (qdmr_name in partial_grounding) else None)
print()
print(qdmr_name)
(grounding, _) = compute_grounding(qdmr, qdmr_name, dataset_spider, partial_grounding=qdmr_grounding, verbose=True)
else:
all_grounding = {}
for (qdmr_name, qdmr) in dataset_break:
spider_idx = DatasetBreak.get_index_from_name(qdmr_name)
qdmr_grounding = (partial_grounding[qdmr_name] if (qdmr_name in partial_grounding) else None)
print(qdmr_name, end=' ')
try:
(grounding, message) = compute_grounding(qdmr, qdmr_name, dataset_spider, partial_grounding=qdmr_grounding, verbose=False)
all_grounding[qdmr_name] = grounding
message_list = (qdmr_grounding['MESSAGES'] if (qdmr_grounding and ('MESSAGES' in qdmr_grounding)) else [])
message_list.append(message)
all_grounding[qdmr_name]['MESSAGES'] = message_list
print(message)
except Exception as e:
error_details = handle_exception(e, verbose=False)
print(f"ERROR: {error_details['type']}:{error_details['message']}, file: {error_details['file']}, line {error_details['line_number']}")
all_grounding[qdmr_name] = {'ERRORS': [error_details]}
if args.output_path:
save_grounding_to_file(args.output_path, all_grounding)
check = load_grounding_from_file(args.output_path)
assert_check_grounding_save_load(all_grounding, check) |
@dataclasses.dataclass  # decorator assumed: the field defaults and keyword construction below imply a dataclass-style container
class AlgorithmSummary():
algorithm_qubits: float = _PRETTY_FLOAT
measurements: float = _PRETTY_FLOAT
t_gates: float = _PRETTY_FLOAT
toffoli_gates: float = _PRETTY_FLOAT
rotation_gates: float = _PRETTY_FLOAT
rotation_circuit_depth: float = _PRETTY_FLOAT
def __mul__(self, other: int) -> 'AlgorithmSummary':
if (not isinstance(other, int)):
raise TypeError(f"Multiplication isn't supported between AlgorithmSummary and non integer type {type(other)}")
return AlgorithmSummary(algorithm_qubits=(self.algorithm_qubits * other), measurements=(self.measurements * other), t_gates=(self.t_gates * other), toffoli_gates=(self.toffoli_gates * other), rotation_gates=(self.rotation_gates * other), rotation_circuit_depth=(self.rotation_circuit_depth * other))
def __rmul__(self, other: int) -> 'AlgorithmSummary':
return self.__mul__(other)
def __add__(self, other: 'AlgorithmSummary') -> 'AlgorithmSummary':
if (not isinstance(other, AlgorithmSummary)):
raise TypeError(f"Addition isn't supported between AlgorithmSummary and type {type(other)}")
return AlgorithmSummary(algorithm_qubits=(self.algorithm_qubits + other.algorithm_qubits), measurements=(self.measurements + other.measurements), t_gates=(self.t_gates + other.t_gates), toffoli_gates=(self.toffoli_gates + other.toffoli_gates), rotation_gates=(self.rotation_gates + other.rotation_gates), rotation_circuit_depth=(self.rotation_circuit_depth + other.rotation_circuit_depth))
def __sub__(self, other: 'AlgorithmSummary') -> 'AlgorithmSummary':
if (not isinstance(other, AlgorithmSummary)):
raise TypeError(f"Subtraction isn't supported between AlgorithmSummary and type {type(other)}")
return AlgorithmSummary(algorithm_qubits=(self.algorithm_qubits - other.algorithm_qubits), measurements=(self.measurements - other.measurements), t_gates=(self.t_gates - other.t_gates), toffoli_gates=(self.toffoli_gates - other.toffoli_gates), rotation_gates=(self.rotation_gates - other.rotation_gates), rotation_circuit_depth=(self.rotation_circuit_depth - other.rotation_circuit_depth)) |
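# Usage sketch (hypothetical counts, not from the source): multiplying by an int
# repeats a subroutine's costs; addition composes two routines' costs.
_qpe = AlgorithmSummary(algorithm_qubits=100, measurements=1, t_gates=1000000.0, toffoli_gates=100000.0, rotation_gates=10000.0, rotation_circuit_depth=1000.0)
_total = (10 * _qpe) + AlgorithmSummary(algorithm_qubits=2, measurements=10, t_gates=0, toffoli_gates=0, rotation_gates=0, rotation_circuit_depth=0)
assert _total.t_gates == 10000000.0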
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [filter((lambda x: x), line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(), 'asctime': self.formatTime(record, self.datefmt), 'name': record.name, 'msg': record.msg, 'args': record.args, 'levelname': record.levelname, 'levelno': record.levelno, 'pathname': record.pathname, 'filename': record.filename, 'module': record.module, 'lineno': record.lineno, 'funcname': record.funcName, 'created': record.created, 'msecs': record.msecs, 'relative_created': record.relativeCreated, 'thread': record.thread, 'thread_name': record.threadName, 'process_name': record.processName, 'process': record.process, 'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return json.dumps(message) |
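# Minimal usage sketch (assumed wiring, not from the source): attach the
# formatter to a stream handler so each record is emitted as one JSON document.
_handler = logging.StreamHandler()
_handler.setFormatter(JSONFormatter(datefmt='%Y-%m-%dT%H:%M:%S'))
_log = logging.getLogger('json_demo')
_log.addHandler(_handler)
_log.warning('disk usage at %d%%', 91)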
class InferGroupedEmbeddingsLookup(InferGroupedLookupMixin, BaseEmbeddingLookup[(KJTList, List[torch.Tensor])], TBEToRegisterMixIn):
def __init__(self, grouped_configs_per_rank: List[List[GroupedEmbeddingConfig]], world_size: int, fused_params: Optional[Dict[(str, Any)]]=None, device: Optional[torch.device]=None) -> None:
super().__init__()
self._embedding_lookups_per_rank: List[MetaInferGroupedEmbeddingsLookup] = []
device_type = ('meta' if ((device is not None) and (device.type == 'meta')) else 'cuda')
for rank in range(world_size):
self._embedding_lookups_per_rank.append(MetaInferGroupedEmbeddingsLookup(grouped_configs=grouped_configs_per_rank[rank], device=torch.device(type=device_type, index=rank), fused_params=fused_params))
def get_tbes_to_register(self) -> Dict[(IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig)]:
return get_tbes_to_register_from_iterable(self._embedding_lookups_per_rank) |
class WeightedLottery(Generic[T]):
def __init__(self, items: Iterable[T], weight_key: Callable[([T], int)]):
self.weights: List[int] = []
self.items = list(items)
if (not self.items):
raise ValueError('items must not be empty')
accumulated_weight = 0
for item in self.items:
weight = weight_key(item)
if (weight < 0):
raise ValueError(f'weight for {item!r} must be non-negative')
accumulated_weight += weight
self.weights.append(accumulated_weight)
if (accumulated_weight <= 0):
raise ValueError('at least one item must have weight')
def _pick_index(self) -> int:
winning_ticket = (random.random() * self.weights[(- 1)])
return bisect.bisect(self.weights, winning_ticket)
def pick(self) -> T:
winning_idx = self._pick_index()
return self.items[winning_idx]
def sample(self, sample_size: int) -> Iterable[T]:
if (not (0 <= sample_size < len(self.items))):
raise ValueError('sample size is negative or larger than the population')
already_picked: Set[int] = set()
results: List[Optional[T]] = ([None] * sample_size)
for i in range(sample_size):
picked_index = self._pick_index()
while (picked_index in already_picked):
picked_index = self._pick_index()
results[i] = self.items[picked_index]
already_picked.add(picked_index)
return typing.cast(List[T], results) |
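# Usage sketch (hypothetical data): picks are proportional to the weights, and
# sample() draws distinct items while staying weight-biased.
_posts = [('cats', 90), ('dogs', 9), ('birds', 1)]
_lottery = WeightedLottery(_posts, weight_key=(lambda item: item[1]))
print(_lottery.pick())           # ('cats', 90) roughly 90% of the time
print(list(_lottery.sample(2)))  # two distinct, weight-biased items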
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('membership_id', memberships)
def test_detail(db, client, username, password, project_id, membership_id):
client.login(username=username, password=password)
membership = Membership.objects.filter(project_id=project_id, id=membership_id).first()
url = reverse(urlnames['detail'], args=[project_id, membership_id])
response = client.get(url)
if (membership and (project_id in view_membership_permission_map.get(username, []))):
assert (response.status_code == 200)
assert isinstance(response.json(), dict)
assert (response.json().get('id') == membership_id)
else:
assert (response.status_code == 404) |
class _NeuralNetwork(NeuralNetwork):
def _forward(self, input_data, weights):
batch_size = (input_data.shape[0] if (input_data is not None) else 1)
return np.zeros((batch_size, *self.output_shape))
def _backward(self, input_data, weights):
input_grad = None
batch_size = (input_data.shape[0] if (input_data is not None) else 1)
if (self.num_inputs > 0):
input_grad = np.zeros((batch_size, *self.output_shape, self.num_inputs))
weight_grad = None
if (self.num_weights > 0):
weight_grad = np.zeros((batch_size, *self.output_shape, self.num_weights))
return (input_grad, weight_grad) |
def test_create_hints_item_joke(empty_patches, players_config):
asset_id = 1000
(logbook_node, _, region_list) = _create_region_list(asset_id, PickupIndex(50))
patches = dataclasses.replace(empty_patches, hints={region_list.identifier_for_node(logbook_node): Hint(HintType.JOKE, None)})
rng = MagicMock()
namer = EchoesHintNamer({0: patches}, players_config)
result = hints.create_patches_hints({0: patches}, players_config, region_list, namer, rng)
joke = 'Your Friend Roster is currently empty.'
message = f'&push;&main-color=#45F731;{joke}&pop;'
assert (result[0]['strings'][0] == message)
assert (result == [{'asset_id': asset_id, 'strings': [message, '', message]}]) |
def test_delete_existing_proposal_by_different_author(settings, login, conferences):
client = login[0]
conference = conferences['future']
section = conference.proposal_sections.all()[0]
proposal_type = conference.proposal_types.all()[0]
user = f.create_user()
proposal = f.create_proposal(conference=conference, proposal_section=section, proposal_type=proposal_type, author=user)
kwargs = {'conference_slug': conference.slug, 'slug': proposal.slug}
url = reverse('proposal-delete', kwargs=kwargs)
response = client.post(url)
assert (response.status_code == 403) |
@pytest.mark.parametrize('density,expected', [(0, ((- 1684649.41338), (- 350356.81377), 1684649.41338, 2234551.18559)), (100, ((- 1684649.41338), (- ), 1684649.41338, 2234551.18559))])
def test_transform_bounds_densify(density, expected):
transformer = Transformer.from_crs('EPSG:4326', '+proj=laea +lat_0=45 +lon_0=-100 +x_0=0 +y_0=0 +a=6370997 +b=6370997 +units=m +no_defs')
assert numpy.allclose(transformer.transform_bounds(40, (- 120), 64, (- 80), densify_pts=density), expected) |
class BucketTestCase(unittest.TestCase):
q = Auth(access_key, secret_key)
bucket = BucketManager(q)
def test_list(self):
(ret, eof, info) = self.bucket.list(bucket_name, limit=4)
assert (eof is False)
assert (len(ret.get('items')) == 4)
(ret, eof, info) = self.bucket.list(bucket_name, limit=1000)
print(ret, eof, info)
assert (info.status_code == 200)
def test_buckets(self):
(ret, info) = self.bucket.buckets()
print(info)
assert (bucket_name in ret)
def test_prefetch(self):
(ret, info) = self.bucket.prefetch(bucket_name, 'python-sdk.html', hostscache_dir=hostscache_dir)
print(info)
assert (ret['key'] == 'python-sdk.html')
def test_fetch(self):
(ret, info) = self.bucket.fetch(fetch_url, bucket_name, 'fetch.html', hostscache_dir=hostscache_dir)  # fetch_url is a placeholder; the source URL was elided in the original
print(info)
assert (ret['key'] == 'fetch.html')
assert ('hash' in ret)
def test_fetch_without_key(self):
(ret, info) = self.bucket.fetch(fetch_url, bucket_name, hostscache_dir=hostscache_dir)  # fetch_url is a placeholder; the source URL was elided in the original
print(info)
assert (ret['key'] == ret['hash'])
assert ('hash' in ret)
def test_stat(self):
(ret, info) = self.bucket.stat(bucket_name, 'python-sdk.html')
print(info)
assert ('hash' in ret)
def test_delete(self):
(ret, info) = self.bucket.delete(bucket_name, 'del')
print(info)
assert (ret is None)
assert (info.status_code == 612)
def test_rename(self):
key = ('renameto' + rand_string(8))
self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
key2 = (key + 'move')
(ret, info) = self.bucket.rename(bucket_name, key, key2)
print(info)
assert (ret == {})
(ret, info) = self.bucket.delete(bucket_name, key2)
print(info)
assert (ret == {})
def test_copy(self):
key = ('copyto' + rand_string(8))
(ret, info) = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
print(info)
assert (ret == {})
(ret, info) = self.bucket.delete(bucket_name, key)
print(info)
assert (ret == {})
def test_change_mime(self):
(ret, info) = self.bucket.change_mime(bucket_name, 'python-sdk.html', 'text/html')
print(info)
assert (ret == {})
def test_change_type(self):
target_key = ('copyto' + rand_string(8))
self.bucket.copy(bucket_name, 'copyfrom', bucket_name, target_key)
(ret, info) = self.bucket.change_type(bucket_name, target_key, 1)
print(info)
assert (ret == {})
(ret, info) = self.bucket.stat(bucket_name, target_key)
print(info)
assert ('type' in ret)
self.bucket.delete(bucket_name, target_key)
def test_copy_force(self):
(ret, info) = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, 'copyfrom', force='true')
print(info)
assert (info.status_code == 200)
def test_batch_copy(self):
key = ('copyto' + rand_string(8))
ops = build_batch_copy(bucket_name, {'copyfrom': key}, bucket_name)
(ret, info) = self.bucket.batch(ops)
print(info)
assert (ret[0]['code'] == 200)
ops = build_batch_delete(bucket_name, [key])
(ret, info) = self.bucket.batch(ops)
print(info)
assert (ret[0]['code'] == 200)
def test_batch_copy_force(self):
ops = build_batch_copy(bucket_name, {'copyfrom': 'copyfrom'}, bucket_name, force='true')
(ret, info) = self.bucket.batch(ops)
print(info)
assert (ret[0]['code'] == 200)
def test_batch_move(self):
key = ('moveto' + rand_string(8))
self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
key2 = (key + 'move')
ops = build_batch_move(bucket_name, {key: key2}, bucket_name)
(ret, info) = self.bucket.batch(ops)
print(info)
assert (ret[0]['code'] == 200)
(ret, info) = self.bucket.delete(bucket_name, key2)
print(info)
assert (ret == {})
def test_batch_move_force(self):
(ret, info) = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, 'copyfrom', force='true')
print(info)
assert (info.status_code == 200)
ops = build_batch_move(bucket_name, {'copyfrom': 'copyfrom'}, bucket_name, force='true')
(ret, info) = self.bucket.batch(ops)
print(info)
assert (ret[0]['code'] == 200)
def test_batch_rename(self):
key = ('rename' + rand_string(8))
self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
key2 = (key + 'rename')
ops = build_batch_move(bucket_name, {key: key2}, bucket_name)
(ret, info) = self.bucket.batch(ops)
print(info)
assert (ret[0]['code'] == 200)
(ret, info) = self.bucket.delete(bucket_name, key2)
print(info)
assert (ret == {})
def test_batch_rename_force(self):
(ret, info) = self.bucket.rename(bucket_name, 'copyfrom', 'copyfrom', force='true')
print(info)
assert (info.status_code == 200)
ops = build_batch_rename(bucket_name, {'copyfrom': 'copyfrom'}, force='true')
(ret, info) = self.bucket.batch(ops)
print(info)
assert (ret[0]['code'] == 200)
def test_batch_stat(self):
ops = build_batch_stat(bucket_name, ['python-sdk.html'])
(ret, info) = self.bucket.batch(ops)
print(info)
assert (ret[0]['code'] == 200)
def test_delete_after_days(self):
days = '5'
(ret, info) = self.bucket.delete_after_days(bucket_name, 'invaild.html', days)
assert (info.status_code == 612)
key = ('copyto' + rand_string(8))
(ret, info) = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
(ret, info) = self.bucket.delete_after_days(bucket_name, key, days)
assert (info.status_code == 200)
def test_set_object_lifecycle(self):
key = ('test_set_object_lifecycle' + rand_string(8))
(ret, info) = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
assert (info.status_code == 200)
(ret, info) = self.bucket.set_object_lifecycle(bucket=bucket_name, key=key, to_line_after_days=10, to_archive_ir_after_days=15, to_archive_after_days=20, to_deep_archive_after_days=30, delete_after_days=40)
assert (info.status_code == 200)
def test_set_object_lifecycle_with_cond(self):
key = ('test_set_object_lifecycle_cond' + rand_string(8))
(ret, info) = self.bucket.copy(bucket_name, 'copyfrom', bucket_name, key)
assert (info.status_code == 200)
(ret, info) = self.bucket.stat(bucket_name, key)
assert (info.status_code == 200)
key_hash = ret['hash']
(ret, info) = self.bucket.set_object_lifecycle(bucket=bucket_name, key=key, to_line_after_days=10, to_archive_ir_after_days=15, to_archive_after_days=20, to_deep_archive_after_days=30, delete_after_days=40, cond={'hash': key_hash})
assert (info.status_code == 200)
def test_list_domains(self):
(ret, info) = self.bucket.list_domains(bucket_name)
print(info)
assert (info.status_code == 200)
assert isinstance(ret, list)
@freeze_time('1970-01-01')
def test_invalid_x_qiniu_date(self):
(ret, info) = self.bucket.stat(bucket_name, 'python-sdk.html')
assert (ret is None)
assert (info.status_code == 403)
@freeze_time('1970-01-01')
def test_invalid_x_qiniu_date_with_disable_date_sign(self):
q = Auth(access_key, secret_key, disable_qiniu_timestamp_signature=True)
bucket = BucketManager(q)
(ret, info) = bucket.stat(bucket_name, 'python-sdk.html')
assert ('hash' in ret)
@freeze_time('1970-01-01')
def test_invalid_x_qiniu_date_env(self):
os.environ['DISABLE_QINIU_TIMESTAMP_SIGNATURE'] = 'True'
(ret, info) = self.bucket.stat(bucket_name, 'python-sdk.html')
os.unsetenv('DISABLE_QINIU_TIMESTAMP_SIGNATURE')
assert ('hash' in ret)
@freeze_time('1970-01-01')
def test_invalid_x_qiniu_date_env_be_ignored(self):
os.environ['DISABLE_QINIU_TIMESTAMP_SIGNATURE'] = 'True'
q = Auth(access_key, secret_key, disable_qiniu_timestamp_signature=False)
bucket = BucketManager(q)
(ret, info) = bucket.stat(bucket_name, 'python-sdk.html')
os.unsetenv('DISABLE_QINIU_TIMESTAMP_SIGNATURE')
assert (ret is None)
assert (info.status_code == 403) |
def loss(logits, labels):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_ = tf.add_n(([cross_entropy_mean] + regularization_losses), name='loss_')
return loss_ |
class Env():
total_cards = sorted(((to_char(np.arange(3, 16)) * 4) + ['*', '$']), key=(lambda k: Card.cards_to_value[k]))
def __init__(self, agent_names=('agent1', 'agent2', 'agent3')):
seed = ((id(self) + int(datetime.now().strftime('%Y%m%d%H%M%S%f'))) % (2 ** 32))  # modulus assumed: np.random.seed requires a value below 2**32
np.random.seed(seed)
self.agent_names = agent_names
self.reset()
def get_all_agent_names(self):
return self.agent_names
def get_curr_agent_name(self):
return self.curr_player
def reset(self):
self.histories = {n: [] for n in self.agent_names}
self.player_cards = {n: [] for n in self.agent_names}
self.extra_cards = []
self.lord = None
self.controller = None
self.last_cards_char = []
self.out_cards = [[] for _ in range(3)]
self.curr_player = None
def get_role_ID(self):
curr_idx = self.get_current_idx()
assert (0 <= curr_idx <= 2)
if (curr_idx == 0):
return 2
if (curr_idx == 1):
return 3
return 1
def get_current_idx(self):
return self.agent_names.index(self.curr_player)
def prepare(self):
cards = Env.total_cards.copy()
np.random.shuffle(cards)
self.extra_cards = cards[17:20]
self.player_cards[self.agent_names[0]] = sorted(cards[:20], key=(lambda k: Card.cards_to_value[k]))
self.player_cards[self.agent_names[1]] = sorted(cards[20:37], key=(lambda k: Card.cards_to_value[k]))
self.player_cards[self.agent_names[2]] = sorted(cards[37:], key=(lambda k: Card.cards_to_value[k]))
self.lord = self.agent_names[0]
self.controller = self.lord
self.curr_player = self.lord
def step(self, intention):
self.out_cards[self.agent_names.index(self.curr_player)] = intention
if (len(intention) == 0):
self.curr_player = self.agent_names[((self.agent_names.index(self.curr_player) + 1) % len(self.agent_names))]
return (self.curr_player, False)
else:
self.last_cards_char = intention
self.controller = self.curr_player
for card in intention:
self.player_cards[self.curr_player].remove(card)
self.histories[self.curr_player].extend(intention)
if (len(self.player_cards[self.curr_player]) == 0):
return (self.curr_player, True)
else:
self.curr_player = self.agent_names[((self.agent_names.index(self.curr_player) + 1) % len(self.agent_names))]
return (self.curr_player, False)
def get_last_outcards(self):
return (self.last_cards_char.copy() if (self.curr_player != self.controller) else [])
def get_last_two_cards(self):
return [self.out_cards[((self.agent_names.index(self.curr_player) + 2) % len(self.agent_names))].copy(), self.out_cards[((self.agent_names.index(self.curr_player) + 1) % len(self.agent_names))].copy()]
def get_curr_handcards(self):
return self.player_cards[self.curr_player].copy()
def get_state_prob(self):
total_cards = np.ones([60])
total_cards[53:56] = 0
total_cards[57:60] = 0
player_idx = self.get_current_idx()
remain_cards = (total_cards - Card.char2onehot60((((self.get_curr_handcards() + self.histories[self.agent_names[player_idx]]) + self.histories[self.agent_names[((player_idx + 1) % 3)]]) + self.histories[self.agent_names[((player_idx + 2) % 3)]])))
next_cnt = len(self.player_cards[self.agent_names[((player_idx + 1) % len(self.agent_names))]])
next_next_cnt = len(self.player_cards[self.agent_names[((player_idx + 2) % len(self.agent_names))]])
right_prob_state = (remain_cards * (next_cnt / (next_cnt + next_next_cnt)))
left_prob_state = (remain_cards * (next_next_cnt / (next_cnt + next_next_cnt)))
prob_state = np.concatenate([right_prob_state, left_prob_state])
return prob_state |
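# Smoke-test sketch (assumes Card/to_char from the surrounding module): deal a
# game and let the landlord lead a single card.
_env = Env()
_env.prepare()
_hand = _env.get_curr_handcards()
(_next_player, _done) = _env.step([_hand[0]])
assert ((not _done) and (_next_player != _env.lord))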
def _expr(expr):
node = type(expr)
if (node is ast.Name):
return _build_atomic(expr.id)
if (node is ast.Call):
args = _parse_args(expr.args)
kwargs = _parse_kwargs(expr.keywords)
return _build_predicate(expr.func.id, args, kwargs)
if (node is ast.Subscript):
field_expr = expr.slice.value
if (type(field_expr) is ast.Tuple):
field_expr = field_expr.elts
else:
field_expr = (field_expr,)
base = _expr(expr.value)
base['fields'] = [_expr(e) for e in field_expr]
return base
if (node is ast.BinOp):
op = type(expr.op)
left = _expr(expr.left)
right = _expr(expr.right)
if (op is ast.Mod):
left['predicate'] = right
return left
if (op is ast.BitOr):
return _build_union(left, right)
if (op is ast.BitAnd):
return _build_intersection(left, right)
raise ValueError(('Unknown expression: %r' % node)) |
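# Driving sketch (assumed: the _build_* helpers exist in this module; the
# subscript handling targets Python <= 3.8, where slices are wrapped in ast.Index):
def parse_query(source):
    tree = ast.parse(source, mode='eval')
    return _expr(tree.body)
# e.g. parse_query("user[name, email] % active() | admin")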
def test_L1_bits_connection():
a = CaseConnectBitsConstToOutComp.DUT()
a.elaborate()
a.apply(StructuralRTLIRGenL1Pass(gen_connections(a)))
connections = a.get_metadata(StructuralRTLIRGenL1Pass.connections)
comp = sexp.CurComp(a, 's')
assert (connections == [(sexp.ConstInstance(Bits32(0), 0), sexp.CurCompAttr(comp, 'out'))]) |
def is_ast_cont_with_surrounding_lambda(k):
from pycket import interpreter as i
cs = [i.LetCont, i.LetrecCont, i.BeginCont, i.Begin0Cont, i.Begin0BodyCont, i.WCMKeyCont, i.WCMValCont]
for c in cs:
if isinstance(k, c):
a = k.get_ast()
if (isinstance(a, i.AST) and a.surrounding_lambda):
return True
return False |
@require_torch
class TestTrainerExt(TestCasePlus):
def run_seq2seq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True):
output_dir = self.run_trainer(eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict)
logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
if (not do_eval):
return
eval_metrics = [log for log in logs if ('eval_loss' in log.keys())]
first_step_stats = eval_metrics[0]
if predict_with_generate:
assert ('eval_bleu' in first_step_stats)
last_step_stats = eval_metrics[(- 1)]
assert isinstance(last_step_stats['eval_bleu'], float)
assert (not math.isnan(float(last_step_stats['eval_loss']))), 'eval_loss must not be `nan`'
@require_torch_non_multi_gpu
def test_run_seq2seq_no_dist(self):
self.run_seq2seq_quick()
@require_torch_multi_gpu
def test_run_seq2seq_dp(self):
self.run_seq2seq_quick(distributed=False)
@require_torch_multi_gpu
def test_run_seq2seq_ddp(self):
self.run_seq2seq_quick(distributed=True)
@require_torch_multi_gpu
@require_fairscale
def test_run_seq2seq_sharded_ddp(self):
self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp simple')
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def test_run_seq2seq_sharded_ddp_fp16(self):
self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp simple --fp16')
@require_torch_multi_gpu
@require_fairscale
def test_run_seq2seq_fully_sharded_ddp(self):
self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp zero_dp_2', predict_with_generate=False)
@unittest.skip('Requires an update of the env running those tests')
@require_torch_multi_gpu
@require_fairscale
def test_run_seq2seq_fully_sharded_ddp_fp16(self):
self.run_seq2seq_quick(distributed=True, extra_args_str='--sharded_ddp zero_dp_2 --fp16', predict_with_generate=False)
@require_apex
@require_torch_gpu
def test_run_seq2seq_apex(self):
self.run_seq2seq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')
self.run_seq2seq_quick(distributed=True, extra_args_str='--fp16 --fp16_backend=apex')  # intentionally run a second time; the repeat run used to regress
@parameterized.expand(['base', 'low', 'high', 'mixed'])
@require_torch_multi_gpu
def test_trainer_log_level_replica(self, experiment_id):
experiments = dict(base=dict(extra_args_str='', n_matches=1), low=dict(extra_args_str='--log_level debug --log_level_replica debug', n_matches=2), high=dict(extra_args_str='--log_level error --log_level_replica debug', n_matches=1), mixed=dict(extra_args_str='--log_level error --log_level_replica error', n_matches=0))
data = experiments[experiment_id]
kwargs = dict(distributed=True, predict_with_generate=False, do_eval=False, do_predict=False)
log_info_string = 'Running training'
with CaptureStderr() as cl:
self.run_seq2seq_quick(**kwargs, extra_args_str=data['extra_args_str'])
n_matches = len(re.findall(log_info_string, cl.err))
self.assertEqual(n_matches, data['n_matches'])
def test_run_seq2seq_slow(self):
output_dir = self.run_trainer(eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=0.0003, num_train_epochs=10, distributed=False)
logs = TrainerState.load_from_json(os.path.join(output_dir, 'trainer_state.json')).log_history
eval_metrics = [log for log in logs if ('eval_loss' in log.keys())]
first_step_stats = eval_metrics[0]
last_step_stats = eval_metrics[(- 1)]
assert (first_step_stats['eval_loss'] > last_step_stats['eval_loss']), 'model learned nothing'
assert isinstance(last_step_stats['eval_bleu'], float)
contents = os.listdir(output_dir)
contents = {os.path.basename(p) for p in contents}
assert ('generated_predictions.txt' in contents)
assert ('predict_results.json' in contents)
def run_trainer(self, eval_steps: int, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float=0.003, distributed: bool=False, extra_args_str: str=None, predict_with_generate: bool=True, do_train: bool=True, do_eval: bool=True, do_predict: bool=True):
data_dir = (self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro')
output_dir = self.get_auto_remove_tmp_dir()
args_train = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(eval_steps)}
--group_by_length
--label_smoothing_factor 0.1
--adafactor
--target_lang ro_RO
--source_lang en_XX
'''
args_eval = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(eval_steps)}
'''
args_predict = '\n --do_predict\n '
args = ''
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += '--predict_with_generate'
args = args.split()
if (extra_args_str is not None):
args.extend(extra_args_str.split())
if distributed:
n_gpu = get_gpu_count()
master_port = get_torch_dist_unique_port()
distributed_args = f'''
-m torch.distributed.launch
--nproc_per_node={n_gpu}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
cmd = (([sys.executable] + distributed_args) + args)
execute_subprocess_async(cmd, env=self.get_env())
else:
testargs = (['run_translation.py'] + args)
with patch.object(sys, 'argv', testargs):
main()
return output_dir |
def resnet_retinanet(num_classes, backbone='resnet50', modifier=None, **kwargs):
inputs = keras.layers.Input(shape=(None, None, 3))
if (backbone == 'resnet50'):
resnet = keras_resnet.models.ResNet50(inputs, include_top=False, freeze_bn=True)
elif (backbone == 'resnet101'):
resnet = keras_resnet.models.ResNet101(inputs, include_top=False, freeze_bn=True)
elif (backbone == 'resnet152'):
resnet = keras_resnet.models.ResNet152(inputs, include_top=False, freeze_bn=True)
else:
raise ValueError("Backbone ('{}') is invalid.".format(backbone))
if modifier:
resnet = modifier(resnet)
return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=resnet.outputs[1:], **kwargs) |
def test_it_cannot_solve_other_solver_errors() -> None:
from poetry.mixology.solutions.providers import PythonRequirementSolutionProvider
incompatibility = Incompatibility([Term(Dependency('foo', '^1.0'), True)], NoVersionsCause())
exception = SolverProblemError(SolveFailure(incompatibility))
provider = PythonRequirementSolutionProvider()
assert (not provider.can_solve(exception)) |
def test_dynamic_property_values_update_in_one_instance_leaves_other_unchanged():
generic1 = FakeBase()
generic2 = FakeBase()
generic1.fake_ctrl_values = (0, 33)
generic1.fake_ctrl = 50
generic2.fake_ctrl = 50
assert (generic1.fake_ctrl == 33)
assert (generic2.fake_ctrl == 10) |
class ModelFormTagFieldOptionsTest(TagTestManager, TestCase):
manage_models = [test_models.TagFieldOptionsModel]
def setUpExtra(self):
self.form = test_forms.TagFieldOptionsModelForm
self.model = test_models.TagFieldOptionsModel
@skip_if_mysql
def test_case_sensitive_true(self):
tag_model = self.model.case_sensitive_true.tag_model
self.assertTagModel(tag_model, {'Adam': 0})
form = self.form(data={'case_sensitive_true': 'adam'})
field = form.fields['case_sensitive_true']
self.assertIsInstance(field, tag_forms.TagField)
self.assertEqual(field.tag_options.case_sensitive, True)
self.assertEqual(field.widget.tag_options.case_sensitive, True)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['case_sensitive_true'], ['adam'])
obj = form.save()
self.assertEqual(obj.case_sensitive_true, 'adam')
self.assertTagModel(tag_model, {'Adam': 0, 'adam': 1})
@skip_if_mysql
def test_case_sensitive_false(self):
tag_model = self.model.case_sensitive_false.tag_model
form = self.form(data={'case_sensitive_false': 'adam'})
field = form.fields['case_sensitive_false']
self.assertIsInstance(field, tag_forms.TagField)
self.assertEqual(field.tag_options.case_sensitive, False)
self.assertEqual(field.widget.tag_options.case_sensitive, False)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['case_sensitive_false'], ['adam'])
obj = form.save()
self.assertEqual(obj.case_sensitive_false, 'Adam')
self.assertTagModel(tag_model, {'Adam': 1})
def test_force_lowercase_true(self):
tag_model = self.model.force_lowercase_true.tag_model
form = self.form(data={'force_lowercase_true': 'Adam'})
field = form.fields['force_lowercase_true']
self.assertIsInstance(field, tag_forms.TagField)
self.assertEqual(field.tag_options.force_lowercase, True)
self.assertEqual(field.widget.tag_options.force_lowercase, True)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['force_lowercase_true'], ['adam'])
obj = form.save()
self.assertEqual(obj.force_lowercase_true, 'adam')
self.assertTagModel(tag_model, {'adam': 1})
def test_force_lowercase_false(self):
form = self.form(data={'force_lowercase_false': 'Adam'})
field = form.fields['force_lowercase_false']
self.assertIsInstance(field, tag_forms.TagField)
self.assertEqual(field.tag_options.force_lowercase, False)
self.assertEqual(field.widget.tag_options.force_lowercase, False)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['force_lowercase_false'], ['Adam'])
def test_max_count(self):
form = self.form(data={'max_count': 'one'})
field = form.fields['max_count']
self.assertIsInstance(field, tag_forms.TagField)
self.assertEqual(field.tag_options.max_count, 3)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['max_count'], ['one'])
form = self.form(data={'max_count': 'one, two'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['max_count'], ['one', 'two'])
form = self.form(data={'max_count': 'one, two, three'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['max_count'], sorted(['one', 'two', 'three']))
form = self.form(data={'max_count': 'one, two, three, four'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors.keys()), 1)
self.assertEqual(list(form.errors.keys())[0], 'max_count')
self.assertEqual(len(form.errors['max_count']), 1)
self.assertEqual(form.errors['max_count'][0], 'This field can only have 3 arguments')
def test_max_count_1(self):
    class LocalTestForm(forms.Form):
        tags = tag_forms.TagField(tag_options=tag_models.TagOptions(max_count=1))
    form = LocalTestForm(data={'tags': 'one'})
    self.assertTrue(form.is_valid())
    self.assertEqual(form.cleaned_data['tags'], ['one'])
    form = LocalTestForm(data={'tags': 'one, two'})
    self.assertFalse(form.is_valid())
    self.assertEqual(len(form.errors.keys()), 1)
    self.assertEqual(list(form.errors.keys())[0], 'tags')
    self.assertEqual(len(form.errors['tags']), 1)
    self.assertEqual(form.errors['tags'][0], 'This field can only have 1 argument')
def test_initial_without_autocomplete_initial(self):
tag_model = self.model.initial_string.tag_model
tag_model.objects.create(name='David')
tag_model.objects.create(name='Eric')
tag_model.objects.create(name='Frank')
field = self.form().fields['initial_string']
self.assertSequenceEqual([t.name for t in field.autocomplete_tags], [t.name for t in tag_model.objects.all()])
def test_initial_with_autocomplete_initial(self):
tag_model = self.model.initial_list.tag_model
tag_model.objects.create(name='David')
tag_model.objects.create(name='Eric')
tag_model.objects.create(name='Frank')
field = self.form().fields['initial_list']
self.assertSequenceEqual([t.name for t in field.autocomplete_tags], [t.name for t in tag_model.objects.initial()]) |
class MessageBroker():
def __init__(self, stage):
self.stage = stage
self._messages = []
def broadcast(self, message):
self._messages.append(message)
def get_messages(self):
return self._messages
def mark_completed(self):
self._messages.clear() |
def events_for_close(channel_state: NettingChannelState, block_number: BlockNumber, block_hash: BlockHash) -> List[Event]:
events: List[Event] = []
if (get_status(channel_state) in CHANNEL_STATES_PRIOR_TO_CLOSED):
channel_state.close_transaction = TransactionExecutionStatus(block_number, None, None)
balance_proof = channel_state.partner_state.balance_proof
assert ((balance_proof is None) or isinstance(balance_proof, BalanceProofSignedState)), 'BP is not signed'
close_event = ContractSendChannelClose(canonical_identifier=channel_state.canonical_identifier, balance_proof=balance_proof, triggered_by_block_hash=block_hash)
events.append(close_event)
return events |
def make_migration(name):
try:
with Capturing() as output:
call_command('makemigrations', '--name={}'.format(name), app_name, verbosity=0)
except Exception as e:
print('>> makemigration failed for {}:'.format(name))
print('\n'.join(output))
print('')
raise e
clear_migrations() |
class LetsuploadCo(SimpleDownloader):
__name__ = 'LetsuploadCo'
__type__ = 'downloader'
__version__ = '0.03'
__status__ = 'testing'
__pattern__ = r''  # URL regex elided in the original
__config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
__description__ = 'Letsupload.co downloader plugin'
__license__ = 'GPLv3'
__authors__ = [('gonapo', 'nh189[AT]uranus.uni-freiburg[DOT]de'), ('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]
WAIT_PATTERN = 'var seconds = (\\d+)'
NAME_PATTERN = '<i class="fa fa-file-text"></i>(?P<N>.+?)<'
SIZE_PATTERN = '<i class="fa fa-hdd-o"></i> size : <p>(?P<S>[\\d.,]+) (?P<U>[\\w^_]+)</p>'
OFFLINE_PATTERN = '> File has been removed\\.<'
LINK_FREE_PATTERN = "<a class='btn btn-free' href='(.+?)'"
def setup(self):
self.multi_dl = True
def check_download(self):
check = self.scan_download({'Html file': re.compile(b'\\A\\s*<script type=["\\\']text/javascript["\\\']')})
if (check is not None):
with open(os.fsencode(self.last_download), 'r') as f:
self.data = f.read()
os.remove(self.last_download)
m = re.search('<a .*href="(.+?\\?download_token=\\w+)"', self.data)
if (m is not None):
self.download(m.group(1))
else:
self.log_warning((self._('Check result: ') + check), self._('Waiting 1 minute and retry'))
self.wait(60, reconnect=True)
self.restart(check)
return SimpleDownloader.check_download(self) |
def find_executable_batch_size(function: callable=None, starting_batch_size: int=128, auto_find_batch_size: bool=False):
if (function is None):
return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size, auto_find_batch_size=auto_find_batch_size)
if auto_find_batch_size:
requires_backends(find_executable_batch_size, 'accelerate')
import accelerate.memory_utils as mem_utils
return mem_utils.find_executable_batch_size(function=function, starting_batch_size=starting_batch_size)
return functools.partial(function, batch_size=starting_batch_size) |
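# Usage sketch: with auto_find_batch_size=False the decorator simply binds
# batch_size, so the wrapped function is called with its other arguments only.
@find_executable_batch_size(starting_batch_size=32, auto_find_batch_size=False)
def _train_epoch(model=None, batch_size=None):
    return batch_size
assert _train_epoch() == 32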
def nearest_unequal_elements(dts, dt):
if (not dts.is_unique):
raise ValueError('dts must be unique')
if (not dts.is_monotonic_increasing):
raise ValueError('dts must be sorted in increasing order')
if (not len(dts)):
return (None, None)
sortpos = dts.searchsorted(dt, side='left')
try:
sortval = dts[sortpos]
except IndexError:
return (dts[(- 1)], None)
if (dt < sortval):
lower_ix = (sortpos - 1)
upper_ix = sortpos
elif (dt == sortval):
lower_ix = (sortpos - 1)
upper_ix = (sortpos + 1)
else:
lower_ix = sortpos
upper_ix = (sortpos + 1)
lower_value = (dts[lower_ix] if (lower_ix >= 0) else None)
upper_value = (dts[upper_ix] if (upper_ix < len(dts)) else None)
return (lower_value, upper_value) |
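# Worked example (assumes a pandas DatetimeIndex, matching the searchsorted /
# is_unique usage above): a timestamp strictly between elements returns its two
# neighbors; an exact element is skipped on both sides.
import pandas as pd
_dts = pd.DatetimeIndex(['2024-01-01', '2024-01-03', '2024-01-05'])
print(nearest_unequal_elements(_dts, pd.Timestamp('2024-01-02')))
# (Timestamp('2024-01-01 00:00:00'), Timestamp('2024-01-03 00:00:00'))
print(nearest_unequal_elements(_dts, pd.Timestamp('2024-01-03')))
# (Timestamp('2024-01-01 00:00:00'), Timestamp('2024-01-05 00:00:00'))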
class CoTAttention(nn.Module):
def __init__(self, dim=512, kernel_size=3):
super().__init__()
self.dim = dim
self.kernel_size = kernel_size
self.key_embed = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=(kernel_size // 2), groups=4, bias=False), nn.BatchNorm2d(dim), nn.ReLU())
self.value_embed = nn.Sequential(nn.Conv2d(dim, dim, 1, bias=False), nn.BatchNorm2d(dim))
factor = 4
self.attention_embed = nn.Sequential(nn.Conv2d((2 * dim), ((2 * dim) // factor), 1, bias=False), nn.BatchNorm2d(((2 * dim) // factor)), nn.ReLU(), nn.Conv2d(((2 * dim) // factor), ((kernel_size * kernel_size) * dim), 1))
def forward(self, x):
(bs, c, h, w) = x.shape
k1 = self.key_embed(x)
v = self.value_embed(x).view(bs, c, (- 1))
y = torch.cat([k1, x], dim=1)
att = self.attention_embed(y)
att = att.reshape(bs, c, (self.kernel_size * self.kernel_size), h, w)
att = att.mean(2, keepdim=False).view(bs, c, (- 1))
k2 = (F.softmax(att, dim=(- 1)) * v)
k2 = k2.view(bs, c, h, w)
return (k1 + k2) |
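# Shape check (sketch): the module is channel- and resolution-preserving, since
# key_embed uses a 'same'-padded conv and the attention is applied per pixel.
_x = torch.randn(2, 512, 14, 14)
_cot = CoTAttention(dim=512, kernel_size=3)
assert _cot(_x).shape == _x.shape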
def inherit_signature(c, method_name):
m = getattr(c, method_name)
sig = inspect.signature(m)
params = []
for param in sig.parameters.values():
if ((param.name == 'self') or (param.annotation is not param.empty)):
params.append(param)
continue
for ancestor in inspect.getmro(c):
try:
ancestor_meth = inspect.signature(getattr(ancestor, m.__name__))
except AttributeError:
break
try:
ancestor_param = ancestor_meth.parameters[param.name]
except KeyError:
break
if (ancestor_param.annotation is not param.empty):
param = param.replace(annotation=ancestor_param.annotation)
break
params.append(param)
return_annotation = sig.return_annotation
if (return_annotation is inspect._empty):
for ancestor in inspect.getmro(c):
try:
ancestor_meth = inspect.signature(getattr(ancestor, m.__name__))
except AttributeError:
break
if (ancestor_meth.return_annotation is not inspect._empty):
return_annotation = ancestor_meth.return_annotation
break
return sig.replace(parameters=params, return_annotation=return_annotation) |
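# Example (sketch): the child omits annotations, so they are pulled from the
# nearest annotated ancestor along the MRO.
class _Base:
    def scale(self, factor: float) -> float:
        raise NotImplementedError
class _Child(_Base):
    def scale(self, factor):
        return (factor * 2)
print(inherit_signature(_Child, 'scale'))  # (self, factor: float) -> float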
def form_loads(explode: bool, name: str, schema_type: str, location: Mapping[(str, Any)]) -> Any:
explode_type = (explode, schema_type)
if (explode_type == (False, 'array')):
return split(location[name], separator=',')
elif (explode_type == (True, 'array')):
if (name not in location):
raise KeyError(name)
if isinstance(location, SuportsGetAll):
return location.getall(name)
if isinstance(location, SuportsGetList):
return location.getlist(name)
return location[name]
value = location[name]
if (explode_type == (False, 'object')):
return dict(map(split, split(value, separator=',', step=2)))
elif (explode_type == (True, 'object')):
return dict(map(partial(split, separator='='), split(value, separator='&')))
return value |
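# Worked examples (sketch; `split` is assumed to cut a string on `separator`
# into a list, with `step` grouping key/value pairs):
# form_loads(False, 'ids', 'array', {'ids': '3,4,5'})        # -> ['3', '4', '5']
# form_loads(True, 'point', 'object', {'point': 'x=1&y=2'})  # -> {'x': '1', 'y': '2'}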
.parametrize('available', [True, False])
def test_is_available(available, mocker):
mock = mocker.patch.object(pdfjs, 'get_pdfjs_res', autospec=True)
if available:
mock.return_value = b'foo'
else:
mock.side_effect = pdfjs.PDFJSNotFound('build/pdf.js')
assert (pdfjs.is_available() == available) |
class AttrVI_ATTR_GPIB_ATN_STATE(EnumAttribute):
resources = [(constants.InterfaceType.gpib, 'INTFC')]
py_name = 'atn_state'
visa_name = 'VI_ATTR_GPIB_ATN_STATE'
visa_type = 'ViInt16'
default = NotAvailable
(read, write, local) = (True, False, False)
enum_type = constants.LineState |
class TestOptionMarker():
@pytest.mark.options(debug=False)
def test_not_debug_app(self, app):
assert (not app.debug), 'Ensure the app not in debug mode'
@pytest.mark.options(foo=42)
def test_update_application_config(self, request, app, config):
assert (config['FOO'] == 42)
def test_application_config_teardown(self, config):
assert ('FOO' not in config) |
def create_strategy(name=None):
import logging
from bonobo.execution.strategies.base import Strategy
if isinstance(name, Strategy):
return name
if (name is None):
name = DEFAULT_STRATEGY
logging.debug('Creating execution strategy {!r}...'.format(name))
try:
factory = STRATEGIES[name]
except KeyError as exc:
raise RuntimeError('Invalid strategy {}. Available choices: {}.'.format(repr(name), ', '.join(sorted(STRATEGIES.keys())))) from exc
return factory() |
def create_table(table: str, namespace: Optional[str]=None, catalog: Optional[str]=None, lifecycle_state: Optional[LifecycleState]=None, schema: Optional[Union[(pa.Schema, str, bytes)]]=None, schema_consistency: Optional[Dict[(str, SchemaConsistencyType)]]=None, partition_keys: Optional[List[Dict[(str, Any)]]]=None, primary_keys: Optional[Set[str]]=None, sort_keys: Optional[List[SortKey]]=None, description: Optional[str]=None, properties: Optional[Dict[(str, str)]]=None, permissions: Optional[Dict[(str, Any)]]=None, content_types: Optional[List[ContentType]]=None, replace_existing_table: bool=False, *args, **kwargs) -> TableDefinition:
return _get_catalog(catalog).impl.create_table(table, namespace, lifecycle_state, schema, schema_consistency, partition_keys, primary_keys, sort_keys, description, properties, permissions, content_types, replace_existing_table, *args, **kwargs) |
class CudaRNGStatesTracker():
def __init__(self):
self.states_ = {}
self.seeds_ = set()
def reset(self):
self.states_ = {}
self.seeds_ = set()
def get_states(self):
states = {}
for name in self.states_:
states[name] = self.states_[name]
return states
def set_states(self, states):
self.states_ = states
def add(self, name, seed):
if (seed in self.seeds_):
raise Exception('seed {} already exists'.format(seed))
self.seeds_.add(seed)
if (name in self.states_):
raise Exception('cuda rng state {} already exists'.format(name))
orig_rng_state = torch.cuda.get_rng_state()
torch.cuda.manual_seed(seed)
self.states_[name] = torch.cuda.get_rng_state()
_set_cuda_rng_state(orig_rng_state)
@contextlib.contextmanager
def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME):
if (name not in self.states_):
raise Exception('cuda rng state {} is not added'.format(name))
orig_cuda_rng_state = torch.cuda.get_rng_state()
_set_cuda_rng_state(self.states_[name])
try:
(yield)
finally:
self.states_[name] = torch.cuda.get_rng_state()
_set_cuda_rng_state(orig_cuda_rng_state) |
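# Usage sketch (requires a CUDA device): register a named seed once, then fork
# into that RNG stream so random ops inside the block draw from it without
# disturbing the default generator.
tracker = CudaRNGStatesTracker()
tracker.add('model-parallel-rng', 2718)
with tracker.fork('model-parallel-rng'):
    _noise = torch.rand(4, device='cuda')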
def set_werkzeug_hostname(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
hostname = json.loads(request.form['data'])['hostname']
except Exception:
hostname = None
ret = f(*args, **kwargs)
if hostname:
request.environ['REMOTE_ADDR'] = hostname
return ret
return wrapper |
class MrpcPVP(PVP):
VERBALIZER = {'0': ['Alas'], '1': ['Rather']}
def get_parts(self, example: InputExample) -> FilledPattern:
text_a = self.shortenable(example.text_a)
text_b = self.shortenable(example.text_b)
if (self.pattern_id == 1):
string_list_a = [text_a, '.', self.mask, 'However', ',', text_b]
string_list_b = []
block_flag_a = [0, 1, 0, 0, 0, 0]
block_flag_b = []
assert (len(string_list_a) == len(block_flag_a))
assert (len(string_list_b) == len(block_flag_b))
return (string_list_a, string_list_b, block_flag_a, block_flag_b)
else:
raise ValueError('unknown pattern_id.')
def verbalize(self, label) -> List[str]:
return MrpcPVP.VERBALIZER[label] |
class TestNFP(unittest.TestCase):
def test_enviar(self):
client = SoapClient(wsdl=WSDL, soap_ns='soap12env')
client['Autenticacao'] = SimpleXMLElement((HEADER_XML % ('user', 'password', 'fed_tax_num', 1)))
response = client.Enviar(NomeArquivo='file_name', ConteudoArquivo='content', EnvioNormal=True, Observacoes='')
self.assertEqual(response['EnviarResult'], '206|CNPJ informado invalido')
def test_consultar(self):
client = SoapClient(wsdl=WSDL, soap_ns='soap12env')
client['Autenticacao'] = SimpleXMLElement((HEADER_XML % ('user', 'password', 'fed_tax_num', 1)))
response = client.Consultar(Protocolo='')
self.assertEqual(response['ConsultarResult'], '999|O protocolo informado nao e um numero valido')
def test_retificar(self):
client = SoapClient(wsdl=WSDL, soap_ns='soap12env')
client['Autenticacao'] = SimpleXMLElement((HEADER_XML % ('user', 'password', 'fed_tax_num', 1)))
response = client.Retificar(NomeArquivo='file_name', ConteudoArquivo='content', EnvioNormal=True, Observacoes='')
self.assertEqual(response['RetificarResult'], '206|CNPJ informado invalido') |
class ReplayBuffer():
def __init__(self, max_size=50):
assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'
self.max_size = max_size
self.data = []
def push_and_pop(self, data):
to_return = []
for element in data.data:
element = torch.unsqueeze(element, 0)
if (len(self.data) < self.max_size):
self.data.append(element)
to_return.append(element)
elif (random.uniform(0, 1) > 0.5):
i = random.randint(0, (self.max_size - 1))
to_return.append(self.data[i].clone())
self.data[i] = element
else:
to_return.append(element)
return Variable(torch.cat(to_return)) |
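# Usage sketch (CycleGAN-style): push freshly generated fakes through the
# buffer; the discriminator then sees a mix of new and historical samples.
_buffer = ReplayBuffer(max_size=50)
_fake_batch = Variable(torch.randn(4, 3, 64, 64))
_mixed = _buffer.push_and_pop(_fake_batch)
assert _mixed.shape == _fake_batch.shape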
class PororoStoryDataset(Dataset):
def __init__(self, args, split, tokenizer):
self.args = args
self.root = args.dataset_dir
self.feature_extractor = utils.get_feature_extractor_for_model(args.visual_model, image_size=args.image_size, train=False)
self.image_size = args.image_size
self.tokenizer = tokenizer
self.max_len = args.max_len
self.precision = args.precision
self.retrieval_token_idx = args.retrieval_token_idx
self.gen_token_idx = args.gen_token_idx
self.num_tokens = args.num_tokens
self.num_clip_tokens = args.num_clip_tokens
with open(os.path.join(self.root, 'split.json'), 'r') as f:
self.splits = json.load(f)
image_ids = self.splits[split]
self.followings = pickle.load(open(os.path.join(self.root, 'following_cache3.pkl'), 'rb'))
self.labels = pickle.load(open(os.path.join(self.root, 'labels.pkl'), 'rb'))
self.image_ids = [tid for tid in image_ids if (tid in self.followings)]
self.annotations = json.load(open(os.path.join(self.root, 'cleaned_annotations.json'), 'r'))
self.clip_embs = pickle.load(open(os.path.join(self.root, args.clip_emb_file), 'rb'))
self.font = None
def __len__(self):
return len(self.image_ids)
def __getitem__(self, idx):
all_characters = ['Pororo', 'Loopy', 'Crong', 'Eddy', 'Poby', 'Petty', 'Tongtong', 'Rody', 'Harry', 'pororo', 'loopy', 'crong', 'eddy', 'poby', 'petty', 'tongtong', 'rody', 'harry']
female = ['Petty', 'Loopy', 'petty', 'loopy']
image_id = self.image_ids[idx]
globalIDs = ([image_id] + self.followings[image_id])
ref_flags = []
captions = []
images = []
for idx in range(len(globalIDs)):
ref_flag = False
globalID = globalIDs[idx]
item = self.annotations[globalID]
video_array = np.load(os.path.join(self.root, f'video_frames_sampled_4x/{globalID}.npy'))
n_frames = video_array.shape[0]
random_range = random.randrange(n_frames)
image_array = video_array[random_range]
img = Image.fromarray(image_array)
images.append(utils.get_pixel_values_for_model(self.feature_extractor, img))
cap_idx = int(np.ceil((((random_range + 1) / n_frames) * len(item['captions']))))
cap_idx = min(cap_idx, (len(item['captions']) - 1))
caption = item['captions'][cap_idx]
if (idx == 0):
char_name = [x for x in all_characters if (x in caption)]
if (len(char_name) > 1):
if (len(caption[:(- 1)].split('.')) > 1):
char_name = [x for x in all_characters if (x in caption[:(- 1)].split('.')[(- 1)])]
if (len(char_name) > 0):
immediate_char = char_name[0]
else:
immediate_char = ''
else:
immediate_char = char_name[0]
elif (len(char_name) == 1):
immediate_char = char_name[0]
else:
immediate_char = ''
pre_caption = caption
else:
match_substring = find_start_match(pre_caption, caption)
match_words = match_substring.split(' ')
for (idx, word) in enumerate(match_words):
if ((word.replace(',', '').replace("'", '').strip() not in all_characters) and (not word.__contains__(',')) and (not word.__contains__('and'))):
break
match_words = match_words[:idx]
if (len(match_words) > 1):
replace_string = ' '.join(match_words)
caption = caption.replace(replace_string, 'They', 1)
char_name = [x for x in all_characters if (x in caption)]
if (len(char_name) > 1):
if (len(caption[:(- 1)].split('.')) > 1):
char_name = [x for x in all_characters if (x in caption[:(- 1)].split('.')[(- 1)])]
if (len(char_name) > 0):
char_name = char_name[0]
else:
char_name = ''
else:
char_name = char_name[0]
elif (len(char_name) == 1):
char_name = char_name[0]
else:
char_name = ''
if ((char_name != '') and (char_name == immediate_char)):
if (char_name in female):
replace_char = 'She'
ref_flag = True
else:
replace_char = 'He'
ref_flag = True
pre_caption = caption
caption = caption.replace(char_name, replace_char)
else:
immediate_char = char_name
ref_flags.append(ref_flag)
captions.append(caption)
select_idx = random.sample(range(1, len(captions)), 1)[0]
image_id = globalIDs[select_idx]
caption = ''
for i in range(select_idx):
if self.args.interleave:
caption += ((' Image: <img><ImageHere></img> ' + 'Caption: ') + captions[i])
caption += ((' Caption: ' + captions[select_idx]) + ' Image: ')
for i in range(self.num_tokens):
caption += f'[IMG{i}]'
clip_emb = self.clip_embs[image_id].squeeze()
images = torch.stack(images, dim=0)
return (image_id, images, caption, clip_emb) |
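# Hedged reading of __getitem__ above: it samples one frame per clip in the
# story, pronoun-swaps a character name repeated across consecutive captions
# (He/She, or They for multi-character subjects), and returns (image_id,
# stacked frames, an interleaved "Image: ... Caption: ..." prompt ending in
# [IMG{i}] tokens, clip embedding).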
def train_ubr_model(ubr_training_monitor, epoch_num, sess, eval_iter_num, taker, lr, train_batch_size, rec_model, ubr_model, target_train_file, user_feat_dict_file, item_feat_dict_file, context_dict_file, summary_writer, step, b_num):
loss_step = []
reward_step = []
for i in range(epoch_num):
data_loader = DataLoader_Target(train_batch_size, target_train_file, user_feat_dict_file, item_feat_dict_file, context_dict_file)
t = time.time()
i = 0
for batch_data in data_loader:
(target_batch, label_batch) = batch_data
index_batch = ubr_model.get_index(sess, target_batch)
(seq_batch, seq_len_batch) = taker.take_behave(target_batch, index_batch)
new_batch_data = [seq_batch, seq_len_batch, target_batch, label_batch]
rewards = rec_model.get_reward(sess, new_batch_data)
(loss, reward, summary) = ubr_model.train(sess, target_batch, lr, rewards)
loss_step.append(loss)
reward_step.append(reward)
summary_writer.add_summary(summary, step)
step += 1
if ((step % eval_iter_num) == 0):
avg_loss = (sum(loss_step) / len(loss_step))
avg_reward = (sum(reward_step) / len(reward_step))
ubr_training_monitor['loss'].append(avg_loss)
ubr_training_monitor['reward'].append(avg_reward)
loss_step = []
reward_step = []
print(('TIME UNTIL EVAL: %.4f' % (time.time() - t)))
print(('UBR MODEL STEP %d LOSS: %.4f REWARD: %.4f' % (step, avg_loss, avg_reward)))
t = time.time()
model_name = '{}_{}_{}'.format(ubr_model_type, train_batch_size, lr)
if (not os.path.exists('save_model_{}/{}/{}_{}/{}/'.format(data_set_name, b_num, rec_model_type, ubr_model_type, model_name))):
os.makedirs('save_model_{}/{}/{}_{}/{}/'.format(data_set_name, b_num, rec_model_type, ubr_model_type, model_name))
save_path = 'save_model_{}/{}/{}_{}/{}/ckpt'.format(data_set_name, b_num, rec_model_type, ubr_model_type, model_name)
ubr_model.save(sess, save_path)
return step |
class LinesToReadline():
def __init__(self, lines, start):
self.lines = lines
self.current = start
def readline(self):
if (self.current <= self.lines.length()):
self.current += 1
return (self.lines.get_line((self.current - 1)) + '\n')
return ''
def __call__(self):
return self.readline() |
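# Hedged usage sketch: the `lines` collaborator only needs length() and a
# 1-based get_line(i) (inferred from the `current - 1` arithmetic above).
# _Lines below is a stand-in, not part of the original code.
class _Lines:
    def __init__(self, items):
        self._items = items
    def length(self):
        return len(self._items)
    def get_line(self, i):
        return self._items[i - 1]

reader = LinesToReadline(_Lines(['first', 'second']), start=1)
assert reader.readline() == 'first\n'
assert reader() == 'second\n'   # __call__ delegates to readline
assert reader.readline() == ''  # exhausted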
def get_latest_checkpoint(directory: pathlib.PosixPath, args: argparse.Namespace):
latest_checkpoint = None
checkpoint_files = list(directory.glob(f'*{args.checkpoint_id_pattern}*'))
if checkpoint_files:
latest_checkpoint = 0
for checkpoint_file in checkpoint_files:
checkpoint_file = str(checkpoint_file).split('/')[(- 1)]
checkpoint_epoch = re.findall(args.checkpoint_extract_pattern, checkpoint_file)
checkpoint_epoch = int(checkpoint_epoch[0])
if (checkpoint_epoch > latest_checkpoint):
latest_checkpoint = checkpoint_epoch
pass
else:
print('Unable to parse latest checkpoint information')
return latest_checkpoint |
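# Hedged usage sketch; the pattern values are illustrative assumptions, not
# the project's real defaults. With model_epoch_1.pt and model_epoch_3.pt in
# the directory this returns 3; with no matching files it returns None.
# args = argparse.Namespace(checkpoint_id_pattern='epoch',
#                           checkpoint_extract_pattern=r'epoch_(\d+)')
# latest = get_latest_checkpoint(pathlib.Path('checkpoints'), args)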
def register_dataframe_method(method):
def inner(*args, **kwargs):
class AccessorMethod():
def __init__(self, pyspark_obj):
self._obj = pyspark_obj
@wraps(method)
def __call__(self, *args, **kwargs):
return method(self._obj, *args, **kwargs)
register_dataframe_accessor(method.__name__)(AccessorMethod)
return method
return inner() |
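# Hedged usage sketch, mirroring the pandas_flavor-style registration above
# (register_dataframe_accessor is assumed to be the pyspark.pandas, or a
# compatible, extension hook already imported by this module):
# @register_dataframe_method
# def num_nulls(df, col):
#     return df.filter(df[col].isNull()).count()
#
# df.num_nulls('age')  # dispatches to num_nulls(df, 'age') via the accessor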
def export_quant_table(quantizers: dict, quant_dir: str, format: str='toml'):
table = {}
def save_tensor(name: str, tensor):
np.save(os.path.join(quant_dir, name), tensor.numpy())
return '{}.npy'.format(name)
# create the output directory before save_tensor writes .npy files into it
if (not os.path.exists(quant_dir)):
os.mkdir(quant_dir)
for (key, value) in quantizers.items():
quantizer = value[0]
dump = dict()
sym = quantizer.sym
if (not sym):
dump['zero'] = save_tensor(name=(key + '.zero'), tensor=value[2])
dump['scale'] = save_tensor(name=(key + '.scale'), tensor=value[1])
dump['wbits'] = value[4]
dump['groupsize'] = value[5]
if (value[5] > 0):
dump['group_ids'] = save_tensor(name=(key + '.group_ids'), tensor=value[3])
dump['sym'] = sym
dump['perchannel'] = quantizer.perchannel
table[key] = dump
with open(os.path.join(quant_dir, 'quant.toml'), 'w') as f:
toml.dump(table, f) |
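# Hedged usage sketch: each quantizers value is assumed, from the indexing
# above, to be (quantizer, scale, zero, group_ids, wbits, groupsize).
# import types, torch
# q = types.SimpleNamespace(sym=False, perchannel=True)
# quantizers = {'model.layers.0': (q, torch.ones(8), torch.zeros(8), None, 4, -1)}
# export_quant_table(quantizers, 'quant_out')  # writes quant.toml + .npy tensors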
def test_allows_post_releases_with_post_and_local_min() -> None:
one = Version.parse('3.0.0+local.1')
two = Version.parse('3.0.0-1')
three = Version.parse('3.0.0-1+local.1')
four = Version.parse('3.0.0+local.2')
assert (not VersionRange(min=one, include_min=True).allows(two))
assert VersionRange(min=one, include_min=True).allows(three)
assert VersionRange(min=one, include_min=True).allows(four)
assert (not VersionRange(min=two, include_min=True).allows(one))
assert VersionRange(min=two, include_min=True).allows(three)
assert (not VersionRange(min=two, include_min=True).allows(four))
assert (not VersionRange(min=three, include_min=True).allows(one))
assert (not VersionRange(min=three, include_min=True).allows(two))
assert (not VersionRange(min=three, include_min=True).allows(four))
assert (not VersionRange(min=four, include_min=True).allows(one))
assert (not VersionRange(min=four, include_min=True).allows(two))
assert (not VersionRange(min=four, include_min=True).allows(three)) |
def main():
min_cost_flow = pywrapgraph.SimpleMinCostFlow()
start_nodes = (([0, 0, 0, 0] + [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]) + [5, 6, 7, 8])
end_nodes = (([1, 2, 3, 4] + [5, 6, 7, 8, 5, 6, 7, 8, 5, 6, 7, 8, 5, 6, 7, 8]) + [9, 9, 9, 9])
capacities = (([1, 1, 1, 1] + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + [1, 1, 1, 1])
costs = (([0, 0, 0, 0] + [90, 76, 75, 70, 35, 85, 55, 65, 125, 95, 90, 105, 45, 110, 95, 115]) + [0, 0, 0, 0])
supplies = [4, 0, 0, 0, 0, 0, 0, 0, 0, (- 4)]
source = 0
sink = 9
tasks = 4
for i in range(len(start_nodes)):
min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i], end_nodes[i], capacities[i], costs[i])
for i in range(len(supplies)):
min_cost_flow.SetNodeSupply(i, supplies[i])
if (min_cost_flow.Solve() == min_cost_flow.OPTIMAL):
print('Total cost = ', min_cost_flow.OptimalCost())
print()
for arc in range(min_cost_flow.NumArcs()):
if ((min_cost_flow.Tail(arc) != source) and (min_cost_flow.Head(arc) != sink)):
if (min_cost_flow.Flow(arc) > 0):
print(('Worker %d assigned to task %d. Cost = %d' % (min_cost_flow.Tail(arc), min_cost_flow.Head(arc), min_cost_flow.UnitCost(arc))))
else:
print('There was an issue with the min cost flow input.') |
class TerminalView(View):
def __init__(self, app):
super().__init__(app)
def cleanup(self):
print('Goodbye')
def print(self, txt):
if txt:
print('\x1b[92m{}\x1b[39m'.format(txt))
def detail(self, txt):
print(txt)
def error(self, e):
if (not e):
return
print('\n\n{}'.format(('|' * HEADER_LEN)))
print('\x1b[31m{}\x1b[39m'.format(e))
print(('!' * HEADER_LEN))
def header(self):
if (self.app and self.app.app_state):
print('\n\n\x1b[7m{:^{w}}\x1b[0m'.format(self.app.app_state, w=HEADER_LEN)) |
def test_invalid_next_name_ignored():
packet = b'\x00\x00\x00\x00\x00\x01\x00\x02\x00\x00\x00\x00\x07Android\x05local\x00\x00\xff\x00\x01\xc0\x0c\x00/\x00\x01\x00\x00\x00x\x00\x08\xc02\x00\\x00\x00\x08\xc0\x0c\x00\x01\x00\x01\x00\x00\x00x\x00\x04\xc0\xa8X<'
parsed = r.DNSIncoming(packet)
assert (len(parsed.questions) == 1)
assert (len(parsed.answers()) == 2) |
def inference_detector(model, imgs):
if isinstance(imgs, (list, tuple)):
is_batch = True
else:
imgs = [imgs]
is_batch = False
cfg = model.cfg
device = next(model.parameters()).device
if isinstance(imgs[0], np.ndarray):
cfg = cfg.copy()
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
if isinstance(img, np.ndarray):
data = dict(img=img)
else:
data = dict(img_info=dict(filename=img), img_prefix=None)
data = test_pipeline(data)
datas.append(data)
data = collate(datas, samples_per_gpu=len(imgs))
data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
data['img'] = [img.data[0] for img in data['img']]
if next(model.parameters()).is_cuda:
data = scatter(data, [device])[0]
else:
for m in model.modules():
assert (not isinstance(m, RoIPool)), 'CPU inference with RoIPool is not supported currently.'
with torch.no_grad():
results = model(return_loss=False, rescale=True, **data)
if (not is_batch):
return results[0]
else:
return results |
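# Hedged usage sketch (MMDetection-style API assumed from the code above):
# model = init_detector('cfg.py', 'ckpt.pth', device='cuda:0')
# result = inference_detector(model, 'demo.jpg')           # single image -> one result
# results = inference_detector(model, ['a.jpg', 'b.jpg'])  # list in -> list out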
class NPMRole(Role):
time_format = '%d-%m-%y %H:%M:%S'
key = 'npm-up-to-date'
def provision(self):
self.provision_role(NodeJsRole)
def is_package_installed(self, package_name, version=None):
with settings(warn_only=True):
if version:
package_name = ('%s@%s' % (package_name, version))
return (package_name in self.execute(("npm --global list | egrep '%s'" % package_name), stdout=False, sudo=True))
def ensure_package_installed(self, package_name, version=None, stdout=False, sudo=True):
if (not self.is_package_installed(package_name, version)):
if version:
package_name = ('%s@%s' % (package_name, version))
self.log(('%s is not installed (via NPM)! Installing...' % package_name))
self.execute(('npm install --global %s' % package_name), stdout=stdout, sudo=sudo)
self.log(('%s is installed (via NPM).' % package_name))
return True
return False |
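# Hedged usage sketch (provy-style role, assumed from the provision/execute
# API above); versioned installs use npm's package@version form:
# class MyServer(Role):
#     def provision(self):
#         with self.using(NPMRole) as npm:
#             npm.ensure_package_installed('coffee-script', version='1.6.3')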
def upsampleG(fieldmap, activation_data, shape=None):
(offset, size, step) = fieldmap
input_count = activation_data.shape[0]
if (shape is None):
shape = upsampled_shape(fieldmap, activation_data.shape[1:])
activations = numpy.zeros(((input_count,) + shape))
activations[((slice(None),) + centered_slice(fieldmap, activation_data.shape[1:]))] = (activation_data * numpy.prod(step))
blurred = gaussian_filter(activations, sigma=((0,) + tuple(((t // 1.414) for (o, s, t) in zip(*fieldmap)))), mode='constant')
return blurred |
def tokenize_stories(stories, add_speaker):
(total_count, avg_sum_len, avg_context_len, avg_sum_sent, avg_context_sent) = (0, 0.0, 0.0, 0.0, 0.0)
speaker_count = {}
with open(stories, 'r') as f:
data = json.load(f)
processed_data = []
for sample in data:
total_count += 1
sum = sample['summary']
sum = sum.lower()
sum = nltk.word_tokenize(sum)
avg_sum_len += len(sum)
sum_sents = []
while (len(sum) > 0):
try:
fst_period_idx = sum.index('.')
except ValueError:
fst_period_idx = len(sum)
sent = sum[:(fst_period_idx + 1)]
sum = sum[(fst_period_idx + 1):]
sum_sents.append(' '.join(sent))
avg_sum_sent += len(sum_sents)
context = sample['dialogue']
id = sample['id']
context = context.lower()
context = re.sub('\r\n', '\n', context)
context = re.split('\n', context)
avg_context_sent += len(context)
context = [fix_missing_period(s) for s in context]
context = [nltk.word_tokenize(s) for s in context]
for c in context:
avg_context_len += len(c)
context = [' '.join(s) for s in context]
if (len(context) > 1):
if add_speaker:
final_context = []
speakers = get_speaker(context)
if (len(speakers) > 10):
print(context, sum_sents, speakers)
exit()
if (len(speakers) not in speaker_count.keys()):
speaker_count[len(speakers)] = 1
else:
speaker_count[len(speakers)] += 1
for sent in context:
if (sent.strip() == ''):
continue
sent += ' |'
for s in speakers:
if (sent[:len(s)] != s):
sent += (' ' + s)
final_context.append(sent)
processed_data.append([sum_sents, final_context])
else:
processed_data.append([sum_sents, context])
print(processed_data[0])
print('total count: ', total_count)
print('avg sum len: ', (avg_sum_len / total_count))
print('avg sum sent: ', (avg_sum_sent / total_count))
print('avg context len: ', (avg_context_len / total_count))
print('avg context sent: ', (avg_context_sent / total_count))
print(speaker_count)
return processed_data |
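# Hedged input sketch, inferred from the field accesses above: each sample in
# the JSON list is assumed to look like a SAMSum-style record, e.g.
# {"id": "dlg-1", "summary": "Amanda baked cookies ...",
#  "dialogue": "amanda: i baked cookies\r\njerry: great!"}
# and each output pair is [summary_sentences, context_lines].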
def test_class_method_inherited() -> None:
nodes_ = builder.extract_node('\n class A:\n @classmethod\n def method(cls):\n return cls\n\n class B(A):\n pass\n\n A().method() #@\n A.method() #@\n\n B().method() #@\n B.method() #@\n ')
expected_names = ['A', 'A', 'B', 'B']
for (node, expected) in zip(nodes_, expected_names):
assert isinstance(node, nodes.NodeNG)
inferred = node.inferred()
assert (len(inferred) == 1)
assert isinstance(inferred[0], nodes.ClassDef)
assert (inferred[0].name == expected) |
def get_args():
parser = argparse.ArgumentParser(description='This script creates the\n text form of a subword lexicon FST to be compiled by fstcompile using\n the appropriate symbol tables (phones.txt and words.txt). It will mostly\n be invoked indirectly via utils/prepare_lang_subword.sh. The output\n goes to the stdout. This script is the subword version of make_lexicon_fst.py.\n It only allows optional silence to appear after end-subword or singleton-subword,\n (i.e., subwords without separator). In this version we do not support\n pronunciation probability. (i.e., pron-prob = 1.0)')
parser.add_argument('--sil-phone', type=str, help="Text form of\n optional-silence phone, e.g. 'SIL'. See also the --sil-prob option.")
parser.add_argument('--sil-prob', type=float, default=0.0, help='Probability\n of silence between words (including the beginning and end of word sequence).\n Must be in range [0.0, 1.0). This refer to the optional silence inserted by\n the lexicon; see the --sil-phone option.')
parser.add_argument('--sil-disambig', type=str, help='Disambiguation symbol\n to disambiguate silence, e.g. #5. Will only be supplied if you are creating \n the version of L.fst with disambiguation symbols, intended for use with cyclic \n G.fst. This symbol was introduced to fix a rather obscure source of nondeterminism \n of CLG.fst, that has to do with reordering of disambiguation symbols and phone symbols.')
parser.add_argument('--position-dependent', action='store_true', help='Whether \n the input lexicon is position-dependent.')
parser.add_argument('--separator', type=str, default='', help='Separator\n indicates the position of a subword in a word.\n Subword followed by separator can only appear at the beginning or middle of a word.\n Subword without separator can only appear at the end of a word or is a word itself.\n E.g. "international -> al";\n "nation -> nation"\n The separator should match the separator used in the input lexicon.')
parser.add_argument('lexiconp', type=str, help="Filename of lexicon with\n pronunciation probabilities (normally lexiconp.txt), with lines of the\n form 'subword prob p1 p2...', e.g. 'a, 1.0 ay'")
args = parser.parse_args()
return args |
def main():
global FLAGS
with open(FLAGS.cluster_spec_file, 'r') as fp:
cluster_spec_str = json.load(fp)
config = tf.ConfigProto()
if (FLAGS.job_name == 'ps'):
config.inter_op_parallelism_threads = 768
config.intra_op_parallelism_threads = 0
config.device_count['GPU'] = 0
cluster = tf.train.ClusterSpec(cluster_spec_str)
print('::: Starting {}->{}'.format(FLAGS.job_name, FLAGS.task_id), flush=True)
server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_id, config=config)
server.join() |
class App(models.Model):
module = models.CharField(max_length=100, unique=True)
active = models.BooleanField(default=False)
class Meta():
app_label = 'rapidsms'
def __str__(self):
return self.module
def __repr__(self):
return ('<%s: %s>' % (type(self).__name__, self)) |
class TestHypenation(unittest.TestCase):
def test_hypenation(self):
assert (hyphenation('begegnen', Hyphenator('de_DE')) == ['be', 'geg', 'nen'])
assert (hyphenation(".b,e~g'eg*nen, ", Hyphenator('de_DE')) == ['.b,e', "~g'eg", '*nen, '])
assert (hyphenation('Abend, ', Hyphenator('de_AT')) == None)
assert (hyphenation('Abend.', Hyphenator('de_AT')) == None)
@mock.patch('hyphen.dictools.list_installed')
def test_language_check_has_installed_language(self, mock_list_installed):
mock_list_installed.return_value = ['de', 'de_DE', 'en', 'en_US', 'en_GB']
assert (language_check('en') == 'en_US')
assert (language_check('de') == 'de_DE')
@mock.patch('hyphen.dictools.list_installed')
def test_language_check_not_installed_language(self, mock_list_installed):
mock_list_installed.return_value = []
assert (language_check('fr') == 'fr_FR')
assert (language_check('de') == 'de')
assert (language_check('none') == None) |
class VGG(tf.keras.Model):
def __init__(self, vgg_name, num_classes, weight_decay):
super(VGG, self).__init__()
self.vgg_name = vgg_name
self.num_classes = num_classes
self.wd = weight_decay
self.convlayers = self._make_convlayers(cfg[vgg_name])
self.fc_layers = self._make_fc_layers(num_classes)
def _make_convlayers(self, cfg):
layers = []
for x in cfg:
if (x == 'M'):
layers.append(tf.keras.layers.MaxPooling2D((2, 2)))
else:
layers.append(tf.keras.layers.Conv2D(x, (3, 3), padding='same', kernel_regularizer=regularizers.l2(self.wd)))
channel_axis = (1 if (K.image_data_format() == 'channels_first') else (- 1))
layers.append(tf.keras.layers.BatchNormalization(axis=channel_axis))
layers.append(tf.keras.layers.Activation('relu'))
layers.append(tf.keras.layers.AveragePooling2D(pool_size=(1, 1), strides=1))
return layers
def _make_fc_layers(self, num_classes):
layers = []
layers.append(tf.keras.layers.Flatten())
layers.append(tf.keras.layers.Dense(self.num_classes, kernel_regularizer=regularizers.l2(self.wd)))
return layers
def call(self, inputs):
prev_out = inputs
for layer in self.convlayers:
prev_out = layer(prev_out)
for layer in self.fc_layers:
prev_out = layer(prev_out)
return tf.nn.softmax(prev_out) |
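# Hedged usage sketch: `cfg` (referenced in __init__ above) is assumed to be
# the usual VGG spec mapping, e.g.
# cfg = {'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M']}
# model = VGG('VGG11', num_classes=10, weight_decay=5e-4)
# probs = model(tf.random.normal((2, 32, 32, 3)))  # shape (2, 10), rows sum to 1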
@DATASETS.register_module()
class RawframeDataset(BaseDataset):
def __init__(self, ann_file, pipeline, data_prefix=None, test_mode=False, filename_tmpl='img_{:05}.jpg', with_offset=False, multi_class=False, num_classes=None, start_index=1, modality='RGB'):
self.filename_tmpl = filename_tmpl
self.with_offset = with_offset
super().__init__(ann_file, pipeline, data_prefix, test_mode, multi_class, num_classes, start_index, modality)
def load_annotations(self):
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
frame_dir = line_split[idx]
if (self.data_prefix is not None):
frame_dir = osp.join(self.data_prefix, frame_dir)
video_info['frame_dir'] = frame_dir
idx += 1
if self.with_offset:
video_info['offset'] = int(line_split[idx])
video_info['total_frames'] = int(line_split[(idx + 1)])
idx += 2
else:
video_info['total_frames'] = int(line_split[idx])
idx += 1
label = [int(x) for x in line_split[idx:]]
assert len(label), f'missing label in line: {line}'
if self.multi_class:
assert (self.num_classes is not None)
onehot = torch.zeros(self.num_classes)
onehot[label] = 1.0
video_info['label'] = onehot
else:
assert (len(label) == 1)
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
def prepare_train_frames(self, idx):
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
return self.pipeline(results)
def prepare_test_frames(self, idx):
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
return self.pipeline(results)
def evaluate(self, results, metrics='top_k_accuracy', topk=(1, 5), logger=None):
if (not isinstance(results, list)):
raise TypeError(f'results must be a list, but got {type(results)}')
assert (len(results) == len(self)), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'
if (not isinstance(topk, (int, tuple))):
raise TypeError(f'topk must be int or tuple of int, but got {type(topk)}')
if isinstance(topk, int):
topk = (topk,)
metrics = (metrics if isinstance(metrics, (list, tuple)) else [metrics])
allowed_metrics = ['top_k_accuracy', 'mean_class_accuracy', 'mean_average_precision']
for metric in metrics:
if (metric not in allowed_metrics):
raise KeyError(f'metric {metric} is not supported')
eval_results = {}
gt_labels = [ann['label'] for ann in self.video_infos]
for metric in metrics:
msg = f'Evaluating {metric}...'
if (logger is None):
msg = ('\n' + msg)
print_log(msg, logger=logger)
if (metric == 'top_k_accuracy'):
top_k_acc = top_k_accuracy(results, gt_labels, topk)
log_msg = []
for (k, acc) in zip(topk, top_k_acc):
eval_results[f'top{k}_acc'] = acc
log_msg.append(f'\ntop{k}_acc\t{acc:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if (metric == 'mean_class_accuracy'):
mean_acc = mean_class_accuracy(results, gt_labels)
eval_results['mean_class_accuracy'] = mean_acc
log_msg = f'\nmean_acc\t{mean_acc:.4f}'
print_log(log_msg, logger=logger)
continue
if (metric == 'mean_average_precision'):
gt_labels = [label.cpu().numpy() for label in gt_labels]
mAP = mean_average_precision(results, gt_labels)
eval_results['mean_average_precision'] = mAP
log_msg = f'\nmean_average_precision\t{mAP:.4f}'
print_log(log_msg, logger=logger)
continue
return eval_results |
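# Hedged annotation-format sketch, inferred from load_annotations above.
# Each line of the txt annotation file is:
#   <frame_dir> <total_frames> <label> [<label> ...]           # default
#   <frame_dir> <offset> <total_frames> <label> [<label> ...]  # with_offset=True
# e.g. "some_video/frames 120 3"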
class VisaIOError(Error):
def __init__(self, error_code: int) -> None:
(abbreviation, description) = completion_and_error_messages.get(error_code, ('?', 'Unknown code.'))
super(VisaIOError, self).__init__(('%s (%d): %s' % (abbreviation, error_code, description)))
self.error_code = error_code
self.abbreviation = abbreviation
self.description = description
def __reduce__(self) -> Tuple[type, Tuple[int]]:
return (VisaIOError, (self.error_code,)) |
class FixHasKey(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "\n anchor=power<\n before=any+\n trailer< '.' 'has_key' >\n trailer<\n '('\n ( not(arglist | argument<any '=' any>) arg=any\n | arglist<(not argument<any '=' any>) arg=any ','>\n )\n ')'\n >\n after=any*\n >\n |\n negation=not_test<\n 'not'\n anchor=power<\n before=any+\n trailer< '.' 'has_key' >\n trailer<\n '('\n ( not(arglist | argument<any '=' any>) arg=any\n | arglist<(not argument<any '=' any>) arg=any ','>\n )\n ')'\n >\n >\n >\n "
def transform(self, node, results):
assert results
syms = self.syms
if ((node.parent.type == syms.not_test) and self.pattern.match(node.parent)):
return None
negation = results.get('negation')
anchor = results['anchor']
prefix = node.prefix
before = [n.clone() for n in results['before']]
arg = results['arg'].clone()
after = results.get('after')
if after:
after = [n.clone() for n in after]
if (arg.type in (syms.comparison, syms.not_test, syms.and_test, syms.or_test, syms.test, syms.lambdef, syms.argument)):
arg = parenthesize(arg)
if (len(before) == 1):
before = before[0]
else:
before = pytree.Node(syms.power, before)
before.prefix = ' '
n_op = Name('in', prefix=' ')
if negation:
n_not = Name('not', prefix=' ')
n_op = pytree.Node(syms.comp_op, (n_not, n_op))
new = pytree.Node(syms.comparison, (arg, n_op, before))
if after:
new = parenthesize(new)
new = pytree.Node(syms.power, ((new,) + tuple(after)))
if (node.parent.type in (syms.comparison, syms.expr, syms.xor_expr, syms.and_expr, syms.shift_expr, syms.arith_expr, syms.term, syms.factor, syms.power)):
new = parenthesize(new)
new.prefix = prefix
return new |
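# Illustrative before/after for this 2to3 fixer:
#   d.has_key(k)          ->  k in d
#   not d.has_key(k)      ->  k not in d
#   d.has_key(k) and x    ->  (k in d) and x   # parenthesized when embedded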
class Conv1d(Mapper):
def __init__(self, num_filters, filter_size, keep_probs, activation='relu'):
self.keep_probs = keep_probs
self.num_filters = num_filters
self.filter_size = filter_size
self.activation = activation
def apply(self, is_train, x, mask=None):
num_channels = x.get_shape()[3]
filter_ = tf.get_variable('conv1d/filters', shape=[1, self.filter_size, num_channels, self.num_filters], dtype='float')
bias = tf.get_variable('conv1d/bias', shape=[self.num_filters], dtype='float')
strides = [1, 1, 1, 1]
if (self.keep_probs < 1.0):
x = dropout(x, self.keep_probs, is_train)
fn = get_keras_activation(self.activation)
return fn((tf.nn.conv2d(x, filter_, strides, 'VALID') + bias)) |
class KernelInclude(Include):
def run(self):
path = os.path.realpath(os.path.expandvars(self.arguments[0]))
if path.startswith((os.sep + 'etc')):
raise self.severe(('Problems with "%s" directive, prohibited path: %s' % (self.name, path)))
self.arguments[0] = path
return self._run()
def _run(self):
if (not self.state.document.settings.file_insertion_enabled):
raise self.warning(('"%s" directive disabled.' % self.name))
source = self.state_machine.input_lines.source(((self.lineno - self.state_machine.input_offset) - 1))
source_dir = os.path.dirname(os.path.abspath(source))
path = directives.path(self.arguments[0])
if (path.startswith('<') and path.endswith('>')):
path = os.path.join(self.standard_include_path, path[1:(- 1)])
path = os.path.normpath(os.path.join(source_dir, path))
path = nodes.reprunicode(path)
encoding = self.options.get('encoding', self.state.document.settings.input_encoding)
e_handler = self.state.document.settings.input_encoding_error_handler
tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
try:
self.state.document.settings.record_dependencies.add(path)
include_file = io.FileInput(source_path=path, encoding=encoding, error_handler=e_handler)
except UnicodeEncodeError as error:
raise self.severe(('Problems with "%s" directive path:\nCannot encode input file path "%s" (wrong locale?).' % (self.name, SafeString(path))))
except IOError as error:
raise self.severe(('Problems with "%s" directive path:\n%s.' % (self.name, ErrorString(error))))
startline = self.options.get('start-line', None)
endline = self.options.get('end-line', None)
try:
if (startline or (endline is not None)):
lines = include_file.readlines()
rawtext = ''.join(lines[startline:endline])
else:
rawtext = include_file.read()
except UnicodeError as error:
raise self.severe(('Problem with "%s" directive:\n%s' % (self.name, ErrorString(error))))
after_text = self.options.get('start-after', None)
if after_text:
after_index = rawtext.find(after_text)
if (after_index < 0):
raise self.severe(('Problem with "start-after" option of "%s" directive:\nText not found.' % self.name))
rawtext = rawtext[(after_index + len(after_text)):]
before_text = self.options.get('end-before', None)
if before_text:
before_index = rawtext.find(before_text)
if (before_index < 0):
raise self.severe(('Problem with "end-before" option of "%s" directive:\nText not found.' % self.name))
rawtext = rawtext[:before_index]
include_lines = statemachine.string2lines(rawtext, tab_width, convert_whitespace=True)
if ('literal' in self.options):
if (tab_width >= 0):
text = rawtext.expandtabs(tab_width)
else:
text = rawtext
literal_block = nodes.literal_block(rawtext, source=path, classes=self.options.get('class', []))
literal_block.line = 1
self.add_name(literal_block)
if ('number-lines' in self.options):
try:
startline = int((self.options['number-lines'] or 1))
except ValueError:
raise self.error(':number-lines: with non-integer start value')
endline = (startline + len(include_lines))
if text.endswith('\n'):
text = text[:(- 1)]
tokens = NumberLines([([], text)], startline, endline)
for (classes, value) in tokens:
if classes:
literal_block += nodes.inline(value, value, classes=classes)
else:
literal_block += nodes.Text(value, value)
else:
literal_block += nodes.Text(text, text)
return [literal_block]
if ('code' in self.options):
self.options['source'] = path
codeblock = CodeBlock(self.name, [self.options.pop('code')], self.options, include_lines, self.lineno, self.content_offset, self.block_text, self.state, self.state_machine)
return codeblock.run()
self.state_machine.insert_input(include_lines, path)
return [] |
def flatten_nested_unions(types: list[RType]) -> list[RType]:
if (not any((isinstance(t, RUnion) for t in types))):
return types
flat_items: list[RType] = []
for t in types:
if isinstance(t, RUnion):
flat_items.extend(flatten_nested_unions(t.items))
else:
flat_items.append(t)
return flat_items |
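# Hedged example with mypyc IR types (names assumed from mypyc.ir.rtypes):
# flatten_nested_unions([int_rprimitive,
#                        RUnion([str_rprimitive, RUnion([none_rprimitive])])])
# -> [int_rprimitive, str_rprimitive, none_rprimitive]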
class FeatureExtraction(torch.nn.Module):
def __init__(self, train_fe=False, feature_extraction_cnn='vgg', normalization=True, last_layer='', use_cuda=True):
super(FeatureExtraction, self).__init__()
self.normalization = normalization
if (feature_extraction_cnn == 'vgg'):
self.model = models.vgg16(pretrained=True)
vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'pool5']
if (last_layer == ''):
last_layer = 'pool4'
last_layer_idx = vgg_feature_layers.index(last_layer)
self.model = nn.Sequential(*list(self.model.features.children())[:(last_layer_idx + 1)])
if (feature_extraction_cnn == 'resnet101'):
self.model = models.resnet101(pretrained=True)
resnet_feature_layers = ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4']
if (last_layer == ''):
last_layer = 'layer3'
last_layer_idx = resnet_feature_layers.index(last_layer)
resnet_module_list = [self.model.conv1, self.model.bn1, self.model.relu, self.model.maxpool, self.model.layer1, self.model.layer2, self.model.layer3, self.model.layer4]
self.model = nn.Sequential(*resnet_module_list[:(last_layer_idx + 1)])
if (feature_extraction_cnn == 'resnet50'):
self.model = models.resnet50(pretrained=True)
resnet_feature_layers = ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4']
if (last_layer == ''):
last_layer = 'layer3'
last_layer_idx = resnet_feature_layers.index(last_layer)
resnet_module_list = [self.model.conv1, self.model.bn1, self.model.relu, self.model.maxpool, self.model.layer1, self.model.layer2, self.model.layer3, self.model.layer4]
self.model = nn.Sequential(*resnet_module_list[:(last_layer_idx + 1)])
if (feature_extraction_cnn == 'resnet50_scratch'):
print('resnet50_scratch')
self.model = models.resnet50(pretrained=False)
resnet_feature_layers = ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4']
if (last_layer == ''):
last_layer = 'layer3'
last_layer_idx = resnet_feature_layers.index(last_layer)
resnet_module_list = [self.model.conv1, self.model.bn1, self.model.relu, self.model.maxpool, self.model.layer1, self.model.layer2, self.model.layer3, self.model.layer4]
self.model = nn.Sequential(*resnet_module_list[:(last_layer_idx + 1)])
if (feature_extraction_cnn == 'resnet101_v2'):
self.model = models.resnet101(pretrained=True)
self.model = nn.Sequential(*list(self.model.children())[:(- 3)])
if (feature_extraction_cnn == 'densenet201'):
self.model = models.densenet201(pretrained=True)
self.model = nn.Sequential(*list(self.model.features.children())[:(- 4)])
if (not train_fe):
for param in self.model.parameters():
param.requires_grad = False
def forward(self, image_batch):
features = self.model(image_batch)
if self.normalization:
features = featureL2Norm(features)
return features |
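# Hedged usage sketch:
# fe = FeatureExtraction(feature_extraction_cnn='vgg', last_layer='pool4')
# feats = fe(torch.randn(1, 3, 240, 240))  # L2-normalized pool4 feature map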
class TagReader():
label2id_map = {'<START>': 0}
@classmethod
def read_inst(cls, file, is_labeled, number, opinion_offset):
insts = []
inputs = []
outputs = []
total_p = 0
original_p = 0
f = open(file, 'r', encoding='utf-8')
for line in f:
line = line.strip()
line = line.split('####')
input = line[0].split()
t_output = line[1].split()
o_output = line[2].split()
raw_pairs = eval(line[3])
output = ['O' for x in range(len(input))]
polarity = [0 for x in range(len(input))]
for (i, t) in enumerate(t_output):
t = t.split('=')[1]
if (t != 'O'):
output[i] = t
output_t = cls.ot2bieos_ts(output)
output = ['O' for x in range(len(input))]
for i in range(len(output)):
if (output_t[i] != 'O'):
output[i] = output_t[i].split('-')[0]
new_raw_pairs = []
for new_pair in raw_pairs:
opinion_s = new_pair[1][0]
opinion_e = new_pair[1][(- 1)]
target_s = new_pair[0][0]
target_e = new_pair[0][(- 1)]
if (new_pair[2] == 'NEG'):
polarity = 2
elif (new_pair[2] == 'POS'):
polarity = 1
else:
polarity = 0
if (target_s < opinion_s):
dire = 1
new_raw_pairs.append(([target_s, target_e], polarity, dire, (opinion_s - target_s), (opinion_e - target_s)))
else:
dire = 0
new_raw_pairs.append(([target_s, target_e], polarity, dire, (target_s - opinion_e), (target_s - opinion_s)))
new_raw_pairs.sort(key=(lambda x: x[0][0]))
original_p += len(raw_pairs)
if is_labeled:
new_pairs = []
opinion_idxs = []
remove_idxs = []
for pair in new_raw_pairs:
if ((pair[(- 1)] < opinion_offset) and (0 < pair[(- 2)] <= pair[(- 1)])):
new_pairs.append(pair)
opinion_idxs.extend(list(range(pair[0][0], (pair[0][1] + 1))))
else:
remove_idxs.extend(list(range(pair[0][0], (pair[0][(- 1)] + 1))))
for idx in remove_idxs:
if (idx not in opinion_idxs):
output[idx] = 'O'
else:
new_pairs = new_raw_pairs
output = output
total_p += len(new_pairs)
output = (output, new_pairs)
if (len(new_pairs) > 0):
inst = LinearInstance((len(insts) + 1), 1, input, output)
for label in output[0]:
if ((not (label in TagReader.label2id_map)) and is_labeled):
output_id = len(TagReader.label2id_map)
TagReader.label2id_map[label] = output_id
if is_labeled:
inst.set_labeled()
else:
inst.set_unlabeled()
insts.append(inst)
if ((len(insts) >= number) and (number > 0)):
break
print('# of original triplets: ', original_p)
print('# of triplets for current setup: ', total_p)
return insts
@staticmethod
def ot2bieos_ts(ts_tag_sequence):
n_tags = len(ts_tag_sequence)
new_ts_sequence = []
prev_pos = '$$$'
for i in range(n_tags):
cur_ts_tag = ts_tag_sequence[i]
if (cur_ts_tag == 'O'):
new_ts_sequence.append('O')
cur_pos = 'O'
else:
(cur_pos, cur_sentiment) = cur_ts_tag.split('-')
if (cur_pos != prev_pos):
if (i == (n_tags - 1)):
new_ts_sequence.append(('S-%s' % cur_sentiment))
else:
next_ts_tag = ts_tag_sequence[(i + 1)]
if (next_ts_tag == 'O'):
new_ts_sequence.append(('S-%s' % cur_sentiment))
else:
new_ts_sequence.append(('B-%s' % cur_sentiment))
elif (i == (n_tags - 1)):
new_ts_sequence.append(('E-%s' % cur_sentiment))
else:
next_ts_tag = ts_tag_sequence[(i + 1)]
if (next_ts_tag == 'O'):
new_ts_sequence.append(('E-%s' % cur_sentiment))
else:
new_ts_sequence.append(('I-%s' % cur_sentiment))
prev_pos = cur_pos
return new_ts_sequence
@staticmethod
def ot2bieos_op(ts_tag_sequence):
n_tags = len(ts_tag_sequence)
new_ts_sequence = []
prev_pos = '$$$'
for i in range(n_tags):
cur_ts_tag = ts_tag_sequence[i]
if (cur_ts_tag == 'O'):
new_ts_sequence.append('O')
cur_pos = 'O'
else:
cur_pos = cur_ts_tag
if (cur_pos != prev_pos):
if (i == (n_tags - 1)):
new_ts_sequence.append('s-o')
else:
next_ts_tag = ts_tag_sequence[(i + 1)]
if (next_ts_tag == 'O'):
new_ts_sequence.append('s-o')
else:
new_ts_sequence.append('b-o')
elif (i == (n_tags - 1)):
new_ts_sequence.append('e-o')
else:
next_ts_tag = ts_tag_sequence[(i + 1)]
if (next_ts_tag == 'O'):
new_ts_sequence.append('e-o')
else:
new_ts_sequence.append('i-o')
prev_pos = cur_pos
return new_ts_sequence |
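# Hedged input-format sketch, inferred from the '####' split in read_inst:
# each line is
# "<sentence>####<tok=target-tag ...>####<tok=opinion-tag ...>####<triplet list>"
# where the last field eval()s to e.g. [([0, 1], [3, 3], 'POS')]
# (target span, opinion span, sentiment) -- ASTE-style annotations.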
class Effect5486(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, ship, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Medium Projectile Turret')), 'damageMultiplier', ship.getModifiedItemAttr('shipBonusMBC2'), skill='Minmatar Battlecruiser', **kwargs) |
def aggregate_results(filepaths: List[Path]) -> Dict[str, Any]:
metrics = defaultdict(list)
for f in filepaths:
with f.open('r') as fd:
data = json.load(fd)
for (k, v) in data['results'].items():
metrics[k].append(v)
agg = {k: np.mean(v) for (k, v) in metrics.items()}
return agg |
class DeformRoIPoolFunction(Function):
@staticmethod
def symbolic(g, input, rois, offset, output_size, spatial_scale, sampling_ratio, gamma):
return g.op('mmcv::MMCVDeformRoIPool', input, rois, offset, pooled_height_i=output_size[0], pooled_width_i=output_size[1], spatial_scale_f=spatial_scale, sampling_ratio_f=sampling_ratio, gamma_f=gamma)
@staticmethod
def forward(ctx, input, rois, offset, output_size, spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
if (offset is None):
offset = input.new_zeros(0)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = float(spatial_scale)
ctx.sampling_ratio = int(sampling_ratio)
ctx.gamma = float(gamma)
assert (rois.size(1) == 5), 'RoI must be (idx, x1, y1, x2, y2)!'
output_shape = (rois.size(0), input.size(1), ctx.output_size[0], ctx.output_size[1])
output = input.new_zeros(output_shape)
ext_module.deform_roi_pool_forward(input, rois, offset, output, pooled_height=ctx.output_size[0], pooled_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale, sampling_ratio=ctx.sampling_ratio, gamma=ctx.gamma)
ctx.save_for_backward(input, rois, offset)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
(input, rois, offset) = ctx.saved_tensors
grad_input = grad_output.new_zeros(input.shape)
grad_offset = grad_output.new_zeros(offset.shape)
ext_module.deform_roi_pool_backward(grad_output, input, rois, offset, grad_input, grad_offset, pooled_height=ctx.output_size[0], pooled_width=ctx.output_size[1], spatial_scale=ctx.spatial_scale, sampling_ratio=ctx.sampling_ratio, gamma=ctx.gamma)
if (grad_offset.numel() == 0):
grad_offset = None
return (grad_input, None, grad_offset, None, None, None, None) |
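# Hedged usage sketch (mmcv-style): rois are (batch_idx, x1, y1, x2, y2)
# rows; pass offset=None for plain (non-deformed) pooling.
# out = DeformRoIPoolFunction.apply(feat, rois, None, (7, 7), 1.0 / 16)
# out.shape -> (num_rois, channels, 7, 7)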
def test_dimensions_missing_params():
with pytest.raises(ValueError):
calculate_default_transform('epsg:4326', 'epsg:3857', width=1, height=1, gcps=[1], resolution=1, dst_width=1, dst_height=None)
with pytest.raises(ValueError):
calculate_default_transform('epsg:4326', 'epsg:3857', width=1, height=1, gcps=[1], resolution=1, dst_width=None, dst_height=1) |