hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3b43264146acffd4b9791e5ba180dcff39b787 | 3,442 | py | Python | endorsement/urls.py | uw-it-aca/service-endorsement | a1ba3e4221bb3fe6c81c9f6947ad5e93f10a4a45 | [
"Apache-2.0"
] | 3 | 2017-10-16T17:19:32.000Z | 2019-07-31T22:31:48.000Z | endorsement/urls.py | uw-it-aca/service-endorsement | a1ba3e4221bb3fe6c81c9f6947ad5e93f10a4a45 | [
"Apache-2.0"
] | 284 | 2016-06-17T18:21:31.000Z | 2022-03-21T16:55:03.000Z | endorsement/urls.py | uw-it-aca/service-endorsement | a1ba3e4221bb3fe6c81c9f6947ad5e93f10a4a45 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.urls import re_path
from userservice.views import support as userservice_override
from endorsement.views import page
from endorsement.views.accept import accept
from endorsement.views.support.endorser_search import EndorserSearch
from endorsement.views.support.endorsee_search import EndorseeSearch
from endorsement.views.support.notifications import EndorseeNotifications
from endorsement.views.support.shared_proxy import SharedProxy
from endorsement.views.support.persistent_messages import PersistentMessages
from endorsement.views.support.endorsement_statistics import (
EndorsementStatistics)
from endorsement.views.api.validate import Validate
from endorsement.views.api.endorse import Endorse
from endorsement.views.api.accept import Accept
from endorsement.views.api.endorsee import Endorsee
from endorsement.views.api.endorser import Endorser
from endorsement.views.api.endorsed import Endorsed
from endorsement.views.api.endorsements import Endorsements
from endorsement.views.api.shared import Shared
from endorsement.views.api.shared_owner import SharedOwner
from endorsement.views.api.shared_proxy import SharedProxyEndorse
from endorsement.views.api.statistics import Statistics
from endorsement.views.api.notification import Notification
# URL routing table. ORDER MATTERS: Django resolves top-down, so the more
# specific /support/... and /api/v1/... routes must precede the final
# catch-all pattern that serves the single-page app index.
urlpatterns = [
    re_path(r'^logout', page.logout, name='logout'),
    # 32-char hex token links mailed to endorsees.
    re_path(r'^accept/(?P<accept_id>[A-Za-z0-9]{32})$',
            accept, name='accept_view'),
    # --- support (admin) pages ---
    re_path(r'^support/?$', EndorsementStatistics.as_view(),
            name='endorsement_statistics'),
    re_path(r'^support/provisionee/?', EndorseeSearch.as_view(),
            name='endorsee_search'),
    re_path(r'^support/provisioner/?', EndorserSearch.as_view(),
            name='endorser_search'),
    re_path(r'^support/notifications/?', EndorseeNotifications.as_view(),
            name='endorsee_notifications'),
    re_path(r'^support/override/?', userservice_override,
            name='userservice_override'),
    re_path(r'^support/persistent_messages/?', PersistentMessages.as_view(),
            name='manage_persistent_messages_init'),
    re_path(r'^support/shared_proxy/?', SharedProxy.as_view(),
            name='manage_shared_proxy'),
    # --- REST API (v1) ---
    re_path(r'^api/v1/validate', Validate.as_view(), name='validate_api'),
    re_path(r'^api/v1/endorsee/(?P<endorsee>.+)$',
            Endorsee.as_view(), name='endorsee_api'),
    re_path(r'^api/v1/endorser/(?P<endorser>.+)$',
            Endorser.as_view(), name='endorser_api'),
    re_path(r'^api/v1/endorsements/?$',
            Endorsements.as_view(), name='endorsements_api'),
    re_path(r'^api/v1/stats/(?P<type>.+)$',
            Statistics.as_view(), name='statistics_api'),
    re_path(r'^api/v1/endorsed', Endorsed.as_view(), name='endorsed_api'),
    re_path(r'^api/v1/endorse', Endorse.as_view(), name='endorse_api'),
    re_path(r'^api/v1/shared_owner/(?P<shared_netid>.*)$',
            SharedOwner.as_view(), name='shared_owner_api'),
    # shared_proxy must be matched before the broader 'shared' prefix below.
    re_path(r'^api/v1/shared_proxy/?$',
            SharedProxyEndorse.as_view(), name='shared_proxy_endorse_api'),
    re_path(r'^api/v1/shared', Shared.as_view(), name='shared_api'),
    re_path(r'^api/v1/accept', Accept.as_view(), name='accept_api'),
    re_path(r'^api/v1/notification', Notification.as_view(),
            name='notification_api'),
    # Catch-all: everything else renders the SPA index page. Keep last.
    re_path(r'.*', page.index, name='home'),
]
| 51.373134 | 76 | 0.733295 |
from django.urls import re_path
from userservice.views import support as userservice_override
from endorsement.views import page
from endorsement.views.accept import accept
from endorsement.views.support.endorser_search import EndorserSearch
from endorsement.views.support.endorsee_search import EndorseeSearch
from endorsement.views.support.notifications import EndorseeNotifications
from endorsement.views.support.shared_proxy import SharedProxy
from endorsement.views.support.persistent_messages import PersistentMessages
from endorsement.views.support.endorsement_statistics import (
EndorsementStatistics)
from endorsement.views.api.validate import Validate
from endorsement.views.api.endorse import Endorse
from endorsement.views.api.accept import Accept
from endorsement.views.api.endorsee import Endorsee
from endorsement.views.api.endorser import Endorser
from endorsement.views.api.endorsed import Endorsed
from endorsement.views.api.endorsements import Endorsements
from endorsement.views.api.shared import Shared
from endorsement.views.api.shared_owner import SharedOwner
from endorsement.views.api.shared_proxy import SharedProxyEndorse
from endorsement.views.api.statistics import Statistics
from endorsement.views.api.notification import Notification
urlpatterns = [
re_path(r'^logout', page.logout, name='logout'),
re_path(r'^accept/(?P<accept_id>[A-Za-z0-9]{32})$',
accept, name='accept_view'),
re_path(r'^support/?$', EndorsementStatistics.as_view(),
name='endorsement_statistics'),
re_path(r'^support/provisionee/?', EndorseeSearch.as_view(),
name='endorsee_search'),
re_path(r'^support/provisioner/?', EndorserSearch.as_view(),
name='endorser_search'),
re_path(r'^support/notifications/?', EndorseeNotifications.as_view(),
name='endorsee_notifications'),
re_path(r'^support/override/?', userservice_override,
name='userservice_override'),
re_path(r'^support/persistent_messages/?', PersistentMessages.as_view(),
name='manage_persistent_messages_init'),
re_path(r'^support/shared_proxy/?', SharedProxy.as_view(),
name='manage_shared_proxy'),
re_path(r'^api/v1/validate', Validate.as_view(), name='validate_api'),
re_path(r'^api/v1/endorsee/(?P<endorsee>.+)$',
Endorsee.as_view(), name='endorsee_api'),
re_path(r'^api/v1/endorser/(?P<endorser>.+)$',
Endorser.as_view(), name='endorser_api'),
re_path(r'^api/v1/endorsements/?$',
Endorsements.as_view(), name='endorsements_api'),
re_path(r'^api/v1/stats/(?P<type>.+)$',
Statistics.as_view(), name='statistics_api'),
re_path(r'^api/v1/endorsed', Endorsed.as_view(), name='endorsed_api'),
re_path(r'^api/v1/endorse', Endorse.as_view(), name='endorse_api'),
re_path(r'^api/v1/shared_owner/(?P<shared_netid>.*)$',
SharedOwner.as_view(), name='shared_owner_api'),
re_path(r'^api/v1/shared_proxy/?$',
SharedProxyEndorse.as_view(), name='shared_proxy_endorse_api'),
re_path(r'^api/v1/shared', Shared.as_view(), name='shared_api'),
re_path(r'^api/v1/accept', Accept.as_view(), name='accept_api'),
re_path(r'^api/v1/notification', Notification.as_view(),
name='notification_api'),
re_path(r'.*', page.index, name='home'),
]
| true | true |
1c3b43dfa126113a77aed9d9def8d973b850cef5 | 861 | py | Python | img2coe.py | VladLujerdeanu/Image-to-Coe-File | faab54003982ce5b53f89298a9057680a5b63e1c | [
"MIT"
] | 4 | 2021-03-19T16:21:05.000Z | 2022-02-01T18:19:10.000Z | img2coe.py | VladLujerdeanu/Image-to-Coe-File | faab54003982ce5b53f89298a9057680a5b63e1c | [
"MIT"
] | null | null | null | img2coe.py | VladLujerdeanu/Image-to-Coe-File | faab54003982ce5b53f89298a9057680a5b63e1c | [
"MIT"
] | null | null | null | import numpy as np
import sys
import os
from PIL import Image
def img2coe(path, index):
    """Convert an image to a Xilinx .coe memory file (RGB444, binary radix).

    Each pixel's 8-bit channels are down-sampled to 4 bits and emitted as a
    12-bit binary word, one pixel per line, comma separated.

    :param path: path to a source image readable by PIL
    :param index: integer used to name the output file ("img<index>.coe")
    """
    img = Image.open(path)
    arr = np.array(img)
    output_file = "img" + str(index) + ".coe"
    words = []
    for row in arr:
        for pixel in row:
            # Take only the first three channels so RGBA input works too
            # (the original 3-way unpack crashed on images with alpha).
            r, g, b = pixel[:3]
            r = int((r * 16) / 256)
            g = int((g * 16) / 256)
            b = int((b * 16) / 256)
            words.append('{:04b}{:04b}{:04b}'.format(r, g, b))
    # Join with commas up front instead of the original seek/truncate trick
    # that stripped a trailing comma; 'with' guarantees the file is closed.
    with open(output_file, "w") as f:
        f.write("memory_initialization_radix=2;\nmemory_initialization_vector=")
        f.write(",".join('\n' + w for w in words))
        f.write(";")
if __name__ == "__main__":
    # Command-line entry point: convert every image path passed as an argument.
    image_paths = sys.argv[1:]
    if image_paths:
        for number, image_path in enumerate(image_paths, start=1):
            img2coe(str(image_path), number)
    else:
        print("Insert at least one image path\nFormat: python img2coe.py <path>")
import sys
import os
from PIL import Image
def img2coe(path, index):
img = Image.open(path)
arr = np.array(img)
output_file = "img" + str(index) + ".coe"
f = open(output_file, "w")
f.write("memory_initialization_radix=2;\nmemory_initialization_vector=")
for line in arr:
for r, g, b in line:
r = int((r * 16) / 256)
g = int((g * 16) / 256)
b = int((b * 16) / 256)
f.write(str('\n{:04b}'.format(r)) + str('{:04b}'.format(g)) + str('{:04b}'.format(b)) + ",")
f.seek(f.tell() - 1, os.SEEK_SET)
f.truncate()
f.write(";")
if __name__ == "__main__":
if len(sys.argv) > 1:
for i in range(1, len(sys.argv)):
img2coe(str(sys.argv[i]), i)
else:
print("Insert at least one image path\nFormat: python img2coe.py <path>") | true | true |
1c3b446d870b656182874f023654cee42a310142 | 192 | py | Python | commerce/auctions/admin.py | p-schlickmann/e-commerce | fecc1403dde898f1058662e642ed2678c4d7c224 | [
"MIT"
] | null | null | null | commerce/auctions/admin.py | p-schlickmann/e-commerce | fecc1403dde898f1058662e642ed2678c4d7c224 | [
"MIT"
] | null | null | null | commerce/auctions/admin.py | p-schlickmann/e-commerce | fecc1403dde898f1058662e642ed2678c4d7c224 | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models
# Register the auction models with the Django admin site (order preserved).
for _model in (models.Item, models.User, models.Category, models.Bid):
    admin.site.register(_model)
| 19.2 | 36 | 0.807292 | from django.contrib import admin
from . import models
admin.site.register(models.Item)
admin.site.register(models.User)
admin.site.register(models.Category)
admin.site.register(models.Bid)
| true | true |
1c3b44a2d17a59f486047938d58c7aae75ab6375 | 23,160 | py | Python | Contents/scripts/scnexpl/explorer.py | mochio326/SceneExplorer | 1d93788014ce1eab2dc91258e3efc2c71b7c20cd | [
"MIT"
] | 7 | 2017-03-15T03:09:52.000Z | 2019-09-29T09:34:34.000Z | Contents/scripts/scnexpl/explorer.py | mochio326/SceneExplorer | 1d93788014ce1eab2dc91258e3efc2c71b7c20cd | [
"MIT"
] | null | null | null | Contents/scripts/scnexpl/explorer.py | mochio326/SceneExplorer | 1d93788014ce1eab2dc91258e3efc2c71b7c20cd | [
"MIT"
] | null | null | null | ## -*- coding: utf-8 -*-
import sys
import re
import os.path
import subprocess
from .vendor.Qt import QtCore, QtGui, QtWidgets
from .gui import explorer_ui
from maya.app.general.mayaMixin import MayaQWidgetBaseMixin
import maya.OpenMaya as om
import maya.cmds as cmds
class SceneExplorerWeight(MayaQWidgetBaseMixin, QtWidgets.QDialog, explorer_ui.Ui_Form):
    """File-browser dialog for opening Maya scenes.

    Combines a directory tree, a filtered file list, recent file/project
    history and user bookmarks, built on the Qt Designer form in
    ``explorer_ui``.
    """
    TITLE = "SceneExplorer"
    URL = "https://github.com/mochio326/SceneExplorer"
    # Combo-box labels and the glob patterns they map to (paired by index).
    FILTER_DESCRIPTION = ['ALL TYPE', 'MAYA SCENE', 'MAYA ASCII', 'MAYA BINARY', 'FBX', 'OBJ']
    FILTER_EXTENSION = [['*.*'], ['*.ma', '*.mb'], ['*.ma'], ['*.mb'], ['*.fbx'], ['*.obj']]

    def __init__(self, parent=None):
        """Build the dialog, populate every view and wire up the callbacks."""
        super(SceneExplorerWeight, self).__init__(parent)
        # Memory-management charm: delete the widget object when the window closes.
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
        self.setupUi(self)
        self.dir_model = None           # model behind the directory tree view
        self.file_model = None          # model behind the file list view
        self.path_history = []          # back/forward navigation history
        self.path_history_current = -1  # index into path_history (-1 = empty)
        self.add_path_history_lock = False  # suppress recording during back/forward
        self.bookmark_directory = []
        self.bookmark_file = []
        # Set the object name and window title
        self.setObjectName(self.TITLE)
        self.setWindowTitle(self.TITLE)
        self.setup_view_directory()
        self.setup_view_file()
        self.setup_view_history()
        self.setup_combo_type()
        self.setup_line_filepath()
        self.setup_line_filter()
        self.setup_view_bookmark()
        self.setup_view_history()
        # Wire up callback functions
        self.btn_open.clicked.connect(self.callback_open)
        self.btn_option.clicked.connect(self.callback_option)
        self.btn_return.clicked.connect(self.callback_return)
        self.btn_moveon.clicked.connect(self.callback_moveon)
        self.btn_currentproj.clicked.connect(self.callback_currentproj)
        self.radio_history_file.toggled.connect(self.callback_radio_history_change)
        self.radio_bookmark_file.toggled.connect(self.callback_radio_bookmark_change)
        self.set_style_sheet()

    def set_style_sheet(self):
        """Apply the dark tree-view style sheet to the whole dialog."""
        # NOTE(review): "bfont" below looks like a typo for "font", but it is
        # inside a runtime style-sheet string, so it is left untouched here.
        css = """
        QTreeView {
            alternate-background-color: #3A3A3A;
            background: #333333
        }
        QTreeView::item {
            background-color: transparent;
        }
        QTreeView::item:hover {
            background-color: #415B76;
        }
        QTreeView::item:selected{
            background-color:#678db2;
            bfont: bold;
        }
        """
        self.setStyleSheet(css)

    # -----------------------
    # ui_setup
    # -----------------------
    def setup_view_directory(self, currentpath=None):
        """(Re)build the directory tree view and select *currentpath*.

        :param currentpath: directory to select; defaults to C:/ on first call
        """
        rootpath = ''
        select_path = self.get_view_select(self.view_directory, self.dir_model)
        # Already showing this path -- avoid a needless rebuild.
        if select_path == currentpath:
            return
        if currentpath is None:
            currentpath = r'C:/'
        # QFileSystemModel did not scroll correctly in the folder view,
        # so QDirModel is used here instead.
        #self.dir_model = QtWidgets.QFileSystemModel()
        self.dir_model = QtWidgets.QDirModel()
        self.dir_model.setFilter(QtCore.QDir.NoDotAndDotDot | QtCore.QDir.AllDirs)
        self.view_directory.setModel(self.dir_model)
        self.view_directory.setRootIndex(self.dir_model.index(rootpath))
        self.view_directory.scrollTo(self.dir_model.index(currentpath), QtWidgets.QAbstractItemView.PositionAtCenter)
        self.view_directory.setCurrentIndex(self.dir_model.index(currentpath))
        self.view_directory.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        if hasattr(self.view_directory.header(), 'setResizeMode'):
            # PySide
            self.view_directory.header().setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        else:
            # PySide2
            self.view_directory.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        self.view_directory.header().setVisible(False)
        # Hide the size/type/date columns; only the name column is shown.
        self.view_directory.hideColumn(3)
        self.view_directory.hideColumn(2)
        self.view_directory.hideColumn(1)
        self.view_directory.setAlternatingRowColors(True)
        # Wire callbacks here: re-setting the model appears to drop the
        # previous connections, so they must be re-made after setModel().
        dir_sel_model = self.view_directory.selectionModel()
        dir_sel_model.selectionChanged.connect(self.callback_dir_change)
        # Add a context menu to the QTreeView
        self.view_directory.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.view_directory.customContextMenuRequested.connect(self.directory_context_menu)

    def setup_view_file(self, currentpath=None):
        """(Re)build the file list view rooted at the selected directory.

        :param currentpath: file to select; defaults to the current selection
        """
        select_path = self.get_view_select(self.view_file, self.file_model)
        # Skip the rebuild when currentpath is already the selected path.
        if select_path == currentpath:
            return
        if currentpath is None:
            currentpath = select_path
        self.file_model = QtWidgets.QFileSystemModel()
        self.file_model.setFilter(QtCore.QDir.NoDotAndDotDot | QtCore.QDir.Files)
        self.file_model.setRootPath('')
        # Apply the extension filter from the type combo + free-text filter.
        file_type = self.combo_type.currentIndex()
        if file_type == -1:
            file_type = 0
        filters = self.FILTER_EXTENSION[file_type]
        if self.line_filter.text() != '':
            tex = self.line_filter.text()
            # Replace the leading '*' of each pattern with the typed prefix.
            filters = [re.sub(r'^\*?', tex, f) for f in filters]
        self.file_model.setNameFilters(filters)
        self.view_file.setModel(self.file_model)
        if hasattr(self.view_file.header(), 'setResizeMode'):
            # PySide
            self.view_file.header().setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        else:
            # PySide2
            self.view_file.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        self.view_file.setSortingEnabled(True)
        self.view_file.setAlternatingRowColors(True)
        self.view_file.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        # Root the file list at whatever directory view_directory has selected.
        dir_path = self.get_view_select(self.view_directory, self.dir_model)
        self.view_file.setRootIndex(self.file_model.index(dir_path))
        self.view_file.setCurrentIndex(self.file_model.index(currentpath))
        self.repaint()
        # Wire callbacks here: re-setting the model appears to drop the
        # previous connections, so they must be re-made after setModel().
        file_sel_model = self.view_file.selectionModel()
        file_sel_model.selectionChanged.connect(self.callback_file_change)
        # Add a context menu to the QTreeView
        self.view_file.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.view_file.customContextMenuRequested.connect(self.file_context_menu)

    def setup_combo_type(self):
        """Populate the file-type combo box from the class filter tables."""
        for (des, ex) in zip(self.FILTER_DESCRIPTION, self.FILTER_EXTENSION):
            self.combo_type.addItem("{0} [{1}]".format(des, ' | '.join(ex)))
        self.combo_type.currentIndexChanged.connect(self.callback_type_change)

    def setup_line_filter(self):
        """Connect the free-text filter field (applied on Enter)."""
        self.line_filter.returnPressed.connect(self.callback_filter_change)

    def setup_line_filepath(self):
        """Connect the path field (navigates on Enter)."""
        self.line_filepath.returnPressed.connect(self.callback_filepath_change)

    def setup_view_history(self):
        """(Re)build the recent file/project history list view."""
        self.history_model = QtGui.QStandardItemModel()
        list = get_history(self)
        for l in list:
            self.history_model.appendRow(QtGui.QStandardItem(l))
        if hasattr(self.view_history.header(), 'setResizeMode'):
            # PySide
            self.view_history.header().setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        else:
            # PySide2
            self.view_history.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        self.view_history.header().setVisible(False)
        self.view_history.setModel(self.history_model)
        self.view_history.setAlternatingRowColors(True)
        self.view_history.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        his_sel_model = self.view_history.selectionModel()
        his_sel_model.selectionChanged.connect(self.callback_history_change)
        # Add a context menu to the QTreeView
        self.view_history.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.view_history.customContextMenuRequested.connect(self.history_context_menu)

    def setup_view_bookmark(self):
        """(Re)build the bookmark list view."""
        self.bookmark_model = QtGui.QStandardItemModel()
        list = get_bookmark(self)
        for l in list:
            self.bookmark_model.appendRow(QtGui.QStandardItem(l))
        if hasattr(self.view_bookmark.header(), 'setResizeMode'):
            # PySide
            self.view_bookmark.header().setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        else:
            # PySide2
            self.view_bookmark.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
        self.view_bookmark.header().setVisible(False)
        self.view_bookmark.setModel(self.bookmark_model)
        self.view_bookmark.setAlternatingRowColors(True)
        self.view_bookmark.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        book_sel_model = self.view_bookmark.selectionModel()
        book_sel_model.selectionChanged.connect(self.callback_bookmark_change)
        # Add a context menu to the QTreeView
        self.view_bookmark.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.view_bookmark.customContextMenuRequested.connect(self.bookmark_context_menu)

    # -----------------------
    # ContextMenu
    # -----------------------
    def directory_context_menu(self, pos):
        """Context menu for the directory tree (adds 'Add to bookmark')."""
        add_menu_label = ['Add to bookmark']
        action = self.build_context_menu(pos, self.view_directory, self.dir_model, add_menu_label)
        if action == add_menu_label[0]:
            path = self.get_view_select(self.view_directory, self.dir_model)
            add_bookmark('directory', path)
            self.setup_view_bookmark()

    def file_context_menu(self, pos):
        """Context menu for the file list (adds 'Add to bookmark')."""
        add_menu_label = ['Add to bookmark']
        action = self.build_context_menu(pos, self.view_file, self.file_model, add_menu_label)
        if action == add_menu_label[0]:
            path = self.get_view_select(self.view_file, self.file_model)
            add_bookmark('file', path)
            self.setup_view_bookmark()

    def history_context_menu(self, pos):
        """Context menu for the history list (default entries only)."""
        self.build_context_menu(pos, self.view_history, self.history_model)

    def bookmark_context_menu(self, pos):
        """Context menu for the bookmark list (adds 'Delete')."""
        add_menu_label = ['Delete']
        action = self.build_context_menu(pos, self.view_bookmark, self.bookmark_model, add_menu_label)
        if action == add_menu_label[0]:
            path = self.get_view_select(self.view_bookmark, self.bookmark_model)
            delete_bookmark(self, path)
            self.setup_view_bookmark()

    def build_context_menu(self, pos, view, model, add_menu_label=None):
        '''
        Shared context-menu builder. Executes the generic actions itself
        and returns the label of any other selected entry to the caller.
        :param pos: position passed in at click time
        :param view: view instance
        :param model: model instance
        :param add_menu_label: extra menu labels supplied by the caller
        :return: selected label, or None if handled (or dismissed) here
        '''
        # Build the menu
        menu = QtWidgets.QMenu(view)
        menu_labels = ['Show in Explorer']
        if add_menu_label is not None:
            menu_labels.extend(add_menu_label)
        actionlist = []
        for label in menu_labels:
            actionlist.append(menu.addAction(label))
        action = menu.exec_(view.mapToGlobal(pos))
        #menu.close()
        # ----- execution part
        if action is None:
            return None
        text = action.text()
        # Show in Explorer
        if text == menu_labels[0]:
            path = self.get_view_select(view, model)
            # Encode as cp932 so Japanese file names work on Windows Explorer.
            path = path.encode('cp932')
            if os.path.isdir(path):
                subprocess.Popen(r'explorer {0}'.format(path.replace('/', '\\')))
            else:
                subprocess.Popen(r'explorer /select,{0}'.format(path.replace('/', '\\')))
            return None
        return text

    # -----------------------
    # callback
    # -----------------------
    def callback_filepath_change(self):
        """Navigate both views to the path typed into the path field."""
        file_path = self.line_filepath.text()
        if file_path == '':
            return
        head, tail = os.path.split(file_path)
        name, ex = os.path.splitext(file_path)
        # If the extension is unrecognized, treat the path as a directory.
        if ex == '':
            head = file_path
        self.setup_view_directory(head)
        self.setup_view_file(file_path)
        self.add_path_history()
        self.view_directory.resizeColumnToContents(0)
        select_path = self.get_view_select(self.view_directory, self.dir_model)
        self.view_directory.scrollTo(self.dir_model.index(select_path), QtWidgets.QAbstractItemView.PositionAtCenter)

    def callback_filter_change(self):
        """Refresh the file list when the free-text filter changes."""
        self.setup_view_file()

    def callback_type_change(self):
        """Refresh the file list when the type combo changes."""
        self.setup_view_file()

    def callback_dir_change(self):
        """Refresh the file list when the directory selection changes."""
        self.view_directory.resizeColumnToContents(0)
        #vert_pos = self.view_directory.verticalScrollBar().value()
        #horiz_pos = self.view_directory.horizontalScrollBar().value()
        #print self.view_directory.verticalScrollBar().maximum()
        # self.view_directory.horizontalScrollBar().setValue(10)
        self.setup_view_file()

    def callback_file_change(self, selected, deselected):
        """Mirror the selected file into the path field and record history."""
        select_path = self.get_view_select(self.view_file, self.file_model)
        # Block signals so setting the text does not re-trigger navigation.
        old_state = self.line_filepath.blockSignals(True)
        self.line_filepath.setText(select_path)
        self.line_filepath.blockSignals(old_state)
        self.add_path_history()

    def callback_radio_history_change(self):
        """Rebuild the history view when the file/project radio toggles."""
        self.setup_view_history()

    def callback_radio_bookmark_change(self):
        """Rebuild the bookmark view when the file/directory radio toggles."""
        self.setup_view_bookmark()

    def callback_open(self):
        """Open the selected scene; close the dialog on success."""
        rtn = scene_open(self.line_filepath.text(), self.chkbox_setproject.isChecked())
        if rtn is not None:
            self.close()

    def callback_option(self):
        """Show Maya's Open Scene options dialog."""
        open_options()

    def callback_return(self):
        """Navigate one step back in the path history."""
        if self.path_history_current == 0:
            return
        # Lock so navigation itself does not append to the history.
        self.add_path_history_lock = True
        self.path_history_current -= 1
        file_path = self.path_history[self.path_history_current]
        self.line_filepath.setText(file_path)
        self.callback_filepath_change()
        self.add_path_history_lock = False

    def callback_moveon(self):
        """Navigate one step forward in the path history."""
        if self.path_history_current == len(self.path_history)-1:
            return
        # Lock so navigation itself does not append to the history.
        self.add_path_history_lock = True
        self.path_history_current += 1
        file_path = self.path_history[self.path_history_current]
        self.line_filepath.setText(file_path)
        self.callback_filepath_change()
        self.add_path_history_lock = False

    def callback_history_change(self):
        """Jump to the path selected in the history view."""
        file_path = self.get_view_select(self.view_history, self.history_model)
        self.line_filepath.setText(file_path)
        self.callback_filepath_change()

    def callback_bookmark_change(self):
        """Jump to the path selected in the bookmark view."""
        file_path = self.get_view_select(self.view_bookmark, self.bookmark_model)
        self.line_filepath.setText(file_path)
        self.callback_filepath_change()

    def callback_currentproj(self):
        """Jump to the current Maya project directory."""
        path = get_current_ptoject()
        self.line_filepath.setText(path)
        self.callback_filepath_change()

    # -----------------------
    # Event
    # -----------------------
    def keyPressEvent(self, event):
        # Swallow key presses so Enter etc. do not close the dialog.
        event.accept()

    def closeEvent(self, e):
        print('closeEvent')

    # -----------------------
    # Others
    # -----------------------
    def get_view_select(self, view, model):
        '''
        Return the path of the item currently selected in *view*.
        :param view: view instance
        :param model: model instance (file-system or standard-item model)
        :return: selected path, or '' when nothing is selected yet
        '''
        select_model = view.selectionModel()
        # First call: the view model has not been set yet, so the
        # attribute does not exist.
        if hasattr(select_model, 'hasSelection') is False:
            return ''
        if select_model.hasSelection() is False:
            return ''
        for index in select_model.selectedIndexes():
            if isinstance(model, (QtWidgets.QFileSystemModel, QtWidgets.QDirModel)):
                file_path = model.filePath(index)
            if isinstance(model, QtGui.QStandardItemModel):
                file_path = model.data(index)
            return file_path

    def add_path_history(self):
        """Append the current path to the back/forward navigation history."""
        # Recording is locked (we are inside a back/forward navigation).
        if self.add_path_history_lock is True:
            return
        file_path = self.line_filepath.text()
        if file_path == '':
            return
        # Drop every entry after the current position (branching history).
        if self.path_history_current != -1:
            if len(self.path_history) > 1:
                del self.path_history[self.path_history_current+1:]
        if len(self.path_history) == 0:
            self.path_history.append(file_path)
        else:
            # Avoid consecutive duplicates.
            if self.path_history[-1] != file_path:
                self.path_history.append(file_path)
        self.path_history_current = len(self.path_history) - 1
# #################################################################################################
# ここから実行関数 Maya依存の部分
# #################################################################################################
def get_bookmark_option_var_name(type):
    """Return the Maya optionVar name that stores bookmarks of *type*.

    :param type: 'file' or 'directory'
    :return: the optionVar name string
    :raises ValueError: for any other value (the original implicitly
        returned None, deferring the failure to the caller)
    """
    names = {
        'file': 'SceneExplorer_BookmarkFileList',
        'directory': 'SceneExplorer_BookmarkDirectoryList',
    }
    try:
        return names[type]
    except KeyError:
        raise ValueError('unknown bookmark type: {0!r}'.format(type))
def get_bookmark(ui):
    '''
    Return the bookmark paths stored for the currently selected type.
    :param ui: the explorer UI instance (its radio buttons pick the type)
    :return: list of full paths (empty when nothing is stored)
    '''
    if ui.radio_bookmark_file.isChecked():
        kind = 'file'
    elif ui.radio_bookmark_directory.isChecked():
        kind = 'directory'
    stored = cmds.optionVar(q=get_bookmark_option_var_name(kind))
    # optionVar returns 0 when the variable has never been written.
    return [] if stored == 0 else stored
def add_bookmark(type, value):
    '''
    Append *value* to the stored bookmark list for *type*.
    Does nothing when the path is already bookmarked.
    :param type: bookmark type ('file' or 'directory')
    :param value: path to add
    :return:
    '''
    var_name = get_bookmark_option_var_name(type)
    stored = cmds.optionVar(q=var_name)
    entries = [] if stored == 0 else stored
    if value in entries:
        return
    entries.append(value)
    # Rewrite the whole optionVar: clear it, then re-add every entry.
    cmds.optionVar(ca=var_name)
    for entry in entries:
        cmds.optionVar(sva=(var_name, entry))
    return
def delete_bookmark(ui, value):
    '''
    Remove *value* from the bookmark list of the currently selected type.
    :param ui: the explorer UI instance (its radio buttons pick the type)
    :param value: path to remove
    :return:
    '''
    if ui.radio_bookmark_file.isChecked():
        kind = 'file'
    elif ui.radio_bookmark_directory.isChecked():
        kind = 'directory'
    var_name = get_bookmark_option_var_name(kind)
    stored = cmds.optionVar(q=var_name)
    if stored == 0 or value not in stored:
        return
    stored.remove(value)
    # Rebuild the optionVar from scratch with the remaining entries.
    cmds.optionVar(ca=var_name)
    for entry in stored:
        cmds.optionVar(sva=(var_name, entry))
    return
def get_history(ui):
    '''
    Return the recently used scene files or projects, newest first.
    :param ui: the explorer UI instance (its radio buttons pick the type)
    :return: list of full paths (empty when no history is recorded)
    '''
    if ui.radio_history_file.isChecked():
        recent = cmds.optionVar(q='RecentFilesList')
    elif ui.radio_history_project.isChecked():
        recent = cmds.optionVar(q='RecentProjectsList')
    else:
        recent = []
    # optionVar returns 0 when the variable has never been written.
    if recent == 0:
        return []
    return recent[::-1]
def open_options():
    '''
    Show Maya's Open Scene options dialog.
    :return:
    '''
    cmds.OpenSceneOptions()
def get_current_ptoject():
    """Return the full path of the current Maya project (workspace).

    NOTE: the "ptoject" typo in the name is kept because callers use it.
    """
    return cmds.workspace(fn=True)
def get_project_dir(path):
    '''
    Walk up from *path* looking for the Maya project root: a directory
    that directly contains a workspace.mel file.
    :param path: file or directory path (forward-slash style expected)
    :return: the project directory, or None when none is found
    '''
    drive = os.path.splitdrive(path)[0]
    parent = os.path.dirname(path)
    # Stop at the drive root, and also whenever dirname() no longer shrinks
    # the path -- relative (and backslash-style) paths previously recursed
    # forever because they never hit the drive-root check.
    if parent == path or drive+'/' == parent:
        return None
    f = r'{0}/workspace.mel'.format(parent)
    if os.path.isfile(f):
        return parent
    return get_project_dir(parent)
def scene_open(path, set_project):
    '''
    Open (or import) the scene file at *path*.

    If a scene is already open, the user is asked whether to open it as a
    new scene or import it into the current one. Opening also records the
    file and project in the recent lists.
    :param path: full path to the scene file
    :param set_project: when True, also set the Maya project to the
        directory found by get_project_dir()
    :return: 0 on success, None when nothing was done (bad path/cancel)
    '''
    def new_open():
        # Open in a fresh scene, optionally switching the workspace first.
        if set_project is True:
            cmds.workspace(project_path, openWorkspace=True)
        io.open(path, file_type, 1)
        add_rectnt_project(project_path)
        add_rectnt_file(path, file_type)
    types = {'.ma': 'mayaAscii', '.mb': 'mayaBinary', '.fbx': 'FBX', '.obj': 'OBJ'}
    if path == '':
        return None
    head, tail = os.path.split(path)
    name, ex = os.path.splitext(path)
    if ex not in types.keys():
        return None
    file_type = types[ex]
    project_path = get_project_dir(path)
    io = om.MFileIO()
    if cmds.file(q=1,sceneName=True) == '':
        new_open()
    else:
        result = cmds.confirmDialog(t='File Open', m='New Scene Open or Import Scene?',
                                    b=['New Scene', 'Import Scene', 'Cancel'],
                                    db='New Scene', cb='Cancel', ds='Cancel')
        if result == 'Cancel':
            return None
        elif result == 'New Scene':
            new_open()
        elif result == 'Import Scene':
            # FBX import requires the fbxmaya plug-in to be loaded.
            fbx_plugin = 'fbxmaya'
            cmds.loadPlugin('{0:}.mll'.format(fbx_plugin), qt=1)
            if fbx_plugin not in cmds.pluginInfo(q=1, ls=1):
                om.MGlobal.displayError('{0} Plugin in not loaded'.format(fbx_plugin))
                return None
            # Namespace the import with the file name (dots replaced).
            io.importFile(path, file_type, 1, str(tail.replace('.', '_')))
    # Texture reload (kept disabled)
    #ls = cmds.ls(typ='file', type='mentalrayTexture')
    #[cmds.setAttr(x + '.ftn', cmds.getAttr(x + '.ftn'), type='string') for x in ls]
    return 0
def add_rectnt_project(project):
    '''
    Record *project* in Maya's recent-projects list.
    Any existing entry with the same path is removed first, the project is
    appended, and the list is trimmed to the configured maximum size.
    :param project: project directory path
    :return:
    '''
    opt = cmds.optionVar
    list_name = 'RecentProjectsList'
    current = opt(q=list_name)
    limit = opt(q='RecentProjectsMaxSize')
    # Drop any duplicate of this project already in the history.
    for pos, entry in enumerate(current):
        if entry == project:
            opt(rfa=[list_name, pos])
    opt(sva=[list_name, project])
    if len(opt(q=list_name)) > limit:
        opt(rfa=[list_name, 0])
def add_rectnt_file(file_path, file_type):
    '''
    Record *file_path* (with its Maya *file_type*) in the recent-files list.
    Duplicates are removed first and the paired path/type lists are trimmed
    to the configured maximum size.
    :param file_path: scene file path
    :param file_type: Maya file type string (e.g. 'mayaAscii')
    :return:
    '''
    opt = cmds.optionVar
    list_name = 'RecentFilesList'
    type_name = 'RecentFilesTypeList'
    limit = opt(q='RecentFilesMaxSize')
    current = opt(q=list_name)
    # Drop any duplicate path (and its paired type entry) from the history.
    for pos, entry in enumerate(current):
        if entry == file_path:
            opt(rfa=[list_name, pos])
            opt(rfa=[type_name, pos])
    opt(sva=[list_name, file_path])
    opt(sva=[type_name, file_type])
    if len(opt(q=list_name)) > limit:
        opt(rfa=[list_name, 0])
        opt(rfa=[type_name, 0])
def maya_api_version():
    """Return Maya's API version as an int (e.g. 201700 for Maya 2017)."""
    return int(cmds.about(api=True))
def get_ui(name, weight_type):
    """Find an existing top-level widget whose objectName contains *name*.

    :param name: substring to match against widget object names
    :param weight_type: class name the widget must have (checked on 2017+)
    :return: the matching widget instance, or None
    """
    all_ui = {w.objectName(): w for w in QtWidgets.QApplication.allWidgets()}
    ui = []
    for k, v in all_ui.items():
        if name not in k:
            continue
        # In 2017 the instance type must be checked, otherwise unrelated
        # widgets slip through; in 2016 and earlier the class-name compare
        # reportedly fails, so the first name match is returned as-is.
        if maya_api_version() >= 201700:
            if v.__class__.__name__ == weight_type:
                return v
        else:
            return v
    return None
def main():
    """Show the Scene Explorer window, closing any previous instance first."""
    # Close a window with the same name if one already exists.
    previous = get_ui(SceneExplorerWeight.TITLE, 'SceneExplorerWeight')
    if previous is not None:
        previous.close()
    app = QtWidgets.QApplication.instance()
    window = SceneExplorerWeight()
    window.show()
    # NOTE(review): returns the *previous* (now closed) instance or None,
    # exactly as before -- not the newly created window.
    return previous
if __name__ == '__main__':
    # Allow launching directly (e.g. from Maya's script editor).
    main()
#-----------------------------------------------------------------------------
# EOF
#----------------------------------------------------------------------------- | 34.311111 | 117 | 0.633679 | mport os.path
import subprocess
from .vendor.Qt import QtCore, QtGui, QtWidgets
from .gui import explorer_ui
from maya.app.general.mayaMixin import MayaQWidgetBaseMixin
import maya.OpenMaya as om
import maya.cmds as cmds
class SceneExplorerWeight(MayaQWidgetBaseMixin, QtWidgets.QDialog, explorer_ui.Ui_Form):
TITLE = "SceneExplorer"
URL = "https://github.com/mochio326/SceneExplorer"
FILTER_DESCRIPTION = ['ALL TYPE', 'MAYA SCENE', 'MAYA ASCII', 'MAYA BINARY', 'FBX', 'OBJ']
FILTER_EXTENSION = [['*.*'], ['*.ma', '*.mb'], ['*.ma'], ['*.mb'], ['*.fbx'], ['*.obj']]
def __init__(self, parent=None):
super(SceneExplorerWeight, self).__init__(parent)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, True)
self.setupUi(self)
self.dir_model = None
self.file_model = None
self.path_history = []
self.path_history_current = -1
self.add_path_history_lock = False
self.bookmark_directory = []
self.bookmark_file = []
self.setObjectName(self.TITLE)
self.setWindowTitle(self.TITLE)
self.setup_view_directory()
self.setup_view_file()
self.setup_view_history()
self.setup_combo_type()
self.setup_line_filepath()
self.setup_line_filter()
self.setup_view_bookmark()
self.setup_view_history()
self.btn_open.clicked.connect(self.callback_open)
self.btn_option.clicked.connect(self.callback_option)
self.btn_return.clicked.connect(self.callback_return)
self.btn_moveon.clicked.connect(self.callback_moveon)
self.btn_currentproj.clicked.connect(self.callback_currentproj)
self.radio_history_file.toggled.connect(self.callback_radio_history_change)
self.radio_bookmark_file.toggled.connect(self.callback_radio_bookmark_change)
self.set_style_sheet()
def set_style_sheet(self):
css = """
QTreeView {
alternate-background-color: #3A3A3A;
background: #333333
}
QTreeView::item {
background-color: transparent;
}
QTreeView::item:hover {
background-color: #415B76;
}
QTreeView::item:selected{
background-color:#678db2;
bfont: bold;
}
"""
self.setStyleSheet(css)
def setup_view_directory(self, currentpath=None):
rootpath = ''
select_path = self.get_view_select(self.view_directory, self.dir_model)
if select_path == currentpath:
return
if currentpath is None:
currentpath = r'C:/'
self.dir_model = QtWidgets.QDirModel()
self.dir_model.setFilter(QtCore.QDir.NoDotAndDotDot | QtCore.QDir.AllDirs)
self.view_directory.setModel(self.dir_model)
self.view_directory.setRootIndex(self.dir_model.index(rootpath))
self.view_directory.scrollTo(self.dir_model.index(currentpath), QtWidgets.QAbstractItemView.PositionAtCenter)
self.view_directory.setCurrentIndex(self.dir_model.index(currentpath))
self.view_directory.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
if hasattr(self.view_directory.header(), 'setResizeMode'):
self.view_directory.header().setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
else:
self.view_directory.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.view_directory.header().setVisible(False)
self.view_directory.hideColumn(3)
self.view_directory.hideColumn(2)
self.view_directory.hideColumn(1)
self.view_directory.setAlternatingRowColors(True)
dir_sel_model = self.view_directory.selectionModel()
dir_sel_model.selectionChanged.connect(self.callback_dir_change)
self.view_directory.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.view_directory.customContextMenuRequested.connect(self.directory_context_menu)
def setup_view_file(self, currentpath=None):
select_path = self.get_view_select(self.view_file, self.file_model)
if select_path == currentpath:
return
if currentpath is None:
currentpath = select_path
self.file_model = QtWidgets.QFileSystemModel()
self.file_model.setFilter(QtCore.QDir.NoDotAndDotDot | QtCore.QDir.Files)
self.file_model.setRootPath('')
file_type = self.combo_type.currentIndex()
if file_type == -1:
file_type = 0
filters = self.FILTER_EXTENSION[file_type]
if self.line_filter.text() != '':
tex = self.line_filter.text()
filters = [re.sub(r'^\*?', tex, f) for f in filters]
self.file_model.setNameFilters(filters)
self.view_file.setModel(self.file_model)
if hasattr(self.view_file.header(), 'setResizeMode'):
self.view_file.header().setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
else:
self.view_file.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.view_file.setSortingEnabled(True)
self.view_file.setAlternatingRowColors(True)
self.view_file.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
dir_path = self.get_view_select(self.view_directory, self.dir_model)
self.view_file.setRootIndex(self.file_model.index(dir_path))
self.view_file.setCurrentIndex(self.file_model.index(currentpath))
self.repaint()
file_sel_model = self.view_file.selectionModel()
file_sel_model.selectionChanged.connect(self.callback_file_change)
self.view_file.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.view_file.customContextMenuRequested.connect(self.file_context_menu)
def setup_combo_type(self):
for (des, ex) in zip(self.FILTER_DESCRIPTION, self.FILTER_EXTENSION):
self.combo_type.addItem("{0} [{1}]".format(des, ' | '.join(ex)))
self.combo_type.currentIndexChanged.connect(self.callback_type_change)
def setup_line_filter(self):
self.line_filter.returnPressed.connect(self.callback_filter_change)
def setup_line_filepath(self):
self.line_filepath.returnPressed.connect(self.callback_filepath_change)
def setup_view_history(self):
self.history_model = QtGui.QStandardItemModel()
list = get_history(self)
for l in list:
self.history_model.appendRow(QtGui.QStandardItem(l))
if hasattr(self.view_history.header(), 'setResizeMode'):
self.view_history.header().setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
else:
self.view_history.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.view_history.header().setVisible(False)
self.view_history.setModel(self.history_model)
self.view_history.setAlternatingRowColors(True)
self.view_history.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
his_sel_model = self.view_history.selectionModel()
his_sel_model.selectionChanged.connect(self.callback_history_change)
self.view_history.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.view_history.customContextMenuRequested.connect(self.history_context_menu)
def setup_view_bookmark(self):
self.bookmark_model = QtGui.QStandardItemModel()
list = get_bookmark(self)
for l in list:
self.bookmark_model.appendRow(QtGui.QStandardItem(l))
if hasattr(self.view_bookmark.header(), 'setResizeMode'):
self.view_bookmark.header().setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
else:
self.view_bookmark.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.view_bookmark.header().setVisible(False)
self.view_bookmark.setModel(self.bookmark_model)
self.view_bookmark.setAlternatingRowColors(True)
self.view_bookmark.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
book_sel_model = self.view_bookmark.selectionModel()
book_sel_model.selectionChanged.connect(self.callback_bookmark_change)
self.view_bookmark.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.view_bookmark.customContextMenuRequested.connect(self.bookmark_context_menu)
def directory_context_menu(self, pos):
add_menu_label = ['Add to bookmark']
action = self.build_context_menu(pos, self.view_directory, self.dir_model, add_menu_label)
if action == add_menu_label[0]:
path = self.get_view_select(self.view_directory, self.dir_model)
add_bookmark('directory', path)
self.setup_view_bookmark()
def file_context_menu(self, pos):
add_menu_label = ['Add to bookmark']
action = self.build_context_menu(pos, self.view_file, self.file_model, add_menu_label)
if action == add_menu_label[0]:
path = self.get_view_select(self.view_file, self.file_model)
add_bookmark('file', path)
self.setup_view_bookmark()
def history_context_menu(self, pos):
self.build_context_menu(pos, self.view_history, self.history_model)
def bookmark_context_menu(self, pos):
add_menu_label = ['Delete']
action = self.build_context_menu(pos, self.view_bookmark, self.bookmark_model, add_menu_label)
if action == add_menu_label[0]:
path = self.get_view_select(self.view_bookmark, self.bookmark_model)
delete_bookmark(self, path)
self.setup_view_bookmark()
def build_context_menu(self, pos, view, model, add_menu_label=None):
menu = QtWidgets.QMenu(view)
menu_labels = ['Show in Explorer']
if add_menu_label is not None:
menu_labels.extend(add_menu_label)
actionlist = []
for label in menu_labels:
actionlist.append(menu.addAction(label))
action = menu.exec_(view.mapToGlobal(pos))
if action is None:
return None
text = action.text()
if text == menu_labels[0]:
path = self.get_view_select(view, model)
path = path.encode('cp932')
if os.path.isdir(path):
subprocess.Popen(r'explorer {0}'.format(path.replace('/', '\\')))
else:
subprocess.Popen(r'explorer /select,{0}'.format(path.replace('/', '\\')))
return None
return text
def callback_filepath_change(self):
file_path = self.line_filepath.text()
if file_path == '':
return
head, tail = os.path.split(file_path)
name, ex = os.path.splitext(file_path)
if ex == '':
head = file_path
self.setup_view_directory(head)
self.setup_view_file(file_path)
self.add_path_history()
self.view_directory.resizeColumnToContents(0)
select_path = self.get_view_select(self.view_directory, self.dir_model)
self.view_directory.scrollTo(self.dir_model.index(select_path), QtWidgets.QAbstractItemView.PositionAtCenter)
def callback_filter_change(self):
self.setup_view_file()
def callback_type_change(self):
self.setup_view_file()
def callback_dir_change(self):
self.view_directory.resizeColumnToContents(0)
self.setup_view_file()
def callback_file_change(self, selected, deselected):
select_path = self.get_view_select(self.view_file, self.file_model)
old_state = self.line_filepath.blockSignals(True)
self.line_filepath.setText(select_path)
self.line_filepath.blockSignals(old_state)
self.add_path_history()
def callback_radio_history_change(self):
self.setup_view_history()
def callback_radio_bookmark_change(self):
self.setup_view_bookmark()
def callback_open(self):
rtn = scene_open(self.line_filepath.text(), self.chkbox_setproject.isChecked())
if rtn is not None:
self.close()
def callback_option(self):
open_options()
def callback_return(self):
if self.path_history_current == 0:
return
self.add_path_history_lock = True
self.path_history_current -= 1
file_path = self.path_history[self.path_history_current]
self.line_filepath.setText(file_path)
self.callback_filepath_change()
self.add_path_history_lock = False
def callback_moveon(self):
if self.path_history_current == len(self.path_history)-1:
return
self.add_path_history_lock = True
self.path_history_current += 1
file_path = self.path_history[self.path_history_current]
self.line_filepath.setText(file_path)
self.callback_filepath_change()
self.add_path_history_lock = False
def callback_history_change(self):
file_path = self.get_view_select(self.view_history, self.history_model)
self.line_filepath.setText(file_path)
self.callback_filepath_change()
def callback_bookmark_change(self):
file_path = self.get_view_select(self.view_bookmark, self.bookmark_model)
self.line_filepath.setText(file_path)
self.callback_filepath_change()
def callback_currentproj(self):
path = get_current_ptoject()
self.line_filepath.setText(path)
self.callback_filepath_change()
def keyPressEvent(self, event):
event.accept()
def closeEvent(self, e):
print('closeEvent')
def get_view_select(self, view, model):
select_model = view.selectionModel()
if hasattr(select_model, 'hasSelection') is False:
return ''
if select_model.hasSelection() is False:
return ''
for index in select_model.selectedIndexes():
if isinstance(model, (QtWidgets.QFileSystemModel, QtWidgets.QDirModel)):
file_path = model.filePath(index)
if isinstance(model, QtGui.QStandardItemModel):
file_path = model.data(index)
return file_path
def add_path_history(self):
if self.add_path_history_lock is True:
return
file_path = self.line_filepath.text()
if file_path == '':
return
if self.path_history_current != -1:
if len(self.path_history) > 1:
del self.path_history[self.path_history_current+1:]
if len(self.path_history) == 0:
self.path_history.append(file_path)
else:
if self.path_history[-1] != file_path:
self.path_history.append(file_path)
self.path_history_current = len(self.path_history) - 1
| true | true |
1c3b44f4831afbfe369cc749854816caaf8baacb | 402 | py | Python | ros/build/camera_info_publisher/catkin_generated/pkg.develspace.context.pc.py | Emad-W/CarND-Capstone-Project | d058533d0815559918f4128051b12d47b995980d | [
"MIT"
] | null | null | null | ros/build/camera_info_publisher/catkin_generated/pkg.develspace.context.pc.py | Emad-W/CarND-Capstone-Project | d058533d0815559918f4128051b12d47b995980d | [
"MIT"
] | 10 | 2019-12-16T22:12:07.000Z | 2022-02-10T00:24:31.000Z | ros/build/camera_info_publisher/catkin_generated/pkg.develspace.context.pc.py | Emad-W/CarND-Capstone-Project | d058533d0815559918f4128051b12d47b995980d | [
"MIT"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
# Catkin develspace pkg-config context for the camera_info_publisher package.
# Every dependency-related field was generated empty for this package, so the
# template expressions all reduce to "" or [].
PROJECT_NAME = "camera_info_publisher"
PROJECT_VERSION = "0.0.0"
PROJECT_SPACE_DIR = "/home/student/capstone/CarND-Capstone/ros/devel"
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = []   # no exported include directories
PROJECT_CATKIN_DEPENDS = ""            # no catkin run dependencies
PKG_CONFIG_LIBRARIES_WITH_PREFIX = []  # no exported libraries
| 44.666667 | 69 | 0.718905 |
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "camera_info_publisher"
PROJECT_SPACE_DIR = "/home/student/capstone/CarND-Capstone/ros/devel"
PROJECT_VERSION = "0.0.0"
| true | true |
1c3b45b01c0a93ee7b6ac0d613320683c9148afc | 1,648 | py | Python | test_Data_update_1.py | eduardcd/mas_fc | 990f0465081da52078fd28a95dbde535db073c18 | [
"MIT"
] | null | null | null | test_Data_update_1.py | eduardcd/mas_fc | 990f0465081da52078fd28a95dbde535db073c18 | [
"MIT"
] | null | null | null | test_Data_update_1.py | eduardcd/mas_fc | 990f0465081da52078fd28a95dbde535db073c18 | [
"MIT"
] | null | null | null | from code_1 import Data
from unittest import TestCase
def test_calculate1():
    """Overlapping ids: per object id, the higher-confidence detection is kept."""
    depth_dets = [('knife', 1, .99), ('scissor', 2, .65), ('spoon', 3, .33), ('spoon', 4, .80), ('keys', 5, .95)]
    color_dets = [('keys', 5, .95), ('spoon', 4, .99), ('fork', 3, .99), ('scissor', 2, .95), ('knife', 1, .55)]
    expected = [('knife', 1, .99), ('scissor', 2, .95), ('fork', 3, .99), ('spoon', 4, .99), ('keys', 5, .95)]
    assert sorted(Data(depth_dets, color_dets).calculate()) == sorted(expected)
def test_calculate2():
    """Two empty detection lists yield an empty result."""
    merged = Data([], []).calculate()
    assert sorted(merged) == []
def test_calculate3():
    """With no rgb detections, the rgbd detections pass through unchanged."""
    depth_dets = [('knife', 1, .99), ('scissor', 2, .65), ('spoon', 3, .33)]
    expected = [('knife', 1, .99), ('scissor', 2, .65), ('spoon', 3, .33)]
    assert sorted(Data(depth_dets, []).calculate()) == sorted(expected)
def test_calculate4():
    """Same ids and confidences, different label case: the rgbd (lowercase) entries win the tie."""
    depth_dets = [('knife', 1, .99), ('scissor', 2, .65), ('spoon', 3, .33)]
    color_dets = [('KNIFE', 1, .99), ('SCISSOR', 2, .65), ('SPOON', 3, .33)]
    expected = [('knife', 1, .99), ('scissor', 2, .65), ('spoon', 3, .33)]
    assert sorted(Data(depth_dets, color_dets).calculate()) == sorted(expected)
def test_calculate5():
    """Disjoint id sets: the result is the union of both detection lists."""
    depth_dets = [('knife', 1, .99), ('scissor', 2, .65)]
    color_dets = [('fork', 3, .99), ('spoon', 4, .99)]
    expected = [('knife', 1, .99), ('scissor', 2, .65), ('fork', 3, .99), ('spoon', 4, .99)]
    assert sorted(Data(depth_dets, color_dets).calculate()) == sorted(expected)
def test_calculate6():
    """Duplicate ids within one list collapse to the single best detection per id."""
    depth_dets = [('knife', 1, .94), ('knife', 1, .69), ('knife', 1, .89)]
    color_dets = [('knife', 1, .99), ('fork', 3, .99)]
    expected = [('knife', 1, .99), ('fork', 3, .99)]
    assert sorted(Data(depth_dets, color_dets).calculate()) == sorted(expected)
| 43.368421 | 136 | 0.517597 | from code_1 import Data
from unittest import TestCase
def test_calculate1():
rgbd = [('knife',1, .99), ('scissor', 2, .65), ('spoon', 3, .33), ('spoon', 4, .80), ('keys', 5, .95)]
rgb = [('keys', 5, .95), ('spoon', 4, .99),('fork', 3, .99), ('scissor', 2, .95), ('knife',1, .55)]
data = Data(rgbd, rgb)
assert sorted(data.calculate())==sorted([('knife', 1, .99), ('scissor',2, .95), ('fork',3, .99), ('spoon',4, .99), ('keys',5, .95)])
def test_calculate2():
rgbd = []
rgb = []
data2 = Data(rgbd, rgb)
assert sorted(data2.calculate()) == sorted([])
def test_calculate3():
rgbd = [('knife',1, .99), ('scissor', 2, .65), ('spoon', 3, .33)]
rgb = []
data3 = Data(rgbd, rgb)
assert sorted(data3.calculate()) == sorted([('knife',1, .99), ('scissor', 2, .65), ('spoon', 3, .33)])
def test_calculate4():
rgbd = [('knife',1, .99), ('scissor', 2, .65), ('spoon', 3, .33)]
rgb = [('KNIFE',1, .99), ('SCISSOR', 2, .65), ('SPOON', 3, .33)]
data4 = Data(rgbd, rgb)
assert sorted(data4.calculate()) == sorted([('knife',1, .99), ('scissor', 2, .65), ('spoon', 3, .33)])
def test_calculate5():
rgbd = [('knife',1, .99), ('scissor', 2, .65)]
rgb = [('fork', 3, .99), ('spoon', 4, .99)]
data5 = Data(rgbd, rgb)
assert sorted(data5.calculate()) == sorted([('knife', 1, .99), ('scissor',2, .65), ('fork',3, .99), ('spoon',4, .99)])
def test_calculate6():
rgbd = [('knife',1, .94),('knife',1, .69),('knife',1, .89)]
rgb = [('knife',1, .99),('fork', 3, .99)]
data6 = Data(rgbd, rgb)
assert sorted(data6.calculate()) == sorted([('knife', 1, .99), ('fork',3, .99)])
| true | true |
1c3b46e3834dbde23cdba1ede29bd6d1494176a7 | 2,149 | py | Python | s3_tar/s3_mpu.py | STARInformatics/s3-tar | 20071e8acc6b8110624fac470d2e51a0b967df55 | [
"MIT"
] | 17 | 2020-02-12T00:14:54.000Z | 2022-03-25T17:53:06.000Z | s3_tar/s3_mpu.py | STARInformatics/s3-tar | 20071e8acc6b8110624fac470d2e51a0b967df55 | [
"MIT"
] | 9 | 2020-02-08T21:32:45.000Z | 2021-03-18T17:49:03.000Z | s3_tar/s3_mpu.py | STARInformatics/s3-tar | 20071e8acc6b8110624fac470d2e51a0b967df55 | [
"MIT"
] | 10 | 2020-03-23T06:53:35.000Z | 2022-01-04T11:52:45.000Z | import logging
logger = logging.getLogger(__name__)
class S3MPU:
    """Manage a single S3 multipart upload: start it, add parts, complete it."""

    def __init__(self, s3, target_bucket, target_key):
        """Start a multipart upload for ``target_key`` in ``target_bucket``.

        Args:
            s3: boto3-style s3 client; only the create/upload/complete
                multipart-upload calls are used
            target_bucket (str): destination bucket name
            target_key (str): destination object key
        """
        self.s3 = s3
        self.target_bucket = target_bucket
        self.target_key = target_key
        # accumulates {'ETag': ..., 'PartNumber': ...} dicts for complete()
        self.parts_mapping = []
        logger.info("Creating file {}".format(target_key))
        self.resp = self.s3.create_multipart_upload(Bucket=target_bucket, Key=target_key)
        logger.debug("Multipart upload start: {}".format(self.resp))

    def upload_part(self, source_io):
        """Upload one part and record its ETag for the final completion call.

        Args:
            source_io (io.BytesIO): buffer holding this part's bytes; it is
                rewound, read and closed here

        Returns:
            bool: True if S3 answered with HTTP 200
        """
        part_num = len(self.parts_mapping) + 1
        logger.info("Uploading part {} of {}"
                    .format(part_num, self.target_key))
        source_io.seek(0)
        payload = source_io.read()
        resp = self.s3.upload_part(
            Bucket=self.target_bucket,
            Key=self.target_key,
            PartNumber=part_num,
            UploadId=self.resp['UploadId'],
            Body=payload,
        )
        source_io.close()  # the buffer is no longer needed after the upload
        logger.debug("Multipart upload part: {}".format(resp))
        succeeded = resp['ResponseMetadata']['HTTPStatusCode'] == 200
        if succeeded:
            # only successfully uploaded parts may appear in the completion call
            self.parts_mapping.append({'ETag': resp['ETag'], 'PartNumber': part_num})
        return succeeded

    def complete(self):
        """Finish the multipart upload with the collected parts mapping.

        Returns:
            bool: True if S3 answered with HTTP 200
        """
        resp = self.s3.complete_multipart_upload(
            Bucket=self.target_bucket,
            Key=self.target_key,
            UploadId=self.resp['UploadId'],
            MultipartUpload={'Parts': self.parts_mapping},
        )
        logger.debug("Multipart upload complete: {}".format(resp))
        return resp['ResponseMetadata']['HTTPStatusCode'] == 200
| 29.847222 | 72 | 0.581201 | import logging
logger = logging.getLogger(__name__)
class S3MPU:
def __init__(self, s3, target_bucket, target_key):
self.s3 = s3
self.target_bucket = target_bucket
self.target_key = target_key
self.parts_mapping = []
logger.info("Creating file {}".format(self.target_key))
self.resp = self.s3.create_multipart_upload(
Bucket=self.target_bucket,
Key=self.target_key,
)
logger.debug("Multipart upload start: {}".format(self.resp))
def upload_part(self, source_io):
part_num = len(self.parts_mapping) + 1
logger.info("Uploading part {} of {}"
.format(part_num, self.target_key))
source_io.seek(0)
resp = self.s3.upload_part(
Bucket=self.target_bucket,
Key=self.target_key,
PartNumber=part_num,
UploadId=self.resp['UploadId'],
Body=source_io.read(),
)
source_io.close()
logger.debug("Multipart upload part: {}".format(resp))
resp_status_code = resp['ResponseMetadata']['HTTPStatusCode']
if resp_status_code == 200:
self.parts_mapping.append({
'ETag': resp['ETag'],
'PartNumber': part_num,
})
return True
return False
def complete(self):
resp = self.s3.complete_multipart_upload(
Bucket=self.target_bucket,
Key=self.target_key,
UploadId=self.resp['UploadId'],
MultipartUpload={'Parts': self.parts_mapping},
)
logger.debug("Multipart upload complete: {}".format(resp))
return resp['ResponseMetadata']['HTTPStatusCode'] == 200
| true | true |
1c3b486cb56678a3a738379132713ae72357262c | 26,769 | py | Python | autoflow/feature_engineer/generate/autofeat/autofeat.py | auto-flow/autoflow | f5903424ad8694d57741a0bd6dfeaba320ea6517 | [
"BSD-3-Clause"
] | 49 | 2020-04-16T11:17:28.000Z | 2020-05-06T01:32:44.000Z | autoflow/feature_engineer/generate/autofeat/autofeat.py | auto-flow/autoflow | f5903424ad8694d57741a0bd6dfeaba320ea6517 | [
"BSD-3-Clause"
] | null | null | null | autoflow/feature_engineer/generate/autofeat/autofeat.py | auto-flow/autoflow | f5903424ad8694d57741a0bd6dfeaba320ea6517 | [
"BSD-3-Clause"
] | 3 | 2020-04-17T00:53:24.000Z | 2020-04-23T03:04:26.000Z | # -*- coding: utf-8 -*-
# Author: Franziska Horn <cod3licious@gmail.com>
# License: MIT
from __future__ import unicode_literals, division, print_function, absolute_import
from builtins import range
from copy import copy
from typing import List, Optional
import numpy as np
import pandas as pd
import pint
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sympy.utilities.lambdify import lambdify
from autoflow.constants import VARIABLE_PATTERN
from autoflow.feature_engineer.select import BorutaFeatureSelector
from autoflow.utils.data import check_n_jobs
from autoflow.utils.logging_ import get_logger
from .feateng import engineer_features, n_cols_generated, colnames2symbols
from .featsel import FeatureSelector
def _parse_units(units, ureg=None, verbose=0):
"""
Convert a dict with string units to pint quantities.
Inputs:
- units: dict with {"variable_name": "unit"}
- ureg: optional: a pint UnitRegistry
- verbose: verbosity level (int; default: 0)
Returns
- parsed_units: dict with {"variable_name": pint Quantity}
"""
parsed_units = {}
if units:
if ureg is None:
ureg = pint.UnitRegistry(auto_reduce_dimensions=True, autoconvert_offset_to_baseunit=True)
for c in units:
try:
parsed_units[c] = ureg.parse_expression(units[c])
except pint.UndefinedUnitError:
if verbose > 0:
print("[AutoFeat] WARNING: unit %r of column %r was not recognized and will be ignored!" % (
units[c], c))
parsed_units[c] = ureg.parse_expression("")
parsed_units[c].__dict__["_magnitude"] = 1.
return parsed_units
class AutoFeatureGenerator(BaseEstimator, TransformerMixin):
def __init__(
self,
problem_type=None,
categorical_cols=None,
feateng_cols=None,
units=None,
max_used_feats=10,
feateng_steps=2,
featsel_runs=3,
max_gb=None,
transformations=None,
apply_pi_theorem=True,
always_return_numpy=False,
n_jobs=-1,
verbose=0,
random_state=0,
consider_other=False,
regularization=None,
div_op=True,
exp_op=False,
log_op=False,
abs_op=False,
sqrt_op=False,
sqr_op=True,
do_final_selection=False,
standardize=False
):
"""
multi-step feature engineering and cross-validated feature selection to generate promising additional
features for your dataset and train a linear prediction model with them.
Inputs:
- problem_type: str, either "regression" or "classification" (default: "regression")
- categorical_cols: list of column names of categorical features; these will be transformed into
0/1 encoding (default: None)
- feateng_cols: list of column names that should be used for the feature engineering part
(default None --> all, with categorical_cols in 0/1 encoding)
- units: dictionary with {col_name: unit} where unit is a string that can be converted into a pint unit.
all columns without units are dimensionless and can be combined with any other column.
Note: it is assumed that all features are of comparable magnitude, i.e., not one variable is in
m and another in mm. If this needs to be accounted for, please scale your variables before
passing them to autofeat!
(default: None --> all columns are dimensionless).
- feateng_steps: number of steps to perform in the feature engineering part (int; default: 2)
- featsel_runs: number of times to perform in the feature selection part with a random fraction of data points (int; default: 5)
- max_gb: if an int is given: maximum number of gigabytes to use in the process (i.e. mostly the
feature engineering part). this is no guarantee! it will lead to subsampling of the
data points if the new dataframe generated is n_rows * n_cols * 32bit > max_gb
Note: this is only an approximate estimate of the final matrix; intermediate representations could easily
take up at least 2 or 3 times that much space...If you can, subsample before, you know your data best.
- transformations: list of transformations that should be applied; possible elements:
"1/", "exp", "log", "abs", "sqrt", "^2", "^3", "1+", "1-", "sin", "cos", "exp-", "2^"
(first 7, i.e., up to ^3, are applied by default)
- apply_pi_theorem: whether or not to apply the pi theorem (if units are given; bool; default: True)
- always_return_numpy: whether to always return a numpy array instead of a pd dataframe when calling (fit_)transform
(default: False; mainly used for sklearn estimator checks)
- n_jobs: how many jobs to run when selecting the features in parallel (int; default: 1)
- verbose: verbosity level (int; default: 0)
Attributes:
- original_columns_: original columns of X when calling fit
- all_columns_: columns of X after calling fit
- categorical_cols_map_: dict mapping from the original categorical columns to a list with new column names
- feateng_cols_: actual columns used for the feature engineering
- feature_formulas_: sympy formulas to generate new features
- feature_functions_: compiled feature functions with columns
- new_feat_cols_: list of good new features that should be generated when calling transform()
- good_cols_: columns selected in the feature selection process, used with the final prediction model
- prediction_model_: sklearn model instance used for the predictions
Note: when giving categorical_cols or feateng_cols, X later (i.e. when calling fit/fit_transform) has to be a DataFrame
"""
self.logger = get_logger(self)
self.standardize = standardize
self.do_final_selection = do_final_selection
if transformations is None:
transformations = []
if div_op:
transformations.append("1/")
if exp_op:
transformations.append("exp")
if log_op:
transformations.append("log")
if abs_op:
transformations.append("abs")
if sqrt_op:
transformations.append("sqrt")
if sqr_op:
transformations.append("^2")
self.sqr_op = sqr_op
self.sqrt_op = sqrt_op
self.abs_op = abs_op
self.log_op = log_op
self.exp_op = exp_op
self.div_op = div_op
self.regularization = regularization
self.consider_other = consider_other
self.random_state = random_state
self.max_used_feats = max_used_feats
self.problem_type = problem_type
self.categorical_cols = categorical_cols
self.feateng_cols = feateng_cols
self.units = units
self.feateng_steps = feateng_steps
self.max_gb = max_gb
self.featsel_runs = featsel_runs
self.transformations = transformations
self.apply_pi_theorem = apply_pi_theorem
self.always_return_numpy = always_return_numpy
self.n_jobs = check_n_jobs(n_jobs)
self.verbose = verbose
def __getstate__(self):
"""
get dict for pickling without feature_functions as they are not pickleable
"""
return {k: self.__dict__[k] if k != "feature_functions_" else {} for k in self.__dict__}
def _transform_categorical_cols(self, df):
"""
Transform categorical features into 0/1 encoding.
Inputs:
- df: pandas dataframe with original features
Returns:
- df: dataframe with categorical features transformed into multiple 0/1 columns
"""
self.categorical_cols_map_ = {}
if self.categorical_cols:
e = OneHotEncoder(sparse=False, categories="auto")
for c in self.categorical_cols:
if c not in df.columns:
raise ValueError("[AutoFeat] categorical_col %r not in df.columns" % c)
ohe = e.fit_transform(df[c].to_numpy()[:, None])
new_cat_cols = ["cat_%s_%r" % (str(c), i) for i in e.categories_[0]]
self.categorical_cols_map_[c] = new_cat_cols
df = df.join(pd.DataFrame(ohe, columns=new_cat_cols, index=df.index))
# remove the categorical column from our columns to consider
df.drop(columns=self.categorical_cols, inplace=True)
return df
    def _apply_pi_theorem(self, df):
        """
        If units were given, append dimensionless feature columns derived with pint's
        Pi Theorem implementation (``ureg.pi_theorem``) to the dataframe.

        Inputs:
            - df: pandas dataframe with the original features

        Returns:
            - df: the same dataframe with one additional "PT<i>_<formula>" column per
              Pi Theorem result (unchanged if self.apply_pi_theorem is False or no units are set)
        """
        if self.apply_pi_theorem and self.units:
            ureg = pint.UnitRegistry(auto_reduce_dimensions=True, autoconvert_offset_to_baseunit=True)
            parsed_units = _parse_units(self.units, ureg, self.verbose)
            # use only original (feature engineering) columns that carry an actual unit
            parsed_units = {c: parsed_units[c] for c in self.feateng_cols_ if not parsed_units[c].dimensionless}
            if self.verbose:
                print("[AutoFeat] Applying the Pi Theorem")
            pi_theorem_results = ureg.pi_theorem(parsed_units)
            for i, r in enumerate(pi_theorem_results, 1):
                if self.verbose:
                    print("[AutoFeat] Pi Theorem %i: " % i, pint.formatter(r.items()))
                # r maps column names to exponents; the new feature is the product of
                # the columns raised to these exponents
                cols = sorted(r)
                # only use data points where none of the affected columns are NaNs
                not_na_idx = df[cols].notna().all(axis=1)
                ptr = df[cols[0]].to_numpy()[not_na_idx] ** r[cols[0]]
                for c in cols[1:]:
                    ptr *= df[c].to_numpy()[not_na_idx] ** r[c]
                df.loc[not_na_idx, "PT%i_%s" % (i, pint.formatter(r.items()).replace(" ", ""))] = ptr
        return df
    def _generate_features(self, df, new_feat_cols):
        """
        Generate additional features based on the feature formulas for all data points in the df.
        Only works after the model was fitted.
        Inputs:
            - df: pandas dataframe with original features
            - new_feat_cols: names of new features that should be generated (keys of self.feature_formulas_)
        Returns:
            - df: dataframe with the additional feature columns added; rows where any input
              column of a feature is NaN get NaN in that feature column
        """
        check_is_fitted(self, ["feature_formulas_"])
        if not new_feat_cols:
            return df
        if not new_feat_cols[0] in self.feature_formulas_:
            raise RuntimeError("[AutoFeat] First call fit or fit_transform to generate the features!")
        if self.verbose:
            print("[AutoFeat] Computing %i new features." % len(new_feat_cols))
        # generate all good features; unscaled this time
        feat_array = np.zeros((len(df), len(new_feat_cols)))
        for i, expr in enumerate(new_feat_cols):
            if self.verbose:
                print("[AutoFeat] %5i/%5i new features" % (i, len(new_feat_cols)), end="\r")
            # feature_functions_ is a lazy cache of compiled functions; it is dropped in
            # __getstate__ (lambdified functions are not picklable) and rebuilt here on demand
            if expr not in self.feature_functions_:
                # generate a substitution expression based on all the original symbols of the original features
                # for the given generated feature in good cols
                # since sympy can handle only up to 32 original features in ufunctify, we need to check which features
                # to consider here, therefore perform some crude check (symbol name occurs in the
                # expression string) to limit the number of features used
                cols = [c for i, c in enumerate(self.feateng_cols_) if colnames2symbols(c, i) in expr]
                if not cols:
                    # this can happen if no features were selected and the expr is "E" (i.e. the constant e)
                    f = None
                else:
                    try:
                        f = lambdify([self.feature_formulas_[c] for c in cols], self.feature_formulas_[expr])
                    except Exception:
                        print("[AutoFeat] Error while processing expression: %r" % expr)
                        raise
                self.feature_functions_[expr] = (cols, f)
            else:
                cols, f = self.feature_functions_[expr]
            if f is not None:
                # only generate features for completely not-nan rows; the rest get NaN
                not_na_idx = df[cols].notna().all(axis=1)
                try:
                    feat_array[not_na_idx, i] = f(*(df[c].to_numpy(dtype=float)[not_na_idx] for c in cols))
                    feat_array[~not_na_idx, i] = np.nan
                except RuntimeWarning:
                    print("[AutoFeat] WARNING: Problem while evaluating expression: %r with columns %r" % (expr, cols),
                          " - is the data in a different range then when calling .fit()? Are maybe some values 0 that shouldn't be?")
                    raise
        if self.verbose:
            print("[AutoFeat] %5i/%5i new features ...done." % (len(new_feat_cols), len(new_feat_cols)))
        df = df.join(pd.DataFrame(feat_array, columns=new_feat_cols, index=df.index))
        return df
def convert_colname_to_variables(self, df):
ix = 0
origin_columns = []
new_columns = []
keep_columns = []
input_columns = df.columns.astype(str).tolist()
for column in input_columns:
if not VARIABLE_PATTERN.match(column):
while (f"x{ix:03d}" in df.columns) or (f"x{ix:03d}" in (new_columns + input_columns)):
ix += 1
origin_columns.append(column)
new_columns.append(f"x{ix:03d}")
ix += 1
else:
keep_columns.append(column)
self.column_mapper_ = dict(zip(origin_columns, new_columns))
column_mapper = copy(self.column_mapper_)
column_mapper.update(dict(zip(keep_columns, keep_columns)))
self.column_mapper = column_mapper
df.columns = df.columns.map(column_mapper)
    def fit(self, X, y, X_pool: Optional[List[pd.DataFrame]] = None):
        """
        Runs the full feature-engineering pipeline and fits all internal state.

        Pipeline: infer problem type -> validate inputs -> pre-select at most
        ``max_used_feats`` columns (tree importances + Boruta) -> one-hot encode
        categoricals -> optional Pi-Theorem features -> engineer candidate
        features -> select predictive ones -> optional final Boruta pass and
        standardization.

        Inputs:
            - X: pandas dataframe or numpy array with original features (n_datapoints x n_features)
            - y: pandas dataframe or numpy array with targets for all n_datapoints
            - X_pool: optional list of extra (unlabeled) dataframes with the same
              columns as X, concatenated and passed to the feature engineering step.
        Returns:
            - self (fitted transformer; call transform() to obtain the new dataframe).

        Please ensure that X only contains valid feature columns (including possible categorical variables).
        Note: we strongly encourage you to name your features X1 ... Xn or something simple like this before passing
        a DataFrame to this model. This can help avoid potential problems with sympy later on.
        The data should only contain finite values (no NaNs etc.)
        """
        # store column names as they'll be lost in the check_X_y conversion below
        cols = [str(c) for c in X.columns] if isinstance(X, pd.DataFrame) else []
        # infer the problem type from the target when not given explicitly
        if self.problem_type is None:
            if type_of_target(y) == "continuous":
                self.problem_type = "regression"
            else:
                self.problem_type = "classification"
        # check input variables
        X, target = check_X_y(X, y, y_numeric=self.problem_type == "regression", dtype=None)
        # heuristic default: l1 for small samples, l2 for larger ones
        if self.regularization is None:
            if X.shape[0] > 2000:
                self.regularization = "l2"
            else:
                self.regularization = "l1"
        if not cols:
            # the additional zeros in the name are because of the variable check in _generate_features,
            # where we check if the column name occurs in the expression. this would lead to many
            # false positives if we have features x1 and x10...x19 instead of x001...x019.
            cols = ["x%03i" % i for i in range(X.shape[1])]
        self.original_columns_ = cols
        # transform X into a dataframe (again)
        pre_df = pd.DataFrame(X, columns=cols)
        # if column names don't match the sympy variable pattern, rename them
        # (the same mapping is re-applied in transform via self.column_mapper)
        self.convert_colname_to_variables(pre_df)
        if pre_df.shape[1] > self.max_used_feats:
            # In order to limit the scale of the problem, the number of features is limited to K
            # via extra-trees feature importances before running Boruta.
            base_model_cls = ExtraTreesClassifier if self.problem_type == "classification" else ExtraTreesRegressor
            base_model_params = dict(
                n_estimators=50,
                min_samples_leaf=10,
                min_samples_split=10,
                random_state=self.random_state,
                n_jobs=self.n_jobs
            )
            feature_importances = base_model_cls(**base_model_params).fit(X, y).feature_importances_
            pre_activated_indexes = np.argsort(-feature_importances)[:self.max_used_feats]
        else:
            pre_activated_indexes = np.arange(pre_df.shape[1])
        # Boruta pass on the pre-selected columns; optionally keep "weak" features too
        boruta = BorutaFeatureSelector(max_depth=7, n_estimators="auto", max_iter=10, weak=False,
                                       random_state=self.random_state, verbose=self.verbose).fit(
            pre_df.values[:, pre_activated_indexes], y)
        if boruta.weak:
            boruta_mask = boruta.support_ + boruta.support_weak_
        else:
            boruta_mask = boruta.support_
        activated_indexes = pre_activated_indexes[boruta_mask]
        df = pre_df.iloc[:, activated_indexes]
        # align the optional unlabeled pool with the selected columns
        if X_pool:
            X_pool_new = []
            for X_ in X_pool:
                if X_ is None:
                    continue
                if not isinstance(X_, pd.DataFrame):
                    X_ = pd.DataFrame(X_)
                X_ = X_.iloc[:, activated_indexes].copy()
                X_.columns = df.columns
                X_pool_new.append(X_)
            if len(X_pool_new) > 0:
                X_pool = pd.concat(X_pool_new)
                X_pool.index = range(X_pool.shape[0])
            else:
                X_pool = None
        self.boruta_1 = boruta
        self.pre_activated_indexes = pre_activated_indexes
        self.activated_indexes = activated_indexes
        # possibly convert categorical columns
        df = self._transform_categorical_cols(df)
        # if we're not given specific feateng_cols, then just take all columns except categorical
        if self.feateng_cols:
            fcols = []
            for c in self.feateng_cols:
                if c not in self.original_columns_:
                    raise ValueError("[AutoFeat] feateng_col %r not in df.columns" % c)
                if c in self.categorical_cols_map_:
                    fcols.extend(self.categorical_cols_map_[c])
                else:
                    fcols.append(c)
            self.feateng_cols_ = fcols
        else:
            self.feateng_cols_ = list(df.columns)
        # convert units to proper pint units; note this overwrites self.units
        # restricted to exactly the feateng columns (missing ones become dimensionless)
        if self.units:
            # need units for only and all feateng columns
            self.units = {c: self.units[c] if c in self.units else "" for c in self.feateng_cols_}
        # apply pi-theorem -- additional columns are not used for regular feature engineering (for now)!
        df = self._apply_pi_theorem(df)
        # estimate the size of the generated feature matrix
        # (n_rows * n_cols * 32/8)/1000000000 <= max_gb
        n_cols = n_cols_generated(len(self.feateng_cols_), self.feateng_steps, len(self.transformations))
        n_gb = (len(df) * n_cols) / 250000000
        if self.verbose:
            print("[AutoFeat] The %i step feature engineering process could generate up to %i features." % (
                self.feateng_steps, n_cols))
            print("[AutoFeat] With %i data points this new feature matrix would use about %.2f gb of space." % (
                len(df), n_gb))
        # NOTE(review): the max_gb subsampling below is disabled; n_gb is currently informational only.
        # if self.max_gb and n_gb > self.max_gb:
        #     n_rows = int(self.max_gb * 250000000 / n_cols)
        #     if self.verbose:
        #         print(
        #             "[AutoFeat] As you specified a limit of %.1d gb, the number of data points is subsampled to %i" % (
        #                 self.max_gb, n_rows))
        #     subsample_idx = np.random.permutation(list(df.index))[:n_rows]
        #     df_subs = df.iloc[subsample_idx]
        #     df_subs.reset_index(drop=True, inplace=True)
        #     target_sub = target[subsample_idx]
        # else:
        df_subs = df.copy()
        target_sub = target.copy()
        # generate features
        df_subs, self.feature_formulas_ = engineer_features(df_subs, self.feateng_cols_,
                                                            _parse_units(self.units, verbose=self.verbose),
                                                            self.feateng_steps, self.transformations, self.verbose,
                                                            X_pool)
        # select predictive features
        self.core_selector = FeatureSelector(self.problem_type, self.featsel_runs, None, self.n_jobs, self.verbose,
                                             self.random_state, self.consider_other, self.regularization)
        if self.featsel_runs <= 0:
            if self.verbose:
                print("[AutoFeat] WARNING: Not performing feature selection.")
            good_cols = df_subs.columns
        else:
            good_cols = self.core_selector.fit(df_subs, target_sub).good_cols_
        # if no features were selected, take the original features
        if not good_cols:
            good_cols = list(df.columns)
        # filter out those columns that were original features or generated otherwise
        self.new_feat_cols_ = [c for c in good_cols if c not in list(df.columns)]
        self.feature_functions_ = {}
        self.good_cols_ = good_cols
        if self.standardize or self.do_final_selection:
            # materialize the engineered features on the full (pre-selection) dataframe
            df_final = self._generate_features(pre_df, self.new_feat_cols_)
            if self.do_final_selection:
                # second Boruta pass over original + engineered features
                boruta = BorutaFeatureSelector(max_depth=7, n_estimators="auto", max_iter=10, weak=False,
                                               random_state=self.random_state, verbose=self.verbose).fit(df_final, y)
                support_mask = boruta.support_
                self.boruta_2 = boruta
                if boruta.weak:
                    support_mask += boruta.support_weak_
                # the first pre_df.shape[1] columns of df_final are the originals, the rest are generated
                origin_columns = pre_df.columns
                gen_columns = df_final.columns[pre_df.shape[1]:]
                origin_mask = support_mask[:pre_df.shape[1]]
                gen_mask = support_mask[pre_df.shape[1]:]
                gen_valid_cols = gen_columns[gen_mask].tolist()
                self.new_feat_cols_ = [c for c in self.new_feat_cols_ if c in gen_valid_cols]
                origin_valid_cols = origin_columns[origin_mask].tolist()
                self.valid_cols_ = origin_valid_cols + gen_valid_cols
                df_final = df_final[self.valid_cols_]
            else:
                self.valid_cols_ = None
            if self.standardize:
                self.standardizer_ = StandardScaler().fit(df_final)
            else:
                self.standardizer_ = None
        else:
            self.standardizer_ = None
            self.valid_cols_ = None
        return self
    def transform(self, X):
        """
        Applies the fitted pipeline (column renaming, categorical encoding,
        Pi-Theorem features, engineered features, optional column filtering,
        standardization and median imputation of non-finite values) to X.

        Inputs:
            - X: pandas dataframe or numpy array with original features (n_datapoints x n_features)
        Returns:
            - new_df: new pandas dataframe with all the original features (except categorical features transformed
                      into multiple 0/1 columns) and the most promising engineered features. This df can then be
                      used to train your final model.
        """
        check_is_fitted(self, ["feature_formulas_"])
        # store column names as they'll be lost in the other check
        cols = [str(c) for c in X.columns] if isinstance(X, pd.DataFrame) else []
        # check input variables (NaNs allowed here, unlike in fit)
        X = check_array(X, force_all_finite="allow-nan", dtype=None)
        if not cols:
            cols = ["x%03i" % i for i in range(X.shape[1])]
        if not cols == self.original_columns_:
            raise ValueError("[AutoFeat] Not the same features as when calling fit.")
        # transform X into a dataframe (again)
        df = pd.DataFrame(X, columns=cols)
        # re-apply the column renaming computed in fit
        df.columns = df.columns.map(self.column_mapper)
        # possibly convert categorical columns
        df = self._transform_categorical_cols(df)
        # possibly apply pi-theorem
        df = self._apply_pi_theorem(df)
        # generate engineered features
        df = self._generate_features(df, self.new_feat_cols_)
        # NOTE(review): this early return skips the valid_cols_ filtering,
        # standardization and non-finite imputation below -- confirm that is
        # intended for the numpy output path.
        if self.always_return_numpy:
            return df.to_numpy()
        if self.valid_cols_ is not None:
            df = df[self.valid_cols_]
        if self.standardizer_ is not None:
            df = pd.DataFrame(self.standardizer_.transform(df.values), columns=df.columns, index=df.index)
        # replace inf/nan values by the per-column median
        inf_cnt = np.count_nonzero(~np.isfinite(df), axis=0)
        if inf_cnt.sum() > 0:
            self.logger.warning(f"inf_cnt.sum() = {inf_cnt.sum()}, "
                                f"error-columns are: {df.columns[inf_cnt > 0].tolist()} , "
                                f"using median-fill handle this")
            data = df.values
            data[~np.isfinite(df)] = np.nan
            data = SimpleImputer(strategy="median").fit_transform(data)  # FIXME: columns that are entirely zero
            df = pd.DataFrame(data, columns=df.columns, index=df.index)
        return df
| 51.085878 | 140 | 0.606859 |
from __future__ import unicode_literals, division, print_function, absolute_import
from builtins import range
from copy import copy
from typing import List, Optional
import numpy as np
import pandas as pd
import pint
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import ExtraTreesClassifier, ExtraTreesRegressor
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sympy.utilities.lambdify import lambdify
from autoflow.constants import VARIABLE_PATTERN
from autoflow.feature_engineer.select import BorutaFeatureSelector
from autoflow.utils.data import check_n_jobs
from autoflow.utils.logging_ import get_logger
from .feateng import engineer_features, n_cols_generated, colnames2symbols
from .featsel import FeatureSelector
def _parse_units(units, ureg=None, verbose=0):
parsed_units = {}
if units:
if ureg is None:
ureg = pint.UnitRegistry(auto_reduce_dimensions=True, autoconvert_offset_to_baseunit=True)
for c in units:
try:
parsed_units[c] = ureg.parse_expression(units[c])
except pint.UndefinedUnitError:
if verbose > 0:
print("[AutoFeat] WARNING: unit %r of column %r was not recognized and will be ignored!" % (
units[c], c))
parsed_units[c] = ureg.parse_expression("")
parsed_units[c].__dict__["_magnitude"] = 1.
return parsed_units
class AutoFeatureGenerator(BaseEstimator, TransformerMixin):
    """sklearn-style transformer that engineers and selects new features.

    Combines tree-importance pre-selection, Boruta selection, sympy-based
    feature construction (div/log/exp/abs/sqrt/square transformations) and
    optional standardization. Fit with ``fit``/``fit`` + ``transform``.
    """

    def __init__(
            self,
            problem_type=None,
            categorical_cols=None,
            feateng_cols=None,
            units=None,
            max_used_feats=10,
            feateng_steps=2,
            featsel_runs=3,
            max_gb=None,
            transformations=None,
            apply_pi_theorem=True,
            always_return_numpy=False,
            n_jobs=-1,
            verbose=0,
            random_state=0,
            consider_other=False,
            regularization=None,
            div_op=True,
            exp_op=False,
            log_op=False,
            abs_op=False,
            sqrt_op=True,
            sqr_op=True,
            do_final_selection=False,
            standardize=False
    ):
        """Configure the generator.

        Key parameters:
            problem_type: "regression"/"classification"; inferred from y in fit when None.
            categorical_cols / feateng_cols: columns to one-hot encode / to engineer from.
            units: optional {column: unit-string} dict used for the Pi Theorem.
            max_used_feats: cap on the number of input columns considered.
            feateng_steps / featsel_runs: depth of engineering / number of selection runs.
            transformations: explicit list of transformations; when None it is
                built from the *_op boolean flags below.
            do_final_selection / standardize: extra Boruta pass / StandardScaler on output.
        """
        self.logger = get_logger(self)
        self.standardize = standardize
        self.do_final_selection = do_final_selection
        # build the transformation list from the individual flags when it is not given;
        # NOTE(review): if an explicit `transformations` list is passed, the *_op flags
        # are stored but have no effect -- confirm that is intended.
        if transformations is None:
            transformations = []
            if div_op:
                transformations.append("1/")
            if exp_op:
                transformations.append("exp")
            if log_op:
                transformations.append("log")
            if abs_op:
                transformations.append("abs")
            if sqrt_op:
                transformations.append("sqrt")
            if sqr_op:
                transformations.append("^2")
        self.sqr_op = sqr_op
        self.sqrt_op = sqrt_op
        self.abs_op = abs_op
        self.log_op = log_op
        self.exp_op = exp_op
        self.div_op = div_op
        self.regularization = regularization
        self.consider_other = consider_other
        self.random_state = random_state
        self.max_used_feats = max_used_feats
        self.problem_type = problem_type
        self.categorical_cols = categorical_cols
        self.feateng_cols = feateng_cols
        self.units = units
        self.feateng_steps = feateng_steps
        self.max_gb = max_gb
        self.featsel_runs = featsel_runs
        self.transformations = transformations
        self.apply_pi_theorem = apply_pi_theorem
        self.always_return_numpy = always_return_numpy
        # NOTE(review): storing the normalized value breaks sklearn's convention of
        # keeping constructor args untouched (affects get_params/clone round-trips).
        self.n_jobs = check_n_jobs(n_jobs)
        self.verbose = verbose
def __getstate__(self):
return {k: self.__dict__[k] if k != "feature_functions_" else {} for k in self.__dict__}
    def _transform_categorical_cols(self, df):
        """One-hot encode the configured categorical columns of df.

        Populates ``self.categorical_cols_map_`` mapping each categorical column
        to its generated "cat_<col>_<value>" indicator columns; the original
        categorical columns are dropped from the returned dataframe.
        """
        self.categorical_cols_map_ = {}
        if self.categorical_cols:
            # NOTE(review): `sparse=` was renamed to `sparse_output=` in scikit-learn 1.2;
            # confirm the pinned sklearn version still accepts this keyword.
            e = OneHotEncoder(sparse=False, categories="auto")
            for c in self.categorical_cols:
                if c not in df.columns:
                    raise ValueError("[AutoFeat] categorical_col %r not in df.columns" % c)
                ohe = e.fit_transform(df[c].to_numpy()[:, None])
                new_cat_cols = ["cat_%s_%r" % (str(c), i) for i in e.categories_[0]]
                self.categorical_cols_map_[c] = new_cat_cols
                df = df.join(pd.DataFrame(ohe, columns=new_cat_cols, index=df.index))
            df.drop(columns=self.categorical_cols, inplace=True)
        return df
    def _apply_pi_theorem(self, df):
        """Add dimensionless "PT<i>_..." columns derived from the Buckingham Pi Theorem.

        Only runs when ``apply_pi_theorem`` is set and units were provided; only
        columns with a non-dimensionless unit participate. New columns are
        computed on rows without NaNs in the involved columns.
        """
        if self.apply_pi_theorem and self.units:
            ureg = pint.UnitRegistry(auto_reduce_dimensions=True, autoconvert_offset_to_baseunit=True)
            parsed_units = _parse_units(self.units, ureg, self.verbose)
            # keep only columns with real (non-dimensionless) units
            parsed_units = {c: parsed_units[c] for c in self.feateng_cols_ if not parsed_units[c].dimensionless}
            if self.verbose:
                print("[AutoFeat] Applying the Pi Theorem")
            pi_theorem_results = ureg.pi_theorem(parsed_units)
            for i, r in enumerate(pi_theorem_results, 1):
                if self.verbose:
                    print("[AutoFeat] Pi Theorem %i: " % i, pint.formatter(r.items()))
                cols = sorted(r)
                # only compute the product on rows where all involved columns are non-NaN
                not_na_idx = df[cols].notna().all(axis=1)
                ptr = df[cols[0]].to_numpy()[not_na_idx] ** r[cols[0]]
                for c in cols[1:]:
                    ptr *= df[c].to_numpy()[not_na_idx] ** r[c]
                df.loc[not_na_idx, "PT%i_%s" % (i, pint.formatter(r.items()).replace(" ", ""))] = ptr
        return df
    def _generate_features(self, df, new_feat_cols):
        """Evaluate the engineered feature expressions in ``new_feat_cols`` on df.

        Lambdified sympy functions are cached in ``self.feature_functions_``;
        each feature is only computed on rows where all of its input columns
        are non-NaN (other rows get NaN). Returns df with the new columns joined.
        """
        check_is_fitted(self, ["feature_formulas_"])
        if not new_feat_cols:
            return df
        if not new_feat_cols[0] in self.feature_formulas_:
            raise RuntimeError("[AutoFeat] First call fit or fit_transform to generate the features!")
        if self.verbose:
            print("[AutoFeat] Computing %i new features." % len(new_feat_cols))
        feat_array = np.zeros((len(df), len(new_feat_cols)))
        for i, expr in enumerate(new_feat_cols):
            if self.verbose:
                print("[AutoFeat] %5i/%5i new features" % (i, len(new_feat_cols)), end="\r")
            if expr not in self.feature_functions_:
                # crude substring check to find which input columns the expression uses
                # (relies on the x### padding to avoid x1 matching inside x10 etc.)
                cols = [c for i, c in enumerate(self.feateng_cols_) if colnames2symbols(c, i) in expr]
                if not cols:
                    # can happen when no features were selected and the expr is a constant (e.g. "E")
                    f = None
                else:
                    try:
                        f = lambdify([self.feature_formulas_[c] for c in cols], self.feature_formulas_[expr])
                    except Exception:
                        print("[AutoFeat] Error while processing expression: %r" % expr)
                        raise
                self.feature_functions_[expr] = (cols, f)
            else:
                cols, f = self.feature_functions_[expr]
            if f is not None:
                # only generate features for completely non-NaN rows
                not_na_idx = df[cols].notna().all(axis=1)
                try:
                    feat_array[not_na_idx, i] = f(*(df[c].to_numpy(dtype=float)[not_na_idx] for c in cols))
                    feat_array[~not_na_idx, i] = np.nan
                # NOTE(review): catching RuntimeWarning only triggers if warnings are
                # raised as errors (e.g. under np.errstate/warnings filters) -- confirm.
                except RuntimeWarning:
                    print("[AutoFeat] WARNING: Problem while evaluating expression: %r with columns %r" % (expr, cols),
                          " - is the data in a different range then when calling .fit()? Are maybe some values 0 that shouldn't be?")
                    raise
        if self.verbose:
            print("[AutoFeat] %5i/%5i new features ...done." % (len(new_feat_cols), len(new_feat_cols)))
        df = df.join(pd.DataFrame(feat_array, columns=new_feat_cols, index=df.index))
        return df
def convert_colname_to_variables(self, df):
ix = 0
origin_columns = []
new_columns = []
keep_columns = []
input_columns = df.columns.astype(str).tolist()
for column in input_columns:
if not VARIABLE_PATTERN.match(column):
while (f"x{ix:03d}" in df.columns) or (f"x{ix:03d}" in (new_columns + input_columns)):
ix += 1
origin_columns.append(column)
new_columns.append(f"x{ix:03d}")
ix += 1
else:
keep_columns.append(column)
self.column_mapper_ = dict(zip(origin_columns, new_columns))
column_mapper = copy(self.column_mapper_)
column_mapper.update(dict(zip(keep_columns, keep_columns)))
self.column_mapper = column_mapper
df.columns = df.columns.map(column_mapper)
    def fit(self, X, y, X_pool: Optional[List[pd.DataFrame]] = None):
        """
        Runs the full feature-engineering pipeline and fits all internal state.

        Pipeline: infer problem type -> validate inputs -> pre-select at most
        ``max_used_feats`` columns (tree importances + Boruta) -> one-hot encode
        categoricals -> optional Pi-Theorem features -> engineer candidate
        features -> select predictive ones -> optional final Boruta pass and
        standardization.

        Inputs:
            - X: pandas dataframe or numpy array with original features (n_datapoints x n_features)
            - y: pandas dataframe or numpy array with targets for all n_datapoints
            - X_pool: optional list of extra (unlabeled) dataframes with the same
              columns as X, concatenated and passed to the feature engineering step.
        Returns:
            - self (fitted transformer; call transform() to obtain the new dataframe).
        """
        # store column names as they'll be lost in the check_X_y conversion below
        cols = [str(c) for c in X.columns] if isinstance(X, pd.DataFrame) else []
        # infer the problem type from the target when not given explicitly
        if self.problem_type is None:
            if type_of_target(y) == "continuous":
                self.problem_type = "regression"
            else:
                self.problem_type = "classification"
        # check input variables
        X, target = check_X_y(X, y, y_numeric=self.problem_type == "regression", dtype=None)
        # heuristic default: l1 for small samples, l2 for larger ones
        if self.regularization is None:
            if X.shape[0] > 2000:
                self.regularization = "l2"
            else:
                self.regularization = "l1"
        if not cols:
            # zero-padded names so the substring check in _generate_features does not
            # confuse e.g. x1 with x10...x19
            cols = ["x%03i" % i for i in range(X.shape[1])]
        self.original_columns_ = cols
        # transform X into a dataframe (again)
        pre_df = pd.DataFrame(X, columns=cols)
        # if column names don't match the sympy variable pattern, rename them
        # (the same mapping is re-applied in transform via self.column_mapper)
        self.convert_colname_to_variables(pre_df)
        if pre_df.shape[1] > self.max_used_feats:
            # In order to limit the scale of the problem, the number of features is limited to K
            # via extra-trees feature importances before running Boruta.
            base_model_cls = ExtraTreesClassifier if self.problem_type == "classification" else ExtraTreesRegressor
            base_model_params = dict(
                n_estimators=50,
                min_samples_leaf=10,
                min_samples_split=10,
                random_state=self.random_state,
                n_jobs=self.n_jobs
            )
            feature_importances = base_model_cls(**base_model_params).fit(X, y).feature_importances_
            pre_activated_indexes = np.argsort(-feature_importances)[:self.max_used_feats]
        else:
            pre_activated_indexes = np.arange(pre_df.shape[1])
        # Boruta pass on the pre-selected columns; optionally keep "weak" features too
        boruta = BorutaFeatureSelector(max_depth=7, n_estimators="auto", max_iter=10, weak=False,
                                       random_state=self.random_state, verbose=self.verbose).fit(
            pre_df.values[:, pre_activated_indexes], y)
        if boruta.weak:
            boruta_mask = boruta.support_ + boruta.support_weak_
        else:
            boruta_mask = boruta.support_
        activated_indexes = pre_activated_indexes[boruta_mask]
        df = pre_df.iloc[:, activated_indexes]
        # align the optional unlabeled pool with the selected columns
        if X_pool:
            X_pool_new = []
            for X_ in X_pool:
                if X_ is None:
                    continue
                if not isinstance(X_, pd.DataFrame):
                    X_ = pd.DataFrame(X_)
                X_ = X_.iloc[:, activated_indexes].copy()
                X_.columns = df.columns
                X_pool_new.append(X_)
            if len(X_pool_new) > 0:
                X_pool = pd.concat(X_pool_new)
                X_pool.index = range(X_pool.shape[0])
            else:
                X_pool = None
        self.boruta_1 = boruta
        self.pre_activated_indexes = pre_activated_indexes
        self.activated_indexes = activated_indexes
        # possibly convert categorical columns
        df = self._transform_categorical_cols(df)
        # if we're not given specific feateng_cols, then just take all columns except categorical
        if self.feateng_cols:
            fcols = []
            for c in self.feateng_cols:
                if c not in self.original_columns_:
                    raise ValueError("[AutoFeat] feateng_col %r not in df.columns" % c)
                if c in self.categorical_cols_map_:
                    fcols.extend(self.categorical_cols_map_[c])
                else:
                    fcols.append(c)
            self.feateng_cols_ = fcols
        else:
            self.feateng_cols_ = list(df.columns)
        # restrict units to exactly the feateng columns (missing ones become dimensionless);
        # note this overwrites self.units
        if self.units:
            self.units = {c: self.units[c] if c in self.units else "" for c in self.feateng_cols_}
        # apply pi-theorem -- additional columns are not used for regular feature engineering
        df = self._apply_pi_theorem(df)
        # estimate the size of the generated feature matrix
        # (n_rows * n_cols * 32/8)/1000000000 <= max_gb
        n_cols = n_cols_generated(len(self.feateng_cols_), self.feateng_steps, len(self.transformations))
        n_gb = (len(df) * n_cols) / 250000000
        if self.verbose:
            print("[AutoFeat] The %i step feature engineering process could generate up to %i features." % (
                self.feateng_steps, n_cols))
            print("[AutoFeat] With %i data points this new feature matrix would use about %.2f gb of space." % (
                len(df), n_gb))
        # NOTE(review): the max_gb subsampling below is disabled; n_gb is currently informational only.
        # if self.max_gb and n_gb > self.max_gb:
        #     n_rows = int(self.max_gb * 250000000 / n_cols)
        #     if self.verbose:
        #         print(
        #             "[AutoFeat] As you specified a limit of %.1d gb, the number of data points is subsampled to %i" % (
        #                 self.max_gb, n_rows))
        #     subsample_idx = np.random.permutation(list(df.index))[:n_rows]
        #     df_subs = df.iloc[subsample_idx]
        #     df_subs.reset_index(drop=True, inplace=True)
        #     target_sub = target[subsample_idx]
        # else:
        df_subs = df.copy()
        target_sub = target.copy()
        # generate features
        df_subs, self.feature_formulas_ = engineer_features(df_subs, self.feateng_cols_,
                                                            _parse_units(self.units, verbose=self.verbose),
                                                            self.feateng_steps, self.transformations, self.verbose,
                                                            X_pool)
        # select predictive features
        self.core_selector = FeatureSelector(self.problem_type, self.featsel_runs, None, self.n_jobs, self.verbose,
                                             self.random_state, self.consider_other, self.regularization)
        if self.featsel_runs <= 0:
            if self.verbose:
                print("[AutoFeat] WARNING: Not performing feature selection.")
            good_cols = df_subs.columns
        else:
            good_cols = self.core_selector.fit(df_subs, target_sub).good_cols_
        # if no features were selected, take the original features
        if not good_cols:
            good_cols = list(df.columns)
        # filter out those columns that were original features or generated otherwise
        self.new_feat_cols_ = [c for c in good_cols if c not in list(df.columns)]
        self.feature_functions_ = {}
        self.good_cols_ = good_cols
        if self.standardize or self.do_final_selection:
            # materialize the engineered features on the full (pre-selection) dataframe
            df_final = self._generate_features(pre_df, self.new_feat_cols_)
            if self.do_final_selection:
                # second Boruta pass over original + engineered features
                boruta = BorutaFeatureSelector(max_depth=7, n_estimators="auto", max_iter=10, weak=False,
                                               random_state=self.random_state, verbose=self.verbose).fit(df_final, y)
                support_mask = boruta.support_
                self.boruta_2 = boruta
                if boruta.weak:
                    support_mask += boruta.support_weak_
                # the first pre_df.shape[1] columns of df_final are the originals, the rest are generated
                origin_columns = pre_df.columns
                gen_columns = df_final.columns[pre_df.shape[1]:]
                origin_mask = support_mask[:pre_df.shape[1]]
                gen_mask = support_mask[pre_df.shape[1]:]
                gen_valid_cols = gen_columns[gen_mask].tolist()
                self.new_feat_cols_ = [c for c in self.new_feat_cols_ if c in gen_valid_cols]
                origin_valid_cols = origin_columns[origin_mask].tolist()
                self.valid_cols_ = origin_valid_cols + gen_valid_cols
                df_final = df_final[self.valid_cols_]
            else:
                self.valid_cols_ = None
            if self.standardize:
                self.standardizer_ = StandardScaler().fit(df_final)
            else:
                self.standardizer_ = None
        else:
            self.standardizer_ = None
            self.valid_cols_ = None
        return self
    def transform(self, X):
        """
        Applies the fitted pipeline (column renaming, categorical encoding,
        Pi-Theorem features, engineered features, optional column filtering,
        standardization and median imputation of non-finite values) to X.

        Inputs:
            - X: pandas dataframe or numpy array with original features (n_datapoints x n_features)
        Returns:
            - new pandas dataframe (or numpy array when always_return_numpy is set)
              with the original and engineered feature columns.
        """
        check_is_fitted(self, ["feature_formulas_"])
        # store column names as they'll be lost in the other check
        cols = [str(c) for c in X.columns] if isinstance(X, pd.DataFrame) else []
        # check input variables (NaNs allowed here, unlike in fit)
        X = check_array(X, force_all_finite="allow-nan", dtype=None)
        if not cols:
            cols = ["x%03i" % i for i in range(X.shape[1])]
        if not cols == self.original_columns_:
            raise ValueError("[AutoFeat] Not the same features as when calling fit.")
        df = pd.DataFrame(X, columns=cols)
        # re-apply the column renaming computed in fit
        df.columns = df.columns.map(self.column_mapper)
        df = self._transform_categorical_cols(df)
        df = self._apply_pi_theorem(df)
        df = self._generate_features(df, self.new_feat_cols_)
        # NOTE(review): this early return skips the valid_cols_ filtering,
        # standardization and non-finite imputation below -- confirm that is
        # intended for the numpy output path.
        if self.always_return_numpy:
            return df.to_numpy()
        if self.valid_cols_ is not None:
            df = df[self.valid_cols_]
        if self.standardizer_ is not None:
            df = pd.DataFrame(self.standardizer_.transform(df.values), columns=df.columns, index=df.index)
        # replace inf/nan values by the per-column median
        inf_cnt = np.count_nonzero(~np.isfinite(df), axis=0)
        if inf_cnt.sum() > 0:
            self.logger.warning(f"inf_cnt.sum() = {inf_cnt.sum()}, "
                                f"error-columns are: {df.columns[inf_cnt > 0].tolist()} , "
                                f"using median-fill handle this")
            data = df.values
            data[~np.isfinite(df)] = np.nan
            data = SimpleImputer(strategy="median").fit_transform(data)  # FIXME: columns that are entirely zero
            df = pd.DataFrame(data, columns=df.columns, index=df.index)
        return df
| true | true |
1c3b489ac6f0e700b06e5ce05f81b8a4a447c5d4 | 1,525 | bzl | Python | test/com/google/javascript/jscomp/serialization/integration_tests.bzl | lukec611/closure-compiler | f2fa8b35b8127bfb9e8852963a534eafa324e0c6 | [
"Apache-2.0"
] | 6,240 | 2015-01-01T00:20:53.000Z | 2022-03-31T10:33:32.000Z | test/com/google/javascript/jscomp/serialization/integration_tests.bzl | lukec611/closure-compiler | f2fa8b35b8127bfb9e8852963a534eafa324e0c6 | [
"Apache-2.0"
] | 3,139 | 2015-01-03T02:13:16.000Z | 2022-03-31T16:44:22.000Z | test/com/google/javascript/jscomp/serialization/integration_tests.bzl | lukec611/closure-compiler | f2fa8b35b8127bfb9e8852963a534eafa324e0c6 | [
"Apache-2.0"
] | 1,272 | 2015-01-07T01:22:20.000Z | 2022-03-28T07:23:29.000Z | load("//tools/build_defs/js:rules.bzl", "js_binary")
def serialized_ast_file(name, ordered_srcs = []):
    """Creates a single serialized AST file from compiling all of the input files.

    Defines a js_binary target named "<name>_bin" whose extra output is the
    serialized AST file <name>.
    """
    ast_output = name
    js_binary(
        name = name + "_bin",
        compiler = "//javascript/tools/jscompiler:head",
        compile = 1,
        defs = [
            "--language_out=NO_TRANSPILE",
            "--typed_ast_output_file__experimental__DO_NOT_USE=%s" % ast_output,
        ],
        include_default_externs = "off",
        extra_outputs = [ast_output],
        srcs = ordered_srcs,
    )
def per_file_serialized_asts(name, ordered_srcs = []):
    """Creates a serialized AST file corresponding to each of the input files.

    Each src is first compiled to a "<src>.i.js" interface file; the AST for a
    given src is then built against the interface files of all sources that
    precede it in ordered_srcs. Returns the list of generated "<src>.jsast" files.

    NOTE(review): the `name` argument is currently unused (targets are named
    after each src) -- confirm whether it should prefix the generated targets.
    """
    ijs_files = []
    ast_files = []
    for src in ordered_srcs:
        js_binary(
            name = src + ".i",
            compiler = "//javascript/tools/jscompiler:head",
            defs = ["--incremental_check_mode=GENERATE_IJS"],
            include_default_externs = "off",
            srcs = [src],
            # Due to b/131758317, the manifest generation uses :default with the head flags, which break
            # binaries using new flags with :head. Ban this binary from TAP to workaround.
            tags = ["notap"],
        )
        serialized_ast_file(
            name = src + ".jsast",
            ordered_srcs = ijs_files + [src],
        )
        ijs_files.append(src + ".i.js")
        ast_files.append(src + ".jsast")
    return ast_files
| 33.888889 | 104 | 0.588852 | load("//tools/build_defs/js:rules.bzl", "js_binary")
def serialized_ast_file(name, ordered_srcs = []):
    """Creates a single serialized AST file from compiling all of the input files.

    Defines a js_binary target named "<name>_bin" whose extra output is the
    serialized AST file <name>.
    """
    ast_output = name
    js_binary(
        name = name + "_bin",
        compiler = "//javascript/tools/jscompiler:head",
        compile = 1,
        defs = [
            "--language_out=NO_TRANSPILE",
            "--typed_ast_output_file__experimental__DO_NOT_USE=%s" % ast_output,
        ],
        include_default_externs = "off",
        extra_outputs = [ast_output],
        srcs = ordered_srcs,
    )
def per_file_serialized_asts(name, ordered_srcs = []):
    """Creates a serialized AST file corresponding to each of the input files.

    Each src is first compiled to a "<src>.i.js" interface file; the AST for a
    given src is then built against the interface files of all sources that
    precede it in ordered_srcs. Returns the list of generated "<src>.jsast" files.

    NOTE(review): the `name` argument is currently unused (targets are named
    after each src) -- confirm whether it should prefix the generated targets.
    """
    ijs_files = []
    ast_files = []
    for src in ordered_srcs:
        js_binary(
            name = src + ".i",
            compiler = "//javascript/tools/jscompiler:head",
            defs = ["--incremental_check_mode=GENERATE_IJS"],
            include_default_externs = "off",
            srcs = [src],
            # banned from TAP to work around manifest-generation flag incompatibilities (b/131758317)
            tags = ["notap"],
        )
        serialized_ast_file(
            name = src + ".jsast",
            ordered_srcs = ijs_files + [src],
        )
        ijs_files.append(src + ".i.js")
        ast_files.append(src + ".jsast")
    return ast_files
| true | true |
1c3b494b32b82b353c8d58d2e884213ea3910e94 | 729 | py | Python | tests/test_config.py | flxbe/flumine | a03a0b55373f79c460b2baafa3f1b4068f2cb4da | [
"MIT"
] | null | null | null | tests/test_config.py | flxbe/flumine | a03a0b55373f79c460b2baafa3f1b4068f2cb4da | [
"MIT"
] | 24 | 2021-06-01T07:20:01.000Z | 2022-03-29T16:13:08.000Z | tests/test_config.py | lunswor/flumine | f0e7e6542942d00685ceb6d72951456684998739 | [
"MIT"
] | null | null | null | import unittest
from flumine import config
class ConfigTest(unittest.TestCase):
    """Sanity checks for the default values exposed by the flumine config module."""

    def test_init(self):
        # default flags and process identity
        self.assertFalse(config.simulated)
        self.assertIsInstance(config.hostname, str)
        self.assertIsInstance(config.process_id, int)
        self.assertIsNone(config.current_time)
        self.assertFalse(config.raise_errors)
        self.assertEqual(config.max_execution_workers, 32)
        self.assertFalse(config.async_place_orders)
        # latency defaults in seconds -- presumably simulated order-flow delays; verify in config module
        self.assertEqual(config.place_latency, 0.120)
        self.assertEqual(config.cancel_latency, 0.170)
        self.assertEqual(config.update_latency, 0.150)
        self.assertEqual(config.replace_latency, 0.280)
        self.assertEqual(config.order_sep, "-")
| 36.45 | 58 | 0.721536 | import unittest
from flumine import config
class ConfigTest(unittest.TestCase):
    """Sanity checks for the default values exposed by the flumine config module."""

    def test_init(self):
        # default flags and process identity
        self.assertFalse(config.simulated)
        self.assertIsInstance(config.hostname, str)
        self.assertIsInstance(config.process_id, int)
        self.assertIsNone(config.current_time)
        self.assertFalse(config.raise_errors)
        self.assertEqual(config.max_execution_workers, 32)
        self.assertFalse(config.async_place_orders)
        # latency defaults in seconds -- presumably simulated order-flow delays; verify in config module
        self.assertEqual(config.place_latency, 0.120)
        self.assertEqual(config.cancel_latency, 0.170)
        self.assertEqual(config.update_latency, 0.150)
        self.assertEqual(config.replace_latency, 0.280)
        self.assertEqual(config.order_sep, "-")
| true | true |
1c3b49fbde9909f298cfa3b82da488df0e433628 | 389 | py | Python | kuring/kuring/wsgi.py | rtubio/kuring | bceb7accbb1e99a66be8112f0e396d0a16896bb9 | [
"Apache-2.0"
] | null | null | null | kuring/kuring/wsgi.py | rtubio/kuring | bceb7accbb1e99a66be8112f0e396d0a16896bb9 | [
"Apache-2.0"
] | 1 | 2021-09-22T19:38:06.000Z | 2021-09-22T19:38:06.000Z | kuring/kuring/wsgi.py | rtubio/kuring | bceb7accbb1e99a66be8112f0e396d0a16896bb9 | [
"Apache-2.0"
] | null | null | null | """
WSGI config for kuring project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Ensure Django can locate the project settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kuring.settings')
# Module-level WSGI callable picked up by WSGI servers (e.g. gunicorn/uwsgi).
application = get_wsgi_application()
| 22.882353 | 78 | 0.784062 |
import os
from django.core.wsgi import get_wsgi_application
# Ensure Django can locate the project settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kuring.settings')
# Module-level WSGI callable picked up by WSGI servers (e.g. gunicorn/uwsgi).
application = get_wsgi_application()
| true | true |
1c3b4a36806beb973d99a320bab84a2c8338cef2 | 10,119 | bzl | Python | go/private/actions/link.bzl | aignas/rules_go | 2f3533598303e985110e6fff4f3adf2125d4750e | [
"Apache-2.0"
] | null | null | null | go/private/actions/link.bzl | aignas/rules_go | 2f3533598303e985110e6fff4f3adf2125d4750e | [
"Apache-2.0"
] | 1 | 2022-02-18T15:47:32.000Z | 2022-02-18T15:47:32.000Z | go/private/actions/link.bzl | aignas/rules_go | 2f3533598303e985110e6fff4f3adf2125d4750e | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//go/private:common.bzl",
"as_set",
"has_shared_lib_extension",
)
load(
"//go/private:mode.bzl",
"LINKMODE_C_SHARED",
"LINKMODE_NORMAL",
"LINKMODE_PLUGIN",
"extld_from_cc_toolchain",
"extldflags_from_cc_toolchain",
)
load(
"//go/private:rpath.bzl",
"rpath",
)
load(
"@bazel_skylib//lib:collections.bzl",
"collections",
)
def _format_archive(d):
    # Encodes one dependency as "label=importmap=path" for the Go builder tool.
    return "%s=%s=%s" % (d.label, d.importmap, d.file.path)
def _transitive_archives_without_test_archives(archive, test_archives):
    """Returns archive's transitive deps, excluding any whose importmap matches a test archive.

    We currently tolerate multiple archives with the same importmap (to become an
    error in the future), but one special case must be handled here: if a go_test
    has internal and external archives and the external test transitively depends
    on the library under test, that library must be excluded so the internal test
    archive (same importmap) is used instead.
    """
    excluded_importmaps = [t.importmap for t in test_archives]
    transitive = depset(transitive = [dep.transitive for dep in archive.direct])
    return [dep for dep in transitive.to_list() if dep.importmap not in excluded_importmaps]
def emit_link(
        go,
        archive = None,
        test_archives = [],
        executable = None,
        gc_linkopts = [],
        version_file = None,
        info_file = None):
    """See go/toolchains.rst#link for full documentation.

    Args:
        go: the GoContext providing toolchain, mode, and action helpers.
        archive: GoArchive for the main package (required).
        test_archives: internal test archives that replace transitive archives
            sharing the same importmap.
        executable: output File for the linked binary (required).
        gc_linkopts: flags for the Go linker; "-extldflags" value pairs are
            extracted and forwarded to the external linker instead.
        version_file: workspace status version file, read when stamping.
        info_file: workspace status info file, read when stamping.
    """
    if archive == None:
        fail("archive is a required parameter")
    if executable == None:
        fail("executable is a required parameter")
    # (Removed dead locals: config_strip / pkg_depth were computed here but
    # never used anywhere in this function.)
    # Exclude -lstdc++ from link options. We don't want to link against it
    # unless we actually have some C++ code. _cgo_codegen will include it
    # in archives via CGO_LDFLAGS if it's needed.
    extldflags = [f for f in extldflags_from_cc_toolchain(go) if f not in ("-lstdc++", "-lc++")]
    if go.coverage_enabled:
        extldflags.append("--coverage")
    gc_linkopts, extldflags = _extract_extldflags(gc_linkopts, extldflags)
    builder_args = go.builder_args(go, "link")
    tool_args = go.tool_args(go)
    # Add in any mode specific behaviours
    tool_args.add_all(extld_from_cc_toolchain(go))
    if go.mode.race:
        tool_args.add("-race")
    if go.mode.msan:
        tool_args.add("-msan")
    if ((go.mode.static and not go.mode.pure) or
        go.mode.link != LINKMODE_NORMAL or
        go.mode.goos == "windows" and (go.mode.race or go.mode.msan)):
        # Force external linking for the following conditions:
        # * Mode is static but not pure: -static must be passed to the C
        #   linker if the binary contains cgo code. See #2168, #2216.
        # * Non-normal build mode: may not be strictly necessary, especially
        #   for modes like "pie".
        # * Race or msan build for Windows: Go linker has pairwise
        #   incompatibilities with mingw, and we get link errors in race mode.
        #   Using the C linker avoids that. Race and msan always require a
        #   a C toolchain. See #2614.
        tool_args.add("-linkmode", "external")
    if go.mode.pure:
        # Force internal linking in pure mode. We don't have a C toolchain,
        # so external linking is not possible.
        tool_args.add("-linkmode", "internal")
    if go.mode.static:
        extldflags.append("-static")
    if go.mode.link != LINKMODE_NORMAL:
        builder_args.add("-buildmode", go.mode.link)
    if go.mode.link == LINKMODE_PLUGIN:
        tool_args.add("-pluginpath", archive.data.importpath)
    # TODO: Rework when https://github.com/bazelbuild/bazel/pull/12304 is mainstream
    if go.mode.link == LINKMODE_C_SHARED and (go.mode.goos in ["darwin", "ios"]):
        extldflags.extend([
            "-install_name",
            rpath.install_name(executable),
        ])
    arcs = _transitive_archives_without_test_archives(archive, test_archives)
    arcs.extend(test_archives)
    if (go.coverage_enabled and go.coverdata and
        not any([arc.importmap == go.coverdata.data.importmap for arc in arcs])):
        arcs.append(go.coverdata.data)
    builder_args.add_all(arcs, before_each = "-arc", map_each = _format_archive)
    builder_args.add("-package_list", go.package_list)
    # Build a list of rpaths for dynamic libraries we need to find.
    # rpaths are relative paths from the binary to directories where libraries
    # are stored. Binaries that require these will only work when installed in
    # the bazel execroot. Most binaries are only dynamically linked against
    # system libraries though.
    cgo_rpaths = sorted(collections.uniq([
        f
        for d in archive.cgo_deps.to_list()
        if has_shared_lib_extension(d.basename)
        for f in rpath.flags(go, d, executable = executable)
    ]))
    extldflags.extend(cgo_rpaths)
    # Process x_defs, and record whether stamping is used.
    stamp_x_defs = False
    for k, v in archive.x_defs.items():
        if go.stamp and v.find("{") != -1 and v.find("}") != -1:
            stamp_x_defs = True
        builder_args.add("-X", "%s=%s" % (k, v))
    # Stamping support
    stamp_inputs = []
    if stamp_x_defs:
        stamp_inputs = [info_file, version_file]
    builder_args.add_all(stamp_inputs, before_each = "-stamp")
    builder_args.add("-o", executable)
    builder_args.add("-main", archive.data.file)
    builder_args.add("-p", archive.data.importmap)
    tool_args.add_all(gc_linkopts)
    tool_args.add_all(go.toolchain.flags.link)
    # Do not remove, somehow this is needed when building for darwin/arm only.
    tool_args.add("-buildid=redacted")
    if go.mode.strip:
        tool_args.add("-w")
    tool_args.add_joined("-extldflags", extldflags, join_with = " ")
    conflict_err = _check_conflicts(arcs)
    if conflict_err:
        # Report package conflict errors in execution instead of analysis.
        # We could call fail() with this message, but Bazel prints a stack
        # that doesn't give useful information.
        builder_args.add("-conflict_err", conflict_err)
    inputs_direct = stamp_inputs + [go.sdk.package_list]
    if go.coverage_enabled and go.coverdata:
        inputs_direct.append(go.coverdata.data.file)
    inputs_transitive = [
        archive.libs,
        archive.cgo_deps,
        as_set(go.crosstool),
        as_set(go.sdk.tools),
        as_set(go.stdlib.libs),
    ]
    inputs = depset(direct = inputs_direct, transitive = inputs_transitive)
    go.actions.run(
        inputs = inputs,
        outputs = [executable],
        mnemonic = "GoLink",
        executable = go.toolchain._builder,
        arguments = [builder_args, "--", tool_args],
        env = go.env,
    )
def _extract_extldflags(gc_linkopts, extldflags):
    """Splits "-extldflags" value pairs out of gc_linkopts.

    Args:
        gc_linkopts: flags intended for the Go linker (make variables already
            expanded); "-extldflags" followed by a value may occur any number
            of times.
        extldflags: flags destined for the external linker; extracted values
            are appended to this list in place.
    Return:
        A (gc_linkopts, extldflags) tuple: the first element with every
        "-extldflags" pair removed, the second grown by their values. Each
        returned extldflags entry may hold several whitespace-separated flags.
    """
    remaining = []
    expect_value = False
    for opt in gc_linkopts:
        if expect_value:
            extldflags.append(opt)
            expect_value = False
        elif opt == "-extldflags":
            expect_value = True
        else:
            remaining.append(opt)
    return remaining, extldflags
def _check_conflicts(arcs):
    # Returns a human-readable error string if the archives handed to the
    # linker are inconsistent, or None when they can be linked together.
    # First pass: no two archives may share an importmap.
    importmap_to_label = {}
    for arc in arcs:
        if arc.importmap in importmap_to_label:
            return """package conflict error: {}: multiple copies of package passed to linker:
    {}
    {}
Set "importmap" to different paths or use 'bazel cquery' to ensure only one
package with this path is linked.""".format(
                arc.importmap,
                importmap_to_label[arc.importmap],
                arc.label,
            )
        importmap_to_label[arc.importmap] = arc.label
    # Second pass: every dependency each archive was compiled against must be
    # present, and must come from the same label it was compiled with.
    for arc in arcs:
        for dep_importmap, dep_label in zip(arc._dep_importmaps, arc._dep_labels):
            if dep_importmap not in importmap_to_label:
                return "package conflict error: {}: package needed by {} was not passed to linker".format(
                    dep_importmap,
                    arc.label,
                )
            if importmap_to_label[dep_importmap] != dep_label:
                err = """package conflict error: {}: package imports {}
  was compiled with: {}
but was linked with: {}""".format(
                    arc.importmap,
                    dep_importmap,
                    dep_label,
                    importmap_to_label[dep_importmap],
                )
                # A _test label usually means an external test pulled in the
                # library under test through a different path.
                if importmap_to_label[dep_importmap].name.endswith("_test"):
                    err += """
This sometimes happens when an external test (package ending with _test)
imports a package that imports the library being tested. This is not supported."""
                err += "\nSee https://github.com/bazelbuild/rules_go/issues/1877."
                return err
    return None
| 39.838583 | 106 | 0.658662 |
load(
"//go/private:common.bzl",
"as_set",
"has_shared_lib_extension",
)
load(
"//go/private:mode.bzl",
"LINKMODE_C_SHARED",
"LINKMODE_NORMAL",
"LINKMODE_PLUGIN",
"extld_from_cc_toolchain",
"extldflags_from_cc_toolchain",
)
load(
"//go/private:rpath.bzl",
"rpath",
)
load(
"@bazel_skylib//lib:collections.bzl",
"collections",
)
def _format_archive(d):
    # Renders an archive as "label=importmap=path" for the builder's -arc flag.
    return "{}={}={}".format(d.label, d.importmap, d.file.path)
def _transitive_archives_without_test_archives(archive, test_archives):
    # Transitive deps of the direct deps, minus any archive whose importmap is
    # claimed by an internal test archive (which replaces it at link time).
    deps = depset(transitive = [d.transitive for d in archive.direct])
    return [d for d in deps.to_list() if not any([d.importmap == t.importmap for t in test_archives])]
def emit_link(
        go,
        archive = None,
        test_archives = [],
        executable = None,
        gc_linkopts = [],
        version_file = None,
        info_file = None):
    """Registers a GoLink action producing `executable` from `archive`.

    See go/toolchains.rst#link for full documentation. `archive` and
    `executable` are required; "-extldflags" pairs in gc_linkopts are
    forwarded to the external linker.
    """
    if archive == None:
        fail("archive is a required parameter")
    if executable == None:
        fail("executable is a required parameter")
    # NOTE(review): config_strip / pkg_depth are computed but never used in
    # this function — looks like dead code; confirm before removing.
    config_strip = len(go._ctx.configuration.bin_dir.path) + 1
    pkg_depth = executable.dirname[config_strip:].count("/") + 1
    # Exclude -lstdc++ / -lc++ from the toolchain's external linker flags;
    # unless we actually have some C++ code. _cgo_codegen will include it
    # in archives via CGO_LDFLAGS if it's needed.
    extldflags = [f for f in extldflags_from_cc_toolchain(go) if f not in ("-lstdc++", "-lc++")]
    if go.coverage_enabled:
        extldflags.append("--coverage")
    gc_linkopts, extldflags = _extract_extldflags(gc_linkopts, extldflags)
    builder_args = go.builder_args(go, "link")
    tool_args = go.tool_args(go)
    # Mode-specific flags for the Go linker.
    tool_args.add_all(extld_from_cc_toolchain(go))
    if go.mode.race:
        tool_args.add("-race")
    if go.mode.msan:
        tool_args.add("-msan")
    if ((go.mode.static and not go.mode.pure) or
        go.mode.link != LINKMODE_NORMAL or
        go.mode.goos == "windows" and (go.mode.race or go.mode.msan)):
        # Force external linking: static+cgo, non-normal link modes, and
        # race/msan on Windows all need the C linker.
        tool_args.add("-linkmode", "external")
    if go.mode.pure:
        # Pure mode has no C toolchain,
        # so external linking is not possible.
        tool_args.add("-linkmode", "internal")
    if go.mode.static:
        extldflags.append("-static")
    if go.mode.link != LINKMODE_NORMAL:
        builder_args.add("-buildmode", go.mode.link)
    if go.mode.link == LINKMODE_PLUGIN:
        tool_args.add("-pluginpath", archive.data.importpath)
    # TODO: Rework when https://github.com/bazelbuild/bazel/pull/12304 is mainstream
    if go.mode.link == LINKMODE_C_SHARED and (go.mode.goos in ["darwin", "ios"]):
        extldflags.extend([
            "-install_name",
            rpath.install_name(executable),
        ])
    arcs = _transitive_archives_without_test_archives(archive, test_archives)
    arcs.extend(test_archives)
    if (go.coverage_enabled and go.coverdata and
        not any([arc.importmap == go.coverdata.data.importmap for arc in arcs])):
        arcs.append(go.coverdata.data)
    builder_args.add_all(arcs, before_each = "-arc", map_each = _format_archive)
    builder_args.add("-package_list", go.package_list)
    # Build a list of rpaths for dynamic libraries we need to find.
    # rpaths are relative paths from the binary to directories where libraries
    # are stored. Binaries that require these will only work when installed in
    # the bazel execroot. Most binaries are only dynamically linked against
    # system libraries though.
    cgo_rpaths = sorted(collections.uniq([
        f
        for d in archive.cgo_deps.to_list()
        if has_shared_lib_extension(d.basename)
        for f in rpath.flags(go, d, executable = executable)
    ]))
    extldflags.extend(cgo_rpaths)
    # Process x_defs, and record whether stamping is used.
    stamp_x_defs = False
    for k, v in archive.x_defs.items():
        if go.stamp and v.find("{") != -1 and v.find("}") != -1:
            stamp_x_defs = True
        builder_args.add("-X", "%s=%s" % (k, v))
    # Stamping support
    stamp_inputs = []
    if stamp_x_defs:
        stamp_inputs = [info_file, version_file]
    builder_args.add_all(stamp_inputs, before_each = "-stamp")
    builder_args.add("-o", executable)
    builder_args.add("-main", archive.data.file)
    builder_args.add("-p", archive.data.importmap)
    tool_args.add_all(gc_linkopts)
    tool_args.add_all(go.toolchain.flags.link)
    # Do not remove, somehow this is needed when building for darwin/arm only.
    tool_args.add("-buildid=redacted")
    if go.mode.strip:
        tool_args.add("-w")
    tool_args.add_joined("-extldflags", extldflags, join_with = " ")
    conflict_err = _check_conflicts(arcs)
    if conflict_err:
        # Report package conflict errors in execution instead of analysis;
        # fail() here would print an unhelpful stack.
        builder_args.add("-conflict_err", conflict_err)
    inputs_direct = stamp_inputs + [go.sdk.package_list]
    if go.coverage_enabled and go.coverdata:
        inputs_direct.append(go.coverdata.data.file)
    inputs_transitive = [
        archive.libs,
        archive.cgo_deps,
        as_set(go.crosstool),
        as_set(go.sdk.tools),
        as_set(go.stdlib.libs),
    ]
    inputs = depset(direct = inputs_direct, transitive = inputs_transitive)
    go.actions.run(
        inputs = inputs,
        outputs = [executable],
        mnemonic = "GoLink",
        executable = go.toolchain._builder,
        arguments = [builder_args, "--", tool_args],
        env = go.env,
    )
def _extract_extldflags(gc_linkopts, extldflags):
    # Moves every "-extldflags <value>" pair out of gc_linkopts, appending the
    # values to extldflags in place. Returns the cleaned gc_linkopts and the
    # grown extldflags; entries may hold several whitespace-separated flags.
    kept = []
    take_next = False
    for flag in gc_linkopts:
        if take_next:
            take_next = False
            extldflags.append(flag)
        elif flag == "-extldflags":
            take_next = True
        else:
            kept.append(flag)
    return kept, extldflags
def _check_conflicts(arcs):
    # Returns an error string when the archive set is inconsistent, else None.
    # Pass 1: each importmap may appear only once.
    importmap_to_label = {}
    for arc in arcs:
        if arc.importmap in importmap_to_label:
            return """package conflict error: {}: multiple copies of package passed to linker:
    {}
    {}
Set "importmap" to different paths or use 'bazel cquery' to ensure only one
package with this path is linked.""".format(
                arc.importmap,
                importmap_to_label[arc.importmap],
                arc.label,
            )
        importmap_to_label[arc.importmap] = arc.label
    # Pass 2: every compiled-against dependency must be present and must be
    # the same label it was compiled with.
    for arc in arcs:
        for dep_importmap, dep_label in zip(arc._dep_importmaps, arc._dep_labels):
            if dep_importmap not in importmap_to_label:
                return "package conflict error: {}: package needed by {} was not passed to linker".format(
                    dep_importmap,
                    arc.label,
                )
            if importmap_to_label[dep_importmap] != dep_label:
                err = """package conflict error: {}: package imports {}
  was compiled with: {}
but was linked with: {}""".format(
                    arc.importmap,
                    dep_importmap,
                    dep_label,
                    importmap_to_label[dep_importmap],
                )
                if importmap_to_label[dep_importmap].name.endswith("_test"):
                    err += """
This sometimes happens when an external test (package ending with _test)
imports a package that imports the library being tested. This is not supported."""
                err += "\nSee https://github.com/bazelbuild/rules_go/issues/1877."
                return err
    return None
| true | true |
1c3b4a4e3e663c47a5fcc888a631a24a374932c2 | 2,249 | py | Python | examples/07-filter/05-render.py | pepsipepsi/nodebox_opengl_python3 | cfb2633df1055a028672b11311603cc2241a1378 | [
"BSD-3-Clause"
] | 1 | 2017-03-19T16:56:46.000Z | 2017-03-19T16:56:46.000Z | examples/07-filter/05-render.py | pepsipepsi/nodebox_opengl_python3 | cfb2633df1055a028672b11311603cc2241a1378 | [
"BSD-3-Clause"
] | null | null | null | examples/07-filter/05-render.py | pepsipepsi/nodebox_opengl_python3 | cfb2633df1055a028672b11311603cc2241a1378 | [
"BSD-3-Clause"
] | null | null | null | import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
from nodebox.graphics.shader import render, blur
# render invokes psyco, and old compiler I'll need to replace here
# The render() command executes a function with drawing commands
# in an offscreen (i.e. hidden) canvas and returns an Image object.
# This is useful if you want to apply filters to text, ellipses, etc.
def hello():
    """Render the procedural composition: three translucent circles + text."""
    circles = (
        ((1, 0, 0), 120, 120),  # red
        ((0, 1, 0), 170, 120),  # green
        ((0, 0, 1), 145, 160),  # blue
    )
    for (r, g, b), cx, cy in circles:
        fill(r, g, b, 0.5)  # 50% alpha so the overlaps blend
        ellipse(cx, cy, 200, 200)
    fill(0)
    font("Droid Serif")
    text("hello", x=0, y=90, fontsize=70, width=300, align=CENTER)
# We call this a "procedural" image, because it is entirely created in code.
# Procedural images can be useful in many ways:
# - applying effects to text,
# - caching a complex composition that is not frequently updated (for speed),
# - creating on-the-fly textures or shapes that are different every time,
# - using NodeBox from the command line without opening an application window.
img = render(function=hello, width=300, height=300)
# Note that we make the width and height of the offscreen canvas
# a little bit larger than the actual composition (300x300 vs. ~250x250).
# This creates a transparent border, so effects don't get cut off
# at the edge of the rendered image.
# Images can be saved to file, even without starting canvas.run().
# To try it out, uncomment the following line:
#img.save("hello.png")
def draw(canvas):
    """Per-frame callback: blurred rendered image next to live-drawn shapes."""
    canvas.clear()
    # Blur amount follows the horizontal mouse position (0.0 - 1.0).
    image(blur(img, scale=canvas.mouse.relative_x), 20, 100)
    # The same composition drawn directly, for comparison. The rendered image
    # may show jagged edges; a soft blur helps hide them.
    translate(300, 100)
    shapes = (((1, 0, 0), 120, 120), ((0, 1, 0), 170, 120), ((0, 0, 1), 145, 160))
    for (r, g, b), cx, cy in shapes:
        fill(r, g, b, 0.5)
        ellipse(cx, cy, 200, 200)
# Start the application: 600x500 window at 60 frames per second.
canvas.fps = 60
canvas.size = 600, 500
canvas.run(draw)
| 34.6 | 78 | 0.692308 | import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
from nodebox.graphics.shader import render, blur
# The render() command executes a function with drawing commands
# in an offscreen (i.e. hidden) canvas and returns an Image object.
# This is useful if you want to apply filters to text, ellipses, etc.
def hello():
    """Draw three overlapping translucent circles with a text caption."""
    fill(1, 0, 0, 0.5) # Transparent red.
    ellipse(120, 120, 200, 200)
    fill(0, 1, 0, 0.5) # Transparent green.
    ellipse(170, 120, 200, 200)
    fill(0, 0, 1, 0.5) # Transparent blue.
    ellipse(145, 160, 200, 200)
    fill(0)
    font("Droid Serif")
    text("hello", x=0, y=90, fontsize=70, width=300, align=CENTER)
# We call this a "procedural" image, because it is entirely created in code.
# Procedural images can be useful in many ways:
# - applying effects to text,
# - caching a complex composition that is not frequently updated (for speed),
# - creating on-the-fly textures or shapes that are different every time,
# - using NodeBox from the command line without opening an application window.
img = render(function=hello, width=300, height=300)
# Note that we make the width and height of the offscreen canvas
# a little bit larger than the actual composition.
# This creates a transparent border, so effects don't get cut off
def draw(canvas):
    """Per-frame callback: blurred rendered image vs. directly drawn shapes."""
    canvas.clear()
    # Blur amount follows the horizontal mouse position (0.0 - 1.0).
    image(blur(img, scale=canvas.mouse.relative_x), 20, 100)
    # Same composition drawn live, for comparison with the rendered image.
    translate(300,100)
    fill(1, 0, 0, 0.5)
    ellipse(120, 120, 200, 200)
    fill(0, 1, 0, 0.5)
    ellipse(170, 120, 200, 200)
    fill(0, 0, 1, 0.5)
    ellipse(145, 160, 200, 200)
# Start the application window (600x500, 60 fps).
canvas.fps = 60
canvas.size = 600, 500
canvas.run(draw)
| true | true |
1c3b4b0d67207d18e3ba44a1ef87f6b19942597e | 15,842 | py | Python | tests/components/filter/test_sensor.py | dlintott/core | a6c83cc46a34084fdc4c0e7221b6ba493f82cbac | [
"Apache-2.0"
] | 1 | 2020-12-24T23:23:24.000Z | 2020-12-24T23:23:24.000Z | tests/components/filter/test_sensor.py | dlintott/core | a6c83cc46a34084fdc4c0e7221b6ba493f82cbac | [
"Apache-2.0"
] | 48 | 2021-01-06T07:02:41.000Z | 2022-03-31T06:10:45.000Z | tests/components/filter/test_sensor.py | dlintott/core | a6c83cc46a34084fdc4c0e7221b6ba493f82cbac | [
"Apache-2.0"
] | 2 | 2021-07-14T20:22:04.000Z | 2021-09-22T08:56:16.000Z | """The test for the data filter sensor platform."""
from datetime import timedelta
from os import path
from pytest import fixture
from homeassistant import config as hass_config
from homeassistant.components.filter.sensor import (
DOMAIN,
LowPassFilter,
OutlierFilter,
RangeFilter,
ThrottleFilter,
TimeSMAFilter,
TimeThrottleFilter,
)
from homeassistant.components.sensor import DEVICE_CLASS_TEMPERATURE
from homeassistant.const import SERVICE_RELOAD, STATE_UNAVAILABLE, STATE_UNKNOWN
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import assert_setup_component, async_init_recorder_component
@fixture
def values():
    """Fixture for a list of test States."""
    # One State per raw value, spaced a minute apart so time-based filters
    # see a realistic timeline.
    start = dt_util.utcnow()
    return [
        ha.State("sensor.test_monitored", raw, last_updated=start + timedelta(minutes=i))
        for i, raw in enumerate([20, 19, 18, 21, 22, 0])
    ]
async def test_setup_fail(hass):
    """Test if filter doesn't exist."""
    bad_config = {
        "sensor": {
            "platform": "filter",
            "entity_id": "sensor.test_monitored",
            "filters": [{"filter": "nonexisting"}],
        }
    }
    # An unknown filter name must result in zero platforms being set up.
    with assert_setup_component(0):
        assert await async_setup_component(hass, "sensor", bad_config)
        await hass.async_block_till_done()
async def test_chain(hass, values):
    """Test if filter chaining works."""
    entity_id = "sensor.test_monitored"
    config = {
        "sensor": {
            "platform": "filter",
            "name": "test",
            "entity_id": entity_id,
            "filters": [
                {"filter": "outlier", "window_size": 10, "radius": 4.0},
                {"filter": "lowpass", "time_constant": 10, "precision": 2},
                {"filter": "throttle", "window_size": 1},
            ],
        }
    }
    await async_init_recorder_component(hass)
    with assert_setup_component(1, "sensor"):
        assert await async_setup_component(hass, "sensor", config)
        await hass.async_block_till_done()
    # Feed the monitored entity and verify the chained result.
    for value in values:
        hass.states.async_set(entity_id, value.state)
        await hass.async_block_till_done()
    assert hass.states.get("sensor.test").state == "18.05"
async def test_chain_history(hass, values, missing=False):
    """Test if filter chaining works when restoring states from history.

    With `missing=True` the recorder has no states for the source entity, so
    only the live values contribute; otherwise four historical states are
    replayed first and shift the lowpass result.
    """
    config = {
        "history": {},
        "sensor": {
            "platform": "filter",
            "name": "test",
            "entity_id": "sensor.test_monitored",
            "filters": [
                {"filter": "outlier", "window_size": 10, "radius": 4.0},
                {"filter": "lowpass", "time_constant": 10, "precision": 2},
                {"filter": "throttle", "window_size": 1},
            ],
        },
    }
    await async_init_recorder_component(hass)
    # NOTE(review): assert_setup_component is a context manager; calling it
    # without `with` makes this line a no-op — confirm intent.
    assert_setup_component(1, "history")
    t_0 = dt_util.utcnow() - timedelta(minutes=1)
    t_1 = dt_util.utcnow() - timedelta(minutes=2)
    t_2 = dt_util.utcnow() - timedelta(minutes=3)
    t_3 = dt_util.utcnow() - timedelta(minutes=4)
    if missing:
        fake_states = {}
    else:
        # The "unknown" state must be skipped by the history replay.
        fake_states = {
            "sensor.test_monitored": [
                ha.State("sensor.test_monitored", 18.0, last_changed=t_0),
                ha.State("sensor.test_monitored", "unknown", last_changed=t_1),
                ha.State("sensor.test_monitored", 19.0, last_changed=t_2),
                ha.State("sensor.test_monitored", 18.2, last_changed=t_3),
            ]
        }
    with patch(
        "homeassistant.components.history.state_changes_during_period",
        return_value=fake_states,
    ):
        with patch(
            "homeassistant.components.history.get_last_state_changes",
            return_value=fake_states,
        ):
            with assert_setup_component(1, "sensor"):
                assert await async_setup_component(hass, "sensor", config)
                await hass.async_block_till_done()
            for value in values:
                hass.states.async_set(config["sensor"]["entity_id"], value.state)
                await hass.async_block_till_done()
            state = hass.states.get("sensor.test")
            if missing:
                assert "18.05" == state.state
            else:
                assert "17.05" == state.state
async def test_source_state_none(hass, values):
    """Test that a null source state sets the filter state to STATE_UNKNOWN.

    A template sensor is used as the source; reloading the template platform
    removes it, driving the source state to None.
    """
    await async_init_recorder_component(hass)
    config = {
        "sensor": [
            {
                "platform": "template",
                "sensors": {
                    "template_test": {
                        "value_template": "{{ states.sensor.test_state.state }}"
                    }
                },
            },
            {
                "platform": "filter",
                "name": "test",
                "entity_id": "sensor.template_test",
                "filters": [
                    {
                        "filter": "time_simple_moving_average",
                        "window_size": "00:01",
                        "precision": "2",
                    }
                ],
            },
        ]
    }
    await async_setup_component(hass, "sensor", config)
    await hass.async_block_till_done()
    hass.states.async_set("sensor.test_state", 0)
    await hass.async_block_till_done()
    state = hass.states.get("sensor.template_test")
    assert state.state == "0"
    await hass.async_block_till_done()
    state = hass.states.get("sensor.test")
    assert state.state == "0.0"
    # Force Template Reload with a config that drops the template sensor.
    yaml_path = path.join(
        _get_fixtures_base_path(),
        "fixtures",
        "template/sensor_configuration.yaml",
    )
    with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
        await hass.services.async_call(
            "template",
            SERVICE_RELOAD,
            {},
            blocking=True,
        )
        await hass.async_block_till_done()
    # Template state gets to None
    state = hass.states.get("sensor.template_test")
    assert state is None
    # Filter sensor ignores None state setting state to STATE_UNKNOWN
    state = hass.states.get("sensor.test")
    assert state.state == STATE_UNKNOWN
async def test_chain_history_missing(hass, values):
    """Verify chaining still works when the recorder has no source states."""
    await test_chain_history(hass, values, missing=True)
async def test_history_time(hass):
    """Test loading from history based on a time window."""
    config = {
        "history": {},
        "sensor": {
            "platform": "filter",
            "name": "test",
            "entity_id": "sensor.test_monitored",
            "filters": [{"filter": "time_throttle", "window_size": "00:01"}],
        },
    }
    await async_init_recorder_component(hass)
    # NOTE(review): assert_setup_component is a context manager; calling it
    # without `with` makes this line a no-op — confirm intent.
    assert_setup_component(1, "history")
    t_0 = dt_util.utcnow() - timedelta(minutes=1)
    t_1 = dt_util.utcnow() - timedelta(minutes=2)
    t_2 = dt_util.utcnow() - timedelta(minutes=3)
    fake_states = {
        "sensor.test_monitored": [
            ha.State("sensor.test_monitored", 18.0, last_changed=t_0),
            ha.State("sensor.test_monitored", 19.0, last_changed=t_1),
            ha.State("sensor.test_monitored", 18.2, last_changed=t_2),
        ]
    }
    with patch(
        "homeassistant.components.history.state_changes_during_period",
        return_value=fake_states,
    ):
        with patch(
            "homeassistant.components.history.get_last_state_changes",
            return_value=fake_states,
        ):
            with assert_setup_component(1, "sensor"):
                assert await async_setup_component(hass, "sensor", config)
                await hass.async_block_till_done()
            await hass.async_block_till_done()
            # Only the newest state inside the one-minute window survives.
            state = hass.states.get("sensor.test")
            assert "18.0" == state.state
async def test_setup(hass):
    """Test if filter attributes are inherited."""
    config = {
        "sensor": {
            "platform": "filter",
            "name": "test",
            "entity_id": "sensor.test_monitored",
            "filters": [
                {"filter": "outlier", "window_size": 10, "radius": 4.0},
            ],
        }
    }
    await async_init_recorder_component(hass)
    with assert_setup_component(1, "sensor"):
        assert await async_setup_component(hass, "sensor", config)
        await hass.async_block_till_done()
    source_attributes = {"icon": "mdi:test", "device_class": DEVICE_CLASS_TEMPERATURE}
    hass.states.async_set("sensor.test_monitored", 1, source_attributes)
    await hass.async_block_till_done()
    # Icon and device class must be copied over from the monitored entity.
    state = hass.states.get("sensor.test")
    assert state.attributes["icon"] == "mdi:test"
    assert state.attributes["device_class"] == DEVICE_CLASS_TEMPERATURE
    assert state.state == "1.0"
async def test_invalid_state(hass):
    """Test that invalid source states make the filter sensor unavailable."""
    config = {
        "sensor": {
            "platform": "filter",
            "name": "test",
            "entity_id": "sensor.test_monitored",
            "filters": [
                {"filter": "outlier", "window_size": 10, "radius": 4.0},
            ],
        }
    }
    await async_init_recorder_component(hass)
    with assert_setup_component(1, "sensor"):
        assert await async_setup_component(hass, "sensor", config)
        await hass.async_block_till_done()
    # Both an unavailable source and a non-numeric source state should leave
    # the filter sensor unavailable.
    for bad_value in (STATE_UNAVAILABLE, "invalid"):
        hass.states.async_set("sensor.test_monitored", bad_value)
        await hass.async_block_till_done()
        assert hass.states.get("sensor.test").state == STATE_UNAVAILABLE
async def test_outlier(values):
    """Test if outlier filter works."""
    outlier = OutlierFilter(window_size=3, precision=2, entity=None, radius=4.0)
    result = None
    for state in values:
        result = outlier.filter_state(state)
    # The final 0 reading is an outlier and must be replaced by the median 21.
    assert result.state == 21
def test_outlier_step(values):
    """Check outlier handling of a persistent step change.

    Once just over half of window_size holds post-step values, the filter
    must stop rejecting them and pass the new level through.
    """
    step_filter = OutlierFilter(window_size=3, precision=2, entity=None, radius=1.1)
    values[-1].state = 22
    filtered = None
    for state in values:
        filtered = step_filter.filter_state(state)
    assert filtered.state == 22
def test_initial_outlier(values):
    """Regression test for issue #13363: a huge first reading."""
    outlier = OutlierFilter(window_size=3, precision=2, entity=None, radius=4.0)
    spike = ha.State("sensor.test_monitored", 4000)
    filtered = None
    for state in [spike] + values:
        filtered = outlier.filter_state(state)
    assert filtered.state == 21
def test_unknown_state_outlier(values):
    """Regression test for issue #32395: non-numeric states raise ValueError."""
    outlier = OutlierFilter(window_size=3, precision=2, entity=None, radius=4.0)
    unknown = ha.State("sensor.test_monitored", "unknown")
    for state in [unknown] + values + [unknown]:
        try:
            filtered = outlier.filter_state(state)
        except ValueError:
            # Only the injected "unknown" sentinel states may raise.
            assert state.state == "unknown"
    assert filtered.state == 21
def test_precision_zero(values):
    """Test if precision of zero returns an integer."""
    lowpass = LowPassFilter(window_size=10, precision=0, entity=None, time_constant=10)
    result = None
    for state in values:
        result = lowpass.filter_state(state)
    assert isinstance(result.state, int)
def test_lowpass(values):
    """Test if lowpass filter works."""
    lowpass = LowPassFilter(window_size=10, precision=2, entity=None, time_constant=10)
    unknown = ha.State("sensor.test_monitored", "unknown")
    for state in [unknown] + values + [unknown]:
        try:
            filtered = lowpass.filter_state(state)
        except ValueError:
            # Only the injected "unknown" sentinel states may raise.
            assert state.state == "unknown"
    assert filtered.state == 18.05
def test_range(values):
    """Test if range filter works."""
    lower_bound, upper_bound = 10, 20
    range_filter = RangeFilter(
        entity=None, precision=2, lower_bound=lower_bound, upper_bound=upper_bound
    )
    # Every reading must be clamped into [lower_bound, upper_bound].
    for unfiltered in values:
        expected = min(max(float(unfiltered.state), lower_bound), upper_bound)
        assert range_filter.filter_state(unfiltered).state == expected
def test_range_zero(values):
    """Test if range filter works with zeroes as bounds."""
    bound = 0
    range_filter = RangeFilter(
        entity=None, precision=2, lower_bound=bound, upper_bound=bound
    )
    # With a degenerate [0, 0] range every reading must clamp to 0.
    for unfiltered in values:
        expected = min(max(float(unfiltered.state), bound), bound)
        assert range_filter.filter_state(unfiltered).state == expected
def test_throttle(values):
    """Test that the throttle filter emits only the first value per window."""
    # Note: the previous docstring said "lowpass" — copy-paste error.
    filt = ThrottleFilter(window_size=3, precision=2, entity=None)
    filtered = []
    for state in values:
        new_state = filt.filter_state(state)
        if not filt.skip_processing:
            filtered.append(new_state)
    # Six inputs in windows of three -> first value of each window: 20 and 21.
    assert [20, 21] == [f.state for f in filtered]
def test_time_throttle(values):
    """Test that the time_throttle filter emits one value per time window."""
    # Note: the previous docstring said "lowpass" — copy-paste error.
    filt = TimeThrottleFilter(
        window_size=timedelta(minutes=2), precision=2, entity=None
    )
    filtered = []
    for state in values:
        new_state = filt.filter_state(state)
        if not filt.skip_processing:
            filtered.append(new_state)
    # Values arrive one minute apart -> one output per two-minute window.
    assert [20, 18, 22] == [f.state for f in filtered]
def test_time_sma(values):
    """Test if time_sma filter works."""
    sma = TimeSMAFilter(
        window_size=timedelta(minutes=2), precision=2, entity=None, type="last"
    )
    result = None
    for state in values:
        result = sma.filter_state(state)
    assert result.state == 21.5
async def test_reload(hass):
    """Verify we can reload filter sensors."""
    await async_init_recorder_component(hass)
    hass.states.async_set("sensor.test_monitored", 12345)
    await async_setup_component(
        hass,
        "sensor",
        {
            "sensor": {
                "platform": "filter",
                "name": "test",
                "entity_id": "sensor.test_monitored",
                "filters": [
                    {"filter": "outlier", "window_size": 10, "radius": 4.0},
                    {"filter": "lowpass", "time_constant": 10, "precision": 2},
                    {"filter": "throttle", "window_size": 1},
                ],
            }
        },
    )
    await hass.async_block_till_done()
    await hass.async_start()
    await hass.async_block_till_done()
    # Source entity plus the filter sensor.
    assert len(hass.states.async_all()) == 2
    assert hass.states.get("sensor.test")
    # Reload the filter integration from a fixture that replaces "test" with
    # "filtered_realistic_humidity".
    yaml_path = path.join(
        _get_fixtures_base_path(),
        "fixtures",
        "filter/configuration.yaml",
    )
    with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
        await hass.services.async_call(
            DOMAIN,
            SERVICE_RELOAD,
            {},
            blocking=True,
        )
        await hass.async_block_till_done()
    assert len(hass.states.async_all()) == 2
    assert hass.states.get("sensor.test") is None
    assert hass.states.get("sensor.filtered_realistic_humidity")
def _get_fixtures_base_path():
    """Return the tests/ base directory, three levels above this file."""
    base = __file__
    for _ in range(3):
        base = path.dirname(base)
    return base
| 32.00404 | 96 | 0.603585 | from datetime import timedelta
from os import path
from pytest import fixture
from homeassistant import config as hass_config
from homeassistant.components.filter.sensor import (
DOMAIN,
LowPassFilter,
OutlierFilter,
RangeFilter,
ThrottleFilter,
TimeSMAFilter,
TimeThrottleFilter,
)
from homeassistant.components.sensor import DEVICE_CLASS_TEMPERATURE
from homeassistant.const import SERVICE_RELOAD, STATE_UNAVAILABLE, STATE_UNKNOWN
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import assert_setup_component, async_init_recorder_component
@fixture
def values():
    """Build a list of timestamped test States, one minute apart."""
    values = []
    raw_values = [20, 19, 18, 21, 22, 0]
    timestamp = dt_util.utcnow()
    for val in raw_values:
        values.append(ha.State("sensor.test_monitored", val, last_updated=timestamp))
        timestamp += timedelta(minutes=1)
    return values
async def test_setup_fail(hass):
    """An unknown filter name must set up zero sensor platforms."""
    config = {
        "sensor": {
            "platform": "filter",
            "entity_id": "sensor.test_monitored",
            "filters": [{"filter": "nonexisting"}],
        }
    }
    with assert_setup_component(0):
        assert await async_setup_component(hass, "sensor", config)
        await hass.async_block_till_done()
async def test_chain(hass, values):
    """Chained outlier -> lowpass -> throttle filters yield 18.05."""
    config = {
        "sensor": {
            "platform": "filter",
            "name": "test",
            "entity_id": "sensor.test_monitored",
            "filters": [
                {"filter": "outlier", "window_size": 10, "radius": 4.0},
                {"filter": "lowpass", "time_constant": 10, "precision": 2},
                {"filter": "throttle", "window_size": 1},
            ],
        }
    }
    await async_init_recorder_component(hass)
    with assert_setup_component(1, "sensor"):
        assert await async_setup_component(hass, "sensor", config)
        await hass.async_block_till_done()
        for value in values:
            hass.states.async_set(config["sensor"]["entity_id"], value.state)
            await hass.async_block_till_done()
        state = hass.states.get("sensor.test")
        assert "18.05" == state.state
async def test_chain_history(hass, values, missing=False):
config = {
"history": {},
"sensor": {
"platform": "filter",
"name": "test",
"entity_id": "sensor.test_monitored",
"filters": [
{"filter": "outlier", "window_size": 10, "radius": 4.0},
{"filter": "lowpass", "time_constant": 10, "precision": 2},
{"filter": "throttle", "window_size": 1},
],
},
}
await async_init_recorder_component(hass)
assert_setup_component(1, "history")
t_0 = dt_util.utcnow() - timedelta(minutes=1)
t_1 = dt_util.utcnow() - timedelta(minutes=2)
t_2 = dt_util.utcnow() - timedelta(minutes=3)
t_3 = dt_util.utcnow() - timedelta(minutes=4)
if missing:
fake_states = {}
else:
fake_states = {
"sensor.test_monitored": [
ha.State("sensor.test_monitored", 18.0, last_changed=t_0),
ha.State("sensor.test_monitored", "unknown", last_changed=t_1),
ha.State("sensor.test_monitored", 19.0, last_changed=t_2),
ha.State("sensor.test_monitored", 18.2, last_changed=t_3),
]
}
with patch(
"homeassistant.components.history.state_changes_during_period",
return_value=fake_states,
):
with patch(
"homeassistant.components.history.get_last_state_changes",
return_value=fake_states,
):
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
for value in values:
hass.states.async_set(config["sensor"]["entity_id"], value.state)
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
if missing:
assert "18.05" == state.state
else:
assert "17.05" == state.state
async def test_source_state_none(hass, values):
await async_init_recorder_component(hass)
config = {
"sensor": [
{
"platform": "template",
"sensors": {
"template_test": {
"value_template": "{{ states.sensor.test_state.state }}"
}
},
},
{
"platform": "filter",
"name": "test",
"entity_id": "sensor.template_test",
"filters": [
{
"filter": "time_simple_moving_average",
"window_size": "00:01",
"precision": "2",
}
],
},
]
}
await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_state", 0)
await hass.async_block_till_done()
state = hass.states.get("sensor.template_test")
assert state.state == "0"
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert state.state == "0.0"
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"template/sensor_configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
"template",
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get("sensor.template_test")
assert state is None
state = hass.states.get("sensor.test")
assert state.state == STATE_UNKNOWN
async def test_chain_history_missing(hass, values):
await test_chain_history(hass, values, missing=True)
async def test_history_time(hass):
config = {
"history": {},
"sensor": {
"platform": "filter",
"name": "test",
"entity_id": "sensor.test_monitored",
"filters": [{"filter": "time_throttle", "window_size": "00:01"}],
},
}
await async_init_recorder_component(hass)
assert_setup_component(1, "history")
t_0 = dt_util.utcnow() - timedelta(minutes=1)
t_1 = dt_util.utcnow() - timedelta(minutes=2)
t_2 = dt_util.utcnow() - timedelta(minutes=3)
fake_states = {
"sensor.test_monitored": [
ha.State("sensor.test_monitored", 18.0, last_changed=t_0),
ha.State("sensor.test_monitored", 19.0, last_changed=t_1),
ha.State("sensor.test_monitored", 18.2, last_changed=t_2),
]
}
with patch(
"homeassistant.components.history.state_changes_during_period",
return_value=fake_states,
):
with patch(
"homeassistant.components.history.get_last_state_changes",
return_value=fake_states,
):
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert "18.0" == state.state
async def test_setup(hass):
config = {
"sensor": {
"platform": "filter",
"name": "test",
"entity_id": "sensor.test_monitored",
"filters": [
{"filter": "outlier", "window_size": 10, "radius": 4.0},
],
}
}
await async_init_recorder_component(hass)
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
hass.states.async_set(
"sensor.test_monitored",
1,
{"icon": "mdi:test", "device_class": DEVICE_CLASS_TEMPERATURE},
)
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert state.attributes["icon"] == "mdi:test"
assert state.attributes["device_class"] == DEVICE_CLASS_TEMPERATURE
assert state.state == "1.0"
async def test_invalid_state(hass):
config = {
"sensor": {
"platform": "filter",
"name": "test",
"entity_id": "sensor.test_monitored",
"filters": [
{"filter": "outlier", "window_size": 10, "radius": 4.0},
],
}
}
await async_init_recorder_component(hass)
with assert_setup_component(1, "sensor"):
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", STATE_UNAVAILABLE)
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert state.state == STATE_UNAVAILABLE
hass.states.async_set("sensor.test_monitored", "invalid")
await hass.async_block_till_done()
state = hass.states.get("sensor.test")
assert state.state == STATE_UNAVAILABLE
async def test_outlier(values):
filt = OutlierFilter(window_size=3, precision=2, entity=None, radius=4.0)
for state in values:
filtered = filt.filter_state(state)
assert 21 == filtered.state
def test_outlier_step(values):
filt = OutlierFilter(window_size=3, precision=2, entity=None, radius=1.1)
values[-1].state = 22
for state in values:
filtered = filt.filter_state(state)
assert 22 == filtered.state
def test_initial_outlier(values):
filt = OutlierFilter(window_size=3, precision=2, entity=None, radius=4.0)
out = ha.State("sensor.test_monitored", 4000)
for state in [out] + values:
filtered = filt.filter_state(state)
assert 21 == filtered.state
def test_unknown_state_outlier(values):
filt = OutlierFilter(window_size=3, precision=2, entity=None, radius=4.0)
out = ha.State("sensor.test_monitored", "unknown")
for state in [out] + values + [out]:
try:
filtered = filt.filter_state(state)
except ValueError:
assert state.state == "unknown"
assert 21 == filtered.state
def test_precision_zero(values):
filt = LowPassFilter(window_size=10, precision=0, entity=None, time_constant=10)
for state in values:
filtered = filt.filter_state(state)
assert isinstance(filtered.state, int)
def test_lowpass(values):
filt = LowPassFilter(window_size=10, precision=2, entity=None, time_constant=10)
out = ha.State("sensor.test_monitored", "unknown")
for state in [out] + values + [out]:
try:
filtered = filt.filter_state(state)
except ValueError:
assert state.state == "unknown"
assert 18.05 == filtered.state
def test_range(values):
lower = 10
upper = 20
filt = RangeFilter(entity=None, precision=2, lower_bound=lower, upper_bound=upper)
for unf_state in values:
unf = float(unf_state.state)
filtered = filt.filter_state(unf_state)
if unf < lower:
assert lower == filtered.state
elif unf > upper:
assert upper == filtered.state
else:
assert unf == filtered.state
def test_range_zero(values):
lower = 0
upper = 0
filt = RangeFilter(entity=None, precision=2, lower_bound=lower, upper_bound=upper)
for unf_state in values:
unf = float(unf_state.state)
filtered = filt.filter_state(unf_state)
if unf < lower:
assert lower == filtered.state
elif unf > upper:
assert upper == filtered.state
else:
assert unf == filtered.state
def test_throttle(values):
filt = ThrottleFilter(window_size=3, precision=2, entity=None)
filtered = []
for state in values:
new_state = filt.filter_state(state)
if not filt.skip_processing:
filtered.append(new_state)
assert [20, 21] == [f.state for f in filtered]
def test_time_throttle(values):
filt = TimeThrottleFilter(
window_size=timedelta(minutes=2), precision=2, entity=None
)
filtered = []
for state in values:
new_state = filt.filter_state(state)
if not filt.skip_processing:
filtered.append(new_state)
assert [20, 18, 22] == [f.state for f in filtered]
def test_time_sma(values):
filt = TimeSMAFilter(
window_size=timedelta(minutes=2), precision=2, entity=None, type="last"
)
for state in values:
filtered = filt.filter_state(state)
assert 21.5 == filtered.state
async def test_reload(hass):
await async_init_recorder_component(hass)
hass.states.async_set("sensor.test_monitored", 12345)
await async_setup_component(
hass,
"sensor",
{
"sensor": {
"platform": "filter",
"name": "test",
"entity_id": "sensor.test_monitored",
"filters": [
{"filter": "outlier", "window_size": 10, "radius": 4.0},
{"filter": "lowpass", "time_constant": 10, "precision": 2},
{"filter": "throttle", "window_size": 1},
],
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("sensor.test")
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"filter/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("sensor.test") is None
assert hass.states.get("sensor.filtered_realistic_humidity")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
| true | true |
1c3b4b384361661b0a0d69363bc38a617ec79aba | 1,415 | py | Python | hapic/error/serpyco.py | raphj/hapic | b169ee901005bbe535e27ec878a051c2c1226e43 | [
"MIT"
] | 20 | 2017-10-13T11:23:33.000Z | 2021-12-09T12:42:06.000Z | hapic/error/serpyco.py | raphj/hapic | b169ee901005bbe535e27ec878a051c2c1226e43 | [
"MIT"
] | 130 | 2017-10-10T15:09:13.000Z | 2021-12-30T10:36:08.000Z | hapic/error/serpyco.py | raphj/hapic | b169ee901005bbe535e27ec878a051c2c1226e43 | [
"MIT"
] | 7 | 2017-10-17T07:24:42.000Z | 2021-09-16T14:33:17.000Z | # coding: utf-8
import dataclasses
import typing
from hapic.error.main import DefaultErrorBuilder
from hapic.processor.main import ProcessValidationError
from hapic.type import TYPE_SCHEMA
@dataclasses.dataclass
class DefaultErrorSchema(object):
    """Serpyco-serializable shape of a default hapic error response.

    Mirrors the dict produced by ``DefaultErrorBuilder``: a
    human-readable ``message``, optional structured ``details`` and an
    application-specific error ``code``.
    """

    message: str
    # ``dict`` itself is the idiomatic zero-argument factory — no lambda
    # needed, and each instance still gets its own fresh mapping.
    details: typing.Dict[str, typing.Any] = dataclasses.field(default_factory=dict)
    # A plain default is equivalent to field(default=None) and simpler.
    code: typing.Any = None
class SerpycoDefaultErrorBuilder(DefaultErrorBuilder):
    """Error builder that returns ``DefaultErrorSchema`` dataclass instances.

    Delegates the actual error-dict construction to
    ``DefaultErrorBuilder`` and only adapts the result into the
    serpyco-compatible dataclass.
    """

    def get_schema(self) -> TYPE_SCHEMA:
        """Return the dataclass used to serialize error responses."""
        return DefaultErrorSchema

    @staticmethod
    def _as_schema(data):
        # Adapt the plain error dict from the parent builder into the
        # dataclass shape expected by serpyco.
        return DefaultErrorSchema(
            message=data["message"],
            details=data["details"],
            code=data["code"],
        )

    def build_from_exception(
        self, exception: Exception, include_traceback: bool = False
    ) -> DefaultErrorSchema:
        """
        See hapic.error.ErrorBuilderInterface#build_from_exception docstring
        """
        data = super().build_from_exception(exception, include_traceback)
        return self._as_schema(data)

    def build_from_validation_error(self, error: ProcessValidationError) -> DefaultErrorSchema:
        """
        See hapic.error.ErrorBuilderInterface#build_from_validation_error
        docstring
        """
        return self._as_schema(super().build_from_validation_error(error))
import dataclasses
import typing
from hapic.error.main import DefaultErrorBuilder
from hapic.processor.main import ProcessValidationError
from hapic.type import TYPE_SCHEMA
@dataclasses.dataclass
class DefaultErrorSchema(object):
message: str
details: typing.Dict[str, typing.Any] = dataclasses.field(default_factory=lambda: {})
code: typing.Any = dataclasses.field(default=None)
class SerpycoDefaultErrorBuilder(DefaultErrorBuilder):
def get_schema(self) -> TYPE_SCHEMA:
return DefaultErrorSchema
def build_from_exception(
self, exception: Exception, include_traceback: bool = False
) -> DefaultErrorSchema:
error_dict = super().build_from_exception(exception, include_traceback)
return DefaultErrorSchema(
message=error_dict["message"], details=error_dict["details"], code=error_dict["code"]
)
def build_from_validation_error(self, error: ProcessValidationError) -> DefaultErrorSchema:
error_dict = super().build_from_validation_error(error)
return DefaultErrorSchema(
message=error_dict["message"], details=error_dict["details"], code=error_dict["code"]
)
| true | true |
1c3b4c1ef04754816e4ee8dd71bc0b72e79d526e | 672 | py | Python | bitirmetezi/venv/Lib/site-packages/plot/parameter/update.py | busraltun/IMPLEMENTATIONOFEYECONTROLLEDVIRTUALKEYBOARD | fa3a9b150419a17aa82f41b068a5d69d0ff0d0f3 | [
"MIT"
] | 1 | 2020-04-10T08:14:43.000Z | 2020-04-10T08:14:43.000Z | bitirmetezi/venv/Lib/site-packages/plot/parameter/update.py | busraltun/IMPLEMENTATIONOFEYECONTROLLEDVIRTUALKEYBOARD | fa3a9b150419a17aa82f41b068a5d69d0ff0d0f3 | [
"MIT"
] | 1 | 2016-11-30T20:37:27.000Z | 2016-12-12T11:55:50.000Z | bitirmetezi/venv/Lib/site-packages/plot/parameter/update.py | busraltun/IMPLEMENTATIONOFEYECONTROLLEDVIRTUALKEYBOARD | fa3a9b150419a17aa82f41b068a5d69d0ff0d0f3 | [
"MIT"
] | 1 | 2019-12-18T07:56:00.000Z | 2019-12-18T07:56:00.000Z | """
Return an updated parameter dictionary based on
user input dictionary.
"""
from typing import AnyStr, Dict
import os
from ..io.input.parse import parse
def update(user_config_file):
    # type: (AnyStr) -> Dict
    """Merge a user configuration file into the default parameters.

    The defaults live in ``all.json`` next to this module; values from
    the user file override them via :func:`parse`.

    Args:
        user_config_file (str): user configuration file name

    Returns:
        an updated parameter dictionary
    """
    package_dir = os.path.dirname(os.path.realpath(__file__))
    defaults = os.path.join(package_dir, "all.json")
    return parse(user_config_file, defaults)
| 25.846154 | 60 | 0.721726 | from typing import AnyStr, Dict
import os
from ..io.input.parse import parse
def update(user_config_file):
here = os.path.dirname(os.path.realpath(__file__))
default_config_file = os.path.join(here, "all.json")
return parse(user_config_file, default_config_file)
| true | true |
1c3b4c2556d0ca6a4f8386459171ee3d3882f52c | 5,816 | py | Python | DSB3Tutorial/LUNA_train_unet.py | taoddiao/dr.b | 87f9ae4a5001e1a9248b0e19ad90aa252e426fe9 | [
"Apache-2.0"
] | 10 | 2017-12-15T03:56:56.000Z | 2020-03-17T03:54:49.000Z | DSB3Tutorial/LUNA_train_unet.py | taoddiao/dr.b | 87f9ae4a5001e1a9248b0e19ad90aa252e426fe9 | [
"Apache-2.0"
] | 3 | 2017-12-15T20:22:46.000Z | 2018-04-27T17:56:13.000Z | DSB3Tutorial/LUNA_train_unet.py | taoddiao/dr.b | 87f9ae4a5001e1a9248b0e19ad90aa252e426fe9 | [
"Apache-2.0"
] | 3 | 2017-12-09T10:47:15.000Z | 2019-10-17T16:03:48.000Z | from __future__ import print_function
import numpy as np
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
working_path = "/home/qwerty/data/luna16/output/"
K.set_image_dim_ordering('th') # Theano dimension ordering in this code
img_rows = 512
img_cols = 512
smooth = 1.
def dice_coef(y_true, y_pred):
    """Dice similarity coefficient between two masks, as a Keras tensor op.

    Both masks are flattened before the overlap is computed.  The
    module-level ``smooth`` constant keeps the ratio finite (and equal
    to 1) when both masks are empty.
    """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_np(y_true, y_pred, smooth=1.0):
    """NumPy Dice similarity coefficient between two masks.

    Generalized from the original: the smoothing term is now an explicit
    keyword parameter (default matches the module-level ``smooth = 1.``
    constant, so existing two-argument callers behave identically), and
    inputs are coerced with ``np.asarray`` so plain lists are accepted.

    Args:
        y_true: ground-truth mask, any shape.
        y_pred: predicted mask, same shape as ``y_true``.
        smooth: additive smoothing that keeps the ratio finite (and
            equal to 1) when both masks are empty.

    Returns:
        float: Dice score; 1.0 means identical binary masks.
    """
    y_true_f = np.asarray(y_true).ravel()
    y_pred_f = np.asarray(y_pred).ravel()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
    """Keras loss wrapper: minimizing this maximizes the Dice coefficient."""
    score = dice_coef(y_true, y_pred)
    return -score
def get_unet():
    """Build and compile the 512x512 single-channel U-Net.

    Classic U-Net layout: four down-sampling stages (32→256 filters),
    a 512-filter bottleneck, then four up-sampling stages with skip
    connections concatenated on the channel axis (axis=1, Theano
    "channels-first" ordering — see K.set_image_dim_ordering above).
    Output is a 1-channel sigmoid mask; optimized with Adam on the
    negative Dice loss.

    NOTE(review): ``merge``, ``border_mode=`` and ``Model(input=...,
    output=...)`` are Keras 1.x-era APIs; on Keras >= 2 these emit
    deprecation warnings or fail — confirm the pinned Keras version.
    """
    inputs = Input((1,img_rows, img_cols))
    # --- contracting path ---
    conv1 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(inputs)
    conv1 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    # --- bottleneck ---
    conv5 = Convolution2D(512, (3, 3), activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, (3, 3), activation='relu', border_mode='same')(conv5)
    # --- expanding path with skip connections (concat on channel axis) ---
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(conv6)
    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(conv7)
    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(conv8)
    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(conv9)
    # 1x1 conv to a single sigmoid probability map (per-pixel mask).
    conv10 = Convolution2D(1, (1, 1), activation='sigmoid')(conv9)
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(lr=1.0e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
def train_and_predict(use_existing):
    """Train the U-Net on the LUNA16 arrays, then predict the test masks.

    Loads ``{train,test}{Images,Masks}.npy`` from ``working_path``,
    normalizes the training images, fits for 20 epochs (checkpointing
    the best weights to ``unet.hdf5``), writes the predicted masks to
    ``masksTestPredicted.npy`` and prints the mean Dice score against
    the ground-truth test masks.

    Args:
        use_existing (bool): if True, warm-start from ``./unet.hdf5``.
    """
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train = np.load(working_path+"trainImages.npy").astype(np.float32)
    imgs_mask_train = np.load(working_path+"trainMasks.npy").astype(np.float32)
    imgs_test = np.load(working_path+"testImages.npy").astype(np.float32)
    imgs_mask_test_true = np.load(working_path+"testMasks.npy").astype(np.float32)
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization
    # NOTE(review): only the training set is normalized here; the test
    # images are fed to predict() un-normalized below — confirm whether
    # they were standardized upstream.
    imgs_train -= mean  # images should already be standardized, but just in case
    imgs_train /= std
    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    # Saving weights to unet.hdf5 at checkpoints (best loss only)
    model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', save_best_only=True)
    #
    # Should we load existing weights?
    # Set argument for call to train_and_predict to true at end of script
    if use_existing:
        model.load_weights('./unet.hdf5')
    #
    # The final results for this tutorial were produced using a multi-GPU
    # machine using TitanX's.
    # For a home GPU computation benchmark, on my home set up with a GTX970
    # I was able to run 20 epochs with a training set size of 320 and
    # batch size of 2 in about an hour. I started getting reasonable masks
    # after about 3 hours of training.
    #
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    model.fit(imgs_train, imgs_mask_train, batch_size=2, epochs=20, verbose=1, shuffle=True,
              callbacks=[model_checkpoint])
    # loading best weights from training session
    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('./unet.hdf5')
    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    num_test = len(imgs_test)
    imgs_mask_test = np.ndarray([num_test,1,512,512],dtype=np.float32)
    # Predict one slice at a time to keep GPU memory bounded.
    for i in range(num_test):
        imgs_mask_test[i] = model.predict([imgs_test[i:i+1]], verbose=0)[0]
    np.save('masksTestPredicted.npy', imgs_mask_test)
    # Mean Dice over the test set (reuses `mean` as an accumulator).
    mean = 0.0
    for i in range(num_test):
        mean+=dice_coef_np(imgs_mask_test_true[i,0], imgs_mask_test[i,0])
    mean/=num_test
    print("Mean Dice Coeff : ",mean)
if __name__ == '__main__':
train_and_predict(True)
| 39.564626 | 92 | 0.682256 | from __future__ import print_function
import numpy as np
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
working_path = "/home/qwerty/data/luna16/output/"
K.set_image_dim_ordering('th')
img_rows = 512
img_cols = 512
smooth = 1.
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_np(y_true,y_pred):
y_true_f = y_true.flatten()
y_pred_f = y_pred.flatten()
intersection = np.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
def get_unet():
inputs = Input((1,img_rows, img_cols))
conv1 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(inputs)
conv1 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, (3, 3), activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(512, (3, 3), activation='relu', border_mode='same')(conv5)
up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
conv6 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(up6)
conv6 = Convolution2D(256, (3, 3), activation='relu', border_mode='same')(conv6)
up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
conv7 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(up7)
conv7 = Convolution2D(128, (3, 3), activation='relu', border_mode='same')(conv7)
up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
conv8 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(up8)
conv8 = Convolution2D(64, (3, 3), activation='relu', border_mode='same')(conv8)
up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
conv9 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(up9)
conv9 = Convolution2D(32, (3, 3), activation='relu', border_mode='same')(conv9)
conv10 = Convolution2D(1, (1, 1), activation='sigmoid')(conv9)
model = Model(input=inputs, output=conv10)
model.compile(optimizer=Adam(lr=1.0e-5), loss=dice_coef_loss, metrics=[dice_coef])
return model
def train_and_predict(use_existing):
print('-'*30)
print('Loading and preprocessing train data...')
print('-'*30)
imgs_train = np.load(working_path+"trainImages.npy").astype(np.float32)
imgs_mask_train = np.load(working_path+"trainMasks.npy").astype(np.float32)
imgs_test = np.load(working_path+"testImages.npy").astype(np.float32)
imgs_mask_test_true = np.load(working_path+"testMasks.npy").astype(np.float32)
mean = np.mean(imgs_train)
std = np.std(imgs_train)
imgs_train -= mean
imgs_train /= std
print('-'*30)
print('Creating and compiling model...')
print('-'*30)
model = get_unet()
model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', save_best_only=True)
if use_existing:
model.load_weights('./unet.hdf5')
# For a home GPU computation benchmark, on my home set up with a GTX970
# I was able to run 20 epochs with a training set size of 320 and
# batch size of 2 in about an hour. I started getting reseasonable masks
# after about 3 hours of training.
#
print('-'*30)
print('Fitting model...')
print('-'*30)
model.fit(imgs_train, imgs_mask_train, batch_size=2, epochs=20, verbose=1, shuffle=True,
callbacks=[model_checkpoint])
# loading best weights from training session
print('-'*30)
print('Loading saved weights...')
print('-'*30)
model.load_weights('./unet.hdf5')
print('-'*30)
print('Predicting masks on test data...')
print('-'*30)
num_test = len(imgs_test)
imgs_mask_test = np.ndarray([num_test,1,512,512],dtype=np.float32)
for i in range(num_test):
imgs_mask_test[i] = model.predict([imgs_test[i:i+1]], verbose=0)[0]
np.save('masksTestPredicted.npy', imgs_mask_test)
mean = 0.0
for i in range(num_test):
mean+=dice_coef_np(imgs_mask_test_true[i,0], imgs_mask_test[i,0])
mean/=num_test
print("Mean Dice Coeff : ",mean)
if __name__ == '__main__':
train_and_predict(True)
| true | true |
1c3b4c35ea45da38f5085a7c3d225839dc29b221 | 5,952 | py | Python | simulator/game.py | Yuta1004/procon30-battle-simulator-py | dcd0bb34efab3201705ff2188c2fc62f6ac7bc09 | [
"MIT"
] | null | null | null | simulator/game.py | Yuta1004/procon30-battle-simulator-py | dcd0bb34efab3201705ff2188c2fc62f6ac7bc09 | [
"MIT"
] | null | null | null | simulator/game.py | Yuta1004/procon30-battle-simulator-py | dcd0bb34efab3201705ff2188c2fc62f6ac7bc09 | [
"MIT"
] | null | null | null | # Copylight(c) 2019 NakagamiYuta
# LICENCE : MIT
import numpy as np
import json
from simulator.common import flatten_2d, gen_2d_list
class Game:
    """Turn-based board-battle simulator (procon30).

    Holds the board and all agents, resolves one turn of queued agent
    actions at a time (cancelling conflicting moves), and scores teams
    as tile points plus enclosed-area points.
    """
    def __init__(self, board, agents):
        """Store the board, the agents and start the turn counter at 0.

        Args:
            board (Board): board state (``points`` and ``tiled`` grids).
            agents (Agent list): every agent taking part in the game.
        """
        self.board = board
        self.agents = agents
        self.turn = 0
    def set_action(self, team_id, agent_id, dx, dy, remove_panel=False):
        """Queue an action on the matching agent.

        Args:
            team_id (int): team id of the target agent.
            agent_id (int): agent id within the team.
            dx (int): x move delta in {-1, 0, 1}.
            dy (int): y move delta in {-1, 0, 1}.
            remove_panel (bool): if True, remove the destination panel
                instead of claiming it.

        Returns:
            bool: False for an out-of-range delta, True otherwise.
                NOTE(review): True is returned even when no agent
                matches (team_id, agent_id) — confirm that is intended.
        """
        if abs(dx) > 1 or abs(dy) > 1:
            return False
        for agent in self.agents:
            if (agent.team == team_id) and (agent.id == agent_id):
                agent.dx = dx
                agent.dy = dy
                agent.remove_panel = remove_panel
        return True
    def step(self):
        """Advance the game by one turn.

        Only agents with a queued action take part (the ``dx >= -1``
        filter — presumably ``Agent.reset`` parks ``dx`` below -1;
        TODO confirm in the Agent class).

        Returns:
            safety_agents (list): ids of agents whose action succeeded.
            affected_agents (list): ids of agents cancelled by a
                conflict or an off-board destination.
        """
        # A plain move onto an opposing team's tile (without
        # remove_panel) is downgraded to a stay.
        for agent in filter(lambda n: n.dx >= -1, self.agents):
            mx, my = self.__cal_mx_my(agent)
            if (self.board.tiled[my][mx] != agent.team) and (self.board.tiled[my][mx] != 0)\
                    and (not agent.remove_panel):
                agent.remove_panel = False
                agent.dx = 0
                agent.dy = 0
        # Collect every square touched by some queued action.
        affected_positions = []
        for agent in filter(lambda n: n.dx >= -1, self.agents):
            mx, my = self.__cal_mx_my(agent)
            affected_positions.append((mx, my))
            if self.__can_action(agent) and agent.remove_panel:
                affected_positions.append((agent.x, agent.y))
        # Register conflicts: off-board moves or duplicated targets
        # also block the agent's current square.
        for agent in filter(lambda n: n.dx >= -1, self.agents):
            mx, my = self.__cal_mx_my(agent)
            if not self.__can_action(agent) or not affected_positions.count((mx, my)) == 1:
                affected_positions.append((agent.x, agent.y))
        # Apply the actions that are free of conflicts.
        safety_agents = []
        affected_agents = []
        for agent in filter(lambda n: n.dx >= -1, self.agents):
            mx, my = self.__cal_mx_my(agent)
            if self.__can_action(agent) and (affected_positions.count((mx, my)) <= 1):  # conflict check
                agent.move()  # perform the move
                safety_agents.append(agent.id)
                if agent.remove_panel:
                    self.board.tiled[my][mx] = 0
                else:
                    self.board.tiled[my][mx] = agent.team
            else:
                affected_agents.append(agent.id)
        # Clear every agent's queued action.
        list(map(lambda agent: agent.reset(), self.agents))
        self.turn += 1
        return safety_agents, affected_agents
    def cal_score(self, team_id_list):
        """Compute the score of each listed team.

        Args:
            team_id_list (int list): team ids to score.

        Returns:
            dict: team id -> {"tilePoint": int, "areaPoint": int}.
        """
        score_list = {}
        for (idx, team_id) in enumerate(team_id_list):
            score_list[team_id] = {}
            # Tile points: sum of point values on squares the team owns.
            tiled_tmp = flatten_2d(self.board.tiled)
            points_flat = flatten_2d(self.board.points)
            score_list[team_id]["tilePoint"] = sum(map(lambda x, y: (x == team_id) * y, tiled_tmp, points_flat))
            # Flood-fill every square to test whether it is validly
            # enclosed by the team's tiles.
            self.rec_tiled = gen_2d_list(self.board.height, self.board.width)
            for y in range(self.board.height):
                for x in range(self.board.width):
                    if self.rec_tiled[y][x] == 0:
                        search_result = self.__recursive_child(x, y, team_id)
                        self.rec_tiled = self.__search_result_process(self.rec_tiled, search_result)
                        # On success the visited region is committed to
                        # rec_tiled; otherwise the visit marks are discarded.
            # Area points: sum |point| over validly enclosed squares.
            self.rec_tiled = flatten_2d(self.rec_tiled)
            score_list[team_id]["areaPoint"] = sum(map(lambda x, y: abs(x * y), self.rec_tiled, points_flat))
            self.rec_tiled = None
        return score_list
    def __recursive_child(self, x, y, target):
        # Depth-first search: a region is a valid enclosure for
        # `target` iff the search never reaches the board's outer edge
        # without first hitting one of the target team's tiles.
        if self.board.tiled[y][x] == target:
            return True
        elif (x == 0) or (x == self.board.width - 1) or (y == 0) or (y == self.board.height - 1):
            return False
        self.rec_tiled[y][x] = 2
        # Explore the four orthogonal neighbours.
        dx_list = [-1, 1, 0, 0]
        dy_list = [0, 0, -1, 1]
        for (dx, dy) in zip(dx_list, dy_list):
            mx = x + dx
            my = y + dy
            if self.__is_safe_pos(mx, my) and (self.rec_tiled[my][mx] == 0):
                if not self.__recursive_child(mx, my, target):
                    return False
        return True
    def __cal_mx_my(self, agent):
        # Destination square implied by the agent's queued move.
        mx = agent.x + agent.dx
        my = agent.y + agent.dy
        return mx, my
    def __can_action(self, agent):
        # An action is possible only if the destination is on the board.
        mx, my = self.__cal_mx_my(agent)
        return self.__is_safe_pos(mx, my)
    def __is_safe_pos(self, x, y):
        # True iff (x, y) lies within the board bounds.
        return (0 <= x) and (x < self.board.width) and\
               (0 <= y) and (y < self.board.height)
    def __search_result_process(self, tiled, result):
        # Commit or roll back the DFS visit marks:
        #   success: visited 2 -> 1 (enclosed), committed 1 stays 1;
        #   failure: visited 2 -> 0 (unvisited), committed 1 stays 1.
        # NOTE(review): np.int is removed in NumPy >= 1.24 — this line
        # needs plain int (or np.int_) on modern NumPy.
        tiled_np = np.array(tiled)
        if result:
            tiled_np = tiled_np / 2.0
            tiled_np = np.ceil(tiled_np)
        else:
            tiled_np -= 2
            tiled_np = np.abs(tiled_np)
            tiled_np = tiled_np == 1
            tiled_np = tiled_np.astype(np.int)
        return tiled_np.tolist()
| 28.753623 | 112 | 0.514785 |
import numpy as np
import json
from simulator.common import flatten_2d, gen_2d_list
class Game:
def __init__(self, board, agents):
self.board = board
self.agents = agents
self.turn = 0
def set_action(self, team_id, agent_id, dx, dy, remove_panel=False):
if abs(dx) > 1 or abs(dy) > 1:
return False
for agent in self.agents:
if (agent.team == team_id) and (agent.id == agent_id):
agent.dx = dx
agent.dy = dy
agent.remove_panel = remove_panel
return True
def step(self):
for agent in filter(lambda n: n.dx >= -1, self.agents):
mx, my = self.__cal_mx_my(agent)
if (self.board.tiled[my][mx] != agent.team) and (self.board.tiled[my][mx] != 0)\
and (not agent.remove_panel):
agent.remove_panel = False
agent.dx = 0
agent.dy = 0
affected_positions = []
for agent in filter(lambda n: n.dx >= -1, self.agents):
mx, my = self.__cal_mx_my(agent)
affected_positions.append((mx, my))
if self.__can_action(agent) and agent.remove_panel:
affected_positions.append((agent.x, agent.y))
for agent in filter(lambda n: n.dx >= -1, self.agents):
mx, my = self.__cal_mx_my(agent)
if not self.__can_action(agent) or not affected_positions.count((mx, my)) == 1:
affected_positions.append((agent.x, agent.y))
safety_agents = []
affected_agents = []
for agent in filter(lambda n: n.dx >= -1, self.agents):
mx, my = self.__cal_mx_my(agent)
if self.__can_action(agent) and (affected_positions.count((mx, my)) <= 1):
agent.move()
safety_agents.append(agent.id)
if agent.remove_panel:
self.board.tiled[my][mx] = 0
else:
self.board.tiled[my][mx] = agent.team
else:
affected_agents.append(agent.id)
list(map(lambda agent: agent.reset(), self.agents))
self.turn += 1
return safety_agents, affected_agents
def cal_score(self, team_id_list):
score_list = {}
for (idx, team_id) in enumerate(team_id_list):
score_list[team_id] = {}
tiled_tmp = flatten_2d(self.board.tiled)
points_flat = flatten_2d(self.board.points)
score_list[team_id]["tilePoint"] = sum(map(lambda x, y: (x == team_id) * y, tiled_tmp, points_flat))
self.rec_tiled = gen_2d_list(self.board.height, self.board.width)
for y in range(self.board.height):
for x in range(self.board.width):
if self.rec_tiled[y][x] == 0:
search_result = self.__recursive_child(x, y, team_id)
self.rec_tiled = self.__search_result_process(self.rec_tiled, search_result)
self.rec_tiled = flatten_2d(self.rec_tiled)
score_list[team_id]["areaPoint"] = sum(map(lambda x, y: abs(x * y), self.rec_tiled, points_flat))
self.rec_tiled = None
return score_list
def __recursive_child(self, x, y, target):
if self.board.tiled[y][x] == target:
return True
elif (x == 0) or (x == self.board.width - 1) or (y == 0) or (y == self.board.height - 1):
return False
self.rec_tiled[y][x] = 2
dx_list = [-1, 1, 0, 0]
dy_list = [0, 0, -1, 1]
for (dx, dy) in zip(dx_list, dy_list):
mx = x + dx
my = y + dy
if self.__is_safe_pos(mx, my) and (self.rec_tiled[my][mx] == 0):
if not self.__recursive_child(mx, my, target):
return False
return True
def __cal_mx_my(self, agent):
mx = agent.x + agent.dx
my = agent.y + agent.dy
return mx, my
def __can_action(self, agent):
mx, my = self.__cal_mx_my(agent)
return self.__is_safe_pos(mx, my)
def __is_safe_pos(self, x, y):
return (0 <= x) and (x < self.board.width) and\
(0 <= y) and (y < self.board.height)
def __search_result_process(self, tiled, result):
tiled_np = np.array(tiled)
if result:
tiled_np = tiled_np / 2.0
tiled_np = np.ceil(tiled_np)
else:
tiled_np -= 2
tiled_np = np.abs(tiled_np)
tiled_np = tiled_np == 1
tiled_np = tiled_np.astype(np.int)
return tiled_np.tolist()
| true | true |
1c3b4c5691828c47807f6cb8ed1b32e8d9038956 | 1,102 | py | Python | python/controls/progress/basic_progress.py | pglet/pglet-samples | ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9 | [
"MIT"
] | null | null | null | python/controls/progress/basic_progress.py | pglet/pglet-samples | ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9 | [
"MIT"
] | null | null | null | python/controls/progress/basic_progress.py | pglet/pglet-samples | ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9 | [
"MIT"
] | null | null | null | import time
import pglet
from pglet import Progress, Text
with pglet.page("basic-progress") as page:
prog1 = Progress("Copying file1.txt to file2.txt", value=0, width="50%")
page.add(Text("Default Progress", size="xLarge"), prog1)
for i in range(0, 101):
prog1.value = i
prog1.update()
time.sleep(0.005)
prog2 = Progress("Provisioning your account", value=0, width="50%")
page.add(prog2)
prog2.description = "Preparing environment..."
prog2.value = 0
prog2.update()
time.sleep(2)
prog2.description = "Collecting information..."
prog2.value = 20
prog2.update()
time.sleep(2)
prog2.description = "Creatring database entities..."
prog2.value = 40
prog2.update()
time.sleep(2)
prog2.description = "Verifying the data..."
prog2.value = 60
prog2.update()
time.sleep(2)
prog2.description = "Finishing the process, almost done..."
prog2.value = 80
prog2.update()
time.sleep(2)
prog2.description = "Your account has been created!"
prog2.value = 100
prog2.update()
| 23.446809 | 76 | 0.637931 | import time
import pglet
from pglet import Progress, Text
with pglet.page("basic-progress") as page:
prog1 = Progress("Copying file1.txt to file2.txt", value=0, width="50%")
page.add(Text("Default Progress", size="xLarge"), prog1)
for i in range(0, 101):
prog1.value = i
prog1.update()
time.sleep(0.005)
prog2 = Progress("Provisioning your account", value=0, width="50%")
page.add(prog2)
prog2.description = "Preparing environment..."
prog2.value = 0
prog2.update()
time.sleep(2)
prog2.description = "Collecting information..."
prog2.value = 20
prog2.update()
time.sleep(2)
prog2.description = "Creatring database entities..."
prog2.value = 40
prog2.update()
time.sleep(2)
prog2.description = "Verifying the data..."
prog2.value = 60
prog2.update()
time.sleep(2)
prog2.description = "Finishing the process, almost done..."
prog2.value = 80
prog2.update()
time.sleep(2)
prog2.description = "Your account has been created!"
prog2.value = 100
prog2.update()
| true | true |
1c3b4cff38fbd3e0ac656b6c5d5470120e680caa | 9,275 | py | Python | tests/test_caper_workflow_opts.py | dfeinzeig/caper | 35a693448179674acfae95590e329ab5d1eea0b7 | [
"MIT"
] | null | null | null | tests/test_caper_workflow_opts.py | dfeinzeig/caper | 35a693448179674acfae95590e329ab5d1eea0b7 | [
"MIT"
] | null | null | null | tests/test_caper_workflow_opts.py | dfeinzeig/caper | 35a693448179674acfae95590e329ab5d1eea0b7 | [
"MIT"
] | null | null | null | import json
import os
from textwrap import dedent
import pytest
from caper.caper_workflow_opts import CaperWorkflowOpts
from caper.cromwell_backend import BACKEND_AWS, BACKEND_GCP
def test_create_file(tmp_path):
"""Test without docker/singularity.
"""
use_google_cloud_life_sciences = False
gcp_zones = ['us-west-1', 'us-west-2']
slurm_partition = 'my_partition'
slurm_account = 'my_account'
slurm_extra_param = 'my_extra_param'
sge_pe = 'my_pe'
sge_queue = 'my_queue'
sge_extra_param = 'my_extra_param'
pbs_queue = 'my_queue'
pbs_extra_param = 'my_extra_param'
co = CaperWorkflowOpts(
use_google_cloud_life_sciences=use_google_cloud_life_sciences,
gcp_zones=gcp_zones,
slurm_partition=slurm_partition,
slurm_account=slurm_account,
slurm_extra_param=slurm_extra_param,
sge_pe=sge_pe,
sge_queue=sge_queue,
sge_extra_param=sge_extra_param,
pbs_queue=pbs_queue,
pbs_extra_param=pbs_extra_param,
)
wdl = tmp_path / 'test.wdl'
wdl.write_text('')
inputs = None
# check if backend and slurm_partition is replaced with
# that of this custom options file.
custom_options = tmp_path / 'my_custom_options.json'
custom_options_dict = {
'backend': 'world',
CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES: {
'slurm_partition': 'not_my_partition'
},
}
custom_options.write_text(json.dumps(custom_options_dict, indent=4))
backend = 'my_backend'
max_retries = 999
gcp_monitoring_script = 'gs://dummy/gcp_monitoring_script.sh'
basename = 'my_basename.json'
f = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
inputs=inputs,
custom_options=str(custom_options),
docker=None,
singularity=None,
singularity_cachedir=None,
no_build_singularity=False,
backend=backend,
max_retries=max_retries,
gcp_monitoring_script=gcp_monitoring_script,
basename=basename,
)
with open(f) as fp:
d = json.loads(fp.read())
dra = d[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert dra['zones'] == ' '.join(gcp_zones)
assert dra['slurm_partition'] == 'not_my_partition'
assert dra['slurm_account'] == slurm_account
assert dra['slurm_extra_param'] == slurm_extra_param
assert dra['sge_pe'] == sge_pe
assert dra['sge_queue'] == sge_queue
assert dra['sge_extra_param'] == sge_extra_param
assert dra['pbs_queue'] == pbs_queue
assert dra['pbs_extra_param'] == pbs_extra_param
assert d['backend'] == 'world'
assert dra['maxRetries'] == max_retries
# this should be ignored for non-gcp backends
assert 'monitoring_script' not in d
assert os.path.basename(f) == basename
assert os.path.dirname(f) == str(tmp_path)
# test for gcp backend
f = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend='gcp',
docker='ubuntu:latest',
max_retries=max_retries,
gcp_monitoring_script=gcp_monitoring_script,
basename=basename,
)
with open(f) as fp:
d = json.loads(fp.read())
assert d['monitoring_script'] == gcp_monitoring_script
def test_create_file_with_google_cloud_life_sciences(tmp_path):
"""Test with use_google_cloud_life_sciences flag.
zones should not be written to dra.
"""
gcp_zones = ['us-west-1', 'us-west-2']
co = CaperWorkflowOpts(use_google_cloud_life_sciences=True, gcp_zones=gcp_zones)
wdl = tmp_path / 'test.wdl'
wdl.write_text('')
f = co.create_file(directory=str(tmp_path), wdl=str(wdl))
with open(f) as fp:
d = json.loads(fp.read())
dra = d[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert 'zones' not in dra
def test_create_file_docker(tmp_path):
"""Test with docker and docker defined in WDL.
"""
wdl_contents = dedent(
"""\
version 1.0
workflow test_docker {
meta {
caper_docker: "ubuntu:latest"
}
}
"""
)
wdl = tmp_path / 'docker.wdl'
wdl.write_text(wdl_contents)
co = CaperWorkflowOpts()
# cloud backend gcp should try to find docker in WDL
f_gcp = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_GCP,
basename='opts_gcp.json',
)
with open(f_gcp) as fp:
d_gcp = json.loads(fp.read())
dra_gcp = d_gcp[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert dra_gcp['docker'] == 'ubuntu:latest'
# cloud backend aws should try to find docker in WDL
f_aws = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_AWS,
basename='opts_aws.json',
)
with open(f_aws) as fp:
d_aws = json.loads(fp.read())
dra_aws = d_aws[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert dra_aws['docker'] == 'ubuntu:latest'
# local backend should not try to find docker in WDL
# if docker is not defined
f_local = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend='my_backend',
basename='opts_local.json',
)
with open(f_local) as fp:
d_local = json.loads(fp.read())
dra_local = d_local[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert 'docker' not in dra_local
# local backend should use docker if docker is explicitly defined
f_local2 = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
docker='ubuntu:16',
backend='my_backend',
basename='opts_local2.json',
)
with open(f_local2) as fp:
d_local2 = json.loads(fp.read())
dra_local2 = d_local2[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert dra_local2['docker'] == 'ubuntu:16'
def test_create_file_singularity(tmp_path):
"""Test with singularity and singularity defined in WDL.
"""
wdl_contents = dedent(
"""\
version 1.0
workflow test_singularity {
meta {
caper_docker: "ubuntu:latest"
caper_singularity: "docker://ubuntu:latest"
}
}
"""
)
wdl = tmp_path / 'singularity.wdl'
wdl.write_text(wdl_contents)
co = CaperWorkflowOpts()
# cloud backend gcp should not try to find singularity in WDL
f_gcp = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_GCP,
basename='opts_gcp.json',
)
with open(f_gcp) as fp:
d_gcp = json.loads(fp.read())
dra_gcp = d_gcp[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert 'singularity' not in dra_gcp
# cloud backend aws should not try to find singularity in WDL
f_aws = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_AWS,
basename='opts_aws.json',
)
with open(f_aws) as fp:
d_aws = json.loads(fp.read())
dra_aws = d_aws[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert 'singularity' not in dra_aws
# cloud backend aws/gcp should not work with singularity
with pytest.raises(ValueError):
co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_GCP,
singularity='',
basename='opts_gcp2.json',
)
with pytest.raises(ValueError):
co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_AWS,
singularity='',
basename='opts_aws2.json',
)
# local backend should not try to find singularity in WDL
# if singularity is not defined
f_local = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend='my_backend',
basename='opts_local.json',
)
with open(f_local) as fp:
d_local = json.loads(fp.read())
dra_local = d_local[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert 'singularity' not in dra_local
# input JSON to test singularity bindpath
# this will be test thoroughly in other testing module (test_singularity)
inputs = tmp_path / 'inputs.json'
inputs_dict = {
'test.input': '/a/b/c/d.txt',
'test.input2': '/a/b/e.txt',
'test.input3': '/f/g/h.txt',
}
inputs.write_text(json.dumps(inputs_dict, indent=4))
# local backend should use singularity if singularity is explicitly defined
# also, singularity_bindpath should be input JSON.
f_local2 = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
inputs=str(inputs),
singularity='ubuntu:16',
singularity_cachedir='/tmp',
no_build_singularity=True,
backend='my_backend',
basename='opts_local2.json',
)
with open(f_local2) as fp:
d_local2 = json.loads(fp.read())
dra_local2 = d_local2[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert dra_local2['singularity'] == 'ubuntu:16'
assert dra_local2['singularity_cachedir'] == '/tmp'
assert sorted(dra_local2['singularity_bindpath'].split(',')) == ['/a/b', '/f/g']
| 30.610561 | 84 | 0.642372 | import json
import os
from textwrap import dedent
import pytest
from caper.caper_workflow_opts import CaperWorkflowOpts
from caper.cromwell_backend import BACKEND_AWS, BACKEND_GCP
def test_create_file(tmp_path):
use_google_cloud_life_sciences = False
gcp_zones = ['us-west-1', 'us-west-2']
slurm_partition = 'my_partition'
slurm_account = 'my_account'
slurm_extra_param = 'my_extra_param'
sge_pe = 'my_pe'
sge_queue = 'my_queue'
sge_extra_param = 'my_extra_param'
pbs_queue = 'my_queue'
pbs_extra_param = 'my_extra_param'
co = CaperWorkflowOpts(
use_google_cloud_life_sciences=use_google_cloud_life_sciences,
gcp_zones=gcp_zones,
slurm_partition=slurm_partition,
slurm_account=slurm_account,
slurm_extra_param=slurm_extra_param,
sge_pe=sge_pe,
sge_queue=sge_queue,
sge_extra_param=sge_extra_param,
pbs_queue=pbs_queue,
pbs_extra_param=pbs_extra_param,
)
wdl = tmp_path / 'test.wdl'
wdl.write_text('')
inputs = None
custom_options = tmp_path / 'my_custom_options.json'
custom_options_dict = {
'backend': 'world',
CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES: {
'slurm_partition': 'not_my_partition'
},
}
custom_options.write_text(json.dumps(custom_options_dict, indent=4))
backend = 'my_backend'
max_retries = 999
gcp_monitoring_script = 'gs://dummy/gcp_monitoring_script.sh'
basename = 'my_basename.json'
f = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
inputs=inputs,
custom_options=str(custom_options),
docker=None,
singularity=None,
singularity_cachedir=None,
no_build_singularity=False,
backend=backend,
max_retries=max_retries,
gcp_monitoring_script=gcp_monitoring_script,
basename=basename,
)
with open(f) as fp:
d = json.loads(fp.read())
dra = d[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert dra['zones'] == ' '.join(gcp_zones)
assert dra['slurm_partition'] == 'not_my_partition'
assert dra['slurm_account'] == slurm_account
assert dra['slurm_extra_param'] == slurm_extra_param
assert dra['sge_pe'] == sge_pe
assert dra['sge_queue'] == sge_queue
assert dra['sge_extra_param'] == sge_extra_param
assert dra['pbs_queue'] == pbs_queue
assert dra['pbs_extra_param'] == pbs_extra_param
assert d['backend'] == 'world'
assert dra['maxRetries'] == max_retries
assert 'monitoring_script' not in d
assert os.path.basename(f) == basename
assert os.path.dirname(f) == str(tmp_path)
f = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend='gcp',
docker='ubuntu:latest',
max_retries=max_retries,
gcp_monitoring_script=gcp_monitoring_script,
basename=basename,
)
with open(f) as fp:
d = json.loads(fp.read())
assert d['monitoring_script'] == gcp_monitoring_script
def test_create_file_with_google_cloud_life_sciences(tmp_path):
gcp_zones = ['us-west-1', 'us-west-2']
co = CaperWorkflowOpts(use_google_cloud_life_sciences=True, gcp_zones=gcp_zones)
wdl = tmp_path / 'test.wdl'
wdl.write_text('')
f = co.create_file(directory=str(tmp_path), wdl=str(wdl))
with open(f) as fp:
d = json.loads(fp.read())
dra = d[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert 'zones' not in dra
def test_create_file_docker(tmp_path):
wdl_contents = dedent(
"""\
version 1.0
workflow test_docker {
meta {
caper_docker: "ubuntu:latest"
}
}
"""
)
wdl = tmp_path / 'docker.wdl'
wdl.write_text(wdl_contents)
co = CaperWorkflowOpts()
f_gcp = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_GCP,
basename='opts_gcp.json',
)
with open(f_gcp) as fp:
d_gcp = json.loads(fp.read())
dra_gcp = d_gcp[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert dra_gcp['docker'] == 'ubuntu:latest'
f_aws = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_AWS,
basename='opts_aws.json',
)
with open(f_aws) as fp:
d_aws = json.loads(fp.read())
dra_aws = d_aws[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert dra_aws['docker'] == 'ubuntu:latest'
f_local = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend='my_backend',
basename='opts_local.json',
)
with open(f_local) as fp:
d_local = json.loads(fp.read())
dra_local = d_local[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert 'docker' not in dra_local
f_local2 = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
docker='ubuntu:16',
backend='my_backend',
basename='opts_local2.json',
)
with open(f_local2) as fp:
d_local2 = json.loads(fp.read())
dra_local2 = d_local2[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert dra_local2['docker'] == 'ubuntu:16'
def test_create_file_singularity(tmp_path):
wdl_contents = dedent(
"""\
version 1.0
workflow test_singularity {
meta {
caper_docker: "ubuntu:latest"
caper_singularity: "docker://ubuntu:latest"
}
}
"""
)
wdl = tmp_path / 'singularity.wdl'
wdl.write_text(wdl_contents)
co = CaperWorkflowOpts()
f_gcp = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_GCP,
basename='opts_gcp.json',
)
with open(f_gcp) as fp:
d_gcp = json.loads(fp.read())
dra_gcp = d_gcp[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert 'singularity' not in dra_gcp
f_aws = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_AWS,
basename='opts_aws.json',
)
with open(f_aws) as fp:
d_aws = json.loads(fp.read())
dra_aws = d_aws[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert 'singularity' not in dra_aws
with pytest.raises(ValueError):
co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_GCP,
singularity='',
basename='opts_gcp2.json',
)
with pytest.raises(ValueError):
co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend=BACKEND_AWS,
singularity='',
basename='opts_aws2.json',
)
f_local = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
backend='my_backend',
basename='opts_local.json',
)
with open(f_local) as fp:
d_local = json.loads(fp.read())
dra_local = d_local[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert 'singularity' not in dra_local
inputs = tmp_path / 'inputs.json'
inputs_dict = {
'test.input': '/a/b/c/d.txt',
'test.input2': '/a/b/e.txt',
'test.input3': '/f/g/h.txt',
}
inputs.write_text(json.dumps(inputs_dict, indent=4))
f_local2 = co.create_file(
directory=str(tmp_path),
wdl=str(wdl),
inputs=str(inputs),
singularity='ubuntu:16',
singularity_cachedir='/tmp',
no_build_singularity=True,
backend='my_backend',
basename='opts_local2.json',
)
with open(f_local2) as fp:
d_local2 = json.loads(fp.read())
dra_local2 = d_local2[CaperWorkflowOpts.DEFAULT_RUNTIME_ATTRIBUTES]
assert dra_local2['singularity'] == 'ubuntu:16'
assert dra_local2['singularity_cachedir'] == '/tmp'
assert sorted(dra_local2['singularity_bindpath'].split(',')) == ['/a/b', '/f/g']
| true | true |
1c3b4d30baa1124cda83549bee2b0a5d2cc42353 | 6,811 | py | Python | .ipynb_checkpoints/Model-checkpoint.py | acse-jl8920/IRP-Johnson | 2a70ab9b286726847cc5d5bb65232b2b241f4d5a | [
"MIT"
] | null | null | null | .ipynb_checkpoints/Model-checkpoint.py | acse-jl8920/IRP-Johnson | 2a70ab9b286726847cc5d5bb65232b2b241f4d5a | [
"MIT"
] | null | null | null | .ipynb_checkpoints/Model-checkpoint.py | acse-jl8920/IRP-Johnson | 2a70ab9b286726847cc5d5bb65232b2b241f4d5a | [
"MIT"
] | null | null | null | #coding=utf-8
import tensorflow as tf
import keras
from keras.models import *
from keras.layers import *
import numpy as np
from metrics import metrics
from losses import LOSS_FACTORY
from keras.callbacks import History
from keras.callbacks import ModelCheckpoint
def conv_block(input, filters):
out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(input)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
return out
def up_conv(input, filters):
out = UpSampling2D()(input)
out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
return out
class UNet():
def __init__(self):
self.model_weights_path = ''
self.model = self.__build_UNet()
self.height = 416
self.width = 416
def __build_UNet(self,nClasses = 2, input_height=416, input_width=416):
"""
UNet - Basic Implementation
Paper : https://arxiv.org/abs/1505.04597
"""
inputs = Input(shape=(input_height, input_width, 1))
n1 = 32
filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]
conv1 = conv_block(inputs, n1)
conv2 = MaxPooling2D(strides=2)(conv1)
conv2 = conv_block(conv2, filters[1])
conv3 = MaxPooling2D(strides=2)(conv2)
conv3 = conv_block(conv3, filters[2])
conv4 = MaxPooling2D(strides=2)(conv3)
conv4 = conv_block(conv4, filters[3])
conv5 = MaxPooling2D(strides=2)(conv4)
conv5 = conv_block(conv5, filters[4])
d5 = up_conv(conv5, filters[3])
d5 = Add()([conv4, d5])
d4 = up_conv(d5, filters[2])
d4 = Add()([conv3, d4])
d4 = conv_block(d4, filters[2])
d3 = up_conv(d4, filters[1])
d3 = Add()([conv2, d3])
d3 = conv_block(d3, filters[1])
d2 = up_conv(d3, filters[0])
d2 = Add()([conv1, d2])
d2 = conv_block(d2, filters[0])
o = Conv2D(nClasses, (3, 3), padding='same')(d2)
outputHeight = Model(inputs, o).output_shape[1]
outputWidth = Model(inputs, o).output_shape[2]
out = (Reshape((outputHeight * outputWidth, nClasses)))(o)
out = Activation('softmax')(out)
model = Model(inputs=inputs, outputs=out)
model.outputHeight = outputHeight
model.outputWidth = outputWidth
return model
def load_weights(self, weights_path):
self.model.load_weights(weights_path)
def complie_model(self, optimizer=None, version = '0', loss = 'ce'):
'''
Parameters
----------
optimizer : object, optional
The default is None. It require a optimizer such as Adam or SGD.
version : str, optional
The version of your model test. The default is '0'.
loss : Str, optional
'ce' Cross Entropy
'weighted_ce' Weighted Categorical loss
'b_focal' Binary Focal loss
'c_focal' Categorical Focal loss
'dice' Dice loss Yes
'bce_dice' BCE + Dice loss
'ce_dice' CE + Dice loss
'g_dice' Generalized Dice loss
'jaccard' Jaccard loss
'bce_jaccard' BCE + Jaccard loss
'ce_jaccard' CE + Jaccard loss
'tversky Tversky' loss
'f_tversky' Focal Tversky loss
The default is 'ce'.
Returns
-------
None.
'''
csv_logger = CSVLogger(log_file_path, append=False)
# early_stop = EarlyStopping('loss', min_delta=0.1, patience=patience, verbose=1)
history = History()
#set the log save dir, it will save the network value by every epochs in tensorboards.
tb_cb = keras.callbacks.TensorBoard(log_dir='weights/exp1/'+version+'/log/' , write_images=1, histogram_freq=0)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')
self.call_backs = [csv_logger, tb_cb, reduce_lr]
self.version = version
if(optimizer == None):
opt = optimizers.Adam()
else:
opt = optimizer
loss = LOSS_FACTORY[loss]
self.model.compile(opt, loss =loss, metrics=['accuracy', 'iou_score','f1_score'])
def train(self, X_train, y_train, X_val, y_val,epochs=20,
batch_sizes = 6, weight_pth='weights/exp1/'):
hist = self.model.fit(X_train,y_train,batch_size = batch_sizes,
callbacks = self.call_backs,epochs=epochs,
validation_data=(X_val,y_val), shuffle=True)
self.model.save_weights(weight_pth+self.version+'.h5')
def test(self, img, ground_turth):
'''
ground_turth: array of mask(shape[num_imgs, height * width, channel(2)]
'''
loss = LOSS_FACTORY['ce']
adam = optimizers.Adam()
self.model.compile(adam, loss =loss, metrics=['accuracy', 'iou_score','f1_score'])
if(len(ground_turth.shape)>4):
shape = ground_turth.shape
ground_turth.reshape(shape[0], self.width*self.height,2)
self.model.evaluate(img, ground_turth)
def detect_mult_img(self, imgs):
'''
Parameters
----------
imgs : array
Batch of image with shape [num_img, width, weight]
for the model in this project is (n,416,416)
Returns
-------
r1 : arrays
mask of each images, with shape (n, 416, 416)
'''
imgs = np.asarray(imgs)
result = self.model.predict(imgs)
result = result.reshape(imgs.shape[0],imgs.shape[1],imgs.shape[2],2)
r1 = np.zeros((imgs.shape[0],imgs.shape[1],imgs.shape[2]))
r1[result[:,:,:,0]<result[:,:,:,1]] = 1
return r1
def detect_single_img(self,img, model):
'''
detect single image
Parameters
----------
imgs : array
Batch of image with shape [num_img, width, weight]
for the model in this project is (n,416,416)
Returns
-------
r1 : arrays
mask of each images, with shape (n, 416, 416)
'''
img = np.asarray(img)
result = self.model.predict(img)
result = result.reshape(img.shape[0],img.shape[1],2)
r1 = np.zeros((img.shape[0],img.shape[1]))
r1[result[:,:,0]<result[:,:,1]] = 1
return r1 | 33.885572 | 119 | 0.56673 |
import tensorflow as tf
import keras
from keras.models import *
from keras.layers import *
import numpy as np
from metrics import metrics
from losses import LOSS_FACTORY
from keras.callbacks import History
from keras.callbacks import ModelCheckpoint
def conv_block(input, filters):
out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(input)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
return out
def up_conv(input, filters):
out = UpSampling2D()(input)
out = Conv2D(filters, kernel_size=(3,3), strides=1, padding='same')(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
return out
class UNet():
def __init__(self):
self.model_weights_path = ''
self.model = self.__build_UNet()
self.height = 416
self.width = 416
def __build_UNet(self,nClasses = 2, input_height=416, input_width=416):
inputs = Input(shape=(input_height, input_width, 1))
n1 = 32
filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]
conv1 = conv_block(inputs, n1)
conv2 = MaxPooling2D(strides=2)(conv1)
conv2 = conv_block(conv2, filters[1])
conv3 = MaxPooling2D(strides=2)(conv2)
conv3 = conv_block(conv3, filters[2])
conv4 = MaxPooling2D(strides=2)(conv3)
conv4 = conv_block(conv4, filters[3])
conv5 = MaxPooling2D(strides=2)(conv4)
conv5 = conv_block(conv5, filters[4])
d5 = up_conv(conv5, filters[3])
d5 = Add()([conv4, d5])
d4 = up_conv(d5, filters[2])
d4 = Add()([conv3, d4])
d4 = conv_block(d4, filters[2])
d3 = up_conv(d4, filters[1])
d3 = Add()([conv2, d3])
d3 = conv_block(d3, filters[1])
d2 = up_conv(d3, filters[0])
d2 = Add()([conv1, d2])
d2 = conv_block(d2, filters[0])
o = Conv2D(nClasses, (3, 3), padding='same')(d2)
outputHeight = Model(inputs, o).output_shape[1]
outputWidth = Model(inputs, o).output_shape[2]
out = (Reshape((outputHeight * outputWidth, nClasses)))(o)
out = Activation('softmax')(out)
model = Model(inputs=inputs, outputs=out)
model.outputHeight = outputHeight
model.outputWidth = outputWidth
return model
def load_weights(self, weights_path):
self.model.load_weights(weights_path)
def complie_model(self, optimizer=None, version = '0', loss = 'ce'):
csv_logger = CSVLogger(log_file_path, append=False)
history = History()
tb_cb = keras.callbacks.TensorBoard(log_dir='weights/exp1/'+version+'/log/' , write_images=1, histogram_freq=0)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=10, mode='auto')
self.call_backs = [csv_logger, tb_cb, reduce_lr]
self.version = version
if(optimizer == None):
opt = optimizers.Adam()
else:
opt = optimizer
loss = LOSS_FACTORY[loss]
self.model.compile(opt, loss =loss, metrics=['accuracy', 'iou_score','f1_score'])
def train(self, X_train, y_train, X_val, y_val,epochs=20,
batch_sizes = 6, weight_pth='weights/exp1/'):
hist = self.model.fit(X_train,y_train,batch_size = batch_sizes,
callbacks = self.call_backs,epochs=epochs,
validation_data=(X_val,y_val), shuffle=True)
self.model.save_weights(weight_pth+self.version+'.h5')
def test(self, img, ground_turth):
loss = LOSS_FACTORY['ce']
adam = optimizers.Adam()
self.model.compile(adam, loss =loss, metrics=['accuracy', 'iou_score','f1_score'])
if(len(ground_turth.shape)>4):
shape = ground_turth.shape
ground_turth.reshape(shape[0], self.width*self.height,2)
self.model.evaluate(img, ground_turth)
def detect_mult_img(self, imgs):
imgs = np.asarray(imgs)
result = self.model.predict(imgs)
result = result.reshape(imgs.shape[0],imgs.shape[1],imgs.shape[2],2)
r1 = np.zeros((imgs.shape[0],imgs.shape[1],imgs.shape[2]))
r1[result[:,:,:,0]<result[:,:,:,1]] = 1
return r1
def detect_single_img(self,img, model):
img = np.asarray(img)
result = self.model.predict(img)
result = result.reshape(img.shape[0],img.shape[1],2)
r1 = np.zeros((img.shape[0],img.shape[1]))
r1[result[:,:,0]<result[:,:,1]] = 1
return r1 | true | true |
1c3b4de8c952d03489927337b1b07d56f5cdc1d5 | 106 | py | Python | regex_field/__init__.py | millarm/django-regex-field | f9f8f41d576ac78f36159ec9408d1cf65bdb9532 | [
"MIT"
] | 14 | 2015-06-01T19:29:02.000Z | 2021-12-23T14:33:51.000Z | regex_field/__init__.py | millarm/django-regex-field | f9f8f41d576ac78f36159ec9408d1cf65bdb9532 | [
"MIT"
] | 15 | 2015-03-27T14:40:28.000Z | 2021-11-16T13:36:33.000Z | regex_field/__init__.py | millarm/django-regex-field | f9f8f41d576ac78f36159ec9408d1cf65bdb9532 | [
"MIT"
] | 15 | 2015-03-27T13:38:16.000Z | 2021-12-23T14:33:53.000Z | # flake8: noqa
from .version import __version__
default_app_config = 'regex_field.apps.RegexFieldConfig'
| 21.2 | 56 | 0.820755 |
from .version import __version__
default_app_config = 'regex_field.apps.RegexFieldConfig'
| true | true |
1c3b4e50c4ef9848007a3e4dc4cfa4018dff5357 | 42,431 | py | Python | sympy/functions/elementary/complexes.py | hackman01/sympy | 4a74b6f1952b863dfbafc9e14557427e63698dcd | [
"BSD-3-Clause"
] | null | null | null | sympy/functions/elementary/complexes.py | hackman01/sympy | 4a74b6f1952b863dfbafc9e14557427e63698dcd | [
"BSD-3-Clause"
] | null | null | null | sympy/functions/elementary/complexes.py | hackman01/sympy | 4a74b6f1952b863dfbafc9e14557427e63698dcd | [
"BSD-3-Clause"
] | null | null | null | from sympy.core import S, Add, Mul, sympify, Symbol, Dummy, Basic
from sympy.core.expr import Expr
from sympy.core.exprtools import factor_terms
from sympy.core.function import (Function, Derivative, ArgumentIndexError,
AppliedUndef)
from sympy.core.logic import fuzzy_not, fuzzy_or
from sympy.core.numbers import pi, I, oo
from sympy.core.relational import Eq
from sympy.functions.elementary.exponential import exp, exp_polar, log
from sympy.functions.elementary.integers import ceiling
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import atan, atan2
###############################################################################
######################### REAL and IMAGINARY PARTS ############################
###############################################################################
class re(Function):
    """
    Returns real part of expression. This function performs only
    elementary analysis and so it will fail to decompose properly
    more complicated expressions. If completely simplified result
    is needed then use Basic.as_real_imag() or perform complex
    expansion on instance of this function.
    Examples
    ========
    >>> from sympy import re, im, I, E, symbols
    >>> x, y = symbols('x y', real=True)
    >>> re(2*E)
    2*E
    >>> re(2*I + 17)
    17
    >>> re(2*I)
    0
    >>> re(im(x) + x*I + 2)
    2
    >>> re(5 + I + 2)
    7
    Parameters
    ==========
    arg : Expr
        Real or complex expression.
    Returns
    =======
    expr : Expr
        Real part of expression.
    See Also
    ========
    im
    """
    is_extended_real = True
    unbranched = True  # implicitly works on the projection to C
    _singularities = True  # non-holomorphic
    @classmethod
    def eval(cls, arg):
        # NaN and complex infinity have no meaningful real part.
        if arg is S.NaN:
            return S.NaN
        elif arg is S.ComplexInfinity:
            return S.NaN
        elif arg.is_extended_real:
            return arg
        elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_extended_real:
            # purely imaginary argument: real part is zero
            return S.Zero
        elif arg.is_Matrix:
            return arg.as_real_imag()[0]
        elif arg.is_Function and isinstance(arg, conjugate):
            # re(conjugate(z)) == re(z)
            return re(arg.args[0])
        else:
            # Split the Add into: terms with a known real part (excluded),
            # terms of the form I*(non-real) whose real part is -im of the
            # coefficient (reverted), and undecomposable terms (included).
            included, reverted, excluded = [], [], []
            args = Add.make_args(arg)
            for term in args:
                coeff = term.as_coefficient(S.ImaginaryUnit)
                if coeff is not None:
                    if not coeff.is_extended_real:
                        reverted.append(coeff)
                    # a real coefficient of I contributes 0 to the real part
                elif not term.has(S.ImaginaryUnit) and term.is_extended_real:
                    excluded.append(term)
                else:
                    # Try to do some advanced expansion. If
                    # impossible, don't try to do re(arg) again
                    # (because this is what we are trying to do now).
                    real_imag = term.as_real_imag(ignore=arg)
                    if real_imag:
                        excluded.append(real_imag[0])
                    else:
                        included.append(term)
            if len(args) != len(included):
                # re(arg) == re(included) - im(reverted) + excluded
                a, b, c = (Add(*xs) for xs in [included, reverted, excluded])
                return cls(a) - im(b) + c
    def as_real_imag(self, deep=True, **hints):
        """
        Returns the real number with a zero imaginary part.
        """
        return (self, S.Zero)
    def _eval_derivative(self, x):
        # Differentiation commutes with re() along a real direction; along
        # an imaginary direction a factor of -I appears and re becomes im.
        if x.is_extended_real or self.args[0].is_extended_real:
            return re(Derivative(self.args[0], x, evaluate=True))
        if x.is_imaginary or self.args[0].is_imaginary:
            return -S.ImaginaryUnit \
                * im(Derivative(self.args[0], x, evaluate=True))
    def _eval_rewrite_as_im(self, arg, **kwargs):
        # re(z) == z - I*im(z)
        return self.args[0] - S.ImaginaryUnit*im(self.args[0])
    def _eval_is_algebraic(self):
        return self.args[0].is_algebraic
    def _eval_is_zero(self):
        # is_imaginary implies nonzero
        return fuzzy_or([self.args[0].is_imaginary, self.args[0].is_zero])
    def _eval_is_finite(self):
        if self.args[0].is_finite:
            return True
    def _eval_is_complex(self):
        if self.args[0].is_finite:
            return True
    def _sage_(self):
        # Interop hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.real_part(self.args[0]._sage_())
class im(Function):
    """
    Returns imaginary part of expression. This function performs only
    elementary analysis and so it will fail to decompose properly more
    complicated expressions. If completely simplified result is needed then
    use Basic.as_real_imag() or perform complex expansion on instance of
    this function.
    Examples
    ========
    >>> from sympy import re, im, E, I
    >>> from sympy.abc import x, y
    >>> im(2*E)
    0
    >>> im(2*I + 17)
    2
    >>> im(x*I)
    re(x)
    >>> im(re(x) + y)
    im(y)
    >>> im(2 + 3*I)
    3
    Parameters
    ==========
    arg : Expr
        Real or complex expression.
    Returns
    =======
    expr : Expr
        Imaginary part of expression.
    See Also
    ========
    re
    """
    is_extended_real = True
    unbranched = True  # implicitly works on the projection to C
    _singularities = True  # non-holomorphic
    @classmethod
    def eval(cls, arg):
        # NaN and complex infinity have no meaningful imaginary part.
        if arg is S.NaN:
            return S.NaN
        elif arg is S.ComplexInfinity:
            return S.NaN
        elif arg.is_extended_real:
            return S.Zero
        elif arg.is_imaginary or (S.ImaginaryUnit*arg).is_extended_real:
            # arg == I*w with w real, so im(arg) == w == -I*arg
            return -S.ImaginaryUnit * arg
        elif arg.is_Matrix:
            return arg.as_real_imag()[1]
        elif arg.is_Function and isinstance(arg, conjugate):
            # im(conjugate(z)) == -im(z)
            return -im(arg.args[0])
        else:
            # Split the Add into: I*(real coeff) terms (excluded, the
            # coefficient is the contribution), I*(non-real coeff) terms
            # (reverted, since im(I*w) == re(w)), and undecomposable
            # terms (included).  Purely real terms simply drop out.
            included, reverted, excluded = [], [], []
            args = Add.make_args(arg)
            for term in args:
                coeff = term.as_coefficient(S.ImaginaryUnit)
                if coeff is not None:
                    if not coeff.is_extended_real:
                        reverted.append(coeff)
                    else:
                        excluded.append(coeff)
                elif term.has(S.ImaginaryUnit) or not term.is_extended_real:
                    # Try to do some advanced expansion. If
                    # impossible, don't try to do im(arg) again
                    # (because this is what we are trying to do now).
                    real_imag = term.as_real_imag(ignore=arg)
                    if real_imag:
                        excluded.append(real_imag[1])
                    else:
                        included.append(term)
            if len(args) != len(included):
                # im(arg) == im(included) + re(reverted) + excluded
                a, b, c = (Add(*xs) for xs in [included, reverted, excluded])
                return cls(a) + re(b) + c
    def as_real_imag(self, deep=True, **hints):
        """
        Return the imaginary part with a zero real part.
        """
        return (self, S.Zero)
    def _eval_derivative(self, x):
        # Differentiation commutes with im() along a real direction; along
        # an imaginary direction a factor of -I appears and im becomes re.
        if x.is_extended_real or self.args[0].is_extended_real:
            return im(Derivative(self.args[0], x, evaluate=True))
        if x.is_imaginary or self.args[0].is_imaginary:
            return -S.ImaginaryUnit \
                * re(Derivative(self.args[0], x, evaluate=True))
    def _sage_(self):
        # Interop hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.imag_part(self.args[0]._sage_())
    def _eval_rewrite_as_re(self, arg, **kwargs):
        # im(z) == -I*(z - re(z))
        return -S.ImaginaryUnit*(self.args[0] - re(self.args[0]))
    def _eval_is_algebraic(self):
        return self.args[0].is_algebraic
    def _eval_is_zero(self):
        return self.args[0].is_extended_real
    def _eval_is_finite(self):
        if self.args[0].is_finite:
            return True
    def _eval_is_complex(self):
        if self.args[0].is_finite:
            return True
###############################################################################
############### SIGN, ABSOLUTE VALUE, ARGUMENT and CONJUGATION ################
###############################################################################
class sign(Function):
    """
    Returns the complex sign of an expression:
    Explanation
    ===========
    If the expression is real the sign will be:
        * 1 if expression is positive
        * 0 if expression is equal to zero
        * -1 if expression is negative
    If the expression is imaginary the sign will be:
        * I if im(expression) is positive
        * -I if im(expression) is negative
    Otherwise an unevaluated expression will be returned. When evaluated, the
    result (in general) will be ``cos(arg(expr)) + I*sin(arg(expr))``.
    Examples
    ========
    >>> from sympy.functions import sign
    >>> from sympy.core.numbers import I
    >>> sign(-1)
    -1
    >>> sign(0)
    0
    >>> sign(-3*I)
    -I
    >>> sign(1 + I)
    sign(1 + I)
    >>> _.evalf()
    0.707106781186548 + 0.707106781186548*I
    Parameters
    ==========
    arg : Expr
        Real or imaginary expression.
    Returns
    =======
    expr : Expr
        Complex sign of expression.
    See Also
    ========
    Abs, conjugate
    """
    is_complex = True
    _singularities = True
    def doit(self, **hints):
        # For an argument known to be nonzero, sign(z) == z/|z|.
        if self.args[0].is_zero is False:
            return self.args[0] / Abs(self.args[0])
        return self
    @classmethod
    def eval(cls, arg):
        # handle what we can
        if arg.is_Mul:
            # Extract the sign of each factor of determinable sign,
            # accumulating into `s`; leave the rest in `unk`.
            c, args = arg.as_coeff_mul()
            unk = []
            s = sign(c)
            for a in args:
                if a.is_extended_negative:
                    s = -s
                elif a.is_extended_positive:
                    pass
                else:
                    if a.is_imaginary:
                        ai = im(a)
                        if ai.is_comparable:  # i.e. a = I*real
                            s *= S.ImaginaryUnit
                            if ai.is_extended_negative:
                                # can't use sign(ai) here since ai might not be
                                # a Number
                                s = -s
                        else:
                            unk.append(a)
                    else:
                        unk.append(a)
            if c is S.One and len(unk) == len(args):
                # Nothing extractable; stay unevaluated.
                return None
            return s * cls(arg._new_rawargs(*unk))
        if arg is S.NaN:
            return S.NaN
        if arg.is_zero:  # it may be an Expr that is zero
            return S.Zero
        if arg.is_extended_positive:
            return S.One
        if arg.is_extended_negative:
            return S.NegativeOne
        if arg.is_Function:
            if isinstance(arg, sign):
                # sign is idempotent: sign(sign(z)) == sign(z)
                return arg
        if arg.is_imaginary:
            if arg.is_Pow and arg.exp is S.Half:
                # we catch this because non-trivial sqrt args are not expanded
                # e.g. sqrt(1-sqrt(2)) --x--> to I*sqrt(sqrt(2) - 1)
                return S.ImaginaryUnit
            arg2 = -S.ImaginaryUnit * arg
            if arg2.is_extended_positive:
                return S.ImaginaryUnit
            if arg2.is_extended_negative:
                return -S.ImaginaryUnit
    def _eval_Abs(self):
        # |sign(z)| == 1 whenever z is known nonzero.
        if fuzzy_not(self.args[0].is_zero):
            return S.One
    def _eval_conjugate(self):
        return sign(conjugate(self.args[0]))
    def _eval_derivative(self, x):
        # The distributional derivative of sign is 2*DiracDelta (chain
        # rule applied), both on the real and the imaginary axis.
        if self.args[0].is_extended_real:
            from sympy.functions.special.delta_functions import DiracDelta
            return 2 * Derivative(self.args[0], x, evaluate=True) \
                * DiracDelta(self.args[0])
        elif self.args[0].is_imaginary:
            from sympy.functions.special.delta_functions import DiracDelta
            return 2 * Derivative(self.args[0], x, evaluate=True) \
                * DiracDelta(-S.ImaginaryUnit * self.args[0])
    def _eval_is_nonnegative(self):
        if self.args[0].is_nonnegative:
            return True
    def _eval_is_nonpositive(self):
        if self.args[0].is_nonpositive:
            return True
    def _eval_is_imaginary(self):
        return self.args[0].is_imaginary
    def _eval_is_integer(self):
        # sign of a real expression is -1, 0 or 1, hence an integer.
        return self.args[0].is_extended_real
    def _eval_is_zero(self):
        return self.args[0].is_zero
    def _eval_power(self, other):
        # sign(z)**(even integer) == 1 for nonzero z.
        if (
            fuzzy_not(self.args[0].is_zero) and
            other.is_integer and
            other.is_even
        ):
            return S.One
    def _eval_nseries(self, x, n, logx, cdir=0):
        arg0 = self.args[0]
        x0 = arg0.subs(x, 0)
        if x0 != 0:
            return self.func(x0)
        if cdir != 0:
            cdir = arg0.dir(x, cdir)
        return -S.One if re(cdir) < 0 else S.One
    def _sage_(self):
        # Interop hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.sgn(self.args[0]._sage_())
    def _eval_rewrite_as_Piecewise(self, arg, **kwargs):
        if arg.is_extended_real:
            return Piecewise((1, arg > 0), (-1, arg < 0), (0, True))
    def _eval_rewrite_as_Heaviside(self, arg, **kwargs):
        from sympy.functions.special.delta_functions import Heaviside
        if arg.is_extended_real:
            # With the half-maximum convention H(0) == 1/2 this matches
            # sign(0) == 0.
            return Heaviside(arg, H0=S(1)/2) * 2 - 1
    def _eval_rewrite_as_Abs(self, arg, **kwargs):
        return Piecewise((0, Eq(arg, 0)), (arg / Abs(arg), True))
    def _eval_simplify(self, **kwargs):
        return self.func(factor_terms(self.args[0]))  # XXX include doit?
class Abs(Function):
    """
    Return the absolute value of the argument.
    Explanation
    ===========
    This is an extension of the built-in function abs() to accept symbolic
    values. If you pass a SymPy expression to the built-in abs(), it will
    pass it automatically to Abs().
    Examples
    ========
    >>> from sympy import Abs, Symbol, S, I
    >>> Abs(-1)
    1
    >>> x = Symbol('x', real=True)
    >>> Abs(-x)
    Abs(x)
    >>> Abs(x**2)
    x**2
    >>> abs(-x) # The Python built-in
    Abs(x)
    >>> Abs(3*x + 2*I)
    sqrt(9*x**2 + 4)
    >>> Abs(8*I)
    8
    Note that the Python built-in will return either an Expr or int depending on
    the argument::
        >>> type(abs(-1))
        <... 'int'>
        >>> type(abs(S.NegativeOne))
        <class 'sympy.core.numbers.One'>
    Abs will always return a sympy object.
    Parameters
    ==========
    arg : Expr
        Real or complex expression.
    Returns
    =======
    expr : Expr
        Absolute value returned can be an expression or integer depending on
        input arg.
    See Also
    ========
    sign, conjugate
    """
    is_extended_real = True
    is_extended_negative = False
    is_extended_nonnegative = True
    unbranched = True
    _singularities = True  # non-holomorphic
    def fdiff(self, argindex=1):
        """
        Get the first derivative of the argument to Abs().
        """
        if argindex == 1:
            # d|x|/dx == sign(x) (valid away from 0 for real x)
            return sign(self.args[0])
        else:
            raise ArgumentIndexError(self, argindex)
    @classmethod
    def eval(cls, arg):
        from sympy.simplify.simplify import signsimp
        from sympy.core.function import expand_mul
        from sympy.core.power import Pow
        # Give the argument's own |.| rule first crack.
        if hasattr(arg, '_eval_Abs'):
            obj = arg._eval_Abs()
            if obj is not None:
                return obj
        if not isinstance(arg, Expr):
            raise TypeError("Bad argument type for Abs(): %s" % type(arg))
        # handle what we can
        arg = signsimp(arg, evaluate=False)
        n, d = arg.as_numer_denom()
        if d.free_symbols and not n.free_symbols:
            # |n/d| == |n|/|d|; recurse on the symbol-free numerator.
            return cls(n)/cls(d)
        if arg.is_Mul:
            # Multiplicativity: split into factors whose Abs evaluates
            # (known) and those that stay wrapped (unk).
            known = []
            unk = []
            for t in arg.args:
                if t.is_Pow and t.exp.is_integer and t.exp.is_negative:
                    bnew = cls(t.base)
                    if isinstance(bnew, cls):
                        unk.append(t)
                    else:
                        known.append(Pow(bnew, t.exp))
                else:
                    tnew = cls(t)
                    if isinstance(tnew, cls):
                        unk.append(t)
                    else:
                        known.append(tnew)
            known = Mul(*known)
            unk = cls(Mul(*unk), evaluate=False) if unk else S.One
            return known*unk
        if arg is S.NaN:
            return S.NaN
        if arg is S.ComplexInfinity:
            return S.Infinity
        if arg.is_Pow:
            base, exponent = arg.as_base_exp()
            if base.is_extended_real:
                if exponent.is_integer:
                    if exponent.is_even:
                        # real**even is already nonnegative
                        return arg
                    if base is S.NegativeOne:
                        return S.One
                    return Abs(base)**exponent
                if base.is_extended_nonnegative:
                    return base**re(exponent)
                if base.is_extended_negative:
                    return (-base)**re(exponent)*exp(-S.Pi*im(exponent))
                return
            elif not base.has(Symbol):  # complex base
                # express base**exponent as exp(exponent*log(base))
                a, b = log(base).as_real_imag()
                z = a + I*b
                return exp(re(exponent*z))
        if isinstance(arg, exp):
            # |exp(z)| == exp(re(z))
            return exp(re(arg.args[0]))
        if isinstance(arg, AppliedUndef):
            if arg.is_positive:
                return arg
            elif arg.is_negative:
                return -arg
            return
        if arg.is_Add and arg.has(S.Infinity, S.NegativeInfinity):
            if any(a.is_infinite for a in arg.as_real_imag()):
                return S.Infinity
        if arg.is_zero:
            return S.Zero
        if arg.is_extended_nonnegative:
            return arg
        if arg.is_extended_nonpositive:
            return -arg
        if arg.is_imaginary:
            arg2 = -S.ImaginaryUnit * arg
            if arg2.is_extended_nonnegative:
                return arg2
        # reject result if all new conjugates are just wrappers around
        # an expression that was already in the arg
        conj = signsimp(arg.conjugate(), evaluate=False)
        new_conj = conj.atoms(conjugate) - arg.atoms(conjugate)
        if new_conj and all(arg.has(i.args[0]) for i in new_conj):
            return
        if arg != conj and arg != -conj:
            # |z| == sqrt(z*conjugate(z)); only use this when the
            # conjugation actually resolved (no stray conjugate(symbol)
            # of unknown-reality symbols left behind).
            ignore = arg.atoms(Abs)
            abs_free_arg = arg.xreplace({i: Dummy(real=True) for i in ignore})
            unk = [a for a in abs_free_arg.free_symbols if a.is_extended_real is None]
            if not unk or not all(conj.has(conjugate(u)) for u in unk):
                return sqrt(expand_mul(arg*conj))
    def _eval_is_real(self):
        if self.args[0].is_finite:
            return True
    def _eval_is_integer(self):
        if self.args[0].is_extended_real:
            return self.args[0].is_integer
    def _eval_is_extended_nonzero(self):
        return fuzzy_not(self._args[0].is_zero)
    def _eval_is_zero(self):
        return self._args[0].is_zero
    def _eval_is_extended_positive(self):
        is_z = self.is_zero
        if is_z is not None:
            return not is_z
    def _eval_is_rational(self):
        if self.args[0].is_extended_real:
            return self.args[0].is_rational
    def _eval_is_even(self):
        if self.args[0].is_extended_real:
            return self.args[0].is_even
    def _eval_is_odd(self):
        if self.args[0].is_extended_real:
            return self.args[0].is_odd
    def _eval_is_algebraic(self):
        return self.args[0].is_algebraic
    def _eval_power(self, exponent):
        if self.args[0].is_extended_real and exponent.is_integer:
            if exponent.is_even:
                # |x|**even == x**even for real x
                return self.args[0]**exponent
            elif exponent is not S.NegativeOne and exponent.is_Integer:
                # peel one power: |x|**n == x**(n-1)*|x|
                return self.args[0]**(exponent - 1)*self
        return
    def _eval_nseries(self, x, n, logx, cdir=0):
        direction = self.args[0].leadterm(x)[0]
        if direction.has(log(x)):
            direction = direction.subs(log(x), logx)
        s = self.args[0]._eval_nseries(x, n=n, logx=logx)
        return (sign(direction)*s).expand()
    def _sage_(self):
        # Interop hook for the Sage computer algebra system.
        import sage.all as sage
        return sage.abs_symbolic(self.args[0]._sage_())
    def _eval_derivative(self, x):
        if self.args[0].is_extended_real or self.args[0].is_imaginary:
            return Derivative(self.args[0], x, evaluate=True) \
                * sign(conjugate(self.args[0]))
        # General complex case: d|f| == (re(f)*re(f)' + im(f)*im(f)')/|f|
        rv = (re(self.args[0]) * Derivative(re(self.args[0]), x,
            evaluate=True) + im(self.args[0]) * Derivative(im(self.args[0]),
            x, evaluate=True)) / Abs(self.args[0])
        return rv.rewrite(sign)
    def _eval_rewrite_as_Heaviside(self, arg, **kwargs):
        # Note this only holds for real arg (since Heaviside is not defined
        # for complex arguments).
        from sympy.functions.special.delta_functions import Heaviside
        if arg.is_extended_real:
            return arg*(Heaviside(arg) - Heaviside(-arg))
    def _eval_rewrite_as_Piecewise(self, arg, **kwargs):
        if arg.is_extended_real:
            return Piecewise((arg, arg >= 0), (-arg, True))
        elif arg.is_imaginary:
            return Piecewise((I*arg, I*arg >= 0), (-I*arg, True))
    def _eval_rewrite_as_sign(self, arg, **kwargs):
        # z == sign(z)*|z|  =>  |z| == z/sign(z)
        return arg/sign(arg)
    def _eval_rewrite_as_conjugate(self, arg, **kwargs):
        return (arg*conjugate(arg))**S.Half
class arg(Function):
    """
    returns the argument (in radians) of a complex number. The argument is
    evaluated in consistent convention with atan2 where the branch-cut is
    taken along the negative real axis and arg(z) is in the interval
    (-pi,pi]. For a positive number, the argument is always 0.
    Examples
    ========
    >>> from sympy.functions import arg
    >>> from sympy import I, sqrt
    >>> arg(2.0)
    0
    >>> arg(I)
    pi/2
    >>> arg(sqrt(2) + I*sqrt(2))
    pi/4
    >>> arg(sqrt(3)/2 + I/2)
    pi/6
    >>> arg(4 + 3*I)
    atan(3/4)
    >>> arg(0.8 + 0.6*I)
    0.643501108793284
    Parameters
    ==========
    arg : Expr
        Real or complex expression.
    Returns
    =======
    value : Expr
        Returns arc tangent of arg measured in radians.
    """
    is_extended_real = True
    is_real = True
    is_finite = True
    _singularities = True  # non-holomorphic
    @classmethod
    def eval(cls, arg):
        # exp_polar carries an exact (unbranched) argument; delegate.
        if isinstance(arg, exp_polar):
            return periodic_argument(arg, oo)
        if not arg.is_Atom:
            # Replace factors of known sign by that sign, so the argument
            # of the simplified product can be computed.
            c, arg_ = factor_terms(arg).as_coeff_Mul()
            if arg_.is_Mul:
                arg_ = Mul(*[a if (sign(a) not in (-1, 1)) else
                    sign(a) for a in arg_.args])
            arg_ = sign(c)*arg_
        else:
            arg_ = arg
        if any(i.is_extended_positive is None for i in arg_.atoms(AppliedUndef)):
            # An applied undefined function of unknown sign: give up.
            return
        x, y = arg_.as_real_imag()
        rv = atan2(y, x)
        if rv.is_number:
            return rv
        if arg_ != arg:
            # Return the simplified-but-unevaluated form.
            return cls(arg_, evaluate=False)
    def _eval_derivative(self, t):
        # d arg(f)/dt == (x*y' - y*x')/(x**2 + y**2) with f == x + I*y
        x, y = self.args[0].as_real_imag()
        return (x * Derivative(y, t, evaluate=True) - y *
                    Derivative(x, t, evaluate=True)) / (x**2 + y**2)
    def _eval_rewrite_as_atan2(self, arg, **kwargs):
        x, y = self.args[0].as_real_imag()
        return atan2(y, x)
class conjugate(Function):
    """
    Returns the `complex conjugate` Ref[1] of an argument.
    The complex conjugate of a complex number is obtained by flipping the
    sign of its imaginary part: the conjugate of :math:`a + ib` (with a
    and b real) is :math:`a - ib`.
    Examples
    ========
    >>> from sympy import conjugate, I
    >>> conjugate(2)
    2
    >>> conjugate(I)
    -I
    >>> conjugate(3 + 2*I)
    3 - 2*I
    >>> conjugate(5 - I)
    5 + I
    Parameters
    ==========
    arg : Expr
        Real or complex expression.
    Returns
    =======
    arg : Expr
        Complex conjugate of arg as real, imaginary or mixed expression.
    See Also
    ========
    sign, Abs
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Complex_conjugation
    """
    _singularities = True  # non-holomorphic
    @classmethod
    def eval(cls, arg):
        # Defer to the argument's own conjugation rule when it has one;
        # otherwise remain unevaluated.
        conjugated = arg._eval_conjugate()
        if conjugated is not None:
            return conjugated
    def _eval_Abs(self):
        # |conjugate(z)| == |z|
        return Abs(self.args[0], evaluate=True)
    def _eval_adjoint(self):
        # adjoint(conjugate(A)) == transpose(A)
        return transpose(self.args[0])
    def _eval_conjugate(self):
        # Conjugation is an involution: conjugate(conjugate(z)) == z.
        return self.args[0]
    def _eval_derivative(self, x):
        # Conjugation commutes with d/dx for real x and anticommutes
        # for imaginary x.
        if x.is_real:
            return conjugate(Derivative(self.args[0], x, evaluate=True))
        if x.is_imaginary:
            return -conjugate(Derivative(self.args[0], x, evaluate=True))
    def _eval_transpose(self):
        # transpose(conjugate(A)) == adjoint(A)
        return adjoint(self.args[0])
    def _eval_is_algebraic(self):
        return self.args[0].is_algebraic
class transpose(Function):
    """
    Transposition of a linear map.
    Evaluates to the transpose of its (matrix) argument whenever the
    argument knows how to transpose itself; otherwise stays unevaluated.
    Examples
    ========
    >>> from sympy.functions import transpose
    >>> from sympy.matrices import MatrixSymbol
    >>> from sympy import Matrix
    >>> A = MatrixSymbol('A', 25, 9)
    >>> transpose(A)
    A.T
    >>> B = MatrixSymbol('B', 9, 22)
    >>> transpose(B)
    B.T
    >>> transpose(A*B)
    B.T*A.T
    >>> M = Matrix([[4, 5], [2, 1], [90, 12]])
    >>> M
    Matrix([
    [ 4,  5],
    [ 2,  1],
    [90, 12]])
    >>> transpose(M)
    Matrix([
    [4, 2, 90],
    [5, 1, 12]])
    Parameters
    ==========
    arg : Matrix
        Matrix or matrix expression to take the transpose of.
    Returns
    =======
    value : Matrix
        Transpose of arg.
    """
    @classmethod
    def eval(cls, arg):
        # Let the argument transpose itself if it knows how.
        transposed = arg._eval_transpose()
        if transposed is not None:
            return transposed
    def _eval_adjoint(self):
        # adjoint(transpose(A)) == conjugate(A)
        return conjugate(self.args[0])
    def _eval_conjugate(self):
        # conjugate(transpose(A)) == adjoint(A)
        return adjoint(self.args[0])
    def _eval_transpose(self):
        # Transposition is an involution.
        return self.args[0]
class adjoint(Function):
    """
    Conjugate transpose (Hermitian conjugation) of a linear map.
    Examples
    ========
    >>> from sympy import adjoint
    >>> from sympy.matrices import MatrixSymbol
    >>> A = MatrixSymbol('A', 10, 5)
    >>> adjoint(A)
    Adjoint(A)
    Parameters
    ==========
    arg : Matrix
        Matrix or matrix expression to take the adjoint of.
    Returns
    =======
    value : Matrix
        Represents the conjugate transpose or Hermite
        conjugation of arg.
    """
    @classmethod
    def eval(cls, arg):
        # Prefer the argument's own adjoint rule; otherwise fall back to
        # conjugating its transpose.
        hermitian = arg._eval_adjoint()
        if hermitian is not None:
            return hermitian
        transposed = arg._eval_transpose()
        if transposed is not None:
            return conjugate(transposed)
    def _eval_adjoint(self):
        # The adjoint is an involution.
        return self.args[0]
    def _eval_conjugate(self):
        return transpose(self.args[0])
    def _eval_transpose(self):
        return conjugate(self.args[0])
    def _latex(self, printer, exp=None, *args):
        # LaTeX printing: dagger superscript, parenthesised when raised
        # to a power.
        printed = printer._print(self.args[0])
        tex = r'%s^{\dagger}' % printed
        if exp:
            tex = r'\left(%s\right)^{%s}' % (tex, exp)
        return tex
    def _pretty(self, printer, *args):
        # Pretty printing: unicode dagger when available, '+' otherwise.
        from sympy.printing.pretty.stringpict import prettyForm
        form = printer._print(self.args[0], *args)
        if printer._use_unicode:
            form = form**prettyForm('\N{DAGGER}')
        else:
            form = form**prettyForm('+')
        return form
###############################################################################
############### HANDLING OF POLAR NUMBERS #####################################
###############################################################################
class polar_lift(Function):
    """
    Lift argument to the Riemann surface of the logarithm, using the
    standard branch.
    Examples
    ========
    >>> from sympy import Symbol, polar_lift, I
    >>> p = Symbol('p', polar=True)
    >>> x = Symbol('x')
    >>> polar_lift(4)
    4*exp_polar(0)
    >>> polar_lift(-4)
    4*exp_polar(I*pi)
    >>> polar_lift(-I)
    exp_polar(-I*pi/2)
    >>> polar_lift(I + 2)
    polar_lift(2 + I)
    >>> polar_lift(4*x)
    4*polar_lift(x)
    >>> polar_lift(4*p)
    4*p
    Parameters
    ==========
    arg : Expr
        Real or complex expression.
    See Also
    ========
    sympy.functions.elementary.exponential.exp_polar
    periodic_argument
    """
    is_polar = True
    is_comparable = False  # Cannot be evalf'd.
    @classmethod
    def eval(cls, arg):
        from sympy.functions.elementary.complexes import arg as argument
        if arg.is_number:
            ar = argument(arg)
            # In general we want to affirm that something is known,
            # e.g. `not ar.has(argument) and not ar.has(atan)`
            # but for now we will just be more restrictive and
            # see that it has evaluated to one of the known values.
            if ar in (0, pi/2, -pi/2, pi):
                return exp_polar(I*ar)*abs(arg)
        if arg.is_Mul:
            args = arg.args
        else:
            args = [arg]
        # Partition factors into already-polar, positive (need no lift)
        # and the rest (to be lifted as a group).
        # NOTE: the loop variable deliberately reuses the name `arg`.
        included = []
        excluded = []
        positive = []
        for arg in args:
            if arg.is_polar:
                included += [arg]
            elif arg.is_positive:
                positive += [arg]
            else:
                excluded += [arg]
        if len(excluded) < len(args):
            if excluded:
                return Mul(*(included + positive))*polar_lift(Mul(*excluded))
            elif included:
                return Mul(*(included + positive))
            else:
                # Everything positive: attach exp_polar(0) to mark the
                # result as polar.
                return Mul(*positive)*exp_polar(0)
    def _eval_evalf(self, prec):
        """ Careful! any evalf of polar numbers is flaky """
        return self.args[0]._eval_evalf(prec)
    def _eval_Abs(self):
        return Abs(self.args[0], evaluate=True)
class periodic_argument(Function):
    """
    Represent the argument on a quotient of the Riemann surface of the
    logarithm. That is, given a period $P$, always return a value in
    (-P/2, P/2], by using exp(P*I) == 1.
    Examples
    ========
    >>> from sympy import exp_polar, periodic_argument
    >>> from sympy import I, pi
    >>> periodic_argument(exp_polar(10*I*pi), 2*pi)
    0
    >>> periodic_argument(exp_polar(5*I*pi), 4*pi)
    pi
    >>> from sympy import exp_polar, periodic_argument
    >>> from sympy import I, pi
    >>> periodic_argument(exp_polar(5*I*pi), 2*pi)
    pi
    >>> periodic_argument(exp_polar(5*I*pi), 3*pi)
    -pi
    >>> periodic_argument(exp_polar(5*I*pi), pi)
    0
    Parameters
    ==========
    ar : Expr
        A polar number.
    period : ExprT
        The period $P$.
    See Also
    ========
    sympy.functions.elementary.exponential.exp_polar
    polar_lift : Lift argument to the Riemann surface of the logarithm
    principal_branch
    """
    @classmethod
    def _getunbranched(cls, ar):
        # Sum the unbranched-argument contribution of every factor of a
        # product; return None if some factor is not understood.
        if ar.is_Mul:
            args = ar.args
        else:
            args = [ar]
        unbranched = 0
        for a in args:
            if not a.is_polar:
                unbranched += arg(a)
            elif isinstance(a, exp_polar):
                unbranched += a.exp.as_real_imag()[1]
            elif a.is_Pow:
                # NOTE: `re` and `im` here shadow the module-level
                # functions of the same name within this loop body.
                re, im = a.exp.as_real_imag()
                unbranched += re*unbranched_argument(
                    a.base) + im*log(abs(a.base))
            elif isinstance(a, polar_lift):
                unbranched += arg(a.args[0])
            else:
                return None
        return unbranched
    @classmethod
    def eval(cls, ar, period):
        # Our strategy is to evaluate the argument on the Riemann surface of the
        # logarithm, and then reduce.
        # NOTE evidently this means it is a rather bad idea to use this with
        # period != 2*pi and non-polar numbers.
        if not period.is_extended_positive:
            return None
        if period == oo and isinstance(ar, principal_branch):
            return periodic_argument(*ar.args)
        if isinstance(ar, polar_lift) and period >= 2*pi:
            return periodic_argument(ar.args[0], period)
        if ar.is_Mul:
            # Positive factors do not change the argument; drop them.
            newargs = [x for x in ar.args if not x.is_positive]
            if len(newargs) != len(ar.args):
                return periodic_argument(Mul(*newargs), period)
        unbranched = cls._getunbranched(ar)
        if unbranched is None:
            return None
        if unbranched.has(periodic_argument, atan2, atan):
            return None
        if period == oo:
            return unbranched
        if period != oo:
            # Reduce the unbranched argument into (-period/2, period/2].
            n = ceiling(unbranched/period - S.Half)*period
            if not n.has(ceiling):
                return unbranched - n
    def _eval_evalf(self, prec):
        z, period = self.args
        if period == oo:
            unbranched = periodic_argument._getunbranched(z)
            if unbranched is None:
                return self
            return unbranched._eval_evalf(prec)
        ub = periodic_argument(z, oo)._eval_evalf(prec)
        # Numeric reduction into (-period/2, period/2].
        return (ub - ceiling(ub/period - S.Half)*period)._eval_evalf(prec)
def unbranched_argument(arg):
    '''
    Return the argument of ``arg`` on the full Riemann surface of the
    logarithm, i.e. the periodic argument with an infinite period.
    Examples
    ========
    >>> from sympy import exp_polar, unbranched_argument
    >>> from sympy import I, pi
    >>> unbranched_argument(exp_polar(15*I*pi))
    15*pi
    >>> unbranched_argument(exp_polar(7*I*pi))
    7*pi
    See also
    ========
    periodic_argument
    '''
    return periodic_argument(arg, oo)
class principal_branch(Function):
    """
    Represent a polar number reduced to its principal branch on a quotient
    of the Riemann surface of the logarithm.
    Explanation
    ===========
    This is a function of two arguments. The first argument is a polar
    number `z`, and the second one a positive real number or infinity, `p`.
    The result is "z mod exp_polar(I*p)".
    Examples
    ========
    >>> from sympy import exp_polar, principal_branch, oo, I, pi
    >>> from sympy.abc import z
    >>> principal_branch(z, oo)
    z
    >>> principal_branch(exp_polar(2*pi*I)*3, 2*pi)
    3*exp_polar(0)
    >>> principal_branch(exp_polar(2*pi*I)*3*z, 2*pi)
    3*principal_branch(z, 2*pi)
    Parameters
    ==========
    x : Expr
        A polar number.
    period : Expr
        Positive real number or infinity.
    See Also
    ========
    sympy.functions.elementary.exponential.exp_polar
    polar_lift : Lift argument to the Riemann surface of the logarithm
    periodic_argument
    """
    is_polar = True
    is_comparable = False  # cannot always be evalf'd
    @classmethod
    def eval(self, x, period):
        # NOTE(review): the first parameter is named `self`, but as a
        # classmethod it actually receives the class.
        from sympy import oo, exp_polar, I, Mul, polar_lift, Symbol
        if isinstance(x, polar_lift):
            return principal_branch(x.args[0], period)
        if period == oo:
            return x
        ub = periodic_argument(x, oo)
        barg = periodic_argument(x, period)
        if ub != barg and not ub.has(periodic_argument) \
                and not barg.has(periodic_argument):
            pl = polar_lift(x)
            # Lift every non-Symbol subexpression, so the unbranched
            # argument can be recomputed on a fully polar expression.
            def mr(expr):
                if not isinstance(expr, Symbol):
                    return polar_lift(expr)
                return expr
            pl = pl.replace(polar_lift, mr)
            # Recompute unbranched argument
            ub = periodic_argument(pl, oo)
            if not pl.has(polar_lift):
                if ub != barg:
                    res = exp_polar(I*(barg - ub))*pl
                else:
                    res = pl
                if not res.is_polar and not res.has(exp_polar):
                    res *= exp_polar(0)
                return res
        # Separate a symbol-free coefficient c from the remaining factors
        # m, absorbing positive factors into c.
        if not x.free_symbols:
            c, m = x, ()
        else:
            c, m = x.as_coeff_mul(*x.free_symbols)
        others = []
        for y in m:
            if y.is_positive:
                c *= y
            else:
                others += [y]
        m = tuple(others)
        arg = periodic_argument(c, period)
        if arg.has(periodic_argument):
            return None
        if arg.is_number and (unbranched_argument(c) != arg or
                              (arg == 0 and m != () and c != 1)):
            if arg == 0:
                return abs(c)*principal_branch(Mul(*m), period)
            return principal_branch(exp_polar(I*arg)*Mul(*m), period)*abs(c)
        if arg.is_number and ((abs(arg) < period/2) == True or arg == period/2) \
                and m == ():
            return exp_polar(arg*I)*abs(c)
    def _eval_evalf(self, prec):
        from sympy import exp, pi, I
        z, period = self.args
        p = periodic_argument(z, period)._eval_evalf(prec)
        if abs(p) > pi or p == -pi:
            return self  # Cannot evalf for this argument.
        return (abs(z)*exp(I*p))._eval_evalf(prec)
def _polarify(eq, lift, pause=False):
    # Recursive worker for polarify().  ``lift`` requests polar_lift
    # wrapping of symbols and sums; ``pause`` suppresses lifting at the
    # current level (set just below an Add so individual terms are not
    # lifted on their own).
    from sympy import Integral
    if eq.is_polar:
        return eq
    if eq.is_number and not pause:
        return polar_lift(eq)
    if isinstance(eq, Symbol) and not pause and lift:
        return polar_lift(eq)
    elif eq.is_Atom:
        return eq
    elif eq.is_Add:
        r = eq.func(*[_polarify(arg, lift, pause=True) for arg in eq.args])
        if lift:
            return polar_lift(r)
        return r
    elif eq.is_Pow and eq.base == S.Exp1:
        # exp(x): polarify only the exponent, never promote to exp_polar.
        return eq.func(S.Exp1, _polarify(eq.exp, lift, pause=False))
    elif eq.is_Function:
        return eq.func(*[_polarify(arg, lift, pause=False) for arg in eq.args])
    elif isinstance(eq, Integral):
        # Don't lift the integration variable
        func = _polarify(eq.function, lift, pause=pause)
        limits = []
        for limit in eq.args[1:]:
            var = _polarify(limit[0], lift=False, pause=pause)
            rest = _polarify(limit[1:], lift=lift, pause=pause)
            limits.append((var,) + rest)
        return Integral(*((func,) + tuple(limits)))
    else:
        # Generic fallback: recurse into Expr arguments only.
        return eq.func(*[_polarify(arg, lift, pause=pause)
            if isinstance(arg, Expr) else arg for arg in eq.args])
def polarify(eq, subs=True, lift=False):
    """
    Turn all numbers in eq into their polar equivalents (under the standard
    choice of argument).
    Note that no attempt is made to guess a formal convention of adding
    polar numbers, expressions like 1 + x will generally not be altered.
    Note also that this function does not promote exp(x) to exp_polar(x).
    If ``subs`` is True, all symbols which are not already polar will be
    substituted for polar dummies; in this case the function behaves much
    like posify.
    If ``lift`` is True, both addition statements and non-polar symbols are
    changed to their polar_lift()ed versions.
    Note that lift=True implies subs=False.
    Examples
    ========
    >>> from sympy import polarify, sin, I
    >>> from sympy.abc import x, y
    >>> expr = (-x)**y
    >>> expr.expand()
    (-x)**y
    >>> polarify(expr)
    ((_x*exp_polar(I*pi))**_y, {_x: x, _y: y})
    >>> polarify(expr)[0].expand()
    _x**_y*exp_polar(_y*I*pi)
    >>> polarify(x, lift=True)
    polar_lift(x)
    >>> polarify(x*(1+y), lift=True)
    polar_lift(x)*polar_lift(y + 1)
    Adds are treated carefully:
    >>> polarify(1 + sin((1 + I)*x))
    (sin(_x*polar_lift(1 + I)) + 1, {_x: x})
    """
    if lift:
        # Lifting everything makes dummy substitution redundant.
        subs = False
    polar_eq = _polarify(sympify(eq), lift)
    if not subs:
        return polar_eq
    # Replace each remaining free symbol by a polar dummy, and return the
    # inverse mapping so callers can undo the substitution.
    replacements = {sym: Dummy(sym.name, polar=True)
                    for sym in polar_eq.free_symbols}
    substituted = polar_eq.subs(replacements)
    inverse = {dummy: sym for sym, dummy in replacements.items()}
    return substituted, inverse
def _unpolarify(eq, exponents_only, pause=False):
    # Recursive worker for unpolarify().  ``pause`` blocks removal of polar
    # wrappers at the current level; it is set inside subexpressions where
    # stripping a wrapper could change the projection to the complex line.
    if not isinstance(eq, Basic) or eq.is_Atom:
        return eq
    if not pause:
        # Projection-preserving rewrites: exp_polar -> exp,
        # principal_branch(z, 2*pi) -> z, polar_lift(z) -> z.
        if isinstance(eq, exp_polar):
            return exp(_unpolarify(eq.exp, exponents_only))
        if isinstance(eq, principal_branch) and eq.args[1] == 2*pi:
            return _unpolarify(eq.args[0], exponents_only)
        if (
            eq.is_Add or eq.is_Mul or eq.is_Boolean or
            eq.is_Relational and (
                eq.rel_op in ('==', '!=') and 0 in eq.args or
                eq.rel_op not in ('==', '!='))
        ):
            return eq.func(*[_unpolarify(x, exponents_only) for x in eq.args])
        if isinstance(eq, polar_lift):
            return _unpolarify(eq.args[0], exponents_only)
    if eq.is_Pow:
        # The base may only be unpolarified freely when the exponent is an
        # integer (branch choice is then irrelevant) and we are not paused.
        expo = _unpolarify(eq.exp, exponents_only)
        base = _unpolarify(eq.base, exponents_only,
            not (expo.is_integer and not pause))
        return base**expo
    if eq.is_Function and getattr(eq.func, 'unbranched', False):
        # Unbranched functions are insensitive to the branch of their
        # arguments; propagate the exponents_only flag as the pause value.
        return eq.func(*[_unpolarify(x, exponents_only, exponents_only)
            for x in eq.args])
    # Unknown function: recurse with pause=True to stay conservative.
    return eq.func(*[_unpolarify(x, exponents_only, True) for x in eq.args])
def unpolarify(eq, subs=None, exponents_only=False):
    """
    If p denotes the projection from the Riemann surface of the logarithm to
    the complex line, return a simplified version eq' of `eq` such that
    p(eq') == p(eq).
    Also apply the substitution subs in the end. (This is a convenience, since
    ``unpolarify``, in a certain sense, undoes polarify.)
    Examples
    ========
    >>> from sympy import unpolarify, polar_lift, sin, I
    >>> unpolarify(polar_lift(I + 2))
    2 + I
    >>> unpolarify(sin(polar_lift(I + 7)))
    sin(7 + I)
    """
    # NOTE: the default for ``subs`` used to be the mutable ``{}``; using
    # ``None`` avoids the shared-mutable-default pitfall while behaving
    # identically for callers (an explicit empty dict is still a no-op).
    if isinstance(eq, bool):
        return eq
    eq = sympify(eq)
    if subs:
        # Apply the substitutions to the fully unpolarified expression.
        return unpolarify(eq.subs(subs))
    changed = True
    pause = False
    if exponents_only:
        pause = True
    # Iterate _unpolarify to a fixed point.
    while changed:
        changed = False
        res = _unpolarify(eq, exponents_only, pause)
        if res != eq:
            changed = True
            eq = res
        if isinstance(res, bool):
            return res
    # Finally, replacing Exp(0) by 1 is always correct.
    # So is polar_lift(0) -> 0.
    return res.subs({exp_polar(0): 1, polar_lift(0): 0})
| 29.364014 | 86 | 0.55278 | from sympy.core import S, Add, Mul, sympify, Symbol, Dummy, Basic
from sympy.core.expr import Expr
from sympy.core.exprtools import factor_terms
from sympy.core.function import (Function, Derivative, ArgumentIndexError,
AppliedUndef)
from sympy.core.logic import fuzzy_not, fuzzy_or
from sympy.core.numbers import pi, I, oo
from sympy.core.relational import Eq
from sympy.functions.elementary.exponential import exp, exp_polar, log
from sympy.functions.elementary.integers import ceiling
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import atan, atan2
) * Derivative(re(self.args[0]), x,
evaluate=True) + im(self.args[0]) * Derivative(im(self.args[0]),
x, evaluate=True)) / Abs(self.args[0])
return rv.rewrite(sign)
    def _eval_rewrite_as_Heaviside(self, arg, **kwargs):
        """Rewrite Abs(arg) as arg*(Heaviside(arg) - Heaviside(-arg))."""
        # Note this only holds for real arg (since Heaviside is not defined
        # for complex arguments).
        from sympy.functions.special.delta_functions import Heaviside
        if arg.is_extended_real:
            return arg*(Heaviside(arg) - Heaviside(-arg))
    def _eval_rewrite_as_Piecewise(self, arg, **kwargs):
        """Rewrite Abs(arg) as a Piecewise for real or purely imaginary arg."""
        if arg.is_extended_real:
            return Piecewise((arg, arg >= 0), (-arg, True))
        elif arg.is_imaginary:
            # For imaginary arg, I*arg is real, so the real branch applies.
            return Piecewise((I*arg, I*arg >= 0), (-I*arg, True))
    def _eval_rewrite_as_sign(self, arg, **kwargs):
        """Rewrite Abs(arg) as arg/sign(arg)."""
        return arg/sign(arg)
    def _eval_rewrite_as_conjugate(self, arg, **kwargs):
        """Rewrite Abs(arg) as sqrt(arg*conjugate(arg))."""
        return (arg*conjugate(arg))**S.Half
class arg(Function):
    """Returns the argument (in radians) of a complex number.

    The result lies in the canonical branch (-pi, pi].
    """
    is_extended_real = True
    is_real = True
    is_finite = True
    _singularities = True  # non-holomorphic
    @classmethod
    def eval(cls, arg):
        # Polar numbers carry their own, unbranched argument.
        if isinstance(arg, exp_polar):
            return periodic_argument(arg, oo)
        if not arg.is_Atom:
            # Split off a numeric coefficient, and collapse each factor whose
            # sign is known to be +/-1 down to that sign.
            c, arg_ = factor_terms(arg).as_coeff_Mul()
            if arg_.is_Mul:
                arg_ = Mul(*[a if (sign(a) not in (-1, 1)) else
                    sign(a) for a in arg_.args])
            arg_ = sign(c)*arg_
        else:
            arg_ = arg
        # Bail out when an undefined function of unknown sign is present.
        if any(i.is_extended_positive is None for i in arg_.atoms(AppliedUndef)):
            return
        x, y = arg_.as_real_imag()
        rv = atan2(y, x)
        if rv.is_number:
            return rv
        if arg_ != arg:
            # Return the simplified, but unevaluated, application.
            return cls(arg_, evaluate=False)
    def _eval_derivative(self, t):
        # d/dt atan2(y, x) = (x*y' - y*x') / (x**2 + y**2)
        x, y = self.args[0].as_real_imag()
        return (x * Derivative(y, t, evaluate=True) - y *
                    Derivative(x, t, evaluate=True)) / (x**2 + y**2)
    def _eval_rewrite_as_atan2(self, arg, **kwargs):
        x, y = self.args[0].as_real_imag()
        return atan2(y, x)
class conjugate(Function):
    """Complex conjugation: changes the sign of the imaginary part."""
    _singularities = True  # non-holomorphic
    @classmethod
    def eval(cls, arg):
        # Delegate to the argument's own conjugation rule, if it has one.
        obj = arg._eval_conjugate()
        if obj is not None:
            return obj
    def _eval_Abs(self):
        # |conjugate(z)| == |z|
        return Abs(self.args[0], evaluate=True)
    def _eval_adjoint(self):
        # adjoint(conjugate(z)) == transpose(z)
        return transpose(self.args[0])
    def _eval_conjugate(self):
        # Conjugation is an involution.
        return self.args[0]
    def _eval_derivative(self, x):
        if x.is_real:
            return conjugate(Derivative(self.args[0], x, evaluate=True))
        elif x.is_imaginary:
            return -conjugate(Derivative(self.args[0], x, evaluate=True))
    def _eval_transpose(self):
        # transpose(conjugate(z)) == adjoint(z)
        return adjoint(self.args[0])
    def _eval_is_algebraic(self):
        return self.args[0].is_algebraic
class transpose(Function):
    """Linear-algebra transpose; identity on scalar expressions."""
    @classmethod
    def eval(cls, arg):
        obj = arg._eval_transpose()
        if obj is not None:
            return obj
    def _eval_adjoint(self):
        # adjoint(transpose(z)) == conjugate(z)
        return conjugate(self.args[0])
    def _eval_conjugate(self):
        # conjugate(transpose(z)) == adjoint(z)
        return adjoint(self.args[0])
    def _eval_transpose(self):
        # Transposition is an involution.
        return self.args[0]
class adjoint(Function):
    """Conjugate transpose (Hermitian adjoint, printed with a dagger)."""
    @classmethod
    def eval(cls, arg):
        obj = arg._eval_adjoint()
        if obj is not None:
            return obj
        # Fall back to conjugate(transpose(arg)) when only a transpose
        # rule is available.
        obj = arg._eval_transpose()
        if obj is not None:
            return conjugate(obj)
    def _eval_adjoint(self):
        # The adjoint is an involution.
        return self.args[0]
    def _eval_conjugate(self):
        return transpose(self.args[0])
    def _eval_transpose(self):
        return conjugate(self.args[0])
    def _latex(self, printer, exp=None, *args):
        # Render as arg^{\dagger}, parenthesized when raised to a power.
        arg = printer._print(self.args[0])
        tex = r'%s^{\dagger}' % arg
        if exp:
            tex = r'\left(%s\right)^{%s}' % (tex, exp)
        return tex
    def _pretty(self, printer, *args):
        from sympy.printing.pretty.stringpict import prettyForm
        pform = printer._print(self.args[0], *args)
        if printer._use_unicode:
            pform = pform**prettyForm('\N{DAGGER}')
        else:
            pform = pform**prettyForm('+')
        return pform
###############################################################################
############### HANDLING OF POLAR NUMBERS #####################################
###############################################################################
class polar_lift(Function):
    """Lift an argument to the Riemann surface of the logarithm, using the
    standard branch (the argument in (-pi, pi])."""
    is_polar = True
    is_comparable = False  # Cannot be evalf'd.
    @classmethod
    def eval(cls, arg):
        from sympy.functions.elementary.complexes import arg as argument
        if arg.is_number:
            ar = argument(arg)
            # Axis-aligned numbers lift to |arg| times an exact exp_polar.
            if ar in (0, pi/2, -pi/2, pi):
                return exp_polar(I*ar)*abs(arg)
        if arg.is_Mul:
            args = arg.args
        else:
            args = [arg]
        # Partition factors: already-polar, known-positive, and the rest.
        included = []
        excluded = []
        positive = []
        for arg in args:
            if arg.is_polar:
                included += [arg]
            elif arg.is_positive:
                positive += [arg]
            else:
                excluded += [arg]
        if len(excluded) < len(args):
            # At least one factor needs no lifting, so push polar_lift inward
            # onto only the undetermined factors.
            if excluded:
                return Mul(*(included + positive))*polar_lift(Mul(*excluded))
            elif included:
                return Mul(*(included + positive))
            else:
                # All factors positive: exp_polar(0) marks the result polar.
                return Mul(*positive)*exp_polar(0)
    def _eval_evalf(self, prec):
        """Careful! any evalf of polar numbers is flaky."""
        return self.args[0]._eval_evalf(prec)
    def _eval_Abs(self):
        return Abs(self.args[0], evaluate=True)
class periodic_argument(Function):
    """Represent the argument of ``ar`` on the Riemann surface of the
    logarithm, reduced modulo ``period`` (period == oo means unbranched)."""
    @classmethod
    def _getunbranched(cls, ar):
        # Sum the unbranched arguments of the factors of a Mul; return None
        # if some polar factor is not of a recognized form.
        if ar.is_Mul:
            args = ar.args
        else:
            args = [ar]
        unbranched = 0
        for a in args:
            if not a.is_polar:
                unbranched += arg(a)
            elif isinstance(a, exp_polar):
                unbranched += a.exp.as_real_imag()[1]
            elif a.is_Pow:
                re, im = a.exp.as_real_imag()
                unbranched += re*unbranched_argument(
                    a.base) + im*log(abs(a.base))
            elif isinstance(a, polar_lift):
                unbranched += arg(a.args[0])
            else:
                return None
        return unbranched
    @classmethod
    def eval(cls, ar, period):
        if not period.is_extended_positive:
            return None
        # principal_branch already reduces modulo its own period.
        if period == oo and isinstance(ar, principal_branch):
            return periodic_argument(*ar.args)
        # A polar_lift never leaves the standard branch (-pi, pi].
        if isinstance(ar, polar_lift) and period >= 2*pi:
            return periodic_argument(ar.args[0], period)
        if ar.is_Mul:
            # Positive factors contribute nothing to the argument.
            newargs = [x for x in ar.args if not x.is_positive]
            if len(newargs) != len(ar.args):
                return periodic_argument(Mul(*newargs), period)
        unbranched = cls._getunbranched(ar)
        if unbranched is None:
            return None
        if unbranched.has(periodic_argument, atan2, atan):
            return None
        if period == oo:
            return unbranched
        if period != oo:
            # Reduce into (-period/2, period/2] when the ceiling evaluates.
            n = ceiling(unbranched/period - S.Half)*period
            if not n.has(ceiling):
                return unbranched - n
    def _eval_evalf(self, prec):
        z, period = self.args
        if period == oo:
            unbranched = periodic_argument._getunbranched(z)
            if unbranched is None:
                return self
            return unbranched._eval_evalf(prec)
        # Evaluate the unbranched argument numerically, then reduce.
        ub = periodic_argument(z, oo)._eval_evalf(prec)
        return (ub - ceiling(ub/period - S.Half)*period)._eval_evalf(prec)
def unbranched_argument(arg):
    """Return the unbranched argument of ``arg`` (period oo)."""
    return periodic_argument(arg, oo)
class principal_branch(Function):
    """Represent a polar number reduced to its principal branch on a quotient
    of the Riemann surface of the logarithm, with argument period ``period``."""
    is_polar = True
    is_comparable = False
    @classmethod
    def eval(self, x, period):
        # NOTE(review): first parameter of this classmethod is named ``self``
        # but is the class; conventionally it should be ``cls``.
        from sympy import oo, exp_polar, I, Mul, polar_lift, Symbol
        if isinstance(x, polar_lift):
            return principal_branch(x.args[0], period)
        if period == oo:
            return x
        ub = periodic_argument(x, oo)
        barg = periodic_argument(x, period)
        if ub != barg and not ub.has(periodic_argument) \
                and not barg.has(periodic_argument):
            pl = polar_lift(x)
            # Lift every non-symbol piece so the argument is fully tracked.
            def mr(expr):
                if not isinstance(expr, Symbol):
                    return polar_lift(expr)
                return expr
            pl = pl.replace(polar_lift, mr)
            # Recompute unbranched argument
            ub = periodic_argument(pl, oo)
            if not pl.has(polar_lift):
                if ub != barg:
                    res = exp_polar(I*(barg - ub))*pl
                else:
                    res = pl
                if not res.is_polar and not res.has(exp_polar):
                    res *= exp_polar(0)
                return res
        # Split x into a coefficient c and remaining (non-positive) factors m.
        if not x.free_symbols:
            c, m = x, ()
        else:
            c, m = x.as_coeff_mul(*x.free_symbols)
        others = []
        for y in m:
            if y.is_positive:
                c *= y
            else:
                others += [y]
        m = tuple(others)
        arg = periodic_argument(c, period)
        if arg.has(periodic_argument):
            return None
        if arg.is_number and (unbranched_argument(c) != arg or
                              (arg == 0 and m != () and c != 1)):
            if arg == 0:
                return abs(c)*principal_branch(Mul(*m), period)
            return principal_branch(exp_polar(I*arg)*Mul(*m), period)*abs(c)
        if arg.is_number and ((abs(arg) < period/2) == True or arg == period/2) \
                and m == ():
            # Purely numeric input already inside the principal branch.
            return exp_polar(arg*I)*abs(c)
    def _eval_evalf(self, prec):
        from sympy import exp, pi, I
        z, period = self.args
        p = periodic_argument(z, period)._eval_evalf(prec)
        if abs(p) > pi or p == -pi:
            return self  # Cannot evalf for this argument.
        return (abs(z)*exp(I*p))._eval_evalf(prec)
def _polarify(eq, lift, pause=False):
    """Recursive worker for polarify().

    ``lift`` additionally wraps symbols/sums in polar_lift; ``pause``
    suppresses lifting at the current node (used under Adds and Integrals).
    """
    from sympy import Integral
    if eq.is_polar:
        return eq
    if eq.is_number and not pause:
        return polar_lift(eq)
    if isinstance(eq, Symbol) and not pause and lift:
        return polar_lift(eq)
    elif eq.is_Atom:
        return eq
    elif eq.is_Add:
        # Addition does not commute with the projection, so pause inside.
        r = eq.func(*[_polarify(arg, lift, pause=True) for arg in eq.args])
        if lift:
            return polar_lift(r)
        return r
    elif eq.is_Pow and eq.base == S.Exp1:
        # exp(...) is already polar-friendly; only recurse into the exponent.
        return eq.func(S.Exp1, _polarify(eq.exp, lift, pause=False))
    elif eq.is_Function:
        return eq.func(*[_polarify(arg, lift, pause=False) for arg in eq.args])
    elif isinstance(eq, Integral):
        # Don't lift the integration variable
        func = _polarify(eq.function, lift, pause=pause)
        limits = []
        for limit in eq.args[1:]:
            var = _polarify(limit[0], lift=False, pause=pause)
            rest = _polarify(limit[1:], lift=lift, pause=pause)
            limits.append((var,) + rest)
        return Integral(*((func,) + tuple(limits)))
    else:
        # Generic container: recurse into Expr children only.
        return eq.func(*[_polarify(arg, lift, pause=pause)
            if isinstance(arg, Expr) else arg for arg in eq.args])
def polarify(eq, subs=True, lift=False):
    """Turn all numbers in ``eq`` into their polar equivalents.

    When ``subs`` is True (the default), also substitute every free symbol
    by a polar dummy and return ``(polar_eq, mapping)`` where ``mapping``
    maps each dummy back to the original symbol.  When ``lift`` is True,
    wrap symbols and sums in ``polar_lift`` instead (implies ``subs=False``).
    """
    if lift:
        subs = False
    eq = _polarify(sympify(eq), lift)
    if not subs:
        return eq
    reps = {s: Dummy(s.name, polar=True) for s in eq.free_symbols}
    eq = eq.subs(reps)
    return eq, {r: s for s, r in reps.items()}
def _unpolarify(eq, exponents_only, pause=False):
    """Recursive worker for unpolarify().

    ``pause`` disables rewriting at the current node; it is cleared inside
    exponents so that, in ``exponents_only`` mode, only exponents change.
    """
    if not isinstance(eq, Basic) or eq.is_Atom:
        return eq
    if not pause:
        # exp_polar projects down to ordinary exp.
        if isinstance(eq, exp_polar):
            return exp(_unpolarify(eq.exp, exponents_only))
        # A principal branch with full period 2*pi is the identity projection.
        if isinstance(eq, principal_branch) and eq.args[1] == 2*pi:
            return _unpolarify(eq.args[0], exponents_only)
        if (
            eq.is_Add or eq.is_Mul or eq.is_Boolean or
            eq.is_Relational and (
                eq.rel_op in ('==', '!=') and 0 in eq.args or
                eq.rel_op not in ('==', '!='))
        ):
            return eq.func(*[_unpolarify(x, exponents_only) for x in eq.args])
        if isinstance(eq, polar_lift):
            return _unpolarify(eq.args[0], exponents_only)
    if eq.is_Pow:
        expo = _unpolarify(eq.exp, exponents_only)
        # An integer exponent makes the base branch-insensitive.
        base = _unpolarify(eq.base, exponents_only,
            not (expo.is_integer and not pause))
        return base**expo
    if eq.is_Function and getattr(eq.func, 'unbranched', False):
        # Unbranched functions are safe to rewrite through.
        return eq.func(*[_unpolarify(x, exponents_only, exponents_only)
            for x in eq.args])
    # Unknown head: recurse with rewriting paused at the children.
    return eq.func(*[_unpolarify(x, exponents_only, True) for x in eq.args])
def unpolarify(eq, subs={}, exponents_only=False):
    """Project ``eq`` from the Riemann surface of the logarithm back to the
    complex line, simplifying where the projection is unambiguous, and then
    apply the substitution ``subs``.

    NOTE(review): ``subs={}`` is a mutable default; it is only read here, so
    it is safe, but ``subs=None`` would be the conventional spelling.
    """
    if isinstance(eq, bool):
        return eq
    eq = sympify(eq)
    if subs != {}:
        return unpolarify(eq.subs(subs))
    changed = True
    pause = False
    if exponents_only:
        pause = True
    # Iterate the one-pass rewriter to a fixed point.
    while changed:
        changed = False
        res = _unpolarify(eq, exponents_only, pause)
        if res != eq:
            changed = True
            eq = res
        if isinstance(res, bool):
            return res
    # exp_polar(0) -> 1 and polar_lift(0) -> 0 are always correct.
    return res.subs({exp_polar(0): 1, polar_lift(0): 0})
| true | true |
1c3b4fa254f1daec3297d254064842c05b66f54d | 318 | py | Python | models/schemas.py | muneeb-bashir/Lost_and_Found | e6d8c9f4323e4e0ecf69afd6af9615ac6e48a522 | [
"Apache-2.0"
] | null | null | null | models/schemas.py | muneeb-bashir/Lost_and_Found | e6d8c9f4323e4e0ecf69afd6af9615ac6e48a522 | [
"Apache-2.0"
] | null | null | null | models/schemas.py | muneeb-bashir/Lost_and_Found | e6d8c9f4323e4e0ecf69afd6af9615ac6e48a522 | [
"Apache-2.0"
] | null | null | null | from pydantic import BaseModel
from datetime import date
class User(BaseModel):
    """Pydantic schema for a user account (name, email, credentials, contact)."""
    name: str
    email: str
    password: str
    contact :str
class Item(BaseModel):
    """Pydantic schema for a lost/found item.

    NOTE(review): ``Date`` is not PEP 8 (lower_case); renaming it would
    change the serialized field name, so it is left as-is.
    """
    name: str
    description: str
    lostlocation: str
    foundlocation: str
    status: bool
    Date : date
    user_id: int
| 16.736842 | 31 | 0.632075 | from pydantic import BaseModel
from datetime import date
class User(BaseModel):
    """Pydantic schema for a user account (name, email, credentials, contact)."""
    name: str
    email: str
    password: str
    contact :str
class Item(BaseModel):
    """Pydantic schema for a lost/found item.

    NOTE(review): ``Date`` is not PEP 8; renaming it would change the
    serialized field name, so it is left as-is.
    """
    name: str
    description: str
    lostlocation: str
    foundlocation: str
    status: bool
    Date : date
    user_id: int
| true | true |
1c3b50bde722640d7c94b1b392bc478bcaf503b5 | 1,193 | py | Python | 6/6.2/favorite_languages.py | liqiwa/python_work | 3d1198d5616b28a37fee7dfba5bbef0e1d489c2d | [
"Apache-2.0"
] | null | null | null | 6/6.2/favorite_languages.py | liqiwa/python_work | 3d1198d5616b28a37fee7dfba5bbef0e1d489c2d | [
"Apache-2.0"
] | null | null | null | 6/6.2/favorite_languages.py | liqiwa/python_work | 3d1198d5616b28a37fee7dfba5bbef0e1d489c2d | [
"Apache-2.0"
] | null | null | null | favorite_languages = {
'jen':'python',
'sarah':'c',
'edward':'ruby',
'phil':'python',}
print("Sarah`s favorite_language is "+str(favorite_languages['sarah'].title())+'.')
#6-1
people = {'name':'mm','xing':'he','age':'30','city':'sjz'}
print(people)
words = {'liebiao':'liebiao,yuansu jihe','yuanzu':'bukebian xulie','zidian':'jianzhidui'}
for k,v in words.items():
print(k+"\n"+v)
for name,language in favorite_languages.items():
print(name.title()+ "'s favorite language "+language.title())
for language in set(favorite_languages.values()):
print(language)
favorite_languages['zifuchuang'] = 'chulizifu'
favorite_languages['for xunhuan'] = 'xunhuan henduo'
print(favorite_languages)
nations = {'nile':'egypt','changjiang':'china','Amazon River':'South America'}
for river,nation in nations.items():
print("The "+river.title()+" runs through "+nation.title())
for river in nations.keys():
print(river)
for nation in nations.values():
print(nation+"\n")
invite = ['jen','phil','for xunhuan']
for name,language in favorite_languages.items():
if name in invite:
print(name+" Thank you!")
else:
print(name+" invite join survey")
print(favorite_languages)
| 27.744186 | 89 | 0.686505 | favorite_languages = {
'jen':'python',
'sarah':'c',
'edward':'ruby',
'phil':'python',}
print("Sarah`s favorite_language is "+str(favorite_languages['sarah'].title())+'.')
people = {'name':'mm','xing':'he','age':'30','city':'sjz'}
print(people)
words = {'liebiao':'liebiao,yuansu jihe','yuanzu':'bukebian xulie','zidian':'jianzhidui'}
# Print each glossary term with its definition.
for k,v in words.items():
    print(k+"\n"+v)
for name,language in favorite_languages.items():
    print(name.title()+ "'s favorite language "+language.title())
# set() removes duplicate language values.
for language in set(favorite_languages.values()):
    print(language)
favorite_languages['zifuchuang'] = 'chulizifu'
favorite_languages['for xunhuan'] = 'xunhuan henduo'
print(favorite_languages)
nations = {'nile':'egypt','changjiang':'china','Amazon River':'South America'}
for river,nation in nations.items():
    print("The "+river.title()+" runs through "+nation.title())
for river in nations.keys():
    print(river)
for nation in nations.values():
    print(nation+"\n")
# Thank invited respondents; ask the rest to join the survey.
invite = ['jen','phil','for xunhuan']
for name,language in favorite_languages.items():
    if name in invite:
        print(name+" Thank you!")
    else:
        print(name+" invite join survey")
print(favorite_languages)
| true | true |
1c3b51b9bba5ad52ab6d1413235c5067c07ac3ac | 5,115 | py | Python | model/unet.py | shvetsiya/carvana | acc594cba53c44d577c9e3e326e0163eea8b4862 | [
"MIT"
] | 11 | 2018-01-28T04:22:57.000Z | 2018-12-20T10:09:40.000Z | model/unet.py | shvetsiya/carvana | acc594cba53c44d577c9e3e326e0163eea8b4862 | [
"MIT"
] | null | null | null | model/unet.py | shvetsiya/carvana | acc594cba53c44d577c9e3e326e0163eea8b4862 | [
"MIT"
] | 2 | 2017-10-04T00:58:10.000Z | 2019-02-14T17:47:25.000Z | import torch
from torch import nn
from torch.nn import functional as F
class Conv3BN(nn.Module):
    """3x3 same-padding convolution, optional batch norm, then ReLU.

    Attributes:
        conv: 3x3 convolution mapping ``in_ch`` to ``out_ch`` channels.
        bn: BatchNorm2d over ``out_ch`` channels, or ``None`` when disabled.
        activation: in-place ReLU applied last.
    """

    def __init__(self, in_ch: int, out_ch: int, bn=True):
        super(Conv3BN, self).__init__()
        # Submodule creation order (conv, then bn) matches the original so
        # seeded parameter initialization is reproducible.
        self.conv = nn.Conv2d(in_ch, out_ch, 3, padding=1)
        self.bn = nn.BatchNorm2d(out_ch) if bn else None
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        return self.activation(out)
class UNetEncoder(nn.Module):
    """Downsampling stage of the U-Net.

    Halves the spatial resolution with a 2x2 max-pool, then applies two
    Conv3BN blocks: the first maps ``in_ch`` to ``out_ch`` channels, the
    second keeps the channel count at ``out_ch``.
    """

    def __init__(self, in_ch: int, out_ch: int):
        super(UNetEncoder, self).__init__()
        stages = [
            nn.MaxPool2d(2, 2),
            Conv3BN(in_ch, out_ch),
            Conv3BN(out_ch, out_ch),
        ]
        self.encode = nn.Sequential(*stages)

    def forward(self, x):
        return self.encode(x)
class UNetDecoder(nn.Module):
    """Upsampling stage of the U-Net.

    Applies
    - bilinear Upsample with scale_factor=2 to the features from below,
    - channel-wise concatenation with the mirrored encoder slice (so the
      channel count grows to ``in_ch``),
    - three Conv3BN blocks (the first reduces ``in_ch`` to ``out_ch``).

    Attributes:
        in_ch: Number of input channels after concatenation.
        out_ch: Number of output channels.
    """
    def __init__(self, in_ch: int, out_ch: int):
        super(UNetDecoder, self).__init__()
        self.decode = nn.Sequential(Conv3BN(in_ch, out_ch),
                                    Conv3BN(out_ch, out_ch),
                                    Conv3BN(out_ch, out_ch),
                                    )
        # NOTE(review): bilinear Upsample without align_corners relies on the
        # framework default; confirm intended behavior on the target torch.
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
    def forward(self, x_copy, x_down):
        #N, C, H, W = x_copy.size()
        x_up = self.upsample(x_down) #F.upsample(x_down, size=(H, W), mode='bilinear')
        # Concatenate skip connection and upsampled features along channels.
        x_up = torch.cat([x_copy, x_up], 1)
        x_new = self.decode(x_up)
        return x_new
class UNet(nn.Module):
    """U-Net: input conv block, ``depth`` encoder stages down, ``depth``
    decoder stages up with skip connections, a 1x1 output convolution and a
    sigmoid activation.

    Channel counts double going down and halve going up; with the defaults
    (filters_base=8, depth=7) the bottom layer has 8 * 2**7 = 1024 channels.

    Attributes:
        num_classes: Number of output channels.
        input_channels: Number of input image channels.
        filters_base: Channel count of the first layer.
        depth: Number of encoder/decoder stages.
    """
    def __init__(self,
                 num_classes: int=1,
                 input_channels: int=3,
                 filters_base: int=8,
                 depth: int=7):
        super(UNet, self).__init__()
        #filter sizes for down, center and up
        down_filter_sizes = [filters_base * 2**i for i in range(depth+1)] # 8, 16, ..., 1024 with defaults
        up_filter_sizes = list(reversed(down_filter_sizes))
        # input layer
        self.input_layer = nn.Sequential(Conv3BN(input_channels, filters_base),
                                         Conv3BN(filters_base, filters_base),
                                         )
        # Going down:
        self.down, self.up = nn.ModuleList(), nn.ModuleList()
        # depth filters to go down
        for i in range(1, depth+1):
            self.down.append(UNetEncoder(down_filter_sizes[i-1], down_filter_sizes[i]))
        #depth filters to go up
        for i in range(1, depth+1): # the number of channels increases after concatenation
            self.up.append(UNetDecoder(up_filter_sizes[i-1]+up_filter_sizes[i], up_filter_sizes[i]))
        # Final layer and activation:
        self.output = nn.Conv2d(up_filter_sizes[-1], out_channels=num_classes, kernel_size=1)
        # NOTE(review): F.sigmoid is deprecated on newer torch; torch.sigmoid
        # is the drop-in replacement -- confirm target torch version.
        self.activation = F.sigmoid
    def forward(self, x):
        x = self.input_layer(x)
        xs = [x] # collect slices from down side to copy them to up side
        #go down
        for module in self.down:
            x = module(x)
            xs.append(x)
        xs.reverse()
        #go up
        x = xs[0]
        for xc, module in zip(xs[1:], self.up):
            x = module(xc, x)
        x = self.output(x)
        x = self.activation(x)
        return x
| 35.275862 | 120 | 0.577517 | import torch
from torch import nn
from torch.nn import functional as F
class Conv3BN(nn.Module):
    """3x3 same-padding convolution, optional batch norm, then in-place ReLU."""
    def __init__(self, in_ch: int, out_ch: int, bn=True):
        super(Conv3BN, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 3, padding=1)
        self.bn = nn.BatchNorm2d(out_ch) if bn else None
        self.activation = nn.ReLU(inplace=True)
    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        x = self.activation(x)
        return x
class UNetEncoder(nn.Module):
    """Downsampling stage: 2x2 max-pool followed by two Conv3BN blocks."""
    def __init__(self, in_ch: int, out_ch: int):
        super(UNetEncoder, self).__init__()
        self.encode = nn.Sequential(nn.MaxPool2d(2, 2),
                                    Conv3BN(in_ch, out_ch),
                                    Conv3BN(out_ch, out_ch),
                                    )
    def forward(self, x):
        x = self.encode(x)
        return x
class UNetDecoder(nn.Module):
    """Upsampling stage: bilinear 2x upsample, skip-connection concat along
    channels, then three Conv3BN blocks (first reduces ``in_ch`` to ``out_ch``)."""
    def __init__(self, in_ch: int, out_ch: int):
        super(UNetDecoder, self).__init__()
        self.decode = nn.Sequential(Conv3BN(in_ch, out_ch),
                                    Conv3BN(out_ch, out_ch),
                                    Conv3BN(out_ch, out_ch),
                                    )
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
    def forward(self, x_copy, x_down):
        x_up = self.upsample(x_down)
        # Concatenate the mirrored encoder slice with the upsampled features.
        x_up = torch.cat([x_copy, x_up], 1)
        x_new = self.decode(x_up)
        return x_new
class UNet(nn.Module):
    """U-Net: input block, ``depth`` encoders down, ``depth`` decoders up
    with skip connections, 1x1 output conv and sigmoid activation."""
    def __init__(self,
                 num_classes: int=1,
                 input_channels: int=3,
                 filters_base: int=8,
                 depth: int=7):
        super(UNet, self).__init__()
        # Channel counts double per encoder stage; decoders mirror them.
        down_filter_sizes = [filters_base * 2**i for i in range(depth+1)]
        up_filter_sizes = list(reversed(down_filter_sizes))
        self.input_layer = nn.Sequential(Conv3BN(input_channels, filters_base),
                                         Conv3BN(filters_base, filters_base),
                                         )
        self.down, self.up = nn.ModuleList(), nn.ModuleList()
        for i in range(1, depth+1):
            self.down.append(UNetEncoder(down_filter_sizes[i-1], down_filter_sizes[i]))
        # Decoder input width includes the concatenated skip connection.
        for i in range(1, depth+1):
            self.up.append(UNetDecoder(up_filter_sizes[i-1]+up_filter_sizes[i], up_filter_sizes[i]))
        self.output = nn.Conv2d(up_filter_sizes[-1], out_channels=num_classes, kernel_size=1)
        self.activation = F.sigmoid
    def forward(self, x):
        x = self.input_layer(x)
        # Collect per-stage activations to feed the skip connections.
        xs = [x]
        for module in self.down:
            x = module(x)
            xs.append(x)
        xs.reverse()
        x = xs[0]
        for xc, module in zip(xs[1:], self.up):
            x = module(xc, x)
        x = self.output(x)
        x = self.activation(x)
        return x
| true | true |
1c3b52422aeca1ef8527235caf16ec3b660eddfb | 3,425 | py | Python | mdemo.py | vorticityxyz/Gaia-api | 04e2a9ee2448830df72156aecf432eda0c6eb504 | [
"MIT"
] | null | null | null | mdemo.py | vorticityxyz/Gaia-api | 04e2a9ee2448830df72156aecf432eda0c6eb504 | [
"MIT"
] | null | null | null | mdemo.py | vorticityxyz/Gaia-api | 04e2a9ee2448830df72156aecf432eda0c6eb504 | [
"MIT"
] | null | null | null | # Description:
#
# This example uses Vorticity gaia API's mf28pml operator to run a forward model.
# The operator takes a velocity model and returns a simulated shot record which
# is then plotted using matplotlib.
#
# mf28pml allows for larger velocity models and faster solving than f28pml
#
# Input parameters for the operator is generated by the
# function generate_test_data() and is as follows:
#
# model - 3D numpy array representing the velocity model
# shot - 1D numpy array representing the shot profile spanning the all timesteps
# shotxyz - Cartesian coordinates of the shot location
# recxxyyz - Cartesian coordinates of the receiver locations
# deltas - dx, dy, dz and dt for the simulation
# pml - width and amplitude of the PML layer
#
# Output: simulated shot record in the form of a 3d numpy array of the format
# shot_record[timestep, x_position, y_position]
#
# (C) Vorticity Inc. Mountain View, CA 2021
# Licence: MIT
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import gaia
# Plot results using matplotlib
def plot_results(shot_record):
    """Display a 2D shot record (time vs. x position) as a grayscale image.

    The color scale is clipped to max/5000 to bring out weak arrivals.
    """
    fig = plt.figure(figsize=(15, 15))
    scale = np.max(shot_record) / 5000.
    extent = [0, 1, 1, 0]
    plot = plt.imshow(shot_record, vmin=-scale, vmax=scale, cmap=cm.gray, extent=extent)
    plt.xlabel('X position')
    plt.ylabel('Time')
    plt.show()
# Generate shot profile
def generate_ricker(nt, freq, dt):
    """Build a Ricker (Mexican-hat) shot wavelet over ``nt`` timesteps.

    The wavelet (peak amplitude 1000) is synthesized centered in the time
    window and then shifted so its onset sits near the start of the trace.

    Args:
        nt:   number of timesteps.
        freq: dominant frequency in Hz.
        dt:   timestep in seconds.

    Returns:
        1D float32 numpy array of length ``nt``.
    """
    peak_amplitude = 1000
    window = nt * dt
    t = np.arange(-float(window) / 2, float(window) / 2, dt)

    # Classic Ricker formula, centered at t = 0.
    centered = peak_amplitude * (1 - t * t * freq**2 * np.pi**2) * np.exp(-t**2 * np.pi**2 * freq**2)

    # Shift the waveform toward the start of the full-length trace.
    shifted = np.zeros(nt, dtype=np.float32)
    start = round(nt/2) - round(1/freq/dt) + 1
    shifted[0: nt - start] = centered[start: nt]
    return shifted
def generate_test_data():
    """Build the demo inputs for the gaia.mf28pml forward-model operator.

    Returns a tuple (model, shot, shotxyz, recxxyz, deltas, pml):
    a two-layer velocity model (~1001 x 1001 x 1601 float32, several GB),
    a Ricker shot profile, shot/receiver coordinates, spatial/temporal
    discretization, and PML boundary parameters.
    """
    # Earth model dimensions
    nx = 1001
    ny = 1001
    nz = 1601
    # Spacial discretization
    dx = 2.5
    dy = dx
    dz = dx
    # temporal discretization
    dt = 0.0004
    # number of timesteps
    nt = 2500
    # Absorbing boundaries
    pmlw = 50
    pmla = 100
    # Shot parameters
    freq = 30 # Frequency
    xs = round(nx/2)
    ys = round(ny/2)
    zs = 4
    # Receiver parameters
    xt1 = 104
    xt2 = (nx - 105)
    yt1 = round(ny/2)
    yt2 = round(ny/2)
    zt = 4
    # Earth model velocities
    c1 = 1500
    c2 = 2500
    # Build earth model
    model = np.full((nx, ny, nz), c1, dtype=np.float32) # Smooth model
    model[:, :, 151:] = c2 # Now insert step
    # Generate rest of the parameters
    shot = generate_ricker(nt, freq, dt)
    shotxyz = np.array([xs, ys, zs], dtype=np.int32)
    recxxyz = np.array([xt1, xt2, yt1, yt2, zt], dtype=np.int32)
    deltas = np.array([dx, dy, dz, dt], dtype=np.float32)
    pml = np.array([pmlw, pmla], dtype=np.int32)
    return model, shot, shotxyz, recxxyz, deltas, pml
if __name__ == '__main__':
    # generate test data (large allocation: the velocity model is several GB)
    print("Generating test data.")
    model, shot, shotxyz, recxxyz, deltas, pml = generate_test_data()
    # Call gaia function to run the forward model and get the shot record
    shot_record = gaia.mf28pml(model, shot, shotxyz, recxxyz, deltas, pml)
    # Plot results for the first receiver line (y index 0)
    plot_results(shot_record[:, :, 0])
    # Save shot record for rtm later
np.save("data/shot_record", shot_record) | 29.525862 | 103 | 0.649051 |
# The operator takes a velocity model and returns a simulated shot record which
# is then plotted using matplotlib.
#
# mf28pml allows for larger velocity models and faster solving than f28pml
#
# Input parameters for the operator is generated by the
# function generate_test_data() and is as follows:
#
# model - 3D numpy array representing the velocity model
# shot - 1D numpy array representing the shot profile spanning the all timesteps
# shotxyz - Cartesian coordinates of the shot location
# recxxyyz - Cartesian coordinates of the receiver locations
# deltas - dx, dy, dz and dt for the simulation
# pml - width and amplitude of the PML layer
#
# Output: simulated shot record in the form of a 3d numpy array of the format
# shot_record[timestep, x_position, y_position]
#
# (C) Vorticity Inc. Mountain View, CA 2021
# Licence: MIT
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import gaia
# Plot results using matplotlib
def plot_results(shot_record):
    """Display a 2D shot record (time vs. x position) as a grayscale image."""
    fig = plt.figure(figsize=(15, 15))
    scale = np.max(shot_record) / 5000.
    extent = [0, 1, 1, 0]
    plot = plt.imshow(shot_record, vmin=-scale, vmax=scale, cmap=cm.gray, extent=extent)
    plt.xlabel('X position')
    plt.ylabel('Time')
    plt.show()
# Generate shot profile
def generate_ricker(nt, freq, dt):
    """Build a Ricker (Mexican-hat) shot wavelet (peak 1000) over ``nt``
    timesteps of size ``dt`` at dominant frequency ``freq`` (Hz); returns a
    1D float32 array shifted so the onset sits near the trace start."""
    max_amplitude = 1000
    npt = nt * dt
    t = np.arange(-float(npt)/2, float(npt)/2, dt)
    # generate the short waveform
    rick1 = max_amplitude * (1 - t * t * freq**2 * np.pi**2) * np.exp(-t**2 * np.pi**2 * freq**2)
    # Overlay the short waveform over the full length of timesteps
    rick = np.zeros(nt, dtype=np.float32)
    rick[0: nt - (round(nt/2) - round(1/freq/dt) + 1)] = rick1[round(nt/2) - round(1/freq/dt) + 1: nt];
    return rick
def generate_test_data():
    """Build the demo inputs (model, shot, shotxyz, recxxyz, deltas, pml)
    for the gaia.mf28pml forward-model operator; the velocity model is a
    two-layer float32 volume of roughly 1001 x 1001 x 1601 cells."""
    # Earth model dimensions
    nx = 1001
    ny = 1001
    nz = 1601
    # Spacial discretization
    dx = 2.5
    dy = dx
    dz = dx
    # temporal discretization
    dt = 0.0004
    # number of timesteps
    nt = 2500
    # Absorbing boundaries
    pmlw = 50
    pmla = 100
    # Shot parameters
    freq = 30 # Frequency
    xs = round(nx/2)
    ys = round(ny/2)
    zs = 4
    # Receiver parameters
    xt1 = 104
    xt2 = (nx - 105)
    yt1 = round(ny/2)
    yt2 = round(ny/2)
    zt = 4
    # Earth model velocities
    c1 = 1500
    c2 = 2500
    # Build earth model
    model = np.full((nx, ny, nz), c1, dtype=np.float32) # Smooth model
    model[:, :, 151:] = c2 # Now insert step
    # Generate rest of the parameters
    shot = generate_ricker(nt, freq, dt)
    shotxyz = np.array([xs, ys, zs], dtype=np.int32)
    recxxyz = np.array([xt1, xt2, yt1, yt2, zt], dtype=np.int32)
    deltas = np.array([dx, dy, dz, dt], dtype=np.float32)
    pml = np.array([pmlw, pmla], dtype=np.int32)
    return model, shot, shotxyz, recxxyz, deltas, pml
if __name__ == '__main__':
    # generate test data (large allocation: the velocity model is several GB)
    print("Generating test data.")
    model, shot, shotxyz, recxxyz, deltas, pml = generate_test_data()
    # Call gaia function to run the forward model and get the shot record
    shot_record = gaia.mf28pml(model, shot, shotxyz, recxxyz, deltas, pml)
    # Plot results for the first receiver line (y index 0)
    plot_results(shot_record[:, :, 0])
    # Save shot record for rtm later
np.save("data/shot_record", shot_record) | true | true |
1c3b5276307a51b1eefd9095f0058c30e16f3a28 | 159 | py | Python | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_ConstantTrend_BestCycle_LSTM.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_ConstantTrend_BestCycle_LSTM.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_ConstantTrend_BestCycle_LSTM.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['ConstantTrend'] , ['BestCycle'] , ['LSTM'] ); | 39.75 | 81 | 0.748428 | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['ConstantTrend'] , ['BestCycle'] , ['LSTM'] ); | true | true |
1c3b5355646cd2b8381429e96d194c9a69b6a3a1 | 2,638 | py | Python | src/spaceone/monitoring/manager/project_alert_config_manager.py | xellos00/monitoring | deb5363a2152e7b3f85a08d27bdede0e00023824 | [
"Apache-2.0"
] | null | null | null | src/spaceone/monitoring/manager/project_alert_config_manager.py | xellos00/monitoring | deb5363a2152e7b3f85a08d27bdede0e00023824 | [
"Apache-2.0"
] | null | null | null | src/spaceone/monitoring/manager/project_alert_config_manager.py | xellos00/monitoring | deb5363a2152e7b3f85a08d27bdede0e00023824 | [
"Apache-2.0"
] | null | null | null | import logging
from spaceone.core.manager import BaseManager
from spaceone.monitoring.error.project_alert_config import *
from spaceone.monitoring.model.project_alert_config_model import ProjectAlertConfig
_LOGGER = logging.getLogger(__name__)
class ProjectAlertConfigManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.project_alert_config_model: ProjectAlertConfig = self.locator.get_model('ProjectAlertConfig')
def create_project_alert_config(self, params):
def _rollback(project_alert_config_vo: ProjectAlertConfig):
_LOGGER.info(f'[create_project_alert_config._rollback] '
f'Delete project alert config : {project_alert_config_vo.project_id}')
project_alert_config_vo.delete()
project_alert_config_vo: ProjectAlertConfig = self.project_alert_config_model.create(params)
self.transaction.add_rollback(_rollback, project_alert_config_vo)
return project_alert_config_vo
def update_project_alert_config(self, params):
project_alert_config_vo: ProjectAlertConfig = self.get_project_alert_config(params['project_id'],
params['domain_id'])
return self.update_project_alert_config_by_vo(params, project_alert_config_vo)
def update_project_alert_config_by_vo(self, params, project_alert_config_vo):
def _rollback(old_data):
_LOGGER.info(f'[update_project_alert_config_by_vo._rollback] Revert Data : '
f'{old_data["project_id"]}')
project_alert_config_vo.update(old_data)
self.transaction.add_rollback(_rollback, project_alert_config_vo.to_dict())
return project_alert_config_vo.update(params)
def delete_project_alert_config(self, project_id, domain_id):
project_alert_config_vo: ProjectAlertConfig = self.get_project_alert_config(project_id, domain_id)
project_alert_config_vo.delete()
def get_project_alert_config(self, project_id, domain_id, only=None):
try:
return self.project_alert_config_model.get(project_id=project_id, domain_id=domain_id, only=only)
except ERROR_NOT_FOUND as e:
raise ERROR_ALERT_FEATURE_IS_NOT_ACTIVATED(project_id=project_id)
except Exception as e:
raise e
def list_project_alert_configs(self, query={}):
return self.project_alert_config_model.query(**query)
def stat_project_alert_configs(self, query):
return self.project_alert_config_model.stat(**query)
| 44.711864 | 109 | 0.723275 | import logging
from spaceone.core.manager import BaseManager
from spaceone.monitoring.error.project_alert_config import *
from spaceone.monitoring.model.project_alert_config_model import ProjectAlertConfig
_LOGGER = logging.getLogger(__name__)
class ProjectAlertConfigManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.project_alert_config_model: ProjectAlertConfig = self.locator.get_model('ProjectAlertConfig')
def create_project_alert_config(self, params):
def _rollback(project_alert_config_vo: ProjectAlertConfig):
_LOGGER.info(f'[create_project_alert_config._rollback] '
f'Delete project alert config : {project_alert_config_vo.project_id}')
project_alert_config_vo.delete()
project_alert_config_vo: ProjectAlertConfig = self.project_alert_config_model.create(params)
self.transaction.add_rollback(_rollback, project_alert_config_vo)
return project_alert_config_vo
def update_project_alert_config(self, params):
project_alert_config_vo: ProjectAlertConfig = self.get_project_alert_config(params['project_id'],
params['domain_id'])
return self.update_project_alert_config_by_vo(params, project_alert_config_vo)
def update_project_alert_config_by_vo(self, params, project_alert_config_vo):
def _rollback(old_data):
_LOGGER.info(f'[update_project_alert_config_by_vo._rollback] Revert Data : '
f'{old_data["project_id"]}')
project_alert_config_vo.update(old_data)
self.transaction.add_rollback(_rollback, project_alert_config_vo.to_dict())
return project_alert_config_vo.update(params)
def delete_project_alert_config(self, project_id, domain_id):
project_alert_config_vo: ProjectAlertConfig = self.get_project_alert_config(project_id, domain_id)
project_alert_config_vo.delete()
def get_project_alert_config(self, project_id, domain_id, only=None):
try:
return self.project_alert_config_model.get(project_id=project_id, domain_id=domain_id, only=only)
except ERROR_NOT_FOUND as e:
raise ERROR_ALERT_FEATURE_IS_NOT_ACTIVATED(project_id=project_id)
except Exception as e:
raise e
def list_project_alert_configs(self, query={}):
return self.project_alert_config_model.query(**query)
def stat_project_alert_configs(self, query):
return self.project_alert_config_model.stat(**query)
| true | true |
1c3b5466c6c835d5b8d3616e9c7fe92a30b00a93 | 732 | py | Python | Sanctuary/Cogs/Utils/database.py | LeoHartUK/Sanctuary | 8d1d2ddb3a18bcf62a0cecc47bf152f88c90d2b1 | [
"MIT"
] | null | null | null | Sanctuary/Cogs/Utils/database.py | LeoHartUK/Sanctuary | 8d1d2ddb3a18bcf62a0cecc47bf152f88c90d2b1 | [
"MIT"
] | null | null | null | Sanctuary/Cogs/Utils/database.py | LeoHartUK/Sanctuary | 8d1d2ddb3a18bcf62a0cecc47bf152f88c90d2b1 | [
"MIT"
] | 1 | 2018-10-01T12:44:24.000Z | 2018-10-01T12:44:24.000Z | import asyncpg
class DatabaseConnection(object):
    """Async context manager wrapping a single asyncpg connection.

    The class attribute ``config`` (a dict of ``asyncpg.connect`` keyword
    arguments) must be set by the application before the first connection
    is opened.

    Usage::

        async with DatabaseConnection() as db:
            rows = await db("SELECT 1")
    """

    # Dict of asyncpg.connect kwargs; populated at startup from the config files.
    config = None

    def __init__(self):
        self.db = None

    async def __aenter__(self):
        self.db = await self.get_database_connection()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.db.close()

    async def get_database_connection(self):
        '''
        Creates the database connection to postgres using the data from the config files
        '''
        if DatabaseConnection.config is None:
            # Fail fast with a clear message instead of the opaque TypeError
            # that unpacking None into asyncpg.connect would raise below.
            raise RuntimeError("DatabaseConnection.config has not been set")
        conn = await asyncpg.connect(**DatabaseConnection.config)
        return conn

    async def __call__(self, sql:str, *args):
        '''
        Runs a line of SQL using the internal database
        '''
        # Runs the SQL
        x = await self.db.fetch(sql, *args)
        # fetch() returns a list of Record objects; return it if non-empty,
        # else None (previous comment wrongly said "dict")
        if x:
            return x
        return None
| 19.263158 | 82 | 0.704918 | import asyncpg
class DatabaseConnection(object):
config = None
def __init__(self):
self.db = None
async def __aenter__(self):
self.db = await self.get_database_connection()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.db.close()
async def get_database_connection(self):
conn = await asyncpg.connect(**DatabaseConnection.config)
return conn
async def __call__(self, sql:str, *args):
x = await self.db.fetch(sql, *args)
if x:
return x
return None
| true | true |
1c3b55432f94cbcc20756c830407214cf91aa54f | 3,566 | py | Python | demo/demo_dec.py | Shuai-Xie/RSRailway | f710b6720abd1a8356004bd0b1b4db4dab2592ab | [
"MIT"
] | 1 | 2020-10-22T09:33:58.000Z | 2020-10-22T09:33:58.000Z | demo/demo_dec.py | Shuai-Xie/RSRailway | f710b6720abd1a8356004bd0b1b4db4dab2592ab | [
"MIT"
] | null | null | null | demo/demo_dec.py | Shuai-Xie/RSRailway | f710b6720abd1a8356004bd0b1b4db4dab2592ab | [
"MIT"
] | null | null | null | """
地物检测,17类目标检测
"""
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_HOME"] = "/nfs/xs/local/cuda-10.2"
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import torch
import cv2
import matplotlib.pyplot as plt
from models.ctrbox_net import CTRBOX
from tqdm import tqdm
from datasets.config.railway import dec_label_names
from utils.func_utils import *
from utils.decoder import DecDecoder
from utils.misc import *
from pprint import pprint
# dota
dec_classes = 17
input_w, input_h = 960, 540
category = dec_label_names
def load_dec_model():
    """Build the CTRBOX detector, restore the best checkpoint, and return it
    in eval mode on the GPU."""
    # Output heads: per-class heatmap, box geometry, center offset,
    # orientation classification flag.
    head_spec = {
        'hm': dec_classes,
        'wh': 10,
        'reg': 2,  # offset
        'cls_theta': 1,  # orientation cls
    }
    detector = CTRBOX(head_spec,
                      pretrained=False, down_ratio=4,
                      final_kernel=1, head_channels=256)
    # Restore trained weights (map to CPU first; moved to GPU below).
    resume = 'runs/railway/dec_res101_epoch100_data1501_Oct22_143548/model_best.pth'
    ckpt = torch.load(resume, map_location=lambda storage, loc: storage)
    detector.load_state_dict(ckpt['model_state_dict'], strict=True)
    print('loaded dec model from {}, epoch {}'.format(resume, ckpt['epoch']))
    return detector.eval().cuda()
@torch.no_grad()
def detect(model, image, decoder, input_w, input_h, ori_w, ori_h):
    """Run oriented-box detection on one preprocessed image tensor.

    Args:
        model: CTRBOX network (eval mode).
        image: preprocessed input tensor — presumably (1, C, input_h, input_w); confirm against `preprocess`.
        decoder: DecDecoder instance for heatmap decoding.
        input_w, input_h: network input size used for coordinate rescaling.
        ori_w, ori_h: original image size used for coordinate rescaling.

    Returns:
        dict mapping each category name to a list of kept detections
        (each an array of 9 values — 8 polygon coords + score).
    """
    pr_decs = model(image)
    # Heatmap point NMS + top-K + confidence threshold + HBB/RBB parsing.
    predictions = decoder.ctdet_decode(pr_decs) # np -> 1,num_obj,12 = 2+8+1+1
    # Parse predictions into per-category point/score dicts, rescaled to the
    # original image size.
    cat_pts, cat_scores = decode_prediction(predictions, category, input_w, input_h, ori_w, ori_h, down_ratio=4)
    results = {cat: None for cat in category}
    # Multi-label NMS: suppress per category first.
    for cat in category:
        pts, scores = cat_pts[cat], cat_scores[cat]
        pts = np.asarray(pts, np.float32)
        scores = np.asarray(scores, np.float32)
        if pts.shape[0]:  # at least one object of this category
            results[cat] = non_maximum_suppression(pts, scores)  # n,9
    # Then a final class-agnostic NMS over all remaining boxes.
    dets = np.zeros((0, 9))
    cats = []
    for cat, result in results.items():
        if result is None:
            continue
        dets = np.vstack((dets, result))
        cats += [cat] * result.shape[0]
    keep_index = py_cpu_nms_poly_fast(dets=dets, thresh=0.05)  # 0.1
    results = {cat: [] for cat in category}
    for idx in keep_index:
        # Re-bucket each kept detection under its category.
        results[cats[idx]].append(dets[idx])
    return results
def demo_dir():
    """Run the detector over every image in a directory and save two
    visualizations per image (matplotlib plot and OpenCV drawing)."""
    img_dir = 'data/railway/img'
    # alternative input dir: img_dir = 'data/geo_hazard/6_汽车误入'
    model = load_dec_model()
    decoder = DecDecoder(K=500, conf_thresh=0.18, num_classes=dec_classes)
    for img in tqdm(os.listdir(img_dir)):
        # Skip NAS metadata dirs and previously saved dec/seg result images.
        if img == '@eaDir' or img.endswith('seg.png') or img.endswith('dec.png'):
            continue
        print(img)
        # preprocess: resize to network input size, move to GPU
        ori_image = cv2.imread(os.path.join(img_dir, img))
        ori_h, ori_w, _ = ori_image.shape
        image = preprocess(ori_image, input_w, input_h).cuda()
        # detect
        results = detect(model, image, decoder,
                         input_w, input_h, ori_w, ori_h)
        # visualization via matplotlib (saved, not shown)
        plt_results(results, ori_image, vis=False, save_path=f'data/railway/dec_plt/{img}')
        # visualization via OpenCV drawing
        dec_img = draw_results(results, ori_image)
        cv2.imwrite(f'data/railway/dec_cv/{img}', dec_img)
| 29.229508 | 113 | 0.620303 | import os
import torch
import cv2
import matplotlib.pyplot as plt
from models.ctrbox_net import CTRBOX
from tqdm import tqdm
from datasets.config.railway import dec_label_names
from utils.func_utils import *
from utils.decoder import DecDecoder
from utils.misc import *
from pprint import pprint
dec_classes = 17
input_w, input_h = 960, 540
category = dec_label_names
def load_dec_model():
heads = {
'hm': dec_classes,
'wh': 10,
'reg': 2,
'cls_theta': 1,
}
model = CTRBOX(heads,
pretrained=False, down_ratio=4,
final_kernel=1, head_channels=256)
resume = 'runs/railway/dec_res101_epoch100_data1501_Oct22_143548/model_best.pth'
checkpoint = torch.load(resume, map_location=lambda storage, loc: storage)
state_dict_ = checkpoint['model_state_dict']
model.load_state_dict(state_dict_, strict=True)
print('loaded dec model from {}, epoch {}'.format(resume, checkpoint['epoch']))
return model.eval().cuda()
@torch.no_grad()
def detect(model, image, decoder, input_w, input_h, ori_w, ori_h):
pr_decs = model(image)
predictions = decoder.ctdet_decode(pr_decs)
cat_pts, cat_scores = decode_prediction(predictions, category, input_w, input_h, ori_w, ori_h, down_ratio=4)
results = {cat: None for cat in category}
for cat in category:
pts, scores = cat_pts[cat], cat_scores[cat]
pts = np.asarray(pts, np.float32)
scores = np.asarray(scores, np.float32)
if pts.shape[0]:
results[cat] = non_maximum_suppression(pts, scores)
dets = np.zeros((0, 9))
cats = []
for cat, result in results.items():
if result is None:
continue
dets = np.vstack((dets, result))
cats += [cat] * result.shape[0]
keep_index = py_cpu_nms_poly_fast(dets=dets, thresh=0.05)
results = {cat: [] for cat in category}
for idx in keep_index:
results[cats[idx]].append(dets[idx])
return results
def demo_dir():
img_dir = 'data/railway/img'
model = load_dec_model()
decoder = DecDecoder(K=500, conf_thresh=0.18, num_classes=dec_classes)
for img in tqdm(os.listdir(img_dir)):
if img == '@eaDir' or img.endswith('seg.png') or img.endswith('dec.png'):
continue
print(img)
ori_image = cv2.imread(os.path.join(img_dir, img))
ori_h, ori_w, _ = ori_image.shape
image = preprocess(ori_image, input_w, input_h).cuda()
results = detect(model, image, decoder,
input_w, input_h, ori_w, ori_h)
plt_results(results, ori_image, vis=False, save_path=f'data/railway/dec_plt/{img}')
dec_img = draw_results(results, ori_image)
cv2.imwrite(f'data/railway/dec_cv/{img}', dec_img)
if __name__ == '__main__':
demo_dir()
pass
| true | true |
1c3b55ddf826dcafe1ddb15f497f79a7db57b86f | 1,137 | py | Python | typish/functions/_common_ancestor.py | georgeharker/typish | 1c043beb74d89e62b10339a2a964f60ec175adfa | [
"MIT"
] | 16 | 2019-08-03T13:57:17.000Z | 2021-11-08T11:51:52.000Z | typish/functions/_common_ancestor.py | georgeharker/typish | 1c043beb74d89e62b10339a2a964f60ec175adfa | [
"MIT"
] | 27 | 2019-09-11T13:24:38.000Z | 2022-02-11T07:04:12.000Z | typish/functions/_common_ancestor.py | georgeharker/typish | 1c043beb74d89e62b10339a2a964f60ec175adfa | [
"MIT"
] | 7 | 2019-11-18T16:50:09.000Z | 2021-11-01T14:34:39.000Z | import typing
def common_ancestor(*args: object) -> type:
    """
    Find the closest type that all given objects share in their MROs.

    :param args: any objects.
    :return: the ``type`` of the closest common ancestor of ``args``.
    """
    return _common_ancestor(args, types=False)
def common_ancestor_of_types(*args: type) -> type:
    """
    Find the closest class that all given classes share in their MROs.

    :param args: any classes.
    :return: the ``type`` of the closest common ancestor of ``args``.
    """
    return _common_ancestor(args, types=True)
def _common_ancestor(args: typing.Sequence[object], types: bool) -> type:
    """Shared implementation behind ``common_ancestor`` and
    ``common_ancestor_of_types``.

    Walks the MRO of the first element and returns the first entry that is
    present in every other element's MRO (``object`` at the latest).
    """
    # Local imports mirror the package's lazy-import convention.
    from typish.functions._get_type import get_type
    from typish.functions._get_mro import get_mro

    if not args:
        raise TypeError('common_ancestor() requires at least 1 argument')
    # With types=True the elements already are classes; otherwise resolve
    # each object's type first.
    to_type = (lambda elem: elem) if types else get_type
    first_mro, *other_mros = [get_mro(to_type(elem)) for elem in args]
    for candidate in first_mro:
        if all(candidate in mro for mro in other_mros):
            # candidate appears in every MRO; closest common ancestor found.
            return candidate
| 30.72973 | 79 | 0.644679 | import typing
def common_ancestor(*args: object) -> type:
return _common_ancestor(args, False)
def common_ancestor_of_types(*args: type) -> type:
return _common_ancestor(args, True)
def _common_ancestor(args: typing.Sequence[object], types: bool) -> type:
from typish.functions._get_type import get_type
from typish.functions._get_mro import get_mro
if len(args) < 1:
raise TypeError('common_ancestor() requires at least 1 argument')
tmap = (lambda x: x) if types else get_type
mros = [get_mro(tmap(elem)) for elem in args]
for cls in mros[0]:
for mro in mros:
if cls not in mro:
break
else:
return cls
| true | true |
1c3b56185a74e835a8e187ab844d383e4d9b1e36 | 3,378 | py | Python | test/MSVS/vs-10.0Exp-exec.py | Valkatraz/scons | 5e70c65f633dcecc035751c9f0c6f894088df8a0 | [
"MIT"
] | 1,403 | 2017-11-23T14:24:01.000Z | 2022-03-30T20:59:39.000Z | test/MSVS/vs-10.0Exp-exec.py | Valkatraz/scons | 5e70c65f633dcecc035751c9f0c6f894088df8a0 | [
"MIT"
] | 3,708 | 2017-11-27T13:47:12.000Z | 2022-03-29T17:21:17.000Z | test/MSVS/vs-10.0Exp-exec.py | Valkatraz/scons | 5e70c65f633dcecc035751c9f0c6f894088df8a0 | [
"MIT"
] | 281 | 2017-12-01T23:48:38.000Z | 2022-03-31T15:25:44.000Z | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that we can actually build a simple program using our generated
Visual Studio 10.0 project (.vcxproj) and solution (.sln) files
using Visual C++ 10.0 Express edition.
"""
import os
import sys
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
if sys.platform != 'win32':
msg = "Skipping Visual Studio test on non-Windows platform '%s'\n" % sys.platform
test.skip_test(msg)
msvs_version = '10.0Exp'
if not msvs_version in test.msvs_versions():
msg = "Visual Studio %s not installed; skipping test.\n" % msvs_version
test.skip_test(msg)
# Let SCons figure out the Visual Studio environment variables for us and
# print out a statement that we can exec to suck them into our external
# environment so we can execute devenv and really try to build something.
test.run(arguments = '-n -q -Q -f -', stdin = """\
env = Environment(tools = ['msvc'], MSVS_VERSION='%(msvs_version)s')
if env.WhereIs('cl'):
print("os.environ.update(%%s)" %% repr(env['ENV']))
""" % locals())
if test.stdout() == "":
msg = "Visual Studio %s missing cl.exe; skipping test.\n" % msvs_version
test.skip_test(msg)
exec(test.stdout())
test.subdir('sub dir')
test.write(['sub dir', 'SConstruct'], """\
env=Environment(MSVS_VERSION = '%(msvs_version)s')
env.MSVSProject(target = 'foo.vcxproj',
srcs = ['foo.c'],
buildtarget = 'foo.exe',
variant = 'Release')
env.Program('foo.c')
""" % locals())
test.write(['sub dir', 'foo.c'], r"""
int
main(int argc, char *argv)
{
printf("foo.c\n");
exit (0);
}
""")
test.run(chdir='sub dir', arguments='.')
test.vcproj_sys_path(test.workpath('sub dir', 'foo.vcxproj'))
import SCons.Platform.win32
system_dll_path = os.path.join( SCons.Platform.win32.get_system_root(), 'System32' )
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + system_dll_path
test.run(chdir='sub dir',
program=[test.get_msvs_executable(msvs_version)],
arguments=['foo.sln', '/build', 'Release'])
test.run(program=test.workpath('sub dir', 'foo'), stdout="foo.c\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 29.373913 | 85 | 0.705151 |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import sys
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
if sys.platform != 'win32':
msg = "Skipping Visual Studio test on non-Windows platform '%s'\n" % sys.platform
test.skip_test(msg)
msvs_version = '10.0Exp'
if not msvs_version in test.msvs_versions():
msg = "Visual Studio %s not installed; skipping test.\n" % msvs_version
test.skip_test(msg)
test.run(arguments = '-n -q -Q -f -', stdin = """\
env = Environment(tools = ['msvc'], MSVS_VERSION='%(msvs_version)s')
if env.WhereIs('cl'):
print("os.environ.update(%%s)" %% repr(env['ENV']))
""" % locals())
if test.stdout() == "":
msg = "Visual Studio %s missing cl.exe; skipping test.\n" % msvs_version
test.skip_test(msg)
exec(test.stdout())
test.subdir('sub dir')
test.write(['sub dir', 'SConstruct'], """\
env=Environment(MSVS_VERSION = '%(msvs_version)s')
env.MSVSProject(target = 'foo.vcxproj',
srcs = ['foo.c'],
buildtarget = 'foo.exe',
variant = 'Release')
env.Program('foo.c')
""" % locals())
test.write(['sub dir', 'foo.c'], r"""
int
main(int argc, char *argv)
{
printf("foo.c\n");
exit (0);
}
""")
test.run(chdir='sub dir', arguments='.')
test.vcproj_sys_path(test.workpath('sub dir', 'foo.vcxproj'))
import SCons.Platform.win32
system_dll_path = os.path.join( SCons.Platform.win32.get_system_root(), 'System32' )
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + system_dll_path
test.run(chdir='sub dir',
program=[test.get_msvs_executable(msvs_version)],
arguments=['foo.sln', '/build', 'Release'])
test.run(program=test.workpath('sub dir', 'foo'), stdout="foo.c\n")
test.pass_test()
| true | true |
1c3b572e793165fe911d98ddd9cd2c94e5258db8 | 11,345 | py | Python | ps5_II.2_II.3.py | gerkamspiano/QuantMacro | f7e6e4ff7ae075d556f73cb1434c45652b4180cb | [
"MIT"
] | null | null | null | ps5_II.2_II.3.py | gerkamspiano/QuantMacro | f7e6e4ff7ae075d556f73cb1434c45652b4180cb | [
"MIT"
] | null | null | null | ps5_II.2_II.3.py | gerkamspiano/QuantMacro | f7e6e4ff7ae075d556f73cb1434c45652b4180cb | [
"MIT"
] | null | null | null | # Problem Set 5 - Germán Sánchez Arce
# In collaboration with María González
# Import packages
import numpy as np
from numpy import vectorize
from itertools import product
import matplotlib.pyplot as plt
import scipy as sp
from scipy.interpolate import BSpline
from scipy.interpolate import interp1d
# Parametrization of the model:
ro = 0.06
beta = 1/(1+ro)
w = 1
r = 0.04
gamma = 0.5
sigmay = 0.2
# Transition matrix for the Markov Process
pi = np.array([((1+gamma)/2, (1-gamma)/2),((1-gamma)/2, (1+gamma)/2)])
#%% II.2 - The infinitely-lived households economy (Discrete method)
########################### Quadratic Utility #################################
Y = (1-sigmay, 1+sigmay)
cbar = 100*Y[1] # parameter for avoiding saturation of any consumer
A = np.linspace(((-(1+r)/r)*Y[0]), 30, 80) # grid over assets tomorrow
ay = list(product(Y, A, A))
ay = np.array(ay)
y = ay[:, 0]
ai = ay[:, 1]
aj = ay[:, 2]
c = y+(1+r)*ai-aj
@vectorize
def M(c):
    # Quadratic (bliss-point) utility u(c) = -0.5 * (c - cbar)^2; cbar is set
    # large (100 * highest income) above so no consumer saturates the bliss point.
    return -0.5*(c-cbar)**2
M = M(c)
M = np.reshape(M, (1, 12800))
M = np.reshape(M, (160, 80))
# Initial guess for the value function is a vector of zeros:
Vs = np.zeros(160)
# Compute the matrix W:
def W1(A):
return pi[0, 0]*(-0.5*(Y[0] + (1+r)*A - A - cbar)**2)/(1-beta) + pi[0, 1]*(-0.5*(Y[1] + (1+r)*A - A - cbar)**2)/(1-beta)
def W2(A):
return pi[1, 0]*(-0.5*(Y[0] + (1+r)*A - A - cbar)**2)/(1-beta) + pi[1, 1]*(-0.5*(Y[1] + (1+r)*A - A - cbar)**2)/(1-beta)
W1 = W1(A)
W1 = np.reshape(W1, (80,1))
W1 = np.tile(W1, 80)
W1 = np.transpose(W1)
W2 = W2(A)
W2 = np.reshape(W2, (80,1))
W2 = np.tile(W2, 80)
W2 = np.transpose(W2)
W = [W1, W2]
W = np.reshape(W, (160,80))
# Compute the matrix X:
X = M + beta*W
Vs1 = np.amax(X, axis = 1)
diffVs = Vs - Vs1
count = 0
# If differences are larger than 1, we iterate taking as new value functions
# Vs1 up to obtain convergence:
for diffVs in range(1, 8000):
Vss = Vs1
Vs = [Vss[0:80], Vss[80:]]
Vs = np.array(Vs)
def W1(Vs):
return pi[0, 0]*Vs[0, :] + pi[0, 1]*Vs[1, :]
def W2(Vs):
return pi[1, 0]*Vs[0, :] + pi[1, 1]*Vs[1, :]
W1 = W1(Vs)
W1 = np.reshape(W1, (1,80))
W1 = np.tile(W1, 80)
W1 = np.reshape(W1, (80,80))
W2 = W2(Vs)
W2 = np.reshape(W2, (1,80))
W2 = np.tile(W2, 80)
W2 = np.reshape(W2, (80,80))
W = [W1, W2]
W = np.reshape(W, (160, 80))
X = M + beta*W
Vs1 = np.amax(X, axis = 1)
diffVs = Vss - Vs1
count += 1
# Once we obtain convergence, redefine the matrix X:
X = M + beta*W
# The value function given different realizations of y:
V_y1 = Vs1[0:80]
V_y2 = Vs1[80:]
# Now we can obtain the decision rule, which give us column number that
# maximizes row i of the X matrix:
g = np.argmax(X, axis = 1)
# For the first 45 periods:
aopt_y1 = A[g[0:80]] # optimal decision of assets given y1
aopt_y2 = A[g[80:]] # optimal decision of assets given y2
c_y1 = Y[0]*np.ones(80) + (1+r)*A - aopt_y1
c_y2 = Y[1]*np.ones(80) + (1+r)*A - aopt_y2
for i in range(0, 80):
if c_y1[i] < 0:
c_y1[i] = 0
if c_y2[i] < 0:
c_y2[i] = 0
# Plot the value function and the optimal policy:
plt.figure()
plt.plot(A, V_y1, label = 'Value function for negative shock')
plt.plot(A, V_y2, label = 'Value function for positive shock')
plt.title('Value Function Iteration')
plt.legend()
plt.ylabel('Value Function')
plt.xlabel('Assets')
plt.show()
plt.figure()
plt.plot(A, aopt_y1, label = 'Optimal assets for negative shock')
plt.plot(A, aopt_y2, label = 'Optimal assets for positive shock')
plt.title('Policy rule for assets')
plt.legend()
plt.ylabel('Assets tomorrow')
plt.xlabel('Assets today')
plt.show()
plt.figure()
plt.plot(A, c_y1, label = 'Optimal consumption for negative shock')
plt.plot(A, c_y2, label = 'Optimal consumption for positive shock')
plt.title('Policy rule for consumption')
plt.legend()
plt.ylabel('Consumption')
plt.xlabel('Assets')
plt.show()
#%% II.3 - The life-cycle economy (Backwards)
########################### Quadratic Utility #################################
W = np.zeros((160, 80))
count = 0
while count < 45:
W = np.amax((M + beta*W), axis = 1)
W = np.reshape(W,(160, 1))
W = W*np.ones((160, 80))
count += 1
plt.plot(A, W[0:80, 0], label = 'Value function for negative shock')
plt.plot(A, W[80:, 0], label = 'Value function for positive shock')
plt.legend()
plt.title('Value function for finite horizon')
plt.ylabel('Value function')
plt.xlabel('Assets')
plt.show()
X = M + beta*W
g = np.argmax(X, axis = 1)
aopt_y1 = A[g[0:80]] # optimal decision of assets given y1
aopt_y2 = A[g[80:]] # optimal decision of assets given y2
c_y1 = Y[0]*np.ones(80) + (1+r)*A - aopt_y1
c_y2 = Y[1]*np.ones(80) + (1+r)*A - aopt_y2
for i in range(0, 80):
if c_y1[i] < 0:
c_y1[i] = 0
if c_y2[i] < 0:
c_y2[i] = 0
plt.figure()
plt.plot(A, aopt_y1, label = 'Optimal assets for negative shock')
plt.plot(A, aopt_y2, label = 'Optimal assets for positive shock')
plt.legend()
plt.title('Policy rule for assets')
plt.ylabel('Assets tomorrow')
plt.xlabel('Assets today')
plt.show()
plt.figure()
plt.plot(A, c_y1, label = 'Optimal consumption for negative shock')
plt.plot(A, c_y2, label = 'Optimal consumption for positive shock')
plt.title('Policy rule for consumption')
plt.legend()
plt.ylabel('Consumption')
plt.xlabel('Assets')
plt.show()
#%% II.2 - The infinitely-lived households economy (Discrete method)
########################### CRRA Utility #####################################
sigma = 2
A = np.linspace(((-(1+r)/r)*Y[0]), 30, 80) # grid over assets tomorrow
ay = list(product(Y, A, A))
ay = np.array(ay)
y = ay[:, 0]
ai = ay[:, 1]
aj = ay[:, 2]
c = y + (1+r)*ai - aj
M = np.zeros(12800)
for i in range(0, 12800):
if c[i] >= 0:
M[i] = ((c[i]**(1-sigma))-1)/(1-sigma)
if c[i] < 0:
M[i] = -100000
M = np.reshape(M, (1, 12800))
M = np.reshape(M, (160, 80))
# Initial guess for the value function is a vector of zeros:
Vs = np.zeros(160)
# Compute the matrix W:
def W1(A):
return pi[0, 0]*(((Y[0] + (1+r)*A - A)**(1-sigma))-1)/((1-sigma)*(1-beta)) + pi[0, 1]*(((Y[1] + (1+r)*A - A)**(1-sigma))-1)/((1-sigma)*(1-beta))
def W2(A):
return pi[1, 0]*(((Y[0] + (1+r)*A - A)**(1-sigma))-1)/((1-sigma)*(1-beta)) + pi[1, 1]*(((Y[1] + (1+r)*A - A)**(1-sigma))-1)/((1-sigma)*(1-beta))
W1 = W1(A)
W1 = np.reshape(W1, (80,1))
W1 = np.tile(W1, 80)
W1 = np.transpose(W1)
W2 = W2(A)
W2 = np.reshape(W2, (80,1))
W2 = np.tile(W2, 80)
W2 = np.transpose(W2)
W = [W1, W2]
W = np.reshape(W, (160,80))
# Compute the matrix X:
X = M + beta*W
Vs1 = np.amax(X, axis = 1)
diffVs = Vs - Vs1
count = 0
# If differences are larger than 1, we iterate taking as new value functions
# Vs1 up to obtain convergence:
for diffVs in range(1, 8000):
Vss = Vs1
Vs = [Vss[0:80], Vss[80:]]
Vs = np.array(Vs)
def W1(Vs):
return pi[0, 0]*Vs[0, :] + pi[0, 1]*Vs[1, :]
def W2(Vs):
return pi[1, 0]*Vs[0, :] + pi[1, 1]*Vs[1, :]
W1 = W1(Vs)
W1 = np.reshape(W1, (1,80))
W1 = np.tile(W1, 80)
W1 = np.reshape(W1, (80,80))
W2 = W2(Vs)
W2 = np.reshape(W2, (1,80))
W2 = np.tile(W2, 80)
W2 = np.reshape(W2, (80,80))
W = [W1, W2]
W = np.reshape(W, (160, 80))
X = M + beta*W
Vs1 = np.amax(X, axis = 1)
diffVs = Vss - Vs1
count += 1
# Once we obtain convergence, redefine the matrix X:
X = M + beta*W
# The value function given different realizations of y:
V_y1 = Vs1[0:80]
V_y2 = Vs1[80:]
# Now we can obtain the decision rule, which give us column number that
# maximizes row i of the X matrix:
g = np.argmax(X, axis = 1)
# For the first 45 periods:
aopt_y1 = A[g[0:80]] # optimal decision of assets given y1
aopt_y2 = A[g[80:]] # optimal decision of assets given y2
for i in range(0, 2):
aopt_y1[i] = 0
aopt_y2[i] = 0
c_y1 = Y[0]*np.ones(80) + (1+r)*A - aopt_y1
c_y2 = Y[1]*np.ones(80) + (1+r)*A - aopt_y2
for i in range(0, 80):
if c_y1[i] < 0:
c_y1[i] = 0
if c_y2[i] < 0:
c_y2[i] = 0
# Plot the value function and the optimal policy:
plt.figure()
plt.plot(A[3:], V_y1[3:], label = 'Value function for negative shock')
plt.plot(A[3:], V_y2[3:], label = 'Value function for positive shock')
plt.title('Value Function Iteration')
plt.legend()
plt.ylabel('Value Function')
plt.xlabel('Assets')
plt.show()
plt.figure()
plt.plot(A[3:], aopt_y1[3:], label = 'Optimal assets for negative shock')
plt.plot(A[3:], aopt_y2[3:], label = 'Optimal assets for positive shock')
plt.title('Policy rule for assets')
plt.legend()
plt.ylabel('Assets tomorrow')
plt.xlabel('Assets today')
plt.show()
plt.figure()
plt.plot(A, c_y1, label = 'Optimal consumption for negative shock')
plt.plot(A, c_y2, label = 'Optimal consumption for positive shock')
plt.title('Policy rule for consumption')
plt.legend()
plt.ylabel('Consumption')
plt.xlabel('Assets')
plt.show()
#%% II.3 - The life-cycle economy (Backwards)
########################### CRRA Utility #####################################
W = np.zeros((160, 80))
count = 0
while count < 45:
W = np.amax((M + beta*W), axis = 1)
W = np.reshape(W,(160, 1))
W = W*np.ones((160, 80))
count += 1
plt.plot(A[1:], W[1:80, 1], label = 'Value function for negative shock')
plt.plot(A[1:], W[81:, 1], label = 'Value function for positive shock')
plt.title('Value function for finite horizon')
plt.legend()
plt.ylabel('Value function')
plt.xlabel('Assets')
plt.show()
X = M + beta*W
g = np.argmax(X, axis = 1)
aopt_y1 = A[g[0:80]] # optimal decision of assets given y1
aopt_y2 = A[g[80:]] # optimal decision of assets given y2
c_y1 = Y[0]*np.ones(80) + (1+r)*A - aopt_y1
c_y2 = Y[1]*np.ones(80) + (1+r)*A - aopt_y2
for i in range(0, 80):
if c_y1[i] < 0:
c_y1[i] = 0
if c_y2[i] < 0:
c_y2[i] = 0
plt.figure()
plt.plot(A, aopt_y1, label = 'Optimal assets for negative shock')
plt.plot(A, aopt_y2, label = 'Optimal assets for positive shock')
plt.title('Policy rule for assets')
plt.legend()
plt.ylabel('Assets tomorrow')
plt.xlabel('Assets today')
plt.show()
plt.figure()
plt.plot(A, c_y1, label = 'Optimal consumption for negative shock')
plt.plot(A, c_y2, label = 'Optimal consumption for positive shock')
plt.title('Policy rule for consumption')
plt.legend()
plt.ylabel('Consumption')
plt.xlabel('Assets')
plt.show()
| 22.781124 | 149 | 0.548876 |
import numpy as np
from numpy import vectorize
from itertools import product
import matplotlib.pyplot as plt
import scipy as sp
from scipy.interpolate import BSpline
from scipy.interpolate import interp1d
ro = 0.06
beta = 1/(1+ro)
w = 1
r = 0.04
gamma = 0.5
sigmay = 0.2
pi = np.array([((1+gamma)/2, (1-gamma)/2),((1-gamma)/2, (1+gamma)/2)])
tion')
plt.xlabel('Assets')
plt.show()
plt.figure()
plt.plot(A, aopt_y1, label = 'Optimal assets for negative shock')
plt.plot(A, aopt_y2, label = 'Optimal assets for positive shock')
plt.title('Policy rule for assets')
plt.legend()
plt.ylabel('Assets tomorrow')
plt.xlabel('Assets today')
plt.show()
plt.figure()
plt.plot(A, c_y1, label = 'Optimal consumption for negative shock')
plt.plot(A, c_y2, label = 'Optimal consumption for positive shock')
plt.title('Policy rule for consumption')
plt.legend()
plt.ylabel('Consumption')
plt.xlabel('Assets')
plt.show()
| true | true |
1c3b574492692395aa10d6c247780a0fddbb2853 | 5,525 | py | Python | mmcls/models/losses/asymmetric_loss.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | 1 | 2022-03-15T07:36:04.000Z | 2022-03-15T07:36:04.000Z | mmcls/models/losses/asymmetric_loss.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | null | null | null | mmcls/models/losses/asymmetric_loss.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | 1 | 2022-03-25T08:40:07.000Z | 2022-03-25T08:40:07.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import convert_to_one_hot, weight_reduce_loss
def asymmetric_loss(pred,
                    target,
                    weight=None,
                    gamma_pos=1.0,
                    gamma_neg=4.0,
                    clip=0.05,
                    reduction='mean',
                    avg_factor=None,
                    use_sigmoid=True,
                    eps=1e-8):
    r"""asymmetric loss.

    Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for
    details.

    Args:
        pred (torch.Tensor): The prediction with shape (N, \*).
        target (torch.Tensor): The ground truth label of the prediction with
            shape (N, \*).
        weight (torch.Tensor, optional): Sample-wise loss weight with shape
            (N, ). Defaults to None.
        gamma_pos (float): positive focusing parameter. Defaults to 1.0.
            (Note: the ``AsymmetricLoss`` module defaults to 0.0 instead.)
        gamma_neg (float): Negative focusing parameter. We usually set
            gamma_neg > gamma_pos. Defaults to 4.0.
        clip (float, optional): Probability margin. Defaults to 0.05.
        reduction (str): The method used to reduce the loss.
            Options are "none", "mean" and "sum". If reduction is 'none' , loss
            is same shape as pred and label. Defaults to 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.
        use_sigmoid (bool): Whether the prediction uses sigmoid instead
            of softmax. Defaults to True.
        eps (float): The minimum value of the argument of logarithm. Defaults
            to 1e-8.

    Returns:
        torch.Tensor: Loss.
    """
    assert pred.shape == \
        target.shape, 'pred and target should be in the same shape.'

    if use_sigmoid:
        pred_sigmoid = pred.sigmoid()
    else:
        pred_sigmoid = nn.functional.softmax(pred, dim=-1)

    target = target.type_as(pred)

    if clip and clip > 0:
        # Probability shifting: raise negatives' probability-of-being-negative
        # by `clip` (capped at 1) so very easy negatives contribute zero loss.
        pt = (1 - pred_sigmoid +
              clip).clamp(max=1) * (1 - target) + pred_sigmoid * target
    else:
        pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
    # Focal-style modulation with separate exponents for positives/negatives.
    asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg *
                                     (1 - target))
    # eps clamp guards log(0).
    loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight
    if weight is not None:
        assert weight.dim() == 1
        weight = weight.float()
        if pred.dim() > 1:
            # Broadcast per-sample weights across the class dimension.
            weight = weight.reshape(-1, 1)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
@LOSSES.register_module()
class AsymmetricLoss(nn.Module):
    """Asymmetric loss (ASL) module for multi-label classification.

    Args:
        gamma_pos (float): Focusing parameter for positive targets.
            Defaults to 0.0.
        gamma_neg (float): Focusing parameter for negative targets; usually
            gamma_neg > gamma_pos. Defaults to 4.0.
        clip (float, optional): Probability margin. Defaults to 0.05.
        reduction (str): The method used to reduce the loss into a scalar.
        loss_weight (float): Weight of loss. Defaults to 1.0.
        use_sigmoid (bool): Whether the prediction uses sigmoid instead
            of softmax. Defaults to True.
        eps (float): The minimum value of the argument of logarithm. Defaults
            to 1e-8.
    """

    def __init__(self,
                 gamma_pos=0.0,
                 gamma_neg=4.0,
                 clip=0.05,
                 reduction='mean',
                 loss_weight=1.0,
                 use_sigmoid=True,
                 eps=1e-8):
        super(AsymmetricLoss, self).__init__()
        self.gamma_pos = gamma_pos
        self.gamma_neg = gamma_neg
        self.clip = clip
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.use_sigmoid = use_sigmoid
        self.eps = eps

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        r"""Compute the asymmetric loss.

        Args:
            pred (torch.Tensor): The prediction with shape (N, \*).
            target (torch.Tensor): The ground truth label of the prediction
                with shape (N, \*), N or (N,1); index-style labels are
                expanded to one-hot.
            weight (torch.Tensor, optional): Sample-wise loss weight with
                shape (N, \*). Defaults to None.
            avg_factor (int, optional): Average factor that is used to
                average the loss. Defaults to None.
            reduction_override (str, optional): One of "none", "mean", "sum";
                overrides ``self.reduction`` when given. Defaults to None.

        Returns:
            torch.Tensor: Loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override or self.reduction
        # Labels given as class indices — shape (N,) or (N, 1) — must be
        # converted to one-hot so they match the prediction's shape.
        is_index_target = target.dim() == 1 or (
            target.dim() == 2 and target.shape[1] == 1)
        if is_index_target:
            target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1])
        raw_loss = asymmetric_loss(
            pred,
            target,
            weight,
            gamma_pos=self.gamma_pos,
            gamma_neg=self.gamma_neg,
            clip=self.clip,
            reduction=reduction,
            avg_factor=avg_factor,
            use_sigmoid=self.use_sigmoid,
            eps=self.eps)
        return self.loss_weight * raw_loss
| 36.833333 | 79 | 0.566878 |
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import convert_to_one_hot, weight_reduce_loss
def asymmetric_loss(pred,
                    target,
                    weight=None,
                    gamma_pos=1.0,
                    gamma_neg=4.0,
                    clip=0.05,
                    reduction='mean',
                    avg_factor=None,
                    use_sigmoid=True,
                    eps=1e-8):
    """Compute an asymmetric focal-style loss between ``pred`` and ``target``.

    Positive and negative targets are modulated with separate focusing
    exponents (``gamma_pos`` / ``gamma_neg``), and negative probabilities may
    additionally be shifted by ``clip`` (probability margin) before the
    focal term is applied.

    Args:
        pred (torch.Tensor): Raw prediction logits, same shape as ``target``.
        target (torch.Tensor): Binary (multi-hot) ground-truth labels.
        weight (torch.Tensor, optional): 1-D sample-wise loss weight.
            Defaults to None.
        gamma_pos (float): Focusing parameter for positive targets.
            Defaults to 1.0.
        gamma_neg (float): Focusing parameter for negative targets.
            Defaults to 4.0.
        clip (float): Probability margin added to negative probabilities.
            Defaults to 0.05.
        reduction (str): How to reduce the loss ('none', 'mean' or 'sum').
            Defaults to 'mean'.
        avg_factor (int, optional): Divisor used when averaging the loss.
            Defaults to None.
        use_sigmoid (bool): If True, map logits with sigmoid; otherwise use
            softmax over the last dimension. Defaults to True.
        eps (float): Lower bound of the logarithm argument, for numerical
            stability. Defaults to 1e-8.

    Returns:
        torch.Tensor: The reduced loss.
    """
    assert pred.shape == \
        target.shape, 'pred and target should be in the same shape.'
    if use_sigmoid:
        pred_sigmoid = pred.sigmoid()
    else:
        pred_sigmoid = nn.functional.softmax(pred, dim=-1)
    target = target.type_as(pred)
    # pt is the probability assigned to the true class: p for positives,
    # (1 - p) -- optionally shifted up by ``clip`` and capped at 1 -- for
    # negatives.
    if clip and clip > 0:
        pt = (1 - pred_sigmoid +
              clip).clamp(max=1) * (1 - target) + pred_sigmoid * target
    else:
        pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
    # Focal modulation with different exponents for positives and negatives.
    asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg *
                                     (1 - target))
    loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight
    if weight is not None:
        assert weight.dim() == 1
        weight = weight.float()
        if pred.dim() > 1:
            # Broadcast the per-sample weight across the class dimension.
            weight = weight.reshape(-1, 1)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
@LOSSES.register_module()
class AsymmetricLoss(nn.Module):
    """Asymmetric loss module.

    Thin ``nn.Module`` wrapper around :func:`asymmetric_loss`, storing the
    focusing parameters, probability margin and reduction settings.

    Args:
        gamma_pos (float): Positive focusing parameter. Defaults to 0.0.
        gamma_neg (float): Negative focusing parameter; typically
            gamma_neg > gamma_pos. Defaults to 4.0.
        clip (float, optional): Probability margin. Defaults to 0.05.
        reduction (str): Method used to reduce the loss into a scalar.
            Defaults to 'mean'.
        loss_weight (float): Scalar multiplier applied to the loss.
            Defaults to 1.0.
        use_sigmoid (bool): Whether the prediction uses sigmoid instead of
            softmax. Defaults to True.
        eps (float): Minimum value of the argument of the logarithm.
            Defaults to 1e-8.
    """
    def __init__(self,
                 gamma_pos=0.0,
                 gamma_neg=4.0,
                 clip=0.05,
                 reduction='mean',
                 loss_weight=1.0,
                 use_sigmoid=True,
                 eps=1e-8):
        super(AsymmetricLoss, self).__init__()
        self.gamma_pos = gamma_pos
        self.gamma_neg = gamma_neg
        self.clip = clip
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.use_sigmoid = use_sigmoid
        self.eps = eps
    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        r"""Compute the asymmetric loss.

        Args:
            pred (torch.Tensor): Predictions with shape (N, \*).
            target (torch.Tensor): Ground-truth labels with shape (N, \*),
                (N,) or (N, 1); hard labels are expanded to one-hot.
            weight (torch.Tensor, optional): Sample-wise loss weight with
                shape (N, \*). Defaults to None.
            avg_factor (int, optional): Average factor used to average the
                loss. Defaults to None.
            reduction_override (str, optional): Reduction method overriding
                ``self.reduction``; one of "none", "mean" and "sum".
                Defaults to None.

        Returns:
            torch.Tensor: The weighted, reduced loss.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        # Expand hard labels of shape (N,) or (N, 1) to a one-hot matrix
        # matching the class dimension of ``pred``.
        if target.dim() == 1 or (target.dim() == 2 and target.shape[1] == 1):
            target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1])
        loss_cls = self.loss_weight * asymmetric_loss(
            pred,
            target,
            weight,
            gamma_pos=self.gamma_pos,
            gamma_neg=self.gamma_neg,
            clip=self.clip,
            reduction=reduction,
            avg_factor=avg_factor,
            use_sigmoid=self.use_sigmoid,
            eps=self.eps)
        return loss_cls
| true | true |
1c3b57690461d1837c56b4565a9ecc8958073025 | 7,896 | py | Python | python_modules/dagster/dagster/core/execution/context/compute.py | JBrVJxsc/dagster | 680aa23387308335eb0eccfa9241b26d10a2d627 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/execution/context/compute.py | JBrVJxsc/dagster | 680aa23387308335eb0eccfa9241b26d10a2d627 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/execution/context/compute.py | JBrVJxsc/dagster | 680aa23387308335eb0eccfa9241b26d10a2d627 | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod, abstractproperty
from typing import Any, Optional
from dagster import check
from dagster.core.definitions.dependency import Solid, SolidHandle
from dagster.core.definitions.mode import ModeDefinition
from dagster.core.definitions.pipeline import PipelineDefinition
from dagster.core.definitions.resource import Resources
from dagster.core.definitions.solid import SolidDefinition
from dagster.core.definitions.step_launcher import StepLauncher
from dagster.core.errors import DagsterInvalidPropertyError
from dagster.core.instance import DagsterInstance
from dagster.core.log_manager import DagsterLogManager
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.utils.forked_pdb import ForkedPdb
from .system import StepExecutionContext
class AbstractComputeExecutionContext(ABC):  # pylint: disable=no-init
    """Base class for solid context implemented by SolidExecutionContext
    and DagstermillExecutionContext."""

    @abstractmethod
    def has_tag(self, key) -> bool:
        """Implement this method to check if a logging tag is set."""

    @abstractmethod
    def get_tag(self, key: str) -> str:
        """Implement this method to get a logging tag."""

    # NOTE: ``abc.abstractproperty`` has been deprecated since Python 3.3;
    # stacking ``@property`` over ``@abstractmethod`` is the documented
    # replacement and behaves identically for subclasses.
    @property
    @abstractmethod
    def run_id(self) -> str:
        """The run id for the context."""

    @property
    @abstractmethod
    def solid_def(self) -> SolidDefinition:
        """The solid definition corresponding to the execution step being executed."""

    @property
    @abstractmethod
    def solid(self) -> Solid:
        """The solid corresponding to the execution step being executed."""

    @property
    @abstractmethod
    def pipeline_def(self) -> PipelineDefinition:
        """The pipeline being executed."""

    @property
    @abstractmethod
    def pipeline_run(self) -> PipelineRun:
        """The PipelineRun object corresponding to the execution."""

    @property
    @abstractmethod
    def resources(self) -> Any:
        """Resources available in the execution context."""

    @property
    @abstractmethod
    def log(self) -> DagsterLogManager:
        """The log manager available in the execution context."""

    @property
    @abstractmethod
    def solid_config(self) -> Any:
        """The parsed config specific to this solid."""

    @property
    def op_config(self) -> Any:
        """Alias for :py:attr:`solid_config` under "op" terminology."""
        return self.solid_config
class SolidExecutionContext(AbstractComputeExecutionContext):
    """The ``context`` object that can be made available as the first argument to a solid's compute
    function.
    The context object provides system information such as resources, config, and logging to a
    solid's compute function. Users should not instantiate this object directly.
    Example:
    .. code-block:: python
        @solid
        def hello_world(context: SolidExecutionContext):
            context.log.info("Hello, world!")
    """
    __slots__ = ["_step_execution_context"]
    def __init__(self, step_execution_context: StepExecutionContext):
        """Wrap the underlying :class:`StepExecutionContext`.

        Not constructed by users; the framework supplies an instance to the
        solid's compute function.
        """
        self._step_execution_context = check.inst_param(
            step_execution_context,
            "step_execution_context",
            StepExecutionContext,
        )
        # Lazily-created debugger; see the ``pdb`` property below.
        self._pdb: Optional[ForkedPdb] = None
    @property
    def solid_config(self) -> Any:
        """Any: The parsed config specific to this solid, or None if the
        solid has no config entry in the resolved run config."""
        solid_config = self._step_execution_context.resolved_run_config.solids.get(
            str(self.solid_handle)
        )
        return solid_config.config if solid_config else None
    @property
    def pipeline_run(self) -> PipelineRun:
        """PipelineRun: The current pipeline run"""
        return self._step_execution_context.pipeline_run
    @property
    def instance(self) -> DagsterInstance:
        """DagsterInstance: The current Dagster instance"""
        return self._step_execution_context.instance
    @property
    def pdb(self) -> ForkedPdb:
        """dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the solid.
        Example:
        .. code-block:: python
            @solid
            def debug_solid(context):
                context.pdb.set_trace()
        """
        # Created on first access and cached for the lifetime of the context.
        if self._pdb is None:
            self._pdb = ForkedPdb()
        return self._pdb
    @property
    def file_manager(self):
        """Deprecated access to the file manager.
        :meta private:
        """
        raise DagsterInvalidPropertyError(
            "You have attempted to access the file manager which has been moved to resources in 0.10.0. "
            "Please access it via `context.resources.file_manager` instead."
        )
    @property
    def resources(self) -> Resources:
        """Resources: The currently available resources."""
        return self._step_execution_context.resources
    @property
    def step_launcher(self) -> Optional[StepLauncher]:
        """Optional[StepLauncher]: The current step launcher, if any."""
        return self._step_execution_context.step_launcher
    @property
    def run_id(self) -> str:
        """str: The id of the current execution's run."""
        return self._step_execution_context.run_id
    @property
    def run_config(self) -> dict:
        """dict: The run config for the current execution."""
        return self._step_execution_context.run_config
    @property
    def pipeline_def(self) -> PipelineDefinition:
        """PipelineDefinition: The currently executing pipeline."""
        return self._step_execution_context.pipeline_def
    @property
    def pipeline_name(self) -> str:
        """str: The name of the currently executing pipeline."""
        return self._step_execution_context.pipeline_name
    @property
    def mode_def(self) -> ModeDefinition:
        """ModeDefinition: The mode of the current execution."""
        return self._step_execution_context.mode_def
    @property
    def log(self) -> DagsterLogManager:
        """DagsterLogManager: The log manager available in the execution context."""
        return self._step_execution_context.log
    @property
    def solid_handle(self) -> SolidHandle:
        """SolidHandle: The current solid's handle.
        :meta private:
        """
        return self._step_execution_context.solid_handle
    @property
    def solid(self) -> Solid:
        """Solid: The current solid object.
        :meta private:
        """
        return self._step_execution_context.pipeline_def.get_solid(self.solid_handle)
    @property
    def solid_def(self) -> SolidDefinition:
        """SolidDefinition: The current solid definition."""
        return self._step_execution_context.pipeline_def.get_solid(self.solid_handle).definition
    def has_tag(self, key: str) -> bool:
        """Check if a logging tag is set.
        Args:
            key (str): The tag to check.
        Returns:
            bool: Whether the tag is set.
        """
        return self._step_execution_context.has_tag(key)
    def get_tag(self, key: str) -> str:
        """Get a logging tag.
        Args:
            key (str): The tag to get.
        Returns:
            str: The value of the tag.
        """
        return self._step_execution_context.get_tag(key)
    def get_step_execution_context(self) -> StepExecutionContext:
        """Allows advanced users (e.g. framework authors) to punch through to the underlying
        step execution context.
        :meta private:
        Returns:
            StepExecutionContext: The underlying system context.
        """
        return self._step_execution_context
    @property
    def retry_number(self) -> int:
        """
        Which retry attempt is currently executing i.e. 0 for initial attempt, 1 for first retry, etc.
        """
        return self._step_execution_context.previous_attempt_count
    def get_mapping_key(self) -> Optional[str]:
        """
        Which mapping_key this execution is for if downstream of a DynamicOutput, otherwise None.
        """
        return self._step_execution_context.step.get_mapping_key()
| 31.710843 | 107 | 0.674772 | from abc import ABC, abstractmethod, abstractproperty
from typing import Any, Optional
from dagster import check
from dagster.core.definitions.dependency import Solid, SolidHandle
from dagster.core.definitions.mode import ModeDefinition
from dagster.core.definitions.pipeline import PipelineDefinition
from dagster.core.definitions.resource import Resources
from dagster.core.definitions.solid import SolidDefinition
from dagster.core.definitions.step_launcher import StepLauncher
from dagster.core.errors import DagsterInvalidPropertyError
from dagster.core.instance import DagsterInstance
from dagster.core.log_manager import DagsterLogManager
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.utils.forked_pdb import ForkedPdb
from .system import StepExecutionContext
class AbstractComputeExecutionContext(ABC):
@abstractmethod
def has_tag(self, key) -> bool:
@abstractmethod
def get_tag(self, key: str) -> str:
@abstractproperty
def run_id(self) -> str:
@abstractproperty
def solid_def(self) -> SolidDefinition:
@abstractproperty
def solid(self) -> Solid:
@abstractproperty
def pipeline_def(self) -> PipelineDefinition:
@abstractproperty
def pipeline_run(self) -> PipelineRun:
@abstractproperty
def resources(self) -> Any:
@abstractproperty
def log(self) -> DagsterLogManager:
@abstractproperty
def solid_config(self) -> Any:
@property
def op_config(self) -> Any:
return self.solid_config
class SolidExecutionContext(AbstractComputeExecutionContext):
__slots__ = ["_step_execution_context"]
def __init__(self, step_execution_context: StepExecutionContext):
self._step_execution_context = check.inst_param(
step_execution_context,
"step_execution_context",
StepExecutionContext,
)
self._pdb: Optional[ForkedPdb] = None
@property
def solid_config(self) -> Any:
solid_config = self._step_execution_context.resolved_run_config.solids.get(
str(self.solid_handle)
)
return solid_config.config if solid_config else None
@property
def pipeline_run(self) -> PipelineRun:
return self._step_execution_context.pipeline_run
@property
def instance(self) -> DagsterInstance:
return self._step_execution_context.instance
@property
def pdb(self) -> ForkedPdb:
if self._pdb is None:
self._pdb = ForkedPdb()
return self._pdb
@property
def file_manager(self):
raise DagsterInvalidPropertyError(
"You have attempted to access the file manager which has been moved to resources in 0.10.0. "
"Please access it via `context.resources.file_manager` instead."
)
@property
def resources(self) -> Resources:
return self._step_execution_context.resources
@property
def step_launcher(self) -> Optional[StepLauncher]:
return self._step_execution_context.step_launcher
@property
def run_id(self) -> str:
return self._step_execution_context.run_id
@property
def run_config(self) -> dict:
return self._step_execution_context.run_config
@property
def pipeline_def(self) -> PipelineDefinition:
return self._step_execution_context.pipeline_def
@property
def pipeline_name(self) -> str:
return self._step_execution_context.pipeline_name
@property
def mode_def(self) -> ModeDefinition:
return self._step_execution_context.mode_def
@property
def log(self) -> DagsterLogManager:
return self._step_execution_context.log
@property
def solid_handle(self) -> SolidHandle:
return self._step_execution_context.solid_handle
@property
def solid(self) -> Solid:
return self._step_execution_context.pipeline_def.get_solid(self.solid_handle)
@property
def solid_def(self) -> SolidDefinition:
return self._step_execution_context.pipeline_def.get_solid(self.solid_handle).definition
def has_tag(self, key: str) -> bool:
return self._step_execution_context.has_tag(key)
def get_tag(self, key: str) -> str:
return self._step_execution_context.get_tag(key)
def get_step_execution_context(self) -> StepExecutionContext:
return self._step_execution_context
@property
def retry_number(self) -> int:
return self._step_execution_context.previous_attempt_count
def get_mapping_key(self) -> Optional[str]:
return self._step_execution_context.step.get_mapping_key()
| true | true |
1c3b57690a12ceecf8678804ef6ad197cfd0f51d | 154,195 | py | Python | test/orm/test_eager_relations.py | balabit-deps/balabit-os-6-sqlalchemy | 61defc84f2aab5e579080a1958375b805470b461 | [
"MIT"
] | null | null | null | test/orm/test_eager_relations.py | balabit-deps/balabit-os-6-sqlalchemy | 61defc84f2aab5e579080a1958375b805470b461 | [
"MIT"
] | null | null | null | test/orm/test_eager_relations.py | balabit-deps/balabit-os-6-sqlalchemy | 61defc84f2aab5e579080a1958375b805470b461 | [
"MIT"
] | null | null | null | """tests of joined-eager loaded attributes"""
from sqlalchemy.testing import eq_, is_, is_not_
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy.orm import joinedload, deferred, undefer, \
joinedload_all, backref, Session,\
defaultload, Load, load_only
from sqlalchemy import Integer, String, Date, ForeignKey, and_, select, \
func, text
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, create_session, \
lazyload, aliased, column_property
from sqlalchemy.sql import operators
from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing import fixtures, expect_warnings
from test.orm import _fixtures
from sqlalchemy.util import OrderedDict as odict
import datetime
class EagerTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL):
run_inserts = 'once'
run_deletes = None
__dialect__ = 'default'
def test_basic(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses': relationship(
mapper(Address, addresses), lazy='joined', order_by=Address.id)
})
sess = create_session()
q = sess.query(User)
eq_([User(id=7, addresses=[
Address(id=1, email_address='jack@bean.com')])],
q.filter(User.id == 7).all())
eq_(self.static.user_address_result, q.order_by(User.id).all())
def test_late_compile(self):
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
m = mapper(User, users)
sess = create_session()
sess.query(User).all()
m.add_property("addresses", relationship(mapper(Address, addresses)))
sess.expunge_all()
def go():
eq_(
[User(id=7, addresses=[
Address(id=1, email_address='jack@bean.com')])],
sess.query(User).options(
joinedload('addresses')).filter(User.id == 7).all()
)
self.assert_sql_count(testing.db, go, 1)
    def test_no_orphan(self):
        """An eagerly loaded child object is not marked as an orphan"""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User)
        # With delete-orphan cascade, an Address that appears parentless
        # would be flagged as an orphan; the joined eager load below must
        # establish the parent linkage on the loaded children.
        mapper(User, users, properties={
            'addresses': relationship(
                Address, cascade="all,delete-orphan", lazy='joined')
        })
        mapper(Address, addresses)
        sess = create_session()
        user = sess.query(User).get(7)
        # The eagerly loaded Address reports a parent through the
        # User.addresses attribute.
        assert getattr(User, 'addresses').\
            hasparent(
                sa.orm.attributes.instance_state(
                    user.addresses[0]), optimistic=True)
        # ...and the Address mapper therefore does not consider it an orphan.
        assert not sa.orm.class_mapper(Address).\
            _is_orphan(
                sa.orm.attributes.instance_state(user.addresses[0]))
def test_orderby(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses': relationship(
mapper(Address, addresses),
lazy='joined', order_by=addresses.c.email_address),
})
q = create_session().query(User)
eq_([
User(id=7, addresses=[
Address(id=1)
]),
User(id=8, addresses=[
Address(id=3, email_address='ed@bettyboop.com'),
Address(id=4, email_address='ed@lala.com'),
Address(id=2, email_address='ed@wood.com')
]),
User(id=9, addresses=[
Address(id=5)
]),
User(id=10, addresses=[])
], q.order_by(User.id).all())
def test_orderby_multi(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses': relationship(
mapper(Address, addresses),
lazy='joined',
order_by=[addresses.c.email_address, addresses.c.id]),
})
q = create_session().query(User)
eq_([
User(id=7, addresses=[
Address(id=1)
]),
User(id=8, addresses=[
Address(id=3, email_address='ed@bettyboop.com'),
Address(id=4, email_address='ed@lala.com'),
Address(id=2, email_address='ed@wood.com')
]),
User(id=9, addresses=[
Address(id=5)
]),
User(id=10, addresses=[])
], q.order_by(User.id).all())
def test_orderby_related(self):
"""A regular mapper select on a single table can
order by a relationship to a second table"""
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(Address, addresses)
mapper(User, users, properties=dict(
addresses=relationship(
Address, lazy='joined', order_by=addresses.c.id),
))
q = create_session().query(User)
l = q.filter(User.id == Address.user_id).order_by(
Address.email_address).all()
eq_([
User(id=8, addresses=[
Address(id=2, email_address='ed@wood.com'),
Address(id=3, email_address='ed@bettyboop.com'),
Address(id=4, email_address='ed@lala.com'),
]),
User(id=9, addresses=[
Address(id=5)
]),
User(id=7, addresses=[
Address(id=1)
]),
], l)
def test_orderby_desc(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(Address, addresses)
mapper(User, users, properties=dict(
addresses=relationship(
Address, lazy='joined',
order_by=[sa.desc(addresses.c.email_address)]),
))
sess = create_session()
eq_([
User(id=7, addresses=[
Address(id=1)
]),
User(id=8, addresses=[
Address(id=2, email_address='ed@wood.com'),
Address(id=4, email_address='ed@lala.com'),
Address(id=3, email_address='ed@bettyboop.com'),
]),
User(id=9, addresses=[
Address(id=5)
]),
User(id=10, addresses=[])
], sess.query(User).order_by(User.id).all())
def test_no_ad_hoc_orderby(self):
"""part of #2992; make sure string label references can't
access an eager loader, else an eager load can corrupt the query.
"""
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(Address, addresses)
mapper(User, users, properties=dict(
addresses=relationship(
Address),
))
sess = create_session()
q = sess.query(User).\
join("addresses").\
options(joinedload("addresses")).\
order_by("email_address")
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses_1.id AS addresses_1_id, addresses_1.user_id AS "
"addresses_1_user_id, addresses_1.email_address AS "
"addresses_1_email_address FROM users JOIN addresses "
"ON users.id = addresses.user_id LEFT OUTER JOIN addresses "
"AS addresses_1 ON users.id = addresses_1.user_id "
"ORDER BY addresses.email_address"
)
q = sess.query(User).options(joinedload("addresses")).\
order_by("email_address")
with expect_warnings("Can't resolve label reference 'email_address'"):
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses_1.id AS addresses_1_id, addresses_1.user_id AS "
"addresses_1_user_id, addresses_1.email_address AS "
"addresses_1_email_address FROM users LEFT OUTER JOIN "
"addresses AS addresses_1 ON users.id = addresses_1.user_id "
"ORDER BY email_address"
)
def test_deferred_fk_col(self):
users, Dingaling, User, dingalings, Address, addresses = (
self.tables.users,
self.classes.Dingaling,
self.classes.User,
self.tables.dingalings,
self.classes.Address,
self.tables.addresses)
mapper(Address, addresses, properties={
'user_id': deferred(addresses.c.user_id),
'user': relationship(User, lazy='joined')
})
mapper(User, users)
sess = create_session()
for q in [
sess.query(Address).filter(
Address.id.in_([1, 4, 5])
).order_by(Address.id),
sess.query(Address).filter(
Address.id.in_([1, 4, 5])
).order_by(Address.id).limit(3)
]:
sess.expunge_all()
eq_(q.all(),
[Address(id=1, user=User(id=7)),
Address(id=4, user=User(id=8)),
Address(id=5, user=User(id=9))]
)
sess.expunge_all()
a = sess.query(Address).filter(Address.id == 1).all()[0]
# 1.0 change! we don't automatically undefer user_id here.
# if the user wants a column undeferred, add the option.
def go():
eq_(a.user_id, 7)
# self.assert_sql_count(testing.db, go, 0)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
a = sess.query(Address).filter(Address.id == 1).first()
def go():
eq_(a.user_id, 7)
# same, 1.0 doesn't check these
# self.assert_sql_count(testing.db, go, 0)
self.assert_sql_count(testing.db, go, 1)
# do the mapping in reverse
# (we would have just used an "addresses" backref but the test
# fixtures then require the whole backref to be set up, lazy loaders
# trigger, etc.)
sa.orm.clear_mappers()
mapper(Address, addresses, properties={
'user_id': deferred(addresses.c.user_id),
})
mapper(User, users, properties={
'addresses': relationship(Address, lazy='joined')})
for q in [
sess.query(User).filter(User.id == 7),
sess.query(User).filter(User.id == 7).limit(1)
]:
sess.expunge_all()
eq_(q.all(),
[User(id=7, addresses=[Address(id=1)])]
)
sess.expunge_all()
u = sess.query(User).get(7)
def go():
eq_(u.addresses[0].user_id, 7)
# assert that the eager loader didn't have to affect 'user_id' here
# and that its still deferred
self.assert_sql_count(testing.db, go, 1)
sa.orm.clear_mappers()
mapper(User, users, properties={
'addresses': relationship(Address, lazy='joined',
order_by=addresses.c.id)})
mapper(Address, addresses, properties={
'user_id': deferred(addresses.c.user_id),
'dingalings': relationship(Dingaling, lazy='joined')})
mapper(Dingaling, dingalings, properties={
'address_id': deferred(dingalings.c.address_id)})
sess.expunge_all()
def go():
u = sess.query(User).get(8)
eq_(User(id=8,
addresses=[Address(id=2, dingalings=[Dingaling(id=1)]),
Address(id=3),
Address(id=4)]),
u)
self.assert_sql_count(testing.db, go, 1)
def test_options_pathing(self):
users, Keyword, orders, items, order_items, \
Order, Item, User, keywords, item_keywords = (
self.tables.users,
self.classes.Keyword,
self.tables.orders,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.keywords,
self.tables.item_keywords)
mapper(User, users, properties={
'orders': relationship(Order, order_by=orders.c.id), # o2m, m2o
})
mapper(Order, orders, properties={
'items': relationship(
Item,
secondary=order_items, order_by=items.c.id), # m2m
})
mapper(Item, items, properties={
'keywords': relationship(Keyword,
secondary=item_keywords,
order_by=keywords.c.id) # m2m
})
mapper(Keyword, keywords)
for opt, count in [
((
joinedload(User.orders, Order.items),
), 10),
((joinedload("orders.items"), ), 10),
((
joinedload(User.orders, ),
joinedload(User.orders, Order.items),
joinedload(User.orders, Order.items, Item.keywords),
), 1),
((
joinedload(User.orders, Order.items, Item.keywords),
), 10),
((
joinedload(User.orders, Order.items),
joinedload(User.orders, Order.items, Item.keywords),
), 5),
]:
sess = create_session()
def go():
eq_(
sess.query(User).options(*opt).order_by(User.id).all(),
self.static.user_item_keyword_result
)
self.assert_sql_count(testing.db, go, count)
    def test_disable_dynamic(self):
        """test no joined option on a dynamic."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User)
        mapper(User, users, properties={
            'addresses': relationship(Address, lazy="dynamic")
        })
        mapper(Address, addresses)
        sess = create_session()
        # A "dynamic" relationship does not support object population, so
        # applying joinedload to it must raise InvalidRequestError.
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "User.addresses' does not support object "
            "population - eager loading cannot be applied.",
            sess.query(User).options(joinedload(User.addresses)).first,
        )
def test_many_to_many(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
mapper(Keyword, keywords)
mapper(Item, items, properties=dict(
keywords=relationship(Keyword, secondary=item_keywords,
lazy='joined', order_by=keywords.c.id)))
q = create_session().query(Item).order_by(Item.id)
def go():
eq_(self.static.item_keyword_result, q.all())
self.assert_sql_count(testing.db, go, 1)
def go():
eq_(self.static.item_keyword_result[0:2],
q.join('keywords').filter(Keyword.name == 'red').all())
self.assert_sql_count(testing.db, go, 1)
def go():
eq_(self.static.item_keyword_result[0:2],
(q.join('keywords', aliased=True).
filter(Keyword.name == 'red')).all())
self.assert_sql_count(testing.db, go, 1)
def test_eager_option(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
mapper(Keyword, keywords)
mapper(Item, items, properties=dict(
keywords=relationship(
Keyword, secondary=item_keywords, lazy='select',
order_by=keywords.c.id)))
q = create_session().query(Item)
def go():
eq_(self.static.item_keyword_result[0:2],
(q.options(
joinedload('keywords')
).join('keywords').
filter(keywords.c.name == 'red')).order_by(Item.id).all())
self.assert_sql_count(testing.db, go, 1)
def test_cyclical(self):
"""A circular eager relationship breaks the cycle with a lazy loader"""
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(Address, addresses)
mapper(User, users, properties=dict(
addresses=relationship(
Address, lazy='joined',
backref=sa.orm.backref('user', lazy='joined'),
order_by=Address.id)
))
eq_(sa.orm.class_mapper(User).get_property('addresses').lazy, 'joined')
eq_(sa.orm.class_mapper(Address).get_property('user').lazy, 'joined')
sess = create_session()
eq_(
self.static.user_address_result,
sess.query(User).order_by(User.id).all())
def test_double(self):
"""Eager loading with two relationships simultaneously,
from the same table, using aliases."""
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses)
openorders = sa.alias(orders, 'openorders')
closedorders = sa.alias(orders, 'closedorders')
mapper(Address, addresses)
mapper(Order, orders)
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(User, users, properties=dict(
addresses=relationship(
Address, lazy='joined', order_by=addresses.c.id),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(openorders.c.isopen == 1,
users.c.id == openorders.c.user_id),
lazy='joined', order_by=openorders.c.id),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id),
lazy='joined', order_by=closedorders.c.id)))
q = create_session().query(User).order_by(User.id)
def go():
eq_([
User(
id=7,
addresses=[Address(id=1)],
open_orders=[Order(id=3)],
closed_orders=[Order(id=1), Order(id=5)]
),
User(
id=8,
addresses=[Address(id=2), Address(id=3), Address(id=4)],
open_orders=[],
closed_orders=[]
),
User(
id=9,
addresses=[Address(id=5)],
open_orders=[Order(id=4)],
closed_orders=[Order(id=2)]
),
User(id=10)
], q.all())
self.assert_sql_count(testing.db, go, 1)
def test_double_same_mappers(self):
"""Eager loading with two relationships simulatneously,
from the same table, using aliases."""
addresses, items, order_items, orders, \
Item, User, Address, Order, users = (
self.tables.addresses,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.users)
mapper(Address, addresses)
mapper(Order, orders, properties={
'items': relationship(Item, secondary=order_items, lazy='joined',
order_by=items.c.id)})
mapper(Item, items)
mapper(User, users, properties=dict(
addresses=relationship(
Address, lazy='joined', order_by=addresses.c.id),
open_orders=relationship(
Order,
primaryjoin=sa.and_(orders.c.isopen == 1,
users.c.id == orders.c.user_id),
lazy='joined', order_by=orders.c.id),
closed_orders=relationship(
Order,
primaryjoin=sa.and_(orders.c.isopen == 0,
users.c.id == orders.c.user_id),
lazy='joined', order_by=orders.c.id)))
q = create_session().query(User).order_by(User.id)
def go():
eq_([
User(id=7,
addresses=[
Address(id=1)],
open_orders=[Order(id=3,
items=[
Item(id=3),
Item(id=4),
Item(id=5)])],
closed_orders=[Order(id=1,
items=[
Item(id=1),
Item(id=2),
Item(id=3)]),
Order(id=5,
items=[
Item(id=5)])]),
User(id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4)],
open_orders=[],
closed_orders=[]),
User(id=9,
addresses=[
Address(id=5)],
open_orders=[
Order(id=4,
items=[
Item(id=1),
Item(id=5)])],
closed_orders=[
Order(id=2,
items=[
Item(id=1),
Item(id=2),
Item(id=3)])]),
User(id=10)
], q.all())
self.assert_sql_count(testing.db, go, 1)
def test_no_false_hits(self):
"""Eager loaders don't interpret main table columns as
part of their eager load."""
addresses, orders, User, Address, Order, users = (
self.tables.addresses,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.users)
mapper(User, users, properties={
'addresses': relationship(Address, lazy='joined'),
'orders': relationship(Order, lazy='joined')
})
mapper(Address, addresses)
mapper(Order, orders)
self.allusers = create_session().query(User).all()
# using a textual select, the columns will be 'id' and 'name'. the
# eager loaders have aliases which should not hit on those columns,
# they should be required to locate only their aliased/fully table
# qualified column name.
noeagers = create_session().query(User).\
from_statement(text("select * from users")).all()
assert 'orders' not in noeagers[0].__dict__
assert 'addresses' not in noeagers[0].__dict__
def test_limit(self):
"""Limit operations combined with lazy-load relationships."""
users, items, order_items, orders, Item, \
User, Address, Order, addresses = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses)
mapper(Item, items)
mapper(Order, orders, properties={
'items': relationship(Item, secondary=order_items, lazy='joined',
order_by=items.c.id)
})
mapper(User, users, properties={
'addresses': relationship(
mapper(Address, addresses),
lazy='joined', order_by=addresses.c.id),
'orders': relationship(Order, lazy='select', order_by=orders.c.id)
})
sess = create_session()
q = sess.query(User)
l = q.order_by(User.id).limit(2).offset(1).all()
eq_(self.static.user_all_result[1:3], l)
def test_distinct(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
# this is an involved 3x union of the users table to get a lot of rows.
# then see if the "distinct" works its way out. you actually get
# the same result with or without the distinct, just via less or
# more rows.
u2 = users.alias('u2')
s = sa.union_all(
u2.select(use_labels=True), u2.select(use_labels=True),
u2.select(use_labels=True)).alias('u')
mapper(User, users, properties={
'addresses': relationship(
mapper(Address, addresses),
lazy='joined', order_by=addresses.c.id),
})
sess = create_session()
q = sess.query(User)
def go():
l = q.filter(s.c.u2_id == User.id).distinct().\
order_by(User.id).all()
eq_(self.static.user_address_result, l)
self.assert_sql_count(testing.db, go, 1)
def test_limit_2(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
mapper(Keyword, keywords)
mapper(Item, items, properties=dict(
keywords=relationship(
Keyword, secondary=item_keywords,
lazy='joined', order_by=[keywords.c.id]),
))
sess = create_session()
q = sess.query(Item)
l = q.filter((Item.description == 'item 2') |
(Item.description == 'item 5') |
(Item.description == 'item 3')).\
order_by(Item.id).limit(2).all()
eq_(self.static.item_keyword_result[1:3], l)
    def test_limit_3(self):
        """test that the ORDER BY is propagated from the inner
        select to the outer select, when using the
        'wrapped' select statement resulting from the combination of
        eager loading and limit/offset clauses."""

        addresses, items, order_items, orders, \
            Item, User, Address, Order, users = (
                self.tables.addresses,
                self.tables.items,
                self.tables.order_items,
                self.tables.orders,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.classes.Order,
                self.tables.users)

        mapper(Item, items)
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy='joined')
        ))
        mapper(Address, addresses)
        mapper(User, users, properties=dict(
            addresses=relationship(
                Address, lazy='joined', order_by=addresses.c.id),
            orders=relationship(Order, lazy='joined', order_by=orders.c.id),
        ))
        sess = create_session()
        q = sess.query(User)

        # NOTE(review): the first assertion is skipped on mssql,
        # presumably due to its LIMIT/OFFSET emulation — confirm.
        if not testing.against('mssql'):
            l = q.join('orders').order_by(
                Order.user_id.desc()).limit(2).offset(1)
            eq_([
                User(id=9,
                     orders=[Order(id=2), Order(id=4)],
                     addresses=[Address(id=5)]
                     ),
                User(id=7,
                     orders=[Order(id=1), Order(id=3), Order(id=5)],
                     addresses=[Address(id=1)]
                     )
            ], l.all())

        l = q.join('addresses').order_by(
            Address.email_address.desc()).limit(1).offset(0)
        eq_([
            User(id=7,
                 orders=[Order(id=1), Order(id=3), Order(id=5)],
                 addresses=[Address(id=1)]
                 )
        ], l.all())
    def test_limit_4(self):
        """LIMIT/OFFSET aliasing for a mapper mapped against a select."""

        User, Order, addresses, users, orders = (self.classes.User,
                                                 self.classes.Order,
                                                 self.tables.addresses,
                                                 self.tables.users,
                                                 self.tables.orders)

        # tests the LIMIT/OFFSET aliasing on a mapper
        # against a select. original issue from ticket #904
        sel = sa.select([users, addresses.c.email_address],
                        users.c.id == addresses.c.user_id).alias('useralias')
        mapper(User, sel, properties={
            'orders': relationship(
                Order, primaryjoin=sel.c.id == orders.c.user_id,
                lazy='joined', order_by=orders.c.id)
        })
        mapper(Order, orders)

        sess = create_session()
        # first() applies LIMIT 1; the eager 'orders' join must alias
        # correctly against the wrapped "useralias" select
        eq_(sess.query(User).first(),
            User(name='jack', orders=[
                Order(
                    address_id=1,
                    description='order 1',
                    isopen=0,
                    user_id=7,
                    id=1),
                Order(
                    address_id=1,
                    description='order 3',
                    isopen=1,
                    user_id=7,
                    id=3),
                Order(
                    address_id=None, description='order 5', isopen=0,
                    user_id=7, id=5)],
                email_address='jack@bean.com', id=7)
            )
    def test_useget_cancels_eager(self):
        """test that a one to many lazyload cancels the unnecessary
        eager many-to-one join on the other side."""

        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User)

        mapper(User, users)
        mapper(Address, addresses, properties={
            'user': relationship(User, lazy='joined', backref='addresses')
        })
        sess = create_session()
        u1 = sess.query(User).filter(User.id == 8).one()

        def go():
            # reading u1.addresses[0].user must not join back to users:
            # the asserted SQL below selects from addresses alone, with
            # the parent satisfied from the identity map
            eq_(u1.addresses[0].user, u1)
        self.assert_sql_execution(
            testing.db, go,
            CompiledSQL(
                "SELECT addresses.id AS addresses_id, addresses.user_id AS "
                "addresses_user_id, addresses.email_address AS "
                "addresses_email_address FROM addresses WHERE :param_1 = "
                "addresses.user_id",
                {'param_1': 8})
        )
    def test_manytoone_limit(self):
        """test that the subquery wrapping only occurs with
        limit/offset and m2m or o2m joins present."""

        users, items, order_items, Order, Item, User, \
            Address, orders, addresses = (
                self.tables.users,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.tables.orders,
                self.tables.addresses)

        # odict keeps property order deterministic so generated alias
        # names (orders_1, addresses_1, ...) are stable for assert_compile
        mapper(User, users, properties=odict(
            orders=relationship(Order, backref='user')
        ))
        mapper(Order, orders, properties=odict([
            ('items', relationship(Item, secondary=order_items,
                                   backref='orders')),
            ('address', relationship(Address))
        ]))
        mapper(Address, addresses)
        mapper(Item, items)

        sess = create_session()

        # o2m + LIMIT: the users SELECT is wrapped in a subquery and the
        # eager JOIN is applied outside of it
        self.assert_compile(
            sess.query(User).options(joinedload(User.orders)).limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name "
            "AS anon_1_users_name, orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, orders_1.address_id "
            "AS orders_1_address_id, orders_1.description AS "
            "orders_1_description, orders_1.isopen AS orders_1_isopen "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS "
            "orders_1 ON anon_1.users_id = orders_1.user_id",
            {'param_1': 10}
        )

        # m2o + LIMIT: no collection involved, so no subquery wrapping
        self.assert_compile(
            sess.query(Order).options(joinedload(Order.user)).limit(10),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS "
            "orders_address_id, orders.description AS orders_description, "
            "orders.isopen AS orders_isopen, "
            "users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM orders LEFT OUTER JOIN users AS "
            "users_1 ON users_1.id = orders.user_id LIMIT :param_1",
            {'param_1': 10}
        )

        # m2o + LIMIT with innerjoin=True: plain JOIN, still unwrapped
        self.assert_compile(
            sess.query(Order).options(
                joinedload(Order.user, innerjoin=True)).limit(10),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS "
            "orders_address_id, orders.description AS orders_description, "
            "orders.isopen AS orders_isopen, "
            "users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM orders JOIN users AS "
            "users_1 ON users_1.id = orders.user_id LIMIT :param_1",
            {'param_1': 10}
        )

        # o2m chained to m2o ("orders.address") + LIMIT: wrapped subquery
        self.assert_compile(
            sess.query(User).options(
                joinedload_all("orders.address")).limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, "
            "anon_1.users_name AS anon_1_users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM "
            "(SELECT users.id AS users_id, users.name AS users_name "
            "FROM users LIMIT :param_1) AS anon_1 "
            "LEFT OUTER JOIN orders AS orders_1 "
            "ON anon_1.users_id = orders_1.user_id LEFT OUTER JOIN "
            "addresses AS addresses_1 ON addresses_1.id = orders_1.address_id",
            {'param_1': 10}
        )

        # no LIMIT: no wrapping even with m2m + m2o both eager
        self.assert_compile(
            sess.query(User).options(joinedload_all("orders.items"),
                                     joinedload("orders.address")),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS items_1_id, "
            "items_1.description AS items_1_description, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS "
            "addresses_1_email_address, orders_1.id AS orders_1_id, "
            "orders_1.user_id AS "
            "orders_1_user_id, orders_1.address_id AS orders_1_address_id, "
            "orders_1.description "
            "AS orders_1_description, orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id "
            "LEFT OUTER JOIN addresses AS addresses_1 "
            "ON addresses_1.id = orders_1.address_id"
        )

        # inner join on the chained m2o nests inside the outer join
        self.assert_compile(
            sess.query(User).options(
                joinedload("orders"),
                joinedload(
                    "orders.address",
                    innerjoin=True)).limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name "
            "AS anon_1_users_name, addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address, "
            "orders_1.id AS orders_1_id, orders_1.user_id AS "
            "orders_1_user_id, orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users"
            " LIMIT :param_1) AS anon_1 LEFT OUTER JOIN "
            "(orders AS orders_1 JOIN addresses AS addresses_1 "
            "ON addresses_1.id = orders_1.address_id) ON "
            "anon_1.users_id = orders_1.user_id",
            {'param_1': 10}
        )

        # both relationships innerjoin=True: plain JOINs throughout
        self.assert_compile(
            sess.query(User).options(
                joinedload("orders", innerjoin=True),
                joinedload("orders.address", innerjoin=True)).limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, "
            "anon_1.users_name AS anon_1_users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "LIMIT :param_1) AS anon_1 JOIN orders "
            "AS orders_1 ON anon_1.users_id = "
            "orders_1.user_id JOIN addresses AS addresses_1 "
            "ON addresses_1.id = orders_1.address_id",
            {'param_1': 10}
        )
def test_one_to_many_scalar(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(User, users, properties=dict(
address=relationship(mapper(Address, addresses),
lazy='joined', uselist=False)
))
q = create_session().query(User)
def go():
l = q.filter(users.c.id == 7).all()
eq_([User(id=7, address=Address(id=1))], l)
self.assert_sql_count(testing.db, go, 1)
    def test_one_to_many_scalar_subq_wrapping(self):
        """A uselist=False joined eager load combined with LIMIT does
        not trigger subquery wrapping; LIMIT renders directly on the
        single SELECT."""

        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)

        mapper(User, users, properties=dict(
            address=relationship(mapper(Address, addresses),
                                 lazy='joined', uselist=False)
        ))
        q = create_session().query(User)
        q = q.filter(users.c.id == 7).limit(1)

        # scalar (uselist=False) relationship: no row multiplication,
        # so no subquery is needed around the LIMIT
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users LEFT OUTER JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id "
            "WHERE users.id = :id_1 "
            "LIMIT :param_1",
            checkparams={'id_1': 7, 'param_1': 1}
        )
def test_many_to_one(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties=dict(
user=relationship(mapper(User, users), lazy='joined')
))
sess = create_session()
q = sess.query(Address)
def go():
a = q.filter(addresses.c.id == 1).one()
is_not_(a.user, None)
u1 = sess.query(User).get(7)
is_(a.user, u1)
self.assert_sql_count(testing.db, go, 1)
    def test_many_to_one_null(self):
        """test that a many-to-one eager load which loads None does
        not later trigger a lazy load.

        """

        Order, Address, addresses, orders = (self.classes.Order,
                                             self.classes.Address,
                                             self.tables.addresses,
                                             self.tables.orders)

        # use a primaryjoin intended to defeat SA's usage of
        # query.get() for a many-to-one lazyload
        mapper(Order, orders, properties=dict(
            address=relationship(
                mapper(Address, addresses),
                primaryjoin=and_(
                    addresses.c.id == orders.c.address_id,
                    addresses.c.email_address != None
                ),
                lazy='joined')
        ))
        sess = create_session()

        def go():
            # eager load disabled via lazyload(): accessing o1.address
            # emits a second, lazy SELECT -> two statements total
            o1 = sess.query(Order).options(
                lazyload('address')).filter(
                Order.id == 5).one()
            eq_(o1.address, None)
        self.assert_sql_count(testing.db, go, 2)

        sess.expunge_all()

        def go():
            # joined eager load active: the None result is recorded at
            # load time, so attribute access emits no further SQL
            o1 = sess.query(Order).filter(Order.id == 5).one()
            eq_(o1.address, None)
        self.assert_sql_count(testing.db, go, 1)
def test_one_and_many(self):
"""tests eager load for a parent object with a child object that
contains a many-to-many relationship to a third object."""
users, items, order_items, orders, Item, User, Order = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Order)
mapper(User, users, properties={
'orders': relationship(Order, lazy='joined', order_by=orders.c.id)
})
mapper(Item, items)
mapper(Order, orders, properties=dict(
items=relationship(
Item,
secondary=order_items,
lazy='joined',
order_by=items.c.id)
))
q = create_session().query(User)
l = q.filter(text("users.id in (7, 8, 9)")).order_by(text("users.id"))
def go():
eq_(self.static.user_order_result[0:3], l.all())
self.assert_sql_count(testing.db, go, 1)
    def test_double_with_aggregate(self):
        """Two joined eager loads on the same class, one of them via a
        non-primary mapper against an aggregate select, render into a
        single statement."""

        User, users, orders, Order = (self.classes.User,
                                      self.tables.users,
                                      self.tables.orders,
                                      self.classes.Order)

        # select of each user's highest order id, grouped per user
        max_orders_by_user = sa.select([
            sa.func.max(orders.c.id).label('order_id')],
            group_by=[orders.c.user_id]
        ).alias('max_orders_by_user')

        max_orders = orders.select(
            orders.c.id == max_orders_by_user.c.order_id).\
            alias('max_orders')

        mapper(Order, orders)
        mapper(User, users, properties={
            'orders': relationship(Order, backref='user', lazy='joined',
                                   order_by=orders.c.id),
            # non_primary mapper gives a second, alternate mapping of
            # Order against the "max_orders" select for the scalar attr
            'max_order': relationship(
                mapper(Order, max_orders, non_primary=True),
                lazy='joined', uselist=False)
        })

        q = create_session().query(User)

        def go():
            eq_([
                User(id=7, orders=[
                    Order(id=1),
                    Order(id=3),
                    Order(id=5),
                ],
                    max_order=Order(id=5)
                ),
                User(id=8, orders=[]),
                User(id=9, orders=[Order(id=2), Order(id=4)],
                     max_order=Order(id=4)
                     ),
                User(id=10),
            ], q.order_by(User.id).all())
        self.assert_sql_count(testing.db, go, 1)
def test_uselist_false_warning(self):
"""test that multiple rows received by a
uselist=False raises a warning."""
User, users, orders, Order = (self.classes.User,
self.tables.users,
self.tables.orders,
self.classes.Order)
mapper(User, users, properties={
'order': relationship(Order, uselist=False)
})
mapper(Order, orders)
s = create_session()
assert_raises(sa.exc.SAWarning,
s.query(User).options(joinedload(User.order)).all)
    def test_wide(self):
        """Two joined eager collections on one parent ("wide" result
        rows) load in a single statement."""

        users, items, order_items, Order, Item, \
            User, Address, orders, addresses = (
                self.tables.users,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.tables.orders,
                self.tables.addresses)

        mapper(
            Order, orders, properties={
                'items': relationship(
                    Item, secondary=order_items, lazy='joined',
                    order_by=items.c.id)})
        mapper(Item, items)
        # lazy=False is the legacy spelling of lazy='joined'
        mapper(User, users, properties=dict(
            addresses=relationship(
                mapper(
                    Address,
                    addresses),
                lazy=False,
                order_by=addresses.c.id),
            orders=relationship(Order, lazy=False, order_by=orders.c.id),
        ))
        q = create_session().query(User)

        def go():
            eq_(self.static.user_all_result, q.order_by(User.id).all())
        self.assert_sql_count(testing.db, go, 1)
    def test_against_select(self):
        """test eager loading of a mapper which is against a select"""

        users, items, order_items, orders, Item, User, Order = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.tables.orders,
            self.classes.Item,
            self.classes.User,
            self.classes.Order)

        # Order is mapped only to the rows where isopen == 1
        s = sa.select([orders], orders.c.isopen == 1).alias('openorders')

        mapper(Order, s, properties={
            'user': relationship(User, lazy='joined')
        })
        mapper(User, users)
        mapper(Item, items)

        q = create_session().query(Order)
        eq_([
            Order(id=3, user=User(id=7)),
            Order(id=4, user=User(id=9))
        ], q.all())

        # the eager load keeps working when an explicit select_from
        # narrows the rows further
        q = q.select_from(s.join(order_items).join(items)).filter(
            ~Item.id.in_([1, 2, 5]))
        eq_([
            Order(id=3, user=User(id=7)),
        ], q.all())
def test_aliasing(self):
"""test that eager loading uses aliases to insulate the eager
load from regular criterion against those tables."""
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(User, users, properties=dict(
addresses=relationship(mapper(Address, addresses),
lazy='joined', order_by=addresses.c.id)
))
q = create_session().query(User)
l = q.filter(addresses.c.email_address == 'ed@lala.com').filter(
Address.user_id == User.id).order_by(User.id)
eq_(self.static.user_address_result[1:2], l.all())
    def test_inner_join(self):
        """innerjoin=True renders the eager load as a plain JOIN
        instead of a LEFT OUTER JOIN."""

        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)

        mapper(User, users, properties=dict(
            addresses=relationship(mapper(Address, addresses), lazy='joined',
                                   innerjoin=True, order_by=addresses.c.id)
        ))
        sess = create_session()
        # only users with at least one address come back (no id=10 row)
        eq_(
            [User(id=7, addresses=[Address(id=1)]),
             User(id=8,
                  addresses=[Address(id=2, email_address='ed@wood.com'),
                             Address(id=3, email_address='ed@bettyboop.com'),
                             Address(id=4, email_address='ed@lala.com'), ]),
             User(id=9, addresses=[Address(id=5)])], sess.query(User).all()
        )
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users JOIN "
            "addresses AS addresses_1 ON users.id = addresses_1.user_id "
            "ORDER BY addresses_1.id")
    def test_inner_join_unnested_chaining_options(self):
        """innerjoin="unnested" at the mapper level renders chained
        inner joins without nesting; per-relationship
        joinedload(..., innerjoin=False) options degrade them back to
        LEFT OUTER JOINs."""

        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)

        mapper(User, users, properties=dict(
            orders=relationship(Order, innerjoin="unnested",
                                lazy=False)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy=False,
                               innerjoin="unnested")
        ))
        mapper(Item, items)

        sess = create_session()
        # default: both hops render as plain, unnested JOINs
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM users "
            "JOIN orders AS orders_1 ON "
            "users.id = orders_1.user_id JOIN order_items AS order_items_1 "
            "ON orders_1.id = "
            "order_items_1.order_id JOIN items AS items_1 ON items_1.id = "
            "order_items_1.item_id"
        )

        # innerjoin=False on the first hop outer-joins both hops, with
        # the m2m portion nested to preserve correct row semantics
        self.assert_compile(
            sess.query(User).options(joinedload(User.orders, innerjoin=False)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id"
        )

        # innerjoin=False on only the second hop: first hop stays a
        # plain JOIN, second becomes the nested LEFT OUTER JOIN
        self.assert_compile(
            sess.query(User).options(
                joinedload(
                    User.orders,
                    Order.items,
                    innerjoin=False)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users JOIN orders AS orders_1 ON "
            "users.id = orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id"
        )
    def test_inner_join_nested_chaining_negative_options(self):
        """innerjoin=True at the mapper level with per-query
        joinedload(..., innerjoin=False) options selectively degrading
        the chained joins to (nested) LEFT OUTER JOINs."""

        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)

        mapper(User, users, properties=dict(
            orders=relationship(Order, innerjoin=True,
                                lazy=False, order_by=orders.c.id)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy=False,
                               innerjoin=True, order_by=items.c.id)
        ))
        mapper(Item, items)

        sess = create_session()
        # default: chain of plain JOINs
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM users "
            "JOIN orders AS orders_1 ON "
            "users.id = orders_1.user_id JOIN order_items "
            "AS order_items_1 ON orders_1.id = "
            "order_items_1.order_id JOIN items AS items_1 ON items_1.id = "
            "order_items_1.item_id ORDER BY orders_1.id, items_1.id"
        )

        # innerjoin=False on 'orders' pushes the whole chain into one
        # nested LEFT OUTER JOIN so inner hops stay inner
        q = sess.query(User).options(joinedload(User.orders, innerjoin=False))
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN "
            "(orders AS orders_1 JOIN order_items AS order_items_1 "
            "ON orders_1.id = order_items_1.order_id "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON users.id = orders_1.user_id ORDER BY orders_1.id, items_1.id"
        )

        # round trip: users without orders (8, 10) still come back
        eq_(
            [
                User(id=7,
                     orders=[
                         Order(id=1, items=[Item(id=1), Item(id=2),
                                            Item(id=3)]),
                         Order(id=3, items=[Item(id=3), Item(id=4),
                                            Item(id=5)]),
                         Order(id=5, items=[Item(id=5)])]),
                User(id=8, orders=[]),
                User(id=9, orders=[
                    Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)]),
                    Order(id=4, items=[Item(id=1), Item(id=5)])
                ]
                ),
                User(id=10, orders=[])
            ],
            q.order_by(User.id).all()
        )

        # innerjoin=False on only 'orders.items': first hop remains a
        # plain JOIN, the m2m hop becomes a nested LEFT OUTER JOIN
        self.assert_compile(
            sess.query(User).options(
                joinedload(
                    User.orders,
                    Order.items,
                    innerjoin=False)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description AS "
            "orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users JOIN orders AS orders_1 ON users.id = "
            "orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id ORDER BY "
            "orders_1.id, items_1.id"
        )
    def test_inner_join_nested_chaining_positive_options(self):
        """joinedload chaining innerjoin=False -> innerjoin=True: the
        inner join nests inside the outer join so outer-join row
        semantics are preserved."""

        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)

        mapper(User, users, properties=dict(
            orders=relationship(Order, order_by=orders.c.id)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(
                Item,
                secondary=order_items,
                order_by=items.c.id)
        ))
        mapper(Item, items)
        sess = create_session()
        q = sess.query(User).options(
            joinedload("orders", innerjoin=False).
            joinedload("items", innerjoin=True)
        )

        # the innerjoin=True hop renders inside the parenthesized
        # LEFT OUTER JOIN target, not at the top level
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS items_1_id, items_1.description "
            "AS items_1_description, "
            "orders_1.id AS orders_1_id, orders_1.user_id "
            "AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS "
            "orders_1_description, orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN (orders AS orders_1 "
            "JOIN order_items AS "
            "order_items_1 ON orders_1.id = order_items_1.order_id "
            "JOIN items AS "
            "items_1 ON items_1.id = order_items_1.item_id) "
            "ON users.id = orders_1.user_id "
            "ORDER BY orders_1.id, items_1.id"
        )

        # round trip: users with no orders (8, 10) are still returned
        eq_(
            [
                User(id=7,
                     orders=[
                         Order(id=1, items=[Item(id=1), Item(id=2),
                                            Item(id=3)]),
                         Order(id=3, items=[Item(id=3), Item(id=4),
                                            Item(id=5)]),
                         Order(id=5, items=[Item(id=5)])]),
                User(id=8, orders=[]),
                User(id=9, orders=[
                    Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)]),
                    Order(id=4, items=[Item(id=1), Item(id=5)])
                ]
                ),
                User(id=10, orders=[])
            ],
            q.order_by(User.id).all()
        )
    def test_unnested_outerjoin_propagation_only_on_correct_path(self):
        # test #3131: the outer join on 'orders' must not degrade the
        # sibling 'addresses' path, which keeps its unnested inner join

        User, users = self.classes.User, self.tables.users
        Order, orders = self.classes.Order, self.tables.orders
        Address, addresses = self.classes.Address, self.tables.addresses

        # odict keeps property order deterministic for assert_compile
        mapper(User, users, properties=odict([
            ('orders', relationship(Order)),
            ('addresses', relationship(Address))
        ]))
        mapper(Order, orders)
        mapper(Address, addresses)

        sess = create_session()
        q = sess.query(User).options(
            joinedload("orders"),
            joinedload("addresses", innerjoin="unnested"),
        )
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id"
        )
    def test_nested_outerjoin_propagation_only_on_correct_path(self):
        # test #3131: same as the "unnested" variant but with
        # innerjoin=True; the sibling 'addresses' inner join is again
        # unaffected by the outer join on 'orders'

        User, users = self.classes.User, self.tables.users
        Order, orders = self.classes.Order, self.tables.orders
        Address, addresses = self.classes.Address, self.tables.addresses

        # odict keeps property order deterministic for assert_compile
        mapper(User, users, properties=odict([
            ('orders', relationship(Order)),
            ('addresses', relationship(Address))
        ]))
        mapper(Order, orders)
        mapper(Address, addresses)

        sess = create_session()
        q = sess.query(User).options(
            joinedload("orders"),
            joinedload("addresses", innerjoin=True),
        )
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id"
        )
    def test_catch_the_right_target(self):
        # test eager join chaining to the "nested" join on the left,
        # a new feature as of [ticket:2369]
        users, Keyword, orders, items, order_items, Order, Item, \
            User, keywords, item_keywords = (
                self.tables.users,
                self.classes.Keyword,
                self.tables.orders,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.tables.keywords,
                self.tables.item_keywords)

        mapper(User, users, properties={
            'orders': relationship(Order, backref='user'),  # o2m, m2o
        })
        mapper(Order, orders, properties={
            'items': relationship(Item, secondary=order_items,
                                  order_by=items.c.id),  # m2m
        })
        mapper(Item, items, properties={
            'keywords': relationship(Keyword, secondary=item_keywords,
                                     order_by=keywords.c.id)  # m2m
        })
        mapper(Keyword, keywords)

        sess = create_session()
        # explicit join() plus a joinedload_all() over the same path:
        # the eager load must use its own aliased joins
        q = sess.query(User).join(User.orders).join(Order.items).\
            options(joinedload_all("orders.items.keywords"))

        # here, the eager join for keywords can catch onto
        # join(Order.items) or the nested (orders LEFT OUTER JOIN items),
        # it should catch the latter
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "keywords_1.id AS keywords_1_id, keywords_1.name "
            "AS keywords_1_name, "
            "items_1.id AS items_1_id, items_1.description AS "
            "items_1_description, "
            "orders_1.id AS orders_1_id, orders_1.user_id AS "
            "orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users JOIN orders ON users.id = orders.user_id "
            "JOIN order_items AS order_items_1 ON orders.id = "
            "order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id "
            "LEFT OUTER JOIN orders AS orders_1 ON users.id = "
            "orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_2 "
            "JOIN items AS items_1 ON items_1.id = order_items_2.item_id) "
            "ON orders_1.id = order_items_2.order_id "
            "LEFT OUTER JOIN (item_keywords AS item_keywords_1 "
            "JOIN keywords AS keywords_1 ON keywords_1.id = "
            "item_keywords_1.keyword_id) "
            "ON items_1.id = item_keywords_1.item_id "
            "ORDER BY items_1.id, keywords_1.id"
        )
    def test_inner_join_unnested_chaining_fixed(self):
        """innerjoin="unnested" on the inner hop is honored only when
        the chain does not start from an outer join."""

        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)

        mapper(User, users, properties=dict(
            orders=relationship(Order, lazy=False)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy=False,
                               innerjoin="unnested")
        ))
        mapper(Item, items)

        sess = create_session()
        # joining from user, its all LEFT OUTER JOINs
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description AS "
            "orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM users LEFT OUTER JOIN "
            "orders AS orders_1 ON "
            "users.id = orders_1.user_id LEFT OUTER JOIN "
            "(order_items AS order_items_1 JOIN items AS items_1 ON "
            "items_1.id = "
            "order_items_1.item_id) ON orders_1.id = "
            "order_items_1.order_id"
        )

        # joining just from Order, innerjoin=True can be respected
        self.assert_compile(
            sess.query(Order),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS orders_address_id, orders.description AS "
            "orders_description, orders.isopen AS orders_isopen, items_1.id "
            "AS items_1_id, items_1.description AS items_1_description FROM "
            "orders JOIN order_items AS order_items_1 ON orders.id = "
            "order_items_1.order_id JOIN items AS items_1 ON items_1.id = "
            "order_items_1.item_id"
        )
    def test_inner_join_nested_chaining_fixed(self):
        """innerjoin='nested' on the inner hop: the inner join is
        preserved by nesting it inside the leading LEFT OUTER JOIN."""

        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)

        mapper(User, users, properties=dict(
            orders=relationship(Order, lazy=False)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy=False,
                               innerjoin='nested')
        ))
        mapper(Item, items)

        sess = create_session()
        # the whole orders/order_items/items chain renders inside the
        # parenthesized target of the single LEFT OUTER JOIN
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description AS "
            "orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN "
            "(orders AS orders_1 JOIN order_items AS order_items_1 "
            "ON orders_1.id = order_items_1.order_id "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON users.id = orders_1.user_id"
        )
    def test_inner_join_options(self):
        """``joinedload(..., innerjoin=True)`` given as a query option
        produces plain JOINs in the compiled SQL, populates collections
        in one query, and the mapper-level innerjoin default (set here
        via the ``user`` backref) is honored when the option does not
        specify one.
        """
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)
        mapper(User, users, properties=dict(
            orders=relationship(Order, backref=backref('user', innerjoin=True),
                                order_by=orders.c.id)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(
                Item,
                secondary=order_items,
                order_by=items.c.id)
        ))
        mapper(Item, items)
        sess = create_session()
        # single-level option: users JOIN orders (inner, not outer)
        self.assert_compile(
            sess.query(User).options(joinedload(User.orders, innerjoin=True)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, orders_1.address_id AS "
            "orders_1_address_id, "
            "orders_1.description AS orders_1_description, orders_1.isopen "
            "AS orders_1_isopen "
            "FROM users JOIN orders AS orders_1 ON users.id = "
            "orders_1.user_id ORDER BY orders_1.id")
        # two-level option chain: both hops rendered as inner JOINs
        self.assert_compile(
            sess.query(User).options(
                joinedload_all(User.orders, Order.items, innerjoin=True)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS items_1_id, "
            "items_1.description AS items_1_description, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, orders_1.address_id "
            "AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, orders_1.isopen "
            "AS orders_1_isopen "
            "FROM users JOIN orders AS orders_1 ON users.id = "
            "orders_1.user_id JOIN order_items AS "
            "order_items_1 ON orders_1.id = order_items_1.order_id "
            "JOIN items AS items_1 ON "
            "items_1.id = order_items_1.item_id ORDER BY orders_1.id, "
            "items_1.id")
        def go():
            # fully eager result: both collections populated in 1 query
            eq_(
                sess.query(User).options(
                    joinedload(User.orders, innerjoin=True),
                    joinedload(User.orders, Order.items, innerjoin=True)).
                order_by(User.id).all(),
                [User(id=7,
                      orders=[
                          Order(
                              id=1, items=[
                                  Item(
                                      id=1), Item(
                                      id=2), Item(
                                      id=3)]),
                          Order(
                              id=3, items=[
                                  Item(
                                      id=3), Item(
                                      id=4), Item(
                                      id=5)]),
                          Order(id=5, items=[Item(id=5)])]),
                 User(id=9, orders=[
                     Order(
                         id=2, items=[
                             Item(
                                 id=1), Item(
                                 id=2), Item(
                                 id=3)]),
                     Order(id=4, items=[Item(id=1), Item(id=5)])])
                 ]
            )
        self.assert_sql_count(testing.db, go, 1)
        # test that default innerjoin setting is used for options
        self.assert_compile(
            sess.query(Order).options(
                joinedload(
                    Order.user)).filter(
                Order.description == 'foo'),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS "
            "orders_address_id, orders.description AS orders_description, "
            "orders.isopen AS "
            "orders_isopen, users_1.id AS users_1_id, users_1.name "
            "AS users_1_name "
            "FROM orders JOIN users AS users_1 ON users_1.id = orders.user_id "
            "WHERE orders.description = :description_1"
        )
    def test_propagated_lazyload_wildcard_unbound(self):
        """Run the wildcard-lazyload check using unbound options."""
        self._test_propagated_lazyload_wildcard(False)
    def test_propagated_lazyload_wildcard_bound(self):
        """Run the wildcard-lazyload check using ``Load``-bound options."""
        self._test_propagated_lazyload_wildcard(True)
    def _test_propagated_lazyload_wildcard(self, use_load):
        """``defaultload("orders").lazyload("*")`` suppresses the
        mapper-configured joined eager load of Order.items: accessing
        ``u.orders`` emits a second, separate SELECT against orders
        only (no items join).

        :param use_load: if True, build the option from ``Load(User)``;
            otherwise use the unbound ``defaultload`` form.
        """
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)
        mapper(User, users, properties=dict(
            orders=relationship(Order, lazy="select")
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy="joined")
        ))
        mapper(Item, items)
        sess = create_session()
        if use_load:
            opt = Load(User).defaultload("orders").lazyload("*")
        else:
            opt = defaultload("orders").lazyload("*")
        q = sess.query(User).filter(User.id == 7).options(opt)
        def go():
            for u in q:
                u.orders
        # exactly two statements: the users SELECT, then a lazy orders
        # SELECT with no joined items
        self.sql_eq_(go, [
            ("SELECT users.id AS users_id, users.name AS users_name "
             "FROM users WHERE users.id = :id_1", {"id_1": 7}),
            ("SELECT orders.id AS orders_id, "
             "orders.user_id AS orders_user_id, "
             "orders.address_id AS orders_address_id, "
             "orders.description AS orders_description, "
             "orders.isopen AS orders_isopen FROM orders "
             "WHERE :param_1 = orders.user_id", {"param_1": 7}),
        ])
class InnerJoinSplicingTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
    """Splicing of ``innerjoin=True`` joined eager loads into an
    enclosing LEFT OUTER JOIN chain, across the graph
    A -> B -> (C1 -> D1 -> E1) and B -> (C2 -> D2).

    Each test asserts both the compiled SQL (join nesting) and the
    fully-loaded result against :meth:`_fixture_data`.
    """
    __dialect__ = 'default'
    __backend__ = True  # exercise hardcore join nesting on backends
    @classmethod
    def define_tables(cls, metadata):
        # a <- b <- (c1 <- d1 <- e1), b <- (c2 <- d2)
        Table('a', metadata,
              Column('id', Integer, primary_key=True)
              )
        Table('b', metadata,
              Column('id', Integer, primary_key=True),
              Column('a_id', Integer, ForeignKey('a.id')),
              Column('value', String(10)),
              )
        Table('c1', metadata,
              Column('id', Integer, primary_key=True),
              Column('b_id', Integer, ForeignKey('b.id')),
              Column('value', String(10)),
              )
        Table('c2', metadata,
              Column('id', Integer, primary_key=True),
              Column('b_id', Integer, ForeignKey('b.id')),
              Column('value', String(10)),
              )
        Table('d1', metadata,
              Column('id', Integer, primary_key=True),
              Column('c1_id', Integer, ForeignKey('c1.id')),
              Column('value', String(10)),
              )
        Table('d2', metadata,
              Column('id', Integer, primary_key=True),
              Column('c2_id', Integer, ForeignKey('c2.id')),
              Column('value', String(10)),
              )
        Table('e1', metadata,
              Column('id', Integer, primary_key=True),
              Column('d1_id', Integer, ForeignKey('d1.id')),
              Column('value', String(10)),
              )
    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass
        class B(cls.Comparable):
            pass
        class C1(cls.Comparable):
            pass
        class C2(cls.Comparable):
            pass
        class D1(cls.Comparable):
            pass
        class D2(cls.Comparable):
            pass
        class E1(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        """Map the graph; B's properties use ``odict`` so the c1s/c2s
        relationship order — and therefore the compiled join/column
        order asserted by the tests — is deterministic."""
        A, B, C1, C2, D1, D2, E1 = (
            cls.classes.A, cls.classes.B, cls.classes.C1,
            cls.classes.C2, cls.classes.D1, cls.classes.D2, cls.classes.E1)
        mapper(A, cls.tables.a, properties={
            'bs': relationship(B)
        })
        mapper(B, cls.tables.b, properties=odict([
            ('c1s', relationship(C1, order_by=cls.tables.c1.c.id)),
            ('c2s', relationship(C2, order_by=cls.tables.c2.c.id))
        ]))
        mapper(C1, cls.tables.c1, properties={
            'd1s': relationship(D1, order_by=cls.tables.d1.c.id)
        })
        mapper(C2, cls.tables.c2, properties={
            'd2s': relationship(D2, order_by=cls.tables.d2.c.id)
        })
        mapper(D1, cls.tables.d1, properties={
            'e1s': relationship(E1, order_by=cls.tables.e1.c.id)
        })
        mapper(D2, cls.tables.d2)
        mapper(E1, cls.tables.e1)
    @classmethod
    def _fixture_data(cls):
        """Canonical object graph; also used as the expected value when
        asserting fully eager-loaded query results."""
        A, B, C1, C2, D1, D2, E1 = (
            cls.classes.A, cls.classes.B, cls.classes.C1,
            cls.classes.C2, cls.classes.D1, cls.classes.D2, cls.classes.E1)
        return [
            A(id=1, bs=[
                B(
                    id=1,
                    c1s=[C1(
                        id=1, value='C11',
                        d1s=[
                            D1(id=1, e1s=[E1(id=1)]), D1(id=2, e1s=[E1(id=2)])
                        ]
                    )
                    ],
                    c2s=[C2(id=1, value='C21', d2s=[D2(id=3)]),
                         C2(id=2, value='C22', d2s=[D2(id=4)])]
                ),
                B(
                    id=2,
                    c1s=[
                        C1(
                            id=4, value='C14',
                            d1s=[D1(
                                id=3, e1s=[
                                    E1(id=3, value='E13'),
                                    E1(id=4, value="E14")
                                ]),
                                D1(id=4, e1s=[E1(id=5)])
                            ]
                        )
                    ],
                    c2s=[C2(id=4, value='C24', d2s=[])]
                ),
            ]),
            A(id=2, bs=[
                B(
                    id=3,
                    c1s=[
                        C1(
                            id=8,
                            d1s=[D1(id=5, value='D15', e1s=[E1(id=6)])]
                        )
                    ],
                    c2s=[C2(id=8, d2s=[D2(id=6, value='D26')])]
                )
            ])
        ]
    @classmethod
    def insert_data(cls):
        s = Session(testing.db)
        s.add_all(cls._fixture_data())
        s.commit()
    def _assert_result(self, query):
        # the query must reproduce the full fixture graph
        eq_(
            query.all(),
            self._fixture_data()
        )
    def test_nested_innerjoin_propagation_multiple_paths_one(self):
        """Inner joins on the c1s/d1s path are nested inside the outer
        bs join; the c2s inner join is spliced there too, while the
        outerjoin'ed d2s hop stays outside the parenthesized group."""
        A, B, C1, C2 = (
            self.classes.A, self.classes.B, self.classes.C1,
            self.classes.C2)
        s = Session()
        q = s.query(A).options(
            joinedload(A.bs, innerjoin=False).
            joinedload(B.c1s, innerjoin=True).
            joinedload(C1.d1s, innerjoin=True),
            defaultload(A.bs).joinedload(B.c2s, innerjoin=True).
            joinedload(C2.d2s, innerjoin=False)
        )
        self.assert_compile(
            q,
            "SELECT a.id AS a_id, d1_1.id AS d1_1_id, "
            "d1_1.c1_id AS d1_1_c1_id, d1_1.value AS d1_1_value, "
            "c1_1.id AS c1_1_id, c1_1.b_id AS c1_1_b_id, "
            "c1_1.value AS c1_1_value, d2_1.id AS d2_1_id, "
            "d2_1.c2_id AS d2_1_c2_id, d2_1.value AS d2_1_value, "
            "c2_1.id AS c2_1_id, c2_1.b_id AS c2_1_b_id, "
            "c2_1.value AS c2_1_value, b_1.id AS b_1_id, "
            "b_1.a_id AS b_1_a_id, b_1.value AS b_1_value "
            "FROM a "
            "LEFT OUTER JOIN "
            "(b AS b_1 JOIN c2 AS c2_1 ON b_1.id = c2_1.b_id "
            "JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id "
            "JOIN d1 AS d1_1 ON c1_1.id = d1_1.c1_id) ON a.id = b_1.a_id "
            "LEFT OUTER JOIN d2 AS d2_1 ON c2_1.id = d2_1.c2_id "
            "ORDER BY c1_1.id, d1_1.id, c2_1.id, d2_1.id"
        )
        self._assert_result(q)
    def test_nested_innerjoin_propagation_multiple_paths_two(self):
        # test #3447
        A = self.classes.A
        s = Session()
        q = s.query(A).options(
            joinedload('bs'),
            joinedload('bs.c2s', innerjoin=True),
            joinedload('bs.c1s', innerjoin=True),
            joinedload('bs.c1s.d1s')
        )
        self.assert_compile(
            q,
            "SELECT a.id AS a_id, d1_1.id AS d1_1_id, "
            "d1_1.c1_id AS d1_1_c1_id, d1_1.value AS d1_1_value, "
            "c1_1.id AS c1_1_id, c1_1.b_id AS c1_1_b_id, "
            "c1_1.value AS c1_1_value, c2_1.id AS c2_1_id, "
            "c2_1.b_id AS c2_1_b_id, c2_1.value AS c2_1_value, "
            "b_1.id AS b_1_id, b_1.a_id AS b_1_a_id, "
            "b_1.value AS b_1_value "
            "FROM a LEFT OUTER JOIN "
            "(b AS b_1 JOIN c2 AS c2_1 ON b_1.id = c2_1.b_id "
            "JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id) ON a.id = b_1.a_id "
            "LEFT OUTER JOIN d1 AS d1_1 ON c1_1.id = d1_1.c1_id "
            "ORDER BY c1_1.id, d1_1.id, c2_1.id"
        )
        self._assert_result(q)
    def test_multiple_splice_points(self):
        """Inner joins are spliced at two separate points in the outer
        join chain: b->c1/c2 inside the bs join, and d1->e1 inside the
        d1s join."""
        A = self.classes.A
        s = Session()
        q = s.query(A).options(
            joinedload('bs', innerjoin=False),
            joinedload('bs.c1s', innerjoin=True),
            joinedload('bs.c2s', innerjoin=True),
            joinedload('bs.c1s.d1s', innerjoin=False),
            joinedload('bs.c2s.d2s'),
            joinedload('bs.c1s.d1s.e1s', innerjoin=True)
        )
        self.assert_compile(
            q,
            "SELECT a.id AS a_id, e1_1.id AS e1_1_id, "
            "e1_1.d1_id AS e1_1_d1_id, e1_1.value AS e1_1_value, "
            "d1_1.id AS d1_1_id, d1_1.c1_id AS d1_1_c1_id, "
            "d1_1.value AS d1_1_value, c1_1.id AS c1_1_id, "
            "c1_1.b_id AS c1_1_b_id, c1_1.value AS c1_1_value, "
            "d2_1.id AS d2_1_id, d2_1.c2_id AS d2_1_c2_id, "
            "d2_1.value AS d2_1_value, c2_1.id AS c2_1_id, "
            "c2_1.b_id AS c2_1_b_id, c2_1.value AS c2_1_value, "
            "b_1.id AS b_1_id, b_1.a_id AS b_1_a_id, b_1.value AS b_1_value "
            "FROM a LEFT OUTER JOIN "
            "(b AS b_1 JOIN c2 AS c2_1 ON b_1.id = c2_1.b_id "
            "JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id) ON a.id = b_1.a_id "
            "LEFT OUTER JOIN ("
            "d1 AS d1_1 JOIN e1 AS e1_1 ON d1_1.id = e1_1.d1_id) "
            "ON c1_1.id = d1_1.c1_id "
            "LEFT OUTER JOIN d2 AS d2_1 ON c2_1.id = d2_1.c2_id "
            "ORDER BY c1_1.id, d1_1.id, e1_1.id, c2_1.id, d2_1.id"
        )
        self._assert_result(q)
    def test_splice_onto_np_mapper(self):
        """An inner join configured on a non-primary mapper (mapped to
        an outerjoin selectable) still splices into the eager-load
        outer join correctly."""
        A = self.classes.A
        B = self.classes.B
        C1 = self.classes.C1
        b_table = self.tables.b
        c1_table = self.tables.c1
        from sqlalchemy import inspect
        weird_selectable = b_table.outerjoin(c1_table)
        b_np = mapper(
            B, weird_selectable, non_primary=True, properties=odict([
                # note we need to make this fixed with lazy=False until
                # [ticket:3348] is resolved
                ('c1s', relationship(C1, lazy=False, innerjoin=True)),
                ('c_id', c1_table.c.id),
                ('b_value', b_table.c.value),
            ])
        )
        a_mapper = inspect(A)
        a_mapper.add_property(
            "bs_np", relationship(b_np)
        )
        s = Session()
        q = s.query(A).options(
            joinedload('bs_np', innerjoin=False)
        )
        self.assert_compile(
            q,
            "SELECT a.id AS a_id, c1_1.id AS c1_1_id, c1_1.b_id AS c1_1_b_id, "
            "c1_1.value AS c1_1_value, c1_2.id AS c1_2_id, "
            "b_1.value AS b_1_value, b_1.id AS b_1_id, "
            "b_1.a_id AS b_1_a_id, c1_2.b_id AS c1_2_b_id, "
            "c1_2.value AS c1_2_value "
            "FROM a LEFT OUTER JOIN "
            "(b AS b_1 LEFT OUTER JOIN c1 AS c1_2 ON b_1.id = c1_2.b_id "
            "JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id) ON a.id = b_1.a_id"
        )
class InnerJoinSplicingWSecondaryTest(
        fixtures.MappedTest, testing.AssertsCompiledSQL):
    """Inner-join splicing where the innermost eager load traverses a
    many-to-many ``secondary`` table (c -> ctod -> d), nested inside
    an outer joined eager load chain a -> b -> c.
    """
    __dialect__ = 'default'
    __backend__ = True  # exercise hardcore join nesting on backends
    @classmethod
    def define_tables(cls, metadata):
        Table(
            'a', metadata,
            Column('id', Integer, primary_key=True),
            Column('bid', ForeignKey('b.id'))
        )
        Table(
            'b', metadata,
            Column('id', Integer, primary_key=True),
            Column('cid', ForeignKey('c.id'))
        )
        Table(
            'c', metadata,
            Column('id', Integer, primary_key=True),
        )
        # association table for the C<->D many-to-many
        Table('ctod', metadata,
              Column('cid', ForeignKey('c.id'), primary_key=True),
              Column('did', ForeignKey('d.id'), primary_key=True),
              )
        Table('d', metadata,
              Column('id', Integer, primary_key=True),
              )
    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass
        class B(cls.Comparable):
            pass
        class C(cls.Comparable):
            pass
        class D(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        A, B, C, D = (
            cls.classes.A, cls.classes.B, cls.classes.C,
            cls.classes.D)
        mapper(A, cls.tables.a, properties={
            'b': relationship(B)
        })
        mapper(B, cls.tables.b, properties=odict([
            ('c', relationship(C)),
        ]))
        mapper(C, cls.tables.c, properties=odict([
            ('ds', relationship(D, secondary=cls.tables.ctod,
                                order_by=cls.tables.d.c.id)),
        ]))
        mapper(D, cls.tables.d)
    @classmethod
    def _fixture_data(cls):
        """Canonical object graph; d2 is shared between both C rows."""
        A, B, C, D = (
            cls.classes.A, cls.classes.B, cls.classes.C,
            cls.classes.D)
        d1, d2, d3 = D(id=1), D(id=2), D(id=3)
        return [
            A(
                id=1,
                b=B(
                    id=1,
                    c=C(
                        id=1,
                        ds=[d1, d2]
                    )
                )
            ),
            A(
                id=2,
                b=B(
                    id=2,
                    c=C(
                        id=2,
                        ds=[d2, d3]
                    )
                )
            )
        ]
    @classmethod
    def insert_data(cls):
        s = Session(testing.db)
        s.add_all(cls._fixture_data())
        s.commit()
    def _assert_result(self, query):
        # full graph must load in exactly one statement
        def go():
            eq_(
                query.all(),
                self._fixture_data()
            )
        self.assert_sql_count(
            testing.db,
            go,
            1
        )
    def test_joined_across(self):
        """The secondary (ctod) join nests with the c join, and the d
        join follows within the same parenthesized inner group."""
        A = self.classes.A
        s = Session()
        q = s.query(A) \
            .options(
                joinedload('b').
                joinedload('c', innerjoin=True).
                joinedload('ds', innerjoin=True))
        self.assert_compile(
            q,
            "SELECT a.id AS a_id, a.bid AS a_bid, d_1.id AS d_1_id, "
            "c_1.id AS c_1_id, b_1.id AS b_1_id, b_1.cid AS b_1_cid "
            "FROM a LEFT OUTER JOIN "
            "(b AS b_1 JOIN "
            "(c AS c_1 JOIN ctod AS ctod_1 ON c_1.id = ctod_1.cid) "
            "ON c_1.id = b_1.cid "
            "JOIN d AS d_1 ON d_1.id = ctod_1.did) ON b_1.id = a.bid "
            "ORDER BY d_1.id"
        )
        self._assert_result(q)
class SubqueryAliasingTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
    """test #2188 -- when a joined eager load combines with LIMIT, the
    base query is wrapped in a subquery; ORDER BY expressions that
    reference column properties or scalar subqueries must be adapted
    to the anonymized columns of that wrapping subquery.
    """
    __dialect__ = 'default'
    run_create_tables = None
    @classmethod
    def define_tables(cls, metadata):
        Table('a', metadata,
              Column('id', Integer, primary_key=True)
              )
        Table('b', metadata,
              Column('id', Integer, primary_key=True),
              Column('a_id', Integer, ForeignKey('a.id')),
              Column('value', Integer),
              )
    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass
        class B(cls.Comparable):
            pass
    def _fixture(self, props):
        """Map A with the given extra properties and B with an A
        many-to-one plus the ``bs`` backref used by the tests."""
        A, B = self.classes.A, self.classes.B
        b_table, a_table = self.tables.b, self.tables.a
        mapper(A, a_table, properties=props)
        mapper(B, b_table, properties={
            'a': relationship(A, backref="bs")
        })
    def test_column_property(self):
        """ORDER BY a mapped column_property is adapted to the
        subquery's anon label."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id)
        self._fixture({
            'summation': column_property(cp)
        })
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(A.summation).
            limit(50),
            "SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.a_id "
            "AS anon_1_a_id, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT "
            "(SELECT sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) "
            "AS anon_2, a.id AS a_id FROM a ORDER BY anon_2 "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 ON "
            "anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2"
        )
    def test_column_property_desc(self):
        """Same as test_column_property but DESC modifier is carried
        through to both ORDER BY clauses."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id)
        self._fixture({
            'summation': column_property(cp)
        })
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(A.summation.desc()).
            limit(50),
            "SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.a_id "
            "AS anon_1_a_id, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT "
            "(SELECT sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) "
            "AS anon_2, a.id AS a_id FROM a ORDER BY anon_2 DESC "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 ON "
            "anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2 DESC"
        )
    def test_column_property_correlated(self):
        """An explicitly correlated column_property adapts the same
        way as the implicitly correlated one."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id).\
            correlate(a_table)
        self._fixture({
            'summation': column_property(cp)
        })
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(A.summation).
            limit(50),
            "SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.a_id "
            "AS anon_1_a_id, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT "
            "(SELECT sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) "
            "AS anon_2, a.id AS a_id FROM a ORDER BY anon_2 "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 ON "
            "anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2"
        )
    def test_standalone_subquery_unlabeled(self):
        """A free-standing, unlabeled scalar subquery in ORDER BY is
        still adapted onto the wrapping subquery's anon column."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        self._fixture({})
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id).\
            correlate(a_table).as_scalar()
        # up until 0.8, this was ordering by a new subquery.
        # the removal of a separate _make_proxy() from ScalarSelect
        # fixed that.
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(cp).
            limit(50),
            "SELECT anon_1.a_id AS anon_1_a_id, anon_1.anon_2 "
            "AS anon_1_anon_2, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT a.id "
            "AS a_id, (SELECT sum(b.value) AS sum_1 FROM b WHERE "
            "b.a_id = a.id) AS anon_2 FROM a ORDER BY (SELECT "
            "sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 "
            "ON anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2"
        )
    def test_standalone_subquery_labeled(self):
        """A labeled scalar subquery keeps its given label ('foo')
        through the subquery wrapping."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        self._fixture({})
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id).\
            correlate(a_table).as_scalar().label('foo')
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(cp).
            limit(50),
            "SELECT anon_1.a_id AS anon_1_a_id, anon_1.foo "
            "AS anon_1_foo, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT a.id "
            "AS a_id, (SELECT sum(b.value) AS sum_1 FROM b WHERE "
            "b.a_id = a.id) AS foo FROM a ORDER BY foo "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 "
            "ON anon_1.a_id = b_1.a_id ORDER BY "
            "anon_1.foo"
        )
    def test_standalone_negated(self):
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        self._fixture({})
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id).\
            correlate(a_table).\
            as_scalar()
        # test a different unary operator
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(~cp).
            limit(50),
            "SELECT anon_1.a_id AS anon_1_a_id, anon_1.anon_2 "
            "AS anon_1_anon_2, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT a.id "
            "AS a_id, NOT (SELECT sum(b.value) AS sum_1 FROM b "
            "WHERE b.a_id = a.id) FROM a ORDER BY NOT (SELECT "
            "sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 "
            "ON anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2"
        )
class LoadOnExistingTest(_fixtures.FixtureTest):
    """test that loaders from a base Query fully populate.

    Each test loads objects into the identity map first, then runs a
    second query with eager-load options and asserts which attributes
    end up populated on the already-present instances.
    """
    run_inserts = 'once'
    run_deletes = None
    def _collection_to_scalar_fixture(self):
        """User.addresses (collection) -> Address.dingaling (scalar)."""
        User, Address, Dingaling = self.classes.User, \
            self.classes.Address, self.classes.Dingaling
        mapper(User, self.tables.users, properties={
            'addresses': relationship(Address),
        })
        mapper(Address, self.tables.addresses, properties={
            'dingaling': relationship(Dingaling)
        })
        mapper(Dingaling, self.tables.dingalings)
        sess = Session(autoflush=False)
        return User, Address, Dingaling, sess
    def _collection_to_collection_fixture(self):
        """User.orders (collection) -> Order.items (m2m collection)."""
        User, Order, Item = self.classes.User, \
            self.classes.Order, self.classes.Item
        mapper(User, self.tables.users, properties={
            'orders': relationship(Order),
        })
        mapper(Order, self.tables.orders, properties={
            'items': relationship(Item, secondary=self.tables.order_items),
        })
        mapper(Item, self.tables.items)
        sess = Session(autoflush=False)
        return User, Order, Item, sess
    def _eager_config_fixture(self):
        """User.addresses configured lazy='joined' at the mapper level."""
        User, Address = self.classes.User, self.classes.Address
        mapper(User, self.tables.users, properties={
            'addresses': relationship(Address, lazy="joined"),
        })
        mapper(Address, self.tables.addresses)
        sess = Session(autoflush=False)
        return User, Address, sess
    def test_no_query_on_refresh(self):
        """Refreshing an expired instance does not joined-eager-load
        the collection; 'addresses' stays unloaded after the single
        refresh statement."""
        User, Address, sess = self._eager_config_fixture()
        u1 = sess.query(User).get(8)
        assert 'addresses' in u1.__dict__
        sess.expire(u1)
        def go():
            eq_(u1.id, 8)
        self.assert_sql_count(testing.db, go, 1)
        assert 'addresses' not in u1.__dict__
    def test_loads_second_level_collection_to_scalar(self):
        """A second query with joinedload_all populates 'dingaling' on
        persistent Address members but leaves the pending one (a1)
        untouched; pending attribute changes (a2) survive."""
        User, Address, Dingaling, sess = self._collection_to_scalar_fixture()
        u1 = sess.query(User).get(8)
        a1 = Address()
        u1.addresses.append(a1)
        a2 = u1.addresses[0]
        a2.email_address = 'foo'
        sess.query(User).options(joinedload_all("addresses.dingaling")).\
            filter_by(id=8).all()
        assert u1.addresses[-1] is a1
        for a in u1.addresses:
            if a is not a1:
                assert 'dingaling' in a.__dict__
            else:
                assert 'dingaling' not in a.__dict__
            if a is a2:
                eq_(a2.email_address, 'foo')
    def test_loads_second_level_collection_to_collection(self):
        """Same as the scalar variant but for a nested collection
        ('items') on persistent vs. pending Order members."""
        User, Order, Item, sess = self._collection_to_collection_fixture()
        u1 = sess.query(User).get(7)
        u1.orders
        o1 = Order()
        u1.orders.append(o1)
        sess.query(User).options(joinedload_all("orders.items")).\
            filter_by(id=7).all()
        for o in u1.orders:
            if o is not o1:
                assert 'items' in o.__dict__
            else:
                assert 'items' not in o.__dict__
    def test_load_two_levels_collection_to_scalar(self):
        """A deeper joinedload in a later query populates the second
        level on instances loaded by an earlier, shallower query."""
        User, Address, Dingaling, sess = self._collection_to_scalar_fixture()
        u1 = sess.query(User).filter_by(
            id=8).options(
            joinedload("addresses")).one()
        sess.query(User).filter_by(
            id=8).options(
            joinedload_all("addresses.dingaling")).first()
        assert 'dingaling' in u1.addresses[0].__dict__
    def test_load_two_levels_collection_to_collection(self):
        """Collection-to-collection variant of the two-level test."""
        User, Order, Item, sess = self._collection_to_collection_fixture()
        u1 = sess.query(User).filter_by(
            id=7).options(
            joinedload("orders")).one()
        sess.query(User).filter_by(
            id=7).options(
            joinedload_all("orders.items")).first()
        assert 'items' in u1.orders[0].__dict__
class AddEntityTest(_fixtures.FixtureTest):
    """Joined eager loading against multi-entity queries, e.g.
    ``query(User, oalias)`` with an aliased Order, both via mapper
    configuration and via query options.
    """
    run_inserts = 'once'
    run_deletes = None
    def _assert_result(self):
        """Expected (User, Order) row tuples for the join of users to
        their orders, ordered by user id then order id."""
        Item, Address, Order, User = (self.classes.Item,
                                      self.classes.Address,
                                      self.classes.Order,
                                      self.classes.User)
        return [
            (
                User(id=7,
                     addresses=[Address(id=1)]
                     ),
                Order(id=1,
                      items=[Item(id=1), Item(id=2), Item(id=3)]
                      ),
            ),
            (
                User(id=7,
                     addresses=[Address(id=1)]
                     ),
                Order(id=3,
                      items=[Item(id=3), Item(id=4), Item(id=5)]
                      ),
            ),
            (
                User(id=7,
                     addresses=[Address(id=1)]
                     ),
                Order(id=5,
                      items=[Item(id=5)]
                      ),
            ),
            (
                User(id=9,
                     addresses=[Address(id=5)]
                     ),
                Order(id=2,
                      items=[Item(id=1), Item(id=2), Item(id=3)]
                      ),
            ),
            (
                User(id=9,
                     addresses=[Address(id=5)]
                     ),
                Order(id=4,
                      items=[Item(id=1), Item(id=5)]
                      ),
            )
        ]
    def test_mapper_configured(self):
        """With lazy='joined' configured on the mappers, the two-entity
        query loads everything in one statement."""
        users, items, order_items, Order, \
            Item, User, Address, orders, addresses = (
                self.tables.users,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.tables.orders,
                self.tables.addresses)
        mapper(User, users, properties={
            'addresses': relationship(Address, lazy='joined'),
            'orders': relationship(Order)
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items': relationship(
                Item, secondary=order_items, lazy='joined',
                order_by=items.c.id)
        })
        mapper(Item, items)
        sess = create_session()
        oalias = sa.orm.aliased(Order)
        def go():
            ret = sess.query(User, oalias).join(oalias, 'orders').\
                order_by(User.id, oalias.id).all()
            eq_(ret, self._assert_result())
        self.assert_sql_count(testing.db, go, 1)
    def test_options(self):
        """With lazy loaders configured, a joinedload option on only
        'addresses' still lazy-loads items (6 statements); adding a
        joinedload on the alias' items brings it down to 1."""
        users, items, order_items, Order,\
            Item, User, Address, orders, addresses = (
                self.tables.users,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.tables.orders,
                self.tables.addresses)
        mapper(User, users, properties={
            'addresses': relationship(Address),
            'orders': relationship(Order)
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items': relationship(
                Item, secondary=order_items, order_by=items.c.id)
        })
        mapper(Item, items)
        sess = create_session()
        oalias = sa.orm.aliased(Order)
        def go():
            ret = sess.query(User, oalias).options(joinedload('addresses')).\
                join(oalias, 'orders').\
                order_by(User.id, oalias.id).all()
            eq_(ret, self._assert_result())
        self.assert_sql_count(testing.db, go, 6)
        sess.expunge_all()
        def go():
            ret = sess.query(User, oalias).\
                options(joinedload('addresses'),
                        joinedload(oalias.items)).\
                join(oalias, 'orders').\
                order_by(User.id, oalias.id).all()
            eq_(ret, self._assert_result())
        self.assert_sql_count(testing.db, go, 1)
class OrderBySecondaryTest(fixtures.MappedTest):
    """Joined eager load of a many-to-many collection ordered by a
    column on the association (secondary) table itself.
    """
    @classmethod
    def define_tables(cls, metadata):
        # m2m.id provides the ordering the test relies on
        Table('m2m', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('aid', Integer, ForeignKey('a.id')),
              Column('bid', Integer, ForeignKey('b.id')))
        Table('a', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('data', String(50)))
        Table('b', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('data', String(50)))
    @classmethod
    def fixtures(cls):
        # m2m rows are inserted with ids out of (aid, bid) order so the
        # ORDER BY m2m.id result differs from natural b ordering
        return dict(
            a=(('id', 'data'),
               (1, 'a1'),
               (2, 'a2')),
            b=(('id', 'data'),
               (1, 'b1'),
               (2, 'b2'),
               (3, 'b3'),
               (4, 'b4')),
            m2m=(('id', 'aid', 'bid'),
                 (2, 1, 1),
                 (4, 2, 4),
                 (1, 1, 3),
                 (6, 2, 2),
                 (3, 1, 2),
                 (5, 2, 3)))
    def test_ordering(self):
        """Collections come back ordered by m2m.id, not by b.id."""
        a, m2m, b = (
            self.tables.a,
            self.tables.m2m,
            self.tables.b)
        class A(fixtures.ComparableEntity):
            pass
        class B(fixtures.ComparableEntity):
            pass
        mapper(A, a, properties={
            'bs': relationship(
                B, secondary=m2m, lazy='joined', order_by=m2m.c.id)
        })
        mapper(B, b)
        sess = create_session()
        eq_(sess.query(A).all(),
            [
                A(data='a1', bs=[B(data='b3'), B(data='b1'), B(data='b2')]),
                A(bs=[B(data='b4'), B(data='b3'), B(data='b2')])
            ])
class SelfReferentialEagerTest(fixtures.MappedTest):
    """Joined eager loading on a self-referential (adjacency list)
    relationship, exercising ``join_depth``, deferred columns, and
    option-driven depth.
    """
    @classmethod
    def define_tables(cls, metadata):
        Table('nodes', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('parent_id', Integer, ForeignKey('nodes.id')),
              Column('data', String(30)))
    def test_basic(self):
        """join_depth=3 eager-loads a two-level child tree in a single
        statement, for both .all() and .first()."""
        nodes = self.tables.nodes
        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)
        mapper(Node, nodes, properties={
            'children': relationship(Node,
                                     lazy='joined',
                                     join_depth=3, order_by=nodes.c.id)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()
        def go():
            d = sess.query(Node).filter_by(data='n1').all()[0]
            eq_(Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]), d)
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()
        def go():
            d = sess.query(Node).filter_by(data='n1').first()
            eq_(Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]), d)
        self.assert_sql_count(testing.db, go, 1)
    def test_lazy_fallback_doesnt_affect_eager(self):
        """With join_depth=1, nodes whose children weren't in the eager
        columns still get eagerly-initialized collections when they
        appear again at the top level of the result (see the long
        comment below)."""
        nodes = self.tables.nodes
        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)
        mapper(Node, nodes, properties={
            'children': relationship(Node, lazy='joined', join_depth=1,
                                     order_by=nodes.c.id)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()
        # eager load with join depth 1.  when eager load of 'n1' hits the
        # children of 'n12', no columns are present, eager loader degrades to
        # lazy loader; fine.  but then, 'n12' is *also* in the first level of
        # columns since we're loading the whole table.  when those rows
        # arrive, now we *can* eager load its children and an eager collection
        # should be initialized.  essentially the 'n12' instance is present in
        # not just two different rows but two distinct sets of columns in this
        # result set.
        def go():
            allnodes = sess.query(Node).order_by(Node.data).all()
            n12 = allnodes[2]
            eq_(n12.data, 'n12')
            eq_([
                Node(data='n121'),
                Node(data='n122'),
                Node(data='n123')
            ], list(n12.children))
        self.assert_sql_count(testing.db, go, 1)
    def test_with_deferred(self):
        """Deferred 'data' column combined with eager children: query
        counts drop from 4 to 3 to 1 as undefer options are added."""
        nodes = self.tables.nodes
        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)
        mapper(Node, nodes, properties={
            'children': relationship(Node, lazy='joined', join_depth=3,
                                     order_by=nodes.c.id),
            'data': deferred(nodes.c.data)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()
        def go():
            eq_(
                Node(data='n1', children=[Node(data='n11'), Node(data='n12')]),
                sess.query(Node).order_by(Node.id).first(),
            )
        self.assert_sql_count(testing.db, go, 4)
        sess.expunge_all()
        def go():
            eq_(Node(data='n1', children=[Node(data='n11'), Node(data='n12')]),
                sess.query(Node).
                options(undefer('data')).order_by(Node.id).first())
        self.assert_sql_count(testing.db, go, 3)
        sess.expunge_all()
        def go():
            eq_(Node(data='n1', children=[Node(data='n11'), Node(data='n12')]),
                sess.query(Node).options(undefer('data'),
                                         undefer('children.data')).first())
        self.assert_sql_count(testing.db, go, 1)
    def test_options(self):
        """joinedload('children.children') option eager-loads two
        levels over a lazy-configured relationship; also verifies the
        LIMIT query is not wrapped in a subquery."""
        nodes = self.tables.nodes
        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)
        mapper(Node, nodes, properties={
            'children': relationship(Node, lazy='select', order_by=nodes.c.id)
        }, order_by=nodes.c.id)
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()
        def go():
            d = sess.query(Node).filter_by(data='n1').\
                options(joinedload('children.children')).first()
            eq_(Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]), d)
        self.assert_sql_count(testing.db, go, 2)
        def go():
            sess.query(Node).filter_by(data='n1').\
                options(joinedload('children.children')).first()
        # test that the query isn't wrapping the initial query for eager
        # loading.
        self.assert_sql_execution(
            testing.db, go,
            CompiledSQL(
                "SELECT nodes.id AS nodes_id, nodes.parent_id AS "
                "nodes_parent_id, nodes.data AS nodes_data FROM nodes "
                "WHERE nodes.data = :data_1 ORDER BY nodes.id LIMIT :param_1",
                {'data_1': 'n1'}
            )
        )
    def test_no_depth(self):
        """With no join_depth, eager loading stops recursing and falls
        back to lazy loads for deeper levels (3 statements total)."""
        nodes = self.tables.nodes
        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)
        mapper(Node, nodes, properties={
            'children': relationship(Node, lazy='joined')
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()
        def go():
            d = sess.query(Node).filter_by(data='n1').first()
            eq_(Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]), d)
        self.assert_sql_count(testing.db, go, 3)
class MixedSelfReferentialEagerTest(fixtures.MappedTest):
    """Joined eager loading of two distinct self-referential
    relationships on the same class (``parent_b1`` / ``parent_b2``,
    each with its own primaryjoin) plus a many-to-one to a second
    class, combined via joinedload options in a single query.
    """
    @classmethod
    def define_tables(cls, metadata):
        # b_table carries two self-referential FKs plus an FK to a_table
        Table('a_table', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True)
              )
        Table('b_table', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('parent_b1_id', Integer, ForeignKey('b_table.id')),
              Column('parent_a_id', Integer, ForeignKey('a_table.id')),
              Column('parent_b2_id', Integer, ForeignKey('b_table.id')))
    @classmethod
    def setup_mappers(cls):
        """Map B with two self-referential many-to-ones distinguished
        by explicit primaryjoins, and a lazy many-to-one to A."""
        b_table, a_table = cls.tables.b_table, cls.tables.a_table
        class A(cls.Comparable):
            pass
        class B(cls.Comparable):
            pass
        mapper(A, a_table)
        mapper(B, b_table, properties={
            'parent_b1': relationship(
                B,
                remote_side=[b_table.c.id],
                primaryjoin=(b_table.c.parent_b1_id == b_table.c.id),
                order_by=b_table.c.id
            ),
            'parent_z': relationship(A, lazy=True),
            'parent_b2': relationship(
                B,
                remote_side=[b_table.c.id],
                primaryjoin=(b_table.c.parent_b2_id == b_table.c.id),
                # fixed PEP 8 E251: no spaces around '=' in keyword args,
                # matching the parallel parent_b1 relationship above
                order_by=b_table.c.id
            )
        })
    @classmethod
    def insert_data(cls):
        b_table, a_table = cls.tables.b_table, cls.tables.a_table
        a_table.insert().execute(dict(id=1), dict(id=2), dict(id=3))
        b_table.insert().execute(
            dict(id=1, parent_a_id=2, parent_b1_id=None, parent_b2_id=None),
            dict(id=2, parent_a_id=1, parent_b1_id=1, parent_b2_id=None),
            dict(id=3, parent_a_id=1, parent_b1_id=1, parent_b2_id=2),
            dict(id=4, parent_a_id=3, parent_b1_id=1, parent_b2_id=None),
            dict(id=5, parent_a_id=3, parent_b1_id=None, parent_b2_id=2),
            dict(id=6, parent_a_id=1, parent_b1_id=1, parent_b2_id=3),
            dict(id=7, parent_a_id=2, parent_b1_id=None, parent_b2_id=3),
            dict(id=8, parent_a_id=2, parent_b1_id=1, parent_b2_id=2),
            dict(id=9, parent_a_id=None, parent_b1_id=1, parent_b2_id=None),
            dict(id=10, parent_a_id=3, parent_b1_id=7, parent_b2_id=2),
            dict(id=11, parent_a_id=3, parent_b1_id=1, parent_b2_id=8),
            dict(id=12, parent_a_id=2, parent_b1_id=5, parent_b2_id=2),
            dict(id=13, parent_a_id=3, parent_b1_id=4, parent_b2_id=4),
            dict(id=14, parent_a_id=3, parent_b1_id=7, parent_b2_id=2),
        )
    def test_eager_load(self):
        """All three relationships load via joinedload in exactly one
        statement; NULL FKs produce None parents."""
        A, B = self.classes.A, self.classes.B
        session = create_session()
        def go():
            eq_(
                session.query(B).
                options(
                    joinedload('parent_b1'),
                    joinedload('parent_b2'),
                    joinedload('parent_z')
                ).
                filter(B.id.in_([2, 8, 11])).order_by(B.id).all(),
                [
                    B(id=2,
                      parent_z=A(id=1),
                      parent_b1=B(id=1),
                      parent_b2=None),
                    B(id=8,
                      parent_z=A(id=2),
                      parent_b1=B(id=1),
                      parent_b2=B(id=2)),
                    B(id=11,
                      parent_z=A(id=3),
                      parent_b1=B(id=1),
                      parent_b2=B(id=8))
                ]
            )
        self.assert_sql_count(testing.db, go, 1)
class SelfReferentialM2MEagerTest(fixtures.MappedTest):
    """Joined eager loading over a self-referential many-to-many
    ('widget' joined to itself through the 'widget_rel' secondary table)."""

    @classmethod
    def define_tables(cls, metadata):
        Table('widget', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('name', sa.String(40), nullable=False, unique=True),
              )
        Table('widget_rel', metadata,
              Column('parent_id', Integer, ForeignKey('widget.id')),
              Column('child_id', Integer, ForeignKey('widget.id')),
              sa.UniqueConstraint('parent_id', 'child_id'),
              )

    def test_basic(self):
        widget, widget_rel = self.tables.widget, self.tables.widget_rel

        class Widget(fixtures.ComparableEntity):
            pass

        mapper(Widget, widget, properties={
            # join_depth=1 bounds the self-referential eager join to one
            # level; explicit primary/secondaryjoin disambiguate the two
            # FKs in the secondary table
            'children': relationship(
                Widget, secondary=widget_rel,
                primaryjoin=widget_rel.c.parent_id == widget.c.id,
                secondaryjoin=widget_rel.c.child_id == widget.c.id,
                lazy='joined', join_depth=1,
            )
        })
        sess = create_session()
        w1 = Widget(name='w1')
        w2 = Widget(name='w2')
        w1.children.append(w2)
        sess.add(w1)
        sess.flush()
        sess.expunge_all()
        eq_([Widget(name='w1', children=[Widget(name='w2')])],
            sess.query(Widget).filter(Widget.name == 'w1').all())
class MixedEntitiesTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL):
    """Joined eager loading when the query returns multiple entities per
    row (e.g. ``query(User, Order)``), including aliased entities."""

    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None
    __dialect__ = 'default'
    __prefer_backends__ = ('postgresql', 'mysql', 'oracle')

    @classmethod
    def setup_mappers(cls):
        users, Keyword, items, order_items, orders, \
            Item, User, Address, keywords, Order, \
            item_keywords, addresses = (
                cls.tables.users,
                cls.classes.Keyword,
                cls.tables.items,
                cls.tables.order_items,
                cls.tables.orders,
                cls.classes.Item,
                cls.classes.User,
                cls.classes.Address,
                cls.tables.keywords,
                cls.classes.Order,
                cls.tables.item_keywords,
                cls.tables.addresses)

        mapper(User, users, properties={
            'addresses': relationship(Address, backref='user'),
            'orders': relationship(Order, backref='user'),  # o2m, m2o
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items': relationship(
                Item, secondary=order_items, order_by=items.c.id),  # m2m
        })
        mapper(Item, items, properties={
            'keywords': relationship(Keyword, secondary=item_keywords)  # m2m
        })
        mapper(Keyword, keywords)

    def test_two_entities(self):
        """Both entities in a (User, Order) tuple get their joined eager
        loads applied within a single statement."""
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)
        sess = create_session()

        # two FROM clauses
        def go():
            eq_(
                [
                    (User(id=9, addresses=[Address(id=5)]),
                     Order(id=2, items=[
                         Item(id=1), Item(id=2), Item(id=3)])),
                    (User(id=9, addresses=[Address(id=5)]),
                     Order(id=4, items=[
                         Item(id=1), Item(id=5)])),
                ],
                sess.query(User, Order).filter(User.id == Order.user_id).
                options(joinedload(User.addresses), joinedload(Order.items)).
                filter(User.id == 9).
                order_by(User.id, Order.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

        # one FROM clause
        def go():
            eq_(
                [
                    (User(id=9, addresses=[Address(id=5)]),
                     Order(id=2, items=[
                         Item(id=1), Item(id=2), Item(id=3)])),
                    (User(id=9, addresses=[Address(id=5)]),
                     Order(id=4, items=[
                         Item(id=1), Item(id=5)])),
                ],
                sess.query(User, Order).join(User.orders).
                options(joinedload(User.addresses), joinedload(Order.items)).
                filter(User.id == 9).
                order_by(User.id, Order.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

    @testing.exclude(
        'sqlite', '>', (0, ), "sqlite flat out blows it on the multiple JOINs")
    def test_two_entities_with_joins(self):
        """Four entities per row (two of them aliased), each carrying its
        own joined eager loads, still in a single statement."""
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)
        sess = create_session()

        # two FROM clauses where there's a join on each one
        def go():
            u1 = aliased(User)
            o1 = aliased(Order)
            eq_(
                [
                    (
                        User(addresses=[
                            Address(email_address='fred@fred.com')],
                            name='fred'),
                        Order(description='order 2', isopen=0,
                              items=[
                                  Item(description='item 1'),
                                  Item(description='item 2'),
                                  Item(description='item 3')]),
                        User(addresses=[
                            Address(email_address='jack@bean.com')],
                            name='jack'),
                        Order(description='order 3', isopen=1,
                              items=[
                                  Item(description='item 3'),
                                  Item(description='item 4'),
                                  Item(description='item 5')])
                    ),
                    (
                        User(
                            addresses=[
                                Address(
                                    email_address='fred@fred.com')],
                            name='fred'),
                        Order(
                            description='order 2', isopen=0, items=[
                                Item(
                                    description='item 1'), Item(
                                    description='item 2'), Item(
                                    description='item 3')]),
                        User(
                            addresses=[
                                Address(
                                    email_address='jack@bean.com')],
                            name='jack'),
                        Order(
                            address_id=None,
                            description='order 5',
                            isopen=0,
                            items=[
                                Item(
                                    description='item 5')])
                    ),
                    (
                        User(
                            addresses=[
                                Address(
                                    email_address='fred@fred.com')],
                            name='fred'),
                        Order(
                            description='order 4', isopen=1, items=[
                                Item(
                                    description='item 1'), Item(
                                    description='item 5')]),
                        User(
                            addresses=[
                                Address(
                                    email_address='jack@bean.com')],
                            name='jack'),
                        Order(
                            address_id=None,
                            description='order 5',
                            isopen=0,
                            items=[
                                Item(
                                    description='item 5')])
                    ),
                ],
                sess.query(User, Order, u1, o1).
                join(Order, User.orders).
                options(joinedload(User.addresses),
                        joinedload(Order.items)).filter(User.id == 9).
                join(o1, u1.orders).
                options(joinedload(u1.addresses),
                        joinedload(o1.items)).filter(u1.id == 7).
                filter(Order.id < o1.id).
                order_by(User.id, Order.id, u1.id, o1.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_aliased_entity_one(self):
        """joinedload() against an aliased entity in the columns clause."""
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)
        sess = create_session()
        oalias = sa.orm.aliased(Order)

        # two FROM clauses
        def go():
            eq_(
                [
                    (
                        User(
                            id=9, addresses=[
                                Address(
                                    id=5)]), Order(
                            id=2, items=[
                                Item(
                                    id=1), Item(
                                    id=2), Item(
                                    id=3)])),
                    (User(id=9, addresses=[Address(id=5)]), Order(
                        id=4, items=[Item(id=1), Item(id=5)])),
                ],
                sess.query(User, oalias).filter(User.id == oalias.user_id).
                options(
                    joinedload(User.addresses),
                    joinedload(oalias.items)).filter(User.id == 9).
                order_by(User.id, oalias.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_aliased_entity_two(self):
        """Same as test_aliased_entity_one but joining through the
        relationship, producing one FROM clause."""
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)
        sess = create_session()
        oalias = sa.orm.aliased(Order)

        # one FROM clause
        def go():
            eq_(
                [
                    (
                        User(
                            id=9, addresses=[
                                Address(
                                    id=5)]), Order(
                            id=2, items=[
                                Item(
                                    id=1), Item(
                                    id=2), Item(
                                    id=3)])),
                    (User(id=9, addresses=[Address(id=5)]), Order(
                        id=4, items=[Item(id=1), Item(id=5)])),
                ],
                sess.query(User, oalias).join(oalias, User.orders).
                options(joinedload(User.addresses),
                        joinedload(oalias.items)).
                filter(User.id == 9).
                order_by(User.id, oalias.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_aliased_entity_three(self):
        Order, User = (
            self.classes.Order,
            self.classes.User)
        sess = create_session()
        oalias = sa.orm.aliased(Order)

        # improper setup: oalias in the columns clause but join to usual
        # orders alias. this should create two FROM clauses even though the
        # query has a from_clause set up via the join
        self.assert_compile(
            sess.query(User, oalias).join(User.orders).
            options(joinedload(oalias.items)).with_labels().statement,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen, items_1.id AS items_1_id, "
            "items_1.description AS items_1_description FROM users "
            "JOIN orders ON users.id = orders.user_id, "
            "orders AS orders_1 LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id ORDER BY items_1.id"
        )
class SubqueryTest(fixtures.MappedTest):
    """Joined eager loading combined with column_property() subqueries,
    exercising label-name collisions between parent and eager-load columns."""

    @classmethod
    def define_tables(cls, metadata):
        Table('users_table', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('name', String(16))
              )
        Table('tags_table', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('user_id', Integer, ForeignKey("users_table.id")),
              Column('score1', sa.Float),
              Column('score2', sa.Float),
              )

    def test_label_anonymizing(self):
        """Eager loading works with subqueries with labels,

        Even if an explicit labelname which conflicts with a label on the
        parent.

        There's not much reason a column_property() would ever need to have a
        label of a specific name (and they don't even need labels these days),
        unless you'd like the name to line up with a name that you may be
        using for a straight textual statement used for loading instances of
        that type.

        """
        tags_table, users_table = self.tables.tags_table, \
            self.tables.users_table

        class User(fixtures.ComparableEntity):
            # Python-side equivalent of the 'query_score' column_property,
            # used to cross-check the SQL-computed value
            @property
            def prop_score(self):
                return sum([tag.prop_score for tag in self.tags])

        class Tag(fixtures.ComparableEntity):
            @property
            def prop_score(self):
                return self.score1 * self.score2

        # run three variants: explicit label name, anonymous label,
        # and plain scalar subquery (no label)
        for labeled, labelname in [(True, 'score'), (True, None),
                                   (False, None)]:
            sa.orm.clear_mappers()

            tag_score = (tags_table.c.score1 * tags_table.c.score2)
            user_score = sa.select([sa.func.sum(tags_table.c.score1 *
                                                tags_table.c.score2)],
                                   tags_table.c.user_id == users_table.c.id)

            if labeled:
                tag_score = tag_score.label(labelname)
                user_score = user_score.label(labelname)
            else:
                user_score = user_score.as_scalar()

            mapper(Tag, tags_table, properties={
                'query_score': sa.orm.column_property(tag_score),
            })

            mapper(User, users_table, properties={
                'tags': relationship(Tag, backref='user', lazy='joined'),
                'query_score': sa.orm.column_property(user_score),
            })

            session = create_session()
            session.add(User(name='joe', tags=[Tag(score1=5.0, score2=3.0),
                                               Tag(score1=55.0, score2=1.0)]))
            session.add(User(name='bar', tags=[Tag(score1=5.0, score2=4.0),
                                               Tag(score1=50.0, score2=1.0),
                                               Tag(score1=15.0, score2=2.0)]))
            session.flush()
            session.expunge_all()

            # SQL-computed score must agree with the Python-side computation
            for user in session.query(User).all():
                eq_(user.query_score, user.prop_score)

            def go():
                u = session.query(User).filter_by(name='joe').one()
                eq_(u.query_score, u.prop_score)
            self.assert_sql_count(testing.db, go, 1)

            # wipe rows between iterations since mappers are recreated
            for t in (tags_table, users_table):
                t.delete().execute()
class CorrelatedSubqueryTest(fixtures.MappedTest):
    """tests for #946, #947, #948.

    The "users" table is joined to "stuff", and the relationship
    would like to pull only the "stuff" entry with the most recent date.

    Exercises a variety of ways to configure this.

    """

    # another argument for joinedload learning about inner joins
    __requires__ = ('correlated_outer_joins', )

    @classmethod
    def define_tables(cls, metadata):
        Table(
            'users', metadata,
            Column(
                'id',
                Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('name', String(50))
        )
        Table(
            'stuff', metadata,
            Column(
                'id',
                Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('date', Date),
            Column('user_id', Integer, ForeignKey('users.id')))

    @classmethod
    def insert_data(cls):
        stuff, users = cls.tables.stuff, cls.tables.users

        users.insert().execute(
            {'id': 1, 'name': 'user1'},
            {'id': 2, 'name': 'user2'},
            {'id': 3, 'name': 'user3'},
        )

        stuff.insert().execute(
            {'id': 1, 'user_id': 1, 'date': datetime.date(2007, 10, 15)},
            {'id': 2, 'user_id': 1, 'date': datetime.date(2007, 12, 15)},
            {'id': 3, 'user_id': 1, 'date': datetime.date(2007, 11, 15)},
            {'id': 4, 'user_id': 2, 'date': datetime.date(2008, 1, 15)},
            {'id': 5, 'user_id': 3, 'date': datetime.date(2007, 6, 15)},
            {'id': 6, 'user_id': 3, 'date': datetime.date(2007, 3, 15)},
        )

    # the twelve tests below run _do_test across the full matrix of
    # (labeled: 'label'/'scalar'/'none') x (join on max-date vs. on
    # limit-by-id) x (aliased vs. unaliased 'stuff' in the subquery)

    def test_labeled_on_date_noalias(self):
        self._do_test('label', True, False)

    def test_scalar_on_date_noalias(self):
        self._do_test('scalar', True, False)

    def test_plain_on_date_noalias(self):
        self._do_test('none', True, False)

    def test_labeled_on_limitid_noalias(self):
        self._do_test('label', False, False)

    def test_scalar_on_limitid_noalias(self):
        self._do_test('scalar', False, False)

    def test_plain_on_limitid_noalias(self):
        self._do_test('none', False, False)

    def test_labeled_on_date_alias(self):
        self._do_test('label', True, True)

    def test_scalar_on_date_alias(self):
        self._do_test('scalar', True, True)

    def test_plain_on_date_alias(self):
        self._do_test('none', True, True)

    def test_labeled_on_limitid_alias(self):
        self._do_test('label', False, True)

    def test_scalar_on_limitid_alias(self):
        self._do_test('scalar', False, True)

    def test_plain_on_limitid_alias(self):
        self._do_test('none', False, True)

    def _do_test(self, labeled, ondate, aliasstuff):
        """Configure the 'most recent stuff' relationship per the given
        variant and verify joined eager loading produces correct rows
        with the expected statement counts.

        :param labeled: 'label', 'scalar', or 'none' — how the correlated
            subquery is wrapped.
        :param ondate: if True, join on max(date); else join on an
            ORDER BY ... DESC LIMIT 1 id subquery.
        :param aliasstuff: if True, alias the 'stuff' table inside the
            correlated subquery.
        """
        stuff, users = self.tables.stuff, self.tables.users

        class User(fixtures.ComparableEntity):
            pass

        class Stuff(fixtures.ComparableEntity):
            pass

        mapper(Stuff, stuff)

        if aliasstuff:
            salias = stuff.alias()
        else:
            # if we don't alias the 'stuff' table within the correlated
            # subquery,
            # it gets aliased in the eager load along with the "stuff" table
            # to "stuff_1".
            # but it's a scalar subquery, and this doesn't actually matter
            salias = stuff

        if ondate:
            # the more 'relational' way to do this, join on the max date
            stuff_view = select([func.max(salias.c.date).label('max_date')]).\
                where(salias.c.user_id == users.c.id).correlate(users)
        else:
            # a common method with the MySQL crowd, which actually might
            # perform better in some
            # cases - subquery does a limit with order by DESC, join on the id
            stuff_view = select([salias.c.id]).\
                where(salias.c.user_id == users.c.id).\
                correlate(users).order_by(salias.c.date.desc()).limit(1)

        # can't win on this one
        if testing.against("mssql"):
            operator = operators.in_op
        else:
            operator = operators.eq

        if labeled == 'label':
            stuff_view = stuff_view.label('foo')
            operator = operators.eq
        elif labeled == 'scalar':
            stuff_view = stuff_view.as_scalar()

        if ondate:
            mapper(User, users, properties={
                'stuff': relationship(
                    Stuff,
                    primaryjoin=and_(users.c.id == stuff.c.user_id,
                                     operator(stuff.c.date, stuff_view)))
            })
        else:
            mapper(User, users, properties={
                'stuff': relationship(
                    Stuff,
                    primaryjoin=and_(users.c.id == stuff.c.user_id,
                                     operator(stuff.c.id, stuff_view)))
            })

        # eager load via option: single statement
        sess = create_session()

        def go():
            eq_(
                sess.query(User).order_by(User.name).options(
                    joinedload('stuff')).all(),
                [
                    User(name='user1', stuff=[Stuff(id=2)]),
                    User(name='user2', stuff=[Stuff(id=4)]),
                    User(name='user3', stuff=[Stuff(id=5)])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)

        # no eager load: lazy load adds a second statement
        sess = create_session()

        def go():
            eq_(
                sess.query(User).order_by(User.name).first(),
                User(name='user1', stuff=[Stuff(id=2)])
            )
        self.assert_sql_count(testing.db, go, 2)

        # eager load with first(): still a single statement
        sess = create_session()

        def go():
            eq_(
                sess.query(User).order_by(User.name).options(
                    joinedload('stuff')).first(),
                User(name='user1', stuff=[Stuff(id=2)])
            )
        self.assert_sql_count(testing.db, go, 1)

        sess = create_session()

        def go():
            eq_(
                sess.query(User).filter(User.id == 2).options(
                    joinedload('stuff')).one(),
                User(name='user2', stuff=[Stuff(id=4)])
            )
        self.assert_sql_count(testing.db, go, 1)
class CyclicalInheritingEagerTestOne(fixtures.MappedTest):
    """Eager loading across a mutually-referential pair of joined eager
    relationships between two polymorphic hierarchies; guards against an
    endless loop during eager load setup."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            't1', metadata,
            Column(
                'c1', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('c2', String(30)),
            Column('type', String(30))
        )
        Table('t2', metadata,
              Column('c1', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('c2', String(30)),
              Column('type', String(30)),
              # NOTE: column deliberately named with a dot in it
              Column('t1.id', Integer, ForeignKey('t1.c1')))

    def test_basic(self):
        t2, t1 = self.tables.t2, self.tables.t1

        class T(object):
            pass

        class SubT(T):
            pass

        class T2(object):
            pass

        class SubT2(T2):
            pass

        mapper(T, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1')
        # single-table-inheritance subclass carrying a joined-eager backref
        # pair ('t2s' <-> 'subt') between the two hierarchies
        mapper(
            SubT, None, inherits=T, polymorphic_identity='subt1',
            properties={
                't2s': relationship(
                    SubT2, lazy='joined',
                    backref=sa.orm.backref('subt', lazy='joined'))
            })
        mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity='t2')
        mapper(SubT2, None, inherits=T2, polymorphic_identity='subt2')

        # testing a particular endless loop condition in eager load setup
        create_session().query(SubT).all()
class CyclicalInheritingEagerTestTwo(fixtures.DeclarativeMappedTest,
                                     testing.AssertsCompiledSQL):
    """Wildcard joinedload('*') against a joined-inheritance subclass that
    references another subclass of the same base (declarative form)."""

    __dialect__ = 'default'

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class PersistentObject(Base):
            __tablename__ = 'persistent'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)

        class Movie(PersistentObject):
            __tablename__ = 'movie'
            id = Column(Integer, ForeignKey('persistent.id'), primary_key=True)
            director_id = Column(Integer, ForeignKey('director.id'))
            title = Column(String(50))

        class Director(PersistentObject):
            __tablename__ = 'director'
            id = Column(Integer, ForeignKey('persistent.id'), primary_key=True)
            movies = relationship("Movie", foreign_keys=Movie.director_id)
            name = Column(String(50))

    def test_from_subclass(self):
        """The joined-inheritance target is aliased as a (persistent JOIN
        movie) right-nested join inside the eager LEFT OUTER JOIN."""
        Director = self.classes.Director

        s = create_session()

        self.assert_compile(
            s.query(Director).options(joinedload('*')),
            "SELECT director.id AS director_id, "
            "persistent.id AS persistent_id, "
            "director.name AS director_name, movie_1.id AS movie_1_id, "
            "persistent_1.id AS persistent_1_id, "
            "movie_1.director_id AS movie_1_director_id, "
            "movie_1.title AS movie_1_title "
            "FROM persistent JOIN director ON persistent.id = director.id "
            "LEFT OUTER JOIN "
            "(persistent AS persistent_1 JOIN movie AS movie_1 "
            "ON persistent_1.id = movie_1.id) "
            "ON director.id = movie_1.director_id"
        )

    def test_integrate(self):
        Director = self.classes.Director
        Movie = self.classes.Movie

        session = Session(testing.db)
        rscott = Director(name="Ridley Scott")
        alien = Movie(title="Alien")
        brunner = Movie(title="Blade Runner")
        rscott.movies.append(brunner)
        rscott.movies.append(alien)
        session.add_all([rscott, alien, brunner])
        session.commit()

        session.close_all()
        # all three objects should be loaded by the single eager query
        self.d = session.query(Director).options(joinedload('*')).first()
        assert len(list(session)) == 3
class CyclicalInheritingEagerTestThree(fixtures.DeclarativeMappedTest,
                                       testing.AssertsCompiledSQL):
    """with_polymorphic='*' base whose subclass eagerly self-references the
    base; verifies compiled SQL with and without an explicit joinedload,
    in particular that the eager join does not recurse endlessly."""

    __dialect__ = 'default'
    run_create_tables = None

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class PersistentObject(Base):
            __tablename__ = 'persistent'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)

            __mapper_args__ = {'with_polymorphic': "*"}

        class Director(PersistentObject):
            __tablename__ = 'director'
            id = Column(Integer, ForeignKey('persistent.id'), primary_key=True)
            other_id = Column(Integer, ForeignKey('persistent.id'))
            name = Column(String(50))
            # lazy=False: eager by default; primaryjoin needed since two FKs
            # point at persistent.id
            other = relationship(PersistentObject,
                                 primaryjoin=other_id == PersistentObject.id,
                                 lazy=False)
            __mapper_args__ = {"inherit_condition": id == PersistentObject.id}

    def test_gen_query_nodepth(self):
        """Base query: polymorphic join only, no eager join generated."""
        PersistentObject = self.classes.PersistentObject
        sess = create_session()
        self.assert_compile(
            sess.query(PersistentObject),
            "SELECT persistent.id AS persistent_id, "
            "director.id AS director_id,"
            " director.other_id AS director_other_id, "
            "director.name AS director_name FROM persistent "
            "LEFT OUTER JOIN director ON director.id = persistent.id"
        )

    def test_gen_query_depth(self):
        """Explicit joinedload(Director.other): exactly one extra level of
        aliased (persistent_1 / director_1) eager join is emitted."""
        PersistentObject = self.classes.PersistentObject
        Director = self.classes.Director
        sess = create_session()
        self.assert_compile(
            sess.query(PersistentObject).options(joinedload(Director.other)),
            "SELECT persistent.id AS persistent_id, "
            "director.id AS director_id, "
            "director.other_id AS director_other_id, "
            "director.name AS director_name, persistent_1.id AS "
            "persistent_1_id, director_1.id AS director_1_id, "
            "director_1.other_id AS director_1_other_id, "
            "director_1.name AS director_1_name "
            "FROM persistent LEFT OUTER JOIN director "
            "ON director.id = persistent.id "
            "LEFT OUTER JOIN (persistent AS persistent_1 "
            "LEFT OUTER JOIN director AS director_1 ON "
            "director_1.id = persistent_1.id) "
            "ON director.other_id = persistent_1.id"
        )
class EnsureColumnsAddedTest(
        fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL):
    """When a relationship joins on a non-primary-key column ('arb') that
    load_only() would otherwise defer, the eager loader must still add that
    column to the SELECT (including inside the LIMIT subquery)."""

    __dialect__ = 'default'
    run_create_tables = None

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class Parent(Base):
            __tablename__ = 'parent'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            # relationships join on 'arb', not on the primary key
            arb = Column(Integer, unique=True)
            data = Column(Integer)
            o2mchild = relationship("O2MChild")
            m2mchild = relationship("M2MChild", secondary=Table(
                'parent_to_m2m', Base.metadata,
                Column('parent_id', ForeignKey('parent.arb')),
                Column('child_id', ForeignKey('m2mchild.id'))
            ))

        class O2MChild(Base):
            __tablename__ = 'o2mchild'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            parent_id = Column(ForeignKey('parent.arb'))

        class M2MChild(Base):
            __tablename__ = 'm2mchild'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)

    def test_joinedload_defered_pk_limit_o2m(self):
        """o2m + LIMIT: 'arb' must appear inside the anon subquery."""
        Parent = self.classes.Parent

        s = Session()

        self.assert_compile(
            s.query(Parent).options(
                load_only('data'),
                joinedload(Parent.o2mchild)).limit(10),
            "SELECT anon_1.parent_id AS anon_1_parent_id, "
            "anon_1.parent_data AS anon_1_parent_data, "
            "anon_1.parent_arb AS anon_1_parent_arb, "
            "o2mchild_1.id AS o2mchild_1_id, "
            "o2mchild_1.parent_id AS o2mchild_1_parent_id "
            "FROM (SELECT parent.id AS parent_id, parent.data AS parent_data, "
            "parent.arb AS parent_arb FROM parent LIMIT :param_1) AS anon_1 "
            "LEFT OUTER JOIN o2mchild AS o2mchild_1 "
            "ON anon_1.parent_arb = o2mchild_1.parent_id"
        )

    def test_joinedload_defered_pk_limit_m2m(self):
        """m2m + LIMIT: 'arb' must appear inside the anon subquery."""
        Parent = self.classes.Parent

        s = Session()

        self.assert_compile(
            s.query(Parent).options(
                load_only('data'),
                joinedload(Parent.m2mchild)).limit(10),
            "SELECT anon_1.parent_id AS anon_1_parent_id, "
            "anon_1.parent_data AS anon_1_parent_data, "
            "anon_1.parent_arb AS anon_1_parent_arb, "
            "m2mchild_1.id AS m2mchild_1_id "
            "FROM (SELECT parent.id AS parent_id, "
            "parent.data AS parent_data, parent.arb AS parent_arb "
            "FROM parent LIMIT :param_1) AS anon_1 "
            "LEFT OUTER JOIN (parent_to_m2m AS parent_to_m2m_1 "
            "JOIN m2mchild AS m2mchild_1 "
            "ON m2mchild_1.id = parent_to_m2m_1.child_id) "
            "ON anon_1.parent_arb = parent_to_m2m_1.parent_id"
        )

    def test_joinedload_defered_pk_o2m(self):
        """o2m, no LIMIT: 'arb' still selected despite load_only('data')."""
        Parent = self.classes.Parent

        s = Session()

        self.assert_compile(
            s.query(Parent).options(
                load_only('data'),
                joinedload(Parent.o2mchild)),
            "SELECT parent.id AS parent_id, parent.data AS parent_data, "
            "parent.arb AS parent_arb, o2mchild_1.id AS o2mchild_1_id, "
            "o2mchild_1.parent_id AS o2mchild_1_parent_id "
            "FROM parent LEFT OUTER JOIN o2mchild AS o2mchild_1 "
            "ON parent.arb = o2mchild_1.parent_id"
        )

    def test_joinedload_defered_pk_m2m(self):
        """m2m, no LIMIT: 'arb' still selected despite load_only('data')."""
        Parent = self.classes.Parent

        s = Session()

        self.assert_compile(
            s.query(Parent).options(
                load_only('data'),
                joinedload(Parent.m2mchild)),
            "SELECT parent.id AS parent_id, parent.data AS parent_data, "
            "parent.arb AS parent_arb, m2mchild_1.id AS m2mchild_1_id "
            "FROM parent LEFT OUTER JOIN (parent_to_m2m AS parent_to_m2m_1 "
            "JOIN m2mchild AS m2mchild_1 "
            "ON m2mchild_1.id = parent_to_m2m_1.child_id) "
            "ON parent.arb = parent_to_m2m_1.parent_id"
        )
| 36.879933 | 79 | 0.513331 |
from sqlalchemy.testing import eq_, is_, is_not_
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy.orm import joinedload, deferred, undefer, \
joinedload_all, backref, Session,\
defaultload, Load, load_only
from sqlalchemy import Integer, String, Date, ForeignKey, and_, select, \
func, text
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, create_session, \
lazyload, aliased, column_property
from sqlalchemy.sql import operators
from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing import fixtures, expect_warnings
from test.orm import _fixtures
from sqlalchemy.util import OrderedDict as odict
import datetime
class EagerTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL):
run_inserts = 'once'
run_deletes = None
__dialect__ = 'default'
    def test_basic(self):
        """Mapper-level lazy='joined' populates the collection in the same
        statement as the parent load."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(
                mapper(Address, addresses), lazy='joined', order_by=Address.id)
        })
        sess = create_session()
        q = sess.query(User)

        eq_([User(id=7, addresses=[
            Address(id=1, email_address='jack@bean.com')])],
            q.filter(User.id == 7).all())
        eq_(self.static.user_address_result, q.order_by(User.id).all())
    def test_late_compile(self):
        """A relationship added via add_property() after the mapper has
        already been used still participates in joined eager loading."""
        User, Address, addresses, users = (
            self.classes.User,
            self.classes.Address,
            self.tables.addresses,
            self.tables.users)

        m = mapper(User, users)
        sess = create_session()
        sess.query(User).all()  # forces mapper configuration
        m.add_property("addresses", relationship(mapper(Address, addresses)))

        sess.expunge_all()

        def go():
            eq_(
                [User(id=7, addresses=[
                    Address(id=1, email_address='jack@bean.com')])],
                sess.query(User).options(
                    joinedload('addresses')).filter(User.id == 7).all()
            )
        self.assert_sql_count(testing.db, go, 1)
    def test_no_orphan(self):
        """An eagerly loaded child arrives with its parent reference intact,
        so delete-orphan cascade does not consider it an orphan."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(
                Address, cascade="all,delete-orphan", lazy='joined')
        })
        mapper(Address, addresses)

        sess = create_session()
        user = sess.query(User).get(7)
        assert getattr(User, 'addresses').\
            hasparent(
                sa.orm.attributes.instance_state(
                    user.addresses[0]), optimistic=True)
        assert not sa.orm.class_mapper(Address).\
            _is_orphan(
                sa.orm.attributes.instance_state(user.addresses[0]))
    def test_orderby(self):
        """A single-column order_by on the relationship orders each
        eagerly loaded collection (by email_address here)."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(
                mapper(Address, addresses),
                lazy='joined', order_by=addresses.c.email_address),
        })
        q = create_session().query(User)
        eq_([
            User(id=7, addresses=[
                Address(id=1)
            ]),
            User(id=8, addresses=[
                Address(id=3, email_address='ed@bettyboop.com'),
                Address(id=4, email_address='ed@lala.com'),
                Address(id=2, email_address='ed@wood.com')
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=10, addresses=[])
        ], q.order_by(User.id).all())
    def test_orderby_multi(self):
        """A multi-column order_by list on the relationship is honored in
        the eager load."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(
                mapper(Address, addresses),
                lazy='joined',
                order_by=[addresses.c.email_address, addresses.c.id]),
        })
        q = create_session().query(User)
        eq_([
            User(id=7, addresses=[
                Address(id=1)
            ]),
            User(id=8, addresses=[
                Address(id=3, email_address='ed@bettyboop.com'),
                Address(id=4, email_address='ed@lala.com'),
                Address(id=2, email_address='ed@wood.com')
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=10, addresses=[])
        ], q.order_by(User.id).all())
def test_orderby_related(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(Address, addresses)
mapper(User, users, properties=dict(
addresses=relationship(
Address, lazy='joined', order_by=addresses.c.id),
))
q = create_session().query(User)
l = q.filter(User.id == Address.user_id).order_by(
Address.email_address).all()
eq_([
User(id=8, addresses=[
Address(id=2, email_address='ed@wood.com'),
Address(id=3, email_address='ed@bettyboop.com'),
Address(id=4, email_address='ed@lala.com'),
]),
User(id=9, addresses=[
Address(id=5)
]),
User(id=7, addresses=[
Address(id=1)
]),
], l)
    def test_orderby_desc(self):
        """A descending order_by expression on the relationship is applied
        to the eagerly loaded collections."""
        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)

        mapper(Address, addresses)
        mapper(User, users, properties=dict(
            addresses=relationship(
                Address, lazy='joined',
                order_by=[sa.desc(addresses.c.email_address)]),
        ))
        sess = create_session()
        eq_([
            User(id=7, addresses=[
                Address(id=1)
            ]),
            User(id=8, addresses=[
                Address(id=2, email_address='ed@wood.com'),
                Address(id=4, email_address='ed@lala.com'),
                Address(id=3, email_address='ed@bettyboop.com'),
            ]),
            User(id=9, addresses=[
                Address(id=5)
            ]),
            User(id=10, addresses=[])
        ], sess.query(User).order_by(User.id).all())
    def test_no_ad_hoc_orderby(self):
        """A string order_by("email_address") resolves against the joined
        'addresses' table when one is present, and is never silently
        adapted to the eager-load alias; without the join it passes
        through unresolved (with a warning)."""
        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)

        mapper(Address, addresses)
        mapper(User, users, properties=dict(
            addresses=relationship(
                Address),
        ))

        sess = create_session()

        q = sess.query(User).\
            join("addresses").\
            options(joinedload("addresses")).\
            order_by("email_address")

        # order_by binds to the explicitly joined 'addresses', not to the
        # eager-load alias 'addresses_1'
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "addresses_1.id AS addresses_1_id, addresses_1.user_id AS "
            "addresses_1_user_id, addresses_1.email_address AS "
            "addresses_1_email_address FROM users JOIN addresses "
            "ON users.id = addresses.user_id LEFT OUTER JOIN addresses "
            "AS addresses_1 ON users.id = addresses_1.user_id "
            "ORDER BY addresses.email_address"
        )

        q = sess.query(User).options(joinedload("addresses")).\
            order_by("email_address")

        # with no join to resolve against, the label reference warns and
        # is emitted as raw text
        with expect_warnings("Can't resolve label reference 'email_address'"):
            self.assert_compile(
                q,
                "SELECT users.id AS users_id, users.name AS users_name, "
                "addresses_1.id AS addresses_1_id, addresses_1.user_id AS "
                "addresses_1_user_id, addresses_1.email_address AS "
                "addresses_1_email_address FROM users LEFT OUTER JOIN "
                "addresses AS addresses_1 ON users.id = addresses_1.user_id "
                "ORDER BY email_address"
            )
    def test_deferred_fk_col(self):
        """Joined eager loading when the foreign-key column itself is
        deferred: the eager load works without undeferring the FK, and
        accessing the deferred column afterwards costs one extra query."""
        users, Dingaling, User, dingalings, Address, addresses = (
            self.tables.users,
            self.classes.Dingaling,
            self.classes.User,
            self.tables.dingalings,
            self.classes.Address,
            self.tables.addresses)

        mapper(Address, addresses, properties={
            'user_id': deferred(addresses.c.user_id),
            'user': relationship(User, lazy='joined')
        })
        mapper(User, users)

        sess = create_session()

        # with and without LIMIT (limit wraps the query in a subquery)
        for q in [
            sess.query(Address).filter(
                Address.id.in_([1, 4, 5])
            ).order_by(Address.id),
            sess.query(Address).filter(
                Address.id.in_([1, 4, 5])
            ).order_by(Address.id).limit(3)
        ]:
            sess.expunge_all()
            eq_(q.all(),
                [Address(id=1, user=User(id=7)),
                 Address(id=4, user=User(id=8)),
                 Address(id=5, user=User(id=9))]
                )

        sess.expunge_all()
        a = sess.query(Address).filter(Address.id == 1).all()[0]

        # 1.0 change!  we don't automatically undefer user_id here.
        # if the user wants user_id w/ the eager load, they should
        # undefer it.
        def go():
            eq_(a.user_id, 7)
        # one lazy load
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()
        a = sess.query(Address).filter(Address.id == 1).first()

        def go():
            eq_(a.user_id, 7)
        # same, 1.0 doesn't check these
        # self.assert_sql_count(testing.db, go, 0)
        self.assert_sql_count(testing.db, go, 1)

        # do the mapping in reverse
        # (we would have just used an "addresses" backref but the test
        # fixtures then require the whole backref to be set up, lazy loaders
        # trigger, etc.)
        sa.orm.clear_mappers()

        mapper(Address, addresses, properties={
            'user_id': deferred(addresses.c.user_id),
        })
        mapper(User, users, properties={
            'addresses': relationship(Address, lazy='joined')})

        for q in [
            sess.query(User).filter(User.id == 7),
            sess.query(User).filter(User.id == 7).limit(1)
        ]:
            sess.expunge_all()
            eq_(q.all(),
                [User(id=7, addresses=[Address(id=1)])]
                )

        sess.expunge_all()
        u = sess.query(User).get(7)

        def go():
            eq_(u.addresses[0].user_id, 7)
        # assert that the eager loader didn't have to affect 'user_id' here
        # and that its still deferred
        self.assert_sql_count(testing.db, go, 1)

        sa.orm.clear_mappers()

        mapper(User, users, properties={
            'addresses': relationship(Address, lazy='joined',
                                      order_by=addresses.c.id)})
        mapper(Address, addresses, properties={
            'user_id': deferred(addresses.c.user_id),
            'dingalings': relationship(Dingaling, lazy='joined')})
        mapper(Dingaling, dingalings, properties={
            'address_id': deferred(dingalings.c.address_id)})
        sess.expunge_all()

        def go():
            # two levels of joined eager load, all in one statement
            u = sess.query(User).get(8)
            eq_(User(id=8,
                     addresses=[Address(id=2, dingalings=[Dingaling(id=1)]),
                                Address(id=3),
                                Address(id=4)]),
                u)
        self.assert_sql_count(testing.db, go, 1)
    def test_options_pathing(self):
        """joinedload() option paths chain and combine correctly.

        Each (options, count) pair asserts how many SELECT statements are
        required to build the full user/item/keyword result: a fully
        chained path collapses to 1 statement, while partial paths leave
        the uncovered levels to per-row lazy loads.
        """
        users, Keyword, orders, items, order_items, \
            Order, Item, User, keywords, item_keywords = (
                self.tables.users,
                self.classes.Keyword,
                self.tables.orders,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.tables.keywords,
                self.tables.item_keywords)
        mapper(User, users, properties={
            'orders': relationship(Order, order_by=orders.c.id),
        })
        mapper(Order, orders, properties={
            'items': relationship(
                Item,
                secondary=order_items, order_by=items.c.id),
        })
        mapper(Item, items, properties={
            'keywords': relationship(Keyword,
                                     secondary=item_keywords,
                                     order_by=keywords.c.id)
        })
        mapper(Keyword, keywords)
        for opt, count in [
            ((
                joinedload(User.orders, Order.items),
            ), 10),
            ((joinedload("orders.items"), ), 10),
            ((
                joinedload(User.orders, ),
                joinedload(User.orders, Order.items),
                joinedload(User.orders, Order.items, Item.keywords),
            ), 1),
            ((
                joinedload(User.orders, Order.items, Item.keywords),
            ), 10),
            ((
                joinedload(User.orders, Order.items),
                joinedload(User.orders, Order.items, Item.keywords),
            ), 5),
        ]:
            # fresh session per scenario so prior lazy loads don't leak in
            sess = create_session()
            def go():
                eq_(
                    sess.query(User).options(*opt).order_by(User.id).all(),
                    self.static.user_item_keyword_result
                )
            self.assert_sql_count(testing.db, go, count)
def test_disable_dynamic(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses': relationship(Address, lazy="dynamic")
})
mapper(Address, addresses)
sess = create_session()
assert_raises_message(
sa.exc.InvalidRequestError,
"User.addresses' does not support object "
"population - eager loading cannot be applied.",
sess.query(User).options(joinedload(User.addresses)).first,
)
    def test_many_to_many(self):
        """Basic many-to-many joined eager load: one SELECT for the full
        result, and joining against the relationship (plain or aliased)
        does not disturb the eagerly loaded collection."""
        keywords, items, item_keywords, Keyword, Item = (
            self.tables.keywords,
            self.tables.items,
            self.tables.item_keywords,
            self.classes.Keyword,
            self.classes.Item)
        mapper(Keyword, keywords)
        mapper(Item, items, properties=dict(
            keywords=relationship(Keyword, secondary=item_keywords,
                                  lazy='joined', order_by=keywords.c.id)))
        q = create_session().query(Item).order_by(Item.id)
        def go():
            eq_(self.static.item_keyword_result, q.all())
        self.assert_sql_count(testing.db, go, 1)
        # filtering via a join on the same relationship still 1 statement
        def go():
            eq_(self.static.item_keyword_result[0:2],
                q.join('keywords').filter(Keyword.name == 'red').all())
        self.assert_sql_count(testing.db, go, 1)
        # same with an aliased join
        def go():
            eq_(self.static.item_keyword_result[0:2],
                (q.join('keywords', aliased=True).
                 filter(Keyword.name == 'red')).all())
        self.assert_sql_count(testing.db, go, 1)
    def test_eager_option(self):
        """A joinedload() option upgrades a lazy='select' relationship to
        a joined eager load for that one query (one SELECT total)."""
        keywords, items, item_keywords, Keyword, Item = (
            self.tables.keywords,
            self.tables.items,
            self.tables.item_keywords,
            self.classes.Keyword,
            self.classes.Item)
        mapper(Keyword, keywords)
        mapper(Item, items, properties=dict(
            keywords=relationship(
                Keyword, secondary=item_keywords, lazy='select',
                order_by=keywords.c.id)))
        q = create_session().query(Item)
        def go():
            eq_(self.static.item_keyword_result[0:2],
                (q.options(
                    joinedload('keywords')
                ).join('keywords').
                    filter(keywords.c.name == 'red')).order_by(Item.id).all())
        self.assert_sql_count(testing.db, go, 1)
def test_cyclical(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(Address, addresses)
mapper(User, users, properties=dict(
addresses=relationship(
Address, lazy='joined',
backref=sa.orm.backref('user', lazy='joined'),
order_by=Address.id)
))
eq_(sa.orm.class_mapper(User).get_property('addresses').lazy, 'joined')
eq_(sa.orm.class_mapper(Address).get_property('user').lazy, 'joined')
sess = create_session()
eq_(
self.static.user_address_result,
sess.query(User).order_by(User.id).all())
    def test_double(self):
        """Two eager relationships onto the same table, via non-primary
        mappers over two aliases of ``orders``, load in one statement
        without interfering with each other."""
        users, orders, User, Address, Order, addresses = (
            self.tables.users,
            self.tables.orders,
            self.classes.User,
            self.classes.Address,
            self.classes.Order,
            self.tables.addresses)
        openorders = sa.alias(orders, 'openorders')
        closedorders = sa.alias(orders, 'closedorders')
        mapper(Address, addresses)
        mapper(Order, orders)
        # non-primary mappers let Order be selected from the two aliases
        open_mapper = mapper(Order, openorders, non_primary=True)
        closed_mapper = mapper(Order, closedorders, non_primary=True)
        mapper(User, users, properties=dict(
            addresses=relationship(
                Address, lazy='joined', order_by=addresses.c.id),
            open_orders=relationship(
                open_mapper,
                primaryjoin=sa.and_(openorders.c.isopen == 1,
                                    users.c.id == openorders.c.user_id),
                lazy='joined', order_by=openorders.c.id),
            closed_orders=relationship(
                closed_mapper,
                primaryjoin=sa.and_(closedorders.c.isopen == 0,
                                    users.c.id == closedorders.c.user_id),
                lazy='joined', order_by=closedorders.c.id)))
        q = create_session().query(User).order_by(User.id)
        def go():
            eq_([
                User(
                    id=7,
                    addresses=[Address(id=1)],
                    open_orders=[Order(id=3)],
                    closed_orders=[Order(id=1), Order(id=5)]
                ),
                User(
                    id=8,
                    addresses=[Address(id=2), Address(id=3), Address(id=4)],
                    open_orders=[],
                    closed_orders=[]
                ),
                User(
                    id=9,
                    addresses=[Address(id=5)],
                    open_orders=[Order(id=4)],
                    closed_orders=[Order(id=2)]
                ),
                User(id=10)
            ], q.all())
        self.assert_sql_count(testing.db, go, 1)
    def test_double_same_mappers(self):
        """Like test_double, but both eager relationships use the same
        primary Order mapper, distinguished only by their primaryjoin;
        the nested 'items' load comes along with each."""
        addresses, items, order_items, orders, \
            Item, User, Address, Order, users = (
                self.tables.addresses,
                self.tables.items,
                self.tables.order_items,
                self.tables.orders,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.classes.Order,
                self.tables.users)
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items': relationship(Item, secondary=order_items, lazy='joined',
                                  order_by=items.c.id)})
        mapper(Item, items)
        mapper(User, users, properties=dict(
            addresses=relationship(
                Address, lazy='joined', order_by=addresses.c.id),
            open_orders=relationship(
                Order,
                primaryjoin=sa.and_(orders.c.isopen == 1,
                                    users.c.id == orders.c.user_id),
                lazy='joined', order_by=orders.c.id),
            closed_orders=relationship(
                Order,
                primaryjoin=sa.and_(orders.c.isopen == 0,
                                    users.c.id == orders.c.user_id),
                lazy='joined', order_by=orders.c.id)))
        q = create_session().query(User).order_by(User.id)
        # the entire three-level graph must arrive in a single SELECT
        def go():
            eq_([
                User(id=7,
                     addresses=[
                         Address(id=1)],
                     open_orders=[Order(id=3,
                                        items=[
                                            Item(id=3),
                                            Item(id=4),
                                            Item(id=5)])],
                     closed_orders=[Order(id=1,
                                          items=[
                                              Item(id=1),
                                              Item(id=2),
                                              Item(id=3)]),
                                    Order(id=5,
                                          items=[
                                              Item(id=5)])]),
                User(id=8,
                     addresses=[
                         Address(id=2),
                         Address(id=3),
                         Address(id=4)],
                     open_orders=[],
                     closed_orders=[]),
                User(id=9,
                     addresses=[
                         Address(id=5)],
                     open_orders=[
                         Order(id=4,
                               items=[
                                   Item(id=1),
                                   Item(id=5)])],
                     closed_orders=[
                         Order(id=2,
                               items=[
                                   Item(id=1),
                                   Item(id=2),
                                   Item(id=3)])]),
                User(id=10)
            ], q.all())
        self.assert_sql_count(testing.db, go, 1)
    def test_no_false_hits(self):
        """Eager loaders keyed on aliased column labels must not falsely
        match the plain 'id'/'name' columns of a textual SELECT."""
        addresses, orders, User, Address, Order, users = (
            self.tables.addresses,
            self.tables.orders,
            self.classes.User,
            self.classes.Address,
            self.classes.Order,
            self.tables.users)
        mapper(User, users, properties={
            'addresses': relationship(Address, lazy='joined'),
            'orders': relationship(Order, lazy='joined')
        })
        mapper(Address, addresses)
        mapper(Order, orders)
        # prime a full load first (stored on self; purpose beyond keeping
        # the objects referenced is not shown here)
        self.allusers = create_session().query(User).all()
        # using a textual select, the columns will be 'id' and 'name'. the
        # eager loaders have aliases which should not hit on those columns,
        # they should be required to locate only their aliased/fully table
        # qualified column name.
        noeagers = create_session().query(User).\
            from_statement(text("select * from users")).all()
        assert 'orders' not in noeagers[0].__dict__
        assert 'addresses' not in noeagers[0].__dict__
def test_limit(self):
users, items, order_items, orders, Item, \
User, Address, Order, addresses = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses)
mapper(Item, items)
mapper(Order, orders, properties={
'items': relationship(Item, secondary=order_items, lazy='joined',
order_by=items.c.id)
})
mapper(User, users, properties={
'addresses': relationship(
mapper(Address, addresses),
lazy='joined', order_by=addresses.c.id),
'orders': relationship(Order, lazy='select', order_by=orders.c.id)
})
sess = create_session()
q = sess.query(User)
l = q.order_by(User.id).limit(2).offset(1).all()
eq_(self.static.user_all_result[1:3], l)
    def test_distinct(self):
        """DISTINCT combined with a joined eager load still yields each
        User once with a correct addresses collection, in one SELECT."""
        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)
        # this is an involved 3x union of the users table to get a lot of rows.
        # then see if the "distinct" works its way out. you actually get
        # the same result with or without the distinct, just via less or
        # more rows.
        u2 = users.alias('u2')
        s = sa.union_all(
            u2.select(use_labels=True), u2.select(use_labels=True),
            u2.select(use_labels=True)).alias('u')
        mapper(User, users, properties={
            'addresses': relationship(
                mapper(Address, addresses),
                lazy='joined', order_by=addresses.c.id),
        })
        sess = create_session()
        q = sess.query(User)
        def go():
            l = q.filter(s.c.u2_id == User.id).distinct().\
                order_by(User.id).all()
            eq_(self.static.user_address_result, l)
        self.assert_sql_count(testing.db, go, 1)
def test_limit_2(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item)
mapper(Keyword, keywords)
mapper(Item, items, properties=dict(
keywords=relationship(
Keyword, secondary=item_keywords,
lazy='joined', order_by=[keywords.c.id]),
))
sess = create_session()
q = sess.query(Item)
l = q.filter((Item.description == 'item 2') |
(Item.description == 'item 5') |
(Item.description == 'item 3')).\
order_by(Item.id).limit(2).all()
eq_(self.static.item_keyword_result[1:3], l)
    def test_limit_3(self):
        """LIMIT/OFFSET applied after an explicit join, with two joined
        eager loads configured, still produces fully populated results."""
        addresses, items, order_items, orders, \
            Item, User, Address, Order, users = (
                self.tables.addresses,
                self.tables.items,
                self.tables.order_items,
                self.tables.orders,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.classes.Order,
                self.tables.users)
        mapper(Item, items)
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy='joined')
        ))
        mapper(Address, addresses)
        mapper(User, users, properties=dict(
            addresses=relationship(
                Address, lazy='joined', order_by=addresses.c.id),
            orders=relationship(Order, lazy='joined', order_by=orders.c.id),
        ))
        sess = create_session()
        q = sess.query(User)
        # NOTE(review): first scenario skipped on mssql, presumably due to
        # its LIMIT/OFFSET emulation -- confirm before re-enabling
        if not testing.against('mssql'):
            l = q.join('orders').order_by(
                Order.user_id.desc()).limit(2).offset(1)
            eq_([
                User(id=9,
                     orders=[Order(id=2), Order(id=4)],
                     addresses=[Address(id=5)]
                     ),
                User(id=7,
                     orders=[Order(id=1), Order(id=3), Order(id=5)],
                     addresses=[Address(id=1)]
                     )
            ], l.all())
        l = q.join('addresses').order_by(
            Address.email_address.desc()).limit(1).offset(0)
        eq_([
            User(id=7,
                 orders=[Order(id=1), Order(id=3), Order(id=5)],
                 addresses=[Address(id=1)]
                 )
        ], l.all())
    def test_limit_4(self):
        """LIMIT/OFFSET aliasing for a mapper mapped against a select
        (ticket #904): .first() returns a fully populated User."""
        User, Order, addresses, users, orders = (self.classes.User,
                                                 self.classes.Order,
                                                 self.tables.addresses,
                                                 self.tables.users,
                                                 self.tables.orders)
        # tests the LIMIT/OFFSET aliasing on a mapper
        # against a select.   original issue from ticket #904
        sel = sa.select([users, addresses.c.email_address],
                        users.c.id == addresses.c.user_id).alias('useralias')
        mapper(User, sel, properties={
            'orders': relationship(
                Order, primaryjoin=sel.c.id == orders.c.user_id,
                lazy='joined', order_by=orders.c.id)
        })
        mapper(Order, orders)
        sess = create_session()
        eq_(sess.query(User).first(),
            User(name='jack', orders=[
                Order(
                    address_id=1,
                    description='order 1',
                    isopen=0,
                    user_id=7,
                    id=1),
                Order(
                    address_id=1,
                    description='order 3',
                    isopen=1,
                    user_id=7,
                    id=3),
                Order(
                    address_id=None, description='order 5', isopen=0,
                    user_id=7, id=5)],
                email_address='jack@bean.com', id=7)
            )
    def test_useget_cancels_eager(self):
        """A lazy load that resolves via the identity map does not emit
        the joined eager load again: the backref access issues only the
        plain addresses SELECT."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User)
        mapper(User, users)
        mapper(Address, addresses, properties={
            'user': relationship(User, lazy='joined', backref='addresses')
        })
        sess = create_session()
        u1 = sess.query(User).filter(User.id == 8).one()
        def go():
            eq_(u1.addresses[0].user, u1)
        # exactly one statement, and no JOIN to users in it
        self.assert_sql_execution(
            testing.db, go,
            CompiledSQL(
                "SELECT addresses.id AS addresses_id, addresses.user_id AS "
                "addresses_user_id, addresses.email_address AS "
                "addresses_email_address FROM addresses WHERE :param_1 = "
                "addresses.user_id",
                {'param_1': 8})
        )
    def test_manytoone_limit(self):
        """Subquery wrapping under LIMIT: collection (one-to-many) eager
        loads wrap the limited users SELECT in an anon subquery so LIMIT
        counts User rows, while many-to-one eager loads apply LIMIT to
        the joined statement directly. innerjoin variants included."""
        users, items, order_items, Order, Item, User, \
            Address, orders, addresses = (
                self.tables.users,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.tables.orders,
                self.tables.addresses)
        mapper(User, users, properties=odict(
            orders=relationship(Order, backref='user')
        ))
        mapper(Order, orders, properties=odict([
            ('items', relationship(Item, secondary=order_items,
                                   backref='orders')),
            ('address', relationship(Address))
        ]))
        mapper(Address, addresses)
        mapper(Item, items)
        sess = create_session()
        # one-to-many + LIMIT: users SELECT gets subquery-wrapped
        self.assert_compile(
            sess.query(User).options(joinedload(User.orders)).limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name "
            "AS anon_1_users_name, orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, orders_1.address_id "
            "AS orders_1_address_id, orders_1.description AS "
            "orders_1_description, orders_1.isopen AS orders_1_isopen "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN orders AS "
            "orders_1 ON anon_1.users_id = orders_1.user_id",
            {'param_1': 10}
        )
        # many-to-one + LIMIT: no subquery; LIMIT applies directly
        self.assert_compile(
            sess.query(Order).options(joinedload(Order.user)).limit(10),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS "
            "orders_address_id, orders.description AS orders_description, "
            "orders.isopen AS orders_isopen, "
            "users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM orders LEFT OUTER JOIN users AS "
            "users_1 ON users_1.id = orders.user_id LIMIT :param_1",
            {'param_1': 10}
        )
        # many-to-one with innerjoin=True renders JOIN instead
        self.assert_compile(
            sess.query(Order).options(
                joinedload(Order.user, innerjoin=True)).limit(10),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS "
            "orders_address_id, orders.description AS orders_description, "
            "orders.isopen AS orders_isopen, "
            "users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM orders JOIN users AS "
            "users_1 ON users_1.id = orders.user_id LIMIT :param_1",
            {'param_1': 10}
        )
        # chained collection -> many-to-one path under LIMIT
        self.assert_compile(
            sess.query(User).options(
                joinedload_all("orders.address")).limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, "
            "anon_1.users_name AS anon_1_users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM "
            "(SELECT users.id AS users_id, users.name AS users_name "
            "FROM users LIMIT :param_1) AS anon_1 "
            "LEFT OUTER JOIN orders AS orders_1 "
            "ON anon_1.users_id = orders_1.user_id LEFT OUTER JOIN "
            "addresses AS addresses_1 ON addresses_1.id = orders_1.address_id",
            {'param_1': 10}
        )
        # without LIMIT there is no subquery wrapping at all
        self.assert_compile(
            sess.query(User).options(joinedload_all("orders.items"),
                                     joinedload("orders.address")),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS items_1_id, "
            "items_1.description AS items_1_description, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS "
            "addresses_1_email_address, orders_1.id AS orders_1_id, "
            "orders_1.user_id AS "
            "orders_1_user_id, orders_1.address_id AS orders_1_address_id, "
            "orders_1.description "
            "AS orders_1_description, orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id "
            "LEFT OUTER JOIN addresses AS addresses_1 "
            "ON addresses_1.id = orders_1.address_id"
        )
        # inner join at the second level nests into the bracketed join
        self.assert_compile(
            sess.query(User).options(
                joinedload("orders"),
                joinedload(
                    "orders.address",
                    innerjoin=True)).limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name "
            "AS anon_1_users_name, addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address, "
            "orders_1.id AS orders_1_id, orders_1.user_id AS "
            "orders_1_user_id, orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users"
            " LIMIT :param_1) AS anon_1 LEFT OUTER JOIN "
            "(orders AS orders_1 JOIN addresses AS addresses_1 "
            "ON addresses_1.id = orders_1.address_id) ON "
            "anon_1.users_id = orders_1.user_id",
            {'param_1': 10}
        )
        # innerjoin at both levels: straight JOINs against the subquery
        self.assert_compile(
            sess.query(User).options(
                joinedload("orders", innerjoin=True),
                joinedload("orders.address", innerjoin=True)).limit(10),
            "SELECT anon_1.users_id AS anon_1_users_id, "
            "anon_1.users_name AS anon_1_users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM (SELECT users.id AS users_id, users.name AS users_name "
            "FROM users "
            "LIMIT :param_1) AS anon_1 JOIN orders "
            "AS orders_1 ON anon_1.users_id = "
            "orders_1.user_id JOIN addresses AS addresses_1 "
            "ON addresses_1.id = orders_1.address_id",
            {'param_1': 10}
        )
def test_one_to_many_scalar(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(User, users, properties=dict(
address=relationship(mapper(Address, addresses),
lazy='joined', uselist=False)
))
q = create_session().query(User)
def go():
l = q.filter(users.c.id == 7).all()
eq_([User(id=7, address=Address(id=1))], l)
self.assert_sql_count(testing.db, go, 1)
    def test_one_to_many_scalar_subq_wrapping(self):
        """A uselist=False joined eager load under LIMIT is not wrapped in
        a subquery; the LIMIT applies directly to the joined statement."""
        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)
        mapper(User, users, properties=dict(
            address=relationship(mapper(Address, addresses),
                                 lazy='joined', uselist=False)
        ))
        q = create_session().query(User)
        q = q.filter(users.c.id == 7).limit(1)
        # note: no anon_1 subquery in the compiled SQL
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users LEFT OUTER JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id "
            "WHERE users.id = :id_1 "
            "LIMIT :param_1",
            checkparams={'id_1': 7, 'param_1': 1}
        )
def test_many_to_one(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties=dict(
user=relationship(mapper(User, users), lazy='joined')
))
sess = create_session()
q = sess.query(Address)
def go():
a = q.filter(addresses.c.id == 1).one()
is_not_(a.user, None)
u1 = sess.query(User).get(7)
is_(a.user, u1)
self.assert_sql_count(testing.db, go, 1)
    def test_many_to_one_null(self):
        """A many-to-one load that matches no row sets the attribute to
        None: lazy load costs an extra SELECT, eager load does not."""
        Order, Address, addresses, orders = (self.classes.Order,
                                             self.classes.Address,
                                             self.tables.addresses,
                                             self.tables.orders)
        # use a primaryjoin intended to defeat SA's usage of
        # the identity-map shortcut for the lazy load, so the lazyload
        # actually emits SQL (hence count of 2 below)
        mapper(Order, orders, properties=dict(
            address=relationship(
                mapper(Address, addresses),
                primaryjoin=and_(
                    addresses.c.id == orders.c.address_id,
                    addresses.c.email_address != None
                ),
                lazy='joined')
        ))
        sess = create_session()
        def go():
            o1 = sess.query(Order).options(
                lazyload('address')).filter(
                Order.id == 5).one()
            eq_(o1.address, None)
        self.assert_sql_count(testing.db, go, 2)
        sess.expunge_all()
        # eager load: NULL columns in the joined row -> None, one SELECT
        def go():
            o1 = sess.query(Order).filter(Order.id == 5).one()
            eq_(o1.address, None)
        self.assert_sql_count(testing.db, go, 1)
    def test_one_and_many(self):
        """One-to-many (User.orders) chained to many-to-many (Order.items),
        both joined-eager: the whole graph loads in one statement."""
        users, items, order_items, orders, Item, User, Order = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.tables.orders,
            self.classes.Item,
            self.classes.User,
            self.classes.Order)
        mapper(User, users, properties={
            'orders': relationship(Order, lazy='joined', order_by=orders.c.id)
        })
        mapper(Item, items)
        mapper(Order, orders, properties=dict(
            items=relationship(
                Item,
                secondary=order_items,
                lazy='joined',
                order_by=items.c.id)
        ))
        q = create_session().query(User)
        l = q.filter(text("users.id in (7, 8, 9)")).order_by(text("users.id"))
        def go():
            eq_(self.static.user_order_result[0:3], l.all())
        self.assert_sql_count(testing.db, go, 1)
    def test_double_with_aggregate(self):
        """Two eager loads on User: the plain 'orders' collection plus a
        scalar 'max_order' via a non-primary mapper over an
        aggregate-derived select — all in one statement."""
        User, users, orders, Order = (self.classes.User,
                                      self.tables.users,
                                      self.tables.orders,
                                      self.classes.Order)
        max_orders_by_user = sa.select([
            sa.func.max(orders.c.id).label('order_id')],
            group_by=[orders.c.user_id]
        ).alias('max_orders_by_user')
        # selectable limited to each user's highest order id
        max_orders = orders.select(
            orders.c.id == max_orders_by_user.c.order_id).\
            alias('max_orders')
        mapper(Order, orders)
        mapper(User, users, properties={
            'orders': relationship(Order, backref='user', lazy='joined',
                                   order_by=orders.c.id),
            'max_order': relationship(
                mapper(Order, max_orders, non_primary=True),
                lazy='joined', uselist=False)
        })
        q = create_session().query(User)
        def go():
            eq_([
                User(id=7, orders=[
                    Order(id=1),
                    Order(id=3),
                    Order(id=5),
                ],
                    max_order=Order(id=5)
                ),
                User(id=8, orders=[]),
                User(id=9, orders=[Order(id=2), Order(id=4)],
                     max_order=Order(id=4)
                     ),
                User(id=10),
            ], q.order_by(User.id).all())
        self.assert_sql_count(testing.db, go, 1)
def test_uselist_false_warning(self):
User, users, orders, Order = (self.classes.User,
self.tables.users,
self.tables.orders,
self.classes.Order)
mapper(User, users, properties={
'order': relationship(Order, uselist=False)
})
mapper(Order, orders)
s = create_session()
assert_raises(sa.exc.SAWarning,
s.query(User).options(joinedload(User.order)).all)
    def test_wide(self):
        """Three simultaneous eager loads (addresses, orders,
        orders.items) in one wide statement; uses lazy=False, the older
        spelling of lazy='joined'."""
        users, items, order_items, Order, Item, \
            User, Address, orders, addresses = (
                self.tables.users,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.tables.orders,
                self.tables.addresses)
        mapper(
            Order, orders, properties={
                'items': relationship(
                    Item, secondary=order_items, lazy='joined',
                    order_by=items.c.id)})
        mapper(Item, items)
        mapper(User, users, properties=dict(
            addresses=relationship(
                mapper(
                    Address,
                    addresses),
                lazy=False,
                order_by=addresses.c.id),
            orders=relationship(Order, lazy=False, order_by=orders.c.id),
        ))
        q = create_session().query(User)
        def go():
            eq_(self.static.user_all_result, q.order_by(User.id).all())
        self.assert_sql_count(testing.db, go, 1)
    def test_against_select(self):
        """Joined eager load of Order.user where Order is mapped against
        a filtered select (only open orders) rather than the table."""
        users, items, order_items, orders, Item, User, Order = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.tables.orders,
            self.classes.Item,
            self.classes.User,
            self.classes.Order)
        s = sa.select([orders], orders.c.isopen == 1).alias('openorders')
        mapper(Order, s, properties={
            'user': relationship(User, lazy='joined')
        })
        mapper(User, users)
        mapper(Item, items)
        q = create_session().query(Order)
        # only open orders (3 and 4) are visible through the mapping
        eq_([
            Order(id=3, user=User(id=7)),
            Order(id=4, user=User(id=9))
        ], q.all())
        # explicit select_from over the joined selectable still works
        q = q.select_from(s.join(order_items).join(items)).filter(
            ~Item.id.in_([1, 2, 5]))
        eq_([
            Order(id=3, user=User(id=7)),
        ], q.all())
def test_aliasing(self):
Address, addresses, users, User = (self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User)
mapper(User, users, properties=dict(
addresses=relationship(mapper(Address, addresses),
lazy='joined', order_by=addresses.c.id)
))
q = create_session().query(User)
l = q.filter(addresses.c.email_address == 'ed@lala.com').filter(
Address.user_id == User.id).order_by(User.id)
eq_(self.static.user_address_result[1:2], l.all())
    def test_inner_join(self):
        """innerjoin=True on the relationship renders JOIN (not LEFT OUTER
        JOIN) for the eager load."""
        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)
        mapper(User, users, properties=dict(
            addresses=relationship(mapper(Address, addresses), lazy='joined',
                                   innerjoin=True, order_by=addresses.c.id)
        ))
        sess = create_session()
        # user id 10 (who appears in other fixtures) has no addresses and
        # is therefore excluded by the inner join
        eq_(
            [User(id=7, addresses=[Address(id=1)]),
             User(id=8,
                  addresses=[Address(id=2, email_address='ed@wood.com'),
                             Address(id=3, email_address='ed@bettyboop.com'),
                             Address(id=4, email_address='ed@lala.com'), ]),
             User(id=9, addresses=[Address(id=5)])], sess.query(User).all()
        )
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users JOIN "
            "addresses AS addresses_1 ON users.id = addresses_1.user_id "
            "ORDER BY addresses_1.id")
    def test_inner_join_unnested_chaining_options(self):
        """innerjoin="unnested" renders chained inner joins flat (no
        bracketing); joinedload(..., innerjoin=False) options degrade the
        affected level(s) back to LEFT OUTER JOIN."""
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)
        mapper(User, users, properties=dict(
            orders=relationship(Order, innerjoin="unnested",
                                lazy=False)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy=False,
                               innerjoin="unnested")
        ))
        mapper(Item, items)
        sess = create_session()
        # default: both levels render as flat JOINs
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM users "
            "JOIN orders AS orders_1 ON "
            "users.id = orders_1.user_id JOIN order_items AS order_items_1 "
            "ON orders_1.id = "
            "order_items_1.order_id JOIN items AS items_1 ON items_1.id = "
            "order_items_1.item_id"
        )
        # innerjoin=False at the first level: orders goes LEFT OUTER and
        # the items level nests as a bracketed right-hand side
        self.assert_compile(
            sess.query(User).options(joinedload(User.orders, innerjoin=False)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id"
        )
        # innerjoin=False only at the second level: orders stays JOIN,
        # the bracketed items join becomes LEFT OUTER
        self.assert_compile(
            sess.query(User).options(
                joinedload(
                    User.orders,
                    Order.items,
                    innerjoin=False)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users JOIN orders AS orders_1 ON "
            "users.id = orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id"
        )
    def test_inner_join_nested_chaining_negative_options(self):
        """Mapper-level innerjoin=True chains render as inner joins; a
        joinedload(..., innerjoin=False) option at the first level pushes
        the whole inner chain into a bracketed LEFT OUTER JOIN."""
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)
        mapper(User, users, properties=dict(
            orders=relationship(Order, innerjoin=True,
                                lazy=False, order_by=orders.c.id)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy=False,
                               innerjoin=True, order_by=items.c.id)
        ))
        mapper(Item, items)
        sess = create_session()
        # default: straight chained inner joins
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM users "
            "JOIN orders AS orders_1 ON "
            "users.id = orders_1.user_id JOIN order_items "
            "AS order_items_1 ON orders_1.id = "
            "order_items_1.order_id JOIN items AS items_1 ON items_1.id = "
            "order_items_1.item_id ORDER BY orders_1.id, items_1.id"
        )
        # innerjoin=False on 'orders': the whole inner chain becomes the
        # bracketed right side of a single LEFT OUTER JOIN
        q = sess.query(User).options(joinedload(User.orders, innerjoin=False))
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description "
            "AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN "
            "(orders AS orders_1 JOIN order_items AS order_items_1 "
            "ON orders_1.id = order_items_1.order_id "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON users.id = orders_1.user_id ORDER BY orders_1.id, items_1.id"
        )
        # users 8 and 10 have no orders and are kept by the outer join
        eq_(
            [
                User(id=7,
                     orders=[
                         Order(
                             id=1, items=[
                                 Item(
                                     id=1), Item(
                                     id=2), Item(
                                     id=3)]),
                         Order(
                             id=3, items=[
                                 Item(
                                     id=3), Item(
                                     id=4), Item(
                                     id=5)]),
                         Order(id=5, items=[Item(id=5)])]),
                User(id=8, orders=[]),
                User(id=9, orders=[
                    Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)]),
                    Order(id=4, items=[Item(id=1), Item(id=5)])
                ]
                ),
                User(id=10, orders=[])
            ],
            q.order_by(User.id).all()
        )
        # innerjoin=False only at 'orders.items': orders stays inner
        self.assert_compile(
            sess.query(User).options(
                joinedload(
                    User.orders,
                    Order.items,
                    innerjoin=False)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description AS "
            "orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users JOIN orders AS orders_1 ON users.id = "
            "orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id ORDER BY "
            "orders_1.id, items_1.id"
        )
    def test_inner_join_nested_chaining_positive_options(self):
        """Option-established innerjoin chain: ``orders`` outer, ``items``
        inner.

        The innerjoin=True on the nested "items" load must be spliced
        inside the LEFT OUTER JOIN created for "orders" (right-nested
        parenthesized join in the asserted SQL), so that Users with no
        Orders still appear in the result.
        """
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)
        mapper(User, users, properties=dict(
            orders=relationship(Order, order_by=orders.c.id)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(
                Item,
                secondary=order_items,
                order_by=items.c.id)
        ))
        mapper(Item, items)
        sess = create_session()
        # outer join for "orders", inner join for the chained "items"
        q = sess.query(User).options(
            joinedload("orders", innerjoin=False).
            joinedload("items", innerjoin=True)
        )
        # the inner "items" joins are nested inside the OUTER JOIN for orders
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS items_1_id, items_1.description "
            "AS items_1_description, "
            "orders_1.id AS orders_1_id, orders_1.user_id "
            "AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS "
            "orders_1_description, orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN (orders AS orders_1 "
            "JOIN order_items AS "
            "order_items_1 ON orders_1.id = order_items_1.order_id "
            "JOIN items AS "
            "items_1 ON items_1.id = order_items_1.item_id) "
            "ON users.id = orders_1.user_id "
            "ORDER BY orders_1.id, items_1.id"
        )
        # users 8 and 10 have no orders and must still be present
        eq_(
            [
                User(id=7,
                     orders=[
                         Order(
                             id=1, items=[
                                 Item(
                                     id=1), Item(
                                     id=2), Item(
                                     id=3)]),
                         Order(
                             id=3, items=[
                                 Item(
                                     id=3), Item(
                                     id=4), Item(
                                     id=5)]),
                         Order(id=5, items=[Item(id=5)])]),
                User(id=8, orders=[]),
                User(id=9, orders=[
                    Order(id=2, items=[Item(id=1), Item(id=2), Item(id=3)]),
                    Order(id=4, items=[Item(id=1), Item(id=5)])
                ]
                ),
                User(id=10, orders=[])
            ],
            q.order_by(User.id).all()
        )
    def test_unnested_outerjoin_propagation_only_on_correct_path(self):
        """innerjoin="unnested" on one sibling path is not demoted by an
        outer join on the other.

        "orders" is loaded with the default LEFT OUTER JOIN while
        "addresses" uses innerjoin="unnested"; the asserted SQL shows the
        addresses join stays a plain JOIN, unaffected by the sibling path.
        """
        User, users = self.classes.User, self.tables.users
        Order, orders = self.classes.Order, self.tables.orders
        Address, addresses = self.classes.Address, self.tables.addresses
        # odict keeps property order deterministic so the compiled
        # join order in the assertion below is stable
        mapper(User, users, properties=odict([
            ('orders', relationship(Order)),
            ('addresses', relationship(Address))
        ]))
        mapper(Order, orders)
        mapper(Address, addresses)
        sess = create_session()
        q = sess.query(User).options(
            joinedload("orders"),
            joinedload("addresses", innerjoin="unnested"),
        )
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id"
        )
    def test_nested_outerjoin_propagation_only_on_correct_path(self):
        """Same scenario as the "unnested" variant, but with plain
        innerjoin=True.

        The outer join used for the "orders" path must not propagate to
        the sibling "addresses" path; the asserted SQL keeps the
        addresses join as a plain JOIN.
        """
        User, users = self.classes.User, self.tables.users
        Order, orders = self.classes.Order, self.tables.orders
        Address, addresses = self.classes.Address, self.tables.addresses
        # odict keeps property order deterministic so the compiled
        # join order in the assertion below is stable
        mapper(User, users, properties=odict([
            ('orders', relationship(Order)),
            ('addresses', relationship(Address))
        ]))
        mapper(Order, orders)
        mapper(Address, addresses)
        sess = create_session()
        q = sess.query(User).options(
            joinedload("orders"),
            joinedload("addresses", innerjoin=True),
        )
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen, "
            "addresses_1.id AS addresses_1_id, "
            "addresses_1.user_id AS addresses_1_user_id, "
            "addresses_1.email_address AS addresses_1_email_address "
            "FROM users LEFT OUTER JOIN orders AS orders_1 "
            "ON users.id = orders_1.user_id JOIN addresses AS addresses_1 "
            "ON users.id = addresses_1.user_id"
        )
    def test_catch_the_right_target(self):
        """Eager-load joins must alias their own targets, not attach to
        tables already present via explicit ``Query.join()``.

        The query joins users->orders->items explicitly; the
        joinedload_all("orders.items.keywords") option must generate its
        own aliased orders_1/items_1 joins (see the LEFT OUTER JOINs in
        the asserted SQL) rather than reusing the plain-joined tables.
        """
        users, Keyword, orders, items, order_items, Order, Item, \
            User, keywords, item_keywords = (
                self.tables.users,
                self.classes.Keyword,
                self.tables.orders,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.tables.keywords,
                self.tables.item_keywords)
        mapper(User, users, properties={
            'orders': relationship(Order, backref='user'),
        })
        mapper(Order, orders, properties={
            'items': relationship(Item, secondary=order_items,
                                  order_by=items.c.id),
        })
        mapper(Item, items, properties={
            'keywords': relationship(Keyword, secondary=item_keywords,
                                     order_by=keywords.c.id)
        })
        mapper(Keyword, keywords)
        sess = create_session()
        q = sess.query(User).join(User.orders).join(Order.items).\
            options(joinedload_all("orders.items.keywords"))
        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "keywords_1.id AS keywords_1_id, keywords_1.name "
            "AS keywords_1_name, "
            "items_1.id AS items_1_id, items_1.description AS "
            "items_1_description, "
            "orders_1.id AS orders_1_id, orders_1.user_id AS "
            "orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users JOIN orders ON users.id = orders.user_id "
            "JOIN order_items AS order_items_1 ON orders.id = "
            "order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id "
            "LEFT OUTER JOIN orders AS orders_1 ON users.id = "
            "orders_1.user_id "
            "LEFT OUTER JOIN (order_items AS order_items_2 "
            "JOIN items AS items_1 ON items_1.id = order_items_2.item_id) "
            "ON orders_1.id = order_items_2.order_id "
            "LEFT OUTER JOIN (item_keywords AS item_keywords_1 "
            "JOIN keywords AS keywords_1 ON keywords_1.id = "
            "item_keywords_1.keyword_id) "
            "ON items_1.id = item_keywords_1.item_id "
            "ORDER BY items_1.id, keywords_1.id"
        )
    def test_inner_join_unnested_chaining_fixed(self):
        """Mapper-level innerjoin="unnested" on a secondary (m2m)
        relationship.

        When queried from User the chain is demoted to outer joins
        (orders itself is lazy=False with the default outer join), and
        the m2m secondary still compiles as a right-nested join; queried
        directly from Order the inner joins remain plain JOINs.
        """
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)
        mapper(User, users, properties=dict(
            orders=relationship(Order, lazy=False)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy=False,
                               innerjoin="unnested")
        ))
        mapper(Item, items)
        sess = create_session()
        # from User: outer joins throughout; the secondary join
        # (order_items + items) stays parenthesized / right-nested
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description AS "
            "orders_1_description, "
            "orders_1.isopen AS orders_1_isopen FROM users LEFT OUTER JOIN "
            "orders AS orders_1 ON "
            "users.id = orders_1.user_id LEFT OUTER JOIN "
            "(order_items AS order_items_1 JOIN items AS items_1 ON "
            "items_1.id = "
            "order_items_1.item_id) ON orders_1.id = "
            "order_items_1.order_id"
        )
        # from Order directly: innerjoin applies as plain JOINs
        self.assert_compile(
            sess.query(Order),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS orders_address_id, orders.description AS "
            "orders_description, orders.isopen AS orders_isopen, items_1.id "
            "AS items_1_id, items_1.description AS items_1_description FROM "
            "orders JOIN order_items AS order_items_1 ON orders.id = "
            "order_items_1.order_id JOIN items AS items_1 ON items_1.id = "
            "order_items_1.item_id"
        )
    def test_inner_join_nested_chaining_fixed(self):
        """Mapper-level innerjoin='nested' on the m2m "items"
        relationship.

        When queried from User, the inner joins for the "items" chain are
        spliced inside the LEFT OUTER JOIN used for "orders" (the entire
        orders/order_items/items chain is one parenthesized right-nested
        join in the asserted SQL).
        """
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)
        mapper(User, users, properties=dict(
            orders=relationship(Order, lazy=False)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy=False,
                               innerjoin='nested')
        ))
        mapper(Item, items)
        sess = create_session()
        self.assert_compile(
            sess.query(User),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS "
            "items_1_id, items_1.description AS items_1_description, "
            "orders_1.id AS "
            "orders_1_id, orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS "
            "orders_1_address_id, orders_1.description AS "
            "orders_1_description, "
            "orders_1.isopen AS orders_1_isopen "
            "FROM users LEFT OUTER JOIN "
            "(orders AS orders_1 JOIN order_items AS order_items_1 "
            "ON orders_1.id = order_items_1.order_id "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON users.id = orders_1.user_id"
        )
    def test_inner_join_options(self):
        """innerjoin=True supplied via loader options (and via backref
        kwargs).

        Covers: a single-level joinedload with innerjoin=True, a chained
        joinedload_all with innerjoin=True (all plain JOINs), a full
        round trip executed in one SQL statement, and an innerjoin=True
        established through backref() kwargs on the many-to-one
        Order.user.
        """
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)
        # backref() kwargs establish innerjoin=True on Order.user
        mapper(User, users, properties=dict(
            orders=relationship(Order, backref=backref('user', innerjoin=True),
                                order_by=orders.c.id)
        ))
        mapper(Order, orders, properties=dict(
            items=relationship(
                Item,
                secondary=order_items,
                order_by=items.c.id)
        ))
        mapper(Item, items)
        sess = create_session()
        # single-level option: plain JOIN to orders
        self.assert_compile(
            sess.query(User).options(joinedload(User.orders, innerjoin=True)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, orders_1.address_id AS "
            "orders_1_address_id, "
            "orders_1.description AS orders_1_description, orders_1.isopen "
            "AS orders_1_isopen "
            "FROM users JOIN orders AS orders_1 ON users.id = "
            "orders_1.user_id ORDER BY orders_1.id")
        # chained option: all joins in the chain are inner
        self.assert_compile(
            sess.query(User).options(
                joinedload_all(User.orders, Order.items, innerjoin=True)),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "items_1.id AS items_1_id, "
            "items_1.description AS items_1_description, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, orders_1.address_id "
            "AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, orders_1.isopen "
            "AS orders_1_isopen "
            "FROM users JOIN orders AS orders_1 ON users.id = "
            "orders_1.user_id JOIN order_items AS "
            "order_items_1 ON orders_1.id = order_items_1.order_id "
            "JOIN items AS items_1 ON "
            "items_1.id = order_items_1.item_id ORDER BY orders_1.id, "
            "items_1.id")
        def go():
            # inner joins drop users with no orders (8 and 10 absent);
            # everything loads in a single statement
            eq_(
                sess.query(User).options(
                    joinedload(User.orders, innerjoin=True),
                    joinedload(User.orders, Order.items, innerjoin=True)).
                order_by(User.id).all(),
                [User(id=7,
                      orders=[
                          Order(
                              id=1, items=[
                                  Item(
                                      id=1), Item(
                                      id=2), Item(
                                      id=3)]),
                          Order(
                              id=3, items=[
                                  Item(
                                      id=3), Item(
                                      id=4), Item(
                                      id=5)]),
                          Order(id=5, items=[Item(id=5)])]),
                 User(id=9, orders=[
                     Order(
                         id=2, items=[
                             Item(
                                 id=1), Item(
                                 id=2), Item(
                                 id=3)]),
                     Order(id=4, items=[Item(id=1), Item(id=5)])])
                 ]
            )
        self.assert_sql_count(testing.db, go, 1)
        # the backref-configured innerjoin=True on Order.user compiles
        # as a plain JOIN when the joinedload option is used
        self.assert_compile(
            sess.query(Order).options(
                joinedload(
                    Order.user)).filter(
                Order.description == 'foo'),
            "SELECT orders.id AS orders_id, orders.user_id AS orders_user_id, "
            "orders.address_id AS "
            "orders_address_id, orders.description AS orders_description, "
            "orders.isopen AS "
            "orders_isopen, users_1.id AS users_1_id, users_1.name "
            "AS users_1_name "
            "FROM orders JOIN users AS users_1 ON users_1.id = orders.user_id "
            "WHERE orders.description = :description_1"
        )
    def test_propagated_lazyload_wildcard_unbound(self):
        """Run the wildcard-lazyload scenario using unbound options
        (module-level defaultload())."""
        self._test_propagated_lazyload_wildcard(False)
    def test_propagated_lazyload_wildcard_bound(self):
        """Run the wildcard-lazyload scenario using bound options
        (Load(User).defaultload())."""
        self._test_propagated_lazyload_wildcard(True)
    def _test_propagated_lazyload_wildcard(self, use_load):
        """Shared body: lazyload("*") under defaultload("orders") cancels
        the mapper-level joined eager load of Order.items.

        :param use_load: if True, use the bound Load(User) form of the
            option; otherwise use the unbound defaultload() form.
        Verified by the emitted SQL: the orders query contains no join to
        items.
        """
        users, items, order_items, Order, Item, User, orders = (
            self.tables.users,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.orders)
        mapper(User, users, properties=dict(
            orders=relationship(Order, lazy="select")
        ))
        # Order.items would normally joined-eager-load; the wildcard
        # option below must suppress that
        mapper(Order, orders, properties=dict(
            items=relationship(Item, secondary=order_items, lazy="joined")
        ))
        mapper(Item, items)
        sess = create_session()
        if use_load:
            opt = Load(User).defaultload("orders").lazyload("*")
        else:
            opt = defaultload("orders").lazyload("*")
        q = sess.query(User).filter(User.id == 7).options(opt)
        def go():
            for u in q:
                u.orders
        # two plain SELECTs, neither joining to items
        self.sql_eq_(go, [
            ("SELECT users.id AS users_id, users.name AS users_name "
             "FROM users WHERE users.id = :id_1", {"id_1": 7}),
            ("SELECT orders.id AS orders_id, "
             "orders.user_id AS orders_user_id, "
             "orders.address_id AS orders_address_id, "
             "orders.description AS orders_description, "
             "orders.isopen AS orders_isopen FROM orders "
             "WHERE :param_1 = orders.user_id", {"param_1": 7}),
        ])
class InnerJoinSplicingTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
    """Test splicing of innerjoin=True joined-eager joins into enclosing
    LEFT OUTER JOINs across a tree of relationships
    (A->B->{C1->D1->E1, C2->D2}).

    The asserted SQL in each test shows inner joins being pulled inside
    the right-nested parenthesized join group of their outer-joined
    ancestor, across multiple sibling paths and multiple splice points.
    """
    __dialect__ = 'default'
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # a -< b -< (c1 -< d1 -< e1), (c2 -< d2): simple FK chains
        Table('a', metadata,
              Column('id', Integer, primary_key=True)
              )
        Table('b', metadata,
              Column('id', Integer, primary_key=True),
              Column('a_id', Integer, ForeignKey('a.id')),
              Column('value', String(10)),
              )
        Table('c1', metadata,
              Column('id', Integer, primary_key=True),
              Column('b_id', Integer, ForeignKey('b.id')),
              Column('value', String(10)),
              )
        Table('c2', metadata,
              Column('id', Integer, primary_key=True),
              Column('b_id', Integer, ForeignKey('b.id')),
              Column('value', String(10)),
              )
        Table('d1', metadata,
              Column('id', Integer, primary_key=True),
              Column('c1_id', Integer, ForeignKey('c1.id')),
              Column('value', String(10)),
              )
        Table('d2', metadata,
              Column('id', Integer, primary_key=True),
              Column('c2_id', Integer, ForeignKey('c2.id')),
              Column('value', String(10)),
              )
        Table('e1', metadata,
              Column('id', Integer, primary_key=True),
              Column('d1_id', Integer, ForeignKey('d1.id')),
              Column('value', String(10)),
              )
    @classmethod
    def setup_classes(cls):
        # plain comparable entities, one per table
        class A(cls.Comparable):
            pass
        class B(cls.Comparable):
            pass
        class C1(cls.Comparable):
            pass
        class C2(cls.Comparable):
            pass
        class D1(cls.Comparable):
            pass
        class D2(cls.Comparable):
            pass
        class E1(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        A, B, C1, C2, D1, D2, E1 = (
            cls.classes.A, cls.classes.B, cls.classes.C1,
            cls.classes.C2, cls.classes.D1, cls.classes.D2, cls.classes.E1)
        mapper(A, cls.tables.a, properties={
            'bs': relationship(B)
        })
        # odict keeps c1s/c2s property order deterministic so compiled
        # join order in the assertions is stable
        mapper(B, cls.tables.b, properties=odict([
            ('c1s', relationship(C1, order_by=cls.tables.c1.c.id)),
            ('c2s', relationship(C2, order_by=cls.tables.c2.c.id))
        ]))
        mapper(C1, cls.tables.c1, properties={
            'd1s': relationship(D1, order_by=cls.tables.d1.c.id)
        })
        mapper(C2, cls.tables.c2, properties={
            'd2s': relationship(D2, order_by=cls.tables.d2.c.id)
        })
        mapper(D1, cls.tables.d1, properties={
            'e1s': relationship(E1, order_by=cls.tables.e1.c.id)
        })
        mapper(D2, cls.tables.d2)
        mapper(E1, cls.tables.e1)
    @classmethod
    def _fixture_data(cls):
        """Return the canonical object graph; used both to populate the
        database and as the expected result in _assert_result()."""
        A, B, C1, C2, D1, D2, E1 = (
            cls.classes.A, cls.classes.B, cls.classes.C1,
            cls.classes.C2, cls.classes.D1, cls.classes.D2, cls.classes.E1)
        return [
            A(id=1, bs=[
                B(
                    id=1,
                    c1s=[C1(
                        id=1, value='C11',
                        d1s=[
                            D1(id=1, e1s=[E1(id=1)]), D1(id=2, e1s=[E1(id=2)])
                        ]
                    )
                    ],
                    c2s=[C2(id=1, value='C21', d2s=[D2(id=3)]),
                         C2(id=2, value='C22', d2s=[D2(id=4)])]
                ),
                B(
                    id=2,
                    c1s=[
                        C1(
                            id=4, value='C14',
                            d1s=[D1(
                                id=3, e1s=[
                                    E1(id=3, value='E13'),
                                    E1(id=4, value="E14")
                                ]),
                                D1(id=4, e1s=[E1(id=5)])
                            ]
                        )
                    ],
                    c2s=[C2(id=4, value='C24', d2s=[])]
                ),
            ]),
            A(id=2, bs=[
                B(
                    id=3,
                    c1s=[
                        C1(
                            id=8,
                            d1s=[D1(id=5, value='D15', e1s=[E1(id=6)])]
                        )
                    ],
                    c2s=[C2(id=8, d2s=[D2(id=6, value='D26')])]
                )
            ])
        ]
    @classmethod
    def insert_data(cls):
        s = Session(testing.db)
        s.add_all(cls._fixture_data())
        s.commit()
    def _assert_result(self, query):
        # full round trip must reproduce the fixture graph exactly
        eq_(
            query.all(),
            self._fixture_data()
        )
    def test_nested_innerjoin_propagation_multiple_paths_one(self):
        """innerjoin chains on two sibling paths (c1s->d1s inner,
        c2s inner with d2s outer) under an outer-joined 'bs'.

        All inner joins are spliced into the single right-nested group
        for b_1; the outer d2 join stays outside it.
        """
        A, B, C1, C2 = (
            self.classes.A, self.classes.B, self.classes.C1,
            self.classes.C2)
        s = Session()
        q = s.query(A).options(
            joinedload(A.bs, innerjoin=False).
            joinedload(B.c1s, innerjoin=True).
            joinedload(C1.d1s, innerjoin=True),
            defaultload(A.bs).joinedload(B.c2s, innerjoin=True).
            joinedload(C2.d2s, innerjoin=False)
        )
        self.assert_compile(
            q,
            "SELECT a.id AS a_id, d1_1.id AS d1_1_id, "
            "d1_1.c1_id AS d1_1_c1_id, d1_1.value AS d1_1_value, "
            "c1_1.id AS c1_1_id, c1_1.b_id AS c1_1_b_id, "
            "c1_1.value AS c1_1_value, d2_1.id AS d2_1_id, "
            "d2_1.c2_id AS d2_1_c2_id, d2_1.value AS d2_1_value, "
            "c2_1.id AS c2_1_id, c2_1.b_id AS c2_1_b_id, "
            "c2_1.value AS c2_1_value, b_1.id AS b_1_id, "
            "b_1.a_id AS b_1_a_id, b_1.value AS b_1_value "
            "FROM a "
            "LEFT OUTER JOIN "
            "(b AS b_1 JOIN c2 AS c2_1 ON b_1.id = c2_1.b_id "
            "JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id "
            "JOIN d1 AS d1_1 ON c1_1.id = d1_1.c1_id) ON a.id = b_1.a_id "
            "LEFT OUTER JOIN d2 AS d2_1 ON c2_1.id = d2_1.c2_id "
            "ORDER BY c1_1.id, d1_1.id, c2_1.id, d2_1.id"
        )
        self._assert_result(q)
    def test_nested_innerjoin_propagation_multiple_paths_two(self):
        """String-path variant: inner c1s/c2s under a default-outer 'bs',
        plus an outer d1s hanging off the inner c1s."""
        A = self.classes.A
        s = Session()
        q = s.query(A).options(
            joinedload('bs'),
            joinedload('bs.c2s', innerjoin=True),
            joinedload('bs.c1s', innerjoin=True),
            joinedload('bs.c1s.d1s')
        )
        self.assert_compile(
            q,
            "SELECT a.id AS a_id, d1_1.id AS d1_1_id, "
            "d1_1.c1_id AS d1_1_c1_id, d1_1.value AS d1_1_value, "
            "c1_1.id AS c1_1_id, c1_1.b_id AS c1_1_b_id, "
            "c1_1.value AS c1_1_value, c2_1.id AS c2_1_id, "
            "c2_1.b_id AS c2_1_b_id, c2_1.value AS c2_1_value, "
            "b_1.id AS b_1_id, b_1.a_id AS b_1_a_id, "
            "b_1.value AS b_1_value "
            "FROM a LEFT OUTER JOIN "
            "(b AS b_1 JOIN c2 AS c2_1 ON b_1.id = c2_1.b_id "
            "JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id) ON a.id = b_1.a_id "
            "LEFT OUTER JOIN d1 AS d1_1 ON c1_1.id = d1_1.c1_id "
            "ORDER BY c1_1.id, d1_1.id, c2_1.id"
        )
        self._assert_result(q)
    def test_multiple_splice_points(self):
        """Inner joins spliced at two separate points: into the b_1 group
        (c1s/c2s) and into the d1_1 group (e1s), with outer joins in
        between."""
        A = self.classes.A
        s = Session()
        q = s.query(A).options(
            joinedload('bs', innerjoin=False),
            joinedload('bs.c1s', innerjoin=True),
            joinedload('bs.c2s', innerjoin=True),
            joinedload('bs.c1s.d1s', innerjoin=False),
            joinedload('bs.c2s.d2s'),
            joinedload('bs.c1s.d1s.e1s', innerjoin=True)
        )
        self.assert_compile(
            q,
            "SELECT a.id AS a_id, e1_1.id AS e1_1_id, "
            "e1_1.d1_id AS e1_1_d1_id, e1_1.value AS e1_1_value, "
            "d1_1.id AS d1_1_id, d1_1.c1_id AS d1_1_c1_id, "
            "d1_1.value AS d1_1_value, c1_1.id AS c1_1_id, "
            "c1_1.b_id AS c1_1_b_id, c1_1.value AS c1_1_value, "
            "d2_1.id AS d2_1_id, d2_1.c2_id AS d2_1_c2_id, "
            "d2_1.value AS d2_1_value, c2_1.id AS c2_1_id, "
            "c2_1.b_id AS c2_1_b_id, c2_1.value AS c2_1_value, "
            "b_1.id AS b_1_id, b_1.a_id AS b_1_a_id, b_1.value AS b_1_value "
            "FROM a LEFT OUTER JOIN "
            "(b AS b_1 JOIN c2 AS c2_1 ON b_1.id = c2_1.b_id "
            "JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id) ON a.id = b_1.a_id "
            "LEFT OUTER JOIN ("
            "d1 AS d1_1 JOIN e1 AS e1_1 ON d1_1.id = e1_1.d1_id) "
            "ON c1_1.id = d1_1.c1_id "
            "LEFT OUTER JOIN d2 AS d2_1 ON c2_1.id = d2_1.c2_id "
            "ORDER BY c1_1.id, d1_1.id, e1_1.id, c2_1.id, d2_1.id"
        )
        self._assert_result(q)
    def test_splice_onto_np_mapper(self):
        """Splicing an innerjoin eager load onto a non-primary mapper
        whose selectable is itself a join (b OUTER JOIN c1)."""
        A = self.classes.A
        B = self.classes.B
        C1 = self.classes.C1
        b_table = self.tables.b
        c1_table = self.tables.c1
        from sqlalchemy import inspect
        # non-primary mapper over a composite selectable
        weird_selectable = b_table.outerjoin(c1_table)
        b_np = mapper(
            B, weird_selectable, non_primary=True, properties=odict([
                # note we need to make this fixed with lazy=False until
                # [ticket:3348] is resolved
                ('c1s', relationship(C1, lazy=False, innerjoin=True)),
                ('c_id', c1_table.c.id),
                ('b_value', b_table.c.value),
            ])
        )
        a_mapper = inspect(A)
        a_mapper.add_property(
            "bs_np", relationship(b_np)
        )
        s = Session()
        q = s.query(A).options(
            joinedload('bs_np', innerjoin=False)
        )
        # the innerjoin for c1s is spliced inside the outer-joined,
        # already-compound b_1 selectable
        self.assert_compile(
            q,
            "SELECT a.id AS a_id, c1_1.id AS c1_1_id, c1_1.b_id AS c1_1_b_id, "
            "c1_1.value AS c1_1_value, c1_2.id AS c1_2_id, "
            "b_1.value AS b_1_value, b_1.id AS b_1_id, "
            "b_1.a_id AS b_1_a_id, c1_2.b_id AS c1_2_b_id, "
            "c1_2.value AS c1_2_value "
            "FROM a LEFT OUTER JOIN "
            "(b AS b_1 LEFT OUTER JOIN c1 AS c1_2 ON b_1.id = c1_2.b_id "
            "JOIN c1 AS c1_1 ON b_1.id = c1_1.b_id) ON a.id = b_1.a_id"
        )
class InnerJoinSplicingWSecondaryTest(
        fixtures.MappedTest, testing.AssertsCompiledSQL):
    """Inner-join splicing when the chain ends in a many-to-many
    relationship (A->B->C->ds via the 'ctod' secondary table).

    The secondary join must itself be right-nested inside the spliced
    inner-join group, as shown in the asserted SQL.
    """
    __dialect__ = 'default'
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        # a -> b -> c, then c <-> d many-to-many through 'ctod'
        Table(
            'a', metadata,
            Column('id', Integer, primary_key=True),
            Column('bid', ForeignKey('b.id'))
        )
        Table(
            'b', metadata,
            Column('id', Integer, primary_key=True),
            Column('cid', ForeignKey('c.id'))
        )
        Table(
            'c', metadata,
            Column('id', Integer, primary_key=True),
        )
        Table('ctod', metadata,
              Column('cid', ForeignKey('c.id'), primary_key=True),
              Column('did', ForeignKey('d.id'), primary_key=True),
              )
        Table('d', metadata,
              Column('id', Integer, primary_key=True),
              )
    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass
        class B(cls.Comparable):
            pass
        class C(cls.Comparable):
            pass
        class D(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        A, B, C, D = (
            cls.classes.A, cls.classes.B, cls.classes.C,
            cls.classes.D)
        mapper(A, cls.tables.a, properties={
            'b': relationship(B)
        })
        mapper(B, cls.tables.b, properties=odict([
            ('c', relationship(C)),
        ]))
        # 'ds' is the many-to-many leg through the ctod secondary
        mapper(C, cls.tables.c, properties=odict([
            ('ds', relationship(D, secondary=cls.tables.ctod,
                                order_by=cls.tables.d.c.id)),
        ]))
        mapper(D, cls.tables.d)
    @classmethod
    def _fixture_data(cls):
        """Canonical object graph; note d2 is shared between both C
        collections (legitimate for many-to-many)."""
        A, B, C, D = (
            cls.classes.A, cls.classes.B, cls.classes.C,
            cls.classes.D)
        d1, d2, d3 = D(id=1), D(id=2), D(id=3)
        return [
            A(
                id=1,
                b=B(
                    id=1,
                    c=C(
                        id=1,
                        ds=[d1, d2]
                    )
                )
            ),
            A(
                id=2,
                b=B(
                    id=2,
                    c=C(
                        id=2,
                        ds=[d2, d3]
                    )
                )
            )
        ]
    @classmethod
    def insert_data(cls):
        s = Session(testing.db)
        s.add_all(cls._fixture_data())
        s.commit()
    def _assert_result(self, query):
        # round trip must match the fixture and run in exactly one query
        def go():
            eq_(
                query.all(),
                self._fixture_data()
            )
        self.assert_sql_count(
            testing.db,
            go,
            1
        )
    def test_joined_across(self):
        """Outer 'b' with inner 'c' and inner m2m 'ds': the secondary
        (ctod) and d joins nest inside the single outer-joined group."""
        A = self.classes.A
        s = Session()
        q = s.query(A) \
            .options(
                joinedload('b').
                joinedload('c', innerjoin=True).
                joinedload('ds', innerjoin=True))
        self.assert_compile(
            q,
            "SELECT a.id AS a_id, a.bid AS a_bid, d_1.id AS d_1_id, "
            "c_1.id AS c_1_id, b_1.id AS b_1_id, b_1.cid AS b_1_cid "
            "FROM a LEFT OUTER JOIN "
            "(b AS b_1 JOIN "
            "(c AS c_1 JOIN ctod AS ctod_1 ON c_1.id = ctod_1.cid) "
            "ON c_1.id = b_1.cid "
            "JOIN d AS d_1 ON d_1.id = ctod_1.did) ON b_1.id = a.bid "
            "ORDER BY d_1.id"
        )
        self._assert_result(q)
class SubqueryAliasingTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
    """When a LIMITed query with joined eager loads is wrapped in a
    subquery, correlated-subquery ORDER BY expressions must be aliased
    into the wrapping subquery (as anon_N / labeled columns) rather than
    re-correlated against the inner table.
    """
    __dialect__ = 'default'
    run_create_tables = None
    @classmethod
    def define_tables(cls, metadata):
        Table('a', metadata,
              Column('id', Integer, primary_key=True)
              )
        Table('b', metadata,
              Column('id', Integer, primary_key=True),
              Column('a_id', Integer, ForeignKey('a.id')),
              Column('value', Integer),
              )
    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass
        class B(cls.Comparable):
            pass
    def _fixture(self, props):
        """Map A with the given extra properties and B with a backref
        'bs' collection on A (the collection the tests eagerly load)."""
        A, B = self.classes.A, self.classes.B
        b_table, a_table = self.tables.b, self.tables.a
        mapper(A, a_table, properties=props)
        mapper(B, b_table, properties={
            'a': relationship(A, backref="bs")
        })
    def test_column_property(self):
        """ORDER BY on a correlated column_property is re-expressed via
        the subquery's anon label."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        # scalar subquery: total of related b.value rows
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id)
        self._fixture({
            'summation': column_property(cp)
        })
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(A.summation).
            limit(50),
            "SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.a_id "
            "AS anon_1_a_id, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT "
            "(SELECT sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) "
            "AS anon_2, a.id AS a_id FROM a ORDER BY anon_2 "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 ON "
            "anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2"
        )
    def test_column_property_desc(self):
        """Same as test_column_property but descending; DESC must be
        carried both inside and outside the subquery."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id)
        self._fixture({
            'summation': column_property(cp)
        })
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(A.summation.desc()).
            limit(50),
            "SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.a_id "
            "AS anon_1_a_id, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT "
            "(SELECT sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) "
            "AS anon_2, a.id AS a_id FROM a ORDER BY anon_2 DESC "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 ON "
            "anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2 DESC"
        )
    def test_column_property_correlated(self):
        """Explicit .correlate(a_table) on the column_property subquery
        yields the same aliasing behavior."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id).\
            correlate(a_table)
        self._fixture({
            'summation': column_property(cp)
        })
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(A.summation).
            limit(50),
            "SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.a_id "
            "AS anon_1_a_id, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT "
            "(SELECT sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) "
            "AS anon_2, a.id AS a_id FROM a ORDER BY anon_2 "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 ON "
            "anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2"
        )
    def test_standalone_subquery_unlabeled(self):
        """A free-standing (non-mapped) unlabeled scalar subquery in
        ORDER BY is added to the inner columns as anon_2 and referenced
        by that label outside."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        self._fixture({})
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id).\
            correlate(a_table).as_scalar()
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(cp).
            limit(50),
            "SELECT anon_1.a_id AS anon_1_a_id, anon_1.anon_2 "
            "AS anon_1_anon_2, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT a.id "
            "AS a_id, (SELECT sum(b.value) AS sum_1 FROM b WHERE "
            "b.a_id = a.id) AS anon_2 FROM a ORDER BY (SELECT "
            "sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 "
            "ON anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2"
        )
    def test_standalone_subquery_labeled(self):
        """Same as the unlabeled case, but an explicit .label('foo')
        names the aliased column inside and outside the subquery."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        self._fixture({})
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id).\
            correlate(a_table).as_scalar().label('foo')
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(cp).
            limit(50),
            "SELECT anon_1.a_id AS anon_1_a_id, anon_1.foo "
            "AS anon_1_foo, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT a.id "
            "AS a_id, (SELECT sum(b.value) AS sum_1 FROM b WHERE "
            "b.a_id = a.id) AS foo FROM a ORDER BY foo "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 "
            "ON anon_1.a_id = b_1.a_id ORDER BY "
            "anon_1.foo"
        )
    def test_standalone_negated(self):
        """The scalar subquery wrapped in NOT (a compound expression)
        still round-trips through the subquery's anon label."""
        A = self.classes.A
        b_table, a_table = self.tables.b, self.tables.a
        self._fixture({})
        cp = select([func.sum(b_table.c.value)]).\
            where(b_table.c.a_id == a_table.c.id).\
            correlate(a_table).\
            as_scalar()
        self.assert_compile(
            create_session().query(A).options(joinedload_all('bs')).
            order_by(~cp).
            limit(50),
            "SELECT anon_1.a_id AS anon_1_a_id, anon_1.anon_2 "
            "AS anon_1_anon_2, b_1.id AS b_1_id, b_1.a_id AS "
            "b_1_a_id, b_1.value AS b_1_value FROM (SELECT a.id "
            "AS a_id, NOT (SELECT sum(b.value) AS sum_1 FROM b "
            "WHERE b.a_id = a.id) FROM a ORDER BY NOT (SELECT "
            "sum(b.value) AS sum_1 FROM b WHERE b.a_id = a.id) "
            "LIMIT :param_1) AS anon_1 LEFT OUTER JOIN b AS b_1 "
            "ON anon_1.a_id = b_1.a_id ORDER BY anon_1.anon_2"
        )
class LoadOnExistingTest(_fixtures.FixtureTest):
    """Test that joined eager loaders refresh related collections on
    objects already present in the identity map, while leaving
    pending (session-added but unflushed) members of those collections
    untouched.
    """
    run_inserts = 'once'
    run_deletes = None
    def _collection_to_scalar_fixture(self):
        """User -< addresses, Address -> dingaling (collection then
        scalar); returns the mapped classes plus a non-autoflush
        Session so pending objects aren't flushed by the test queries."""
        User, Address, Dingaling = self.classes.User, \
            self.classes.Address, self.classes.Dingaling
        mapper(User, self.tables.users, properties={
            'addresses': relationship(Address),
        })
        mapper(Address, self.tables.addresses, properties={
            'dingaling': relationship(Dingaling)
        })
        mapper(Dingaling, self.tables.dingalings)
        sess = Session(autoflush=False)
        return User, Address, Dingaling, sess
    def _collection_to_collection_fixture(self):
        """User -< orders, Order -< items via secondary (collection then
        collection); returns classes plus a non-autoflush Session."""
        User, Order, Item = self.classes.User, \
            self.classes.Order, self.classes.Item
        mapper(User, self.tables.users, properties={
            'orders': relationship(Order),
        })
        mapper(Order, self.tables.orders, properties={
            'items': relationship(Item, secondary=self.tables.order_items),
        })
        mapper(Item, self.tables.items)
        sess = Session(autoflush=False)
        return User, Order, Item, sess
    def _eager_config_fixture(self):
        """User with mapper-level lazy="joined" addresses."""
        User, Address = self.classes.User, self.classes.Address
        mapper(User, self.tables.users, properties={
            'addresses': relationship(Address, lazy="joined"),
        })
        mapper(Address, self.tables.addresses)
        sess = Session(autoflush=False)
        return User, Address, sess
    def test_no_query_on_refresh(self):
        """Refreshing an expired object does not joined-eager-load its
        collection: one SQL statement, 'addresses' stays unloaded."""
        User, Address, sess = self._eager_config_fixture()
        u1 = sess.query(User).get(8)
        assert 'addresses' in u1.__dict__
        sess.expire(u1)
        def go():
            eq_(u1.id, 8)
        self.assert_sql_count(testing.db, go, 1)
        assert 'addresses' not in u1.__dict__
    def test_loads_second_level_collection_to_scalar(self):
        """Eager load over an existing collection populates 'dingaling'
        on persistent members only; the pending Address a1 and the local
        change to a2 are left intact."""
        User, Address, Dingaling, sess = self._collection_to_scalar_fixture()
        u1 = sess.query(User).get(8)
        a1 = Address()
        u1.addresses.append(a1)       # pending, never flushed
        a2 = u1.addresses[0]
        a2.email_address = 'foo'      # local modification must survive
        sess.query(User).options(joinedload_all("addresses.dingaling")).\
            filter_by(id=8).all()
        assert u1.addresses[-1] is a1
        for a in u1.addresses:
            if a is not a1:
                assert 'dingaling' in a.__dict__
            else:
                assert 'dingaling' not in a.__dict__
            if a is a2:
                eq_(a2.email_address, 'foo')
    def test_loads_second_level_collection_to_collection(self):
        """Same pattern one level deeper on collections: persistent
        Orders get 'items' populated, the pending Order o1 does not."""
        User, Order, Item, sess = self._collection_to_collection_fixture()
        u1 = sess.query(User).get(7)
        u1.orders
        o1 = Order()
        u1.orders.append(o1)          # pending, never flushed
        sess.query(User).options(joinedload_all("orders.items")).\
            filter_by(id=7).all()
        for o in u1.orders:
            if o is not o1:
                assert 'items' in o.__dict__
            else:
                assert 'items' not in o.__dict__
    def test_load_two_levels_collection_to_scalar(self):
        """A second query with a deeper joinedload_all chain populates
        the second level on the already-loaded first-level objects."""
        User, Address, Dingaling, sess = self._collection_to_scalar_fixture()
        u1 = sess.query(User).filter_by(
            id=8).options(
            joinedload("addresses")).one()
        sess.query(User).filter_by(
            id=8).options(
            joinedload_all("addresses.dingaling")).first()
        assert 'dingaling' in u1.addresses[0].__dict__
    def test_load_two_levels_collection_to_collection(self):
        """Collection-to-collection variant of the two-level refresh."""
        User, Order, Item, sess = self._collection_to_collection_fixture()
        u1 = sess.query(User).filter_by(
            id=7).options(
            joinedload("orders")).one()
        sess.query(User).filter_by(
            id=7).options(
            joinedload_all("orders.items")).first()
        assert 'items' in u1.orders[0].__dict__
class AddEntityTest(_fixtures.FixtureTest):
    """Joined eager loading combined with multi-entity queries
    (query(User, aliased Order)): eager loads on both entities must
    resolve in a single statement.
    """
    run_inserts = 'once'
    run_deletes = None
    def _assert_result(self):
        """Expected (User, Order) tuples for users 7 and 9 joined to
        their orders, ordered by user id then order id."""
        Item, Address, Order, User = (self.classes.Item,
                                      self.classes.Address,
                                      self.classes.Order,
                                      self.classes.User)
        return [
            (
                User(id=7,
                     addresses=[Address(id=1)]
                     ),
                Order(id=1,
                      items=[Item(id=1), Item(id=2), Item(id=3)]
                      ),
            ),
            (
                User(id=7,
                     addresses=[Address(id=1)]
                     ),
                Order(id=3,
                      items=[Item(id=3), Item(id=4), Item(id=5)]
                      ),
            ),
            (
                User(id=7,
                     addresses=[Address(id=1)]
                     ),
                Order(id=5,
                      items=[Item(id=5)]
                      ),
            ),
            (
                User(id=9,
                     addresses=[Address(id=5)]
                     ),
                Order(id=2,
                      items=[Item(id=1), Item(id=2), Item(id=3)]
                      ),
            ),
            (
                User(id=9,
                     addresses=[Address(id=5)]
                     ),
                Order(id=4,
                      items=[Item(id=1), Item(id=5)]
                      ),
            )
        ]
    def test_mapper_configured(self):
        """Mapper-level lazy='joined' on both entities: the whole
        two-entity result loads in exactly one statement."""
        users, items, order_items, Order, \
            Item, User, Address, orders, addresses = (
                self.tables.users,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.tables.orders,
                self.tables.addresses)
        mapper(User, users, properties={
            'addresses': relationship(Address, lazy='joined'),
            'orders': relationship(Order)
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items': relationship(
                Item, secondary=order_items, lazy='joined',
                order_by=items.c.id)
        })
        mapper(Item, items)
        sess = create_session()
        oalias = sa.orm.aliased(Order)
        def go():
            ret = sess.query(User, oalias).join(oalias, 'orders').\
                order_by(User.id, oalias.id).all()
            eq_(ret, self._assert_result())
        self.assert_sql_count(testing.db, go, 1)
    def test_options(self):
        """Option-driven eager loads on a multi-entity query.

        With only User.addresses eagerly loaded, the aliased Order's
        'items' lazy-loads (6 statements); adding joinedload on the
        alias's 'items' brings it down to one statement.
        """
        users, items, order_items, Order,\
            Item, User, Address, orders, addresses = (
                self.tables.users,
                self.tables.items,
                self.tables.order_items,
                self.classes.Order,
                self.classes.Item,
                self.classes.User,
                self.classes.Address,
                self.tables.orders,
                self.tables.addresses)
        mapper(User, users, properties={
            'addresses': relationship(Address),
            'orders': relationship(Order)
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items': relationship(
                Item, secondary=order_items, order_by=items.c.id)
        })
        mapper(Item, items)
        sess = create_session()
        oalias = sa.orm.aliased(Order)
        def go():
            ret = sess.query(User, oalias).options(joinedload('addresses')).\
                join(oalias, 'orders').\
                order_by(User.id, oalias.id).all()
            eq_(ret, self._assert_result())
        self.assert_sql_count(testing.db, go, 6)
        sess.expunge_all()
        def go():
            ret = sess.query(User, oalias).\
                options(joinedload('addresses'),
                        joinedload(oalias.items)).\
                join(oalias, 'orders').\
                order_by(User.id, oalias.id).all()
            eq_(ret, self._assert_result())
        self.assert_sql_count(testing.db, go, 1)
class OrderBySecondaryTest(fixtures.MappedTest):
    """A joined-eager many-to-many collection can be ordered by a column
    on the secondary (association) table itself."""

    @classmethod
    def define_tables(cls, metadata):
        Table('m2m', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('aid', Integer, ForeignKey('a.id')),
              Column('bid', Integer, ForeignKey('b.id')))
        Table('a', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('data', String(50)))
        Table('b', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('data', String(50)))

    @classmethod
    def fixtures(cls):
        # m2m rows are deliberately inserted with shuffled ids so that
        # ordering by m2m.c.id is observable in the loaded collections
        return dict(
            a=(('id', 'data'),
               (1, 'a1'),
               (2, 'a2')),

            b=(('id', 'data'),
               (1, 'b1'),
               (2, 'b2'),
               (3, 'b3'),
               (4, 'b4')),

            m2m=(('id', 'aid', 'bid'),
                 (2, 1, 1),
                 (4, 2, 4),
                 (1, 1, 3),
                 (6, 2, 2),
                 (3, 1, 2),
                 (5, 2, 3)))

    def test_ordering(self):
        """The 'bs' collection follows m2m.id order, not b.id order."""
        a, m2m, b = (
            self.tables.a,
            self.tables.m2m,
            self.tables.b)

        class A(fixtures.ComparableEntity):
            pass

        class B(fixtures.ComparableEntity):
            pass

        mapper(A, a, properties={
            'bs': relationship(
                B, secondary=m2m, lazy='joined', order_by=m2m.c.id)
        })
        mapper(B, b)

        sess = create_session()
        eq_(sess.query(A).all(),
            [
                A(data='a1', bs=[B(data='b3'), B(data='b1'), B(data='b2')]),
                A(bs=[B(data='b4'), B(data='b3'), B(data='b2')])
            ])
class SelfReferentialEagerTest(fixtures.MappedTest):
    """Joined-eager loading of a self-referential adjacency-list
    relationship ('nodes'), exercising the join_depth parameter, the
    interaction with deferred columns, and per-query options."""

    @classmethod
    def define_tables(cls, metadata):
        Table('nodes', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('parent_id', Integer, ForeignKey('nodes.id')),
              Column('data', String(30)))

    def test_basic(self):
        """join_depth=3 eager-loads the full three-level tree in one
        statement, both via all() and via first() (LIMIT path)."""
        nodes = self.tables.nodes

        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)

        mapper(Node, nodes, properties={
            'children': relationship(Node,
                                     lazy='joined',
                                     join_depth=3, order_by=nodes.c.id)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()

        def go():
            d = sess.query(Node).filter_by(data='n1').all()[0]
            eq_(Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]), d)
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()

        def go():
            # same assertion, but through first() which applies LIMIT
            d = sess.query(Node).filter_by(data='n1').first()
            eq_(Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]), d)
        self.assert_sql_count(testing.db, go, 1)

    def test_lazy_fallback_doesnt_affect_eager(self):
        """With join_depth=1, rows beyond the first level still populate
        eager collections when the same instance appears in the result."""
        nodes = self.tables.nodes

        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)

        mapper(Node, nodes, properties={
            'children': relationship(Node, lazy='joined', join_depth=1,
                                     order_by=nodes.c.id)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()

        # arrive, now we *can* eager load its children and an eager collection
        # should be initialized. essentially the 'n12' instance is present in
        # not just two different rows but two distinct sets of columns in this
        # result set.
        def go():
            allnodes = sess.query(Node).order_by(Node.data).all()
            n12 = allnodes[2]
            eq_(n12.data, 'n12')
            eq_([
                Node(data='n121'),
                Node(data='n122'),
                Node(data='n123')
            ], list(n12.children))
        self.assert_sql_count(testing.db, go, 1)

    def test_with_deferred(self):
        """Deferred 'data' column adds one statement per undeferred
        access unless undefer() options are applied down the tree."""
        nodes = self.tables.nodes

        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)

        mapper(Node, nodes, properties={
            'children': relationship(Node, lazy='joined', join_depth=3,
                                     order_by=nodes.c.id),
            'data': deferred(nodes.c.data)
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()

        def go():
            # no undefer: 1 base SELECT + 3 per-attribute deferred loads
            eq_(
                Node(data='n1', children=[Node(data='n11'), Node(data='n12')]),
                sess.query(Node).order_by(Node.id).first(),
            )
        self.assert_sql_count(testing.db, go, 4)

        sess.expunge_all()

        def go():
            # undefer only the parent's 'data': children still defer
            eq_(Node(data='n1', children=[Node(data='n11'), Node(data='n12')]),
                sess.query(Node).
                options(undefer('data')).order_by(Node.id).first())
        self.assert_sql_count(testing.db, go, 3)

        sess.expunge_all()

        def go():
            # undefer across both levels -> a single statement
            eq_(Node(data='n1', children=[Node(data='n11'), Node(data='n12')]),
                sess.query(Node).options(undefer('data'),
                                         undefer('children.data')).first())
        self.assert_sql_count(testing.db, go, 1)

    def test_options(self):
        """joinedload('children.children') on an otherwise lazy mapping
        eager-loads exactly two levels (2 statements total)."""
        nodes = self.tables.nodes

        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)

        mapper(Node, nodes, properties={
            'children': relationship(Node, lazy='select', order_by=nodes.c.id)
        }, order_by=nodes.c.id)
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()

        def go():
            d = sess.query(Node).filter_by(data='n1').\
                options(joinedload('children.children')).first()
            eq_(Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]), d)
        self.assert_sql_count(testing.db, go, 2)

        def go():
            sess.query(Node).filter_by(data='n1').\
                options(joinedload('children.children')).first()

        # test that the query isn't wrapping the initial query for eager
        # loading (LIMIT is applied directly, no subquery wrapper)
        self.assert_sql_execution(
            testing.db, go,
            CompiledSQL(
                "SELECT nodes.id AS nodes_id, nodes.parent_id AS "
                "nodes_parent_id, nodes.data AS nodes_data FROM nodes "
                "WHERE nodes.data = :data_1 ORDER BY nodes.id LIMIT :param_1",
                {'data_1': 'n1'}
            )
        )

    def test_no_depth(self):
        """lazy='joined' without join_depth eager-loads one level only;
        deeper levels fall back to lazy loads (3 statements)."""
        nodes = self.tables.nodes

        class Node(fixtures.ComparableEntity):
            def append(self, node):
                self.children.append(node)

        mapper(Node, nodes, properties={
            'children': relationship(Node, lazy='joined')
        })
        sess = create_session()
        n1 = Node(data='n1')
        n1.append(Node(data='n11'))
        n1.append(Node(data='n12'))
        n1.append(Node(data='n13'))
        n1.children[1].append(Node(data='n121'))
        n1.children[1].append(Node(data='n122'))
        n1.children[1].append(Node(data='n123'))
        sess.add(n1)
        sess.flush()
        sess.expunge_all()

        def go():
            d = sess.query(Node).filter_by(data='n1').first()
            eq_(Node(data='n1', children=[
                Node(data='n11'),
                Node(data='n12', children=[
                    Node(data='n121'),
                    Node(data='n122'),
                    Node(data='n123')
                ]),
                Node(data='n13')
            ]), d)
        self.assert_sql_count(testing.db, go, 3)
class MixedSelfReferentialEagerTest(fixtures.MappedTest):
    """Joined-eager loading of two self-referential many-to-ones plus a
    plain many-to-one to another class, all applied in one query."""

    @classmethod
    def define_tables(cls, metadata):
        Table('a_table', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True)
              )
        Table('b_table', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('parent_b1_id', Integer, ForeignKey('b_table.id')),
              Column('parent_a_id', Integer, ForeignKey('a_table.id')),
              Column('parent_b2_id', Integer, ForeignKey('b_table.id')))

    @classmethod
    def setup_mappers(cls):
        b_table, a_table = cls.tables.b_table, cls.tables.a_table

        class A(cls.Comparable):
            pass

        class B(cls.Comparable):
            pass

        mapper(A, a_table)
        mapper(B, b_table, properties={
            # two self-referential many-to-ones disambiguated by explicit
            # primaryjoin expressions; remote_side marks the "one" side
            'parent_b1': relationship(
                B,
                remote_side=[b_table.c.id],
                primaryjoin=(b_table.c.parent_b1_id == b_table.c.id),
                order_by=b_table.c.id
            ),
            'parent_z': relationship(A, lazy=True),
            'parent_b2': relationship(
                B,
                remote_side=[b_table.c.id],
                primaryjoin=(b_table.c.parent_b2_id == b_table.c.id),
                order_by=b_table.c.id
            )
        })

    @classmethod
    def insert_data(cls):
        b_table, a_table = cls.tables.b_table, cls.tables.a_table

        a_table.insert().execute(dict(id=1), dict(id=2), dict(id=3))
        b_table.insert().execute(
            dict(id=1, parent_a_id=2, parent_b1_id=None, parent_b2_id=None),
            dict(id=2, parent_a_id=1, parent_b1_id=1, parent_b2_id=None),
            dict(id=3, parent_a_id=1, parent_b1_id=1, parent_b2_id=2),
            dict(id=4, parent_a_id=3, parent_b1_id=1, parent_b2_id=None),
            dict(id=5, parent_a_id=3, parent_b1_id=None, parent_b2_id=2),
            dict(id=6, parent_a_id=1, parent_b1_id=1, parent_b2_id=3),
            dict(id=7, parent_a_id=2, parent_b1_id=None, parent_b2_id=3),
            dict(id=8, parent_a_id=2, parent_b1_id=1, parent_b2_id=2),
            dict(id=9, parent_a_id=None, parent_b1_id=1, parent_b2_id=None),
            dict(id=10, parent_a_id=3, parent_b1_id=7, parent_b2_id=2),
            dict(id=11, parent_a_id=3, parent_b1_id=1, parent_b2_id=8),
            dict(id=12, parent_a_id=2, parent_b1_id=5, parent_b2_id=2),
            dict(id=13, parent_a_id=3, parent_b1_id=4, parent_b2_id=4),
            dict(id=14, parent_a_id=3, parent_b1_id=7, parent_b2_id=2),
        )

    def test_eager_load(self):
        """All three relationships joinedload in a single statement."""
        A, B = self.classes.A, self.classes.B

        session = create_session()

        def go():
            eq_(
                session.query(B).
                options(
                    joinedload('parent_b1'),
                    joinedload('parent_b2'),
                    joinedload('parent_z')
                ).
                filter(B.id.in_([2, 8, 11])).order_by(B.id).all(),
                [
                    B(id=2,
                      parent_z=A(id=1),
                      parent_b1=B(id=1),
                      parent_b2=None),
                    B(id=8,
                      parent_z=A(id=2),
                      parent_b1=B(id=1),
                      parent_b2=B(id=2)),
                    B(id=11,
                      parent_z=A(id=3),
                      parent_b1=B(id=1),
                      parent_b2=B(id=8))
                ]
            )
        self.assert_sql_count(testing.db, go, 1)
class SelfReferentialM2MEagerTest(fixtures.MappedTest):
    """Joined-eager loading of a self-referential many-to-many
    ('widget' related to itself through 'widget_rel')."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            'widget', metadata,
            Column(
                'id',
                Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('name', sa.String(40), nullable=False, unique=True),
        )
        Table(
            'widget_rel', metadata,
            Column('parent_id', Integer, ForeignKey('widget.id')),
            Column('child_id', Integer, ForeignKey('widget.id')),
            sa.UniqueConstraint('parent_id', 'child_id'),
        )

    def test_basic(self):
        """A parent widget eager-loads its child collection through the
        association table using explicit primary/secondaryjoins."""
        widget, widget_rel = self.tables.widget, self.tables.widget_rel

        class Widget(fixtures.ComparableEntity):
            pass

        mapper(Widget, widget, properties={
            'children': relationship(
                Widget, secondary=widget_rel,
                primaryjoin=widget_rel.c.parent_id == widget.c.id,
                secondaryjoin=widget_rel.c.child_id == widget.c.id,
                lazy='joined', join_depth=1,
            )
        })

        sess = create_session()
        parent, child = Widget(name='w1'), Widget(name='w2')
        parent.children.append(child)
        sess.add(parent)
        sess.flush()
        sess.expunge_all()

        expected = [Widget(name='w1', children=[Widget(name='w2')])]
        loaded = sess.query(Widget).filter(Widget.name == 'w1').all()
        eq_(expected, loaded)
class MixedEntitiesTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL):
    """joinedload() behavior in queries that return multiple entities
    (plain and aliased) in the columns clause."""

    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None
    __dialect__ = 'default'
    __prefer_backends__ = ('postgresql', 'mysql', 'oracle')

    @classmethod
    def setup_mappers(cls):
        users, Keyword, items, order_items, orders, \
            Item, User, Address, keywords, Order, \
            item_keywords, addresses = (
                cls.tables.users,
                cls.classes.Keyword,
                cls.tables.items,
                cls.tables.order_items,
                cls.tables.orders,
                cls.classes.Item,
                cls.classes.User,
                cls.classes.Address,
                cls.tables.keywords,
                cls.classes.Order,
                cls.tables.item_keywords,
                cls.tables.addresses)

        mapper(User, users, properties={
            'addresses': relationship(Address, backref='user'),
            'orders': relationship(Order, backref='user'),
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items': relationship(
                Item, secondary=order_items, order_by=items.c.id),
        })
        mapper(Item, items, properties={
            'keywords': relationship(Keyword, secondary=item_keywords)
        })
        mapper(Keyword, keywords)

    def test_two_entities(self):
        """(User, Order) tuples with joinedload on both entities run in
        one statement, via both filter() and join() forms."""
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()

        # two FROM clauses (implicit cross join constrained by filter)
        def go():
            eq_(
                [
                    (User(id=9, addresses=[Address(id=5)]),
                     Order(id=2, items=[
                         Item(id=1), Item(id=2), Item(id=3)])),
                    (User(id=9, addresses=[Address(id=5)]),
                     Order(id=4, items=[
                         Item(id=1), Item(id=5)])),
                ],
                sess.query(User, Order).filter(User.id == Order.user_id).
                options(joinedload(User.addresses), joinedload(Order.items)).
                filter(User.id == 9).
                order_by(User.id, Order.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

        # one FROM clause (explicit relationship join)
        def go():
            eq_(
                [
                    (User(id=9, addresses=[Address(id=5)]),
                     Order(id=2, items=[
                         Item(id=1), Item(id=2), Item(id=3)])),
                    (User(id=9, addresses=[Address(id=5)]),
                     Order(id=4, items=[
                         Item(id=1), Item(id=5)])),
                ],
                sess.query(User, Order).join(User.orders).
                options(joinedload(User.addresses), joinedload(Order.items)).
                filter(User.id == 9).
                order_by(User.id, Order.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

    @testing.exclude(
        'sqlite', '>', (0, ), "sqlite flat out blows it on the multiple JOINs")
    def test_two_entities_with_joins(self):
        """Four entities (two plain, two aliased) each carrying their own
        joinedload options, still a single statement."""
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()

        def go():
            u1 = aliased(User)
            o1 = aliased(Order)

            eq_(
                [
                    (
                        User(addresses=[
                            Address(email_address='fred@fred.com')],
                            name='fred'),
                        Order(description='order 2', isopen=0,
                              items=[
                                  Item(description='item 1'),
                                  Item(description='item 2'),
                                  Item(description='item 3')]),

                        User(addresses=[
                            Address(email_address='jack@bean.com')],
                            name='jack'),
                        Order(description='order 3', isopen=1,
                              items=[
                                  Item(description='item 3'),
                                  Item(description='item 4'),
                                  Item(description='item 5')])
                    ),

                    (
                        User(
                            addresses=[
                                Address(
                                    email_address='fred@fred.com')],
                            name='fred'),
                        Order(
                            description='order 2', isopen=0, items=[
                                Item(
                                    description='item 1'), Item(
                                    description='item 2'), Item(
                                    description='item 3')]),
                        User(
                            addresses=[
                                Address(
                                    email_address='jack@bean.com')],
                            name='jack'),
                        Order(
                            address_id=None,
                            description='order 5',
                            isopen=0,
                            items=[
                                Item(
                                    description='item 5')])
                    ),

                    (
                        User(
                            addresses=[
                                Address(
                                    email_address='fred@fred.com')],
                            name='fred'),
                        Order(
                            description='order 4', isopen=1, items=[
                                Item(
                                    description='item 1'), Item(
                                    description='item 5')]),
                        User(
                            addresses=[
                                Address(
                                    email_address='jack@bean.com')],
                            name='jack'),
                        Order(
                            address_id=None,
                            description='order 5',
                            isopen=0,
                            items=[
                                Item(
                                    description='item 5')])
                    ),
                ],
                sess.query(User, Order, u1, o1).
                join(Order, User.orders).
                options(joinedload(User.addresses),
                        joinedload(Order.items)).filter(User.id == 9).
                join(o1, u1.orders).
                options(joinedload(u1.addresses),
                        joinedload(o1.items)).filter(u1.id == 7).
                filter(Order.id < o1.id).
                order_by(User.id, Order.id, u1.id, o1.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_aliased_entity_one(self):
        """joinedload against an aliased Order via filter()-style join."""
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()

        oalias = sa.orm.aliased(Order)

        # two FROM clauses
        def go():
            eq_(
                [
                    (
                        User(
                            id=9, addresses=[
                                Address(
                                    id=5)]), Order(
                            id=2, items=[
                                Item(
                                    id=1), Item(
                                    id=2), Item(
                                    id=3)])),
                    (User(id=9, addresses=[Address(id=5)]), Order(
                        id=4, items=[Item(id=1), Item(id=5)])),
                ],
                sess.query(User, oalias).filter(User.id == oalias.user_id).
                options(
                    joinedload(User.addresses),
                    joinedload(oalias.items)).filter(User.id == 9).
                order_by(User.id, oalias.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_aliased_entity_two(self):
        """joinedload against an aliased Order via relationship join."""
        Item, Order, User, Address = (self.classes.Item,
                                      self.classes.Order,
                                      self.classes.User,
                                      self.classes.Address)

        sess = create_session()

        oalias = sa.orm.aliased(Order)

        # one FROM clause
        def go():
            eq_(
                [
                    (
                        User(
                            id=9, addresses=[
                                Address(
                                    id=5)]), Order(
                            id=2, items=[
                                Item(
                                    id=1), Item(
                                    id=2), Item(
                                    id=3)])),
                    (User(id=9, addresses=[Address(id=5)]), Order(
                        id=4, items=[Item(id=1), Item(id=5)])),
                ],
                sess.query(User, oalias).join(oalias, User.orders).
                options(joinedload(User.addresses),
                        joinedload(oalias.items)).
                filter(User.id == 9).
                order_by(User.id, oalias.id).all(),
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_aliased_entity_three(self):
        """Compiled-SQL check of the two-FROM-clause case."""
        Order, User = (
            self.classes.Order,
            self.classes.User)

        sess = create_session()

        oalias = sa.orm.aliased(Order)

        # improper setup: oalias in the columns clause but join to usual
        # orders alias.  this should create two FROM clauses even though the
        # query has a from_clause set up via the join
        self.assert_compile(
            sess.query(User, oalias).join(User.orders).
            options(joinedload(oalias.items)).with_labels().statement,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "orders_1.id AS orders_1_id, "
            "orders_1.user_id AS orders_1_user_id, "
            "orders_1.address_id AS orders_1_address_id, "
            "orders_1.description AS orders_1_description, "
            "orders_1.isopen AS orders_1_isopen, items_1.id AS items_1_id, "
            "items_1.description AS items_1_description FROM users "
            "JOIN orders ON users.id = orders.user_id, "
            "orders AS orders_1 LEFT OUTER JOIN (order_items AS order_items_1 "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id) "
            "ON orders_1.id = order_items_1.order_id ORDER BY items_1.id"
        )
class SubqueryTest(fixtures.MappedTest):
    """column_property() subqueries combined with joined-eager loading;
    labels (named, anonymous, or scalar) must not collide."""

    @classmethod
    def define_tables(cls, metadata):
        Table('users_table', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('name', String(16))
              )

        Table('tags_table', metadata,
              Column(
                  'id',
                  Integer,
                  primary_key=True,
                  test_needs_autoincrement=True),
              Column('user_id', Integer, ForeignKey("users_table.id")),
              Column('score1', sa.Float),
              Column('score2', sa.Float),
              )

    def test_label_anonymizing(self):
        """Run the same mapping three ways (labeled with a name, labeled
        anonymously, scalar subquery) and verify the SQL-computed score
        matches the Python-computed one each time."""
        tags_table, users_table = self.tables.tags_table, \
            self.tables.users_table

        class User(fixtures.ComparableEntity):
            @property
            def prop_score(self):
                return sum([tag.prop_score for tag in self.tags])

        class Tag(fixtures.ComparableEntity):
            @property
            def prop_score(self):
                return self.score1 * self.score2

        for labeled, labelname in [(True, 'score'), (True, None),
                                   (False, None)]:
            # rebuild mappers from scratch for each labeling variant
            sa.orm.clear_mappers()

            tag_score = (tags_table.c.score1 * tags_table.c.score2)
            user_score = sa.select([sa.func.sum(tags_table.c.score1 *
                                                tags_table.c.score2)],
                                   tags_table.c.user_id == users_table.c.id)

            if labeled:
                tag_score = tag_score.label(labelname)
                user_score = user_score.label(labelname)
            else:
                user_score = user_score.as_scalar()

            mapper(Tag, tags_table, properties={
                'query_score': sa.orm.column_property(tag_score),
            })

            mapper(User, users_table, properties={
                'tags': relationship(Tag, backref='user', lazy='joined'),
                'query_score': sa.orm.column_property(user_score),
            })

            session = create_session()
            session.add(User(name='joe', tags=[Tag(score1=5.0, score2=3.0),
                                               Tag(score1=55.0, score2=1.0)]))
            session.add(User(name='bar', tags=[Tag(score1=5.0, score2=4.0),
                                               Tag(score1=50.0, score2=1.0),
                                               Tag(score1=15.0, score2=2.0)]))
            session.flush()
            session.expunge_all()

            for user in session.query(User).all():
                eq_(user.query_score, user.prop_score)

            def go():
                u = session.query(User).filter_by(name='joe').one()
                eq_(u.query_score, u.prop_score)
            self.assert_sql_count(testing.db, go, 1)

            # clean out rows so the next labeling variant starts fresh
            for t in (tags_table, users_table):
                t.delete().execute()
class CorrelatedSubqueryTest(fixtures.MappedTest):
    """joinedload of a relationship whose primaryjoin embeds a correlated
    subquery, across labeled/scalar/plain forms, date-max vs. LIMIT
    styles, and aliased vs. non-aliased inner tables."""

    # another argument for joinedload learning about inner joins
    __requires__ = ('correlated_outer_joins', )

    @classmethod
    def define_tables(cls, metadata):
        Table(
            'users', metadata,
            Column(
                'id',
                Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('name', String(50))
        )

        Table(
            'stuff', metadata,
            Column(
                'id',
                Integer,
                primary_key=True,
                test_needs_autoincrement=True),
            Column('date', Date),
            Column('user_id', Integer, ForeignKey('users.id')))

    @classmethod
    def insert_data(cls):
        stuff, users = cls.tables.stuff, cls.tables.users

        users.insert().execute(
            {'id': 1, 'name': 'user1'},
            {'id': 2, 'name': 'user2'},
            {'id': 3, 'name': 'user3'},
        )

        # per user, rows with distinct dates; each test selects the
        # newest row (id 2 for user1, 4 for user2, 5 for user3)
        stuff.insert().execute(
            {'id': 1, 'user_id': 1, 'date': datetime.date(2007, 10, 15)},
            {'id': 2, 'user_id': 1, 'date': datetime.date(2007, 12, 15)},
            {'id': 3, 'user_id': 1, 'date': datetime.date(2007, 11, 15)},
            {'id': 4, 'user_id': 2, 'date': datetime.date(2008, 1, 15)},
            {'id': 5, 'user_id': 3, 'date': datetime.date(2007, 6, 15)},
            {'id': 6, 'user_id': 3, 'date': datetime.date(2007, 3, 15)},
        )

    # the twelve tests below enumerate every combination of
    # (labeled, scalar, plain) x (max-date vs. LIMIT-id) x (alias or not)

    def test_labeled_on_date_noalias(self):
        self._do_test('label', True, False)

    def test_scalar_on_date_noalias(self):
        self._do_test('scalar', True, False)

    def test_plain_on_date_noalias(self):
        self._do_test('none', True, False)

    def test_labeled_on_limitid_noalias(self):
        self._do_test('label', False, False)

    def test_scalar_on_limitid_noalias(self):
        self._do_test('scalar', False, False)

    def test_plain_on_limitid_noalias(self):
        self._do_test('none', False, False)

    def test_labeled_on_date_alias(self):
        self._do_test('label', True, True)

    def test_scalar_on_date_alias(self):
        self._do_test('scalar', True, True)

    def test_plain_on_date_alias(self):
        self._do_test('none', True, True)

    def test_labeled_on_limitid_alias(self):
        self._do_test('label', False, True)

    def test_scalar_on_limitid_alias(self):
        self._do_test('scalar', False, True)

    def test_plain_on_limitid_alias(self):
        self._do_test('none', False, True)

    def _do_test(self, labeled, ondate, aliasstuff):
        """Shared body: build the mapping per the three flags, then check
        the eager load works with all(), first() (with and without the
        option), and one()."""
        stuff, users = self.tables.stuff, self.tables.users

        class User(fixtures.ComparableEntity):
            pass

        class Stuff(fixtures.ComparableEntity):
            pass

        mapper(Stuff, stuff)
        if aliasstuff:
            salias = stuff.alias()
        else:
            # if we don't alias the 'stuff' table within the correlated
            salias = stuff

        if ondate:
            # "newest row" selected via MAX(date)
            stuff_view = select([func.max(salias.c.date).label('max_date')]).\
                where(salias.c.user_id == users.c.id).correlate(users)
        else:
            # "newest row" selected via ORDER BY date DESC LIMIT 1
            stuff_view = select([salias.c.id]).\
                where(salias.c.user_id == users.c.id).\
                correlate(users).order_by(salias.c.date.desc()).limit(1)

        if testing.against("mssql"):
            operator = operators.in_op
        else:
            operator = operators.eq

        if labeled == 'label':
            stuff_view = stuff_view.label('foo')
            operator = operators.eq
        elif labeled == 'scalar':
            stuff_view = stuff_view.as_scalar()

        if ondate:
            mapper(User, users, properties={
                'stuff': relationship(
                    Stuff,
                    primaryjoin=and_(users.c.id == stuff.c.user_id,
                                     operator(stuff.c.date, stuff_view)))
            })
        else:
            mapper(User, users, properties={
                'stuff': relationship(
                    Stuff,
                    primaryjoin=and_(users.c.id == stuff.c.user_id,
                                     operator(stuff.c.id, stuff_view)))
            })

        sess = create_session()

        def go():
            eq_(
                sess.query(User).order_by(User.name).options(
                    joinedload('stuff')).all(),
                [
                    User(name='user1', stuff=[Stuff(id=2)]),
                    User(name='user2', stuff=[Stuff(id=4)]),
                    User(name='user3', stuff=[Stuff(id=5)])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)

        sess = create_session()

        def go():
            # no option: the 'stuff' access lazy-loads -> 2 statements
            eq_(
                sess.query(User).order_by(User.name).first(),
                User(name='user1', stuff=[Stuff(id=2)])
            )
        self.assert_sql_count(testing.db, go, 2)

        sess = create_session()

        def go():
            eq_(
                sess.query(User).order_by(User.name).options(
                    joinedload('stuff')).first(),
                User(name='user1', stuff=[Stuff(id=2)])
            )
        self.assert_sql_count(testing.db, go, 1)

        sess = create_session()

        def go():
            eq_(
                sess.query(User).filter(User.id == 2).options(
                    joinedload('stuff')).one(),
                User(name='user2', stuff=[Stuff(id=4)])
            )
        self.assert_sql_count(testing.db, go, 1)
class CyclicalInheritingEagerTestOne(fixtures.MappedTest):
    """Regression: cyclical joined-eager backref pair between two
    polymorphic subclasses must not hang eager-load setup."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            't1', metadata,
            Column(
                'c1', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('c2', String(30)),
            Column('type', String(30))
        )

        Table('t2', metadata,
              Column('c1', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('c2', String(30)),
              Column('type', String(30)),
              Column('t1.id', Integer, ForeignKey('t1.c1')))

    def test_basic(self):
        t2, t1 = self.tables.t2, self.tables.t1

        class T(object):
            pass

        class SubT(T):
            pass

        class T2(object):
            pass

        class SubT2(T2):
            pass

        mapper(T, t1, polymorphic_on=t1.c.type, polymorphic_identity='t1')
        mapper(
            SubT, None, inherits=T, polymorphic_identity='subt1',
            properties={
                # lazy='joined' on both directions sets up the cycle
                't2s': relationship(
                    SubT2, lazy='joined',
                    backref=sa.orm.backref('subt', lazy='joined'))
            })
        mapper(T2, t2, polymorphic_on=t2.c.type, polymorphic_identity='t2')
        mapper(SubT2, None, inherits=T2, polymorphic_identity='subt2')

        # testing a particular endless loop condition in eager load setup
        create_session().query(SubT).all()
class CyclicalInheritingEagerTestTwo(fixtures.DeclarativeMappedTest,
                                     testing.AssertsCompiledSQL):
    """joinedload('*') against joined-table inheritance where parent and
    child subclasses share the same base table ('persistent')."""

    __dialect__ = 'default'

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class PersistentObject(Base):
            __tablename__ = 'persistent'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)

        class Movie(PersistentObject):
            __tablename__ = 'movie'
            id = Column(Integer, ForeignKey('persistent.id'), primary_key=True)
            director_id = Column(Integer, ForeignKey('director.id'))
            title = Column(String(50))

        class Director(PersistentObject):
            __tablename__ = 'director'
            id = Column(Integer, ForeignKey('persistent.id'), primary_key=True)
            movies = relationship("Movie", foreign_keys=Movie.director_id)
            name = Column(String(50))

    def test_from_subclass(self):
        """The wildcard joinedload produces an aliased (persistent JOIN
        movie) on the outer-join side."""
        Director = self.classes.Director

        s = create_session()

        self.assert_compile(
            s.query(Director).options(joinedload('*')),
            "SELECT director.id AS director_id, "
            "persistent.id AS persistent_id, "
            "director.name AS director_name, movie_1.id AS movie_1_id, "
            "persistent_1.id AS persistent_1_id, "
            "movie_1.director_id AS movie_1_director_id, "
            "movie_1.title AS movie_1_title "
            "FROM persistent JOIN director ON persistent.id = director.id "
            "LEFT OUTER JOIN "
            "(persistent AS persistent_1 JOIN movie AS movie_1 "
            "ON persistent_1.id = movie_1.id) "
            "ON director.id = movie_1.director_id"
        )

    def test_integrate(self):
        """Round-trip: persist a director + movies and reload them with
        the wildcard eager load in one identity map."""
        Director = self.classes.Director
        Movie = self.classes.Movie

        session = Session(testing.db)
        rscott = Director(name="Ridley Scott")
        alien = Movie(title="Alien")
        brunner = Movie(title="Blade Runner")
        rscott.movies.append(brunner)
        rscott.movies.append(alien)
        session.add_all([rscott, alien, brunner])
        session.commit()

        # NOTE(review): Session.close_all() on an instance is the legacy
        # spelling (later deprecated in favor of close_all_sessions())
        session.close_all()
        self.d = session.query(Director).options(joinedload('*')).first()
        assert len(list(session)) == 3
class CyclicalInheritingEagerTestThree(fixtures.DeclarativeMappedTest,
                                       testing.AssertsCompiledSQL):
    """Eager load of a self-referential relationship onto the base of a
    with_polymorphic='*' hierarchy; compile-only (no tables created)."""

    __dialect__ = 'default'
    run_create_tables = None

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class PersistentObject(Base):
            __tablename__ = 'persistent'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)

            __mapper_args__ = {'with_polymorphic': "*"}

        class Director(PersistentObject):
            __tablename__ = 'director'
            id = Column(Integer, ForeignKey('persistent.id'), primary_key=True)
            other_id = Column(Integer, ForeignKey('persistent.id'))
            name = Column(String(50))
            other = relationship(PersistentObject,
                                 primaryjoin=other_id == PersistentObject.id,
                                 lazy=False)
            # two FKs to 'persistent': the inherit condition must be
            # spelled out explicitly
            __mapper_args__ = {"inherit_condition": id == PersistentObject.id}

    def test_gen_query_nodepth(self):
        """Without the option, only the polymorphic outer join renders."""
        PersistentObject = self.classes.PersistentObject
        sess = create_session()
        self.assert_compile(
            sess.query(PersistentObject),
            "SELECT persistent.id AS persistent_id, "
            "director.id AS director_id,"
            " director.other_id AS director_other_id, "
            "director.name AS director_name FROM persistent "
            "LEFT OUTER JOIN director ON director.id = persistent.id"
        )

    def test_gen_query_depth(self):
        """joinedload(Director.other) adds one aliased polymorphic level."""
        PersistentObject = self.classes.PersistentObject
        Director = self.classes.Director
        sess = create_session()
        self.assert_compile(
            sess.query(PersistentObject).options(joinedload(Director.other)),
            "SELECT persistent.id AS persistent_id, "
            "director.id AS director_id, "
            "director.other_id AS director_other_id, "
            "director.name AS director_name, persistent_1.id AS "
            "persistent_1_id, director_1.id AS director_1_id, "
            "director_1.other_id AS director_1_other_id, "
            "director_1.name AS director_1_name "
            "FROM persistent LEFT OUTER JOIN director "
            "ON director.id = persistent.id "
            "LEFT OUTER JOIN (persistent AS persistent_1 "
            "LEFT OUTER JOIN director AS director_1 ON "
            "director_1.id = persistent_1.id) "
            "ON director.other_id = persistent_1.id"
        )
class EnsureColumnsAddedTest(
        fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL):
    """Columns needed by a joinedload's ON clause (here 'parent.arb',
    which is not the primary key) must be added to the SELECT even when
    load_only() would otherwise omit them; compile-only."""

    __dialect__ = 'default'
    run_create_tables = None

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class Parent(Base):
            __tablename__ = 'parent'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            # relationships join on 'arb', not on the primary key
            arb = Column(Integer, unique=True)
            data = Column(Integer)
            o2mchild = relationship("O2MChild")
            m2mchild = relationship("M2MChild", secondary=Table(
                'parent_to_m2m', Base.metadata,
                Column('parent_id', ForeignKey('parent.arb')),
                Column('child_id', ForeignKey('m2mchild.id'))
            ))

        class O2MChild(Base):
            __tablename__ = 'o2mchild'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            parent_id = Column(ForeignKey('parent.arb'))

        class M2MChild(Base):
            __tablename__ = 'm2mchild'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)

    def test_joinedload_defered_pk_limit_o2m(self):
        """o2m with LIMIT: 'arb' survives into the inner subquery."""
        Parent = self.classes.Parent

        s = Session()

        self.assert_compile(
            s.query(Parent).options(
                load_only('data'),
                joinedload(Parent.o2mchild)).limit(10),
            "SELECT anon_1.parent_id AS anon_1_parent_id, "
            "anon_1.parent_data AS anon_1_parent_data, "
            "anon_1.parent_arb AS anon_1_parent_arb, "
            "o2mchild_1.id AS o2mchild_1_id, "
            "o2mchild_1.parent_id AS o2mchild_1_parent_id "
            "FROM (SELECT parent.id AS parent_id, parent.data AS parent_data, "
            "parent.arb AS parent_arb FROM parent LIMIT :param_1) AS anon_1 "
            "LEFT OUTER JOIN o2mchild AS o2mchild_1 "
            "ON anon_1.parent_arb = o2mchild_1.parent_id"
        )

    def test_joinedload_defered_pk_limit_m2m(self):
        """m2m with LIMIT: same guarantee through the secondary table."""
        Parent = self.classes.Parent

        s = Session()

        self.assert_compile(
            s.query(Parent).options(
                load_only('data'),
                joinedload(Parent.m2mchild)).limit(10),
            "SELECT anon_1.parent_id AS anon_1_parent_id, "
            "anon_1.parent_data AS anon_1_parent_data, "
            "anon_1.parent_arb AS anon_1_parent_arb, "
            "m2mchild_1.id AS m2mchild_1_id "
            "FROM (SELECT parent.id AS parent_id, "
            "parent.data AS parent_data, parent.arb AS parent_arb "
            "FROM parent LIMIT :param_1) AS anon_1 "
            "LEFT OUTER JOIN (parent_to_m2m AS parent_to_m2m_1 "
            "JOIN m2mchild AS m2mchild_1 "
            "ON m2mchild_1.id = parent_to_m2m_1.child_id) "
            "ON anon_1.parent_arb = parent_to_m2m_1.parent_id"
        )

    def test_joinedload_defered_pk_o2m(self):
        """o2m without LIMIT: 'arb' appears in the flat SELECT."""
        Parent = self.classes.Parent

        s = Session()

        self.assert_compile(
            s.query(Parent).options(
                load_only('data'),
                joinedload(Parent.o2mchild)),
            "SELECT parent.id AS parent_id, parent.data AS parent_data, "
            "parent.arb AS parent_arb, o2mchild_1.id AS o2mchild_1_id, "
            "o2mchild_1.parent_id AS o2mchild_1_parent_id "
            "FROM parent LEFT OUTER JOIN o2mchild AS o2mchild_1 "
            "ON parent.arb = o2mchild_1.parent_id"
        )

    def test_joinedload_defered_pk_m2m(self):
        """m2m without LIMIT: 'arb' appears in the flat SELECT."""
        Parent = self.classes.Parent

        s = Session()

        self.assert_compile(
            s.query(Parent).options(
                load_only('data'),
                joinedload(Parent.m2mchild)),
            "SELECT parent.id AS parent_id, parent.data AS parent_data, "
            "parent.arb AS parent_arb, m2mchild_1.id AS m2mchild_1_id "
            "FROM parent LEFT OUTER JOIN (parent_to_m2m AS parent_to_m2m_1 "
            "JOIN m2mchild AS m2mchild_1 "
            "ON m2mchild_1.id = parent_to_m2m_1.child_id) "
            "ON parent.arb = parent_to_m2m_1.parent_id"
        )
| true | true |
1c3b57aeaf4fd15721d9e0c57215bb330c0fdb16 | 756 | py | Python | var/spack/repos/builtin/packages/py-fastcov/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/py-fastcov/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/py-fastcov/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFastcov(PythonPackage):
    """
    A parallelized gcov wrapper for generating intermediate coverage formats
    fast
    """

    homepage = "https://github.com/RPGillespie6/fastcov"
    pypi = "fastcov/fastcov-1.13.tar.gz"

    maintainers = ['haampie']

    version('1.13', sha256='ec8a5271f90a2f8b894cb999e262c33e225ed6072d9a6ca38f636f88cc0543e8')

    # Depends on gcov too, but that's installed with the compiler
    depends_on('python@3.5:', type=('build', 'run'))
    depends_on('py-setuptools@38.3:', type='build')
from spack import *
class PyFastcov(PythonPackage):
    """Spack package for fastcov: a parallelized gcov wrapper for generating
    intermediate coverage formats fast.
    """

    homepage = "https://github.com/RPGillespie6/fastcov"
    pypi = "fastcov/fastcov-1.13.tar.gz"

    maintainers = ['haampie']

    version('1.13', sha256='ec8a5271f90a2f8b894cb999e262c33e225ed6072d9a6ca38f636f88cc0543e8')

    # A gcov executable is also required at runtime, but it ships with the compiler.
    depends_on('python@3.5:', type=('build', 'run'))
    depends_on('py-setuptools@38.3:', type='build')
| true | true |
1c3b57da8d3433fdd503a15495d4fbd01591b59f | 712 | py | Python | tutorials/W2D1_BayesianStatistics/solutions/W2D1_Tutorial1_Solution_fd84cbd0.py | liuxiaomiao123/NeuroMathAcademy | 16a7969604a300bf9fbb86f8a5b26050ebd14c65 | [
"CC-BY-4.0"
] | 2 | 2020-07-03T04:39:09.000Z | 2020-07-12T02:08:31.000Z | tutorials/W2D1_BayesianStatistics/solutions/W2D1_Tutorial1_Solution_fd84cbd0.py | NinaHKivanani/course-content | 3c91dd1a669cebce892486ba4f8086b1ef2e1e49 | [
"CC-BY-4.0"
] | 1 | 2020-06-22T22:57:03.000Z | 2020-06-22T22:57:03.000Z | tutorials/W2D1_BayesianStatistics/solutions/W2D1_Tutorial1_Solution_fd84cbd0.py | NinaHKivanani/course-content | 3c91dd1a669cebce892486ba4f8086b1ef2e1e49 | [
"CC-BY-4.0"
] | 1 | 2021-04-26T11:30:26.000Z | 2021-04-26T11:30:26.000Z | with plt.xkcd():
mu_posteriors = []
max_posteriors = []
for mu_visual in mu_visuals:
max_posterior = compute_mode_posterior_multiply(x,
mu_auditory, sigma_auditory,
mu_visual, sigma_visual)
mu_posterior = ((mu_auditory / sigma_auditory ** 2 +
mu_visual / sigma_visual ** 2) /
(1 / sigma_auditory ** 2 +
1 / sigma_visual ** 2))
mu_posteriors.append(mu_posterior)
max_posteriors.append(max_posterior)
plot_visual(mu_visuals, mu_posteriors, max_posteriors)
plt.show() | 35.6 | 81 | 0.508427 | with plt.xkcd():
mu_posteriors = []
max_posteriors = []
for mu_visual in mu_visuals:
max_posterior = compute_mode_posterior_multiply(x,
mu_auditory, sigma_auditory,
mu_visual, sigma_visual)
mu_posterior = ((mu_auditory / sigma_auditory ** 2 +
mu_visual / sigma_visual ** 2) /
(1 / sigma_auditory ** 2 +
1 / sigma_visual ** 2))
mu_posteriors.append(mu_posterior)
max_posteriors.append(max_posterior)
plot_visual(mu_visuals, mu_posteriors, max_posteriors)
plt.show() | true | true |
1c3b58290e3527f2bd100b7ab4eee0f95395fece | 5,385 | py | Python | evaluate.py | yangapku/OFA | 6bf21b0f2483d53b2750db1ea3fd103ec7d331d1 | [
"Apache-2.0"
] | 1 | 2022-03-25T09:30:24.000Z | 2022-03-25T09:30:24.000Z | evaluate.py | yangapku/OFA | 6bf21b0f2483d53b2750db1ea3fd103ec7d331d1 | [
"Apache-2.0"
] | null | null | null | evaluate.py | yangapku/OFA | 6bf21b0f2483d53b2750db1ea3fd103ec7d331d1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3 -u
# Copyright 2022 The OFA-Sys Team.
# All rights reserved.
# This source code is licensed under the Apache 2.0 license
# found in the LICENSE file in the root directory.
import logging
import os
import sys
import numpy as np
import torch
from fairseq import distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.utils import reset_logging
from omegaconf import DictConfig
from utils import checkpoint_utils
from utils.eval_utils import eval_step, merge_results
# Root logging setup: level comes from the LOGLEVEL environment variable
# (default INFO) and records are written to stdout.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("ofa.evaluate")  # module-level logger
def apply_half(t):
    """Downcast *t* to float16 when it is a float32 tensor; otherwise return it unchanged."""
    return t.to(dtype=torch.half) if t.dtype is torch.float32 else t
def main(cfg: DictConfig, **kwargs):
    """Run evaluation: load the model ensemble, iterate the generation subset
    and accumulate per-sample scores, then merge results across workers.

    Args:
        cfg: fairseq configuration (converted from the argparse namespace).
        **kwargs: forwarded to ``eval_step``; includes ``ema_eval`` and
            ``beam_search_vqa_eval`` (see ``cli_main``).
    """
    utils.import_user_module(cfg.common)

    reset_logging()
    logger.info(cfg)

    assert (
        cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
    ), "Must specify batch size either with --max-tokens or --batch-size"

    # Fix seed for stochastic decoding
    if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
        np.random.seed(cfg.common.seed)
        utils.set_torch_seed(cfg.common.seed)

    use_fp16 = cfg.common.fp16
    use_cuda = torch.cuda.is_available() and not cfg.common.cpu

    if use_cuda:
        torch.cuda.set_device(cfg.distributed_training.device_id)

    # Load ensemble
    # NOTE(review): eval() of a CLI-supplied string -- trusted input only.
    overrides = eval(cfg.common_eval.model_overrides)
    logger.info("loading model(s) from {}".format(cfg.common_eval.path))
    models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
        utils.split_paths(cfg.common_eval.path),
        arg_overrides=overrides,
        suffix=cfg.checkpoint.checkpoint_suffix,
        strict=(cfg.checkpoint.checkpoint_shard_count == 1),
        num_shards=cfg.checkpoint.checkpoint_shard_count,
    )

    # loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
    task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)

    # Move models to GPU and optionally swap in EMA weights / cast to fp16
    for model, ckpt_path in zip(models, utils.split_paths(cfg.common_eval.path)):
        if kwargs['ema_eval']:
            logger.info("loading EMA weights from {}".format(ckpt_path))
            model.load_state_dict(checkpoint_utils.load_ema_from_checkpoint(ckpt_path)['model'])
        model.eval()
        if use_fp16:
            model.half()
        if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
            model.cuda()
        model.prepare_for_inference_(cfg)

    # Load dataset (possibly sharded across distributed workers)
    itr = task.get_batch_iterator(
        dataset=task.dataset(cfg.dataset.gen_subset),
        max_tokens=cfg.dataset.max_tokens,
        max_sentences=cfg.dataset.batch_size,
        max_positions=utils.resolve_max_positions(
            task.max_positions(), *[m.max_positions() for m in models]
        ),
        ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
        seed=cfg.common.seed,
        num_shards=cfg.distributed_training.distributed_world_size,
        shard_id=cfg.distributed_training.distributed_rank,
        num_workers=cfg.dataset.num_workers,
        data_buffer_size=cfg.dataset.data_buffer_size,
    ).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(
        itr,
        log_format=cfg.common.log_format,
        log_interval=cfg.common.log_interval,
        default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
    )

    # Initialize generator
    generator = task.build_generator(models, cfg.generation)

    results = []
    score_sum = torch.FloatTensor([0]).cuda()
    score_cnt = torch.FloatTensor([0]).cuda()
    for sample in progress:
        if "net_input" not in sample:
            continue
        sample = utils.move_to_cuda(sample) if use_cuda else sample
        sample = utils.apply_to_sample(apply_half, sample) if cfg.common.fp16 else sample
        with torch.no_grad():
            result, scores = eval_step(task, generator, models, sample, **kwargs)
        results += result
        score_sum += sum(scores) if scores is not None else 0
        score_cnt += len(scores) if scores is not None else 0
        progress.log({"sentences": sample["nsentences"]})

    merge_results(task, cfg, logger, score_cnt, score_sum, results)
def cli_main():
    """Parse the fairseq generation CLI (plus OFA-specific flags) and launch
    distributed evaluation via ``main``."""
    parser = options.get_generation_parser()
    parser.add_argument("--ema-eval", action='store_true', help="Use EMA weights to make evaluation.")
    parser.add_argument("--beam-search-vqa-eval", action='store_true', help="Use beam search for vqa evaluation (faster inference speed but sub-optimal result), if not specified, we compute scores for each answer in the candidate set, which is slower but can obtain best result.")
    args = options.parse_args_and_arch(parser)
    cfg = convert_namespace_to_omegaconf(args)
    distributed_utils.call_main(cfg, main, ema_eval=args.ema_eval, beam_search_vqa_eval=args.beam_search_vqa_eval)


if __name__ == "__main__":
    cli_main()
| 38.741007 | 280 | 0.712906 |
import logging
import os
import sys
import numpy as np
import torch
from fairseq import distributed_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.utils import reset_logging
from omegaconf import DictConfig
from utils import checkpoint_utils
from utils.eval_utils import eval_step, merge_results
# Logging goes to stdout; the level is taken from $LOGLEVEL (default INFO).
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger("ofa.evaluate")
def apply_half(t):
    """Cast a float32 tensor to half precision; any other dtype passes through."""
    if t.dtype is not torch.float32:
        return t
    return t.half()
def main(cfg: DictConfig, **kwargs):
    """Evaluate the OFA model ensemble on the configured generation subset.

    Loads checkpoints (optionally EMA weights), prepares models for inference,
    iterates the dataset and accumulates per-sample scores, then merges the
    results across distributed workers.

    Args:
        cfg: fairseq configuration object.
        **kwargs: forwarded to ``eval_step`` (``ema_eval``,
            ``beam_search_vqa_eval``).
    """
    utils.import_user_module(cfg.common)

    reset_logging()
    logger.info(cfg)

    assert (
        cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None
    ), "Must specify batch size either with --max-tokens or --batch-size"

    # Seed RNGs for reproducible stochastic decoding.
    if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
        np.random.seed(cfg.common.seed)
        utils.set_torch_seed(cfg.common.seed)

    use_fp16 = cfg.common.fp16
    use_cuda = torch.cuda.is_available() and not cfg.common.cpu
    if use_cuda:
        torch.cuda.set_device(cfg.distributed_training.device_id)

    # Load the checkpoint ensemble together with the task it was trained with.
    # NOTE(review): eval() of a CLI-supplied override string -- trusted input only.
    overrides = eval(cfg.common_eval.model_overrides)
    logger.info("loading model(s) from {}".format(cfg.common_eval.path))
    models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
        utils.split_paths(cfg.common_eval.path),
        arg_overrides=overrides,
        suffix=cfg.checkpoint.checkpoint_suffix,
        strict=(cfg.checkpoint.checkpoint_shard_count == 1),
        num_shards=cfg.checkpoint.checkpoint_shard_count,
    )
    # The dataset is loaded after the checkpoint so the saved task config applies.
    task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)

    # Prepare each model: optional EMA weights, eval mode, fp16, GPU placement.
    for model, ckpt_path in zip(models, utils.split_paths(cfg.common_eval.path)):
        if kwargs['ema_eval']:
            logger.info("loading EMA weights from {}".format(ckpt_path))
            model.load_state_dict(checkpoint_utils.load_ema_from_checkpoint(ckpt_path)['model'])
        model.eval()
        if use_fp16:
            model.half()
        if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
            model.cuda()
        model.prepare_for_inference_(cfg)

    # Batch iterator over the generation subset (sharded across workers).
    itr = task.get_batch_iterator(
        dataset=task.dataset(cfg.dataset.gen_subset),
        max_tokens=cfg.dataset.max_tokens,
        max_sentences=cfg.dataset.batch_size,
        max_positions=utils.resolve_max_positions(
            task.max_positions(), *[m.max_positions() for m in models]
        ),
        ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
        seed=cfg.common.seed,
        num_shards=cfg.distributed_training.distributed_world_size,
        shard_id=cfg.distributed_training.distributed_rank,
        num_workers=cfg.dataset.num_workers,
        data_buffer_size=cfg.dataset.data_buffer_size,
    ).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(
        itr,
        log_format=cfg.common.log_format,
        log_interval=cfg.common.log_interval,
        default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
    )

    generator = task.build_generator(models, cfg.generation)

    # Accumulate results and scores on GPU, then merge across workers.
    results = []
    score_sum = torch.FloatTensor([0]).cuda()
    score_cnt = torch.FloatTensor([0]).cuda()
    for sample in progress:
        if "net_input" not in sample:
            continue
        sample = utils.move_to_cuda(sample) if use_cuda else sample
        sample = utils.apply_to_sample(apply_half, sample) if cfg.common.fp16 else sample
        with torch.no_grad():
            result, scores = eval_step(task, generator, models, sample, **kwargs)
        results += result
        score_sum += sum(scores) if scores is not None else 0
        score_cnt += len(scores) if scores is not None else 0
        progress.log({"sentences": sample["nsentences"]})

    merge_results(task, cfg, logger, score_cnt, score_sum, results)
def cli_main():
    """CLI entry point: build the generation parser, add OFA flags and hand
    off to ``main`` through fairseq's distributed launcher."""
    parser = options.get_generation_parser()
    parser.add_argument("--ema-eval", action='store_true', help="Use EMA weights to make evaluation.")
    parser.add_argument("--beam-search-vqa-eval", action='store_true', help="Use beam search for vqa evaluation (faster inference speed but sub-optimal result), if not specified, we compute scores for each answer in the candidate set, which is slower but can obtain best result.")
    args = options.parse_args_and_arch(parser)
    cfg = convert_namespace_to_omegaconf(args)
    distributed_utils.call_main(cfg, main, ema_eval=args.ema_eval, beam_search_vqa_eval=args.beam_search_vqa_eval)


if __name__ == "__main__":
    cli_main()
| true | true |
1c3b582ae8514b7fd13c1904986f59e812406230 | 974 | py | Python | cpdb/trr/models/action_response.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 25 | 2018-07-20T22:31:40.000Z | 2021-07-15T16:58:41.000Z | cpdb/trr/models/action_response.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 13 | 2018-06-18T23:08:47.000Z | 2022-02-10T07:38:25.000Z | cpdb/trr/models/action_response.py | invinst/CPDBv2_backend | b4e96d620ff7a437500f525f7e911651e4a18ef9 | [
"Apache-2.0"
] | 6 | 2018-05-17T21:59:43.000Z | 2020-11-17T00:30:26.000Z | from django.contrib.gis.db import models
from trr.constants import (
ACTION_PERSON_CHOICES,
RESISTANCE_TYPE_CHOICES,
RESISTANCE_LEVEL_CHOICES,
)
from data.models.common import TimeStampsModel
class ActionResponse(TimeStampsModel):
    """One action/response row belonging to a TRR record.

    Records who acted (``person``), the resistance encountered and the force
    used.  Most columns are nullable; source data is sparse.
    """

    trr = models.ForeignKey('trr.TRR', on_delete=models.CASCADE)
    # Which party the row describes (choices defined in ACTION_PERSON_CHOICES)
    person = models.CharField(max_length=16, null=True, choices=ACTION_PERSON_CHOICES)
    resistance_type = models.CharField(max_length=32, null=True, choices=RESISTANCE_TYPE_CHOICES)
    action = models.CharField(max_length=64, null=True)
    other_description = models.CharField(max_length=64, null=True)
    member_action = models.CharField(max_length=64, null=True)
    force_type = models.CharField(max_length=64, null=True)
    action_sub_category = models.CharField(max_length=3, null=True)
    action_category = models.CharField(max_length=1, null=True)
    resistance_level = models.CharField(max_length=16, null=True, choices=RESISTANCE_LEVEL_CHOICES)
| 44.272727 | 99 | 0.785421 | from django.contrib.gis.db import models
from trr.constants import (
ACTION_PERSON_CHOICES,
RESISTANCE_TYPE_CHOICES,
RESISTANCE_LEVEL_CHOICES,
)
from data.models.common import TimeStampsModel
class ActionResponse(TimeStampsModel):
    """Action/response row linked to a TRR; deleted with its parent record.

    ``person``, ``resistance_type`` and ``resistance_level`` are constrained
    to the choice lists imported from ``trr.constants``.
    """

    trr = models.ForeignKey('trr.TRR', on_delete=models.CASCADE)
    person = models.CharField(max_length=16, null=True, choices=ACTION_PERSON_CHOICES)
    resistance_type = models.CharField(max_length=32, null=True, choices=RESISTANCE_TYPE_CHOICES)
    action = models.CharField(max_length=64, null=True)
    other_description = models.CharField(max_length=64, null=True)
    member_action = models.CharField(max_length=64, null=True)
    force_type = models.CharField(max_length=64, null=True)
    action_sub_category = models.CharField(max_length=3, null=True)
    action_category = models.CharField(max_length=1, null=True)
    resistance_level = models.CharField(max_length=16, null=True, choices=RESISTANCE_LEVEL_CHOICES)
| true | true |
1c3b598fd837994e810cd2153f3b614a4572e952 | 19,110 | py | Python | archivist/archivist.py | leflambeur/archivist-python | cf0790f103d575e87c49334614a552395e4b1903 | [
"MIT"
] | 2 | 2021-05-04T15:12:37.000Z | 2021-09-08T10:04:41.000Z | archivist/archivist.py | leflambeur/archivist-python | cf0790f103d575e87c49334614a552395e4b1903 | [
"MIT"
] | 35 | 2021-05-04T12:39:26.000Z | 2022-03-28T09:20:19.000Z | archivist/archivist.py | leflambeur/archivist-python | cf0790f103d575e87c49334614a552395e4b1903 | [
"MIT"
] | 6 | 2021-04-28T14:49:48.000Z | 2022-01-07T15:29:05.000Z | # -*- coding: utf-8 -*-
"""Archivist connection interface
This module contains the base Archivist class which manages
the connection parameters to a Jitsuin Archivist instance and
the basic REST verbs to GET, POST, PATCH and DELETE entities..
The REST methods in this class should only be used directly when
a CRUD endpoint for the specific type of entity is unavailable.
Current CRUD endpoints are assets, events, locations, attachments.
IAM subjects and IAM access policies.
Instantiation of this class encapsulates the URL and authentication
parameters (the max_time parameter is optional):
.. code-block:: python
with open(".auth_token", mode="r", encoding="utf-8") as tokenfile:
authtoken = tokenfile.read().strip()
# Initialize connection to Archivist
arch = Archivist(
"https://app.rkvst.io",
authtoken,
max_time=1200,
)
The arch variable now has additional endpoints assets,events,locations,
attachments, IAM subjects and IAM access policies documented elsewhere.
"""
import logging
import json
from collections import deque
from copy import deepcopy
from time import time
from typing import BinaryIO, Dict, List, Optional, Union
import requests
from requests.models import Response
from requests_toolbelt.multipart.encoder import MultipartEncoder
from .constants import (
HEADERS_REQUEST_TOTAL_COUNT,
HEADERS_TOTAL_COUNT,
ROOT,
SEP,
VERBSEP,
)
from .dictmerge import _deepmerge, _dotstring
from .errors import (
_parse_response,
ArchivistBadFieldError,
ArchivistDuplicateError,
ArchivistHeaderError,
ArchivistNotFoundError,
)
from .headers import _headers_get
from .retry429 import retry_429
from .confirmer import MAX_TIME
from .access_policies import _AccessPoliciesClient
from .appidp import _AppIDPClient
from .applications import _ApplicationsClient
from .assets import _AssetsClient
from .attachments import _AttachmentsClient
from .compliance import _ComplianceClient
from .compliance_policies import _CompliancePoliciesClient
from .events import _EventsClient
from .locations import _LocationsClient
from .sboms import _SBOMSClient
from .subjects import _SubjectsClient
from .type_aliases import MachineAuth
LOGGER = logging.getLogger(__name__)

# Endpoint clients created lazily by Archivist.__getattr__ on first attribute
# access; keep this map in sync with the type hints declared in
# Archivist.__init__ below.
CLIENTS = {
    "access_policies": _AccessPoliciesClient,
    "assets": _AssetsClient,
    "appidp": _AppIDPClient,
    "applications": _ApplicationsClient,
    "attachments": _AttachmentsClient,
    "compliance": _ComplianceClient,
    "compliance_policies": _CompliancePoliciesClient,
    "events": _EventsClient,
    "locations": _LocationsClient,
    "sboms": _SBOMSClient,
    "subjects": _SubjectsClient,
}
class Archivist:  # pylint: disable=too-many-instance-attributes
    """Base class for all Archivist endpoints.

    This class manages the connection to an Archivist instance and provides
    basic methods that represent the underlying REST interface.

    Args:
        url (str): URL of archivist endpoint
        auth: JWT token string, or a (client_id, client_secret) tuple.
        verify: if True the certificate is verified
        max_time (int): maximum time in seconds to wait for confirmation

    """

    # Number of recent REST responses retained for last_response()
    RING_BUFFER_MAX_LEN = 10

    def __init__(
        self,
        url: str,
        auth: Union[None, str, MachineAuth],
        *,
        fixtures: Optional[Dict] = None,
        verify: bool = True,
        max_time: int = MAX_TIME,
    ):
        self._headers = {"content-type": "application/json"}

        # A tuple means machine credentials: tokens are fetched (and
        # refreshed) lazily by the `auth` property.
        if isinstance(auth, tuple):
            self._auth = None
            self._client_id = auth[0]
            self._client_secret = auth[1]
        else:
            self._auth = auth
            self._client_id = None
            self._client_secret = None

        self._expires_at = 0
        self._url = url
        self._verify = verify
        self._response_ring_buffer = deque(maxlen=self.RING_BUFFER_MAX_LEN)
        self._session = requests.Session()
        self._max_time = max_time
        self._fixtures = fixtures or {}

        # Type hints for IDE autocomplete, keep in sync with CLIENTS map above
        self.access_policies: _AccessPoliciesClient
        self.appidp: _AppIDPClient
        self.applications: _ApplicationsClient
        self.assets: _AssetsClient
        self.attachments: _AttachmentsClient
        self.compliance: _ComplianceClient
        self.compliance_policies: _CompliancePoliciesClient
        self.events: _EventsClient
        self.locations: _LocationsClient
        self.sboms: _SBOMSClient
        self.subjects: _SubjectsClient

    def __str__(self) -> str:
        return f"Archivist({self._url})"

    def __getattr__(self, value: str):
        """Create endpoints on demand"""
        client = CLIENTS.get(value)
        if client is None:
            raise AttributeError

        c = client(self)
        # Cache on the instance so __getattr__ only runs once per endpoint.
        super().__setattr__(value, c)

        return c

    @property
    def headers(self) -> Dict:
        """dict: Headers REST headers from response"""
        return self._headers

    @property
    def url(self) -> str:
        """str: URL of Archivist endpoint"""
        return self._url

    @property
    def verify(self) -> bool:
        """bool: Returns True if https connections are to be verified"""
        return self._verify

    @property
    def max_time(self) -> int:
        """int: Returns maximum time in seconds to wait for confirmation"""
        return self._max_time

    @property
    def auth(self) -> str:
        """str: authorization token (refreshed on expiry for machine creds)."""
        if self._client_id is not None and self._expires_at < time():
            apptoken = self.appidp.token(self._client_id, self._client_secret)  # type: ignore
            self._auth = apptoken["access_token"]
            self._expires_at = time() + apptoken["expires_in"] - 10  # fudge factor
            LOGGER.debug("Refresh token")

        return self._auth  # type: ignore

    @property
    def fixtures(self) -> Dict:
        """dict: Contains predefined attributes for each endpoint"""
        return self._fixtures

    @fixtures.setter
    def fixtures(self, fixtures: Dict):
        """dict: Contains predefined attributes for each endpoint"""
        self._fixtures = _deepmerge(self._fixtures, fixtures)

    def __copy__(self):
        return Archivist(
            self._url,
            self.auth,
            fixtures=deepcopy(self._fixtures),
            verify=self._verify,
            max_time=self._max_time,
        )

    def __add_headers(self, headers: Optional[Dict]) -> Dict:
        """Return a fresh headers dict: defaults, then *headers*, then auth.

        Always builds a new dict.  Previously, when *headers* was None, the
        shared ``self._headers`` dict itself was returned and then mutated
        below, leaking the authorization header into every later request's
        defaults.
        """
        newheaders = {**self.headers, **(headers or {})}

        auth = self.auth  # this may trigger a refetch so only do it once here
        # for appidp endpoint there may not be an authtoken
        if auth is not None:
            newheaders["authorization"] = "Bearer " + auth.strip()

        return newheaders

    @retry_429
    def get(
        self,
        subpath: str,
        identity: str,
        *,
        headers: Optional[Dict] = None,
        params: Optional[Dict] = None,
        tail: Optional[str] = None,
    ) -> Dict:
        """GET method (REST)

        Args:
            subpath (str): e.g. v2 or iam/v1...
            identity (str): e.g. assets/xxxxxxxxxxxxxxxxxxxxxxxxxxxx`
            tail (str): endpoint tail e.g. metadata
                 adds extra selector to tail of the endpoint
            headers (dict): optional REST headers
            params (dict): optional query strings

        Returns:
            dict representing the response body (entity).

        """
        response = self._session.get(
            SEP.join([f for f in (self.url, ROOT, subpath, identity, tail) if f]),
            headers=self.__add_headers(headers),
            verify=self.verify,
            params=params,
        )

        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response.json()

    @retry_429
    def get_file(
        self,
        subpath: str,
        identity: str,
        fd: BinaryIO,
        *,
        headers: Optional[Dict] = None,
    ) -> Response:
        """GET method (REST) - chunked

        Downloads a binary object from upstream storage.

        Args:
            subpath (str): e.g. v2 or iam/v1
            identity (str): e.g. attachments/xxxxxxxxxxxxxxxxxxxxxxxxxxxx`
            fd (file): an iterable representing a file (usually from open())
                the file must be opened in binary mode
            headers (dict): optional REST headers

        Returns:
            REST response (not the response body)

        """
        response = self._session.get(
            SEP.join((self.url, ROOT, subpath, identity)),
            headers=self.__add_headers(headers),
            verify=self.verify,
            stream=True,  # stream so large blobs are not held in memory
        )

        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        for chunk in response.iter_content(chunk_size=4096):
            if chunk:
                fd.write(chunk)

        return response

    @retry_429
    def post(
        self,
        path: str,
        request: Optional[Dict],
        *,
        headers: Optional[Dict] = None,
        verb: Optional[str] = None,
        noheaders: bool = False,
    ) -> Dict:
        """POST method (REST)

        Creates an entity

        Args:
            path (str): e.g. v2/assets
            request (dict): request body defining the entity
            headers (dict): optional REST headers
            verb (str): optional REST verb
            noheaders (bool): do not add headers and do not jsonify data

        Returns:
            dict representing the response body (entity).

        """
        url = SEP.join((self.url, ROOT, VERBSEP.join([f for f in (path, verb) if f])))
        LOGGER.debug("POST URL %s", url)
        if noheaders:
            data = request
        else:
            headers = self.__add_headers(headers)
            data = json.dumps(request) if request else None

        response = self._session.post(
            url,
            data=data,
            headers=headers,
            verify=self.verify,
        )

        # Record for last_response(), consistent with the other REST verbs
        # (this verb previously omitted the ring-buffer append).
        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response.json()

    @retry_429
    def post_file(
        self,
        path: str,
        fd: BinaryIO,
        mtype: str,
        *,
        form: Optional[str] = "file",
        params: Optional[Dict] = None,
    ) -> Dict:
        """POST method (REST) - upload binary

        Uploads a file to an endpoint

        Args:
            path (str): e.g. v2/assets
            fd : iterable representing the contents of a file.
            mtype (str): mime type e.g. image/jpg
            params (dict): dictionary of optional path params

        Returns:
            dict representing the response body (entity).

        """
        multipart = MultipartEncoder(
            fields={
                form: ("filename", fd, mtype),
            }
        )
        headers = {
            "content-type": multipart.content_type,
        }
        if params:
            qry = "&".join(sorted(f"{k}={v}" for k, v in _dotstring(params)))
            path = "?".join((path, qry))

        response = self._session.post(
            SEP.join((self.url, ROOT, path)),
            data=multipart,  # type: ignore    https://github.com/requests/toolbelt/issues/312
            headers=self.__add_headers(headers),
            verify=self.verify,
        )

        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response.json()

    @retry_429
    def delete(
        self, subpath: str, identity: str, *, headers: Optional[Dict] = None
    ) -> Dict:
        """DELETE method (REST)

        Deletes an entity

        Args:
            subpath (str): e.g. v2 or iam/v1
            identity (str): e.g. assets/xxxxxxxxxxxxxxxxxxxxxxxxxxxx`
            headers (dict): optional REST headers

        Returns:
            dict representing the response body (entity).

        """
        response = self._session.delete(
            SEP.join((self.url, ROOT, subpath, identity)),
            headers=self.__add_headers(headers),
            verify=self.verify,
        )

        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response.json()

    @retry_429
    def patch(
        self,
        subpath: str,
        identity: str,
        request: Dict,
        *,
        headers: Optional[Dict] = None,
    ) -> Dict:
        """PATCH method (REST)

        Updates the specified entity.

        Args:
            subpath (str): e.g. v2 or iam/v1
            identity (str): e.g. assets/xxxxxxxxxxxxxxxxxxxxxxxxxxxx`
            request (dict): request body defining the entity changes.
            headers (dict): optional REST headers

        Returns:
            dict representing the response body (entity).

        """
        response = self._session.patch(
            SEP.join((self.url, ROOT, subpath, identity)),
            data=json.dumps(request),
            headers=self.__add_headers(headers),
            verify=self.verify,
        )

        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response.json()

    @retry_429
    def __list(self, path, args, *, headers=None) -> Response:
        # Shared GET-with-querystring used by get_by_signature/count/list.
        if args:
            path = "?".join((path, args))

        response = self._session.get(
            SEP.join((self.url, ROOT, path)),
            headers=self.__add_headers(headers),
            verify=self.verify,
        )

        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response

    def last_response(self, *, responses: int = 1) -> List[Response]:
        """Returns the requested number of response objects from the response ring buffer

        Args:
            responses (int): Number of responses to be returned in a list

        Returns:
            list of responses.

        """
        return list(self._response_ring_buffer)[:responses]

    @staticmethod
    def __query(query: Optional[Dict]):
        # Flatten a nested selector dict into a sorted query string
        # (None/empty input propagates unchanged via `and`).
        return query and "&".join(sorted(f"{k}={v}" for k, v in _dotstring(query)))

    def get_by_signature(
        self, path: str, field: str, query: Dict, *, headers: Optional[Dict] = None
    ) -> Dict:
        """GET method (REST) with query string

        Reads an entity indirectly by searching for its signature

        It is expected that the query parameters will result in only a single entity
        being found.

        Args:
            path (str): e.g. v2/assets
            field (str): name of collection of entities e.g assets
            query (dict): selector e.g. {"attributes": {"arc_display_name":"container no. 1"}}
            headers (dict): optional REST headers

        Returns:
            dict representing the entity found.

        Raises:
            ArchivistBadFieldError: field has incorrect value.
            ArchivistNotFoundError: No entity found
            ArchivistDuplicateError: More than one entity matching signature found

        """

        # page_size=2 is enough to detect a duplicate without fetching more.
        paging = "page_size=2"
        qry = self.__query(query)

        response = self.__list(
            path,
            "&".join((a for a in (paging, qry) if a)),  # type: ignore
            headers=headers,
        )

        data = response.json()

        try:
            records = data[field]
        except KeyError as ex:
            raise ArchivistBadFieldError(f"No {field} found") from ex

        if len(records) == 0:
            raise ArchivistNotFoundError("No entity found")

        if len(records) > 1:
            raise ArchivistDuplicateError(f"{len(records)} found")

        return records[0]

    def count(self, path: str, *, query: Optional[Dict] = None) -> int:
        """GET method (REST) with query string

        Returns the count of objects that match query

        Args:
            path (str): e.g. v2/assets
            query (dict): selector e.g. {"attributes":{"arc_display_name":"container no. 1"}}

        Returns:
            integer count of entities found.

        Raises:
            ArchivistHeaderError: If the expected count header is not present

        """
        # Only the count header is wanted, so ask for a single record.
        paging = "page_size=1"
        qry = self.__query(query)
        headers = {HEADERS_REQUEST_TOTAL_COUNT: "true"}
        response = self.__list(
            path,
            "&".join((a for a in (paging, qry) if a)),  # type: ignore
            headers=headers,
        )

        count = _headers_get(response.headers, HEADERS_TOTAL_COUNT)  # type: ignore
        if count is None:
            raise ArchivistHeaderError("Did not get a count in the header")

        return int(count)

    def list(
        self,
        path: str,
        field: str,
        *,
        page_size: Optional[int] = None,
        query: Optional[Dict] = None,
        headers: Optional[Dict] = None,
    ):
        """GET method (REST) with query string

        Lists entities that match the query dictionary.

        If page size is specified return the list of records in batches of page_size
        until next_page_token in response is null.

        If page size is unspecified return up to the internal limit of records.
        (different for each endpoint)

        Args:
            path (str): e.g. v2/assets
            field (str): name of collection of entities e.g assets
            page_size (int): optional number of items per request e.g. 500
            query (dict): selector e.g. {"confirmation_status": "CONFIRMED", }
            headers (dict): optional REST headers

        Returns:
            iterable that lists entities

        Raises:
            ArchivistBadFieldError: field has incorrect value.

        """
        paging = page_size and f"page_size={page_size}"
        qry = self.__query(query)

        while True:
            response = self.__list(
                path,
                "&".join((a for a in (paging, qry) if a)),  # type: ignore
                headers=headers,
            )

            data = response.json()

            try:
                records = data[field]
            except KeyError as ex:
                raise ArchivistBadFieldError(f"No {field} found") from ex

            for record in records:
                yield record

            token = data.get("next_page_token")
            if not token:
                break

            # Subsequent pages are addressed by token rather than page size.
            paging = f"page_token={token}"
| 28.998483 | 94 | 0.595552 |
import logging
import json
from collections import deque
from copy import deepcopy
from time import time
from typing import BinaryIO, Dict, List, Optional, Union
import requests
from requests.models import Response
from requests_toolbelt.multipart.encoder import MultipartEncoder
from .constants import (
HEADERS_REQUEST_TOTAL_COUNT,
HEADERS_TOTAL_COUNT,
ROOT,
SEP,
VERBSEP,
)
from .dictmerge import _deepmerge, _dotstring
from .errors import (
_parse_response,
ArchivistBadFieldError,
ArchivistDuplicateError,
ArchivistHeaderError,
ArchivistNotFoundError,
)
from .headers import _headers_get
from .retry429 import retry_429
from .confirmer import MAX_TIME
from .access_policies import _AccessPoliciesClient
from .appidp import _AppIDPClient
from .applications import _ApplicationsClient
from .assets import _AssetsClient
from .attachments import _AttachmentsClient
from .compliance import _ComplianceClient
from .compliance_policies import _CompliancePoliciesClient
from .events import _EventsClient
from .locations import _LocationsClient
from .sboms import _SBOMSClient
from .subjects import _SubjectsClient
from .type_aliases import MachineAuth
LOGGER = logging.getLogger(__name__)

# Map of attribute name -> endpoint client class; Archivist.__getattr__
# instantiates these lazily on first access and caches the instance.
CLIENTS = {
    "access_policies": _AccessPoliciesClient,
    "assets": _AssetsClient,
    "appidp": _AppIDPClient,
    "applications": _ApplicationsClient,
    "attachments": _AttachmentsClient,
    "compliance": _ComplianceClient,
    "compliance_policies": _CompliancePoliciesClient,
    "events": _EventsClient,
    "locations": _LocationsClient,
    "sboms": _SBOMSClient,
    "subjects": _SubjectsClient,
}
class Archivist:
    """Client connection to an Archivist service.

    Owns a single ``requests.Session``, handles bearer-token acquisition and
    refresh (when constructed with a ``(client_id, client_secret)`` tuple),
    and provides the raw HTTP verbs used by the per-endpoint sub-clients.
    The sub-clients themselves (``assets``, ``events``, ...) are created
    lazily on first attribute access via :meth:`__getattr__` and the
    module-level ``CLIENTS`` registry.

    :param url: base URL of the archivist service.
    :param auth: a bearer token string, ``None``, or a
        ``(client_id, client_secret)`` tuple (``MachineAuth``) from which
        tokens are fetched and refreshed automatically.
    :param fixtures: optional default attributes merged into requests.
    :param verify: whether to verify TLS certificates (forwarded to
        ``requests``).
    :param max_time: maximum time in seconds to wait for confirmation.
    """

    # Number of most-recent HTTP responses retained for post-mortem
    # debugging; see last_response().
    RING_BUFFER_MAX_LEN = 10

    def __init__(
        self,
        url: str,
        auth: Union[None, str, MachineAuth],
        *,
        fixtures: Optional[Dict] = None,
        verify: bool = True,
        max_time: int = MAX_TIME,
    ):
        self._headers = {"content-type": "application/json"}
        if isinstance(auth, tuple):
            # Machine credentials: the token is fetched lazily (and
            # refreshed when stale) by the `auth` property.
            self._auth = None
            self._client_id = auth[0]
            self._client_secret = auth[1]
        else:
            # Pre-acquired bearer token, or None.
            self._auth = auth
            self._client_id = None
            self._client_secret = None

        self._expires_at = 0  # epoch seconds at which the token expires
        self._url = url
        self._verify = verify
        self._response_ring_buffer = deque(maxlen=self.RING_BUFFER_MAX_LEN)
        self._session = requests.Session()
        self._max_time = max_time
        self._fixtures = fixtures or {}

        # Annotations only: the sub-clients are instantiated on first
        # access in __getattr__ and cached on the instance.
        self.access_policies: _AccessPoliciesClient
        self.appidp: _AppIDPClient
        self.applications: _ApplicationsClient
        self.assets: _AssetsClient
        self.attachments: _AttachmentsClient
        self.compliance: _ComplianceClient
        self.compliance_policies: _CompliancePoliciesClient
        self.events: _EventsClient
        self.locations: _LocationsClient
        self.sboms: _SBOMSClient
        self.subjects: _SubjectsClient

    def __str__(self) -> str:
        return f"Archivist({self._url})"

    def __getattr__(self, value: str):
        """Create, cache and return the sub-client registered under *value*.

        Only invoked when normal attribute lookup fails; the created client
        is stored with ``__setattr__`` so subsequent accesses bypass this
        hook entirely.
        """
        client = CLIENTS.get(value)
        if client is None:
            raise AttributeError

        c = client(self)
        super().__setattr__(value, c)
        return c

    @property
    def headers(self) -> Dict:
        """Base headers sent with every request."""
        return self._headers

    @property
    def url(self) -> str:
        """Base URL of the archivist service."""
        return self._url

    @property
    def verify(self) -> bool:
        """Whether TLS certificates are verified."""
        return self._verify

    @property
    def max_time(self) -> int:
        """Maximum confirmation wait time in seconds."""
        return self._max_time

    @property
    def auth(self) -> str:
        """Current bearer token.

        When machine credentials were supplied, the token is (re)fetched
        whenever the cached one has expired.
        """
        if self._client_id is not None and self._expires_at < time():
            apptoken = self.appidp.token(self._client_id, self._client_secret)
            self._auth = apptoken["access_token"]
            # Refresh 10s early so a token cannot expire mid-request.
            self._expires_at = time() + apptoken["expires_in"] - 10
            LOGGER.debug("Refresh token")

        return self._auth

    @property
    def fixtures(self) -> Dict:
        """Default attributes merged into outgoing requests."""
        return self._fixtures

    @fixtures.setter
    def fixtures(self, fixtures: Dict):
        """Deep-merge *fixtures* into the existing fixtures."""
        self._fixtures = _deepmerge(self._fixtures, fixtures)

    def __copy__(self):
        # Fixtures are deep-copied so the copy cannot mutate the original's.
        return Archivist(
            self._url,
            self.auth,
            fixtures=deepcopy(self._fixtures),
            verify=self._verify,
            max_time=self._max_time,
        )

    def __add_headers(self, headers: Optional[Dict]) -> Dict:
        """Return the base headers merged with *headers* plus authorization.

        Always builds a new dict. (Bugfix: previously, when *headers* was
        None, the shared ``self._headers`` dict itself was returned and then
        mutated with the authorization header, permanently polluting the
        base headers of this instance.)
        """
        if headers is not None:
            newheaders = {**self.headers, **headers}
        else:
            newheaders = {**self.headers}  # copy - never mutate the shared dict

        auth = self.auth
        if auth is not None:
            newheaders["authorization"] = "Bearer " + auth.strip()

        return newheaders

    @retry_429
    def get(
        self,
        subpath: str,
        identity: str,
        *,
        headers: Optional[Dict] = None,
        params: Optional[Dict] = None,
        tail: Optional[str] = None,
    ) -> Dict:
        """GET the entity at ``<url>/<root>/<subpath>/<identity>[/<tail>]``
        and return the decoded JSON body; raises the parsed archivist error
        on failure."""
        response = self._session.get(
            # empty path components are dropped from the join
            SEP.join([f for f in (self.url, ROOT, subpath, identity, tail) if f]),
            headers=self.__add_headers(headers),
            verify=self.verify,
            params=params,
        )
        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response.json()

    @retry_429
    def get_file(
        self,
        subpath: str,
        identity: str,
        fd: BinaryIO,
        *,
        headers: Optional[Dict] = None,
    ) -> Response:
        """Stream the entity's binary content into the open file object
        *fd* in 4 KiB chunks and return the raw response."""
        response = self._session.get(
            SEP.join((self.url, ROOT, subpath, identity)),
            headers=self.__add_headers(headers),
            verify=self.verify,
            stream=True,
        )
        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        for chunk in response.iter_content(chunk_size=4096):
            if chunk:  # skip keep-alive chunks
                fd.write(chunk)

        return response

    @retry_429
    def post(
        self,
        path: str,
        request: Optional[Dict],
        *,
        headers: Optional[Dict] = None,
        verb: Optional[str] = None,
        noheaders: bool = False,
    ) -> Dict:
        """POST *request* to *path* (optionally suffixed with *verb*).

        With ``noheaders=True`` the body and caller-supplied headers are
        passed through untouched (no JSON encoding, no auth header) - used
        for endpoints with non-JSON payloads such as token requests.
        """
        url = SEP.join((self.url, ROOT, VERBSEP.join([f for f in (path, verb) if f])))
        LOGGER.debug("POST URL %s", url)

        if noheaders:
            data = request
        else:
            headers = self.__add_headers(headers)
            data = json.dumps(request) if request else None

        response = self._session.post(
            url,
            data=data,
            headers=headers,
            verify=self.verify,
        )
        # Record the response for last_response(), consistently with every
        # other verb (bugfix: post() previously omitted this).
        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response.json()

    @retry_429
    def post_file(
        self,
        path: str,
        fd: BinaryIO,
        mtype: str,
        *,
        form: Optional[str] = "file",
        params: Optional[Dict] = None,
    ) -> Dict:
        """Upload the open binary file *fd* of mime type *mtype* as a
        multipart form field named *form*; optional *params* are encoded
        into the query string."""
        multipart = MultipartEncoder(
            fields={
                form: ("filename", fd, mtype),
            }
        )
        headers = {
            # content-type carries the multipart boundary
            "content-type": multipart.content_type,
        }
        if params:
            # flatten nested params to dotted keys, sorted for determinism
            qry = "&".join(sorted(f"{k}={v}" for k, v in _dotstring(params)))
            path = "?".join((path, qry))

        response = self._session.post(
            SEP.join((self.url, ROOT, path)),
            data=multipart,
            headers=self.__add_headers(headers),
            verify=self.verify,
        )
        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response.json()

    @retry_429
    def delete(
        self, subpath: str, identity: str, *, headers: Optional[Dict] = None
    ) -> Dict:
        """DELETE the entity at ``<subpath>/<identity>`` and return the
        decoded JSON body."""
        response = self._session.delete(
            SEP.join((self.url, ROOT, subpath, identity)),
            headers=self.__add_headers(headers),
            verify=self.verify,
        )
        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response.json()

    @retry_429
    def patch(
        self,
        subpath: str,
        identity: str,
        request: Dict,
        *,
        headers: Optional[Dict] = None,
    ) -> Dict:
        """PATCH the entity at ``<subpath>/<identity>`` with the
        JSON-encoded *request* and return the decoded JSON body."""
        response = self._session.patch(
            SEP.join((self.url, ROOT, subpath, identity)),
            data=json.dumps(request),
            headers=self.__add_headers(headers),
            verify=self.verify,
        )
        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response.json()

    @retry_429
    def __list(self, path, args, *, headers=None) -> Response:
        # Shared GET-with-querystring helper for the listing endpoints.
        if args:
            path = "?".join((path, args))

        response = self._session.get(
            SEP.join((self.url, ROOT, path)),
            headers=self.__add_headers(headers),
            verify=self.verify,
        )
        self._response_ring_buffer.appendleft(response)

        error = _parse_response(response)
        if error is not None:
            raise error

        return response

    def last_response(self, *, responses: int = 1) -> List[Response]:
        """Return up to *responses* most recent HTTP responses (newest
        first) for debugging."""
        return list(self._response_ring_buffer)[:responses]

    @staticmethod
    def __query(query: Optional[Dict]):
        # Flatten a nested query dict into a sorted, deterministic
        # querystring; returns None/falsy unchanged.
        return query and "&".join(sorted(f"{k}={v}" for k, v in _dotstring(query)))

    def get_by_signature(
        self, path: str, field: str, query: Dict, *, headers: Optional[Dict] = None
    ) -> Dict:
        """Return the unique record in *field* matching *query*.

        :raises ArchivistBadFieldError: response has no *field* key.
        :raises ArchivistNotFoundError: no matching record.
        :raises ArchivistDuplicateError: more than one matching record.
        """
        # page_size=2 is enough to detect duplicates without fetching more.
        paging = "page_size=2"
        qry = self.__query(query)

        response = self.__list(
            path,
            "&".join((a for a in (paging, qry) if a)),
            headers=headers,
        )

        data = response.json()

        try:
            records = data[field]
        except KeyError as ex:
            raise ArchivistBadFieldError(f"No {field} found") from ex

        if len(records) == 0:
            raise ArchivistNotFoundError("No entity found")

        if len(records) > 1:
            raise ArchivistDuplicateError(f"{len(records)} found")

        return records[0]

    def count(self, path: str, *, query: Optional[Dict] = None) -> int:
        """Return the total number of records matching *query* using the
        total-count response header (only one record is fetched)."""
        paging = "page_size=1"
        qry = self.__query(query)
        headers = {HEADERS_REQUEST_TOTAL_COUNT: "true"}

        response = self.__list(
            path,
            "&".join((a for a in (paging, qry) if a)),
            headers=headers,
        )

        count = _headers_get(response.headers, HEADERS_TOTAL_COUNT)
        if count is None:
            raise ArchivistHeaderError("Did not get a count in the header")

        return int(count)

    def list(
        self,
        path: str,
        field: str,
        *,
        page_size: Optional[int] = None,
        query: Optional[Dict] = None,
        headers: Optional[Dict] = None,
    ):
        """Yield every record in *field* matching *query*, transparently
        following ``next_page_token`` pagination.

        :raises ArchivistBadFieldError: response has no *field* key.
        """
        paging = page_size and f"page_size={page_size}"
        qry = self.__query(query)

        while True:
            response = self.__list(
                path,
                "&".join((a for a in (paging, qry) if a)),
                headers=headers,
            )

            data = response.json()

            try:
                records = data[field]
            except KeyError as ex:
                raise ArchivistBadFieldError(f"No {field} found") from ex

            for record in records:
                yield record

            token = data.get("next_page_token")
            if not token:
                break

            # subsequent requests page by token instead of page_size
            paging = f"page_token={token}"
| true | true |
1c3b5a486742d083e08034ad044e4be97599f9a4 | 7,493 | py | Python | surveysite/settings.py | r-anime/surveysite | 85c10882a8fcb0b01c180ba4ebe11c229d99c60e | [
"MIT"
] | 6 | 2021-01-23T22:21:41.000Z | 2021-06-30T00:45:34.000Z | surveysite/settings.py | r-anime/surveysite | 85c10882a8fcb0b01c180ba4ebe11c229d99c60e | [
"MIT"
] | 46 | 2021-01-02T02:52:42.000Z | 2022-03-28T18:54:48.000Z | surveysite/settings.py | r-anime/surveysite | 85c10882a8fcb0b01c180ba4ebe11c229d99c60e | [
"MIT"
] | 2 | 2021-04-20T05:20:45.000Z | 2021-04-20T05:28:13.000Z | """
Django settings for surveysite project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from django.contrib.messages import constants as message_constants
import os
import datetime

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('WEBSITE_SECRET')
REDDIT_OAUTH_SECRET = os.environ.get('WEBSITE_REDDIT_OAUTH_SECRET')
REDDIT_OAUTH_CLIENT_ID = os.environ.get('WEBSITE_REDDIT_OAUTH_CLIENT_ID')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('WEBSITE_DEBUG'))

# Semicolon-separated list of allowed hosts, e.g. "example.com;www.example.com".
allowed_hosts_env = os.environ.get('WEBSITE_ALLOWED_HOSTS')
ALLOWED_HOSTS = allowed_hosts_env.split(';') if allowed_hosts_env else []

# Only send session/CSRF cookies over HTTPS when the site is served via HTTPS.
use_https = bool(os.environ.get('WEBSITE_USE_HTTPS'))
SESSION_COOKIE_SECURE = use_https
CSRF_COOKIE_SECURE = use_https
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https' if use_https else 'http'

# Application definition
INSTALLED_APPS = [
    'survey.apps.SurveyConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.reddit',
    'django_sass',
]

# Per Django's cache docs, UpdateCacheMiddleware must be first and
# FetchFromCacheMiddleware last so the whole-page cache wraps everything.
# NOTE: CommonMiddleware was previously listed twice (it ran on every
# request twice); the redundant second entry has been removed.
MIDDLEWARE = [
    'django.middleware.cache.UpdateCacheMiddleware',
    'htmlmin.middleware.HtmlMinifyMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
    'htmlmin.middleware.MarkRequestMiddleware',
]

ROOT_URLCONF = 'surveysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.media',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# Explicitly set the default type of primary fields of models to AutoField - in the future, Django will use BigAutoField by default
# https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'

AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
]

# Reddit OAuth login via django-allauth.
SOCIALACCOUNT_PROVIDERS = {
    'reddit': {
        'APP': {
            'client_id': REDDIT_OAUTH_CLIENT_ID,
            'secret': REDDIT_OAUTH_SECRET,
            'key': '',
        },
        'SCOPE': ['identity'],
        'USER_AGENT': 'django:animesurvey:1.0 (by /u/DragonsOnOurMountain)',
        'AUTH_PARAMS': {
            'duration': 'permanent',
        },
    }
}

# Map Django message levels to Bootstrap alert classes.
MESSAGE_TAGS = {
    message_constants.DEBUG: 'primary',
    message_constants.INFO: 'info',
    message_constants.SUCCESS: 'success',
    message_constants.WARNING: 'warning',
    message_constants.ERROR: 'danger',
}
MESSAGE_LEVEL = message_constants.DEBUG if DEBUG else message_constants.INFO

LOGIN_REDIRECT_URL = 'survey:index'
ACCOUNT_LOGOUT_REDIRECT_URL = 'survey:index' # Why does allauth use django's LOGIN_REDIRECT_URL but not LOGOUT_REDIRECT_URL?

WSGI_APPLICATION = 'surveysite.wsgi.application'

HTML_MINIFY = True

SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
CACHES = {
    # Dummy cache while debugging so changes show up immediately.
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache' if DEBUG else 'django.core.cache.backends.locmem.LocMemCache',
    },
    'long': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': BASE_DIR / 'cache/',
    },
}
CACHE_MIDDLEWARE_ALIAS = 'default'
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_KEY_PREFIX = ''

# Logging
# https://docs.djangoproject.com/en/3.1/topics/logging/
# LOGGING gets merged with django's own DEFAULT_LOGGING variable
# https://github.com/django/django/blob/master/django/utils/log.py
log_directory = 'log/'
log_filename = datetime.datetime.now().strftime('%Y%m%d') + '.log'
try:
    os.mkdir(BASE_DIR / log_directory)
except FileExistsError:
    pass

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'file': {
            'format': '{levelname} {asctime}\n{message}\n',
            'style': '{',
        },
        'console': {
            'format': '{levelname} {message}',
            'style': '{',
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        'require_debug_true': {
            '()': 'django.utils.log.RequireDebugTrue',
        },
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'filters': ['require_debug_true'],
            'formatter': 'console',
        },
        'file': {
            'level': 'WARNING',
            'class': 'logging.FileHandler',
            'filename': BASE_DIR / (log_directory + log_filename),
            'formatter': 'file',
        },
    },
    'root': {
        'handlers': ['file'],
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
        },
    },
}

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = BASE_DIR / 'static/'
STATIC_URL = '/static/'

# Media files
MEDIA_ROOT = BASE_DIR / 'files/'
MEDIA_URL = '/files/'
| 27.751852 | 131 | 0.669558 |
from pathlib import Path
from django.contrib.messages import constants as message_constants
import os
import datetime
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
SECRET_KEY = os.environ.get('WEBSITE_SECRET')
REDDIT_OAUTH_SECRET = os.environ.get('WEBSITE_REDDIT_OAUTH_SECRET')
REDDIT_OAUTH_CLIENT_ID = os.environ.get('WEBSITE_REDDIT_OAUTH_CLIENT_ID')
DEBUG = True if os.environ.get('WEBSITE_DEBUG') else False
allowed_hosts_env = os.environ.get('WEBSITE_ALLOWED_HOSTS')
ALLOWED_HOSTS = allowed_hosts_env.split(';') if allowed_hosts_env else []
use_https = True if os.environ.get('WEBSITE_USE_HTTPS') else False
SESSION_COOKIE_SECURE = use_https
CSRF_COOKIE_SECURE = use_https
ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https' if use_https else 'http'
# Application definition
INSTALLED_APPS = [
'survey.apps.SurveyConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.reddit',
'django_sass',
]
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
'htmlmin.middleware.HtmlMinifyMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'htmlmin.middleware.MarkRequestMiddleware',
]
ROOT_URLCONF = 'surveysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Explicitly set the default type of primary fields of models to AutoField - in the future, Django will use BigAutoField by default
# https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
SOCIALACCOUNT_PROVIDERS = {
'reddit': {
'APP': {
'client_id': REDDIT_OAUTH_CLIENT_ID,
'secret': REDDIT_OAUTH_SECRET,
'key': '',
},
'SCOPE': ['identity'],
'USER_AGENT': 'django:animesurvey:1.0 (by /u/DragonsOnOurMountain)',
'AUTH_PARAMS': {
'duration': 'permanent',
},
}
}
MESSAGE_TAGS = {
message_constants.DEBUG: 'primary',
message_constants.INFO: 'info',
message_constants.SUCCESS: 'success',
message_constants.WARNING: 'warning',
message_constants.ERROR: 'danger',
}
MESSAGE_LEVEL = message_constants.DEBUG if DEBUG else message_constants.INFO
LOGIN_REDIRECT_URL = 'survey:index'
ACCOUNT_LOGOUT_REDIRECT_URL = 'survey:index' # Why does allauth use django's LOGIN_REDIRECT_URL but not LOGOUT_REDIRECT_URL?
WSGI_APPLICATION = 'surveysite.wsgi.application'
HTML_MINIFY = True
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache' if DEBUG else 'django.core.cache.backends.locmem.LocMemCache',
},
'long': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': BASE_DIR / 'cache/',
},
}
CACHE_MIDDLEWARE_ALIAS = 'default'
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_KEY_PREFIX = ''
# https://github.com/django/django/blob/master/django/utils/log.py
log_directory = 'log/'
log_filename = datetime.datetime.now().strftime('%Y%m%d') + '.log'
try:
os.mkdir(BASE_DIR / log_directory)
except FileExistsError:
pass
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'file': {
'format': '{levelname} {asctime}\n{message}\n',
'style': '{',
},
'console': {
'format': '{levelname} {message}',
'style': '{',
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'filters': ['require_debug_true'],
'formatter': 'console',
},
'file': {
'level': 'WARNING',
'class': 'logging.FileHandler',
'filename': BASE_DIR / (log_directory + log_filename),
'formatter': 'file',
},
},
'root': {
'handlers': ['file'],
},
'loggers': {
'django': {
'handlers': ['console'],
},
},
}
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = BASE_DIR / 'static/'
STATIC_URL = '/static/'
# Media files
MEDIA_ROOT = BASE_DIR / 'files/'
MEDIA_URL = '/files/'
| true | true |
1c3b5a934e3be37f7112f3b58cc9fd7dc8ef632a | 7,743 | py | Python | doc/conf.py | bioidiap/bob.ip.tensorflow_extractor | 14ab1f878a352e1075c31d94c715b4f7556e7afb | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | bioidiap/bob.ip.tensorflow_extractor | 14ab1f878a352e1075c31d94c715b4f7556e7afb | [
"BSD-3-Clause"
] | null | null | null | doc/conf.py | bioidiap/bob.ip.tensorflow_extractor | 14ab1f878a352e1075c31d94c715b4f7556e7afb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
import os
import sys
import glob
import pkg_resources
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.3"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.ifconfig",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.graphviz",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"matplotlib.sphinxext.plot_directive",
]
import sphinx
if sphinx.__version__ >= "1.4.1":
extensions.append("sphinx.ext.imgmath")
imgmath_image_format = "svg"
else:
extensions.append("sphinx.ext.pngmath")
# Be picky about warnings
nitpicky = True
# Ignores stuff we can't easily resolve on other project's sphinx manuals
nitpick_ignore = []
# Allows the user to override warnings from a separate file
if os.path.exists("nitpick-exceptions.txt"):
for line in open("nitpick-exceptions.txt"):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
try: # python 2.x
target = unicode(target)
except NameError:
pass
nitpick_ignore.append((dtype, target))
# Always includes todos
todo_include_todos = True
# Generates auto-summary automatically
autosummary_generate = True
# Create numbers on figures with captions
numfig = True
# If we are on OSX, the 'dvipng' path maybe different
dvipng_osx = "/opt/local/libexec/texlive/binaries/dvipng"
if os.path.exists(dvipng_osx):
pngmath_dvipng = dvipng_osx
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"bob.ip.tensorflow_extractor"
import time
copyright = u"%s, Idiap Research Institute" % time.strftime("%Y")
# Grab the setup entry
distribution = pkg_resources.require(project)[0]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = distribution.version
# The full version, including alpha/beta/rc tags.
release = distribution.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["links.rst"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# Some variables which are useful for generated material
project_variable = project.replace(".", "_")
short_description = u"Tensorflow bindings"
owner = [u"Idiap Research Institute"]
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = project_variable
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "img/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "img/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = project_variable + u"_doc"
# -- Post configuration --------------------------------------------------------
# Included after all input documents
rst_epilog = """
.. |project| replace:: Bob
.. |version| replace:: %s
.. |current-year| date:: %%Y
""" % (
version,
)
# Default processing flags for sphinx
autoclass_content = "class"
autodoc_member_order = "bysource"
autodoc_default_options = {
"members": True,
"show-inheritance": True,
}
# For inter-documentation mapping:
from bob.extension.utils import link_documentation, load_requirements
sphinx_requirements = "extra-intersphinx.txt"
if os.path.exists(sphinx_requirements):
intersphinx_mapping = link_documentation(
additional_packages=["python", "numpy"] + load_requirements(sphinx_requirements)
)
else:
intersphinx_mapping = link_documentation()
| 30.604743 | 88 | 0.718455 |
import os
import sys
import glob
import pkg_resources
needs_sphinx = "1.3"
extensions = [
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.ifconfig",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.graphviz",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"matplotlib.sphinxext.plot_directive",
]
import sphinx
if sphinx.__version__ >= "1.4.1":
extensions.append("sphinx.ext.imgmath")
imgmath_image_format = "svg"
else:
extensions.append("sphinx.ext.pngmath")
nitpicky = True
nitpick_ignore = []
if os.path.exists("nitpick-exceptions.txt"):
for line in open("nitpick-exceptions.txt"):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
try:
target = unicode(target)
except NameError:
pass
nitpick_ignore.append((dtype, target))
todo_include_todos = True
autosummary_generate = True
numfig = True
dvipng_osx = "/opt/local/libexec/texlive/binaries/dvipng"
if os.path.exists(dvipng_osx):
pngmath_dvipng = dvipng_osx
templates_path = ["_templates"]
source_suffix = ".rst"
master_doc = "index"
project = u"bob.ip.tensorflow_extractor"
import time
copyright = u"%s, Idiap Research Institute" % time.strftime("%Y")
distribution = pkg_resources.require(project)[0]
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = distribution.version
# The full version, including alpha/beta/rc tags.
release = distribution.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["links.rst"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# Some variables which are useful for generated material
project_variable = project.replace(".", "_")
short_description = u"Tensorflow bindings"
owner = [u"Idiap Research Institute"]
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = project_variable
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "img/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "img/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = project_variable + u"_doc"
# -- Post configuration --------------------------------------------------------
# Included after all input documents
rst_epilog = """
.. |project| replace:: Bob
.. |version| replace:: %s
.. |current-year| date:: %%Y
""" % (
version,
)
# Default processing flags for sphinx
autoclass_content = "class"
autodoc_member_order = "bysource"
autodoc_default_options = {
"members": True,
"show-inheritance": True,
}
# For inter-documentation mapping:
from bob.extension.utils import link_documentation, load_requirements
sphinx_requirements = "extra-intersphinx.txt"
if os.path.exists(sphinx_requirements):
intersphinx_mapping = link_documentation(
additional_packages=["python", "numpy"] + load_requirements(sphinx_requirements)
)
else:
intersphinx_mapping = link_documentation()
| true | true |
1c3b5b74f45d677071fa6a87f980d8bd333fdc67 | 3,980 | py | Python | hue_sensors_phue.py | robmarkcole/Hue-sensors-phue | d7214ad313c46c7ff48d4f062fe772e4e3b2c1bf | [
"MIT"
] | 3 | 2017-12-21T06:17:55.000Z | 2019-03-15T16:37:45.000Z | hue_sensors_phue.py | robmarkcole/Hue-sensors-phue | d7214ad313c46c7ff48d4f062fe772e4e3b2c1bf | [
"MIT"
] | null | null | null | hue_sensors_phue.py | robmarkcole/Hue-sensors-phue | d7214ad313c46c7ff48d4f062fe772e4e3b2c1bf | [
"MIT"
] | null | null | null | """
Standalone code for parsing phue sensor data.
Robin Cole. 19-12-2017
"""
from phue import Bridge
__version__ = 1.0
def get_response_from_ip(bridge_ip):
    """Return the phue sensor objects, keyed by name, for the bridge at bridge_ip."""
    bridge = Bridge(bridge_ip)
    return bridge.get_sensor_objects('name')
def parse_sml(response):
    """Parse the response for one sub-sensor of a SML Hue motion sensor.

    The SML device exposes three logical sensors; ``response.type`` selects
    which one this object is (light level, temperature, or presence).

    Parameters
    ----------
    response:
        A phue sensor object exposing ``type``, ``state``, ``name`` and
        ``config`` attributes.

    Returns
    -------
    dict
        The parsed sensor data. An empty dict is returned for unrecognised
        sensor types (previously ``data`` was unbound and this raised
        UnboundLocalError).
    """
    data = {}  # fall-back for unexpected response types
    if response.type == "ZLLLightLevel":
        lightlevel = response.state['lightlevel']
        if lightlevel is not None:
            # Hue reports 10000 * log10(lux) + 1; invert that to get lux.
            lux = round(float(10**((lightlevel-1)/10000)), 2)
            data = {'light_level': lightlevel,
                    'lux': lux,
                    'dark': response.state['dark'],
                    'daylight': response.state['daylight'], }
        else:
            data = {'light_level': 'No light level data'}
    elif response.type == "ZLLTemperature":
        if response.state['temperature'] is not None:
            # Temperature is reported in hundredths of a degree Celsius.
            data = {'temperature': response.state['temperature']/100.0}
        else:
            data = {'temperature': 'No temperature data'}
    elif response.type == "ZLLPresence":
        # Insert 'motion' before the last word of the device name,
        # e.g. "Hall sensor" -> "Hall motion sensor".
        arr = response.name.split()
        arr.insert(-1, 'motion')
        name = ' '.join(arr)
        state = 'on' if response.state['presence'] is True else 'off'
        data = {'model': 'SML',
                'name': name,
                'state': state,
                'battery': response.config['battery'],
                'last_updated': response.state['lastupdated'].split('T')}
    return data
def parse_zgp(response):
    """Parse the response for a ZGPSWITCH Hue Tap switch.

    Maps the raw ``buttonevent`` code to a button label. Missing or
    unrecognised codes yield ``'No data'`` (previously an unknown code
    raised KeyError).
    """
    TAP_BUTTONS = {34: '1_click', 16: '2_click', 17: '3_click', 18: '4_click'}
    press = response.state['buttonevent']
    # .get also covers press is None, since None is not a dict key here.
    button = TAP_BUTTONS.get(press, 'No data')
    return {'model': 'ZGP',
            'name': response.name,
            'state': button,
            'last_updated': response.state['lastupdated'].split('T')}
def parse_rwl(response):
    """Parse the response for a RWL Hue dimmer remote.

    Button events ending in 0 or 2 are reported as clicks, everything else
    as holds. A missing event yields ``'No data'`` (previously ``None`` was
    stringified, producing the bogus state ``'N_hold'``).
    """
    press = response.state['buttonevent']
    if press is None:
        button = 'No data'
    else:
        press = str(press)
        # First digit identifies the button (1-4); the last digit the event kind.
        button = press[0] + ('_click' if press[-1] in ('0', '2') else '_hold')
    return {'model': 'RWL',
            'name': response.name,
            'state': button,
            'battery': response.config['battery'],
            'last_updated': response.state['lastupdated'].split('T')}
def parse_geofence(response):
    """Parse the response for a GEOFENCE presence sensor."""
    present = response.state['presence'] is True
    return {'name': response.name,
            'model': 'GEO',
            'state': 'on' if present else 'off'}
def parse_hue_api_response(response):
    """Build a dict of parsed sensor data, keyed by a short per-device hue id."""
    data_dict = {}  # the list of sensors, referenced by their hue_id
    for sensor in response.values():
        model = sensor.modelid[0:3]
        if model in ('RWL', 'SML', 'ZGP'):
            hue_id = model + '_' + sensor.uniqueid.split(':')[-1][0:5]
            if model == 'RWL':
                data_dict[hue_id] = parse_rwl(sensor)
            elif model == 'ZGP':
                data_dict[hue_id] = parse_zgp(sensor)
            else:
                # SML: merge the light/temperature/presence sub-sensors into
                # one entry, since they share a device id.
                if hue_id in data_dict:
                    data_dict[hue_id].update(parse_sml(sensor))
                else:
                    data_dict[hue_id] = parse_sml(sensor)
        elif sensor.modelid == 'HA_GEOFENCE':
            data_dict['Geofence'] = parse_geofence(sensor)
    return data_dict
| 31.338583 | 78 | 0.557286 | from phue import Bridge
__version__ = 1.0
def get_response_from_ip(bridge_ip):
b = Bridge(bridge_ip)
response = b.get_sensor_objects('name')
return response
def parse_sml(response):
if response.type == "ZLLLightLevel":
lightlevel = response.state['lightlevel']
if lightlevel is not None:
lux = round(float(10**((lightlevel-1)/10000)), 2)
dark = response.state['dark']
daylight = response.state['daylight']
data = {'light_level': lightlevel,
'lux': lux,
'dark': dark,
'daylight': daylight, }
else:
data = {'light_level': 'No light level data'}
elif response.type == "ZLLTemperature":
if response.state['temperature'] is not None:
data = {'temperature': response.state['temperature']/100.0}
else:
data = {'temperature': 'No temperature data'}
elif response.type == "ZLLPresence":
name_raw = response.name
arr = name_raw.split()
arr.insert(-1, 'motion')
name = ' '.join(arr)
hue_state = response.state['presence']
if hue_state is True:
state = 'on'
else:
state = 'off'
data = {'model': 'SML',
'name': name,
'state': state,
'battery': response.config['battery'],
'last_updated': response.state['lastupdated'].split('T')}
return data
def parse_zgp(response):
TAP_BUTTONS = {34: '1_click', 16: '2_click', 17: '3_click', 18: '4_click'}
press = response.state['buttonevent']
if press is None:
button = 'No data'
else:
button = TAP_BUTTONS[press]
data = {'model': 'ZGP',
'name': response.name,
'state': button,
'last_updated': response.state['lastupdated'].split('T')}
return data
def parse_rwl(response):
press = str(response.state['buttonevent'])
if press[-1] in ['0', '2']:
button = str(press)[0] + '_click'
else:
button = str(press)[0] + '_hold'
data = {'model': 'RWL',
'name': response.name,
'state': button,
'battery': response.config['battery'],
'last_updated': response.state['lastupdated'].split('T')}
return data
def parse_geofence(response):
hue_state = response.state['presence']
if hue_state is True:
state = 'on'
else:
state = 'off'
data = {'name': response.name,
'model': 'GEO',
'state': state}
return data
def parse_hue_api_response(response):
data_dict = {}
for key in response.keys():
sensor = response[key]
modelid = sensor.modelid[0:3]
if modelid in ['RWL', 'SML', 'ZGP']:
_key = modelid + '_' + sensor.uniqueid.split(':')[-1][0:5]
if modelid == 'RWL':
data_dict[_key] = parse_rwl(sensor)
elif modelid == 'ZGP':
data_dict[_key] = parse_zgp(sensor)
elif modelid == 'SML':
if _key not in data_dict.keys():
data_dict[_key] = parse_sml(sensor)
else:
data_dict[_key].update(parse_sml(sensor))
elif sensor.modelid == 'HA_GEOFENCE':
data_dict['Geofence'] = parse_geofence(sensor)
return data_dict
| true | true |
1c3b5b94e089f52f18d59ebd2d3121a21de46f0a | 2,449 | py | Python | data/cirq_new/cirq_program/startCirq_pragma182.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_pragma182.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_pragma182.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=12
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
    """Point optimizer that rewrites each CZPowGate operation into CZ + X + X."""

    def optimization_at(
        self,
        circuit: 'cirq.Circuit',
        index: int,
        op: 'cirq.Operation'
    ) -> Optional[cirq.PointOptimizationSummary]:
        # Only gate operations whose gate is a CZPowGate are rewritten;
        # everything else is left untouched (returns None).
        if not isinstance(op, cirq.ops.GateOperation):
            return None
        if not isinstance(op.gate, cirq.CZPowGate):
            return None
        replacement = [
            cirq.CZ(*op.qubits),
            cirq.X.on_each(*op.qubits),
            cirq.X.on_each(*op.qubits),
        ]
        return cirq.PointOptimizationSummary(
            clear_span=1,
            clear_qubits=op.qubits,
            new_operations=replacement,
        )
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Assemble the fixed demo circuit on the given qubits.

    NOTE(review): ``n`` is unused — the circuit is hard-coded to 4 qubits.
    """
    c = cirq.Circuit()
    # Put all four qubits into superposition.
    for idx in range(4):
        c.append(cirq.H.on(input_qubit[idx]))
    c.append(cirq.SWAP.on(input_qubit[2], input_qubit[0]))
    c.append(cirq.X.on(input_qubit[1]))
    c.append(cirq.SWAP.on(input_qubit[2], input_qubit[0]))
    # Paired X gates (kept from the generated source; each pair is identity).
    for _ in range(2):
        c.append(cirq.X.on(input_qubit[3]))
    for _ in range(2):
        c.append(cirq.X.on(input_qubit[2]))
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Concatenate measurement bits into a string such as '0110'."""
    chars = [str(int(b)) for b in bits]
    return ''.join(chars)
if __name__ == '__main__':
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count = 2000
    # Simulate the circuit and collect measurement-result frequencies.
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # Context manager guarantees the file is closed even if a print fails
    # (the original open()/close() pair leaked the handle on error).
    with open("../data/startCirq_pragma182.csv", "w+") as writefile:
        print(format(frequencies), file=writefile)
        print("results end", file=writefile)
        print(circuit.__len__(), file=writefile)
        print(circuit, file=writefile)
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
def make_circuit(n: int, input_qubit):
c = cirq.Circuit()
c.append(cirq.H.on(input_qubit[0]))
c.append(cirq.H.on(input_qubit[1]))
c.append(cirq.H.on(input_qubit[2]))
c.append(cirq.H.on(input_qubit[3]))
c.append(cirq.SWAP.on(input_qubit[2],input_qubit[0]))
c.append(cirq.X.on(input_qubit[1]))
c.append(cirq.SWAP.on(input_qubit[2],input_qubit[0]))
c.append(cirq.X.on(input_qubit[3]))
c.append(cirq.X.on(input_qubit[3]))
c.append(cirq.X.on(input_qubit[2]))
c.append(cirq.X.on(input_qubit[2]))
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma182.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | true | true |
1c3b5c6e0d48501de1a38795e0fcf84cb9e36606 | 16,927 | py | Python | seml/config.py | cqql/seml | 9c0c8fed0135508e1f151662843af6d6adf7102b | [
"MIT"
] | 69 | 2019-12-14T06:04:54.000Z | 2022-03-24T08:58:23.000Z | seml/config.py | cqql/seml | 9c0c8fed0135508e1f151662843af6d6adf7102b | [
"MIT"
] | 60 | 2020-04-02T13:19:20.000Z | 2022-03-31T10:24:43.000Z | seml/config.py | cqql/seml | 9c0c8fed0135508e1f151662843af6d6adf7102b | [
"MIT"
] | 21 | 2020-04-02T10:04:51.000Z | 2022-03-23T17:34:10.000Z | import logging
import numpy as np
import yaml
import ast
import jsonpickle
import json
import os
from pathlib import Path
import copy
from itertools import combinations
from seml.sources import import_exe
from seml.parameters import sample_random_configs, generate_grid, cartesian_product_dict
from seml.utils import merge_dicts, flatten, unflatten
from seml.errors import ConfigError, ExecutableError
from seml.settings import SETTINGS
RESERVED_KEYS = ['grid', 'fixed', 'random']
def unpack_config(config):
    """Split a config dict into its reserved sections and its named sub-configs."""
    config = convert_parameter_collections(config)
    children = {}
    reserved_dict = {}
    for key, value in config.items():
        # Entries that are not dicts belong to neither category.
        if not isinstance(value, dict):
            continue
        if key in RESERVED_KEYS:
            if key == 'random' and 'samples' not in value:
                raise ConfigError('Random parameters must specify "samples", i.e. the number of random samples.')
            reserved_dict[key] = value
        else:
            children[key] = value
    return reserved_dict, children
def extract_parameter_set(input_config: dict, key: str):
    """Return the set of flattened parameter names defined under ``key``."""
    flattened = flatten(input_config.get(key, {}))
    if key == 'fixed':
        return set(flattened.keys())
    # grid/random entries end in an attribute (e.g. ".type") that is stripped;
    # legacy parameter-collection markers are skipped entirely.
    return {".".join(name.split(".")[:-1])
            for name, value in flattened.items()
            if value != 'parameter_collection'}
def convert_parameter_collections(input_config: dict):
    """Rewrite deprecated ``parameter_collection`` blocks into dot-notation keys.

    Flattened keys whose value is the marker string ``"parameter_collection"``
    are dropped, and their sibling ``<prefix>.params.*`` entries are renamed
    to ``<prefix>.*``.

    Raises
    ------
    ConfigError
        If a renamed key would collide with an existing key.
    """
    flattened_dict = flatten(input_config)
    parameter_collection_keys = [k for k in flattened_dict.keys()
                                 if flattened_dict[k] == "parameter_collection"]
    if len(parameter_collection_keys) > 0:
        logging.warning("Parameter collections are deprecated. Use dot-notation for nested parameters instead.")
    while len(parameter_collection_keys) > 0:
        # Handle one marker at a time; the dict is mutated inside the loop.
        k = parameter_collection_keys[0]
        del flattened_dict[k]
        # sub1.sub2.type ==> # sub1.sub2
        k = ".".join(k.split(".")[:-1])
        parameter_collections_params = [param_key for param_key in flattened_dict.keys() if param_key.startswith(k)]
        for p in parameter_collections_params:
            if f"{k}.params" in p:
                new_key = p.replace(f"{k}.params", k)
                if new_key in flattened_dict:
                    raise ConfigError(f"Could not convert parameter collections due to key collision: {new_key}.")
                flattened_dict[new_key] = flattened_dict[p]
                del flattened_dict[p]
        # Re-scan: renaming may have removed (or, for nested collections,
        # left behind) further markers.
        parameter_collection_keys = [k for k in flattened_dict.keys()
                                     if flattened_dict[k] == "parameter_collection"]
    return unflatten(flattened_dict)
def standardize_config(config: dict):
    """Normalize a config so each reserved section maps dotted parameter paths to values."""
    # Flatten completely, then restore only the top level (the section names).
    config = unflatten(flatten(config), levels=[0])
    standardized = {}
    for section in RESERVED_KEYS:
        section_params = config.get(section, {})
        if section == "fixed":
            standardized[section] = section_params
        else:
            # grid/random parameters keep their last component nested
            # (e.g. the "type"/"samples" attributes).
            standardized[section] = unflatten(section_params, levels=[-1])
    return standardized
def invert_config(config: dict):
    """Map each parameter name to the list of reserved sections that define it."""
    inverted = {}
    for section in RESERVED_KEYS:
        for param in config.get(section, {}).keys():
            inverted.setdefault(param, []).append(section)
    return inverted
def detect_duplicate_parameters(inverted_config: dict, sub_config_name: str = None, ignore_keys: dict = None):
    """Raise ConfigError if a parameter appears in several sections, or if one
    dotted parameter name is a prefix of another (ambiguous dot-notation)."""
    if ignore_keys is None:
        # 'seed' and 'samples' legitimately co-exist with the 'random' section.
        ignore_keys = {'random': ('seed', 'samples')}
    duplicates = [(param, sections)
                  for param, sections in inverted_config.items()
                  if len(sections) > 1
                  and not ('random' in sections and param in ignore_keys['random'])]
    if duplicates:
        if sub_config_name:
            raise ConfigError(f"Found duplicate keys in sub-config {sub_config_name}: "
                              f"{duplicates}")
        raise ConfigError(f"Found duplicate keys: {duplicates}")
    # Bucket parameter names by first character so only plausible prefix
    # pairs are compared.
    start_characters = {name[0] for name in inverted_config.keys()}
    buckets = {ch: {name for name in inverted_config.keys() if name.startswith(ch)}
               for ch in start_characters}
    if sub_config_name:
        error_str = (f"Conflicting parameters in sub-config {sub_config_name}, most likely "
                     "due to ambiguous use of dot-notation in the config dict. Found "
                     "parameter '{p1}' in dot-notation starting with other parameter "
                     "'{p2}', which is ambiguous.")
    else:
        error_str = (f"Conflicting parameters, most likely "
                     "due to ambiguous use of dot-notation in the config dict. Found "
                     "parameter '{p1}' in dot-notation starting with other parameter "
                     "'{p2}', which is ambiguous.")
    for bucket in buckets.values():
        for p1, p2 in combinations(bucket, r=2):
            # Trailing "." avoids false positives like "test" vs "test1".
            if p1.startswith(f"{p2}.") or p2.startswith(f"{p1}."):
                raise ConfigError(error_str.format(p1=p1, p2=p2))
def generate_configs(experiment_config):
    """Generate parameter configurations based on an input configuration.
    Input is a nested configuration where on each level there can be 'fixed', 'grid', and 'random' parameters.
    In essence, we take the cartesian product of all the `grid` parameters and take random samples for the random
    parameters. The nested structure makes it possible to define different parameter spaces e.g. for different datasets.
    Parameter definitions lower in the hierarchy overwrite parameters defined closer to the root.
    For each leaf configuration we take the maximum of all num_samples values on the path since we need to have the same
    number of samples for each random parameter.
    For each configuration of the `grid` parameters we then create `num_samples` configurations of the random
    parameters, i.e. leading to `num_samples * len(grid_configurations)` configurations.
    See Also `examples/example_config.yaml` and the example below.
    Parameters
    ----------
    experiment_config: dict
        Dictionary that specifies the "search space" of parameters that will be enumerated. Should be
        parsed from a YAML file.
    Returns
    -------
    all_configs: list of dicts
        Contains the individual combinations of the parameters.
    """
    reserved, next_level = unpack_config(experiment_config)
    reserved = standardize_config(reserved)
    if not any([len(reserved.get(k, {})) > 0 for k in RESERVED_KEYS]):
        raise ConfigError("No parameters defined under grid, fixed, or random in the config file.")
    # Breadth-first traversal over the nested sub-configs; each stack entry is
    # (dotted sub-config name, raw sub-config dict), with the merged parent
    # parameters kept in lock-step in config_levels.
    level_stack = [('', next_level)]
    config_levels = [reserved]
    final_configs = []
    detect_duplicate_parameters(invert_config(reserved), None)
    while len(level_stack) > 0:
        current_sub_name, sub_vals = level_stack.pop(0)
        sub_config, sub_levels = unpack_config(sub_vals)
        if current_sub_name != '' and not any([len(sub_config.get(k, {})) > 0 for k in RESERVED_KEYS]):
            raise ConfigError(f"No parameters defined under grid, fixed, or random in sub-config {current_sub_name}.")
        sub_config = standardize_config(sub_config)
        config_above = config_levels.pop(0)
        inverted_sub_config = invert_config(sub_config)
        detect_duplicate_parameters(inverted_sub_config, current_sub_name)
        inverted_config_above = invert_config(config_above)
        redefined_parameters = set(inverted_sub_config.keys()).intersection(set(inverted_config_above.keys()))
        if len(redefined_parameters) > 0:
            logging.info(f"Found redefined parameters in sub-config '{current_sub_name}': {redefined_parameters}. "
                         f"Definitions in sub-configs override more general ones.")
            # Deep-copy before deleting so sibling sub-configs still see the
            # parent's original parameters.
            config_above = copy.deepcopy(config_above)
            for p in redefined_parameters:
                sections = inverted_config_above[p]
                for s in sections:
                    del config_above[s][p]
        config = merge_dicts(config_above, sub_config)
        if len(sub_levels) == 0:
            # Leaf sub-config: this merged parameter set is final.
            final_configs.append((current_sub_name, config))
        for sub_name, sub_vals in sub_levels.items():
            new_sub_name = f'{current_sub_name}.{sub_name}' if current_sub_name != '' else sub_name
            level_stack.append((new_sub_name, sub_vals))
            config_levels.append(config)
    all_configs = []
    for subconfig_name, conf in final_configs:
        conf = standardize_config(conf)
        random_params = conf.get('random', {})
        fixed_params = flatten(conf.get('fixed', {}))
        grid_params = conf.get('grid', {})
        if len(random_params) > 0:
            num_samples = random_params['samples']
            root_seed = random_params.get('seed', None)
            random_sampled = sample_random_configs(flatten(random_params), seed=root_seed, samples=num_samples)
        grids = [generate_grid(v, parent_key=k) for k, v in grid_params.items()]
        grid_configs = dict([sub for item in grids for sub in item])
        grid_product = list(cartesian_product_dict(grid_configs))
        # Fixed parameters override grid values on key collision (later
        # entries win in a dict merge).
        with_fixed = [{**d, **fixed_params} for d in grid_product]
        if len(random_params) > 0:
            with_random = [{**grid, **random} for grid in with_fixed for random in random_sampled]
        else:
            with_random = with_fixed
        all_configs.extend(with_random)
    # Cast NumPy integers to normal integers since PyMongo doesn't like them
    all_configs = [{k: int(v) if isinstance(v, np.integer) else v
                    for k, v in config.items()}
                   for config in all_configs]
    all_configs = [unflatten(conf) for conf in all_configs]
    return all_configs
def check_config(executable, conda_env, configs):
    """Check if the given configs are consistent with the Sacred experiment in the given executable.
    Parameters
    ----------
    executable: str
        The Python file containing the experiment.
    conda_env: str
        The experiment's Anaconda environment.
    configs: list of dicts
        Contains the parameter configurations.
    Returns
    -------
    None
    """
    import sacred
    exp_module = import_exe(executable, conda_env)
    # Extract experiment from module
    # NOTE(review): exact type match (not isinstance) — presumably to exclude
    # Experiment subclasses; confirm before changing.
    exps = [v for k, v in exp_module.__dict__.items() if type(v) == sacred.Experiment]
    if len(exps) == 0:
        raise ExecutableError(f"Found no Sacred experiment. Something is wrong in '{executable}'.")
    elif len(exps) > 1:
        raise ExecutableError(f"Found more than 1 Sacred experiment in '{executable}'. "
                              f"Can't check parameter configs. Disable via --no-sanity-check.")
    exp = exps[0]
    # A run built without config updates yields the experiment's defaults.
    empty_run = sacred.initialize.create_run(exp, exp.default_command, config_updates=None, named_configs=())
    # All dotted argument paths that the experiment's captured functions accept.
    captured_args = {
        sacred.utils.join_paths(cf.prefix, n)
        for cf in exp.captured_functions
        for n in cf.signature.arguments
    }
    for config in configs:
        config_added = {k: v for k, v in config.items() if k not in empty_run.config.keys()}
        config_flattened = {k for k, _ in sacred.utils.iterate_flattened(config_added)}
        # Check for unused arguments
        for conf in sorted(config_flattened):
            if not (set(sacred.utils.iter_prefixes(conf)) & captured_args):
                raise sacred.utils.ConfigAddedError(conf, config=config_added)
        # Check for missing arguments
        options = empty_run.config.copy()
        options.update(config)
        options.update({k: None for k in sacred.utils.ConfigAddedError.SPECIAL_ARGS})
        empty_run.main_function.signature.construct_arguments((), {}, options, False)
def restore(flat):
    """
    Restore more complex data that Python's json can't handle (e.g. Numpy arrays).
    Copied from sacred.serializer for performance reasons.
    """
    serialized = json.dumps(flat)
    return jsonpickle.decode(serialized, keys=True)
def _convert_value(value):
    """
    Parse string as python literal if possible and fallback to string.
    Copied from sacred.arg_parser for performance reasons.
    """
    try:
        # The except clause deliberately covers both literal_eval and
        # restore: either failing means "keep the raw string".
        return restore(ast.literal_eval(value))
    except (ValueError, SyntaxError):
        # use as string if nothing else worked
        return value
def convert_values(val):
    """Recursively parse string leaves of nested dicts/lists as Python literals."""
    if isinstance(val, dict):
        for k in val:
            val[k] = convert_values(val[k])
    elif isinstance(val, list):
        val[:] = [convert_values(item) for item in val]
    elif isinstance(val, str):
        return _convert_value(val)
    # Containers are mutated in place and returned; other types pass through.
    return val
class YamlUniqueLoader(yaml.FullLoader):
    """
    Custom YAML loader that disallows duplicate keys
    From https://github.com/encukou/naucse_render/commit/658197ed142fec2fe31574f1ff24d1ff6d268797
    Workaround for PyYAML issue: https://github.com/yaml/pyyaml/issues/165
    This disables some uses of YAML merge (`<<`)
    """
def construct_mapping(loader, node, deep=False):
    """Construct a YAML mapping node, avoiding duplicates"""
    loader.flatten_mapping(node)
    result = {}
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        if key in result:
            # Plain yaml.load silently keeps the last duplicate; we fail loudly.
            raise ConfigError(f"Found duplicate keys: '{key}'")
        result[key] = loader.construct_object(value_node, deep=deep)
    return result
# Register the duplicate-rejecting constructor for all mapping nodes
# (module-level side effect, must run after both definitions above).
YamlUniqueLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
    construct_mapping,
)
def read_config(config_path):
    """Read and validate a SEML YAML config file.

    Returns a 3-tuple ``(seml_dict, slurm_dict, config_dict)`` where
    ``slurm_dict`` is None if no 'slurm' block is present and
    ``config_dict`` contains the remaining (experiment) entries.

    Raises ConfigError for missing 'seml' blocks or unknown keys.
    """
    with open(config_path, 'r') as conf:
        config_dict = convert_values(yaml.load(conf, Loader=YamlUniqueLoader))
    if "seml" not in config_dict:
        raise ConfigError("Please specify a 'seml' dictionary.")
    seml_dict = config_dict['seml']
    del config_dict['seml']
    for k in seml_dict.keys():
        if k not in SETTINGS.VALID_SEML_CONFIG_VALUES:
            raise ConfigError(f"{k} is not a valid value in the `seml` config block.")
    # Side effect: chdirs into the working directory and rewrites seml_dict.
    set_executable_and_working_dir(config_path, seml_dict)
    if 'output_dir' in seml_dict:
        seml_dict['output_dir'] = str(Path(seml_dict['output_dir']).expanduser().resolve())
    if 'slurm' in config_dict:
        slurm_dict = config_dict['slurm']
        del config_dict['slurm']
        for k in slurm_dict.keys():
            if k not in SETTINGS.VALID_SLURM_CONFIG_VALUES:
                raise ConfigError(f"{k} is not a valid value in the `slurm` config block.")
        return seml_dict, slurm_dict, config_dict
    else:
        return seml_dict, None, config_dict
def set_executable_and_working_dir(config_path, seml_dict):
    """
    Determine the working directory of the project and chdir into the working directory.
    Parameters
    ----------
    config_path: Path to the config file
    seml_dict: SEML config dictionary
    Returns
    -------
    None
    """
    config_dir = str(Path(config_path).expanduser().resolve().parent)
    working_dir = config_dir
    # chdir first so os.path.exists below resolves relative to the config dir.
    os.chdir(working_dir)
    if "executable" not in seml_dict:
        raise ConfigError("Please specify an executable path for the experiment.")
    executable = seml_dict['executable']
    executable_relative_to_config = os.path.exists(executable)
    executable_relative_to_project_root = False
    if 'project_root_dir' in seml_dict:
        working_dir = str(Path(seml_dict['project_root_dir']).expanduser().resolve())
        seml_dict['use_uploaded_sources'] = True
        os.chdir(working_dir)  # use project root as base dir from now on
        executable_relative_to_project_root = os.path.exists(executable)
        del seml_dict['project_root_dir']  # from now on we use only the working dir
    else:
        seml_dict['use_uploaded_sources'] = False
        logging.warning("'project_root_dir' not defined in seml config. Source files will not be saved in MongoDB.")
    seml_dict['working_dir'] = working_dir
    if not (executable_relative_to_config or executable_relative_to_project_root):
        raise ExecutableError(f"Could not find the executable.")
    executable = str(Path(executable).expanduser().resolve())
    # Store the executable relative to whichever base directory resolved it.
    seml_dict['executable'] = (str(Path(executable).relative_to(working_dir)) if executable_relative_to_project_root
                               else str(Path(executable).relative_to(config_dir)))
def remove_prepended_dashes(param_dict):
    """Return a copy of ``param_dict`` with one leading '--' or '-' stripped from each key."""
    def _strip(key):
        # Exactly one prefix is removed; '--' takes precedence over '-'.
        if key.startswith('--'):
            return key[2:]
        if key.startswith('-'):
            return key[1:]
        return key
    return {_strip(k): v for k, v in param_dict.items()}
| 38.734554 | 120 | 0.659184 | import logging
import numpy as np
import yaml
import ast
import jsonpickle
import json
import os
from pathlib import Path
import copy
from itertools import combinations
from seml.sources import import_exe
from seml.parameters import sample_random_configs, generate_grid, cartesian_product_dict
from seml.utils import merge_dicts, flatten, unflatten
from seml.errors import ConfigError, ExecutableError
from seml.settings import SETTINGS
RESERVED_KEYS = ['grid', 'fixed', 'random']
def unpack_config(config):
config = convert_parameter_collections(config)
children = {}
reserved_dict = {}
for key, value in config.items():
if not isinstance(value, dict):
continue
if key not in RESERVED_KEYS:
children[key] = value
else:
if key == 'random':
if 'samples' not in value:
raise ConfigError('Random parameters must specify "samples", i.e. the number of random samples.')
reserved_dict[key] = value
else:
reserved_dict[key] = value
return reserved_dict, children
def extract_parameter_set(input_config: dict, key: str):
flattened_dict = flatten(input_config.get(key, {}))
keys = flattened_dict.keys()
if key != 'fixed':
keys = [".".join(k.split(".")[:-1]) for k in keys
if flattened_dict[k] != 'parameter_collection']
return set(keys)
def convert_parameter_collections(input_config: dict):
flattened_dict = flatten(input_config)
parameter_collection_keys = [k for k in flattened_dict.keys()
if flattened_dict[k] == "parameter_collection"]
if len(parameter_collection_keys) > 0:
logging.warning("Parameter collections are deprecated. Use dot-notation for nested parameters instead.")
while len(parameter_collection_keys) > 0:
k = parameter_collection_keys[0]
del flattened_dict[k]
= ".".join(k.split(".")[:-1])
parameter_collections_params = [param_key for param_key in flattened_dict.keys() if param_key.startswith(k)]
for p in parameter_collections_params:
if f"{k}.params" in p:
new_key = p.replace(f"{k}.params", k)
if new_key in flattened_dict:
raise ConfigError(f"Could not convert parameter collections due to key collision: {new_key}.")
flattened_dict[new_key] = flattened_dict[p]
del flattened_dict[p]
parameter_collection_keys = [k for k in flattened_dict.keys()
if flattened_dict[k] == "parameter_collection"]
return unflatten(flattened_dict)
def standardize_config(config: dict):
config = unflatten(flatten(config), levels=[0])
out_dict = {}
for k in RESERVED_KEYS:
if k == "fixed":
out_dict[k] = config.get(k, {})
else:
out_dict[k] = unflatten(config.get(k, {}), levels=[-1])
return out_dict
def invert_config(config: dict):
reserved_sets = [(k, set(config.get(k, {}).keys())) for k in RESERVED_KEYS]
inverted_config = {}
for k, params in reserved_sets:
for p in params:
l = inverted_config.get(p, [])
l.append(k)
inverted_config[p] = l
return inverted_config
def detect_duplicate_parameters(inverted_config: dict, sub_config_name: str = None, ignore_keys: dict = None):
    """Raise ConfigError if a parameter is defined in several sections or if
    dot-notation keys are ambiguous (one key is a dotted prefix of another).

    ``inverted_config`` maps parameter name -> list of sections defining it
    (see invert_config). ``ignore_keys`` maps a section to parameter names
    that may legitimately appear more than once (default: random seed/samples).
    """
    if ignore_keys is None:
        ignore_keys = {'random': ('seed', 'samples')}
    duplicate_keys = []
    for p, l in inverted_config.items():
        if len(l) > 1:
            # 'seed'/'samples' under 'random' are control values, not parameters.
            if 'random' in l and p in ignore_keys['random']:
                continue
            duplicate_keys.append((p, l))
    if len(duplicate_keys) > 0:
        if sub_config_name:
            raise ConfigError(f"Found duplicate keys in sub-config {sub_config_name}: "
                              f"{duplicate_keys}")
        else:
            raise ConfigError(f"Found duplicate keys: {duplicate_keys}")
    # Bucket keys by their first character so prefix pairs are only compared
    # within a bucket instead of across all key pairs.
    start_characters = set([x[0] for x in inverted_config.keys()])
    buckets = {k: {x for x in inverted_config.keys() if x.startswith(k)} for k in start_characters}
    # NOTE: only the first string segment below is an f-string (interpolates
    # sub_config_name now); '{p1}'/'{p2}' are plain-string placeholders filled
    # later via .format(). Keep the f prefixes exactly as they are.
    if sub_config_name:
        error_str = (f"Conflicting parameters in sub-config {sub_config_name}, most likely "
                     "due to ambiguous use of dot-notation in the config dict. Found "
                     "parameter '{p1}' in dot-notation starting with other parameter "
                     "'{p2}', which is ambiguous.")
    else:
        error_str = (f"Conflicting parameters, most likely "
                     "due to ambiguous use of dot-notation in the config dict. Found "
                     "parameter '{p1}' in dot-notation starting with other parameter "
                     "'{p2}', which is ambiguous.")
    for k in buckets.keys():
        for p1, p2 in combinations(buckets[k], r=2):
            if p1.startswith(f"{p2}."):
                raise ConfigError(error_str.format(p1=p1, p2=p2))
            elif p2.startswith(f"{p1}."):
                raise ConfigError(error_str.format(p1=p1, p2=p2))
def generate_configs(experiment_config):
    """Expand an experiment config into the list of concrete run configs.

    Walks the tree of sub-configs breadth-first, merging each sub-config with
    its parent (sub-config definitions override parent ones), then for every
    leaf combines the cartesian product of the 'grid' section with the 'fixed'
    parameters and the drawn 'random' samples.

    Raises:
        ConfigError: if no grid/fixed/random parameters are defined, or on
            duplicate/ambiguous parameter definitions.
    """
    reserved, next_level = unpack_config(experiment_config)
    reserved = standardize_config(reserved)
    if not any([len(reserved.get(k, {})) > 0 for k in RESERVED_KEYS]):
        raise ConfigError("No parameters defined under grid, fixed, or random in the config file.")
    level_stack = [('', next_level)]
    config_levels = [reserved]
    final_configs = []
    detect_duplicate_parameters(invert_config(reserved), None)
    # Breadth-first traversal over sub-configs; config_levels mirrors
    # level_stack and carries the merged parent config for each entry.
    while len(level_stack) > 0:
        current_sub_name, sub_vals = level_stack.pop(0)
        sub_config, sub_levels = unpack_config(sub_vals)
        if current_sub_name != '' and not any([len(sub_config.get(k, {})) > 0 for k in RESERVED_KEYS]):
            raise ConfigError(f"No parameters defined under grid, fixed, or random in sub-config {current_sub_name}.")
        sub_config = standardize_config(sub_config)
        config_above = config_levels.pop(0)
        inverted_sub_config = invert_config(sub_config)
        detect_duplicate_parameters(inverted_sub_config, current_sub_name)
        inverted_config_above = invert_config(config_above)
        redefined_parameters = set(inverted_sub_config.keys()).intersection(set(inverted_config_above.keys()))
        if len(redefined_parameters) > 0:
            logging.info(f"Found redefined parameters in sub-config '{current_sub_name}': {redefined_parameters}. "
                         f"Definitions in sub-configs override more general ones.")
            # Deep-copy before deleting so siblings still see the parent values.
            config_above = copy.deepcopy(config_above)
            for p in redefined_parameters:
                sections = inverted_config_above[p]
                for s in sections:
                    del config_above[s][p]
        config = merge_dicts(config_above, sub_config)
        if len(sub_levels) == 0:
            # Leaf sub-config: this merged config becomes a final one.
            final_configs.append((current_sub_name, config))
        for sub_name, sub_vals in sub_levels.items():
            new_sub_name = f'{current_sub_name}.{sub_name}' if current_sub_name != '' else sub_name
            level_stack.append((new_sub_name, sub_vals))
            config_levels.append(config)
    all_configs = []
    for subconfig_name, conf in final_configs:
        conf = standardize_config(conf)
        random_params = conf.get('random', {})
        fixed_params = flatten(conf.get('fixed', {}))
        grid_params = conf.get('grid', {})
        if len(random_params) > 0:
            num_samples = random_params['samples']
            root_seed = random_params.get('seed', None)
            random_sampled = sample_random_configs(flatten(random_params), seed=root_seed, samples=num_samples)
        grids = [generate_grid(v, parent_key=k) for k, v in grid_params.items()]
        grid_configs = dict([sub for item in grids for sub in item])
        grid_product = list(cartesian_product_dict(grid_configs))
        with_fixed = [{**d, **fixed_params} for d in grid_product]
        if len(random_params) > 0:
            with_random = [{**grid, **random} for grid in with_fixed for random in random_sampled]
        else:
            with_random = with_fixed
        all_configs.extend(with_random)
    # Cast numpy integers (e.g. from grid generation) to plain ints so the
    # configs serialize cleanly.
    all_configs = [{k: int(v) if isinstance(v, np.integer) else v
                    for k, v in config.items()}
                   for config in all_configs]
    all_configs = [unflatten(conf) for conf in all_configs]
    return all_configs
def check_config(executable, conda_env, configs):
    """Sanity-check generated configs against the Sacred experiment.

    Imports the experiment module, locates its single sacred.Experiment, and
    verifies that every config only sets parameters captured by the
    experiment's functions and that the main function accepts the resulting
    options.

    Raises:
        ExecutableError: if zero or multiple Sacred experiments are found.
        sacred.utils.ConfigAddedError: if a config sets an uncaptured parameter.
    """
    import sacred
    exp_module = import_exe(executable, conda_env)
    # Extract the (unique) Sacred experiment from the imported module.
    exps = [v for k, v in exp_module.__dict__.items() if type(v) == sacred.Experiment]
    if len(exps) == 0:
        raise ExecutableError(f"Found no Sacred experiment. Something is wrong in '{executable}'.")
    elif len(exps) > 1:
        raise ExecutableError(f"Found more than 1 Sacred experiment in '{executable}'. "
                              f"Can't check parameter configs. Disable via --no-sanity-check.")
    exp = exps[0]
    # A dry run gives access to the default config and captured arguments.
    empty_run = sacred.initialize.create_run(exp, exp.default_command, config_updates=None, named_configs=())
    captured_args = {
        sacred.utils.join_paths(cf.prefix, n)
        for cf in exp.captured_functions
        for n in cf.signature.arguments
    }
    for config in configs:
        config_added = {k: v for k, v in config.items() if k not in empty_run.config.keys()}
        config_flattened = {k for k, _ in sacred.utils.iterate_flattened(config_added)}
        for conf in sorted(config_flattened):
            if not (set(sacred.utils.iter_prefixes(conf)) & captured_args):
                raise sacred.utils.ConfigAddedError(conf, config=config_added)
        options = empty_run.config.copy()
        options.update(config)
        options.update({k: None for k in sacred.utils.ConfigAddedError.SPECIAL_ARGS})
        # Raises if the main function cannot be called with these options.
        empty_run.main_function.signature.construct_arguments((), {}, options, False)
def restore(flat):
    """Round-trip ``flat`` through JSON and jsonpickle to restore richer
    Python objects that plain JSON cannot represent (keys=True keeps
    non-string dict keys)."""
    return jsonpickle.decode(json.dumps(flat), keys=True)
def _convert_value(value):
    """Interpret ``value`` as a Python literal and restore it; fall back to
    returning the raw string when it is not a valid literal."""
    try:
        literal = ast.literal_eval(value)
        return restore(literal)
    except (ValueError, SyntaxError):
        # Plain words etc. are not literals -- keep them as strings.
        return value
def convert_values(val):
    """Recursively parse string leaves of ``val`` via ``_convert_value``.

    Dicts and lists are converted in place (and returned); strings are
    replaced by their parsed value; any other type is returned unchanged.
    """
    if isinstance(val, str):
        return _convert_value(val)
    if isinstance(val, dict):
        for key in val:
            val[key] = convert_values(val[key])
    elif isinstance(val, list):
        val[:] = [convert_values(item) for item in val]
    return val
class YamlUniqueLoader(yaml.FullLoader):
    """YAML loader that rejects duplicate mapping keys.

    FIX: comment stripping had left this class with an empty body (a syntax
    error); the docstring restores a valid class statement. The duplicate
    check itself lives in the module-level ``construct_mapping`` registered
    below.
    """


def construct_mapping(loader, node, deep=False):
    """Construct a YAML mapping, raising ConfigError on duplicate keys."""
    loader.flatten_mapping(node)
    result = {}
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        if key in result:
            raise ConfigError(f"Found duplicate keys: '{key}'")
        result[key] = loader.construct_object(value_node, deep=deep)
    return result


# Route every plain YAML mapping through the duplicate-checking constructor.
YamlUniqueLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
    construct_mapping,
)
def read_config(config_path):
    """Read and validate an experiment YAML file.

    Returns a ``(seml_dict, slurm_dict_or_None, experiment_config)`` tuple.
    The 'seml' and 'slurm' blocks are popped out of the config and validated
    against SETTINGS; remaining keys form the experiment configuration.

    Raises:
        ConfigError: on a missing 'seml' block or unknown seml/slurm keys.
    """
    with open(config_path, 'r') as conf:
        # YamlUniqueLoader rejects duplicate keys; convert_values parses
        # string leaves into Python values.
        config_dict = convert_values(yaml.load(conf, Loader=YamlUniqueLoader))
    if "seml" not in config_dict:
        raise ConfigError("Please specify a 'seml' dictionary.")
    seml_dict = config_dict['seml']
    del config_dict['seml']
    for k in seml_dict.keys():
        if k not in SETTINGS.VALID_SEML_CONFIG_VALUES:
            raise ConfigError(f"{k} is not a valid value in the `seml` config block.")
    set_executable_and_working_dir(config_path, seml_dict)
    if 'output_dir' in seml_dict:
        seml_dict['output_dir'] = str(Path(seml_dict['output_dir']).expanduser().resolve())
    if 'slurm' in config_dict:
        slurm_dict = config_dict['slurm']
        del config_dict['slurm']
        for k in slurm_dict.keys():
            if k not in SETTINGS.VALID_SLURM_CONFIG_VALUES:
                raise ConfigError(f"{k} is not a valid value in the `slurm` config block.")
        return seml_dict, slurm_dict, config_dict
    else:
        return seml_dict, None, config_dict
def set_executable_and_working_dir(config_path, seml_dict):
    """Resolve the experiment executable and working directory in place.

    The executable may be given relative to the config file or, when
    'project_root_dir' is set, relative to the project root (in which case
    sources are uploaded to MongoDB). Mutates ``seml_dict`` and the process
    CWD (os.chdir).

    Raises:
        ConfigError: if no executable is specified.
        ExecutableError: if the executable cannot be found.
    """
    config_dir = str(Path(config_path).expanduser().resolve().parent)
    working_dir = config_dir
    os.chdir(working_dir)
    if "executable" not in seml_dict:
        raise ConfigError("Please specify an executable path for the experiment.")
    executable = seml_dict['executable']
    executable_relative_to_config = os.path.exists(executable)
    executable_relative_to_project_root = False
    if 'project_root_dir' in seml_dict:
        working_dir = str(Path(seml_dict['project_root_dir']).expanduser().resolve())
        seml_dict['use_uploaded_sources'] = True
        os.chdir(working_dir)
        executable_relative_to_project_root = os.path.exists(executable)
        del seml_dict['project_root_dir']
    else:
        seml_dict['use_uploaded_sources'] = False
        logging.warning("'project_root_dir' not defined in seml config. Source files will not be saved in MongoDB.")
    seml_dict['working_dir'] = working_dir
    if not (executable_relative_to_config or executable_relative_to_project_root):
        # FIX: was an f-string with no placeholder; report which path failed.
        raise ExecutableError(f"Could not find the executable {executable}.")
    executable = str(Path(executable).expanduser().resolve())
    seml_dict['executable'] = (str(Path(executable).relative_to(working_dir)) if executable_relative_to_project_root
                               else str(Path(executable).relative_to(config_dir)))
def remove_prepended_dashes(param_dict):
    """Return a copy of ``param_dict`` with one or two leading dashes
    stripped from each key ('--x' -> 'x', '-x' -> 'x'); values unchanged."""
    def _strip(key):
        if key.startswith('--'):
            return key[2:]
        if key.startswith('-'):
            return key[1:]
        return key
    return {_strip(key): value for key, value in param_dict.items()}
| true | true |
1c3b5d034c1f5cc82d2da4d439996a66a0eefad5 | 16 | py | Python | radioxenon_ml/test_files/__init__.py | manninosi/radioxenon_ml | e901a2465bcbe491184cefc58db021a9321b9555 | [
"MIT"
] | null | null | null | radioxenon_ml/test_files/__init__.py | manninosi/radioxenon_ml | e901a2465bcbe491184cefc58db021a9321b9555 | [
"MIT"
] | 1 | 2018-04-24T03:26:56.000Z | 2018-05-09T17:10:55.000Z | radioxenon_ml/test_files/__init__.py | manninosi/radioxenon_ml | e901a2465bcbe491184cefc58db021a9321b9555 | [
"MIT"
] | 1 | 2018-04-23T20:52:43.000Z | 2018-04-23T20:52:43.000Z | #read_in__init__ | 16 | 16 | 0.9375 | true | true | |
1c3b5d09ff767e14087b63a78fa7d9321583e55c | 3,951 | py | Python | financial_canvas/figures/CustomFigure.py | EvgeniiaVak/financial-canvas | 1726c62f2735b67afd853a02f1130cc59ae28963 | [
"MIT"
] | null | null | null | financial_canvas/figures/CustomFigure.py | EvgeniiaVak/financial-canvas | 1726c62f2735b67afd853a02f1130cc59ae28963 | [
"MIT"
] | 2 | 2021-08-21T14:03:45.000Z | 2021-10-01T11:32:11.000Z | financial_canvas/figures/CustomFigure.py | EvgeniiaVak/financial_canvas | 1726c62f2735b67afd853a02f1130cc59ae28963 | [
"MIT"
] | null | null | null | from functools import partial
from bokeh.plotting import figure as bokeh_figure
from bokeh.models import CustomJS
from financial_canvas.figures.utils import create_sources
from financial_canvas.figures.utils import read_file
class CustomFigure(object):
    '''
    Base class for bokeh figures with client-side (JS) data filtering.

    Bokeh's built-in CDSView filtering is avoided because lines are not
    supported by filtered views (bokeh issues #9388 and #7070) and y-axis
    autoscale does not work together with zoom.

    Sources and the figure are extendable so it is possible to add new
    elements to the figure.

    Args:
        df (pandas.DataFrame): data for the main sources; should have
            'date_time' as a pandas.DatetimeIndex; all columns are added
            to the sources.
        selected_from (pandas.Timestamp, optional): date from which the
            short, initially drawn source is selected; defaults to the
            first index entry.

    Attributes:
        sources (dict): maps a name to a (source, origin) tuple of
            bokeh ColumnDataSource objects; ``source`` is the short initial
            source shown before any JS callback updates, ``origin`` is the
            full source used to recreate ``source`` inside JS callbacks.
        p (bokeh.models.Figure): the plot with glyphs.
        selected_from (pandas.Timestamp): date from which to select.
    '''
    def __init__(self, df, *, selected_from=None):
        # Default to the very first timestamp so the whole range is drawn.
        if selected_from is None:
            selected_from = df.index[0]
        self.sources = create_sources(df, selected_from=selected_from)
        self.selected_from = selected_from
        self.p = None

    def add_hover(self, columns):
        """Extend the figure's hover tool with one '%f'-formatted tooltip
        per entry of ``columns`` (source column name -> display name)."""
        hover_tool = self.p.hover
        for column_name, pretty_name in columns.items():
            hover_tool.tooltips.extend([
                (pretty_name, '@' + column_name + '{%f}'),
            ])
            hover_tool.formatters.update({
                '@' + column_name: 'printf',
            })

    def get_figure_defaults(self):
        """Return a bokeh figure factory preconfigured with common defaults."""
        return partial(
            bokeh_figure,
            # "canvas" instead of the faster "webgl": webgl left stale
            # glyphs behind when panning/zooming.
            output_backend="canvas",
            # TODO: pass these values through constructor arguments.
            plot_height=450,
            margin=(10, 10, 10, 10),
            height_policy='fixed',
            sizing_mode='stretch_width',
        )

    def y_axis_autorange(self):
        """Attach a JS callback rescaling the y axis whenever the x range
        changes.

        NOTE(review): reads ``self.y_range_resize_columns``, which is never
        assigned in this class -- presumably set by subclasses; confirm
        before relying on it.
        """
        yaxis = self.p.left[0]
        yaxis.formatter.use_scientific = False
        if (self.y_range_resize_columns):
            y_axis_auto_range_callback = CustomJS(
                args=dict(
                    unique_name=id(self),
                    y_range=self.p.y_range,
                    # TODO: deal with multiple sources
                    source=self.sources['main'][0],
                    columns=self.y_range_resize_columns,
                ),
                code=read_file('y_axis_auto_range.js'))
            self.p.x_range.js_on_change('start', y_axis_auto_range_callback)
            self.p.x_range.js_on_change('end', y_axis_auto_range_callback)

    def add_sources(self, df, name):
        """Create additional (source, origin) pairs from ``df`` under
        ``name``, merge them into ``self.sources`` and return the dict."""
        additional_sources = create_sources(df,
                                            selected_from=self.selected_from,
                                            name=name)
        self.sources.update(additional_sources)
        return self.sources
| 42.031915 | 99 | 0.618578 | from functools import partial
from bokeh.plotting import figure as bokeh_figure
from bokeh.models import CustomJS
from financial_canvas.figures.utils import create_sources
from financial_canvas.figures.utils import read_file
class CustomFigure(object):
    """Base class for bokeh figures with client-side (JS) data filtering.

    Holds a dict of (source, origin) ColumnDataSource pairs in
    ``self.sources`` and the figure in ``self.p``; subclasses add glyphs.
    """
    def __init__(self, df, *, selected_from=None):
        # Default to the first timestamp so the whole range is drawn.
        if selected_from is None:
            selected_from = df.index[0]
        self.sources = create_sources(df, selected_from=selected_from)
        self.selected_from = selected_from
        self.p = None
    def add_hover(self, columns):
        """Add one '%f'-formatted tooltip per (column name -> display name)."""
        hover_tool = self.p.hover
        for column_name, pretty_name in columns.items():
            hover_tool.tooltips.extend([
                (pretty_name, '@' + column_name + '{%f}'),
            ])
            hover_tool.formatters.update({
                '@' + column_name: 'printf',
            })
    def get_figure_defaults(self):
        """Return a bokeh figure factory preconfigured with common defaults."""
        return partial(
            bokeh_figure,
            output_backend="canvas",
            plot_height=450,
            margin=(10, 10, 10, 10),
            height_policy='fixed',
            sizing_mode='stretch_width',
        )
    def y_axis_autorange(self):
        """Attach a JS callback rescaling the y axis on x-range changes.

        NOTE(review): reads ``self.y_range_resize_columns``, which is never
        assigned in this class -- presumably set by subclasses; confirm.
        """
        yaxis = self.p.left[0]
        yaxis.formatter.use_scientific = False
        if (self.y_range_resize_columns):
            y_axis_auto_range_callback = CustomJS(
                args=dict(
                    unique_name=id(self),
                    y_range=self.p.y_range,
                    source=self.sources['main'][0],
                    columns=self.y_range_resize_columns,
                ),
                code=read_file('y_axis_auto_range.js'))
            self.p.x_range.js_on_change('start', y_axis_auto_range_callback)
            self.p.x_range.js_on_change('end', y_axis_auto_range_callback)
    def add_sources(self, df, name):
        """Merge additional (source, origin) pairs built from ``df`` into
        ``self.sources`` under ``name``; return the updated dict."""
        additional_sources = create_sources(df,
                                            selected_from=self.selected_from,
                                            name=name)
        self.sources.update(additional_sources)
        return self.sources
| true | true |
1c3b5d954ad0aee1b9e5fb0e78f9cd2780d1d5a1 | 1,330 | py | Python | src/models/stochastic/bbb/utils.py | tiwalayo/flexible-bnn | 424572de879d64ee0b2f004d9649e823d2004430 | [
"Apache-2.0"
] | 1 | 2020-12-20T09:49:10.000Z | 2020-12-20T09:49:10.000Z | src/models/stochastic/bbb/utils.py | tiwalayo/flexible-bnn | 424572de879d64ee0b2f004d9649e823d2004430 | [
"Apache-2.0"
] | 1 | 2020-10-22T03:39:50.000Z | 2020-11-02T18:30:49.000Z | src/models/stochastic/bbb/utils.py | tiwalayo/flexible-bnn | 424572de879d64ee0b2f004d9649e823d2004430 | [
"Apache-2.0"
] | null | null | null | import torch
import numpy as np
import torch.nn.functional as F
def kl_divergence(mu, sigma, mu_prior, sigma_prior):
    """Summed KL divergence KL(N(mu, sigma) || N(mu_prior, sigma_prior)) for
    elementwise (diagonal) Gaussians, as a scalar tensor."""
    log_ratio = 2 * torch.log(sigma_prior / sigma)
    var_ratio = (sigma / sigma_prior).pow(2)
    mean_term = ((mu_prior - mu) / sigma_prior).pow(2)
    return 0.5 * (log_ratio - 1 + var_ratio + mean_term).sum()
def normpdf(x, mu=0.0, sigma=0.3):
    """Gaussian density N(mu, sigma) evaluated elementwise at ``x``,
    computed on x's device."""
    loc = torch.tensor([mu]).to(x.device)
    scale = torch.tensor([sigma]).to(x.device)
    dist = torch.distributions.Normal(loc, scale)
    return dist.log_prob(x).exp()
def KumaraswamyKL(A, B, prior=None, n_samples=100):
    """Closed-form KL approximation for Kumaraswamy(A, B), summed over elements.

    Uses the analytic expression based on the Euler-Mascheroni constant.
    ``prior`` and ``n_samples`` are kept for backward compatibility: a
    Monte-Carlo estimator against ``prior`` used to follow the return
    statement, but it was unreachable dead code and has been removed.
    """
    # Euler-Mascheroni constant.
    GAMMA = 0.57721566490153286060651209008240243104215933593992
    return -((1 - 1 / B) + (1 - 1 / A) * (GAMMA + torch.log(B)) - torch.log(A * B)).sum()
import numpy as np
import torch.nn.functional as F
def kl_divergence(mu, sigma, mu_prior, sigma_prior):
    """Summed KL divergence KL(N(mu, sigma) || N(mu_prior, sigma_prior)) for
    elementwise (diagonal) Gaussians, as a scalar tensor."""
    kl = 0.5 * (2 * torch.log(sigma_prior / sigma) - 1 + (sigma / sigma_prior).pow(2) + ((mu_prior - mu) / sigma_prior).pow(2)).sum()
    return kl
def normpdf(x, mu=0.0, sigma=0.3):
    """Gaussian density N(mu, sigma) evaluated elementwise at ``x`` on x's device."""
    m = torch.distributions.Normal(torch.tensor([mu]).to(x.device), torch.tensor([sigma]).to(x.device))
    return torch.exp(m.log_prob(x))
def KumaraswamyKL(A, B, prior=None, n_samples=100):
    """Closed-form KL approximation for Kumaraswamy(A, B), summed over elements.

    Uses the analytic expression based on the Euler-Mascheroni constant.
    ``prior`` and ``n_samples`` are kept for backward compatibility: a
    Monte-Carlo estimator against ``prior`` used to follow the return
    statement, but it was unreachable dead code and has been removed.
    """
    # Euler-Mascheroni constant.
    GAMMA = 0.57721566490153286060651209008240243104215933593992
    return -((1 - 1 / B) + (1 - 1 / A) * (GAMMA + torch.log(B)) - torch.log(A * B)).sum()
1c3b605c52be0f323cf4325abca5c1af9c2a8497 | 1,172 | py | Python | parameter_init_adjustments.py | ChristophRaab/DATL | e1d44992e41060bb842525591181bfbbf7fd3c23 | [
"MIT"
] | 2 | 2022-01-27T22:30:42.000Z | 2022-01-29T14:14:30.000Z | parameter_init_adjustments.py | ChristophRaab/DATL | e1d44992e41060bb842525591181bfbbf7fd3c23 | [
"MIT"
] | null | null | null | parameter_init_adjustments.py | ChristophRaab/DATL | e1d44992e41060bb842525591181bfbbf7fd3c23 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch import nn
def init_weights(m):
    """Initialize module ``m`` by layer type name: Kaiming-uniform for conv
    layers, N(1.0, 0.02) for batch-norm weights, Xavier-normal for linear
    layers; biases are zeroed in every case. Other layer types are untouched."""
    layer_name = type(m).__name__
    if 'Conv2d' in layer_name or 'ConvTranspose2d' in layer_name:
        nn.init.kaiming_uniform_(m.weight)
        nn.init.zeros_(m.bias)
    elif 'BatchNorm' in layer_name:
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.zeros_(m.bias)
    elif 'Linear' in layer_name:
        nn.init.xavier_normal_(m.weight)
        nn.init.zeros_(m.bias)
def cdann_lda_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0):
    """CDAN lambda schedule: sigmoid ramp from ``low`` (iter 0) towards
    ``high`` as training progresses, returned as a plain float.

    FIX: ``np.float`` was removed in NumPy 1.24 and raised AttributeError;
    the builtin ``float`` is the drop-in replacement.
    """
    return float(2.0 * (high - low) / (1.0 + np.exp(-alpha * iter_num / max_iter)) - (high - low) + low)
def inv_lr_scheduler(optimizer, iter_num, gamma, power, lr=0.001, weight_decay=0.0005):
    """Inverse-decay learning-rate schedule applied in place.

    lr_t = lr * (1 + gamma * iter_num) ** (-power); every parameter group's
    'lr' is scaled by its 'lr_mult' and its 'weight_decay' by 'decay_mult'
    (both entries must be present on the group). Returns the mutated
    optimizer. A dead loop counter ``i`` was removed.
    """
    lr = lr * (1 + gamma * iter_num) ** (-power)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr * param_group['lr_mult']
        param_group['weight_decay'] = weight_decay * param_group['decay_mult']
    return optimizer
import torch
from torch import nn
def init_weights(m):
    """Initialize module ``m`` by layer type name: Kaiming-uniform for conv
    layers, N(1.0, 0.02) for batch-norm weights, Xavier-normal for linear
    layers; biases are zeroed in every case. Other layer types are untouched."""
    classname = m.__class__.__name__
    if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:
        nn.init.kaiming_uniform_(m.weight)
        nn.init.zeros_(m.bias)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.zeros_(m.bias)
    elif classname.find('Linear') != -1:
        nn.init.xavier_normal_(m.weight)
        nn.init.zeros_(m.bias)
def cdann_lda_coeff(iter_num, high=1.0, low=0.0, alpha=10.0, max_iter=10000.0):
    """CDAN lambda schedule: sigmoid ramp from ``low`` towards ``high`` over
    training progress, as a plain float. FIX: ``np.float`` was removed in
    NumPy 1.24; use the builtin ``float``."""
    return float(2.0 * (high - low) / (1.0 + np.exp(-alpha * iter_num / max_iter)) - (high - low) + low)
def inv_lr_scheduler(optimizer, iter_num, gamma, power, lr=0.001, weight_decay=0.0005):
    """Inverse-decay learning-rate schedule applied in place.

    lr_t = lr * (1 + gamma * iter_num) ** (-power); each group's 'lr' is
    scaled by its 'lr_mult' and 'weight_decay' by 'decay_mult'. Returns the
    mutated optimizer. A dead loop counter ``i`` was removed.
    """
    lr = lr * (1 + gamma * iter_num) ** (-power)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr * param_group['lr_mult']
        param_group['weight_decay'] = weight_decay * param_group['decay_mult']
    return optimizer
1c3b61505602f8e0d3ede08e1930940d3e13eaf6 | 9,934 | py | Python | armory/utils/metrics.py | mzweilin/armory | da3fedc02f6f4841a813c4af8aafcc3ff7501665 | [
"MIT"
] | null | null | null | armory/utils/metrics.py | mzweilin/armory | da3fedc02f6f4841a813c4af8aafcc3ff7501665 | [
"MIT"
] | null | null | null | armory/utils/metrics.py | mzweilin/armory | da3fedc02f6f4841a813c4af8aafcc3ff7501665 | [
"MIT"
] | null | null | null | """
Metrics for scenarios
Outputs are lists of python variables amenable to JSON serialization:
e.g., bool, int, float
numpy data types and tensors generally fail to serialize
"""
import logging
import numpy as np
logger = logging.getLogger(__name__)
def categorical_accuracy(y, y_pred):
    """Per-sample categorical accuracy as a list of 0/1 ints.

    ``y_pred`` may be hard labels (same shape as ``y``) or per-class scores
    with one extra trailing dimension (reduced via argmax).
    """
    labels = np.asarray(y)
    preds = np.asarray(y_pred)
    if labels.ndim == 0:
        # Promote scalars to a batch of one.
        labels = np.array([labels])
        preds = np.array([preds])
    if labels.shape == preds.shape:
        matches = labels == preds
    elif labels.ndim + 1 == preds.ndim:
        if labels.ndim == 0:
            return [int(labels == np.argmax(preds, axis=-1))]
        matches = labels == np.argmax(preds, axis=-1)
    else:
        raise ValueError(f"{labels} and {preds} have mismatched dimensions")
    return [int(flag) for flag in matches]
def top_5_categorical_accuracy(y, y_pred):
    """
    Return the per-sample top-5 categorical accuracy of the predictions,
    as a list of 0/1 ints (delegates to top_n_categorical_accuracy with n=5)
    """
    return top_n_categorical_accuracy(y, y_pred, 5)
def top_n_categorical_accuracy(y, y_pred, n):
    """Per-sample top-n accuracy as a list of 0/1 ints.

    ``y`` holds integer labels; ``y_pred`` holds per-class scores with one
    extra trailing dimension. ``n == 1`` delegates to categorical_accuracy.

    Raises:
        ValueError: if n < 1, lengths differ, y_pred has no class dimension,
            or dimensions are otherwise mismatched.
    """
    if n < 1:
        raise ValueError(f"n must be a positive integer, not {n}")
    n = int(n)
    if n == 1:
        return categorical_accuracy(y, y_pred)
    y = np.asarray(y)
    y_pred = np.asarray(y_pred)
    if y.ndim == 0:
        # Promote scalars to a batch of one.
        y = np.array([y])
        y_pred = np.array([y_pred])
    if len(y) != len(y_pred):
        raise ValueError("y and y_pred are of different length")
    if y.shape == y_pred.shape:
        raise ValueError("Must supply multiple predictions for top 5 accuracy")
    elif y.ndim + 1 == y_pred.ndim:
        # Indices of the n highest-scoring classes per sample.
        y_pred_top5 = np.argsort(y_pred, axis=-1)[:, -n:]
        # NOTE: a `y.ndim == 0` branch here was unreachable (y is promoted
        # above) and has been removed.
        return [int(y[i] in y_pred_top5[i]) for i in range(len(y))]
    else:
        raise ValueError(f"{y} and {y_pred} have mismatched dimensions")
def norm(x, x_adv, ord):
    """Per-sample ``ord``-norm of (x - x_adv), flattened per sample,
    returned as a list of floats."""
    x = np.asarray(x)
    x_adv = np.asarray(x_adv)
    # Cast to float before subtracting so integer inputs cannot overflow.
    diff = (x.astype(float) - x_adv.astype(float)).reshape(x.shape[0], -1)
    return [float(value) for value in np.linalg.norm(diff, ord=ord, axis=1)]
def linf(x, x_adv):
    """
    Return the per-sample L-infinity norms over a batch of inputs,
    as a list of floats
    """
    return norm(x, x_adv, np.inf)
def l2(x, x_adv):
    """
    Return the per-sample L2 norms over a batch of inputs,
    as a list of floats
    """
    return norm(x, x_adv, 2)
def l1(x, x_adv):
    """
    Return the per-sample L1 norms over a batch of inputs,
    as a list of floats
    """
    return norm(x, x_adv, 1)
def lp(x, x_adv, p):
    """Per-sample Lp norms over a batch, as a list of floats; ``p`` must be
    strictly positive."""
    if p > 0:
        return norm(x, x_adv, p)
    raise ValueError(f"p must be positive, not {p}")
def l0(x, x_adv):
    """
    Return the per-sample L0 'norms' (counts of changed elements) over a
    batch of inputs, as a list of floats
    """
    return norm(x, x_adv, 0)
def _snr(x_i, x_adv_i):
x_i = np.asarray(x_i, dtype=float)
x_adv_i = np.asarray(x_adv_i, dtype=float)
if x_i.shape != x_adv_i.shape:
raise ValueError(f"x_i.shape {x_i.shape} != x_adv_i.shape {x_adv_i.shape}")
elif x_i.ndim != 1:
raise ValueError("_snr input must be single dimensional (not multichannel)")
signal_power = (x_i ** 2).mean()
noise_power = ((x_i - x_adv_i) ** 2).mean()
return signal_power / noise_power
def snr(x, x_adv):
    """SNR for each raw-audio sample in a batch, as a list of floats."""
    if len(x) != len(x_adv):
        raise ValueError(f"len(x) {len(x)} != len(x_adv) {len(x_adv)}")
    values = []
    for clean, adv in zip(x, x_adv):
        values.append(float(_snr(clean, adv)))
    return values
def snr_db(x, x_adv):
    """
    Return the per-sample SNR of a batch of raw audio inputs in
    Decibels (dB), as a list of floats
    """
    return [float(i) for i in 10 * np.log10(snr(x, x_adv))]
def _snr_spectrogram(x_i, x_adv_i):
x_i = np.asarray(x_i, dtype=float)
x_adv_i = np.asarray(x_adv_i, dtype=float)
if x_i.shape != x_adv_i.shape:
raise ValueError(f"x_i.shape {x_i.shape} != x_adv_i.shape {x_adv_i.shape}")
signal_power = np.abs(x_i).mean()
noise_power = np.abs(x_i - x_adv_i).mean()
return signal_power / noise_power
def snr_spectrogram(x, x_adv):
    """
    Return the SNR of a batch of samples with spectrogram input, as a list
    of floats.

    NOTE: Due to phase effects this is only an estimate of the SNR; two
    phase-shifted signals with identical spectrograms can yield different
    values here.

    FIX: inputs are now run through np.asarray before the shape check, so
    list inputs raise a clean ValueError instead of AttributeError
    (ndarray behavior unchanged).
    """
    x = np.asarray(x)
    x_adv = np.asarray(x_adv)
    if x.shape != x_adv.shape:
        raise ValueError(f"x.shape {x.shape} != x_adv.shape {x_adv.shape}")
    return [float(_snr_spectrogram(x_i, x_adv_i)) for (x_i, x_adv_i) in zip(x, x_adv)]
def snr_spectrogram_db(x, x_adv):
    """
    Return the per-sample SNR of a batch of spectrogram inputs in
    Decibels (dB), as a list of floats
    """
    return [float(i) for i in 10 * np.log10(snr_spectrogram(x, x_adv))]
# Registry mapping metric names (as referenced from scenario configs) to the
# metric functions above; MetricList resolves names through this dict.
SUPPORTED_METRICS = {
    "categorical_accuracy": categorical_accuracy,
    "top_n_categorical_accuracy": top_n_categorical_accuracy,
    "top_5_categorical_accuracy": top_5_categorical_accuracy,
    "norm": norm,
    "l0": l0,
    "l1": l1,
    "l2": l2,
    "lp": lp,
    "linf": linf,
    "snr": snr,
    "snr_db": snr_db,
    "snr_spectrogram": snr_spectrogram,
    "snr_spectrogram_db": snr_spectrogram_db,
}
class MetricList:
    """Accumulates the per-sample results produced by a single metric."""

    def __init__(self, name, function=None):
        """Resolve ``name`` via SUPPORTED_METRICS unless a callable is given."""
        if function is None:
            if name not in SUPPORTED_METRICS:
                raise KeyError(f"{name} is not part of armory.utils.metrics")
            self.function = SUPPORTED_METRICS[name]
        elif callable(function):
            self.function = function
        else:
            raise ValueError(f"function must be callable or None, not {function}")
        self.name = name
        self._values = []

    def clear(self):
        """Drop all recorded values."""
        del self._values[:]

    def append(self, *args, **kwargs):
        """Run the metric and record its list of per-sample results."""
        self._values.extend(self.function(*args, **kwargs))

    def __iter__(self):
        return iter(self._values)

    def __len__(self):
        return len(self._values)

    def values(self):
        """Return a copy of all recorded values."""
        return list(self._values)

    def mean(self):
        """Arithmetic mean of the recorded values; raises ZeroDivisionError
        when empty (callers rely on that)."""
        total = sum(float(value) for value in self._values)
        return total / len(self._values)
class MetricsLogger:
    """
    Tracks benign-task, adversarial-task and perturbation metrics over a run
    and produces a serializable results dict.
    """

    def __init__(
        self, task=None, perturbation=None, means=True, record_metric_per_sample=False
    ):
        """
        task - single metric name or list of names (used for both benign and
            adversarial task counters)
        perturbation - single metric name or list of names
        means - whether results() includes the mean value for each metric
        record_metric_per_sample - whether results() includes every
            per-sample value
        """
        self.tasks = self._generate_counters(task)
        self.adversarial_tasks = self._generate_counters(task)
        self.perturbations = self._generate_counters(perturbation)
        self.means = bool(means)
        self.full = bool(record_metric_per_sample)
        if not self.means and not self.full:
            logger.warning(
                "No metric results will be produced. "
                "To change this, set 'means' or 'record_metric_per_sample' to True."
            )
        if not self.tasks and not self.perturbations:
            logger.warning(
                "No metric results will be produced. "
                "To change this, set one or more 'task' or 'perturbation' metrics"
            )

    def _generate_counters(self, names):
        # Normalize None/str/list input into a list of MetricList counters.
        if names is None:
            names = []
        elif isinstance(names, str):
            names = [names]
        elif not isinstance(names, list):
            raise ValueError(
                f"{names} must be one of (None, str, list), not {type(names)}"
            )
        return [MetricList(x) for x in names]

    @classmethod
    def from_config(cls, config):
        """Alternate constructor: build from a config dict of kwargs."""
        return cls(**config)

    def clear(self):
        """Reset every counter."""
        for metric in self.tasks + self.adversarial_tasks + self.perturbations:
            metric.clear()

    def update_task(self, y, y_pred, adversarial=False):
        """Record task-metric results for one batch (benign or adversarial)."""
        tasks = self.adversarial_tasks if adversarial else self.tasks
        for metric in tasks:
            metric.append(y, y_pred)

    def update_perturbation(self, x, x_adv):
        """Record perturbation-metric results for one batch."""
        for metric in self.perturbations:
            metric.append(x, x_adv)

    def log_task(self, adversarial=False):
        """Log the mean of each task metric at INFO level."""
        if adversarial:
            metrics = self.adversarial_tasks
            task_type = "adversarial"
        else:
            metrics = self.tasks
            task_type = "benign"
        for metric in metrics:
            logger.info(
                f"Average {metric.name} on {task_type} test examples: "
                f"{metric.mean():.2%}"
            )

    def results(self):
        """
        Return dict of results.

        Keys follow the external naming contract
        '{benign|adversarial|perturbation}_[mean_]{metric_name}' -- do not
        change the format strings below without updating consumers.
        """
        results = {}
        for metrics, prefix in [
            (self.tasks, "benign"),
            (self.adversarial_tasks, "adversarial"),
            (self.perturbations, "perturbation"),
        ]:
            for metric in metrics:
                if self.full:
                    results[f"{prefix}_{metric.name}"] = metric.values()
                if self.means:
                    try:
                        results[f"{prefix}_mean_{metric.name}"] = metric.mean()
                    except ZeroDivisionError:
                        raise ZeroDivisionError(
                            f"No values to calculate mean in {prefix}_{metric.name}"
                        )
        return results
| 30.472393 | 86 | 0.599859 |
import logging
import numpy as np
logger = logging.getLogger(__name__)
def categorical_accuracy(y, y_pred):
    """Per-sample categorical accuracy as a list of 0/1 ints; ``y_pred`` may
    be hard labels (same shape) or per-class scores with one extra trailing
    dimension (reduced via argmax)."""
    y = np.asarray(y)
    y_pred = np.asarray(y_pred)
    if y.ndim == 0:
        # Promote scalars to a batch of one.
        y = np.array([y])
        y_pred = np.array([y_pred])
    if y.shape == y_pred.shape:
        return [int(x) for x in list(y == y_pred)]
    elif y.ndim + 1 == y_pred.ndim:
        if y.ndim == 0:
            return [int(y == np.argmax(y_pred, axis=-1))]
        return [int(x) for x in list(y == np.argmax(y_pred, axis=-1))]
    else:
        raise ValueError(f"{y} and {y_pred} have mismatched dimensions")
def top_5_categorical_accuracy(y, y_pred):
    """Per-sample top-5 accuracy as a list of 0/1 ints (n=5 shortcut)."""
    return top_n_categorical_accuracy(y, y_pred, 5)
def top_n_categorical_accuracy(y, y_pred, n):
    """Per-sample top-n accuracy as a list of 0/1 ints; ``y_pred`` holds
    per-class scores with one extra trailing dimension. ``n == 1`` delegates
    to categorical_accuracy.

    Raises:
        ValueError: on invalid n, length mismatch, a missing class
            dimension, or otherwise mismatched dimensions.
    """
    if n < 1:
        raise ValueError(f"n must be a positive integer, not {n}")
    n = int(n)
    if n == 1:
        return categorical_accuracy(y, y_pred)
    y = np.asarray(y)
    y_pred = np.asarray(y_pred)
    if y.ndim == 0:
        # Promote scalars to a batch of one.
        y = np.array([y])
        y_pred = np.array([y_pred])
    if len(y) != len(y_pred):
        raise ValueError("y and y_pred are of different length")
    if y.shape == y_pred.shape:
        raise ValueError("Must supply multiple predictions for top 5 accuracy")
    elif y.ndim + 1 == y_pred.ndim:
        # Indices of the n highest-scoring classes per sample.
        y_pred_top5 = np.argsort(y_pred, axis=-1)[:, -n:]
        # NOTE: a `y.ndim == 0` branch here was unreachable (y is promoted
        # above) and has been removed.
        return [int(y[i] in y_pred_top5[i]) for i in range(len(y))]
    else:
        raise ValueError(f"{y} and {y_pred} have mismatched dimensions")
def norm(x, x_adv, ord):
    """Per-sample ``ord``-norm of (x - x_adv), flattened per sample, as a
    list of floats."""
    x = np.asarray(x)
    x_adv = np.asarray(x_adv)
    # Cast to float before subtracting so integer inputs cannot overflow.
    diff = (x.astype(float) - x_adv.astype(float)).reshape(x.shape[0], -1)
    values = np.linalg.norm(diff, ord=ord, axis=1)
    return list(float(x) for x in values)
def linf(x, x_adv):
    """Per-sample L-infinity norms over a batch, as a list of floats."""
    return norm(x, x_adv, np.inf)
def l2(x, x_adv):
    """Per-sample L2 norms over a batch, as a list of floats."""
    return norm(x, x_adv, 2)
def l1(x, x_adv):
    """Per-sample L1 norms over a batch, as a list of floats."""
    return norm(x, x_adv, 1)
def lp(x, x_adv, p):
    """Per-sample Lp norms over a batch, as a list of floats; p must be > 0."""
    if p <= 0:
        raise ValueError(f"p must be positive, not {p}")
    return norm(x, x_adv, p)
def l0(x, x_adv):
    """Per-sample L0 'norms' (changed-element counts), as a list of floats."""
    return norm(x, x_adv, 0)
def _snr(x_i, x_adv_i):
    """Signal-to-noise ratio of one 1-D raw-audio sample vs. its perturbation."""
    x_i = np.asarray(x_i, dtype=float)
    x_adv_i = np.asarray(x_adv_i, dtype=float)
    if x_i.shape != x_adv_i.shape:
        raise ValueError(f"x_i.shape {x_i.shape} != x_adv_i.shape {x_adv_i.shape}")
    elif x_i.ndim != 1:
        raise ValueError("_snr input must be single dimensional (not multichannel)")
    # Mean power of the clean signal over mean power of the perturbation.
    signal_power = (x_i ** 2).mean()
    noise_power = ((x_i - x_adv_i) ** 2).mean()
    return signal_power / noise_power
def snr(x, x_adv):
    """SNR for each raw-audio sample in a batch, as a list of floats."""
    if len(x) != len(x_adv):
        raise ValueError(f"len(x) {len(x)} != len(x_adv) {len(x_adv)}")
    return [float(_snr(x_i, x_adv_i)) for (x_i, x_adv_i) in zip(x, x_adv)]
def snr_db(x, x_adv):
    """Per-sample SNR of raw audio inputs in Decibels (dB), as floats."""
    return [float(i) for i in 10 * np.log10(snr(x, x_adv))]
def _snr_spectrogram(x_i, x_adv_i):
    """SNR estimate for one spectrogram sample (mean magnitudes, any shape)."""
    x_i = np.asarray(x_i, dtype=float)
    x_adv_i = np.asarray(x_adv_i, dtype=float)
    if x_i.shape != x_adv_i.shape:
        raise ValueError(f"x_i.shape {x_i.shape} != x_adv_i.shape {x_adv_i.shape}")
    # Mean absolute signal over mean absolute perturbation.
    signal_power = np.abs(x_i).mean()
    noise_power = np.abs(x_i - x_adv_i).mean()
    return signal_power / noise_power
def snr_spectrogram(x, x_adv):
    """Per-sample SNR estimates for spectrogram input, as a list of floats.

    NOTE(review): compares ``x.shape`` directly, so inputs must already be
    ndarrays (a plain list raises AttributeError); phase effects make this
    an estimate only.
    """
    if x.shape != x_adv.shape:
        raise ValueError(f"x.shape {x.shape} != x_adv.shape {x_adv.shape}")
    return [float(_snr_spectrogram(x_i, x_adv_i)) for (x_i, x_adv_i) in zip(x, x_adv)]
def snr_spectrogram_db(x, x_adv):
return [float(i) for i in 10 * np.log10(snr_spectrogram(x, x_adv))]
# Registry mapping metric names (as referenced from evaluation configs) to the
# metric callables defined in this module; MetricList resolves names here when
# it is constructed without an explicit function.
SUPPORTED_METRICS = {
    "categorical_accuracy": categorical_accuracy,
    "top_n_categorical_accuracy": top_n_categorical_accuracy,
    "top_5_categorical_accuracy": top_5_categorical_accuracy,
    "norm": norm,
    "l0": l0,
    "l1": l1,
    "l2": l2,
    "lp": lp,
    "linf": linf,
    "snr": snr,
    "snr_db": snr_db,
    "snr_spectrogram": snr_spectrogram,
    "snr_spectrogram_db": snr_spectrogram_db,
}
class MetricList:
    """Accumulates the per-sample outputs of a single named metric function."""
    def __init__(self, name, function=None):
        """Bind *function*, or look *name* up in SUPPORTED_METRICS when None.

        Raises KeyError for unknown names and ValueError when *function* is
        neither None nor callable.
        """
        if function is None:
            if name not in SUPPORTED_METRICS:
                raise KeyError(f"{name} is not part of armory.utils.metrics")
            self.function = SUPPORTED_METRICS[name]
        elif callable(function):
            self.function = function
        else:
            raise ValueError(f"function must be callable or None, not {function}")
        self.name = name
        self._values = []
    def clear(self):
        """Forget every recorded value (in place)."""
        del self._values[:]
    def append(self, *args, **kwargs):
        """Apply the metric and record each element of the returned sequence."""
        self._values += list(self.function(*args, **kwargs))
    def __iter__(self):
        return iter(self._values)
    def __len__(self):
        return len(self._values)
    def values(self):
        """Return a copy of the recorded values."""
        return list(self._values)
    def mean(self):
        """Arithmetic mean of the recorded values; ZeroDivisionError when empty."""
        return sum(float(value) for value in self._values) / len(self._values)
class MetricsLogger:
    """Tracks benign-task, adversarial-task and perturbation metrics and
    renders them either as log lines or as a results dictionary."""
    def __init__(
        self, task=None, perturbation=None, means=True, record_metric_per_sample=False
    ):
        """Configure metric counters.

        task / perturbation: a metric name, a list of names, or None.
        means: include per-metric means in results().
        record_metric_per_sample: include every per-sample value in results().
        """
        self.tasks = self._generate_counters(task)
        self.adversarial_tasks = self._generate_counters(task)
        self.perturbations = self._generate_counters(perturbation)
        self.means = bool(means)
        self.full = bool(record_metric_per_sample)
        if not (self.means or self.full):
            logger.warning(
                "No metric results will be produced. "
                "To change this, set 'means' or 'record_metric_per_sample' to True."
            )
        if not (self.tasks or self.perturbations):
            logger.warning(
                "No metric results will be produced. "
                "To change this, set one or more 'task' or 'perturbation' metrics"
            )
    def _generate_counters(self, names):
        """Normalize None / str / list of names into a list of MetricList."""
        if isinstance(names, str):
            names = [names]
        elif names is None:
            names = []
        elif not isinstance(names, list):
            raise ValueError(
                f"{names} must be one of (None, str, list), not {type(names)}"
            )
        return [MetricList(metric_name) for metric_name in names]
    @classmethod
    def from_config(cls, config):
        """Build a logger from a mapping of constructor keyword arguments."""
        return cls(**config)
    def clear(self):
        """Drop all recorded values while keeping the configured metrics."""
        for counter in self.tasks + self.adversarial_tasks + self.perturbations:
            counter.clear()
    def update_task(self, y, y_pred, adversarial=False):
        """Record task metric values for one batch of (y, y_pred)."""
        for counter in (self.adversarial_tasks if adversarial else self.tasks):
            counter.append(y, y_pred)
    def update_perturbation(self, x, x_adv):
        """Record perturbation metric values for one batch of (x, x_adv)."""
        for counter in self.perturbations:
            counter.append(x, x_adv)
    def log_task(self, adversarial=False):
        """Log the mean of each benign (or adversarial) task metric."""
        if adversarial:
            metrics, task_type = self.adversarial_tasks, "adversarial"
        else:
            metrics, task_type = self.tasks, "benign"
        for metric in metrics:
            logger.info(
                f"Average {metric.name} on {task_type} test examples: "
                f"{metric.mean():.2%}"
            )
    def results(self):
        """Assemble a flat {metric_key: value(s)} dict of everything recorded.

        Raises ZeroDivisionError when a mean is requested for a metric that
        recorded no values.
        """
        output = {}
        labeled_groups = (
            ("benign", self.tasks),
            ("adversarial", self.adversarial_tasks),
            ("perturbation", self.perturbations),
        )
        for prefix, group in labeled_groups:
            for metric in group:
                if self.full:
                    output[f"{prefix}_{metric.name}"] = metric.values()
                if self.means:
                    try:
                        output[f"{prefix}_mean_{metric.name}"] = metric.mean()
                    except ZeroDivisionError:
                        raise ZeroDivisionError(
                            f"No values to calculate mean in {prefix}_{metric.name}"
                        )
        return output
| true | true |
1c3b61d2af6ef4297abcbc41f6994d956956b8f5 | 7,048 | py | Python | src/frontend.py | Samhuw8a/Jakob | 86ac574b9191b856d46fefc5e90c732f6d5265df | [
"MIT"
] | null | null | null | src/frontend.py | Samhuw8a/Jakob | 86ac574b9191b856d46fefc5e90c732f6d5265df | [
"MIT"
] | 1 | 2022-01-15T16:34:53.000Z | 2022-01-15T16:34:53.000Z | src/frontend.py | Samhuw8a/Jakob | 86ac574b9191b856d46fefc5e90c732f6d5265df | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter.colorchooser import askcolor
import sys
class SettingsWindow(Toplevel):
    """Settings dialog: colours, measuring method and staff dimensions (a/b)."""
    def __init__(self,window):
        """Build the settings UI; *window* is the root Window supplying config."""
        self.root_window=window
        super().__init__(window)
        self.title("Einstellungen")
        self.get_conf()
        self.make_mess_methode_menu()
        # Windows accepts .ico directly; other platforms need a PhotoImage.
        if sys.platform.startswith('win'):
            self.iconbitmap('Icons/icon.ico')
        else:
            logo = PhotoImage(file='Icons/icon.gif')
            self.call('wm', 'iconphoto', self._w, logo)
        self.save_button=Button(self,text="speichern",command=self.set_conf)
        self.save_button.grid(row=2,column=2)
        self.a_entry=Entry(self)
        self.a_entry.grid(row=2,column=1)
        self.b_entry=Entry(self)
        self.b_entry.grid(row=3,column=1)
        self.a_lab=Label(self,text="a:")
        self.a_lab.grid(row=2,column=0)
        self.b_lab=Label(self,text="b:")
        self.b_lab.grid(row=3,column=0)
        self.choose_background_but=Button(self,text=" Hintergrund",command=self.choose_background_color,width=15)
        self.choose_background_but.grid(row=1,column=0)
        self.choose_foreground_but=Button(self,text="Schrifftfarbe",command=self.choose_foreground_color,width=15)
        self.choose_foreground_but.grid(row=1,column=1)
        self.get_conf()
        self.set_style()
    def set_conf(self):
        """Collect edited values, push them to the root window, then refresh."""
        self.new_conf["mess_methode"]=self.aktuell_mess_methode.get()
        # Entry.get() always returns a str, so float() can only raise
        # ValueError here; invalid input keeps the previous value (best effort).
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        try:
            self.new_conf["stab_hoehe"]=float(self.b_entry.get())
        except ValueError:
            pass
        try:
            self.new_conf["stab_laenge"]=float(self.a_entry.get())
        except ValueError:
            pass
        self.root_window.set_config(self.new_conf)
        self.get_conf()
        self.set_style()
    def get_conf(self):
        """Pull the current config from the root window into local state."""
        conf=self.root_window.get_config()
        # NOTE(review): new_conf aliases the handler's config dict (no copy),
        # so edits mutate it in place before set_config is called — confirm intended.
        self.new_conf=conf
        self.bg=conf["backgroud_colour"]
        self.fg=conf["foregroud_colour"]
        self.font=(conf["font"],conf["fontsz"])
        self.aktuell_mess_methode=StringVar(self)
        self.aktuell_mess_methode.set(conf["mess_methode"])
    def set_style(self):
        """Apply the current colours/font to every widget of this dialog."""
        self.config(bg=self.bg)
        self.save_button.config(bg=self.bg,fg=self.fg,font=self.font)
        self.choose_foreground_but.config(bg=self.bg,fg=self.fg,font=self.font)
        self.choose_background_but.config(bg=self.bg,fg=self.fg,font=self.font)
        self.mess_methode_menu.config(bg=self.bg,fg=self.fg,font=self.font)
        self.mess_methode_lab.config(bg=self.bg,fg=self.fg,font=self.font)
        self.a_entry.config(bg=self.bg,fg=self.fg,font=self.font)
        self.b_entry.config(bg=self.bg,fg=self.fg,font=self.font)
        self.a_lab.config(bg=self.bg,fg=self.fg,font=self.font)
        self.b_lab.config(bg=self.bg,fg=self.fg,font=self.font)
    def choose_background_color(self):
        """Ask for a background colour; askcolor returns (rgb, hexstring)."""
        self.new_conf["backgroud_colour"]=askcolor(parent=self)[1]
    def choose_foreground_color(self):
        """Ask for a text colour; askcolor returns (rgb, hexstring)."""
        self.new_conf["foregroud_colour"]=askcolor(parent=self)[1]
    def make_mess_methode_menu(self):
        """Build the drop-down selecting the measuring method."""
        self.mess_methode_lab=Label(self,text="Mess Methode:")
        self.mess_methode_lab.grid(row=0,column=0)
        self.mess_methode_menu=OptionMenu(self,self.aktuell_mess_methode,"Apian","Strahlensatz",command=self.change_mess_methode)
        self.mess_methode_menu.config(width=12)
        self.mess_methode_menu.grid(row=0,column=1)
    def change_mess_methode(self,choice):
        """OptionMenu callback: remember the selected measuring method."""
        self.aktuell_mess_methode.set(choice)
class Window(Tk):
    """Main application window for the Jakobsstab (Jacob's staff) calculator."""
    def __init__(self,backend_handler):
        """Build the UI; *backend_handler* supplies config and the solver.

        Note: set_style() at the end calls mainloop(), so this constructor
        blocks until the GUI is closed.
        """
        super().__init__()
        self.backend_handler=backend_handler
        self.title("Jakobsstab")
        _=self.get_config()
        self.make_menu()
        self.make_entry_frame()
        if sys.platform.startswith('win'):
            self.iconbitmap('Icons/icon.ico')
        else:
            logo = PhotoImage(file='Icons/icon.gif')
            self.call('wm', 'iconphoto', self._w, logo)
        self.result_lab=Label(self)
        self.result_lab.grid(row=1,column=1)
        self.Image=Label(self)
        self.Image.grid(row=0,column=1)
        self.set_style()
    def make_menu(self):
        """Create the menu bar with the settings entry."""
        self.menu=Menu(self)
        self.config(menu=self.menu)
        self.menu.add_command(label="Einstellungen",command=self.open_settings_window)
    def make_entry_frame(self):
        """Create the input frame holding the s/ha entries and the button."""
        self.entry_frame=Frame(self)
        self.entry_frame.grid(row=0,column=0)
        self.ha_lab=Label(self.entry_frame,text="ha:")
        self.s_lab=Label(self.entry_frame,text=" s:")
        self.ha_entry=Entry(self.entry_frame,width=12)
        self.s_entry=Entry(self.entry_frame,width=12)
        self.ha_lab.grid(row=1,column=0)
        self.ha_entry.grid(row=1,column=1)
        self.s_entry.grid(row=0,column=1)
        self.s_lab.grid(row=0,column=0)
        self.calculate_but=Button(self.entry_frame,text="Ausrechenen",command=self.calculate)
        self.calculate_but.grid(row=2,column=1)
    def calculate(self):
        """Read s/ha, delegate to the backend solver and display the result."""
        try:
            ha=float(self.ha_entry.get())
            s=float(self.s_entry.get())
            res=round(self.backend_handler.solve(ha,s),2)
        except Exception:
            # Invalid input or a backend failure: show a marker instead of
            # crashing the UI. (Was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            res="ERROR"
        self.result_lab.config(text=res)
    def set_style(self):
        """Apply colours/font to all widgets, reload the method image, run loop."""
        self.configure(bg=self.bg)
        self.entry_frame.config(bg=self.bg)
        self.calculate_but.config(bg=self.bg,fg=self.fg,font=self.font)
        self.result_lab.config(bg=self.bg,fg=self.fg,font=self.font)
        self.ha_entry.config(bg=self.bg,fg=self.fg,font=self.font)
        self.s_entry.config(bg=self.bg,fg=self.fg,font=self.font)
        self.ha_lab.config(bg=self.bg,fg=self.fg,font=self.font)
        self.s_lab.config(bg=self.bg,fg=self.fg,font=self.font)
        # Fix: keep a reference on self. Tk does not hold PhotoImage objects,
        # so a local variable would be garbage-collected after this method
        # returns and the label would render blank.
        self._photo=PhotoImage(file=f"Icons/{self.mess_methode}.gif")
        self.Image.config(image=self._photo)
        # NOTE(review): mainloop() inside set_style blocks here; __init__ and
        # set_config rely on this to (re)enter the event loop — confirm before
        # refactoring it out.
        self.mainloop()
    def get_config(self):
        """Load the config from the backend and cache styling fields."""
        conf=self.backend_handler.get_conf()
        self.bg=conf["backgroud_colour"]
        self.fg=conf["foregroud_colour"]
        self.font=(conf["font"],conf["fontsz"])
        self.mess_methode=conf["mess_methode"]
        return conf
    def open_settings_window(self):
        """Menu callback: open the settings dialog."""
        settings_window=SettingsWindow(self)
        settings_window.mainloop()
    def set_config(self,conf):
        """Persist *conf* via the backend and rebuild the styled UI."""
        self.backend_handler.set_conf(conf)
        _=self.get_config()
        self.make_entry_frame()
        self.set_style()
def main():
    """Entry point: wire the YAML-backed backend Handler into the Tk window."""
    from backend import Handler
    # NOTE(review): Window.__init__ ends in set_style(), which calls
    # mainloop(), so this constructor call blocks until the GUI is closed.
    window=Window(Handler("src/Config.yaml","src/Backup.yaml"))
if __name__ == '__main__':
main() | 33.884615 | 130 | 0.610244 | from tkinter import *
from tkinter.colorchooser import askcolor
import sys
class SettingsWindow(Toplevel):
    """Settings dialog: colours, measuring method and staff dimensions (a/b)."""
    def __init__(self,window):
        """Build the settings UI; *window* is the root Window supplying config."""
        self.root_window=window
        super().__init__(window)
        self.title("Einstellungen")
        self.get_conf()
        self.make_mess_methode_menu()
        # Windows accepts .ico directly; other platforms need a PhotoImage.
        if sys.platform.startswith('win'):
            self.iconbitmap('Icons/icon.ico')
        else:
            logo = PhotoImage(file='Icons/icon.gif')
            self.call('wm', 'iconphoto', self._w, logo)
        self.save_button=Button(self,text="speichern",command=self.set_conf)
        self.save_button.grid(row=2,column=2)
        self.a_entry=Entry(self)
        self.a_entry.grid(row=2,column=1)
        self.b_entry=Entry(self)
        self.b_entry.grid(row=3,column=1)
        self.a_lab=Label(self,text="a:")
        self.a_lab.grid(row=2,column=0)
        self.b_lab=Label(self,text="b:")
        self.b_lab.grid(row=3,column=0)
        self.choose_background_but=Button(self,text=" Hintergrund",command=self.choose_background_color,width=15)
        self.choose_background_but.grid(row=1,column=0)
        self.choose_foreground_but=Button(self,text="Schrifftfarbe",command=self.choose_foreground_color,width=15)
        self.choose_foreground_but.grid(row=1,column=1)
        self.get_conf()
        self.set_style()
    def set_conf(self):
        """Collect edited values, push them to the root window, then refresh."""
        self.new_conf["mess_methode"]=self.aktuell_mess_methode.get()
        # NOTE(review): bare `except` silently ignores any error (including
        # KeyboardInterrupt), not just invalid float input — consider
        # narrowing to ValueError.
        try:
            self.new_conf["stab_hoehe"]=float(self.b_entry.get())
        except :
            pass
        try:
            self.new_conf["stab_laenge"]=float(self.a_entry.get())
        except :
            pass
        self.root_window.set_config(self.new_conf)
        self.get_conf()
        self.set_style()
    def get_conf(self):
        """Pull the current config from the root window into local state."""
        conf=self.root_window.get_config()
        # NOTE(review): new_conf aliases the handler's config dict (no copy),
        # so edits mutate it in place — confirm intended.
        self.new_conf=conf
        self.bg=conf["backgroud_colour"]
        self.fg=conf["foregroud_colour"]
        self.font=(conf["font"],conf["fontsz"])
        self.aktuell_mess_methode=StringVar(self)
        self.aktuell_mess_methode.set(conf["mess_methode"])
    def set_style(self):
        """Apply the current colours/font to every widget of this dialog."""
        self.config(bg=self.bg)
        self.save_button.config(bg=self.bg,fg=self.fg,font=self.font)
        self.choose_foreground_but.config(bg=self.bg,fg=self.fg,font=self.font)
        self.choose_background_but.config(bg=self.bg,fg=self.fg,font=self.font)
        self.mess_methode_menu.config(bg=self.bg,fg=self.fg,font=self.font)
        self.mess_methode_lab.config(bg=self.bg,fg=self.fg,font=self.font)
        self.a_entry.config(bg=self.bg,fg=self.fg,font=self.font)
        self.b_entry.config(bg=self.bg,fg=self.fg,font=self.font)
        self.a_lab.config(bg=self.bg,fg=self.fg,font=self.font)
        self.b_lab.config(bg=self.bg,fg=self.fg,font=self.font)
    def choose_background_color(self):
        """Ask for a background colour; askcolor returns (rgb, hexstring)."""
        self.new_conf["backgroud_colour"]=askcolor(parent=self)[1]
    def choose_foreground_color(self):
        """Ask for a text colour; askcolor returns (rgb, hexstring)."""
        self.new_conf["foregroud_colour"]=askcolor(parent=self)[1]
    def make_mess_methode_menu(self):
        """Build the drop-down selecting the measuring method."""
        self.mess_methode_lab=Label(self,text="Mess Methode:")
        self.mess_methode_lab.grid(row=0,column=0)
        self.mess_methode_menu=OptionMenu(self,self.aktuell_mess_methode,"Apian","Strahlensatz",command=self.change_mess_methode)
        self.mess_methode_menu.config(width=12)
        self.mess_methode_menu.grid(row=0,column=1)
    def change_mess_methode(self,choice):
        """OptionMenu callback: remember the selected measuring method."""
        self.aktuell_mess_methode.set(choice)
class Window(Tk):
    """Main application window for the Jakobsstab (Jacob's staff) calculator."""
    def __init__(self,backend_handler):
        """Build the UI; set_style() at the end enters mainloop() and blocks."""
        super().__init__()
        self.backend_handler=backend_handler
        self.title("Jakobsstab")
        _=self.get_config()
        self.make_menu()
        self.make_entry_frame()
        if sys.platform.startswith('win'):
            self.iconbitmap('Icons/icon.ico')
        else:
            logo = PhotoImage(file='Icons/icon.gif')
            self.call('wm', 'iconphoto', self._w, logo)
        self.result_lab=Label(self)
        self.result_lab.grid(row=1,column=1)
        self.Image=Label(self)
        self.Image.grid(row=0,column=1)
        self.set_style()
    def make_menu(self):
        """Create the menu bar with the settings entry."""
        self.menu=Menu(self)
        self.config(menu=self.menu)
        self.menu.add_command(label="Einstellungen",command=self.open_settings_window)
    def make_entry_frame(self):
        """Create the input frame holding the s/ha entries and the button."""
        self.entry_frame=Frame(self)
        self.entry_frame.grid(row=0,column=0)
        self.ha_lab=Label(self.entry_frame,text="ha:")
        self.s_lab=Label(self.entry_frame,text=" s:")
        self.ha_entry=Entry(self.entry_frame,width=12)
        self.s_entry=Entry(self.entry_frame,width=12)
        self.ha_lab.grid(row=1,column=0)
        self.ha_entry.grid(row=1,column=1)
        self.s_entry.grid(row=0,column=1)
        self.s_lab.grid(row=0,column=0)
        self.calculate_but=Button(self.entry_frame,text="Ausrechenen",command=self.calculate)
        self.calculate_but.grid(row=2,column=1)
    def calculate(self):
        """Read s/ha, delegate to the backend solver and display the result."""
        # NOTE(review): bare `except` also swallows KeyboardInterrupt/SystemExit;
        # consider `except Exception`.
        try:
            ha=float(self.ha_entry.get())
            s=float(self.s_entry.get())
            res=round(self.backend_handler.solve(ha,s),2)
        except:
            res="ERROR"
        self.result_lab.config(text=res)
    def set_style(self):
        """Apply colours/font to all widgets, reload the method image, run loop."""
        self.configure(bg=self.bg)
        self.entry_frame.config(bg=self.bg)
        self.calculate_but.config(bg=self.bg,fg=self.fg,font=self.font)
        self.result_lab.config(bg=self.bg,fg=self.fg,font=self.font)
        self.ha_entry.config(bg=self.bg,fg=self.fg,font=self.font)
        self.s_entry.config(bg=self.bg,fg=self.fg,font=self.font)
        self.ha_lab.config(bg=self.bg,fg=self.fg,font=self.font)
        self.s_lab.config(bg=self.bg,fg=self.fg,font=self.font)
        # NOTE(review): `image` is a local; Tk keeps no strong reference to
        # PhotoImage objects, so it can be garbage-collected and the label
        # rendered blank — keep a reference on self instead.
        image=PhotoImage(file=f"Icons/{self.mess_methode}.gif")
        self.Image.config(image=image)
        self.mainloop()
    def get_config(self):
        """Load the config from the backend and cache styling fields."""
        conf=self.backend_handler.get_conf()
        self.bg=conf["backgroud_colour"]
        self.fg=conf["foregroud_colour"]
        self.font=(conf["font"],conf["fontsz"])
        self.mess_methode=conf["mess_methode"]
        return conf
    def open_settings_window(self):
        """Menu callback: open the settings dialog."""
        settings_window=SettingsWindow(self)
        settings_window.mainloop()
    def set_config(self,conf):
        """Persist *conf* via the backend and rebuild the styled UI."""
        self.backend_handler.set_conf(conf)
        _=self.get_config()
        self.make_entry_frame()
        self.set_style()
def main():
    """Entry point: wire the YAML-backed backend Handler into the Tk window."""
    from backend import Handler
    # NOTE(review): Window.__init__ ends in set_style() -> mainloop(), so this
    # constructor call blocks until the GUI is closed.
    window=Window(Handler("src/Config.yaml","src/Backup.yaml"))
if __name__ == '__main__':
main() | true | true |
1c3b62adbe33c307499ef5ecfd5530a3a22e0a35 | 10,715 | py | Python | jwplatform/upload.py | jwplayer/jwplayer-py | 2f478550414145e9d36b1cdf901dcf5360f8fe2b | [
"MIT"
] | 37 | 2016-09-14T20:34:42.000Z | 2022-02-15T06:47:21.000Z | jwplatform/upload.py | jwplayer/jwplayer-py | 2f478550414145e9d36b1cdf901dcf5360f8fe2b | [
"MIT"
] | 24 | 2016-11-16T21:36:13.000Z | 2022-02-18T14:37:35.000Z | jwplatform/upload.py | jwplayer/jwplayer-py | 2f478550414145e9d36b1cdf901dcf5360f8fe2b | [
"MIT"
] | 45 | 2016-10-13T08:41:35.000Z | 2022-03-06T02:31:23.000Z | import http.client
import logging
import math
import os
from dataclasses import dataclass
from enum import Enum
from hashlib import md5
from urllib.parse import urlparse
# Maximum number of part links requested per page from the list endpoint.
MAX_PAGE_SIZE = 1000
# Smallest part size MultipartUpload.upload() accepts (5 MiB).
MIN_PART_SIZE = 5 * 1024 * 1024
UPLOAD_BASE_URL = 'upload.jwplayer.com'
# Upper bound for a single media file (~25 GB); not referenced in this chunk.
MAX_FILE_SIZE = 25 * 1000 * 1024 * 1024
class UploadType(Enum):
    """
    This class stores the enum values for the different type of uploads.
    """
    direct = "direct"
    multipart = "multipart"
@dataclass
class UploadContext:
    """
    Holds the state of an upload (method, id, token, direct link) so that a
    multipart upload can be resumed later.
    """
    # Fix: the fields are now declared as real dataclass fields. The original
    # combined @dataclass with a hand-written __init__ and no field
    # declarations, so the generated __eq__ compared empty field tuples and
    # every two UploadContext instances were considered equal. The generated
    # __init__ keeps the original positional signature.
    upload_method: Optional[str]
    upload_id: Optional[str]
    upload_token: Optional[str]
    direct_link: Optional[str]
    def can_resume(self) -> bool:
        """
        This method evaluates whether an upload can be resumed based on the
        upload context state. (The docstring previously sat *before* the def
        as a stray string literal.)
        """
        return self.upload_token is not None \
            and self.upload_method == UploadType.multipart.value \
            and self.upload_id is not None
def _upload_to_s3(bytes_chunk, upload_link):
    """
    PUT *bytes_chunk* to the pre-signed *upload_link* and return the HTTP
    response; raises S3UploadError for any non-2xx status.
    """
    url_metadata = urlparse(upload_link)
    # Fix: must be an equality check. The original substring test
    # (`scheme in 'https'`) matched "http" (and "") as well, routing plain
    # http links to HTTPSConnection.
    if url_metadata.scheme == 'https':
        connection = http.client.HTTPSConnection(host=url_metadata.hostname)
    else:
        connection = http.client.HTTPConnection(host=url_metadata.hostname)
    connection.request('PUT', upload_link, body=bytes_chunk)
    response = connection.getresponse()
    if 200 <= response.status <= 299:
        return response
    raise S3UploadError(response)
def _get_bytes_hash(bytes_chunk):
return md5(bytes_chunk).hexdigest()
def _get_returned_hash(response):
return response.headers['ETag']
class MultipartUpload:
    """
    This class manages the multi-part upload: it splits the file into parts,
    uploads each part to its pre-signed link with retries and hash checks,
    and finally marks the upload complete via the API client.
    """
    def __init__(self, client, file, target_part_size, retry_count, upload_context: UploadContext):
        # client: API client exposing list()/complete(); file: open binary file
        # object; target_part_size: bytes per part; retry_count: attempts per part.
        self._upload_id = upload_context.upload_id
        self._target_part_size = target_part_size
        self._upload_retry_count = retry_count
        self._file = file
        self._client = client
        self._logger = logging.getLogger(self.__class__.__name__)
        self._upload_context = upload_context
    @property
    def upload_context(self):
        return self._upload_context
    @upload_context.setter
    def upload_context(self, value):
        self._upload_context = value
    def upload(self):
        """
        Upload all parts for the multi-part upload, then mark it complete.
        Raises ValueError when the part size is below MIN_PART_SIZE or the
        file would need more than 10000 parts (the multipart limit).
        """
        if self._target_part_size < MIN_PART_SIZE:
            raise ValueError(f"The part size has to be at least greater than {MIN_PART_SIZE} bytes.")
        filename = self._file.name
        file_size = os.stat(filename).st_size
        part_count = math.ceil(file_size / self._target_part_size)
        if part_count > 10000:
            raise ValueError("The given file cannot be divided into more than 10000 parts. Please try increasing the "
                             "target part size.")
        # Upload the parts
        self._upload_parts(part_count)
        # Mark upload as complete
        self._mark_upload_completion()
    def _upload_parts(self, part_count):
        """Fetch part links page by page and upload each part with retries."""
        try:
            # (Removed an unused local: `filename = self._file.name`.)
            remaining_parts_count = part_count
            total_page_count = math.ceil(part_count / MAX_PAGE_SIZE)
            for page_number in range(1, total_page_count + 1):
                batch_size = min(remaining_parts_count, MAX_PAGE_SIZE)
                page_length = MAX_PAGE_SIZE
                remaining_parts_count = remaining_parts_count - batch_size
                query_params = {'page_length': page_length, 'page': page_number}
                self._logger.debug(
                    f'calling list method with page_number:{page_number} and page_length:{page_length}.')
                body = self._retrieve_part_links(query_params)
                upload_links = body['parts']
                for returned_part in upload_links[:batch_size]:
                    part_number = returned_part['id']
                    # Parts are read sequentially; every part except the last
                    # one of the batch must be exactly target_part_size bytes.
                    bytes_chunk = self._file.read(self._target_part_size)
                    if part_number < batch_size and len(bytes_chunk) != self._target_part_size:
                        raise IOError("Failed to read enough bytes")
                    retry_count = 0
                    for _ in range(self._upload_retry_count):
                        try:
                            self._upload_part(bytes_chunk, part_number, returned_part)
                            self._logger.debug(
                                f"Successfully uploaded part {(page_number - 1) * MAX_PAGE_SIZE + part_number} "
                                f"of {part_count} for upload id {self._upload_id}")
                            break
                        except (DataIntegrityError, PartUploadError, OSError) as err:
                            self._logger.warning(err)
                            retry_count = retry_count + 1
                            self._logger.warning(
                                f"Encountered error upload part {(page_number - 1) * MAX_PAGE_SIZE + part_number} "
                                f"of {part_count} for file (unknown).")
                            if retry_count >= self._upload_retry_count:
                                # Rewind so a caller can retry the whole upload.
                                self._file.seek(0, 0)
                                raise MaxRetriesExceededError(
                                    f"Max retries ({self._upload_retry_count}) exceeded while uploading part"
                                    f" {part_number} of {part_count} for file (unknown).") from err
        except Exception as ex:
            self._file.seek(0, 0)
            self._logger.exception(ex)
            raise
    def _retrieve_part_links(self, query_params):
        """Ask the API client for one page of pre-signed part links."""
        resp = self._client.list(upload_id=self._upload_id, query_params=query_params)
        return resp.json_body
    def _upload_part(self, bytes_chunk, part_number, returned_part):
        """Upload one part unless its server-side hash shows it is already done."""
        computed_hash = _get_bytes_hash(bytes_chunk)
        # Check if the file has already been uploaded and the hash matches. Return immediately without doing anything
        # if the hash matches.
        upload_hash = self._get_uploaded_part_hash(returned_part)
        if upload_hash and (repr(upload_hash) == repr(f"{computed_hash}")):  # returned hash is not surrounded by '"'
            self._logger.debug(f"Part number {part_number} already uploaded. Skipping")
            return
        if upload_hash:
            raise UnrecoverableError(f'The file part {part_number} has been uploaded but the hash of the uploaded part '
                                     f'does not match the hash of the current part read. Aborting.')
        if "upload_link" not in returned_part:
            raise KeyError(f"Invalid upload link for part {part_number}.")
        returned_part = returned_part["upload_link"]
        response = _upload_to_s3(bytes_chunk, returned_part)
        returned_hash = _get_returned_hash(response)
        if repr(returned_hash) != repr(f"\"{computed_hash}\""):  # The returned hash is surrounded by '"' character
            raise DataIntegrityError("The hash of the uploaded file does not match with the hash on the server.")
    def _get_uploaded_part_hash(self, upload_link):
        """Return the 'etag' recorded for this part, or None if never uploaded."""
        upload_hash = upload_link.get("etag")
        return upload_hash
    def _mark_upload_completion(self):
        """Tell the API that all parts are in place."""
        self._client.complete(self._upload_id)
        self._logger.info("Upload successful!")
class SingleUpload:
    """
    This class manages the operations related to the upload of a media file via a direct link.
    """
    def __init__(self, upload_link, file, retry_count, upload_context: UploadContext):
        # upload_link: pre-signed destination URL for the whole file.
        # file: open binary file object; retry_count: attempts before giving up.
        self._upload_link = upload_link
        self._upload_retry_count = retry_count
        self._file = file
        self._logger = logging.getLogger(self.__class__.__name__)
        self._upload_context = upload_context
    @property
    def upload_context(self):
        return self._upload_context
    @upload_context.setter
    def upload_context(self, value):
        self._upload_context = value
    def upload(self):
        """
        Uploads the media file to the actual location as specified in the direct link.
        The whole file is read into memory, PUT to the link, and the returned
        ETag is compared against the local MD5; failures are retried up to
        retry_count times, then MaxRetriesExceededError is raised.
        Returns:
        """
        self._logger.debug(f"Starting to upload file:{self._file.name}")
        self._file.seek(0, 0)
        bytes_chunk = self._file.read()
        computed_hash = _get_bytes_hash(bytes_chunk)
        retry_count = 0
        for _ in range(self._upload_retry_count):
            try:
                response = _upload_to_s3(bytes_chunk, self._upload_link)
                returned_hash = _get_returned_hash(response)
                # The returned hash is surrounded by '"' character
                if repr(returned_hash) != repr(f"\"{computed_hash}\""):
                    raise DataIntegrityError(
                        "The hash of the uploaded file does not match with the hash on the server.")
                self._logger.debug(f"Successfully uploaded file {self._file.name}.")
                return
            except (IOError, PartUploadError, DataIntegrityError, OSError) as err:
                # NOTE(review): IOError is an alias of OSError in Python 3, so
                # listing both is redundant.
                self._logger.warning(err)
                self._logger.exception(err, stack_info=True)
                self._logger.warning(f"Encountered error uploading file {self._file.name}.")
                retry_count = retry_count + 1
                if retry_count >= self._upload_retry_count:
                    # Rewind so a caller can retry the whole upload.
                    self._file.seek(0, 0)
                    raise MaxRetriesExceededError(f"Max retries exceeded while uploading file {self._file.name}") \
                        from err
            except Exception as ex:
                self._file.seek(0, 0)
                self._logger.exception(ex)
                raise
class DataIntegrityError(Exception):
    """Raised when uploaded data fails a hash/integrity check against the
    current local file part."""
class MaxRetriesExceededError(Exception):
    """Raised once the configured number of upload retries is exhausted for a
    part or file."""
class PartUploadError(Exception):
    """Base class for errors that occur while uploading a single part."""
class S3UploadError(PartUploadError):
    """PartUploadError variant raised when the S3 PUT returns a non-2xx
    status."""
class UnrecoverableError(Exception):
    """Raised for states that must not be recovered or resumed from."""
| 37.996454 | 120 | 0.629585 | import http.client
import logging
import math
import os
from dataclasses import dataclass
from enum import Enum
from hashlib import md5
from urllib.parse import urlparse
# Maximum number of part links requested per page from the list endpoint.
MAX_PAGE_SIZE = 1000
# Smallest part size MultipartUpload.upload() accepts (5 MiB).
MIN_PART_SIZE = 5 * 1024 * 1024
UPLOAD_BASE_URL = 'upload.jwplayer.com'
# Upper bound for a single media file (~25 GB); not referenced in this chunk.
MAX_FILE_SIZE = 25 * 1000 * 1024 * 1024
class UploadType(Enum):
    """Enum of the supported upload strategies."""
    direct = "direct"
    multipart = "multipart"
@dataclass
class UploadContext:
    """Holds upload state (method, id, token, link) so it can be resumed."""
    # NOTE(review): @dataclass declares no fields here (the __init__ is
    # hand-written), so the generated __eq__ compares empty field tuples and
    # treats all instances as equal — likely unintended; confirm.
    def __init__(self, upload_method, upload_id, upload_token, direct_link):
        self.upload_method = upload_method
        self.upload_id = upload_id
        self.upload_token = upload_token
        self.direct_link = direct_link
    def can_resume(self) -> bool:
        """Return True when enough state exists to resume a multipart upload."""
        return self.upload_token is not None \
            and self.upload_method == UploadType.multipart.value \
            and self.upload_id is not None
def _upload_to_s3(bytes_chunk, upload_link):
    """
    PUT *bytes_chunk* to the pre-signed *upload_link* and return the HTTP
    response; raises S3UploadError for any non-2xx status.
    """
    url_metadata = urlparse(upload_link)
    # Fix: equality, not substring. `scheme in 'https'` matched "http" (and
    # "") too, routing plain http links to HTTPSConnection.
    if url_metadata.scheme == 'https':
        connection = http.client.HTTPSConnection(host=url_metadata.hostname)
    else:
        connection = http.client.HTTPConnection(host=url_metadata.hostname)
    connection.request('PUT', upload_link, body=bytes_chunk)
    response = connection.getresponse()
    if 200 <= response.status <= 299:
        return response
    raise S3UploadError(response)
def _get_bytes_hash(bytes_chunk):
    """Hex MD5 digest of *bytes_chunk* (compared against S3 part ETags)."""
    return md5(bytes_chunk).hexdigest()
def _get_returned_hash(response):
    """Pull the ETag header (server-side content hash) out of *response*."""
    return response.headers['ETag']
class MultipartUpload:
    """Manages a multi-part upload: splits the file into parts, uploads each
    part to its pre-signed link with retries and hash checks, then marks the
    upload complete via the API client."""
    def __init__(self, client, file, target_part_size, retry_count, upload_context: UploadContext):
        # client: API client exposing list()/complete(); file: open binary
        # file object; target_part_size: bytes per part; retry_count: attempts
        # per part.
        self._upload_id = upload_context.upload_id
        self._target_part_size = target_part_size
        self._upload_retry_count = retry_count
        self._file = file
        self._client = client
        self._logger = logging.getLogger(self.__class__.__name__)
        self._upload_context = upload_context
    @property
    def upload_context(self):
        return self._upload_context
    @upload_context.setter
    def upload_context(self, value):
        self._upload_context = value
    def upload(self):
        """Upload all parts, then mark the upload complete. Raises ValueError
        for too-small parts or more than 10000 parts."""
        if self._target_part_size < MIN_PART_SIZE:
            raise ValueError(f"The part size has to be at least greater than {MIN_PART_SIZE} bytes.")
        filename = self._file.name
        file_size = os.stat(filename).st_size
        part_count = math.ceil(file_size / self._target_part_size)
        if part_count > 10000:
            raise ValueError("The given file cannot be divided into more than 10000 parts. Please try increasing the "
                             "target part size.")
        self._upload_parts(part_count)
        self._mark_upload_completion()
    def _upload_parts(self, part_count):
        """Fetch part links page by page and upload each part with retries."""
        try:
            # NOTE(review): `filename` is assigned but never used here.
            filename = self._file.name
            remaining_parts_count = part_count
            total_page_count = math.ceil(part_count / MAX_PAGE_SIZE)
            for page_number in range(1, total_page_count + 1):
                batch_size = min(remaining_parts_count, MAX_PAGE_SIZE)
                page_length = MAX_PAGE_SIZE
                remaining_parts_count = remaining_parts_count - batch_size
                query_params = {'page_length': page_length, 'page': page_number}
                self._logger.debug(
                    f'calling list method with page_number:{page_number} and page_length:{page_length}.')
                body = self._retrieve_part_links(query_params)
                upload_links = body['parts']
                for returned_part in upload_links[:batch_size]:
                    part_number = returned_part['id']
                    # Sequential read: every part except the last of the batch
                    # must be exactly target_part_size bytes.
                    bytes_chunk = self._file.read(self._target_part_size)
                    if part_number < batch_size and len(bytes_chunk) != self._target_part_size:
                        raise IOError("Failed to read enough bytes")
                    retry_count = 0
                    for _ in range(self._upload_retry_count):
                        try:
                            self._upload_part(bytes_chunk, part_number, returned_part)
                            self._logger.debug(
                                f"Successfully uploaded part {(page_number - 1) * MAX_PAGE_SIZE + part_number} "
                                f"of {part_count} for upload id {self._upload_id}")
                            break
                        except (DataIntegrityError, PartUploadError, OSError) as err:
                            self._logger.warning(err)
                            retry_count = retry_count + 1
                            self._logger.warning(
                                f"Encountered error upload part {(page_number - 1) * MAX_PAGE_SIZE + part_number} "
                                f"of {part_count} for file (unknown).")
                            if retry_count >= self._upload_retry_count:
                                # Rewind so a caller can retry the whole upload.
                                self._file.seek(0, 0)
                                raise MaxRetriesExceededError(
                                    f"Max retries ({self._upload_retry_count}) exceeded while uploading part"
                                    f" {part_number} of {part_count} for file (unknown).") from err
        except Exception as ex:
            self._file.seek(0, 0)
            self._logger.exception(ex)
            raise
    def _retrieve_part_links(self, query_params):
        """Ask the API client for one page of pre-signed part links."""
        resp = self._client.list(upload_id=self._upload_id, query_params=query_params)
        return resp.json_body
    def _upload_part(self, bytes_chunk, part_number, returned_part):
        """Upload one part unless its server-side hash shows it is already done."""
        computed_hash = _get_bytes_hash(bytes_chunk)
        # A stored etag matching our hash means this part is already uploaded.
        upload_hash = self._get_uploaded_part_hash(returned_part)
        if upload_hash and (repr(upload_hash) == repr(f"{computed_hash}")):
            self._logger.debug(f"Part number {part_number} already uploaded. Skipping")
            return
        if upload_hash:
            raise UnrecoverableError(f'The file part {part_number} has been uploaded but the hash of the uploaded part '
                                     f'does not match the hash of the current part read. Aborting.')
        if "upload_link" not in returned_part:
            raise KeyError(f"Invalid upload link for part {part_number}.")
        returned_part = returned_part["upload_link"]
        response = _upload_to_s3(bytes_chunk, returned_part)
        returned_hash = _get_returned_hash(response)
        if repr(returned_hash) != repr(f"\"{computed_hash}\""):  # The returned hash is surrounded by '"' character
            raise DataIntegrityError("The hash of the uploaded file does not match with the hash on the server.")
    def _get_uploaded_part_hash(self, upload_link):
        """Return the 'etag' recorded for this part, or None if never uploaded."""
        upload_hash = upload_link.get("etag")
        return upload_hash
    def _mark_upload_completion(self):
        """Tell the API that all parts are in place."""
        self._client.complete(self._upload_id)
        self._logger.info("Upload successful!")
class SingleUpload:
    """Uploads a whole media file in one PUT to a pre-signed direct link."""
    def __init__(self, upload_link, file, retry_count, upload_context: UploadContext):
        # upload_link: pre-signed destination URL; file: open binary file
        # object; retry_count: attempts before giving up.
        self._upload_link = upload_link
        self._upload_retry_count = retry_count
        self._file = file
        self._logger = logging.getLogger(self.__class__.__name__)
        self._upload_context = upload_context
    @property
    def upload_context(self):
        return self._upload_context
    @upload_context.setter
    def upload_context(self, value):
        self._upload_context = value
    def upload(self):
        """Read the whole file, PUT it to the link, verify the returned ETag
        against the local MD5, retrying up to retry_count times before raising
        MaxRetriesExceededError."""
        self._logger.debug(f"Starting to upload file:{self._file.name}")
        bytes_chunk = self._file.read()
        computed_hash = _get_bytes_hash(bytes_chunk)
        retry_count = 0
        for _ in range(self._upload_retry_count):
            try:
                response = _upload_to_s3(bytes_chunk, self._upload_link)
                returned_hash = _get_returned_hash(response)
                # The ETag returned by the server is wrapped in '"' characters.
                if repr(returned_hash) != repr(f"\"{computed_hash}\""):
                    raise DataIntegrityError(
                        "The hash of the uploaded file does not match with the hash on the server.")
                self._logger.debug(f"Successfully uploaded file {self._file.name}.")
                return
            except (IOError, PartUploadError, DataIntegrityError, OSError) as err:
                # NOTE(review): IOError is an alias of OSError in Python 3.
                self._logger.warning(err)
                self._logger.exception(err, stack_info=True)
                self._logger.warning(f"Encountered error uploading file {self._file.name}.")
                retry_count = retry_count + 1
                if retry_count >= self._upload_retry_count:
                    # Rewind so a caller can retry the whole upload.
                    self._file.seek(0, 0)
                    raise MaxRetriesExceededError(f"Max retries exceeded while uploading file {self._file.name}") \
                        from err
            except Exception as ex:
                self._file.seek(0, 0)
                self._logger.exception(ex)
                raise
class DataIntegrityError(Exception):
    """Raised when uploaded data fails a hash check against the local part."""
    pass
class MaxRetriesExceededError(Exception):
    """Raised once the configured number of upload retries is exhausted."""
    pass
class PartUploadError(Exception):
    """Base class for errors that occur while uploading a single part."""
    pass
class S3UploadError(PartUploadError):
    """PartUploadError variant for non-2xx responses from the S3 PUT."""
    pass
class UnrecoverableError(Exception):
    """Raised for states that must not be recovered or resumed from."""
    pass
| true | true |
1c3b634299fe4ae82ea90f3fdf2e6fe6c49b7c23 | 2,257 | py | Python | model/python/svg/connection_edge_point.py | demx8as6/network-topology-instance-generator | 5dcdba9ad295de32a5a0986f6f39c36c5a4695db | [
"Apache-2.0"
] | null | null | null | model/python/svg/connection_edge_point.py | demx8as6/network-topology-instance-generator | 5dcdba9ad295de32a5a0986f6f39c36c5a4695db | [
"Apache-2.0"
] | null | null | null | model/python/svg/connection_edge_point.py | demx8as6/network-topology-instance-generator | 5dcdba9ad295de32a5a0986f6f39c36c5a4695db | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 highstreet technologies GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
"""
Module containing a class representing an SVG Element as Connection Node Edge Point
"""
from typing import Dict
from lxml import etree
from model.python.svg.svg import Svg
class ConnectionEdgePoint(Svg):
    """
    SVG representation of a TAPI Connection Node Edge Point.

    Renders the edge point as an ellipse plus a short text label derived
    from the edge point's protocol.
    """

    # overwrite
    def svg_main(self) -> etree.Element:
        """
        Method generating the main SVG Element shaping the TAPI object.
        :return SVG Element as main representation for the TAPI object
        """
        attributes = {
            'cx': str(self.center_x()),
            'cy': str(self.center_y()),
            'rx': str(2 * self.FONTSIZE),
            'ry': str(self.FONTSIZE),
            'class': " ".join([self.type_name(), self.tapi_object().role()]),
        }
        ellipse = etree.Element("ellipse")
        for attribute_name, attribute_value in attributes.items():
            ellipse.attrib[attribute_name] = attribute_value
        return ellipse

    def svg_label(self) -> etree.Element:
        """
        Method generating the text label centered on the edge point.
        :return SVG text Element for the TAPI object
        """
        text_element = etree.Element('text')
        text_element.attrib['x'] = str(self.center_x())
        # +4px for font-size 14px (think of chars like 'gjy')
        text_element.attrib['y'] = str(self.center_y() + 4)
        text_element.text = self.__label_by_protocol(self.tapi_object().protocol())
        return text_element

    def __label_by_protocol(self, protocol) -> str:
        """
        Map a protocol identifier (e.g. ``...:netconf``) to a short label;
        unknown protocols are returned unchanged.
        """
        abbreviations: Dict[str, str] = {
            "netconf": "NC",
            "ves": "VES",
            "file": "FTP",
            "ofh": "OFH",
            "rest": "REST",
            "restconf": "RC",
            "unknown": "-"
        }
        lookup_key = protocol.split(":")[1]
        return abbreviations.get(lookup_key, protocol)
| 34.19697 | 83 | 0.626052 |
from typing import Dict
from lxml import etree
from model.python.svg.svg import Svg
class ConnectionEdgePoint(Svg):
    """SVG representation of a TAPI Connection Node Edge Point."""

    def svg_main(self) -> etree.Element:
        """Build the main <ellipse> element for the edge point.

        The ellipse is centered on the object, twice as wide as tall, and
        carries the type name and role as CSS classes.
        """
        main = etree.Element("ellipse")
        main.attrib['cx'] = str(self.center_x())
        main.attrib['cy'] = str(self.center_y())
        main.attrib['rx'] = str(2 * self.FONTSIZE)
        main.attrib['ry'] = str(self.FONTSIZE)
        main.attrib['class'] = " ".join(
            [self.type_name(), self.tapi_object().role()])
        return main

    def svg_label(self) -> etree.Element:
        """Build the <text> label centered on the edge point."""
        label = etree.Element('text')
        label.attrib['x'] = str(self.center_x())
        # +4px vertical shift so the text is optically centered
        label.attrib['y'] = str(self.center_y() + 4)
        label.text = self.__label_by_protocol(self.tapi_object().protocol())
        return label

    def __label_by_protocol(self, protocol) -> str:
        """Map a protocol identifier to a short label; unknown protocols
        are returned unchanged."""
        mapping: Dict[str, str] = {
            "netconf": "NC",
            "ves": "VES",
            "file": "FTP",
            "ofh": "OFH",
            "rest": "REST",
            "restconf": "RC",
            "unknown": "-"
        }
        # protocol looks like "<prefix>:<name>"; keep the part after the colon
        search = protocol.split(":")[1]
        if search in mapping:
            return mapping[search]
        return protocol
| true | true |
1c3b63a9da742d8fa84f8684ae951375394f55f9 | 1,261 | py | Python | image/drawing/drawing_pen.py | shuge/Qt-Python-Binding-Examples | efe40c8af6c3e0805a5a7c3d053b8c8bf893a803 | [
"BSD-3-Clause"
] | 179 | 2015-01-08T10:21:28.000Z | 2020-03-24T07:03:04.000Z | image/drawing/drawing_pen.py | tonytony2020/Qt-Python-Binding-Examples | efe40c8af6c3e0805a5a7c3d053b8c8bf893a803 | [
"BSD-3-Clause"
] | 1 | 2019-12-23T17:14:37.000Z | 2020-01-09T16:45:58.000Z | image/drawing/drawing_pen.py | shuge/Qt-Python-Binding-Examples | efe40c8af6c3e0805a5a7c3d053b8c8bf893a803 | [
"BSD-3-Clause"
] | 57 | 2015-01-05T09:34:15.000Z | 2019-11-18T06:12:08.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# penstyles.py
import sys
from PySide import QtGui, QtCore
class Example(QtGui.QWidget):
    """Widget demonstrating the available Qt pen line styles.

    Draws six horizontal lines, one per style: solid, dash, dash-dot,
    dot, dash-dot-dot and a custom dash pattern.
    """

    def __init__(self):
        super(Example, self).__init__()
        self.setGeometry(300, 300, 280, 270)
        self.setWindowTitle('penstyles')

    def paintEvent(self, e):
        """Qt paint handler: delegate all drawing to doDrawing()."""
        qp = QtGui.QPainter()
        qp.begin(self)
        self.doDrawing(qp)
        qp.end()

    def doDrawing(self, qp):
        """Draw one horizontal line per pen style, spaced 40 px apart.

        :param qp: an active QPainter (``begin()`` already called)
        """
        pen = QtGui.QPen(QtCore.Qt.black, 2, QtCore.Qt.SolidLine)
        # One entry per demo row; replaces six copies of the same
        # setStyle/setPen/drawLine sequence.
        styles = [
            QtCore.Qt.SolidLine,
            QtCore.Qt.DashLine,
            QtCore.Qt.DashDotLine,
            QtCore.Qt.DotLine,
            QtCore.Qt.DashDotDotLine,
            QtCore.Qt.CustomDashLine,
        ]
        for row, style in enumerate(styles):
            pen.setStyle(style)
            if style == QtCore.Qt.CustomDashLine:
                # dash pattern is expressed in units of the pen width
                pen.setDashPattern([1, 4, 5, 4])
            qp.setPen(pen)
            y = 40 * (row + 1)  # rows at y = 40, 80, ..., 240
            qp.drawLine(20, y, 250, y)
def main():
    """Create the application, show the demo widget and run the Qt loop.

    Propagates the Qt event-loop return code as the process exit status
    (the original script discarded it and also ran on import).
    """
    app = QtGui.QApplication(sys.argv)
    ex = Example()
    ex.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| 21.372881 | 65 | 0.570182 |
import sys
from PySide import QtGui, QtCore
class Example(QtGui.QWidget):
    """Widget demonstrating the available Qt pen line styles."""
    def __init__(self):
        super(Example, self).__init__()
        self.setGeometry(300, 300, 280, 270)
        self.setWindowTitle('penstyles')
    def paintEvent(self, e):
        """Qt paint handler: delegate all drawing to doDrawing()."""
        qp = QtGui.QPainter()
        qp.begin(self)
        self.doDrawing(qp)
        qp.end()
    def doDrawing(self, qp):
        """Draw six horizontal lines, one per pen style, 40 px apart."""
        pen = QtGui.QPen(QtCore.Qt.black, 2, QtCore.Qt.SolidLine)
        qp.setPen(pen)
        qp.drawLine(20, 40, 250, 40)
        pen.setStyle(QtCore.Qt.DashLine)
        qp.setPen(pen)
        qp.drawLine(20, 80, 250, 80)
        pen.setStyle(QtCore.Qt.DashDotLine)
        qp.setPen(pen)
        qp.drawLine(20, 120, 250, 120)
        pen.setStyle(QtCore.Qt.DotLine)
        qp.setPen(pen)
        qp.drawLine(20, 160, 250, 160)
        pen.setStyle(QtCore.Qt.DashDotDotLine)
        qp.setPen(pen)
        qp.drawLine(20, 200, 250, 200)
        pen.setStyle(QtCore.Qt.CustomDashLine)
        # dash pattern is expressed in units of the pen width
        pen.setDashPattern([1, 4, 5, 4])
        qp.setPen(pen)
        qp.drawLine(20, 240, 250, 240)
# Script entry: create the Qt application, show the demo widget and
# start the event loop (blocks until the window is closed).
app = QtGui.QApplication(sys.argv)
ex = Example()
ex.show()
app.exec_()
| true | true |
1c3b6427f9cbf4c095ef56d602b113ff3e241190 | 2,215 | py | Python | test/test_scio.py | tomd/act-workers | ef42eaf26b14197a6bd1ac9ae12c4d39acc740c1 | [
"ISC"
] | null | null | null | test/test_scio.py | tomd/act-workers | ef42eaf26b14197a6bd1ac9ae12c4d39acc740c1 | [
"ISC"
] | null | null | null | test/test_scio.py | tomd/act-workers | ef42eaf26b14197a6bd1ac9ae12c4d39acc740c1 | [
"ISC"
] | null | null | null | """ Test for scio worker """
import json
import act.api
from act.workers import scio
def test_scio_facts(capsys) -> None:
    """Verify the facts emitted by scio against the captured stdout."""
    with open("test/scio-doc.json") as handle:
        document = json.load(handle)

    api = act.api.Act("", None, "error")
    scio.add_to_act(api, document, output_format="str")

    # one fact per line on stdout
    produced = set(capsys.readouterr().out.split("\n"))

    report = document["hexdigest"]
    digest256 = document["indicators"]["sha256"][0]
    link = document["indicators"]["uri"][0]  # "http://www.us-cert.gov/tlp."

    def mention(object_type, object_value):
        # shorthand for the common report-mentions-object fact
        return api.fact("mentions").source("report", report).destination(object_type, object_value)

    expected = [
        api.fact("name", "TA18-149A.stix.xml").source("report", report),
        mention("ipv4", "187.127.112.60"),
        mention("ipv6", "0000:0000:0000:0000:0000:0000:0000:0001"),
        mention("hash", "4613f51087f01715bf9132c704aea2c2"),
        mention("hash", digest256),
        mention("country", "Colombia"),
        mention("uri", link),
        api.fact("componentOf").source("fqdn", "www.us-cert.gov").destination("uri", link),
        api.fact("componentOf").source("path", "/tlp.").destination("uri", link),
        api.fact("scheme", "http").source("uri", link),
        mention("tool", "kore"),
        mention("uri", "email://redhat@gmail.com"),
        mention("ipv4Network", "192.168.0.0/16"),
        api.fact("represents").source("hash", digest256).destination("content", digest256),
        mention("vulnerability", "cve-2019-222"),
        mention("vulnerability", "ms16-034"),
    ]

    for expectation in expected:
        assert str(expectation) in produced
| 48.152174 | 120 | 0.649661 | import json
import act.api
from act.workers import scio
def test_scio_facts(capsys) -> None:
    """Check that scio.add_to_act emits the expected facts on stdout."""
    with open("test/scio-doc.json") as scio_doc:
        doc = json.loads(scio_doc.read())
    api = act.api.Act("", None, "error")
    scio.add_to_act(api, doc, output_format="str")
    captured = capsys.readouterr()
    # one fact per line on stdout
    facts = set(captured.out.split("\n"))
    report_id = doc["hexdigest"]
    sha256 = doc["indicators"]["sha256"][0]
    uri = doc["indicators"]["uri"][0]  # "http://www.us-cert.gov/tlp."
    fact_assertions = [
        api.fact("name", "TA18-149A.stix.xml").source("report", report_id),
        api.fact("mentions").source("report", report_id).destination("ipv4", "187.127.112.60"),
        api.fact("mentions").source("report", report_id).destination("ipv6", "0000:0000:0000:0000:0000:0000:0000:0001"),
        api.fact("mentions").source("report", report_id).destination("hash", "4613f51087f01715bf9132c704aea2c2"),
        api.fact("mentions").source("report", report_id).destination("hash", sha256),
        api.fact("mentions").source("report", report_id).destination("country", "Colombia"),
        api.fact("mentions").source("report", report_id).destination("uri", uri),
        api.fact("componentOf").source("fqdn", "www.us-cert.gov").destination("uri", uri),
        api.fact("componentOf").source("path", "/tlp.").destination("uri", uri),
        api.fact("scheme", "http").source("uri", uri),
        api.fact("mentions").source("report", report_id).destination("tool", "kore"),
        api.fact("mentions").source("report", report_id).destination("uri", "email://redhat@gmail.com"),
        api.fact("mentions").source("report", report_id).destination("ipv4Network", "192.168.0.0/16"),
        api.fact("represents").source("hash", sha256).destination("content", sha256),
        api.fact("mentions").source("report", report_id).destination("vulnerability", "cve-2019-222"),
        api.fact("mentions").source("report", report_id).destination("vulnerability", "ms16-034"),
    ]
    # every expected fact must appear verbatim among the produced lines
    for fact_assertion in fact_assertions:
        assert str(fact_assertion) in facts
| true | true |
1c3b64777d39f62668262347595156cf7f937d70 | 69,565 | py | Python | aiida/orm/nodes/data/array/bands.py | HaoZeke/aiida-core | 1a4cada67fe36353326dcebfe888ebc01a6c5b7b | [
"MIT",
"BSD-3-Clause"
] | null | null | null | aiida/orm/nodes/data/array/bands.py | HaoZeke/aiida-core | 1a4cada67fe36353326dcebfe888ebc01a6c5b7b | [
"MIT",
"BSD-3-Clause"
] | 2 | 2019-03-06T11:23:42.000Z | 2020-03-09T09:34:07.000Z | aiida/orm/nodes/data/array/bands.py | lorisercole/aiida-core | 84c2098318bf234641219e55795726f99dc25a16 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=too-many-lines
"""
This module defines the classes related to band structures or dispersions
in a Brillouin zone, and how to operate on them.
"""
from string import Template
import numpy
from aiida.common.exceptions import ValidationError
from aiida.common.utils import prettify_labels, join_labels
from .kpoints import KpointsData
def prepare_header_comment(uuid, plot_info, comment_char='#'):
    """Build the commented header block describing a bands data dump.

    :param uuid: UUID of the BandsData node being dumped
    :param plot_info: dictionary as produced by ``_get_bandplot_data``;
        only the ``y`` array shape and the ``raw_labels`` list are used
    :param comment_char: string prepended to every header line
    :return: the full header as a single newline-joined string
    """
    from aiida import get_file_header

    lines = get_file_header(comment_char='').splitlines()
    lines.append('')
    lines.append('Dumped from BandsData UUID={}'.format(uuid))
    lines.append('\tpoints\tbands')
    lines.append('\t{}\t{}'.format(*plot_info['y'].shape))
    lines.append('')
    lines.append('\tlabel\tpoint')
    # raw_labels entries are (x position, label text) tuples
    lines.extend('\t{}\t{:.8f}'.format(text, position) for position, text in plot_info['raw_labels'])
    return '\n'.join('{} {}'.format(comment_char, line) for line in lines)
def find_bandgap(bandsdata, number_electrons=None, fermi_energy=None):
    """
    Tries to guess whether the bandsdata represent an insulator.
    This method is meant to be used only for electronic bands (not phonons)
    By default, it will try to use the occupations to guess the number of
    electrons and find the Fermi Energy, otherwise, it can be provided
    explicitly.
    Also, there is an implicit assumption that the kpoints grid is
    "sufficiently" dense, so that the bandsdata are not missing the
    intersection between valence and conduction band if present.
    Use this function with care!

    :param number_electrons: (optional, float) number of electrons in the unit cell
    :param fermi_energy: (optional, float) value of the fermi energy.

    :note: By default, the algorithm uses the occupations array
      to guess the number of electrons and the occupied bands. This is to be
      used with care, because the occupations could be smeared so at a
      non-zero temperature, with the unwanted effect that the conduction bands
      might be occupied in an insulator.
      Prefer to pass the number_of_electrons explicitly

    :note: Only one between number_electrons and fermi_energy can be specified at the
      same time.

    :return: (is_insulator, gap), where is_insulator is a boolean, and gap a
             float. The gap is None in case of a metal, zero when the homo is
             equal to the lumo (e.g. in semi-metals).
    """

    # pylint: disable=too-many-return-statements,too-many-branches,too-many-statements,no-else-return

    def nint(num):
        """
        Stable rounding function: round half away from zero, so nint(0.5) == 1
        and nint(-0.5) == -1 (unlike Python's banker's rounding).
        """
        if num > 0:
            return int(num + .5)
        return int(num - .5)

    if fermi_energy and number_electrons:
        raise ValueError('Specify either the number of electrons or the Fermi energy, but not both')

    try:
        stored_bands = bandsdata.get_bands()
    except KeyError:
        raise KeyError('Cannot do much of a band analysis without bands')

    if len(stored_bands.shape) == 3:
        # I write the algorithm for the generic case of having both the spin up and spin down array
        # put all spins on one band per kpoint
        bands = numpy.concatenate(stored_bands, axis=1)
    else:
        bands = stored_bands

    # analysis on occupations:
    if fermi_energy is None:

        num_kpoints = len(bands)

        if number_electrons is None:
            try:
                _, stored_occupations = bandsdata.get_bands(also_occupations=True)
            except KeyError:
                raise KeyError("Cannot determine metallicity if I don't have either fermi energy, or occupations")

            # put the occupations in the same order of bands, also in case of multiple bands
            if len(stored_occupations.shape) == 3:
                # I write the algorithm for the generic case of having both the
                # spin up and spin down array
                # put all spins on one band per kpoint
                occupations = numpy.concatenate(stored_occupations, axis=1)
            else:
                occupations = stored_occupations

            # now sort the bands by energy
            # Note: I am sort of assuming that I have an electronic ground state

            # sort the bands by energy, and reorder the occupations accordingly
            # since after joining the two spins, I might have unsorted stuff.
            # For each kpoint: zip bands with occupations, sort the pairs by
            # energy, then unzip back into two per-kpoint lists; the outer
            # zip(*...) transposes the result into (all_bands, all_occupations).
            bands, occupations = [
                numpy.array(y) for y in zip(
                    *[
                        list(zip(*j)) for j in [
                            sorted(zip(i[0].tolist(), i[1].tolist()), key=lambda x: x[0])
                            for i in zip(bands, occupations)
                        ]
                    ]
                )
            ]
            # average total occupation over the kpoints gives the electron count
            number_electrons = int(round(sum([sum(i) for i in occupations]) / num_kpoints))

            # index of the highest band with (rounded) non-zero occupation, per kpoint
            homo_indexes = [numpy.where(numpy.array([nint(_) for _ in x]) > 0)[0][-1] for x in occupations]
            if len(set(homo_indexes)) > 1:  # there must be intersections of valence and conduction bands
                return False, None

            homo = [_[0][_[1]] for _ in zip(bands, homo_indexes)]
            try:
                lumo = [_[0][_[1] + 1] for _ in zip(bands, homo_indexes)]
            except IndexError:
                raise ValueError(
                    'To understand if it is a metal or insulator, '
                    'need more bands than n_band=number_electrons'
                )

        else:
            bands = numpy.sort(bands)
            number_electrons = int(number_electrons)

            # find the zero-temperature occupation per band (1 for spin-polarized
            # calculation, 2 otherwise)
            number_electrons_per_band = 4 - len(stored_bands.shape)  # 1 or 2
            # gather the energies of the homo band, for every kpoint
            homo = [i[number_electrons // number_electrons_per_band - 1] for i in bands]  # take the nth level
            try:
                # gather the energies of the lumo band, for every kpoint
                lumo = [i[number_electrons // number_electrons_per_band] for i in bands]  # take the n+1th level
            except IndexError:
                raise ValueError(
                    'To understand if it is a metal or insulator, '
                    'need more bands than n_band=number_electrons'
                )

        if number_electrons % 2 == 1 and len(stored_bands.shape) == 2:
            # if #electrons is odd and we have a non spin polarized calculation
            # it must be a metal and I don't need further checks
            return False, None

        # if the nth band crosses the (n+1)th, it is an insulator
        gap = min(lumo) - max(homo)
        if gap == 0.:
            return False, 0.
        if gap < 0.:
            return False, None
        return True, gap

    # analysis on the fermi energy
    else:
        # reorganize the bands, rather than per kpoint, per energy level

        # I need the bands sorted by energy
        bands.sort()
        levels = bands.transpose()
        max_mins = [(max(i), min(i)) for i in levels]

        if fermi_energy > bands.max():
            raise ValueError("The Fermi energy is above all band energies, don't know what to do")
        if fermi_energy < bands.min():
            raise ValueError("The Fermi energy is below all band energies, don't know what to do.")

        # one band is crossed by the fermi energy
        if any(i[1] < fermi_energy and fermi_energy < i[0] for i in max_mins):  # pylint: disable=chained-comparison
            return False, None

        # case of semimetals, fermi energy at the crossing of two bands
        # this will only work if the dirac point is computed!
        if (any(i[0] == fermi_energy for i in max_mins) and any(i[1] == fermi_energy for i in max_mins)):
            return False, 0.
        # insulating case, take the max of the band maxima below the fermi energy
        homo = max([i[0] for i in max_mins if i[0] < fermi_energy])
        # take the min of the band minima above the fermi energy
        lumo = min([i[1] for i in max_mins if i[1] > fermi_energy])
        gap = lumo - homo
        if gap <= 0.:
            raise Exception('Something wrong has been implemented. Revise the code!')
        return True, gap
class BandsData(KpointsData):
"""
Class to handle bands data
"""
    def set_kpointsdata(self, kpointsdata):
        """
        Load the kpoints from a kpoint object.

        Cell, periodic boundary conditions, kpoints (with weights, when
        available) and labels are copied over on a best-effort basis:
        attributes the source object does not define are silently skipped.

        :param kpointsdata: an instance of KpointsData class
        :raises ValueError: if ``kpointsdata`` is not a KpointsData instance
        """
        if not isinstance(kpointsdata, KpointsData):
            raise ValueError('kpointsdata must be of the KpointsData class')
        try:
            self.cell = kpointsdata.cell
        except AttributeError:
            # source has no cell defined; leave ours untouched
            pass
        try:
            self.pbc = kpointsdata.pbc
        except AttributeError:
            pass
        try:
            the_kpoints = kpointsdata.get_kpoints()
        except AttributeError:
            the_kpoints = None
        try:
            # second element of the (kpoints, weights) tuple
            the_weights = kpointsdata.get_kpoints(also_weights=True)[1]
        except AttributeError:
            the_weights = None
        self.set_kpoints(the_kpoints, weights=the_weights)
        try:
            self.labels = kpointsdata.labels
        except (AttributeError, TypeError):
            # fall back to no labels if the source has none (or they are unset)
            self.labels = []
def _validate_bands_occupations(self, bands, occupations=None, labels=None):
"""
Validate the list of bands and of occupations before storage.
Kpoints must be set in advance.
Bands and occupations must be convertible into arrays of
Nkpoints x Nbands floats or Nspins x Nkpoints x Nbands; Nkpoints must
correspond to the number of kpoints.
"""
# pylint: disable=too-many-branches
try:
kpoints = self.get_kpoints()
except AttributeError:
raise AttributeError('Must first set the kpoints, then the bands')
the_bands = numpy.array(bands)
if len(the_bands.shape) not in [2, 3]:
raise ValueError(
'Bands must be an array of dimension 2'
'([N_kpoints, N_bands]) or of dimension 3 '
' ([N_arrays, N_kpoints, N_bands]), found instead {}'.format(len(the_bands.shape))
)
list_of_arrays_to_be_checked = []
# check that the shape of everything is consistent with the kpoints
num_kpoints_from_bands = the_bands.shape[0] if len(the_bands.shape) == 2 else the_bands.shape[1]
if num_kpoints_from_bands != len(kpoints):
raise ValueError('There must be energy values for every kpoint')
if occupations is not None:
the_occupations = numpy.array(occupations)
if the_occupations.shape != the_bands.shape:
raise ValueError(
'Shape of occupations {} different from shape'
'shape of bands {}'.format(the_occupations.shape, the_bands.shape)
)
if not the_bands.dtype.type == numpy.float64:
list_of_arrays_to_be_checked.append([the_occupations, 'occupations'])
else:
the_occupations = None
# list_of_arrays_to_be_checked = [ [the_bands,'bands'] ]
# check that there every element is a float
if not the_bands.dtype.type == numpy.float64:
list_of_arrays_to_be_checked.append([the_bands, 'bands'])
for x, msg in list_of_arrays_to_be_checked:
try:
[float(_) for _ in x.flatten() if _ is not None]
except (TypeError, ValueError):
raise ValueError('The {} array can only contain float or None values'.format(msg))
# check the labels
if labels is not None:
if isinstance(labels, str):
the_labels = [str(labels)]
elif isinstance(labels, (tuple, list)) and all([isinstance(_, str) for _ in labels]):
the_labels = [str(_) for _ in labels]
else:
raise ValidationError(
'Band labels have an unrecognized type ({})'
'but should be a string or a list of strings'.format(labels.__class__)
)
if len(the_bands.shape) == 2 and len(the_labels) != 1:
raise ValidationError('More array labels than the number of arrays')
elif len(the_bands.shape) == 3 and len(the_labels) != the_bands.shape[0]:
raise ValidationError('More array labels than the number of arrays')
else:
the_labels = None
return the_bands, the_occupations, the_labels
def set_bands(self, bands, units=None, occupations=None, labels=None):
"""
Set an array of band energies of dimension (nkpoints x nbands).
Kpoints must be set in advance. Can contain floats or None.
:param bands: a list of nkpoints lists of nbands bands, or a 2D array
of shape (nkpoints x nbands), with band energies for each kpoint
:param units: optional, energy units
:param occupations: optional, a 2D list or array of floats of same
shape as bands, with the occupation associated to each band
"""
# checks bands and occupations
the_bands, the_occupations, the_labels = self._validate_bands_occupations(bands, occupations, labels)
# set bands and their units
self.set_array('bands', the_bands)
self.units = units
if the_labels is not None:
self.set_attribute('array_labels', the_labels)
if the_occupations is not None:
# set occupations
self.set_array('occupations', the_occupations)
    @property
    def array_labels(self):
        """
        Get the labels associated with the band arrays.

        :return: the list of labels stored via ``set_bands``, or ``None``
            if no labels were set.
        """
        return self.get_attribute('array_labels', None)
    @property
    def units(self):
        """
        Units in which the data in bands were stored. A string.
        """
        return self.get_attribute('units')
    @units.setter
    def units(self, value):
        """
        Set the energy units of the stored bands.

        :param value: converted with ``str()`` and stored in the ``units``
            attribute.
        """
        the_str = str(value)
        self.set_attribute('units', the_str)
def _set_pbc(self, value):
"""
validate the pbc, then store them
"""
from aiida.common.exceptions import ModificationNotAllowed
from aiida.orm.nodes.data.structure import get_valid_pbc
if self.is_stored:
raise ModificationNotAllowed('The KpointsData object cannot be modified, it has already been stored')
the_pbc = get_valid_pbc(value)
self.set_attribute('pbc1', the_pbc[0])
self.set_attribute('pbc2', the_pbc[1])
self.set_attribute('pbc3', the_pbc[2])
def get_bands(self, also_occupations=False, also_labels=False):
"""
Returns an array (nkpoints x num_bands or nspins x nkpoints x num_bands)
of energies.
:param also_occupations: if True, returns also the occupations array.
Default = False
"""
try:
bands = numpy.array(self.get_array('bands'))
except KeyError:
raise AttributeError('No stored bands has been found')
to_return = [bands]
if also_occupations:
try:
occupations = numpy.array(self.get_array('occupations'))
except KeyError:
raise AttributeError('No occupations were set')
to_return.append(occupations)
if also_labels:
to_return.append(self.array_labels)
if len(to_return) == 1:
return bands
return to_return
    def _get_bandplot_data(self, cartesian, prettify_format=None, join_symbol=None, get_segments=False, y_origin=0.):
        """
        Get data to plot a band structure

        :param cartesian: if True, distances (for the x-axis) are computed in
            cartesian coordinates, otherwise they are computed in reciprocal
            coordinates. cartesian=True will fail if no cell has been set.
        :param prettify_format: by default, strings are not prettified. If you want
            to prettify them, pass a valid prettify_format string (see valid options
            in the docstring of :py:func:prettify_labels).
        :param join_symbol: by default, strings are not joined. If you pass a string,
            this is used to join strings that are much closer than a given threshold.
            The most typical string is the pipe symbol: ``|``.
        :param get_segments: if True, also computes the band split into segments
        :param y_origin: if present, shift bands so to set the value specified at ``y=0``
        :return: a plot_info dictionary, whose keys are ``x`` (array of distances
            for the x axis of the plot); ``y`` (array of bands), ``labels`` (list
            of tuples in the format (float x value of the label, label string),
            ``band_type_idx`` (array containing an index for each band: if there is only
            one spin, then it's an array of zeros, of length equal to the number of bands
            at each point; if there are two spins, then it's an array of zeros or ones
            depending on the type of spin; the length is always equal to the total
            number of bands per kpoint).
        """
        # pylint: disable=too-many-locals,too-many-branches,too-many-statements
        # load the x and y's of the graph
        stored_bands = self.get_bands()
        if len(stored_bands.shape) == 2:
            bands = stored_bands
            band_type_idx = numpy.array([0] * stored_bands.shape[1])
            two_band_types = False
        elif len(stored_bands.shape) == 3:
            # two spin channels: flatten them into one band list per kpoint
            bands = numpy.concatenate(stored_bands, axis=1)
            band_type_idx = numpy.array([0] * stored_bands.shape[2] + [1] * stored_bands.shape[2])
            two_band_types = True
        else:
            raise ValueError('Unexpected shape of bands')

        bands -= y_origin

        # here I build the x distances on the graph (in cartesian coordinates
        # if cartesian==True AND if the cell was set, otherwise in reciprocal
        # coordinates)
        try:
            kpoints = self.get_kpoints(cartesian=cartesian)
        except AttributeError:
            # this error is happening if cartesian==True and if no cell has been
            # set -> we switch to reciprocal coordinates to compute distances
            kpoints = self.get_kpoints()

        # I take advantage of the path to recognize discontinuities
        try:
            labels = self.labels
            labels_indices = [i[0] for i in labels]
        except (AttributeError, TypeError):
            labels = []
            labels_indices = []

        # since I can have discontinuous paths, I set on those points the distance to zero
        # as a result, where there are discontinuities in the path,
        # I have two consecutive points with the same x coordinate
        distances = [
            numpy.linalg.norm(kpoints[i] -
                              kpoints[i - 1]) if not (i in labels_indices and i - 1 in labels_indices) else 0.
            for i in range(1, len(kpoints))
        ]
        # cumulative distance along the path; one x value per kpoint
        x = [float(sum(distances[:i])) for i in range(len(distances) + 1)]

        # transform the index of the labels in the coordinates of x
        raw_labels = [(x[i[0]], i[1]) for i in labels]

        the_labels = raw_labels

        if prettify_format:
            the_labels = prettify_labels(the_labels, format=prettify_format)
        if join_symbol:
            the_labels = join_labels(the_labels, join_symbol=join_symbol)

        plot_info = {}
        plot_info['x'] = x
        plot_info['y'] = bands
        plot_info['band_type_idx'] = band_type_idx
        plot_info['raw_labels'] = raw_labels
        plot_info['labels'] = the_labels

        if get_segments:
            plot_info['path'] = []
            plot_info['paths'] = []

            if len(labels) > 1:
                # I add an empty label that points to the first band if the first label does not do it
                if labels[0][0] != 0:
                    labels.insert(0, (0, ''))
                # I add an empty label that points to the last band if the last label does not do it
                if labels[-1][0] != len(bands) - 1:
                    labels.append((len(bands) - 1, ''))
                # walk consecutive label pairs; each pair delimits one segment
                for (position_from, label_from), (position_to, label_to) in zip(labels[:-1], labels[1:]):
                    if position_to - position_from > 1:
                        # Create a new path line only if there are at least two points,
                        # otherwise it is probably just a discontinuity point in the band
                        # structure (e.g. Gamma-X|Y-Gamma), where X and Y would be two
                        # consecutive points, but there is no path between them
                        plot_info['path'].append([label_from, label_to])
                    path_dict = {
                        'length': position_to - position_from,
                        'from': label_from,
                        'to': label_to,
                        'values': bands[position_from:position_to + 1, :].transpose().tolist(),
                        'x': x[position_from:position_to + 1],
                        'two_band_types': two_band_types,
                    }
                    plot_info['paths'].append(path_dict)
            else:
                # no usable labels: treat the whole band structure as one segment
                label_from = '0'
                label_to = '1'
                path_dict = {
                    'length': bands.shape[0] - 1,
                    'from': label_from,
                    'to': label_to,
                    'values': bands.transpose().tolist(),
                    'x': x,
                    'two_band_types': two_band_types,
                }
                plot_info['paths'].append(path_dict)
        return plot_info
def _prepare_agr_batch(self, main_file_name='', comments=True, prettify_format=None):
"""
Prepare two files, data and batch, to be plot with xmgrace as:
xmgrace -batch file.dat
:param main_file_name: if the user asks to write the main content on a
file, this contains the filename. This should be used to infer a
good filename for the additional files.
In this case, we remove the extension, and add '_data.dat'
:param comments: if True, print comments (if it makes sense for the given
format)
:param prettify_format: if None, use the default prettify format. Otherwise
specify a string with the prettifier to use.
"""
# pylint: disable=too-many-locals
import os
dat_filename = os.path.splitext(main_file_name)[0] + '_data.dat'
if prettify_format is None:
# Default. Specified like this to allow caller functions to pass 'None'
prettify_format = 'agr_seekpath'
plot_info = self._get_bandplot_data(cartesian=True, prettify_format=prettify_format, join_symbol='|')
bands = plot_info['y']
x = plot_info['x']
labels = plot_info['labels']
num_bands = bands.shape[1]
# axis limits
y_max_lim = bands.max()
y_min_lim = bands.min()
x_min_lim = min(x) # this isn't a numpy array, but a list
x_max_lim = max(x)
# first prepare the xy coordinates of the sets
raw_data, _ = self._prepare_dat_blocks(plot_info)
batch = []
if comments:
batch.append(prepare_header_comment(self.uuid, plot_info, comment_char='#'))
batch.append('READ XY "{}"'.format(dat_filename))
# axis limits
batch.append('world {}, {}, {}, {}'.format(x_min_lim, y_min_lim, x_max_lim, y_max_lim))
# axis label
batch.append('yaxis label "Dispersion"')
# axis ticks
batch.append('xaxis tick place both')
batch.append('xaxis tick spec type both')
batch.append('xaxis tick spec {}'.format(len(labels)))
# set the name of the special points
for index, label in enumerate(labels):
batch.append('xaxis tick major {}, {}'.format(index, label[0]))
batch.append('xaxis ticklabel {}, "{}"'.format(index, label[1]))
batch.append('xaxis tick major color 7')
batch.append('xaxis tick major grid on')
# minor graphical tweak
batch.append('yaxis tick minor ticks 3')
batch.append('frame linewidth 1.0')
# use helvetica fonts
batch.append('map font 4 to "Helvetica", "Helvetica"')
batch.append('yaxis label font 4')
batch.append('xaxis label font 4')
# set color and linewidths of bands
for index in range(num_bands):
batch.append('s{} line color 1'.format(index))
batch.append('s{} linewidth 1'.format(index))
batch_data = '\n'.join(batch) + '\n'
extra_files = {dat_filename: raw_data}
return batch_data.encode('utf-8'), extra_files
def _prepare_dat_multicolumn(self, main_file_name='', comments=True): # pylint: disable=unused-argument
"""
Write an N x M matrix. First column is the distance between kpoints,
The other columns are the bands. Header contains number of kpoints and
the number of bands (commented).
:param comments: if True, print comments (if it makes sense for the given
format)
"""
plot_info = self._get_bandplot_data(cartesian=True, prettify_format=None, join_symbol='|')
bands = plot_info['y']
x = plot_info['x']
return_text = []
if comments:
return_text.append(prepare_header_comment(self.uuid, plot_info, comment_char='#'))
for i in zip(x, bands):
line = ['{:.8f}'.format(i[0])] + ['{:.8f}'.format(j) for j in i[1]]
return_text.append('\t'.join(line))
return ('\n'.join(return_text) + '\n').encode('utf-8'), {}
def _prepare_dat_blocks(self, main_file_name='', comments=True): # pylint: disable=unused-argument
"""
Format suitable for gnuplot using blocks.
Columns with x and y (path and band energy). Several blocks, separated
by two empty lines, one per energy band.
:param comments: if True, print comments (if it makes sense for the given
format)
"""
plot_info = self._get_bandplot_data(cartesian=True, prettify_format=None, join_symbol='|')
bands = plot_info['y']
x = plot_info['x']
return_text = []
if comments:
return_text.append(prepare_header_comment(self.uuid, plot_info, comment_char='#'))
for band in numpy.transpose(bands):
for i in zip(x, band):
line = ['{:.8f}'.format(i[0]), '{:.8f}'.format(i[1])]
return_text.append('\t'.join(line))
return_text.append('')
return_text.append('')
return '\n'.join(return_text).encode('utf-8'), {}
def _matplotlib_get_dict(
    self,
    main_file_name='',
    comments=True,
    title='',
    legend=None,
    legend2=None,
    y_max_lim=None,
    y_min_lim=None,
    y_origin=0.,
    prettify_format=None,
    **kwargs
):  # pylint: disable=unused-argument
    """
    Prepare the data to send to the python-matplotlib plotting script.

    :param main_file_name: unused here; accepted only for signature
        uniformity with the other export methods (hence the
        unused-argument disable).
    :param comments: if True, store a header comment in the returned
        dictionary (under the key 'comment')
    :param title: the title of the plot
    :param legend: the legend (applied only to the first of the set)
    :param legend2: the legend for second-type spins
        (applied only to the first of the set)
    :param y_max_lim: the maximum on the y axis (if None, put the
        maximum of the bands)
    :param y_min_lim: the minimum on the y axis (if None, put the
        minimum of the bands)
    :param y_origin: the new origin of the y axis -> all bands are replaced
        by bands-y_origin
    :param prettify_format: if None, use the default prettify format. Otherwise
        specify a string with the prettifier to use.
    :param kwargs: additional customization variables; only a subset is
        accepted, see internal variable 'valid_additional_keywords'
    :return: a dictionary with all the data needed by the matplotlib
        script templates
    :raises TypeError: if a keyword argument outside
        'valid_additional_keywords' is passed
    """
    # pylint: disable=too-many-arguments,too-many-locals
    # Only these keywords are accepted in kwargs, and then set into the json
    valid_additional_keywords = [
        'bands_color',  # Color of band lines
        'bands_linewidth',  # linewidth of bands
        'bands_linestyle',  # linestyle of bands
        'bands_marker',  # marker for bands
        'bands_markersize',  # size of the marker of bands
        'bands_markeredgecolor',  # marker edge color for bands
        'bands_markeredgewidth',  # marker edge width for bands
        'bands_markerfacecolor',  # marker face color for bands
        'bands_color2',  # Color of band lines (for other spin, if present)
        'bands_linewidth2',  # linewidth of bands (for other spin, if present)
        'bands_linestyle2',  # linestyle of bands (for other spin, if present)
        'bands_marker2',  # marker for bands (for other spin, if present)
        'bands_markersize2',  # size of the marker of bands (for other spin, if present)
        'bands_markeredgecolor2',  # marker edge color for bands (for other spin, if present)
        'bands_markeredgewidth2',  # marker edge width for bands (for other spin, if present)
        'bands_markerfacecolor2',  # marker face color for bands (for other spin, if present)
        'plot_zero_axis',  # If true, plot an axis at y=0
        'zero_axis_color',  # Color of the axis at y=0
        'zero_axis_linestyle',  # linestyle of the axis at y=0
        'zero_axis_linewidth',  # linewidth of the axis at y=0
        'use_latex',  # If true, use latex to render captions
    ]
    # Note: I do not want to import matplotlib here, for two reasons:
    # 1. I would like to be able to print the script for the user
    # 2. I don't want to mess up with the user matplotlib backend
    #    (that I should do if the user does not have a X server, but that
    #    I do not want to do if he's e.g. in jupyter)
    # Therefore I just create a string that can be executed as needed, e.g. with eval.
    # I take care of sanitizing the output.
    if prettify_format is None:
        # Default. Specified like this to allow caller functions to pass 'None'
        prettify_format = 'latex_seekpath'
    # The default for use_latex is False; with latex, '|' must be spelled \textbar{}
    join_symbol = r'\textbar{}' if kwargs.get('use_latex', False) else '|'
    plot_info = self._get_bandplot_data(
        cartesian=True,
        prettify_format=prettify_format,
        join_symbol=join_symbol,
        get_segments=True,
        y_origin=y_origin
    )
    all_data = {}
    bands = plot_info['y']
    x = plot_info['x']
    labels = plot_info['labels']
    # prepare xticks labels (position, text) pairs; empty lists if no labels
    if labels:
        tick_pos, tick_labels = zip(*labels)
    else:
        tick_pos = []
        tick_labels = []
    all_data['paths'] = plot_info['paths']
    all_data['band_type_idx'] = plot_info['band_type_idx'].tolist()
    all_data['tick_pos'] = tick_pos
    all_data['tick_labels'] = tick_labels
    all_data['legend_text'] = legend
    all_data['legend_text2'] = legend2
    all_data['yaxis_label'] = 'Dispersion ({})'.format(self.units)
    all_data['title'] = title
    if comments:
        all_data['comment'] = prepare_header_comment(self.uuid, plot_info, comment_char='#')
    # axis limits: fall back to the data extrema when not given by the caller
    if y_max_lim is None:
        y_max_lim = numpy.array(bands).max()
    if y_min_lim is None:
        y_min_lim = numpy.array(bands).min()
    x_min_lim = min(x)  # this isn't a numpy array, but a list
    x_max_lim = max(x)
    all_data['x_min_lim'] = x_min_lim
    all_data['x_max_lim'] = x_max_lim
    all_data['y_min_lim'] = y_min_lim
    all_data['y_max_lim'] = y_max_lim
    # Validate and forward the additional customization options
    for key, value in kwargs.items():
        if key not in valid_additional_keywords:
            raise TypeError("_matplotlib_get_dict() got an unexpected keyword argument '{}'".format(key))
        all_data[key] = value
    return all_data
def _prepare_mpl_singlefile(self, *args, **kwargs):
    """
    Prepare a self-contained python script using matplotlib to plot the bands.

    For the possible parameters, see documentation of
    :py:meth:`~aiida.orm.nodes.data.array.bands.BandsData._matplotlib_get_dict`

    :return: a (script_bytes, extra_files_dict) tuple; the dict is empty as
        the data is embedded inline in the script.
    """
    from aiida.common import json

    all_data = self._matplotlib_get_dict(*args, **kwargs)

    # Assemble the script: header, inline JSON data, plotting body, show() footer.
    script_parts = [
        MATPLOTLIB_HEADER_TEMPLATE.substitute(),
        MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE.substitute(all_data_json=json.dumps(all_data, indent=2)),
        self._get_mpl_body_template(all_data['paths']),
        MATPLOTLIB_FOOTER_TEMPLATE_SHOW.substitute(),
    ]

    return ''.join(script_parts).encode('utf-8'), {}
def _prepare_mpl_withjson(self, main_file_name='', *args, **kwargs):  # pylint: disable=keyword-arg-before-vararg
    """
    Prepare a python script using matplotlib to plot the bands, with the JSON
    returned as an independent file.

    :param main_file_name: the name the main script will be written to; its
        extension is replaced by '_data.json' to name the companion data file
    For the possible parameters, see documentation of
    :py:meth:`~aiida.orm.nodes.data.array.bands.BandsData._matplotlib_get_dict`

    :return: a (script_bytes, extra_files_dict) tuple; the dict maps the JSON
        filename to the serialized plot data.
    """
    import os

    from aiida.common import json

    all_data = self._matplotlib_get_dict(*args, main_file_name=main_file_name, **kwargs)

    json_fname = os.path.splitext(main_file_name)[0] + '_data.json'
    # Escape backslashes and double quotes so the name can be embedded inside
    # the double-quoted string literal of the generated script. (The previous
    # code used '\"', which equals '"' and was therefore a no-op.)
    escaped_json_fname = json_fname.replace('\\', '\\\\').replace('"', '\\"')

    # The unescaped name is the actual file written alongside the script.
    ext_files = {json_fname: json.dumps(all_data, indent=2).encode('utf-8')}

    s_header = MATPLOTLIB_HEADER_TEMPLATE.substitute()
    s_import = MATPLOTLIB_IMPORT_DATA_FROMFILE_TEMPLATE.substitute(json_fname=escaped_json_fname)
    s_body = self._get_mpl_body_template(all_data['paths'])
    s_footer = MATPLOTLIB_FOOTER_TEMPLATE_SHOW.substitute()

    string = s_header + s_import + s_body + s_footer

    return string.encode('utf-8'), ext_files
def _prepare_mpl_pdf(self, main_file_name='', *args, **kwargs):  # pylint: disable=keyword-arg-before-vararg,unused-argument
    """
    Prepare a PDF of the band plot by generating a matplotlib script and
    executing it with the current interpreter in a subprocess.

    For the possible parameters, see documentation of
    :py:meth:`~aiida.orm.nodes.data.array.bands.BandsData._matplotlib_get_dict`

    :return: a (pdf_bytes, extra_files_dict) tuple; the dict is always empty.
    :raises RuntimeError: if the subprocess did not produce the PDF file.
    """
    import os
    import subprocess
    import sys
    import tempfile

    from aiida.common import json

    all_data = self._matplotlib_get_dict(*args, **kwargs)

    # Use the Agg backend: the script must render without an X server
    s_header = MATPLOTLIB_HEADER_AGG_TEMPLATE.substitute()
    s_import = MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE.substitute(all_data_json=json.dumps(all_data, indent=2))
    s_body = self._get_mpl_body_template(all_data['paths'])

    # Reserve a temporary file name for the output; the file itself will be
    # created by the subprocess, so remove the placeholder immediately.
    handle, filename = tempfile.mkstemp()
    os.close(handle)
    os.remove(filename)

    # Escape backslashes and double quotes so the path can be embedded inside
    # a double-quoted string literal of the generated script. (The previous
    # code used '\"', which equals '"' and was therefore a no-op.)
    escaped_fname = filename.replace('\\', '\\\\').replace('"', '\\"')

    s_footer = MATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE.substitute(fname=escaped_fname, format='pdf')

    string = s_header + s_import + s_body + s_footer

    # I don't exec it because I might mess up with the matplotlib backend etc.
    # I run instead in a different process, with the same executable
    # (so it should work properly with virtualenvs)
    with tempfile.NamedTemporaryFile(mode='w+') as handle:
        handle.write(string)
        handle.flush()
        subprocess.check_output([sys.executable, handle.name])

    if not os.path.exists(filename):
        raise RuntimeError('Unable to generate the PDF...')

    with open(filename, 'rb', encoding=None) as handle:
        imgdata = handle.read()
    os.remove(filename)

    return imgdata, {}
def _prepare_mpl_png(self, main_file_name='', *args, **kwargs):  # pylint: disable=keyword-arg-before-vararg,unused-argument
    """
    Prepare a PNG of the band plot by generating a matplotlib script and
    executing it with the current interpreter in a subprocess.

    For the possible parameters, see documentation of
    :py:meth:`~aiida.orm.nodes.data.array.bands.BandsData._matplotlib_get_dict`

    :return: a (png_bytes, extra_files_dict) tuple; the dict is always empty.
    :raises RuntimeError: if the subprocess did not produce the PNG file.
    """
    # NOTE(review): unlike _prepare_mpl_pdf, this uses the stdlib json module
    # instead of aiida.common.json; the serialized output is equivalent here.
    import json
    import os
    import subprocess
    import sys
    import tempfile

    all_data = self._matplotlib_get_dict(*args, **kwargs)

    # Use the Agg backend: the script must render without an X server
    s_header = MATPLOTLIB_HEADER_AGG_TEMPLATE.substitute()
    s_import = MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE.substitute(all_data_json=json.dumps(all_data, indent=2))
    s_body = self._get_mpl_body_template(all_data['paths'])

    # Reserve a temporary file name for the output; the file itself will be
    # created by the subprocess, so remove the placeholder immediately.
    handle, filename = tempfile.mkstemp()
    os.close(handle)
    os.remove(filename)

    # Escape backslashes and double quotes so the path can be embedded inside
    # a double-quoted string literal of the generated script. (The previous
    # code used '\"', which equals '"' and was therefore a no-op.)
    escaped_fname = filename.replace('\\', '\\\\').replace('"', '\\"')

    s_footer = MATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE_WITH_DPI.substitute(fname=escaped_fname, format='png', dpi=300)

    string = s_header + s_import + s_body + s_footer

    # I don't exec it because I might mess up with the matplotlib backend etc.
    # I run instead in a different process, with the same executable
    # (so it should work properly with virtualenvs)
    with tempfile.NamedTemporaryFile(mode='w+') as handle:
        handle.write(string)
        handle.flush()
        subprocess.check_output([sys.executable, handle.name])

    if not os.path.exists(filename):
        raise RuntimeError('Unable to generate the PNG...')

    with open(filename, 'rb', encoding=None) as handle:
        imgdata = handle.read()
    os.remove(filename)

    return imgdata, {}
@staticmethod
def _get_mpl_body_template(paths):
    """
    Select the plotting body for the generated matplotlib script.

    :param paths: the list of k-point path segments; a single segment means
        only one k-point, drawn as scatter markers instead of band lines.
    :return: the script body as a string.
    """
    plot_code = SINGLE_KP if len(paths) == 1 else MULTI_KP
    return MATPLOTLIB_BODY_TEMPLATE.substitute(plot_code=plot_code)
def show_mpl(self, **kwargs):
    """
    Call a show() command for the band structure using matplotlib.
    This uses internally the 'mpl_singlefile' format, with empty
    main_file_name.

    Other kwargs are passed to self._exportcontent.
    """
    script, namespace = self._exportcontent(fileformat='mpl_singlefile', main_file_name='', **kwargs)
    # Run the generated script; the extra-files dict doubles as its globals.
    exec(script, namespace)  # pylint: disable=exec-used
def _prepare_gnuplot(
    self,
    main_file_name=None,
    title='',
    comments=True,
    prettify_format=None,
    y_max_lim=None,
    y_min_lim=None,
    y_origin=0.
):
    """
    Prepare an gnuplot script to plot the bands, with the .dat file
    returned as an independent file.

    :param main_file_name: if the user asks to write the main content on a
        file, this contains the filename. This should be used to infer a
        good filename for the additional files.
        In this case, we remove the extension, and add '_data.dat'
    :param title: if specified, add a title to the plot
    :param comments: if True, print comments (if it makes sense for the given
        format)
    :param prettify_format: if None, use the default prettify format. Otherwise
        specify a string with the prettifier to use.
    :param y_max_lim: the maximum on the y axis (if None, use the maximum of the bands)
    :param y_min_lim: the minimum on the y axis (if None, use the minimum of the bands)
    :param y_origin: the new origin of the y axis -> all bands are replaced
        by bands-y_origin
    :return: a (script_bytes, extra_files_dict) tuple; the dict maps the .dat
        filename to the raw band data plotted by the script.
    """
    # pylint: disable=too-many-arguments,too-many-locals
    import os

    main_file_name = main_file_name or 'band.dat'
    dat_filename = os.path.splitext(main_file_name)[0] + '_data.dat'

    if prettify_format is None:
        # Default. Specified like this to allow caller functions to pass 'None'
        prettify_format = 'gnuplot_seekpath'

    plot_info = self._get_bandplot_data(
        cartesian=True, prettify_format=prettify_format, join_symbol='|', y_origin=y_origin
    )

    bands = plot_info['y']
    x = plot_info['x']

    # axis limits: fall back to the data extrema when not given by the caller
    if y_max_lim is None:
        y_max_lim = bands.max()
    if y_min_lim is None:
        y_min_lim = bands.min()
    x_min_lim = min(x)  # this isn't a numpy array, but a list
    x_max_lim = max(x)

    # first prepare the xy coordinates of the sets
    # NOTE(review): plot_info is passed positionally into the *unused*
    # main_file_name parameter of _prepare_dat_blocks, and y_origin is not
    # forwarded, so the .dat data is regenerated with the default origin —
    # confirm this is intended.
    raw_data, _ = self._prepare_dat_blocks(plot_info, comments=comments)

    xtics_string = ', '.join('"{}" {}'.format(label, pos) for pos, label in plot_info['labels'])

    script = []
    # Start with some useful comments
    if comments:
        script.append(prepare_header_comment(self.uuid, plot_info=plot_info, comment_char='# '))
    script.append('')

    script.append(
        """## Uncomment the next two lines to write directly to PDF
## Note: You need to have gnuplot installed with pdfcairo support!
#set term pdfcairo
#set output 'out.pdf'
### Uncomment one of the options below to change font
### For the LaTeX fonts, you can download them from here:
### https://sourceforge.net/projects/cm-unicode/
### And then install them in your system
## LaTeX Serif font, if installed
#set termopt font "CMU Serif, 12"
## LaTeX Sans Serif font, if installed
#set termopt font "CMU Sans Serif, 12"
## Classical Times New Roman
#set termopt font "Times New Roman, 12"
"""
    )

    # Actual logic
    script.append('set termopt enhanced')  # Properly deals with e.g. subscripts
    script.append('set encoding utf8')  # To deal with Greek letters
    script.append('set xtics ({})'.format(xtics_string))
    script.append('unset key')
    script.append('set yrange [{}:{}]'.format(y_min_lim, y_max_lim))
    script.append('set ylabel "{}"'.format('Dispersion ({})'.format(self.units)))

    if title:
        # NOTE(review): '\"' is the same string as '"', so this replace is a
        # no-op; a double quote inside the title would still break the script.
        script.append('set title "{}"'.format(title.replace('"', '\"')))

    # Plot, escaping filename
    if len(x) > 1:
        script.append('set xrange [{}:{}]'.format(x_min_lim, x_max_lim))
        script.append('set grid xtics lt 1 lc rgb "#888888"')
        script.append('plot "{}" with l lc rgb "#000000"'.format(os.path.basename(dat_filename).replace('"', '\"')))
    else:
        # Only one k-point: draw short horizontal segments instead of curves
        script.append('set xrange [-1.0:1.0]')
        script.append(
            'plot "{}" using ($1-0.25):($2):(0.5):(0) with vectors nohead lc rgb "#000000"'.format(
                os.path.basename(dat_filename).replace('"', '\"')
            )
        )

    script_data = '\n'.join(script) + '\n'

    extra_files = {dat_filename: raw_data}

    return script_data.encode('utf-8'), extra_files
def _prepare_agr(
    self,
    main_file_name='',
    comments=True,
    setnumber_offset=0,
    color_number=1,
    color_number2=2,
    legend='',
    title='',
    y_max_lim=None,
    y_min_lim=None,
    y_origin=0.,
    prettify_format=None
):
    """
    Prepare an xmgrace agr file.

    :param main_file_name: unused here; accepted only for signature
        uniformity with the other export methods.
    :param comments: if True, print comments
        (if it makes sense for the given format)
    :param setnumber_offset: an offset to be applied to all set numbers
        (i.e. s0 is replaced by s[offset], s1 by s[offset+1], etc.)
    :param color_number: the color number for lines, symbols, error bars
        and filling (should be less than the parameter MAX_NUM_AGR_COLORS
        defined below)
    :param color_number2: the color number for lines, symbols, error bars
        and filling for the second-type spins (should be less than the
        parameter MAX_NUM_AGR_COLORS defined below)
    :param legend: the legend (applied only to the first set)
    :param title: the title
    :param y_max_lim: the maximum on the y axis (if None, put the
        maximum of the bands); applied *after* shifting the origin
        by ``y_origin``
    :param y_min_lim: the minimum on the y axis (if None, put the
        minimum of the bands); applied *after* shifting the origin
        by ``y_origin``
    :param y_origin: the new origin of the y axis -> all bands are replaced
        by bands-y_origin
    :param prettify_format: if None, use the default prettify format. Otherwise
        specify a string with the prettifier to use.
    :return: a (content_bytes, extra_files_dict) tuple; the dict is empty as
        the .agr file is self-contained.
    :raises ValueError: if a color number exceeds MAX_NUM_AGR_COLORS
    """
    # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,unused-argument
    if prettify_format is None:
        # Default. Specified like this to allow caller functions to pass 'None'
        prettify_format = 'agr_seekpath'

    plot_info = self._get_bandplot_data(
        cartesian=True, prettify_format=prettify_format, join_symbol='|', y_origin=y_origin
    )

    import math

    # load the x and y of every set; the color indices must exist in the
    # color map declared in AGR_TEMPLATE
    if color_number > MAX_NUM_AGR_COLORS:
        raise ValueError('Color number is too high (should be less than {})'.format(MAX_NUM_AGR_COLORS))
    if color_number2 > MAX_NUM_AGR_COLORS:
        raise ValueError('Color number 2 is too high (should be less than {})'.format(MAX_NUM_AGR_COLORS))

    bands = plot_info['y']
    x = plot_info['x']
    # transpose so we iterate band-by-band rather than kpoint-by-kpoint
    the_bands = numpy.transpose(bands)
    labels = plot_info['labels']
    num_labels = len(labels)

    # axis limits: fall back to the data extrema when not given by the caller
    if y_max_lim is None:
        y_max_lim = the_bands.max()
    if y_min_lim is None:
        y_min_lim = the_bands.min()
    x_min_lim = min(x)  # this isn't a numpy array, but a list
    x_max_lim = max(x)
    # major-tick spacing on y: the largest power of ten within the data range
    ytick_spacing = 10**int(math.log10((y_max_lim - y_min_lim)))

    # prepare xticks labels
    sx1 = ''
    for i, label in enumerate(labels):
        sx1 += AGR_SINGLE_XTICK_TEMPLATE.substitute(
            index=i,
            coord=label[0],
            name=label[1],
        )
    xticks = AGR_XTICKS_TEMPLATE.substitute(
        num_labels=num_labels,
        single_xtick_templates=sx1,
    )

    # build the arrays with the xy coordinates
    all_sets = []
    for band in the_bands:
        this_set = ''
        for i in zip(x, band):
            line = '{:.8f}'.format(i[0]) + '\t' + '{:.8f}'.format(i[1]) + '\n'
            this_set += line
        all_sets.append(this_set)

    set_descriptions = ''
    # alternate between color_number and color_number2 by band type (spin)
    for i, (this_set, band_type) in enumerate(zip(all_sets, plot_info['band_type_idx'])):
        if band_type % 2 == 0:
            linecolor = color_number
        else:
            linecolor = color_number2
        width = str(2.0)
        set_descriptions += AGR_SET_DESCRIPTION_TEMPLATE.substitute(
            set_number=i + setnumber_offset,
            linewidth=width,
            color_number=linecolor,
            legend=legend if i == 0 else ''
        )

    units = self.units

    graphs = AGR_GRAPH_TEMPLATE.substitute(
        x_min_lim=x_min_lim,
        y_min_lim=y_min_lim,
        x_max_lim=x_max_lim,
        y_max_lim=y_max_lim,
        yaxislabel='Dispersion ({})'.format(units),
        xticks_template=xticks,
        set_descriptions=set_descriptions,
        ytick_spacing=ytick_spacing,
        title=title,
    )

    sets = []
    for i, this_set in enumerate(all_sets):
        sets.append(AGR_SINGLESET_TEMPLATE.substitute(set_number=i + setnumber_offset, xydata=this_set))
    the_sets = '&\n'.join(sets)

    string = AGR_TEMPLATE.substitute(graphs=graphs, sets=the_sets)

    if comments:
        string = prepare_header_comment(self.uuid, plot_info, comment_char='#') + '\n' + string

    return string.encode('utf-8'), {}
def _get_band_segments(self, cartesian):
"""Return the band segments."""
plot_info = self._get_bandplot_data(
cartesian=cartesian, prettify_format=None, join_symbol=None, get_segments=True
)
out_dict = {'label': self.label}
out_dict['path'] = plot_info['path']
out_dict['paths'] = plot_info['paths']
return out_dict
def _prepare_json(self, main_file_name='', comments=True):  # pylint: disable=unused-argument
    """
    Prepare a json file in a format compatible with the AiiDA band visualizer.

    :param main_file_name: unused, kept for signature uniformity with the
        other export methods
    :param comments: if True, add a file-header comment entry to the JSON
    :return: a (json_bytes, extra_files_dict) tuple; the dict is empty
    """
    from aiida import get_file_header
    from aiida.common import json

    # Key insertion order matters: json.dumps preserves it in the output.
    output = self._get_band_segments(cartesian=True)
    output['original_uuid'] = self.uuid
    if comments:
        output['comments'] = get_file_header(comment_char='')

    return json.dumps(output).encode('utf-8'), {}
# Number of colors declared in the AGR_TEMPLATE color map below; the color
# indices passed to _prepare_agr must not exceed this value.
MAX_NUM_AGR_COLORS = 15
# Top-level xmgrace (Grace) project template: font/color maps, page setup and
# the $graphs / $sets placeholders filled in by _prepare_agr.
AGR_TEMPLATE = Template(
    """
# Grace project file
#
@version 50122
@page size 792, 612
@page scroll 5%
@page inout 5%
@link page off
@map font 8 to "Courier", "Courier"
@map font 10 to "Courier-Bold", "Courier-Bold"
@map font 11 to "Courier-BoldOblique", "Courier-BoldOblique"
@map font 9 to "Courier-Oblique", "Courier-Oblique"
@map font 4 to "Helvetica", "Helvetica"
@map font 6 to "Helvetica-Bold", "Helvetica-Bold"
@map font 7 to "Helvetica-BoldOblique", "Helvetica-BoldOblique"
@map font 5 to "Helvetica-Oblique", "Helvetica-Oblique"
@map font 14 to "NimbusMonoL-BoldOblique", "NimbusMonoL-BoldOblique"
@map font 15 to "NimbusMonoL-Regular", "NimbusMonoL-Regular"
@map font 16 to "NimbusMonoL-RegularOblique", "NimbusMonoL-RegularOblique"
@map font 17 to "NimbusRomanNo9L-Medium", "NimbusRomanNo9L-Medium"
@map font 18 to "NimbusRomanNo9L-MediumItalic", "NimbusRomanNo9L-MediumItalic"
@map font 19 to "NimbusRomanNo9L-Regular", "NimbusRomanNo9L-Regular"
@map font 20 to "NimbusRomanNo9L-RegularItalic", "NimbusRomanNo9L-RegularItalic"
@map font 21 to "NimbusSansL-Bold", "NimbusSansL-Bold"
@map font 22 to "NimbusSansL-BoldCondensed", "NimbusSansL-BoldCondensed"
@map font 23 to "NimbusSansL-BoldCondensedItalic", "NimbusSansL-BoldCondensedItalic"
@map font 24 to "NimbusSansL-BoldItalic", "NimbusSansL-BoldItalic"
@map font 25 to "NimbusSansL-Regular", "NimbusSansL-Regular"
@map font 26 to "NimbusSansL-RegularCondensed", "NimbusSansL-RegularCondensed"
@map font 27 to "NimbusSansL-RegularCondensedItalic", "NimbusSansL-RegularCondensedItalic"
@map font 28 to "NimbusSansL-RegularItalic", "NimbusSansL-RegularItalic"
@map font 29 to "StandardSymbolsL-Regular", "StandardSymbolsL-Regular"
@map font 12 to "Symbol", "Symbol"
@map font 31 to "Symbol-Regular", "Symbol-Regular"
@map font 2 to "Times-Bold", "Times-Bold"
@map font 3 to "Times-BoldItalic", "Times-BoldItalic"
@map font 1 to "Times-Italic", "Times-Italic"
@map font 0 to "Times-Roman", "Times-Roman"
@map font 36 to "URWBookmanL-DemiBold", "URWBookmanL-DemiBold"
@map font 37 to "URWBookmanL-DemiBoldItalic", "URWBookmanL-DemiBoldItalic"
@map font 38 to "URWBookmanL-Light", "URWBookmanL-Light"
@map font 39 to "URWBookmanL-LightItalic", "URWBookmanL-LightItalic"
@map font 40 to "URWChanceryL-MediumItalic", "URWChanceryL-MediumItalic"
@map font 41 to "URWGothicL-Book", "URWGothicL-Book"
@map font 42 to "URWGothicL-BookOblique", "URWGothicL-BookOblique"
@map font 43 to "URWGothicL-Demi", "URWGothicL-Demi"
@map font 44 to "URWGothicL-DemiOblique", "URWGothicL-DemiOblique"
@map font 45 to "URWPalladioL-Bold", "URWPalladioL-Bold"
@map font 46 to "URWPalladioL-BoldItalic", "URWPalladioL-BoldItalic"
@map font 47 to "URWPalladioL-Italic", "URWPalladioL-Italic"
@map font 48 to "URWPalladioL-Roman", "URWPalladioL-Roman"
@map font 13 to "ZapfDingbats", "ZapfDingbats"
@map color 0 to (255, 255, 255), "white"
@map color 1 to (0, 0, 0), "black"
@map color 2 to (255, 0, 0), "red"
@map color 3 to (0, 255, 0), "green"
@map color 4 to (0, 0, 255), "blue"
@map color 5 to (255, 215, 0), "yellow"
@map color 6 to (188, 143, 143), "brown"
@map color 7 to (220, 220, 220), "grey"
@map color 8 to (148, 0, 211), "violet"
@map color 9 to (0, 255, 255), "cyan"
@map color 10 to (255, 0, 255), "magenta"
@map color 11 to (255, 165, 0), "orange"
@map color 12 to (114, 33, 188), "indigo"
@map color 13 to (103, 7, 72), "maroon"
@map color 14 to (64, 224, 208), "turquoise"
@map color 15 to (0, 139, 0), "green4"
@reference date 0
@date wrap off
@date wrap year 1950
@default linewidth 1.0
@default linestyle 1
@default color 1
@default pattern 1
@default font 0
@default char size 1.000000
@default symbol size 1.000000
@default sformat "%.8g"
@background color 0
@page background fill on
@timestamp off
@timestamp 0.03, 0.03
@timestamp color 1
@timestamp rot 0
@timestamp font 0
@timestamp char size 1.000000
@timestamp def "Wed Jul 30 16:44:34 2014"
@r0 off
@link r0 to g0
@r0 type above
@r0 linestyle 1
@r0 linewidth 1.0
@r0 color 1
@r0 line 0, 0, 0, 0
@r1 off
@link r1 to g0
@r1 type above
@r1 linestyle 1
@r1 linewidth 1.0
@r1 color 1
@r1 line 0, 0, 0, 0
@r2 off
@link r2 to g0
@r2 type above
@r2 linestyle 1
@r2 linewidth 1.0
@r2 color 1
@r2 line 0, 0, 0, 0
@r3 off
@link r3 to g0
@r3 type above
@r3 linestyle 1
@r3 linewidth 1.0
@r3 color 1
@r3 line 0, 0, 0, 0
@r4 off
@link r4 to g0
@r4 type above
@r4 linestyle 1
@r4 linewidth 1.0
@r4 color 1
@r4 line 0, 0, 0, 0
$graphs
$sets
"""
)
# Header of the xmgrace xtick specification: $num_labels entries follow,
# rendered from $single_xtick_templates.
AGR_XTICKS_TEMPLATE = Template("""
@ xaxis tick spec $num_labels
$single_xtick_templates
""")

# One major tick ($index) at position $coord, labelled "$name".
AGR_SINGLE_XTICK_TEMPLATE = Template(
    """
@ xaxis tick major $index, $coord
@ xaxis ticklabel $index, "$name"
"""
)
# xmgrace graph (g0) setup: world/view, title, axes, ticks, legend and frame;
# placeholders are filled by _prepare_agr ($xticks_template comes from
# AGR_XTICKS_TEMPLATE, $set_descriptions from AGR_SET_DESCRIPTION_TEMPLATE).
AGR_GRAPH_TEMPLATE = Template(
    """
@g0 on
@g0 hidden false
@g0 type XY
@g0 stacked false
@g0 bar hgap 0.000000
@g0 fixedpoint off
@g0 fixedpoint type 0
@g0 fixedpoint xy 0.000000, 0.000000
@g0 fixedpoint format general general
@g0 fixedpoint prec 6, 6
@with g0
@ world $x_min_lim, $y_min_lim, $x_max_lim, $y_max_lim
@ stack world 0, 0, 0, 0
@ znorm 1
@ view 0.150000, 0.150000, 1.150000, 0.850000
@ title "$title"
@ title font 0
@ title size 1.500000
@ title color 1
@ subtitle ""
@ subtitle font 0
@ subtitle size 1.000000
@ subtitle color 1
@ xaxes scale Normal
@ yaxes scale Normal
@ xaxes invert off
@ yaxes invert off
@ xaxis on
@ xaxis type zero false
@ xaxis offset 0.000000 , 0.000000
@ xaxis bar on
@ xaxis bar color 1
@ xaxis bar linestyle 1
@ xaxis bar linewidth 1.0
@ xaxis label ""
@ xaxis label layout para
@ xaxis label place auto
@ xaxis label char size 1.000000
@ xaxis label font 4
@ xaxis label color 1
@ xaxis label place normal
@ xaxis tick on
@ xaxis tick major 5
@ xaxis tick minor ticks 0
@ xaxis tick default 6
@ xaxis tick place rounded true
@ xaxis tick in
@ xaxis tick major size 1.000000
@ xaxis tick major color 1
@ xaxis tick major linewidth 1.0
@ xaxis tick major linestyle 1
@ xaxis tick major grid on
@ xaxis tick minor color 1
@ xaxis tick minor linewidth 1.0
@ xaxis tick minor linestyle 1
@ xaxis tick minor grid off
@ xaxis tick minor size 0.500000
@ xaxis ticklabel on
@ xaxis ticklabel format general
@ xaxis ticklabel prec 5
@ xaxis ticklabel formula ""
@ xaxis ticklabel append ""
@ xaxis ticklabel prepend ""
@ xaxis ticklabel angle 0
@ xaxis ticklabel skip 0
@ xaxis ticklabel stagger 0
@ xaxis ticklabel place normal
@ xaxis ticklabel offset auto
@ xaxis ticklabel offset 0.000000 , 0.010000
@ xaxis ticklabel start type auto
@ xaxis ticklabel start 0.000000
@ xaxis ticklabel stop type auto
@ xaxis ticklabel stop 0.000000
@ xaxis ticklabel char size 1.500000
@ xaxis ticklabel font 4
@ xaxis ticklabel color 1
@ xaxis tick place both
@ xaxis tick spec type both
$xticks_template
@ yaxis on
@ yaxis type zero false
@ yaxis offset 0.000000 , 0.000000
@ yaxis bar on
@ yaxis bar color 1
@ yaxis bar linestyle 1
@ yaxis bar linewidth 1.0
@ yaxis label "$yaxislabel"
@ yaxis label layout para
@ yaxis label place auto
@ yaxis label char size 1.500000
@ yaxis label font 4
@ yaxis label color 1
@ yaxis label place normal
@ yaxis tick on
@ yaxis tick major $ytick_spacing
@ yaxis tick minor ticks 1
@ yaxis tick default 6
@ yaxis tick place rounded true
@ yaxis tick in
@ yaxis tick major size 1.000000
@ yaxis tick major color 1
@ yaxis tick major linewidth 1.0
@ yaxis tick major linestyle 1
@ yaxis tick major grid off
@ yaxis tick minor color 1
@ yaxis tick minor linewidth 1.0
@ yaxis tick minor linestyle 1
@ yaxis tick minor grid off
@ yaxis tick minor size 0.500000
@ yaxis ticklabel on
@ yaxis ticklabel format general
@ yaxis ticklabel prec 5
@ yaxis ticklabel formula ""
@ yaxis ticklabel append ""
@ yaxis ticklabel prepend ""
@ yaxis ticklabel angle 0
@ yaxis ticklabel skip 0
@ yaxis ticklabel stagger 0
@ yaxis ticklabel place normal
@ yaxis ticklabel offset auto
@ yaxis ticklabel offset 0.000000 , 0.010000
@ yaxis ticklabel start type auto
@ yaxis ticklabel start 0.000000
@ yaxis ticklabel stop type auto
@ yaxis ticklabel stop 0.000000
@ yaxis ticklabel char size 1.250000
@ yaxis ticklabel font 4
@ yaxis ticklabel color 1
@ yaxis tick place both
@ yaxis tick spec type none
@ altxaxis off
@ altyaxis off
@ legend on
@ legend loctype view
@ legend 0.85, 0.8
@ legend box color 1
@ legend box pattern 1
@ legend box linewidth 1.0
@ legend box linestyle 1
@ legend box fill color 0
@ legend box fill pattern 1
@ legend font 0
@ legend char size 1.000000
@ legend color 1
@ legend length 4
@ legend vgap 1
@ legend hgap 1
@ legend invert false
@ frame type 0
@ frame linestyle 1
@ frame linewidth 1.0
@ frame color 1
@ frame pattern 1
@ frame background color 0
@ frame background pattern 0
$set_descriptions
"""
)
# Per-set (per-band) style block: line width/color, symbols, error bars and
# the legend text; $set_number, $linewidth, $color_number and $legend are
# substituted by _prepare_agr.
AGR_SET_DESCRIPTION_TEMPLATE = Template(
    """
@ s$set_number hidden false
@ s$set_number type xy
@ s$set_number symbol 0
@ s$set_number symbol size 1.000000
@ s$set_number symbol color $color_number
@ s$set_number symbol pattern 1
@ s$set_number symbol fill color $color_number
@ s$set_number symbol fill pattern 0
@ s$set_number symbol linewidth 1.0
@ s$set_number symbol linestyle 1
@ s$set_number symbol char 65
@ s$set_number symbol char font 0
@ s$set_number symbol skip 0
@ s$set_number line type 1
@ s$set_number line linestyle 1
@ s$set_number line linewidth $linewidth
@ s$set_number line color $color_number
@ s$set_number line pattern 1
@ s$set_number baseline type 0
@ s$set_number baseline off
@ s$set_number dropline off
@ s$set_number fill type 0
@ s$set_number fill rule 0
@ s$set_number fill color $color_number
@ s$set_number fill pattern 1
@ s$set_number avalue off
@ s$set_number avalue type 2
@ s$set_number avalue char size 1.000000
@ s$set_number avalue font 0
@ s$set_number avalue color 1
@ s$set_number avalue rot 0
@ s$set_number avalue format general
@ s$set_number avalue prec 3
@ s$set_number avalue prepend ""
@ s$set_number avalue append ""
@ s$set_number avalue offset 0.000000 , 0.000000
@ s$set_number errorbar on
@ s$set_number errorbar place both
@ s$set_number errorbar color $color_number
@ s$set_number errorbar pattern 1
@ s$set_number errorbar size 1.000000
@ s$set_number errorbar linewidth 1.0
@ s$set_number errorbar linestyle 1
@ s$set_number errorbar riser linewidth 1.0
@ s$set_number errorbar riser linestyle 1
@ s$set_number errorbar riser clip off
@ s$set_number errorbar riser clip length 0.100000
@ s$set_number comment "Cols 1:2"
@ s$set_number legend "$legend"
"""
)

# The xy data payload of one set; $xydata holds the tab-separated rows.
AGR_SINGLESET_TEMPLATE = Template("""
@target G0.S$set_number
@type xy
$xydata
""")
# text.latex.preview=True is needed to have a proper alignment of
# tick marks with and without subscripts
# see e.g. http://matplotlib.org/1.3.0/examples/pylab_examples/usetex_baseline_test.html
MATPLOTLIB_HEADER_AGG_TEMPLATE = Template(
"""# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
from matplotlib import rc
# Uncomment to change default font
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('font', **{'family': 'serif', 'serif': ['Computer Modern', 'CMU Serif', 'Times New Roman', 'DejaVu Serif']})
# To use proper font for, e.g., Gamma if usetex is set to False
rc('mathtext', fontset='cm')
rc('text', usetex=True)
import matplotlib.pyplot as plt
plt.rcParams.update({'text.latex.preview': True})
import pylab as pl
# I use json to make sure the input is sanitized
import json
print_comment = False
"""
)
# text.latex.preview=True is needed to have a proper alignment of
# tick marks with and without subscripts
# see e.g. http://matplotlib.org/1.3.0/examples/pylab_examples/usetex_baseline_test.html
MATPLOTLIB_HEADER_TEMPLATE = Template(
"""# -*- coding: utf-8 -*-
from matplotlib import rc
# Uncomment to change default font
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('font', **{'family': 'serif', 'serif': ['Computer Modern', 'CMU Serif', 'Times New Roman', 'DejaVu Serif']})
# To use proper font for, e.g., Gamma if usetex is set to False
rc('mathtext', fontset='cm')
rc('text', usetex=True)
import matplotlib.pyplot as plt
plt.rcParams.update({'text.latex.preview': True})
import pylab as pl
# I use json to make sure the input is sanitized
import json
print_comment = False
"""
)
MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE = Template('''all_data_str = r"""$all_data_json"""
''')
MATPLOTLIB_IMPORT_DATA_FROMFILE_TEMPLATE = Template(
"""with open("$json_fname", encoding='utf8') as f:
all_data_str = f.read()
"""
)
MULTI_KP = """
for path in paths:
if path['length'] <= 1:
# Avoid printing empty lines
continue
x = path['x']
#for band in bands:
for band, band_type in zip(path['values'], all_data['band_type_idx']):
# For now we support only two colors
if band_type % 2 == 0:
further_plot_options = further_plot_options1
else:
further_plot_options = further_plot_options2
# Put the legend text only once
label = None
if first_band_1 and band_type % 2 == 0:
first_band_1 = False
label = all_data.get('legend_text', None)
elif first_band_2 and band_type % 2 == 1:
first_band_2 = False
label = all_data.get('legend_text2', None)
p.plot(x, band, label=label,
**further_plot_options
)
"""
SINGLE_KP = """
path = paths[0]
values = path['values']
x = [path['x'] for _ in values]
p.scatter(x, values, marker="_")
"""
MATPLOTLIB_BODY_TEMPLATE = Template(
"""all_data = json.loads(all_data_str)
if not all_data.get('use_latex', False):
rc('text', usetex=False)
#x = all_data['x']
#bands = all_data['bands']
paths = all_data['paths']
tick_pos = all_data['tick_pos']
tick_labels = all_data['tick_labels']
# Option for bands (all, or those of type 1 if there are two spins)
further_plot_options1 = {}
further_plot_options1['color'] = all_data.get('bands_color', 'k')
further_plot_options1['linewidth'] = all_data.get('bands_linewidth', 0.5)
further_plot_options1['linestyle'] = all_data.get('bands_linestyle', None)
further_plot_options1['marker'] = all_data.get('bands_marker', None)
further_plot_options1['markersize'] = all_data.get('bands_markersize', None)
further_plot_options1['markeredgecolor'] = all_data.get('bands_markeredgecolor', None)
further_plot_options1['markeredgewidth'] = all_data.get('bands_markeredgewidth', None)
further_plot_options1['markerfacecolor'] = all_data.get('bands_markerfacecolor', None)
# Options for second-type of bands if present (e.g. spin up vs. spin down)
further_plot_options2 = {}
further_plot_options2['color'] = all_data.get('bands_color2', 'r')
# Use the values of further_plot_options1 by default
further_plot_options2['linewidth'] = all_data.get('bands_linewidth2',
further_plot_options1['linewidth']
)
further_plot_options2['linestyle'] = all_data.get('bands_linestyle2',
further_plot_options1['linestyle']
)
further_plot_options2['marker'] = all_data.get('bands_marker2',
further_plot_options1['marker']
)
further_plot_options2['markersize'] = all_data.get('bands_markersize2',
further_plot_options1['markersize']
)
further_plot_options2['markeredgecolor'] = all_data.get('bands_markeredgecolor2',
further_plot_options1['markeredgecolor']
)
further_plot_options2['markeredgewidth'] = all_data.get('bands_markeredgewidth2',
further_plot_options1['markeredgewidth']
)
further_plot_options2['markerfacecolor'] = all_data.get('bands_markerfacecolor2',
further_plot_options1['markerfacecolor']
)
fig = pl.figure()
p = fig.add_subplot(1,1,1)
first_band_1 = True
first_band_2 = True
${plot_code}
p.set_xticks(tick_pos)
p.set_xticklabels(tick_labels)
p.set_xlim([all_data['x_min_lim'], all_data['x_max_lim']])
p.set_ylim([all_data['y_min_lim'], all_data['y_max_lim']])
p.xaxis.grid(True, which='major', color='#888888', linestyle='-', linewidth=0.5)
if all_data.get('plot_zero_axis', False):
p.axhline(
0.,
color=all_data.get('zero_axis_color', '#888888'),
linestyle=all_data.get('zero_axis_linestyle', '--'),
linewidth=all_data.get('zero_axis_linewidth', 0.5),
)
if all_data['title']:
p.set_title(all_data['title'])
if all_data['legend_text']:
p.legend(loc='best')
p.set_ylabel(all_data['yaxis_label'])
try:
if print_comment:
print(all_data['comment'])
except KeyError:
pass
"""
)
# Script footers: show the plot interactively, or save it to file
# (optionally with an explicit dpi).
MATPLOTLIB_FOOTER_TEMPLATE_SHOW = Template("""pl.show()""")
MATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE = Template("""pl.savefig("$fname", format="$format")""")
MATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE_WITH_DPI = Template("""pl.savefig("$fname", format="$format", dpi=$dpi)""")
| 38.625763 | 128 | 0.617595 |
if (any(i[0] == fermi_energy for i in max_mins) and any(i[1] == fermi_energy for i in max_mins)):
return False, 0.
homo = max([i[0] for i in max_mins if i[0] < fermi_energy])
lumo = min([i[1] for i in max_mins if i[1] > fermi_energy])
gap = lumo - homo
if gap <= 0.:
raise Exception('Something wrong has been implemented. Revise the code!')
return True, gap
class BandsData(KpointsData):
def set_kpointsdata(self, kpointsdata):
    """Copy cell, periodic boundary conditions, kpoints, weights and labels
    from another KpointsData node into this node.

    :param kpointsdata: an instance of KpointsData.
    :raise ValueError: if ``kpointsdata`` is not a KpointsData instance.
    """
    if not isinstance(kpointsdata, KpointsData):
        raise ValueError('kpointsdata must be of the KpointsData class')
    # Cell and PBC are optional on the source node: copy them best-effort.
    for attr_name in ('cell', 'pbc'):
        try:
            setattr(self, attr_name, getattr(kpointsdata, attr_name))
        except AttributeError:
            pass
    try:
        the_kpoints = kpointsdata.get_kpoints()
    except AttributeError:
        the_kpoints = None
    try:
        the_weights = kpointsdata.get_kpoints(also_weights=True)[1]
    except AttributeError:
        the_weights = None
    self.set_kpoints(the_kpoints, weights=the_weights)
    # Labels may be missing or unset (TypeError) on the source node.
    try:
        self.labels = kpointsdata.labels
    except (AttributeError, TypeError):
        self.labels = []
def _validate_bands_occupations(self, bands, occupations=None, labels=None):
    """Validate bands (and optional occupations/labels) against the stored kpoints.

    :param bands: array-like of shape [N_kpoints, N_bands] or
        [N_arrays, N_kpoints, N_bands] (e.g. one array per spin channel).
    :param occupations: optional array-like with the same shape as ``bands``.
    :param labels: optional string, or list of strings (one per band array).
    :return: tuple ``(the_bands, the_occupations, the_labels)`` — numpy arrays
        (and a list of strings), with ``None`` where the optional input was
        not provided.
    :raise AttributeError: if the kpoints were not set beforehand.
    :raise ValueError: for wrong dimensionality, shape mismatches or
        non-float entries.
    :raise ValidationError: for invalid label types or counts.
    """
    try:
        kpoints = self.get_kpoints()
    except AttributeError:
        raise AttributeError('Must first set the kpoints, then the bands')
    the_bands = numpy.array(bands)
    if len(the_bands.shape) not in [2, 3]:
        raise ValueError(
            'Bands must be an array of dimension 2'
            '([N_kpoints, N_bands]) or of dimension 3 '
            ' ([N_arrays, N_kpoints, N_bands]), found instead {}'.format(len(the_bands.shape))
        )
    # Arrays that could not be coerced to float64 get an element-wise check
    # below (None entries are allowed, anything else must be float-able).
    list_of_arrays_to_be_checked = []
    # Number of kpoints is axis 0 for 2D input, axis 1 for 3D input.
    num_kpoints_from_bands = the_bands.shape[0] if len(the_bands.shape) == 2 else the_bands.shape[1]
    if num_kpoints_from_bands != len(kpoints):
        raise ValueError('There must be energy values for every kpoint')
    if occupations is not None:
        the_occupations = numpy.array(occupations)
        if the_occupations.shape != the_bands.shape:
            raise ValueError(
                'Shape of occupations {} different from shape'
                'shape of bands {}'.format(the_occupations.shape, the_bands.shape)
            )
        # NOTE(review): the occupations check is gated on the *bands* dtype —
        # confirm this asymmetry is intended.
        if not the_bands.dtype.type == numpy.float64:
            list_of_arrays_to_be_checked.append([the_occupations, 'occupations'])
    else:
        the_occupations = None
    if not the_bands.dtype.type == numpy.float64:
        list_of_arrays_to_be_checked.append([the_bands, 'bands'])
    for x, msg in list_of_arrays_to_be_checked:
        try:
            [float(_) for _ in x.flatten() if _ is not None]
        except (TypeError, ValueError):
            raise ValueError('The {} array can only contain float or None values'.format(msg))
    if labels is not None:
        if isinstance(labels, str):
            the_labels = [str(labels)]
        elif isinstance(labels, (tuple, list)) and all([isinstance(_, str) for _ in labels]):
            the_labels = [str(_) for _ in labels]
        else:
            raise ValidationError(
                'Band labels have an unrecognized type ({})'
                'but should be a string or a list of strings'.format(labels.__class__)
            )
        # One label for a single 2D array; one label per array for 3D input.
        if len(the_bands.shape) == 2 and len(the_labels) != 1:
            raise ValidationError('More array labels than the number of arrays')
        elif len(the_bands.shape) == 3 and len(the_labels) != the_bands.shape[0]:
            raise ValidationError('More array labels than the number of arrays')
    else:
        the_labels = None
    return the_bands, the_occupations, the_labels
def set_bands(self, bands, units=None, occupations=None, labels=None):
    """Validate and store the bands (and, optionally, occupations and labels).

    :param bands: array of energies, [N_kpoints, N_bands] or
        [N_arrays, N_kpoints, N_bands].
    :param units: units of the energies (stored via the ``units`` property).
    :param occupations: optional array with the same shape as ``bands``.
    :param labels: optional label(s) for the band array(s).
    """
    validated = self._validate_bands_occupations(bands, occupations, labels)
    the_bands, the_occupations, the_labels = validated
    self.set_array('bands', the_bands)
    self.units = units
    if the_labels is not None:
        self.set_attribute('array_labels', the_labels)
    if the_occupations is not None:
        self.set_array('occupations', the_occupations)
@property
def array_labels(self):
    """Return the labels associated with the band arrays, or None if unset."""
    return self.get_attribute('array_labels', None)
@property
def units(self):
    """Return the units in which the bands were stored (raises if never set)."""
    return self.get_attribute('units')

@units.setter
def units(self, value):
    """Store the units of the bands as ``str(value)``."""
    # NOTE(review): str(None) is the literal string 'None'; setting
    # units=None therefore stores 'None' — confirm this is intended.
    the_str = str(value)
    self.set_attribute('units', the_str)
def _set_pbc(self, value):
    """Validate and store the periodic-boundary-condition flags.

    The three flags are stored as the attributes ``pbc1``/``pbc2``/``pbc3``.

    :raise ModificationNotAllowed: if the node has already been stored.
    """
    from aiida.common.exceptions import ModificationNotAllowed
    from aiida.orm.nodes.data.structure import get_valid_pbc
    if self.is_stored:
        raise ModificationNotAllowed('The KpointsData object cannot be modified, it has already been stored')
    the_pbc = get_valid_pbc(value)
    self.set_attribute('pbc1', the_pbc[0])
    self.set_attribute('pbc2', the_pbc[1])
    self.set_attribute('pbc3', the_pbc[2])
def get_bands(self, also_occupations=False, also_labels=False):
    """Return the stored bands, optionally together with occupations and labels.

    :param also_occupations: if True, also return the occupations array.
    :param also_labels: if True, also return the array labels.
    :return: the bands array alone, or a list ``[bands, ...]`` with the
        requested extras appended in order.
    :raise AttributeError: if bands (or requested occupations) were not set.
    """
    try:
        bands = numpy.array(self.get_array('bands'))
    except KeyError:
        raise AttributeError('No stored bands has been found')
    if not (also_occupations or also_labels):
        return bands
    result = [bands]
    if also_occupations:
        try:
            occupations = numpy.array(self.get_array('occupations'))
        except KeyError:
            raise AttributeError('No occupations were set')
        result.append(occupations)
    if also_labels:
        result.append(self.array_labels)
    return result
def _get_bandplot_data(self, cartesian, prettify_format=None, join_symbol=None, get_segments=False, y_origin=0.):
    """Collect the data needed by the various band-structure exporters.

    :param cartesian: if True, compute distances from cartesian kpoint
        coordinates (falls back to reciprocal coordinates if no cell is set).
    :param prettify_format: if truthy, prettify the kpoint labels with it.
    :param join_symbol: if truthy, join coinciding labels with this symbol.
    :param get_segments: if True, additionally split the bands into
        per-segment 'paths' entries.
    :param y_origin: energy value subtracted from all bands.
    :return: dict with keys 'x', 'y', 'band_type_idx', 'raw_labels', 'labels'
        and — when ``get_segments`` — 'path' and 'paths'.
    """
    stored_bands = self.get_bands()
    if len(stored_bands.shape) == 2:
        # Single band array: [N_kpoints, N_bands]
        bands = stored_bands
        band_type_idx = numpy.array([0] * stored_bands.shape[1])
        two_band_types = False
    elif len(stored_bands.shape) == 3:
        # Multiple band arrays (e.g. two spins): concatenate along bands.
        # NOTE(review): band_type_idx assumes exactly two arrays — confirm.
        bands = numpy.concatenate(stored_bands, axis=1)
        band_type_idx = numpy.array([0] * stored_bands.shape[2] + [1] * stored_bands.shape[2])
        two_band_types = True
    else:
        raise ValueError('Unexpected shape of bands')
    bands -= y_origin
    # here I build the x distances on the graph (in cartesian coordinates
    # if cartesian==True AND if the cell was set, otherwise in reciprocal
    # coordinates)
    try:
        kpoints = self.get_kpoints(cartesian=cartesian)
    except AttributeError:
        # this error is happening if cartesian==True and if no cell has been
        # set -> we switch to reciprocal coordinates to compute distances
        kpoints = self.get_kpoints()
    # I take advantage of the path to recognize discontinuities
    try:
        labels = self.labels
        labels_indices = [i[0] for i in labels]
    except (AttributeError, TypeError):
        labels = []
        labels_indices = []
    # since I can have discontinuous paths, I set on those points the distance to zero
    # as a result, where there are discontinuities in the path,
    # I have two consecutive points with the same x coordinate
    distances = [
        numpy.linalg.norm(kpoints[i] -
                          kpoints[i - 1]) if not (i in labels_indices and i - 1 in labels_indices) else 0.
        for i in range(1, len(kpoints))
    ]
    # Cumulative distance along the path: the plot abscissa for each kpoint.
    x = [float(sum(distances[:i])) for i in range(len(distances) + 1)]
    # transform the index of the labels in the coordinates of x
    raw_labels = [(x[i[0]], i[1]) for i in labels]
    the_labels = raw_labels
    if prettify_format:
        the_labels = prettify_labels(the_labels, format=prettify_format)
    if join_symbol:
        the_labels = join_labels(the_labels, join_symbol=join_symbol)
    plot_info = {}
    plot_info['x'] = x
    plot_info['y'] = bands
    plot_info['band_type_idx'] = band_type_idx
    plot_info['raw_labels'] = raw_labels
    plot_info['labels'] = the_labels
    if get_segments:
        plot_info['path'] = []
        plot_info['paths'] = []
        if len(labels) > 1:
            # I add an empty label that points to the first band if the first label does not do it
            if labels[0][0] != 0:
                labels.insert(0, (0, ''))
            # I add an empty label that points to the last band if the last label does not do it
            if labels[-1][0] != len(bands) - 1:
                labels.append((len(bands) - 1, ''))
            for (position_from, label_from), (position_to, label_to) in zip(labels[:-1], labels[1:]):
                if position_to - position_from > 1:
                    # Create a new path line only if there are at least two points,
                    # otherwise it is probably just a discontinuity point in the band
                    # structure (e.g. Gamma-X|Y-Gamma), where X and Y would be two
                    # consecutive points, but there is no path between them
                    plot_info['path'].append([label_from, label_to])
                path_dict = {
                    'length': position_to - position_from,
                    'from': label_from,
                    'to': label_to,
                    'values': bands[position_from:position_to + 1, :].transpose().tolist(),
                    'x': x[position_from:position_to + 1],
                    'two_band_types': two_band_types,
                }
                plot_info['paths'].append(path_dict)
        else:
            # No usable labels: a single segment covering all kpoints.
            label_from = '0'
            label_to = '1'
            path_dict = {
                'length': bands.shape[0] - 1,
                'from': label_from,
                'to': label_to,
                'values': bands.transpose().tolist(),
                'x': x,
                'two_band_types': two_band_types,
            }
            plot_info['paths'].append(path_dict)
            plot_info['path'].append([label_from, label_to])
    return plot_info
def _prepare_agr_batch(self, main_file_name='', comments=True, prettify_format=None):
    """Prepare a grace (xmgrace) batch script plus the corresponding data file.

    :param main_file_name: the data file name is derived from it
        ('<base>_data.dat').
    :param comments: if True, prepend a provenance header to the batch script.
    :param prettify_format: label prettifier name (default 'agr_seekpath').
    :return: tuple ``(batch_bytes, {dat_filename: dat_bytes})``.
    """
    # pylint: disable=too-many-locals
    import os
    dat_filename = os.path.splitext(main_file_name)[0] + '_data.dat'
    if prettify_format is None:
        # Default. Specified like this to allow caller functions to pass 'None'
        prettify_format = 'agr_seekpath'
    plot_info = self._get_bandplot_data(cartesian=True, prettify_format=prettify_format, join_symbol='|')
    bands = plot_info['y']
    x = plot_info['x']
    labels = plot_info['labels']
    num_bands = bands.shape[1]
    # axis limits
    y_max_lim = bands.max()
    y_min_lim = bands.min()
    x_min_lim = min(x)  # this isn't a numpy array, but a list
    x_max_lim = max(x)
    # NOTE(review): plot_info is passed positionally as `main_file_name`,
    # which _prepare_dat_blocks ignores (it recomputes its own plot data) —
    # confirm this is intended.
    raw_data, _ = self._prepare_dat_blocks(plot_info)
    batch = []
    if comments:
        batch.append(prepare_header_comment(self.uuid, plot_info, comment_char='#'))
    batch.append('READ XY "{}"'.format(dat_filename))
    # axis limits and labels
    batch.append('world {}, {}, {}, {}'.format(x_min_lim, y_min_lim, x_max_lim, y_max_lim))
    batch.append('yaxis label "Dispersion"')
    batch.append('xaxis tick place both')
    batch.append('xaxis tick spec type both')
    batch.append('xaxis tick spec {}'.format(len(labels)))
    # set the name of the special points
    for index, label in enumerate(labels):
        batch.append('xaxis tick major {}, {}'.format(index, label[0]))
        batch.append('xaxis ticklabel {}, "{}"'.format(index, label[1]))
    batch.append('xaxis tick major color 7')
    batch.append('xaxis tick major grid on')
    batch.append('yaxis tick minor ticks 3')
    batch.append('frame linewidth 1.0')
    batch.append('map font 4 to "Helvetica", "Helvetica"')
    batch.append('yaxis label font 4')
    batch.append('xaxis label font 4')
    # set color and linewidths of bands
    for index in range(num_bands):
        batch.append('s{} line color 1'.format(index))
        batch.append('s{} linewidth 1'.format(index))
    batch_data = '\n'.join(batch) + '\n'
    extra_files = {dat_filename: raw_data}
    return batch_data.encode('utf-8'), extra_files
def _prepare_dat_multicolumn(self, main_file_name='', comments=True):
    """Prepare a tab-separated file: one row per kpoint, with the x coordinate
    followed by all band energies at that kpoint.

    :param comments: if True, prepend a provenance header.
    :return: tuple ``(dat_bytes, {})``.
    """
    plot_info = self._get_bandplot_data(cartesian=True, prettify_format=None, join_symbol='|')
    lines = []
    if comments:
        lines.append(prepare_header_comment(self.uuid, plot_info, comment_char='#'))
    for abscissa, energies in zip(plot_info['x'], plot_info['y']):
        row = ['{:.8f}'.format(abscissa)]
        row.extend('{:.8f}'.format(energy) for energy in energies)
        lines.append('\t'.join(row))
    return ('\n'.join(lines) + '\n').encode('utf-8'), {}
def _prepare_dat_blocks(self, main_file_name='', comments=True):
    """Prepare a gnuplot-style data file: one (x, energy) block per band,
    blocks separated by two blank lines.

    :param comments: if True, prepend a provenance header.
    :return: tuple ``(dat_bytes, {})``.
    """
    plot_info = self._get_bandplot_data(cartesian=True, prettify_format=None, join_symbol='|')
    lines = []
    if comments:
        lines.append(prepare_header_comment(self.uuid, plot_info, comment_char='#'))
    for band in numpy.transpose(plot_info['y']):
        lines.extend(
            '{:.8f}\t{:.8f}'.format(abscissa, energy) for abscissa, energy in zip(plot_info['x'], band)
        )
        # Two empty lines mark the end of a block for gnuplot.
        lines.append('')
        lines.append('')
    return '\n'.join(lines).encode('utf-8'), {}
def _matplotlib_get_dict(
    self,
    main_file_name='',
    comments=True,
    title='',
    legend=None,
    legend2=None,
    y_max_lim=None,
    y_min_lim=None,
    y_origin=0.,
    prettify_format=None,
    **kwargs
):
    """Collect all data and options needed by the matplotlib script templates.

    :param main_file_name: accepted for interface uniformity with the other
        ``_prepare_*`` exporters; not used here.
    :param comments: if True, include a provenance comment in the data.
    :param title: plot title.
    :param legend: legend text for the first band type.
    :param legend2: legend text for the second band type.
    :param y_max_lim: upper y limit (default: maximum band energy).
    :param y_min_lim: lower y limit (default: minimum band energy).
    :param y_origin: energy value subtracted from all bands.
    :param prettify_format: label prettifier name (default 'latex_seekpath').
    :param kwargs: additional plot options; every key must appear in
        ``valid_additional_keywords`` below, otherwise TypeError is raised.
    :return: dict ready to be JSON-serialized into the generated script.
    """
    valid_additional_keywords = [
        'bands_color',
        'bands_linewidth',
        'bands_linestyle',
        'bands_marker',
        'bands_markersize',
        'bands_markeredgecolor',
        'bands_markeredgewidth',
        'bands_markerfacecolor',
        'bands_color2',
        'bands_linewidth2',
        'bands_linestyle2',
        'bands_marker2',
        'bands_markersize2',
        'bands_markeredgecolor2',
        'bands_markeredgewidth2',
        'bands_markerfacecolor2',
        'plot_zero_axis',
        'zero_axis_color',
        'zero_axis_linestyle',
        'zero_axis_linewidth',
        'use_latex',
    ]
    # We deliberately do not force the Agg backend here: that would be
    # needed only without an X server, and would break inline use
    # (e.g. in jupyter).
    if prettify_format is None:
        prettify_format = 'latex_seekpath'
    # Use a LaTeX-safe separator for joined labels only when usetex is on.
    join_symbol = r'\textbar{}' if kwargs.get('use_latex', False) else '|'
    plot_info = self._get_bandplot_data(
        cartesian=True,
        prettify_format=prettify_format,
        join_symbol=join_symbol,
        get_segments=True,
        y_origin=y_origin
    )
    all_data = {}
    bands = plot_info['y']
    x = plot_info['x']
    labels = plot_info['labels']
    if labels:
        tick_pos, tick_labels = zip(*labels)
    else:
        tick_pos = []
        tick_labels = []
    all_data['paths'] = plot_info['paths']
    all_data['band_type_idx'] = plot_info['band_type_idx'].tolist()
    all_data['tick_pos'] = tick_pos
    all_data['tick_labels'] = tick_labels
    all_data['legend_text'] = legend
    all_data['legend_text2'] = legend2
    all_data['yaxis_label'] = 'Dispersion ({})'.format(self.units)
    all_data['title'] = title
    if comments:
        all_data['comment'] = prepare_header_comment(self.uuid, plot_info, comment_char='#')
    # Axis limits default to the data range.
    if y_max_lim is None:
        y_max_lim = numpy.array(bands).max()
    if y_min_lim is None:
        y_min_lim = numpy.array(bands).min()
    x_min_lim = min(x)
    x_max_lim = max(x)
    all_data['x_min_lim'] = x_min_lim
    all_data['x_max_lim'] = x_max_lim
    all_data['y_min_lim'] = y_min_lim
    all_data['y_max_lim'] = y_max_lim
    for key, value in kwargs.items():
        if key not in valid_additional_keywords:
            raise TypeError("_matplotlib_get_dict() got an unexpected keyword argument '{}'".format(key))
        all_data[key] = value
    return all_data
def _prepare_mpl_singlefile(self, *args, **kwargs):
    """Prepare a self-contained python script plotting the bands with
    matplotlib; all the data are embedded in the script as a JSON string.

    :return: tuple ``(script_bytes, {})``.
    """
    from aiida.common import json
    plot_data = self._matplotlib_get_dict(*args, **kwargs)
    pieces = [
        MATPLOTLIB_HEADER_TEMPLATE.substitute(),
        MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE.substitute(all_data_json=json.dumps(plot_data, indent=2)),
        self._get_mpl_body_template(plot_data['paths']),
        MATPLOTLIB_FOOTER_TEMPLATE_SHOW.substitute(),
    ]
    return ''.join(pieces).encode('utf-8'), {}
def _prepare_mpl_withjson(self, main_file_name='', *args, **kwargs):  # pylint: disable=keyword-arg-before-vararg
    """Prepare a python script plotting the bands with matplotlib, with the
    data stored in a side JSON file ('<base>_data.json').

    :param main_file_name: name of the main script file; the JSON file name
        is derived from it.
    :return: tuple ``(script_bytes, {json_fname: json_bytes})``.
    """
    import os
    from aiida.common import json
    all_data = self._matplotlib_get_dict(*args, main_file_name=main_file_name, **kwargs)
    json_fname = os.path.splitext(main_file_name)[0] + '_data.json'
    # Keep the unescaped name as the key of the extra file actually written.
    ext_files = {json_fname: json.dumps(all_data, indent=2).encode('utf-8')}
    # Escape double quotes for embedding in the double-quoted string of the
    # generated script. BUGFIX: the previous `replace('"', '\"')` was a
    # no-op, since '\"' == '"' in Python source.
    escaped_fname = json_fname.replace('"', '\\"')
    s_header = MATPLOTLIB_HEADER_TEMPLATE.substitute()
    s_import = MATPLOTLIB_IMPORT_DATA_FROMFILE_TEMPLATE.substitute(json_fname=escaped_fname)
    s_body = self._get_mpl_body_template(all_data['paths'])
    s_footer = MATPLOTLIB_FOOTER_TEMPLATE_SHOW.substitute()
    string = s_header + s_import + s_body + s_footer
    return string.encode('utf-8'), ext_files
def _prepare_mpl_pdf(self, main_file_name='', *args, **kwargs):  # pylint: disable=keyword-arg-before-vararg,unused-argument
    """Prepare a PDF of the band structure, rendered by running the generated
    matplotlib script in a subprocess with the current interpreter.

    :return: tuple ``(pdf_bytes, {})``.
    :raise RuntimeError: if the subprocess did not produce the PDF file.
    """
    import os
    import subprocess
    import sys
    import tempfile
    from aiida.common import json
    all_data = self._matplotlib_get_dict(*args, **kwargs)
    # Use the Agg backend: the script runs in a subprocess without a display.
    s_header = MATPLOTLIB_HEADER_AGG_TEMPLATE.substitute()
    s_import = MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE.substitute(all_data_json=json.dumps(all_data, indent=2))
    s_body = self._get_mpl_body_template(all_data['paths'])
    # Reserve a temporary output file name (the file itself is recreated by
    # the plotting subprocess).
    handle, filename = tempfile.mkstemp()
    os.close(handle)
    os.remove(filename)
    # Escape double quotes for embedding in the double-quoted string of the
    # generated script. BUGFIX: the previous `replace('"', '\"')` was a
    # no-op, since '\"' == '"' in Python source.
    escaped_fname = filename.replace('"', '\\"')
    s_footer = MATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE.substitute(fname=escaped_fname, format='pdf')
    string = s_header + s_import + s_body + s_footer
    # Run in a subprocess rather than exec-ing here, to avoid touching the
    # matplotlib backend of the current process.
    with tempfile.NamedTemporaryFile(mode='w+') as script:
        script.write(string)
        script.flush()
        subprocess.check_output([sys.executable, script.name])
    if not os.path.exists(filename):
        raise RuntimeError('Unable to generate the PDF...')
    with open(filename, 'rb') as pdf_file:
        imgdata = pdf_file.read()
    os.remove(filename)
    return imgdata, {}
def _prepare_mpl_png(self, main_file_name='', *args, **kwargs):  # pylint: disable=keyword-arg-before-vararg,unused-argument
    """Prepare a PNG (300 dpi) of the band structure, rendered by running the
    generated matplotlib script in a subprocess with the current interpreter.

    :return: tuple ``(png_bytes, {})``.
    :raise RuntimeError: if the subprocess did not produce the PNG file.
    """
    # NOTE(review): this exporter uses the stdlib json while the PDF variant
    # uses aiida.common.json; kept as-is to preserve behavior.
    import json
    import os
    import subprocess
    import sys
    import tempfile
    all_data = self._matplotlib_get_dict(*args, **kwargs)
    # Use the Agg backend: the script runs in a subprocess without a display.
    s_header = MATPLOTLIB_HEADER_AGG_TEMPLATE.substitute()
    s_import = MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE.substitute(all_data_json=json.dumps(all_data, indent=2))
    s_body = self._get_mpl_body_template(all_data['paths'])
    # Reserve a temporary output file name (the file itself is recreated by
    # the plotting subprocess).
    handle, filename = tempfile.mkstemp()
    os.close(handle)
    os.remove(filename)
    # Escape double quotes for embedding in the double-quoted string of the
    # generated script. BUGFIX: the previous `replace('"', '\"')` was a
    # no-op, since '\"' == '"' in Python source.
    escaped_fname = filename.replace('"', '\\"')
    s_footer = MATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE_WITH_DPI.substitute(fname=escaped_fname, format='png', dpi=300)
    string = s_header + s_import + s_body + s_footer
    # Run in a subprocess rather than exec-ing here, to avoid touching the
    # matplotlib backend of the current process.
    with tempfile.NamedTemporaryFile(mode='w+') as script:
        script.write(string)
        script.flush()
        subprocess.check_output([sys.executable, script.name])
    if not os.path.exists(filename):
        raise RuntimeError('Unable to generate the PNG...')
    with open(filename, 'rb') as png_file:
        imgdata = png_file.read()
    os.remove(filename)
    return imgdata, {}
@staticmethod
def _get_mpl_body_template(paths):
    """Select the plot snippet for the body template: scatter markers when
    there is a single path segment, line plots otherwise.

    :param paths: the 'paths' list from ``_get_bandplot_data``.
    :return: the rendered body of the matplotlib script.
    """
    plot_code = SINGLE_KP if len(paths) == 1 else MULTI_KP
    return MATPLOTLIB_BODY_TEMPLATE.substitute(plot_code=plot_code)
def show_mpl(self, **kwargs):
    """Show the bands with matplotlib by exec-ing the generated plot script.

    :param kwargs: forwarded to the ``mpl_singlefile`` exporter
        (see ``_matplotlib_get_dict`` for the accepted keywords).
    """
    # The generated script is produced by this class itself (not untrusted
    # input), hence the exec.
    exec(*self._exportcontent(fileformat='mpl_singlefile', main_file_name='', **kwargs))  # pylint: disable=exec-used
def _prepare_gnuplot(
    self,
    main_file_name=None,
    title='',
    comments=True,
    prettify_format=None,
    y_max_lim=None,
    y_min_lim=None,
    y_origin=0.
):
    """Prepare a gnuplot script plotting the bands, plus the data file.

    :param main_file_name: name of the main script file (default 'band.dat');
        the data file name is derived from it ('<base>_data.dat').
    :param title: plot title.
    :param comments: if True, prepend a provenance header to script and data.
    :param prettify_format: label prettifier name (default 'gnuplot_seekpath').
    :param y_max_lim: upper y limit (default: maximum band energy).
    :param y_min_lim: lower y limit (default: minimum band energy).
    :param y_origin: energy value subtracted from all bands.
    :return: tuple ``(script_bytes, {dat_filename: dat_bytes})``.
    """
    # pylint: disable=too-many-arguments,too-many-locals
    import os
    main_file_name = main_file_name or 'band.dat'
    dat_filename = os.path.splitext(main_file_name)[0] + '_data.dat'
    if prettify_format is None:
        # Default. Specified like this to allow caller functions to pass 'None'
        prettify_format = 'gnuplot_seekpath'
    plot_info = self._get_bandplot_data(
        cartesian=True, prettify_format=prettify_format, join_symbol='|', y_origin=y_origin
    )
    bands = plot_info['y']
    x = plot_info['x']
    # axis limits: default to the data range
    if y_max_lim is None:
        y_max_lim = bands.max()
    if y_min_lim is None:
        y_min_lim = bands.min()
    x_min_lim = min(x)  # this isn't a numpy array, but a list
    x_max_lim = max(x)
    raw_data, _ = self._prepare_dat_blocks(plot_info, comments=comments)
    xtics_string = ', '.join('"{}" {}'.format(label, pos) for pos, label in plot_info['labels'])
    # Escape double quotes for embedding in double-quoted gnuplot strings.
    # BUGFIX: the previous `replace('"', '\"')` calls were no-ops, since
    # '\"' == '"' in Python source.
    escaped_dat_fname = os.path.basename(dat_filename).replace('"', '\\"')
    script = []
    if comments:
        script.append(prepare_header_comment(self.uuid, plot_info=plot_info, comment_char='# '))
    script.append('')
    script.append(
        """## Uncomment the next two lines to write directly to PDF
## Note: You need to have gnuplot installed with pdfcairo support!
#set term pdfcairo
#set output 'out.pdf'

### Uncomment one of the options below to change font
### For the LaTeX fonts, you can download them from here:
### https://sourceforge.net/projects/cm-unicode/
### And then install them in your system
## LaTeX Serif font, if installed
#set termopt font "CMU Serif, 12"
## LaTeX Sans Serif font, if installed
#set termopt font "CMU Sans Serif, 12"
## Classical Times New Roman
#set termopt font "Times New Roman, 12"
"""
    )
    script.append('set termopt enhanced')
    script.append('set encoding utf8')
    script.append('set xtics ({})'.format(xtics_string))
    script.append('unset key')
    script.append('set yrange [{}:{}]'.format(y_min_lim, y_max_lim))
    script.append('set ylabel "{}"'.format('Dispersion ({})'.format(self.units)))
    if title:
        script.append('set title "{}"'.format(title.replace('"', '\\"')))
    if len(x) > 1:
        # More than one kpoint: draw the dispersion as lines.
        script.append('set xrange [{}:{}]'.format(x_min_lim, x_max_lim))
        script.append('set grid xtics lt 1 lc rgb "#888888"')
        script.append('plot "{}" with l lc rgb "#000000"'.format(escaped_dat_fname))
    else:
        # Single kpoint: draw short horizontal segments at each energy.
        script.append('set xrange [-1.0:1.0]')
        script.append(
            'plot "{}" using ($1-0.25):($2):(0.5):(0) with vectors nohead lc rgb "#000000"'.format(
                escaped_dat_fname
            )
        )
    script_data = '\n'.join(script) + '\n'
    extra_files = {dat_filename: raw_data}
    return script_data.encode('utf-8'), extra_files
def _prepare_agr(
    self,
    main_file_name='',
    comments=True,
    setnumber_offset=0,
    color_number=1,
    color_number2=2,
    legend='',
    title='',
    y_max_lim=None,
    y_min_lim=None,
    y_origin=0.,
    prettify_format=None
):
    """Prepare an xmgrace (.agr) project file with the band structure.

    :param comments: if True, prepend a '#'-commented provenance header.
    :param setnumber_offset: offset added to each grace set number.
    :param color_number: grace color index for the first band type.
    :param color_number2: grace color index for the second band type.
    :param legend: legend text attached to the first set.
    :param title: graph title.
    :param y_max_lim: upper y limit (default: maximum band energy).
    :param y_min_lim: lower y limit (default: minimum band energy).
    :param y_origin: energy value subtracted from all bands.
    :param prettify_format: label prettifier name (default 'agr_seekpath').
    :return: tuple ``(agr_bytes, {})``.
    :raise ValueError: if a color number exceeds MAX_NUM_AGR_COLORS.
    """
    # pylint: disable=too-many-arguments,too-many-locals
    import math
    if prettify_format is None:
        # Default. Specified like this to allow caller functions to pass 'None'
        prettify_format = 'agr_seekpath'
    plot_info = self._get_bandplot_data(
        cartesian=True, prettify_format=prettify_format, join_symbol='|', y_origin=y_origin
    )
    if color_number > MAX_NUM_AGR_COLORS:
        raise ValueError('Color number is too high (should be less than {})'.format(MAX_NUM_AGR_COLORS))
    if color_number2 > MAX_NUM_AGR_COLORS:
        raise ValueError('Color number 2 is too high (should be less than {})'.format(MAX_NUM_AGR_COLORS))
    bands = plot_info['y']
    x = plot_info['x']
    the_bands = numpy.transpose(bands)
    labels = plot_info['labels']
    num_labels = len(labels)
    # axis limits: default to the data range
    if y_max_lim is None:
        y_max_lim = the_bands.max()
    if y_min_lim is None:
        y_min_lim = the_bands.min()
    x_min_lim = min(x)
    x_max_lim = max(x)
    # Major y-tick spacing: the largest power of ten not exceeding the range.
    ytick_spacing = 10**int(math.log10((y_max_lim - y_min_lim)))
    # prepare xticks labels
    sx1 = ''
    for i, label in enumerate(labels):
        sx1 += AGR_SINGLE_XTICK_TEMPLATE.substitute(
            index=i,
            coord=label[0],
            name=label[1],
        )
    xticks = AGR_XTICKS_TEMPLATE.substitute(
        num_labels=num_labels,
        single_xtick_templates=sx1,
    )
    # build the arrays with the xy coordinates
    all_sets = []
    for band in the_bands:
        this_set = ''
        for i in zip(x, band):
            line = '{:.8f}'.format(i[0]) + '\t' + '{:.8f}'.format(i[1]) + '\n'
            this_set += line
        all_sets.append(this_set)
    set_descriptions = ''
    for i, (this_set, band_type) in enumerate(zip(all_sets, plot_info['band_type_idx'])):
        # Alternate the two colors between the (up to two) band types.
        if band_type % 2 == 0:
            linecolor = color_number
        else:
            linecolor = color_number2
        width = str(2.0)
        set_descriptions += AGR_SET_DESCRIPTION_TEMPLATE.substitute(
            set_number=i + setnumber_offset,
            linewidth=width,
            color_number=linecolor,
            legend=legend if i == 0 else ''
        )
    units = self.units
    graphs = AGR_GRAPH_TEMPLATE.substitute(
        x_min_lim=x_min_lim,
        y_min_lim=y_min_lim,
        x_max_lim=x_max_lim,
        y_max_lim=y_max_lim,
        yaxislabel='Dispersion ({})'.format(units),
        xticks_template=xticks,
        set_descriptions=set_descriptions,
        ytick_spacing=ytick_spacing,
        title=title,
    )
    sets = []
    for i, this_set in enumerate(all_sets):
        sets.append(AGR_SINGLESET_TEMPLATE.substitute(set_number=i + setnumber_offset, xydata=this_set))
    the_sets = '&\n'.join(sets)
    string = AGR_TEMPLATE.substitute(graphs=graphs, sets=the_sets)
    if comments:
        # BUGFIX: this statement was corrupted in the previous revision
        # (unterminated string literal `comment_char='`); restored to prepend
        # the '#'-commented provenance header, as in the other exporters.
        string = prepare_header_comment(self.uuid, plot_info, comment_char='#') + '\n' + string
    return string.encode('utf-8'), {}
def _get_band_segments(self, cartesian):
    """Return the node label plus the 'path' and per-segment 'paths' band
    data (labels are not prettified or joined).

    :param cartesian: if True, compute distances from cartesian coordinates.
    :return: dict with keys 'label', 'path' and 'paths'.
    """
    plot_info = self._get_bandplot_data(
        cartesian=cartesian, prettify_format=None, join_symbol=None, get_segments=True
    )
    return {
        'label': self.label,
        'path': plot_info['path'],
        'paths': plot_info['paths'],
    }
def _prepare_json(self, main_file_name='', comments=True):  # pylint: disable=unused-argument
    """Prepare a JSON dump of the band segments plus the originating UUID.

    :param comments: if True, include a file-header comment string.
    :return: tuple ``(json_bytes, {})``.
    """
    from aiida import get_file_header
    from aiida.common import json
    data = self._get_band_segments(cartesian=True)
    data['original_uuid'] = self.uuid
    if comments:
        data['comments'] = get_file_header(comment_char='')
    return json.dumps(data).encode('utf-8'), {}
# Highest grace color index usable by _prepare_agr (colors 0-15 are mapped
# in AGR_TEMPLATE below).
MAX_NUM_AGR_COLORS = 15
# Top-level xmgrace project file: font/color maps and page defaults, with the
# graph description and the data sets substituted in.
AGR_TEMPLATE = Template(
    """
# Grace project file
#
@version 50122
@page size 792, 612
@page scroll 5%
@page inout 5%
@link page off
@map font 8 to "Courier", "Courier"
@map font 10 to "Courier-Bold", "Courier-Bold"
@map font 11 to "Courier-BoldOblique", "Courier-BoldOblique"
@map font 9 to "Courier-Oblique", "Courier-Oblique"
@map font 4 to "Helvetica", "Helvetica"
@map font 6 to "Helvetica-Bold", "Helvetica-Bold"
@map font 7 to "Helvetica-BoldOblique", "Helvetica-BoldOblique"
@map font 5 to "Helvetica-Oblique", "Helvetica-Oblique"
@map font 14 to "NimbusMonoL-BoldOblique", "NimbusMonoL-BoldOblique"
@map font 15 to "NimbusMonoL-Regular", "NimbusMonoL-Regular"
@map font 16 to "NimbusMonoL-RegularOblique", "NimbusMonoL-RegularOblique"
@map font 17 to "NimbusRomanNo9L-Medium", "NimbusRomanNo9L-Medium"
@map font 18 to "NimbusRomanNo9L-MediumItalic", "NimbusRomanNo9L-MediumItalic"
@map font 19 to "NimbusRomanNo9L-Regular", "NimbusRomanNo9L-Regular"
@map font 20 to "NimbusRomanNo9L-RegularItalic", "NimbusRomanNo9L-RegularItalic"
@map font 21 to "NimbusSansL-Bold", "NimbusSansL-Bold"
@map font 22 to "NimbusSansL-BoldCondensed", "NimbusSansL-BoldCondensed"
@map font 23 to "NimbusSansL-BoldCondensedItalic", "NimbusSansL-BoldCondensedItalic"
@map font 24 to "NimbusSansL-BoldItalic", "NimbusSansL-BoldItalic"
@map font 25 to "NimbusSansL-Regular", "NimbusSansL-Regular"
@map font 26 to "NimbusSansL-RegularCondensed", "NimbusSansL-RegularCondensed"
@map font 27 to "NimbusSansL-RegularCondensedItalic", "NimbusSansL-RegularCondensedItalic"
@map font 28 to "NimbusSansL-RegularItalic", "NimbusSansL-RegularItalic"
@map font 29 to "StandardSymbolsL-Regular", "StandardSymbolsL-Regular"
@map font 12 to "Symbol", "Symbol"
@map font 31 to "Symbol-Regular", "Symbol-Regular"
@map font 2 to "Times-Bold", "Times-Bold"
@map font 3 to "Times-BoldItalic", "Times-BoldItalic"
@map font 1 to "Times-Italic", "Times-Italic"
@map font 0 to "Times-Roman", "Times-Roman"
@map font 36 to "URWBookmanL-DemiBold", "URWBookmanL-DemiBold"
@map font 37 to "URWBookmanL-DemiBoldItalic", "URWBookmanL-DemiBoldItalic"
@map font 38 to "URWBookmanL-Light", "URWBookmanL-Light"
@map font 39 to "URWBookmanL-LightItalic", "URWBookmanL-LightItalic"
@map font 40 to "URWChanceryL-MediumItalic", "URWChanceryL-MediumItalic"
@map font 41 to "URWGothicL-Book", "URWGothicL-Book"
@map font 42 to "URWGothicL-BookOblique", "URWGothicL-BookOblique"
@map font 43 to "URWGothicL-Demi", "URWGothicL-Demi"
@map font 44 to "URWGothicL-DemiOblique", "URWGothicL-DemiOblique"
@map font 45 to "URWPalladioL-Bold", "URWPalladioL-Bold"
@map font 46 to "URWPalladioL-BoldItalic", "URWPalladioL-BoldItalic"
@map font 47 to "URWPalladioL-Italic", "URWPalladioL-Italic"
@map font 48 to "URWPalladioL-Roman", "URWPalladioL-Roman"
@map font 13 to "ZapfDingbats", "ZapfDingbats"
@map color 0 to (255, 255, 255), "white"
@map color 1 to (0, 0, 0), "black"
@map color 2 to (255, 0, 0), "red"
@map color 3 to (0, 255, 0), "green"
@map color 4 to (0, 0, 255), "blue"
@map color 5 to (255, 215, 0), "yellow"
@map color 6 to (188, 143, 143), "brown"
@map color 7 to (220, 220, 220), "grey"
@map color 8 to (148, 0, 211), "violet"
@map color 9 to (0, 255, 255), "cyan"
@map color 10 to (255, 0, 255), "magenta"
@map color 11 to (255, 165, 0), "orange"
@map color 12 to (114, 33, 188), "indigo"
@map color 13 to (103, 7, 72), "maroon"
@map color 14 to (64, 224, 208), "turquoise"
@map color 15 to (0, 139, 0), "green4"
@reference date 0
@date wrap off
@date wrap year 1950
@default linewidth 1.0
@default linestyle 1
@default color 1
@default pattern 1
@default font 0
@default char size 1.000000
@default symbol size 1.000000
@default sformat "%.8g"
@background color 0
@page background fill on
@timestamp off
@timestamp 0.03, 0.03
@timestamp color 1
@timestamp rot 0
@timestamp font 0
@timestamp char size 1.000000
@timestamp def "Wed Jul 30 16:44:34 2014"
@r0 off
@link r0 to g0
@r0 type above
@r0 linestyle 1
@r0 linewidth 1.0
@r0 color 1
@r0 line 0, 0, 0, 0
@r1 off
@link r1 to g0
@r1 type above
@r1 linestyle 1
@r1 linewidth 1.0
@r1 color 1
@r1 line 0, 0, 0, 0
@r2 off
@link r2 to g0
@r2 type above
@r2 linestyle 1
@r2 linewidth 1.0
@r2 color 1
@r2 line 0, 0, 0, 0
@r3 off
@link r3 to g0
@r3 type above
@r3 linestyle 1
@r3 linewidth 1.0
@r3 color 1
@r3 line 0, 0, 0, 0
@r4 off
@link r4 to g0
@r4 type above
@r4 linestyle 1
@r4 linewidth 1.0
@r4 color 1
@r4 line 0, 0, 0, 0
$graphs
$sets
"""
)
# Declares the number of x-axis ticks, followed by one entry per tick
# (rendered with AGR_SINGLE_XTICK_TEMPLATE).
AGR_XTICKS_TEMPLATE = Template("""
@ xaxis tick spec $num_labels
$single_xtick_templates
""")
# One x-axis tick: its position (coord) and its label text (name).
AGR_SINGLE_XTICK_TEMPLATE = Template(
    """
@ xaxis tick major $index, $coord
@ xaxis ticklabel $index, "$name"
"""
)
AGR_GRAPH_TEMPLATE = Template(
"""
@g0 on
@g0 hidden false
@g0 type XY
@g0 stacked false
@g0 bar hgap 0.000000
@g0 fixedpoint off
@g0 fixedpoint type 0
@g0 fixedpoint xy 0.000000, 0.000000
@g0 fixedpoint format general general
@g0 fixedpoint prec 6, 6
@with g0
@ world $x_min_lim, $y_min_lim, $x_max_lim, $y_max_lim
@ stack world 0, 0, 0, 0
@ znorm 1
@ view 0.150000, 0.150000, 1.150000, 0.850000
@ title "$title"
@ title font 0
@ title size 1.500000
@ title color 1
@ subtitle ""
@ subtitle font 0
@ subtitle size 1.000000
@ subtitle color 1
@ xaxes scale Normal
@ yaxes scale Normal
@ xaxes invert off
@ yaxes invert off
@ xaxis on
@ xaxis type zero false
@ xaxis offset 0.000000 , 0.000000
@ xaxis bar on
@ xaxis bar color 1
@ xaxis bar linestyle 1
@ xaxis bar linewidth 1.0
@ xaxis label ""
@ xaxis label layout para
@ xaxis label place auto
@ xaxis label char size 1.000000
@ xaxis label font 4
@ xaxis label color 1
@ xaxis label place normal
@ xaxis tick on
@ xaxis tick major 5
@ xaxis tick minor ticks 0
@ xaxis tick default 6
@ xaxis tick place rounded true
@ xaxis tick in
@ xaxis tick major size 1.000000
@ xaxis tick major color 1
@ xaxis tick major linewidth 1.0
@ xaxis tick major linestyle 1
@ xaxis tick major grid on
@ xaxis tick minor color 1
@ xaxis tick minor linewidth 1.0
@ xaxis tick minor linestyle 1
@ xaxis tick minor grid off
@ xaxis tick minor size 0.500000
@ xaxis ticklabel on
@ xaxis ticklabel format general
@ xaxis ticklabel prec 5
@ xaxis ticklabel formula ""
@ xaxis ticklabel append ""
@ xaxis ticklabel prepend ""
@ xaxis ticklabel angle 0
@ xaxis ticklabel skip 0
@ xaxis ticklabel stagger 0
@ xaxis ticklabel place normal
@ xaxis ticklabel offset auto
@ xaxis ticklabel offset 0.000000 , 0.010000
@ xaxis ticklabel start type auto
@ xaxis ticklabel start 0.000000
@ xaxis ticklabel stop type auto
@ xaxis ticklabel stop 0.000000
@ xaxis ticklabel char size 1.500000
@ xaxis ticklabel font 4
@ xaxis ticklabel color 1
@ xaxis tick place both
@ xaxis tick spec type both
$xticks_template
@ yaxis on
@ yaxis type zero false
@ yaxis offset 0.000000 , 0.000000
@ yaxis bar on
@ yaxis bar color 1
@ yaxis bar linestyle 1
@ yaxis bar linewidth 1.0
@ yaxis label "$yaxislabel"
@ yaxis label layout para
@ yaxis label place auto
@ yaxis label char size 1.500000
@ yaxis label font 4
@ yaxis label color 1
@ yaxis label place normal
@ yaxis tick on
@ yaxis tick major $ytick_spacing
@ yaxis tick minor ticks 1
@ yaxis tick default 6
@ yaxis tick place rounded true
@ yaxis tick in
@ yaxis tick major size 1.000000
@ yaxis tick major color 1
@ yaxis tick major linewidth 1.0
@ yaxis tick major linestyle 1
@ yaxis tick major grid off
@ yaxis tick minor color 1
@ yaxis tick minor linewidth 1.0
@ yaxis tick minor linestyle 1
@ yaxis tick minor grid off
@ yaxis tick minor size 0.500000
@ yaxis ticklabel on
@ yaxis ticklabel format general
@ yaxis ticklabel prec 5
@ yaxis ticklabel formula ""
@ yaxis ticklabel append ""
@ yaxis ticklabel prepend ""
@ yaxis ticklabel angle 0
@ yaxis ticklabel skip 0
@ yaxis ticklabel stagger 0
@ yaxis ticklabel place normal
@ yaxis ticklabel offset auto
@ yaxis ticklabel offset 0.000000 , 0.010000
@ yaxis ticklabel start type auto
@ yaxis ticklabel start 0.000000
@ yaxis ticklabel stop type auto
@ yaxis ticklabel stop 0.000000
@ yaxis ticklabel char size 1.250000
@ yaxis ticklabel font 4
@ yaxis ticklabel color 1
@ yaxis tick place both
@ yaxis tick spec type none
@ altxaxis off
@ altyaxis off
@ legend on
@ legend loctype view
@ legend 0.85, 0.8
@ legend box color 1
@ legend box pattern 1
@ legend box linewidth 1.0
@ legend box linestyle 1
@ legend box fill color 0
@ legend box fill pattern 1
@ legend font 0
@ legend char size 1.000000
@ legend color 1
@ legend length 4
@ legend vgap 1
@ legend hgap 1
@ legend invert false
@ frame type 0
@ frame linestyle 1
@ frame linewidth 1.0
@ frame color 1
@ frame pattern 1
@ frame background color 0
@ frame background pattern 0
$set_descriptions
"""
)
AGR_SET_DESCRIPTION_TEMPLATE = Template(
"""
@ s$set_number hidden false
@ s$set_number type xy
@ s$set_number symbol 0
@ s$set_number symbol size 1.000000
@ s$set_number symbol color $color_number
@ s$set_number symbol pattern 1
@ s$set_number symbol fill color $color_number
@ s$set_number symbol fill pattern 0
@ s$set_number symbol linewidth 1.0
@ s$set_number symbol linestyle 1
@ s$set_number symbol char 65
@ s$set_number symbol char font 0
@ s$set_number symbol skip 0
@ s$set_number line type 1
@ s$set_number line linestyle 1
@ s$set_number line linewidth $linewidth
@ s$set_number line color $color_number
@ s$set_number line pattern 1
@ s$set_number baseline type 0
@ s$set_number baseline off
@ s$set_number dropline off
@ s$set_number fill type 0
@ s$set_number fill rule 0
@ s$set_number fill color $color_number
@ s$set_number fill pattern 1
@ s$set_number avalue off
@ s$set_number avalue type 2
@ s$set_number avalue char size 1.000000
@ s$set_number avalue font 0
@ s$set_number avalue color 1
@ s$set_number avalue rot 0
@ s$set_number avalue format general
@ s$set_number avalue prec 3
@ s$set_number avalue prepend ""
@ s$set_number avalue append ""
@ s$set_number avalue offset 0.000000 , 0.000000
@ s$set_number errorbar on
@ s$set_number errorbar place both
@ s$set_number errorbar color $color_number
@ s$set_number errorbar pattern 1
@ s$set_number errorbar size 1.000000
@ s$set_number errorbar linewidth 1.0
@ s$set_number errorbar linestyle 1
@ s$set_number errorbar riser linewidth 1.0
@ s$set_number errorbar riser linestyle 1
@ s$set_number errorbar riser clip off
@ s$set_number errorbar riser clip length 0.100000
@ s$set_number comment "Cols 1:2"
@ s$set_number legend "$legend"
"""
)
AGR_SINGLESET_TEMPLATE = Template("""
@target G0.S$set_number
@type xy
$xydata
""")
# text.latex.preview=True is needed to have a proper alignment of
# tick marks with and without subscripts
# see e.g. http://matplotlib.org/1.3.0/examples/pylab_examples/usetex_baseline_test.html
MATPLOTLIB_HEADER_AGG_TEMPLATE = Template(
"""# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
from matplotlib import rc
# Uncomment to change default font
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('font', **{'family': 'serif', 'serif': ['Computer Modern', 'CMU Serif', 'Times New Roman', 'DejaVu Serif']})
# To use proper font for, e.g., Gamma if usetex is set to False
rc('mathtext', fontset='cm')
rc('text', usetex=True)
import matplotlib.pyplot as plt
plt.rcParams.update({'text.latex.preview': True})
import pylab as pl
# I use json to make sure the input is sanitized
import json
print_comment = False
"""
)
# text.latex.preview=True is needed to have a proper alignment of
# tick marks with and without subscripts
# see e.g. http://matplotlib.org/1.3.0/examples/pylab_examples/usetex_baseline_test.html
MATPLOTLIB_HEADER_TEMPLATE = Template(
"""# -*- coding: utf-8 -*-
from matplotlib import rc
# Uncomment to change default font
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('font', **{'family': 'serif', 'serif': ['Computer Modern', 'CMU Serif', 'Times New Roman', 'DejaVu Serif']})
# To use proper font for, e.g., Gamma if usetex is set to False
rc('mathtext', fontset='cm')
rc('text', usetex=True)
import matplotlib.pyplot as plt
plt.rcParams.update({'text.latex.preview': True})
import pylab as pl
# I use json to make sure the input is sanitized
import json
print_comment = False
"""
)
MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE = Template('''all_data_str = r"""$all_data_json"""
''')
MATPLOTLIB_IMPORT_DATA_FROMFILE_TEMPLATE = Template(
"""with open("$json_fname", encoding='utf8') as f:
all_data_str = f.read()
"""
)
MULTI_KP = """
for path in paths:
if path['length'] <= 1:
# Avoid printing empty lines
continue
x = path['x']
#for band in bands:
for band, band_type in zip(path['values'], all_data['band_type_idx']):
# For now we support only two colors
if band_type % 2 == 0:
further_plot_options = further_plot_options1
else:
further_plot_options = further_plot_options2
# Put the legend text only once
label = None
if first_band_1 and band_type % 2 == 0:
first_band_1 = False
label = all_data.get('legend_text', None)
elif first_band_2 and band_type % 2 == 1:
first_band_2 = False
label = all_data.get('legend_text2', None)
p.plot(x, band, label=label,
**further_plot_options
)
"""
SINGLE_KP = """
path = paths[0]
values = path['values']
x = [path['x'] for _ in values]
p.scatter(x, values, marker="_")
"""
MATPLOTLIB_BODY_TEMPLATE = Template(
"""all_data = json.loads(all_data_str)
if not all_data.get('use_latex', False):
rc('text', usetex=False)
#x = all_data['x']
#bands = all_data['bands']
paths = all_data['paths']
tick_pos = all_data['tick_pos']
tick_labels = all_data['tick_labels']
# Option for bands (all, or those of type 1 if there are two spins)
further_plot_options1 = {}
further_plot_options1['color'] = all_data.get('bands_color', 'k')
further_plot_options1['linewidth'] = all_data.get('bands_linewidth', 0.5)
further_plot_options1['linestyle'] = all_data.get('bands_linestyle', None)
further_plot_options1['marker'] = all_data.get('bands_marker', None)
further_plot_options1['markersize'] = all_data.get('bands_markersize', None)
further_plot_options1['markeredgecolor'] = all_data.get('bands_markeredgecolor', None)
further_plot_options1['markeredgewidth'] = all_data.get('bands_markeredgewidth', None)
further_plot_options1['markerfacecolor'] = all_data.get('bands_markerfacecolor', None)
# Options for second-type of bands if present (e.g. spin up vs. spin down)
further_plot_options2 = {}
further_plot_options2['color'] = all_data.get('bands_color2', 'r')
# Use the values of further_plot_options1 by default
further_plot_options2['linewidth'] = all_data.get('bands_linewidth2',
further_plot_options1['linewidth']
)
further_plot_options2['linestyle'] = all_data.get('bands_linestyle2',
further_plot_options1['linestyle']
)
further_plot_options2['marker'] = all_data.get('bands_marker2',
further_plot_options1['marker']
)
further_plot_options2['markersize'] = all_data.get('bands_markersize2',
further_plot_options1['markersize']
)
further_plot_options2['markeredgecolor'] = all_data.get('bands_markeredgecolor2',
further_plot_options1['markeredgecolor']
)
further_plot_options2['markeredgewidth'] = all_data.get('bands_markeredgewidth2',
further_plot_options1['markeredgewidth']
)
further_plot_options2['markerfacecolor'] = all_data.get('bands_markerfacecolor2',
further_plot_options1['markerfacecolor']
)
fig = pl.figure()
p = fig.add_subplot(1,1,1)
first_band_1 = True
first_band_2 = True
${plot_code}
p.set_xticks(tick_pos)
p.set_xticklabels(tick_labels)
p.set_xlim([all_data['x_min_lim'], all_data['x_max_lim']])
p.set_ylim([all_data['y_min_lim'], all_data['y_max_lim']])
p.xaxis.grid(True, which='major', color='#888888', linestyle='-', linewidth=0.5)
if all_data.get('plot_zero_axis', False):
p.axhline(
0.,
color=all_data.get('zero_axis_color', '#888888'),
linestyle=all_data.get('zero_axis_linestyle', '--'),
linewidth=all_data.get('zero_axis_linewidth', 0.5),
)
if all_data['title']:
p.set_title(all_data['title'])
if all_data['legend_text']:
p.legend(loc='best')
p.set_ylabel(all_data['yaxis_label'])
try:
if print_comment:
print(all_data['comment'])
except KeyError:
pass
"""
)
MATPLOTLIB_FOOTER_TEMPLATE_SHOW = Template("""pl.show()""")
MATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE = Template("""pl.savefig("$fname", format="$format")""")
MATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE_WITH_DPI = Template("""pl.savefig("$fname", format="$format", dpi=$dpi)""")
| true | true |
1c3b649fe1c28b0257033d6e497222d0913c9d9c | 7,396 | py | Python | saleor/billpayment/income_api/views.py | glosoftgroup/tenants | a6b229ad1f6d567b7078f83425a532830b71e1bb | [
"BSD-3-Clause"
] | 1 | 2018-05-03T06:17:02.000Z | 2018-05-03T06:17:02.000Z | saleor/billpayment/income_api/views.py | glosoftgroup/tenants | a6b229ad1f6d567b7078f83425a532830b71e1bb | [
"BSD-3-Clause"
] | 8 | 2018-05-07T16:42:35.000Z | 2022-02-26T03:31:56.000Z | saleor/billpayment/income_api/views.py | glosoftgroup/tenants | a6b229ad1f6d567b7078f83425a532830b71e1bb | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import generics
from django.db.models import Q
from django.contrib.auth import get_user_model
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework import pagination
from .pagination import PostLimitOffsetPagination
from saleor.billpayment.models import BillPayment as Table
from .serializers import (
TableListSerializer,
)
# Resolve the active user model once at import time.
User = get_user_model()
from django.db.models import Sum
from django.db.models.functions import TruncMonth
import datetime
# NOTE(review): evaluated once at import, so `now` is frozen at process
# start; it is only used as a fallback year when the table is empty.
now = datetime.datetime.now()
class ListAPIView(generics.ListAPIView):
    """
    List monthly bill-payment aggregates (amount and tax per month/room).

    GET /api/setting/

    Optional query parameters (read in both ``list`` and ``get_queryset``):
    ``property`` (substring match on room name), ``month_from``/``month_to``,
    ``month`` + ``year``, ``year`` alone, and ``page_size``.
    """
    serializer_class = TableListSerializer
    permission_classes = (IsAuthenticatedOrReadOnly,)
    pagination_class = PostLimitOffsetPagination
    def get_serializer_context(self):
        """Expose the request and the year of the most recent bill to the serializer."""
        # NOTE(review): bare except also swallows real DB errors; intended to
        # fall back to the wall-clock year when the table is empty.
        try:
            current_year = Table.objects.last().bill.month.year
        except:
            current_year = now.year
        return {"date": None, 'request': self.request, 'current_year':current_year}
    def list(self, request, *args, **kwargs):
        """Return the paginated list and attach summary fields
        (``property``, ``period``, ``totalTax``, ``totalAmount``) to the response.
        """
        # NOTE(review): args/kwargs are forwarded positionally (tuple + dict);
        # DRF's ListAPIView.list absorbs them via *args/**kwargs so this works,
        # but super().list(request, *args, **kwargs) would be conventional.
        response = super(ListAPIView, self).list(request, args, kwargs)
        try:
            current_year = Table.objects.last().bill.month.year
        except:
            current_year = now.year
        # Optional room-name filter; echo the matched (or requested) name back.
        if self.request.GET.get('property'):
            search_query = self.request.GET.get('property')
            query_set = Table.objects.filter(room__name__icontains=str(search_query))
            try:
                response.data['property'] = query_set.first().room.name
            except:
                response.data['property'] = self.request.GET.get('property')
        else:
            query_set = Table.objects.all()
            response.data['property'] = ''
        response.data['period'] = current_year
        # Current-year totals grouped by month (rows with tax == '-1' excluded;
        # NOTE(review): get_queryset excludes tax == '0' instead — confirm which
        # sentinel is intended).
        queryset = query_set.\
            filter(bill__month__year=str(current_year)).\
            exclude(tax__exact='-1').annotate(month=TruncMonth('bill__month')).\
            values('month').annotate(total_amount=Sum('amount')).annotate(total_tax=Sum('tax')).values('month', 'total_amount', 'total_tax', 'room__name')
        totalTax = queryset.aggregate(Sum('total_tax'))["total_tax__sum"]
        response.data['totalTax'] = totalTax
        totalAmount = queryset.aggregate(Sum('total_amount'))["total_amount__sum"]
        response.data['totalAmount'] = totalAmount
        # Same aggregation without the current-year restriction; used as the
        # base for the explicit period filters below.
        queryset_all = query_set.exclude(tax__exact='-1').\
            annotate(month=TruncMonth('bill__month'))\
            .values('month').annotate(total_amount=Sum('amount')).annotate(total_tax=Sum('tax')).values('month', 'total_amount', 'total_tax', 'room__name')
        if self.request.GET.get('month_from') and self.request.GET.get('month_to'):
            month_from = self.request.GET.get('month_from')
            month_to = self.request.GET.get('month_to')
            queryset = queryset_all.filter(bill__month__range=[str(month_from), str(month_to)])
            totalTax = queryset.aggregate(Sum('total_tax'))["total_tax__sum"]
            response.data['totalTax'] = totalTax
            totalAmount = queryset.aggregate(Sum('total_amount'))["total_amount__sum"]
            response.data['totalAmount'] = totalAmount
        if self.request.GET.get('month') and self.request.GET.get('year'):
            month = self.request.GET.get('month')
            year = self.request.GET.get('year')
            queryset = queryset_all.filter(bill__month__month=str(month),
                                           bill__month__year=str(year))
            totalTax = queryset.aggregate(Sum('total_tax'))["total_tax__sum"]
            response.data['totalTax'] = totalTax
            totalAmount = queryset.aggregate(Sum('total_amount'))["total_amount__sum"]
            response.data['totalAmount'] = totalAmount
        if self.request.GET.get('year') and not self.request.GET.get('month'):
            year = self.request.GET.get('year')
            queryset = queryset_all.filter(bill__month__year=str(year))
            totalTax = queryset.aggregate(Sum('total_tax'))["total_tax__sum"]
            response.data['totalTax'] = totalTax
            totalAmount = queryset.aggregate(Sum('total_amount'))["total_amount__sum"]
            response.data['totalAmount'] = totalAmount
        page_size = 'page_size'
        # NOTE(review): mutating the class attribute changes the page size for
        # every request served by this process, not just the current one.
        if self.request.GET.get(page_size):
            pagination.PageNumberPagination.page_size = self.request.GET.get(page_size)
        else:
            pagination.PageNumberPagination.page_size = 10
        # Render missing totals as string zeros for the client.
        if not totalTax:
            response.data['totalTax'] = '0.00'
        if not totalAmount:
            response.data['totalAmount'] = '0.00'
        return response
    def get_queryset(self, *args, **kwargs):
        """Build the monthly aggregate queryset honoring the period/property
        filters, then annotate each row with 'service' and 'rents' splits."""
        # display the latest years tax data first
        try:
            current_year = Table.objects.last().bill.month.year
        except:
            current_year = now.year
        if self.request.GET.get('property'):
            search_query = self.request.GET.get('property')
            query_set = Table.objects.filter(room__name__icontains=str(search_query))
        else:
            query_set = Table.objects.all()
        # Default result: current-year monthly totals (tax == '0' excluded here,
        # unlike list() which excludes '-1' — see note above).
        current_year_queryset = query_set.\
            filter(bill__month__year=str(current_year)).\
            exclude(tax__exact='0').annotate(month=TruncMonth('bill__month')).\
            values('month').annotate(total_amount=Sum('amount')).\
            annotate(total_tax=Sum('tax')).\
            values('month', 'total_amount', 'total_tax', 'room__name')
        queryset_all = query_set.exclude(tax__exact='0').\
            annotate(month=TruncMonth('bill__month'))\
            .values('month').annotate(total_amount=Sum('amount')).\
            annotate(total_tax=Sum('tax')).\
            values('month', 'total_amount', 'total_tax', 'room__name')
        queryset = current_year_queryset
        if self.request.GET.get('month_from') and self.request.GET.get('month_to'):
            month_from = self.request.GET.get('month_from')
            month_to = self.request.GET.get('month_to')
            queryset = queryset_all.filter(bill__month__range=[str(month_from), str(month_to)])
        if self.request.GET.get('month') and self.request.GET.get('year'):
            month = self.request.GET.get('month')
            year = self.request.GET.get('year')
            queryset = queryset_all.filter(bill__month__month=str(month),
                                           bill__month__year=str(year))
        if self.request.GET.get('year') and not self.request.GET.get('month'):
            year = self.request.GET.get('year')
            queryset = queryset_all.filter(bill__month__year=str(year))
        page_size = 'page_size'
        # NOTE(review): same process-wide page_size mutation as in list().
        if self.request.GET.get(page_size):
            pagination.PageNumberPagination.page_size = self.request.GET.get(page_size)
        else:
            pagination.PageNumberPagination.page_size = 10
        # Split each month/room total into 'Service' bills vs. the remainder
        # ('rents'). NOTE(review): nested loop is O(n*m) over the two result
        # sets and mutates the row dicts in place.
        finalqueryset = queryset.values('month', 'total_amount', 'total_tax', 'room__name', 'bill__billtype__name')
        for i in queryset:
            i['service'] = 0
            for j in finalqueryset:
                if j['room__name'] == i['room__name'] and j['bill__billtype__name'] == 'Service':
                    i['service'] += j['total_amount']
            i['rents'] = i['total_amount'] - i['service']
        return queryset
| 42.505747 | 155 | 0.631558 | from rest_framework import generics
from django.db.models import Q
from django.contrib.auth import get_user_model
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework import pagination
from .pagination import PostLimitOffsetPagination
from saleor.billpayment.models import BillPayment as Table
from .serializers import (
TableListSerializer,
)
User = get_user_model()
from django.db.models import Sum
from django.db.models.functions import TruncMonth
import datetime
now = datetime.datetime.now()
class ListAPIView(generics.ListAPIView):
serializer_class = TableListSerializer
permission_classes = (IsAuthenticatedOrReadOnly,)
pagination_class = PostLimitOffsetPagination
def get_serializer_context(self):
try:
current_year = Table.objects.last().bill.month.year
except:
current_year = now.year
return {"date": None, 'request': self.request, 'current_year':current_year}
def list(self, request, *args, **kwargs):
response = super(ListAPIView, self).list(request, args, kwargs)
try:
current_year = Table.objects.last().bill.month.year
except:
current_year = now.year
if self.request.GET.get('property'):
search_query = self.request.GET.get('property')
query_set = Table.objects.filter(room__name__icontains=str(search_query))
try:
response.data['property'] = query_set.first().room.name
except:
response.data['property'] = self.request.GET.get('property')
else:
query_set = Table.objects.all()
response.data['property'] = ''
response.data['period'] = current_year
queryset = query_set.\
filter(bill__month__year=str(current_year)).\
exclude(tax__exact='-1').annotate(month=TruncMonth('bill__month')).\
values('month').annotate(total_amount=Sum('amount')).annotate(total_tax=Sum('tax')).values('month', 'total_amount', 'total_tax', 'room__name')
totalTax = queryset.aggregate(Sum('total_tax'))["total_tax__sum"]
response.data['totalTax'] = totalTax
totalAmount = queryset.aggregate(Sum('total_amount'))["total_amount__sum"]
response.data['totalAmount'] = totalAmount
queryset_all = query_set.exclude(tax__exact='-1').\
annotate(month=TruncMonth('bill__month'))\
.values('month').annotate(total_amount=Sum('amount')).annotate(total_tax=Sum('tax')).values('month', 'total_amount', 'total_tax', 'room__name')
if self.request.GET.get('month_from') and self.request.GET.get('month_to'):
month_from = self.request.GET.get('month_from')
month_to = self.request.GET.get('month_to')
queryset = queryset_all.filter(bill__month__range=[str(month_from), str(month_to)])
totalTax = queryset.aggregate(Sum('total_tax'))["total_tax__sum"]
response.data['totalTax'] = totalTax
totalAmount = queryset.aggregate(Sum('total_amount'))["total_amount__sum"]
response.data['totalAmount'] = totalAmount
if self.request.GET.get('month') and self.request.GET.get('year'):
month = self.request.GET.get('month')
year = self.request.GET.get('year')
queryset = queryset_all.filter(bill__month__month=str(month),
bill__month__year=str(year))
totalTax = queryset.aggregate(Sum('total_tax'))["total_tax__sum"]
response.data['totalTax'] = totalTax
totalAmount = queryset.aggregate(Sum('total_amount'))["total_amount__sum"]
response.data['totalAmount'] = totalAmount
if self.request.GET.get('year') and not self.request.GET.get('month'):
year = self.request.GET.get('year')
queryset = queryset_all.filter(bill__month__year=str(year))
totalTax = queryset.aggregate(Sum('total_tax'))["total_tax__sum"]
response.data['totalTax'] = totalTax
totalAmount = queryset.aggregate(Sum('total_amount'))["total_amount__sum"]
response.data['totalAmount'] = totalAmount
page_size = 'page_size'
if self.request.GET.get(page_size):
pagination.PageNumberPagination.page_size = self.request.GET.get(page_size)
else:
pagination.PageNumberPagination.page_size = 10
if not totalTax:
response.data['totalTax'] = '0.00'
if not totalAmount:
response.data['totalAmount'] = '0.00'
return response
def get_queryset(self, *args, **kwargs):
try:
current_year = Table.objects.last().bill.month.year
except:
current_year = now.year
if self.request.GET.get('property'):
search_query = self.request.GET.get('property')
query_set = Table.objects.filter(room__name__icontains=str(search_query))
else:
query_set = Table.objects.all()
current_year_queryset = query_set.\
filter(bill__month__year=str(current_year)).\
exclude(tax__exact='0').annotate(month=TruncMonth('bill__month')).\
values('month').annotate(total_amount=Sum('amount')).\
annotate(total_tax=Sum('tax')).\
values('month', 'total_amount', 'total_tax', 'room__name')
queryset_all = query_set.exclude(tax__exact='0').\
annotate(month=TruncMonth('bill__month'))\
.values('month').annotate(total_amount=Sum('amount')).\
annotate(total_tax=Sum('tax')).\
values('month', 'total_amount', 'total_tax', 'room__name')
queryset = current_year_queryset
if self.request.GET.get('month_from') and self.request.GET.get('month_to'):
month_from = self.request.GET.get('month_from')
month_to = self.request.GET.get('month_to')
queryset = queryset_all.filter(bill__month__range=[str(month_from), str(month_to)])
if self.request.GET.get('month') and self.request.GET.get('year'):
month = self.request.GET.get('month')
year = self.request.GET.get('year')
queryset = queryset_all.filter(bill__month__month=str(month),
bill__month__year=str(year))
if self.request.GET.get('year') and not self.request.GET.get('month'):
year = self.request.GET.get('year')
queryset = queryset_all.filter(bill__month__year=str(year))
page_size = 'page_size'
if self.request.GET.get(page_size):
pagination.PageNumberPagination.page_size = self.request.GET.get(page_size)
else:
pagination.PageNumberPagination.page_size = 10
finalqueryset = queryset.values('month', 'total_amount', 'total_tax', 'room__name', 'bill__billtype__name')
for i in queryset:
i['service'] = 0
for j in finalqueryset:
if j['room__name'] == i['room__name'] and j['bill__billtype__name'] == 'Service':
i['service'] += j['total_amount']
i['rents'] = i['total_amount'] - i['service']
return queryset
| true | true |
1c3b64bc4bf2b7569679a67aa99de3fa72d31013 | 777 | py | Python | greeter_server.py | mbchristoff/grpc-hello-world | c49bad4a650f783b12ad277c7856ecf63d318af0 | [
"Apache-2.0"
] | null | null | null | greeter_server.py | mbchristoff/grpc-hello-world | c49bad4a650f783b12ad277c7856ecf63d318af0 | [
"Apache-2.0"
] | null | null | null | greeter_server.py | mbchristoff/grpc-hello-world | c49bad4a650f783b12ad277c7856ecf63d318af0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from concurrent import futures
import time
import socket
import grpc
import helloworld_pb2
_ONE_DAY_IN_SECONDS = 60 * 60 * 24  # sleep quantum for the serve() idle loop
class Greeter(helloworld_pb2.GreeterServicer):
    """Servicer for the Greeter RPC service generated from helloworld.proto."""

    def SayHello(self, request, context):
        """Answer a HelloRequest with a greeting naming the serving host."""
        host = socket.gethostname()
        greeting = 'Hello, %s! Greetings from %s,' % (request.name, host)
        return helloworld_pb2.HelloReply(message=greeting)
def serve():
    """Run the Greeter server (insecure, port 50051) until interrupted.

    Blocks the main thread by sleeping in day-long increments; Ctrl-C
    triggers an immediate (0-second grace) shutdown.
    """
    greeter_server = helloworld_pb2.beta_create_Greeter_server(Greeter())
    greeter_server.add_insecure_port('[::]:50051')
    greeter_server.start()
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        greeter_server.stop(0)
# Script entry point: start the blocking gRPC server.
if __name__ == '__main__':
    serve()
| 22.852941 | 116 | 0.747748 |
from concurrent import futures
import time
import socket
import grpc
import helloworld_pb2
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Greeter(helloworld_pb2.GreeterServicer):
def SayHello(self, request, context):
return helloworld_pb2.HelloReply(message='Hello, %s! Greetings from %s,' % (request.name, socket.gethostname()))
def serve():
server = helloworld_pb2.beta_create_Greeter_server(Greeter())
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
| true | true |
1c3b65ac63df0e9f4aed422b094ec0c28e1f9ece | 2,200 | py | Python | waveforms/quantum/circuit/qlisp/utils.py | feihoo87/waveforms | d986852019206f18269a702f4dfbd17a78dc135a | [
"MIT"
] | 7 | 2020-08-10T12:07:52.000Z | 2021-11-11T08:40:07.000Z | waveforms/quantum/circuit/qlisp/utils.py | feihoo87/waveforms | d986852019206f18269a702f4dfbd17a78dc135a | [
"MIT"
] | null | null | null | waveforms/quantum/circuit/qlisp/utils.py | feihoo87/waveforms | d986852019206f18269a702f4dfbd17a78dc135a | [
"MIT"
] | null | null | null | from itertools import repeat
import numpy as np
def DD(qubit, t, gates, pos, f=0):
    """Generic dynamical-decoupling sequence on `qubit`.

    Starts and ends with an 'X/2' pulse; each gate in `gates` is placed at
    the fractional position pos[k] of the total free-evolution time `t`.
    A nonzero `f` appends a frame-rotation ('P', 2*pi*f*t) before the
    closing 'X/2'.
    """
    circuit = [('X/2', qubit)]
    for k, gate in enumerate(gates):
        # Gap since the previous pulse (or since the opening pulse for k == 0).
        gap = t * pos[0] if k == 0 else t * (pos[k] - pos[k - 1])
        circuit.append((('Delay', gap), qubit))
        circuit.append((gate, qubit))
    # Remaining free evolution after the last pulse (the whole of t if none).
    tail = t if len(pos) == 0 else t * (1 - pos[-1])
    circuit.append((('Delay', tail), qubit))
    if f != 0:
        circuit.append((('P', 2 * np.pi * f * t), qubit))
    circuit.append(('X/2', qubit))
    return circuit
def XY4(qubit, t, f=0):
    """XY-4 decoupling: X,Y,X,Y pulses at fractions 1/5..4/5 of `t`."""
    fractions = np.arange(1, 5) / 5
    return DD(qubit, t, ['X', 'Y'] * 2, fractions, f)
def XY8(qubit, t, f=0):
    """XY-8 decoupling: XYXY followed by its mirror YXYX, at 1/9..8/9 of `t`."""
    fractions = np.arange(1, 9) / 9
    half = ['X', 'Y', 'X', 'Y']
    return DD(qubit, t, half + half[::-1], fractions, f)
def XY16(qubit, t, f=0):
    """XY-16 decoupling: the XY-8 pulse pattern repeated twice, at 1/17..16/17."""
    fractions = np.arange(1, 17) / 17
    xy8_pattern = ['X', 'Y', 'X', 'Y', 'Y', 'X', 'Y', 'X']
    return DD(qubit, t, xy8_pattern * 2, fractions, f)
def UDD(qubit, n, t, f=0):
    """Uhrig DD: n Y pulses at the sin^2(pi*j/(2n+2)) positions, j=1..n."""
    j = np.arange(n) + 1
    positions = np.sin(np.pi * j / (2 * n + 2)) ** 2
    return DD(qubit, t, repeat('Y', times=n), positions, f)
def CPMG(qubit, n, t, f=0):
    """CPMG: n equally spaced Y pulses at (j - 1/2)/n, j=1..n."""
    j = np.arange(n) + 1
    return DD(qubit, t, repeat('Y', times=n), (j - 0.5) / n, f)
def CP(qubit, n, t, f=0):
    """Carr-Purcell: n equally spaced X pulses at (j - 1/2)/n, j=1..n."""
    j = np.arange(n) + 1
    return DD(qubit, t, repeat('X', times=n), (j - 0.5) / n, f)
def Ramsey(qubit, t, f=0):
    """Ramsey fringe: X/2, free evolution `t`, then a pi/2 rf pulse whose
    phase advances as 2*pi*f*t."""
    phase = 2 * np.pi * f * t
    return [
        ('X/2', qubit),
        (('Delay', t), qubit),
        (('rfUnitary', np.pi / 2, phase), qubit),
    ]
def SpinEcho(qubit, t, f=0):
    """Hahn echo: X/2, wait t/2, pi refocusing pulse (phase pi*f*t),
    wait t/2, X/2."""
    half_wait = t / 2
    echo_phase = np.pi * f * t
    return [
        ('X/2', qubit),
        (('Delay', half_wait), qubit),
        (('rfUnitary', np.pi, echo_phase), qubit),
        (('Delay', half_wait), qubit),
        ('X/2', qubit),
    ]
# The 21 two-pulse segments of the ALLXY calibration experiment.
_ALLXYSeq = [
    ('I', 'I'), ('X', 'X'), ('Y', 'Y'), ('X', 'Y'), ('Y', 'X'),
    ('X/2', 'I'), ('Y/2', 'I'), ('X/2', 'Y/2'), ('Y/2', 'X/2'),
    ('X/2', 'Y'), ('Y/2', 'X'), ('X', 'Y/2'), ('Y', 'X/2'),
    ('X/2', 'X'), ('X', 'X/2'), ('Y/2', 'Y'), ('Y', 'Y/2'),
    ('X', 'I'), ('Y', 'I'), ('X/2', 'X/2'), ('Y/2', 'Y/2'),
]
def ALLXY(qubit, i):
    """Return the i-th ALLXY segment (two pulses) applied to `qubit`."""
    assert 0 <= i < len(
        _ALLXYSeq), f"i={i} is out of range(0, {len(_ALLXYSeq)})"
    first, second = _ALLXYSeq[i]
    return [(first, qubit), (second, qubit)]
| 28.205128 | 77 | 0.419545 | from itertools import repeat
import numpy as np
def DD(qubit, t, gates, pos, f=0):
seq = [('X/2', qubit)]
i = 0
for gate in gates:
gap = t * (pos[i] - pos[i - 1]) if i > 0 else t * pos[0]
seq.append((('Delay', gap), qubit))
seq.append((gate, qubit))
i += 1
gap = t * (1 - pos[-1]) if len(pos) > 0 else t
seq.append((('Delay', gap), qubit))
if f != 0:
seq.append((('P', 2 * np.pi * f * t), qubit))
seq.append(('X/2', qubit))
return seq
def XY4(qubit, t, f=0):
pos = np.arange(1, 5) / 5
return DD(qubit, t, ['X', 'Y', 'X', 'Y'], pos, f)
def XY8(qubit, t, f=0):
pos = np.arange(1, 9) / 9
return DD(qubit, t, ['X', 'Y', 'X', 'Y', 'Y', 'X', 'Y', 'X'], pos, f)
def XY16(qubit, t, f=0):
pos = np.arange(1, 17) / 17
return DD(qubit, t, [
'X', 'Y', 'X', 'Y', 'Y', 'X', 'Y', 'X', 'X', 'Y', 'X', 'Y', 'Y', 'X',
'Y', 'X'
], pos, f)
def UDD(qubit, n, t, f=0):
j = np.arange(n) + 1
return DD(qubit, t, repeat('Y', times=n),
np.sin(np.pi * j / (2 * n + 2))**2, f)
def CPMG(qubit, n, t, f=0):
j = np.arange(n) + 1
return DD(qubit, t, repeat('Y', times=n), (j - 0.5) / n, f)
def CP(qubit, n, t, f=0):
j = np.arange(n) + 1
return DD(qubit, t, repeat('X', times=n), (j - 0.5) / n, f)
def Ramsey(qubit, t, f=0):
return [('X/2', qubit), (('Delay', t), qubit),
(('rfUnitary', np.pi / 2, 2 * np.pi * f * t), qubit)]
def SpinEcho(qubit, t, f=0):
return [('X/2', qubit), (('Delay', t / 2), qubit),
(('rfUnitary', np.pi, np.pi * f * t), qubit),
(('Delay', t / 2), qubit), ('X/2', qubit)]
_ALLXYSeq = [('I', 'I'), ('X', 'X'), ('Y', 'Y'), ('X', 'Y'), ('Y', 'X'),
('X/2', 'I'), ('Y/2', 'I'), ('X/2', 'Y/2'), ('Y/2', 'X/2'),
('X/2', 'Y'), ('Y/2', 'X'), ('X', 'Y/2'), ('Y', 'X/2'),
('X/2', 'X'), ('X', 'X/2'), ('Y/2', 'Y'), ('Y', 'Y/2'),
('X', 'I'), ('Y', 'I'), ('X/2', 'X/2'), ('Y/2', 'Y/2')]
def ALLXY(qubit, i):
assert 0 <= i < len(
_ALLXYSeq), f"i={i} is out of range(0, {len(_ALLXYSeq)})"
return [(gate, qubit) for gate in _ALLXYSeq[i]]
| true | true |
1c3b670d4bfa60482306031762c79099ed9a9b78 | 34 | py | Python | models/__init__.py | vanja/browserentropy | f541c1f29457f865cb2b8ecf84b1ab8f4b7fb243 | [
"MIT"
] | null | null | null | models/__init__.py | vanja/browserentropy | f541c1f29457f865cb2b8ecf84b1ab8f4b7fb243 | [
"MIT"
] | 1 | 2018-03-05T12:53:57.000Z | 2018-03-05T12:53:57.000Z | models/__init__.py | vanja/browserentropy | f541c1f29457f865cb2b8ecf84b1ab8f4b7fb243 | [
"MIT"
] | null | null | null | """Package for classes.
"""
| 6.8 | 24 | 0.5 | true | true | |
1c3b695591253e4ad35f6585380f0a804675b030 | 9,272 | py | Python | active_learning_dd/utils/generate_dissimilarity_matrix.py | gitter-lab/active-learning-drug-discovery | b24004a359037b3a1175a61c181ec231b711c797 | [
"MIT"
] | null | null | null | active_learning_dd/utils/generate_dissimilarity_matrix.py | gitter-lab/active-learning-drug-discovery | b24004a359037b3a1175a61c181ec231b711c797 | [
"MIT"
] | null | null | null | active_learning_dd/utils/generate_dissimilarity_matrix.py | gitter-lab/active-learning-drug-discovery | b24004a359037b3a1175a61c181ec231b711c797 | [
"MIT"
] | null | null | null | """
Script for generating the dissimilarity matrix.
csv_file_or_dir: specifies a single file or path with format of csv files to be loaded. e.g: /path/iter_{}.csv or /path/iter_*.csv.
output_dir: where to save the memmap file of the dissimilarity matrix.
feature_name: specifies the column name for features in the csv file.
cutoff: instances within this cutoff distance belong to the same cluster.
dist_function: distance function to use.
process: not used; can be ignored.
Usage:
python generate_dissimilarity_matrix.py \
--csv_file_or_dir=../../datasets/lc_clusters_cv_96/unlabeled_{}.csv \
--output_dir=../../datasets/ \
--feature_name="Morgan FP_2_1024" \
--cutoff=0.3 \
--dist_function=tanimoto_dissimilarity \
--process_count=4 \
--process_batch_size=2056
"""
from __future__ import print_function
import argparse
import pandas as pd
import numpy as np
import glob
import time
import pathlib
from multiprocessing import Process
from .data_utils import *
def get_features(csv_files_list, feature_name, index_name, tmp_dir, process_batch_size):
    """Materialize the bit-string feature column into a float16 memmap.

    First pass counts the rows and reads the fingerprint length; second pass
    writes each row of ``tmp_dir/X.dat`` at the position given by the CSV's
    ``index_name`` column (assumed to be a global 0-based row index — the
    original code relied on this too).

    Returns (n_instances, n_features).

    Fixes over the previous version:
      * removed the dead inner ``batch_i`` loop that recomputed unused
        row_start/row_end and redundantly re-assigned the same rows,
      * ``instances_per_file`` was appended once per *chunk* but indexed per
        *file*, so its per-file bookkeeping was wrong for multi-chunk files,
      * ``np.fromstring`` (deprecated for binary mode) replaced with
        ``np.frombuffer``.
    """
    # First pass: total row count and fingerprint length.
    n_instances = 0
    n_features = 0
    for csv_file in csv_files_list:
        for chunk in pd.read_csv(csv_file, chunksize=process_batch_size):
            n_instances += chunk.shape[0]
            n_features = len(chunk[feature_name].iloc[0])
    X = np.memmap(tmp_dir+'/X.dat', dtype='float16', mode='w+',
                  shape=(n_instances, n_features))
    # Second pass: decode each '0'/'1' string into a digit vector and place it
    # at the row given by the index column.
    for csv_file in csv_files_list:
        for chunk in pd.read_csv(csv_file, chunksize=process_batch_size):
            row_idx = chunk[index_name].values.astype('int64')
            X[row_idx, :] = np.vstack(
                [np.frombuffer(x.encode('ascii'), dtype=np.uint8) - ord('0')
                 for x in chunk[feature_name]]).astype(float)
    X.flush()
    return n_instances, n_features
"""
Function wrapper method for computing dissimilarity_matrix for a range of indices.
Used with multiprocessing.
"""
def compute_dissimilarity_matrix_wrapper(start_ind, end_ind,
n_instances, n_features,
tmp_dir, output_dir, dist_func,
process_id, process_batch_size):
X = np.memmap(tmp_dir+'/X.dat', dtype='float16', mode='r', shape=(n_instances, n_features))
dissimilarity_matrix = np.memmap(output_dir+'/dissimilarity_matrix_{}_{}.dat'.format(n_instances, n_instances),
dtype='float16', mode='r+', shape=(n_instances, n_instances))
dissimilarity_process_matrix = np.load(tmp_dir+'/dissimilarity_process_matrix.npy')[start_ind:end_ind]
for i in range(end_ind-start_ind):
start_time = time.time()
row_start, row_end, col_start, col_end = dissimilarity_process_matrix[i,:]
X_cols = X[col_start:col_end]
X_rows = X[row_start:row_end]
dist_col_row = dist_func(X_cols, X_rows, X_batch_size=process_batch_size//2, Y_batch_size=process_batch_size//2)
dist_col_row = dist_col_row.reshape(X_cols.shape[0], X_rows.shape[0])
dissimilarity_matrix[row_start:row_end, col_start:col_end] = dist_col_row.T
dissimilarity_matrix[col_start:col_end, row_start:row_end] = dist_col_row
end_time = time.time()
print('pid: {}, at {} of {}. time {} seconds.'.format(process_id, i, (end_ind-start_ind), (end_time-start_time)))
del dissimilarity_matrix
def compute_dissimilarity_matrix(csv_file_or_dir, output_dir, feature_name='Morgan FP_2_1024', dist_function='tanimoto_dissimilarity',
process_count=1, process_batch_size=2056, index_name='Index ID'):
num_files = len(glob.glob(csv_file_or_dir.format('*')))
csv_files_list = [csv_file_or_dir.format(i) for i in range(num_files)]
df_list = [pd.read_csv(csv_file) for csv_file in csv_files_list]
data_df = pd.concat(df_list)
# create tmp directory to store memmap arrays
tmp_dir = './tmp/'
pathlib.Path(tmp_dir).mkdir(parents=True, exist_ok=True)
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
n_instances, n_features = get_features(csv_files_list, feature_name, index_name, tmp_dir, process_batch_size)
dist_func = feature_dist_func_dict()[dist_function]
# compute_dissimilarity_matrix
print('Generating dissimilarity_matrix...')
start_time = time.time()
dissimilarity_matrix = np.memmap(output_dir+'/dissimilarity_matrix_{}_{}.dat'.format(n_instances, n_instances),
dtype='float16', mode='w+', shape=(n_instances, n_instances))
del dissimilarity_matrix
# precompute indices of slices for dissimilarity_matrix
examples_per_slice = n_instances//process_count
dissimilarity_process_matrix = []
row_batch_size = process_batch_size // 2
col_batch_size = process_batch_size // 2
num_slices = 0
for process_id in range(process_count):
start_ind = process_id*examples_per_slice
end_ind = (process_id+1)*examples_per_slice
if process_id == (process_count-1):
end_ind = n_instances
if start_ind >= n_instances:
break
num_cols = end_ind - start_ind
for batch_col_i in range(num_cols//col_batch_size + 1):
col_start = start_ind + batch_col_i*col_batch_size
col_end = min(end_ind, start_ind + (batch_col_i+1)*col_batch_size)
for batch_row_i in range(col_end//row_batch_size + 1):
row_start = batch_row_i*row_batch_size
row_end = min(col_end, (batch_row_i+1)*row_batch_size)
dissimilarity_process_matrix.append([row_start, row_end, col_start, col_end])
num_slices += 1
dissimilarity_process_matrix = np.array(dissimilarity_process_matrix)
np.save(tmp_dir+'/dissimilarity_process_matrix.npy', dissimilarity_process_matrix)
del dissimilarity_process_matrix
print(num_slices)
# distribute slices among processes
process_pool = []
slices_per_process = num_slices//process_count
for process_id in range(process_count):
start_ind = process_id*slices_per_process
end_ind = (process_id+1)*slices_per_process
if process_id == (process_count-1):
end_ind = num_slices
if start_ind >= num_slices:
break
process_pool.append(Process(target=compute_dissimilarity_matrix_wrapper, args=(start_ind, end_ind,
n_instances, n_features,
tmp_dir, output_dir, dist_func,
process_id, process_batch_size)))
process_pool[process_id].start()
for process in process_pool:
process.join()
process.terminate()
end_time = time.time()
total_time = (end_time-start_time)/3600.0
print('Done generating dissimilarity_matrix. Took {} hours'.format(total_time))
import shutil
shutil.rmtree(tmp_dir)
np.random.seed(1103)
if __name__ == '__main__':
# read args
parser = argparse.ArgumentParser()
parser.add_argument('--csv_file_or_dir', action="store", dest="csv_file_or_dir", required=True)
parser.add_argument('--output_dir', action="store", dest="output_dir", required=True)
parser.add_argument('--feature_name', default='Morgan FP_2_1024', action="store",
dest="feature_name", required=False)
parser.add_argument('--dist_function', default='tanimoto_dissimilarity', action="store",
dest="dist_function", required=False)
parser.add_argument('--process_count', type=int, default=1, action="store", dest="process_count", required=False)
parser.add_argument('--process_batch_size', type=int, default=2**17, action="store", dest="process_batch_size", required=False)
parser.add_argument('--index_name', default='Index ID', action="store", dest="index_name", required=False)
given_args = parser.parse_args()
csv_file_or_dir = given_args.csv_file_or_dir
output_dir = given_args.output_dir
feature_name = given_args.feature_name
dist_function = given_args.dist_function
process_count = given_args.process_count
process_batch_size = given_args.process_batch_size
index_name = given_args.index_name
compute_dissimilarity_matrix(csv_file_or_dir, output_dir, feature_name, dist_function,
process_count, process_batch_size, index_name) | 50.945055 | 206 | 0.663503 | from __future__ import print_function
import argparse
import pandas as pd
import numpy as np
import glob
import time
import pathlib
from multiprocessing import Process
from .data_utils import *
def get_features(csv_files_list, feature_name, index_name, tmp_dir, process_batch_size) :
instances_per_file = []
for f in csv_files_list:
for chunk in pd.read_csv(f, chunksize=process_batch_size):
instances_per_file.append(chunk.shape[0])
n_features = len(chunk[feature_name].iloc[0])
n_instances = np.sum(instances_per_file)
X = np.memmap(tmp_dir+'/X.dat', dtype='float16', mode='w+', shape=(n_instances, n_features))
chunksize = process_batch_size
for i, f in enumerate(csv_files_list):
for chunk in pd.read_csv(f, chunksize=chunksize):
for batch_i in range(instances_per_file[i]//chunksize + 1):
row_start = batch_i*chunksize
row_end = min(instances_per_file[i], (batch_i+1)*chunksize)
if i > 0:
row_start = np.sum(instances_per_file[:i]) + batch_i*chunksize
row_end = min(np.sum(instances_per_file[:(i+1)]), np.sum(instances_per_file[:i]) + (batch_i+1)*chunksize)
X[chunk[index_name].values.astype('int64'),:] = np.vstack([np.fromstring(x, 'u1') - ord('0') for x in chunk[feature_name]]).astype(float)
X.flush()
return n_instances, n_features
def compute_dissimilarity_matrix_wrapper(start_ind, end_ind,
n_instances, n_features,
tmp_dir, output_dir, dist_func,
process_id, process_batch_size):
X = np.memmap(tmp_dir+'/X.dat', dtype='float16', mode='r', shape=(n_instances, n_features))
dissimilarity_matrix = np.memmap(output_dir+'/dissimilarity_matrix_{}_{}.dat'.format(n_instances, n_instances),
dtype='float16', mode='r+', shape=(n_instances, n_instances))
dissimilarity_process_matrix = np.load(tmp_dir+'/dissimilarity_process_matrix.npy')[start_ind:end_ind]
for i in range(end_ind-start_ind):
start_time = time.time()
row_start, row_end, col_start, col_end = dissimilarity_process_matrix[i,:]
X_cols = X[col_start:col_end]
X_rows = X[row_start:row_end]
dist_col_row = dist_func(X_cols, X_rows, X_batch_size=process_batch_size//2, Y_batch_size=process_batch_size//2)
dist_col_row = dist_col_row.reshape(X_cols.shape[0], X_rows.shape[0])
dissimilarity_matrix[row_start:row_end, col_start:col_end] = dist_col_row.T
dissimilarity_matrix[col_start:col_end, row_start:row_end] = dist_col_row
end_time = time.time()
print('pid: {}, at {} of {}. time {} seconds.'.format(process_id, i, (end_ind-start_ind), (end_time-start_time)))
del dissimilarity_matrix
def compute_dissimilarity_matrix(csv_file_or_dir, output_dir, feature_name='Morgan FP_2_1024', dist_function='tanimoto_dissimilarity',
process_count=1, process_batch_size=2056, index_name='Index ID'):
num_files = len(glob.glob(csv_file_or_dir.format('*')))
csv_files_list = [csv_file_or_dir.format(i) for i in range(num_files)]
df_list = [pd.read_csv(csv_file) for csv_file in csv_files_list]
data_df = pd.concat(df_list)
tmp_dir = './tmp/'
pathlib.Path(tmp_dir).mkdir(parents=True, exist_ok=True)
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
n_instances, n_features = get_features(csv_files_list, feature_name, index_name, tmp_dir, process_batch_size)
dist_func = feature_dist_func_dict()[dist_function]
print('Generating dissimilarity_matrix...')
start_time = time.time()
dissimilarity_matrix = np.memmap(output_dir+'/dissimilarity_matrix_{}_{}.dat'.format(n_instances, n_instances),
dtype='float16', mode='w+', shape=(n_instances, n_instances))
del dissimilarity_matrix
examples_per_slice = n_instances//process_count
dissimilarity_process_matrix = []
row_batch_size = process_batch_size // 2
col_batch_size = process_batch_size // 2
num_slices = 0
for process_id in range(process_count):
start_ind = process_id*examples_per_slice
end_ind = (process_id+1)*examples_per_slice
if process_id == (process_count-1):
end_ind = n_instances
if start_ind >= n_instances:
break
num_cols = end_ind - start_ind
for batch_col_i in range(num_cols//col_batch_size + 1):
col_start = start_ind + batch_col_i*col_batch_size
col_end = min(end_ind, start_ind + (batch_col_i+1)*col_batch_size)
for batch_row_i in range(col_end//row_batch_size + 1):
row_start = batch_row_i*row_batch_size
row_end = min(col_end, (batch_row_i+1)*row_batch_size)
dissimilarity_process_matrix.append([row_start, row_end, col_start, col_end])
num_slices += 1
dissimilarity_process_matrix = np.array(dissimilarity_process_matrix)
np.save(tmp_dir+'/dissimilarity_process_matrix.npy', dissimilarity_process_matrix)
del dissimilarity_process_matrix
print(num_slices)
process_pool = []
slices_per_process = num_slices//process_count
for process_id in range(process_count):
start_ind = process_id*slices_per_process
end_ind = (process_id+1)*slices_per_process
if process_id == (process_count-1):
end_ind = num_slices
if start_ind >= num_slices:
break
process_pool.append(Process(target=compute_dissimilarity_matrix_wrapper, args=(start_ind, end_ind,
n_instances, n_features,
tmp_dir, output_dir, dist_func,
process_id, process_batch_size)))
process_pool[process_id].start()
for process in process_pool:
process.join()
process.terminate()
end_time = time.time()
total_time = (end_time-start_time)/3600.0
print('Done generating dissimilarity_matrix. Took {} hours'.format(total_time))
import shutil
shutil.rmtree(tmp_dir)
np.random.seed(1103)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--csv_file_or_dir', action="store", dest="csv_file_or_dir", required=True)
parser.add_argument('--output_dir', action="store", dest="output_dir", required=True)
parser.add_argument('--feature_name', default='Morgan FP_2_1024', action="store",
dest="feature_name", required=False)
parser.add_argument('--dist_function', default='tanimoto_dissimilarity', action="store",
dest="dist_function", required=False)
parser.add_argument('--process_count', type=int, default=1, action="store", dest="process_count", required=False)
parser.add_argument('--process_batch_size', type=int, default=2**17, action="store", dest="process_batch_size", required=False)
parser.add_argument('--index_name', default='Index ID', action="store", dest="index_name", required=False)
given_args = parser.parse_args()
csv_file_or_dir = given_args.csv_file_or_dir
output_dir = given_args.output_dir
feature_name = given_args.feature_name
dist_function = given_args.dist_function
process_count = given_args.process_count
process_batch_size = given_args.process_batch_size
index_name = given_args.index_name
compute_dissimilarity_matrix(csv_file_or_dir, output_dir, feature_name, dist_function,
process_count, process_batch_size, index_name) | true | true |
1c3b6a68ef1c98aebe02c2059f7150413d2f0201 | 1,007 | py | Python | tests/unit/cli/formatter_test.py | matthieudelaro/dockernut | 998f614c6ad018873f3b3aee58841c62e1b160da | [
"Apache-2.0"
] | 1 | 2019-11-04T06:52:35.000Z | 2019-11-04T06:52:35.000Z | tests/unit/cli/formatter_test.py | matthieudelaro/dockernut | 998f614c6ad018873f3b3aee58841c62e1b160da | [
"Apache-2.0"
] | 1 | 2021-03-26T00:26:31.000Z | 2021-03-26T00:26:31.000Z | tests/unit/cli/formatter_test.py | matthieudelaro/dockernut | 998f614c6ad018873f3b3aee58841c62e1b160da | [
"Apache-2.0"
] | 1 | 2019-11-04T06:52:37.000Z | 2019-11-04T06:52:37.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from compose.cli import colors
from compose.cli.formatter import ConsoleWarningFormatter
from tests import unittest
MESSAGE = 'this is the message'
def makeLogRecord(level):
return logging.LogRecord('name', level, 'pathame', 0, MESSAGE, (), None)
class ConsoleWarningFormatterTestCase(unittest.TestCase):
def setUp(self):
self.formatter = ConsoleWarningFormatter()
def test_format_warn(self):
output = self.formatter.format(makeLogRecord(logging.WARN))
expected = colors.yellow('WARNING') + ': '
assert output == expected + MESSAGE
def test_format_error(self):
output = self.formatter.format(makeLogRecord(logging.ERROR))
expected = colors.red('ERROR') + ': '
assert output == expected + MESSAGE
def test_format_info(self):
output = self.formatter.format(makeLogRecord(logging.INFO))
assert output == MESSAGE
| 27.972222 | 76 | 0.714002 | from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from compose.cli import colors
from compose.cli.formatter import ConsoleWarningFormatter
from tests import unittest
MESSAGE = 'this is the message'
def makeLogRecord(level):
return logging.LogRecord('name', level, 'pathame', 0, MESSAGE, (), None)
class ConsoleWarningFormatterTestCase(unittest.TestCase):
def setUp(self):
self.formatter = ConsoleWarningFormatter()
def test_format_warn(self):
output = self.formatter.format(makeLogRecord(logging.WARN))
expected = colors.yellow('WARNING') + ': '
assert output == expected + MESSAGE
def test_format_error(self):
output = self.formatter.format(makeLogRecord(logging.ERROR))
expected = colors.red('ERROR') + ': '
assert output == expected + MESSAGE
def test_format_info(self):
output = self.formatter.format(makeLogRecord(logging.INFO))
assert output == MESSAGE
| true | true |
1c3b6b134598894e4dda51d1cfcb8e9c4e2d366b | 5,495 | py | Python | TSP-VRP/TSPalgos.py | elifaydin00/ENS208-Introduction-To-Industrial-Engineering | f17932a773ed4c83d960c4a3657db50abb68c3a9 | [
"MIT"
] | 2 | 2021-06-11T22:19:36.000Z | 2021-10-04T13:40:46.000Z | TSP-VRP/TSPalgos.py | elifaydin00/ENS208-Introduction-To-Industrial-Engineering | f17932a773ed4c83d960c4a3657db50abb68c3a9 | [
"MIT"
] | null | null | null | TSP-VRP/TSPalgos.py | elifaydin00/ENS208-Introduction-To-Industrial-Engineering | f17932a773ed4c83d960c4a3657db50abb68c3a9 | [
"MIT"
] | 1 | 2021-10-04T13:40:48.000Z | 2021-10-04T13:40:48.000Z | '''
ENS 208 - Introduction to IE
Function definitions for nearest neighbor, savings, and 2-opt algorithms.
'''
from pqdict import pqdict
# =============================================================================
def nearest_neighbor(nodes, origin, d):
'''
Constructs a TSP solution using the nearest neighbor algorithm, NNH,
for a given set of nodes, the associated pairwise distance matrix-d,
and the origin.
'''
# Tour should start at the origin
tour = [origin]
# Initialize the tour length
tour_length = 0
# If the origin is not in nodes, add it to nodes
if origin not in nodes:
nodes.append(origin)
# Nearest neighbor search until all nodes are visited
while len(tour) < len(nodes):
dist, next_node = min((d[tour[-1]][i], i) for i in nodes if i not in tour)
tour_length += dist
tour.append(next_node)
print('Added', next_node, 'to the tour!')
# Tour should end at the origin
tour_length += d[tour[-1]][origin]
tour.append(origin)
# Round the result to 2 decimals to avoid floating point representation errors
tour_length = round(tour_length, 2)
# Return the resulting tour and its length as a tuple
return tour, tour_length
# =============================================================================
def savings(nodes, origin, d):
'''
Constructs a TSP solution using the savings method for a given set/list of
nodes, their pairwise distances-d, and the origin.
'''
# Set of customer nodes (i.e. nodes other than the origin)
customers = {i for i in nodes if i != origin}
# Initialize out-and-back tours from the origin to every other node
tours = {(i,i): [origin, i, origin] for i in customers}
# Compute savings
savings = {(i, j): round(d[i][origin] + d[origin][j] - d[i][j], 2)
for i in customers for j in customers if j != i}
# Define a priority queue dictionary to get a pair of nodes (i,j) which yields
# the maximum savings
pq = pqdict(savings, reverse = True)
# Merge subtours until obtaining a TSP tour
while len(tours) > 1:
i,j = pq.pop()
print((i, j))
# Outer loop
break_outer = False
for t1 in tours:
for t2 in tours.keys()-{t1}:
if t1[1] == i and t2[0] == j:
print('Merging', tours[t1], 'and', tours[t2])
tours[(t1[0], t2[1])] = tours[t1][:-1] + tours[t2][1:]
del tours[t1], tours[t2]
print(tours)
break_outer = True
break
if break_outer:
break
else:
print('No merging opportunities can be found for', (i,j))
# Final tours dictionary (involves a single tour, which is the TSP tour)
print(tours)
# Compute tour length
tour_length = 0
for tour in tours.values():
for i in range(len(tour)-1):
tour_length += d[tour[i]][tour[i+1]]
# Round the result to 2 decimals to avoid floating point representation errors
tour_length = round(tour_length, 2)
# Return the resulting tour and its length as a tuple
return tour, tour_length
# =============================================================================
def two_opt(tour, tour_length, d):
'''
Improves a given TSP solution using the 2-opt algorithm. Note: This function
applies 2opt correctly only when the distance matrix is symmetric. In case
of asymmetric distances, one needs to update the cost difference calculation
incurred by swapping.
'''
current_tour, current_tour_length = tour, tour_length
best_tour, best_tour_length = current_tour, current_tour_length
solution_improved = True
while solution_improved:
print()
print('Attempting to improve the tour', current_tour,
'with length', current_tour_length)
solution_improved = False
for i in range(1, len(current_tour)-2):
for j in range(i+1, len(current_tour)-1):
difference = round((d[current_tour[i-1]][current_tour[j]]
+ d[current_tour[i]][current_tour[j+1]]
- d[current_tour[i-1]][current_tour[i]]
- d[current_tour[j]][current_tour[j+1]]), 2)
print('Cost difference due to swapping', current_tour[i], 'and',
current_tour[j], 'is:', difference)
if current_tour_length + difference < best_tour_length:
print('Found an improving move! Updating the best tour...')
best_tour = current_tour[:i] + list(reversed(current_tour[i:j+1])) + current_tour[j+1:]
best_tour_length = round(current_tour_length + difference, 2)
print('Improved tour is:', best_tour, 'with length',
best_tour_length)
solution_improved = True
current_tour, current_tour_length = best_tour, best_tour_length
# Return the resulting tour and its length as a tuple
return best_tour, best_tour_length
# ============================================================================= | 38.697183 | 107 | 0.549227 |
from pqdict import pqdict
def nearest_neighbor(nodes, origin, d):
tour = [origin]
tour_length = 0
if origin not in nodes:
nodes.append(origin)
while len(tour) < len(nodes):
dist, next_node = min((d[tour[-1]][i], i) for i in nodes if i not in tour)
tour_length += dist
tour.append(next_node)
print('Added', next_node, 'to the tour!')
tour_length += d[tour[-1]][origin]
tour.append(origin)
tour_length = round(tour_length, 2)
return tour, tour_length
def savings(nodes, origin, d):
customers = {i for i in nodes if i != origin}
tours = {(i,i): [origin, i, origin] for i in customers}
savings = {(i, j): round(d[i][origin] + d[origin][j] - d[i][j], 2)
for i in customers for j in customers if j != i}
pq = pqdict(savings, reverse = True)
while len(tours) > 1:
i,j = pq.pop()
print((i, j))
break_outer = False
for t1 in tours:
for t2 in tours.keys()-{t1}:
if t1[1] == i and t2[0] == j:
print('Merging', tours[t1], 'and', tours[t2])
tours[(t1[0], t2[1])] = tours[t1][:-1] + tours[t2][1:]
del tours[t1], tours[t2]
print(tours)
break_outer = True
break
if break_outer:
break
else:
print('No merging opportunities can be found for', (i,j))
print(tours)
tour_length = 0
for tour in tours.values():
for i in range(len(tour)-1):
tour_length += d[tour[i]][tour[i+1]]
tour_length = round(tour_length, 2)
return tour, tour_length
def two_opt(tour, tour_length, d):
current_tour, current_tour_length = tour, tour_length
best_tour, best_tour_length = current_tour, current_tour_length
solution_improved = True
while solution_improved:
print()
print('Attempting to improve the tour', current_tour,
'with length', current_tour_length)
solution_improved = False
for i in range(1, len(current_tour)-2):
for j in range(i+1, len(current_tour)-1):
difference = round((d[current_tour[i-1]][current_tour[j]]
+ d[current_tour[i]][current_tour[j+1]]
- d[current_tour[i-1]][current_tour[i]]
- d[current_tour[j]][current_tour[j+1]]), 2)
print('Cost difference due to swapping', current_tour[i], 'and',
current_tour[j], 'is:', difference)
if current_tour_length + difference < best_tour_length:
print('Found an improving move! Updating the best tour...')
best_tour = current_tour[:i] + list(reversed(current_tour[i:j+1])) + current_tour[j+1:]
best_tour_length = round(current_tour_length + difference, 2)
print('Improved tour is:', best_tour, 'with length',
best_tour_length)
solution_improved = True
current_tour, current_tour_length = best_tour, best_tour_length
return best_tour, best_tour_length
| true | true |
1c3b6db6915f10b4aa96caac0847d740e301d886 | 3,611 | py | Python | samples/contrib/pytorch-samples/bert/wrapper.py | hwk42/pipelines | c89ed71cf6339cdcdd957d4dca4b1f32c10db9c9 | [
"Apache-2.0"
] | 1 | 2021-08-23T19:09:56.000Z | 2021-08-23T19:09:56.000Z | samples/contrib/pytorch-samples/bert/wrapper.py | hwk42/pipelines | c89ed71cf6339cdcdd957d4dca4b1f32c10db9c9 | [
"Apache-2.0"
] | 2 | 2021-06-01T10:02:51.000Z | 2021-06-07T07:19:14.000Z | samples/contrib/pytorch-samples/bert/wrapper.py | hwk42/pipelines | c89ed71cf6339cdcdd957d4dca4b1f32c10db9c9 | [
"Apache-2.0"
] | 1 | 2022-03-04T14:26:55.000Z | 2022-03-04T14:26:55.000Z | # !/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=arguments-differ
# pylint: disable=unused-argument
# pylint: disable=abstract-method
"""Bert Wrapper."""
import torch
import torch.nn as nn
import torch.nn.functional as F
class AGNewsmodelWrapper(nn.Module):
"""Warapper Class."""
def __init__(self, model):
super( # pylint: disable=super-with-arguments
AGNewsmodelWrapper, self
).__init__()
self.model = model
def compute_bert_outputs( # pylint: disable=no-self-use
self, model_bert, embedding_input, attention_mask=None, head_mask=None
):
"""Computes Bert Outputs.
Args:
model_bert : the bert model
embedding_input : input for bert embeddings.
attention_mask : attention mask
head_mask : head mask
Returns:
output : the bert output
"""
if attention_mask is None:
attention_mask = torch.ones( # pylint: disable=no-member
embedding_input.shape[0], embedding_input.shape[1]
).to(embedding_input)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(model_bert.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = (
head_mask.unsqueeze(0)
.unsqueeze(0)
.unsqueeze(-1)
.unsqueeze(-1)
)
head_mask = head_mask.expand(
model_bert.config.num_hidden_layers, -1, -1, -1, -1
)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(model_bert.parameters()).dtype
) # switch to fload if need + fp16 compatibility
else:
head_mask = [None] * model_bert.config.num_hidden_layers
encoder_outputs = model_bert.encoder(
embedding_input, extended_attention_mask, head_mask=head_mask
)
sequence_output = encoder_outputs[0]
pooled_output = model_bert.pooler(sequence_output)
outputs = (
sequence_output,
pooled_output,
) + encoder_outputs[1:]
return outputs
def forward(self, embeddings):
"""Forward function.
Args:
embeddings : bert embeddings.
"""
outputs = self.compute_bert_outputs(self.model.bert_model, embeddings)
pooled_output = outputs[1]
output = F.relu(self.model.fc1(pooled_output))
output = self.model.drop(output)
output = self.model.out(output)
return output
| 35.752475 | 78 | 0.61368 |
import torch
import torch.nn as nn
import torch.nn.functional as F
class AGNewsmodelWrapper(nn.Module):
def __init__(self, model):
super(
AGNewsmodelWrapper, self
).__init__()
self.model = model
def compute_bert_outputs(
self, model_bert, embedding_input, attention_mask=None, head_mask=None
):
if attention_mask is None:
attention_mask = torch.ones(
embedding_input.shape[0], embedding_input.shape[1]
).to(embedding_input)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(model_bert.parameters()).dtype
)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = (
head_mask.unsqueeze(0)
.unsqueeze(0)
.unsqueeze(-1)
.unsqueeze(-1)
)
head_mask = head_mask.expand(
model_bert.config.num_hidden_layers, -1, -1, -1, -1
)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
)
head_mask = head_mask.to(
dtype=next(model_bert.parameters()).dtype
)
else:
head_mask = [None] * model_bert.config.num_hidden_layers
encoder_outputs = model_bert.encoder(
embedding_input, extended_attention_mask, head_mask=head_mask
)
sequence_output = encoder_outputs[0]
pooled_output = model_bert.pooler(sequence_output)
outputs = (
sequence_output,
pooled_output,
) + encoder_outputs[1:]
return outputs
def forward(self, embeddings):
outputs = self.compute_bert_outputs(self.model.bert_model, embeddings)
pooled_output = outputs[1]
output = F.relu(self.model.fc1(pooled_output))
output = self.model.drop(output)
output = self.model.out(output)
return output
| true | true |
1c3b6e7d260fb2296297ff4d554f3d2741b0f933 | 207 | py | Python | plaid_project/plaid_app/signals.py | reetikaSR/PlaidProject | 904bd7fd3412a4b5149aae899abcf8794bebba81 | [
"MIT"
] | null | null | null | plaid_project/plaid_app/signals.py | reetikaSR/PlaidProject | 904bd7fd3412a4b5149aae899abcf8794bebba81 | [
"MIT"
] | null | null | null | plaid_project/plaid_app/signals.py | reetikaSR/PlaidProject | 904bd7fd3412a4b5149aae899abcf8794bebba81 | [
"MIT"
] | null | null | null | import django.dispatch
fetch_transactions = django.dispatch.Signal(providing_args=['access_token', 'user_id'])
fetch_accounts = django.dispatch.Signal(providing_args=['access_token', 'user_id', 'item_id']) | 41.4 | 94 | 0.797101 | import django.dispatch
fetch_transactions = django.dispatch.Signal(providing_args=['access_token', 'user_id'])
fetch_accounts = django.dispatch.Signal(providing_args=['access_token', 'user_id', 'item_id']) | true | true |
1c3b6f3dec6348c1f3e5a4afd8ab545d45f2c423 | 945 | py | Python | full_cost/implant/migrations/0006_auto_20200422_1614.py | CEMES-CNRS/full_cost_git | 600409b49db123db82e7f66462395294dde320ce | [
"CECILL-B"
] | null | null | null | full_cost/implant/migrations/0006_auto_20200422_1614.py | CEMES-CNRS/full_cost_git | 600409b49db123db82e7f66462395294dde320ce | [
"CECILL-B"
] | null | null | null | full_cost/implant/migrations/0006_auto_20200422_1614.py | CEMES-CNRS/full_cost_git | 600409b49db123db82e7f66462395294dde320ce | [
"CECILL-B"
] | null | null | null | # Generated by Django 2.2.8 on 2020-04-22 14:14
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('implant', '0005_auto_20200421_1743'),
]
operations = [
migrations.AlterField(
model_name='historicalrecord',
name='time_from',
field=models.TimeField(default=datetime.time(0, 0)),
),
migrations.AlterField(
model_name='historicalrecord',
name='time_to',
field=models.TimeField(default=datetime.time(0, 0)),
),
migrations.AlterField(
model_name='record',
name='time_from',
field=models.TimeField(default=datetime.time(0, 0)),
),
migrations.AlterField(
model_name='record',
name='time_to',
field=models.TimeField(default=datetime.time(0, 0)),
),
]
| 27 | 64 | 0.574603 |
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('implant', '0005_auto_20200421_1743'),
]
operations = [
migrations.AlterField(
model_name='historicalrecord',
name='time_from',
field=models.TimeField(default=datetime.time(0, 0)),
),
migrations.AlterField(
model_name='historicalrecord',
name='time_to',
field=models.TimeField(default=datetime.time(0, 0)),
),
migrations.AlterField(
model_name='record',
name='time_from',
field=models.TimeField(default=datetime.time(0, 0)),
),
migrations.AlterField(
model_name='record',
name='time_to',
field=models.TimeField(default=datetime.time(0, 0)),
),
]
| true | true |
1c3b6fb390c84f75c083897c59cc206166c7975c | 271 | py | Python | src/app_messages/views.py | abairo/presentation_poetry_docker | b22785eb567ecf81e00ce3762bcc074e8c0de9c7 | [
"MIT"
] | null | null | null | src/app_messages/views.py | abairo/presentation_poetry_docker | b22785eb567ecf81e00ce3762bcc074e8c0de9c7 | [
"MIT"
] | null | null | null | src/app_messages/views.py | abairo/presentation_poetry_docker | b22785eb567ecf81e00ce3762bcc074e8c0de9c7 | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from .models import Message
def current_message(request):
message = Message.objects.all().last()
html = "<html><body><h1>%s</h1>.</body></html>" % message.text if message else 'Nenhuma mensagem'
return HttpResponse(html) | 33.875 | 101 | 0.715867 | from django.http import HttpResponse
from .models import Message
def current_message(request):
message = Message.objects.all().last()
html = "<html><body><h1>%s</h1>.</body></html>" % message.text if message else 'Nenhuma mensagem'
return HttpResponse(html) | true | true |
1c3b70ccf0f780ce1b5004dd03b412b1672e00d7 | 6,345 | py | Python | attachments/matrix_util.py | 11wi/11wi.github.io | c89f6999ece59cba3ba5bdfd378028adcbad5ee3 | [
"CC-BY-4.0"
] | null | null | null | attachments/matrix_util.py | 11wi/11wi.github.io | c89f6999ece59cba3ba5bdfd378028adcbad5ee3 | [
"CC-BY-4.0"
] | 5 | 2021-03-30T13:59:01.000Z | 2022-02-26T10:25:24.000Z | attachments/matrix_util.py | 11wi/11wi.github.io | c89f6999ece59cba3ba5bdfd378028adcbad5ee3 | [
"CC-BY-4.0"
] | null | null | null | import numpy as _np
from multiprocessing import RawArray as _RawArray
from multiprocessing import Pool as _Pool
from functools import partial as _partial
from numba import njit
def nonzero(array):
    """Return the indices of the non-zero entries of a 1-D array."""
    return _np.nonzero(array)[0]
def inverse(mat):
    """Invert ``mat``, returning the result as a C-contiguous array."""
    inv_mat = _np.linalg.inv(mat)
    return _np.ascontiguousarray(inv_mat)
def cholesky(mat):
return _np.linalg.cholesky(mat)
def normal(mu=0, sd=1, size=1):
if isinstance(size, tuple):
size = [int(i) for i in size]
else:
size = int(size)
return _np.random.normal(loc=mu, scale=sd, size=size)
def wishart(nu, scale):
"""
:param nu: df
:param scale: scale matrix (must be positive definite)
:return: covariance matrix (symmetric positive definite)
referred from
https://gist.github.com/jfrelinger/2638485
http://thaines.com/content/misc/gaussian_conjugate_prior_cheat_sheet.pdf
"""
dim = scale.shape[1]
chol = cholesky(scale)
Lambda = _np.zeros((dim, dim))
for i in range(dim):
for j in range(i + 1):
if i == j:
Lambda[i, j] = _np.random.chisquare(nu - (i + 1) + 1) ** .5
else:
Lambda[i, j] = normal(0, 1, 1).item()
return chol @ Lambda @ Lambda.T @ chol.T
def mean_latent(latent_u):
u_bar = _np.sum(latent_u, axis=0).reshape(-1, 1) / latent_u.shape[0]
return u_bar
def cov_latent(latent_u):
s_bar = _np.cov(latent_u, rowvar=False, bias=True)
return s_bar
def user_based_item_rating(n, rating_matrix):
items = nonzero(rating_matrix[n, :])
rating = rating_matrix[n, :][items].reshape(-1, 1)
return items, rating
def item_based_user_rating(n, rating_matrix):
users = nonzero(rating_matrix[:, n])
rating = rating_matrix[:, n][users].reshape(-1, 1)
return users, rating
def update_hyperparam(latent_u, mu0, w0, b0):
n_sample = latent_u.shape[0]
u_bar = mean_latent(latent_u)
s_bar = cov_latent(latent_u)
mu0_star = ((b0 * mu0) + (n_sample * u_bar)) / (b0 + n_sample)
w0_u_inv = inverse(w0)
w0_star = inverse(w0_u_inv + n_sample * s_bar + (b0 * n_sample) / (b0 + n_sample) * (mu0 - u_bar) @ (mu0 - u_bar).T)
return mu0_star, w0_star
def sampling_params(n_latent, n_sample, mu0_star, w0_star, b0):
_sigma_u = wishart(nu=n_latent + n_sample, scale=w0_star)
sigma_u = (_sigma_u + _sigma_u.T) / 2
lambda_u = inverse(b0 + n_sample * sigma_u)
mu_u = mu0_star + cholesky(lambda_u) @ normal(size=(n_latent, 1))
return mu_u, lambda_u, sigma_u
def _sampling_latent(latent_v_i, mu_u, lambda_u, sigma_u, target_ratings, n_latent, b0):
lambda_star_u = inverse(sigma_u + b0 * latent_v_i.T @ latent_v_i)
mean_star_u = lambda_star_u @ (b0 * latent_v_i.T @ target_ratings + lambda_u @ mu_u)
posterior_sample_u = mean_star_u + cholesky(lambda_star_u) @ normal(size=(n_latent, 1))
return posterior_sample_u.reshape(-1)
def sampling_latent_user(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0):
find_user = user_based_item_rating(each, rating_matrix)
target_items, target_ratings = find_user[0], find_user[1]
latent_v_i = latent_v[target_items]
each_user_latent = _sampling_latent(latent_v_i, mu_u, lambda_u, sigma_u, target_ratings, n_latent, b0)
return each_user_latent
def sampling_latent_item(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0):
find_item = item_based_user_rating(each, rating_matrix)
target_user, target_ratings = find_item[0], find_item[1]
latent_v_i = latent_v[target_user]
each_item_latent = _sampling_latent(latent_v_i, mu_u, lambda_u, sigma_u, target_ratings, n_latent, b0)
return each_item_latent
_parallel_env = {}
def _init_parallel(shared_array, latent_shape):
_parallel_env['latent'] = shared_array
_parallel_env['shape'] = latent_shape
def _init_args(n_sample_u, n_latent):
shape_latent = (n_sample_u, n_latent)
shared_latent = _RawArray('d', int(n_sample_u * n_latent))
return shape_latent, shared_latent
def _pool_map(n_core, parallel_function, n_sample_u, shape_latent, shared_latent):
with _Pool(processes=n_core, initializer=_init_parallel, initargs=(shared_latent, shape_latent)) as pool:
pool.map(parallel_function, iterable=_np.arange(n_sample_u))
latent = _np.frombuffer(shared_latent, dtype=_np.float64).reshape(shape_latent)
return latent
def parallel_sampling_latent_user(n_core, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_sample_u,
n_latent, b0):
"""
https://research.wmz.ninja/articles/2018/03/on-sharing-large-arrays-when-using-pythons-multiprocessing.html
"""
shape_latent, shared_latent = _init_args(n_sample_u, n_latent)
f = _partial(_parallel_sampling_latent_user, mu_u=mu_u, lambda_u=lambda_u, sigma_u=sigma_u, latent_v=latent_v,
rating_matrix=rating_matrix, n_latent=n_latent, b0=b0)
latent = _pool_map(n_core, f, n_sample_u, shape_latent, shared_latent)
return latent
def parallel_sampling_latent_item(n_core, mu_v, lambda_v, sigma_v, latent_u, rating_matrix, n_sample_v,
n_latent, b0):
"""
https://research.wmz.ninja/articles/2018/03/on-sharing-large-arrays-when-using-pythons-multiprocessing.html
"""
shape_latent, shared_latent = _init_args(n_sample_v, n_latent)
f = _partial(_parallel_sampling_latent_item, mu_v=mu_v, lambda_v=lambda_v, sigma_v=sigma_v, latent_u=latent_u,
rating_matrix=rating_matrix, n_latent=n_latent, b0=b0)
latent = _pool_map(n_core, f, n_sample_v, shape_latent, shared_latent)
return latent
def _parallel_sampling_latent_user(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0):
updated = sampling_latent_user(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0)
latent = _np.frombuffer(_parallel_env['latent']).reshape(_parallel_env['shape'])
latent[each, :] = updated
def _parallel_sampling_latent_item(each, mu_v, lambda_v, sigma_v, latent_u, rating_matrix, n_latent, b0):
updated = sampling_latent_item(each, mu_v, lambda_v, sigma_v, latent_u, rating_matrix, n_latent, b0)
latent = _np.frombuffer(_parallel_env['latent']).reshape(_parallel_env['shape'])
latent[each, :] = updated
| 36.889535 | 120 | 0.709535 | import numpy as _np
from multiprocessing import RawArray as _RawArray
from multiprocessing import Pool as _Pool
from functools import partial as _partial
from numba import njit
def nonzero(array):
index_array = _np.nonzero(array)[0]
return index_array
def inverse(mat):
return _np.ascontiguousarray(_np.linalg.inv(mat))
def cholesky(mat):
return _np.linalg.cholesky(mat)
def normal(mu=0, sd=1, size=1):
if isinstance(size, tuple):
size = [int(i) for i in size]
else:
size = int(size)
return _np.random.normal(loc=mu, scale=sd, size=size)
def wishart(nu, scale):
dim = scale.shape[1]
chol = cholesky(scale)
Lambda = _np.zeros((dim, dim))
for i in range(dim):
for j in range(i + 1):
if i == j:
Lambda[i, j] = _np.random.chisquare(nu - (i + 1) + 1) ** .5
else:
Lambda[i, j] = normal(0, 1, 1).item()
return chol @ Lambda @ Lambda.T @ chol.T
def mean_latent(latent_u):
u_bar = _np.sum(latent_u, axis=0).reshape(-1, 1) / latent_u.shape[0]
return u_bar
def cov_latent(latent_u):
s_bar = _np.cov(latent_u, rowvar=False, bias=True)
return s_bar
def user_based_item_rating(n, rating_matrix):
items = nonzero(rating_matrix[n, :])
rating = rating_matrix[n, :][items].reshape(-1, 1)
return items, rating
def item_based_user_rating(n, rating_matrix):
users = nonzero(rating_matrix[:, n])
rating = rating_matrix[:, n][users].reshape(-1, 1)
return users, rating
def update_hyperparam(latent_u, mu0, w0, b0):
n_sample = latent_u.shape[0]
u_bar = mean_latent(latent_u)
s_bar = cov_latent(latent_u)
mu0_star = ((b0 * mu0) + (n_sample * u_bar)) / (b0 + n_sample)
w0_u_inv = inverse(w0)
w0_star = inverse(w0_u_inv + n_sample * s_bar + (b0 * n_sample) / (b0 + n_sample) * (mu0 - u_bar) @ (mu0 - u_bar).T)
return mu0_star, w0_star
def sampling_params(n_latent, n_sample, mu0_star, w0_star, b0):
_sigma_u = wishart(nu=n_latent + n_sample, scale=w0_star)
sigma_u = (_sigma_u + _sigma_u.T) / 2
lambda_u = inverse(b0 + n_sample * sigma_u)
mu_u = mu0_star + cholesky(lambda_u) @ normal(size=(n_latent, 1))
return mu_u, lambda_u, sigma_u
def _sampling_latent(latent_v_i, mu_u, lambda_u, sigma_u, target_ratings, n_latent, b0):
lambda_star_u = inverse(sigma_u + b0 * latent_v_i.T @ latent_v_i)
mean_star_u = lambda_star_u @ (b0 * latent_v_i.T @ target_ratings + lambda_u @ mu_u)
posterior_sample_u = mean_star_u + cholesky(lambda_star_u) @ normal(size=(n_latent, 1))
return posterior_sample_u.reshape(-1)
def sampling_latent_user(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0):
find_user = user_based_item_rating(each, rating_matrix)
target_items, target_ratings = find_user[0], find_user[1]
latent_v_i = latent_v[target_items]
each_user_latent = _sampling_latent(latent_v_i, mu_u, lambda_u, sigma_u, target_ratings, n_latent, b0)
return each_user_latent
def sampling_latent_item(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0):
find_item = item_based_user_rating(each, rating_matrix)
target_user, target_ratings = find_item[0], find_item[1]
latent_v_i = latent_v[target_user]
each_item_latent = _sampling_latent(latent_v_i, mu_u, lambda_u, sigma_u, target_ratings, n_latent, b0)
return each_item_latent
_parallel_env = {}
def _init_parallel(shared_array, latent_shape):
_parallel_env['latent'] = shared_array
_parallel_env['shape'] = latent_shape
def _init_args(n_sample_u, n_latent):
shape_latent = (n_sample_u, n_latent)
shared_latent = _RawArray('d', int(n_sample_u * n_latent))
return shape_latent, shared_latent
def _pool_map(n_core, parallel_function, n_sample_u, shape_latent, shared_latent):
with _Pool(processes=n_core, initializer=_init_parallel, initargs=(shared_latent, shape_latent)) as pool:
pool.map(parallel_function, iterable=_np.arange(n_sample_u))
latent = _np.frombuffer(shared_latent, dtype=_np.float64).reshape(shape_latent)
return latent
def parallel_sampling_latent_user(n_core, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_sample_u,
n_latent, b0):
shape_latent, shared_latent = _init_args(n_sample_u, n_latent)
f = _partial(_parallel_sampling_latent_user, mu_u=mu_u, lambda_u=lambda_u, sigma_u=sigma_u, latent_v=latent_v,
rating_matrix=rating_matrix, n_latent=n_latent, b0=b0)
latent = _pool_map(n_core, f, n_sample_u, shape_latent, shared_latent)
return latent
def parallel_sampling_latent_item(n_core, mu_v, lambda_v, sigma_v, latent_u, rating_matrix, n_sample_v,
n_latent, b0):
shape_latent, shared_latent = _init_args(n_sample_v, n_latent)
f = _partial(_parallel_sampling_latent_item, mu_v=mu_v, lambda_v=lambda_v, sigma_v=sigma_v, latent_u=latent_u,
rating_matrix=rating_matrix, n_latent=n_latent, b0=b0)
latent = _pool_map(n_core, f, n_sample_v, shape_latent, shared_latent)
return latent
def _parallel_sampling_latent_user(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0):
updated = sampling_latent_user(each, mu_u, lambda_u, sigma_u, latent_v, rating_matrix, n_latent, b0)
latent = _np.frombuffer(_parallel_env['latent']).reshape(_parallel_env['shape'])
latent[each, :] = updated
def _parallel_sampling_latent_item(each, mu_v, lambda_v, sigma_v, latent_u, rating_matrix, n_latent, b0):
updated = sampling_latent_item(each, mu_v, lambda_v, sigma_v, latent_u, rating_matrix, n_latent, b0)
latent = _np.frombuffer(_parallel_env['latent']).reshape(_parallel_env['shape'])
latent[each, :] = updated
| true | true |
1c3b71311b38054abbc680b04e743b1557f30ce3 | 3,404 | py | Python | ExamplesAndTests_Wimp_reach/params_test_single_bkg_nu_DSNB_3.py | sbaum90/paleoSens | 0f501780858059bac5e563b60250947e28416109 | [
"MIT"
] | null | null | null | ExamplesAndTests_Wimp_reach/params_test_single_bkg_nu_DSNB_3.py | sbaum90/paleoSens | 0f501780858059bac5e563b60250947e28416109 | [
"MIT"
] | null | null | null | ExamplesAndTests_Wimp_reach/params_test_single_bkg_nu_DSNB_3.py | sbaum90/paleoSens | 0f501780858059bac5e563b60250947e28416109 | [
"MIT"
] | null | null | null | # ------------------------------------------------
# output file name
# ------------------------------------------------
fout_name = "ExamplesAndTests_Wimp_reach/test_single_bkg_nu_DSNB_3"
# ------------------------------------------------
# parameter block for sample and read-out info
# ------------------------------------------------
sample_age_Myr = 1e3 # age of the target sample in [Myr]
sample_mass_kg = 1e-3 # mass of the target sample in [kg]
readout_resolution_Aa = 10.0 # track length resolution in [Ångström]
C238 = 1e-11 # uranium-238 concentration per weight in [g/g]
mineral_name = "Gypsum" # name of the target mineral.
keep_H_tracks = False # boolean variable. If True/False, tracks from hydrogen are in-/excluded in the track length spectra
# ------------------------------------------------
# external constraints on background parameters
# and sample properties.
# For each parameter, there is a boolean switch
# to include/not include the external constraint
# as well as a parameter specifying the relative
# uncertainty on the respective parameter
# ------------------------------------------------
# target sample age
ext_sample_age_bool = True
ext_sample_age_unc = 0.05
# target sample mass
ext_sample_mass_bool = True
ext_sample_mass_unc = 1e-5
# solar neutrinos
ext_nu_solar_bool = False
ext_nu_solar_unc = 1.0
# Galactic Supernova Neutrino Background
ext_nu_GSNB_bool = False
ext_nu_GSNB_unc = 1.0
# Diffuse Supernova Neutrino Background
ext_nu_DSNB_bool = False
ext_nu_DSNB_unc = 1.0
# atmospheric neutrinos
ext_nu_atm_bool = False
ext_nu_atm_unc = 1.0
# uranium-238 concentration
ext_C238_bool = False
ext_C238_unc = 0.1
# ------------------------------------------------
# parameters for the run setup
# ------------------------------------------------
TR_xmin_Aa = -1 # lower edge of smallest track length bin in [Aa]. For xmin=-1, the code uses readout_resolution_Aa/2
TR_xmax_Aa = 1e4 # upper edge of the largest track length bin in [Aa]. Should not be chosen larger than 10,000
TR_logbins = True # set True/False for log-spaced/linear spaced track length bins
TR_nbins = 100 # number of bins. If TR_logbins == False, TR_nbins can be set to -1 in which case the bin-width is set to readout_resolution_Aa
DMmass_min_GeV = 5e-1 # smallest DM mass in [GeV] for which the limit is computed
DMmass_max_GeV = 5e3 # largest DM mass in [GeV] for which the limit is computed
DMmass_nbins = 401 # number of (log-spaced) bins for which the reach is computed
output_exclusion_sens = True # if True, the code computes the 90% CL exclusion limit
output_discovery_sens = True # if True, the code computes the 5-\sigma discovery sensitivity
Ncores_mp = 4 # number of cores to use for parallelized part of computation
verbose = True # if True, code will print messages in std.out
# ------------------------------------------------
# boolean switches allowing to turn off background
# components
# This block should be used for testing only
# ------------------------------------------------
include_bkg_nu_solar = False # solar neutrinos
include_bkg_nu_GSNB = False # Galactic Supernova Neutrino Background
include_bkg_nu_DSNB = True # Diffuse Supernova Neutrino Background
include_bkg_nu_atm = False # atmospheric neutrinos
include_bkg_rad_1a = False # radiogenic single-alpha background
include_bkg_rad_neutrons = False # radiogenic neutron background
| 43.641026 | 143 | 0.668038 |
fout_name = "ExamplesAndTests_Wimp_reach/test_single_bkg_nu_DSNB_3"
sample_age_Myr = 1e3
sample_mass_kg = 1e-3
readout_resolution_Aa = 10.0
C238 = 1e-11
mineral_name = "Gypsum"
keep_H_tracks = False
ext_sample_age_bool = True
ext_sample_age_unc = 0.05
ext_sample_mass_bool = True
ext_sample_mass_unc = 1e-5
ext_nu_solar_bool = False
ext_nu_solar_unc = 1.0
ext_nu_GSNB_bool = False
ext_nu_GSNB_unc = 1.0
ext_nu_DSNB_bool = False
ext_nu_DSNB_unc = 1.0
ext_nu_atm_bool = False
ext_nu_atm_unc = 1.0
ext_C238_bool = False
ext_C238_unc = 0.1
TR_xmin_Aa = -1
TR_xmax_Aa = 1e4
TR_logbins = True
TR_nbins = 100
DMmass_min_GeV = 5e-1
DMmass_max_GeV = 5e3
DMmass_nbins = 401
output_exclusion_sens = True
output_discovery_sens = True
Ncores_mp = 4
verbose = True
include_bkg_nu_solar = False
include_bkg_nu_GSNB = False
include_bkg_nu_DSNB = True
include_bkg_nu_atm = False
include_bkg_rad_1a = False
include_bkg_rad_neutrons = False
| true | true |
1c3b7148b68e89bd4aada1bea08d2093e8f7ae58 | 4,247 | py | Python | migration/db_init.py | cocobear/fuxi | a3916131689d82ce6b804e0993d89f755d1108ec | [
"MIT"
] | 731 | 2018-06-13T05:41:04.000Z | 2019-09-06T01:36:57.000Z | migration/db_init.py | riusksk/fuxi | fadb1136b8896fe2a0f7783627bda867d5e6fd98 | [
"MIT"
] | 16 | 2019-10-14T08:17:13.000Z | 2021-12-13T20:13:23.000Z | migration/db_init.py | riusksk/fuxi | fadb1136b8896fe2a0f7783627bda867d5e6fd98 | [
"MIT"
] | 238 | 2018-06-14T08:59:44.000Z | 2019-09-04T06:35:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : jeffzhang
# @Time : 2019/5/22
# @File : db_init.py
# @Desc : ""
#
# import os
# import re
# import subprocess
# import sys
# from fuxi.common.utils.logger import logger
# from sqlalchemy.exc import OperationalError
# from fuxi.common.utils.poc_handler import poc_parser
# from fuxi.core.databases.orm.auth.user_orm import DBFuxiAdmin
# from fuxi.core.databases.orm.scanner.pocsuite_orm import DBPocsuitePlugin
# from fuxi.core.databases.orm.exploit.xss_orm import DBXssPayloads
# from fuxi.core.databases.orm.configuration.config import DBFuxiConfiguration
#
# def databases_init():
# try:
# if not DBFuxiAdmin.find_one():
# # fuxi console default login user and password (user: fuxi password: whoami)
# DBFuxiAdmin.add_admin(
# username="fuxi", password="whoami",
# nick="Administrator", email="admin@fuxi.com",
# )
# if not DBPocsuitePlugin.find_one():
# # pocsuit plugin initialization
# _poc_path = os.path.abspath(os.path.dirname(__file__)) + "/pocs"
# for poc_filename in os.listdir(_poc_path):
# with open(_poc_path + "/" + poc_filename, "r", encoding="UTF-8") as poc_read:
# poc_str = poc_read.read()
# poc_data = poc_parser(poc_str)
# DBPocsuitePlugin.add(
# name=poc_data['name'], poc_str=poc_str, filename=poc_filename,
# app=poc_data['app'], poc_type=poc_data['type'], op="fuxi"
# )
# except OperationalError:
# # catch database connect exception
# logger.error("OperationalError: can't connect to database server")
# sys.exit(0)
# except Exception as e:
# # catch database error
# logger.error("database initialization failure: {}".format(e))
# sys.exit(0)
#
# if not DBXssPayloads.find_one():
# # xss payload example
# name = "get document.cookie"
# value = "var api = 'http://127.0.0.1:50020';\n" \
# "var url = document.location.href;\n" \
# "var salt = 'abcde';\n" \
# "var data = 'cookie=' + encodeURIComponent(document.cookie);\n" \
# "var img = document.createElement('img');\n" \
# "img.width = 0; img.height = 0;\n" \
# "img.src = api+'/xss?salt='+salt+'&url='+encodeURIComponent(url)+'&data='+ encodeURIComponent(data);"
# DBXssPayloads.add(name, value)
#
# if not DBFuxiConfiguration.find_one():
# # base configuration
# cid = DBFuxiConfiguration.config_init()
# x = FuxiConfigInit(cid)
# if not x.set_whatweb_exe():
# logger.warning("Configuration init: whatweb cannot found")
# if not x.set_nmap_exe():
# logger.warning("Configuration init: nmap cannot found")
#
#
# class FuxiConfigInit(object):
# def __init__(self, cid):
# self.cid = cid
#
# def set_whatweb_exe(self):
# re_compile = re.compile('WhatWeb version ([\d]+)\.([\d]+)(?:\.([\d])+)')
# for exe in ["/usr/local/bin/whatweb", "/usr/bin/whatweb", "whatweb"]:
# subp = subprocess.run("{} --version".format(exe), shell=True, encoding="utf-8",
# stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# if re_compile.match(subp.stdout):
# DBFuxiConfiguration.update_by_id(self.cid, {
# "whatweb_exe": exe,
# })
# return True
# return False
#
# def set_nmap_exe(self):
# re_compile = re.compile("([\s]*)Starting Nmap ([\d]+)\.([\d]+)")
# for exe in ["/usr/local/bin/nmap", "/usr/bin/nmap", "nmap"]:
# subp = subprocess.run("{} -v".format(exe), shell=True, encoding="utf-8",
# stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# if re_compile.match(subp.stdout):
# DBFuxiConfiguration.update_by_id(self.cid, {
# "nmap_exe": exe,
# })
# return True
# return False
| 43.783505 | 119 | 0.561808 |
alization failure: {}".format(e))
# sys.exit(0)
#
# if not DBXssPayloads.find_one():
# # xss payload example
# name = "get document.cookie"
# value = "var api = 'http://127.0.0.1:50020';\n" \
# "var url = document.location.href;\n" \
# "var salt = 'abcde';\n" \
# "var data = 'cookie=' + encodeURIComponent(document.cookie);\n" \
# "var img = document.createElement('img');\n" \
# "img.width = 0; img.height = 0;\n" \
# "img.src = api+'/xss?salt='+salt+'&url='+encodeURIComponent(url)+'&data='+ encodeURIComponent(data);"
# DBXssPayloads.add(name, value)
#
# if not DBFuxiConfiguration.find_one():
# # base configuration
# cid = DBFuxiConfiguration.config_init()
# x = FuxiConfigInit(cid)
# if not x.set_whatweb_exe():
# logger.warning("Configuration init: whatweb cannot found")
# if not x.set_nmap_exe():
# logger.warning("Configuration init: nmap cannot found")
#
#
# class FuxiConfigInit(object):
# def __init__(self, cid):
# self.cid = cid
#
# def set_whatweb_exe(self):
# re_compile = re.compile('WhatWeb version ([\d]+)\.([\d]+)(?:\.([\d])+)')
# for exe in ["/usr/local/bin/whatweb", "/usr/bin/whatweb", "whatweb"]:
# subp = subprocess.run("{} --version".format(exe), shell=True, encoding="utf-8",
# stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# if re_compile.match(subp.stdout):
# DBFuxiConfiguration.update_by_id(self.cid, {
# "whatweb_exe": exe,
# })
# return True
# return False
#
# def set_nmap_exe(self):
# re_compile = re.compile("([\s]*)Starting Nmap ([\d]+)\.([\d]+)")
# for exe in ["/usr/local/bin/nmap", "/usr/bin/nmap", "nmap"]:
# subp = subprocess.run("{} -v".format(exe), shell=True, encoding="utf-8",
# stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# if re_compile.match(subp.stdout):
# DBFuxiConfiguration.update_by_id(self.cid, {
# "nmap_exe": exe,
# })
# return True
# return False
| true | true |
1c3b7203b593306be831ad4edabfb7eedf8274fa | 13,768 | py | Python | django/db/models/deletion.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2017-08-30T06:46:16.000Z | 2017-08-30T06:46:16.000Z | django/db/models/deletion.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/db/models/deletion.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2019-10-22T12:16:53.000Z | 2019-10-22T12:16:53.000Z | from collections import Counter, OrderedDict
from operator import attrgetter
from django.db import IntegrityError, connections, transaction
from django.db.models import signals, sql
from django.utils import six
class ProtectedError(IntegrityError):
    """Raised when a deletion would cascade into a PROTECT foreign key."""

    def __init__(self, msg, protected_objects):
        super(ProtectedError, self).__init__(msg, protected_objects)
        # Keep the blocking instances around so callers can report or
        # display exactly which objects prevented the deletion.
        self.protected_objects = protected_objects
def CASCADE(collector, field, sub_objs, using):
    """on_delete handler: collect the related objects for deletion too."""
    related_model = field.remote_field.model
    collector.collect(
        sub_objs,
        source=related_model,
        source_attr=field.name,
        nullable=field.null,
    )
    # For nullable FKs on backends that cannot defer constraint checks,
    # null out the reference first so rows can be removed in any order.
    can_defer = connections[using].features.can_defer_constraint_checks
    if field.null and not can_defer:
        collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
    """on_delete handler: refuse the deletion by raising ProtectedError."""
    message = (
        "Cannot delete some instances of model '%s' because they are "
        "referenced through a protected foreign key: '%s.%s'" % (
            field.remote_field.model.__name__,
            sub_objs[0].__class__.__name__,
            field.name,
        )
    )
    raise ProtectedError(message, sub_objs)
def SET(value):
    """Build an on_delete handler that repoints the foreign key at ``value``.

    ``value`` may be a constant, or a zero-argument callable that is invoked
    at deletion time to produce the replacement value.
    """
    # Decide once, at handler-construction time, whether the value is lazy.
    is_lazy = callable(value)

    def set_on_delete(collector, field, sub_objs, using):
        new_value = value() if is_lazy else value
        collector.add_field_update(field, new_value, sub_objs)

    # Allow migrations to serialize SET(...) back to its construction call.
    set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {})
    return set_on_delete
def SET_NULL(collector, field, sub_objs, using):
    """on_delete handler: set the foreign key to NULL instead of cascading."""
    collector.add_field_update(field, None, sub_objs)
def SET_DEFAULT(collector, field, sub_objs, using):
    """on_delete handler: reset the foreign key to the field's default value."""
    default = field.get_default()
    collector.add_field_update(field, default, sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
    """on_delete handler: take no action; integrity is left to the database."""
    pass
def get_candidate_relations_to_delete(opts):
    """Yield the relations on ``opts`` that may require cascading deletion.

    Candidates are auto-created, non-concrete 1-1 and N-1 reverse relations.
    Many-to-many relations never qualify.
    """
    def _is_candidate(field):
        if not field.auto_created or field.concrete:
            return False
        return field.one_to_one or field.one_to_many

    return (field for field in opts.get_fields(include_hidden=True)
            if _is_candidate(field))
class Collector(object):
    """Collects the set of objects (and cascading related objects) to delete,
    then performs the deletion in a dependency-respecting order.

    Usage: instantiate with a database alias, call collect() with the initial
    objects, then delete().
    """
    def __init__(self, using):
        # Database alias all queries are routed to.
        self.using = using
        # Initially, {model: {instances}}; delete() turns values into
        # pk-sorted lists.
        self.data = OrderedDict()
        self.field_updates = {}  # {model: {(field, value): {instances}}}
        # fast_deletes is a list of queryset-likes that can be deleted without
        # fetching the objects into memory.
        self.fast_deletes = []

        # Tracks deletion-order dependency for databases without transactions
        # or ability to defer constraint checks. Only concrete model classes
        # should be included, as the dependencies exist only between actual
        # database tables; proxy models are represented here by their concrete
        # parent.
        self.dependencies = {}  # {model: {models}}

    def add(self, objs, source=None, nullable=False, reverse_dependency=False):
        """
        Adds 'objs' to the collection of objects to be deleted.  If the call is
        the result of a cascade, 'source' should be the model that caused it,
        and 'nullable' should be set to True if the relation can be null.

        Returns a list of all objects that were not already collected.
        """
        if not objs:
            return []
        new_objs = []
        model = objs[0].__class__
        instances = self.data.setdefault(model, set())
        for obj in objs:
            if obj not in instances:
                new_objs.append(obj)
        instances.update(new_objs)
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects have
        # to be deleted.
        if source is not None and not nullable:
            if reverse_dependency:
                source, model = model, source
            self.dependencies.setdefault(
                source._meta.concrete_model, set()).add(model._meta.concrete_model)
        return new_objs

    def add_field_update(self, field, value, objs):
        """
        Schedules a field update. 'objs' must be a homogeneous iterable
        collection of model instances (e.g. a QuerySet).
        """
        if not objs:
            return
        model = objs[0].__class__
        self.field_updates.setdefault(
            model, {}).setdefault(
            (field, value), set()).update(objs)

    def can_fast_delete(self, objs, from_field=None):
        """
        Determines if the objects in the given queryset-like can be
        fast-deleted. This can be done if there are no cascades, no
        parents and no signal listeners for the object class.

        The 'from_field' tells where we are coming from - we need this to
        determine if the objects are in fact to be deleted. Allows also
        skipping parent -> child -> parent chain preventing fast delete of
        the child.
        """
        if from_field and from_field.remote_field.on_delete is not CASCADE:
            return False
        if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')):
            return False
        model = objs.model
        # Any pre/post-delete or m2m_changed listener must see real instances,
        # so a query-level delete is not possible.
        if (signals.pre_delete.has_listeners(model)
                or signals.post_delete.has_listeners(model)
                or signals.m2m_changed.has_listeners(model)):
            return False
        # The use of from_field comes from the need to avoid cascade back to
        # parent when parent delete is cascading to child.
        opts = model._meta
        if any(link != from_field for link in opts.concrete_model._meta.parents.values()):
            return False
        # Foreign keys pointing to this model, both from m2m and other
        # models.
        for related in get_candidate_relations_to_delete(opts):
            if related.field.remote_field.on_delete is not DO_NOTHING:
                return False
        for field in model._meta.virtual_fields:
            if hasattr(field, 'bulk_related_objects'):
                # It's something like generic foreign key.
                return False
        return True

    def get_del_batches(self, objs, field):
        """
        Returns the objs in suitably sized batches for the used connection.
        """
        conn_batch_size = max(
            connections[self.using].ops.bulk_batch_size([field.name], objs), 1)
        if len(objs) > conn_batch_size:
            return [objs[i:i + conn_batch_size]
                    for i in range(0, len(objs), conn_batch_size)]
        else:
            return [objs]

    def collect(self, objs, source=None, nullable=False, collect_related=True,
                source_attr=None, reverse_dependency=False, keep_parents=False):
        """
        Adds 'objs' to the collection of objects to be deleted as well as all
        parent instances.  'objs' must be a homogeneous iterable collection of
        model instances (e.g. a QuerySet).  If 'collect_related' is True,
        related objects will be handled by their respective on_delete handler.

        If the call is the result of a cascade, 'source' should be the model
        that caused it and 'nullable' should be set to True, if the relation
        can be null.

        If 'reverse_dependency' is True, 'source' will be deleted before the
        current model, rather than after. (Needed for cascading to parent
        models, the one case in which the cascade follows the forwards
        direction of an FK rather than the reverse direction.)

        If 'keep_parents' is True, data of parent model's will be not deleted.
        """
        if self.can_fast_delete(objs):
            self.fast_deletes.append(objs)
            return
        new_objs = self.add(objs, source, nullable,
                            reverse_dependency=reverse_dependency)
        if not new_objs:
            return

        model = new_objs[0].__class__

        if not keep_parents:
            # Recursively collect concrete model's parent models, but not their
            # related objects. These will be found by meta.get_fields()
            concrete_model = model._meta.concrete_model
            for ptr in six.itervalues(concrete_model._meta.parents):
                if ptr:
                    # FIXME: This seems to be buggy and execute a query for each
                    # parent object fetch. We have the parent data in the obj,
                    # but we don't have a nice way to turn that data into parent
                    # object instance.
                    parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
                    self.collect(parent_objs, source=model,
                                 source_attr=ptr.remote_field.related_name,
                                 collect_related=False,
                                 reverse_dependency=True)
        if collect_related:
            for related in get_candidate_relations_to_delete(model._meta):
                field = related.field
                if field.remote_field.on_delete == DO_NOTHING:
                    continue
                # Fetch related objects in connection-sized batches and let
                # each field's on_delete handler decide what happens to them.
                batches = self.get_del_batches(new_objs, field)
                for batch in batches:
                    sub_objs = self.related_objects(related, batch)
                    if self.can_fast_delete(sub_objs, from_field=field):
                        self.fast_deletes.append(sub_objs)
                    elif sub_objs:
                        field.remote_field.on_delete(self, field, sub_objs, self.using)
            for field in model._meta.virtual_fields:
                if hasattr(field, 'bulk_related_objects'):
                    # It's something like generic foreign key.
                    sub_objs = field.bulk_related_objects(new_objs, self.using)
                    self.collect(sub_objs, source=model, nullable=True)

    def related_objects(self, related, objs):
        """
        Gets a QuerySet of objects related to ``objs`` via the relation ``related``.
        """
        return related.related_model._base_manager.using(self.using).filter(
            **{"%s__in" % related.field.name: objs}
        )

    def instances_with_model(self):
        """Yield (model, instance) pairs for every collected instance."""
        for model, instances in six.iteritems(self.data):
            for obj in instances:
                yield model, obj

    def sort(self):
        """Reorder self.data so that models are deleted after the models that
        depend on them (per self.dependencies). Leaves self.data untouched if
        the dependency graph contains a cycle.
        """
        sorted_models = []
        concrete_models = set()
        models = list(self.data)
        while len(sorted_models) < len(models):
            found = False
            for model in models:
                if model in sorted_models:
                    continue
                dependencies = self.dependencies.get(model._meta.concrete_model)
                if not (dependencies and dependencies.difference(concrete_models)):
                    sorted_models.append(model)
                    concrete_models.add(model._meta.concrete_model)
                    found = True
            if not found:
                # Cyclic dependency: keep the existing ordering.
                return
        self.data = OrderedDict((model, self.data[model])
                                for model in sorted_models)

    def delete(self):
        """Delete all collected instances inside a single transaction.

        Returns a two-tuple: (total number of objects deleted,
        {model label: count}).
        """
        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))

        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer constraint checks until the
        # end of a transaction.
        self.sort()
        # number of objects deleted for each model label
        deleted_counter = Counter()

        with transaction.atomic(using=self.using, savepoint=False):
            # send pre_delete signals
            for model, obj in self.instances_with_model():
                if not model._meta.auto_created:
                    signals.pre_delete.send(
                        sender=model, instance=obj, using=self.using
                    )

            # fast deletes
            for qs in self.fast_deletes:
                count = qs._raw_delete(using=self.using)
                deleted_counter[qs.model._meta.label] += count

            # update fields
            for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
                query = sql.UpdateQuery(model)
                for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                    query.update_batch([obj.pk for obj in instances],
                                       {field.name: value}, self.using)

            # reverse instance collections
            for instances in six.itervalues(self.data):
                instances.reverse()

            # delete instances
            for model, instances in six.iteritems(self.data):
                query = sql.DeleteQuery(model)
                pk_list = [obj.pk for obj in instances]
                count = query.delete_batch(pk_list, self.using)
                deleted_counter[model._meta.label] += count

                if not model._meta.auto_created:
                    for obj in instances:
                        signals.post_delete.send(
                            sender=model, instance=obj, using=self.using
                        )

        # update collected instances
        for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
            for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                for obj in instances:
                    setattr(obj, field.attname, value)
        for model, instances in six.iteritems(self.data):
            for instance in instances:
                # Mark the in-memory instances as deleted by clearing the pk.
                setattr(instance, model._meta.pk.attname, None)
        return sum(deleted_counter.values()), dict(deleted_counter)
| 43.159875 | 90 | 0.615122 | from collections import Counter, OrderedDict
from operator import attrgetter
from django.db import IntegrityError, connections, transaction
from django.db.models import signals, sql
from django.utils import six
class ProtectedError(IntegrityError):
    """Raised by the PROTECT on_delete handler when a delete would cascade
    into objects referenced through a protected foreign key.

    The offending objects are kept on ``protected_objects`` so callers can
    report exactly what blocked the deletion.
    """
    def __init__(self, msg, protected_objects):
        self.protected_objects = protected_objects
        super(ProtectedError, self).__init__(msg, protected_objects)
def CASCADE(collector, field, sub_objs, using):
    """on_delete handler: collect *sub_objs* for deletion alongside the
    referenced object; nullable FKs are additionally scheduled to be set to
    NULL first when the backend cannot defer constraint checks."""
    collector.collect(sub_objs, source=field.remote_field.model,
                      source_attr=field.name, nullable=field.null)
    if field.null and not connections[using].features.can_defer_constraint_checks:
        collector.add_field_update(field, None, sub_objs)
def PROTECT(collector, field, sub_objs, using):
    """on_delete handler: refuse the deletion by raising ProtectedError,
    naming the referencing model/field and carrying the blocking objects."""
    raise ProtectedError("Cannot delete some instances of model '%s' because "
        "they are referenced through a protected foreign key: '%s.%s'" % (
            field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name
        ),
        sub_objs
    )
def SET(value):
    """Build an ``on_delete`` handler that rewrites the foreign key to *value*.

    When *value* is callable it is invoked at deletion time, so every delete
    receives a freshly computed replacement; otherwise the constant is used
    as-is.  The returned handler carries a ``deconstruct`` attribute so the
    migrations framework can serialize it.
    """
    if callable(value):
        get_replacement = value
    else:
        def get_replacement():
            return value

    def set_on_delete(collector, field, sub_objs, using):
        collector.add_field_update(field, get_replacement(), sub_objs)

    set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {})
    return set_on_delete
def SET_NULL(collector, field, sub_objs, using):
    """on_delete handler: schedule the referencing FK to be set to NULL."""
    collector.add_field_update(field, None, sub_objs)
def SET_DEFAULT(collector, field, sub_objs, using):
    """on_delete handler: schedule the referencing FK to its field default."""
    collector.add_field_update(field, field.get_default(), sub_objs)
def DO_NOTHING(collector, field, sub_objs, using):
    """on_delete handler: leave the related rows untouched (the database is
    expected to enforce or ignore the constraint itself)."""
    pass
def get_candidate_relations_to_delete(opts):
    """Return an iterator over the relations on *opts* that a delete may have
    to follow: auto-created, non-concrete reverse one-to-one / one-to-many
    fields (hidden relations included)."""
    # get_fields() is called eagerly, matching the original generator
    # expression's evaluation of its outermost iterable at call time.
    all_fields = opts.get_fields(include_hidden=True)
    return (
        candidate for candidate in all_fields
        if candidate.auto_created and not candidate.concrete
        and (candidate.one_to_one or candidate.one_to_many)
    )
class Collector(object):
    """Collect a graph of model instances scheduled for deletion on the
    database alias *using* — including everything their relations cascade
    to — and delete them in a dependency-safe order via :meth:`delete`."""
    def __init__(self, using):
        self.using = using
        # Initially, {model: {instances}}, later values become lists.
        self.data = OrderedDict()
        self.field_updates = {} # {model: {(field, value): {instances}}}
        # fast_deletes is a list of queryset-likes that can be deleted without
        # fetching the objects into memory.
        self.fast_deletes = []
        # Tracks deletion-order dependency for databases without transactions
        # or ability to defer constraint checks. Only concrete model classes
        # should be included, as the dependencies exist only between actual
        # database tables; proxy models are represented here by their concrete
        # parent.
        self.dependencies = {} # {model: {models}}
    def add(self, objs, source=None, nullable=False, reverse_dependency=False):
        """Register *objs* for deletion and return the subset not seen before.

        When *source* is given and the relation is not nullable, record a
        deletion-order dependency between the two concrete models
        (direction flipped when *reverse_dependency* is true).
        """
        if not objs:
            return []
        new_objs = []
        model = objs[0].__class__
        instances = self.data.setdefault(model, set())
        for obj in objs:
            if obj not in instances:
                new_objs.append(obj)
        instances.update(new_objs)
        # Nullable relationships can be ignored -- they are nulled out before
        # deleting, and therefore do not affect the order in which objects have
        # to be deleted.
        if source is not None and not nullable:
            if reverse_dependency:
                source, model = model, source
            self.dependencies.setdefault(
                source._meta.concrete_model, set()).add(model._meta.concrete_model)
        return new_objs
    def add_field_update(self, field, value, objs):
        """Schedule *field* to be updated to *value* on *objs* before the
        actual deletes run (used by SET/SET_NULL/SET_DEFAULT and nullable
        CASCADE)."""
        if not objs:
            return
        model = objs[0].__class__
        self.field_updates.setdefault(
            model, {}).setdefault(
            (field, value), set()).update(objs)
    def can_fast_delete(self, objs, from_field=None):
        """Return True when *objs* (a queryset-like) can be deleted with a
        single query: no delete signal listeners, no inherited parents other
        than *from_field*, no cascading relations and no generic relations."""
        if from_field and from_field.remote_field.on_delete is not CASCADE:
            return False
        if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')):
            return False
        model = objs.model
        if (signals.pre_delete.has_listeners(model)
                or signals.post_delete.has_listeners(model)
                or signals.m2m_changed.has_listeners(model)):
            return False
        # The use of from_field comes from the need to avoid cascade back to
        # parent when parent delete is cascading to child.
        opts = model._meta
        if any(link != from_field for link in opts.concrete_model._meta.parents.values()):
            return False
        # Foreign keys pointing to this model, both from m2m and other
        # models.
        for related in get_candidate_relations_to_delete(opts):
            if related.field.remote_field.on_delete is not DO_NOTHING:
                return False
        for field in model._meta.virtual_fields:
            if hasattr(field, 'bulk_related_objects'):
                # It's something like generic foreign key.
                return False
        return True
    def get_del_batches(self, objs, field):
        """Split *objs* into chunks small enough for the backend to filter on
        *field* in a single query."""
        conn_batch_size = max(
            connections[self.using].ops.bulk_batch_size([field.name], objs), 1)
        if len(objs) > conn_batch_size:
            return [objs[i:i + conn_batch_size]
                    for i in range(0, len(objs), conn_batch_size)]
        else:
            return [objs]
    def collect(self, objs, source=None, nullable=False, collect_related=True,
                source_attr=None, reverse_dependency=False, keep_parents=False):
        """Add *objs* plus everything that has to go with them: inherited
        parent rows (unless *keep_parents*), related objects per their
        on_delete handlers, and generic relations.  Querysets that qualify
        are routed to ``fast_deletes`` instead."""
        if self.can_fast_delete(objs):
            self.fast_deletes.append(objs)
            return
        new_objs = self.add(objs, source, nullable,
                            reverse_dependency=reverse_dependency)
        if not new_objs:
            return
        model = new_objs[0].__class__
        if not keep_parents:
            # Recursively collect concrete model's parent models, but not their
            # related objects. These will be found by meta.get_fields()
            concrete_model = model._meta.concrete_model
            for ptr in six.itervalues(concrete_model._meta.parents):
                if ptr:
                    # FIXME: This seems to be buggy and execute a query for each
                    # parent object fetch. We have the parent data in the obj,
                    # but we don't have a nice way to turn that data into parent
                    parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
                    self.collect(parent_objs, source=model,
                                 source_attr=ptr.remote_field.related_name,
                                 collect_related=False,
                                 reverse_dependency=True)
        if collect_related:
            for related in get_candidate_relations_to_delete(model._meta):
                field = related.field
                if field.remote_field.on_delete == DO_NOTHING:
                    continue
                batches = self.get_del_batches(new_objs, field)
                for batch in batches:
                    sub_objs = self.related_objects(related, batch)
                    if self.can_fast_delete(sub_objs, from_field=field):
                        self.fast_deletes.append(sub_objs)
                    elif sub_objs:
                        field.remote_field.on_delete(self, field, sub_objs, self.using)
            for field in model._meta.virtual_fields:
                if hasattr(field, 'bulk_related_objects'):
                    # It's something like generic foreign key.
                    sub_objs = field.bulk_related_objects(new_objs, self.using)
                    self.collect(sub_objs, source=model, nullable=True)
    def related_objects(self, related, objs):
        """Return a queryset of objects related to *objs* via *related*."""
        return related.related_model._base_manager.using(self.using).filter(
            **{"%s__in" % related.field.name: objs}
        )
    def instances_with_model(self):
        """Yield ``(model, instance)`` pairs for every collected instance."""
        for model, instances in six.iteritems(self.data):
            for obj in instances:
                yield model, obj
    def sort(self):
        """Topologically reorder ``self.data`` so dependencies come first;
        bail out (leaving the current order) when a cycle is detected."""
        sorted_models = []
        concrete_models = set()
        models = list(self.data)
        while len(sorted_models) < len(models):
            found = False
            for model in models:
                if model in sorted_models:
                    continue
                dependencies = self.dependencies.get(model._meta.concrete_model)
                if not (dependencies and dependencies.difference(concrete_models)):
                    sorted_models.append(model)
                    concrete_models.add(model._meta.concrete_model)
                    found = True
            if not found:
                return
        self.data = OrderedDict((model, self.data[model])
                                for model in sorted_models)
    def delete(self):
        """Delete everything collected, atomically; return
        ``(total_rows_deleted, {model_label: count})``."""
        # sort instance collections
        for model, instances in self.data.items():
            self.data[model] = sorted(instances, key=attrgetter("pk"))
        # if possible, bring the models in an order suitable for databases that
        # don't support transactions or cannot defer constraint checks until the
        # end of a transaction.
        self.sort()
        # number of objects deleted per model label
        deleted_counter = Counter()
        with transaction.atomic(using=self.using, savepoint=False):
            # send pre_delete signals
            for model, obj in self.instances_with_model():
                if not model._meta.auto_created:
                    signals.pre_delete.send(
                        sender=model, instance=obj, using=self.using
                    )
            # fast deletes
            for qs in self.fast_deletes:
                count = qs._raw_delete(using=self.using)
                deleted_counter[qs.model._meta.label] += count
            # apply scheduled field updates
            for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
                query = sql.UpdateQuery(model)
                for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                    query.update_batch([obj.pk for obj in instances],
                                       {field.name: value}, self.using)
            # reverse instance collections
            for instances in six.itervalues(self.data):
                instances.reverse()
            # delete instances
            for model, instances in six.iteritems(self.data):
                query = sql.DeleteQuery(model)
                pk_list = [obj.pk for obj in instances]
                count = query.delete_batch(pk_list, self.using)
                deleted_counter[model._meta.label] += count
                if not model._meta.auto_created:
                    for obj in instances:
                        signals.post_delete.send(
                            sender=model, instance=obj, using=self.using
                        )
            # mirror the field updates onto the in-memory instances
            for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
                for (field, value), instances in six.iteritems(instances_for_fieldvalues):
                    for obj in instances:
                        setattr(obj, field.attname, value)
            # clear PKs so the in-memory objects read as deleted
            for model, instances in six.iteritems(self.data):
                for instance in instances:
                    setattr(instance, model._meta.pk.attname, None)
        return sum(deleted_counter.values()), dict(deleted_counter)
| true | true |
1c3b743681d6c6d737f3e9a733fc8a941192007f | 1,027 | py | Python | pipecutter/interface.py | binste/pipecutter | 18cac9340ea9f192e524b8a1b8f351cba972d45b | [
"MIT"
] | 3 | 2020-01-05T18:32:40.000Z | 2021-10-13T09:37:14.000Z | pipecutter/interface.py | binste/pipecutter | 18cac9340ea9f192e524b8a1b8f351cba972d45b | [
"MIT"
] | null | null | null | pipecutter/interface.py | binste/pipecutter | 18cac9340ea9f192e524b8a1b8f351cba972d45b | [
"MIT"
] | null | null | null | from typing import Union, List
from contextlib import contextmanager
import luigi
from luigi import worker
def _raise_run_exception(self, ex) -> None:
    """Drop-in replacement for ``worker.TaskProcess._handle_run_exception``
    that re-raises the task's exception instead of swallowing it."""
    raise ex
@contextmanager
def debug_mode():
    """Context manager that makes luigi propagate task exceptions.

    ``worker.TaskProcess._handle_run_exception`` is monkeypatched to a
    re-raising handler for the duration of the block and restored in the
    ``finally`` clause, even when the body errors out.
    """
    original_handle_run_exception = worker.TaskProcess._handle_run_exception
    try:
        worker.TaskProcess._handle_run_exception = _raise_run_exception
        yield
    finally:
        worker.TaskProcess._handle_run_exception = original_handle_run_exception
def run(
    tasks: Union[luigi.Task, List[luigi.Task]],
    local_scheduler: bool = True,
    print_detailed_summary: bool = True,
    log_level: str = "WARNING",
    **kwargs,
) -> None:
    """Run one or more luigi tasks with task exceptions re-raised.

    A single task is wrapped in a list before being handed to
    ``luigi.build``; additional keyword arguments are forwarded unchanged.
    When *print_detailed_summary* is true, the run summary returned by
    ``luigi.build`` is printed to stdout.
    """
    tasks = [tasks] if isinstance(tasks, luigi.Task) else tasks
    # debug_mode() ensures failures surface instead of being logged away.
    with debug_mode():
        r = luigi.build(
            tasks,
            local_scheduler=local_scheduler,
            log_level=log_level,
            detailed_summary=print_detailed_summary,
            **kwargs,
        )
    if print_detailed_summary:
        print(r.summary_text)
    return
| 24.452381 | 80 | 0.681597 | from typing import Union, List
from contextlib import contextmanager
import luigi
from luigi import worker
def _raise_run_exception(self, ex) -> None:
    """Replacement for ``worker.TaskProcess._handle_run_exception`` that
    re-raises the task's exception instead of swallowing it."""
    raise ex
@contextmanager
def debug_mode():
    """Temporarily patch luigi so task exceptions propagate to the caller;
    the original handler is always restored on exit."""
    original_handle_run_exception = worker.TaskProcess._handle_run_exception
    try:
        worker.TaskProcess._handle_run_exception = _raise_run_exception
        yield
    finally:
        worker.TaskProcess._handle_run_exception = original_handle_run_exception
def run(
    tasks: Union[luigi.Task, List[luigi.Task]],
    local_scheduler: bool = True,
    print_detailed_summary: bool = True,
    log_level: str = "WARNING",
    **kwargs,
) -> None:
    """Run luigi task(s) inside :func:`debug_mode`; a single task is wrapped
    in a list, extra kwargs are forwarded to ``luigi.build``, and the
    detailed summary is printed when requested."""
    tasks = [tasks] if isinstance(tasks, luigi.Task) else tasks
    with debug_mode():
        r = luigi.build(
            tasks,
            local_scheduler=local_scheduler,
            log_level=log_level,
            detailed_summary=print_detailed_summary,
            **kwargs,
        )
    if print_detailed_summary:
        print(r.summary_text)
    return
| true | true |
1c3b7622f3555e2fb2980f2450b77b596627d868 | 1,347 | py | Python | tests/ecosystem/upgrade/test_upgrade.py | romayalon/ocs-ci | b40428cae0f0766ffb0c2441041744821562c8b5 | [
"MIT"
] | null | null | null | tests/ecosystem/upgrade/test_upgrade.py | romayalon/ocs-ci | b40428cae0f0766ffb0c2441041744821562c8b5 | [
"MIT"
] | null | null | null | tests/ecosystem/upgrade/test_upgrade.py | romayalon/ocs-ci | b40428cae0f0766ffb0c2441041744821562c8b5 | [
"MIT"
] | null | null | null | import logging
import pytest
from ocs_ci.framework.testlib import ocs_upgrade, polarion_id
from ocs_ci.ocs.disruptive_operations import worker_node_shutdown
from ocs_ci.ocs.ocs_upgrade import run_ocs_upgrade
from ocs_ci.utility.reporting import get_polarion_id
log = logging.getLogger(__name__)
@pytest.fixture()
def teardown(request, nodes):
    """Fixture that registers a finalizer restarting all cluster nodes, so a
    test that shut nodes down leaves the cluster running again."""
    def finalizer():
        """
        Make sure all nodes are up again
        """
        nodes.restart_nodes_by_stop_and_start_teardown()
    request.addfinalizer(finalizer)
@pytest.mark.polarion_id("OCS-1579")
def test_worker_node_abrupt_shutdown(teardown):
    """
    Test OCS upgrade with disruption of shutting down worker node,
    for 5.5 minutes
    """
    log.info("Starting disruptive function: test_worker_node_abrupt_shutdown")
    # abrupt=True selects the abrupt variant of worker_node_shutdown.
    run_ocs_upgrade(operation=worker_node_shutdown, abrupt=True)
@pytest.mark.polarion_id("OCS-1575")
def test_worker_node_permanent_shutdown(teardown):
    """
    Test OCS upgrade with disruption of shutting down worker node
    """
    log.info("Starting disruptive function: test_worker_node_permanent_shutdown")
    # abrupt=False: the node stays down for the whole upgrade.
    run_ocs_upgrade(operation=worker_node_shutdown, abrupt=False)
@ocs_upgrade
@polarion_id(get_polarion_id(upgrade=True))
def test_upgrade():
    """
    Tests upgrade procedure of OCS cluster
    """
    # Plain (non-disruptive) upgrade run.
    run_ocs_upgrade()
| 24.490909 | 81 | 0.756496 | import logging
import pytest
from ocs_ci.framework.testlib import ocs_upgrade, polarion_id
from ocs_ci.ocs.disruptive_operations import worker_node_shutdown
from ocs_ci.ocs.ocs_upgrade import run_ocs_upgrade
from ocs_ci.utility.reporting import get_polarion_id
log = logging.getLogger(__name__)
@pytest.fixture()
def teardown(request, nodes):
    """Fixture registering a finalizer that brings every node back up."""
    def finalizer():
        """Make sure all nodes are up again."""
        nodes.restart_nodes_by_stop_and_start_teardown()
    request.addfinalizer(finalizer)
@pytest.mark.polarion_id("OCS-1579")
def test_worker_node_abrupt_shutdown(teardown):
    """Test OCS upgrade while a worker node is shut down abruptly."""
    log.info("Starting disruptive function: test_worker_node_abrupt_shutdown")
    run_ocs_upgrade(operation=worker_node_shutdown, abrupt=True)
@pytest.mark.polarion_id("OCS-1575")
def test_worker_node_permanent_shutdown(teardown):
    """Test OCS upgrade while a worker node stays shut down."""
    log.info("Starting disruptive function: test_worker_node_permanent_shutdown")
    run_ocs_upgrade(operation=worker_node_shutdown, abrupt=False)
@ocs_upgrade
@polarion_id(get_polarion_id(upgrade=True))
def test_upgrade():
    """Tests the plain upgrade procedure of an OCS cluster."""
    run_ocs_upgrade()
| true | true |
1c3b77d76c0f8b07da1c3b122d9a67855f9f7434 | 708 | py | Python | seeding-db/convertImages.py | UAlbanyArchives/EspyProject | 1c2b7a29fb4f3791806d2a9e8534fc4ee3aee6c2 | [
"Unlicense"
] | 2 | 2017-04-05T17:45:18.000Z | 2017-04-17T17:40:41.000Z | seeding-db/convertImages.py | UAlbanyArchives/EspyProject | 1c2b7a29fb4f3791806d2a9e8534fc4ee3aee6c2 | [
"Unlicense"
] | null | null | null | seeding-db/convertImages.py | UAlbanyArchives/EspyProject | 1c2b7a29fb4f3791806d2a9e8534fc4ee3aee6c2 | [
"Unlicense"
] | null | null | null | import os
from subprocess import Popen, PIPE
inputPath = "C:\\Projects\\icpsr\\B01"
outputPath = "C:\\Projects\\icpsr\\testFiles"
# Walk the TIFF source tree and convert the two selected scans to 300-dpi
# PNGs with ImageMagick's `convert`.
for root, dirs, files in os.walk(inputPath):
    for file in files:
        if file.lower().endswith(".tif"):
            # Only these two scans are converted in this run.
            if file == "B01_AL_000014a.tif" or file == "B01_AL_000052a.tif":
                print(file)
                img = os.path.join(root, file)
                outFile = os.path.join(outputPath, os.path.splitext(file)[0] + ".png")
                # Pass the command as an argument list (shell=False): unlike
                # the original hand-quoted shell string, this cannot break on
                # paths containing spaces or quote characters.
                cmd = ["convert", "-density", "300", img, outFile]
                convert = Popen(cmd, stdout=PIPE, stderr=PIPE)
                stdout, stderr = convert.communicate()
                if len(stdout) > 0:
                    print(stdout)
                if len(stderr) > 0:
                    print(stderr)
print (stderr) | 32.181818 | 74 | 0.638418 | import os
from subprocess import Popen, PIPE
inputPath = "C:\\Projects\\icpsr\\B01"
outputPath = "C:\\Projects\\icpsr\\testFiles"
# Walk the TIFF source tree and convert two specific scans to 300-dpi PNGs
# with ImageMagick's `convert`.
for root, dirs, files in os.walk(inputPath):
	for file in files:
		if file.lower().endswith(".tif"):
			# Only these two scans are converted in this run.
			if file == "B01_AL_000014a.tif" or file == "B01_AL_000052a.tif":
				print (file)
				img = os.path.join(root, file)
				outFile = os.path.join(outputPath, os.path.splitext(file)[0] + ".png")
				# NOTE(review): shell=True with a hand-quoted command string is
				# fragile (breaks on quotes in paths); an argument list with
				# shell=False would be safer.
				cmdString = "convert -density 300 \"" + img + "\" \"" + outFile + "\""
				convert = Popen(cmdString, shell=True, stdout=PIPE, stderr=PIPE)
				stdout, stderr = convert.communicate()
				# communicate() returns bytes; printed raw for logging.
				if len(stdout) > 0:
					print (stdout)
				if len(stderr) > 0:
print (stderr) | true | true |
1c3b7956b60e6d5f552449df4af31b1e83c619a5 | 657 | py | Python | 30-days-of-code/day29/solution.py | eduellery/hackerrank | 250887d8e04841ba538f6c0cee5185155ec70e2d | [
"MIT"
] | null | null | null | 30-days-of-code/day29/solution.py | eduellery/hackerrank | 250887d8e04841ba538f6c0cee5185155ec70e2d | [
"MIT"
] | null | null | null | 30-days-of-code/day29/solution.py | eduellery/hackerrank | 250887d8e04841ba538f6c0cee5185155ec70e2d | [
"MIT"
] | null | null | null | #!/bin/python3
import os
def bitwiseAnd(n, k):
    """Return the maximum of a & b over all pairs 1 <= a < b <= n whose AND
    is strictly less than k (HackerRank Day 29).

    O(1) closed form replacing the original O(n^2) pair scan: the best
    conceivable value is k - 1.  It is achievable iff the smallest b > k - 1
    containing all bits of k - 1, namely (k - 1) | k, fits within n.
    Otherwise the answer is k - 2, realised by the pair (k - 2, k - 1)
    (in that branch k is even, so k - 2 is even and (k - 2) & (k - 1) == k - 2).

    Assumes the problem constraints 2 <= n and 1 <= k <= n — TODO confirm
    callers never pass k > n + 1 (the original scan returned 0 there).
    """
    if (k - 1) | k <= n:
        return k - 1
    return k - 2
if __name__ == '__main__':
    # HackerRank harness: results go to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # First stdin line: number of test cases.
    t = int(input().strip())
    for t_itr in range(t):
        # Each test case line: "<n> <k>".
        first_multiple_input = input().rstrip().split()
        count = int(first_multiple_input[0])
        lim = int(first_multiple_input[1])
        res = bitwiseAnd(count, lim)
        fptr.write(str(res) + '\n')
    fptr.close()
| 21.9 | 55 | 0.506849 |
import os
def bitwiseAnd(n, k):
    """Brute-force the largest a & b (< k) over all pairs 1 <= a < b <= n."""
    best = 0
    # Values below k - 2 cannot contribute a new maximum, so start there.
    for low in range(k - 2, n):
        for high in range(low + 1, n + 1):
            masked = low & high
            if masked == k - 1:
                # k - 1 is the largest value strictly below k: stop early.
                return masked
            if best < masked < k:
                best = masked
    return best
if __name__ == '__main__':
    # HackerRank harness: answers are written to the OUTPUT_PATH file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # First line of stdin: number of test cases; each case supplies "n k".
    t = int(input().strip())
    for t_itr in range(t):
        first_multiple_input = input().rstrip().split()
        count = int(first_multiple_input[0])
        lim = int(first_multiple_input[1])
        res = bitwiseAnd(count, lim)
        fptr.write(str(res) + '\n')
    fptr.close()
| true | true |
1c3b795ae3bf6fb749c30c39d971ba6597eb15a7 | 5,930 | py | Python | excel/utils.py | gbmumumu/someTools | 0336b886388d57e8b7d7762446ad5c578732f924 | [
"MIT"
] | null | null | null | excel/utils.py | gbmumumu/someTools | 0336b886388d57e8b7d7762446ad5c578732f924 | [
"MIT"
] | null | null | null | excel/utils.py | gbmumumu/someTools | 0336b886388d57e8b7d7762446ad5c578732f924 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "gbmumumu"
from pathlib import Path
from collections import OrderedDict
from tqdm import tqdm
import re
import shutil
import zipfile
from xml.dom.minidom import parse
class XlsxImages:
    """Extract the embedded cell images of a WPS/Excel ``.xlsx`` workbook and
    copy them out under names taken from a neighbouring worksheet column.

    The workbook is staged as ``<work_space>/<stem>/<stem>.zip`` and unpacked
    there; extracted pictures are written to ``<images_output_path>/<stem>/``.
    """
    def __init__(self, filepath, image_idx=1, symbol_idx=2,
                 work_space=Path("./data"), images_output_path=Path("./images")):
        """Create the working directories and stage the workbook as a zip.

        filepath -- the .xlsx file (str or Path)
        image_idx -- 1-based column index of the DISPIMG cells
        symbol_idx -- 1-based column index holding the target file names
        work_space -- scratch root for the unpacked archive
        images_output_path -- root directory for the extracted images

        On any filesystem error the process exits with status 1 (preserved
        from the original behaviour).
        """
        if not isinstance(filepath, Path):
            filepath = Path(filepath)
        self._xlsx = filepath
        self._work_space = work_space / filepath.stem
        self._zip = self._work_space / self._xlsx.with_suffix(".zip").name
        self._output_path = images_output_path / filepath.stem
        self._iid = image_idx
        self._sid = symbol_idx
        try:
            self._work_space.mkdir(exist_ok=True, parents=True)
            self._output_path.mkdir(exist_ok=True, parents=True)
            shutil.copy(str(self._xlsx), str(self._zip))
        except Exception as e:
            # Message fixed: the original concatenated the literals without
            # separators, producing run-together text.
            print("Failed to initialize the file directory, "
                  "please check the file system or the permissions of this script. "
                  f"error type: {e.__class__.__name__}")
            exit(1)
        else:
            print(f"{filepath.name} initialize successfully!")
    def unzip(self):
        """Unpack the staged zip copy of the workbook into the work space."""
        try:
            print(f"Extracting files from {self._xlsx} to {self._work_space.absolute()}...")
            # Context manager closes the archive handle; the original
            # ZipFile(...).extractall(...) call leaked it.
            with zipfile.ZipFile(self._zip) as archive:
                archive.extractall(str(self._work_space))
        except Exception as e:
            print(f"File decompression failed!: {self._xlsx} "
                  f"error type: {e.__class__.__name__}")
        else:
            print("Decompression done!")
        return
    def get_shared_string_data(self):
        """Return an OrderedDict of shared-string index (as str) -> text,
        parsed from ``xl/sharedStrings.xml``."""
        print("reading sharedStrings.xml...")
        shared = self._work_space / "xl" / "sharedStrings.xml"
        string_data = OrderedDict()
        tree = parse(str(shared))
        shared_data = tree.documentElement.getElementsByTagName("si")
        for idx, node in enumerate(shared_data):
            for node_i in node.childNodes:
                if node_i.tagName == "t":
                    string_data[str(idx)] = node_i.childNodes[0].nodeValue
        return string_data
    def get_sheet_data(self, index=1):
        """Parse ``xl/worksheets/sheet<index>.xml`` and return two
        OrderedDicts: cell-ref -> DISPIMG id, and cell-ref -> shared-string
        index, taken from the configured image/symbol columns of each row."""
        image_rgx = re.compile(r".*DISPIMG\(\"(ID_.*)\",\d+\).*")
        print(f"reading sheet{index}")
        sheet = self._work_space / "xl" / "worksheets" / f"sheet{index}.xml"
        tree = parse(str(sheet))
        sheet_data = tree.documentElement.getElementsByTagName("sheetData")
        image_data, symbol_data = OrderedDict(), OrderedDict()
        for cell in sheet_data:
            for row in cell.getElementsByTagName("row"):
                image = row.getElementsByTagName("c")[self._iid - 1]
                symbol = row.getElementsByTagName("c")[self._sid - 1]
                image_cell = image.getAttribute("r")
                symbol_cell = symbol.getAttribute("r")
                inv, jnv = None, None
                try:
                    for node_i in image.childNodes:
                        if node_i.tagName == "v":
                            inv = node_i.childNodes[0].nodeValue
                    for node_j in symbol.childNodes:
                        if node_j.tagName == "v":
                            jnv = node_j.childNodes[0].nodeValue
                except ValueError:
                    continue
                else:
                    if jnv is not None and inv is not None:
                        image_data[image_cell] = image_rgx.findall(inv)[0]
                        symbol_data[symbol_cell] = jnv
        return image_data, symbol_data
    def get_target_data(self):
        """Return relationship Id -> media target path, parsed from
        ``xl/_rels/cellimages.xml.rels``."""
        print("reading cellimages.xml.rels")
        cell_images = self._work_space / "xl" / "_rels" / "cellimages.xml.rels"
        tree = parse(str(cell_images))
        target_root = tree.documentElement
        target_data = OrderedDict()
        for image in target_root.getElementsByTagName("Relationship"):
            target_data[image.getAttribute("Id")] = image.getAttribute("Target")
        return target_data
    def get_image_rids(self):
        """Return DISPIMG name -> relationship Id from ``xl/cellimages.xml``.

        Assumes a:blip and xdr:cNvPr elements appear in matching order —
        TODO confirm for workbooks not produced by WPS.
        """
        r_id_with_name = self._work_space / "xl" / "cellimages.xml"
        r_id_name_tree = parse(str(r_id_with_name))
        r_id_name_root = r_id_name_tree.documentElement
        r_id_names = OrderedDict()
        r_i_ds = []
        for _image in r_id_name_root.getElementsByTagName("a:blip"):
            r_i_ds.append(_image.getAttribute("r:embed"))
        for idx, _image in enumerate(r_id_name_root.getElementsByTagName("xdr:cNvPr")):
            r_id_names[_image.getAttribute("name")] = r_i_ds[idx]
        return r_id_names
    def get_images(self, sheet_index=1, image_field='A', name_field='B'):
        """Copy every embedded image of sheet *sheet_index* to the output
        directory, named after the value in *name_field* of the same row
        (the image lives in column *image_field*)."""
        image_field = image_field.upper()
        name_field = name_field.upper()
        image_data, symbol_data = self.get_sheet_data(sheet_index)
        symbols = self.get_shared_string_data()
        # Resolve shared-string indexes into the actual target file names.
        for item_cell, item_symbol_index in symbol_data.items():
            symbol_data[item_cell] = symbols[item_symbol_index]
        image_target = self.get_target_data()
        image_rels = self.get_image_rids()
        for cell, filename in tqdm(symbol_data.items(), desc='copying:'):
            if cell.startswith(name_field):
                # DISPIMG id -> relationship id -> media path for this row.
                src_name = Path(image_target.get(
                    image_rels.get(
                        image_data.get(image_field + re.findall(r"\d+", cell)[0])
                    )
                )).name
                src = self._work_space / "xl" / "media" / src_name
                des = self._output_path / Path(filename).with_suffix(src.suffix)
                shutil.copy(str(src), str(des))
        print(f"{self._xlsx} done!")
if __name__ == "__main__":
    # Import-only module for now; no CLI entry point is wired up.
    pass
| 42.357143 | 93 | 0.580776 |
__author__ = "gbmumumu"
from pathlib import Path
from collections import OrderedDict
from tqdm import tqdm
import re
import shutil
import zipfile
from xml.dom.minidom import parse
class XlsxImages:
    """Extract the embedded cell images of a WPS/Excel ``.xlsx`` workbook and
    copy them out under names taken from a neighbouring worksheet column.

    The workbook is staged as ``<work_space>/<stem>/<stem>.zip`` and unpacked
    there; extracted pictures land in ``<images_output_path>/<stem>/``.
    """
    def __init__(self, filepath, image_idx=1, symbol_idx=2,
                 work_space=Path("./data"), images_output_path=Path("./images")):
        """Create working directories and stage the workbook as a zip copy;
        exits the process with status 1 on any filesystem error."""
        if not isinstance(filepath, Path):
            filepath = Path(filepath)
        self._xlsx = filepath
        self._work_space = work_space / filepath.stem
        self._zip = self._work_space / self._xlsx.with_suffix(".zip").name
        self._output_path = images_output_path / filepath.stem
        self._iid = image_idx
        self._sid = symbol_idx
        try:
            self._work_space.mkdir(exist_ok=True, parents=True)
            self._output_path.mkdir(exist_ok=True, parents=True)
            shutil.copy(str(self._xlsx), str(self._zip))
        except Exception as e:
            # NOTE(review): adjacent literals concatenate without separators,
            # so this message prints as run-together text.
            print("Failed to initialize the file directory,"
                  "please check the file system or the permissions of this script"
                  f"error type: {e.__class__.__name__}")
            exit(1)
        else:
            print(f"{filepath.name} initialize successfully!")
    def unzip(self):
        """Unpack the staged zip copy into the work space."""
        try:
            print(f"Extracting files from {self._xlsx} to {self._work_space.absolute()}...")
            # NOTE(review): the ZipFile handle is never closed — a
            # ``with zipfile.ZipFile(...)`` block would avoid the leak.
            zipfile.ZipFile(self._zip).extractall(str(self._work_space))
        except Exception as e:
            print(f"File decompression failed!: {self._xlsx} "
                  f"error type: {e.__class__.__name__}")
        else:
            print("Decompression done!")
        return
    def get_shared_string_data(self):
        """Return an OrderedDict of shared-string index (as str) -> text,
        parsed from ``xl/sharedStrings.xml``."""
        print("reading sharedStrings.xml...")
        shared = self._work_space / "xl" / "sharedStrings.xml"
        string_data = OrderedDict()
        tree = parse(str(shared))
        shared_data = tree.documentElement.getElementsByTagName("si")
        for idx, node in enumerate(shared_data):
            for node_i in node.childNodes:
                if node_i.tagName == "t":
                    string_data[str(idx)] = node_i.childNodes[0].nodeValue
        return string_data
    def get_sheet_data(self, index=1):
        """Parse ``xl/worksheets/sheet<index>.xml``; return cell-ref ->
        DISPIMG id and cell-ref -> shared-string index for the configured
        image/symbol columns."""
        image_rgx = re.compile(r".*DISPIMG\(\"(ID_.*)\",\d+\).*")
        print(f"reading sheet{index}")
        sheet = self._work_space / "xl" / "worksheets" / f"sheet{index}.xml"
        tree = parse(str(sheet))
        sheet_data = tree.documentElement.getElementsByTagName("sheetData")
        image_data, symbol_data = OrderedDict(), OrderedDict()
        for cell in sheet_data:
            for row in cell.getElementsByTagName("row"):
                image = row.getElementsByTagName("c")[self._iid - 1]
                symbol = row.getElementsByTagName("c")[self._sid - 1]
                image_cell = image.getAttribute("r")
                symbol_cell = symbol.getAttribute("r")
                inv, jnv = None, None
                try:
                    for node_i in image.childNodes:
                        if node_i.tagName == "v":
                            inv = node_i.childNodes[0].nodeValue
                    for node_j in symbol.childNodes:
                        if node_j.tagName == "v":
                            jnv = node_j.childNodes[0].nodeValue
                except ValueError:
                    continue
                else:
                    if jnv is not None and inv is not None:
                        image_data[image_cell] = image_rgx.findall(inv)[0]
                        symbol_data[symbol_cell] = jnv
        return image_data, symbol_data
    def get_target_data(self):
        """Return relationship Id -> media target path from
        ``xl/_rels/cellimages.xml.rels``."""
        print("reading cellimages.xml.rels")
        cell_images = self._work_space / "xl" / "_rels" / "cellimages.xml.rels"
        tree = parse(str(cell_images))
        target_root = tree.documentElement
        target_data = OrderedDict()
        for image in target_root.getElementsByTagName("Relationship"):
            target_data[image.getAttribute("Id")] = image.getAttribute("Target")
        return target_data
    def get_image_rids(self):
        """Return DISPIMG name -> relationship Id from ``xl/cellimages.xml``;
        assumes a:blip and xdr:cNvPr appear in matching order — TODO confirm."""
        r_id_with_name = self._work_space / "xl" / "cellimages.xml"
        r_id_name_tree = parse(str(r_id_with_name))
        r_id_name_root = r_id_name_tree.documentElement
        r_id_names = OrderedDict()
        r_i_ds = []
        for _image in r_id_name_root.getElementsByTagName("a:blip"):
            r_i_ds.append(_image.getAttribute("r:embed"))
        for idx, _image in enumerate(r_id_name_root.getElementsByTagName("xdr:cNvPr")):
            r_id_names[_image.getAttribute("name")] = r_i_ds[idx]
        return r_id_names
    def get_images(self, sheet_index=1, image_field='A', name_field='B'):
        """Copy every embedded image of sheet *sheet_index* to the output
        directory, named after the same-row value in column *name_field*."""
        image_field = image_field.upper()
        name_field = name_field.upper()
        image_data, symbol_data = self.get_sheet_data(sheet_index)
        symbols = self.get_shared_string_data()
        # Resolve shared-string indexes into the actual target file names.
        for item_cell, item_symbol_index in symbol_data.items():
            symbol_data[item_cell] = symbols[item_symbol_index]
        image_target = self.get_target_data()
        image_rels = self.get_image_rids()
        for cell, filename in tqdm(symbol_data.items(), desc='copying:'):
            if cell.startswith(name_field):
                # DISPIMG id -> relationship id -> media path for this row.
                src_name = Path(image_target.get(
                    image_rels.get(
                        image_data.get(image_field + re.findall(r"\d+", cell)[0])
                    )
                )).name
                src = self._work_space / "xl" / "media" / src_name
                des = self._output_path / Path(filename).with_suffix(src.suffix)
                shutil.copy(str(src), str(des))
        print(f"{self._xlsx} done!")
if __name__ == "__main__":
    # Import-only module for now; no CLI entry point is wired up.
    pass
| true | true |
1c3b79e941fe8b8ccfe9f77d3ce7e5217f14e8bf | 5,464 | py | Python | tools/c7n_mailer/c7n_mailer/azure/azure_queue_processor.py | ivan-shaporov/cloud-custodian | 619851ac8fb8e9609d42080fac50f9ef70529764 | [
"Apache-2.0"
] | null | null | null | tools/c7n_mailer/c7n_mailer/azure/azure_queue_processor.py | ivan-shaporov/cloud-custodian | 619851ac8fb8e9609d42080fac50f9ef70529764 | [
"Apache-2.0"
] | null | null | null | tools/c7n_mailer/c7n_mailer/azure/azure_queue_processor.py | ivan-shaporov/cloud-custodian | 619851ac8fb8e9609d42080fac50f9ef70529764 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Azure Queue Message Processing
==============================
"""
import base64
import json
import traceback
import zlib
import six
from c7n_mailer.azure.sendgrid_delivery import SendGridDelivery
from c7n_mailer.smtp_delivery import SmtpDelivery
try:
from c7n_azure.storage_utils import StorageUtilities
from c7n_azure.session import Session
except ImportError:
StorageUtilities = None
Session = None
pass
class MailerAzureQueueProcessor(object):
    """Drain c7n notification messages from an Azure Storage queue and fan
    them out to Slack, Datadog, SMTP or SendGrid delivery backends."""
    def __init__(self, config, logger, session=None, max_num_processes=16):
        """Store mailer configuration; *session* defaults to a fresh
        c7n_azure Session.  Raises when c7n_azure is not installed (the
        optional imports above fall back to None)."""
        if StorageUtilities is None:
            raise Exception("Using Azure queue requires package c7n_azure to be installed.")
        self.max_num_processes = max_num_processes
        self.config = config
        self.logger = logger
        self.receive_queue = self.config['queue_url']
        # Number of messages fetched per queue poll.
        self.batch_size = 16
        # Messages dequeued more often than this are deleted even unhandled.
        self.max_message_retry = 3
        self.session = session or Session()
    def run(self, parallel=False):
        """Poll the storage queue until it is empty, processing each message
        and deleting it once handled (or once past the retry limit).
        *parallel* is accepted for interface parity but not implemented."""
        if parallel:
            self.logger.info("Parallel processing with Azure Queue is not yet implemented.")
        self.logger.info("Downloading messages from the Azure Storage queue.")
        queue_settings = StorageUtilities.get_queue_client_by_uri(self.receive_queue, self.session)
        queue_messages = StorageUtilities.get_queue_messages(
            *queue_settings, num_messages=self.batch_size)
        while len(queue_messages) > 0:
            for queue_message in queue_messages:
                self.logger.debug("Message id: %s received" % queue_message.id)
                if (self.process_azure_queue_message(queue_message) or
                        queue_message.dequeue_count > self.max_message_retry):
                    # If message handled successfully or max retry hit, delete
                    StorageUtilities.delete_queue_message(*queue_settings, message=queue_message)
            queue_messages = StorageUtilities.get_queue_messages(
                *queue_settings, num_messages=self.batch_size)
        self.logger.info('No messages left on the azure storage queue, exiting c7n_mailer.')
    def process_azure_queue_message(self, encoded_azure_queue_message):
        """Decode one queue message (base64 -> zlib -> JSON) and dispatch it
        to the configured delivery channels.

        Slack/Datadog deliveries are best-effort (exceptions are printed and
        swallowed).  Returns the SendGrid handler's result when SendGrid is
        used, otherwise True.
        """
        queue_message = json.loads(
            zlib.decompress(base64.b64decode(encoded_azure_queue_message.content)))
        self.logger.debug("Got account:%s message:%s %s:%d policy:%s recipients:%s" % (
            queue_message.get('account', 'na'),
            encoded_azure_queue_message.id,
            queue_message['policy']['resource'],
            len(queue_message['resources']),
            queue_message['policy']['name'],
            ', '.join(queue_message['action'].get('to'))))
        # Slack recipients are addressed as "slack..." or webhook URLs.
        if any(e.startswith('slack') or e.startswith('https://hooks.slack.com/')
               for e in queue_message.get('action', ()).get('to')):
            from c7n_mailer.slack_delivery import SlackDelivery
            slack_delivery = SlackDelivery(self.config,
                                           self.logger,
                                           SendGridDelivery(self.config, self.logger))
            slack_messages = slack_delivery.get_to_addrs_slack_messages_map(queue_message)
            try:
                slack_delivery.slack_handler(queue_message, slack_messages)
            except Exception:
                traceback.print_exc()
                pass
        # this section gets the map of metrics to send to datadog and delivers it
        if any(e.startswith('datadog') for e in queue_message.get('action', ()).get('to')):
            from c7n_mailer.datadog_delivery import DataDogDelivery
            datadog_delivery = DataDogDelivery(self.config, self.session, self.logger)
            datadog_message_packages = datadog_delivery.get_datadog_message_packages(queue_message)
            try:
                datadog_delivery.deliver_datadog_messages(datadog_message_packages, queue_message)
            except Exception:
                traceback.print_exc()
                pass
        # this section sends a notification to the resource owner via SendGrid
        try:
            sendgrid_delivery = SendGridDelivery(self.config, self.logger)
            email_messages = sendgrid_delivery.get_to_addrs_sendgrid_messages_map(queue_message)
            if 'smtp_server' in self.config:
                # Explicit SMTP relay configured: send each message directly.
                smtp_delivery = SmtpDelivery(config=self.config,
                                             session=self.session,
                                             logger=self.logger)
                for to_addrs, message in six.iteritems(email_messages):
                    smtp_delivery.send_message(message=message, to_addrs=list(to_addrs))
            else:
                return sendgrid_delivery.sendgrid_handler(queue_message, email_messages)
        except Exception:
            traceback.print_exc()
        return True
| 42.6875 | 99 | 0.657577 |
import base64
import json
import traceback
import zlib
import six
from c7n_mailer.azure.sendgrid_delivery import SendGridDelivery
from c7n_mailer.smtp_delivery import SmtpDelivery
try:
from c7n_azure.storage_utils import StorageUtilities
from c7n_azure.session import Session
except ImportError:
StorageUtilities = None
Session = None
pass
class MailerAzureQueueProcessor(object):
    def __init__(self, config, logger, session=None, max_num_processes=16):
        """Store mailer configuration; *session* defaults to a fresh
        c7n_azure Session.  Raises when c7n_azure is not installed."""
        if StorageUtilities is None:
            raise Exception("Using Azure queue requires package c7n_azure to be installed.")
        self.max_num_processes = max_num_processes
        self.config = config
        self.logger = logger
        self.receive_queue = self.config['queue_url']
        # Number of messages fetched per queue poll.
        self.batch_size = 16
        # Messages dequeued more often than this are deleted even unhandled.
        self.max_message_retry = 3
        self.session = session or Session()
def run(self, parallel=False):
if parallel:
self.logger.info("Parallel processing with Azure Queue is not yet implemented.")
self.logger.info("Downloading messages from the Azure Storage queue.")
queue_settings = StorageUtilities.get_queue_client_by_uri(self.receive_queue, self.session)
queue_messages = StorageUtilities.get_queue_messages(
*queue_settings, num_messages=self.batch_size)
while len(queue_messages) > 0:
for queue_message in queue_messages:
self.logger.debug("Message id: %s received" % queue_message.id)
if (self.process_azure_queue_message(queue_message) or
queue_message.dequeue_count > self.max_message_retry):
StorageUtilities.delete_queue_message(*queue_settings, message=queue_message)
queue_messages = StorageUtilities.get_queue_messages(
*queue_settings, num_messages=self.batch_size)
self.logger.info('No messages left on the azure storage queue, exiting c7n_mailer.')
def process_azure_queue_message(self, encoded_azure_queue_message):
queue_message = json.loads(
zlib.decompress(base64.b64decode(encoded_azure_queue_message.content)))
self.logger.debug("Got account:%s message:%s %s:%d policy:%s recipients:%s" % (
queue_message.get('account', 'na'),
encoded_azure_queue_message.id,
queue_message['policy']['resource'],
len(queue_message['resources']),
queue_message['policy']['name'],
', '.join(queue_message['action'].get('to'))))
if any(e.startswith('slack') or e.startswith('https://hooks.slack.com/')
for e in queue_message.get('action', ()).get('to')):
from c7n_mailer.slack_delivery import SlackDelivery
slack_delivery = SlackDelivery(self.config,
self.logger,
SendGridDelivery(self.config, self.logger))
slack_messages = slack_delivery.get_to_addrs_slack_messages_map(queue_message)
try:
slack_delivery.slack_handler(queue_message, slack_messages)
except Exception:
traceback.print_exc()
pass
if any(e.startswith('datadog') for e in queue_message.get('action', ()).get('to')):
from c7n_mailer.datadog_delivery import DataDogDelivery
datadog_delivery = DataDogDelivery(self.config, self.session, self.logger)
datadog_message_packages = datadog_delivery.get_datadog_message_packages(queue_message)
try:
datadog_delivery.deliver_datadog_messages(datadog_message_packages, queue_message)
except Exception:
traceback.print_exc()
pass
try:
sendgrid_delivery = SendGridDelivery(self.config, self.logger)
email_messages = sendgrid_delivery.get_to_addrs_sendgrid_messages_map(queue_message)
if 'smtp_server' in self.config:
smtp_delivery = SmtpDelivery(config=self.config,
session=self.session,
logger=self.logger)
for to_addrs, message in six.iteritems(email_messages):
smtp_delivery.send_message(message=message, to_addrs=list(to_addrs))
else:
return sendgrid_delivery.sendgrid_handler(queue_message, email_messages)
except Exception:
traceback.print_exc()
return True
| true | true |
1c3b7a05f023cb371773f817497d23cb6e0825a0 | 21,308 | py | Python | tools/run_tests/python_utils/jobset.py | yongw5/grpc | 3c7b77a613182786d926445801f1f8f197a0c26a | [
"Apache-2.0"
] | null | null | null | tools/run_tests/python_utils/jobset.py | yongw5/grpc | 3c7b77a613182786d926445801f1f8f197a0c26a | [
"Apache-2.0"
] | null | null | null | tools/run_tests/python_utils/jobset.py | yongw5/grpc | 3c7b77a613182786d926445801f1f8f197a0c26a | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a group of subprocesses and then finish."""
import errno
import logging
import multiprocessing
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
# cpu cost measurement
# When True, Job.start() wraps the command line in `time -p` so Job.state()
# can parse real/user/sys and report measured core usage.
measure_cpu_costs = False
# Concurrency cap used by run() when the caller passes no explicit maxjobs.
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
# Maximum number of bytes of job's stdout that will be stored in the result.
# Only last N bytes of stdout will be kept if the actual output longer.
_MAX_RESULT_SIZE = 64 * 1024
# NOTE: If you change this, please make sure to test reviewing the
# github PR with http://reviewable.io, which is known to add UTF-8
# characters to the PR description, which leak into the environment here
# and cause failures.
def strip_non_ascii_chars(s):
    """Return *s* with every character outside the 7-bit ASCII range removed."""
    kept = [ch for ch in s if ord(ch) < 128]
    return ''.join(kept)
def sanitized_environment(env):
    """Return a copy of *env* whose keys and values are ASCII-only strings."""
    return {
        strip_non_ascii_chars(name): strip_non_ascii_chars(value)
        for name, value in env.items()
    }
def platform_string():
    """Map platform.system() to one of 'windows', 'mac', 'linux' or 'posix'."""
    system = platform.system()
    # MSYS/MinGW shells report e.g. 'MSYS_NT-10.0'; treat them as Windows.
    if system == 'Windows' or system.startswith('MSYS_NT'):
        return 'windows'
    if system == 'Darwin':
        return 'mac'
    if system == 'Linux':
        return 'linux'
    return 'posix'
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
if platform_string() == 'windows':
    # Windows has no SIGCHLD/SIGALRM; Jobset.reap falls back to time.sleep polling.
    pass
else:
    def alarm_handler(unused_signum, unused_frame):
        # Intentionally empty: SIGALRM exists only to interrupt signal.pause().
        pass
    # Any handler (even a no-op lambda) makes SIGCHLD wake signal.pause().
    signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
    signal.signal(signal.SIGALRM, alarm_handler)
# Sentinel objects marking a Job's lifecycle state; compared by identity only.
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
# ANSI SGR parameters per color name: [color code, bold flag].
_COLORS = {
    'red': [31, 0],
    'green': [32, 0],
    'yellow': [33, 0],
    'lightgray': [37, 0],
    'gray': [30, 1],
    'purple': [35, 0],
    'cyan': [36, 0]
}
# Escape sequences used by message() to redraw the current terminal line.
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
# Color chosen by message() for each status tag.
_TAG_COLOR = {
    'FAILED': 'red',
    'FLAKE': 'purple',
    'TIMEOUT_FLAKE': 'purple',
    'WARNING': 'yellow',
    'TIMEOUT': 'red',
    'PASSED': 'green',
    'START': 'gray',
    'WAITING': 'yellow',
    'SUCCESS': 'green',
    'IDLE': 'gray',
    'SKIPPED': 'cyan'
}
# Plain-logging format used when stdout is not a TTY (or on Windows).
_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)
def eintr_be_gone(fn):
    """Invoke *fn* repeatedly until it finishes without an EINTR interruption.

    Any IOError other than EINTR propagates to the caller.
    """
    while True:
        try:
            return fn()
        except IOError as err:
            if err.errno != errno.EINTR:
                raise
def message(tag, msg, explanatory_text=None, do_newline=False):
    """Print a color-tagged status line, in place when stdout is a TTY.

    Consecutive duplicate (tag, msg) pairs without explanatory text are
    suppressed via the message.old_tag / message.old_msg function attributes
    (initialized below).  Writes interrupted by EINTR are retried.
    """
    if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
        return
    message.old_tag = tag
    message.old_msg = msg
    while True:
        try:
            if platform_string() == 'windows' or not sys.stdout.isatty():
                # No ANSI escapes available: fall back to plain logging.
                if explanatory_text:
                    logging.info(explanatory_text)
                logging.info('%s: %s', tag, msg)
            else:
                # Rewrite the current line: cursor-to-column-0, clear line,
                # optional explanatory text, colored tag, then the message.
                sys.stdout.write(
                    '%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' %
                    (_BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' %
                     explanatory_text if explanatory_text is not None else '',
                     _COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
                     tag, msg, '\n'
                     if do_newline or explanatory_text is not None else ''))
                sys.stdout.flush()
            return
        except IOError as e:
            # Retry writes interrupted by signals (SIGCHLD/SIGALRM above).
            if e.errno != errno.EINTR:
                raise
# Deduplication state for message(); must exist before the first call.
message.old_tag = ''
message.old_msg = ''
def which(filename):
    """Resolve *filename* against PATH.

    A name containing '/' is returned unchanged; otherwise every PATH entry
    is searched in order.  Raises Exception when no match exists.
    """
    if '/' in filename:
        return filename
    for directory in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(directory, filename)
        if os.path.exists(candidate):
            return candidate
    raise Exception('%s not found' % filename)
class JobSpec(object):
    """Specifies what to run for a job."""

    def __init__(self,
                 cmdline,
                 shortname=None,
                 environ=None,
                 cwd=None,
                 shell=False,
                 timeout_seconds=5 * 60,
                 flake_retries=0,
                 timeout_retries=0,
                 kill_handler=None,
                 cpu_cost=1.0,
                 verbose_success=False,
                 logfilename=None):
        """
        Arguments:
          cmdline: a list of arguments to pass as the command line
          environ: a dictionary of environment variables to set in the child process
          kill_handler: a handler that will be called whenever job.kill() is invoked
          cpu_cost: number of cores per second this job needs
          logfilename: use given file to store job's output, rather than using a temporary file

        Raises:
          Exception: if logfilename is combined with any retry setting, since
            Job.start() reopens the logfile with 'w+' on every (re)try and
            would clobber earlier output.
        """
        if environ is None:
            environ = {}
        self.cmdline = cmdline
        self.environ = environ
        # Default short name is the executable itself.
        self.shortname = cmdline[0] if shortname is None else shortname
        self.cwd = cwd
        self.shell = shell
        self.timeout_seconds = timeout_seconds
        self.flake_retries = flake_retries
        self.timeout_retries = timeout_retries
        self.kill_handler = kill_handler
        self.cpu_cost = cpu_cost
        self.verbose_success = verbose_success
        self.logfilename = logfilename
        # Bug fix: retries are "enabled" when EITHER retry count is nonzero.
        # The previous `and` only rejected the combination when both were set,
        # letting a flake-only (or timeout-only) retry overwrite the log.
        if self.logfilename and (self.flake_retries != 0 or
                                 self.timeout_retries != 0):
            # Forbidden to avoid overwriting the test log when retrying.
            raise Exception(
                'Cannot use custom logfile when retries are enabled')

    def identity(self):
        # Canonical identity string used for hashing and ordering.
        return '%r %r' % (self.cmdline, self.environ)

    def __hash__(self):
        return hash(self.identity())

    # NOTE: __cmp__ is a Python 2 leftover (never invoked on Python 3) and it
    # returns a bool rather than -1/0/1; kept for interface compatibility.
    def __cmp__(self, other):
        return self.identity() == other.identity()

    def __lt__(self, other):
        return self.identity() < other.identity()

    def __repr__(self):
        return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname,
                                                      self.cmdline)

    def __str__(self):
        return '%s: %s %s' % (self.shortname, ' '.join(
            '%s=%s' % kv for kv in self.environ.items()), ' '.join(
                self.cmdline))
class JobResult(object):
    """Mutable record of one job's outcome, filled in by Job.state()."""

    def __init__(self):
        # Final disposition: UNKNOWN / PASSED / FAILED / TIMEOUT / SKIPPED.
        self.state = 'UNKNOWN'
        # Process exit code; -1 until the job actually finishes.
        self.returncode = -1
        # Wall-clock duration in seconds.
        self.elapsed_time = 0
        # Failure and retry bookkeeping across flake/timeout restarts.
        self.num_failures = 0
        self.retries = 0
        # Tail of the job's stdout (capped at _MAX_RESULT_SIZE).
        self.message = ''
        # Declared vs. measured CPU cost (cores), when measurement is enabled.
        self.cpu_estimated = 1
        self.cpu_measured = 1
def read_from_start(f):
    """Rewind *f* and return its entire contents decoded as UTF-8."""
    f.seek(0)
    raw = f.read()
    return raw.decode("utf8")
class Job(object):
    """Manages one job: spawns the subprocess and tracks retries/timeouts.

    The constructor starts the subprocess immediately; callers then poll
    state() until it returns something other than _RUNNING.
    """

    def __init__(self,
                 spec,
                 newline_on_success,
                 travis,
                 add_env,
                 quiet_success=False):
        self._spec = spec
        self._newline_on_success = newline_on_success
        self._travis = travis
        # Copy so later caller-side mutation cannot leak into this job's env.
        self._add_env = add_env.copy()
        self._retries = 0
        self._timeout_retries = 0
        self._suppress_failure_message = False
        self._quiet_success = quiet_success
        if not self._quiet_success:
            message('START', spec.shortname, do_newline=self._travis)
        self.result = JobResult()
        # Spawn the subprocess right away.
        self.start()

    def GetSpec(self):
        return self._spec

    def start(self):
        """(Re)start the subprocess, directing its output into the logfile."""
        if self._spec.logfilename:
            # make sure the log directory exists
            logfile_dir = os.path.dirname(
                os.path.abspath(self._spec.logfilename))
            if not os.path.exists(logfile_dir):
                os.makedirs(logfile_dir)
            self._logfile = open(self._spec.logfilename, 'w+')
        else:
            # macOS: a series of quick os.unlink invocation might cause OS
            # error during the creation of temporary file. By using
            # NamedTemporaryFile, we defer the removal of file and directory.
            self._logfile = tempfile.NamedTemporaryFile()
        # Child env = current env, overridden by spec.environ, then add_env,
        # with non-ASCII characters stripped out.
        env = dict(os.environ)
        env.update(self._spec.environ)
        env.update(self._add_env)
        env = sanitized_environment(env)
        self._start = time.time()
        cmdline = self._spec.cmdline
        # The Unix time command is finicky when used with MSBuild, so we don't use it
        # with jobs that run MSBuild.
        global measure_cpu_costs
        if measure_cpu_costs and not 'vsprojects\\build' in cmdline[0]:
            cmdline = ['time', '-p'] + cmdline
        else:
            # Disable measurement globally once any job cannot be measured.
            measure_cpu_costs = False
        try_start = lambda: subprocess.Popen(args=cmdline,
                                             stderr=subprocess.STDOUT,
                                             stdout=self._logfile,
                                             cwd=self._spec.cwd,
                                             shell=self._spec.shell,
                                             env=env)
        # Retry spawn failures (e.g. transient fork/exec errors) with
        # exponential backoff; the final attempt is allowed to raise.
        delay = 0.3
        for i in range(0, 4):
            try:
                self._process = try_start()
                break
            except OSError:
                message(
                    'WARNING', 'Failed to start %s, retrying in %f seconds' %
                    (self._spec.shortname, delay))
                time.sleep(delay)
                delay *= 2
        else:
            self._process = try_start()
        self._state = _RUNNING

    def state(self):
        """Poll current state of the job. Prints messages at completion."""

        def stdout(self=self):
            # Reads the whole logfile and caches its tail on the result.
            stdout = read_from_start(self._logfile)
            self.result.message = stdout[-_MAX_RESULT_SIZE:]
            return stdout

        if self._state == _RUNNING and self._process.poll() is not None:
            # Process has exited.
            elapsed = time.time() - self._start
            self.result.elapsed_time = elapsed
            if self._process.returncode != 0:
                if self._retries < self._spec.flake_retries:
                    # Nonzero exit but retries remain: treat as a flake.
                    message('FLAKE',
                            '%s [ret=%d, pid=%d]' %
                            (self._spec.shortname, self._process.returncode,
                             self._process.pid),
                            stdout(),
                            do_newline=True)
                    self._retries += 1
                    self.result.num_failures += 1
                    self.result.retries = self._timeout_retries + self._retries
                    # NOTE: job is restarted regardless of jobset's max_time setting
                    self.start()
                else:
                    self._state = _FAILURE
                    if not self._suppress_failure_message:
                        message('FAILED',
                                '%s [ret=%d, pid=%d, time=%.1fsec]' %
                                (self._spec.shortname, self._process.returncode,
                                 self._process.pid, elapsed),
                                stdout(),
                                do_newline=True)
                    self.result.state = 'FAILED'
                    self.result.num_failures += 1
                    self.result.returncode = self._process.returncode
            else:
                self._state = _SUCCESS
                measurement = ''
                if measure_cpu_costs:
                    # Parse the trailing `time -p` report from the log.
                    m = re.search(
                        r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)',
                        stdout())
                    real = float(m.group(1))
                    user = float(m.group(2))
                    sys = float(m.group(3))
                    # Only report for jobs long enough to measure meaningfully.
                    if real > 0.5:
                        cores = (user + sys) / real
                        self.result.cpu_measured = float('%.01f' % cores)
                        self.result.cpu_estimated = float('%.01f' %
                                                          self._spec.cpu_cost)
                        measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
                            self.result.cpu_measured, self.result.cpu_estimated)
                if not self._quiet_success:
                    message('PASSED',
                            '%s [time=%.1fsec, retries=%d:%d%s]' %
                            (self._spec.shortname, elapsed, self._retries,
                             self._timeout_retries, measurement),
                            stdout() if self._spec.verbose_success else None,
                            do_newline=self._newline_on_success or self._travis)
                self.result.state = 'PASSED'
        elif (self._state == _RUNNING and
              self._spec.timeout_seconds is not None and
              time.time() - self._start > self._spec.timeout_seconds):
            # Still running past the deadline: timeout handling.
            elapsed = time.time() - self._start
            self.result.elapsed_time = elapsed
            if self._timeout_retries < self._spec.timeout_retries:
                message('TIMEOUT_FLAKE',
                        '%s [pid=%d]' %
                        (self._spec.shortname, self._process.pid),
                        stdout(),
                        do_newline=True)
                self._timeout_retries += 1
                self.result.num_failures += 1
                self.result.retries = self._timeout_retries + self._retries
                if self._spec.kill_handler:
                    self._spec.kill_handler(self)
                self._process.terminate()
                # NOTE: job is restarted regardless of jobset's max_time setting
                self.start()
            else:
                message('TIMEOUT',
                        '%s [pid=%d, time=%.1fsec]' %
                        (self._spec.shortname, self._process.pid, elapsed),
                        stdout(),
                        do_newline=True)
                self.kill()
                self.result.state = 'TIMEOUT'
                self.result.num_failures += 1
        return self._state

    def kill(self):
        """Terminate the subprocess if it is still running."""
        if self._state == _RUNNING:
            self._state = _KILLED
            if self._spec.kill_handler:
                self._spec.kill_handler(self)
            self._process.terminate()

    def suppress_failure_message(self):
        # Suppresses the FAILED message only; the result is still recorded.
        self._suppress_failure_message = True
class Jobset(object):
    """Manages one run of jobs: admission by CPU cost, reaping, cancellation."""

    def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic,
                 newline_on_success, travis, stop_on_failure, add_env,
                 quiet_success, max_time):
        self._running = set()
        self._check_cancelled = check_cancelled
        self._cancelled = False
        self._failures = 0
        self._completed = 0
        # maxjobs caps the summed cpu_cost; maxjobs_cpu_agnostic caps the
        # plain count of concurrently running jobs.
        self._maxjobs = maxjobs
        self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
        self._newline_on_success = newline_on_success
        self._travis = travis
        self._stop_on_failure = stop_on_failure
        self._add_env = add_env
        self._quiet_success = quiet_success
        # max_time <= 0 means no deadline; otherwise jobs started after the
        # deadline are reported as SKIPPED.
        self._max_time = max_time
        # shortname -> list of JobResult (one per attempt recorded).
        self.resultset = {}
        self._remaining = None
        self._start_time = time.time()

    def set_remaining(self, remaining):
        # Number of queued-but-not-started jobs; used for the ETA display.
        self._remaining = remaining

    def get_num_failures(self):
        return self._failures

    def cpu_cost(self):
        """Sum of the declared cpu_cost of all currently running jobs."""
        c = 0
        for job in self._running:
            c += job._spec.cpu_cost
        return c

    def start(self, spec):
        """Start a job. Return True on success, False on failure."""
        while True:
            # Past the overall deadline: record as SKIPPED but report success
            # so the caller keeps draining the queue.
            if self._max_time > 0 and time.time(
            ) - self._start_time > self._max_time:
                skipped_job_result = JobResult()
                skipped_job_result.state = 'SKIPPED'
                message('SKIPPED', spec.shortname, do_newline=True)
                self.resultset[spec.shortname] = [skipped_job_result]
                return True
            if self.cancelled():
                return False
            current_cpu_cost = self.cpu_cost()
            # Nothing running: always admit (even an over-budget job).
            if current_cpu_cost == 0:
                break
            # Admit only if both the cpu-cost budget and the raw job-count
            # budget have room; otherwise reap and re-check.
            if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
                if len(self._running) < self._maxjobs_cpu_agnostic:
                    break
            self.reap(spec.shortname, spec.cpu_cost)
        if self.cancelled():
            return False
        job = Job(spec, self._newline_on_success, self._travis, self._add_env,
                  self._quiet_success)
        self._running.add(job)
        if job.GetSpec().shortname not in self.resultset:
            self.resultset[job.GetSpec().shortname] = []
        return True

    def reap(self, waiting_for=None, waiting_for_cost=None):
        """Collect the dead jobs."""
        while self._running:
            dead = set()
            for job in self._running:
                # job.state() may be interrupted by SIGCHLD/SIGALRM; retry.
                st = eintr_be_gone(lambda: job.state())
                if st == _RUNNING:
                    continue
                if st == _FAILURE or st == _KILLED:
                    self._failures += 1
                    if self._stop_on_failure:
                        self._cancelled = True
                        for job in self._running:
                            job.kill()
                dead.add(job)
                # Reap at most one job per pass (the set was mutated above).
                break
            for job in dead:
                self._completed += 1
                if not self._quiet_success or job.result.state != 'PASSED':
                    self.resultset[job.GetSpec().shortname].append(job.result)
                self._running.remove(job)
            if dead:
                return
            if not self._travis and platform_string() != 'windows':
                # Build the WAITING status line, with an ETA extrapolated
                # from throughput so far when queue length is known.
                rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
                if self._remaining is not None and self._completed > 0:
                    now = time.time()
                    sofar = now - self._start_time
                    remaining = sofar / self._completed * (self._remaining +
                                                           len(self._running))
                    rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
                if waiting_for is not None:
                    wstr = ' next: %s @ %.2f cpu' % (waiting_for,
                                                     waiting_for_cost)
                else:
                    wstr = ''
                message(
                    'WAITING',
                    '%s%d jobs running, %d complete, %d failed (load %.2f)%s' %
                    (rstr, len(self._running), self._completed, self._failures,
                     self.cpu_cost(), wstr))
            if platform_string() == 'windows':
                # No SIGCHLD on Windows: plain polling.
                time.sleep(0.1)
            else:
                # Sleep until a child exits (SIGCHLD) or 10s pass (SIGALRM).
                signal.alarm(10)
                signal.pause()

    def cancelled(self):
        """Poll for cancellation."""
        if self._cancelled:
            return True
        if not self._check_cancelled():
            return False
        # Newly cancelled: kill everything still running, then latch the flag.
        for job in self._running:
            job.kill()
        self._cancelled = True
        return True

    def finish(self):
        """Reap until all jobs are done; return True iff no failures occurred."""
        while self._running:
            if self.cancelled():
                pass  # poll cancellation
            self.reap()
        if platform_string() != 'windows':
            # Cancel any pending alarm from reap().
            signal.alarm(0)
        return not self.cancelled() and self._failures == 0
def _never_cancelled():
return False
def tag_remaining(xs):
staging = []
for x in xs:
staging.append(x)
if len(staging) > 5000:
yield (staging.pop(0), None)
n = len(staging)
for i, x in enumerate(staging):
yield (x, n - i - 1)
def run(cmdlines,
        check_cancelled=_never_cancelled,
        maxjobs=None,
        maxjobs_cpu_agnostic=None,
        newline_on_success=False,
        travis=False,
        infinite_runs=False,
        stop_on_failure=False,
        add_env=None,
        skip_jobs=False,
        quiet_success=False,
        max_time=-1):
    """Run every JobSpec in *cmdlines* and return (num_failures, resultset).

    Arguments:
      cmdlines: iterable of JobSpec objects to execute
      check_cancelled: callable polled for external cancellation
      maxjobs: cap on summed cpu_cost of concurrent jobs (default: 16*cores)
      maxjobs_cpu_agnostic: cap on the raw number of concurrent jobs
      add_env: extra environment variables merged into every job's env
      skip_jobs: if True, report every job as SKIPPED without running it
      max_time: overall deadline in seconds; <= 0 means unlimited
      infinite_runs: accepted for interface compatibility; unused here

    resultset maps each job's shortname to a list of JobResult objects.
    """
    # Bug fix: `add_env={}` was a mutable default argument shared across
    # calls; use None as the sentinel instead (behavior is otherwise equal).
    if add_env is None:
        add_env = {}
    if skip_jobs:
        # Fabricate SKIPPED results without spawning anything.
        resultset = {}
        skipped_job_result = JobResult()
        skipped_job_result.state = 'SKIPPED'
        for job in cmdlines:
            message('SKIPPED', job.shortname, do_newline=True)
            resultset[job.shortname] = [skipped_job_result]
        return 0, resultset
    js = Jobset(
        check_cancelled, maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
        maxjobs_cpu_agnostic if maxjobs_cpu_agnostic is not None else
        _DEFAULT_MAX_JOBS, newline_on_success, travis, stop_on_failure, add_env,
        quiet_success, max_time)
    for cmdline, remaining in tag_remaining(cmdlines):
        if not js.start(cmdline):
            # Jobset reported cancellation; stop feeding it jobs.
            break
        if remaining is not None:
            js.set_remaining(remaining)
    js.finish()
    return js.get_num_failures(), js.resultset
| 35.632107 | 91 | 0.544678 |
import errno
import logging
import multiprocessing
import os
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
measure_cpu_costs = False
_DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
# Only last N bytes of stdout will be kept if the actual output longer.
_MAX_RESULT_SIZE = 64 * 1024
# NOTE: If you change this, please make sure to test reviewing the
# github PR with http://reviewable.io, which is known to add UTF-8
# characters to the PR description, which leak into the environment here
# and cause failures.
def strip_non_ascii_chars(s):
return ''.join(c for c in s if ord(c) < 128)
def sanitized_environment(env):
sanitized = {}
for key, value in env.items():
sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
return sanitized
def platform_string():
if platform.system() == 'Windows':
return 'windows'
elif platform.system()[:7] == 'MSYS_NT':
return 'windows'
elif platform.system() == 'Darwin':
return 'mac'
elif platform.system() == 'Linux':
return 'linux'
else:
return 'posix'
# setup a signal handler so that signal.pause registers 'something'
# when a child finishes
# not using futures and threading to avoid a dependency on subprocess32
if platform_string() == 'windows':
pass
else:
def alarm_handler(unused_signum, unused_frame):
pass
signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
signal.signal(signal.SIGALRM, alarm_handler)
_SUCCESS = object()
_FAILURE = object()
_RUNNING = object()
_KILLED = object()
_COLORS = {
'red': [31, 0],
'green': [32, 0],
'yellow': [33, 0],
'lightgray': [37, 0],
'gray': [30, 1],
'purple': [35, 0],
'cyan': [36, 0]
}
_BEGINNING_OF_LINE = '\x1b[0G'
_CLEAR_LINE = '\x1b[2K'
_TAG_COLOR = {
'FAILED': 'red',
'FLAKE': 'purple',
'TIMEOUT_FLAKE': 'purple',
'WARNING': 'yellow',
'TIMEOUT': 'red',
'PASSED': 'green',
'START': 'gray',
'WAITING': 'yellow',
'SUCCESS': 'green',
'IDLE': 'gray',
'SKIPPED': 'cyan'
}
_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=_FORMAT)
def eintr_be_gone(fn):
while True:
try:
return fn()
except IOError as e:
if e.errno != errno.EINTR:
raise
def message(tag, msg, explanatory_text=None, do_newline=False):
if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
return
message.old_tag = tag
message.old_msg = msg
while True:
try:
if platform_string() == 'windows' or not sys.stdout.isatty():
if explanatory_text:
logging.info(explanatory_text)
logging.info('%s: %s', tag, msg)
else:
sys.stdout.write(
'%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' %
(_BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' %
explanatory_text if explanatory_text is not None else '',
_COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
tag, msg, '\n'
if do_newline or explanatory_text is not None else ''))
sys.stdout.flush()
return
except IOError as e:
if e.errno != errno.EINTR:
raise
message.old_tag = ''
message.old_msg = ''
def which(filename):
if '/' in filename:
return filename
for path in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, filename)):
return os.path.join(path, filename)
raise Exception('%s not found' % filename)
class JobSpec(object):
def __init__(self,
cmdline,
shortname=None,
environ=None,
cwd=None,
shell=False,
timeout_seconds=5 * 60,
flake_retries=0,
timeout_retries=0,
kill_handler=None,
cpu_cost=1.0,
verbose_success=False,
logfilename=None):
if environ is None:
environ = {}
self.cmdline = cmdline
self.environ = environ
self.shortname = cmdline[0] if shortname is None else shortname
self.cwd = cwd
self.shell = shell
self.timeout_seconds = timeout_seconds
self.flake_retries = flake_retries
self.timeout_retries = timeout_retries
self.kill_handler = kill_handler
self.cpu_cost = cpu_cost
self.verbose_success = verbose_success
self.logfilename = logfilename
if self.logfilename and self.flake_retries != 0 and self.timeout_retries != 0:
# Forbidden to avoid overwriting the test log when retrying.
raise Exception(
'Cannot use custom logfile when retries are enabled')
def identity(self):
return '%r %r' % (self.cmdline, self.environ)
def __hash__(self):
return hash(self.identity())
def __cmp__(self, other):
return self.identity() == other.identity()
def __lt__(self, other):
return self.identity() < other.identity()
def __repr__(self):
return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname,
self.cmdline)
def __str__(self):
return '%s: %s %s' % (self.shortname, ' '.join(
'%s=%s' % kv for kv in self.environ.items()), ' '.join(
self.cmdline))
class JobResult(object):
def __init__(self):
self.state = 'UNKNOWN'
self.returncode = -1
self.elapsed_time = 0
self.num_failures = 0
self.retries = 0
self.message = ''
self.cpu_estimated = 1
self.cpu_measured = 1
def read_from_start(f):
f.seek(0)
return f.read().decode("utf8")
class Job(object):
def __init__(self,
spec,
newline_on_success,
travis,
add_env,
quiet_success=False):
self._spec = spec
self._newline_on_success = newline_on_success
self._travis = travis
self._add_env = add_env.copy()
self._retries = 0
self._timeout_retries = 0
self._suppress_failure_message = False
self._quiet_success = quiet_success
if not self._quiet_success:
message('START', spec.shortname, do_newline=self._travis)
self.result = JobResult()
self.start()
def GetSpec(self):
return self._spec
def start(self):
if self._spec.logfilename:
# make sure the log directory exists
logfile_dir = os.path.dirname(
os.path.abspath(self._spec.logfilename))
if not os.path.exists(logfile_dir):
os.makedirs(logfile_dir)
self._logfile = open(self._spec.logfilename, 'w+')
else:
# macOS: a series of quick os.unlink invocation might cause OS
# error during the creation of temporary file. By using
# NamedTemporaryFile, we defer the removal of file and directory.
self._logfile = tempfile.NamedTemporaryFile()
env = dict(os.environ)
env.update(self._spec.environ)
env.update(self._add_env)
env = sanitized_environment(env)
self._start = time.time()
cmdline = self._spec.cmdline
# The Unix time command is finicky when used with MSBuild, so we don't use it
global measure_cpu_costs
if measure_cpu_costs and not 'vsprojects\\build' in cmdline[0]:
cmdline = ['time', '-p'] + cmdline
else:
measure_cpu_costs = False
try_start = lambda: subprocess.Popen(args=cmdline,
stderr=subprocess.STDOUT,
stdout=self._logfile,
cwd=self._spec.cwd,
shell=self._spec.shell,
env=env)
delay = 0.3
for i in range(0, 4):
try:
self._process = try_start()
break
except OSError:
message(
'WARNING', 'Failed to start %s, retrying in %f seconds' %
(self._spec.shortname, delay))
time.sleep(delay)
delay *= 2
else:
self._process = try_start()
self._state = _RUNNING
def state(self):
def stdout(self=self):
stdout = read_from_start(self._logfile)
self.result.message = stdout[-_MAX_RESULT_SIZE:]
return stdout
if self._state == _RUNNING and self._process.poll() is not None:
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._process.returncode != 0:
if self._retries < self._spec.flake_retries:
message('FLAKE',
'%s [ret=%d, pid=%d]' %
(self._spec.shortname, self._process.returncode,
self._process.pid),
stdout(),
do_newline=True)
self._retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
self.start()
else:
self._state = _FAILURE
if not self._suppress_failure_message:
message('FAILED',
'%s [ret=%d, pid=%d, time=%.1fsec]' %
(self._spec.shortname, self._process.returncode,
self._process.pid, elapsed),
stdout(),
do_newline=True)
self.result.state = 'FAILED'
self.result.num_failures += 1
self.result.returncode = self._process.returncode
else:
self._state = _SUCCESS
measurement = ''
if measure_cpu_costs:
m = re.search(
r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)',
stdout())
real = float(m.group(1))
user = float(m.group(2))
sys = float(m.group(3))
if real > 0.5:
cores = (user + sys) / real
self.result.cpu_measured = float('%.01f' % cores)
self.result.cpu_estimated = float('%.01f' %
self._spec.cpu_cost)
measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
self.result.cpu_measured, self.result.cpu_estimated)
if not self._quiet_success:
message('PASSED',
'%s [time=%.1fsec, retries=%d:%d%s]' %
(self._spec.shortname, elapsed, self._retries,
self._timeout_retries, measurement),
stdout() if self._spec.verbose_success else None,
do_newline=self._newline_on_success or self._travis)
self.result.state = 'PASSED'
elif (self._state == _RUNNING and
self._spec.timeout_seconds is not None and
time.time() - self._start > self._spec.timeout_seconds):
elapsed = time.time() - self._start
self.result.elapsed_time = elapsed
if self._timeout_retries < self._spec.timeout_retries:
message('TIMEOUT_FLAKE',
'%s [pid=%d]' %
(self._spec.shortname, self._process.pid),
stdout(),
do_newline=True)
self._timeout_retries += 1
self.result.num_failures += 1
self.result.retries = self._timeout_retries + self._retries
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
# NOTE: job is restarted regardless of jobset's max_time setting
self.start()
else:
message('TIMEOUT',
'%s [pid=%d, time=%.1fsec]' %
(self._spec.shortname, self._process.pid, elapsed),
stdout(),
do_newline=True)
self.kill()
self.result.state = 'TIMEOUT'
self.result.num_failures += 1
return self._state
def kill(self):
if self._state == _RUNNING:
self._state = _KILLED
if self._spec.kill_handler:
self._spec.kill_handler(self)
self._process.terminate()
def suppress_failure_message(self):
self._suppress_failure_message = True
class Jobset(object):
def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic,
newline_on_success, travis, stop_on_failure, add_env,
quiet_success, max_time):
self._running = set()
self._check_cancelled = check_cancelled
self._cancelled = False
self._failures = 0
self._completed = 0
self._maxjobs = maxjobs
self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
self._newline_on_success = newline_on_success
self._travis = travis
self._stop_on_failure = stop_on_failure
self._add_env = add_env
self._quiet_success = quiet_success
self._max_time = max_time
self.resultset = {}
self._remaining = None
self._start_time = time.time()
def set_remaining(self, remaining):
self._remaining = remaining
def get_num_failures(self):
return self._failures
def cpu_cost(self):
c = 0
for job in self._running:
c += job._spec.cpu_cost
return c
def start(self, spec):
while True:
if self._max_time > 0 and time.time(
) - self._start_time > self._max_time:
skipped_job_result = JobResult()
skipped_job_result.state = 'SKIPPED'
message('SKIPPED', spec.shortname, do_newline=True)
self.resultset[spec.shortname] = [skipped_job_result]
return True
if self.cancelled():
return False
current_cpu_cost = self.cpu_cost()
if current_cpu_cost == 0:
break
if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
if len(self._running) < self._maxjobs_cpu_agnostic:
break
self.reap(spec.shortname, spec.cpu_cost)
if self.cancelled():
return False
job = Job(spec, self._newline_on_success, self._travis, self._add_env,
self._quiet_success)
self._running.add(job)
if job.GetSpec().shortname not in self.resultset:
self.resultset[job.GetSpec().shortname] = []
return True
def reap(self, waiting_for=None, waiting_for_cost=None):
while self._running:
dead = set()
for job in self._running:
st = eintr_be_gone(lambda: job.state())
if st == _RUNNING:
continue
if st == _FAILURE or st == _KILLED:
self._failures += 1
if self._stop_on_failure:
self._cancelled = True
for job in self._running:
job.kill()
dead.add(job)
break
for job in dead:
self._completed += 1
if not self._quiet_success or job.result.state != 'PASSED':
self.resultset[job.GetSpec().shortname].append(job.result)
self._running.remove(job)
if dead:
return
if not self._travis and platform_string() != 'windows':
rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
if self._remaining is not None and self._completed > 0:
now = time.time()
sofar = now - self._start_time
remaining = sofar / self._completed * (self._remaining +
len(self._running))
rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
if waiting_for is not None:
wstr = ' next: %s @ %.2f cpu' % (waiting_for,
waiting_for_cost)
else:
wstr = ''
message(
'WAITING',
'%s%d jobs running, %d complete, %d failed (load %.2f)%s' %
(rstr, len(self._running), self._completed, self._failures,
self.cpu_cost(), wstr))
if platform_string() == 'windows':
time.sleep(0.1)
else:
signal.alarm(10)
signal.pause()
def cancelled(self):
if self._cancelled:
return True
if not self._check_cancelled():
return False
for job in self._running:
job.kill()
self._cancelled = True
return True
def finish(self):
while self._running:
if self.cancelled():
pass
self.reap()
if platform_string() != 'windows':
signal.alarm(0)
return not self.cancelled() and self._failures == 0
def _never_cancelled():
return False
def tag_remaining(xs):
staging = []
for x in xs:
staging.append(x)
if len(staging) > 5000:
yield (staging.pop(0), None)
n = len(staging)
for i, x in enumerate(staging):
yield (x, n - i - 1)
def run(cmdlines,
        check_cancelled=_never_cancelled,
        maxjobs=None,
        maxjobs_cpu_agnostic=None,
        newline_on_success=False,
        travis=False,
        infinite_runs=False,
        stop_on_failure=False,
        add_env={},
        skip_jobs=False,
        quiet_success=False,
        max_time=-1):
    """Execute a collection of job specs.

    Returns a tuple ``(num_failures, resultset)`` where ``resultset`` maps a
    job shortname to its list of results. ``infinite_runs`` is accepted for
    API compatibility but unused here.
    """
    if skip_jobs:
        # Execute nothing; report every job as SKIPPED (one shared result
        # object, matching the original behaviour).
        skipped_result = JobResult()
        skipped_result.state = 'SKIPPED'
        resultset = {}
        for job in cmdlines:
            message('SKIPPED', job.shortname, do_newline=True)
            resultset[job.shortname] = [skipped_result]
        return 0, resultset
    effective_maxjobs = _DEFAULT_MAX_JOBS if maxjobs is None else maxjobs
    effective_maxjobs_cpu = (_DEFAULT_MAX_JOBS
                             if maxjobs_cpu_agnostic is None
                             else maxjobs_cpu_agnostic)
    js = Jobset(check_cancelled, effective_maxjobs, effective_maxjobs_cpu,
                newline_on_success, travis, stop_on_failure, add_env,
                quiet_success, max_time)
    for cmdline, remaining in tag_remaining(cmdlines):
        if not js.start(cmdline):
            break
        if remaining is not None:
            js.set_remaining(remaining)
    js.finish()
    return js.get_num_failures(), js.resultset
| true | true |
1c3b7a50f8773259f368c03788544406f1cbe60a | 2,647 | py | Python | src/main/transcribers/Transcriber.py | BlkPingu/VoiceControl | f6a32d4307812c19d82cc997433271cc5a282f2b | [
"Apache-2.0"
] | null | null | null | src/main/transcribers/Transcriber.py | BlkPingu/VoiceControl | f6a32d4307812c19d82cc997433271cc5a282f2b | [
"Apache-2.0"
] | null | null | null | src/main/transcribers/Transcriber.py | BlkPingu/VoiceControl | f6a32d4307812c19d82cc997433271cc5a282f2b | [
"Apache-2.0"
] | null | null | null | from interfaces.TranscriberInterface import TranscriberInterface
import deepspeech
import numpy as np
from config import conf
import wave
from utility.Paths import path_to_base
import time
class Transcriber(TranscriberInterface):
    """DeepSpeech-based speech-to-text transcriber with a console progress bar.

    Loads the acoustic model, language model and trie described in ``conf``
    and tracks transcription progress as items are processed.
    """

    def __init__(self):
        mfp = path_to_base(conf['model_file_path'])   # acoustic model
        lmfp = path_to_base(conf['lm_file_path'])     # language model
        tfp = path_to_base(conf['trie_file_path'])    # LM trie
        self.model = deepspeech.Model(mfp, conf['beam_width'])
        self.model.enableDecoderWithLM(lmfp, tfp, conf['lm_alpha'], conf['lm_beta'])
        self.progress = 0       # number of completed progress steps so far
        self.lambda_count = 1   # number of passes applied per dataframe row
        self.df_size = 0        # total number of rows to process

    def get_df_size(self):
        """Return the total number of rows to process."""
        return self.df_size

    def set_df_size(self, new):
        """Set the total number of rows to process."""
        self.df_size = new

    def get_progress(self):
        """Return the number of completed progress steps."""
        return self.progress

    def set_progress(self, new):
        """Set the number of completed progress steps."""
        self.progress = new

    def get_lambda_count(self):
        """Return the number of passes applied per row."""
        return self.lambda_count

    def printProgressBar(self, iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
        """
        Call in a loop to create terminal progress bar
        @params:
            iteration   - Required  : current iteration (Int)
            total       - Required  : total iterations (Int)
            prefix      - Optional  : prefix string (Str)
            suffix      - Optional  : suffix string (Str)
            decimals    - Optional  : positive number of decimals in percent complete (Int)
            length      - Optional  : character length of bar (Int)
            fill        - Optional  : bar fill character (Str)
            printEnd    - Optional  : end character (e.g. "\r", "\r\n") (Str)
        """
        percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
        filledLength = int(length * iteration // total)
        bar = fill * filledLength + '-' * (length - filledLength)
        # "\r" end rewrites the same console line on each call.
        print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
        # Print a newline once the bar reaches 100%.
        if iteration == total:
            print()

    def update_progress_bar(self):
        """Advance progress by one step and redraw the console bar."""
        time.sleep(0.1)  # small delay so the redraw is visible
        new_progress = self.get_progress() + 1
        self.set_progress(new_progress)
        self.printProgressBar(self.get_progress(), self.get_lambda_count() * self.get_df_size(), prefix = 'Progress:', suffix = 'Complete', length = 50)

    def transcribe_from(self, data, *args, **kwargs) -> str:
        """Transcribe *data* to a string; implemented by subclasses.

        Bug fix: the original signature omitted ``self``, so instance calls
        bound the instance itself to ``data``.
        """
        pass
import deepspeech
import numpy as np
from config import conf
import wave
from utility.Paths import path_to_base
import time
class Transcriber(TranscriberInterface):
    """DeepSpeech-based speech-to-text transcriber with a console progress bar."""
    def __init__(self):
        # Resolve model/LM/trie paths from the project configuration.
        mfp = path_to_base(conf['model_file_path'])
        lmfp = path_to_base(conf['lm_file_path'])
        tfp = path_to_base(conf['trie_file_path'])
        self.model = deepspeech.Model(mfp, conf['beam_width'])
        self.model.enableDecoderWithLM(lmfp, tfp, conf['lm_alpha'], conf['lm_beta'])
        self.progress = 0
        self.lambda_count = 1
        self.df_size = 0
    def get_df_size(self):
        """Return the total number of rows to process."""
        return self.df_size
    def set_df_size(self, new):
        """Set the total number of rows to process."""
        self.df_size = new
    def get_progress(self):
        """Return the number of completed progress steps."""
        return self.progress
    def set_progress(self, new):
        """Set the number of completed progress steps."""
        self.progress = new
    def get_lambda_count(self):
        """Return the number of passes applied per row."""
        return self.lambda_count
    def printProgressBar(self, iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
        """Redraw a terminal progress bar; call once per iteration."""
        percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
        filledLength = int(length * iteration // total)
        bar = fill * filledLength + '-' * (length - filledLength)
        # "\r" end rewrites the same console line on each call.
        print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
        # Print a newline once the bar reaches 100%.
        if iteration == total:
            print()
    def update_progress_bar(self):
        """Advance progress by one step and redraw the console bar."""
        time.sleep(0.1)
        new_progress = self.get_progress() + 1
        self.set_progress(new_progress)
        self.printProgressBar(self.get_progress(), self.get_lambda_count() * self.get_df_size(), prefix = 'Progress:', suffix = 'Complete', length = 50)
    # NOTE(review): signature omits `self`, so instance calls bind the
    # instance to `data` — confirm against TranscriberInterface and fix.
    def transcribe_from(data, *args, **kwargs) -> str:
        """Transcribe *data* to a string; implemented by subclasses."""
        pass
1c3b7b1f696bb4192c0fc57bc20909fded27f2d2 | 261 | py | Python | ipytone/__init__.py | davidbrochart/ipytone | 82dc97b9075ecb6e3ef411571b4de5c9c90365dd | [
"BSD-3-Clause"
] | null | null | null | ipytone/__init__.py | davidbrochart/ipytone | 82dc97b9075ecb6e3ef411571b4de5c9c90365dd | [
"BSD-3-Clause"
] | null | null | null | ipytone/__init__.py | davidbrochart/ipytone | 82dc97b9075ecb6e3ef411571b4de5c9c90365dd | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Benoit Bovy.
# Distributed under the terms of the Modified BSD License.
from .ipytone import Oscillator
from ._version import __version__, version_info
from .nbextension import _jupyter_nbextension_paths
| 23.727273 | 58 | 0.793103 |
from .ipytone import Oscillator
from ._version import __version__, version_info
from .nbextension import _jupyter_nbextension_paths
| true | true |
1c3b7c00bbe41313142f861234e1170e41e02c90 | 3,170 | py | Python | alphaman/strategy/__init__.py | Changsung/Backtrader | 47707e15d08981f4e62d113227ee7a3d20a4201a | [
"MIT"
] | 5 | 2017-02-27T10:33:04.000Z | 2021-02-26T23:25:39.000Z | alphaman/strategy/__init__.py | Changsung/Alphaman | 47707e15d08981f4e62d113227ee7a3d20a4201a | [
"MIT"
] | null | null | null | alphaman/strategy/__init__.py | Changsung/Alphaman | 47707e15d08981f4e62d113227ee7a3d20a4201a | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2017 Changsung
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class BaseStrategy:
	"""Base class for trading strategies.

	Subclasses implement :meth:`handleData`; orders and data access are
	delegated to the attached ``alphaman`` engine (see :meth:`setAlphaman`).
	"""

	# Class-level fallback kept for backward compatibility with subclasses
	# whose __init__ does not call super().__init__(); fresh instances get
	# their own dict below.
	__signals = {}

	def __init__(self):
		# Bug fix: the signal registry used to live only on the class, so
		# every strategy instance shared (and mutated) the same dict.
		self.__signals = {}

	def addSignals(self, key, signal):
		"""Register *signal* under *key* and attach this strategy to it."""
		self.__signals[key] = signal
		signal.setStrategy(self)

	def getSignal(self, key):
		"""Return the current value of the signal registered under *key*."""
		return self.__signals[key].getSignal()

	def handleData(self):
		"""Per-bar strategy logic; must be overridden by subclasses."""
		raise NotImplementedError()

	def setAlphaman(self, alphaman):
		"""Attach the backtesting engine this strategy delegates to."""
		self.__alphaman = alphaman

	def buy(self, instrument, volume, limit_price=None, stop_price=None, days=None):
		"""Place a buy order for *instrument* via the engine."""
		self.__alphaman.buy(instrument, volume, limit_price, stop_price, days)

	def sell(self, instrument, volume, limit_price=None, stop_price=None, days=None):
		"""Place a sell order for *instrument* via the engine."""
		self.__alphaman.sell(instrument, volume, limit_price, stop_price, days)

	def orderTarget(self, instrument, percentage, limit_price=None, stop_price=None, days=None):
		"""Rebalance *instrument* to *percentage* of the portfolio."""
		self.__alphaman.orderTarget(instrument, percentage, limit_price, stop_price, days)

	def getSchedules(self):
		"""Return the engine's schedule list."""
		return self.__alphaman.getSchedules()

	def getFeed(self):
		"""Return the engine's data feed."""
		return self.__alphaman.getFeed()

	def get(self, instrument, key, date_idx):
		"""Look up historical data for *instrument*.

		:param key: bar-data field name; falls back to extra data on KeyError.
		:param date_idx: non-positive int offset from today, or a list of such
			offsets (last element must be <= 0). Offsets before the start of
			the feed are clamped to 0 (int form) or skipped (list form).
		"""
		feed = self.getFeed()
		if isinstance(date_idx, int):
			# Only allow access to today's or earlier data (no look-ahead).
			assert(date_idx <= 0)
			today_idx = self.__alphaman.getTodayIdx() + date_idx
			if today_idx < 0:
				today_idx = 0
			try:
				return feed.getDailyInstrumentData(today_idx, instrument).getBarData(key)
			except KeyError:
				return feed.getDailyInstrumentData(today_idx, instrument).getExtraData(key)
		elif isinstance(date_idx, list):
			assert(date_idx[-1] <= 0)
			today_idx_list = map(lambda x: x + self.__alphaman.getTodayIdx(), date_idx)
			data_list = []
			for today_idx in today_idx_list:
				if today_idx < 0:
					continue
				try:
					data_list.append(feed.getDailyInstrumentData(today_idx, instrument).getBarData(key))
				except KeyError:
					data_list.append(feed.getDailyInstrumentData(today_idx, instrument).getExtraData(key))
			return data_list
		else:
			raise Exception('date_idx must be int or list of int')
| 36.022727 | 93 | 0.752366 |
class BaseStrategy:
	"""Base class for trading strategies; subclasses implement handleData()."""
	# NOTE(review): class-level mutable dict is shared across ALL strategy
	# instances — confirm this is intended; a per-instance dict is safer.
	__signals = {}
	def __init__(self):
		pass
	def addSignals(self, key, signal):
		"""Register *signal* under *key* and attach this strategy to it."""
		self.__signals[key] = signal
		signal.setStrategy(self)
	def getSignal(self, key):
		"""Return the current value of the signal registered under *key*."""
		return self.__signals[key].getSignal()
	def handleData(self):
		"""Per-bar strategy logic; must be overridden by subclasses."""
		raise NotImplementedError()
	def setAlphaman(self, alphaman):
		"""Attach the backtesting engine this strategy delegates to."""
		self.__alphaman = alphaman
	def buy(self, instrument, volume, limit_price=None, stop_price=None, days=None):
		"""Place a buy order via the engine."""
		self.__alphaman.buy(instrument, volume, limit_price, stop_price, days)
	def sell(self, instrument, volume, limit_price=None, stop_price=None, days=None):
		"""Place a sell order via the engine."""
		self.__alphaman.sell(instrument, volume, limit_price, stop_price, days)
	def orderTarget(self, instrument, percentage, limit_price=None, stop_price=None, days=None):
		"""Rebalance *instrument* to *percentage* of the portfolio."""
		self.__alphaman.orderTarget(instrument, percentage, limit_price, stop_price, days)
	def getSchedules(self):
		"""Return the engine's schedule list."""
		return self.__alphaman.getSchedules()
	def getFeed(self):
		"""Return the engine's data feed."""
		return self.__alphaman.getFeed()
	def get(self, instrument, key, date_idx):
		"""Look up historical bar/extra data; date_idx is a non-positive int
		offset from today, or a list of such offsets (last must be <= 0)."""
		feed = self.getFeed()
		if isinstance(date_idx, int):
			# Only today's or earlier data may be read (no look-ahead).
			assert(date_idx <= 0)
			today_idx = self.__alphaman.getTodayIdx() + date_idx
			if today_idx < 0:
				today_idx = 0
			try:
				return feed.getDailyInstrumentData(today_idx, instrument).getBarData(key)
			except KeyError:
				return feed.getDailyInstrumentData(today_idx, instrument).getExtraData(key)
		elif isinstance(date_idx, list):
			assert(date_idx[-1] <= 0)
			today_idx_list = map(lambda x: x+self.__alphaman.getTodayIdx(), date_idx)
			data_list = []
			for today_idx in today_idx_list:
				# Skip offsets that fall before the start of the feed.
				if today_idx < 0:
					continue
				try:
					data_list.append(feed.getDailyInstrumentData(today_idx, instrument).getBarData(key))
				except KeyError:
					data_list.append(feed.getDailyInstrumentData(today_idx, instrument).getExtraData(key))
			return data_list
		else:
			raise Exception('date_idx must be int or list of int')
| true | true |
1c3b7f3815d87ef71439de3a52abe3ead37b3761 | 5,475 | py | Python | wagtail/admin/rich_text/editors/hallo.py | samgans/wagtail | 48a8af71e5333fb701476702bd784fa407567e25 | [
"BSD-3-Clause"
] | 2 | 2019-05-23T01:31:18.000Z | 2020-06-27T21:19:10.000Z | wagtail/admin/rich_text/editors/hallo.py | samgans/wagtail | 48a8af71e5333fb701476702bd784fa407567e25 | [
"BSD-3-Clause"
] | 6 | 2020-08-26T03:00:03.000Z | 2020-09-24T02:59:14.000Z | wagtail/admin/rich_text/editors/hallo.py | samgans/wagtail | 48a8af71e5333fb701476702bd784fa407567e25 | [
"BSD-3-Clause"
] | 1 | 2020-05-28T12:25:15.000Z | 2020-05-28T12:25:15.000Z | import json
from collections import OrderedDict
from django.forms import Media, widgets
from django.utils.functional import cached_property
from wagtail.admin.edit_handlers import RichTextFieldPanel
from wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter
from wagtail.admin.staticfiles import versioned_static
from wagtail.core.rich_text import features
class HalloPlugin:
    """Configuration wrapper for a single hallo.js editor plugin.

    Holds the plugin's JS name, its options dict, the static JS/CSS assets it
    needs, and an ordering key used when assembling the plugin list.
    """

    def __init__(self, **kwargs):
        # All settings are optional keyword arguments.
        self.name = kwargs.get('name', None)
        self.options = kwargs.get('options', {})
        self.js = kwargs.get('js', [])
        self.css = kwargs.get('css', {})
        self.order = kwargs.get('order', 100)

    def construct_plugins_list(self, plugins):
        """Record this plugin's options in the shared *plugins* mapping."""
        if self.name is not None:
            plugins[self.name] = self.options

    @property
    def media(self):
        """Django Media object with versioned JS/CSS assets for this plugin."""
        versioned_js = [versioned_static(path) for path in self.js]
        versioned_css = {
            media_type: [versioned_static(path) for path in paths]
            for media_type, paths in self.css.items()
        }
        return Media(js=versioned_js, css=versioned_css)
class HalloFormatPlugin(HalloPlugin):
    """Plugin entry that enables one inline formatting (bold, italic, ...)."""

    def __init__(self, **kwargs):
        kwargs.setdefault('name', 'halloformat')
        kwargs.setdefault('order', 10)
        self.format_name = kwargs['format_name']
        super().__init__(**kwargs)

    def construct_plugins_list(self, plugins):
        """Enable this plugin's formatting flag in the shared config."""
        # All formattings start disabled; each instance switches on its own.
        entry = plugins.setdefault(self.name, {'formattings': {
            'bold': False, 'italic': False, 'strikeThrough': False, 'underline': False
        }})
        entry['formattings'][self.format_name] = True
class HalloHeadingPlugin(HalloPlugin):
    """Plugin entry that allows one block element (p, h2, ...) in the editor."""

    default_order = 20

    def __init__(self, **kwargs):
        kwargs.setdefault('name', 'halloheadings')
        kwargs.setdefault('order', self.default_order)
        self.element = kwargs.pop('element')
        super().__init__(**kwargs)

    def construct_plugins_list(self, plugins):
        """Append this plugin's block element to the shared config."""
        entry = plugins.setdefault(self.name, {'formatBlocks': []})
        entry['formatBlocks'].append(self.element)
class HalloListPlugin(HalloPlugin):
    """Plugin entry that enables one list type (ordered or unordered)."""

    def __init__(self, **kwargs):
        kwargs.setdefault('name', 'hallolists')
        kwargs.setdefault('order', 40)
        self.list_type = kwargs['list_type']
        super().__init__(**kwargs)

    def construct_plugins_list(self, plugins):
        """Enable this plugin's list type in the shared config."""
        # Both list types start disabled; each instance switches on its own.
        entry = plugins.setdefault(self.name, {'lists': {
            'ordered': False, 'unordered': False
        }})
        entry['lists'][self.list_type] = True
class HalloRequireParagraphsPlugin(HalloPlugin):
    """Plugin that forces editor content to be wrapped in paragraph elements."""

    @property
    def media(self):
        """Prepend this plugin's JS to the media inherited from HalloPlugin."""
        plugin_js = Media(js=[
            versioned_static('wagtailadmin/js/hallo-plugins/hallo-requireparagraphs.js'),
        ])
        return plugin_js + super().media
# Plugins which are always imported, and cannot be enabled/disabled via 'features':
# undo/redo, mandatory paragraph wrapping, and the <p> block format.
CORE_HALLO_PLUGINS = [
    HalloPlugin(name='halloreundo', order=50),
    HalloRequireParagraphsPlugin(name='hallorequireparagraphs'),
    HalloHeadingPlugin(element='p')
]
class HalloRichTextArea(widgets.Textarea):
    """Hallo.js rich text editor widget.

    Serialises between the database rich-text format and the editor's HTML via
    EditorHTMLConverter, and exposes the enabled plugin configuration to the
    client as JSON in the widget context.
    """
    template_name = 'wagtailadmin/widgets/hallo_rich_text_area.html'

    # this class's constructor accepts a 'features' kwarg
    accepts_features = True

    def get_panel(self):
        """Return the edit-handler panel class used to render this widget."""
        return RichTextFieldPanel

    def __init__(self, *args, **kwargs):
        """Build the widget; 'options' and 'features' kwargs are consumed here."""
        self.options = kwargs.pop('options', None)
        self.features = kwargs.pop('features', None)
        if self.features is None:
            self.features = features.get_default_features()
        self.converter = EditorHTMLConverter(self.features)
        # construct a list of plugin objects, by querying the feature registry
        # and keeping the non-null responses from get_editor_plugin
        self.plugins = CORE_HALLO_PLUGINS + list(filter(None, [
            features.get_editor_plugin('hallo', feature_name)
            for feature_name in self.features
        ]))
        # Lower 'order' values come first in the editor's plugin list.
        self.plugins.sort(key=lambda plugin: plugin.order)
        super().__init__(*args, **kwargs)

    def format_value(self, value):
        """Convert the stored DB representation to editor HTML."""
        # Convert database rich text representation to the format required by
        # the input field
        value = super().format_value(value)
        if value is None:
            return None
        return self.converter.from_database_format(value)

    def get_context(self, name, value, attrs):
        """Add the JSON-encoded hallo plugin configuration to the context."""
        context = super().get_context(name, value, attrs)
        if self.options is not None and 'plugins' in self.options:
            # explicit 'plugins' config passed in options, so use that
            plugin_data = self.options['plugins']
        else:
            # Let every enabled plugin contribute its config, in order.
            plugin_data = OrderedDict()
            for plugin in self.plugins:
                plugin.construct_plugins_list(plugin_data)
        context['widget']['plugins_json'] = json.dumps(plugin_data)
        return context

    def value_from_datadict(self, data, files, name):
        """Convert submitted editor HTML back to the DB representation."""
        original_value = super().value_from_datadict(data, files, name)
        if original_value is None:
            return None
        return self.converter.to_database_format(original_value)

    @cached_property
    def media(self):
        """Aggregate the base hallo assets plus each enabled plugin's media."""
        media = Media(js=[
            versioned_static('wagtailadmin/js/vendor/hallo.js'),
            versioned_static('wagtailadmin/js/hallo-bootstrap.js'),
        ], css={
            'all': [versioned_static('wagtailadmin/css/panels/hallo.css')]
        })
        for plugin in self.plugins:
            media += plugin.media
        return media
| 33.384146 | 89 | 0.658447 | import json
from collections import OrderedDict
from django.forms import Media, widgets
from django.utils.functional import cached_property
from wagtail.admin.edit_handlers import RichTextFieldPanel
from wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter
from wagtail.admin.staticfiles import versioned_static
from wagtail.core.rich_text import features
class HalloPlugin:
    """Configuration wrapper for a single hallo.js editor plugin."""
    def __init__(self, **kwargs):
        # All settings are optional keyword arguments.
        self.name = kwargs.get('name', None)
        self.options = kwargs.get('options', {})
        self.js = kwargs.get('js', [])
        self.css = kwargs.get('css', {})
        self.order = kwargs.get('order', 100)
    def construct_plugins_list(self, plugins):
        """Record this plugin's options in the shared *plugins* mapping."""
        if self.name is not None:
            plugins[self.name] = self.options
    @property
    def media(self):
        """Django Media object with versioned JS/CSS assets for this plugin."""
        js = [versioned_static(js_file) for js_file in self.js]
        css = {}
        for media_type, css_files in self.css.items():
            css[media_type] = [versioned_static(css_file) for css_file in css_files]
        return Media(js=js, css=css)
class HalloFormatPlugin(HalloPlugin):
    """Plugin entry that enables one inline formatting (bold, italic, ...)."""
    def __init__(self, **kwargs):
        kwargs.setdefault('name', 'halloformat')
        kwargs.setdefault('order', 10)
        self.format_name = kwargs['format_name']
        super().__init__(**kwargs)
    def construct_plugins_list(self, plugins):
        """Enable this plugin's formatting flag in the shared config."""
        # All formattings start disabled; each instance switches on its own.
        plugins.setdefault(self.name, {'formattings': {
            'bold': False, 'italic': False, 'strikeThrough': False, 'underline': False
        }})
        plugins[self.name]['formattings'][self.format_name] = True
class HalloHeadingPlugin(HalloPlugin):
    """Plugin entry that allows one block element (p, h2, ...) in the editor."""
    default_order = 20
    def __init__(self, **kwargs):
        kwargs.setdefault('name', 'halloheadings')
        kwargs.setdefault('order', self.default_order)
        self.element = kwargs.pop('element')
        super().__init__(**kwargs)
    def construct_plugins_list(self, plugins):
        """Append this plugin's block element to the shared config."""
        plugins.setdefault(self.name, {'formatBlocks': []})
        plugins[self.name]['formatBlocks'].append(self.element)
class HalloListPlugin(HalloPlugin):
    """Plugin entry that enables one list type (ordered or unordered)."""
    def __init__(self, **kwargs):
        kwargs.setdefault('name', 'hallolists')
        kwargs.setdefault('order', 40)
        self.list_type = kwargs['list_type']
        super().__init__(**kwargs)
    def construct_plugins_list(self, plugins):
        """Enable this plugin's list type in the shared config."""
        # Both list types start disabled; each instance switches on its own.
        plugins.setdefault(self.name, {'lists': {
            'ordered': False, 'unordered': False
        }})
        plugins[self.name]['lists'][self.list_type] = True
class HalloRequireParagraphsPlugin(HalloPlugin):
    """Plugin that forces editor content to be wrapped in paragraph elements."""
    @property
    def media(self):
        """Prepend this plugin's JS to the media inherited from HalloPlugin."""
        return Media(js=[
            versioned_static('wagtailadmin/js/hallo-plugins/hallo-requireparagraphs.js'),
        ]) + super().media
# Plugins that are always enabled, independent of the 'features' config:
# undo/redo, mandatory paragraph wrapping, and the <p> block format.
CORE_HALLO_PLUGINS = [
    HalloPlugin(name='halloreundo', order=50),
    HalloRequireParagraphsPlugin(name='hallorequireparagraphs'),
    HalloHeadingPlugin(element='p')
]
class HalloRichTextArea(widgets.Textarea):
    """Hallo.js rich text editor widget.

    Serialises between the database rich-text format and the editor's HTML via
    EditorHTMLConverter, and exposes the enabled plugin configuration to the
    client as JSON in the widget context.
    """
    template_name = 'wagtailadmin/widgets/hallo_rich_text_area.html'
    # The constructor accepts a 'features' kwarg (see __init__).
    accepts_features = True
    def get_panel(self):
        """Return the edit-handler panel class used to render this widget."""
        return RichTextFieldPanel
    def __init__(self, *args, **kwargs):
        """Build the widget; 'options' and 'features' kwargs are consumed here."""
        self.options = kwargs.pop('options', None)
        self.features = kwargs.pop('features', None)
        if self.features is None:
            self.features = features.get_default_features()
        self.converter = EditorHTMLConverter(self.features)
        # construct a list of plugin objects, by querying the feature registry
        # and keeping the non-null responses from get_editor_plugin
        self.plugins = CORE_HALLO_PLUGINS + list(filter(None, [
            features.get_editor_plugin('hallo', feature_name)
            for feature_name in self.features
        ]))
        # Lower 'order' values come first in the editor's plugin list.
        self.plugins.sort(key=lambda plugin: plugin.order)
        super().__init__(*args, **kwargs)
    def format_value(self, value):
        """Convert the stored DB representation to editor HTML."""
        # Convert database rich text representation to the format required by
        # the input field
        value = super().format_value(value)
        if value is None:
            return None
        return self.converter.from_database_format(value)
    def get_context(self, name, value, attrs):
        """Add the JSON-encoded hallo plugin configuration to the context."""
        context = super().get_context(name, value, attrs)
        if self.options is not None and 'plugins' in self.options:
            # explicit 'plugins' config passed in options, so use that
            plugin_data = self.options['plugins']
        else:
            # Let every enabled plugin contribute its config, in order.
            plugin_data = OrderedDict()
            for plugin in self.plugins:
                plugin.construct_plugins_list(plugin_data)
        context['widget']['plugins_json'] = json.dumps(plugin_data)
        return context
    def value_from_datadict(self, data, files, name):
        """Convert submitted editor HTML back to the DB representation."""
        original_value = super().value_from_datadict(data, files, name)
        if original_value is None:
            return None
        return self.converter.to_database_format(original_value)
    @cached_property
    def media(self):
        """Aggregate the base hallo assets plus each enabled plugin's media."""
        media = Media(js=[
            versioned_static('wagtailadmin/js/vendor/hallo.js'),
            versioned_static('wagtailadmin/js/hallo-bootstrap.js'),
        ], css={
            'all': [versioned_static('wagtailadmin/css/panels/hallo.css')]
        })
        for plugin in self.plugins:
            media += plugin.media
        return media
| true | true |
1c3b8019244656610632b52a3c5a9801cfcb4339 | 4,546 | py | Python | befh/ws_api_socket.py | philsong/BitcoinExchangeFH | 3c45d4be2ea2a258f132d982f62f69d649e0b083 | [
"Apache-2.0"
] | 32 | 2017-12-15T07:30:11.000Z | 2020-07-16T10:15:18.000Z | befh/ws_api_socket.py | bijiasuo/BitcoinExchangeFH | 9aa7b790cf74cf9fe48662147c30fc05e045e9ed | [
"Apache-2.0"
] | null | null | null | befh/ws_api_socket.py | bijiasuo/BitcoinExchangeFH | 9aa7b790cf74cf9fe48662147c30fc05e045e9ed | [
"Apache-2.0"
] | 20 | 2017-11-09T15:28:39.000Z | 2019-12-10T01:02:57.000Z | from befh.api_socket import ApiSocket
from befh.util import Logger
import websocket
import threading
import json
import time
import zlib
class WebSocketApiClient(ApiSocket):
    """
    Generic websocket client with automatic reconnection.

    (The original docstring said "REST API call"; this class actually speaks
    websockets via the websocket-client library.)
    """
    def __init__(self, id, received_data_compressed=False):
        """
        Constructor
        :param id: Socket id (used for logging only)
        :param received_data_compressed: if True, incoming frames are
               gzip-compressed JSON and are inflated before parsing
        """
        ApiSocket.__init__(self)
        self.ws = None              # Web socket
        self.id = id
        self.wst = None             # Web socket thread
        self._connecting = False
        self._connected = False
        self._received_data_compressed = received_data_compressed
        self.on_message_handlers = []
        self.on_open_handlers = []
        self.on_close_handlers = []
        self.on_error_handlers = []

    def connect(self, url,
                on_message_handler=None,
                on_open_handler=None,
                on_close_handler=None,
                on_error_handler=None,
                reconnect_interval=10):
        """
        Register the given handlers and (on first call) spawn the websocket
        reader thread.

        :param url: Url link
        :param on_message_handler: Message handler which take the message as
                                   the first argument
        :param on_open_handler: Socket open handler which take the socket as
                                the first argument
        :param on_close_handler: Socket close handler which take the socket as
                                 the first argument
        :param on_error_handler: Socket error handler which take the socket as
                                 the first argument and the error as the second
                                 argument
        :param reconnect_interval: Seconds to wait before reconnecting after
                                   the connection drops
        :return: the thread running the websocket loop
        """
        Logger.info(self.__class__.__name__, "Connecting to socket <%s> <%s>..." % (self.id, url))
        if on_message_handler is not None:
            self.on_message_handlers.append(on_message_handler)
        if on_open_handler is not None:
            self.on_open_handlers.append(on_open_handler)
        if on_close_handler is not None:
            self.on_close_handlers.append(on_close_handler)
        if on_error_handler is not None:
            self.on_error_handlers.append(on_error_handler)
        if not self._connecting and not self._connected:
            # Only spawn one websocket/thread pair per client instance.
            self._connecting = True
            self.ws = websocket.WebSocketApp(url,
                                            on_message=self.__on_message,
                                            on_close=self.__on_close,
                                            on_open=self.__on_open,
                                            on_error=self.__on_error)
            self.wst = threading.Thread(target=lambda: self.__start(reconnect_interval=reconnect_interval))
            self.wst.start()
        return self.wst

    def send(self, msg):
        """
        Send message
        :param msg: Message
        :return:
        """
        self.ws.send(msg)

    def __start(self, reconnect_interval=10):
        # Run the websocket loop; whenever run_forever returns (connection
        # dropped), wait and reconnect. NOTE: this loop never exits.
        while True:
            self.ws.run_forever()
            Logger.info(self.__class__.__name__, "Socket <%s> is going to reconnect..." % self.id)
            time.sleep(reconnect_interval)

    def __on_message(self, ws, m):
        # MAX_WBITS|16 selects gzip framing for zlib.decompress.
        if self._received_data_compressed is True:
            data = zlib.decompress(m, zlib.MAX_WBITS|16).decode('UTF-8')
            m = json.loads(data)
        else:
            m = json.loads(m)
        if len(self.on_message_handlers) > 0:
            for handler in self.on_message_handlers:
                handler(m)

    def __on_open(self, ws):
        # Mark connected and fan out to registered open handlers.
        Logger.info(self.__class__.__name__, "Socket <%s> is opened." % self.id)
        self._connected = True
        if len(self.on_open_handlers) > 0:
            for handler in self.on_open_handlers:
                handler(ws)

    def __on_close(self, ws):
        # Reset connection flags and fan out to registered close handlers.
        Logger.info(self.__class__.__name__, "Socket <%s> is closed." % self.id)
        self._connecting = False
        self._connected = False
        if len(self.on_close_handlers) > 0:
            for handler in self.on_close_handlers:
                handler(ws)

    def __on_error(self, ws, error):
        # Log and fan out to registered error handlers.
        Logger.error(self.__class__.__name__, "Socket <%s> error:\n %s" % (self.id, error))
        if len(self.on_error_handlers) > 0:
            for handler in self.on_error_handlers:
                handler(ws, error)
if __name__ == '__main__':
    # Smoke test: connect to a local websocket endpoint, reconnecting every
    # second, and keep the process alive for 10 seconds.
    Logger.init_log()
    socket = WebSocketApiClient('test')
    socket.connect('ws://localhost', reconnect_interval=1)
    time.sleep(10)
| 37.570248 | 107 | 0.58205 | from befh.api_socket import ApiSocket
from befh.util import Logger
import websocket
import threading
import json
import time
import zlib
class WebSocketApiClient(ApiSocket):
    """Generic websocket client with automatic reconnection."""
    def __init__(self, id, received_data_compressed=False):
        """Set up handler lists; *id* is used for logging only."""
        ApiSocket.__init__(self)
        self.ws = None   # websocket.WebSocketApp instance
        self.id = id
        self.wst = None  # thread running the websocket loop
        self._connecting = False
        self._connected = False
        self._received_data_compressed = received_data_compressed
        self.on_message_handlers = []
        self.on_open_handlers = []
        self.on_close_handlers = []
        self.on_error_handlers = []
    def connect(self, url,
                on_message_handler=None,
                on_open_handler=None,
                on_close_handler=None,
                on_error_handler=None,
                reconnect_interval=10):
        """Register handlers and (on first call) spawn the reader thread."""
        Logger.info(self.__class__.__name__, "Connecting to socket <%s> <%s>..." % (self.id, url))
        if on_message_handler is not None:
            self.on_message_handlers.append(on_message_handler)
        if on_open_handler is not None:
            self.on_open_handlers.append(on_open_handler)
        if on_close_handler is not None:
            self.on_close_handlers.append(on_close_handler)
        if on_error_handler is not None:
            self.on_error_handlers.append(on_error_handler)
        if not self._connecting and not self._connected:
            # Only spawn one websocket/thread pair per client instance.
            self._connecting = True
            self.ws = websocket.WebSocketApp(url,
                                            on_message=self.__on_message,
                                            on_close=self.__on_close,
                                            on_open=self.__on_open,
                                            on_error=self.__on_error)
            self.wst = threading.Thread(target=lambda: self.__start(reconnect_interval=reconnect_interval))
            self.wst.start()
        return self.wst
    def send(self, msg):
        """Send *msg* over the websocket."""
        self.ws.send(msg)
    def __start(self, reconnect_interval=10):
        # Run the websocket loop; reconnect whenever run_forever returns.
        # NOTE: this loop never exits.
        while True:
            self.ws.run_forever()
            Logger.info(self.__class__.__name__, "Socket <%s> is going to reconnect..." % self.id)
            time.sleep(reconnect_interval)
    def __on_message(self, ws, m):
        # MAX_WBITS|16 selects gzip framing for zlib.decompress.
        if self._received_data_compressed is True:
            data = zlib.decompress(m, zlib.MAX_WBITS|16).decode('UTF-8')
            m = json.loads(data)
        else:
            m = json.loads(m)
        if len(self.on_message_handlers) > 0:
            for handler in self.on_message_handlers:
                handler(m)
    def __on_open(self, ws):
        # Mark connected and fan out to registered open handlers.
        Logger.info(self.__class__.__name__, "Socket <%s> is opened." % self.id)
        self._connected = True
        if len(self.on_open_handlers) > 0:
            for handler in self.on_open_handlers:
                handler(ws)
    def __on_close(self, ws):
        # Reset connection flags and fan out to registered close handlers.
        Logger.info(self.__class__.__name__, "Socket <%s> is closed." % self.id)
        self._connecting = False
        self._connected = False
        if len(self.on_close_handlers) > 0:
            for handler in self.on_close_handlers:
                handler(ws)
    def __on_error(self, ws, error):
        # Log and fan out to registered error handlers.
        Logger.error(self.__class__.__name__, "Socket <%s> error:\n %s" % (self.id, error))
        if len(self.on_error_handlers) > 0:
            for handler in self.on_error_handlers:
                handler(ws, error)
if __name__ == '__main__':
    # Smoke test: connect to a local websocket endpoint, reconnecting every
    # second, and keep the process alive for 10 seconds.
    Logger.init_log()
    socket = WebSocketApiClient('test')
    socket.connect('ws://localhost', reconnect_interval=1)
    time.sleep(10)
| true | true |
1c3b80e860cb6ee5de4408374e9f96e0b519ae33 | 1,299 | py | Python | aliyun-python-sdk-cloudesl/aliyunsdkcloudesl/request/v20180801/DeleteEslDeviceRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cloudesl/aliyunsdkcloudesl/request/v20180801/DeleteEslDeviceRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cloudesl/aliyunsdkcloudesl/request/v20180801/DeleteEslDeviceRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteEslDeviceRequest(RpcRequest):
	"""RPC request that deletes an ESL device, identified by bar code and store."""

	def __init__(self):
		# Product 'cloudesl', API version 2018-08-01, action 'DeleteEslDevice'.
		RpcRequest.__init__(self, 'cloudesl', '2018-08-01', 'DeleteEslDevice')

	def get_EslBarCode(self):
		"""Return the ESL device bar code query parameter."""
		return self.get_query_params().get('EslBarCode')

	def set_EslBarCode(self, EslBarCode):
		"""Set the ESL device bar code query parameter."""
		self.add_query_param('EslBarCode', EslBarCode)

	def get_StoreId(self):
		"""Return the store id query parameter."""
		return self.get_query_params().get('StoreId')

	def set_StoreId(self, StoreId):
		"""Set the store id query parameter."""
		self.add_query_param('StoreId', StoreId)
from aliyunsdkcore.request import RpcRequest
class DeleteEslDeviceRequest(RpcRequest):
	"""RPC request that deletes an ESL device, identified by bar code and store."""
	def __init__(self):
		# Product 'cloudesl', API version 2018-08-01, action 'DeleteEslDevice'.
		RpcRequest.__init__(self, 'cloudesl', '2018-08-01', 'DeleteEslDevice')
	def get_EslBarCode(self):
		"""Return the ESL device bar code query parameter."""
		return self.get_query_params().get('EslBarCode')
	def set_EslBarCode(self,EslBarCode):
		"""Set the ESL device bar code query parameter."""
		self.add_query_param('EslBarCode',EslBarCode)
	def get_StoreId(self):
		"""Return the store id query parameter."""
		return self.get_query_params().get('StoreId')
	def set_StoreId(self,StoreId):
		"""Set the store id query parameter."""
		self.add_query_param('StoreId',StoreId)
self.add_query_param('StoreId',StoreId) | true | true |
1c3b8343aeb2606f89ad5563292263a637bd9546 | 63 | py | Python | ex016teste.py | JoaoCrescioni/Exercicios-curso-em-video-python | 5db9c79af4e8894b0ed2cc4d0110cdb10b4a467e | [
"MIT"
] | null | null | null | ex016teste.py | JoaoCrescioni/Exercicios-curso-em-video-python | 5db9c79af4e8894b0ed2cc4d0110cdb10b4a467e | [
"MIT"
] | null | null | null | ex016teste.py | JoaoCrescioni/Exercicios-curso-em-video-python | 5db9c79af4e8894b0ed2cc4d0110cdb10b4a467e | [
"MIT"
] | null | null | null | import emoji
print(emoji.emojize(':mouse:', use_aliases=True))
| 21 | 49 | 0.761905 | import emoji
print(emoji.emojize(':mouse:', use_aliases=True))
| true | true |
1c3b838ce620b963a94003861d4f4eae6dadc3cf | 24,878 | py | Python | E03 - Learning programs and models/Architectures/models/backbones/hrnet.py | mialona/Stomatal-segmentation | 149d469ec572c41a13d62149d7d62d6805d19697 | [
"MIT"
] | null | null | null | E03 - Learning programs and models/Architectures/models/backbones/hrnet.py | mialona/Stomatal-segmentation | 149d469ec572c41a13d62149d7d62d6805d19697 | [
"MIT"
] | null | null | null | E03 - Learning programs and models/Architectures/models/backbones/hrnet.py | mialona/Stomatal-segmentation | 149d469ec572c41a13d62149d7d62d6805d19697 | [
"MIT"
] | null | null | null | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import numpy as np
from typing import List
from .build import BACKBONE_REGISTRY
BN_MOMENTUM = 0.01
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1 (keeps spatial size
    for stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block with two 3x3 convolutions (ResNet "basic" variant).

    Output channels equal ``planes`` (``expansion`` is 1).  An optional
    ``downsample`` module projects the identity branch when the main path
    changes shape.
    """

    expansion = 1

    def __init__(self, inplanes, planes, norm_layer, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.norm_layer = norm_layer
        # First conv may reduce spatial size via `stride`.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = self.norm_layer(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = self.norm_layer(planes, momentum=BN_MOMENTUM)
        # Projection applied to the shortcut when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch (projected if a downsample module was given).
        identity = x if self.downsample is None else self.downsample(x)

        # Main branch: conv-bn-relu, conv-bn.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out = out + identity
        return self.relu(out)
class Bottleneck(nn.Module):
    """Residual bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand.

    Output channels equal ``planes * expansion`` (expansion 4).  An
    optional ``downsample`` module projects the identity branch.
    """

    expansion = 4

    def __init__(self, inplanes, planes, norm_layer, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.norm_layer = norm_layer
        # 1x1 channel reduction.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = self.norm_layer(planes, momentum=BN_MOMENTUM)
        # 3x3 spatial convolution (may stride).
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = self.norm_layer(planes, momentum=BN_MOMENTUM)
        # 1x1 channel expansion.
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = self.norm_layer(planes * self.expansion,
                                   momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch (projected if a downsample module was given).
        shortcut = x if self.downsample is None else self.downsample(x)

        # Main branch.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out = out + shortcut
        return self.relu(out)
class HighResolutionModule(nn.Module):
    """One HRNet module: ``num_branches`` parallel residual branches (one
    per resolution) followed by a full cross-resolution fusion step in
    which every branch contributes to every output branch.

    Args:
        num_branches: number of parallel resolution branches.
        blocks: residual block class (BasicBlock or Bottleneck).
        num_blocks: per-branch block counts (list of length num_branches).
        num_inchannels: per-branch input channels; this list is mutated in
            place to reflect the block expansion factor.
        num_channels: per-branch base channel widths.
        fuse_method: fusion strategy label (stored but not branched on;
            the implemented fusion is element-wise sum).
        norm_layer: normalization layer factory (e.g. nn.BatchNorm2d).
        multi_scale_output: if False, the fuse step produces only the
            highest-resolution output.
    """
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, norm_layer, multi_scale_output=True):
        super(HighResolutionModule, self).__init__()
        self.norm_layer = norm_layer
        # Fail fast on inconsistent per-branch configuration lists.
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)

        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches

        self.multi_scale_output = multi_scale_output

        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=True)

    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        """Raise ValueError if any per-branch list length != num_branches."""
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)

        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)

        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        """Build one branch: a Sequential of residual blocks.

        Also updates ``self.num_inchannels[branch_index]`` to the branch's
        output width (base channels * block expansion).
        """
        downsample = None
        # Projection shortcut for the first block when shape changes.
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.norm_layer(num_channels[branch_index] * block.expansion,
                                momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], self.norm_layer, stride, downsample))
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index], self.norm_layer))

        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build all parallel branches as a ModuleList."""
        branches = []

        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))

        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        """Build fuse_layers[i][j]: maps branch j's output to branch i's
        resolution/width.

        j > i: 1x1 conv + norm (upsampling happens in forward);
        j == i: Identity;
        j < i: chain of stride-2 3x3 convs (i-j downsampling steps, the
               last one without ReLU).
        Returns None for a single-branch module (nothing to fuse).
        """
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        self.norm_layer(num_inchannels[i], momentum=BN_MOMENTUM)))
                elif j == i:
                    fuse_layer.append(nn.Identity())
                else:
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            # Final downsampling step: no ReLU (sum first).
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                self.norm_layer(num_outchannels_conv3x3,
                                                momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                self.norm_layer(num_outchannels_conv3x3,
                                                momentum=BN_MOMENTUM),
                                nn.ReLU(inplace=True)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def get_num_inchannels(self):
        """Return the (updated) per-branch output channel counts."""
        return self.num_inchannels

    def forward(self, x: List[torch.Tensor]):
        """Run each branch on its input, then fuse across resolutions.

        Takes and returns a list of tensors, one per branch (one element
        when ``multi_scale_output`` is False and num_branches > 1).
        """
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        for i, branch in enumerate(self.branches):
            x[i] = branch(x[i])

        x_fuse = []
        for i, fuse_layer in enumerate(self.fuse_layers):
            # Seed output i with branch 0's (possibly transformed) map.
            y = x[0] if i == 0 else fuse_layer[0](x[0])
            for j, fuse_sub_layer in enumerate(fuse_layer):
                # j == 0 already consumed above; j > num_branches is
                # unreachable (j < num_branches) and kept as a guard.
                if j == 0 or j > self.num_branches:
                    pass
                else:
                    if i == j:
                        y = y + x[j]
                    elif j > i:
                        # Lower-resolution branch: 1x1-conv then upsample
                        # to branch i's spatial size before summing.
                        width_output = x[i].shape[-1]
                        height_output = x[i].shape[-2]
                        y = y + F.interpolate(
                            fuse_sub_layer(x[j]),
                            size=[height_output, width_output],
                            mode='bilinear')
                    else:
                        # Higher-resolution branch: strided-conv downsample.
                        y = y + fuse_sub_layer(x[j])
            x_fuse.append(self.relu(y))

        return x_fuse
# Maps the 'BLOCK' string used in stage configurations to its block class.
blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
    """HRNet backbone: keeps multiple resolution branches in parallel and
    fuses them repeatedly across four stages.

    ``forward`` returns the concatenation of all four branch outputs,
    upsampled to the highest (1/4 of input) resolution; the resulting
    channel count is exposed as ``self.last_inp_channels``.

    Args:
        config: dict with 'STAGE1'..'STAGE4' sub-dicts (see
            ``backbone_config`` in this module).
        norm_layer: normalization layer factory, e.g. ``nn.BatchNorm2d``.

    Fix vs. previous revision: ``np.int`` (deprecated in NumPy 1.20 and
    removed in 1.24) replaced with the builtin ``int``.
    """

    def __init__(self, config, norm_layer, **kwargs):
        super(HighResolutionNet, self).__init__()
        self.norm_layer = norm_layer

        # Stem: two stride-2 3x3 convs -> 64 channels at 1/4 resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = self.norm_layer(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = self.norm_layer(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)

        # Stage 1: a single-branch residual layer.
        self.stage1_cfg = config['STAGE1']
        num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
        block = blocks_dict[self.stage1_cfg['BLOCK']]
        num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
        stage1_out_channel = block.expansion * num_channels

        # Stages 2-4: each transition adds one lower-resolution branch.
        self.stage2_cfg = config['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [c * block.expansion for c in num_channels]
        self.transition1 = self._make_transition_layer(
            [stage1_out_channel], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        self.stage3_cfg = config['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [c * block.expansion for c in num_channels]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        self.stage4_cfg = config['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [c * block.expansion for c in num_channels]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)

        # Channel count of the concatenated output.  Builtin int(sum(...))
        # replaces np.int(np.sum(...)): np.int was removed in NumPy 1.24.
        self.last_inp_channels = int(sum(pre_stage_channels))

    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch adapters between consecutive stages.

        Existing branches get a 3x3 conv only when their channel count
        changes (otherwise nn.Identity).  Each newly added branch is
        produced from the previous stage's lowest-resolution output by a
        chain of stride-2 3x3 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3, 1, 1, bias=False),
                        self.norm_layer(num_channels_cur_layer[i],
                                        momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                else:
                    transition_layers.append(nn.Identity())
            else:
                # New branch i: downsample (i + 1 - num_branches_pre) times.
                downs = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    # Only the last step changes the channel count.
                    outchannels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else inchannels
                    downs.append(nn.Sequential(
                        nn.Conv2d(inchannels, outchannels, 3, 2, 1,
                                  bias=False),
                        self.norm_layer(outchannels, momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*downs))

        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; only the first may downsample."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # Projection shortcut when the identity branch changes shape.
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                self.norm_layer(planes * block.expansion,
                                momentum=BN_MOMENTUM),
            )

        layers = [block(inplanes, planes, self.norm_layer, stride, downsample)]
        inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(inplanes, planes, self.norm_layer))

        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Build NUM_MODULES HighResolutionModules for one stage.

        Returns ``(nn.ModuleList, per-branch output channels)``.  A
        ModuleList (not Sequential) is used because each module consumes
        and returns a *list* of tensors; ``forward`` iterates it manually.
        """
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']

        modules = []
        for i in range(num_modules):
            # multi_scale_output only matters for the stage's last module.
            reset_multi_scale_output = \
                multi_scale_output or i != num_modules - 1
            modules.append(
                HighResolutionModule(num_branches,
                                     block,
                                     num_blocks,
                                     num_inchannels,
                                     num_channels,
                                     fuse_method,
                                     self.norm_layer,
                                     reset_multi_scale_output))
            num_inchannels = modules[-1].get_num_inchannels()

        return nn.ModuleList(modules), num_inchannels

    def forward(self, x):
        # Stem -> 1/4 resolution, then stage 1.
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.bn2(self.conv2(x)))
        x = self.layer1(x)

        # Stage 2.
        x_list = []
        for trans in self.transition1:
            x_list.append(x if isinstance(trans, nn.Identity) else trans(x))
        for module in self.stage2:
            x_list = module(x_list)
        y_list = x_list

        # Stage 3.  Non-identity transitions consume the previous stage's
        # lowest-resolution map (this mirrors the original behaviour).
        x_list = []
        for i, trans in enumerate(self.transition2):
            if not isinstance(trans, nn.Identity):
                x_list.append(trans(y_list[-1]))
            else:
                x_list.append(y_list[i])
        for module in self.stage3:
            x_list = module(x_list)
        y_list = x_list

        # Stage 4.
        x_list = []
        for i, trans in enumerate(self.transition3):
            if not isinstance(trans, nn.Identity):
                x_list.append(trans(y_list[-1]))
            else:
                x_list.append(y_list[i])
        for module in self.stage4:
            x_list = module(x_list)
        x = x_list

        # Upsample every branch to the highest resolution and concatenate.
        # align_corners=False is the framework default, made explicit to
        # silence the interpolate warning without changing behaviour.
        x0_h, x0_w = x[0].size(2), x[0].size(3)
        x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear',
                           align_corners=False)
        x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear',
                           align_corners=False)
        x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear',
                           align_corners=False)
        return torch.cat([x[0], x1, x2, x3], 1)

    def init_weights(self, pretrained=''):
        """Initialize conv/BN weights; optionally load a checkpoint.

        Args:
            pretrained: path to a state-dict file.  Keys not present in
                this model are silently dropped before loading.
        Returns:
            A status string when a checkpoint was loaded, else None.
        """
        logger.info('=> init weights from normal distribution')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.001)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained)
            logger.info('=> loading pretrained model {}'.format(pretrained))
            model_dict = self.state_dict()
            # Keep only the keys this model actually has.
            pretrained_dict = {k: v for k, v in pretrained_dict.items()
                               if k in model_dict}
            model_dict.update(pretrained_dict)
            self.load_state_dict(model_dict)
            # Typo fix: 'wieghts' -> 'weights'.
            return "HRNet backbone weights loaded"
# Stage configurations for the supported HRNet variants.  For each stage:
#   NUM_MODULES  - number of HighResolutionModules stacked in the stage,
#   NUM_BRANCHES - number of parallel resolution branches,
#   BLOCK        - residual block type (key into blocks_dict),
#   NUM_BLOCKS   - residual blocks per branch,
#   NUM_CHANNELS - channel width of each branch,
#   FUSE_METHOD  - how branch outputs are combined.
backbone_config = {
    "hrnet_w18_small_v1": {
        "STAGE1": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 1,
            "BLOCK": "BOTTLENECK",
            "NUM_BLOCKS": [1],
            "NUM_CHANNELS": [32],
            "FUSE_METHOD": "SUM"
        },
        "STAGE2": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 2,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [2,2],
            "NUM_CHANNELS": [16,32],
            "FUSE_METHOD": "SUM"
        },
        "STAGE3": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 3,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [2,2,2],
            "NUM_CHANNELS": [16,32,64],
            "FUSE_METHOD": "SUM"
        },
        "STAGE4": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 4,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [2,2,2,2],
            "NUM_CHANNELS": [16,32,64,128],
            "FUSE_METHOD": "SUM"
        }
    },
    "hrnet_w18_small_v2": {
        "STAGE1": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 1,
            "BLOCK": "BOTTLENECK",
            "NUM_BLOCKS": [2],
            "NUM_CHANNELS": [64],
            "FUSE_METHOD": "SUM"
        },
        "STAGE2": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 2,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [2,2],
            "NUM_CHANNELS": [18,36],
            "FUSE_METHOD": "SUM"
        },
        "STAGE3": {
            "NUM_MODULES": 3,
            "NUM_BRANCHES": 3,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [2,2,2],
            "NUM_CHANNELS": [18,36,72],
            "FUSE_METHOD": "SUM"
        },
        "STAGE4": {
            "NUM_MODULES": 2,
            "NUM_BRANCHES": 4,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [2,2,2,2],
            "NUM_CHANNELS": [18, 36, 72, 144],
            "FUSE_METHOD": "SUM"
        }
    },
    "hrnet_w18": {
        "STAGE1": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 1,
            "BLOCK": "BOTTLENECK",
            "NUM_BLOCKS": [4],
            "NUM_CHANNELS": [64],
            "FUSE_METHOD": "SUM"
        },
        "STAGE2": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 2,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4,4],
            "NUM_CHANNELS": [18,36],
            "FUSE_METHOD": "SUM"
        },
        "STAGE3": {
            "NUM_MODULES": 4,
            "NUM_BRANCHES": 3,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4,4,4],
            "NUM_CHANNELS": [18,36,72],
            "FUSE_METHOD": "SUM"
        },
        "STAGE4": {
            "NUM_MODULES": 3,
            "NUM_BRANCHES": 4,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4,4,4,4],
            "NUM_CHANNELS": [18, 36, 72, 144],
            "FUSE_METHOD": "SUM"
        }
    },
    "hrnet_w30": {
        "STAGE1": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 1,
            "BLOCK": "BOTTLENECK",
            "NUM_BLOCKS": [4],
            "NUM_CHANNELS": [64],
            "FUSE_METHOD": "SUM"
        },
        "STAGE2": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 2,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4,4],
            "NUM_CHANNELS": [30, 60],
            "FUSE_METHOD": "SUM"
        },
        "STAGE3": {
            "NUM_MODULES": 4,
            "NUM_BRANCHES": 3,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4, 4, 4],
            "NUM_CHANNELS": [30, 60, 120],
            "FUSE_METHOD": "SUM"
        },
        "STAGE4": {
            "NUM_MODULES": 3,
            "NUM_BRANCHES": 4,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4, 4, 4, 4],
            "NUM_CHANNELS": [30, 60, 120, 240],
            "FUSE_METHOD": "SUM"
        }
    },
    "hrnet_w32": {
        "STAGE1": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 1,
            "BLOCK": "BOTTLENECK",
            "NUM_BLOCKS": [4],
            "NUM_CHANNELS": [64],
            "FUSE_METHOD": "SUM"
        },
        "STAGE2": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 2,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4,4],
            "NUM_CHANNELS": [32, 64],
            "FUSE_METHOD": "SUM"
        },
        "STAGE3": {
            "NUM_MODULES": 4,
            "NUM_BRANCHES": 3,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4, 4, 4],
            "NUM_CHANNELS": [32, 64, 128],
            "FUSE_METHOD": "SUM"
        },
        "STAGE4": {
            "NUM_MODULES": 3,
            "NUM_BRANCHES": 4,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4, 4, 4, 4],
            "NUM_CHANNELS": [32, 64, 128, 256],
            "FUSE_METHOD": "SUM"
        }
    },
    "hrnet_w48": {
        "STAGE1": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 1,
            "BLOCK": "BOTTLENECK",
            "NUM_BLOCKS": [4],
            "NUM_CHANNELS": [64],
            "FUSE_METHOD": "SUM"
        },
        "STAGE2": {
            "NUM_MODULES": 1,
            "NUM_BRANCHES": 2,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4,4],
            "NUM_CHANNELS": [48, 96],
            "FUSE_METHOD": "SUM"
        },
        "STAGE3": {
            "NUM_MODULES": 4,
            "NUM_BRANCHES": 3,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4, 4, 4],
            "NUM_CHANNELS": [48, 96, 192],
            "FUSE_METHOD": "SUM"
        },
        "STAGE4": {
            "NUM_MODULES": 3,
            "NUM_BRANCHES": 4,
            "BLOCK": "BASIC",
            "NUM_BLOCKS": [4, 4, 4, 4],
            "NUM_CHANNELS": [48, 96, 192, 384],
            "FUSE_METHOD": "SUM"
        }
    }
}
@BACKBONE_REGISTRY.register()
def hrnet_w18_small_v1(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W18-Small-v1 backbone."""
    return HighResolutionNet(
        config=backbone_config["hrnet_w18_small_v1"], norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def hrnet_w18_small_v2(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W18-Small-v2 backbone."""
    return HighResolutionNet(
        config=backbone_config["hrnet_w18_small_v2"], norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def hrnet_w18(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W18 backbone."""
    return HighResolutionNet(
        config=backbone_config["hrnet_w18"], norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def hrnet_w30(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W30 backbone."""
    return HighResolutionNet(
        config=backbone_config["hrnet_w30"], norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def hrnet_w32(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W32 backbone."""
    return HighResolutionNet(
        config=backbone_config["hrnet_w32"], norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def hrnet_w48(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W48 backbone."""
    return HighResolutionNet(
        config=backbone_config["hrnet_w48"], norm_layer=norm_layer)
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import numpy as np
from typing import List
from .build import BACKBONE_REGISTRY
BN_MOMENTUM = 0.01
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, norm_layer, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.norm_layer = norm_layer
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = self.norm_layer(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = self.norm_layer(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, norm_layer, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.norm_layer = norm_layer
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = self.norm_layer(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = self.norm_layer(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = self.norm_layer(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class HighResolutionModule(nn.Module):
def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
num_channels, fuse_method, norm_layer, multi_scale_output=True):
super(HighResolutionModule, self).__init__()
self.norm_layer = norm_layer
self._check_branches(
num_branches, blocks, num_blocks, num_inchannels, num_channels)
self.num_inchannels = num_inchannels
self.fuse_method = fuse_method
self.num_branches = num_branches
self.multi_scale_output = multi_scale_output
self.branches = self._make_branches(
num_branches, blocks, num_blocks, num_channels)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(inplace=True)
def _check_branches(self, num_branches, blocks, num_blocks,
num_inchannels, num_channels):
if num_branches != len(num_blocks):
error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
num_branches, len(num_blocks))
logger.error(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
num_branches, len(num_channels))
logger.error(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_inchannels):
error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
num_branches, len(num_inchannels))
logger.error(error_msg)
raise ValueError(error_msg)
def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
stride=1):
downsample = None
if stride != 1 or \
self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.num_inchannels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1, stride=stride, bias=False),
self.norm_layer(num_channels[branch_index] * block.expansion,
momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.num_inchannels[branch_index],
num_channels[branch_index], self.norm_layer, stride, downsample))
self.num_inchannels[branch_index] = \
num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(block(self.num_inchannels[branch_index],
num_channels[branch_index], self.norm_layer))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
num_inchannels = self.num_inchannels
fuse_layers = []
for i in range(num_branches if self.multi_scale_output else 1):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_inchannels[i],
1,
1,
0,
bias=False),
self.norm_layer(num_inchannels[i], momentum=BN_MOMENTUM)))
elif j == i:
fuse_layer.append(nn.Identity())
else:
conv3x3s = []
for k in range(i-j):
if k == i - j - 1:
num_outchannels_conv3x3 = num_inchannels[i]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_outchannels_conv3x3,
3, 2, 1, bias=False),
self.norm_layer(num_outchannels_conv3x3,
momentum=BN_MOMENTUM)))
else:
num_outchannels_conv3x3 = num_inchannels[j]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_inchannels[j],
num_outchannels_conv3x3,
3, 2, 1, bias=False),
self.norm_layer(num_outchannels_conv3x3,
momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)))
fuse_layer.append(nn.Sequential(*conv3x3s))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def get_num_inchannels(self):
return self.num_inchannels
def forward(self, x: List[torch.Tensor]):
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i, branch in enumerate(self.branches):
x[i] = branch(x[i])
x_fuse = []
for i, fuse_layer in enumerate(self.fuse_layers):
y = x[0] if i == 0 else fuse_layer[0](x[0])
for j, fuse_sub_layer in enumerate(fuse_layer):
if j == 0 or j > self.num_branches:
pass
else:
if i == j:
y = y + x[j]
elif j > i:
width_output = x[i].shape[-1]
height_output = x[i].shape[-2]
y = y + F.interpolate(
fuse_sub_layer(x[j]),
size=[height_output, width_output],
mode='bilinear')
else:
y = y + fuse_sub_layer(x[j])
x_fuse.append(self.relu(y))
return x_fuse
blocks_dict = {
'BASIC': BasicBlock,
'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
def __init__(self, config, norm_layer, **kwargs):
super(HighResolutionNet, self).__init__()
self.norm_layer = norm_layer
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn1 = self.norm_layer(64, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
bias=False)
self.bn2 = self.norm_layer(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.stage1_cfg = config['STAGE1']
num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
block = blocks_dict[self.stage1_cfg['BLOCK']]
num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
stage1_out_channel = block.expansion*num_channels
self.stage2_cfg = config['STAGE2']
num_channels = self.stage2_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage2_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer(
[stage1_out_channel], num_channels)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels)
self.stage3_cfg = config['STAGE3']
num_channels = self.stage3_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage3_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels)
self.stage4_cfg = config['STAGE4']
num_channels = self.stage4_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage4_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(
pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multi_scale_output=True)
self.last_inp_channels = np.int(np.sum(pre_stage_channels))
def _make_transition_layer(
self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i],
num_channels_cur_layer[i],
3,
1,
1,
bias=False),
self.norm_layer(
num_channels_cur_layer[i], momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)))
else:
transition_layers.append(nn.Identity())
else:
conv3x3s = []
for j in range(i+1-num_branches_pre):
inchannels = num_channels_pre_layer[-1]
outchannels = num_channels_cur_layer[i] \
if j == i-num_branches_pre else inchannels
conv3x3s.append(nn.Sequential(
nn.Conv2d(
inchannels, outchannels, 3, 2, 1, bias=False),
self.norm_layer(outchannels, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
self.norm_layer(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, self.norm_layer, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes, self.norm_layer))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_inchannels,
multi_scale_output=True):
num_modules = layer_config['NUM_MODULES']
num_branches = layer_config['NUM_BRANCHES']
num_blocks = layer_config['NUM_BLOCKS']
num_channels = layer_config['NUM_CHANNELS']
block = blocks_dict[layer_config['BLOCK']]
fuse_method = layer_config['FUSE_METHOD']
modules = []
for i in range(num_modules):
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(
HighResolutionModule(num_branches,
block,
num_blocks,
num_inchannels,
num_channels,
fuse_method,
self.norm_layer,
reset_multi_scale_output)
)
num_inchannels = modules[-1].get_num_inchannels()
return nn.ModuleList(modules), num_inchannels
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for aux in self.transition1:
if not isinstance(aux,nn.Identity):
x_list.append(aux(x))
else:
x_list.append(x)
for aux in self.stage2:
x_list = aux(x_list)
y_list = x_list
x_list = []
for i, aux in enumerate(self.transition2):
if not isinstance(aux,nn.Identity):
x_list.append(aux(y_list[-1]))
else:
x_list.append(y_list[i])
for aux in self.stage3:
x_list = aux(x_list)
y_list = x_list
x_list = []
for i, aux in enumerate(self.transition3):
if not isinstance(aux,nn.Identity):
x_list.append(aux(y_list[-1]))
else:
x_list.append(y_list[i])
for aux in self.stage4:
x_list = aux(x_list)
x = x_list
x0_h, x0_w = x[0].size(2), x[0].size(3)
x1 = F.interpolate(x[1], size=(x0_h, x0_w), mode='bilinear')
x2 = F.interpolate(x[2], size=(x0_h, x0_w), mode='bilinear')
x3 = F.interpolate(x[3], size=(x0_h, x0_w), mode='bilinear')
x = torch.cat([x[0], x1, x2, x3], 1)
return x
def init_weights(self, pretrained=''):
logger.info('=> init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained)
logger.info('=> loading pretrained model {}'.format(pretrained))
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()}
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
return "HRNet backbone wieghts loaded"
backbone_config={
"hrnet_w18_small_v1": {
"STAGE1": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 1,
"BLOCK": "BOTTLENECK",
"NUM_BLOCKS": [1],
"NUM_CHANNELS": [32],
"FUSE_METHOD": "SUM"
},
"STAGE2": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 2,
"BLOCK": "BASIC",
"NUM_BLOCKS": [2,2],
"NUM_CHANNELS": [16,32],
"FUSE_METHOD": "SUM"
},
"STAGE3": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 3,
"BLOCK": "BASIC",
"NUM_BLOCKS": [2,2,2],
"NUM_CHANNELS": [16,32,64],
"FUSE_METHOD": "SUM"
},
"STAGE4": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 4,
"BLOCK": "BASIC",
"NUM_BLOCKS": [2,2,2,2],
"NUM_CHANNELS": [16,32,64,128],
"FUSE_METHOD": "SUM"
}
},
"hrnet_w18_small_v2": {
"STAGE1": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 1,
"BLOCK": "BOTTLENECK",
"NUM_BLOCKS": [2],
"NUM_CHANNELS": [64],
"FUSE_METHOD": "SUM"
},
"STAGE2": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 2,
"BLOCK": "BASIC",
"NUM_BLOCKS": [2,2],
"NUM_CHANNELS": [18,36],
"FUSE_METHOD": "SUM"
},
"STAGE3": {
"NUM_MODULES": 3,
"NUM_BRANCHES": 3,
"BLOCK": "BASIC",
"NUM_BLOCKS": [2,2,2],
"NUM_CHANNELS": [18,36,72],
"FUSE_METHOD": "SUM"
},
"STAGE4": {
"NUM_MODULES": 2,
"NUM_BRANCHES": 4,
"BLOCK": "BASIC",
"NUM_BLOCKS": [2,2,2,2],
"NUM_CHANNELS": [18, 36, 72, 144],
"FUSE_METHOD": "SUM"
}
},
"hrnet_w18": {
"STAGE1": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 1,
"BLOCK": "BOTTLENECK",
"NUM_BLOCKS": [4],
"NUM_CHANNELS": [64],
"FUSE_METHOD": "SUM"
},
"STAGE2": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 2,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4,4],
"NUM_CHANNELS": [18,36],
"FUSE_METHOD": "SUM"
},
"STAGE3": {
"NUM_MODULES": 4,
"NUM_BRANCHES": 3,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4,4,4],
"NUM_CHANNELS": [18,36,72],
"FUSE_METHOD": "SUM"
},
"STAGE4": {
"NUM_MODULES": 3,
"NUM_BRANCHES": 4,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4,4,4,4],
"NUM_CHANNELS": [18, 36, 72, 144],
"FUSE_METHOD": "SUM"
}
},
"hrnet_w30": {
"STAGE1": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 1,
"BLOCK": "BOTTLENECK",
"NUM_BLOCKS": [4],
"NUM_CHANNELS": [64],
"FUSE_METHOD": "SUM"
},
"STAGE2": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 2,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4,4],
"NUM_CHANNELS": [30, 60],
"FUSE_METHOD": "SUM"
},
"STAGE3": {
"NUM_MODULES": 4,
"NUM_BRANCHES": 3,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4, 4, 4],
"NUM_CHANNELS": [30, 60, 120],
"FUSE_METHOD": "SUM"
},
"STAGE4": {
"NUM_MODULES": 3,
"NUM_BRANCHES": 4,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4, 4, 4, 4],
"NUM_CHANNELS": [30, 60, 120, 240],
"FUSE_METHOD": "SUM"
}
},
"hrnet_w32": {
"STAGE1": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 1,
"BLOCK": "BOTTLENECK",
"NUM_BLOCKS": [4],
"NUM_CHANNELS": [64],
"FUSE_METHOD": "SUM"
},
"STAGE2": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 2,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4,4],
"NUM_CHANNELS": [32, 64],
"FUSE_METHOD": "SUM"
},
"STAGE3": {
"NUM_MODULES": 4,
"NUM_BRANCHES": 3,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4, 4, 4],
"NUM_CHANNELS": [32, 64, 128],
"FUSE_METHOD": "SUM"
},
"STAGE4": {
"NUM_MODULES": 3,
"NUM_BRANCHES": 4,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4, 4, 4, 4],
"NUM_CHANNELS": [32, 64, 128, 256],
"FUSE_METHOD": "SUM"
}
},
"hrnet_w48": {
"STAGE1": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 1,
"BLOCK": "BOTTLENECK",
"NUM_BLOCKS": [4],
"NUM_CHANNELS": [64],
"FUSE_METHOD": "SUM"
},
"STAGE2": {
"NUM_MODULES": 1,
"NUM_BRANCHES": 2,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4,4],
"NUM_CHANNELS": [48, 96],
"FUSE_METHOD": "SUM"
},
"STAGE3": {
"NUM_MODULES": 4,
"NUM_BRANCHES": 3,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4, 4, 4],
"NUM_CHANNELS": [48, 96, 192],
"FUSE_METHOD": "SUM"
},
"STAGE4": {
"NUM_MODULES": 3,
"NUM_BRANCHES": 4,
"BLOCK": "BASIC",
"NUM_BLOCKS": [4, 4, 4, 4],
"NUM_CHANNELS": [48, 96, 192, 384],
"FUSE_METHOD": "SUM"
}
}
}
@BACKBONE_REGISTRY.register()
def hrnet_w18_small_v1(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W18-Small-v1 backbone from its `backbone_config` entry."""
    cfg = backbone_config["hrnet_w18_small_v1"]
    return HighResolutionNet(config=cfg, norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def hrnet_w18_small_v2(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W18-Small-v2 backbone from its `backbone_config` entry."""
    cfg = backbone_config["hrnet_w18_small_v2"]
    return HighResolutionNet(config=cfg, norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def hrnet_w18(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W18 backbone from its `backbone_config` entry."""
    cfg = backbone_config["hrnet_w18"]
    return HighResolutionNet(config=cfg, norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def hrnet_w30(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W30 backbone from its `backbone_config` entry."""
    cfg = backbone_config["hrnet_w30"]
    return HighResolutionNet(config=cfg, norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def hrnet_w32(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W32 backbone from its `backbone_config` entry."""
    cfg = backbone_config["hrnet_w32"]
    return HighResolutionNet(config=cfg, norm_layer=norm_layer)
@BACKBONE_REGISTRY.register()
def hrnet_w48(norm_layer=nn.BatchNorm2d):
    """Build the HRNet-W48 backbone from its `backbone_config` entry."""
    cfg = backbone_config["hrnet_w48"]
    return HighResolutionNet(config=cfg, norm_layer=norm_layer)
1c3b83b20278f57b8f3914a591d43e4033d0a48c | 2,721 | py | Python | timefhuman/main.py | panchbhai1969/timefhuman | 5eb82e31eb13bdc098b86920feb8aea146e4f6a0 | [
"Apache-2.0"
] | null | null | null | timefhuman/main.py | panchbhai1969/timefhuman | 5eb82e31eb13bdc098b86920feb8aea146e4f6a0 | [
"Apache-2.0"
] | null | null | null | timefhuman/main.py | panchbhai1969/timefhuman | 5eb82e31eb13bdc098b86920feb8aea146e4f6a0 | [
"Apache-2.0"
] | null | null | null | """
timefhuman
===
Convert human-readable date-like string to Python datetime object.
1. Tokenize string
2. Parse possible synctatic categories: "day", "time", "time range" etc.
3. Build parse tree.
4. Use grammar to resolve lexical ambiguities.
5. Impute with default values. Output extracted datetime and/or ranges.
@author: Alvin Wan
@site: alvinwan.com
"""
from .tokenize import tokenize
from .categorize import categorize
from .tree import build_tree
from .data import Token
from .data import TimeToken
from .data import DayToken
from .data import TimeRange
from .data import DayRange
import datetime
import string
__all__ = ('timefhuman',)
def timefhuman(string, now=None, raw=None):
    """A simple parsing function for date-related strings.
    :param string: date-like string to parse
    :param now: datetime for now, will default to datetime.datetime.now()
    :param raw: when truthy, return parsed tokens instead of datetimes
    >>> now = datetime.datetime(year=2018, month=8, day=4)
    >>> timefhuman('upcoming Monday noon', now=now)  # natural language
    datetime.datetime(2018, 8, 6, 12, 0)
    >>> timefhuman('Monday 3 pm, Tu noon', now=now)  # multiple datetimes
    [datetime.datetime(2018, 8, 6, 15, 0), datetime.datetime(2018, 8, 7, 12, 0)]
    >>> timefhuman('7/17 3:30-4 PM', now=now)  # time range
    (datetime.datetime(2018, 7, 17, 15, 30), datetime.datetime(2018, 7, 17, 16, 0))
    >>> timefhuman('7/17 3:30 p.m. - 4 p.m.', now=now)
    (datetime.datetime(2018, 7, 17, 15, 30), datetime.datetime(2018, 7, 17, 16, 0))
    >>> timefhuman('7/17 or 7/18 3 p.m.', now=now)  # date range
    [datetime.datetime(2018, 7, 17, 15, 0), datetime.datetime(2018, 7, 18, 15, 0)]
    >>> timefhuman('today or tomorrow noon', now=now)  # choices w. natural language
    [datetime.datetime(2018, 8, 4, 12, 0), datetime.datetime(2018, 8, 5, 12, 0)]
    >>> timefhuman('2 PM on 7/17 or 7/19', now=now)  # time applies to both dates
    [datetime.datetime(2018, 7, 17, 14, 0), datetime.datetime(2018, 7, 19, 14, 0)]
    >>> timefhuman('2 PM on 7/17 or 7/19', raw=True, now=now)
    [[7/17/2018 2 pm, 7/19/2018 2 pm]]
    """
    # Bug fix: `now` was previously overwritten unconditionally with
    # datetime.datetime.now(), silently discarding the caller-supplied value
    # that the doctests above depend on. Only fall back when it is omitted.
    if now is None:
        now = datetime.datetime.now()
    tokens = timefhuman_tokens(string, now)
    if raw:
        return tokens
    # Only fully-resolved tokens produce datetimes; partial tokens are dropped.
    datetimes = [tok.datetime(now) for tok in tokens if isinstance(tok, Token)]
    if len(datetimes) == 1:  # TODO: bad idea?
        return datetimes[0]
    return datetimes
# TODO: What if user specifies vernacular AND actual date time. Let
# specified date time take precedence.
def timefhuman_tokens(string, now):
    """Convert string into timefhuman parsed, imputed, combined tokens."""
    raw_tokens = tokenize(string)
    categorized = categorize(raw_tokens, now)
    return build_tree(categorized, now)
| 34.884615 | 84 | 0.673282 |
from .tokenize import tokenize
from .categorize import categorize
from .tree import build_tree
from .data import Token
from .data import TimeToken
from .data import DayToken
from .data import TimeRange
from .data import DayRange
import datetime
import string
__all__ = ('timefhuman',)
def timefhuman(string, now=None, raw=None):
    """Parse a human date-like *string* into datetime objects.

    :param string: date-like string to parse
    :param now: reference datetime for relative phrases; defaults to
        ``datetime.datetime.now()`` when omitted
    :param raw: when truthy, return the parsed tokens instead of datetimes
    """
    # Bug fix: `now` was previously overwritten unconditionally, discarding
    # the caller-supplied reference time. Only fall back when it is omitted.
    if now is None:
        now = datetime.datetime.now()
    tokens = timefhuman_tokens(string, now)
    if raw:
        return tokens
    datetimes = [tok.datetime(now) for tok in tokens if isinstance(tok, Token)]
    if len(datetimes) == 1:
        return datetimes[0]
    return datetimes
def timefhuman_tokens(string, now):
    """Run the tokenize -> categorize -> build_tree pipeline and return tokens."""
    raw_tokens = tokenize(string)
    categorized = categorize(raw_tokens, now)
    return build_tree(categorized, now)
| true | true |
1c3b8410933ea9b483807e91eb31d4a6ffe40b97 | 3,201 | py | Python | ray/actors/persistent_account_2actors.py | scalingpythonml/scalingpythonml | 2700b7dc4e454ce802a4183aeed4a7b0ffea5b83 | [
"Apache-2.0"
] | 13 | 2020-02-09T16:03:10.000Z | 2022-03-19T14:08:16.000Z | ray/actors/persistent_account_2actors.py | scalingpythonml/scalingpythonml | 2700b7dc4e454ce802a4183aeed4a7b0ffea5b83 | [
"Apache-2.0"
] | 3 | 2020-10-31T16:20:05.000Z | 2020-11-04T01:17:02.000Z | ray/actors/persistent_account_2actors.py | scalingpythonml/scalingpythonml | 2700b7dc4e454ce802a4183aeed4a7b0ffea5b83 | [
"Apache-2.0"
] | 4 | 2020-12-21T22:23:16.000Z | 2022-03-29T20:25:28.000Z | import ray
from os.path import exists
# Start Ray
ray.init()
class BasePersitence:
    """Abstract key/value persistence interface.

    NOTE(review): class name is misspelled ("Persitence"); left unchanged
    because FilePersistence below inherits from it by this name.
    """
    def exists(self, key:str) -> bool:
        """Return True when a value has been stored under *key*."""
        pass
    def save(self, key: str, data: dict):
        """Persist *data* under *key*."""
        pass
    def restore(self, key:str) -> dict:
        """Return the dict stored under *key*, or None when absent."""
        pass
@ray.remote
class FilePersistence(BasePersitence):
    """Ray actor that persists cloudpickled dicts as files under *basedir*."""

    def __init__(self, basedir: str = '.'):
        self.basedir = basedir

    def _location(self, key: str) -> str:
        # File path for a given key (flat layout directly under basedir).
        return self.basedir + '/' + key

    def exists(self, key: str) -> bool:
        """Report whether *key* was saved before."""
        return exists(self._location(key))

    def save(self, key: str, data: dict):
        """Serialize *data* with cloudpickle and write it to the key's file."""
        payload = ray.cloudpickle.dumps(data)
        with open(self._location(key), "wb") as out:
            out.write(payload)

    def restore(self, key: str) -> dict:
        """Load and deserialize the value stored for *key*; None when missing."""
        if not self.exists(key):
            return None
        with open(self._location(key), "rb") as src:
            payload = src.read()
        return ray.cloudpickle.loads(payload)
persistence_actor = FilePersistence.remote()
@ray.remote
class Account:
    """Ray actor modeling a bank account whose state survives actor restarts.

    Balance and minimal balance are stored through the *persistence* actor
    under *account_key* and restored (when present) on construction.

    NOTE(review): the float attribute ``self.balance`` shadows the
    ``balance()`` method on a plain instance; calls still work through a Ray
    actor handle because methods are resolved on the class — confirm before
    reusing this class outside Ray.
    """
    def __init__(self, balance: float, minimal_balance: float, account_key: str, persistence):
        self.persistence = persistence
        self.key = account_key
        # Seed fresh state only when nothing was previously persisted.
        if not self.restorestate():
            if balance < minimal_balance:
                print(f"Balance {balance} is less then minimal balance {minimal_balance}")
                raise Exception("Starting balance is less then minimal balance")
            self.balance = balance
            self.minimal = minimal_balance
            self.storestate()
    def balance(self) -> float:
        """Return the current balance."""
        return self.balance
    def deposit(self, amount: float) -> float:
        """Add *amount* to the balance, persist and return the new balance."""
        self.balance = self.balance + amount
        self.storestate()
        return self.balance
    def withdraw(self, amount: float) -> float:
        """Subtract *amount* if the result stays at or above the minimal balance."""
        balance = self.balance - amount
        if balance < self.minimal:
            print(f"Withdraw amount {amount} is too large for a current balance {self.balance}")
            raise Exception("Withdraw is not supported by current balance")
        self.balance = balance
        self.storestate()
        return balance
    def restorestate(self) -> bool:
        """Load persisted state; True when state existed, False otherwise."""
        state = ray.get(self.persistence.restore.remote(self.key))
        # Fix: identity comparison with None (PEP 8) instead of `!= None`.
        if state is not None:
            self.balance = state['balance']
            self.minimal = state['minimal']
            return True
        else:
            return False
    def storestate(self):
        """Fire-and-forget save of the current state via the persistence actor."""
        self.persistence.save.remote(self.key,
                                     {'balance' : self.balance, 'minimal' : self.minimal})
account_actor = Account.options(name='Account').remote(balance=100.,minimal_balance=20.,
account_key='1234567', persistence=persistence_actor)
print(f"Current balance {ray.get(account_actor.balance.remote())}")
print(f"New balance {ray.get(account_actor.withdraw.remote(40.))}")
print(f"New balance {ray.get(account_actor.deposit.remote(70.))}")
print(ray.get_actor('Account'))
ray.kill(account_actor)
account_actor = Account.options(name='Account') .remote(balance=100.,minimal_balance=20.,
account_key='1234567', persistence=persistence_actor)
print(f"Current balance {ray.get(account_actor.balance.remote())}")
| 32.663265 | 96 | 0.614183 | import ray
from os.path import exists
ray.init()
class BasePersitence:
    """Abstract key/value persistence interface (name typo preserved:
    FilePersistence below inherits from it by this name)."""
    def exists(self, key:str) -> bool:
        """Return True when a value has been stored under *key*."""
        pass
    def save(self, key: str, data: dict):
        """Persist *data* under *key*."""
        pass
    def restore(self, key:str) -> dict:
        """Return the dict stored under *key*, or None when absent."""
        pass
@ray.remote
class FilePersistence(BasePersitence):
    """Ray actor that persists cloudpickled dicts as files under *basedir*."""
    def __init__(self, basedir: str = '.'):
        self.basedir = basedir
    def exists(self, key:str) -> bool:
        """Report whether *key* was saved before."""
        return exists(self.basedir + '/' + key)
    def save(self, key: str, data: dict):
        """Serialize *data* with cloudpickle and write it to the key's file."""
        # NOTE(review): local name shadows the builtin `bytes`.
        bytes = ray.cloudpickle.dumps(data)
        with open(self.basedir + '/' + key, "wb") as f:
            f.write(bytes)
    def restore(self, key:str) -> dict:
        """Load and deserialize the value stored for *key*; None when missing."""
        if self.exists(key):
            with open(self.basedir + '/' + key, "rb") as f:
                bytes = f.read()
            return ray.cloudpickle.loads(bytes)
        else:
            return None
persistence_actor = FilePersistence.remote()
@ray.remote
class Account:
    """Ray actor for a bank account whose state is restored from persistence
    on construction (see FilePersistence above).

    NOTE(review): the float attribute ``self.balance`` shadows the
    ``balance()`` method on a plain instance; this works through Ray actor
    handles (methods resolve on the class) — confirm before reuse outside Ray.
    """
    def __init__(self, balance: float, minimal_balance: float, account_key: str, persistence):
        self.persistence = persistence
        self.key = account_key
        # Seed fresh state only when nothing was previously persisted.
        if not self.restorestate():
            if balance < minimal_balance:
                print(f"Balance {balance} is less then minimal balance {minimal_balance}")
                raise Exception("Starting balance is less then minimal balance")
            self.balance = balance
            self.minimal = minimal_balance
            self.storestate()
    def balance(self) -> float:
        """Return the current balance."""
        return self.balance
    def deposit(self, amount: float) -> float:
        """Add *amount* to the balance, persist and return the new balance."""
        self.balance = self.balance + amount
        self.storestate()
        return self.balance
    def withdraw(self, amount: float) -> float:
        """Subtract *amount* if the result stays at or above the minimal balance."""
        balance = self.balance - amount
        if balance < self.minimal:
            print(f"Withdraw amount {amount} is too large for a current balance {self.balance}")
            raise Exception("Withdraw is not supported by current balance")
        self.balance = balance
        self.storestate()
        return balance
    def restorestate(self) -> bool:
        """Load persisted state; True when state existed, False otherwise."""
        state = ray.get(self.persistence.restore.remote(self.key))
        # Fix: identity comparison with None (PEP 8) instead of `!= None`.
        if state is not None:
            self.balance = state['balance']
            self.minimal = state['minimal']
            return True
        else:
            return False
    def storestate(self):
        """Fire-and-forget save of the current state via the persistence actor."""
        self.persistence.save.remote(self.key,
                                     {'balance' : self.balance, 'minimal' : self.minimal})
# Demo: create a named account actor, exercise it, then kill and re-create it
# under the same name; the new actor restores state from persistence.
account_actor = Account.options(name='Account').remote(balance=100.,minimal_balance=20.,
                                account_key='1234567', persistence=persistence_actor)
print(f"Current balance {ray.get(account_actor.balance.remote())}")
print(f"New balance {ray.get(account_actor.withdraw.remote(40.))}")
print(f"New balance {ray.get(account_actor.deposit.remote(70.))}")
print(ray.get_actor('Account'))
# Re-creation after kill should restore the persisted balance, not the args.
ray.kill(account_actor)
account_actor = Account.options(name='Account') .remote(balance=100.,minimal_balance=20.,
                                account_key='1234567', persistence=persistence_actor)
print(f"Current balance {ray.get(account_actor.balance.remote())}")
| true | true |
1c3b843ec7f91fff2d89feaf4858010ee95ef60a | 3,233 | py | Python | folium/plugins/draw.py | beaswift/folium | b44e95be4ec2bdcf4898e48a749a64edfb8a2ea8 | [
"MIT"
] | 1 | 2018-03-21T13:17:19.000Z | 2018-03-21T13:17:19.000Z | folium/plugins/draw.py | beaswift/folium | b44e95be4ec2bdcf4898e48a749a64edfb8a2ea8 | [
"MIT"
] | null | null | null | folium/plugins/draw.py | beaswift/folium | b44e95be4ec2bdcf4898e48a749a64edfb8a2ea8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from branca.element import CssLink, Element, Figure, JavascriptLink, MacroElement
from jinja2 import Template
class Draw(MacroElement):
    """
    Vector drawing and editing plugin for Leaflet.
    Examples
    --------
    >>> m = folium.Map()
    >>> Draw().add_to(m)
    For more info please check
    https://leaflet.github.io/Leaflet.draw/docs/leaflet-draw-latest.html
    """
    def __init__(self, export=False):
        """Create the draw control.

        :param export: when True, render an "Export" link that downloads the
            drawn features as a GeoJSON file.
        """
        super(Draw, self).__init__()
        self._name = 'DrawControl'
        self.export = export
        # Jinja macro emitting the Leaflet.draw setup JS; drawn layers are
        # collected into a FeatureGroup so they stay editable/exportable.
        # NOTE(review): the export click handler is emitted unconditionally,
        # even when export=False (the #export element then never exists) —
        # confirm whether that is intended.
        self._template = Template(u"""
            {% macro script(this, kwargs) %}
            // FeatureGroup is to store editable layers.
            var drawnItems = new L.featureGroup().addTo({{this._parent.get_name()}});
            var {{this.get_name()}} = new L.Control.Draw({
            "edit": {"featureGroup": drawnItems}
            }).addTo({{this._parent.get_name()}})
            {{this._parent.get_name()}}.on(L.Draw.Event.CREATED, function (event) {
            var layer = event.layer,
            type = event.layerType,
            coords;
            var coords = JSON.stringify(layer.toGeoJSON());
            layer.on('click', function() {
            alert(coords);
            console.log(coords);
            });
            drawnItems.addLayer(layer);
            });
            {{this._parent.get_name()}}.on('draw:created', function(e) {
            drawnItems.addLayer(e.layer);
            });
            document.getElementById('export').onclick = function(e) {
            var data = drawnItems.toGeoJSON();
            var convertedData = 'text/json;charset=utf-8,' + encodeURIComponent(JSON.stringify(data));
            document.getElementById('export').setAttribute('href', 'data:' + convertedData);
            document.getElementById('export').setAttribute('download','data.geojson');
            }
            {% endmacro %}
            """)
    def render(self, **kwargs):
        """Inject Leaflet.draw assets (and, optionally, the export link) into the figure."""
        super(Draw, self).render()
        figure = self.get_root()
        assert isinstance(figure, Figure), ('You cannot render this Element '
                                            'if it is not in a Figure.')
        figure.header.add_child(
            JavascriptLink('https://cdn.rawgit.com/Leaflet/Leaflet.draw/v0.4.12/dist/leaflet.draw.js'))  # noqa
        figure.header.add_child(
            CssLink('https://cdn.rawgit.com/Leaflet/Leaflet.draw/v0.4.12/dist/leaflet.draw.css'))  # noqa
        # CSS for the floating Export link rendered over the map.
        export_style = """<style>
        #export {
            position: absolute;
            top: 5px;
            right: 10px;
            z-index: 999;
            background: white;
            color: black;
            padding: 6px;
            border-radius: 4px;
            font-family: 'Helvetica Neue';
            cursor: pointer;
            font-size: 12px;
            text-decoration: none;
            top: 90px;
        }
        </style>"""
        export_button = """<a href='#' id='export'>Export</a>"""
        if self.export:
            figure.header.add_child(Element(export_style), name='export')
            figure.html.add_child(Element(export_button), name='export_button')
| 34.393617 | 111 | 0.560779 |
from __future__ import (absolute_import, division, print_function)
from branca.element import CssLink, Element, Figure, JavascriptLink, MacroElement
from jinja2 import Template
class Draw(MacroElement):
    """Vector drawing and editing plugin for Leaflet (folium wrapper).

    :param export: when True, render an "Export" link that downloads the
        drawn features as a GeoJSON file.
    """
    def __init__(self, export=False):
        super(Draw, self).__init__()
        self._name = 'DrawControl'
        self.export = export
        # Jinja macro emitting the Leaflet.draw setup JS; drawn layers are
        # collected into a FeatureGroup so they stay editable/exportable.
        self._template = Template(u"""
            {% macro script(this, kwargs) %}
            // FeatureGroup is to store editable layers.
            var drawnItems = new L.featureGroup().addTo({{this._parent.get_name()}});
            var {{this.get_name()}} = new L.Control.Draw({
            "edit": {"featureGroup": drawnItems}
            }).addTo({{this._parent.get_name()}})
            {{this._parent.get_name()}}.on(L.Draw.Event.CREATED, function (event) {
            var layer = event.layer,
            type = event.layerType,
            coords;
            var coords = JSON.stringify(layer.toGeoJSON());
            layer.on('click', function() {
            alert(coords);
            console.log(coords);
            });
            drawnItems.addLayer(layer);
            });
            {{this._parent.get_name()}}.on('draw:created', function(e) {
            drawnItems.addLayer(e.layer);
            });
            document.getElementById('export').onclick = function(e) {
            var data = drawnItems.toGeoJSON();
            var convertedData = 'text/json;charset=utf-8,' + encodeURIComponent(JSON.stringify(data));
            document.getElementById('export').setAttribute('href', 'data:' + convertedData);
            document.getElementById('export').setAttribute('download','data.geojson');
            }
            {% endmacro %}
            """)
    def render(self, **kwargs):
        """Inject Leaflet.draw assets (and, optionally, the export link) into the figure."""
        super(Draw, self).render()
        figure = self.get_root()
        assert isinstance(figure, Figure), ('You cannot render this Element '
                                            'if it is not in a Figure.')
        figure.header.add_child(
            JavascriptLink('https://cdn.rawgit.com/Leaflet/Leaflet.draw/v0.4.12/dist/leaflet.draw.js'))
        figure.header.add_child(
            CssLink('https://cdn.rawgit.com/Leaflet/Leaflet.draw/v0.4.12/dist/leaflet.draw.css'))
        # CSS for the floating Export link rendered over the map.
        export_style = """<style>
        #export {
            position: absolute;
            top: 5px;
            right: 10px;
            z-index: 999;
            background: white;
            color: black;
            padding: 6px;
            border-radius: 4px;
            font-family: 'Helvetica Neue';
            cursor: pointer;
            font-size: 12px;
            text-decoration: none;
            top: 90px;
        }
        </style>"""
        export_button = """<a href='#' id='export'>Export</a>"""
        if self.export:
            figure.header.add_child(Element(export_style), name='export')
            figure.html.add_child(Element(export_button), name='export_button')
| true | true |
1c3b84eb2d5ed36c3f2fc4ead536712260644ef3 | 299 | py | Python | mmskeleton/deprecated/processor/pseudo.py | fserracant/mmskeleton | 44008bdef3dd6354a17c220fac8bcd8cd08ed201 | [
"Apache-2.0"
] | 1,347 | 2019-08-24T19:03:50.000Z | 2022-03-29T05:44:57.000Z | mmskeleton/deprecated/processor/pseudo.py | fserracant/mmskeleton | 44008bdef3dd6354a17c220fac8bcd8cd08ed201 | [
"Apache-2.0"
] | 246 | 2019-08-24T15:36:11.000Z | 2022-03-23T06:57:02.000Z | mmskeleton/deprecated/processor/pseudo.py | fserracant/mmskeleton | 44008bdef3dd6354a17c220fac8bcd8cd08ed201 | [
"Apache-2.0"
] | 335 | 2019-08-25T14:54:19.000Z | 2022-03-31T23:07:18.000Z | from mmskeleton.utils import call_obj
def train(model_cfg, dataset_cfg, optimizer):
    """Instantiate the configured model and dataset, then pretend to train.

    The built objects are not used further; this is a pseudo/demo processor.
    """
    built_model = call_obj(**model_cfg)
    built_dataset = call_obj(**dataset_cfg)
    print('train a pseudo model...')
    print('done.')
def hello_world(times=10):
    """Print "Hello World!" *times* times."""
    for _ in range(times):
        print('Hello World!')
def train(model_cfg, dataset_cfg, optimizer):
    """Instantiate the configured model and dataset, then pretend to train."""
    built_model = call_obj(**model_cfg)
    built_dataset = call_obj(**dataset_cfg)
    print('train a pseudo model...')
    print('done.')
def hello_world(times=10):
    """Print "Hello World!" *times* times."""
    for _ in range(times):
        print('Hello World!')
1c3b8633a843a25773f6810027fdabf38915b85d | 586 | py | Python | packages/python/plotly/plotly/validators/layout/xaxis/_tickformatstopdefaults.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/layout/xaxis/_tickformatstopdefaults.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/layout/xaxis/_tickformatstopdefaults.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class TickformatstopdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for ``layout.xaxis.tickformatstopdefaults``."""

    def __init__(
        self, plotly_name="tickformatstopdefaults", parent_name="layout.xaxis", **kwargs
    ):
        # Pop overridable settings before forwarding the remaining kwargs.
        data_class_str = kwargs.pop("data_class_str", "Tickformatstop")
        data_docs = kwargs.pop(
            "data_docs",
            """
            """,
        )
        super(TickformatstopdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
| 30.842105 | 88 | 0.622867 | import _plotly_utils.basevalidators
class TickformatstopdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for ``layout.xaxis.tickformatstopdefaults``
    (backed by the ``Tickformatstop`` data class)."""
    def __init__(
        self, plotly_name="tickformatstopdefaults", parent_name="layout.xaxis", **kwargs
    ):
        super(TickformatstopdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Callers may override the compound data class / docs via kwargs.
            data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            """,
            ),
            **kwargs,
        )
| true | true |
1c3b86740c517e4f6755d8a7dbc0f587b2e92e38 | 13,302 | py | Python | st2common/tests/unit/services/test_rbac.py | ekhavana/st2 | 2b47b0e317a2dfd7d92d63ec6dcf706493148890 | [
"Apache-2.0"
] | null | null | null | st2common/tests/unit/services/test_rbac.py | ekhavana/st2 | 2b47b0e317a2dfd7d92d63ec6dcf706493148890 | [
"Apache-2.0"
] | null | null | null | st2common/tests/unit/services/test_rbac.py | ekhavana/st2 | 2b47b0e317a2dfd7d92d63ec6dcf706493148890 | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pymongo import MongoClient
from st2tests.base import CleanDbTestCase
from st2common.services import rbac as rbac_services
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.rbac.types import SystemRole
from st2common.persistence.auth import User
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rule import Rule
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rule import RuleDB
class RBACServicesTestCase(CleanDbTestCase):
    """Tests for st2common.services.rbac role / assignment / grant helpers.

    NOTE(review): uses Python2-era unittest APIs (``assertItemsEqual``,
    ``assertRaisesRegexp``) — consistent with the rest of this codebase.
    """
    def setUp(self):
        """Create mock users, roles, role assignments and a rule resource."""
        super(RBACServicesTestCase, self).setUp()
        # TODO: Share mocks
        self.users = {}
        self.roles = {}
        self.resources = {}
        # Create some mock users
        user_1_db = UserDB(name='admin')
        user_1_db = User.add_or_update(user_1_db)
        self.users['admin'] = user_1_db
        user_2_db = UserDB(name='observer')
        user_2_db = User.add_or_update(user_2_db)
        self.users['observer'] = user_2_db
        user_3_db = UserDB(name='no_roles')
        user_3_db = User.add_or_update(user_3_db)
        self.users['no_roles'] = user_3_db
        user_5_db = UserDB(name='user_5')
        user_5_db = User.add_or_update(user_5_db)
        self.users['user_5'] = user_5_db
        user_4_db = UserDB(name='custom_role')
        user_4_db = User.add_or_update(user_4_db)
        self.users['1_custom_role'] = user_4_db
        # Create some mock roles
        role_1_db = rbac_services.create_role(name='custom_role_1')
        role_2_db = rbac_services.create_role(name='custom_role_2',
                                              description='custom role 2')
        self.roles['custom_role_1'] = role_1_db
        self.roles['custom_role_2'] = role_2_db
        rbac_services.create_role(name='role_1')
        rbac_services.create_role(name='role_2')
        rbac_services.create_role(name='role_3')
        rbac_services.create_role(name='role_4')
        # Create some mock role assignments
        role_assignment_1 = UserRoleAssignmentDB(user=self.users['1_custom_role'].name,
                                                 role=self.roles['custom_role_1'].name)
        role_assignment_1 = UserRoleAssignment.add_or_update(role_assignment_1)
        # Note: User use pymongo to insert mock data because we want to insert a
        # raw document and skip mongoengine to leave is_remote field unpopulated
        client = MongoClient()
        db = client['st2-test']
        db.user_role_assignment_d_b.insert_one({'user': 'user_5', 'role': 'role_1'})
        db.user_role_assignment_d_b.insert_one({'user': 'user_5', 'role': 'role_2'})
        db.user_role_assignment_d_b.insert_one({'user': 'user_5', 'role': 'role_3',
                                                'is_remote': False})
        db.user_role_assignment_d_b.insert_one({'user': 'user_5', 'role': 'role_4',
                                                'is_remote': True})
        # Create some mock resources on which permissions can be granted
        rule_1_db = RuleDB(pack='test1', name='rule1', ref='test1.rule1')
        rule_1_db = Rule.add_or_update(rule_1_db)
        self.resources['rule_1'] = rule_1_db
    def test_get_role_assignments_for_user(self):
        """Missing is_remote field defaults to False; include_remote filters it."""
        # Test a case where a document doesn't exist is_remote field and when it
        # does
        user_db = self.users['user_5']
        role_assignment_dbs = rbac_services.get_role_assignments_for_user(user_db=user_db,
                                                                          include_remote=False)
        self.assertEqual(len(role_assignment_dbs), 3)
        self.assertEqual(role_assignment_dbs[0].role, 'role_1')
        self.assertEqual(role_assignment_dbs[1].role, 'role_2')
        self.assertEqual(role_assignment_dbs[2].role, 'role_3')
        self.assertEqual(role_assignment_dbs[0].is_remote, False)
        self.assertEqual(role_assignment_dbs[1].is_remote, False)
        self.assertEqual(role_assignment_dbs[2].is_remote, False)
        user_db = self.users['user_5']
        role_assignment_dbs = rbac_services.get_role_assignments_for_user(user_db=user_db,
                                                                          include_remote=True)
        self.assertEqual(len(role_assignment_dbs), 4)
        self.assertEqual(role_assignment_dbs[3].role, 'role_4')
        self.assertEqual(role_assignment_dbs[3].is_remote, True)
    def test_get_all_roles(self):
        """All roles from setUp are returned (2 custom + 4 plain)."""
        role_dbs = rbac_services.get_all_roles()
        self.assertEqual(len(role_dbs), len(self.roles) + 4)
    def test_get_roles_for_user(self):
        """Roles resolve via the service and the UserDB helper alike."""
        # User with no roles
        user_db = self.users['no_roles']
        role_dbs = rbac_services.get_roles_for_user(user_db=user_db)
        self.assertItemsEqual(role_dbs, [])
        role_dbs = user_db.get_roles()
        self.assertItemsEqual(role_dbs, [])
        # User with one custom role
        user_db = self.users['1_custom_role']
        role_dbs = rbac_services.get_roles_for_user(user_db=user_db)
        self.assertItemsEqual(role_dbs, [self.roles['custom_role_1']])
        role_dbs = user_db.get_roles()
        self.assertItemsEqual(role_dbs, [self.roles['custom_role_1']])
        # User with remote roles
        user_db = self.users['user_5']
        role_dbs = user_db.get_roles()
        self.assertEqual(len(role_dbs), 4)
        user_db = self.users['user_5']
        role_dbs = user_db.get_roles(include_remote=True)
        self.assertEqual(len(role_dbs), 4)
        user_db = self.users['user_5']
        role_dbs = user_db.get_roles(include_remote=False)
        self.assertEqual(len(role_dbs), 3)
    def test_get_all_role_assignments(self):
        """include_remote toggles visibility of the single remote assignment."""
        role_assignment_dbs = rbac_services.get_all_role_assignments(include_remote=True)
        self.assertEqual(len(role_assignment_dbs), 5)
        role_assignment_dbs = rbac_services.get_all_role_assignments(include_remote=False)
        self.assertEqual(len(role_assignment_dbs), 4)
        for role_assignment_db in role_assignment_dbs:
            self.assertFalse(role_assignment_db.is_remote)
    def test_create_role_with_system_role_name(self):
        """Creating a role with a reserved system role name is rejected."""
        # Roles with names which match system role names can't be created
        expected_msg = '"observer" role name is blacklisted'
        self.assertRaisesRegexp(ValueError, expected_msg, rbac_services.create_role,
                                name=SystemRole.OBSERVER)
    def test_delete_system_role(self):
        """Deleting any system role is rejected."""
        # System roles can't be deleted
        system_roles = SystemRole.get_valid_values()
        for name in system_roles:
            expected_msg = 'System roles can\'t be deleted'
            self.assertRaisesRegexp(ValueError, expected_msg, rbac_services.delete_role,
                                    name=name)
    def test_grant_and_revoke_role(self):
        """Role assignment and revocation round-trip for a fresh user."""
        user_db = UserDB(name='test-user-1')
        user_db = User.add_or_update(user_db)
        # Initial state, no roles
        role_dbs = rbac_services.get_roles_for_user(user_db=user_db)
        self.assertItemsEqual(role_dbs, [])
        role_dbs = user_db.get_roles()
        self.assertItemsEqual(role_dbs, [])
        # Assign a role, should have one role assigned
        rbac_services.assign_role_to_user(role_db=self.roles['custom_role_1'],
                                          user_db=user_db)
        role_dbs = rbac_services.get_roles_for_user(user_db=user_db)
        self.assertItemsEqual(role_dbs, [self.roles['custom_role_1']])
        role_dbs = user_db.get_roles()
        self.assertItemsEqual(role_dbs, [self.roles['custom_role_1']])
        # Revoke previously assigned role, should have no roles again
        rbac_services.revoke_role_from_user(role_db=self.roles['custom_role_1'],
                                            user_db=user_db)
        role_dbs = rbac_services.get_roles_for_user(user_db=user_db)
        self.assertItemsEqual(role_dbs, [])
        role_dbs = user_db.get_roles()
        self.assertItemsEqual(role_dbs, [])
    def test_get_all_permission_grants_for_user(self):
        """Permission grants are returned and filterable by resource type."""
        user_db = self.users['1_custom_role']
        role_db = self.roles['custom_role_1']
        permission_grants = rbac_services.get_all_permission_grants_for_user(user_db=user_db)
        self.assertItemsEqual(permission_grants, [])
        # Grant some permissions
        resource_db = self.resources['rule_1']
        permission_types = [PermissionType.RULE_CREATE, PermissionType.RULE_MODIFY]
        permission_grant = rbac_services.create_permission_grant_for_resource_db(
            role_db=role_db,
            resource_db=resource_db,
            permission_types=permission_types)
        # Retrieve all grants
        permission_grants = rbac_services.get_all_permission_grants_for_user(user_db=user_db)
        self.assertItemsEqual(permission_grants, [permission_grant])
        # Retrieve all grants, filter on resource with no grants
        permission_grants = rbac_services.get_all_permission_grants_for_user(user_db=user_db,
            resource_types=[ResourceType.PACK])
        self.assertItemsEqual(permission_grants, [])
        # Retrieve all grants, filter on resource with grants
        permission_grants = rbac_services.get_all_permission_grants_for_user(user_db=user_db,
            resource_types=[ResourceType.RULE])
        self.assertItemsEqual(permission_grants, [permission_grant])
    def test_create_and_remove_permission_grant(self):
        """Granting then removing a permission leaves the role with no grants."""
        role_db = self.roles['custom_role_2']
        resource_db = self.resources['rule_1']
        # Grant "ALL" permission to the resource
        permission_types = [PermissionType.RULE_ALL]
        rbac_services.create_permission_grant_for_resource_db(role_db=role_db,
                                                              resource_db=resource_db,
                                                              permission_types=permission_types)
        role_db.reload()
        # NOTE(review): this compares the list to itself and always passes;
        # it likely intended to assert the created grant is present.
        self.assertItemsEqual(role_db.permission_grants, role_db.permission_grants)
        # Remove the previously granted permission
        rbac_services.remove_permission_grant_for_resource_db(role_db=role_db,
                                                              resource_db=resource_db,
                                                              permission_types=permission_types)
        role_db.reload()
        self.assertItemsEqual(role_db.permission_grants, [])
    def test_manipulate_permission_grants_unsupported_resource_type(self):
        """Grant manipulation on an unsupported resource type raises ValueError."""
        # Try to manipulate permissions on an unsupported resource
        role_db = self.roles['custom_role_2']
        resource_db = UserDB()
        permission_types = [PermissionType.RULE_ALL]
        expected_msg = 'Permissions cannot be manipulated for a resource of type'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                rbac_services.create_permission_grant_for_resource_db,
                                role_db=role_db, resource_db=resource_db,
                                permission_types=permission_types)
        expected_msg = 'Permissions cannot be manipulated for a resource of type'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                rbac_services.remove_permission_grant_for_resource_db,
                                role_db=role_db, resource_db=resource_db,
                                permission_types=permission_types)
    def test_manipulate_permission_grants_invalid_permission_types(self):
        """Grant manipulation with a mismatched permission type raises ValueError."""
        # Try to assign / revoke a permission which is not supported for a particular resource
        role_db = self.roles['custom_role_2']
        resource_db = self.resources['rule_1']
        permission_types = [PermissionType.ACTION_EXECUTE]
        expected_msg = 'Invalid permission type'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                rbac_services.create_permission_grant_for_resource_db,
                                role_db=role_db, resource_db=resource_db,
                                permission_types=permission_types)
        expected_msg = 'Invalid permission type'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                rbac_services.remove_permission_grant_for_resource_db,
                                role_db=role_db, resource_db=resource_db,
                                permission_types=permission_types)
| 45.71134 | 96 | 0.663058 |
from pymongo import MongoClient
from st2tests.base import CleanDbTestCase
from st2common.services import rbac as rbac_services
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.rbac.types import SystemRole
from st2common.persistence.auth import User
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rule import Rule
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rule import RuleDB
class RBACServicesTestCase(CleanDbTestCase):
    """Tests for the RBAC service layer: roles, role assignments and permission grants."""

    def setUp(self):
        """Seed the clean test DB with users, roles, role assignments and one rule."""
        super(RBACServicesTestCase, self).setUp()
        self.users = {}
        self.roles = {}
        self.resources = {}
        # Users with system-role names (no assignments are created for them here).
        user_1_db = UserDB(name='admin')
        user_1_db = User.add_or_update(user_1_db)
        self.users['admin'] = user_1_db
        user_2_db = UserDB(name='observer')
        user_2_db = User.add_or_update(user_2_db)
        self.users['observer'] = user_2_db
        # User with no role assignments at all.
        user_3_db = UserDB(name='no_roles')
        user_3_db = User.add_or_update(user_3_db)
        self.users['no_roles'] = user_3_db
        # User which receives four raw assignments (one of them remote) below.
        user_5_db = UserDB(name='user_5')
        user_5_db = User.add_or_update(user_5_db)
        self.users['user_5'] = user_5_db
        # User with exactly one custom role assignment.
        user_4_db = UserDB(name='custom_role')
        user_4_db = User.add_or_update(user_4_db)
        self.users['1_custom_role'] = user_4_db
        # Two custom roles used by the grant / revoke and permission-grant tests.
        role_1_db = rbac_services.create_role(name='custom_role_1')
        role_2_db = rbac_services.create_role(name='custom_role_2',
                                              description='custom role 2')
        self.roles['custom_role_1'] = role_1_db
        self.roles['custom_role_2'] = role_2_db
        # Plain roles referenced by the raw pymongo inserts below.
        rbac_services.create_role(name='role_1')
        rbac_services.create_role(name='role_2')
        rbac_services.create_role(name='role_3')
        rbac_services.create_role(name='role_4')
        role_assignment_1 = UserRoleAssignmentDB(user=self.users['1_custom_role'].name,
                                                 role=self.roles['custom_role_1'].name)
        role_assignment_1 = UserRoleAssignment.add_or_update(role_assignment_1)
        # Raw pymongo inserts for user_5; note the role_1 / role_2 documents
        # deliberately omit the "is_remote" field entirely.
        client = MongoClient()
        db = client['st2-test']
        db.user_role_assignment_d_b.insert_one({'user': 'user_5', 'role': 'role_1'})
        db.user_role_assignment_d_b.insert_one({'user': 'user_5', 'role': 'role_2'})
        db.user_role_assignment_d_b.insert_one({'user': 'user_5', 'role': 'role_3',
                                                'is_remote': False})
        db.user_role_assignment_d_b.insert_one({'user': 'user_5', 'role': 'role_4',
                                                'is_remote': True})
        # A rule resource used as the target of permission grants.
        rule_1_db = RuleDB(pack='test1', name='rule1', ref='test1.rule1')
        rule_1_db = Rule.add_or_update(rule_1_db)
        self.resources['rule_1'] = rule_1_db

    def test_get_role_assignments_for_user(self):
        """Remote assignments are excluded unless include_remote=True."""
        # Documents with a missing or False "is_remote" both count as local.
        user_db = self.users['user_5']
        role_assignment_dbs = rbac_services.get_role_assignments_for_user(user_db=user_db,
                                                                          include_remote=False)
        self.assertEqual(len(role_assignment_dbs), 3)
        self.assertEqual(role_assignment_dbs[0].role, 'role_1')
        self.assertEqual(role_assignment_dbs[1].role, 'role_2')
        self.assertEqual(role_assignment_dbs[2].role, 'role_3')
        self.assertEqual(role_assignment_dbs[0].is_remote, False)
        self.assertEqual(role_assignment_dbs[1].is_remote, False)
        self.assertEqual(role_assignment_dbs[2].is_remote, False)
        # With include_remote=True the remote "role_4" assignment shows up too.
        user_db = self.users['user_5']
        role_assignment_dbs = rbac_services.get_role_assignments_for_user(user_db=user_db,
                                                                          include_remote=True)
        self.assertEqual(len(role_assignment_dbs), 4)
        self.assertEqual(role_assignment_dbs[3].role, 'role_4')
        self.assertEqual(role_assignment_dbs[3].is_remote, True)

    def test_get_all_roles(self):
        """get_all_roles returns the custom roles plus the four plain roles from setUp."""
        role_dbs = rbac_services.get_all_roles()
        self.assertEqual(len(role_dbs), len(self.roles) + 4)

    def test_get_roles_for_user(self):
        """rbac_services.get_roles_for_user and UserDB.get_roles agree."""
        # User with no roles
        user_db = self.users['no_roles']
        role_dbs = rbac_services.get_roles_for_user(user_db=user_db)
        self.assertItemsEqual(role_dbs, [])
        role_dbs = user_db.get_roles()
        self.assertItemsEqual(role_dbs, [])
        # User with one custom role
        user_db = self.users['1_custom_role']
        role_dbs = rbac_services.get_roles_for_user(user_db=user_db)
        self.assertItemsEqual(role_dbs, [self.roles['custom_role_1']])
        role_dbs = user_db.get_roles()
        self.assertItemsEqual(role_dbs, [self.roles['custom_role_1']])
        # User with remote roles: included by default and with include_remote=True,
        # excluded with include_remote=False.
        user_db = self.users['user_5']
        role_dbs = user_db.get_roles()
        self.assertEqual(len(role_dbs), 4)
        user_db = self.users['user_5']
        role_dbs = user_db.get_roles(include_remote=True)
        self.assertEqual(len(role_dbs), 4)
        user_db = self.users['user_5']
        role_dbs = user_db.get_roles(include_remote=False)
        self.assertEqual(len(role_dbs), 3)

    def test_get_all_role_assignments(self):
        """include_remote toggles whether remote assignments are returned."""
        role_assignment_dbs = rbac_services.get_all_role_assignments(include_remote=True)
        self.assertEqual(len(role_assignment_dbs), 5)
        role_assignment_dbs = rbac_services.get_all_role_assignments(include_remote=False)
        self.assertEqual(len(role_assignment_dbs), 4)
        for role_assignment_db in role_assignment_dbs:
            self.assertFalse(role_assignment_db.is_remote)

    def test_create_role_with_system_role_name(self):
        # Roles with names which match system role names can't be created
        expected_msg = '"observer" role name is blacklisted'
        self.assertRaisesRegexp(ValueError, expected_msg, rbac_services.create_role,
                                name=SystemRole.OBSERVER)

    def test_delete_system_role(self):
        """Every built-in system role must be protected from deletion."""
        system_roles = SystemRole.get_valid_values()
        for name in system_roles:
            expected_msg = 'System roles can\'t be deleted'
            self.assertRaisesRegexp(ValueError, expected_msg, rbac_services.delete_role,
                                    name=name)

    def test_grant_and_revoke_role(self):
        """Assigning then revoking a role round-trips through both lookup APIs."""
        user_db = UserDB(name='test-user-1')
        user_db = User.add_or_update(user_db)
        # Initial state - no roles assigned.
        role_dbs = rbac_services.get_roles_for_user(user_db=user_db)
        self.assertItemsEqual(role_dbs, [])
        role_dbs = user_db.get_roles()
        self.assertItemsEqual(role_dbs, [])
        rbac_services.assign_role_to_user(role_db=self.roles['custom_role_1'],
                                          user_db=user_db)
        role_dbs = rbac_services.get_roles_for_user(user_db=user_db)
        self.assertItemsEqual(role_dbs, [self.roles['custom_role_1']])
        role_dbs = user_db.get_roles()
        self.assertItemsEqual(role_dbs, [self.roles['custom_role_1']])
        rbac_services.revoke_role_from_user(role_db=self.roles['custom_role_1'],
                                            user_db=user_db)
        role_dbs = rbac_services.get_roles_for_user(user_db=user_db)
        self.assertItemsEqual(role_dbs, [])
        role_dbs = user_db.get_roles()
        self.assertItemsEqual(role_dbs, [])

    def test_get_all_permission_grants_for_user(self):
        """Grants can be looked up per user and filtered by resource type."""
        user_db = self.users['1_custom_role']
        role_db = self.roles['custom_role_1']
        permission_grants = rbac_services.get_all_permission_grants_for_user(user_db=user_db)
        self.assertItemsEqual(permission_grants, [])
        resource_db = self.resources['rule_1']
        permission_types = [PermissionType.RULE_CREATE, PermissionType.RULE_MODIFY]
        permission_grant = rbac_services.create_permission_grant_for_resource_db(
            role_db=role_db,
            resource_db=resource_db,
            permission_types=permission_types)
        permission_grants = rbac_services.get_all_permission_grants_for_user(user_db=user_db)
        self.assertItemsEqual(permission_grants, [permission_grant])
        # Non-matching resource type filter yields nothing; matching one yields the grant.
        permission_grants = rbac_services.get_all_permission_grants_for_user(user_db=user_db,
                                                                             resource_types=[ResourceType.PACK])
        self.assertItemsEqual(permission_grants, [])
        permission_grants = rbac_services.get_all_permission_grants_for_user(user_db=user_db,
                                                                             resource_types=[ResourceType.RULE])
        self.assertItemsEqual(permission_grants, [permission_grant])

    def test_create_and_remove_permission_grant(self):
        """Creating then removing a grant leaves the role with no grants."""
        role_db = self.roles['custom_role_2']
        resource_db = self.resources['rule_1']
        permission_types = [PermissionType.RULE_ALL]
        rbac_services.create_permission_grant_for_resource_db(role_db=role_db,
                                                              resource_db=resource_db,
                                                              permission_types=permission_types)
        role_db.reload()
        # NOTE(review): this assertion compares the field to itself so it can
        # never fail; presumably it was meant to assert a grant was actually
        # created (e.g. that permission_grants is non-empty) - confirm intent.
        self.assertItemsEqual(role_db.permission_grants, role_db.permission_grants)
        rbac_services.remove_permission_grant_for_resource_db(role_db=role_db,
                                                              resource_db=resource_db,
                                                              permission_types=permission_types)
        role_db.reload()
        self.assertItemsEqual(role_db.permission_grants, [])

    def test_manipulate_permission_grants_unsupported_resource_type(self):
        """Grant create / remove must reject resources of unsupported types."""
        role_db = self.roles['custom_role_2']
        resource_db = UserDB()
        permission_types = [PermissionType.RULE_ALL]
        expected_msg = 'Permissions cannot be manipulated for a resource of type'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                rbac_services.create_permission_grant_for_resource_db,
                                role_db=role_db, resource_db=resource_db,
                                permission_types=permission_types)
        expected_msg = 'Permissions cannot be manipulated for a resource of type'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                rbac_services.remove_permission_grant_for_resource_db,
                                role_db=role_db, resource_db=resource_db,
                                permission_types=permission_types)

    def test_manipulate_permission_grants_invalid_permission_types(self):
        """Grant create / remove must reject permission types of other resources."""
        role_db = self.roles['custom_role_2']
        resource_db = self.resources['rule_1']
        permission_types = [PermissionType.ACTION_EXECUTE]
        expected_msg = 'Invalid permission type'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                rbac_services.create_permission_grant_for_resource_db,
                                role_db=role_db, resource_db=resource_db,
                                permission_types=permission_types)
        expected_msg = 'Invalid permission type'
        self.assertRaisesRegexp(ValueError, expected_msg,
                                rbac_services.remove_permission_grant_for_resource_db,
                                role_db=role_db, resource_db=resource_db,
                                permission_types=permission_types)
| true | true |
1c3b882a637f80a50713d18dbe29569837195b4d | 6,860 | py | Python | src/town/town_manager.py | darealmop/botty | bdb8581b4f6b4ae0c20fc1030dfd00a97113e914 | [
"MIT"
] | null | null | null | src/town/town_manager.py | darealmop/botty | bdb8581b4f6b4ae0c20fc1030dfd00a97113e914 | [
"MIT"
] | null | null | null | src/town/town_manager.py | darealmop/botty | bdb8581b4f6b4ae0c20fc1030dfd00a97113e914 | [
"MIT"
] | null | null | null | from typing import Union
from item import ItemFinder
from template_finder import TemplateFinder
from config import Config
from pather import Location
from logger import Logger
from ui import UiManager
from town import IAct, A3, A4, A5
from utils.misc import wait
class TownManager:
    """Dispatches town chores (heal, resurrect, stash, repair, waypoints) to acts.

    Holds one IAct implementation per supported act town (A3/A4/A5). Each public
    method resolves the act from the char's current location and, when that act
    cannot provide the requested service, falls back to an act that can
    (A4 for resurrecting, A5 for stashing and repairing). Methods return a
    falsy value (False/None) on failure and the new Location on success.
    """

    def __init__(self, template_finder: TemplateFinder, ui_manager: UiManager, a3: A3, a4: A4, a5: A5):
        self._config = Config()
        self._template_finder = template_finder
        self._ui_manager = ui_manager
        self._item_finder = ItemFinder(self._config)
        # Map each act's town start location to its IAct implementation.
        self._acts: dict[Location, IAct] = {
            Location.A3_TOWN_START: a3,
            Location.A4_TOWN_START: a4,
            Location.A5_TOWN_START: a5
        }

    @staticmethod
    def get_act_from_location(loc: Union[Location, str]) -> Union[Location, None]:
        """Map a location or a template name string (e.g. "A5_TOWN_0") to an act.

        Only the "A<N>_" prefix of the value is inspected.
        :param loc: Location value or template name string
        :return: the matching *_TOWN_START location, or None for unknown prefixes
        """
        location = None
        if loc.upper().startswith("A5_"):
            location = Location.A5_TOWN_START
        elif loc.upper().startswith("A4_"):
            location = Location.A4_TOWN_START
        elif loc.upper().startswith("A3_"):
            location = Location.A3_TOWN_START
        return location

    def wait_for_town_spawn(self, time_out: Union[float, None] = None) -> Union[Location, None]:
        """Wait for the char to spawn in town after starting a new game.

        :param time_out: Optional float value for time out in seconds, defaults to None
        :return: Location of the town (e.g. Location.A4_TOWN_START) or None if nothing was found within time_out time
        """
        template_match = self._template_finder.search_and_wait([
            "A5_TOWN_0", "A5_TOWN_1",
            "A4_TOWN_4", "A4_TOWN_5",
            "A3_TOWN_0", "A3_TOWN_1"
        ], best_match=True, time_out=time_out)
        if template_match.valid:
            # template names start with the act prefix, so they map directly
            return TownManager.get_act_from_location(template_match.name)
        return None

    def wait_for_tp(self, curr_loc: Location):
        """Wait for a town portal in the act matching curr_loc (False for unknown acts)."""
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        return self._acts[curr_act].wait_for_tp()

    def open_wp(self, curr_loc: Location):
        """Open the waypoint in the act matching curr_loc (False for unknown acts)."""
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        return self._acts[curr_act].open_wp(curr_loc)

    def go_to_act(self, act_idx: int, curr_loc: Location) -> Union[Location, bool]:
        """Travel to the given act (3, 4 or 5) via waypoint if not already there.

        :param act_idx: index of the destination act, one of 3, 4, 5
        :param curr_loc: current location of the char
        :return: the destination act's waypoint location (or curr_loc if already
                 in the act), False on failure or unsupported act
        """
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        # check if we already are in the desired act
        if act_idx == 3: act = Location.A3_TOWN_START
        elif act_idx == 4: act = Location.A4_TOWN_START
        elif act_idx == 5: act = Location.A5_TOWN_START
        else:
            Logger.error(f"Act {act_idx} is not supported")
            return False
        if curr_act == act:
            return curr_loc
        # if not, move to the desired act via waypoint
        if not self._acts[curr_act].open_wp(curr_loc): return False
        self._ui_manager.use_wp(act_idx, 0)
        return self._acts[act].get_wp_location()

    def heal(self, curr_loc: Location) -> Union[Location, bool]:
        """Heal at the current act's healer if it has one; otherwise continue unhealed."""
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        # check if we can heal in current act
        if self._acts[curr_act].can_heal():
            return self._acts[curr_act].heal(curr_loc)
        # Healing is best-effort: keep going instead of failing the run.
        Logger.warning(f"Could not heal in {curr_act}. Continue without healing")
        return curr_loc

    def resurrect(self, curr_loc: Location) -> Union[Location, bool]:
        """Resurrect the merc in the current act, falling back to A4 if needed."""
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        # check if we can resurrect in current act
        if self._acts[curr_act].can_resurrect():
            return self._acts[curr_act].resurrect(curr_loc)
        new_loc = self.go_to_act(4, curr_loc)
        if not new_loc: return False
        return self._acts[Location.A4_TOWN_START].resurrect(new_loc)

    def stash(self, curr_loc: Location) -> Union[Location, bool]:
        """Open the stash (falling back to A5) and deposit all loot.

        :return: the location in front of the stash on success, False on failure
        """
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        # Open the stash in the current act if it has one, otherwise travel to A5.
        if self._acts[curr_act].can_stash():
            new_loc = self._acts[curr_act].open_stash(curr_loc)
        else:
            new_loc = self.go_to_act(5, curr_loc)
            if not new_loc: return False
            new_loc = self._acts[Location.A5_TOWN_START].open_stash(new_loc)
        if not new_loc: return False
        self._ui_manager.stash_all_items(self._config.char["num_loot_columns"], self._item_finder)
        return new_loc

    def repair_and_fill_tps(self, curr_loc: Location) -> Union[Location, bool]:
        """Repair gear and refill town portal tomes at a vendor (falling back to A5).

        :return: the location at the vendor on success, False on failure
        """
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        # Open the trade & repair menu in the current act if possible, else in A5.
        if self._acts[curr_act].can_trade_and_repair():
            new_loc = self._acts[curr_act].open_trade_and_repair_menu(curr_loc)
        else:
            new_loc = self.go_to_act(5, curr_loc)
            if not new_loc: return False
            new_loc = self._acts[Location.A5_TOWN_START].open_trade_and_repair_menu(new_loc)
        if not new_loc: return False
        if self._ui_manager.repair_and_fill_up_tp():
            wait(0.1, 0.2)
            self._ui_manager.close_vendor_screen()
            return new_loc
        # Previously a failed repair in the current act was still reported as
        # success while the A5 fallback reported failure; report failure
        # consistently for both paths.
        return False
# Test: Move to desired location in d2r and run any town action you want to test from there
if __name__ == "__main__":
    import keyboard
    import os
    # F12 force-exits the process at any point, even while blocked in a wait.
    keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))
    print("Move to d2r window and press f11")
    keyboard.wait("f11")
    # Remaining project imports happen only after the user has confirmed.
    from char import Hammerdin
    from pather import Pather
    from screen import Screen
    from npc_manager import NpcManager
    cfg = Config()
    scr = Screen(cfg.general["monitor"])
    finder = TemplateFinder(scr)
    npcs = NpcManager(scr, finder)
    route_pather = Pather(scr, finder)
    ui = UiManager(scr, finder)
    hammerdin = Hammerdin(cfg.hammerdin, cfg.char, scr, finder, ui, route_pather)
    act5 = A5(scr, finder, route_pather, hammerdin, npcs)
    act4 = A4(scr, finder, route_pather, hammerdin, npcs)
    act3 = A3(scr, finder, route_pather, hammerdin, npcs)
    town_mgr = TownManager(finder, ui, act3, act4, act5)
    print(town_mgr.repair_and_fill_tps(Location.A3_TOWN_START))
| 43.974359 | 117 | 0.675802 | from typing import Union
from item import ItemFinder
from template_finder import TemplateFinder
from config import Config
from pather import Location
from logger import Logger
from ui import UiManager
from town import IAct, A3, A4, A5
from utils.misc import wait
class TownManager:
    """Dispatches town chores (heal, resurrect, stash, repair, waypoints) to the
    act (A3/A4/A5) matching the char's current location."""

    def __init__(self, template_finder: TemplateFinder, ui_manager: UiManager, a3: A3, a4: A4, a5: A5):
        self._config = Config()
        self._template_finder = template_finder
        self._ui_manager = ui_manager
        self._item_finder = ItemFinder(self._config)
        # One IAct implementation per supported act town.
        self._acts: dict[Location, IAct] = {
            Location.A3_TOWN_START: a3,
            Location.A4_TOWN_START: a4,
            Location.A5_TOWN_START: a5
        }

    @staticmethod
    def get_act_from_location(loc: Location) -> Location:
        """Return the act's *_TOWN_START location for a location / template name,
        based on its "A<N>_" prefix, or None if the prefix is not recognized."""
        location = None
        if loc.upper().startswith("A5_"):
            location = Location.A5_TOWN_START
        elif loc.upper().startswith("A4_"):
            location = Location.A4_TOWN_START
        elif loc.upper().startswith("A3_"):
            location = Location.A3_TOWN_START
        return location

    def wait_for_town_spawn(self, time_out: float = None) -> Location:
        """Wait for the char to spawn in town after starting a new game.

        :param time_out: optional timeout in seconds, defaults to None
        :return: the town's *_TOWN_START Location or None if nothing was found
        """
        template_match = self._template_finder.search_and_wait([
            "A5_TOWN_0", "A5_TOWN_1",
            "A4_TOWN_4", "A4_TOWN_5",
            "A3_TOWN_0", "A3_TOWN_1"
        ], best_match=True, time_out=time_out)
        if template_match.valid:
            return TownManager.get_act_from_location(template_match.name)
        return None

    def wait_for_tp(self, curr_loc: Location):
        # Delegate to the act matching curr_loc; False if the act is unknown.
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        return self._acts[curr_act].wait_for_tp()

    def open_wp(self, curr_loc: Location):
        # Delegate to the act matching curr_loc; False if the act is unknown.
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        return self._acts[curr_act].open_wp(curr_loc)

    def go_to_act(self, act_idx: int, curr_loc: Location) -> Union[Location, bool]:
        """Travel to act 3/4/5 via waypoint; returns the new location or False."""
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        if act_idx == 3: act = Location.A3_TOWN_START
        elif act_idx == 4: act = Location.A4_TOWN_START
        elif act_idx == 5: act = Location.A5_TOWN_START
        else:
            Logger.error(f"Act {act_idx} is not supported")
            return False
        # Already in the desired act - nothing to do.
        if curr_act == act:
            return curr_loc
        if not self._acts[curr_act].open_wp(curr_loc): return False
        self._ui_manager.use_wp(act_idx, 0)
        return self._acts[act].get_wp_location()

    def heal(self, curr_loc: Location) -> Union[Location, bool]:
        """Heal in the current act if possible; otherwise continue without healing."""
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        if self._acts[curr_act].can_heal():
            return self._acts[curr_act].heal(curr_loc)
        Logger.warning(f"Could not heal in {curr_act}. Continue without healing")
        return curr_loc

    def resurrect(self, curr_loc: Location) -> Union[Location, bool]:
        """Resurrect the merc in the current act, falling back to A4."""
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        if self._acts[curr_act].can_resurrect():
            return self._acts[curr_act].resurrect(curr_loc)
        new_loc = self.go_to_act(4, curr_loc)
        if not new_loc: return False
        return self._acts[Location.A4_TOWN_START].resurrect(new_loc)

    def stash(self, curr_loc: Location) -> Union[Location, bool]:
        """Open the stash (falling back to A5) and deposit all loot."""
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        if self._acts[curr_act].can_stash():
            new_loc = self._acts[curr_act].open_stash(curr_loc)
            if not new_loc: return False
            self._ui_manager.stash_all_items(self._config.char["num_loot_columns"], self._item_finder)
            return new_loc
        # Current act has no stash - use the one in A5.
        new_loc = self.go_to_act(5, curr_loc)
        if not new_loc: return False
        new_loc = self._acts[Location.A5_TOWN_START].open_stash(new_loc)
        if not new_loc: return False
        self._ui_manager.stash_all_items(self._config.char["num_loot_columns"], self._item_finder)
        return new_loc

    def repair_and_fill_tps(self, curr_loc: Location) -> Union[Location, bool]:
        """Repair gear and refill TP tomes at a vendor (falling back to A5)."""
        curr_act = TownManager.get_act_from_location(curr_loc)
        if curr_act is None: return False
        if self._acts[curr_act].can_trade_and_repair():
            new_loc = self._acts[curr_act].open_trade_and_repair_menu(curr_loc)
            if not new_loc: return False
            if self._ui_manager.repair_and_fill_up_tp():
                wait(0.1, 0.2)
                self._ui_manager.close_vendor_screen()
            # NOTE(review): this path reports success even if the repair UI
            # failed, unlike the A5 fallback below - confirm intent.
            return new_loc
        new_loc = self.go_to_act(5, curr_loc)
        if not new_loc: return False
        new_loc = self._acts[Location.A5_TOWN_START].open_trade_and_repair_menu(new_loc)
        if not new_loc: return False
        if self._ui_manager.repair_and_fill_up_tp():
            wait(0.1, 0.2)
            self._ui_manager.close_vendor_screen()
            return new_loc
        return False
# Manual smoke test: position the char in D2R, then run this module directly.
if __name__ == "__main__":
    import keyboard
    import os
    # F12 hotkey force-exits the process at any point.
    keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))
    print("Move to d2r window and press f11")
    keyboard.wait("f11")
    from char import Hammerdin
    from pather import Pather
    from screen import Screen
    from npc_manager import NpcManager
    config = Config()
    screen = Screen(config.general["monitor"])
    template_finder = TemplateFinder(screen)
    npc_manager = NpcManager(screen, template_finder)
    pather = Pather(screen, template_finder)
    ui_manager = UiManager(screen, template_finder)
    char = Hammerdin(config.hammerdin, config.char, screen, template_finder, ui_manager, pather)
    a5 = A5(screen, template_finder, pather, char, npc_manager)
    a4 = A4(screen, template_finder, pather, char, npc_manager)
    a3 = A3(screen, template_finder, pather, char, npc_manager)
    # Wire up all three act towns and exercise the repair flow starting in A3.
    town_manager = TownManager(template_finder, ui_manager, a3, a4, a5)
    print(town_manager.repair_and_fill_tps(Location.A3_TOWN_START))
| true | true |
1c3b883d40ecbfa89fe733e7e5f4064d03109ce2 | 17,794 | py | Python | tensorflow/tools/api/tests/api_compatibility_test.py | irvifa/tensorflow | b5973195532a786343de6a4278322056574b207c | [
"Apache-2.0"
] | 1 | 2018-08-15T01:28:13.000Z | 2018-08-15T01:28:13.000Z | tensorflow/tools/api/tests/api_compatibility_test.py | irvifa/tensorflow | b5973195532a786343de6a4278322056574b207c | [
"Apache-2.0"
] | 1 | 2019-12-15T06:51:21.000Z | 2019-12-15T06:51:21.000Z | tensorflow/tools/api/tests/api_compatibility_test.py | irvifa/tensorflow | b5973195532a786343de6a4278322056574b207c | [
"Apache-2.0"
] | 1 | 2020-12-16T06:33:59.000Z | 2020-12-16T06:33:59.000Z | # Lint as: python2, python3
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with
"--update_goldens" flag set to "True" to update goldens when making changes to
the public TF python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import six
import tensorflow as tf
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# pylint: disable=g-import-not-at-top,unused-import
_TENSORBOARD_AVAILABLE = True
try:
import tensorboard as _tb
except ImportError:
_TENSORBOARD_AVAILABLE = False
# pylint: enable=g-import-not-at-top,unused-import
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, only_test_core_api, default False:
_ONLY_TEST_CORE_API_HELP = """
Some TF APIs are being moved outside of the tensorflow/ directory. There is
no guarantee which versions of these APIs will be present when running this
test. Therefore, do not error out on API changes in non-core TF code
if this flag is set.
"""
# DEFINE_boolean, verbose_diffs, default True:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
# Initialized with _InitPathConstants function below.
_API_GOLDEN_FOLDER_V1 = None
_API_GOLDEN_FOLDER_V2 = None
def _InitPathConstants():
  """Resolve and cache the golden-file directories for the v1 and v2 APIs."""
  global _API_GOLDEN_FOLDER_V1
  global _API_GOLDEN_FOLDER_V2
  v2_root = os.path.join(resource_loader.get_data_files_path(), '..', 'golden',
                         'v2', 'tensorflow.pbtxt')

  if FLAGS.update_goldens:
    # Resolve symbolic links first so updated goldens are written to the
    # real files rather than to the link targets' copies.
    v2_root = os.path.realpath(v2_root)
  # Both golden directories are derived from the resolved v2 root file.
  _API_GOLDEN_FOLDER_V2 = os.path.dirname(v2_root)
  _API_GOLDEN_FOLDER_V1 = os.path.normpath(
      os.path.join(_API_GOLDEN_FOLDER_V2, '..', 'v1'))
# Data files bundled with the test: the contributor README and the warning
# printed when goldens are rewritten via --update_goldens.
_TEST_README_FILE = resource_loader.get_path_to_datafile('README.txt')
_UPDATE_WARNING_FILE = resource_loader.get_path_to_datafile(
    'API_UPDATE_WARNING.txt')

# Packages whose goldens/symbols are skipped when --only_test_core_api is set.
_NON_CORE_PACKAGES = ['estimator']

# TODO(annarev): remove this once we test with newer version of
# estimator that actually has compat v1 version.
if not hasattr(tf.compat.v1, 'estimator'):
  tf.compat.v1.estimator = tf.estimator
  tf.compat.v2.estimator = tf.estimator
def _KeyToFilePath(key, api_version):
  """From a given key, construct a filepath.

  Filepath will be inside golden folder for api_version.

  Args:
    key: a string used to determine the file path
    api_version: a number indicating the tensorflow API version, e.g. 1 or 2.

  Returns:
    A string of file path to the pbtxt file which describes the public API
  """
  # Insert a dash before every capital letter and lowercase it,
  # e.g. "FooBar" -> "-foo-bar".
  case_insensitive_key = re.sub(
      '([A-Z]{1})',
      lambda matchobj: '-%s' % (matchobj.group(0).lower()),
      six.ensure_str(key))
  api_folder = _API_GOLDEN_FOLDER_V1
  if api_version == 2:
    api_folder = _API_GOLDEN_FOLDER_V2
  return os.path.join(api_folder, '%s.pbtxt' % case_insensitive_key)
def _FileNameToKey(filename):
  """From a given filename, construct a key we use for api objects."""
  # Strip the directory and the extension, then invert _KeyToFilePath's
  # transformation: every "-x" becomes "X".
  base_name = os.path.splitext(os.path.basename(filename))[0]
  return re.sub('((-[a-z]){1})',
                lambda matchobj: matchobj.group(0)[1].upper(),
                six.ensure_str(base_name))
def _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):
  """A Visitor that crashes on subclasses of generated proto classes."""
  # Only proto Message classes themselves are of interest; the Message base
  # class is exempt.
  is_message_class = (
      isinstance(parent, type) and issubclass(parent, message.Message))
  if not is_message_class or parent is message.Message:
    return
  # Reject anything that directly subclasses Message.
  if message.Message in parent.__bases__:
    raise NotImplementedError(
        'Object tf.%s is a subclass of a generated proto Message. '
        'They are not yet supported by the API tools.' % path)
def _FilterNonCoreGoldenFiles(golden_file_list):
  """Filter out non-core API pbtxt files."""
  excluded_prefixes = ['tensorflow.%s.' % p for p in _NON_CORE_PACKAGES]

  def _is_core(golden_file):
    # A golden file belongs to a non-core package when its basename starts
    # with any of the excluded package prefixes.
    basename = six.ensure_str(golden_file).rsplit('/')[-1]
    return not any(basename.startswith(pre) for pre in excluded_prefixes)

  return [f for f in golden_file_list if _is_core(f)]
def _FilterGoldenProtoDict(golden_proto_dict, omit_golden_symbols_map):
  """Filter out golden proto dict symbols that should be omitted."""
  if not omit_golden_symbols_map:
    return golden_proto_dict
  filtered_proto_dict = dict(golden_proto_dict)
  for key, symbol_list in six.iteritems(omit_golden_symbols_map):
    # Copy the proto before mutating so the caller's dict entries stay intact.
    api_object = api_objects_pb2.TFAPIObject()
    api_object.CopyFrom(filtered_proto_dict[key])
    filtered_proto_dict[key] = api_object
    if api_object.HasField('tf_module'):
      module_or_class = api_object.tf_module
    elif api_object.HasField('tf_class'):
      module_or_class = api_object.tf_class
    else:
      continue
    for members in (module_or_class.member, module_or_class.member_method):
      kept_members = [m for m in members if m.name not in symbol_list]
      # Repeated proto fields don't support slice assignment: clear, then extend.
      del members[:]
      members.extend(kept_members)
  return filtered_proto_dict
class ApiCompatibilityTest(test.TestCase):
def __init__(self, *args, **kwargs):
super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
golden_update_warning_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(), _UPDATE_WARNING_FILE)
self._update_golden_warning = file_io.read_file_to_string(
golden_update_warning_filename)
test_readme_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(), _TEST_README_FILE)
self._test_readme_message = file_io.read_file_to_string(
test_readme_filename)
def _AssertProtoDictEquals(self,
expected_dict,
actual_dict,
verbose=False,
update_goldens=False,
additional_missing_object_message='',
api_version=2):
"""Diff given dicts of protobufs and report differences a readable way.
Args:
expected_dict: a dict of TFAPIObject protos constructed from golden files.
actual_dict: a ict of TFAPIObject protos constructed by reading from the
TF package linked to the test.
verbose: Whether to log the full diffs, or simply report which files were
different.
update_goldens: Whether to update goldens when there are diffs found.
additional_missing_object_message: Message to print when a symbol is
missing.
api_version: TensorFlow API version to test.
"""
diffs = []
verbose_diffs = []
expected_keys = set(expected_dict.keys())
actual_keys = set(actual_dict.keys())
only_in_expected = expected_keys - actual_keys
only_in_actual = actual_keys - expected_keys
all_keys = expected_keys | actual_keys
# This will be populated below.
updated_keys = []
for key in all_keys:
diff_message = ''
verbose_diff_message = ''
# First check if the key is not found in one or the other.
if key in only_in_expected:
diff_message = 'Object %s expected but not found (removed). %s' % (
key, additional_missing_object_message)
verbose_diff_message = diff_message
elif key in only_in_actual:
diff_message = 'New object %s found (added).' % key
verbose_diff_message = diff_message
else:
# Do not truncate diff
self.maxDiff = None # pylint: disable=invalid-name
# Now we can run an actual proto diff.
try:
self.assertProtoEquals(expected_dict[key], actual_dict[key])
except AssertionError as e:
updated_keys.append(key)
diff_message = 'Change detected in python object: %s.' % key
verbose_diff_message = str(e)
# All difference cases covered above. If any difference found, add to the
# list.
if diff_message:
diffs.append(diff_message)
verbose_diffs.append(verbose_diff_message)
# If diffs are found, handle them based on flags.
if diffs:
diff_count = len(diffs)
logging.error(self._test_readme_message)
logging.error('%d differences found between API and golden.', diff_count)
if update_goldens:
# Write files if requested.
logging.warning(self._update_golden_warning)
# If the keys are only in expected, some objects are deleted.
# Remove files.
for key in only_in_expected:
filepath = _KeyToFilePath(key, api_version)
file_io.delete_file(filepath)
# If the files are only in actual (current library), these are new
# modules. Write them to files. Also record all updates in files.
for key in only_in_actual | set(updated_keys):
filepath = _KeyToFilePath(key, api_version)
file_io.write_string_to_file(
filepath, text_format.MessageToString(actual_dict[key]))
else:
# Include the actual differences to help debugging.
for d in diffs:
logging.error(' %s', d)
# Fail if we cannot fix the test by updating goldens.
self.fail('%d differences found between API and golden.' % diff_count)
else:
logging.info('No differences found between API and golden.')
def testNoSubclassOfMessage(self):
visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
visitor.do_not_descend_map['tf'].append('contrib')
# Skip compat.v1 and compat.v2 since they are validated in separate tests.
visitor.private_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf, visitor)
def testNoSubclassOfMessageV1(self):
if not hasattr(tf.compat, 'v1'):
return
visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
visitor.do_not_descend_map['tf'].append('contrib')
if FLAGS.only_test_core_api:
visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
visitor.private_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf.compat.v1, visitor)
def testNoSubclassOfMessageV2(self):
if not hasattr(tf.compat, 'v2'):
return
visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
visitor.do_not_descend_map['tf'].append('contrib')
if FLAGS.only_test_core_api:
visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
visitor.private_map['tf.compat'] = ['v1', 'v2']
traverse.traverse(tf.compat.v2, visitor)
def _checkBackwardsCompatibility(self,
root,
golden_file_pattern,
api_version,
additional_private_map=None,
omit_golden_symbols_map=None):
# Extract all API stuff.
visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.private_map['tf'].append('contrib')
if api_version == 2:
public_api_visitor.private_map['tf'].append('enable_v2_behavior')
public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
if FLAGS.only_test_core_api:
public_api_visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
if additional_private_map:
public_api_visitor.private_map.update(additional_private_map)
traverse.traverse(root, public_api_visitor)
proto_dict = visitor.GetProtos()
# Read all golden files.
golden_file_list = file_io.get_matching_files(golden_file_pattern)
if FLAGS.only_test_core_api:
golden_file_list = _FilterNonCoreGoldenFiles(golden_file_list)
def _ReadFileToProto(filename):
"""Read a filename, create a protobuf from its contents."""
ret_val = api_objects_pb2.TFAPIObject()
text_format.Merge(file_io.read_file_to_string(filename), ret_val)
return ret_val
golden_proto_dict = {
_FileNameToKey(filename): _ReadFileToProto(filename)
for filename in golden_file_list
}
golden_proto_dict = _FilterGoldenProtoDict(golden_proto_dict,
omit_golden_symbols_map)
# Diff them. Do not fail if called with update.
# If the test is run to update goldens, only report diffs but do not fail.
self._AssertProtoDictEquals(
golden_proto_dict,
proto_dict,
verbose=FLAGS.verbose_diffs,
update_goldens=FLAGS.update_goldens,
api_version=api_version)
  def testAPIBackwardsCompatibility(self):
    """Checks the top-level tf API against the goldens for the built version."""
    # The installed TF may expose either the 1.x or 2.x surface; pick the
    # matching golden set. _major_api_version may be absent on older builds.
    api_version = 1
    if hasattr(tf, '_major_api_version') and tf._major_api_version == 2:
      api_version = 2
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    omit_golden_symbols_map = {}
    if (api_version == 2 and FLAGS.only_test_core_api and
        not _TENSORBOARD_AVAILABLE):
      # In TF 2.0 these summary symbols are imported from TensorBoard.
      omit_golden_symbols_map['tensorflow.summary'] = [
          'audio', 'histogram', 'image', 'scalar', 'text'
      ]
    self._checkBackwardsCompatibility(
        tf,
        golden_file_pattern,
        api_version,
        # Skip compat.v1 and compat.v2 since they are validated
        # in separate tests.
        additional_private_map={'tf.compat': ['v1', 'v2']},
        omit_golden_symbols_map=omit_golden_symbols_map)
    # Check that V2 API does not have contrib
    self.assertTrue(api_version == 1 or not hasattr(tf, 'contrib'))
def testAPIBackwardsCompatibilityV1(self):
api_version = 1
golden_file_pattern = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*', api_version))
self._checkBackwardsCompatibility(
tf.compat.v1,
golden_file_pattern,
api_version,
additional_private_map={
'tf': ['pywrap_tensorflow'],
'tf.compat': ['v1', 'v2'],
},
omit_golden_symbols_map={'tensorflow': ['pywrap_tensorflow']})
  def testAPIBackwardsCompatibilityV2(self):
    """Checks the tf.compat.v2 API surface against the v2 golden files."""
    api_version = 2
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    omit_golden_symbols_map = {}
    if FLAGS.only_test_core_api and not _TENSORBOARD_AVAILABLE:
      # In TF 2.0 these summary symbols are imported from TensorBoard.
      omit_golden_symbols_map['tensorflow.summary'] = [
          'audio', 'histogram', 'image', 'scalar', 'text'
      ]
    self._checkBackwardsCompatibility(
        tf.compat.v2,
        golden_file_pattern,
        api_version,
        # compat.v1 and compat.v2 are hidden; each is validated by its own test.
        additional_private_map={'tf.compat': ['v1', 'v2']},
        omit_golden_symbols_map=omit_golden_symbols_map)
if __name__ == '__main__':
  parser = argparse.ArgumentParser()

  def _ParseBoolFlag(value):
    """argparse-friendly boolean parser.

    `type=bool` is a classic argparse pitfall: bool('False') is True, so any
    non-empty string -- including "False" and "0" -- used to enable the flag.
    """
    if value.lower() in ('true', 't', '1', 'yes'):
      return True
    if value.lower() in ('false', 'f', '0', 'no'):
      return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)

  parser.add_argument(
      '--update_goldens',
      type=_ParseBoolFlag,
      default=False,
      help=_UPDATE_GOLDENS_HELP)
  # TODO(mikecase): Create Estimator's own API compatibility test or
  # a more general API compatibility test for use for TF components.
  parser.add_argument(
      '--only_test_core_api',
      type=_ParseBoolFlag,
      default=True,  # only_test_core_api default value
      help=_ONLY_TEST_CORE_API_HELP)
  parser.add_argument(
      '--verbose_diffs', type=_ParseBoolFlag, default=True,
      help=_VERBOSE_DIFFS_HELP)
  FLAGS, unparsed = parser.parse_known_args()
  _InitPathConstants()
  # Now update argv, so that unittest library does not get confused.
  sys.argv = [sys.argv[0]] + unparsed
  test.main()
| 38.184549 | 80 | 0.70417 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import six
import tensorflow as tf
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
_TENSORBOARD_AVAILABLE = True
try:
import tensorboard as _tb
except ImportError:
_TENSORBOARD_AVAILABLE = False
FLAGS = None
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
_ONLY_TEST_CORE_API_HELP = """
Some TF APIs are being moved outside of the tensorflow/ directory. There is
no guarantee which versions of these APIs will be present when running this
test. Therefore, do not error out on API changes in non-core TF code
if this flag is set.
"""
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
_API_GOLDEN_FOLDER_V1 = None
_API_GOLDEN_FOLDER_V2 = None
def _InitPathConstants():
  """Resolves the v1/v2 golden-file directories relative to the test data.

  Must run after FLAGS is populated from the command line.  When
  --update_goldens is set the v2 path is passed through realpath first --
  presumably so writes follow symlinks to the real files; confirm.
  """
  global _API_GOLDEN_FOLDER_V1
  global _API_GOLDEN_FOLDER_V2
  root_golden_path_v2 = os.path.join(resource_loader.get_data_files_path(),
                                     '..', 'golden', 'v2', 'tensorflow.pbtxt')
  if FLAGS.update_goldens:
    root_golden_path_v2 = os.path.realpath(root_golden_path_v2)
  # v2 goldens live in .../golden/v2; the v1 folder is its sibling.
  _API_GOLDEN_FOLDER_V2 = os.path.dirname(root_golden_path_v2)
  _API_GOLDEN_FOLDER_V1 = os.path.normpath(
      os.path.join(_API_GOLDEN_FOLDER_V2, '..', 'v1'))
# Messages surfaced to developers when goldens differ or get rewritten.
_TEST_README_FILE = resource_loader.get_path_to_datafile('README.txt')
_UPDATE_WARNING_FILE = resource_loader.get_path_to_datafile(
    'API_UPDATE_WARNING.txt')
# Packages moving out of core TF; skipped when --only_test_core_api is set.
_NON_CORE_PACKAGES = ['estimator']
# Some builds ship without the compat estimator alias; patch it in so the
# compat namespaces stay traversable.  NOTE(review): assumes tf.estimator
# exists whenever the alias is missing -- confirm.
if not hasattr(tf.compat.v1, 'estimator'):
  tf.compat.v1.estimator = tf.estimator
  tf.compat.v2.estimator = tf.estimator
def _KeyToFilePath(key, api_version):
  """Maps an API object key to its golden-file path.

  Capital letters are rewritten as '-<lowercase>' so the resulting file
  names stay distinct on case-insensitive filesystems; the golden folder
  is chosen by API version.
  """
  def _dash_lower(match_obj):
    return '-' + match_obj.group(0).lower()
  dashed_key = re.sub('([A-Z]{1})', _dash_lower, six.ensure_str(key))
  if api_version == 2:
    folder = _API_GOLDEN_FOLDER_V2
  else:
    folder = _API_GOLDEN_FOLDER_V1
  return os.path.join(folder, '%s.pbtxt' % dashed_key)
def _FileNameToKey(filename):
  """Inverse of _KeyToFilePath: maps a golden-file name back to an API key."""
  def _uncap(match_obj):
    # '-x' -> 'X'
    return match_obj.group(0)[1].upper()
  stem = os.path.splitext(os.path.basename(filename))[0]
  return re.sub('((-[a-z]){1})', _uncap, six.ensure_str(stem))
def _VerifyNoSubclassOfMessageVisitor(path, parent, unused_children):
  """Traversal visitor that rejects subclasses of generated proto Messages.

  Generated proto classes themselves (direct children of message.Message)
  are allowed; anything deriving from them is not supported by the API
  tooling and fails the traversal.
  """
  if not (isinstance(parent, type) and issubclass(parent, message.Message)):
    return
  if parent is message.Message:
    return
  # Only direct subclasses of Message are acceptable.
  if message.Message not in parent.__bases__:
    raise NotImplementedError(
        'Object tf.%s is a subclass of a generated proto Message. '
        'They are not yet supported by the API tools.' % path)
def _FilterNonCoreGoldenFiles(golden_file_list):
  """Drops golden files belonging to non-core packages (e.g. estimator)."""
  prefixes = tuple('tensorflow.%s.' % p for p in _NON_CORE_PACKAGES)
  def _is_core(path):
    basename = six.ensure_str(path).rsplit('/')[-1]
    return not basename.startswith(prefixes)
  return [f for f in golden_file_list if _is_core(f)]
def _FilterGoldenProtoDict(golden_proto_dict, omit_golden_symbols_map):
  """Removes selected symbols from the parsed golden protos.

  Args:
    golden_proto_dict: {key: TFAPIObject} parsed from the golden files.
    omit_golden_symbols_map: {key: [symbol name, ...]} of members to drop,
      or falsy to return the input unchanged.

  Returns:
    A new dict; filtered entries are deep copies, so the caller's protos
    are never mutated.
  """
  if not omit_golden_symbols_map:
    return golden_proto_dict
  filtered_proto_dict = dict(golden_proto_dict)
  for key, symbol_list in six.iteritems(omit_golden_symbols_map):
    # Copy the proto before editing so the input object stays untouched.
    api_object = api_objects_pb2.TFAPIObject()
    api_object.CopyFrom(filtered_proto_dict[key])
    filtered_proto_dict[key] = api_object
    module_or_class = None
    if api_object.HasField('tf_module'):
      module_or_class = api_object.tf_module
    elif api_object.HasField('tf_class'):
      module_or_class = api_object.tf_class
    if module_or_class is not None:
      for members in (module_or_class.member, module_or_class.member_method):
        filtered_members = [m for m in members if m.name not in symbol_list]
        # Repeated proto fields can't be reassigned; clear and extend in place.
        del members[:]
        members.extend(filtered_members)
  return filtered_proto_dict
class ApiCompatibilityTest(test.TestCase):
  """Diffs the live TensorFlow API against checked-in golden .pbtxt files."""
  def __init__(self, *args, **kwargs):
    super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
    # Pre-load the messages shown when goldens differ or get rewritten.
    golden_update_warning_filename = os.path.join(
        resource_loader.get_root_dir_with_all_resources(), _UPDATE_WARNING_FILE)
    self._update_golden_warning = file_io.read_file_to_string(
        golden_update_warning_filename)
    test_readme_filename = os.path.join(
        resource_loader.get_root_dir_with_all_resources(), _TEST_README_FILE)
    self._test_readme_message = file_io.read_file_to_string(
        test_readme_filename)
  def _AssertProtoDictEquals(self,
                             expected_dict,
                             actual_dict,
                             verbose=False,
                             update_goldens=False,
                             additional_missing_object_message='',
                             api_version=2):
    """Diffs `actual_dict` against `expected_dict` key by key.

    Args:
      expected_dict: {key: TFAPIObject} parsed from the golden files.
      actual_dict: {key: TFAPIObject} extracted from the live API.
      verbose: NOTE(review): verbose_diffs are collected below but never
        logged in this version -- the flag has no visible effect here.
      update_goldens: if True, rewrite golden files to match `actual_dict`
        instead of failing the test.
      additional_missing_object_message: text appended to "removed" diffs.
      api_version: 1 or 2; selects which golden folder gets rewritten.
    """
    diffs = []
    verbose_diffs = []
    expected_keys = set(expected_dict.keys())
    actual_keys = set(actual_dict.keys())
    only_in_expected = expected_keys - actual_keys
    only_in_actual = actual_keys - expected_keys
    all_keys = expected_keys | actual_keys
    # Keys whose protos differ (as opposed to added/removed ones).
    updated_keys = []
    for key in all_keys:
      diff_message = ''
      verbose_diff_message = ''
      if key in only_in_expected:
        diff_message = 'Object %s expected but not found (removed). %s' % (
            key, additional_missing_object_message)
        verbose_diff_message = diff_message
      elif key in only_in_actual:
        diff_message = 'New object %s found (added).' % key
        verbose_diff_message = diff_message
      else:
        # Key present on both sides; compare the protos themselves.
        self.maxDiff = None
        try:
          self.assertProtoEquals(expected_dict[key], actual_dict[key])
        except AssertionError as e:
          updated_keys.append(key)
          diff_message = 'Change detected in python object: %s.' % key
          verbose_diff_message = str(e)
      if diff_message:
        diffs.append(diff_message)
        verbose_diffs.append(verbose_diff_message)
    if diffs:
      diff_count = len(diffs)
      logging.error(self._test_readme_message)
      logging.error('%d differences found between API and golden.', diff_count)
      if update_goldens:
        # Rewrite goldens in place: delete files for removed symbols,
        # (re)write files for added or changed ones.
        logging.warning(self._update_golden_warning)
        for key in only_in_expected:
          filepath = _KeyToFilePath(key, api_version)
          file_io.delete_file(filepath)
        for key in only_in_actual | set(updated_keys):
          filepath = _KeyToFilePath(key, api_version)
          file_io.write_string_to_file(
              filepath, text_format.MessageToString(actual_dict[key]))
      else:
        for d in diffs:
          logging.error(' %s', d)
        self.fail('%d differences found between API and golden.' % diff_count)
    else:
      logging.info('No differences found between API and golden.')
  def testNoSubclassOfMessage(self):
    """Top-level tf API must not contain subclasses of generated protos."""
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    visitor.private_map['tf.compat'] = ['v1', 'v2']
    traverse.traverse(tf, visitor)
  def testNoSubclassOfMessageV1(self):
    """Same proto-subclass check for the tf.compat.v1 namespace."""
    if not hasattr(tf.compat, 'v1'):
      return
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    if FLAGS.only_test_core_api:
      visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    visitor.private_map['tf.compat'] = ['v1', 'v2']
    traverse.traverse(tf.compat.v1, visitor)
  def testNoSubclassOfMessageV2(self):
    """Same proto-subclass check for the tf.compat.v2 namespace."""
    if not hasattr(tf.compat, 'v2'):
      return
    visitor = public_api.PublicAPIVisitor(_VerifyNoSubclassOfMessageVisitor)
    visitor.do_not_descend_map['tf'].append('contrib')
    if FLAGS.only_test_core_api:
      visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    visitor.private_map['tf.compat'] = ['v1', 'v2']
    traverse.traverse(tf.compat.v2, visitor)
  def _checkBackwardsCompatibility(self,
                                   root,
                                   golden_file_pattern,
                                   api_version,
                                   additional_private_map=None,
                                   omit_golden_symbols_map=None):
    """Extracts the API reachable from `root` and diffs it vs the goldens."""
    visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
    public_api_visitor = public_api.PublicAPIVisitor(visitor)
    public_api_visitor.private_map['tf'].append('contrib')
    if api_version == 2:
      public_api_visitor.private_map['tf'].append('enable_v2_behavior')
    public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
    if FLAGS.only_test_core_api:
      public_api_visitor.do_not_descend_map['tf'].extend(_NON_CORE_PACKAGES)
    if additional_private_map:
      public_api_visitor.private_map.update(additional_private_map)
    traverse.traverse(root, public_api_visitor)
    proto_dict = visitor.GetProtos()
    # Load and (optionally) filter the golden protos for comparison.
    golden_file_list = file_io.get_matching_files(golden_file_pattern)
    if FLAGS.only_test_core_api:
      golden_file_list = _FilterNonCoreGoldenFiles(golden_file_list)
    def _ReadFileToProto(filename):
      """Parses one golden .pbtxt file into a TFAPIObject proto."""
      ret_val = api_objects_pb2.TFAPIObject()
      text_format.Merge(file_io.read_file_to_string(filename), ret_val)
      return ret_val
    golden_proto_dict = {
        _FileNameToKey(filename): _ReadFileToProto(filename)
        for filename in golden_file_list
    }
    golden_proto_dict = _FilterGoldenProtoDict(golden_proto_dict,
                                               omit_golden_symbols_map)
    # Diff; when --update_goldens is set this reports and rewrites
    # instead of failing.
    self._AssertProtoDictEquals(
        golden_proto_dict,
        proto_dict,
        verbose=FLAGS.verbose_diffs,
        update_goldens=FLAGS.update_goldens,
        api_version=api_version)
  def testAPIBackwardsCompatibility(self):
    """Checks the top-level tf API against the matching golden set."""
    api_version = 1
    if hasattr(tf, '_major_api_version') and tf._major_api_version == 2:
      api_version = 2
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    omit_golden_symbols_map = {}
    if (api_version == 2 and FLAGS.only_test_core_api and
        not _TENSORBOARD_AVAILABLE):
      # These tf.summary symbols come from TensorBoard in TF 2.x.
      omit_golden_symbols_map['tensorflow.summary'] = [
          'audio', 'histogram', 'image', 'scalar', 'text'
      ]
    self._checkBackwardsCompatibility(
        tf,
        golden_file_pattern,
        api_version,
        # compat.v1/compat.v2 are covered by their own tests below.
        additional_private_map={'tf.compat': ['v1', 'v2']},
        omit_golden_symbols_map=omit_golden_symbols_map)
    # The 2.x API must not expose contrib.
    self.assertTrue(api_version == 1 or not hasattr(tf, 'contrib'))
  def testAPIBackwardsCompatibilityV1(self):
    """Checks tf.compat.v1 against the v1 goldens."""
    api_version = 1
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    self._checkBackwardsCompatibility(
        tf.compat.v1,
        golden_file_pattern,
        api_version,
        # pywrap_tensorflow is an implementation detail; hide it everywhere.
        additional_private_map={
            'tf': ['pywrap_tensorflow'],
            'tf.compat': ['v1', 'v2'],
        },
        omit_golden_symbols_map={'tensorflow': ['pywrap_tensorflow']})
  def testAPIBackwardsCompatibilityV2(self):
    """Checks tf.compat.v2 against the v2 goldens."""
    api_version = 2
    golden_file_pattern = os.path.join(
        resource_loader.get_root_dir_with_all_resources(),
        _KeyToFilePath('*', api_version))
    omit_golden_symbols_map = {}
    if FLAGS.only_test_core_api and not _TENSORBOARD_AVAILABLE:
      # These tf.summary symbols come from TensorBoard in TF 2.x.
      omit_golden_symbols_map['tensorflow.summary'] = [
          'audio', 'histogram', 'image', 'scalar', 'text'
      ]
    self._checkBackwardsCompatibility(
        tf.compat.v2,
        golden_file_pattern,
        api_version,
        additional_private_map={'tf.compat': ['v1', 'v2']},
        omit_golden_symbols_map=omit_golden_symbols_map)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP)
# a more general API compatibility test for use for TF components.
parser.add_argument(
'--only_test_core_api',
type=bool,
default=True, # only_test_core_api default value
help=_ONLY_TEST_CORE_API_HELP)
parser.add_argument(
'--verbose_diffs', type=bool, default=True, help=_VERBOSE_DIFFS_HELP)
FLAGS, unparsed = parser.parse_known_args()
_InitPathConstants()
# Now update argv, so that unittest library does not get confused.
sys.argv = [sys.argv[0]] + unparsed
test.main()
| true | true |
1c3b884e735121a071a27e02fa0a5822a54cc9b7 | 986 | py | Python | app/people/utils.py | kiprotichdominic/Moringa-Project-Pitch | 96d532205a82941eb8b9802715815e1aadf0408f | [
"MIT"
] | null | null | null | app/people/utils.py | kiprotichdominic/Moringa-Project-Pitch | 96d532205a82941eb8b9802715815e1aadf0408f | [
"MIT"
] | 3 | 2021-06-08T20:49:09.000Z | 2022-03-12T00:11:37.000Z | app/people/utils.py | kiprotichdominic/Moringa-Project-Pitch | 96d532205a82941eb8b9802715815e1aadf0408f | [
"MIT"
] | null | null | null | import os
import secrets
from PIL import Image
from flask import url_for, current_app
from flask_mail import Message
from app import mail
def save_picture(form_picture):
    """Store an uploaded profile picture as a 125x125 thumbnail.

    The file is renamed to a random hex string (keeping the original
    extension) to avoid collisions, saved under static/profile_pics,
    and the generated filename is returned for storage on the user record.
    """
    _, extension = os.path.splitext(form_picture.filename)
    filename = secrets.token_hex(8) + extension
    destination = os.path.join(
        current_app.root_path, "static/profile_pics", filename)
    thumbnail = Image.open(form_picture)
    thumbnail.thumbnail((125, 125))
    thumbnail.save(destination)
    return filename
def send_reset_email(user):
    """Email a password-reset link to *user*.

    The token embedded in the link is produced (and later validated) by
    the user model's get_reset_token(); delivery uses the app's
    Flask-Mail configuration.
    """
    token = user.get_reset_token()
    # NOTE(review): sender address is hard-coded rather than taken from
    # config (e.g. MAIL_DEFAULT_SENDER) -- consider centralising.
    msg = Message('Password Reset Request',
                  sender='kiprotichkorir36@gmail.com',
                  recipients=[user.email])
    msg.body = f'''To reset your password, visit the following link:
{url_for('user.reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
    mail.send(msg)
import secrets
from PIL import Image
from flask import url_for, current_app
from flask_mail import Message
from app import mail
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(
current_app.root_path, "static/profile_pics", picture_fn)
output_size = (125,125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
def send_reset_email(user):
token = user.get_reset_token()
msg = Message('Password Reset Request',
sender='kiprotichkorir36@gmail.com',
recipients=[user.email])
msg.body = f'''To reset your password, visit the following link:
{url_for('user.reset_token', token=token, _external=True)}
If you did not make this request then simply ignore this email and no changes will be made.
'''
mail.send(msg) | true | true |
1c3b88c9aac619ae987c9d5fcf65026bf68d19ee | 1,355 | py | Python | test/test_segment_word.py | bertsky/ocrd_tesserocr | c0e1440a53722d617e356901cec79e14b7999c94 | [
"MIT"
] | 37 | 2018-04-16T20:18:25.000Z | 2022-03-06T09:06:12.000Z | test/test_segment_word.py | bertsky/ocrd_tesserocr | c0e1440a53722d617e356901cec79e14b7999c94 | [
"MIT"
] | 162 | 2018-04-18T12:17:53.000Z | 2022-03-09T11:07:36.000Z | test/test_segment_word.py | bertsky/ocrd_tesserocr | c0e1440a53722d617e356901cec79e14b7999c94 | [
"MIT"
] | 12 | 2018-04-11T11:56:22.000Z | 2021-02-12T15:12:13.000Z | import os
import shutil
from test.base import TestCase, main, assets
from ocrd import Resolver
from ocrd_tesserocr import TesserocrSegmentRegion
from ocrd_tesserocr import TesserocrSegmentLine
from ocrd_tesserocr import TesserocrSegmentWord
#METS_HEROLD_SMALL = assets.url_of('SBB0000F29300010000/mets_one_file.xml')
METS_HEROLD_SMALL = assets.url_of('kant_aufklaerung_1784-binarized/data/mets.xml')
WORKSPACE_DIR = '/tmp/pyocrd-test-segment-word-tesserocr'
class TestProcessorSegmentWordTesseract(TestCase):
    """Integration test: run Tesseract region, line and word segmentation
    in sequence on a small sample workspace and persist the result."""
    def setUp(self):
        # Start every run from a clean scratch workspace.
        if os.path.exists(WORKSPACE_DIR):
            shutil.rmtree(WORKSPACE_DIR)
        os.makedirs(WORKSPACE_DIR)
    def runTest(self):
        """Chains the three segmentation processors over the sample METS."""
        resolver = Resolver()
        workspace = resolver.workspace_from_url(METS_HEROLD_SMALL, dst_dir=WORKSPACE_DIR)
        # Each processor consumes the previous step's output file group.
        TesserocrSegmentRegion(
            workspace,
            input_file_grp="OCR-D-IMG",
            output_file_grp="OCR-D-SEG-BLOCK"
        ).process()
        TesserocrSegmentLine(
            workspace,
            input_file_grp="OCR-D-SEG-BLOCK",
            output_file_grp="OCR-D-SEG-LINE"
        ).process()
        TesserocrSegmentWord(
            workspace,
            input_file_grp="OCR-D-SEG-LINE",
            output_file_grp="OCR-D-SEG-WORD"
        ).process()
        # Persist the updated METS so the new file groups are recorded.
        workspace.save_mets()
if __name__ == '__main__':
    main()
| 30.111111 | 89 | 0.684871 | import os
import shutil
from test.base import TestCase, main, assets
from ocrd import Resolver
from ocrd_tesserocr import TesserocrSegmentRegion
from ocrd_tesserocr import TesserocrSegmentLine
from ocrd_tesserocr import TesserocrSegmentWord
METS_HEROLD_SMALL = assets.url_of('kant_aufklaerung_1784-binarized/data/mets.xml')
WORKSPACE_DIR = '/tmp/pyocrd-test-segment-word-tesserocr'
class TestProcessorSegmentWordTesseract(TestCase):
def setUp(self):
if os.path.exists(WORKSPACE_DIR):
shutil.rmtree(WORKSPACE_DIR)
os.makedirs(WORKSPACE_DIR)
def runTest(self):
resolver = Resolver()
workspace = resolver.workspace_from_url(METS_HEROLD_SMALL, dst_dir=WORKSPACE_DIR)
TesserocrSegmentRegion(
workspace,
input_file_grp="OCR-D-IMG",
output_file_grp="OCR-D-SEG-BLOCK"
).process()
TesserocrSegmentLine(
workspace,
input_file_grp="OCR-D-SEG-BLOCK",
output_file_grp="OCR-D-SEG-LINE"
).process()
TesserocrSegmentWord(
workspace,
input_file_grp="OCR-D-SEG-LINE",
output_file_grp="OCR-D-SEG-WORD"
).process()
workspace.save_mets()
if __name__ == '__main__':
main()
| true | true |
1c3b89ac20e89e529baf47b178aa860d90f2e7ed | 3,901 | py | Python | cinder/api/v1/router.py | mail2nsrajesh/cinder | a688b872bec6d1abd4dcd852bdb8e8a921369d2e | [
"Apache-2.0"
] | null | null | null | cinder/api/v1/router.py | mail2nsrajesh/cinder | a688b872bec6d1abd4dcd852bdb8e8a921369d2e | [
"Apache-2.0"
] | 2 | 2018-10-25T13:04:01.000Z | 2019-08-17T13:15:24.000Z | cinder/api/v1/router.py | mail2nsrajesh/cinder | a688b872bec6d1abd4dcd852bdb8e8a921369d2e | [
"Apache-2.0"
] | 2 | 2018-10-17T13:32:50.000Z | 2018-11-08T08:39:39.000Z | # Copyright 2011 OpenStack Foundation
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack Volume API.
"""
from cinder.api import extensions
import cinder.api.openstack
from cinder.api.v1 import snapshots
from cinder.api.v1 import volumes
from cinder.api.v2 import limits
from cinder.api.v2 import snapshot_metadata
from cinder.api.v2 import types
from cinder.api.v2 import volume_metadata
from cinder.api import versions
class APIRouter(cinder.api.openstack.APIRouter):
    """Routes requests on the API to the appropriate controller and method."""
    # Extension manager class the base APIRouter uses to load extensions.
    ExtensionManager = extensions.ExtensionManager
    def _setup_routes(self, mapper, ext_mgr):
        """Registers all v1 resource routes on the Routes mapper.

        :param mapper: routes mapper the resources are registered on
        :param ext_mgr: extension manager handed to extension-aware resources
        """
        # Version discovery at the API root.
        self.resources['versions'] = versions.create_resource()
        mapper.connect("versions", "/",
                       controller=self.resources['versions'],
                       action='index')
        mapper.redirect("", "/")
        # Volumes: CRUD plus /detail listing and POST member actions.
        self.resources['volumes'] = volumes.create_resource(ext_mgr)
        mapper.resource("volume", "volumes",
                        controller=self.resources['volumes'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})
        # Volume types.
        self.resources['types'] = types.create_resource()
        mapper.resource("type", "types",
                        controller=self.resources['types'])
        # Snapshots: CRUD plus /detail listing and POST member actions.
        self.resources['snapshots'] = snapshots.create_resource(ext_mgr)
        mapper.resource("snapshot", "snapshots",
                        controller=self.resources['snapshots'],
                        collection={'detail': 'GET'},
                        member={'action': 'POST'})
        # Snapshot metadata, nested under /snapshots/{id}/metadata, with an
        # extra PUT route to replace the whole metadata dict at once.
        self.resources['snapshot_metadata'] = \
            snapshot_metadata.create_resource()
        snapshot_metadata_controller = self.resources['snapshot_metadata']
        mapper.resource("snapshot_metadata", "metadata",
                        controller=snapshot_metadata_controller,
                        parent_resource=dict(member_name='snapshot',
                                             collection_name='snapshots'))
        mapper.connect("metadata",
                       "/{project_id}/snapshots/{snapshot_id}/metadata",
                       controller=snapshot_metadata_controller,
                       action='update_all',
                       conditions={"method": ['PUT']})
        # Rate/absolute limits.
        self.resources['limits'] = limits.create_resource()
        mapper.resource("limit", "limits",
                        controller=self.resources['limits'])
        # Volume metadata, mirroring the snapshot metadata wiring above.
        self.resources['volume_metadata'] = \
            volume_metadata.create_resource()
        volume_metadata_controller = self.resources['volume_metadata']
        mapper.resource("volume_metadata", "metadata",
                        controller=volume_metadata_controller,
                        parent_resource=dict(member_name='volume',
                                             collection_name='volumes'))
        mapper.connect("metadata",
                       "/{project_id}/volumes/{volume_id}/metadata",
                       controller=volume_metadata_controller,
                       action='update_all',
                       conditions={"method": ['PUT']})
| 41.946237 | 78 | 0.616765 |
from cinder.api import extensions
import cinder.api.openstack
from cinder.api.v1 import snapshots
from cinder.api.v1 import volumes
from cinder.api.v2 import limits
from cinder.api.v2 import snapshot_metadata
from cinder.api.v2 import types
from cinder.api.v2 import volume_metadata
from cinder.api import versions
class APIRouter(cinder.api.openstack.APIRouter):
ExtensionManager = extensions.ExtensionManager
def _setup_routes(self, mapper, ext_mgr):
self.resources['versions'] = versions.create_resource()
mapper.connect("versions", "/",
controller=self.resources['versions'],
action='index')
mapper.redirect("", "/")
self.resources['volumes'] = volumes.create_resource(ext_mgr)
mapper.resource("volume", "volumes",
controller=self.resources['volumes'],
collection={'detail': 'GET'},
member={'action': 'POST'})
self.resources['types'] = types.create_resource()
mapper.resource("type", "types",
controller=self.resources['types'])
self.resources['snapshots'] = snapshots.create_resource(ext_mgr)
mapper.resource("snapshot", "snapshots",
controller=self.resources['snapshots'],
collection={'detail': 'GET'},
member={'action': 'POST'})
self.resources['snapshot_metadata'] = \
snapshot_metadata.create_resource()
snapshot_metadata_controller = self.resources['snapshot_metadata']
mapper.resource("snapshot_metadata", "metadata",
controller=snapshot_metadata_controller,
parent_resource=dict(member_name='snapshot',
collection_name='snapshots'))
mapper.connect("metadata",
"/{project_id}/snapshots/{snapshot_id}/metadata",
controller=snapshot_metadata_controller,
action='update_all',
conditions={"method": ['PUT']})
self.resources['limits'] = limits.create_resource()
mapper.resource("limit", "limits",
controller=self.resources['limits'])
self.resources['volume_metadata'] = \
volume_metadata.create_resource()
volume_metadata_controller = self.resources['volume_metadata']
mapper.resource("volume_metadata", "metadata",
controller=volume_metadata_controller,
parent_resource=dict(member_name='volume',
collection_name='volumes'))
mapper.connect("metadata",
"/{project_id}/volumes/{volume_id}/metadata",
controller=volume_metadata_controller,
action='update_all',
conditions={"method": ['PUT']})
| true | true |
1c3b8a6a66224b1070bb699a4bf3678c4a18043d | 4,090 | py | Python | tests3/testutils.py | lidonglifighting/pyodbc-dbmaker | 38d97cdeb05f3b4caf28b4131a85a5c66f999cd4 | [
"MIT-0"
] | null | null | null | tests3/testutils.py | lidonglifighting/pyodbc-dbmaker | 38d97cdeb05f3b4caf28b4131a85a5c66f999cd4 | [
"MIT-0"
] | null | null | null | tests3/testutils.py | lidonglifighting/pyodbc-dbmaker | 38d97cdeb05f3b4caf28b4131a85a5c66f999cd4 | [
"MIT-0"
] | null | null | null | import os, sys, platform
from os.path import join, dirname, abspath, basename, isdir
import unittest
def add_to_path(library):
    """
    Prepends the build directory to the path so that newly built pyodbc or pyiodbc libraries
    are used, allowing it to be tested without installing it.

    * library: The library to load: pyodbc or pyiodbc
    """
    # Put the build directory into the Python path so we pick up the version we just built.
    #
    # To make this cross platform, we'll search the directories until we find the extension module.

    # `imp` was deprecated in Python 3.4 and removed in 3.12;
    # importlib.machinery exposes the same C-extension suffixes.
    from importlib.machinery import EXTENSION_SUFFIXES
    library_names = ['%s%s' % (library, ext) for ext in EXTENSION_SUFFIXES]

    # Only go into directories that match our version number.
    dir_suffix = '-%s.%s' % (sys.version_info[0], sys.version_info[1])

    root = dirname(dirname(abspath(__file__)))
    build = join(root, library, 'build')
    if not isdir(build):
        sys.exit('Build dir not found: %s' % build)

    for root, dirs, files in os.walk(build):
        for d in dirs[:]:
            if not d.endswith(dir_suffix):
                dirs.remove(d)

        for name in library_names:
            if name in files:
                sys.path.insert(0, root)
                return

    print('Did not find the %s library in the build directory (%s). Will use an installed version.' %
          (library, build))
def print_library_info(name, module, cnxn):
    """Print version/driver diagnostics for a database module and connection.

    * name: human-readable library name (e.g. 'pyodbc')
    * module: the imported driver module
    * cnxn: an open connection; queried via getinfo for ODBC details
    """
    print('python: %s' % sys.version)
    print('%s: %s %s' % (name, module.version, os.path.abspath(module.__file__)))
    print('odbc: %s' % cnxn.getinfo(module.SQL_ODBC_VER))
    print('driver: %s %s' % (cnxn.getinfo(module.SQL_DRIVER_NAME), cnxn.getinfo(module.SQL_DRIVER_VER)))
    print(' supports ODBC version %s' % cnxn.getinfo(module.SQL_DRIVER_ODBC_VER))
    print('os: %s' % platform.system())
    print('unicode: Py_Unicode=%s SQLWCHAR=%s' % (module.UNICODE_SIZE, module.SQLWCHAR_SIZE))
    # Report the driver's maximum column sizes for common column types.
    cursor = cnxn.cursor()
    for typename in ['VARCHAR', 'WVARCHAR', 'BINARY']:
        t = getattr(module, 'SQL_' + typename)
        cursor.getTypeInfo(t)
        row = cursor.fetchone()
        print('Max %s = %s' % (typename, row and row[2] or '(not supported)'))
    if platform.system() == 'Windows':
        print(' %s' % ' '.join([s for s in platform.win32_ver() if s]))
def load_tests(testclass, name, *args):
"""
Returns a TestSuite for tests in `testclass`.
name
Optional test name if you only want to run 1 test. If not provided all tests in `testclass` will be loaded.
args
Arguments for the test class constructor. These will be passed after the test method name.
"""
if name:
if not name.startswith('test_'):
name = 'test_%s' % name
names = [ name ]
else:
names = [ method for method in dir(testclass) if method.startswith('test_') ]
return unittest.TestSuite([ testclass(name, *args) for name in names ])
def load_setup_connection_string(section):
    """
    Attempts to read the default connection string from the setup.cfg file.

    If the file does not exist or if it exists but does not contain the
    connection string, None is returned.  If the file exists but cannot be
    parsed, SystemExit is raised.
    """
    from os.path import exists, join, dirname, abspath
    # SafeConfigParser was a deprecated alias of ConfigParser and was
    # removed in Python 3.12; ConfigParser behaves identically here.
    from configparser import ConfigParser

    FILENAME = 'setup.cfg'
    KEY = 'connection-string'

    # Walk up from this file's directory looking for tmp/setup.cfg.
    path = dirname(abspath(__file__))
    while True:
        fqn = join(path, 'tmp', FILENAME)
        if exists(fqn):
            break
        parent = dirname(path)
        if parent == path:
            # Reached the filesystem root without finding the file.
            return None
        path = parent

    try:
        p = ConfigParser()
        p.read(fqn)
    except Exception:
        # Report the actual file that failed to parse, not just its directory.
        raise SystemExit('Unable to parse %s: %s' % (fqn, sys.exc_info()[1]))

    if p.has_option(section, KEY):
        return p.get(section, KEY)
    return None
| 34.661017 | 117 | 0.610269 | import os, sys, platform
from os.path import join, dirname, abspath, basename, isdir
import unittest
def add_to_path(library):
import imp
library_exts = [ t[0] for t in imp.get_suffixes() if t[-1] == imp.C_EXTENSION ]
library_names = [ '%s%s' % (library, ext) for ext in library_exts ]
# Only go into directories that match our version number.
dir_suffix = '-%s.%s' % (sys.version_info[0], sys.version_info[1])
root = dirname(dirname(abspath(__file__)))
build = join(root, library, 'build')
if not isdir(build):
sys.exit('Build dir not found: %s' % build)
for root, dirs, files in os.walk(build):
for d in dirs[:]:
if not d.endswith(dir_suffix):
dirs.remove(d)
for name in library_names:
if name in files:
sys.path.insert(0, root)
return
print('Did not find the %s library in the build directory (%s). Will use an installed version.' %
(library, build))
def print_library_info(name, module, cnxn):
print('python: %s' % sys.version)
print('%s: %s %s' % (name, module.version, os.path.abspath(module.__file__)))
print('odbc: %s' % cnxn.getinfo(module.SQL_ODBC_VER))
print('driver: %s %s' % (cnxn.getinfo(module.SQL_DRIVER_NAME), cnxn.getinfo(module.SQL_DRIVER_VER)))
print(' supports ODBC version %s' % cnxn.getinfo(module.SQL_DRIVER_ODBC_VER))
print('os: %s' % platform.system())
print('unicode: Py_Unicode=%s SQLWCHAR=%s' % (module.UNICODE_SIZE, module.SQLWCHAR_SIZE))
cursor = cnxn.cursor()
for typename in ['VARCHAR', 'WVARCHAR', 'BINARY']:
t = getattr(module, 'SQL_' + typename)
cursor.getTypeInfo(t)
row = cursor.fetchone()
print('Max %s = %s' % (typename, row and row[2] or '(not supported)'))
if platform.system() == 'Windows':
print(' %s' % ' '.join([s for s in platform.win32_ver() if s]))
def load_tests(testclass, name, *args):
if name:
if not name.startswith('test_'):
name = 'test_%s' % name
names = [ name ]
else:
names = [ method for method in dir(testclass) if method.startswith('test_') ]
return unittest.TestSuite([ testclass(name, *args) for name in names ])
def load_setup_connection_string(section):
from os.path import exists, join, dirname, splitext, basename
from configparser import SafeConfigParser
FILENAME = 'setup.cfg'
KEY = 'connection-string'
path = dirname(abspath(__file__))
while True:
fqn = join(path, 'tmp', FILENAME)
if exists(fqn):
break
parent = dirname(path)
if parent == path:
return None
path = parent
try:
p = SafeConfigParser()
p.read(fqn)
except:
raise SystemExit('Unable to parse %s: %s' % (path, sys.exc_info()[1]))
if p.has_option(section, KEY):
return p.get(section, KEY)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.