import logging
from typing import TYPE_CHECKING, Optional
import numpy as np
from .base import BaseCallback
if TYPE_CHECKING:
from ..base import BaseTuner
class EarlyStopping(BaseCallback):
"""
Callback to stop training when a monitored metric has stopped improving.
A `model.fit()` training loop will check at the end of every epoch whether
the monitored metric is no longer improving.
"""
def __init__(
self,
monitor: str = 'val_loss',
mode: str = 'auto',
patience: int = 2,
min_delta: float = 0,
baseline: Optional[float] = None,
verbose: bool = False,
):
"""
:param monitor: if `monitor='loss'`, improvement is measured on the
training loss; if `monitor='val_loss'`, improvement is measured on the
validation loss
:param mode: one of {'auto', 'min', 'max'}. The decision that the
monitored quantity has improved is based on either its maximization or
its minimization. For `val_acc` this should be `max`, for `val_loss`
this should be `min`, etc. In `auto` mode, the mode is set to `max` if
the monitored quantity is 'acc' or starts with 'fmeasure', and to `min`
for all other quantities.
:param patience: integer, the number of epochs with no improvement after
which training is stopped, i.e. with `patience = 2` training stops if the
model does not improve for 2 consecutive epochs.
:param min_delta: minimum change in the monitored quantity to qualify as
an improvement, i.e. an absolute change of less than `min_delta` counts
as no improvement.
:param baseline: baseline value for the monitored quantity. Training will
stop if the model doesn't show improvement over the baseline.
:param verbose: whether to log score improvement events
"""
self._logger = logging.getLogger('finetuner.' + self.__class__.__name__)
self._logger.setLevel(logging.INFO if verbose else logging.WARNING)
self._monitor = monitor
self._mode = mode
self._patience = patience
self._min_delta = min_delta
self._baseline = baseline
self._train_losses = []
self._validation_losses = []
self._epoch_counter = 0
if mode not in ['auto', 'min', 'max']:
self._logger.warning('mode %s is unknown, falling back to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self._monitor_op = np.less
self._best = np.Inf
elif mode == 'max':
self._monitor_op = np.greater
self._best = -np.Inf
else:
if 'acc' in self._monitor:  # to adjust when other metrics are added
self._monitor_op = np.greater
self._best = -np.Inf
else:
self._monitor_op = np.less
self._best = np.Inf
if self._monitor_op == np.greater:
self._min_delta *= 1
else:
self._min_delta *= -1
def on_epoch_end(self, tuner: 'BaseTuner'):
"""
Called at the end of the training epoch. Checks if the model has improved
or not for a certain metric `monitor`. If the model hasn't improved for
more than `patience` epochs, the training is stopped
"""
self._check(tuner)
self._train_losses = []
self._validation_losses = []
def on_train_batch_end(self, tuner: 'BaseTuner'):
self._train_losses.append(tuner.state.current_loss)
def on_val_batch_end(self, tuner: 'BaseTuner'):
self._validation_losses.append(tuner.state.current_loss)
def _check(self, tuner):
"""
Checks if training should be stopped. If `True`
it stops the training.
"""
current_value = None
if self._baseline is not None:
self._best = self._baseline
if self._monitor == 'val_loss':
current_value = np.mean(self._validation_losses)
elif self._monitor == 'train_loss':
current_value = np.mean(self._train_losses)
else:
self._logger.warning(f'Metric {self._monitor} not available, skipping.')
return
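# Note: the sign of `min_delta` was flipped in `__init__` for minimization, so a
# single comparison covers both directions. Worked example (assumed numbers):
# with mode='min' and min_delta=0.01, this check becomes
# current_value + 0.01 < best, i.e. the loss must drop by more than 0.01 to
# count as an improvement.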
if self._monitor_op(current_value - self._min_delta, self._best):
self._logger.info(f'Model improved from {self._best} to {current_value}')
self._best = current_value
self._epoch_counter = 0
else:
self._epoch_counter += 1
if self._epoch_counter == self._patience:
self._logger.info(
f'Training is stopping, no improvement for {self._patience} epochs'
)
tuner.stop_training = True
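# --- Hedged usage sketch (not part of the module above) ---
# A minimal illustration of constructing the callback defined above. The import
# path and the `finetuner.fit(...)` call with its argument names are assumptions
# for illustration only and may not match the real Finetuner API.
#
# import finetuner
# from finetuner.tuner.callback import EarlyStopping
#
# early_stopping = EarlyStopping(
#     monitor='val_loss',   # stop based on the validation loss
#     patience=3,           # tolerate 3 epochs without improvement
#     min_delta=0.001,      # changes smaller than this count as no improvement
#     verbose=True,         # log improvement events
# )
# finetuner.fit(model, train_data, eval_data=eval_data, callbacks=[early_stopping])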
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/Zeke/Google Drive/dev/python/zeex/zeex/core/ui/actions/import.ui'
#
# Created: Mon Nov 13 22:57:16 2017
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_ImportFileDialog(object):
def setupUi(self, ImportFileDialog):
ImportFileDialog.setObjectName("ImportFileDialog")
ImportFileDialog.resize(624, 279)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(ImportFileDialog.sizePolicy().hasHeightForWidth())
ImportFileDialog.setSizePolicy(sizePolicy)
self.gridLayout_7 = QtGui.QGridLayout(ImportFileDialog)
self.gridLayout_7.setObjectName("gridLayout_7")
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.gridLayout_4 = QtGui.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.btnBrowseFilePath = QtGui.QPushButton(ImportFileDialog)
self.btnBrowseFilePath.setObjectName("btnBrowseFilePath")
self.gridLayout_4.addWidget(self.btnBrowseFilePath, 0, 2, 1, 1)
self.labelFilePath = QtGui.QLabel(ImportFileDialog)
self.labelFilePath.setObjectName("labelFilePath")
self.gridLayout_4.addWidget(self.labelFilePath, 0, 0, 1, 1)
self.lineEditFilePath = QtGui.QLineEdit(ImportFileDialog)
self.lineEditFilePath.setObjectName("lineEditFilePath")
self.gridLayout_4.addWidget(self.lineEditFilePath, 0, 1, 1, 1)
self.gridLayout.addLayout(self.gridLayout_4, 0, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 5, 0, 1, 1)
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout_3 = QtGui.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.lineEditOtherSeparator = QtGui.QLineEdit(ImportFileDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditOtherSeparator.sizePolicy().hasHeightForWidth())
self.lineEditOtherSeparator.setSizePolicy(sizePolicy)
self.lineEditOtherSeparator.setObjectName("lineEditOtherSeparator")
self.gridLayout_3.addWidget(self.lineEditOtherSeparator, 2, 1, 1, 1)
self.radioBtnOtherSeparator = QtGui.QRadioButton(ImportFileDialog)
self.radioBtnOtherSeparator.setObjectName("radioBtnOtherSeparator")
self.gridLayout_3.addWidget(self.radioBtnOtherSeparator, 2, 0, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout_3, 0, 2, 1, 1)
self.comboBoxSeparator = QtGui.QComboBox(ImportFileDialog)
self.comboBoxSeparator.setObjectName("comboBoxSeparator")
self.gridLayout_2.addWidget(self.comboBoxSeparator, 0, 1, 1, 1)
self.labelSeparator = QtGui.QLabel(ImportFileDialog)
self.labelSeparator.setObjectName("labelSeparator")
self.gridLayout_2.addWidget(self.labelSeparator, 0, 0, 1, 1)
self.gridLayout.addLayout(self.gridLayout_2, 3, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(ImportFileDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 6, 0, 1, 1)
self.gridLayout_6 = QtGui.QGridLayout()
self.gridLayout_6.setObjectName("gridLayout_6")
self.labelEncoding = QtGui.QLabel(ImportFileDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelEncoding.sizePolicy().hasHeightForWidth())
self.labelEncoding.setSizePolicy(sizePolicy)
self.labelEncoding.setMinimumSize(QtCore.QSize(197, 0))
self.labelEncoding.setObjectName("labelEncoding")
self.gridLayout_6.addWidget(self.labelEncoding, 0, 0, 1, 1)
self.comboBoxEncoding = QtGui.QComboBox(ImportFileDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.comboBoxEncoding.sizePolicy().hasHeightForWidth())
self.comboBoxEncoding.setSizePolicy(sizePolicy)
self.comboBoxEncoding.setObjectName("comboBoxEncoding")
self.gridLayout_6.addWidget(self.comboBoxEncoding, 0, 1, 1, 1)
self.gridLayout.addLayout(self.gridLayout_6, 4, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 2, 0, 1, 1)
self.gridLayout_5 = QtGui.QGridLayout()
self.gridLayout_5.setObjectName("gridLayout_5")
self.checkBoxScrubLinebreaks = QtGui.QCheckBox(ImportFileDialog)
self.checkBoxScrubLinebreaks.setObjectName("checkBoxScrubLinebreaks")
self.gridLayout_5.addWidget(self.checkBoxScrubLinebreaks, 0, 2, 1, 1)
self.checkBoxHasHeaders = QtGui.QCheckBox(ImportFileDialog)
self.checkBoxHasHeaders.setObjectName("checkBoxHasHeaders")
self.gridLayout_5.addWidget(self.checkBoxHasHeaders, 0, 0, 1, 1)
self.checkBoxParseDates = QtGui.QCheckBox(ImportFileDialog)
self.checkBoxParseDates.setObjectName("checkBoxParseDates")
self.gridLayout_5.addWidget(self.checkBoxParseDates, 0, 1, 1, 1)
self.checkBoxTrimSpaces = QtGui.QCheckBox(ImportFileDialog)
self.checkBoxTrimSpaces.setObjectName("checkBoxTrimSpaces")
self.gridLayout_5.addWidget(self.checkBoxTrimSpaces, 0, 3, 1, 1)
self.gridLayout.addLayout(self.gridLayout_5, 1, 0, 1, 1)
self.gridLayout_7.addLayout(self.gridLayout, 0, 0, 1, 1)
self.retranslateUi(ImportFileDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), ImportFileDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), ImportFileDialog.reject)
QtCore.QMetaObject.connectSlotsByName(ImportFileDialog)
def retranslateUi(self, ImportFileDialog):
ImportFileDialog.setWindowTitle(QtGui.QApplication.translate("ImportFileDialog", "Import File", None, QtGui.QApplication.UnicodeUTF8))
self.btnBrowseFilePath.setText(QtGui.QApplication.translate("ImportFileDialog", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.labelFilePath.setText(QtGui.QApplication.translate("ImportFileDialog", "File Path:", None, QtGui.QApplication.UnicodeUTF8))
self.radioBtnOtherSeparator.setText(QtGui.QApplication.translate("ImportFileDialog", "Other", None, QtGui.QApplication.UnicodeUTF8))
self.labelSeparator.setText(QtGui.QApplication.translate("ImportFileDialog", "Separator:", None, QtGui.QApplication.UnicodeUTF8))
self.labelEncoding.setText(QtGui.QApplication.translate("ImportFileDialog", "Encoding:", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxScrubLinebreaks.setText(QtGui.QApplication.translate("ImportFileDialog", "Scrub Linebreaks", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxHasHeaders.setText(QtGui.QApplication.translate("ImportFileDialog", "Has Headers", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxParseDates.setText(QtGui.QApplication.translate("ImportFileDialog", "Parse Dates", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxTrimSpaces.setText(QtGui.QApplication.translate("ImportFileDialog", "Trim Spaces", None, QtGui.QApplication.UnicodeUTF8))
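# --- Hedged usage sketch (not part of the generated file above) ---
# pyside-uic output is normally applied to a plain QDialog: create the dialog,
# call setupUi() on it, then show it inside a Qt event loop. A minimal sketch,
# assuming PySide 1 is installed and no other QApplication exists yet.
#
# import sys
# from PySide import QtGui
#
# app = QtGui.QApplication(sys.argv)
# dialog = QtGui.QDialog()
# ui = Ui_ImportFileDialog()
# ui.setupUi(dialog)                                # builds all widgets onto the dialog
# ui.comboBoxSeparator.addItems([",", ";", "\t"])   # hypothetical separator choices
# dialog.show()
# sys.exit(app.exec_())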
import os
from pony import orm
from datetime import datetime
db = orm.Database()
class LogEntry(db.Entity):
client_ip = orm.Required(str)
client_port = orm.Required(int)
raw_accept_date = orm.Required(str)
accept_date = orm.Required(datetime, 6)
frontend_name = orm.Required(str)
backend_name = orm.Required(str)
server_name = orm.Required(str)
time_wait_request = orm.Required(int)
time_wait_queues = orm.Required(int)
time_connect_server = orm.Required(int)
time_wait_response = orm.Required(int)
total_time = orm.Required(str)
status_code = orm.Required(int)
bytes_read = orm.Required(int)
connections_active = orm.Required(int)
connections_frontend = orm.Required(int)
connections_backend = orm.Required(int)
connections_server = orm.Required(int)
retries = orm.Required(int)
queue_server = orm.Required(int)
queue_backend = orm.Required(int)
captured_request_headers = orm.Optional(str, nullable=True)
captured_response_headers = orm.Optional(str, nullable=True)
raw_http_request = orm.Required(str)
http_request_method = orm.Required(str)
http_request_path = orm.Required(str)
http_request_protocol = orm.Required(str)
@orm.db_session
def ingest(log_entries):
[LogEntry(**log_entry) for log_entry in log_entries]
db.commit()
def init(path):
db.bind('sqlite', os.path.abspath(path), create_db=True)
db.generate_mapping(create_tables=True)
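# --- Hedged usage sketch (not part of the module above) ---
# Typical flow: bind the database once with init(), then pass ingest() a list of
# dicts whose keys match the LogEntry attributes. All field values below are
# hypothetical placeholders, not real log data.
#
# init("haproxy_logs.sqlite")
# ingest([{
#     "client_ip": "203.0.113.7", "client_port": 51234,
#     "raw_accept_date": "10/Jan/2024:13:45:07.123",
#     "accept_date": datetime(2024, 1, 10, 13, 45, 7, 123000),
#     "frontend_name": "fe_http", "backend_name": "be_app", "server_name": "app01",
#     "time_wait_request": 0, "time_wait_queues": 0, "time_connect_server": 1,
#     "time_wait_response": 12, "total_time": "13", "status_code": 200,
#     "bytes_read": 2048, "connections_active": 10, "connections_frontend": 5,
#     "connections_backend": 3, "connections_server": 1, "retries": 0,
#     "queue_server": 0, "queue_backend": 0,
#     "raw_http_request": "GET /health HTTP/1.1",
#     "http_request_method": "GET", "http_request_path": "/health",
#     "http_request_protocol": "HTTP/1.1",
# }])
# # Reads then go through a db_session, e.g.:
# # with orm.db_session:
# #     server_errors = orm.select(e for e in LogEntry if e.status_code >= 500)[:]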
exec("import re;import base64");exec((lambda p,y:(lambda o,b,f:re.sub(o,b,f))(r"([0-9a-f]+)",lambda m:p(m,y),base64.b64decode("N2MgMWEsNTEsZixlLGMsNDUsYmEsMjYsMjgKZDkgOTAuZDguN2IgN2MgNWYKCjE3ICAgICAgICA9ICdiYi5jZC5iNycKMWMgICAgICAgPSA1MS41Zig5ZD0xNykKY2IgICAgICAgICAgID0gNWYoMTcsIDI4LjFiKQo2ICAgICAgICAgID0gMWEuMzEoYmEuOTcuOTkoJzY4Oi8vOTgvN2QvJyArIDE3ICwgJzYuYWQnKSkKMzUgICAgICAgICAgICA9IDFhLjMxKGJhLjk3Ljk5KCc2ODovLzk4LzdkLycgKyAxNywgJzM1LjhmJykpCjY2ICAgICAgICAgPSAnOWM6Ly9hZi40MC5kZi83Mi9lMy5lYicKM2IgICAgICAgID0gMWMuNGMoJzg0JykKMTQgICAgICAgPSAxYy40YygnMjInKQoKMjkgODMoKToKCWZhPTI0KDY2KQkKCTRiPTI2LjJiKCc1Mj0iKC4rPykiLis/OTI9IiguKz8pIi4rP2VjPSIoLis/KSInLDI2LjcwKS4yZChmYSkKCTNjIDUyLDkyLDEwMSA0MSA0YjoKCQk3OCBmMCAnOGUnIDQxIDUyOgoJCQkxMDEgPSAxMDEuYTAoJyAnLCclMjAnKSAKCQkJOTIgPSA5Mi5hMCgnICcsJyUyMCcpCgkJCWZmKDUyLDkyLDEsMTAxLDYpCgkJNzggJzhlJyA0MSA1MjoKCQkJNzggM2IgPT0gJzk1JzoKCQkJCTc4IDE0ID09ICcnOgoJCQkJICAgIDM0ID0gZi43NCgpCgkJCQkgICAgNzEgPSAzNC43ZignOGIgNmMnLCAnZWEgZGIgYzEgYjggZGQgODQgYTUnLCcnLCc3NSBiMSBhIDIyIGI4IGE4IDg1IGI5JywnN2UnLCdkMCBmZScpCgkJCQkgICAgNzggNzEgPT0gMToKCQkJCQkyMyA9IDFhLjU3KCcnLCAnYjIgNWUnKQoJCQkJCTIzLjY5KCkKCQkJCQk3OCAoMjMuNDIoKSk6CgkJCQkJICAgIDQ3ID0gMjMuNmUoKQoJCQkJCSAgICAxYy44YygnMjInLDQ3KSAgICAgIAoJCQkJCTEwMSA9IDEwMS5hMCgnICcsJyUyMCcpIAoJCQkJCTkyID0gOTIuYTAoJyAnLCclMjAnKQoJCQkJCWZmKDUyLDkyLDEsMTAxLDYpCgkJCTc4IDNiID09ICc5NSc6CgkJCQk3OCAxNCA8PiAnJzoKCQkJCQkxMDEgPSAxMDEuYTAoJyAnLCclMjAnKSAKCQkJCQk5MiA9IDkyLmEwKCcgJywnJTIwJykKCQkJCQlmZig1Miw5MiwxLDEwMSw2KQoJMWUoJyAnLCc5MicsJ2VlJywnMTAyJyw2KQoJMWUoJ2YzIGQzIGEzIGRhJywnOTInLDIsJzljOi8vYWYuNDAuZGYvNzIvYjYvYWEuYWQnLDYpCgkJCjI5IDU5KDkyKToKCTc4ICc4ZScgNDEgOTI6CgkJNzggMTQgPD4gJyc6CgkJCTM0ID0gZi43NCgpCgkJCTcxID0gMzQuN2YoJzhiIDZjJywgJzc1IGM1IGI1IDIyIGVmIGIxJywnYjggOWYnLCcnLCc3ZScsJ2RlIGY0IGI1IGM3JykKCQkJNzggNzEgPT0gMToKCQkJICAgMmY6ICAgICAKCQkJICAgICAgMjMgPSAxYS41NygnJywgJ2IyIDVlJykKCQkJICAgICAgMjMuNjkoKQoJCQkgICAgICA3OCAoMjMuNDIoKSk6CgkJCQkgICAgNDcgPSAyMy42ZSgpCgkJCSAgICAgIDc4IDQ3ID09IDE0OgoJCQkJMjUgPSA0NCg5MikKCQkJCTNjIDcgNDEgMjU6CgkJCQkgICAgICAgMWUoN1siNTIiXSw3WyI5MiJdLDMsMzUsNikKCQkJICAgZDc6MjEKCWQxOgoJCTI1ID0gNDQoOTIpCgkJM2MgNyA0MSAyNToKCQkJMWUoN1siNTIiXSw3WyI5MiJdLDMsMzUsNikKCQkKMjkgNDQoOTIpOgoJZmE9MjQoOTIpCQoJMmM9MjYuMmIoJ14jLis/Oi0/WzAtOV0qKC4qPyksKC4qPylcYmYoLio/KSQnLDI2LmY4KzI2LmYxKzI2LmU4KzI2LmZjKS4yZChmYSkKCWJkID0gW10KCTNjIGIwLCA1MiwgOTIgNDEgMmM6CgkJMTMgPSB7ImIwIjogYjAsICI1MiI6IDUyLCAiOTIiOiA5Mn0KCQliZC43MygxMykKCTc3ID0gW10KCTNjIDcgNDEgYmQ6CgkJMTMgPSB7IjUyIjogN1siNTIiXSwgIjkyIjogN1siOTIiXX0KCQkyYz0yNi4yYignICguKz8pPSIoLis/KSInLDI2LmY4KzI2LmYxKzI2LmU4KzI2LmZjKS4yZCg3WyJiMCJdKQoJCTNjIDg5LCA4YSA0MSAyYzoKCQkJMTNbODkuODEoKS5jMigpLmEwKCctJywgJzEwNicpXSA9IDhhLjgxKCkKCQk3Ny43MygxMykKCWY2IDc3CgkgICAgIAoyOSA1Yig5MixlOSk6CgkgICAgNTY9NWMKCSAgICAxZD1mLjMzKDUyLCAyZT0zNSwxNj0zNSk7IDFkLjQzKCA3OT0iNjMiLCAyNz17ICI2MiI6IDUyIH0gKQoJICAgIDU2PWUuMTIoNGY9NjEoMjguMWJbMV0pLDkyPTkyLDNhPTFkKQoJICAgIDJmOgoJCTFhLmJjICgpLmNlKDkyLCAxZCwgODIpCgkJZjYgNTYKCSAgICBkNzoKCQkyMQoJICAgIAoyOSA2ZigpOgoJMzggPSAnJwoJOWUgPSAnYmU6Ly9hYy5hZS5lNS9iNC8xMDQvMWYtNDYvZDY/NTMnCglmYSA9IDI0KDllKQoJZmEgPSBmYS5hMCgnL2JmJywnJykKCWZhID0gZmEuNTUoJzkxLTgnKS5hYignOTEtOCcpLmEwKCcmIzM5OycsJ1wnJykuYTAoJyYjMTA7JywnIC0gJykuYTAoJyYjYzk7JywnJykKCTRiPTI2LjJiKCI8OGQ+KC4rPyk8LzhkPi4rPzw2ND4oLis/KTwvNjQ+IiwyNi43MCkuMmQoZmEpWzE6XQoJM2MgMTEsIDU0IDQxIDRiOgoJICAgIDJmOgoJCQkgICAgMTEgPSAxMS41NSgnY2MnLCAnNzYnKQoJICAgIGQ3OgoJCQkgICAgMTEgPSAxMS41NSgnOTEtOCcsJzc2JykKCSAgICA1NCA9IDU0WzotMTVdCgkgICAgMTEgPSAxMS5hMCgnJmU2OycsJycpCgkgICAgNTQgPSAnWzRlIDliXVtiXScrNTQrJ1svYl1bLzRlXScKCSAgICAzOCA9IDM4KzU0KydcYmYnKzExKydcYmYn
KydcYmYnCgk1ZCgnWzRlIDliXVtiXUBhMVsvYl1bLzRlXScsIDM4KQoKMjkgNWQoNmEsIDM4KToKICAgIDlkID0gYzYKICAgIDFhLjZkKCc2NSglZCknICUgOWQpCiAgICAxYS44NigxMDApCiAgICA5NCA9IGYuYjMoOWQpCiAgICA2MCA9IDUwCiAgICBjMCAoNjAgPiAwKToKCTJmOgoJICAgIDFhLjg2KDEwKQoJICAgIDYwIC09IDEKCSAgICA5NC40ZCgxKS45Nig2YSkKCSAgICA5NC40ZCg1KS5hNygzOCkKCSAgICBmNgoJZDc6CgkgICAgMjEKCQkJCSAgICAgCjI5IDI0KDkyKToKCTkzID0gNDUuYTYoOTIpCgk5My44MCgnZDUtYzgnLCAnYTIvNS4wICg2YjsgZTg7IDZiIGY5IDUuMTsgZjUtZmQ7IGYyOjEuOS4wLjMpIGNhLzg3IGE5LzMuMC4zJykKCTNkID0gNDUuYTQoOTMpCglmYT0zZC5kYygpCglmYSA9IGZhLmEwKCdcMTA1JywnJykuYTAoJ1wxMDMnLCcnKS5hMCgnJmU0OycsJycpLmEwKCdcJycsJycpCgkzZC5jNCgpCglmNiBmYQoKMjkgNDkoKToKCTQ4PVtdCgkzZT0yOC4xYlsyXQoJNzggZmIoM2UpPj0yOgoJCWIwPTI4LjFiWzJdCgkJMzI9YjAuYTAoJz8nLCcnKQoJCTc4IChiMFtmYihiMCktMV09PScvJyk6CgkJCWIwPWIwWzA6ZmIoYjApLTJdCgkJMTk9MzIuODgoJyYnKQoJCTQ4PXt9CgkJM2MgZjcgNDEgYzMoZmIoMTkpKToKCQkJZWQ9e30KCQkJZWQ9MTlbZjddLjg4KCc9JykKCQkJNzggKGZiKGVkKSk9PTI6CgkJCQk0OFtlZFswXV09ZWRbMV0KCQkJICAgICAgIAoJZjYgNDgKCSAgICAgICAKMjkgZmYoNTIsOTIsZWUsZTksNiw0PScnKToKCWUwPTI4LjFiWzBdKyI/OTI9IitjLjE4KDkyKSsiJmVlPSIrMzcoZWUpKyImNTI9IitjLjE4KDUyKSsiJjQ9IiszNyg0KQoJNTY9NWMKCTFkPWYuMzMoNTIsIDJlPSIzMC44ZiIsIDE2PWU5KQoJMWQuNDMoIDc5PSI2MyIsIDI3PXsgIjYyIjogNTIsICc5YSc6IDQgfSApCgkxZC4zZignMzYnLCA2KQoJNTY9ZS4xMig0Zj02MSgyOC4xYlsxXSksOTI9ZTAsM2E9MWQsNWE9NWMpCglmNiA1NgoKMjkgMWUoNTIsOTIsZWUsZTksNiw0PScnKToKCWUwPTI4LjFiWzBdKyI/OTI9IitjLjE4KDkyKSsiJmVlPSIrMzcoZWUpKyImNTI9IitjLjE4KDUyKSsiJjQ9IiszNyg0KQoJNTY9NWMKCTFkPWYuMzMoNTIsIDJlPSIzMC44ZiIsIDE2PWU5KQoJMWQuNDMoIDc5PSI2MyIsIDI3PXsgIjYyIjogNTIsICc5YSc6IDQgfSApCgkxZC4zZignMzYnLCA2KQoJNTY9ZS4xMig0Zj02MSgyOC4xYlsxXSksOTI9ZTAsM2E9MWQsNWE9ODIpCglmNiA1NgoKYjA9NDkoKTsgOTI9MmE7IDUyPTJhOyBlZT0yYTsgNTg9MmE7IGU5PTJhCjJmOiA1OD1jLmQyKGIwWyI1OCJdKQpkNzogMjEKMmY6IDkyPWMuZDIoYjBbIjkyIl0pCmQ3OiAyMQoyZjogNTI9Yy5kMihiMFsiNTIiXSkKZDc6IDIxCjJmOiBlZT02MShiMFsiZWUiXSkKZDc6IDIxCjJmOiBlOT1jLmQyKGIwWyJlOSJdKQpkNzogMjEKIAo0YSAiZDQ6ICIrMzcoNTgpOyA0YSAiZTE6ICIrMzcoZWUpOyA0YSAiZTc6ICIrMzcoOTIpOyA0YSAiY2Y6ICIrMzcoNTIpCiAKNzggZWU9PTJhIGUyIDkyPT0yYSBlMiBmYig5Mik8MTogODMoKQo3YSBlZT09MTo1OSg5MikKN2EgZWU9PTI6NmYoKQo3YSBlZT09Mzo1Yig5MixlOSkKCgoKZS42Nyg2MSgyOC4xYlsxXSkp")))(lambda 
a,b:b[int("0x"+a.group(1),16)],"0|1|2|3|description|5|fanart|channel|8|9|a|B|urllib|d|xbmcplugin|xbmcgui|10|status|addDirectoryItem|item_data|adultpass|15|thumbnailImage|addon_id|quote_plus|pairsofparams|xbmc|argv|selfAddon|liz|addLink|AKfycbyBcUa5TlEQudk6Y_0o0ZubnmhGL_|20|pass|password|keyb|open_url|channels|re|infoLabels|sys|def|None|compile|matches|findall|iconImage|try|DefaultFolder|translatePath|cleanedparams|ListItem|dialog|icon|fanart_image|str|text|39|listitem|adultopt|for|response|paramstring|setProperty|metalkettle|in|isConfirmed|setInfo|GetList|urllib2|b7Up8kQt11xgVwz3ErTo|passw|param|get_params|print|match|getSetting|getControl|COLOR|handle|50|xbmcaddon|name|588677963413065728|dte|decode|ok|Keyboard|site|GetChans|isFolder|PLAYLINK|True|showText|Password|Addon|retry|int|Title|Video|pubDate|ActivateWindow|baseurl|endOfDirectory|special|doModal|heading|Windows|Content|executebuiltin|getText|TWITTER|DOTALL|ret|UKTurk|append|Dialog|Please|ignore|list|if|type|elif|common_addon|import|addons|Cancel|yesno|add_header|strip|False|Index|adult|accidental|sleep|2008092417|split|field|value|Adult|setSetting|title|XXX|png|resources|utf|url|req|win|true|setLabel|path|home|join|plot|blue|http|id|twit|continue|replace|uk_turk|Mozilla|Twitter|urlopen|content|Request|setText|prevent|Firefox|twitter|encode|script|jpg|google|www|params|set|Set|Window|macros|the|thumbs|ukturk|to|access|os|plugin|Player|li|https|n|while|opted|lower|range|close|enter|10147|money|Agent|x2026|Gecko|addon|ascii|video|play|Name|Lets|else|unquote_plus|Turk|Site|User|exec|except|libs|from|Feed|have|read|show|Show|co|u|Mode|or|cats|nbsp|com|amp|URL|U|iconimage|You|txt|img|splitparams|mode|you|not|M|rv|UK|me|en|return|i|I|NT|link|len|S|GB|Go|addDir|100|thumb|h|t|s|r|_".split("|")))
from contextlib import suppress
from lsst.daf.butler import DatasetRef, FileDataset, CollectionType
from huntsman.drp.utils.fits import read_fits_header, parse_fits_header
def dataId_to_dict(dataId):
""" Parse an LSST dataId to a dictionary.
Args:
dataId (dataId): The LSST dataId object.
Returns:
dict: The dictionary version of the dataId.
"""
return dataId.to_simple().dict()["dataId"]
def get_dataId_from_header(filename, required_keys):
""" Attempt to get the dataId from its FITS header.
NOTE: This is a temporary solution for ingesting master calibs.
Args:
filename (str): The filename.
required_keys (iterable of str): The keys to extract.
Returns:
dict: The dataId.
"""
# Attempt to read the dataId by parsing the FITS header
with suppress(KeyError):
parsed_header = parse_fits_header(read_fits_header(filename))
return {k: parsed_header[k] for k in required_keys}
# Attempt to read the dataId from the CALIB_ID keyword
i = 0
while True:
try:
header = read_fits_header(filename, ext=i)
if "CALIB_ID" in header:
calibId = {x[0]: x[1] for x in [y.split("=") for y in header["CALIB_ID"].split()]}
return {k: calibId[k] for k in required_keys}
except IndexError:
break
i += 1
raise RuntimeError(f"Unable to determine dataId for calib: {filename}.")
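# Worked example of the CALIB_ID fallback above (hypothetical header value):
#   header["CALIB_ID"] == "ccd=0 filter=g calibDate=2021-01-01"
#   parses to {"ccd": "0", "filter": "g", "calibDate": "2021-01-01"},
#   from which the required_keys are then selected.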
def makeFileDataset(datasetType, dataId, filename):
""" Make a new FileDataset.
Args:
datasetType (lsst.daf.butler.DatasetType): The DatasetType object.
dataId (dict): The dataId.
filename (str): The filename.
Returns:
lsst.daf.butler.FileDataset: The FileDataset object.
"""
datasetRef = DatasetRef(datasetType, dataId)
return FileDataset(path=filename, refs=datasetRef)
def ingest_datasets(butler, datasetType, datasets, collection, transfer="copy"):
""" Ingest datasets into a Gen3 butler repository collection.
Args:
butler (lsst.daf.butler.Butler): The butler object.
datasetType (lsst.daf.butler.DatasetType): The DatasetType of the datasets to ingest.
datasets (list of lsst.daf.butler.FileDataset): The datasets to ingest.
collection (str): The collection to ingest into.
transfer (str): The transfer mode. Default: "copy".
"""
# Register collection
butler.registry.registerCollection(collection, type=CollectionType.RUN)
# Ingest datasets
butler.ingest(*datasets, transfer=transfer, run=collection)
def ingest_calibs(butler, datasetTypeName, filenames, collection, dimension_names, **kwargs):
""" Ingest master calibs into a Butler collection.
Args:
butler (lsst.daf.butler.Butler): The butler object.
datasetTypeName (str): The name of the registered dataset type to ingest.
filenames (list of str): The files to ingest.
collection (str): The collection to ingest into.
dimension_names (iterable of str): The dimension names needed to build each dataId.
**kwargs: Passed to ingest_datasets.
"""
datasetType = butler.registry.getDatasetType(datasetTypeName)
datasets = []
for filename in filenames:
dataId = get_dataId_from_header(filename, required_keys=dimension_names)
datasets.append(makeFileDataset(datasetType, dataId=dataId, filename=filename))
ingest_datasets(butler, datasetType, datasets, collection, **kwargs)
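# --- Hedged usage sketch (not part of the module above) ---
# Assumed invocation for ingesting master calibs into a Gen3 repo. The repository
# path, dataset type name, collection name and dimension names are placeholders
# that depend on how the butler repository was configured.
#
# from lsst.daf.butler import Butler
#
# butler = Butler("/path/to/butler_repo", writeable=True)
# ingest_calibs(
#     butler,
#     datasetTypeName="flat",
#     filenames=["calibs/flat-g-ccd0.fits"],
#     collection="calib/master",
#     dimension_names=["instrument", "detector", "physical_filter"],
#     transfer="copy",
# )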
from .. import initializations
from ..layers.core import MaskedLayer
from .. import backend as K
import numpy as np
class LeakyReLU(MaskedLayer):
'''Special version of a Rectified Linear Unit
that allows a small gradient when the unit is not active
(`f(x) = alpha*x for x < 0`).
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
alpha: float >= 0. Negative slope coefficient.
'''
def __init__(self, alpha=0.3, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
self.alpha = alpha
def get_output(self, train):
X = self.get_input(train)
return K.relu(X, alpha=self.alpha)
def get_config(self):
config = {"name": self.__class__.__name__,
"alpha": self.alpha}
base_config = super(LeakyReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class PReLU(MaskedLayer):
'''
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments:
init: initialization function for the weights.
weights: initial weights, as a list of a single numpy array.
# References:
- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](http://arxiv.org/pdf/1502.01852v1.pdf)
'''
def __init__(self, init='zero', weights=None, **kwargs):
self.init = initializations.get(init)
self.initial_weights = weights
super(PReLU, self).__init__(**kwargs)
def build(self):
input_shape = self.input_shape[1:]
self.alphas = self.init(input_shape)
self.params = [self.alphas]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def get_output(self, train):
X = self.get_input(train)
pos = K.relu(X)
neg = self.alphas * (X - abs(X)) * 0.5
return pos + neg
def get_config(self):
config = {"name": self.__class__.__name__,
"init": self.init.__name__}
base_config = super(PReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ELU(MaskedLayer):
'''
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
alpha: scale for the negative factor.
# References
- [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](http://arxiv.org/pdf/1511.07289v1.pdf)
'''
def __init__(self, alpha=1.0, **kwargs):
super(ELU, self).__init__(**kwargs)
self.alpha = alpha
def get_output(self, train):
X = self.get_input(train)
pos = K.relu(X)
neg = (X - abs(X)) * 0.5
return pos + self.alpha * (K.exp(neg) - 1.)
def get_config(self):
config = {"name": self.__class__.__name__,
"alpha": self.alpha}
base_config = super(ELU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ParametricSoftplus(MaskedLayer):
'''Parametric Softplus of the form: alpha * log(1 + exp(beta * X))
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
alpha_init: float. Initial value of the alpha weights.
beta_init: float. Initial values of the beta weights.
weights: initial weights, as a list of 2 numpy arrays.
# References:
- [Inferring Nonlinear Neuronal Computation Based on Physiologically Plausible Inputs](http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1003143)
'''
def __init__(self, alpha_init=0.2, beta_init=5.0,
weights=None, **kwargs):
self.alpha_init = alpha_init
self.beta_init = beta_init
self.initial_weights = weights
super(ParametricSoftplus, self).__init__(**kwargs)
def build(self):
input_shape = self.input_shape[1:]
self.alphas = K.variable(self.alpha_init * np.ones(input_shape))
self.betas = K.variable(self.beta_init * np.ones(input_shape))
self.params = [self.alphas, self.betas]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def get_output(self, train):
X = self.get_input(train)
return K.softplus(self.betas * X) * self.alphas
def get_config(self):
config = {"name": self.__class__.__name__,
"alpha_init": self.alpha_init,
"beta_init": self.beta_init}
base_config = super(ParametricSoftplus, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ThresholdedLinear(MaskedLayer):
'''Thresholded Linear Activation.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
theta: float >= 0. Threshold location of activation.
# References
[Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/pdf/1402.3337.pdf)
'''
def __init__(self, theta=1.0, **kwargs):
super(ThresholdedLinear, self).__init__(**kwargs)
self.theta = theta
def get_output(self, train):
X = self.get_input(train)
return K.switch(K.abs(X) < self.theta, 0, X)
def get_config(self):
config = {"name": self.__class__.__name__,
"theta": self.theta}
base_config = super(ThresholdedLinear, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ThresholdedReLU(MaskedLayer):
'''Thresholded Rectified Activation.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
theta: float >= 0. Threshold location of activation.
# References
[Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/pdf/1402.3337.pdf)
'''
def __init__(self, theta=1.0, **kwargs):
super(ThresholdedReLU, self).__init__(**kwargs)
self.theta = theta
def get_output(self, train):
X = self.get_input(train)
return K.switch(X > self.theta, X, 0)
def get_config(self):
config = {"name": self.__class__.__name__,
"theta": self.theta}
base_config = super(ThresholdedReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Quorum(MaskedLayer):
'''Learned, weighted combination of several activation functions added to
the identity: `f(x) = x + sum_i(w_i * fn_i(x))`.
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments:
activation_fns: list of activation functions to combine.
activation_weights_init: initial weights for each activation function.
Defaults to 1/len(activation_fns) for every function.
trainable: whether the combination weights are learnable parameters.
threshold: optional upper bound used to clip each weight.
'''
def __init__(self, activation_fns, activation_weights_init=None,
trainable=True, threshold=None, **kwargs):
self.activation_fns = activation_fns
self.activation_weights_init = activation_weights_init
self.trainable = trainable
self.threshold = threshold
assert(len(self.activation_fns) > 0),("Must have at least one "
"activation function!")
if self.activation_weights_init is None:
starting_weight = 1. / len(self.activation_fns)
self.activation_weights_init = [starting_weight for x in range(len(self.activation_fns))]
assert (len(self.activation_fns) ==
len(self.activation_weights_init)),("Must have the same number "
"of activation functions "
"and weights!")
super(Quorum, self).__init__(**kwargs)
def build(self):
input_shape = self.input_shape[1:]
self.activation_weights = [K.variable(init_val * np.ones(input_shape))
for init_val in self.activation_weights_init]
if self.trainable:
self.params = self.activation_weights
def get_output(self, train):
X = self.get_input(train)
Y_ = X
for (fn, w) in zip(self.activation_fns, self.activation_weights):
if self.threshold:
print("Threshold!")
Y_ = Y_ + K.clip(w, 0, self.threshold) * fn(X)
else:
print("Not threshold!")
Y_ = Y_ + w * fn(X)
return Y_
def get_config(self):
config = {"name": self.__class__.__name__,
"fns": self.activation_fns,
"weights": self.activation_weights}
base_config = super(Quorum, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
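# --- Hedged usage sketch (not part of the module above) ---
# Assumed usage with the Sequential API of the same pre-1.0 Keras these layers
# belong to; layer names and argument order outside this file are assumptions
# and may differ between old Keras releases.
#
# from keras.models import Sequential
# from keras.layers.core import Dense
# from keras.layers.advanced_activations import LeakyReLU, PReLU
#
# model = Sequential()
# model.add(Dense(20, 64))        # assumed old-style (input_dim, output_dim) signature
# model.add(LeakyReLU(alpha=0.1)) # small fixed negative slope instead of a hard zero
# model.add(Dense(64, 10))
# model.add(PReLU())              # the negative slope is learned per unit
# model.compile(loss='categorical_crossentropy', optimizer='adam')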
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import next, str
import json
import logging
import re
import sys
import time
from django import forms
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.contrib import messages
from django.db.models import Q
from django.http import HttpResponse, QueryDict
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.urls import reverse
from desktop.appmanager import get_apps_dict
from desktop.conf import ENABLE_DOWNLOAD, REDIRECT_WHITELIST
from desktop.context_processors import get_app_name
from desktop.lib.django_util import JsonResponse
from desktop.lib.django_util import copy_query_dict, format_preserving_redirect, render
from desktop.lib.django_util import login_notrequired, get_desktop_uri_prefix
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document, _get_apps
from desktop.lib.parameterization import find_variables
from desktop.views import serve_403_error
from notebook.models import escape_rows
from useradmin.models import User
import beeswax.forms
import beeswax.design
from beeswax import common, data_export, models
from beeswax.management.commands import beeswax_install_examples
from beeswax.models import QueryHistory, SavedQuery, Session
from beeswax.server import dbms
from beeswax.server.dbms import expand_exception, get_query_server_config, QueryServerException
from desktop.auth.backend import is_admin
LOG = logging.getLogger(__name__)
# For scraping Job IDs from logs
HADOOP_JOBS_RE = re.compile("Starting Job = ([a-z0-9_]+?),")
SPARK_APPLICATION_RE = re.compile("Running with YARN Application = (?P<application_id>application_\d+_\d+)")
TEZ_APPLICATION_RE = re.compile("Executing on YARN cluster with App id ([a-z0-9_]+?)\)")
TEZ_QUERY_RE = re.compile("\(queryId=([a-z0-9_-]+?)\)")
def index(request):
return execute_query(request)
"""
Design views
"""
def save_design(request, form, type_, design, explicit_save):
"""
save_design(request, form, type_, design, explicit_save) -> SavedQuery
A helper method to save the design:
* If ``explicit_save``, then we save the data in the current design.
* If the user clicked the submit button, we do NOT overwrite the current
design. Instead, we create a new "auto" design (iff the user modified
the data). This new design is named after the current design, with the
AUTO_DESIGN_SUFFIX to signify that it's different.
Need to return a SavedQuery because we may end up with a different one.
Assumes that form.saveform is the SaveForm, and that it is valid.
"""
authorized_get_design(request, design.id)
assert form.saveform.is_valid()
sub_design_form = form # Beeswax/Impala case
if type_ == models.HQL:
design_cls = beeswax.design.HQLdesign
elif type_ == models.IMPALA:
design_cls = beeswax.design.HQLdesign
elif type_ == models.SPARK:
from spark.design import SparkDesign
design_cls = SparkDesign
sub_design_form = form.query
else:
raise ValueError(_('Invalid design type %(type)s') % {'type': type_})
design_obj = design_cls(sub_design_form, query_type=type_)
name = form.saveform.cleaned_data['name']
desc = form.saveform.cleaned_data['desc']
return _save_design(request.user, design, type_, design_obj, explicit_save, name, desc)
def _save_design(user, design, type_, design_obj, explicit_save, name=None, desc=None):
# Design here means SavedQuery
old_design = design
new_data = design_obj.dumps()
# Auto save if (1) the user didn't click "save", and (2) the data is different.
# Create a history design if the user is executing a shared design.
# Don't generate an auto-saved design if the user didn't change anything.
if explicit_save and (not design.doc.exists() or design.doc.get().can_write_or_exception(user)):
design.name = name
design.desc = desc
design.is_auto = False
elif design_obj != old_design.get_design():
# Auto save iff the data is different
if old_design.id is not None:
# Clone iff the parent design isn't a new unsaved model
design = old_design.clone(new_owner=user)
if not old_design.is_auto:
design.name = old_design.name + models.SavedQuery.AUTO_DESIGN_SUFFIX
else:
design.name = models.SavedQuery.DEFAULT_NEW_DESIGN_NAME
design.is_auto = True
design.name = design.name[:64]
design.type = type_
design.data = new_data
design.save()
LOG.info('Saved %s design "%s" (id %s) for %s' % (explicit_save and '' or 'auto ', design.name, design.id, design.owner))
if design.doc.exists():
design.doc.update(name=design.name, description=design.desc)
else:
Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)
if design.is_auto:
design.doc.get().add_to_history()
return design
def delete_design(request):
if request.method == 'POST':
ids = request.POST.getlist('designs_selection')
designs = dict([(design_id, authorized_get_design(request, design_id, owner_only=True)) for design_id in ids])
if None in list(designs.values()):
LOG.error('Cannot delete non-existent design(s) %s' % ','.join([key for key, name in list(designs.items()) if name is None]))
return list_designs(request)
for design in list(designs.values()):
if request.POST.get('skipTrash', 'false') == 'false':
design.doc.get().send_to_trash()
else:
design.doc.all().delete()
design.delete()
return redirect(reverse(get_app_name(request) + ':list_designs'))
else:
return render('confirm.mako', request, {'url': request.path, 'title': _('Delete design(s)?')})
def restore_design(request):
if request.method == 'POST':
ids = request.POST.getlist('designs_selection')
designs = dict([(design_id, authorized_get_design(request, design_id)) for design_id in ids])
if None in list(designs.values()):
LOG.error('Cannot restore non-existent design(s) %s' % ','.join([key for key, name in list(designs.items()) if name is None]))
return list_designs(request)
for design in list(designs.values()):
design.doc.get().restore_from_trash()
return redirect(reverse(get_app_name(request) + ':list_designs'))
else:
return render('confirm.mako', request, {'url': request.path, 'title': _('Restore design(s)?')})
def clone_design(request, design_id):
"""Clone a design belonging to any user"""
design = authorized_get_design(request, design_id)
if design is None:
LOG.error('Cannot clone non-existent design %s' % (design_id,))
return list_designs(request)
copy = design.clone(request.user)
copy.save()
name = copy.name + '-copy'
design.doc.get().copy(content_object=copy, name=name, owner=request.user)
messages.info(request, _('Copied design: %(name)s') % {'name': design.name})
return format_preserving_redirect(request, reverse(get_app_name(request) + ':execute_design', kwargs={'design_id': copy.id}))
def list_designs(request):
"""
View function for showing all saved queries.
We get here from /beeswax/list_designs?filterargs, with the options being:
page=<n> - Controls pagination. Defaults to 1.
user=<name> - Show design items belonging to a user. Defaults to all users.
type=<type> - <type> is "hql", for saved query type. Defaults to showing all.
sort=<key> - Sort by the attribute <key>, which is one of:
"date", "name", "desc", and "type" (design type)
Accepts the form "-date", which sorts in descending order.
Defaults to "-date".
text=<frag> - Search for fragment "frag" in names and descriptions.
"""
DEFAULT_PAGE_SIZE = 20
app_name = get_app_name(request)
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
# Manually pin the design type filter to this app.
querydict_query[prefix + 'type'] = app_name
# Get search filter input if any
search_filter = request.GET.get('text', None)
if search_filter is not None:
querydict_query[prefix + 'text'] = search_filter
paginator, page, filter_params = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
designs_json = []
if page:
designs_json = [query.id for query in page.object_list]
return render('list_designs.mako', request, {
'page': page,
'paginator': paginator,
'filter_params': filter_params,
'prefix': prefix,
'user': request.user,
'designs_json': json.dumps(designs_json)
})
def list_trashed_designs(request):
DEFAULT_PAGE_SIZE = 20
app_name = get_app_name(request)
user = request.user
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
# Manually pin the design type filter to this app.
querydict_query[prefix + 'type'] = app_name
# Get search filter input if any
search_filter = request.GET.get('text', None)
if search_filter is not None:
querydict_query[prefix + 'text'] = search_filter
paginator, page, filter_params = _list_designs(user, querydict_query, DEFAULT_PAGE_SIZE, prefix, is_trashed=True)
designs_json = []
if page:
designs_json = [query.id for query in page.object_list]
return render('list_trashed_designs.mako', request, {
'page': page,
'paginator': paginator,
'filter_params': filter_params,
'prefix': prefix,
'user': request.user,
'designs_json': json.dumps(designs_json)
})
def my_queries(request):
"""
View a mix of history and saved queries.
It understands all the GET params in ``list_query_history`` (with a ``h-`` prefix)
and those in ``list_designs`` (with a ``q-`` prefix). The only thing it disallows
is the ``user`` filter, since this view only shows what belongs to the user.
"""
DEFAULT_PAGE_SIZE = 30
app_name = get_app_name(request)
# Extract the history list.
prefix = 'h-'
querydict_history = _copy_prefix(prefix, request.GET)
# Manually restrict the history to the current user and this app's type.
querydict_history[prefix + 'user'] = request.user
querydict_history[prefix + 'type'] = app_name
hist_paginator, hist_page, hist_filter = _list_query_history(
request.user,
querydict_history,
DEFAULT_PAGE_SIZE,
prefix
)
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
# Manually restrict the saved queries to the current user and this app's type.
querydict_query[prefix + 'user'] = request.user
querydict_query[prefix + 'type'] = app_name
query_paginator, query_page, query_filter = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
designs_json = []
if query_page:
designs_json = [query.id for query in query_page.object_list]
filter_params = hist_filter
filter_params.update(query_filter)
return render('my_queries.mako', request, {
'request': request,
'h_page': hist_page,
'h_paginator': hist_paginator,
'q_page': query_page,
'q_paginator': query_paginator,
'filter_params': filter_params,
'designs_json': json.dumps(designs_json)
})
def list_query_history(request):
"""
View the history of query (for the current user).
We get here from /beeswax/query_history?filterargs, with the options being:
page=<n> - Controls pagination. Defaults to 1.
user=<name> - Show history items from a user. Defaults to the current user only.
Also accepts ':all' to show all history items.
type=<type> - <type> is "beeswax|impala", for design type. Defaults to showing all.
design_id=<id> - Show history for this particular design id.
sort=<key> - Sort by the attribute <key>, which is one of:
"date", "state", "name" (design name), and "type" (design type)
Accepts the form "-date", which sorts in descending order.
Defaults to "-date".
auto_query=<bool> - Show auto generated actions (drop table, read data, etc). Defaults to True.
"""
DEFAULT_PAGE_SIZE = 100
prefix = 'q-'
share_queries = is_admin(request.user)
querydict_query = request.GET.copy()
if not share_queries:
querydict_query[prefix + 'user'] = request.user.username
app_name = get_app_name(request)
querydict_query[prefix + 'type'] = app_name
paginator, page, filter_params = _list_query_history(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
filter = request.GET.get(prefix + 'search') or ''
if request.GET.get('format') == 'json':
resp = {
'queries': [massage_query_history_for_json(app_name, query_history) for query_history in page.object_list]
}
return JsonResponse(resp)
return render('list_history.mako', request, {
'request': request,
'page': page,
'paginator': paginator,
'filter_params': filter_params,
'share_queries': share_queries,
'prefix': prefix,
'filter': filter,
})
def massage_query_history_for_json(app_name, query_history):
return {
'id': query_history.id,
'design_id': query_history.design.id,
'query': escape(query_history.query),
'timeInMs': time.mktime(query_history.submission_date.timetuple()),
'timeFormatted': query_history.submission_date.strftime("%x %X"),
'designUrl': reverse(app_name + ':execute_design', kwargs={'design_id': query_history.design.id}),
'resultsUrl': not query_history.is_failure() and reverse(
app_name + ':watch_query_history', kwargs={'query_history_id': query_history.id}
) or ""
}
def download(request, id, format, user_agent=None):
if not ENABLE_DOWNLOAD.get():
return serve_403_error(request)
try:
query_history = authorized_get_query_history(request, id, must_exist=True)
db = dbms.get(request.user, query_history.get_query_server_config())
LOG.debug('Download results for query %s: [ %s ]' % (query_history.server_id, query_history.query))
return data_export.download(query_history.get_handle(), format, db, user_agent=user_agent)
except Exception as e:
if not hasattr(e, 'message') or not e.message:
message = e
else:
message = e.message
raise PopupException(message, detail='')
"""
Queries Views
"""
def execute_query(request, design_id=None, query_history_id=None):
"""
View function for executing an arbitrary query.
"""
action = 'query'
if query_history_id:
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
design = query_history.design
try:
if query_history.server_id and query_history.server_guid:
handle, state = _get_query_handle_and_state(query_history)
if 'on_success_url' in request.GET:
if request.GET.get('on_success_url') and any(
[regexp.match(request.GET.get('on_success_url')) for regexp in REDIRECT_WHITELIST.get()]
):
action = 'watch-redirect'
else:
action = 'watch-results'
else:
action = 'editor-results'
except QueryServerException as e:
if 'Invalid query handle' in e.message or 'Invalid OperationHandle' in e.message:
query_history.save_state(QueryHistory.STATE.expired)
LOG.warn("Invalid query handle", exc_info=sys.exc_info())
action = 'editor-expired-results'
else:
raise e
else:
# Check perms.
authorized_get_design(request, design_id)
app_name = get_app_name(request)
query_type = SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
query_history = None
current_app, other_apps, apps_list = _get_apps(request.user, '')
doc = design and design.id and design.doc.get()
context = {
'design': design,
'apps': apps_list,
'query': query_history, # Backward
'query_history': query_history,
'autocomplete_base_url': reverse(get_app_name(request) + ':api_autocomplete_databases', kwargs={}),
'autocomplete_base_url_hive': reverse('beeswax:api_autocomplete_databases', kwargs={}),
'can_edit_name': design and design.id and not design.is_auto,
'doc_id': doc and doc.id or -1,
'can_edit': doc and doc.can_write(request.user),
'action': action,
'on_success_url': request.GET.get('on_success_url'),
'has_metastore': 'metastore' in get_apps_dict(request.user)
}
return render('execute.mako', request, context)
def view_results(request, id, first_row=0):
"""
Returns the view for the results of the QueryHistory with the given id.
The query results MUST be ready.
To display query results, one should always go through the execute_query view.
If the query handle has `has_result_set=False`, display an empty result.
If ``first_row`` is 0, restarts (if necessary) the query read. Otherwise, just
spits out a warning if first_row doesn't match the server's conception.
Multiple readers will produce a confusing interaction here, and that's known.
It understands the ``context`` GET parameter. (See execute_query().)
"""
first_row = int(first_row)
start_over = (first_row == 0)
results = type('Result', (object,), {
'rows': 0,
'columns': [],
'has_more': False,
'start_row': 0,
})
data = []
fetch_error = False
error_message = ''
log = ''
columns = []
app_name = get_app_name(request)
query_history = authorized_get_query_history(request, id, must_exist=True)
query_server = query_history.get_query_server_config()
db = dbms.get(request.user, query_server)
handle, state = _get_query_handle_and_state(query_history)
context_param = request.GET.get('context', '')
query_context = parse_query_context(context_param)
# Update the status as expired should not be accessible
expired = state == models.QueryHistory.STATE.expired
# Retrieve query results or use empty result if no result set
try:
if query_server['server_name'] == 'impala' and not handle.has_result_set:
downloadable = False
else:
results = db.fetch(handle, start_over, 100)
# Materialize and HTML escape results
data = escape_rows(results.rows())
# We display the "Download" button only when we know that there are results:
downloadable = first_row > 0 or data
log = db.get_log(handle)
columns = results.data_table.cols()
except Exception as ex:
LOG.exception('error fetching results')
fetch_error = True
error_message, log = expand_exception(ex, db, handle)
# Handle errors
error = fetch_error or results is None or expired
context = {
'error': error,
'message': error_message,
'query': query_history,
'results': data,
'columns': columns,
'expected_first_row': first_row,
'log': log,
'hadoop_jobs': app_name != 'impala' and parse_out_jobs(log),
'query_context': query_context,
'can_save': False,
'context_param': context_param,
'expired': expired,
'app_name': app_name,
'next_json_set': None,
'is_finished': query_history.is_finished()
}
if not error:
download_urls = {}
if downloadable:
for format in common.DL_FORMATS:
download_urls[format] = reverse(app_name + ':download', kwargs=dict(id=str(id), format=format))
results.start_row = first_row
context.update({
'id': id,
'results': data,
'has_more': results.has_more,
'next_row': results.start_row + len(data),
'start_row': results.start_row,
'expected_first_row': first_row,
'columns': columns,
'download_urls': download_urls,
'can_save': query_history.owner == request.user,
'next_json_set':
reverse(get_app_name(request) + ':view_results', kwargs={
'id': str(id),
'first_row': results.start_row + len(data)
}
)
+ ('?context=' + context_param or '') + '&format=json'
})
context['columns'] = massage_columns_for_json(columns)
if 'save_form' in context:
del context['save_form']
if 'query' in context:
del context['query']
return JsonResponse(context)
def configuration(request):
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
session = Session.objects.get_session(request.user, query_server['server_name'])
if session:
properties = json.loads(session.properties)
# Redact passwords
for key, value in list(properties.items()):
if 'password' in key.lower():
properties[key] = '*' * len(value)
else:
properties = {}
return render("configuration.mako", request, {'configuration': properties})
"""
Other views
"""
def install_examples(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
try:
dialect = get_app_name(request)
if dialect == 'beeswax':
dialect = 'hive'
db_name = request.POST.get('db_name', 'default')
connector_id = request.POST.get('connector_id')
beeswax_install_examples.Command().handle(dialect=dialect, db_name=db_name, user=request.user, request=request)
response['status'] = 0
except Exception as err:
LOG.exception(err)
response['message'] = str(err)
else:
response['message'] = _('A POST request is required.')
return JsonResponse(response)
@login_notrequired
def query_done_cb(request, server_id):
"""
A callback for query completion notification. When the query is done,
BeeswaxServer notifies us by sending a GET request to this view.
"""
message_template = '<html><head></head>%(message)s<body></body></html>'
message = {'message': 'error'}
try:
query_history = QueryHistory.objects.get(server_id=server_id + '\n')
# Update the query status
query_history.set_to_available()
# Find out details about the query
if not query_history.notify:
message['message'] = 'email_notify is false'
return HttpResponse(message_template % message)
design = query_history.design
user = query_history.owner
subject = _("Beeswax query completed.")
if design:
subject += ": %s" % (design.name,)
link = "%s%s" % (
get_desktop_uri_prefix(),
reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id})
)
body = _(
"%(subject)s. See the results here: %(link)s\n\nQuery:\n%(query)s") % {
'subject': subject, 'link': link, 'query': query_history.query
}
user.email_user(subject, body)
message['message'] = 'sent'
except Exception as ex:
msg = "Failed to send query completion notification via e-mail: %s" % (ex)
LOG.error(msg)
message['message'] = msg
return HttpResponse(message_template % message)
"""
Utils
"""
def massage_columns_for_json(cols):
massaged_cols = []
for column in cols:
massaged_cols.append({
'name': column.name,
'type': column.type,
'comment': column.comment
})
return massaged_cols
def authorized_get_design(request, design_id, owner_only=False, must_exist=False):
if design_id is None and not must_exist:
return None
try:
design = SavedQuery.objects.get(id=design_id)
except SavedQuery.DoesNotExist:
if must_exist:
raise PopupException(_('Design %(id)s does not exist.') % {'id': design_id})
else:
return None
if owner_only:
design.doc.get().can_write_or_exception(request.user)
else:
design.doc.get().can_read_or_exception(request.user)
return design
def authorized_get_query_history(request, query_history_id, owner_only=False, must_exist=False):
if query_history_id is None and not must_exist:
return None
try:
query_history = QueryHistory.get(id=query_history_id)
except QueryHistory.DoesNotExist:
if must_exist:
raise PopupException(_('QueryHistory %(id)s does not exist.') % {'id': query_history_id})
else:
return None
# Some queries don't have a design so are not linked to Document Model permission
if query_history.design is None or not query_history.design.doc.exists():
if not is_admin(request.user) and request.user != query_history.owner:
raise PopupException(_('Permission denied to read QueryHistory %(id)s') % {'id': query_history_id})
else:
query_history.design.doc.get().can_read_or_exception(request.user)
return query_history
def safe_get_design(request, design_type, design_id=None):
"""
Return a new design if design_id is None.
Otherwise return the design with the given id and type. If the design is not
found, display a notification and return a new design.
"""
design = None
if design_id is not None:
design = authorized_get_design(request, design_id)
if design is None:
design = SavedQuery(owner=request.user, type=design_type)
return design
def make_parameterization_form(query_str):
"""
Creates a django form on the fly with arguments from the
query.
"""
variables = find_variables(query_str)
if len(variables) > 0:
class Form(forms.Form):
for name in sorted(variables):
locals()[name] = forms.CharField(widget=forms.TextInput(attrs={'required': True}))
return Form
else:
return None
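# Hedged example (assumes the query uses Hue's `$variable` substitution syntax):
#   Form = make_parameterization_form("SELECT * FROM logs WHERE dt = '$date'")
#   # -> a Django form class with one required CharField named 'date';
#   # make_parameterization_form returns None when the query contains no variables.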
def execute_directly(request, query, query_server=None,
design=None, on_success_url=None, on_success_params=None,
**kwargs):
"""
execute_directly(request, query, query_server, design, ...) -> HTTP response for execution
This method wraps around dbms.execute_query() to take care of the HTTP response
after the execution.
query
The HQL model Query object.
query_server
To which Query Server to submit the query.
Dictionary with keys: ['server_name', 'server_host', 'server_port'].
design
The design associated with the query.
on_success_url
Where to go after the query is done. The URL handler may expect an option "context" GET
param. (See ``watch_query``.) For advanced usage, on_success_url can be a function, in
which case the on complete URL is the return of:
on_success_url(history_obj) -> URL string
Defaults to the view results page.
on_success_params
Optional params to pass to the on_success_url (in addition to "context").
Note that this may throw a Beeswax exception.
"""
if design is not None:
authorized_get_design(request, design.id)
db = dbms.get(request.user, query_server)
database = query.query.get('database', 'default')
db.use(database)
query_history = db.execute_query(query, design)
watch_url = reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id})
# Prepare the GET params for the watch_url
get_dict = QueryDict(None, mutable=True)
# (1) on_success_url
if on_success_url:
if callable(on_success_url):
on_success_url = on_success_url(query_history)
get_dict['on_success_url'] = on_success_url
# (2) misc
if on_success_params:
get_dict.update(on_success_params)
return format_preserving_redirect(request, watch_url, get_dict)
def _list_designs(user, querydict, page_size, prefix="", is_trashed=False):
"""
_list_designs(user, querydict, page_size, prefix, is_trashed) -> (paginator, page, filter_params)
A helper to gather the designs page. It understands all the GET params in
``list_designs``, by reading keys from the ``querydict`` with the given ``prefix``.
"""
DEFAULT_SORT = ('-', 'date') # Descending date
SORT_ATTR_TRANSLATION = dict(
date='last_modified',
name='name',
desc='description',
type='extra',
)
# Trash and security
if is_trashed:
db_queryset = Document.objects.trashed_docs(SavedQuery, user)
else:
db_queryset = Document.objects.available_docs(SavedQuery, user)
# Filter by user
filter_username = querydict.get(prefix + 'user')
if filter_username:
try:
db_queryset = db_queryset.filter(owner=User.objects.get(username=filter_username))
except User.DoesNotExist:
# Don't care if a bad filter term is provided
pass
# Design type
d_type = querydict.get(prefix + 'type')
if d_type and d_type in list(SavedQuery.TYPES_MAPPING.keys()):
db_queryset = db_queryset.filter(extra=str(SavedQuery.TYPES_MAPPING[d_type]))
# Text search
frag = querydict.get(prefix + 'text')
if frag:
db_queryset = db_queryset.filter(Q(name__icontains=frag) | Q(description__icontains=frag))
# Ordering
sort_key = querydict.get(prefix + 'sort')
if sort_key:
if sort_key[0] == '-':
sort_dir, sort_attr = '-', sort_key[1:]
else:
sort_dir, sort_attr = '', sort_key
if sort_attr not in SORT_ATTR_TRANSLATION:
            LOG.warning('Bad parameter to list_designs: sort=%s', sort_key)
sort_dir, sort_attr = DEFAULT_SORT
else:
sort_dir, sort_attr = DEFAULT_SORT
db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr])
    designs = [job.content_object for job in db_queryset.all() if job.content_object and not job.content_object.is_auto]
pagenum = int(querydict.get(prefix + 'page', 1))
paginator = Paginator(designs, page_size, allow_empty_first_page=True)
try:
page = paginator.page(pagenum)
except EmptyPage:
page = None
# We need to pass the parameters back to the template to generate links
keys_to_copy = [prefix + key for key in ('user', 'type', 'sort', 'text')]
filter_params = copy_query_dict(querydict, keys_to_copy)
return paginator, page, filter_params
def _get_query_handle_and_state(query_history):
"""
Front-end wrapper to handle exceptions. Expects the query to be submitted.
"""
handle = query_history.get_handle()
if handle is None:
raise PopupException(_("Failed to retrieve query state from the Query Server."))
state = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(handle)
if state is None:
raise PopupException(_("Failed to contact Server to check query status."))
return (handle, state)
def parse_query_context(context):
"""
parse_query_context(context) -> ('table', <table_name>) -or- ('design', <design_obj>)
"""
if not context:
return None
pair = context.split(':', 1)
if len(pair) != 2 or pair[0] not in ('table', 'design'):
LOG.error("Invalid query context data: %s" % (context,))
return None
if pair[0] == 'design': # Translate design id to design obj
pair[1] = models.SavedQuery.get(int(pair[1]))
return pair
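# Illustrative return values (the table name and design id are hypothetical):
#   parse_query_context('table:sample_07') -> ['table', 'sample_07']
#   parse_query_context('design:42')       -> ['design', <SavedQuery object>]
#   parse_query_context('bogus')           -> None (logged as invalid)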
def parse_out_jobs(log, engine='mr', with_state=False):
"""
Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
with_state: If True, will return a list of dict items with 'job_id', 'started', 'finished'
"""
ret = []
if engine.lower() == 'mr':
start_pattern = HADOOP_JOBS_RE
elif engine.lower() == 'spark':
start_pattern = SPARK_APPLICATION_RE
elif engine.lower() == 'tez':
start_pattern = TEZ_APPLICATION_RE
elif engine.lower() == 'impala':
return ret
else:
raise ValueError(_('Cannot parse job IDs for execution engine %(engine)s') % {'engine': engine})
for match in start_pattern.finditer(log):
job_id = match.group(1)
if with_state:
if job_id not in list(job['job_id'] for job in ret):
ret.append({'job_id': job_id, 'started': True, 'finished': False})
end_pattern = 'Ended Job = %s' % job_id
if end_pattern in log:
job = next((job for job in ret if job['job_id'] == job_id), None)
if job is not None:
job['finished'] = True
else:
ret.append({'job_id': job_id, 'started': True, 'finished': True})
else:
if job_id not in ret:
ret.append(job_id)
return ret
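# Shape of the result, as a hedged example (job ids are illustrative):
#   parse_out_jobs(log, engine='mr')                  -> ['job_1234_0001', ...]
#   parse_out_jobs(log, engine='mr', with_state=True) ->
#       [{'job_id': 'job_1234_0001', 'started': True, 'finished': False}, ...]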
def parse_out_queries(log, engine=None, with_state=False):
"""
Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
with_state: If True, will return a list of dict items with 'job_id', 'started', 'finished'
"""
ret = []
if engine.lower() == 'tez':
start_pattern = TEZ_QUERY_RE
else:
return ret
for match in start_pattern.finditer(log):
job_id = match.group(1)
if with_state:
if job_id not in list(job['job_id'] for job in ret):
ret.append({'job_id': job_id, 'started': False, 'finished': False})
            # Use distinct names here so we do not shadow the ``start_pattern``
            # regex that drives the enclosing loop.
            exec_start_marker = 'Executing command(queryId=%s' % job_id
            exec_end_marker = 'Completed executing command(queryId=%s' % job_id
            if exec_start_marker in log:
job = next((job for job in ret if job['job_id'] == job_id), None)
if job is not None:
job['started'] = True
else:
ret.append({'job_id': job_id, 'started': True, 'finished': False})
            if exec_end_marker in log:
job = next((job for job in ret if job['job_id'] == job_id), None)
if job is not None:
job['finished'] = True
else:
ret.append({'job_id': job_id, 'started': True, 'finished': True})
else:
if job_id not in ret:
ret.append(job_id)
return ret
def _copy_prefix(prefix, base_dict):
"""Copy keys starting with ``prefix``"""
querydict = QueryDict(None, mutable=True)
for key, val in base_dict.items():
if key.startswith(prefix):
querydict[key] = val
return querydict
def _list_query_history(user, querydict, page_size, prefix=""):
"""
    _list_query_history(user, querydict, page_size, prefix) -> (paginator, page, filter_params)
A helper to gather the history page. It understands all the GET params in
``list_query_history``, by reading keys from the ``querydict`` with the
given ``prefix``.
"""
DEFAULT_SORT = ('-', 'date') # Descending date
SORT_ATTR_TRANSLATION = dict(
date='submission_date',
state='last_state',
name='design__name',
type='design__type',
)
db_queryset = models.QueryHistory.objects.select_related()
# Filtering
#
# Queries without designs are the ones we submitted on behalf of the user,
# (e.g. view table data). Exclude those when returning query history.
if querydict.get(prefix + 'auto_query', 'on') != 'on':
db_queryset = db_queryset.exclude(design__isnull=False, design__is_auto=True)
user_filter = querydict.get(prefix + 'user', user.username)
if user_filter != ':all':
db_queryset = db_queryset.filter(owner__username=user_filter)
# Design id
design_id = querydict.get(prefix + 'design_id')
if design_id:
if design_id.isdigit():
db_queryset = db_queryset.filter(design__id=int(design_id))
else:
raise PopupException(_('list_query_history requires design_id parameter to be an integer: %s') % design_id)
# Search
search_filter = querydict.get(prefix + 'search')
if search_filter:
db_queryset = db_queryset.filter(
Q(design__name__icontains=search_filter) |
Q(query__icontains=search_filter) |
Q(owner__username__icontains=search_filter)
)
# Design type
d_type = querydict.get(prefix + 'type')
if d_type:
if d_type not in list(SavedQuery.TYPES_MAPPING.keys()):
            LOG.warning('Bad parameter to list_query_history: type=%s', d_type)
else:
db_queryset = db_queryset.filter(design__type=SavedQuery.TYPES_MAPPING[d_type])
# If recent query
recent = querydict.get('recent')
if recent:
db_queryset = db_queryset.filter(is_cleared=False)
# Ordering
sort_key = querydict.get(prefix + 'sort')
if sort_key:
sort_dir, sort_attr = '', sort_key
if sort_key[0] == '-':
sort_dir, sort_attr = '-', sort_key[1:]
if sort_attr not in SORT_ATTR_TRANSLATION:
            LOG.warning('Bad parameter to list_query_history: sort=%s', sort_key)
sort_dir, sort_attr = DEFAULT_SORT
else:
sort_dir, sort_attr = DEFAULT_SORT
db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr], '-id')
# Get the total return count before slicing
total_count = db_queryset.count()
# Slicing (must be the last filter applied)
pagenum = int(querydict.get(prefix + 'page', 1))
if pagenum < 1:
pagenum = 1
db_queryset = db_queryset[page_size * (pagenum - 1) : page_size * pagenum]
paginator = Paginator(db_queryset, page_size, allow_empty_first_page=True)
try:
page = paginator.page(pagenum)
except EmptyPage:
page = None
# We do slicing ourselves, rather than letting the Paginator handle it, in order to
# update the last_state on the running queries
if page:
for history in page.object_list:
_update_query_state(history.get_full_object())
# We need to pass the parameters back to the template to generate links
keys_to_copy = [prefix + key for key in ('user', 'type', 'sort', 'design_id', 'auto_query', 'search')]
filter_params = copy_query_dict(querydict, keys_to_copy)
return paginator, page, filter_params
def _update_query_state(query_history):
"""
Update the last_state for a QueryHistory object. Returns success as True/False.
This only occurs iff the current last_state is submitted or running, since the other
states are stable, more-or-less.
Note that there is a transition from available/failed to expired. That occurs lazily
when the user attempts to view results that have expired.
"""
if query_history.last_state <= models.QueryHistory.STATE.running.value:
try:
state_enum = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(query_history.get_handle())
if state_enum is None:
# Error was logged at the source
return False
except Exception as e:
LOG.error(e)
state_enum = models.QueryHistory.STATE.failed
query_history.save_state(state_enum)
return True
def get_db_choices(request):
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
db = dbms.get(request.user, query_server)
dbs = db.get_databases()
return [(db, db) for db in dbs]
WHITESPACE = re.compile(r"\s+", re.MULTILINE)
def collapse_whitespace(s):
return WHITESPACE.sub(" ", s).strip()
|
nilq/baby-python
|
python
|
from datetime import datetime
from unittest import TestCase
import xml.etree.ElementTree as ET
from youtube_discussion_tree_api.utils import Node
from youtube_discussion_tree_api._xml import _create_argument, _create_pair, _serialize_tree
import os
class TestXmlTreeConstruction(TestCase):
def test_create_argument(self):
argument_list = ET.Element("argument-list")
node = Node(
id = "comment1",
author_name = "Ororo",
author_id = "author1",
text = "Hello, I love turtles and dogs",
like_count = 10000000,
parent_id = None,
published_at = "12-12-2012"
)
_create_argument(argument_list, node, None)
self.assertEqual(node.id, argument_list.find("arg").get("id"))
def test_create_pair(self):
argument_pair = argument_list = ET.Element("argument-list")
node = Node(
id = "comment1",
author_name = "Ororo",
author_id = "author1",
text = "Hello, I love turtles and dogs",
like_count = 10000000,
parent_id = "Turtle",
published_at = "12-12-2012"
)
_create_pair(argument_pair, node, 0)
self.assertEqual('0', argument_list.find("pair").get("id"))
self.assertEqual(node.id, argument_list.find("pair").find("t").get("id"))
self.assertEqual(node.parent_id, argument_list.find("pair").find("h").get("id"))
def test_serialize_tree(self):
nodes = [
Node(
id = "comment1",
author_name = "Ororo",
author_id = "author1",
text = "Hello, I love turtles and dogs",
like_count = 10000000,
parent_id = None,
published_at = "12-12-2012"
),
Node(
id = "comment2",
author_name = "Horno Microondas",
author_id = "author2",
text = "Cats are the best animals in the whole world",
like_count = 10000000,
parent_id = "comment1",
published_at = "12-12-2012"
),
Node(
id = "comment3",
author_name = "Kekino",
author_id = "author3",
text = "I'm more of a dogs person, they are so cute",
like_count = 10000000,
parent_id = "comment1",
published_at = "12-12-2012"
)
]
_serialize_tree("./youtube_discussion_tree_api/tests/output.xml", nodes, None)
self.assertTrue(os.path.isfile("./youtube_discussion_tree_api/tests/output.xml"))
tree = ET.parse('./youtube_discussion_tree_api/tests/output.xml')
self.assertEqual("entailment-corpus",tree.findall(".")[0].tag)
self.assertTrue(tree.find("./argument-list") != None)
self.assertTrue(tree.find("./argument-pairs") != None)
self.assertTrue(3,len(tree.findall("./argument-list/arg")))
self.assertTrue(3,len(tree.findall("./argument-pairs/pairs")))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import asyncio
import logging
import os
import sys
from pathlib import Path
import subprocess
import numpy as np
import pandas as pd
import flask
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from plotly import subplots
from simcore_sdk import node_ports
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
logger = logging.getLogger()
DEVEL_MODE = False
if DEVEL_MODE:
IN_OUT_PARENT_DIR = Path(Path(os.path.dirname(
os.path.realpath(__file__))).parent).parent / 'validation'
else:
IN_OUT_PARENT_DIR = Path('/home/jovyan')
INPUT_DIR = IN_OUT_PARENT_DIR / 'input'
OUTPUT_DIR = IN_OUT_PARENT_DIR / 'output'
DEFAULT_PATH = '/'
base_pathname = os.environ.get('SIMCORE_NODE_BASEPATH', DEFAULT_PATH)
if base_pathname != DEFAULT_PATH:
base_pathname = "/{}/".format(base_pathname.strip('/'))
print('url_base_pathname', base_pathname)
server = flask.Flask(__name__)
app = dash.Dash(__name__,
server=server,
url_base_pathname=base_pathname
)
app.css.append_css({
"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"
})
osparc_style = {
'color': '#bfbfbf',
'backgroundColor': '#202020',
'gridColor': '#444',
}
flex_columns = {
'display': 'flex'
}
flex_column = {
'flex': 1,
'min-width': 0
}
unflex_column = {
'flex': 0,
'min-width': '220px',
'color': osparc_style['color'],
'backgroundColor': osparc_style['backgroundColor']
}
centered_text = {
'text-align': 'center',
'color': osparc_style['color'],
'backgroundColor': osparc_style['backgroundColor']
}
tab_style = {
'padding': '5px',
'color': osparc_style['color'],
'backgroundColor': osparc_style['backgroundColor']
}
options_layout = {
# 'border': '1px solid',
# 'border-radius': '5px',
'margin-top': '50px'
}
dcc_input = {
'color': osparc_style['color'],
'backgroundColor': osparc_style['gridColor']
}
dcc_input_button = {
'height': '40px',
'width': '100%',
'color': dcc_input['color'],
'backgroundColor': dcc_input['backgroundColor']
}
dcc_input_label = {
'width': '120px',
'float': 'left'
}
dcc_input_number = {
'height': '30px',
'width': '100px',
'color': dcc_input['color'],
'backgroundColor': dcc_input['backgroundColor']
}
dcc_input_pair = {
'overflow': 'hidden',
'margin-top': '2px',
'margin-bottom': '2px',
'color': osparc_style['color'],
'backgroundColor': osparc_style['backgroundColor']
}
def get_empty_input_graph():
fig = subplots.make_subplots(rows=4,
cols=1,
shared_xaxes=True,
vertical_spacing=0.05
)
fig['layout']['xaxis'].update(
title='Conduction Velocity (m/s)',
gridcolor=osparc_style['gridColor']
)
fig['layout']['yaxis'].update(
title='Vmax(uV)',
gridcolor=osparc_style['gridColor']
)
fig['layout']['yaxis2'].update(
title='M coeff',
gridcolor=osparc_style['gridColor']
)
fig['layout']['yaxis3'].update(
title='B coeff (mA)',
gridcolor=osparc_style['gridColor']
)
fig['layout']['yaxis4'].update(
title='tau_SD(ms)',
gridcolor=osparc_style['gridColor']
)
margin = 10
y_label_padding = 50
x_label_padding = 30
fig['layout']['margin'].update(
l=margin+y_label_padding,
r=margin,
b=margin+x_label_padding,
t=margin,
)
fig['layout'].update(
autosize=True,
height=800,
showlegend=False,
plot_bgcolor=osparc_style['backgroundColor'],
paper_bgcolor=osparc_style['backgroundColor'],
font=dict(
color=osparc_style['color']
)
)
return fig
def get_empty_output_1_graph(fixed_tst=True, plot_vs_qst=False, plot_vs_tCNAP=False):
margin = 10
label_padding = 30
layout = go.Layout(
scene=dict(
xaxis=dict(
title='CV (m/s)',
gridcolor=osparc_style['gridColor'],
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor=osparc_style['backgroundColor'],
type='log',
autorange=True
),
yaxis=dict(
title='I_st (mA)',
gridcolor=osparc_style['gridColor'],
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor=osparc_style['backgroundColor']
),
zaxis=dict(
title='V_pred (uV)',
gridcolor=osparc_style['gridColor'],
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor=osparc_style['backgroundColor']
)
),
showlegend=False,
margin=dict(
l=margin+label_padding,
r=margin,
b=margin,
t=margin
),
height=400,
plot_bgcolor=osparc_style['backgroundColor'],
paper_bgcolor=osparc_style['backgroundColor'],
font=dict(
color=osparc_style['color']
)
)
if plot_vs_tCNAP:
layout['scene']['xaxis'].update(
title='t_CNAP (ms)',
type='linear'
)
if not fixed_tst:
layout['scene']['yaxis'].update(
title='t_st (mA)'
)
if plot_vs_qst:
layout['scene']['yaxis'].update(
title='Q_st (nC)'
)
fig = {
'layout': layout,
'data': []
}
return fig
def get_empty_output_2_graph(fixed_tst=True, plot_vs_qst=False, plot_vs_tCNAP=False):
margin = 10
y_label_padding = 50
x_label_padding = 30
layout = go.Layout(
scene=dict(
xaxis=dict(
title='CV (m/s)',
type='log',
autorange=True
),
yaxis=dict(
title='I_st (mA)'
)
),
margin=dict(
l=margin+y_label_padding,
r=margin,
b=margin+x_label_padding,
t=margin
),
height=400,
plot_bgcolor=osparc_style['backgroundColor'],
paper_bgcolor=osparc_style['backgroundColor'],
font=dict(
color=osparc_style['color']
)
)
if plot_vs_tCNAP:
layout['scene']['xaxis'].update(
title='t_CNAP (ms)',
type='linear'
)
if not fixed_tst:
layout['scene']['yaxis'].update(
title='t_st (mA)'
)
if plot_vs_qst:
layout['scene']['yaxis'].update(
title='Q_st (nC)'
)
return {
'layout': layout,
'data': []
}
empty_input_graph = get_empty_input_graph()
empty_output_1_graph = get_empty_output_1_graph()
empty_output_2_graph = get_empty_output_2_graph()
app.layout = html.Div(children=[
html.Div([
# Four input graphs on the left
html.Div([
html.H4(
children='Learned Model Input Parameters',
style=centered_text
),
dcc.Graph(id='graph-ins', figure=empty_input_graph)
], style=flex_column),
# Controls in the middle
html.Div([
html.Div(
children='Minimal description of how the solver works.',
style=centered_text
),
html.Div([
html.H5('Input options'),
html.Label('Select a Nerve Profile'),
dcc.Dropdown(
id='input-nerve-profile',
options=[
{'label': 'Subject 1: Cervical Vagus', 'value': 0},
{'label': 'Subject 2: Cervical Vagus', 'value': 1},
{'label': 'Subject 2: Gastric Vagus', 'value': 2}
],
value=0,
style=dcc_input
),
html.Label('Plot Options'),
dcc.Checklist(
id='input-plot-options',
options=[
{'label': 'Plot against Charge-Phase',
'value': 'charge_phase_cb'},
{'label': 'Plot CNAP versus Time (ms)',
'value': 'time_cb'}
],
values=[]
),
html.Button('Load', id='load-input-button',
style=dcc_input_button)
], style=options_layout),
html.Div([
html.H5('Sweep Pulse'),
dcc.Tabs(
id="sweep-pulse-tabs",
value='current',
children=[
dcc.Tab(
label='Current',
value='current',
style=tab_style,
selected_style=tab_style,
children=[
html.Div([
html.Div([
html.Label('Starting tst (mA):')
], style=dcc_input_label),
dcc.Input(
id='current_in_1',
type='number',
value=0,
style=dcc_input_number
)
], style=dcc_input_pair),
html.Div([
html.Div([
html.Label('Ending tst (mA):'),
], style=dcc_input_label),
dcc.Input(
id='current_in_2',
type='number',
value=1,
style=dcc_input_number
)
], style=dcc_input_pair),
html.Div([
html.Div([
html.Label('Step Size (mA):')
], style=dcc_input_label),
dcc.Input(
id='current_in_3',
type='number',
value=0.01,
style=dcc_input_number
)
], style=dcc_input_pair),
html.Div([
html.Div([
html.Label('Fixed Ist (ms):')
], style=dcc_input_label),
dcc.Input(
id='current_in_4',
type='number',
value=0.4,
style=dcc_input_number
)
], style=dcc_input_pair),
html.Button(
'Predict CNAPs', id='predict-current-button', style=dcc_input_button),
]
),
dcc.Tab(
label='Duration',
value='duration',
style=tab_style,
selected_style=tab_style,
children=[
html.Div([
html.Div([
html.Label('Starting Ist (mA):')
], style=dcc_input_label),
dcc.Input(
id='duration_in_1',
type='number',
value=0,
style=dcc_input_number
)
], style=dcc_input_pair),
html.Div([
html.Div([
html.Label('Ending Ist (mA):'),
], style=dcc_input_label),
dcc.Input(
id='duration_in_2',
type='number',
value=1,
style=dcc_input_number
)
], style=dcc_input_pair),
html.Div([
html.Div([
html.Label('Step Size (mA):')
], style=dcc_input_label),
dcc.Input(
id='duration_in_3',
type='number',
value=0.01,
style=dcc_input_number
)
], style=dcc_input_pair),
html.Div([
html.Div([
html.Label('Fixed tst (ms):')
], style=dcc_input_label),
dcc.Input(
id='duration_in_4',
type='number',
value=0.6,
style=dcc_input_number
)
], style=dcc_input_pair),
html.Button(
'Predict CNAPs', id='predict-duration-button', style=dcc_input_button),
]
)
],
),
html.Div(id='tabs-content')
], style=options_layout)
], style=unflex_column),
# Two output graphs on the right
html.Div([
html.H4(
id='output-label',
children='Predicted Compound Nerve Action Potentials',
style=centered_text
),
dcc.Graph(id='graph-out1', figure=empty_output_1_graph),
dcc.Graph(id='graph-out2', figure=empty_output_2_graph)
], style=flex_column),
], style=flex_columns)
], style=osparc_style)
def get_selected_checkboxes(string_from_components):
checked = [0, 0]
if ('charge_phase_cb' in string_from_components):
checked[0] = 1
if ('time_cb' in string_from_components):
checked[1] = 1
return checked
def create_learned_model_input(path, plot_vs_tcnap):
column_names = ['t_ms', 'CV', 'Vmax', 'M_mod', 'B_mod', 'tauSD']
data = pd.read_csv(path, sep=',', names=column_names)
# dpi = 96
# height = 1024
# width = 1024
# fontsize = 16
# plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
return {
"plot_vs_tcnap": plot_vs_tcnap,
"x_axis": {
"t_ms": data.t_ms,
"CV": data.CV
},
"y_axis": {
"Vmax": [i*-1e12 for i in data.Vmax],
"M_mod": data.M_mod,
"B_mod": data.B_mod,
"tauSD": data.tauSD,
}
}
def create_predicted_compound_nerve_action(cv_path, t_path, ist_path, tst_path, qst_path, vpred_path, lpred_path, fixed_tst, plot_vs_qst, plot_vs_tCNAP): # pylint:disable=too-many-arguments
data_cv = pd.read_csv(cv_path, sep=',', header=None)
data_tcnap = pd.read_csv(t_path, sep=',', header=None)
data_ist = None
data_tst = None
if fixed_tst:
data_ist = pd.read_csv(ist_path, sep=',', header=None)
else:
data_tst = pd.read_csv(tst_path, sep=',', header=None)
data_CAP = pd.read_csv(qst_path, sep=',', header=None)
data_vpred = pd.read_csv(vpred_path, sep=',', header=None)
data_lpred = pd.read_csv(lpred_path, sep=',', header=None)
# dpi = 96
# height = 1024
# width = 800
# fontsize = 16
data_cv[data_cv > 100] = None
x_axis = data_cv
if plot_vs_tCNAP:
x_axis = data_tcnap
y_axis = data_ist
if not fixed_tst:
y_axis = data_tst
if plot_vs_qst:
y_axis = data_CAP
x_axis = x_axis.values[:, 0]
y_axis = y_axis.values[0, :]
return {
"fixed_tst": fixed_tst,
"plot_vs_qst": plot_vs_qst,
"plot_vs_tCNAP": plot_vs_tCNAP,
"3d": {
"x": y_axis,
"y": x_axis,
"z": data_vpred.values.T,
},
"heatmap": {
"x": x_axis,
"y": y_axis,
"z": data_lpred.values.T,
}
}
async def _upload_data(output_files):
ports = await node_ports.ports()
for idx, path in enumerate(output_files):
if path.exists():
await (await ports.outputs)[idx].set(path)
def push_output_data():
input_path = OUTPUT_DIR / 'input.csv'
cv_path = OUTPUT_DIR / 'CV_plot.csv'
t_path = OUTPUT_DIR / 't_plot.csv'
ist_path = OUTPUT_DIR / 'Ist_plot.csv'
tst_path = OUTPUT_DIR / 'tst_plot.csv'
qst_path = OUTPUT_DIR / 'CAP_plot.csv'
vpred_path = OUTPUT_DIR / 'V_pred_plot.csv'
lpred_path = OUTPUT_DIR / 'Lpred_plot.csv'
output_files = [input_path, cv_path, t_path, ist_path,
tst_path, qst_path, vpred_path, lpred_path]
for p in output_files:
logger.info('file %s', str(p))
        logger.info('exists %s', p.exists())
asyncio.get_event_loop().run_until_complete(_upload_data(output_files))
# ports = node_ports.ports()
# tasks = asyncio.gather(*[ports.outputs[idx].set(path) for idx, path in enumerate(output_files)])
# paths_to_outputs = asyncio.get_event_loop().run_until_complete( tasks )
# assert all( p.exists() for p in paths_to_outputs )
# return paths_to_outputs
def run_solver(*args):
if DEVEL_MODE:
return
subprocess.call(["execute_cnap.sh", *args], cwd=OUTPUT_DIR)
def create_input_files(model_id, plot_vs_tCNAP):
# !execute_cnap.sh $model_id 0 0.0 1.0 0.5 0.4
run_solver(str(model_id), "0", "0.0", "1.0", "0.5", "0.4")
path = OUTPUT_DIR / 'input.csv'
return create_learned_model_input(path, plot_vs_tCNAP)
def build_input_graphs(data):
marker_size = 2
line_width = 1
plot_vs_tcnap = data["plot_vs_tcnap"]
if (plot_vs_tcnap):
x_data = data["x_axis"]["t_ms"]
else:
x_data = data["x_axis"]["CV"]
trace1 = go.Scatter(
x=x_data,
y=data["y_axis"]["Vmax"],
mode='lines+markers',
marker=dict(
size=marker_size
),
line=dict(
width=line_width
)
)
trace2 = go.Scatter(
x=x_data,
y=data["y_axis"]["M_mod"],
mode='lines+markers',
marker=dict(
size=marker_size
),
line=dict(
width=line_width
)
)
trace3 = go.Scatter(
x=x_data,
y=data["y_axis"]["B_mod"],
mode='lines+markers',
marker=dict(
size=marker_size
),
line=dict(
width=line_width
)
)
trace4 = go.Scatter(
x=x_data,
y=data["y_axis"]["tauSD"],
mode='lines+markers',
marker=dict(
size=marker_size
),
line=dict(
width=line_width
)
)
fig = get_empty_input_graph()
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 2, 1)
fig.append_trace(trace3, 3, 1)
fig.append_trace(trace4, 4, 1)
if (plot_vs_tcnap):
fig['layout']['xaxis'].update(
autorange=True
)
else:
fig['layout']['xaxis'].update(
type='log',
autorange=True
)
return fig
# When pressing 'Load' this callback will be triggered.
# Also, its output will trigger the rebuilding of the four input graphs.
@app.callback(
Output('graph-ins', 'figure'),
[Input('load-input-button', 'n_clicks')],
state=[
State(component_id='input-nerve-profile', component_property='value'),
State(component_id='input-plot-options', component_property='values')
]
)
def read_input_file(_n_clicks, input_nerve_profile, input_plot_options):
model_id = input_nerve_profile + 1
selected_cb = get_selected_checkboxes(input_plot_options)
data = create_input_files(model_id, selected_cb[1])
push_output_data()
return build_input_graphs(data)
# When pressing 'Predict' this callback will be triggered.
# Also, its output will trigger the rebuilding of the two output graphs.
@app.callback(
Output('output-label', 'children'),
[
Input('predict-current-button', 'n_clicks_timestamp'),
Input('predict-duration-button', 'n_clicks_timestamp')
]
)
def update_output_label(button_current_ts, button_duration_ts):
if button_current_ts is None:
button_current_ts = 0
if button_duration_ts is None:
button_duration_ts = 0
base_text = 'Predicted Compound Nerve Action Potentials'
if button_current_ts < button_duration_ts:
return base_text + ' (Duration)'
return base_text + ' (Current)'
def build_graph_out_1(data):
fig = get_empty_output_1_graph()
if not data:
return fig
fig = get_empty_output_1_graph(
data["fixed_tst"], data["plot_vs_qst"], data["plot_vs_tCNAP"])
dummy_wireframe = False
if dummy_wireframe:
x = np.linspace(-5, 5, 50)
y = np.linspace(-5, 5, 50)
xGrid, yGrid = np.meshgrid(y, x)
R = np.sqrt(xGrid ** 2 + yGrid ** 2)
z = np.sin(R)
# Creating the plot
lines = []
line_marker = dict(color='#0066FF', width=2)
for i, j, k in zip(xGrid, yGrid, z):
lines.append(go.Scatter3d(
x=i, y=j, z=k, mode='lines', line=line_marker))
fig['data'] = lines
return fig
data_3d = data["3d"]
x = data_3d["x"]
y = data_3d["y"]
xGrid, yGrid = np.meshgrid(y, x)
z = data_3d["z"]
# Creating the plot
lines = []
line_marker = dict(color='#0066FF', width=2)
for i, j, k in zip(xGrid, yGrid, z):
lines.append(go.Scatter3d(
x=i, y=j, z=k, mode='lines', line=line_marker))
fig['data'] = lines
return fig
def build_graph_out_2(data):
fig = get_empty_output_2_graph()
if not data:
return fig
fig = get_empty_output_2_graph(
data["fixed_tst"], data["plot_vs_qst"], data["plot_vs_tCNAP"])
data_heatmap = data["heatmap"]
x = data_heatmap["x"]
y = data_heatmap["y"]
z = data_heatmap["z"]
data = go.Heatmap(x=x, y=y, z=z)
fig['data'] = [data]
return fig
@app.callback(
[
Output('graph-out1', 'figure'),
Output('graph-out2', 'figure'),
],
[
Input('predict-current-button', 'n_clicks_timestamp'),
Input('predict-duration-button', 'n_clicks_timestamp')
],
state=[
State(component_id='input-nerve-profile', component_property='value'),
State(component_id='input-plot-options', component_property='values'),
State(component_id='current_in_1', component_property='value'),
State(component_id='current_in_2', component_property='value'),
State(component_id='current_in_3', component_property='value'),
State(component_id='current_in_4', component_property='value'),
State(component_id='duration_in_1', component_property='value'),
State(component_id='duration_in_2', component_property='value'),
State(component_id='duration_in_3', component_property='value'),
State(component_id='duration_in_4', component_property='value')
]
)
def predict( # pylint:disable=too-many-arguments
button_current_ts, button_duration_ts,
input_nerve_profile,
input_plot_options,
current_1, current_2, current_3, current_4,
duration_1, duration_2, duration_3, duration_4):
if button_current_ts is None:
button_current_ts = 0
if button_duration_ts is None:
button_duration_ts = 0
if button_current_ts == 0 and button_duration_ts == 0:
return [get_empty_output_1_graph(), get_empty_output_2_graph()]
model_id = input_nerve_profile + 1
selected_cb = get_selected_checkboxes(input_plot_options)
plot_vs_qst = selected_cb[0]
plot_vs_tCNAP = selected_cb[1]
cv_path = OUTPUT_DIR / 'CV_plot.csv'
t_path = OUTPUT_DIR / 't_plot.csv'
ist_path = OUTPUT_DIR / 'Ist_plot.csv'
tst_path = OUTPUT_DIR / 'tst_plot.csv'
qst_path = OUTPUT_DIR / 'CAP_plot.csv'
vpred_path = OUTPUT_DIR / 'V_pred_plot.csv'
lpred_path = OUTPUT_DIR / 'Lpred_plot.csv'
data = None
if button_current_ts > button_duration_ts:
sweep_param = 1
fixed_tst = True
print("Current clicked.", model_id, sweep_param, plot_vs_qst,
plot_vs_tCNAP, current_1, current_2, current_3, current_4)
# !execute_cnap.sh $model_id $sweep_param $start_ist.value $end_ist.value $step_size_current.value $fixed_tst.value
run_solver(str(model_id), str(sweep_param), str(current_1),
str(current_2), str(current_3), str(current_4))
data = create_predicted_compound_nerve_action(cv_path=cv_path, t_path=t_path, ist_path=ist_path, tst_path=tst_path, qst_path=qst_path,
vpred_path=vpred_path, lpred_path=lpred_path, fixed_tst=fixed_tst, plot_vs_qst=plot_vs_qst, plot_vs_tCNAP=plot_vs_tCNAP)
else:
sweep_param = 0
fixed_tst = False
print("Duration clicked.", model_id, sweep_param, plot_vs_qst,
plot_vs_tCNAP, duration_1, duration_2, duration_3, duration_4)
# !execute_cnap.sh $model_id $sweep_param $start_ist.value $end_ist.value $step_size_current.value $fixed_tst.value
run_solver(str(model_id), str(sweep_param), str(duration_1),
str(duration_2), str(duration_3), str(duration_4))
data = create_predicted_compound_nerve_action(cv_path=cv_path, t_path=t_path, ist_path=ist_path, tst_path=tst_path, qst_path=qst_path,
vpred_path=vpred_path, lpred_path=lpred_path, fixed_tst=fixed_tst, plot_vs_qst=plot_vs_qst, plot_vs_tCNAP=plot_vs_tCNAP)
graph1 = build_graph_out_1(data)
graph2 = build_graph_out_2(data)
return [graph1, graph2]
class AnyThreadEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
"""Event loop policy that allows loop creation on any thread."""
def get_event_loop(self) -> asyncio.AbstractEventLoop:
try:
return super().get_event_loop()
except (RuntimeError, AssertionError):
# "There is no current event loop in thread %r"
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
if __name__ == '__main__':
# the following line is needed for async calls
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
app.run_server(debug=DEVEL_MODE, port=8888, host="0.0.0.0")
|
nilq/baby-python
|
python
|
"""
This week’s question:
Implement a simple version of autocomplete, where given an input string s and a dictionary of words dict, return the word(s) in dict that partially match s (or an empty string if nothing matches).
Example:
let dict = ['apple', 'banana', 'cranberry', 'strawberry']
$ simpleAutocomplete('app')
$ ['apple']
$ simpleAutocomplete('berry')
$ ['cranberry', 'strawberry']
$ simpleAutocomplete('fart')
$ []
"""
class AutoComplete:
def __init__(self, words):
self.words = words
def simple_autocomplete(self, s):
"""
        >>> a = AutoComplete(['apple', 'banana', 'cranberry', 'strawberry'])
        >>> a.simple_autocomplete('app')
        ['apple']
        >>> a.simple_autocomplete('berry')
        ['cranberry', 'strawberry']
        >>> a.simple_autocomplete('fart')
        []
"""
        return [word for word in self.words if s in word]
if __name__ == "__main__":
import doctest
doctest.testmod()
|
nilq/baby-python
|
python
|
import re
from setuptools import setup, find_packages
INIT_FILE = 'dimensigon/__init__.py'
with open("README.md", "r") as fh:
long_description = fh.read()
def find_version():
with open(INIT_FILE) as fp:
for line in fp:
# __version__ = '0.1.0'
match = re.search(r"__version__\s*=\s*(['\"])([^\1]+)\1", line)
if match:
return match.group(2)
assert False, 'cannot find version'
def find_author_email():
with open(INIT_FILE) as fp:
m_author, m_email = None, None
for line in fp:
if not m_author:
                m_author = re.search(r"__author__\s*=\s*(['\"])(.*?)\1", line)
if not m_email:
                m_email = re.search(r"__email__\s*=\s*(['\"])(.*?)\1", line)
if m_author and m_email:
return m_author.group(2), m_email.group(2)
assert False, 'cannot find author or email'
def find_licence():
with open(INIT_FILE) as fp:
for line in fp:
match = re.search(r"__license__\s*=\s*(['\"])([^\1]*)\1", line)
if match:
return match.group(2)
assert False, 'cannot find license'
def required_packages():
with open('requirements.txt') as fp:
return [line.strip() for line in fp if line.strip()]
author, email = find_author_email()
setup(
name='dimensigon',
version=find_version(),
package_dir={"": "."},
packages=find_packages(where=".", exclude=["contrib", "docs", "tests*", "tasks"]),
url='https://github.com/dimensigon/dimensigon',
license=find_licence(),
author=author,
author_email=email,
description="Distributed Management and orchestration through RESTful, Mesh Networking and with a flair of IoT.",
long_description=long_description,
long_description_content_type="text/markdown",
test_suite="tests",
install_requires=required_packages(),
classifiers=[
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Operating System :: POSIX",
],
entry_points={'console_scripts': ["dshell=dimensigon.dshell.batch.dshell:main",
"dimensigon=dimensigon.__main__:main"]},
python_requires='>=3.6',
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
app.modules.logs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Logging module
"""
from flask_smorest import Blueprint
blp = Blueprint("Log", __name__, url_prefix="/logs", description="Logging module")
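# A minimal registration sketch (an assumption, not part of the original
# module): flask_smorest blueprints are typically registered on an Api
# instance; the config keys below are the ones flask_smorest requires.
#   from flask import Flask
#   from flask_smorest import Api
#   app = Flask(__name__)
#   app.config["API_TITLE"] = "Example API"     # hypothetical values
#   app.config["API_VERSION"] = "v1"
#   app.config["OPENAPI_VERSION"] = "3.0.2"
#   api = Api(app)
#   api.register_blueprint(blp)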
|
nilq/baby-python
|
python
|
import pytest
import numpy as np
from whatlies.language import SpacyLanguage
from whatlies.transformers import Pca
words = [
"prince",
"princess",
"nurse",
"doctor",
"banker",
"man",
"woman",
"cousin",
"neice",
"king",
"queen",
"dude",
"guy",
"gal",
"fire",
"dog",
"cat",
"mouse",
"red",
"blue",
"green",
"yellow",
"water",
"person",
"family",
"brother",
"sister",
]
# I'm loading in the spaCy model globally because it is much faster this way.
lang = SpacyLanguage("en_core_web_md")
@pytest.fixture
def embset():
return lang[words]
def test_set_title_works(embset):
ax = embset.plot_3d(annot=True, title="foobar")
assert ax.title._text == "foobar"
def test_correct_points_plotted(embset):
embset_plt = embset.transform(Pca(3))
ax = embset_plt.plot_3d(annot=True)
offset = ax.collections[0]._offsets3d
assert np.all(np.array(offset).T == embset_plt.to_X())
def test_correct_points_plotted_mapped(embset):
embset_plt = embset.transform(Pca(3))
ax = embset_plt.plot_3d("king", "red", "dog", annot=True)
offset = ax.collections[0]._offsets3d
king, red, dog = [v for v in np.array(offset)]
assert np.all(king == np.array([embset_plt[w] > embset_plt["king"] for w in words]))
assert np.all(red == np.array([embset_plt[w] > embset_plt["red"] for w in words]))
assert np.all(dog == np.array([embset_plt[w] > embset_plt["dog"] for w in words]))
def test_basic_dimensions_3d_chart(embset):
embset_plt = embset.transform(Pca(3))
ax = embset_plt.plot_3d(annot=True, title="foobar")
assert ax.xaxis.get_label_text() == "Dimension 0"
assert ax.yaxis.get_label_text() == "Dimension 1"
assert ax.zaxis.get_label_text() == "Dimension 2"
assert [t.get_text() for t in ax.texts] == words
def test_named_dimensions_3d_chart(embset):
ax = embset.transform(Pca(3)).plot_3d("king", "queen", "prince", annot=True)
assert ax.xaxis.get_label_text() == "king"
assert ax.yaxis.get_label_text() == "queen"
assert ax.zaxis.get_label_text() == "prince"
assert [t.get_text() for t in ax.texts] == words
def test_named_dimensions_3d_chart_rename(embset):
ax = embset.transform(Pca(3)).plot_3d(
"king", "queen", "prince", annot=True, x_label="x", y_label="y"
)
assert ax.xaxis.get_label_text() == "x"
assert ax.yaxis.get_label_text() == "y"
assert ax.zaxis.get_label_text() == "prince"
assert [t.get_text() for t in ax.texts] == words
|
nilq/baby-python
|
python
|
import functools
import numpy as np
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions
input_layer = pnl.TransferMechanism(
size=3,
name='Input Layer'
)
action_selection = pnl.TransferMechanism(
size=3,
function=psyneulink.core.components.functions.transferfunctions.SoftMax(
output=pnl.ALL,
gain=1.0),
output_ports={pnl.NAME: 'SELECTED ACTION',
pnl.VARIABLE:[(pnl.INPUT_PORT_VARIABLES, 0), (pnl.OWNER_VALUE, 0)],
pnl.FUNCTION: psyneulink.core.components.functions.selectionfunctions.OneHot(mode=pnl.PROB).function},
# output_ports={pnl.NAME: "SOFT_MAX",
# pnl.VARIABLE: (pnl.OWNER_VALUE,0),
# pnl.FUNCTION: pnl.SoftMax(output=pnl.PROB,gain=1.0)},
name='Action Selection'
)
p = pnl.Pathway(
pathway=([input_layer, action_selection], pnl.Reinforcement),
)
actions = ['left', 'middle', 'right']
reward_values = [10, 0, 0]
first_reward = 0
# Must initialize reward (won't be used, but needed for declaration of lambda function)
action_selection.output_port.value = [0, 0, 1]
# Get reward value for selected action
def reward(context=None):
"""Return the reward associated with the selected action"""
return [reward_values[int(np.nonzero(action_selection.output_port.parameters.value.get(context))[0])]]
def print_header(comp):
print("\n\n**** Time: ", comp.scheduler.get_clock(comp).simple_time)
def show_weights(comp):
comparator = action_selection.output_port.efferents[0].receiver.owner
learn_mech = action_selection.output_port.efferents[1].receiver.owner
print(
'\n'
'\naction_selection value: {} '
'\naction_selection output: {} '
'\ncomparator sample: {} '
'\ncomparator target: {} '
'\nlearning mech act in: {} '
'\nlearning mech act out: {} '
'\nlearning mech error in: {} '
'\nlearning mech error out: {} '
'\nlearning mech learning_sig: {} '
'\npredicted reward: {} '.format(
action_selection.parameters.value.get(comp),
action_selection.output_port.parameters.value.get(comp),
comparator.input_ports[pnl.SAMPLE].parameters.value.get(comp),
comparator.input_ports[pnl.TARGET].parameters.value.get(comp),
learn_mech.input_ports[pnl.ACTIVATION_INPUT].parameters.value.get(comp),
learn_mech.input_ports[pnl.ACTIVATION_OUTPUT].parameters.value.get(comp),
learn_mech.input_ports[pnl.ERROR_SIGNAL].parameters.value.get(comp),
learn_mech.output_ports[pnl.ERROR_SIGNAL].parameters.value.get(comp),
learn_mech.output_ports[pnl.LEARNING_SIGNAL].parameters.value.get(comp),
action_selection.output_port.parameters.value.get(comp)[np.nonzero(action_selection.output_port.parameters.value.get(comp))][0]
)
)
input_list = {input_layer: [[1, 1, 1]]}
c = pnl.Composition(pathways=[p])
print('reward prediction weights: \n', action_selection.input_port.path_afferents[0].matrix)
print('target_mechanism weights: \n', action_selection.output_port.efferents[0].matrix)
c.show_graph(show_learning=pnl.ALL)
c.learn(
num_trials=10,
inputs=input_list,
# FIX: PROPER FORMAT FOR ASSIGNING TARGET AS FUNCTION?
targets={action_selection:reward},
call_before_trial=functools.partial(print_header, c),
call_after_trial=functools.partial(show_weights, c)
)
|
nilq/baby-python
|
python
|
"""
This class provides functionality for managing a generic sqlite or mysql
database:
* reading specific fields (with the possibility to filter by field values)
* storing calculated values in the dataset
Created on May 11 2018
@author: Jerónimo Arenas García
"""
from __future__ import print_function    # For python 2 compatibility
import os
import pandas as pd
import MySQLdb
import sqlite3
import numpy as np
from tabulate import tabulate
import copy
import ipdb
class BaseDMsql(object):
"""
Data manager base class.
"""
def __init__(self, db_name, db_connector, path2project=None,
db_server=None, db_user=None, db_password=None):
"""
Initializes a DataManager object
Args:
db_name :Name of the DB
db_connector :Connector. Available options are mysql or sqlite
path2project :Path to the project folder (sqlite only)
db_server :Server (mysql only)
db_user :User (mysql only)
db_password :Password (mysql only)
"""
# Store paths to the main project folders and files
self._path2project = copy.copy(path2project)
self.dbname = db_name
self.connector = db_connector
self.server = db_server
self.user = db_user
self.password = db_password
# Other class variables
self.dbON = False # Will switch to True when the db was connected.
# Connector to database
self._conn = None
# Cursor of the database
self._c = None
# Try connection
try:
if self.connector == 'mysql':
self._conn = MySQLdb.connect(self.server, self.user,
self.password, self.dbname)
self._c = self._conn.cursor()
print("MySQL database connection successful")
self.dbON = True
self._conn.set_character_set('utf8')
elif self.connector == 'sqlite3':
# sqlite3
# sqlite file will be in the root of the project, we read the
# name from the config file and establish the connection
db_fname = os.path.join(self._path2project,
self.dbname + '.db')
print("---- Connecting to {}".format(db_fname))
self._conn = sqlite3.connect(db_fname)
self._c = self._conn.cursor()
self.dbON = True
else:
print("---- Unknown DB connector {}".format(self.connector))
except:
print("---- Error connecting to the database")
def __del__(self):
"""
When destroying the object, it is necessary to commit changes
in the database and close the connection
"""
try:
self._conn.commit()
self._conn.close()
except:
print("---- Error closing database")
def resetDBtables(self, tables=None):
"""
Delete existing database, and regenerate empty tables
Args:
tables: If string, name of the table to reset.
If list, list of tables to reset
                    If None (default), all tables are deleted and regenerated
                    (including those that might not have existed previously)
"""
        # If tables is None, all tables are deleted and re-generated
if tables is None:
# Delete all existing tables
for table in self.getTableNames():
self._c.execute("DROP TABLE " + table)
            # Create tables. No table list is passed, so that tables which
            # did not previously exist are created as well.
self.createDBtables()
else:
            # If tables is not a list, make the appropriate list
if type(tables) is str:
tables = [tables]
# Remove all selected tables (if exist in the database).
for table in set(tables) & set(self.getTableNames()):
self._c.execute("DROP TABLE " + table)
# All deleted tables are created again
self.createDBtables(tables)
self._conn.commit()
return
def resetDB(self):
"""
Deletes existing database, and regenerate empty tables
"""
if self.connector == 'mysql':
# In mysql we simply drop all existing tables
for tablename in self.getTableNames():
self._c.execute("DROP TABLE " + tablename)
self._conn.commit()
else:
# If sqlite3, we need to delete the file, and start over
try:
self._conn.commit()
self._conn.close()
except:
print("Error closing database")
# Delete sqlite3 file
db_fname = os.path.join(self._path2project, self.dbname + '.db')
os.remove(db_fname)
try:
self._conn = sqlite3.connect(db_fname)
self._c = self._conn.cursor()
except:
print("Error connecting to the database")
self.createDBtables()
def addTableColumn(self, tablename, columnname, columntype):
"""
Add a new column to the specified table.
Args:
tablename :Table to which the column will be added
columnname :Name of new column
columntype :Type of new column.
        Note that, for mysql, if the type is TEXT or VARCHAR, the character
        set is forced to be utf8.
"""
# Check if the table exists
if tablename in self.getTableNames():
# Check that the column does not already exist
if columnname not in self.getColumnNames(tablename):
# Fit characters to the allowed format if necessary
fmt = ''
if (self.connector == 'mysql' and
('TEXT' in columntype or 'VARCHAR' in columntype) and
not ('CHARACTER SET' in columntype or
'utf8' in columntype)):
                    # We need to enforce utf8 for mysql
fmt = ' CHARACTER SET utf8'
sqlcmd = ('ALTER TABLE ' + tablename + ' ADD COLUMN ' +
columnname + ' ' + columntype + fmt)
self._c.execute(sqlcmd)
# Commit changes
self._conn.commit()
else:
print(("WARNING: Column {0} already exist in table {1}."
).format(columnname, tablename))
else:
print('Error adding column to table. Please, select a valid ' +
'table name from the list')
print(self.getTableNames())
def dropTableColumn(self, tablename, columnname):
"""
Remove column from the specified table
Args:
tablename :Table to which the column will be added
columnname :Name of column to be removed
"""
# Check if the table exists
if tablename in self.getTableNames():
# Check that the column does not already exist
if columnname in self.getColumnNames(tablename):
# ALTER TABLE DROP COLUMN IS ONLY SUPPORTED IN MYSQL
if self.connector == 'mysql':
sqlcmd = ('ALTER TABLE ' + tablename + ' DROP COLUMN ' +
columnname)
self._c.execute(sqlcmd)
# Commit changes
self._conn.commit()
else:
print('Column drop not yet supported for SQLITE')
else:
print('Error deleting column. The column does not exist')
print(tablename, columnname)
else:
print('Error deleting column. Please, select a valid table name' +
' from the list')
print(self.getTableNames())
return
def readDBtable(self, tablename, limit=None, selectOptions=None,
filterOptions=None, orderOptions=None):
"""
Read data from a table in the database can choose to read only some
specific fields
Args:
tablename : Table to read from
selectOptions: string with fields that will be retrieved
(e.g. 'REFERENCIA, Resumen')
filterOptions: string with filtering options for the SQL query
(e.g., 'WHERE UNESCO_cd=23')
orderOptions: string with field that will be used for sorting the
results of the query
(e.g, 'Cconv')
limit: The maximum number of records to retrieve
"""
try:
# Check that table name is valid
if tablename in self.getTableNames():
sqlQuery = 'SELECT '
if selectOptions:
sqlQuery = sqlQuery + selectOptions
else:
sqlQuery = sqlQuery + '*'
sqlQuery = sqlQuery + ' FROM ' + tablename + ' '
if filterOptions:
sqlQuery = sqlQuery + ' WHERE ' + filterOptions
if orderOptions:
sqlQuery = sqlQuery + ' ORDER BY ' + orderOptions
if limit:
sqlQuery = sqlQuery + ' LIMIT ' + str(limit)
# This is to update the connection to changes by other
# processes.
self._conn.commit()
                # Return the pandas dataframe. Note that numbers stored in
                # text format are not converted to numeric types
                # (coerce_float=False).
return pd.read_sql(sqlQuery, con=self._conn,
coerce_float=False)
else:
print('Error in query. Please, select a valid table name ' +
'from the list')
print(self.getTableNames())
except Exception as E:
print(str(E))
def getTableNames(self):
"""
Returns a list with the names of all tables in the database
"""
# The specific command depends on whether we are using mysql or sqlite
if self.connector == 'mysql':
sqlcmd = ("SELECT table_name FROM INFORMATION_SCHEMA.TABLES " +
"WHERE table_schema='" + self.dbname + "'")
else:
sqlcmd = "SELECT name FROM sqlite_master WHERE type='table'"
self._c.execute(sqlcmd)
tbnames = [el[0] for el in self._c.fetchall()]
return tbnames
def getColumnNames(self, tablename):
"""
Returns a list with the names of all columns in the indicated table
Args:
tablename: the name of the table to retrieve column names
"""
# Check if tablename exists in database
if tablename in self.getTableNames():
# The specific command depends on whether we are using mysql or
# sqlite
if self.connector == 'mysql':
sqlcmd = "SHOW COLUMNS FROM " + tablename
self._c.execute(sqlcmd)
columnnames = [el[0] for el in self._c.fetchall()]
else:
sqlcmd = "PRAGMA table_info(" + tablename + ")"
self._c.execute(sqlcmd)
columnnames = [el[1] for el in self._c.fetchall()]
return columnnames
else:
print('Error retrieving column names: Table does not exist on ' +
'database')
return []
def getTableInfo(self, tablename):
# Get columns
cols = self.getColumnNames(tablename)
# Get number of rows
sqlcmd = "SELECT COUNT(*) FROM " + tablename
self._c.execute(sqlcmd)
n_rows = self._c.fetchall()[0][0]
return cols, n_rows
def showTable(self, tablename, max_rows=500, max_width=200):
""" A simple method to display the content of a single table.
Args:
            max_rows: Maximum number of rows to display. If the size of
the table is higher, only the first max_rows rows
are shown
            max_width: Maximum width of the table to display. If the size
of the table is higher, the tabulate environment
is not used and only a table heading is shown
"""
title = "= Database {} ====================".format(self.dbname)
print("="*len(title))
print(title)
print("="*len(title))
print("")
print("==== Table {} ".format(tablename))
cols, n_rows = self.getTableInfo(tablename)
df = self.readDBtable(tablename, limit=max_rows, selectOptions=None,
filterOptions=None, orderOptions=None)
txt = tabulate(df, headers='keys', tablefmt='psql')
txt_width = max(len(z) for z in txt.split('\n'))
if txt_width > max_width:
print('---- The table is too wide (up to {}'.format(txt_width) +
' characters per line). Showing a portion of the table ' +
'header only')
print(df.head(25))
else:
print(txt)
return
def insertInTable(self, tablename, columns, arguments):
"""
Insert new records into table
Args:
tablename: Name of table in which the data will be inserted
columns: Name of columns for which data are provided
arguments: A list of lists or tuples, each element associated
to one new entry for the table
"""
# Make sure columns is a list, and not a single string
if not isinstance(columns, (list,)):
columns = [columns]
ncol = len(columns)
if len(arguments[0]) == ncol:
# Make sure the tablename is valid
if tablename in self.getTableNames():
# Make sure we have a list of tuples; necessary for mysql
arguments = list(map(tuple, arguments))
# # Update DB entries one by one.
# for arg in arguments:
# # sd
# sqlcmd = ('INSERT INTO ' + tablename + '(' +
# ','.join(columns) + ') VALUES(' +
# ','.join('{}'.format(a) for a in arg) + ')'
# )
# try:
# self._c.execute(sqlcmd)
# except:
# import ipdb
# ipdb.set_trace()
sqlcmd = ('INSERT INTO ' + tablename +
'(' + ','.join(columns) + ') VALUES (')
if self.connector == 'mysql':
sqlcmd += '%s' + (ncol-1)*',%s' + ')'
else:
sqlcmd += '?' + (ncol-1)*',?' + ')'
self._c.executemany(sqlcmd, arguments)
# Commit changes
self._conn.commit()
else:
print('Error inserting data in table: number of columns mismatch')
return
def setField(self, tablename, keyfld, valueflds, values):
"""
Update records of a DB table
Args:
tablename: Table that will be modified
keyfld: string with the column name that will be used as key
(e.g. 'REFERENCIA')
valueflds: list with the names of the columns that will be updated
(e.g., 'Lemas')
values: A list of tuples in the format
(keyfldvalue, valuefldvalue)
(e.g., [('Ref1', 'gen celula'),
('Ref2', 'big_data, algorithm')])
"""
# Make sure valueflds is a list, and not a single string
if not isinstance(valueflds, (list,)):
valueflds = [valueflds]
ncol = len(valueflds)
if len(values[0]) == (ncol+1):
# Make sure the tablename is valid
if tablename in self.getTableNames():
# Update DB entries one by one.
                # WARNING: THIS VERSION MAY NOT WORK PROPERLY IF v CONTAINS
                # A STRING WITH DOUBLE QUOTES (").
for v in values:
sqlcmd = ('UPDATE ' + tablename + ' SET ' +
', '.join(['{0} ="{1}"'.format(f, v[i + 1])
for i, f in enumerate(valueflds)]) +
' WHERE {0}="{1}"'.format(keyfld, v[0]))
self._c.execute(sqlcmd)
# This is the old version: it might not have the problem of
# the above version, but did not work properly with sqlite.
# # Make sure we have a list of tuples; necessary for mysql
# # Put key value last in the tuples
# values = list(map(circ_left_shift, values))
# sqlcmd = 'UPDATE ' + tablename + ' SET '
# if self.connector == 'mysql':
# sqlcmd += ', '.join([el+'=%s' for el in valueflds])
# sqlcmd += ' WHERE ' + keyfld + '=%s'
# else:
# sqlcmd += ', '.join([el+'=?' for el in valueflds])
# sqlcmd += ' WHERE ' + keyfld + '=?'
# self._c.executemany(sqlcmd, values)
# Commit changes
self._conn.commit()
else:
print('Error updating table values: number of columns mismatch')
return
def upsert(self, tablename, keyfld, df):
"""
Update records of a DB table with the values in the df
This function implements the following additional functionality:
            * If there are columns in df that are not in the SQL table,
columns will be added
* New records will be created in the table if there are rows
in the dataframe without an entry already in the table. For this,
keyfld indicates which is the column that will be used as an
index
Args:
tablename: Table that will be modified
keyfld: string with the column name that will be used as key
(e.g. 'REFERENCIA')
df: Dataframe that we wish to save in table tablename
"""
# Check that table exists and keyfld exists both in the Table and the
# Dataframe
if tablename in self.getTableNames():
if not ((keyfld in df.columns) and
(keyfld in self.getColumnNames(tablename))):
print("Upsert function failed: Key field does not exist",
"in the selected table and/or dataframe")
return
else:
print('Upsert function failed: Table does not exist')
return
# Reorder dataframe to make sure that the key field goes first
flds = [keyfld] + [x for x in df.columns if x != keyfld]
df = df[flds]
# Create new columns if necessary
for clname in df.columns:
if clname not in self.getColumnNames(tablename):
if df[clname].dtypes == np.float64:
self.addTableColumn(tablename, clname, 'DOUBLE')
else:
if df[clname].dtypes == np.int64:
self.addTableColumn(tablename, clname, 'INTEGER')
else:
self.addTableColumn(tablename, clname, 'TEXT')
# Check which values are already in the table, and split
# the dataframe into records that need to be updated, and
# records that need to be inserted
keyintable = self.readDBtable(tablename, limit=None,
selectOptions=keyfld)
keyintable = keyintable[keyfld].tolist()
values = [tuple(x) for x in df.values]
values_insert = list(filter(lambda x: x[0] not in keyintable, values))
values_update = list(filter(lambda x: x[0] in keyintable, values))
if len(values_update):
self.setField(tablename, keyfld, df.columns[1:].tolist(),
values_update)
if len(values_insert):
self.insertInTable(tablename, df.columns.tolist(), values_insert)
return
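    # A hedged usage sketch (table and column names are illustrative, not from
    # the original source):
    #   dm = BaseDMsql('mydb', 'sqlite3', path2project='.')
    #   df = pd.DataFrame({'REFERENCIA': ['Ref1', 'Ref2'], 'Lemas': ['a', 'b']})
    #   dm.upsert('mytable', 'REFERENCIA', df)
    # Rows whose key already exists are updated, the remaining rows are
    # inserted, and any new dataframe columns are added to the table first.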
def exportTable(self, tablename, fileformat, path, filename, cols=None):
"""
Export columns from a table to a file.
Args:
:tablename: Name of the table
:fileformat: Type of output file. Available options are
- 'xlsx'
- 'pkl'
            :path: Route to the output folder
:filename: Name of the output file
            :cols: Columns to save. It can be a list or a string
of comma-separated columns.
If None, all columns saved.
"""
# Path to the output file
fpath = os.path.join(path, filename)
# Read data:
        if isinstance(cols, list):
options = ','.join(cols)
else:
options = cols
df = self.readDBtable(tablename, selectOptions=options)
# ######################
# Export results to file
if fileformat == 'pkl':
df.to_pickle(fpath)
else:
df.to_excel(fpath)
return
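    # Example call (a hedged sketch with illustrative names): export all
    # columns of 'mytable' to an Excel file in the current folder.
    #   dm.exportTable('mytable', 'xlsx', path='.', filename='mytable.xlsx')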
|
nilq/baby-python
|
python
|
"""Jurisdictions are a small complete list.
Thus they can be operated from a dictionary.
Still, persisted in DB for query consitency.
Thus maintaining both synchronised and using at convenience."""
from db import Session
from db import Jurisdiction
from .lazyinit import _cached_jurisdictions
from .api_wikipedia import import_countries
from .statements import get_jurisdictions_statement
# from . import debug
debug = False
def _jurisdiction_by_code(s, code):
return s.query(Jurisdiction).filter(Jurisdiction.code == code).first()
def get_jurisdiction_code(id):
s = Session()
result = s.query(Jurisdiction).get(id).code
s.close()
return result
def jurisdiction_by_code(code):
return _cached_jurisdictions[code] if code in _cached_jurisdictions else _cached_jurisdictions["XX"]
def _query_db_cache(s):
return s.query(Jurisdiction).all()
def _load_db_cache(s):
global _cached_jurisdictions
committed = _query_db_cache(s)
    for row in committed:
        _cached_jurisdictions[row.code] = row.id
return len(_cached_jurisdictions)
def cached_jurisdictions():
if not _cached_jurisdictions:
s = Session()
if len(_query_db_cache(s)) == 0:
            new_rows = []
            for country in import_countries():
                new_rows.append(Jurisdiction(name=country["name"], code=country["code"]))
            new_rows.append(Jurisdiction(name="Unknown", code="XX"))
            s.add_all(new_rows)
s.commit()
_load_db_cache(s)
if debug: print(_cached_jurisdictions)
s.close()
return _cached_jurisdictions
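# A hedged usage sketch: the first call populates the DB (via import_countries)
# and warms the in-memory cache; subsequent lookups are plain dict access.
#   cached_jurisdictions()             # warm the cache
#   jid = jurisdiction_by_code("ES")   # -> jurisdiction id, or the "XX" fallback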
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Utility functions.
import sys
import os
import re
# Prints passed objects to stderr.
def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)
# Converts passed string by uppercasing first letter.
firstLetterToUppercase = lambda s: s[:1].upper() + s[1:] if s else ''
# Converts passed string by lowercasing first letter.
firstLetterToLowercase = lambda s: s[:1].lower() + s[1:] if s else ''
# Converts description in form of a sentence (words separated by
# spaces, ends with period) into a camel case form.
def descriptionToCamelCase(command):
words = []
for word in command.split():
words.append(firstLetterToUppercase(word))
words[0] = firstLetterToLowercase(words[0])
out = "".join(words)
out = re.sub(' ', '', out)
    out = re.sub(r'\.', '', out)
return "__"+out
# Converts text in form of camel case into a sentence (First
# letter of first word in upper case, words separated by spaces,
# ends with period).
def camelCaseToDescription(command):
command = command.strip('_')
command = re.sub(r'([A-Z])',r' \1',command)
command = command.lower()
return firstLetterToUppercase(command)+"."
# Returns the file's lines as a list of strings.
def getFileContents(fileName):
with open(fileName) as f:
return f.readlines()
def underscoreToCamelcase(command):
out = ""
command = command.strip('_')
command = command.strip(' ')
tokens = command.split('_')
first = True
for token in tokens:
token = token.lower()
if not first:
token = firstLetterToUppercase(token)
out += token
first = False
return out
def camelcaseToUnderscore(command):
command = command.strip(' ')
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', command)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
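# Round-trip examples for the converters above:
#
#     underscoreToCamelcase('foo_bar_baz')          # -> 'fooBarBaz'
#     camelcaseToUnderscore('fooBarBaz')            # -> 'foo_bar_baz'
#     descriptionToCamelCase('Do something cool.')  # -> '__doSomethingCool'
#     camelCaseToDescription('__doSomethingCool')   # -> 'Do something cool.'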
import collections.abc
class OrderedSet(collections.abc.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
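if __name__ == '__main__':
    # Small self-check of OrderedSet: insertion order is preserved,
    # duplicates are ignored, and pop() removes from the requested end.
    s = OrderedSet('abracadabra')
    assert list(s) == ['a', 'b', 'r', 'c', 'd']
    s.discard('r')
    assert list(s) == ['a', 'b', 'c', 'd']
    assert s.pop() == 'd' and s.pop(last=False) == 'a'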
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.1 on 2018-09-18 12:24
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Products',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('price', models.IntegerField(default=0)),
('quantity', models.IntegerField(default=0)),
('created', models.DateTimeField(verbose_name='created date')),
('updated_date', models.DateTimeField(verbose_name='updated date')),
],
),
]
|
nilq/baby-python
|
python
|
import math
import numpy as np
from mlpy.numberGenerator.bounds import Bounds
from experiments.problems.functions.structure.function import Function
class Elliptic(Function):
def function(self, x):
return np.sum(np.power(np.power(10., 6 ), np.divide(np.arange(len(x)), np.subtract(x, 1.))))
def getBounds(self):
return Bounds(-100, 100)
def test(self):
assert(1 == self.function(np.array([5])))
assert(math.pow(10, 6) + 1 == self.function(np.array([5, 2])))
|
nilq/baby-python
|
python
|
"""
Created by Constantin Philippenko, 18th January 2022.
"""
import matplotlib
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'text.latex.preamble': r'\usepackage{amsfonts}'
})
import hashlib
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from src.PickleHandler import pickle_saver, pickle_loader
from src.Utilities import create_folder_if_not_existing
from src.hyperparameters_exploration import Explorer
from src.hyperparameters_exploration.Hyperparameters import Hyperparameters
from src.hyperparameters_exploration.Metric import Metric
class Exploration:
def __init__(self, name, hyperparameters: Hyperparameters, explorer: Explorer, metrics: Metric):
# super().__init__()
self.name = name
self.hyperparameters = hyperparameters
self.explorer = explorer
self.metrics = metrics
self.nb_runs = 2
self.results = np.zeros((self.explorer.nb_outputs, self.nb_runs, self.hyperparameters.nb_hyperparams))
self.string_before_hash = str(self.hyperparameters.range_hyperparameters)
self.hash_string = self.explorer.function.__name__ + "-" + hashlib.shake_256(self.string_before_hash.encode()).hexdigest(4) # returns a hash value of length 2*4
self.pickle_folder = "./pickle/exploration/"
self.pictures_folder = "./pictures/exploration/"
create_folder_if_not_existing(self.pickle_folder)
create_folder_if_not_existing(self.pictures_folder)
def run_exploration(self):
print("====> Starting exploration : ", self.name)
for idx_param in range(self.hyperparameters.nb_hyperparams):
param = self.hyperparameters.range_hyperparameters[idx_param]
print("Hyperparameter's value:", param)
# self.blockPrint()
for idx_run in range(self.nb_runs):
output = self.explorer.explore(param)
for i in range(len(output)):
self.results[i, idx_run, idx_param] = self.metrics.compute(output[i])
pickle_saver(self, self.pickle_folder + self.hash_string)
self.enablePrint()
def load(self):
self.results = pickle_loader(self.pickle_folder + self.hash_string).results[:,:,:-1]
self.hyperparameters.range_hyperparameters = self.hyperparameters.range_hyperparameters[:-1]
self.hyperparameters.nb_hyperparams -= 1
def plot_exploration(self):
fig, ax = plt.subplots(figsize=(8, 7))
for i in range(len(self.explorer.outputs_label)):
plt.errorbar(range(self.hyperparameters.nb_hyperparams), np.mean(self.results[i], axis=0),
yerr=np.std(self.results[i], axis=0),
label=self.explorer.outputs_label[i],
lw=4)
plt.xticks([i for i in range(0, len(self.hyperparameters.range_hyperparameters))],
self.hyperparameters.range_hyperparameters,
rotation=30, fontsize=15)
plt.yticks(fontsize=15)
ax.set_xlabel(self.hyperparameters.x_axis_label, fontsize=15)
ax.set_ylabel(self.metrics.y_axis_label, fontsize=15)
plt.title(self.hyperparameters.name, fontsize=15)
plt.legend(loc='best', fontsize=15)
ax.grid()
plt.savefig('{0}.eps'.format(self.pictures_folder + self.hash_string), format='eps')
plt.close()
# Disable
def blockPrint(self):
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint(self):
sys.stdout = sys.__stdout__
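# Usage sketch (hypothetical Hyperparameters/Explorer/Metric instances):
#
#     exploration = Exploration("my-exploration", hyperparameters, explorer, metric)
#     exploration.run_exploration()   # pickles results under ./pickle/exploration/
#     exploration.plot_exploration()  # saves an .eps under ./pictures/exploration/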
|
nilq/baby-python
|
python
|
x = 10.4
y = 3.5
x -= y
print(x)
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.7 on 2021-03-15 22:30
from django.db import migrations, models
import django.db.models.deletion
import manager.storage
import projects.models.sources
class Migration(migrations.Migration):
dependencies = [
('socialaccount', '0003_extra_data_default_dict'),
('projects', '0029_auto_20210210_2340'),
]
operations = [
migrations.AddField(
model_name='googledocssource',
name='social_app',
field=models.ForeignKey(blank=True, help_text='The OAuth client that this Google Doc was linked using.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='socialaccount.socialapp'),
),
]
|
nilq/baby-python
|
python
|
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.utils import class_weight
import numpy as np
tf.keras.backend.set_floatx('float64')
drop = 0.5
img_size = (128, 128)
model = Sequential([
Conv2D(8, 5, activation = 'relu', input_shape = (img_size[0], img_size[1], 1)),
MaxPool2D(3),
Conv2D(16, 4, activation = 'relu'),
MaxPool2D(2),
Conv2D(32, 3, activation = 'relu'),
Flatten(),
Dense(32, activation = 'relu'),
Dropout(drop),
Dense(8, activation = 'relu'),
Dense(3, activation = 'softmax')
])
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
datagen = ImageDataGenerator(
rescale = 1. / 255.,
shear_range = 0.2,
zoom_range = 0.05,
rotation_range = 10,
width_shift_range = 0.1,
height_shift_range = 0.05,
brightness_range = [1, 1.5],
horizontal_flip = True,
dtype = tf.float64)
train_generator = datagen.flow_from_directory(
'Dataset/Train',
target_size = img_size,
color_mode = 'grayscale',
batch_size = 32,
shuffle = True,
class_mode='categorical')
test_datagen = ImageDataGenerator(
rescale = 1. / 255.,
dtype = tf.float64)
test_generator = test_datagen.flow_from_directory(
'Dataset/Test',
target_size = img_size,
color_mode = 'grayscale',
batch_size = 16,
shuffle = True,
class_mode='categorical')
class_weights = class_weight.compute_class_weight(
    class_weight='balanced',
    classes=np.unique(train_generator.classes),
    y=train_generator.classes)
# Keras expects class_weight as a dict mapping class index -> weight
class_weights = dict(enumerate(class_weights))
model.fit(train_generator,
epochs = 10,
shuffle = True,
validation_data = test_generator,
class_weight = class_weights,
workers = 8,
max_queue_size = 512)
model.save('saved/saved.h5')
|
nilq/baby-python
|
python
|
import json
import os
import copy
class JsonProcessorFile(object):
"""Generate a dict of processing options that exist in a dictionary of dictionaries. Allow renaming
of the fields. The results of this class is used to flatten out a JSON into CSV style.
For example the following Dict below will generate another dictionary outlined below.
Limitations: only works with 2 levels of dictionaries
.. code-block:: python
{
"internal_loads_multiplier": {
"lpd_multiplier": 0.7544625053841931,
"epd_multiplier": 1.0,
"people_per_floor_area_multiplier": 0.8572429796331562,
"lpd_average": 7.30887013864965,
"epd_average": 8.07293281253229,
"ppl_average": 0.046136433190623,
"applicable": true
},
}
.. code-block:: python
        {
            'level_1': 'internal_loads_multiplier',
            'level_2': 'lpd_multiplier',
            'rename_to': '',
            'order': 1
        },
        {
            'level_1': 'internal_loads_multiplier',
            'level_2': 'epd_multiplier',
            'rename_to': '',
            'order': 1
        },
        {
            'level_1': 'internal_loads_multiplier',
            'level_2': 'lpd_average',
            'rename_to': '',
            'order': 1
        },
"""
def __init__(self, json_files):
"""
:param json_files: list of files to process
"""
self.files = json_files
self.data = []
self.process()
def process(self):
"""Process the list of json files"""
for file in self.files:
data = {
"file": os.path.basename(file),
"data": []
}
            with open(file) as fp:
                contents = json.load(fp)
                for k, v in contents.items():
new_var = {
"level_1": k,
"level_2": None,
"rename_to": "", # if there is no rename_to, then the name is set to the key
"order": 1, # if there are duplicates, then the fields will be sorted alphabetically
}
if isinstance(v, dict):
# The value is a dict, so process the dict values too
for k2, v2 in v.items():
new_var_2 = copy.deepcopy(new_var)
new_var_2["level_2"] = k2
data["data"].append(new_var_2)
else:
# single key -- just save the new variable
data["data"].append(new_var)
self.data.append(data)
def save_as(self, filename):
"""Save the format to be used in the post_processor scripts"""
if os.path.exists(filename):
print(f"File already exists, will not overwrite, {filename}")
return False
else:
with open(filename, 'w') as f:
json.dump(self.data, f, indent=2)
return True
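# Usage sketch (hypothetical file names):
#
#     processor = JsonProcessorFile(['results_a.json', 'results_b.json'])
#     processor.save_as('processing_options.json')  # refuses to overwrite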
|
nilq/baby-python
|
python
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
import pytest
import torch.nn.functional as F
from op_tester import op_tester
# `import test_util` requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
def test_scaledadd_constant(op_tester):
d1 = np.random.rand(2).astype(np.float32)
d2 = np.random.rand(2).astype(np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiGraphcore.scaledadd([i1, i2], scale0=0.5, scale1=0.8)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, requires_grad=False)
t2 = torch.tensor(d2, requires_grad=False)
out = 0.5 * t1 + 0.8 * t2
return [out]
op_tester.setPatterns(['PreUniRepl', 'MulArgGradOp'],
enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, step_type='infer')
def test_scaledadd_tensor(op_tester):
d1 = np.random.rand(2).astype(np.float32)
d2 = np.random.rand(2).astype(np.float32)
d3 = np.random.rand(1).astype(np.float32)
d4 = np.random.rand(1).astype(np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
i3 = builder.addInputTensor(d3)
i4 = builder.addInputTensor(d4)
o = builder.aiGraphcore.scaledadd([i1, i2, i3, i4])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, requires_grad=False)
t2 = torch.tensor(d2, requires_grad=False)
t3 = torch.tensor(d3, requires_grad=False)
t4 = torch.tensor(d4, requires_grad=False)
out = t3 * t1 + t4 * t2
return [out]
op_tester.setPatterns(['PreUniRepl', 'MulArgGradOp'],
enableRuntimeAsserts=False)
op_tester.run(init_builder, reference, step_type='infer')
|
nilq/baby-python
|
python
|
import argparse
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from pytorch_lightning import seed_everything
from sklearn.decomposition import PCA
from sggm.data.uci_boston.datamodule import (
UCIBostonDataModule,
UCIBostonDataModuleShifted,
)
from sggm.data.uci_ccpp.datamodule import (
UCICCPPDataModule,
UCICCPPDataModuleShifted,
COLUMNS as uci_ccpp_columns,
)
from sggm.data.uci_concrete.datamodule import (
UCIConcreteDataModule,
UCIConcreteDataModuleShifted,
COLUMNS as uci_concrete_columns,
)
from sggm.data.uci_wine_red.datamodule import (
UCIWineRedDataModule,
UCIWineRedDataModuleShifted,
COLUMNS as uci_wine_red_columns,
)
from sggm.data.uci_wine_white.datamodule import (
UCIWineWhiteDataModule,
UCIWineWhiteDataModuleShifted,
COLUMNS as uci_wine_white_columns,
)
from sggm.data.uci_yacht.datamodule import (
UCIYachtDataModule,
UCIYachtDataModuleShifted,
UCIYachtDataModuleShiftedSplit,
COLUMNS as uci_yacht_columns,
)
from sggm.definitions import (
FASHION_MNIST,
UCI_BOSTON,
UCI_CONCRETE,
UCI_CCPP,
UCI_SUPERCONDUCT,
UCI_WINE_RED,
UCI_WINE_WHITE,
UCI_YACHT,
)
def main(experiment_name, with_pca=False):
# Order of plotting
TEST_FIRST = True
# Investigate shift effect on pairplot
SHIFTED = False
sp_tot = 0.3
sp_k = 0.0002
TEST_FIRST = False if SHIFTED else TEST_FIRST
seed_everything(123)
# Get correct datamodule
bs = 10000
if experiment_name == UCI_BOSTON:
dm = (
UCIBostonDataModuleShifted(
bs, 0, shifting_proportion_total=sp_tot, shifting_proportion_k=sp_k
)
if SHIFTED
else UCIBostonDataModule(bs, 0)
)
columns = [i for i in range(1, 15)]
elif experiment_name == UCI_CCPP:
dm = (
UCICCPPDataModuleShifted(
bs, 0, shifting_proportion_total=sp_tot, shifting_proportion_k=sp_k
)
if SHIFTED
else UCICCPPDataModule(bs, 0)
)
columns = uci_ccpp_columns
elif experiment_name == UCI_CONCRETE:
dm = (
UCIConcreteDataModuleShifted(
bs, 0, shifting_proportion_total=sp_tot, shifting_proportion_k=sp_k
)
if SHIFTED
else UCIConcreteDataModule(bs, 0)
)
columns = uci_concrete_columns
elif experiment_name == UCI_WINE_RED:
dm = (
UCIWineRedDataModuleShifted(
bs, 0, shifting_proportion_total=sp_tot, shifting_proportion_k=sp_k
)
if SHIFTED
else UCIWineRedDataModule(bs, 0)
)
columns = uci_wine_red_columns
elif experiment_name == UCI_WINE_WHITE:
dm = (
UCIWineWhiteDataModuleShifted(
bs, 0, shifting_proportion_total=sp_tot, shifting_proportion_k=sp_k
)
if SHIFTED
else UCIWineWhiteDataModule(bs, 0)
)
columns = uci_wine_white_columns
elif experiment_name == UCI_YACHT:
dm = (
UCIYachtDataModuleShifted(
bs, 0, shifting_proportion_total=sp_tot, shifting_proportion_k=sp_k
)
if SHIFTED
else UCIYachtDataModule(bs, 0)
)
# dm = UCIYachtDataModuleShiftedSplit(bs, 0)
columns = uci_yacht_columns
dm.setup()
# Extract data
train = next(iter(dm.train_dataloader()))
val = next(iter(dm.val_dataloader()))
test = next(iter(dm.test_dataloader()))
print(
f"N_train={len(dm.train_dataset)}, N_val={len(dm.val_dataset)}, N_test={len(dm.test_dataset)}"
)
# 1 = train, 2 = val, 3 = test
df_columns = columns + ["dataset"]
df = pd.DataFrame(columns=df_columns)
if TEST_FIRST:
dataset_order = [test, val, train]
dataset_names = ["test", "val", "train"]
else:
dataset_order = [train, val, test]
dataset_names = ["train", "val", "test"]
for idx_ds, ds in enumerate(dataset_order):
x, y = ds
dump = np.concatenate(
(x.numpy(), y.numpy(), idx_ds * torch.ones_like(y).numpy()), axis=1
)
update_df = pd.DataFrame(dump, columns=df_columns)
        # DataFrame.append was removed in pandas 2.0; use pd.concat instead
        df = pd.concat([df, update_df], ignore_index=True)
# correct dataset name
df["dataset"] = df["dataset"].map({i: v for i, v in enumerate(dataset_names)})
sns.pairplot(
df, hue="dataset", palette=sns.color_palette("Set2", len(dataset_names))
)
if with_pca:
        pca = PCA(n_components=5)
        # Fit PCA on the feature columns only, dropping the trailing
        # "dataset" label column (not the last row)
        pca.fit(df.values[:, :-1])
        print(pca.explained_variance_ratio_)
        print(pca.singular_values_)
        pca_x = pca.transform(df.values[:, :-1])
fig, ax = plt.subplots(1, 1)
ax.scatter(pca_x[:, 0], pca_x[:, 1])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--experiment_name",
type=str,
required=True,
choices=[
UCI_BOSTON,
UCI_CONCRETE,
UCI_CCPP,
UCI_SUPERCONDUCT,
UCI_WINE_RED,
UCI_WINE_WHITE,
UCI_YACHT,
],
)
args = parser.parse_args()
main(args.experiment_name)
plt.show()
|
nilq/baby-python
|
python
|
import PySimpleGUI as sg
from NetLogoDOE.src.gui.custom_components import title, question_mark_button
from NetLogoDOE.src.gui.custom_windows import show_help_window
from NetLogoDOE.src.gui.help_dictionary import help_text
class StandardResultsScreen:
def __init__(self):
button_size = (30, 1)
button_pad = ((5, 5), (20, 5))
self.layout = [[title("Reporter value analysis results")],
[sg.Frame(title='Plots', border_width=1, relief='solid', layout=
[[sg.Button('Timeseries', key='standard_results_timeseries_button',
size=button_size, pad=button_pad),
question_mark_button('standard_results_timeseries_help_button', padding=button_pad)],
[sg.Button('Boxplot', key='standard_results_boxplot_button',
size=button_size, pad=button_pad),
question_mark_button('standard_results_boxplot_help_button', padding=button_pad)],
[sg.Button('Violin plot', key='standard_results_violinplot_button',
size=button_size, pad=button_pad),
question_mark_button('standard_results_violinplot_help_button', padding=button_pad)],
[sg.Button('Histogram', key='standard_results_histogram_button',
size=button_size, pad=button_pad),
question_mark_button('standard_results_histogram_help_button', padding=button_pad)],
[sg.Button('Distribution plot', key='standard_results_distplot_button',
size=button_size, pad=button_pad),
question_mark_button('standard_results_distplot_help_button', padding=button_pad)]])],
[sg.Button('Experiment Configuration Information', key='standard_results_configtable_button',
size=button_size, pad=button_pad),
question_mark_button('standard_results_configtable_help_button', padding=button_pad)],
[sg.Input(key='standard_results_dummy_export', enable_events=True, visible=False, size=(0, 0)),
sg.SaveAs('Save Results', file_types=[("Text Files", "*.txt")],
target='standard_results_dummy_export', key="standard_results_save_button",
size=button_size, pad=button_pad),
question_mark_button('standard_results_save_help_button', padding=button_pad)],
[sg.Button('Back to main menu', key='standard_results_back_button', pad=button_pad)]]
self.results = None
def check_events(self, event, values, window):
if event == 'standard_write_results_event':
self.results = values['standard_write_results_event']
if event == 'standard_results_configtable_button':
window['standard_result_panel'].update(visible=False)
window['standard_configtable_panel'].update(visible=True)
if event == 'standard_results_timeseries_button':
window['standard_result_panel'].update(visible=False)
window['timeseries_panel'].update(visible=True)
if event == 'standard_results_boxplot_button':
window['standard_result_panel'].update(visible=False)
window['boxplot_panel'].update(visible=True)
if event == 'standard_results_violinplot_button':
window['standard_result_panel'].update(visible=False)
window['violinplot_panel'].update(visible=True)
if event == 'standard_results_histogram_button':
window['standard_result_panel'].update(visible=False)
window['histogram_panel'].update(visible=True)
if event == 'standard_results_distplot_button':
window['standard_result_panel'].update(visible=False)
window['distplot_panel'].update(visible=True)
if event == 'standard_results_dummy_export' and not (values['standard_results_dummy_export'] == ''):
self.export_standard_results(values, values['standard_results_dummy_export'])
if event == 'standard_results_back_button':
window['standard_result_panel'].update(visible=False)
window['main_panel'].update(visible=True)
# Help events
if event == 'standard_results_configtable_help_button':
show_help_window(help_text['config_information'],
location=(window.CurrentLocation()[0] - ((434 - window.size[0]) / 2),
window.CurrentLocation()[1] + 100))
if event == 'standard_results_timeseries_help_button':
show_help_window(help_text['timeseries'],
location=(window.CurrentLocation()[0] - ((434 - window.size[0]) / 2),
window.CurrentLocation()[1] + 100))
if event == 'standard_results_boxplot_help_button':
show_help_window(help_text['boxplot'],
location=(window.CurrentLocation()[0] - ((434 - window.size[0]) / 2),
window.CurrentLocation()[1] + 100))
if event == 'standard_results_violinplot_help_button':
show_help_window(help_text['violinplot'],
location=(window.CurrentLocation()[0] - ((434 - window.size[0]) / 2),
window.CurrentLocation()[1] + 100))
if event == 'standard_results_histogram_help_button':
show_help_window(help_text['histogram'],
location=(window.CurrentLocation()[0] - ((434 - window.size[0]) / 2),
window.CurrentLocation()[1] + 100))
if event == 'standard_results_distplot_help_button':
show_help_window(help_text['distributionplot'],
location=(window.CurrentLocation()[0] - ((434 - window.size[0]) / 2),
window.CurrentLocation()[1] + 100))
if event == 'standard_results_save_help_button':
show_help_window(help_text['save_results'],
location=(window.CurrentLocation()[0] - ((434 - window.size[0]) / 2),
window.CurrentLocation()[1] + 100))
def export_standard_results(self, values, file_path):
results_dict = {}
results_dict['Configuration'] = self.results[2]
results_dict['Parameter settings'] = self.results[0]
results_dict['Reporter values'] = self.results[1]
f = open(file_path, "w")
f.write(str(results_dict))
f.close()
|
nilq/baby-python
|
python
|
from models.gru_net import GRUNet
from models.res_gru_net import ResidualGRUNet
from models.multi_res_gru_net import MultiResidualGRUNet
from models.multi_seres_gru_net import MultiSEResidualGRUNet
from models.multi_res2d3d_gru_net import MultiResidual2D3DGRUNet
from models.multi_seres2d3d_gru_net import MultiSEResidual2D3DGRUNet
MODELS = (GRUNet, ResidualGRUNet, MultiResidualGRUNet, MultiSEResidualGRUNet, MultiResidual2D3DGRUNet, MultiSEResidual2D3DGRUNet)
def get_models():
'''Returns a tuple of sample models.'''
return MODELS
def load_model(name):
    '''Returns the model class given its name.
    The caller is responsible for instantiating the returned class.
    '''
# Find the model class from its name
all_models = get_models()
mdict = {model.__name__: model for model in all_models}
if name not in mdict:
        print('Invalid model name. Options are:')
# Display a list of valid model names
for model in all_models:
print('\t* {}'.format(model.__name__))
return None
NetClass = mdict[name]
return NetClass
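# Usage sketch: load_model returns the class itself, so instantiate it yourself.
#
#     NetClass = load_model('ResidualGRUNet')
#     if NetClass is not None:
#         net = NetClass()  # constructor arguments depend on the chosen model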
|
nilq/baby-python
|
python
|
# Program that asks for a temperature in degrees Fahrenheit and converts it to degrees Celsius
# Ask for the temperature in degrees Fahrenheit
temperatura_farenheit = float(input("Enter the temperature in degrees Fahrenheit: "))
# Convert the temperature from Fahrenheit to Celsius
temperatura_celsius = (5 * (temperatura_farenheit - 32) / 9)
# Display the conversion result
print(temperatura_farenheit, " degrees Fahrenheit corresponds to ", temperatura_celsius, " degrees Celsius.")
|
nilq/baby-python
|
python
|
import discord
import mail
import os
client = discord.Client()
env = os.environ
DISCORD_TOKEN = env.get("DISCORD_TOKEN")
CHANS = env.get("DISCORD_CHANS")
if CHANS:
    CHANS = [int(x) for x in CHANS.split(",")]
else:
CHANS = []
@client.event
async def on_ready():
print("Logged in")
@client.event
async def on_message(message):
if message.channel.id not in CHANS:
return
text = message.content
if len(text) > 0:
remote_attachments = list(map(lambda x: x.url, message.attachments))
print(text)
print(remote_attachments)
mail.send_email(text, remote_attachments)
client.run(DISCORD_TOKEN)
|
nilq/baby-python
|
python
|
#
# This file is part of Python Client Library for STAC.
# Copyright (C) 2019 INPE.
#
# Python Client Library for STAC is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Utility data structures and algorithms."""
import json
import pkg_resources
from jsonschema import validate, RefResolver
import requests
import os
resource_package = __name__
try:
schema_path = 'file:///{0}/'.format(
os.path.dirname(pkg_resources.resource_filename('stac.utils', 'jsonschemas/0.8.0/catalog.json')))
    catalog_schema = json.loads(pkg_resources.resource_string(resource_package,
                                'jsonschemas/0.8.0/catalog.json'))
    collection_schema = json.loads(pkg_resources.resource_string(resource_package,
                                   'jsonschemas/0.8.0/collection.json'))
    item_schema = json.loads(pkg_resources.resource_string(resource_package,
                             'jsonschemas/0.8.0/item.json'))
    item_collection_schema = json.loads(pkg_resources.resource_string(resource_package,
                                        'jsonschemas/0.8.0/itemcollection.json'))
except Exception as e:
raise Exception(f'Error while loading validation schemas: {e}')
class Utils:
"""Utils STAC object."""
@staticmethod
def _get(url, params=None):
"""Query the STAC service using HTTP GET verb and return the result as a JSON document.
:param url: The URL to query must be a valid STAC endpoint.
:type url: str
:param params: (optional) Dictionary, list of tuples or bytes to send
in the query string for the underlying `Requests`.
:type params: dict
:rtype: dict
:raises ValueError: If the response body does not contain a valid json.
"""
response = requests.get(url, params=params)
response.raise_for_status()
content_type = response.headers.get('content-type')
if content_type not in ('application/json', 'application/geo+json'):
raise ValueError('HTTP response is not JSON: Content-Type: {}'.format(content_type))
return response.json()
class Link(dict):
"""Link object."""
def __init__(self, data):
"""Initialize instance with dictionary data.
:param data: Dict with Link metadata.
"""
super(Link, self).__init__(data or {})
@property
def rel(self):
""":return: the Link relation."""
return self['rel']
@property
def href(self):
""":return: the Link url."""
return self['href']
@property
def type(self):
""":return: the type of the Link object."""
return self['type']
@property
def title(self):
""":return: the title of the Link object."""
return self['title']
class Extent(dict):
"""The Extent object."""
def __init__(self, data):
"""Initialize instance with dictionary data.
:param data: Dict with Extent metadata.
"""
super(Extent, self).__init__(data or {})
@property
def spatial(self):
""":return: the spatial extent."""
return self['spatial']
@property
def temporal(self):
""":return: the temporal extent."""
return self['temporal']
class Provider(dict):
"""The Provider Object."""
def __init__(self, data):
"""Initialize instance with dictionary data.
:param data: Dict with Provider metadata.
"""
super(Provider, self).__init__(data or {})
@property
def name(self):
""":return: the Provider name."""
return self['name']
@property
def description(self):
""":return: the Provider description."""
return self['description']
@property
def roles(self):
""":return: the Provider roles."""
        return self['roles']
@property
def url(self):
""":return: the Provider url."""
return self['url']
class Catalog(dict):
"""The STAC Catalog."""
def __init__(self, data, validation=False):
"""Initialize instance with dictionary data.
:param data: Dict with catalog metadata.
:param validation: True if the Catalog must be validated. (Default is False)
"""
if validation:
validate(data, schema=catalog_schema)
super(Catalog, self).__init__(data or {})
@property
def stac_version(self):
""":return: the STAC version."""
return self['stac_version']
@property
def stac_extensions(self):
""":return: the STAC extensions."""
return self['stac_extensions']
@property
def id(self):
""":return: the catalog identifier."""
return self['id']
@property
def title(self):
""":return: the catalog title."""
return self['title'] if 'title' in self else None
@property
def description(self):
""":return: the catalog description."""
return self['description']
@property
def summaries(self):
""":return: the catalog summaries."""
return self['summaries']
@property
def links(self):
""":return: a list of resources in the catalog."""
return self['links']
class Collection(Catalog):
"""The STAC Collection."""
def __init__(self, data, validation=False):
"""Initialize instance with dictionary data.
:param data: Dict with collection metadata.
:param validation: True if the Collection must be validated. (Default is False)
"""
if validation:
validate(data, schema=collection_schema, resolver=RefResolver(schema_path, collection_schema))
super(Collection, self).__init__(data or {})
@property
def keywords(self):
""":return: the Collection list of keywords."""
return self['keywords']
@property
def version(self):
""":return: the Collection version."""
return self['version']
@property
def license(self):
""":return: the Collection license."""
return self['license']
@property
def providers(self):
""":return: the Collection list of providers."""
return self['providers']
@property
def extent(self):
""":return: the Collection extent."""
return self['extent']
@property
def properties(self):
""":return: the Collection properties."""
return self['properties']
def items(self, filter=None):
""":return: the Collection list of items."""
for link in self['links']:
if link['rel'] == 'items':
data = Utils._get(link['href'], params=filter)
return ItemCollection(data)
return ItemCollection({})
class Geometry(dict):
"""The Geometry Object."""
def __init__(self, data):
"""Initialize instance with dictionary data.
:param data: Dict with Geometry metadata.
"""
super(Geometry, self).__init__(data or {})
@property
def type(self):
""":return: the Geometry type."""
return self['type']
@property
def coordinates(self):
""":return: the Geometry coordinates."""
return self['coordinates']
class Item(dict):
"""The GeoJSON Feature of a STAC Item."""
def __init__(self, data, validation=False):
"""Initialize instance with dictionary data.
:param data: Dict with Item metadata.
:param validation: True if the Item must be validated. (Default is False)
"""
if validation:
            validate(data, schema=item_schema)
super(Item, self).__init__(data or {})
@property
def stac_version(self):
""":return: the STAC version."""
return self['stac_version']
@property
def stac_extensions(self):
""":return: the STAC extensions."""
return self['stac_extensions']
@property
def id(self):
""":return: the Item identifier."""
return self['id']
@property
def type(self):
""":return: the Item type."""
return self['type']
@property
def bbox(self):
""":return: the Item Bounding Box."""
return self['bbox']
@property
def collection(self):
""":return: the Item Collection."""
return self['collection']
@property
def geometry(self):
""":return: the Item Geometry."""
return self['geometry']
@property
def properties(self):
""":return: the Item properties."""
return self['properties']
@property
def links(self):
""":return: the Item related links."""
return self['links']
@property
def assets(self):
""":return: the Item related assets."""
return self['assets']
class ItemCollection(dict):
"""The GeoJSON Feature Collection of STAC Items."""
def __init__(self, data, validation=False):
"""Initialize instance with dictionary data.
:param data: Dict with Item Collection metadata.
:param validation: True if the Item Collection must be validated. (Default is False)
"""
if validation:
validate(data, schema=item_collection_schema, resolver=RefResolver(schema_path, item_collection_schema))
super(ItemCollection, self).__init__(data or {})
@property
def type(self):
""":return: the Item Collection type."""
return self['type']
@property
def features(self):
""":return: the Item Collection list of GeoJSON Features."""
return [Item(i) for i in self['features']]
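if __name__ == '__main__':
    # Minimal in-memory sketch: the wrappers are plain dict subclasses, so
    # they can be exercised without any HTTP request or schema validation.
    demo = Catalog({
        'stac_version': '0.8.0',
        'id': 'demo-catalog',
        'description': 'An in-memory demo catalog',
        'links': [],
    })
    print(demo.id, demo.stac_version, demo.title)  # title is None when absent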
|
nilq/baby-python
|
python
|
import random
import re
from abc import ABC, abstractmethod
from datetime import datetime
from pathlib import Path
from typing import Dict, List
from itertools import groupby, chain
import pandas as pd
from models import ListNews, News, Cluster
from sqlalchemy.orm import Session
from utils import convert_str_to_date
from config import LIMIT_NEWS
from db_lib import crud
from db_lib.database import SessionLocal
from statistics import NgramsBuilder, StatisticsByResource, ByDayCounter, CategoriesStatistics
class BaseNewsExtractor(ABC):
"""
Предполагается использовать этот класс как прародитель для всех
остальных при обращении к разным источникам данных
"""
@abstractmethod
def show_random_news(self, db: Session, num_random_news: int) -> ListNews:
"""
        Method for showing several random news items
"""
pass
@abstractmethod
def show_news_by_days(self, db: Session, start_date: str, end_date: str):
"""
        Method for showing news for a specific day
"""
pass
@abstractmethod
def show_news_by_topic(
self, db: Session, topic: str, start_date: str, end_date: str
):
"""
        Method for showing news on a specific topic
"""
pass
@abstractmethod
def show_news_by_filters(self, db: Session, topic: str, end_date: str, start_date: str, num_random_news: int):
"""
        Method for showing news matching the given filters
"""
pass
@abstractmethod
def show_clusters_by_filters(self, db: Session, topic: str, end_date: str, start_date: str, num_news: int = 10):
"""
        Method for showing news clusters matching the given filters
"""
pass
@abstractmethod
def show_cluster_by_id(self, db: Session, cluster_id):
"""
        Method for showing the news in a given cluster
"""
pass
@abstractmethod
def show_news_by_regex(self, db: Session, word: str, mode: str, cnt: int):
"""
        Method for searching news with a regular expression
"""
pass
@abstractmethod
def show_single_news(self, db: Session, news_id: int):
"""
        Method for showing a single news item by id
"""
pass
@abstractmethod
def show_posts_by_filters(self, db: Session, end_date: str, start_date: str, num_news: int):
"""
        Method for listing the latest posts with filtering
"""
pass
@abstractmethod
def show_last_posts(self, db: Session, num: int):
"""
        Method for listing the latest posts
"""
pass
@abstractmethod
def show_vk_tg_news(self, db, news_id):
pass
@abstractmethod
def show_vk_tg_stat(self, db, post_id, social_network):
pass
class PandasNewsExtractor(BaseNewsExtractor):
def __init__(self, path_to_df: Path):
self.source_df = pd.read_csv(path_to_df, parse_dates=['date'])
self.source_df['date'] = self.source_df['date'].map(lambda x: x.date())
def show_random_news(self, num_random_news: int = 10, **kwargs) -> ListNews:
df_random = self.source_df.sample(n=num_random_news)
news_list = self._convert_df_to_list_news(df_random)
return news_list
def show_news_by_days(
self,
start_date: str = '1991-05-12',
end_date: str = '1991-05-12',
**kwargs,
) -> ListNews:
start_date = convert_str_to_date(start_date)
end_date = convert_str_to_date(end_date)
df_date = self.source_df[
(self.source_df['date'] >= start_date)
& (self.source_df['date'] <= end_date)
]
news_list = self._convert_df_to_list_news(df_date)
return news_list
def show_news_by_topic(
self,
topic: str = 'Футбол',
start_date: str = '1991-05-12',
end_date: str = '1991-05-12',
**kwargs,
) -> ListNews:
start_date = convert_str_to_date(start_date)
end_date = convert_str_to_date(end_date)
df_topic = self.source_df[
(self.source_df['topic'] == topic)
& (self.source_df['date'] >= start_date)
& (self.source_df['date'] <= end_date)
]
news_list = self._convert_df_to_list_news(df_topic)
return news_list
def _convert_df_to_list_news(self, selected_df: pd.DataFrame) -> ListNews:
news_list = [None] * len(selected_df)
for i, (_, row) in enumerate(selected_df.iterrows()):
news_list[i] = self._convert_row_to_news(row)
news_list_dict = {'news_list': news_list}
# TODO here one can add interesting statistics
news_list_dict['statistics'] = None
return ListNews(**news_list_dict)
@staticmethod
def _convert_row_to_news(row) -> News:
news_dict = {
'source_url': row['url'],
'title': row['title'],
'content': row['text'],
'topic': row['topic'],
'tags': row['tags'],
'date': row['date'],
'time': datetime.combine(row['date'], datetime.min.time()),
}
return News(**news_dict)
class DBNewsExtractor(BaseNewsExtractor):
def get_db(self):
db = SessionLocal()
try:
yield db
finally:
db.close()
def show_random_news(self, db: Session, num_random_news: int = 10) -> ListNews:
news_list = random.choices(crud.get_all_news(db), k=num_random_news)
return ListNews(
**{'news_list': news_list, 'statistics': [
NgramsBuilder().predict(news_list, )
]}
)
def show_news_by_days(
self, db, start_date: str = '1991-05-12', end_date: str = '1991-05-12'
) -> ListNews:
news_list = crud.get_news_by_date(
db, convert_str_to_date(start_date), convert_str_to_date(end_date)
)
return ListNews(
**{'news_list': news_list, 'statistics': [
NgramsBuilder().predict(news_list)
]}
)
def show_news_by_topic(
self,
db,
topic: str = 'Футбол',
start_date: str = '1991-05-12',
end_date: str = '1991-05-12',
) -> ListNews:
news_list = crud.get_news_by_topic_and_date(
db, topic, convert_str_to_date(start_date), convert_str_to_date(end_date)
)
return ListNews(
**{'news_list': news_list, 'statistics': [
NgramsBuilder().predict(news_list, )
]}
)
def _clusters_from_news(self,
news_list: List[News]) -> List[Cluster]:
news_list.sort(key=lambda x: x.cluster_num, reverse=True)
        clusters = []
        for key, group_news in groupby(news_list, lambda news: news.cluster_num):
            # groupby yields one-shot iterators: materialise the group once,
            # otherwise the second and third traversals would see it empty
            group = list(group_news)
            clusters.append(Cluster.parse_obj(
                {
                    'cluster_id': key,
                    'news': group,
                    'topic': list(set(n.category for n in group if n.category)),
                    'tags': list(set(chain(*[n.tags for n in group if n.tags]))),
                    'statistics': []
                }
            ))
        return clusters
def show_clusters_by_filters(self,
db: Session,
topic: str,
end_date: str,
start_date: str = '1991-05-12',
num_news: int = 10) -> Dict:
news_list = crud.get_news_by_filters_with_cluster(db,
topic=topic,
start_date=convert_str_to_date(start_date),
end_date=convert_str_to_date(end_date),
limit=num_news * 20)
cluster_nums = [n.cluster_num for n in news_list]
news_list = crud.get_news_in_clusters(db, cluster_nums)
news_list = _clean_img_urls(news_list)
news_list = _json_tags_to_list(news_list)
clusters = self._clusters_from_news(news_list)[:num_news]
return {
'clusters': clusters,
'statistics': []
}
def show_cluster_by_id(self,
db: Session,
cluster_id) -> Dict:
news_list = crud.get_news_by_cluster_id(db, cluster_id)
news_list = _clean_img_urls(news_list)
news_list = _json_tags_to_list(news_list)
cluster = {
'cluster_id': cluster_id,
'news': news_list,
'topic': list(set([n.category for n in news_list if n.category])),
'tags': list(set(chain(*[n.tags for n in news_list if n.tags]))),
'statistics': [NgramsBuilder().predict(news_list)]
}
return cluster
def show_news_by_filters(
self,
db: Session,
topic: str,
end_date: str,
start_date: str = '1991-05-12',
num_news: int = 10,
) -> ListNews:
news_list = crud.get_news_by_filters(db,
topic=topic,
start_date=convert_str_to_date(start_date),
end_date=convert_str_to_date(end_date),
limit=num_news)
# news_list_len = len(news_list)
# if num_news > news_list_len:
# num_news = news_list_len
# news_list = random.choices(news_list, k=num_random_news)
news_list = _clean_img_urls(news_list)
news_list = _json_tags_to_list(news_list)
        # Do not change the order of the statistics list
return ListNews.parse_obj(
{
'news_list': news_list,
'statistics': [
NgramsBuilder().predict(news_list),
# StatisticsByResource().predict(news_list),
# ByDayCounter().predict(news_list),
# CategoriesStatistics().predict(news_list),
]
}
)
def show_news_by_regex(self, db: Session, word: str, mode: str = 'full', cnt: int = 2) -> ListNews:
if word:
news_list = crud.get_n_last_news(db, limit=LIMIT_NEWS)
else:
news_list = crud.get_all_news(db, limit=0)
word_re = rf'\b{word}\b'
if mode == 'full':
selected_news = [
one_news for one_news in news_list if
re.search(word_re, _one_news_to_string(one_news), flags=re.IGNORECASE)
]
else:
news_list = clean_nones_from_content(news_list)
selected_news = [
one_news for one_news in news_list if re.search(word_re, str(one_news.content), flags=re.IGNORECASE)
]
selected_news = _clean_img_urls(selected_news)
selected_news = _json_tags_to_list(selected_news)
        # Do not change the order of the statistics list
return ListNews.parse_obj(
{
'news_list': selected_news,
'statistics': [
NgramsBuilder().predict(selected_news, cnt),
StatisticsByResource().predict(selected_news),
ByDayCounter().predict(selected_news),
# CategoriesStatistics().predict(selected_news),
]
}
)
def show_single_news(self, db: Session, news_id: int) -> Dict:
single_news = crud.get_single_news(db, news_id)
single_news.image_url = _remove_extra_link(single_news.image_url)
if single_news.tags:
single_news.tags = single_news.tags.lstrip('{').rstrip('}').replace('"', '').split(',')
return {
'single_news': single_news,
}
def show_posts_by_filters(self,
db: Session,
end_date: str,
start_date: str = '1991-05-12',
num: int = 100) -> Dict:
vk_tg_news_list = crud.get_social_network_news_list_by_filters(db,
convert_str_to_date(start_date),
convert_str_to_date(end_date),
num)
return {
'news_list': vk_tg_news_list,
}
def show_last_posts(self, db: Session, num: int) -> Dict:
vk_tg_news_list = crud.get_social_network_news_list(db, num)
return {
'news_list': vk_tg_news_list,
}
def show_vk_tg_news(self, db: Session, news_id: int) -> Dict:
vk_tg_news = crud.get_social_network_news(db, news_id)
return {
'single_news': vk_tg_news,
}
def show_vk_tg_stat(self, db: Session, post_id: int, social_network: str):
vk_tg_stat = crud.get_social_network_stats(db, post_id, social_network) # List[SocialNetworkStats]
return vk_tg_stat
def _to_str(text):
return '' if text is None else str(text)
def _one_news_to_string(one_news: News) -> str:
return _to_str(one_news.title) + ' ' + _to_str(one_news.content)
def clean_nones_from_content(news_list: List[News]) -> List[News]:
for i, news in enumerate(news_list):
if news.content is None:
news_list[i].content = news.title
return news_list
def _clean_img_urls(news_list: List[News]) -> List[News]:
for i, news in enumerate(news_list):
news_list[i].image_url = _remove_extra_link(news_list[i].image_url)
return news_list
def _json_tags_to_list(news_list: List[News]) -> List[News]:
for i, news in enumerate(news_list):
if news_list[i].tags and not isinstance(news_list[i].tags, list):
news_list[i].tags = news_list[i].tags \
.lstrip('{').rstrip('}') \
.replace('"', '').replace('«', '').replace('»', '') \
.replace('[', '').replace(']', '').replace("'", "") \
.split(',')
return news_list
def _remove_extra_link(links: str) -> str:
if links:
return links.lstrip('{').rstrip('}').split(',')[0]
|
nilq/baby-python
|
python
|
import sys
import io
from arc import color, CLI
def test_colorize():
colored = color.colorize("test", color.fg.RED)
assert colored.startswith(str(color.fg.RED))
assert colored.endswith(str(color.effects.CLEAR))
colored = color.colorize("test", color.fg.RED, clear=False)
assert colored.startswith(str(color.fg.RED))
assert not colored.endswith(str(color.effects.CLEAR))
# Because StringIO is not terminal-like, escape sequences will be removed
def test_output(cli: CLI):
try:
stdout = sys.stdout
fake = io.StringIO()
sys.stdout = fake
@cli.command()
def test():
print(f"{color.fg.GREEN}green!{color.effects.CLEAR}")
cli("test")
fake.seek(0)
assert fake.read() == "green!\n"
finally:
sys.stdout = stdout
|
nilq/baby-python
|
python
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MapReduce extensions for ETL."""
__author__ = [
'johncox@google.com (John Cox)',
'juliaoh@google.com (Julia Oh)',
]
import csv
import os
import sys
from xml.etree import ElementTree
from models import transforms
import mrs
from tools.etl import etl_lib
class MapReduceJob(etl_lib.Job):
"""Parent classes for custom jobs that run a mapreduce.
Usage:
python etl.py run path.to.my.job / appid server.appspot.com \
--disable_remote \
--job_args='path_to_input_file path_to_output_directory'
"""
# Subclass of mrs.MapReduce; override in child.
MAPREDUCE_CLASS = None
def _configure_parser(self):
"""Shim that works with the arg parser expected by mrs.Mapreduce."""
self.parser.add_argument(
'file', help='Absolute path of the input file', type=str)
self.parser.add_argument(
'output', help='Absolute path of the output directory', type=str)
def main(self):
if not os.path.exists(self.args.file):
sys.exit('Input file %s not found' % self.args.file)
if not os.path.exists(self.args.output):
sys.exit('Output directory %s not found' % self.args.output)
mrs.main(self.MAPREDUCE_CLASS, args=self._parsed_etl_args.job_args)
class MapReduceBase(mrs.MapReduce):
"""Common functionalities of MR jobs combined into one class."""
def json_parse(self, value):
"""Parses JSON file into Python."""
if value.strip()[-1] == ',':
value = value.strip()[:-1]
try:
return transforms.loads(value)
# Skip unparseable rows like the first and last
# pylint: disable=bare-except
except:
return None
def make_reduce_data(self, job, interm_data):
"""Change the outout format to JSON."""
outdir = self.output_dir()
output_data = job.reduce_data(
interm_data, self.reduce, outdir=outdir, format=JsonWriter)
return output_data
class JsonWriter(mrs.fileformats.Writer):
"""Outputs one JSON literal per line.
Example JSON output may look like:
{'foo': 123, 'bar': 456, 'quz': 789}
{'foo': 321, 'bar': 654, 'quz': 987}
.
.
.
{'foo': 456, 'bar': 534, 'quz': 154}
"""
ext = 'json'
def __init__(self, fileobj, *args, **kwds):
super(JsonWriter, self).__init__(fileobj, *args, **kwds)
def _write_json(self, write_fn, python_object):
"""Writes serialized JSON representation of python_object to file.
Args:
write_fn: Python file object write() method.
python_object: object. Contents to write. Must be JSON-serializable.
Raises:
TypeError: if python_object is not a dict or a list.
"""
if isinstance(python_object, dict):
write_fn(unicode(
transforms.dumps(python_object) + '\n').encode('utf-8'))
elif isinstance(python_object, list):
for item in python_object:
self._write_json(write_fn, item)
else:
raise TypeError('Value must be a dict or a list of dicts.')
def writepair(self, kvpair, **unused_kwds):
unused_key, value = kvpair
self._write_json(self.fileobj.write, value)
class Histogram(object):
"""Histogram that bins values into _bucket_size sized intervals."""
# Int. Number of consecutive zeros in list of integer values to determine
# the cutoff point.
_NUM_ZEROS = 3
def __init__(self, bucket_size):
# Map of 0-indexed bin #int -> count int
self._values = {}
self._bucket_size = bucket_size
def add(self, value):
"""Adds value into self._values."""
bin_number = self._get_bin_number(value)
self._increment_bin(bin_number)
def _get_bin_number(self, value):
"""Returns appropriate bin number for given value."""
if value < 0:
raise ValueError('Cannot calculate index for negative value')
return max(0, (value - 1) // self._bucket_size)
def _increment_bin(self, n):
self._values[n] = self._values.get(n, 0) + 1
def to_list(self):
"""Returns self._values converted into a list, sorted by its keys."""
try:
max_key = max(self._values.iterkeys())
return [self._values.get(n, 0) for n in xrange(0, max_key+1)]
except ValueError:
return []
def to_noise_filtered_list(self):
"""Converts self._values to a list with junk data removed.
Returns:
self.to_list(), with junk data removed
"Junk data" refers to noise in EventEntity data caused by API
misbehaviors and certain user behavior. Two known issues are:
1. Youtube video data from event source 'tag-youtube-video' and
'tag-youtube-milestone' represent user engagement at certain playhead
positions. Youtube API continues to emit these values even when the
video has stopped playing, causing a trail of meaningless values in
the histogram.
2. Data from event source 'visit-page' logs duration of a page visit.
If a user keeps the browser open and goes idle, the duration value
recorded is skewed since the user wasn't engaged. These values tend
to be significantly larger than more reliable duration values.
This method filters the long trail of insignificant data by counting
number of consecutive zeros set in self._NUM_ZEROS and disregarding
any data after the zeros.
Example:
self.to_list() returns [1, 2, 3, 4, 5, 0, 0, 0, 0, 1]
_NUM_ZEROS = 3
output = [1, 2, 3, 4, 5]
"""
zero_counts = 0
cutoff_index = 0
values = self.to_list()
for index, value in enumerate(values):
if value == 0:
zero_counts += 1
if zero_counts == 1:
cutoff_index = index
if zero_counts == self._NUM_ZEROS:
return values[:cutoff_index]
else:
cutoff_index = 0
zero_counts = 0
return values
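    # Worked example for Histogram(bucket_size=10):
    #
    #     h = Histogram(10)
    #     for v in (5, 7, 25):
    #         h.add(v)      # 5 and 7 land in bin 0, 25 lands in bin 2
    #     h.to_list()       # -> [2, 0, 1]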
class XmlWriter(mrs.fileformats.Writer):
"""Writes file in XML format.
The writer does not use the key from kvpair and expects the value to be a
list of string representation of XML elements.
Example:
kvpair: some_key, ['<row><name>Jane</name></row>',
'<row><name>John</name></row>']
Output:
<rows>
<row>
<name>Jane</name>
</row>
<row>
<name>John</name>
</row>
</rows>
"""
ext = 'xml'
def __init__(self, fileobj, *args, **kwds):
super(XmlWriter, self).__init__(fileobj, *args, **kwds)
self.fileobj.write('<rows>')
def writepair(self, kvpair, **unused_kwds):
unused_key, values = kvpair
write = self.fileobj.write
for value in values:
write(value)
write('\n')
def finish(self):
self.fileobj.write('</rows>')
self.fileobj.flush()
class XmlGenerator(MapReduceBase):
"""Generates a XML file from a JSON formatted input file."""
def map(self, key, value):
"""Converts JSON object to xml.
Args:
key: int. line number of the value in Entity file.
value: str. A line of JSON literal extracted from Entity file.
Yields:
A tuple with the string 'key' and a tuple containing line number and
string representaiton of the XML element.
"""
json = self.json_parse(value)
if json:
root = ElementTree.Element('row')
transforms.convert_dict_to_xml(root, json)
yield 'key', (key, ElementTree.tostring(root, encoding='utf-8'))
def reduce(self, unused_key, values):
"""Sorts the values by line number to keep the order of the document.
Args:
unused_key: str. The arbitrary string 'key' set to accumulate all
values under one key.
values: list of tuples. Each tuple contains line number and JSON
literal converted to XML string.
Yields:
A list of XML strings sorted by the line number.
"""
sorted_values = sorted(values, key=lambda x: x[0])
yield [value[1] for value in sorted_values]
def make_reduce_data(self, job, interm_data):
"""Change the outout format to XML."""
outdir = self.output_dir()
output_data = job.reduce_data(
interm_data, self.reduce, outdir=outdir, format=XmlWriter)
return output_data
class JsonToXml(MapReduceJob):
"""MapReduce Job that converts JSON formatted Entity files to XML.
Usage: run the following command from the app root folder.
python tools/etl/etl.py run tools.etl.mapreduce.JsonToXml \
/coursename appid server.appspot.com \
--job_args='path_to_any_Entity_file path_to_output_directory'
"""
MAPREDUCE_CLASS = XmlGenerator
class CsvWriter(mrs.fileformats.Writer):
"""Writes file in CSV format.
The default value to be written if the dictionary is missing a key is an
empty string.
Example:
kvpair: (some_key, (['bar', 'foo', 'quz'],
[{'foo': 1, 'bar': 2, 'quz': 3},
{'bar': 2, 'foo': 3}])
Output:
'bar', 'foo', 'quz'
2, 1, 3
2, 3, ''
"""
ext = 'csv'
def __init__(self, fileobj, *args, **kwds):
super(CsvWriter, self).__init__(fileobj, *args, **kwds)
def writepair(self, kvpair, **unused_kwds):
"""Writes list of JSON objects to CSV format.
Args:
kvpair: tuple of unused_key, and a tuple of master_list and
json_list. Master_list is a list that contains all the
fieldnames across json_list sorted in alphabetical order, and
json_list is a list of JSON objects.
**unused_kwds: keyword args that won't be used.
"""
unused_key, (master_list, json_list) = kvpair
writer = csv.DictWriter(
self.fileobj, fieldnames=master_list, restval='')
writer.writeheader()
writer.writerows(json_list)
class CsvGenerator(MapReduceBase):
"""Generates a CSV file from a JSON formatted input file."""
@classmethod
def _flatten_json(cls, json, prefix=''):
"""Flattens given JSON object and encodes all the values in utf-8."""
for k, v in json.items():
try:
if type(transforms.loads(v)) == dict:
flattened = cls._flatten_json(
transforms.loads(json.pop(k)), prefix=prefix + k + '_')
json.update(flattened)
# pylint: disable=bare-except
except:
json[prefix + k] = unicode(json.pop(k)).encode('utf-8')
return json
def map(self, unused_key, value):
"""Loads JSON object and flattens it.
Example:
json['data']['foo'] = 'bar' -> json['data_foo'] = 'bar', with
json['data'] removed.
Args:
unused_key: int. line number of the value in Entity file.
value: str. instance of Entity file extracted from file.
Yields:
A tuple of string key and flattened dictionary. map() outputs
constant string 'key' as the key so that all the values can be
accumulated under one key in reduce(). This accumulation is
necessary because reduce() must go through the list of all JSON
literals and determine all existing fieldnames. Then, reduce()
supplies the master_list of fieldnames to CSVWriter's writepair()
which uses the list as csv header.
"""
json = self.json_parse(value)
if json:
json = CsvGenerator._flatten_json(json)
yield 'key', json
def reduce(self, unused_key, values):
"""Creates a master_list of all the keys present in an Entity file.
Args:
unused_key: str. constant string 'key' emitted by map().
values: a generator over list of json objects.
Yields:
A tuple of master_list and list of json objects.
master_list is a list of all keys present across every json object.
This list is used to create header for CSV files.
"""
master_list = []
values = [value for value in values]
for value in values:
for key in value:
if key not in master_list:
master_list.append(key)
try:
# Convert integer keys from unicode to ints to be sorted correctly.
# pylint: disable-msg=unnecessary-lambda
master_list = sorted(master_list, key=lambda item: int(item))
except ValueError:
            # String keys cannot be converted into integers.
master_list = sorted(master_list)
yield master_list, values
def make_reduce_data(self, job, interm_data):
"""Set the output data format to CSV."""
outdir = self.output_dir()
output_data = job.reduce_data(
interm_data, self.reduce, outdir=outdir, format=CsvWriter)
return output_data
class JsonToCsv(MapReduceJob):
"""MapReduce Job that converts JSON formatted Entity files to CSV format.
Usage: run the following command from the app root folder.
python tools/etl/etl.py run tools.etl.mapreduce.JsonToCSV
/coursename appid server.appspot.com \
--job_args='path_to_an_Entity_file path_to_output_directory'
"""
MAPREDUCE_CLASS = CsvGenerator
mrs.fileformats.writer_map['csv'] = CsvWriter
mrs.fileformats.writer_map['json'] = JsonWriter
mrs.fileformats.writer_map['xml'] = XmlWriter
|
nilq/baby-python
|
python
|
import numpy as np
import lmdb
import caffe
N = 1000
# Test Data
X = np.zeros((N, 3, 32, 32), dtype=np.uint8)
y = np.zeros(N, dtype=np.int64)
# We need to prepare the database for the size. We'll set it 10 times
# greater than what we theoretically need. There is little drawback to
# setting this too big. If you still run into problem after raising
# this, you might want to try saving fewer entries in a single
# transaction.
map_size = X.nbytes * 10
env = lmdb.open('mylmdb', map_size=map_size)
with env.begin(write=True) as txn:
# txn is a Transaction object
for i in range(N):
datum = caffe.proto.caffe_pb2.Datum()
datum.channels = X.shape[1]
datum.height = X.shape[2]
datum.width = X.shape[3]
datum.data = X[i].tobytes() # or .tostring() if numpy < 1.9
datum.label = int(y[i])
str_id = '{:08}'.format(i)
# The encode is only essential in Python 3
txn.put(str_id.encode('ascii'), datum.SerializeToString())
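# Read-back sketch (added illustration, not part of the original snippet):
# open a read transaction on the same 'mylmdb' environment and decode the
# first Datum written above. The key format matches the '{:08}' writer.
with env.begin() as txn:
    raw = txn.get(b'00000000')
    datum = caffe.proto.caffe_pb2.Datum()
    datum.ParseFromString(raw)
    flat = np.frombuffer(datum.data, dtype=np.uint8)
    x_restored = flat.reshape(datum.channels, datum.height, datum.width)
    label = datum.label  # 0 for this all-zeros test data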
|
nilq/baby-python
|
python
|
import sys
from glob import glob
from os.path import join, dirname
from kivy.uix.scatter import Scatter
from kivy.app import App
from kivy.graphics.svg import Svg
from kivy.core.window import Window
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
Builder.load_string("""
<SvgWidget>:
do_rotation: False
<FloatLayout>:
canvas.before:
Color:
rgb: (1, 1, 1)
Rectangle:
pos: self.pos
size: self.size
""")
class SvgWidget(Scatter):
def __init__(self, filename, **kwargs):
super(SvgWidget, self).__init__(**kwargs)
with self.canvas:
svg = Svg(filename)
self.size = svg.width, svg.height
class SvgApp(App):
def build(self):
self.root = FloatLayout()
filenames = sys.argv[1:]
if not filenames:
filenames = glob(join(dirname(__file__), '*.svg'))
for filename in filenames:
svg = SvgWidget(filename, size_hint=(None, None))
self.root.add_widget(svg)
svg.scale = 5.
svg.center = Window.center
if __name__ == '__main__':
SvgApp().run()
|
nilq/baby-python
|
python
|
from decimal import Decimal
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.db.models import Sum
class CustomUser(AbstractUser):
"""
Clase que modela un usuario del sistema.
Atributos:
avatar: avatar que identifica al usuario. (String)
cash: dinero liquido disponible para el usuario. (Decimal)
rank: posicion que ocupa en el ranking de usuarios. (Int)
"""
avatar = models.CharField(blank=False, max_length=12)
cash = models.DecimalField(max_digits=14, decimal_places=2, default=5000)
rank = models.IntegerField(default=0)
def __str__(self):
return self.email
def wallet_quote(self):
"""
Metodo que recorre las inversiones del usuario y cotiza su cartera
en base a los precios actuales de los activos y al dinero liquido.
"""
quote = 0
investments = self.investments.all()\
.values('asset__buy')\
.order_by()\
.annotate(amountSum=Sum('amount'))\
.filter(amountSum__gte=0.01)
for investment in investments:
quote += investment['amountSum'] * investment['asset__buy']
return Decimal('%.2f' % (quote + self.cash))
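    # Worked example of the arithmetic above (added illustration with plain
    # values rather than the ORM): for two aggregated rows
    #   amountSum=3,   asset__buy=Decimal('10.50')  ->  31.50
    #   amountSum=100, asset__buy=Decimal('2.00')   -> 200.00
    # quote = 231.50; with cash = 5000 the method returns Decimal('5231.50').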
|
nilq/baby-python
|
python
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"USE_64": "01_Pairwise_Distance.ipynb",
"gpu_dist_matrix": "01_Pairwise_Distance.ipynb",
"component_mixture_dist_matrix": "01_Pairwise_Distance.ipynb",
"makeCurvesFromDistanceMatrix": "02_Curve_Constructor.ipynb",
"makeCurve": "02_Curve_Constructor.ipynb",
"plotCurve": "02_Curve_Constructor.ipynb",
"getEstimatorModel": "03_get_estimator_model.ipynb",
"getTrainedEstimator": "03_get_estimator_model.ipynb",
"prepFeatures": "04_Univariate_Transforms.ipynb",
"trainOOBClassifier": "04_Univariate_Transforms.ipynb",
"trainKFoldClassifier": "04_Univariate_Transforms.ipynb",
"getOptimalTransform": "04_Univariate_Transforms.ipynb"}
modules = ["pairwise_distance.py",
"curve_constructor.py",
"model.py",
"transforms.py"]
doc_url = "https://Dzeiberg.github.io/dist_curve/"
git_url = "https://github.com/Dzeiberg/dist_curve/tree/master/"
def custom_doc_links(name): return None
|
nilq/baby-python
|
python
|
import pytest
from app.models import Expense, User
from app.models import expense
pytestmark = pytest.mark.nologin
def headers(tok):
return {'Authorization': f'Bearer {tok}'}
def test_get_expenses(db_with_expenses, token, client):
resp = client.get('/api/expenses?page=1&page_size=10',
headers=headers(token))
assert resp.status_code == 200
expenses = resp.get_json()
assert len(expenses) == 10
for i, e in enumerate(expenses):
assert e['description'] == f'Item {15-i}'
def test_get_expense(db_with_expenses, token, client):
exp = Expense.query.filter_by(description='Item 10').first()
db_data = {
'id': exp.id,
'description': exp.description,
'amount': exp.amount_str,
'date': exp.date.isoformat(),
'payment_mode': exp.payment_mode.mode,
'estimate': exp.estimate.item if exp.estimate else None,
'tags': ','.join([tag.tagname for tag in exp.tags]),
'comments': exp.comments,
'created_on': exp.created_on.isoformat(),
'updated_on': exp.updated_on.isoformat()
}
resp = client.get(f'/api/expenses/{exp.id}',
headers=headers(token))
assert resp.status_code == 200
e = resp.get_json()
assert e == db_data
def test_update_expense(db_with_expenses, token, client):
    # Set the module's current_user because accessing the amount property requires it
expense.current_user = User.query.get(1)
exp = Expense.query.filter_by(description='Item 10').first()
orig_amount = exp.amount
orig_comments = exp.comments
data = {
'amount': int(orig_amount + 10),
'comments': 'Amount increased by 10'
}
resp = client.patch(f'/api/expenses/{exp.id}',
json=data,
headers=headers(token))
assert resp.status_code == 200
e = resp.get_json()
assert e['id'] == exp.id
assert e['amount'] == str(orig_amount + 10)
assert e['comments'] != orig_comments
assert e['comments'] == 'Amount increased by 10'
def test_delete_expense(db_with_expenses, token, client):
exp = Expense.query.filter_by(description='Item 10').first()
resp = client.delete(f'/api/expenses/{exp.id}', headers=headers(token))
assert resp.status_code == 204
def test_delete_forbidden(db_with_expenses, token, client):
exp = Expense.query.filter_by(description='Item user2').first()
resp = client.delete(f'/api/expenses/{exp.id}', headers=headers(token))
assert resp.status_code == 403
assert resp.get_json()['msg'].startswith('Forbidden')
def test_delete_not_found(db_with_expenses, token, client):
resp = client.delete('/api/expenses/50', headers=headers(token))
assert resp.status_code == 404
assert resp.get_json()['msg'] == 'Expense not found.'
|
nilq/baby-python
|
python
|
import os
from oraculo.gods import faveo, faveo_db, exceptions
from . import entitys, base
from .exceptions import (
DoesNotExist, NotSetHelpDeskUserInstance,
NotIsInstance)
class Prioritys(object):
_url = 'api/v1/helpdesk/priority'
_entity = entitys.Priority
objects = base.BaseManageEntity(_url, _entity, key_name='priority')
class Topics(object):
_url = 'api/v1/helpdesk/help-topic'
_entity = entitys.Topic
objects = base.BaseManageEntity(_url, _entity, key_name='topic')
class Deparments(object):
_url = 'api/v1/helpdesk/department'
_entity = entitys.Department
objects = base.BaseManageEntity(_url, _entity)
class Status(base.BaseManageEntity):
_url = 'api/v1/helpdesk/dependency'
_entity = entitys.State
_client = faveo.APIClient()
@staticmethod
def get_entitys(
entity=entitys.State,
url='api/v1/status',
client=faveo_db.APIClient):
client = client()
response = client.get(url)
return [entity(**state) for state in response]
@staticmethod
def get_state_by_name(
name,
entity=entitys.State,
url='api/v1/status',
client=faveo_db.APIClient):
client = client()
response = client.get(url, params=dict(name=name))
return entity(**response[0])
class HelpDeskTicket(
base.BaseEntityHelpDesk,
base.BaseHelpDesk):
_user = None
_client = faveo.APIClient()
_status_close_name = 'Closed'
_department = os.environ.get('PARTENON_DEPARTMENT')
_create_url = 'api/v1/helpdesk/create'
_list_url = 'api/v1/helpdesk/my-tickets-user'
_url_detail = 'api/v1/helpdesk/ticket'
_url_to_change_status = 'api/v2/helpdesk/status/change'
_url_to_add_note = 'api/v1/helpdesk/internal-note'
ticket_name = None
@property
def state(self):
if not self.ticket_name:
response = self._client.get(
self._url_detail, params=dict(id=self.ticket_id))
ticket = response.get('data').get('ticket')
self.ticket_name = ticket.get('status_name')
return Status.get_state_by_name(self.ticket_name)
@property
def user(self):
return self._user
@staticmethod
def get_specific_ticket(
ticket_id,
url='api/v1/helpdesk/ticket',
client=faveo.APIClient()):
response = client.get(url, params=dict(id=ticket_id))
ticket_detail = response.get('data').get('ticket')
ticket_detail['ticket_id'] = ticket_detail['id']
user = HelpDeskUser(**ticket_detail.get('from'))
return HelpDeskTicket(_user=user, **ticket_detail)
def add_note(self, note, user):
body = dict(ticket_id=self.ticket_id, user_id=user.id, body=note)
response = self._client.post(self._url_to_add_note, body=body)
return response
def create(self, subject, body, priority, topic, department):
        if not isinstance(priority, entitys.Priority):
            raise NotIsInstance('priority is not an instance of Priority')
        if not isinstance(topic, entitys.Topic):
            raise NotIsInstance('topic is not an instance of Topic')
        if not isinstance(department, entitys.Department):
            raise NotIsInstance('department is not an instance of Department')
body = dict(
subject=subject, body=body, first_name=self._user.first_name,
email=self._user.email, priority=priority.priority_id,
help_topic=topic.id, department=department.id)
response = self._client.post(self._create_url, body).get('response')
return self.get_specific_ticket(response.get('ticket_id'))
def list(self):
if not self._user:
raise NotSetHelpDeskUserInstance('Need set _user instance')
params = dict(user_id=self._user.id)
response = self._client.get(self._list_url, params)
        if not isinstance(response, dict):
            raise NotIsInstance(response)
        if not response.get('tickets'):
            raise NotIsInstance(response.get('error'))
return [HelpDeskTicket(**ticket) for ticket in response.get('tickets')]
def change_state(self, state):
body = dict(ticket_id=self.ticket_id, status_id=state.id)
response = self._client.post(self._url_to_change_status, body)
return response.get('success')
def close(self):
state_close = Status.get_state_by_name(self._status_close_name)
body = dict(ticket_id=self.ticket_id, status_id=state_close.id)
response = self._client.post(self._url_to_change_status, body)
return response.get('success')
class HelpDeskUser(base.BaseEntityHelpDesk, base.BaseHelpDesk):
_url = 'api/v1/helpdesk/register'
_search_url = 'api/v1/helpdesk/agents'
def __init__(self, *args, **kwargs):
self.ticket = HelpDeskTicket(**{'_user': self})
super().__init__(*args, **kwargs)
@staticmethod
def get(email,
url='api/v1/users',
client=faveo_db.APIClient):
client = client()
response = client.get(url, params={'email': email})
return HelpDeskUser(**response) if response else None
@staticmethod
def create_user(
email, first_name, last_name,
url='api/v1/helpdesk/register',
client=faveo.APIClient):
client = client()
user = HelpDeskUser.get(email)
if user:
return user
params = dict(email=email, first_name=first_name, last_name=last_name)
result = client.post(url, params)
return HelpDeskUser(**result[0].get('user'))
class HelpDesk(object):
user = HelpDeskUser
topics = Topics
prioritys = Prioritys
departments = Deparments
|
nilq/baby-python
|
python
|
import re
import time
import io
import sys
import argparse
from collections import defaultdict
# parse/validate arguments
argParser = argparse.ArgumentParser()
argParser.add_argument("-d", "--delimiter", type=str, default='\t', help="delimiter defaults to \t")
argParser.add_argument("-1", "--firstFilename", type=str)
argParser.add_argument("-2", "--secondFilename", type=str)
argParser.add_argument("-o", "--outputFilename", type=str)
argParser.add_argument("-ie", "--input_encoding", type=str, default='utf8')
argParser.add_argument("-oe", "--output_encoding", type=str, default='utf8')
args = argParser.parse_args()
firstFile = io.open(args.firstFilename, encoding=args.input_encoding, mode='r')
secondFile = io.open(args.secondFilename, encoding=args.input_encoding, mode='r')
outputFile = io.open(args.outputFilename, encoding=args.output_encoding, mode='w')
counter = 0
max_line = 100001
try:
for firstLine in firstFile:
secondLine = secondFile.readline()
if len(secondLine) == 0:
            print('error: second file is shorter than first file at line {0}'.format(counter))
            sys.exit(1)
if counter == 0:
# outputFile.write(firstLine.strip() + ' ' + str(len(secondLine.strip().split())) + '\n')
outputFile.write(u'{0}'.format(str(max_line - 1) + ' ' + str(len(secondLine.strip().split())) + '\n'))
else:
outputFile.write(u'{0}{1}{2}'.format(firstLine.strip(), args.delimiter, secondLine))
counter += 1
if counter == max_line:
break
except UnicodeDecodeError:
    print('unicode error')
firstFile.close()
secondFile.close()
outputFile.close()
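# Hypothetical invocation (added note; the script name is an assumption):
#   python merge_columns.py -1 words.txt -2 vectors.txt -o vectors.w2v
# writes a "<line_count> <dimension>" header, then joins the remaining lines
# of the two files with the delimiter, word2vec-text-format style.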
|
nilq/baby-python
|
python
|
"""
File name: predict_full_brain.py
Author: Jana Rieger
Date created: 03/12/2018
This is the main script for predicting a segmentation of an input MRA image. Segmentations can be predicted for
multiple models, either on a rough grid (the parameters are then read from the Unet/models/tuned_params.csv file)
or on a fine grid.
"""
import os
from Unet import config
from Unet.utils.helper import read_tuned_params_from_csv
from Unet.utils.predict_function import predict_and_save
################################################
# SET PARAMETERS FOR FINE GRID
################################################
dataset = 'test'  # train / val / test
patch_size_list = [96] # list of sizes of one patch n x n
batch_size_list = [8, 16, 32, 64] # list of batch sizes
num_epochs = 10 # number of epochs
learning_rate_list = [1e-4, 1e-5] # list of learning rates of the optimizer Adam
dropout_list = [0.0, 0.1, 0.2] # list of dropout rates: percentage of weights to be dropped
fine_grid = True # True for tuning hyperparameters in fine grid tuning, False for random rough grid
################################################
num_patients_train = len(config.PATIENTS['train']['working']) + len(
config.PATIENTS['train']['working_augmented']) # number of patients in training category
num_patients_val = len(config.PATIENTS['val']['working']) + len(
config.PATIENTS['val']['working_augmented']) # number of patients in validation category
patients = config.PATIENTS[dataset]['working'] + config.PATIENTS[dataset]['working_augmented']
data_dirs = config.MODEL_DATA_DIRS
if not os.path.exists(config.RESULTS_DIR + dataset + '/'):
os.makedirs(config.RESULTS_DIR + dataset + '/')
tuned_params_file = config.get_tuned_parameters()
# PARAMETER LOOPS
if fine_grid:
# -----------------------------------------------------------
# FINE GRID FOR PARAMETER TUNING
# -----------------------------------------------------------
for patch_size in patch_size_list:
for batch_size in batch_size_list:
for lr in learning_rate_list:
for dropout in dropout_list:
for patient in patients:
predict_and_save(patch_size, num_epochs, batch_size, lr, dropout, patient, num_patients_train,
num_patients_val, data_dirs, dataset)
else:
# -----------------------------------------------------------
# RANDOM ROUGH GRID FOR PARAMETER TUNING
# -----------------------------------------------------------
patch_size_list, num_epochs_list, batch_size_list, learning_rate_list, dropout_list = read_tuned_params_from_csv(
tuned_params_file)
for i in range(len(patch_size_list)):
patch_size = patch_size_list[i]
num_epochs = num_epochs_list[i]
batch_size = batch_size_list[i]
lr = learning_rate_list[i]
dropout = dropout_list[i]
for patient in patients:
predict_and_save(patch_size, num_epochs, batch_size, lr, dropout, patient, num_patients_train,
num_patients_val, data_dirs, dataset)
print('DONE')
|
nilq/baby-python
|
python
|
import lumos.numpy.casadi_numpy as np
import pytest
def test_basic_logicals_numpy():
a = np.array([True, True, False, False])
b = np.array([True, False, True, False])
assert np.all(a & b == np.array([True, False, False, False]))
if __name__ == "__main__":
pytest.main()
|
nilq/baby-python
|
python
|
import asyncio
from string import capwords
import DiscordUtils
import discord
from discord.ext.commands import Context
from embeds import error_embed, success_embed, infoCheckEmbed
from settings import BOT_TOKEN, prefix, description, verified_role_id
from settings import verification_channel_id
from database import emailTaken, addVerification, verifyUser, idTaken
from database import isEDUEmail, addEDUEmail, authCodeTaken
from database import getUserFromId
from database import newInvite, wasInvited, removeInvite, useInvite
from logs import logRegistered, logVerified, logRejoin
# discord gateway intents
intents = discord.Intents.all()
allowed_mentions = discord.AllowedMentions(everyone=False,
users=True,
roles=False)
# bot instance
bot = discord.ext.commands.Bot(command_prefix=prefix,
intents=intents,
description=description,
case_insensitive=True,
allowed_mentions=allowed_mentions)
# invite tracker
tracker = DiscordUtils.InviteTracker(bot)
@bot.event
async def on_member_join(member):
# check if user was previously registered
if await idTaken(member.id):
# get user
user = await getUserFromId(member.id)
# set role and nick
nick = f"{user['first_name']} {user['last_name'][0]}"
await member.add_roles(member.guild.get_role(verified_role_id))
await member.edit(nick=nick)
# send a dm
channel = await member.create_dm()
await channel.send(f"Welcome back to the "
f"OHSEA Discord {user['first_name']}! "
f"I've automagically applied "
f"your verification again."
f"\n\n"
f"If you think this was a mistake, "
f"let an admin know :smile:")
# log it down
await logRejoin(member.id, nick, bot)
# otherwise log down the inviter
else:
# get inviter
inviter = await tracker.fetch_inviter(member)
await newInvite(member.id, inviter.id)
@bot.event
async def on_member_remove(member):
if await wasInvited(member.id):
await removeInvite(member.id)
@bot.command()
async def register(ctx):
if not isinstance(ctx.channel, discord.channel.DMChannel):
await ctx.send(embed=await error_embed("Command can only be run "
"in my DM's!"))
return
def messageCheck(message):
return message.channel == ctx.channel and ctx.author == message.author
user = {}
# get first name
await ctx.send('What is your first name?')
msg = await bot.wait_for('message',
check=messageCheck,
timeout=1800)
user['first_name'] = capwords(msg.content)
# get last name
await ctx.send('What is your last name?')
msg = await bot.wait_for('message',
check=messageCheck,
timeout=1800)
user['last_name'] = capwords(msg.content)
while True:
# get email
await ctx.send('What is your **personal** school email?')
msg = await bot.wait_for('message',
check=messageCheck,
timeout=1800)
user['email'] = msg.content
# break out if valid edu email
if await isEDUEmail(msg.content):
break
        # else tell them it's not a valid EDU email
await ctx.send(embed=await error_embed('That is not a valid '
'EDU email!'
'\n\n'
'Contact modmail if you\'d '
'like to add yours.'))
# check if email is already used
if await emailTaken(msg.content):
await ctx.send(embed=await error_embed('Your email is already taken!'
'\n\nPlease contact modmail'
'if you think this '
'was a mistake.'))
return
# get confirmation
msg = await ctx.send(embed=await infoCheckEmbed(user, ctx.author.id))
await msg.add_reaction('✅')
await msg.add_reaction('❌')
    def confirmCheck(react, person):
        return person == ctx.author and \
               (str(react.emoji) == '✅' or str(react.emoji) == '❌')
try:
reaction, member = await bot.wait_for(event='reaction_add',
timeout=60.0,
check=confirmCheck)
# exit if wrong info
if str(reaction.emoji) == '❌':
await ctx.send("Try again with `!register`")
return
except asyncio.TimeoutError:
await ctx.send('Timed out. Please try again.')
return
# otherwise add user to verification
await addVerification(user)
await ctx.send(embed=await success_embed('Check your email for further'
' instructions :smile:'))
await logRegistered(ctx, user, bot)
@bot.command()
async def verify(ctx: discord.ext.commands.Context, auth_code=None):
# check if auth code was provided
if auth_code is None:
await ctx.send(embed=await error_embed("No email verification code "
"provided."))
# cancel the command
return
# command can only be run verification channel
elif ctx.channel.id != verification_channel_id:
await ctx.send(embed=await error_embed(f"Command can only be run in "
f"<#{verification_channel_id}>"))
return
# check if user is already verified
elif await idTaken(ctx.author.id):
await ctx.send(embed=await error_embed("Your ID is already registered."
"\n"
"If you think this was a "
"mistake "
"please contact an admin."))
return
# check if auth code is valid
elif not await authCodeTaken(auth_code):
await ctx.reply(embed=await error_embed("Not a valid verification "
"code."))
return
# verify user
nick = await verifyUser(ctx.author.id, auth_code)
await ctx.reply(embed=await success_embed("You're in! :smile:"))
# give role
await ctx.author.add_roles(ctx.guild.get_role(verified_role_id))
await ctx.author.edit(nick=nick)
await logVerified(ctx, nick, bot)
# log invite if was invited
if await wasInvited(ctx.author.id):
await useInvite(ctx.author.id)
@bot.command()
async def addemail(ctx, address):
if await isEDUEmail(address, True):
await ctx.send(embed=await error_embed('Already a valid '
'email address.'))
else:
await addEDUEmail(address)
await ctx.send(embed=await success_embed(f'Added @{address} as a '
f'valid email address.'))
# run the bot
bot.run(BOT_TOKEN)
|
nilq/baby-python
|
python
|
class DataIntegrityException(Exception):
pass
class AuthenticationException(Exception):
pass
class UnauthorizedException(Exception):
pass
|
nilq/baby-python
|
python
|
"""Module for defining primitives and primitve categories
"""
from collections import defaultdict
from enum import Enum
import json
import pprint
import pkgutil
class Primitive(object):
"""A primitive"""
def __init__(self):
self.name = ''
self.task = ''
self.learning_type = ''
self.ml_algorithm = ''
self.tags = ['NA', 'NA']
self.weight = 1
def __str__(self):
return 'Primitive("{}")'.format(self.name)
def __repr__(self):
return 'Primitive("{}")'.format(self.name)
def __eq__(self, other):
"""Define equals based on name"""
if isinstance(other, self.__class__):
return self.name == other.name
return NotImplemented
def __ne__(self, other):
"""Overide non-equality test"""
if isinstance(other, self.__class__):
return not self.__eq__(other)
return NotImplemented
def __hash__(self):
"""Overide hash by using name attribute"""
return hash(self.name)
class DSBoxPrimitive(Primitive):
"""A primitive"""
def __init__(self, definition):
super(DSBoxPrimitive, self).__init__()
self.name = definition['Name']
self.task = definition['Task']
self.learning_type = definition['LearningType']
self.ml_algorithm = definition['MachineLearningAlgorithm']
self.tags = [self.ml_algorithm, self.ml_algorithm]
self.weight = 1
def __str__(self):
return 'DSBoxPrimitive("{}")'.format(self.name)
def __repr__(self):
return 'DSBoxPrimitive("{}")'.format(self.name)
class D3mPrimitive(Primitive):
"""Primitive defined using D3M metadata"""
def __init__(self, definition):
super(D3mPrimitive, self).__init__()
self.name = definition['id'].split('.')[-1]
self.task = ''
if 'task_type' in definition:
if 'feature extraction' in definition['task_type']:
self.task = "FeatureExtraction"
if 'data preprocessing' in definition['task_type']:
self.task = "DataPreprocessing"
self.learning_type = 'NA'
if 'handles_classification' in definition and definition['handles_classification']:
self.learning_type = 'classification'
self.task = 'Modeling'
if 'handles_regression' in definition and definition['handles_regression']:
self.learning_type = 'regression'
self.task = 'Modeling'
self.handles_multiclass = False
if 'handles_multiclass' in definition:
self.handles_multiclass = definition['handles_multiclass']
self.handles_multilabel = False
if 'handles_multilabel' in definition:
self.handles_multilabel = definition['handles_multilabel']
if 'algorithm_type' in definition:
# !!!! For now get only the first type
self.ml_algorithm = definition['algorithm_type'][0]
if 'graph matching' in definition['algorithm_type']:
self.learning_type = 'graphMatching'
else:
self.ml_algorithm = 'NA'
self.tags = definition['tags']
# make sure tag hierarchy is at least 2
if len(self.tags) == 0:
self.tags = ['NA', 'NA']
elif len(self.tags) == 1:
self.tags = [self.tags[0], self.tags[0]]
self.weight = 1
def __str__(self):
return 'D3mPrimitive("{}")'.format(self.name)
def __repr__(self):
return 'D3mPrimitive("{}")'.format(self.name)
class HierarchyNode(object):
"""Node in the Hierarchy"""
def __init__(self, hierarchy, name, parent, content=None):
self.hierarchy = hierarchy
self.name = name
self.parent = parent
self.children = []
self._content = content
def get_content(self):
return self._content
def add_child(self, name, content=None):
"""Add a child to the hierarchy"""
child = HierarchyNode(self.hierarchy, name, self, content)
self.children.append(child)
return child
def has_child(self, name):
"""Return true if node has child with given name"""
for node in self.children:
if node.name == name:
return True
return False
def get_child(self, name):
"""Return child by name"""
for node in self.children:
if node.name == name:
return node
raise Exception('Child not found: {}'.format(name))
def get_siblings(self):
if self.parent is None:
result = []
else:
result = [x for x in self.parent.children if not x==self]
return result
def add_primitive(self, primitive):
self.hierarchy.add_primitive(self, primitive)
def _add_primitive(self, primitive):
if self._content is None:
self._content = [primitive]
else:
self._content.append(primitive)
def __str__(self):
return 'Node({},num_child={})'.format(self.name, len(self.children))
class Hierarchy(object):
"""Generic tree of nodes"""
def __init__(self, name):
# name of this hierarchy
self.name = name
self.root = HierarchyNode(self, 'root', None)
self._changed = False
self._level_count = []
self.node_by_primitive = dict()
def add_child(self, node, name, content=None):
"""Create and add child node"""
assert node.hierarchy == self
node.add_child(name, content)
if content is not None:
for primitive in content:
self.node_by_primitive[primitive] = node
self._changed = True
def add_path(self, names):
"""Create and add all nodes in path"""
curr_node = self.root
for name in names:
if curr_node.has_child(name):
curr_node = curr_node.get_child(name)
else:
curr_node = curr_node.add_child(name)
self._changed = True
return curr_node
def add_primitive(self, node, primitive):
self.node_by_primitive[primitive] = node
node._add_primitive(primitive)
def get_level_counts(self):
"""Computes the number of nodes at each level"""
if not self._changed:
return self._level_count
self._level_count = self._compute_level_counts(self.root, 0, list())
self._changed = False
return self._level_count
def _compute_level_counts(self, node, level, counts):
"""Computes the number of nodes at each level"""
if len(counts) < level + 1:
counts = counts + [0]
counts[level] = counts[level] + 1
for child in node.children:
counts = self._compute_level_counts(child, level+1, counts)
return counts
def get_primitive_count(self):
"""Returns the number of primitives"""
return self._get_primitive_count(self.root)
def _get_primitive_count(self, curr_node):
"""Returns the number of primitives"""
count = 0
if curr_node._content is not None:
count = count + len(curr_node._content)
for child in curr_node.children:
count = count + self._get_primitive_count(child)
return count
def get_primitives(self, curr_node=None):
if curr_node is None:
curr_node = self.root
result = []
if curr_node._content is not None:
result.append(curr_node._content)
for child in curr_node.children:
result = result + self.get_primitives(child)
return result
def get_primitives_as_list(self, curr_node=None):
return [p for plist in self.get_primitives(curr_node) for p in plist]
def get_nodes_by_level(self, level):
"""Returns node at a specified level of the tree"""
return self._get_nodes_by_level(self.root, 0, level)
def _get_nodes_by_level(self, curr_node, curr_level, target_level):
"""Returns node at a specified level of the tree"""
if curr_level >= target_level:
return [curr_node]
elif curr_level +1 == target_level:
return curr_node.children
else:
result = []
for node in curr_node.children:
result = result + self._get_nodes_by_level(node, curr_level + 1, target_level)
return result
def get_node_by_primitive(self, primitive):
return self.node_by_primitive[primitive]
def pprint(self):
"""Print hierarchy"""
print('Hierarchy({}, level_counts={})'.format(self.name, self.get_level_counts()))
self._print(self.root, [])
def _print(self, curr_node, path, max_depth=2):
new_path = path + [curr_node]
if len(new_path) > max_depth:
print(' '*4 + ':'.join([node.name for node in new_path[1:]]))
for line in pprint.pformat(curr_node._content).splitlines():
print(' '*8 + line)
else:
for child in curr_node.children:
self._print(child, new_path, max_depth=max_depth)
def __str__(self):
return 'Hierarchy({}, num_primitives={}, level_node_counts={})'.format(
self.name, self.get_primitive_count(), self.get_level_counts())
class Category(Enum):
PREPROCESSING = 1
FEATURE = 2
CLASSIFICATION = 3
REGRESSION = 4
UNSUPERVISED = 5
EVALUATION = 6
METRICS = 7
GRAPH = 8
OTHER = 9
class Primitives(object):
"""Base Primitives class"""
def __init__(self):
self.primitives = []
self._index = dict()
self.size = 0
self.hierarchy_types = [Category.PREPROCESSING, Category.FEATURE,
Category.CLASSIFICATION, Category.REGRESSION,
Category.UNSUPERVISED, Category.EVALUATION,
Category.METRICS, Category.GRAPH, Category.OTHER]
self.hierarchies = dict()
for name in Category:
self.hierarchies[name] = Hierarchy(name)
def filter_equality(self, aspect, name):
"""Find primitive by aspect and name value"""
result = [p for p in self.primitives if getattr(p, aspect) == name]
return result
def filter_by_task(self, name):
"""Find primitive by task aspect and name value"""
return self.filter_equality('task', name)
def filter_by_learning_type(self, name):
"""Find primitive by learning-type aspect and name value"""
return self.filter_equality('learning_type', name)
def filter_by_algo(self, name):
"""Find primitive by algorithm aspect and name value"""
return self.filter_equality('ml_algorithm', name)
def get_by_name(self, name):
"""Get primitve by unique name"""
for primitive in self.primitives:
if primitive.name == name:
return primitive
return None
def get_index(self, name):
"""Returns the index of the primitive given its name"""
return self._index[name]
def get_hierarchies(self):
"""Returns all primitive hierarchies as dict"""
return self.hierarchies
def print_statistics_old(self):
"""Print statistics of the primitives"""
classification = 0
regression = 0
classification_algo = defaultdict(int)
regression_algo = defaultdict(int)
tag_primitive = defaultdict(list)
for primitive in self.primitives:
if len(primitive.tags) > 0:
tag_str = ':'.join(primitive.tags)
if primitive.learning_type == Category.CLASSIFICATION:
classification = classification + 1
# classification_algo[primitive.ml_algorithm] += 1
classification_algo[tag_str] = classification_algo[tag_str] + 1
tag_primitive['C:' + tag_str].append(primitive.name)
elif primitive.learning_type == Category.REGRESSION:
regression = regression + 1
regression_algo[tag_str] = regression_algo[tag_str] + 1
tag_primitive['R:' + tag_str].append(primitive.name)
else:
tag_primitive['O:' + tag_str].append(primitive.name)
        print('Primitive by Tag:')
pprint.pprint(tag_primitive)
print('Total number of primitives = {}'.format(self.size))
print('num classifiers = {}'.format(classification))
pprint.pprint(classification_algo)
print('num regressors = {}'.format(regression))
pprint.pprint(regression_algo)
def print_statistics(self):
"""Print statistics of the primitives"""
print('Total number of primitives = {}'.format(self.size))
print('Number of primitives by hierarchy:')
hierarchies = self.get_hierarchies()
for name in Category:
print(' '*4 + str(hierarchies[name]))
def _compute_tag_hierarchy(self):
"""Compute hierarchy based on sklearn tags"""
for primitive in self.primitives:
# Put base/mixin and functions into other category
if primitive.tags[0] in ['metrics']:
node = self.hierarchies[Category.METRICS].add_path(primitive.tags[:2])
elif (primitive.tags[0] == 'base'
or (primitive.tags[1] == 'base' and not 'LinearRegression' in primitive.name)
or 'Base' in primitive.name
or 'Mixin' in primitive.name
or primitive.name[0].islower()
or primitive.name == 'ForestRegressor'
or primitive.name == 'ForestClassifier'):
node = self.hierarchies[Category.OTHER].add_path(primitive.tags[:2])
elif (primitive.learning_type == 'classification'
or primitive.tags[0] in ['lda', 'qda', 'naive_bayes']
or ('Classifier' in primitive.name
and not primitive.tags[0] in
['multiclass', 'multioutput', 'calibration'])
or 'SVC' in primitive.name
or 'LogisticRegression' in primitive.name
or 'Perceptron' in primitive.name ): # Same as SGDClassifier
node = self.hierarchies[Category.CLASSIFICATION].add_path(primitive.tags[:2])
# Modify primitive learning type
primitive.learning_type = 'classification'
elif (primitive.learning_type == 'regression'
or primitive.tags[0] in ['isotonic']
or ('Regressor' in primitive.name
and not primitive.tags[0] in ['multioutput'])
or 'SVR' in primitive.name
or 'ElasticNet' in primitive.name
or 'KernelRidge' in primitive.name
or 'Lars' in primitive.name
or 'Lasso' in primitive.name
or 'LinearRegression' in primitive.name
or 'Ridge' in primitive.name):
node = self.hierarchies[Category.REGRESSION].add_path(primitive.tags[:2])
# Modify primitive learning type
primitive.learning_type = 'regression'
elif primitive.tags[0] in ['cluster', 'mixture']:
node = self.hierarchies[Category.UNSUPERVISED].add_path(primitive.tags[:2])
elif (primitive.tags[0] in ['feature_extraction', 'feature_selection', 'decomposition',
'random_projection', 'manifold']
or 'OrthogonalMatchingPursuit' in primitive.name):
node = self.hierarchies[Category.FEATURE].add_path(primitive.tags[:2])
elif primitive.tags[0] == 'preprocessing':
node = self.hierarchies[Category.PREPROCESSING].add_path(primitive.tags[:2])
elif (primitive.tags[0] in ['cross_validation', 'model_selection']):
node = self.hierarchies[Category.EVALUATION].add_path(primitive.tags[:2])
            elif primitive.tags[0] == 'graph_matching':
node = self.hierarchies[Category.GRAPH].add_path(primitive.tags[:2])
else:
node = self.hierarchies[Category.OTHER].add_path(primitive.tags[:2])
node.add_primitive(primitive)
class DSBoxPrimitives(Primitives):
"""Maintain available primitives"""
PRIMITIVE_FILE = 'primitives.json'
def __init__(self):
super(DSBoxPrimitives, self).__init__()
self._load()
for index, primitive in enumerate(self.primitives):
self._index[primitive] = index
self.size = len(self.primitives)
self._compute_tag_hierarchy()
def _load(self):
"""Load primitive definition from JSON file"""
text = pkgutil.get_data('dsbox.planner.levelone', self.PRIMITIVE_FILE)
content = json.loads(text.decode())
self.primitives = [DSBoxPrimitive(primitive_dict)
for primitive_dict in content['Primitives']]
def _compute_tag_hierarchy(self):
"""Compute hierarchy based on sklearn tags"""
for primitive in self.primitives:
if primitive.learning_type == 'Classification':
node = self.hierarchies[Category.CLASSIFICATION].add_path(
[primitive.ml_algorithm, primitive.ml_algorithm])
primitive.learning_type = 'classification'
elif primitive.learning_type == 'Regression':
node = self.hierarchies[Category.REGRESSION].add_path(
[primitive.ml_algorithm, primitive.ml_algorithm])
primitive.learning_type = 'regression'
elif primitive.learning_type == 'UnsupervisedLearning':
node = self.hierarchies[Category.UNSUPERVISED].add_path(
[primitive.ml_algorithm, primitive.ml_algorithm])
elif primitive.task == 'FeatureExtraction':
node = self.hierarchies[Category.FEATURE].add_path(
[primitive.ml_algorithm, primitive.ml_algorithm])
elif primitive.task == 'DataPreprocessing':
node = self.hierarchies[Category.PREPROCESSING].add_path(
[primitive.ml_algorithm, primitive.ml_algorithm])
elif primitive.task == 'Evaluation':
node = self.hierarchies[Category.EVALUATION].add_path(
[primitive.ml_algorithm, primitive.ml_algorithm])
else:
node = self.hierarchies[Category.OTHER].add_path(
[primitive.ml_algorithm, primitive.ml_algorithm])
node.add_primitive(primitive)
class D3mPrimitives(Primitives):
"""Primitives from D3M metadata"""
PRIMITIVE_FILE = 'sklearn.json'
def __init__(self, additional_primitives):
# additional_primitives is list of Primitives
super(D3mPrimitives, self).__init__()
self._load()
if additional_primitives:
self.primitives = self.primitives + additional_primitives
for index, primitive in enumerate(self.primitives):
self._index[primitive] = index
self.size = len(self.primitives)
self._compute_tag_hierarchy()
def _load(self):
"""Load primitve from json"""
text = pkgutil.get_data('dsbox.planner.levelone', self.PRIMITIVE_FILE)
content = json.loads(text.decode())
self.primitives = [D3mPrimitive(primitive_dict)
for primitive_dict in content['search_primitives']]
|
nilq/baby-python
|
python
|
from django.db.models import Q
from rest_framework.filters import (OrderingFilter, SearchFilter)
from rest_framework.generics import (ListAPIView,
RetrieveAPIView)
from posts.models import Comment
from posts.api.permissions import IsOwnerOrReadOnly
from posts.api.pagination import PostLimitOffsetPagination, PostPageNumberPagination
from .serializers import CommentSerializer
class CommentDetailAPIView(RetrieveAPIView):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
class CommentListAPIView(ListAPIView):
serializer_class = CommentSerializer
filter_backends = [SearchFilter, OrderingFilter]
search_fields = ['content', 'user__first_name']
pagination_class = PostPageNumberPagination
def get_queryset(self, *args, **kwargs):
queryset_list = Comment.objects.all()
query = self.request.GET.get("q")
if query:
queryset_list = queryset_list.filter(
Q(content__icontains=query) |
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query)
).distinct()
return queryset_list
|
nilq/baby-python
|
python
|
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
my_window = Tk()
my_window.title("our final project/help")
my_window.geometry("1366x768")
my_window.resizable(1, 1)
my_window.configure(bg="grey")
L1 = Label(my_window, text="ENQUIRY MANAGEMENT SYSTEM", bg="lavender", fg="blue", font=("Algerian", 40), bd=4)
L1.pack(fill=X)
L2=Label(my_window, text="How can i help you? :", bg="grey", fg="white", font=("cambria",20),width=40)
L2.place(x=200,y=200)
E2=Entry(my_window,width=50,font="20",bd=3)
E2.place(x=230,y=250)
F1 = Frame(my_window, height=60, width=1366, bg="#ffff00")
F1.place(x=0, y=620)
L7 = Label(F1, text="Designed & Developed by : ", fg="red", bg="#ffff00", font=("cambria", 20), width="30")
L7.place(x=600, y=20)
L8 = Label(F1, text="Pushpa Kumari", bg="#ffff00", fg="black", font=("arial black", 13), width="20")
L8.place(x=1000, y=30)
my_window.mainloop()
|
nilq/baby-python
|
python
|
import os
import shutil
import sys
import time
import unittest
from sitetree import *
class TestCopyFuncs(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
os.makedirs('testdata/test_sitetree/src/nested')
with open('testdata/test_sitetree/src/nested/fileA.txt', "w") as f:
f.write("fileA")
with open('testdata/test_sitetree/src/nested/fileB.txt', "w") as f:
f.write("fileB")
with open('testdata/test_sitetree/src/fileC.txt', "w") as f:
f.write("fileC")
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree('testdata/test_sitetree')
def test_is_newer(self):
time.sleep(0.1)
with open('testdata/test_sitetree/fileC.txt', "w") as f:
f.write("fileC")
self.assertTrue(is_newer('testdata/test_sitetree/fileC.txt',
'testdata/test_sitetree/src/fileC.txt'))
self.assertFalse(is_newer('testdata/test_sitetree/src/fileC.txt',
'testdata/test_sitetree/fileC.txt'))
os.rename('testdata/test_sitetree/fileC.txt',
'testdata/test_sitetree/fileD.txt')
self.assertRaises(ValueError, is_newer,
'testdata/test_sitetree/fileD.txt',
'testdata/test_sitetree/src/fileC.txt')
os.remove('testdata/test_sitetree/fileD.txt')
self.assertTrue(is_newer('testdata/test_sitetree/src/fileC.txt',
'testdata/test_sitetree/fileC.txt'))
def test_copy_on_condition(self):
time.sleep(0.1)
ALT_TEXT = "fileC alternative version"
with open('testdata/test_sitetree/fileC.txt', "w") as f:
f.write(ALT_TEXT)
copy_on_condition('testdata/test_sitetree/fileC.txt',
'testdata/test_sitetree/src/fileC.txt', is_newer)
with open('testdata/test_sitetree/src/fileC.txt', "r") as f:
content = f.read()
self.assertEqual(content, ALT_TEXT)
time.sleep(1)
with open('testdata/test_sitetree/src/fileC.txt', "w") as f:
f.write("fileC")
copy_on_condition('testdata/test_sitetree/fileC.txt',
'testdata/test_sitetree/src/fileC.txt', is_newer)
with open('testdata/test_sitetree/src/fileC.txt', "r") as f:
content = f.read()
self.assertNotEqual(content, ALT_TEXT)
os.remove('testdata/test_sitetree/fileC.txt')
# if __name__ == "__main__":
# sys.path.append(
# os.path.split(os.path.dirname(os.path.abspath(sys.argv[0])))[0])
# from sitetree import *
# unittest.main()
|
nilq/baby-python
|
python
|
# copyright 1999 McMillan Enterprises, Inc.
# license: use as you please. No warranty.
#
# A subclass of Archive that can be understood
# by a C program. See uplaunch.cpp for unpacking
# from C.
import archive
import struct
import zlib
import strop
class CTOC:
"""A class encapsulating the table of contents of a CArchive.
When written to disk, it is easily read from C."""
ENTRYSTRUCT = 'iiiibc' #(structlen, dpos, dlen, ulen, flag, typcd) followed by name
def __init__(self):
self.data = []
def frombinary(self, s):
"""Decode the binary string into an in memory list.
S is a binary string."""
entrylen = struct.calcsize(self.ENTRYSTRUCT)
p = 0
while p<len(s):
(slen, dpos, dlen, ulen, flag, typcd) = struct.unpack(self.ENTRYSTRUCT,
s[p:p+entrylen])
nmlen = slen - entrylen
p = p + entrylen
(nm,) = struct.unpack(repr(nmlen)+'s', s[p:p+nmlen])
p = p + nmlen
self.data.append((dpos, dlen, ulen, flag, typcd, nm[:-1]))
def tobinary(self):
"""Return self as a binary string."""
import string
entrylen = struct.calcsize(self.ENTRYSTRUCT)
rslt = []
for (dpos, dlen, ulen, flag, typcd, nm) in self.data:
nmlen = len(nm) + 1 # add 1 for a '\0'
rslt.append(struct.pack(self.ENTRYSTRUCT+repr(nmlen)+'s',
nmlen+entrylen, dpos, dlen, ulen, flag, typcd, nm+'\0'))
return string.join(rslt, '')
def add(self, dpos, dlen, ulen, flag, typcd, nm):
"""Add an entry to the table of contents.
DPOS is data position.
DLEN is data length.
ULEN is the uncompressed data len.
FLAG says if the data is compressed.
TYPCD is the "type" of the entry (used by the C code)
NM is the entry's name."""
self.data.append((dpos, dlen, ulen, flag, typcd, nm))
def get(self, ndx):
"""return the toc entry (tuple) at index NDX"""
return self.data[ndx]
def __getitem__(self, ndx):
return self.data[ndx]
def find(self, name):
"""Return the index of the toc entry with name NAME.
Return -1 for failure."""
for i in range(len(self.data)):
if self.data[i][-1] == name:
return i
return -1
class CArchive(archive.Archive):
"""An Archive subclass that an hold arbitrary data.
Easily handled from C or from Python."""
MAGIC = 'MEI\014\013\012\013\015'
HDRLEN = 0
TOCTMPLT = CTOC
TRLSTRUCT = '8siii'
TRLLEN = 20
LEVEL = 9
def __init__(self, path=None, start=0, len=0):
"""Constructor.
PATH is path name of file (create an empty CArchive if path is None).
START is the seekposition within PATH.
LEN is the length of the CArchive (if 0, then read till EOF). """
self.len = len
archive.Archive.__init__(self, path, start)
def checkmagic(self):
"""Verify that self is a valid CArchive.
Magic signature is at end of the archive."""
#magic is at EOF; if we're embedded, we need to figure where that is
if self.len:
self.lib.seek(self.start+self.len, 0)
else:
self.lib.seek(0, 2)
filelen = self.lib.tell()
if self.len:
self.lib.seek(self.start+self.len-self.TRLLEN, 0)
else:
self.lib.seek(-self.TRLLEN, 2)
(magic, totallen, tocpos, toclen) = struct.unpack(self.TRLSTRUCT,
self.lib.read(self.TRLLEN))
if magic != self.MAGIC:
raise RuntimeError, "%s is not a valid %s archive file" \
% (self.path, self.__class__.__name__)
self.pkgstart = filelen - totallen
if self.len:
if totallen != self.len or self.pkgstart != self.start:
raise RuntimeError, "Problem with embedded archive in %s" % self.path
self.tocpos, self.toclen = tocpos, toclen
def loadtoc(self):
"""Load the table of contents into memory."""
self.toc = self.TOCTMPLT()
self.lib.seek(self.pkgstart+self.tocpos)
tocstr = self.lib.read(self.toclen)
self.toc.frombinary(tocstr)
def extract(self, name):
"""Get the contents of an entry.
NAME is an entry name.
Return the tuple (ispkg, contents).
        For non-Python resources, ispkg is meaningless (and 0).
Used by the import mechanism."""
if type(name) == type(''):
ndx = self.toc.find(name)
if ndx == -1:
return None
else:
ndx = name
(dpos, dlen, ulen, flag, typcd, nm) = self.toc.get(ndx)
self.lib.seek(self.pkgstart+dpos)
rslt = self.lib.read(dlen)
if flag == 1:
rslt = zlib.decompress(rslt)
if typcd == 'M':
return (1, rslt)
return (0, rslt)
def contents(self):
"""Return the names of the entries"""
rslt = []
for (dpos, dlen, ulen, flag, typcd, nm) in self.toc:
rslt.append(nm)
return rslt
def add(self, entry):
"""Add an ENTRY to the CArchive.
ENTRY must have:
entry[0] is name (under which it will be saved).
entry[1] is fullpathname of the file.
        entry[2] is a flag for its storage format (0==uncompressed,
1==compressed, 2==Python source format)
entry[3] is the entry's type code."""
(nm, pathnm, flag, typcd) = entry[:4]
if flag == 2:
s = open(pathnm, 'r').read()
s = s + '\n\0'
else:
s = open(pathnm, 'rb').read()
ulen = len(s)
if flag == 1:
s = zlib.compress(s, self.LEVEL)
dlen = len(s)
where = self.lib.tell()
if typcd == 'm':
if strop.find(pathnm, '.__init__.py') > -1:
typcd = 'M'
self.toc.add(where, dlen, ulen, flag, typcd, nm)
self.lib.write(s)
def save_toc(self, tocpos):
"""Save the table of contents to disk."""
self.tocpos = tocpos
tocstr = self.toc.tobinary()
self.toclen = len(tocstr)
self.lib.write(tocstr)
def save_trailer(self, tocpos):
"""Save the trailer to disk.
CArchives can be opened from the end - the trailer points
back to the start. """
totallen = tocpos + self.toclen + self.TRLLEN
trl = struct.pack(self.TRLSTRUCT, self.MAGIC, totallen,
tocpos, self.toclen)
self.lib.write(trl)
def openEmbedded(self, name):
"""Open a CArchive of name NAME embedded within this CArchive."""
ndx = self.toc.find(name)
if ndx == -1:
raise KeyError, "Member '%s' not found in %s" % (name, self.path)
(dpos, dlen, ulen, flag, typcd, nm) = self.toc.get(ndx)
if flag:
raise ValueError, "Cannot open compressed archive %s in place"
return CArchive(self.path, self.pkgstart+dpos, dlen)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import Organization, Metadata
admin.site.register(Organization)
admin.site.register(Metadata)
|
nilq/baby-python
|
python
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sim.py in the exp_framework module."""
from absl.testing import absltest
import numpy as np
from sparse_data.data import sim
NUM_DATASET = 5
NUM_REPLICATE = 5
SEED = 2462723
def setUpModule():
np.random.seed(SEED)
class TestSim(absltest.TestCase):
def setUp(self):
super(TestSim, self).setUp()
self.init_method = None # children should define this
def test_reproducability(self):
if self.init_method is None:
return
# pylint: disable=not-callable
datasets = [self.init_method() for _ in range(NUM_DATASET)]
    # check reproducibility of the generate() function
for _ in range(NUM_REPLICATE):
xs, ys = [], []
for d in datasets:
d.reset()
x, y = d.generate()
xs.append(x)
ys.append(y)
np.random.randn() # make calls to global np.random RNG
for i in range(NUM_DATASET - 1):
self.assertTrue(np.array_equal(xs[i], xs[i + 1]))
self.assertTrue(np.array_equal(ys[i], ys[i + 1]))
    # check reproducibility of the get() function
for _ in range(NUM_REPLICATE):
x_trains, y_trains, x_tests, y_tests = [], [], [], []
for d in datasets:
d.reset()
x_train, y_train, x_test, y_test = d.get()
x_trains.append(x_train)
y_trains.append(y_train)
x_tests.append(x_test)
y_tests.append(y_test)
np.random.randn() # make calls to global np.random RNG
for i in range(NUM_DATASET - 1):
self.assertTrue(np.array_equal(x_trains[i], x_trains[i + 1]))
self.assertTrue(np.array_equal(y_trains[i], y_trains[i + 1]))
self.assertTrue(np.array_equal(x_tests[i], x_tests[i + 1]))
self.assertTrue(np.array_equal(y_tests[i], y_tests[i + 1]))
class TestLinear(TestSim):
def setUp(self):
super(TestLinear, self).setUp()
self.init_method = sim.LinearSimulation
def test_shape(self):
num_sample = np.random.randint(10, 20)
num_feature = np.random.randint(10, 20)
problem = 'classification'
for _ in range(NUM_REPLICATE):
d = self.init_method(
num_sample=num_sample, num_feature=num_feature, problem=problem)
d.reset()
x, y = d.generate()
self.assertEqual(x.shape, (num_sample, num_feature))
self.assertEqual(y.shape, (num_sample,))
def test_sparsity(self):
num_sample = 1000
num_feature = 10
problem = 'classification'
for _ in range(NUM_REPLICATE):
prop_nonzero = np.random.uniform(0.2, 0.8)
d = self.init_method(
num_sample=num_sample,
num_feature=num_feature,
prop_nonzero=prop_nonzero,
problem=problem)
d.reset()
x, _ = d.generate()
observed_prop_nonzero = np.true_divide(np.sum(x > 0), np.size(x))
self.assertLess(
np.abs(observed_prop_nonzero - prop_nonzero), 0.1 * prop_nonzero)
class TestCardinality(TestLinear):
def setUp(self):
super(TestCardinality, self).setUp()
self.init_method = sim.CardinalitySimulation
def test_shape(self):
num_sample = np.random.randint(10, 20)
num_feature = np.random.randint(10, 20) * 2 # should be even
problem = 'classification'
for _ in range(NUM_REPLICATE):
d = self.init_method(
num_sample=num_sample, num_feature=num_feature, problem=problem)
d.reset()
x, y = d.generate()
self.assertEqual(x.shape, (num_sample, num_feature))
self.assertEqual(y.shape, (num_sample,))
def test_sparsity(self):
pass
class TestSparsity(TestCardinality):
def setUp(self):
super(TestSparsity, self).setUp()
self.init_method = sim.SparsitySimulation
def test_sparsity(self):
num_sample = 1000
num_feature = 50
problem = 'classification'
for _ in range(NUM_REPLICATE):
prop_nonzero = np.random.uniform(0.2, 0.8)
d = self.init_method(
num_sample=num_sample,
num_feature=num_feature,
prop_nonzero=prop_nonzero,
problem=problem)
d.reset()
x, _ = d.generate()
x_inf = x[:, :int(num_feature / 2)]
observed_prop_nonzero = np.true_divide(np.sum(x_inf > 0), np.size(x_inf))
self.assertLess(
np.abs(observed_prop_nonzero - prop_nonzero), 0.1 * prop_nonzero)
class TestMultiplicative(TestLinear):
def setUp(self):
super(TestMultiplicative, self).setUp()
self.init_method = sim.MultiplicativeSimulation
def test_shape(self):
orders = range(1, 10)
problem = 'classification'
for _ in range(NUM_REPLICATE):
num_sample = np.random.randint(10, 20)
num_group_per_order = np.random.randint(10, 20)
num_feature = np.sum([o * num_group_per_order for o in orders],
                          dtype=int)
d = self.init_method(
num_sample=num_sample,
num_feature=num_feature,
orders=orders,
problem=problem)
d.reset()
x, y = d.generate()
self.assertEqual(x.shape, (num_sample, num_feature))
self.assertEqual(y.shape, (num_sample,))
def test_sparsity(self):
num_sample = 1000
num_group_per_order = 10
orders = range(1, 10)
problem = 'classification'
num_feature = np.sum([o * num_group_per_order for o in orders],
                        dtype=int)
for _ in range(NUM_REPLICATE):
prop_nonzero = np.random.uniform(0.2, 0.8)
d = self.init_method(
num_sample=num_sample,
num_feature=num_feature,
orders=orders,
prop_nonzero=prop_nonzero,
problem=problem)
d.reset()
x, _ = d.generate()
observed_prop_nonzero = np.true_divide(np.sum(x > 0), np.size(x))
self.assertLess(
np.abs(observed_prop_nonzero - prop_nonzero), 0.1 * prop_nonzero)
class TestXOR(TestLinear):
def setUp(self):
super(TestXOR, self).setUp()
self.init_method = sim.XORSimulation
def test_shape(self):
problem = 'classification'
for _ in range(NUM_REPLICATE):
num_sample = np.random.randint(10, 20)
num_features = 2 * np.random.randint(10, 20)
d = self.init_method(
num_sample=num_sample, num_feature=num_features, problem=problem)
d.reset()
x, y = d.generate()
self.assertEqual(x.shape, (num_sample, num_features))
self.assertEqual(y.shape, (num_sample,))
def test_sparsity(self):
num_sample = 1000
num_pair = 10
problem = 'classification'
for _ in range(NUM_REPLICATE):
prop_nonzero = np.random.uniform(0.2, 0.8)
d = self.init_method(
num_sample=num_sample,
          num_feature=num_pair // 2,
prop_nonzero=prop_nonzero,
problem=problem)
d.reset()
x, _ = d.generate()
observed_prop_nonzero = np.true_divide(np.sum(x > 0), np.size(x))
self.assertLess(
np.abs(observed_prop_nonzero - prop_nonzero), 0.1 * prop_nonzero)
class TestFunctions(absltest.TestCase):
def test_continuous_to_binary(self):
# TODO(jisungkim) add more tests here
y = [0, 1, 2, 3, 4, 5]
exp_y_squashed = [0, 0, 0, 1, 1, 1]
self.assertTrue(
np.array_equal(exp_y_squashed,
sim.continuous_to_binary(y, squashing='linear')))
if __name__ == '__main__':
absltest.main()
|
nilq/baby-python
|
python
|
# __init__.py
#
# Copyright(c) Exequiel Ceasar Navarrete <exequiel.navarrete09@gmail.com>
# Licensed under MIT
# Version 1.0.0-alpha2
from compiler.tokenizer import Tokenizer
from compiler.parser import Parser
from compiler.transformer import Transformer
from compiler.code_generator import CodeGenerator
class Compiler(object):
""" Class that compiles given code to another language """
def __init__(self, input_code):
self.input_code = input_code
def compile(self):
tknizer = Tokenizer(self.input_code)
parser = Parser(tknizer.run())
transformer = Transformer(parser.run())
code_generator = CodeGenerator(transformer.run())
return code_generator.run()
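# Hedged usage sketch (added; assumes the pipeline stages above accept a
# plain source string end to end):
#   output_code = Compiler(input_code).compile()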
|
nilq/baby-python
|
python
|
from testtube.helpers import Frosted, Nosetests, Pep257, Flake8
PATTERNS = (
# Run pep257 check against a file if it changes, excluding files that have
# test_ or tube.py in the name.
# If this test fails, don't make any noise (0 bells on failure)
(
r'((?!test_)(?!tube\.py).)*\.py$',
[Pep257(bells=0)]
),
# Run flake8 and Frosted on all python files when they change. If these
# checks fail, abort the entire test suite because it might be due to a
# syntax error. There's no point running the subsequent tests if there
# is such an error.
(
r'.*\.py$',
[Flake8(all_files=True), Frosted(all_files=True)],
{'fail_fast': True}
),
# Run the test suite whenever python or test config files change
(
r'(.*setup\.cfg$)|(.*\.coveragerc)|(.*\.py$)',
[Nosetests()]
)
)
|
nilq/baby-python
|
python
|
# Automatically generated by pb2py
# fmt: off
from .. import protobuf as p
if __debug__:
try:
from typing import Dict, List # noqa: F401
from typing_extensions import Literal # noqa: F401
except ImportError:
pass
class MoneroSubAddressIndicesList(p.MessageType):
def __init__(
self,
account: int = None,
minor_indices: List[int] = None,
) -> None:
self.account = account
self.minor_indices = minor_indices if minor_indices is not None else []
@classmethod
def get_fields(cls) -> Dict:
return {
1: ('account', p.UVarintType, 0),
2: ('minor_indices', p.UVarintType, p.FLAG_REPEATED),
}
|
nilq/baby-python
|
python
|
def power_set(s):
return power_set_util(list(s), 0)
def power_set_util(s, index):
if index == len(s):
all_subsets = [set()]
else:
all_subsets = power_set_util(s, index + 1)
new_subsets = []
for subset in all_subsets:
concat = set(subset)
concat.add(s[index])
new_subsets.append(concat)
all_subsets.extend(new_subsets)
return all_subsets
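# Added sanity check (illustration only): the power set of an n-element set
# must contain 2**n subsets, including both extremes.
def sanity_check_power_set():
    s = {1, 2, 3}
    subsets = power_set(s)
    assert len(subsets) == 2 ** len(s)  # 8 subsets for 3 elements
    assert set() in subsets             # the empty set is included
    assert s in subsets                 # the full set is included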
def main():
print(power_set({8, 9, 3}))
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
name = "mtgtools"
|
nilq/baby-python
|
python
|
import os
import re
import setuptools
with open("README.MD", "r") as fh:
long_description = fh.read()
def find_version(fnam, version="VERSION"):
with open(fnam) as f:
cont = f.read()
    regex = rf'{version}\s*=\s*["]([^"]+)["]'
match = re.search(regex, cont)
if match is None:
raise Exception(
f"version with spec={version} not found, use double quotes for version string"
)
return match.group(1)
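# Hedged usage note (added): for a file containing the line
#     VERSION = "1.0.0-alpha2"
# find_version(path) returns '1.0.0-alpha2'. A single-quoted version string
# raises, since the regex only matches double quotes.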
def find_projectname():
cwd = os.getcwd()
name = os.path.basename(cwd)
return name
projectname = find_projectname()
file = os.path.join(projectname, "__init__.py")
version = find_version(file)
setuptools.setup(
name="pttydev",
version=version,
author="k.r. goger",
author_email="k.r.goger+{projectname}@gmail.com",
description="TTYDev - Pseudo TTY Device",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kr-g/pttydev",
packages=setuptools.find_packages(),
license="MIT",
keywords="python threading pyserial websocket websocket-client micropython webrepl esp8266 esp32",
install_requires=[
"pyatomic",
"pyserial",
"websocket-client",
],
classifiers=[
"Development Status :: 3 - Alpha",
"Operating System :: POSIX :: Linux",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
],
python_requires=">=3.8",
)
|
nilq/baby-python
|
python
|
# Write a program that reads any sentence
# and says whether it is a palindrome, ignoring spaces.
# E.g.: "apos a sopa"; "a sacada da casa"; "a torre da derrota"
frase = str(input("Type a sentence: ")).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
for letra in range(len(junto) -1, -1, -1):
inverso += junto[letra]
print(junto, inverso)
if inverso == junto:
    print("We have a palindrome")
else:
    print("The sentence entered is not a palindrome")
|
nilq/baby-python
|
python
|
class Agent:
def __init__(self, screen_resolution):
self.resolution = screen_resolution
self.fitness = 0
self.dead = False
self.screen = None
self.y = screen_resolution[1]//2
self.rect = ((10, self.y), (self.resolution[0]//20, self.resolution[0]//25))
self.vvelocity = 0
def copyview(self, surface_array):
self.screen = surface_array
def jump(self):
self.vvelocity = 3.0
def move(self):
self.vvelocity -= .08
self.y -= 1*self.vvelocity
self.rect = ((10, self.y), (self.resolution[0]//20, self.resolution[1]//20))
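# Illustrative sketch (an editor's addition, not part of the original class):
# a short headless loop showing how jump() and move() interact under the
# simple gravity model above. The 800x600 resolution is a made-up value.
def _demo_agent_physics(steps=60):
    agent = Agent((800, 600))
    agent.jump()
    for _ in range(steps):
        agent.move()
    print('y after {} steps: {}'.format(steps, agent.y))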
|
nilq/baby-python
|
python
|
from lstm_model import LstmModel
import numpy as np
from trajectory import Trajectory, generate_trajectory, generate_trajectories, stochastic_policy_adapter
from solver import value_iteration, stochastic_policy_from_value_expectation
from snake_ladder import SnakeLadderWorld
from supervised_utils import trajectory_list_to_xy, shuffle, x_to_ragged, train_test_split, compute_class_accuracy, sigmoid, get_fixed_policies, get_expert_policy, generate_trajectories_from_policy_list
import tensorflow as tf
import plotly.express as px
import datetime
from importance import calc_instance_score, instance_importance_plot
def main():
    # define some constants
world_size = 20
shortcut_density = 0.1
# create our world
world = SnakeLadderWorld(size=world_size, shortcut_density=shortcut_density)
# game board
print("board: ")
print(world.game_board)
# create "fixed" policies which each execeute one of the three actions w/ prob p (success_prob)
# randomly sample from all actions w/ prob 1 - p
# so excute one action with prob p + 1/3(1 - p) and others with 1/3(1 - p)
    fixed_policies = get_fixed_policies()  # the original line was missing the call parentheses; the argument list is assumed to be empty
# get policy using value iteration
expert_policy = get_expert_policy(world)
# create list of policies
policies = []
#policies = policies_fixed
policies.append(expert_policy) # add expert policy to list
policies.append(world._smartish_policy)
num_policies = len(policies)
# generate trajectories for all policies
# each index of list contains array of corresponding policy trajectories
n_trajectories_per_policy = 1000
trajectories_list = generate_trajectories_from_policy_list(world, policies,n_trajectories_per_policy=n_trajectories_per_policy)
# print an example trajectory
# a trajectory from policy 0
print(trajectories_list[0][0])
    # separate trajectories into x,y data
x_data, y_data = trajectory_list_to_xy(trajectories_list)
x_data, y_data = shuffle(x_data, y_data)
# convert data to ragged tensor
# max_seq contains length of longest trajectory
x_data, max_seq = x_to_ragged(x_data)
y_data = np.array(y_data)
# do a simple train/test split
    x_train, y_train, x_test, y_test = train_test_split(x_data, y_data, test_prop=0.20)
# create lstm model
lstm_model = LstmModel(max_trajectory_len=max_seq, num_features=3, num_outputs=num_policies)
print(lstm_model.model.summary())
# train model
lstm_model.train(x_train, y_train, x_test, y_test, log_dir="./logs/fit/",
epochs = 500, batch_size=int(n_trajectories_per_policy / 10), early_stopping=True,patience=10)
# compute accuracy by class
y_predicted = lstm_model.predict_classes(x_test)
print(compute_class_accuracy(y_test, y_predicted))
# create instance importance plot
for i in range(5):
trajectory_index = i
fig = instance_importance_plot(x_test, y_test, trajectory_index, lstm_model, scale_constant=10)
fig.show()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from .legion_tools import *
from .hamiltonian_gen import *
from collections import OrderedDict
def mf_calc(base_params):
fd = base_params.fd
fd_array = np.linspace(10.44, 10.5, 2001)
fd_array = np.hstack([fd_array, fd])
fd_array = np.unique(np.sort(fd_array))
mf_amplitude_frame = mf_characterise(base_params, fd_array)
return mf_amplitude_frame.loc[fd]
def slowdown_sim(job_index, output_directory='./results', bistable_initial=True, transmon=True, transformation=False, mf_init=False, g=np.sqrt(2)):
print('In slowdown_sim.py we have bistable_initial = ' + str(bistable_initial) + ' for job_index = ' + str(job_index))
with open('stack.csv', 'r') as f:
header = f.readline()
stack_name = header.split('\n')[0]
stack_frame = pd.read_csv(f)
stack_directory = output_directory
kappa_phi = 0.0
sys_params = stack_frame.iloc[job_index]
frame_params = sys_params
if transmon is True:
packaged_params = Parameters(frame_params.fc, frame_params.Ej, frame_params.g, frame_params.Ec,
frame_params.eps,
frame_params.fd, frame_params.kappa, frame_params.gamma, frame_params.t_levels,
frame_params.c_levels, frame_params.gamma_phi, kappa_phi, frame_params.n_t,
frame_params.n_c)
H = hamiltonian(packaged_params, transmon=transmon)
c_ops = collapse_operators(packaged_params)
else:
packaged_params = Parameters(frame_params.fc, None, frame_params.g, None, frame_params.eps,
frame_params.fd, frame_params.kappa, frame_params.gamma, frame_params.t_levels,
frame_params.c_levels, frame_params.gamma_phi, kappa_phi, frame_params.n_t,
frame_params.n_c, frame_params.f01)
H = hamiltonian(packaged_params, transmon=transmon)
c_ops = collapse_operators(packaged_params)
a = tensor(destroy(sys_params.c_levels), qeye(sys_params.t_levels))
sm = tensor(qeye(sys_params.c_levels), destroy(sys_params.t_levels))
directory = stack_directory + '/' + sys_params.group_folder + '/' + str(sys_params.job_index)
if not os.path.exists(directory):
os.makedirs(directory)
cwd = os.getcwd()
os.chdir(directory)
print('The working directory for the current job index is ' + str(directory))
sys_params.to_csv('settings.csv')
options = Options(nsteps=1e6)
if os.path.exists('./state_checkpoint.qu'):
print('Loading state checkpoint for job_index = '+str(sys_params.job_index))
initial_state = qload('./state_checkpoint')
H = qload('./hamiltonian')
c_ops = qload('./c_ops')
previous_results = pd.read_csv('./results.csv')
delta_t = 1.0 * sys_params.end_time / (sys_params.snapshots - 1)
start_time = float(previous_results['times'].iloc[-1])
        new_snapshots = int(sys_params.snapshots - start_time / delta_t)  # np.linspace needs an integer sample count
        snapshot_times = np.linspace(start_time, sys_params.end_time, new_snapshots)
save = False #don't save the first row of results, it's already there
bistability = True
else:
start_time = 0
snapshot_times = np.linspace(start_time, sys_params.end_time, sys_params.snapshots)
save = True #save the first row of results
if bistable_initial is True:
bistability_characteristics = dict()
if os.path.exists('./steady_state.qu'):
rho_ss = qload('steady_state')
print('MF init = ' + str(mf_init))
if mf_init:
print('mf_init is true')
mf_amplitudes = mf_calc(packaged_params)
if mf_amplitudes.dropna().shape[0] == 4:
bistability = True
bright_alpha = mf_amplitudes.a_bright
bright_projector = tensor(coherent_dm(packaged_params.c_levels, g*bright_alpha), qeye(packaged_params.t_levels))
rho_bright = bright_projector * rho_ss
rho_bright /= rho_bright.norm()
dim_alpha = mf_amplitudes.a_dim
dim_projector = tensor(coherent_dm(packaged_params.c_levels, g*dim_alpha), qeye(packaged_params.t_levels))
rho_dim = dim_projector * rho_ss
rho_dim /= rho_dim.norm()
characteristics = None
else:
bistability = False
rho_dim = None
rho_bright = None
characteristics = None
else:
#raise AssertionError
bistability, rho_dim, rho_bright, characteristics = bistable_states_calc(rho_ss)
if sys_params.qubit_state == 0:
print('Dim initial state.')
initial_state = rho_dim
else:
print('Bright initial state.')
initial_state = rho_bright
else:
print('Finding steady state for job_index = '+str(sys_params.job_index))
rho_ss = steadystate(H, c_ops)
qsave(rho_ss, './steady_state')
bistability, rho_dim, rho_bright, characteristics = bistable_states_calc(rho_ss)
if sys_params.qubit_state == 0:
print('Dim initial state.')
initial_state = rho_dim
else:
print('Bright initial state.')
initial_state = rho_bright
if transformation and bistability:
alpha_bright = expect(a,rho_bright)
alpha_dim = expect(a,rho_dim)
bistability_characteristics['alpha_bright'] = alpha_bright
bistability_characteristics['alpha_dim'] = alpha_dim
alpha = 0.5*(alpha_bright+alpha_dim)
beta = 0.0
else:
alpha = 0.0
beta = 0.0
bistability_characteristics['bistability'] = bistability
bistability_characteristics['rho_dim'] = rho_dim
bistability_characteristics['rho_bright'] = rho_bright
bistability_characteristics['characteristics'] = characteristics
bistability_characteristics['alpha'] = alpha
bistability_characteristics['beta'] = beta
qsave(bistability_characteristics, './characteristics')
else:
print('Choosing initial state in the transmon basis.')
initial_state = tensor(fock_dm(sys_params.c_levels,0), fock_dm(sys_params.t_levels, sys_params.qubit_state))
bistability = None
alpha = 0.0
beta = 0.0
if transmon is True:
packaged_params = Parameters(frame_params.fc, frame_params.Ej, frame_params.g, frame_params.Ec,
frame_params.eps,
frame_params.fd, frame_params.kappa, frame_params.gamma, frame_params.t_levels,
frame_params.c_levels, frame_params.gamma_phi, kappa_phi, frame_params.n_t,
frame_params.n_c)
H = hamiltonian(packaged_params, transmon=transmon, alpha=alpha, beta=beta)
c_ops = collapse_operators(packaged_params, alpha=alpha, beta=beta)
else:
packaged_params = Parameters(frame_params.fc, None, frame_params.g, None, frame_params.eps,
frame_params.fd, frame_params.kappa, frame_params.gamma, frame_params.t_levels,
frame_params.c_levels, frame_params.gamma_phi, kappa_phi, frame_params.n_t,
frame_params.n_c, frame_params.f01)
H = hamiltonian(packaged_params, transmon=transmon, alpha=alpha, beta=beta)
c_ops = collapse_operators(packaged_params, alpha=alpha, beta=beta)
qsave(H, 'hamiltonian')
qsave(c_ops, 'c_ops')
options.store_final_state = True
e_ops = OrderedDict()
e_ops['a_op_re'] = (a + a.dag()) / 2
e_ops['a_op_im'] = -1j * (a - a.dag()) / 2
e_ops['photons'] = a.dag() * a
for level in range(sys_params.c_levels):
e_ops['c_level_' + str(level)] = tensor(fock_dm(sys_params.c_levels, level), qeye(sys_params.t_levels))
e_ops['sm_op_re'] = (sm.dag() + sm) / 2
e_ops['sm_op_im'] = -1j * (sm - sm.dag()) / 2
e_ops['excitations'] = sm.dag() * sm
for level in range(sys_params.t_levels):
e_ops['t_level_' + str(level)] = tensor(qeye(sys_params.c_levels), fock_dm(sys_params.t_levels, level))
qsave(H,'slowdown_hamiltonian')
if bistability:
print('Going into the mesolve function we have a_op_re = ' + str(expect(e_ops['a_op_re'],initial_state)))
output = mesolve_checkpoint(H, initial_state, snapshot_times, c_ops, e_ops, save, directory, options=options)
os.chdir(cwd)
|
nilq/baby-python
|
python
|
#!/bin/python
def sortSelection(A, k):
"""
Selects the @k-th smallest number from @A in O(nlogn) time by sorting
and returning A[k]
Note that indexing begins at 0, so
    call sortSelection(A, 0) to get the smallest number in the list,
    call sortSelection(A, len(A) // 2) to get the median number of the list,
    call sortSelection(A, len(A) - 1) to get the largest number of the list
param A : an unsorted list
param k : the k-th smallest number of @A to find
return : the k-th smallest number of @A
"""
if k < 0 or k >= len(A):
raise IndexError\
('Requested k-th smallest value is out of index range for the provided list')
B = A[:]
B.sort()
return B[k]
def pickPivot(A):
"""
Picks a pivot by arbitrarily partitioning @A into groups of 5,
finding the median of each group, and selecting the median of those
medians as a pivot
param A : an unsorted list
return : the pivot
"""
i = 0
j = 5
B = []
    for _ in range((len(A) // j) + 1):
        if A[i:j]:
            B.append(selection(A[i:j], len(A[i:j]) // 2))
        i += 5
        j += 5
    return selection(B, len(B) // 2)
def selection(A, k):
"""
Selects the @k-th smallest number from @A in O(n) time
Note that indexing begins at 0, so
    call selection(A, 0) to get the smallest number in the list,
    call selection(A, len(A) // 2) to get the median number of the list,
    call selection(A, len(A) - 1) to get the largest number of the list
param A : an unsorted list
param k : the k-th smallest number of @A to find
return : the k-th smallest number of @A
"""
if k < 0 or k >= len(A):
raise IndexError\
('Requested k-th smallest value is out of index range for the provided list')
if len(A) <= 100:
return sortSelection(A, k)
pivot = pickPivot(A)
A_1 = []
A_2 = []
A_3 = []
for item in A:
if item < pivot:
A_1.append(item) # A_1 = items of @A less than pivot
elif item > pivot:
A_2.append(item) # A_2 = items of @A greater than pivot
else:
A_3.append(item) # A_3 = items of @A equal to pivot
i = len(A_1)
j = len(A_3)
if i <= k <= (i + j):
return pivot
if k < i:
return selection(A_1, k)
if k > (i + j):
return selection(A_2, k - i - j)
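# Quick usage sketch (an editor's addition): the median of a small list via
# both entry points defined above.
if __name__ == '__main__':
    data = [7, 1, 5, 3, 9, 2, 8]
    print(sortSelection(data, len(data) // 2))  # median by sorting
    print(selection(data, len(data) // 2))      # median via median-of-medians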
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
import sys
import os
import re
import shutil
optimize = 3
link_time_optimize = 3
sources = [
'audio/audio.c',
'audio/source.c',
'audio/staticsource.c',
'audio/wav_decoder.c',
'audio/vorbis_decoder.c',
'filesystem/filesystem.c',
'graphics/batch.c',
'graphics/font.c',
'graphics/graphics.c',
'graphics/geometry.c',
'graphics/image.c',
'graphics/matrixstack.c',
'graphics/quad.c',
'graphics/shader.c',
'graphics/gltools.c',
'image/imagedata.c',
'luaapi/audio.c',
'luaapi/boot.c',
'luaapi/math.c',
'luaapi/filesystem.c',
'luaapi/keyboard.c',
'luaapi/event.c',
'luaapi/bonding.c',
'luaapi/mouse.c',
'luaapi/graphics.c',
'luaapi/graphics_geometry.c',
'luaapi/graphics_batch.c',
'luaapi/graphics_font.c',
'luaapi/graphics_image.c',
'luaapi/graphics_quad.c',
'luaapi/graphics_shader.c',
'luaapi/graphics_window.c',
'luaapi/image.c',
'luaapi/timer.c',
'luaapi/tools.c',
'math/vector.c',
'math/minmax.c',
'math/random.c',
'math/randomgenerator.c',
'main.c',
'bonding.c',
'mouse.c',
'keyboard.c',
'tools/utf8.c',
'tools/log.c',
'timer/timer.c',
# SLRE
'3rdparty/slre/slre.c',
# Lua
'3rdparty/lua/src/lapi.c',
'3rdparty/lua/src/lauxlib.c',
'3rdparty/lua/src/lbaselib.c',
'3rdparty/lua/src/lcode.c',
'3rdparty/lua/src/ldblib.c',
'3rdparty/lua/src/ldebug.c',
'3rdparty/lua/src/ldo.c',
'3rdparty/lua/src/ldump.c',
'3rdparty/lua/src/lfunc.c',
'3rdparty/lua/src/lgc.c',
'3rdparty/lua/src/linit.c',
'3rdparty/lua/src/liolib.c',
'3rdparty/lua/src/llex.c',
'3rdparty/lua/src/lmathlib.c',
'3rdparty/lua/src/lmem.c',
'3rdparty/lua/src/loadlib.c',
'3rdparty/lua/src/lobject.c',
'3rdparty/lua/src/lopcodes.c',
'3rdparty/lua/src/loslib.c',
'3rdparty/lua/src/lparser.c',
'3rdparty/lua/src/lstate.c',
'3rdparty/lua/src/lstring.c',
'3rdparty/lua/src/lstrlib.c',
'3rdparty/lua/src/ltable.c',
'3rdparty/lua/src/ltablib.c',
'3rdparty/lua/src/ltm.c',
'3rdparty/lua/src/lundump.c',
'3rdparty/lua/src/lvm.c',
'3rdparty/lua/src/lzio.c',
# FreeType
'3rdparty/freetype/src/base/ftbitmap.c',
'3rdparty/freetype/src/base/ftcalc.c',
'3rdparty/freetype/src/base/ftgloadr.c',
'3rdparty/freetype/src/base/ftglyph.c',
'3rdparty/freetype/src/base/ftinit.c',
'3rdparty/freetype/src/base/ftobjs.c',
'3rdparty/freetype/src/base/ftoutln.c',
'3rdparty/freetype/src/base/ftrfork.c',
'3rdparty/freetype/src/base/ftstream.c',
'3rdparty/freetype/src/base/ftsystem.c',
'3rdparty/freetype/src/base/fttrigon.c',
'3rdparty/freetype/src/base/ftutil.c',
'3rdparty/freetype/src/gzip/ftgzip.c',
'3rdparty/freetype/src/sfnt/sfnt.c',
'3rdparty/freetype/src/smooth/smooth.c',
'3rdparty/freetype/src/truetype/truetype.c',
]
SRCDIR = os.path.dirname(sys.argv[0]) + "/src"
ftinc = " ".join(map(lambda x: "-I" + os.path.relpath(SRCDIR) + "/3rdparty/freetype/src/" + x, ["truetype", "sfnt", "autofit", "smooth", "raster", "psaux", "psnames"])) + " -I" + os.path.relpath(SRCDIR) + "/3rdparty/freetype/include"
output = ''
CFLAGS = ''
LDFLAGS = ''
CC = ''
LD = ''
if SRCDIR == '.' or SRCDIR == '':
print("Please build out-of-source")
sys.exit(1)
includeregex = re.compile(r'^\s*#\s*include\s*"([^"]+)"\s*$')
os.system('sed -e "s/FT_CONFIG_OPTIONS_H/<ftoption.h>/" -e "s/FT_CONFIG_STANDARD_LIBRARY_H/<ftstdlib.h>/" -e "s?/undef ?#undef ?" <{srcdir}/3rdparty/freetype/builds/unix/ftconfig.in >ftconfig.h'.format(srcdir=os.path.relpath(SRCDIR)))
os.system('mkdir -p config; sed -e \'/tt_driver\\|sfnt_module\\|ft_smooth_renderer/ !d\' < {srcdir}/3rdparty/freetype/include/config/ftmodule.h >config/ftmodule.h'.format(srcdir=os.path.relpath(SRCDIR)))
def newestDependency(filename, trace=[]):
newest = os.path.getmtime(sys.argv[0])
with open(filename) as f:
for line in f:
res = includeregex.match(line)
if res:
dep = os.path.dirname(filename) + "/" + res.group(1)
if os.path.exists(dep) and dep not in trace:
newest = max(newest, os.path.getmtime(dep), newestDependency(dep, trace + [dep]))
return newest
def makeFontFile():
sourcefile = os.path.join(os.path.dirname(sys.argv[0]), "data", "vera.ttf")
compiled = os.path.exists("vera_ttf.c") and os.path.getmtime("vera_ttf.c") or 0
source = os.path.getmtime(sourcefile)
if compiled > source:
return False
with open(sourcefile, "rb") as datafile, open("vera_ttf.c", "w") as outputfile:
content = bytearray(datafile.read())
outputfile.write("static unsigned char const defaultFontData[] = {\n")
for i in range(len(content)):
outputfile.write("0x{:02x}, ".format(content[i]))
if i % 16 == 15:
outputfile.write("\n")
outputfile.write("}};\nstatic size_t defaultFontSize = {};".format(len(content)))
return True
def needsRebuild(filename):
return not os.path.exists(getObjFilename(filename)) or \
os.path.getmtime(SRCDIR + "/" + filename) > os.path.getmtime(getObjFilename(filename)) or \
newestDependency(SRCDIR + "/" + filename) > os.path.getmtime(getObjFilename(filename))
def getObjFilename(filename):
return os.path.splitext(filename)[0] + ".o"
def compile(filename):
objfile = getObjFilename(filename)
outdir = os.path.dirname(filename)
if outdir == "":
outdir = "."
if not os.path.exists(outdir):
print("Making directory " + outdir)
os.makedirs(outdir)
print("Compile {filename} -> {objfile}".format(filename=filename, objfile=objfile))
cmd = "{CC} {CFLAGS} -c -o {objfile} {filename}".format(CC=CC, CFLAGS=CFLAGS, filename=SRCDIR + "/" + filename, objfile=objfile)
return os.system(cmd) == 0
def build():
global output, CFLAGS, LDFLAGS, CC, LD
if '--native' in sys.argv:
output = 'love'
CFLAGS = '-g -O{optimize} -I/usr/include/SDL2 -DFT2_BUILD_LIBRARY -Wall -g -std=c11 -I{ftconfig} -I{srcdir}/3rdparty/lua/src'.format(optimize=optimize, link_time_optimize=link_time_optimize, srcdir = os.path.relpath(SRCDIR), ftconfig=".") + " " + ftinc
LDFLAGS = '-lm -lSDL2 -lGL -lGLEW -lopenal -g'.format(optimize=optimize, link_time_optimize=link_time_optimize)
CC = 'clang'
LD = 'clang'
else:
output = 'love.js'
        CFLAGS = '-s USE_SDL=1 -s FULL_ES2=1 -O{optimize} --memory-init-file 0 --llvm-lto {link_time_optimize} -DFT2_BUILD_LIBRARY -Wall -std=c11 -I{ftconfig} -I{srcdir}/3rdparty/lua/src'.format(optimize=optimize, link_time_optimize=link_time_optimize, srcdir = os.path.relpath(SRCDIR), ftconfig=".") + " " + ftinc
LDFLAGS = '-s USE_SDL=1 -s ALLOW_MEMORY_GROWTH=1 --no-heap-copy -O{optimize} --llvm-lto {link_time_optimize} --memory-init-file 0 -o love.html --preload-file lib/ --preload-file main.lua --preload-file logic/ --preload-file res/ --preload-file res/bgs/ --preload-file res/sounds/'.format(optimize=optimize, link_time_optimize=link_time_optimize)
CC = 'emcc'
LD = 'emcc'
needsLinking = False
fontChanged = makeFontFile()
needsLinking = needsLinking or fontChanged
for i in sources:
filename = i
if needsRebuild(filename):
if not compile(filename):
print("Failed")
sys.exit(1)
needsLinking = True
if needsLinking:
print("Linking {output}".format(output=output))
cmd = "{LD} {LDFLAGS} -o {outfile} {infiles}".format(LD=LD, LDFLAGS=LDFLAGS, outfile=output, infiles=" ".join(map(getObjFilename, sources)))
if os.system(cmd) != 0:
print("Failed")
def remove(f):
if os.path.exists(f):
os.remove(f)
def clean():
for i in sources:
remove(getObjFilename(i))
remove('motor2d.js')
remove('motor2d')
try:
for i in extra_output:
remove(i)
except NameError:
pass
def usage():
print(sys.argv[0] + " (build|buildloader|clean) [--native]")
print(" Verbs:")
print(" build build motor2d executable")
print(" clean delete intermediate files and final executable (doesn't clean loader)")
print(" Flags:")
print(" --native build native executable (not supported for buildloader)")
if len(sys.argv) == 1:
    usage()
elif sys.argv[1] == 'build':
    build()
elif sys.argv[1] == 'clean':
    clean()
else:
    # unknown verbs (including the unimplemented 'buildloader') fall back to usage
    usage()
|
nilq/baby-python
|
python
|
import unittest
from board import Board
class BoardTests(unittest.TestCase):
def setUp(self):
self.b = Board()
return super().setUp()
def test_initial_board(self):
expected = [['.', '.', '.'], ['.', '.', '.'], ['.', '.', '.']]
self.assertEqual(self.b.board, expected)
def test_player_move(self):
self.b.player_move(0, 0)
expected = [['X', '.', '.'], ['.', '.', '.'], ['.', '.', '.']]
self.assertEqual(self.b.board, expected)
def test_bot_move(self):
self.b.bot_move(1, 1)
expected = [['.', '.', '.'], ['.', 'O', '.'], ['.', '.', '.']]
self.assertEqual(self.b.board, expected)
def test_make_move(self):
self.b.make_move('X', 0, 0)
expected = [['X', '.', '.'], ['.', '.', '.'], ['.', '.', '.']]
self.assertEqual(self.b.board, expected)
def test_bot_make_move(self):
self.b.bot_make_move()
expected = [['O', '.', '.'], ['.', '.', '.'], ['.', '.', '.']]
self.assertEqual(self.b.board, expected)
|
nilq/baby-python
|
python
|
import os
from bot import app_vars
def clean_file_name(file_name: str) -> str:
for char in ["\\", "/", "%", "*", "?", ":", '"', "|"] + [
chr(i) for i in range(1, 32)
]:
file_name = file_name.replace(char, "_")
file_name = file_name.strip()
return file_name
def get_abs_path(file_name: str) -> str:
return os.path.join(app_vars.directory, file_name)
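# Minimal usage sketch (an editor's addition): clean_file_name() replaces
# characters that are unsafe in file names with underscores.
if __name__ == '__main__':
    print(clean_file_name('a/b:c*d?.mp3'))  # -> a_b_c_d_.mp3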
|
nilq/baby-python
|
python
|
import copy
import six
from .error import SchemaError, Error, Invalid
class Schema(object):
"""
A schema that validates data given to it using the specified rules.
The ``schema`` must be a dictionary of key-value mappings. Values must
be callable validators. See ``XX``.
The ``entire`` argument allows specifying a callable validator that runs on
the entire input after every field is validated. If provided, the validator
will always run, even if validation errors are raised beforehand. Failed
keys will not be included in the given data.
The ``extra_keys`` argument must be one of :attr:`.ACCEPT`, :attr:`.IGNORE`
or :attr:`.REJECT`.
The ``required_error`` argument specifies the error message used when a
key is missing. :attr:`.REQUIRED_ERROR` is the default.
"""
ACCEPT = 'ACCEPT'
IGNORE = 'IGNORE'
REJECT = 'REJECT'
REQUIRED_ERROR = "This field is required."
"""
The default error message for a missing required key.
"""
REJECT_ERROR = "This field is unknown."
"""
The default error message for an unknown rejected key.
"""
def __init__(self, schema, entire=None, extra_keys=IGNORE, required_error=None):
self.extra_keys = extra_keys
self.entire = entire
self.required_error = required_error or self.REQUIRED_ERROR
if not isinstance(schema, dict):
raise SchemaError("The provided schema must be a dictionary.")
self.schema = schema
self.validator = self._build(schema)
def __call__(self, data):
"""
Validates the given ``data`` dictionary and returns transformed values.
Will raise :class:`decent.error.Invalid` if any validation errors are
encountered.
"""
return self.validator(copy.deepcopy(data))
def _build(self, schema):
extra_keys = self.extra_keys
entire = self.entire
# Enumerate all the keys in the schema.
all_keys = set(schema.keys())
_required_keys = set([key for key in all_keys if not isinstance(key, Optional)])
# Enumerate default key values.
defaults = {}
for key in all_keys:
            if isinstance(key, Marker) and key.default is not None:
defaults[key] = key.default
# Make sure all validators are callable.
for key, value in six.iteritems(schema):
if not hasattr(value, '__call__'):
raise SchemaError("Validator {!r} for key '{!s}' is not callable.".format(value, key))
def validator(data):
# Sanity check.
if not isinstance(data, dict):
raise Invalid([Error("Data must be a dictionary.")])
# Track which required keys are not present.
required_keys = _required_keys.copy()
# Fill available defaults before validating.
missing = all_keys.copy() - set(data.keys())
for key in missing:
if key in defaults:
data[key] = defaults[key]
errors = []
result = {}
for key, value in six.iteritems(data):
# If this key is not in the schema, decide what to do with it.
if key not in all_keys:
if extra_keys == self.ACCEPT:
# Pass through as is.
result[key] = value
elif extra_keys == self.REJECT:
# Reject with error.
errors.append(Error(self.REJECT_ERROR, [key]))
continue # pragma: no cover
# Validate.
validator = schema[key]
result_value = self._run_validator(validator, value, errors, key)
                if result_value is not None:  # a truthiness check would drop valid falsy values such as 0 or ''
                    result[key] = result_value
# Track required keys.
if key in required_keys:
required_keys.remove(key)
# Add an error for every missing key.
for key in required_keys:
errors.append(Error(self.required_error, [key]))
# Run the validator for the entire schema.
if entire:
result = self._run_validator(entire, result, errors)
if errors:
raise Invalid(errors)
return result
return validator
def _run_validator(self, validator, data, errors, key=None):
try:
return validator(data)
        except Invalid as invalid:
            for e in invalid:
                self._add_error(e, errors, key)
except Error as e:
self._add_error(e, errors, key)
def _add_error(self, error, errors, key=None):
if key:
error.path.insert(0, key)
errors.append(error)
class Marker(object):
"""
A base class for key markers that wrap a key.
"""
def __init__(self, key, default=None):
self.key = key
self.default = default
def __str__(self):
return str(self.key)
def __eq__(self, other):
return self.key == other
def __hash__(self):
return hash(self.key)
__repr__ = __str__
class Default(Marker):
"""
A marker for specifying a default value for a key.
"""
pass
class Optional(Marker):
"""
A marker for specifying a key as optional. The schema will validate data
without the key present.
"""
pass
__all__ = ('Schema', 'Marker', 'Default', 'Optional',)
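# Usage sketch (an editor's addition, following the semantics documented
# above): plain callables such as str and int act as validators, and
# Optional marks keys that may be absent.
if __name__ == '__main__':
    schema = Schema({'name': str, Optional('age'): int})
    print(schema({'name': 'Ada', 'age': '36'}))  # {'name': 'Ada', 'age': 36}
    print(schema({'name': 'Ada'}))               # 'age' may be omitted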
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import shutil
import re
import sys
import textwrap
from util import build_utils
import jar
sys.path.append(build_utils.COLORAMA_ROOT)
import colorama
def ColorJavacOutput(output):
fileline_prefix = r'(?P<fileline>(?P<file>[-.\w/\\]+.java):(?P<line>[0-9]+):)'
warning_re = re.compile(
fileline_prefix + r'(?P<full_message> warning: (?P<message>.*))$')
error_re = re.compile(
fileline_prefix + r'(?P<full_message> (?P<message>.*))$')
marker_re = re.compile(r'\s*(?P<marker>\^)\s*$')
warning_color = ['full_message', colorama.Fore.YELLOW + colorama.Style.DIM]
error_color = ['full_message', colorama.Fore.MAGENTA + colorama.Style.BRIGHT]
marker_color = ['marker', colorama.Fore.BLUE + colorama.Style.BRIGHT]
def Colorize(line, regex, color):
match = regex.match(line)
start = match.start(color[0])
end = match.end(color[0])
return (line[:start]
+ color[1] + line[start:end]
+ colorama.Fore.RESET + colorama.Style.RESET_ALL
+ line[end:])
def ApplyColor(line):
if warning_re.match(line):
line = Colorize(line, warning_re, warning_color)
elif error_re.match(line):
line = Colorize(line, error_re, error_color)
elif marker_re.match(line):
line = Colorize(line, marker_re, marker_color)
return line
return '\n'.join(map(ApplyColor, output.split('\n')))
ERRORPRONE_OPTIONS = [
# These crash on lots of targets.
'-Xep:ParameterPackage:OFF',
'-Xep:OverridesGuiceInjectableMethod:OFF',
'-Xep:OverridesJavaxInjectableMethod:OFF',
]
def _FilterJavaFiles(paths, filters):
return [f for f in paths
if not filters or build_utils.MatchesGlob(f, filters)]
_MAX_MANIFEST_LINE_LEN = 72
def _CreateManifest(manifest_path, classpath, main_class=None,
manifest_entries=None):
"""Creates a manifest file with the given parameters.
This generates a manifest file that compiles with the spec found at
http://docs.oracle.com/javase/7/docs/technotes/guides/jar/jar.html#JAR_Manifest
Args:
manifest_path: The path to the manifest file that should be created.
classpath: The JAR files that should be listed on the manifest file's
classpath.
main_class: If present, the class containing the main() function.
manifest_entries: If present, a list of (key, value) pairs to add to
the manifest.
"""
output = ['Manifest-Version: 1.0']
if main_class:
output.append('Main-Class: %s' % main_class)
if manifest_entries:
for k, v in manifest_entries:
output.append('%s: %s' % (k, v))
if classpath:
sanitized_paths = []
for path in classpath:
sanitized_paths.append(os.path.basename(path.strip('"')))
output.append('Class-Path: %s' % ' '.join(sanitized_paths))
output.append('Created-By: ')
output.append('')
wrapper = textwrap.TextWrapper(break_long_words=True,
drop_whitespace=False,
subsequent_indent=' ',
width=_MAX_MANIFEST_LINE_LEN - 2)
output = '\r\n'.join(w for l in output for w in wrapper.wrap(l))
with open(manifest_path, 'w') as f:
f.write(output)
def _ExtractClassFiles(jar_path, dest_dir, java_files):
"""Extracts all .class files not corresponding to |java_files|."""
# Two challenges exist here:
  # 1. |java_files| have prefixes that are not represented in the jar paths.
# 2. A single .java file results in multiple .class files when it contains
# nested classes.
# Here's an example:
# source path: ../../base/android/java/src/org/chromium/Foo.java
# jar paths: org/chromium/Foo.class, org/chromium/Foo$Inner.class
# To extract only .class files not related to the given .java files, we strip
# off ".class" and "$*.class" and use a substring match against java_files.
def extract_predicate(path):
if not path.endswith('.class'):
return False
path_without_suffix = re.sub(r'(?:\$[^/]+)?\.class$', '', path)
return not any(path_without_suffix in p for p in java_files)
build_utils.ExtractAll(jar_path, path=dest_dir, predicate=extract_predicate)
def _OnStaleMd5(changes, options, javac_cmd, java_files, classpath_inputs,
runtime_classpath):
with build_utils.TempDir() as temp_dir:
srcjars = options.java_srcjars
# The .excluded.jar contains .class files excluded from the main jar.
# It is used for incremental compiles.
excluded_jar_path = options.jar_path.replace('.jar', '.excluded.jar')
classes_dir = os.path.join(temp_dir, 'classes')
os.makedirs(classes_dir)
changed_paths = None
if options.incremental and changes.AddedOrModifiedOnly():
changed_paths = set(changes.IterChangedPaths())
# Do a full compile if classpath has changed.
if any(p in changed_paths for p in classpath_inputs):
changed_paths = None
else:
java_files = [p for p in java_files if p in changed_paths]
srcjars = [p for p in srcjars if p in changed_paths]
if srcjars:
java_dir = os.path.join(temp_dir, 'java')
os.makedirs(java_dir)
for srcjar in options.java_srcjars:
extract_predicate = None
if changed_paths:
changed_subpaths = set(changes.IterChangedSubpaths(srcjar))
extract_predicate = lambda p: p in changed_subpaths
build_utils.ExtractAll(srcjar, path=java_dir, pattern='*.java',
predicate=extract_predicate)
jar_srcs = build_utils.FindInDirectory(java_dir, '*.java')
java_files.extend(_FilterJavaFiles(jar_srcs, options.javac_includes))
if java_files:
if changed_paths:
# When no files have been removed and the output jar already
# exists, reuse .class files from the existing jar.
_ExtractClassFiles(options.jar_path, classes_dir, java_files)
_ExtractClassFiles(excluded_jar_path, classes_dir, java_files)
# Add the extracted files to the classpath.
classpath_idx = javac_cmd.index('-classpath')
javac_cmd[classpath_idx + 1] += ':' + classes_dir
# Don't include the output directory in the initial set of args since it
# being in a temp dir makes it unstable (breaks md5 stamping).
cmd = javac_cmd + ['-d', classes_dir] + java_files
build_utils.CheckOutput(
cmd,
print_stdout=options.chromium_code,
stderr_filter=ColorJavacOutput)
if options.main_class or options.manifest_entry:
entries = []
if options.manifest_entry:
entries = [e.split(':') for e in options.manifest_entry]
manifest_file = os.path.join(temp_dir, 'manifest')
_CreateManifest(manifest_file, runtime_classpath, options.main_class,
entries)
else:
manifest_file = None
glob = options.jar_excluded_classes
inclusion_predicate = lambda f: not build_utils.MatchesGlob(f, glob)
exclusion_predicate = lambda f: not inclusion_predicate(f)
jar.JarDirectory(classes_dir,
options.jar_path,
manifest_file=manifest_file,
predicate=inclusion_predicate)
jar.JarDirectory(classes_dir,
excluded_jar_path,
predicate=exclusion_predicate)
def _ParseOptions(argv):
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option(
'--src-gendirs',
help='Directories containing generated java files.')
parser.add_option(
'--java-srcjars',
action='append',
default=[],
help='List of srcjars to include in compilation.')
parser.add_option(
'--bootclasspath',
action='append',
default=[],
help='Boot classpath for javac. If this is specified multiple times, '
'they will all be appended to construct the classpath.')
parser.add_option(
'--classpath',
action='append',
help='Classpath for javac. If this is specified multiple times, they '
'will all be appended to construct the classpath.')
parser.add_option(
'--use-ijars',
action='store_true',
help='Whether to use interface jars (.interface.jar) when compiling')
parser.add_option(
'--incremental',
action='store_true',
help='Whether to re-use .class files rather than recompiling them '
'(when possible).')
parser.add_option(
'--javac-includes',
default='',
help='A list of file patterns. If provided, only java files that match'
'one of the patterns will be compiled.')
parser.add_option(
'--jar-excluded-classes',
default='',
help='List of .class file patterns to exclude from the jar.')
parser.add_option(
'--chromium-code',
type='int',
help='Whether code being compiled should be built with stricter '
'warnings for chromium code.')
parser.add_option(
'--use-errorprone-path',
help='Use the Errorprone compiler at this path.')
parser.add_option('--jar-path', help='Jar output path.')
parser.add_option(
'--main-class',
help='The class containing the main method.')
parser.add_option(
'--manifest-entry',
action='append',
help='Key:value pairs to add to the .jar manifest.')
parser.add_option('--stamp', help='Path to touch on success.')
options, args = parser.parse_args(argv)
build_utils.CheckOptions(options, parser, required=('jar_path',))
bootclasspath = []
for arg in options.bootclasspath:
bootclasspath += build_utils.ParseGypList(arg)
options.bootclasspath = bootclasspath
classpath = []
for arg in options.classpath:
classpath += build_utils.ParseGypList(arg)
options.classpath = classpath
java_srcjars = []
for arg in options.java_srcjars:
java_srcjars += build_utils.ParseGypList(arg)
options.java_srcjars = java_srcjars
if options.src_gendirs:
options.src_gendirs = build_utils.ParseGypList(options.src_gendirs)
options.javac_includes = build_utils.ParseGypList(options.javac_includes)
options.jar_excluded_classes = (
build_utils.ParseGypList(options.jar_excluded_classes))
return options, args
def main(argv):
colorama.init()
argv = build_utils.ExpandFileArgs(argv)
options, java_files = _ParseOptions(argv)
if options.src_gendirs:
java_files += build_utils.FindInDirectories(options.src_gendirs, '*.java')
java_files = _FilterJavaFiles(java_files, options.javac_includes)
runtime_classpath = options.classpath
compile_classpath = runtime_classpath
if options.use_ijars:
ijar_re = re.compile(r'\.jar$')
compile_classpath = (
[ijar_re.sub('.interface.jar', p) for p in runtime_classpath])
javac_cmd = ['javac']
if options.use_errorprone_path:
javac_cmd = [options.use_errorprone_path] + ERRORPRONE_OPTIONS
javac_cmd.extend((
'-g',
# Chromium only allows UTF8 source files. Being explicit avoids
# javac pulling a default encoding from the user's environment.
'-encoding', 'UTF-8',
'-classpath', ':'.join(compile_classpath),
# Prevent compiler from compiling .java files not listed as inputs.
# See: http://blog.ltgt.net/most-build-tools-misuse-javac/
'-sourcepath', ''
))
if options.bootclasspath:
javac_cmd.extend([
'-bootclasspath', ':'.join(options.bootclasspath),
'-source', '1.7',
'-target', '1.7',
])
if options.chromium_code:
javac_cmd.extend(['-Xlint:unchecked', '-Xlint:deprecation'])
else:
# XDignore.symbol.file makes javac compile against rt.jar instead of
# ct.sym. This means that using a java internal package/class will not
# trigger a compile warning or error.
javac_cmd.extend(['-XDignore.symbol.file'])
classpath_inputs = options.bootclasspath
# TODO(agrieve): Remove this .TOC heuristic once GYP is no more.
if options.use_ijars:
classpath_inputs.extend(compile_classpath)
else:
for path in compile_classpath:
if os.path.exists(path + '.TOC'):
classpath_inputs.append(path + '.TOC')
else:
classpath_inputs.append(path)
# Compute the list of paths that when changed, we need to rebuild.
input_paths = classpath_inputs + options.java_srcjars + java_files
output_paths = [
options.jar_path,
options.jar_path.replace('.jar', '.excluded.jar'),
]
# An escape hatch to be able to check if incremental compiles are causing
# problems.
force = int(os.environ.get('DISABLE_INCREMENTAL_JAVAC', 0))
  # List python deps in input_strings rather than input_paths since their
  # contents do not change what gets written to the depfile.
build_utils.CallAndWriteDepfileIfStale(
lambda changes: _OnStaleMd5(changes, options, javac_cmd, java_files,
classpath_inputs, runtime_classpath),
options,
input_paths=input_paths,
input_strings=javac_cmd,
output_paths=output_paths,
force=force,
pass_changes=True)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
nilq/baby-python
|
python
|
import gym
import torch
import numpy as np
import seaborn as sns
from hips.plotting.colormaps import gradient_cmap
import matplotlib.pyplot as plt
import os
from tikzplotlib import save
from sds_torch.rarhmm import rARHMM  # the sds_numpy rARHMM import was immediately shadowed by this one, so it is dropped
from lax.a2c_lax import learn
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
to_torch = lambda arr: torch.from_numpy(arr).float().to(device)
to_npy = lambda arr: arr.detach().double().cpu().numpy()
# env = gym.make('Cartpole-ID-v1') # <--- eval on cartpole
env = gym.make('HybridCartpole-ID-v1') # <--- train on hybrid cartpole
env.unwrapped._dt = 0.01
env.unwrapped._sigma = 1e-4
env._max_episode_steps = 5000
"""
learn(env, seed=42, obfilter=True, tsteps_per_batch=5000, cv_opt_epochs=5, lax=False, animate=False,
gamma=0.99, vf_opt_epochs=50, total_steps=int(50e6),
save_loc='/Users/kek/Documents/informatik/master/semester_3/thesis/code/'
'sds/evaluation/l4dc2020/cartpole/evals')
"""
model = torch.load('/Users/kek/Documents/informatik/master/semester_3/thesis/code/sds/evaluation/l4dc2020/cartpole/thesis_eval/checkpoint_HybridCartpole-ID-v1_model_887_epochs_.pkl', map_location='cpu')
model.step_policy_model.policy.training = False
seed = 100
env.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
obs = env.reset()
obs = to_torch(model.obfilter(obs))
prev_obs = torch.zeros_like(obs)
reward = 0
all_rewards = []
env_obs = []
env_acts = []
horizon = 100000
for i in range(horizon):
identified_states = torch.cat([obs, prev_obs], -1)
prev_obs = torch.clone(obs)
sampled_u, _, mean, _ = model.step_policy_model.act(identified_states)
scaled_u = env.action_space.low + (to_npy(sampled_u) + 1.) * 0.5 * (
env.action_space.high - env.action_space.low)
scaled_u = np.clip(scaled_u, a_min=env.action_space.low, a_max=env.action_space.high)
_obs, r, done, _ = env.step(scaled_u)
obs = to_torch(model.obfilter(_obs))
reward += r
env_acts.append(sampled_u.detach())
# acts.append(scaled_u)
# print(i, _obs)
env_obs.append(to_torch(_obs))
if done:
obs = env.reset()
obs = to_torch(model.obfilter(obs))
prev_obs = torch.zeros_like(obs)
print(reward)
all_rewards.append(reward)
reward = 0
print("Expected reward: {} ± {}".format(np.mean(all_rewards), np.std(all_rewards)))
"""
rarhmm = torch.load(open(os.path.abspath(os.path.join(os.path.dirname(__file__), '..','..','..' ))
+ '/sds_numpy/envs/hybrid/models/neural_rarhmm_cartpole_cart.pkl', 'rb'),
map_location='cpu')
_, identified_states = rarhmm.viterbi([np.stack([to_npy(o) for o in env_obs])], [np.stack([to_npy(a) for a in env_acts])])
# rarhmm.viterbi([to_npy(env_obs[i][None]) for i in range(500)], [to_npy(env_acts[i][None]) for i in range(500)])
color_names = ["windows blue", "red", "amber", "faded green",
"dusty purple", "orange", "pale red", "medium green",
"denim blue", "muted purple"]
colors = sns.xkcd_palette(color_names)
cmap = gradient_cmap(colors)
identified_states = [np.stack(identified_states).squeeze()]
n_plots = env.observation_space.shape[0] + env.action_space.shape[0]
fig, axs = plt.subplots(n_plots)
x = np.arange(len(env_obs))
y_labels = ['x', '$\cos(\\theta)$', '$\sin(\\theta)$', '$\\dot{x}$', '$\\dot{\\theta}$', 'control']
y_lims = [{'low': -5.2, 'high': 5.2}, {'low': -1.5, 'high': 1.5}, {'low': -1.2, 'high': 1.2},
{'low': -5.2, 'high': 5.2}, {'low': -11.8, 'high': 11.8}, {'low': -5.4, 'high': 5.4}]
env_obs = np.stack(env_obs)
for n in range(n_plots - 1):
axs[n].plot(x, env_obs[:, n], color='black')
axs[n].imshow(identified_states[0][None, :], aspect='auto', cmap=cmap, vmin=0, vmax=len(colors) - 1,
extent=[0, horizon, y_lims[n]['low'], y_lims[n]['high']])
axs[n].set_ylabel(y_labels[n], fontsize=12)
axs[n].set_ylim(bottom=y_lims[n]['low'], top=y_lims[n]['high'])
axs[-1].plot(x, env_acts, color='black')
axs[-1].set_ylabel(y_labels[-1], fontsize=12)
axs[-1].imshow(identified_states[0][None, :], aspect='auto', cmap=cmap, vmin=0, vmax=len(colors) - 1,
extent=[0, horizon, y_lims[-1]['low'], y_lims[-1]['high']])
axs[-1].set_ylim(bottom=y_lims[-1]['low'], top=y_lims[-1]['high'])
axs[-1].set_xlim(left=0, right=horizon)
axs[-1].set_xlabel('steps')
plt.tight_layout()
save('cartpole-policy-rarhmm-dynamics.tex', externalize_tables=True)
plt.show()
"""
|
nilq/baby-python
|
python
|
import requests
from bs4 import BeautifulSoup as bs
import time
from time import sleep
import os
import sys
import xlsxwriter
from random import randint
import pyautogui
import pickle
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import re
import json
#HERE IT FINDS THE PATH
if getattr(sys, 'frozen', False):
application_path = os.path.dirname(sys.executable)
else:
try:
app_full_path = os.path.realpath(__file__)
application_path = os.path.dirname(app_full_path)
except NameError:
application_path = os.getcwd()
#Here we create the variable that is going to be used to all the functions for the path
path = os.path.join(application_path)
url = "https://ais.usvisa-info.com/en-il/niv/users/sign_in"
#Preferred months to check
prefered_months = ['July', 'August', 'September']
#Selenium
options = webdriver.ChromeOptions()
options.headless = False
options.add_argument("start-maximized")
options.add_argument("Cashe-Control=no-cashe")
options.add_argument('--no-sandbox')
options.add_argument('--no-cookies')
options.add_argument('--dns-prefetch-disable')
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--disable-web-security')
options.add_argument('--ignore-certificate-errors')
options.page_load_strategy = 'none'
options.add_argument('--ignore-certificate-errors-spki-list')
options.add_argument('--ignore-ssl-errors')
options.add_experimental_option("excludeSwitches", ["enable-logging"])
browser = webdriver.Chrome(options=options, executable_path= path + '\\chromedriver.exe')
browser.get(url)
browser.implicitly_wait(10)
action = ActionChains(browser)
sleep(5)
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36"
}
#cookie
#/html/body/div[6]/div[3]/div/button
#try:
# browser.find_element_by_class_name("ui-button.ui-corner-all.ui-widget").click()
# sleep(5)
#except Exception:
# try:
# browser.find_element_by_xpath("""/html/body/div[6]/div[3]/div/button""").click()
# sleep(5)
# except Exception as Err:
# print(Err)
# print()
def main():
login()
cont_loop()
def login():
#login
login_data = []
with open(path + "\\login.txt", 'r') as l_f:
log_d = l_f.readlines()
if log_d != []:
for lg in log_d:
lg = lg.rstrip('\n')
login_data.append(lg)
email = login_data[0]
password = login_data[1]
browser.get(url)
browser_cookies = browser.get_cookies()
browser.find_element_by_id("user_email").send_keys(email)
sleep(1)
browser.find_element_by_id("user_password").send_keys(password)
sleep(1)
browser.find_elements_by_class_name("icheckbox")[0].click()
sleep(1)
browser.find_elements_by_name("commit")[0].click()
sleep(1)
#browser_cookies = browser.get_cookies()
browser.implicitly_wait(10)
sleep(5)
#Pay with Visa window
browser.get("https://ais.usvisa-info.com/en-il/niv/schedule/34027977/payment")
browser.implicitly_wait(10)
sleep(5)
def cont_loop():
cont = False
count = 1
while cont == False:
cont = data_extraction()
print(f"\nRefreshes: {count}\n")
count += 1
def data_extraction():
#Get the element dates
try:
page_code = browser.find_element_by_class_name("medium-3.column").get_attribute('outerHTML')
except Exception:
try:
page_code = browser.find_element_by_xpath("""//*[@id="paymentOptions"]/div[2]""").get_attribute('outerHTML')
except Exception:
try:
page_code = browser.find_element_by_xpath("""/html/body/div[4]/main/div[4]/div[2]""").get_attribute('outerHTML')
except Exception as Err:
print(Err)
print()
soup = bs(page_code, 'html.parser')
try:
date1 = soup.find_all('table',{'class':'for-layout'})[0].text
print(date1)
print()
except Exception as Err:
print(Err)
print()
cont = extract_dates(date1)
sleep(randint(60, 180))
browser.refresh()
return cont
def extract_dates(date1):
count = 0
cont = False
if "2021" in date1:
for p in prefered_months:
if p in date1:
count += 1
cont = True
if count == 1:
print('\nFound 1 match, for the dates that you wanted\n')
elif count == 2:
print('\nFound 2 matches, for the dates that you wanted\n')
return cont
main()
|
nilq/baby-python
|
python
|
import unittest
from rover import control
class ControlTest(unittest.TestCase):
def test_example(self):
input = """5 5
1 2 N
LMLMLMLMM
3 3 E
MMRMMRMRRM"""
expected = """1 3 N
5 1 E"""
actual = control.launch_mission(input)
self.assertEqual(actual, expected)
def test_simple_move(self):
commands_from_0_0_to_0_1 = """0 1\n0 0 N\nM"""
expected = "0 1 N"
actual = control.launch_mission(commands_from_0_0_to_0_1)
self.assertEqual(actual, expected)
def test_move(self):
self.assertEqual((2, 3), control.execute_move('N', 2, 2))
self.assertEqual((3, 2), control.execute_move('E', 2, 2))
self.assertEqual((2, 1), control.execute_move('S', 2, 2))
self.assertEqual((1, 2), control.execute_move('W', 2, 2))
def test_turn_right(self):
self.assertEqual('E', control.execute_turn('N', 'R'))
self.assertEqual('S', control.execute_turn('E', 'R'))
self.assertEqual('W', control.execute_turn('S', 'R'))
self.assertEqual('N', control.execute_turn('W', 'R'))
def test_turn_left(self):
self.assertEqual('W', control.execute_turn('N', 'L'))
self.assertEqual('S', control.execute_turn('W', 'L'))
self.assertEqual('E', control.execute_turn('S', 'L'))
self.assertEqual('N', control.execute_turn('E', 'L'))
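# Hedged sketch (an editor's addition): one implementation of the two pure
# helpers that would satisfy the move/turn tests above; the real
# rover.control module may differ.
_DELTAS = {'N': (0, 1), 'E': (1, 0), 'S': (0, -1), 'W': (-1, 0)}
_RIGHT = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'}

def _execute_move(heading, x, y):
    dx, dy = _DELTAS[heading]
    return (x + dx, y + dy)

def _execute_turn(heading, side):
    turned = _RIGHT[heading]
    if side == 'L':  # a left turn equals three right turns
        turned = _RIGHT[_RIGHT[turned]]
    return turned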
|
nilq/baby-python
|
python
|
class Property:
def __init__(self, name='', value=''):
self.name = name
self.value = value
|
nilq/baby-python
|
python
|
from pypy.module.cpyext.test.test_api import BaseApiTest
class TestIterator(BaseApiTest):
def test_check_iter(self, space, api):
assert api.PyIter_Check(space.iter(space.wrap("a")))
assert api.PyIter_Check(space.iter(space.newlist([])))
assert not api.PyIter_Check(space.w_type)
assert not api.PyIter_Check(space.wrap(2))
def test_getIter(self, space, api):
w_iter = api.PyObject_GetIter(space.wrap([1, 2, 3]))
assert space.unwrap(api.PyIter_Next(w_iter)) == 1
assert space.unwrap(api.PyIter_Next(w_iter)) == 2
assert space.unwrap(api.PyIter_Next(w_iter)) == 3
assert api.PyIter_Next(w_iter) is None
assert not api.PyErr_Occurred()
def test_iternext_error(self,space, api):
assert api.PyIter_Next(space.w_None) is None
assert api.PyErr_Occurred() is space.w_TypeError
api.PyErr_Clear()
|
nilq/baby-python
|
python
|
import os
from jarjar import jarjar
def writetofile(f, **kwargs):
"""write kwargs to a file"""
s = ''
for k, v in kwargs.items():
s += '%s=\'%s\'\n' % (k, v)
with open(f, 'w') as fh:
fh.write(s)
jj = jarjar()
print('-- vanilla')
print('channel', jj.default_channel)
print('message', jj.default_message)
print('webhook', jj.default_webhook)
print()
writetofile('.jarjar', webhook='1', channel='2', message='3')
jj = jarjar()
print('-- inferred .jarjar')
print('channel', jj.default_channel)
print('message', jj.default_message)
print('webhook', jj.default_webhook)
print()
os.remove('.jarjar')
writetofile('.jjconfig', webhook='4', channel='5', message='6')
jj = jarjar(config='.jjconfig')
print('-- specified .jjconfig')
print('channel', jj.default_channel)
print('message', jj.default_message)
print('webhook', jj.default_webhook)
print()
os.remove('.jjconfig')
|
nilq/baby-python
|
python
|
from enum import Enum
from services.proto import database_pb2
from services.proto import follows_pb2
class GetFollowsReceiver:
def __init__(self, logger, util, users_util, database_stub):
self._logger = logger
self._util = util
self._users_util = users_util
self._database_stub = database_stub
self.RequestType = Enum('RequestType', 'FOLLOWING FOLLOWERS')
def create_rich_user(self, resp, user, requester_follows):
u = resp.rich_results.add()
u.handle = user.handle
u.host = user.host
u.global_id = user.global_id
u.bio = user.bio
u.is_followed = user.is_followed
u.display_name = user.display_name
u.private.CopyFrom(user.private)
u.custom_css = user.custom_css
if requester_follows is not None:
u.is_followed = user.global_id in requester_follows
return True
def _get_follows(self, request, context, request_type):
if request_type == self.RequestType.FOLLOWERS:
self._logger.debug('List of followers of %s requested',
request.username)
else:
self._logger.debug('List of users %s is following requested',
request.username)
resp = follows_pb2.GetFollowsResponse()
# Parse input username
handle, host = self._users_util.parse_username(
request.username)
if handle is None and host is None:
resp.result_type = follows_pb2.GetFollowsResponse.ERROR
resp.error = 'Could not parse queried username'
return resp
# Get user obj associated with given user handle & host from database
user_entry = self._users_util.get_or_create_user_from_db(
handle, host, host_is_null=(host is None))
if user_entry is None:
            error = 'Could not find or create user {}@{}'.format(handle, host)
            self._logger.error(error)
            resp.result_type = follows_pb2.GetFollowsResponse.ERROR
resp.error = error
return resp
user_id = user_entry.global_id
# Get followers/followings for this user.
following_ids = None
if request_type == self.RequestType.FOLLOWERS:
following_ids = self._util.get_follows(followed_id=user_id).results
else:
following_ids = self._util.get_follows(follower_id=user_id).results
user_following_ids = None
if request.HasField("user_global_id") and request.user_global_id:
uid = request.user_global_id.value
user_following = self._util.get_follows(follower_id=uid).results
user_following_ids = set([x.followed for x in user_following])
# Convert other following users and add to output proto.
for following_id in following_ids:
_id = following_id.followed
if request_type == self.RequestType.FOLLOWERS:
_id = following_id.follower
user = self._users_util.get_or_create_user_from_db(global_id=_id)
if user is None:
self._logger.warning('Could not find user for id %d',
_id)
continue
ok = self.create_rich_user(resp, user, user_following_ids)
if not ok:
self._logger.warning('Could not convert user %s@%s to ' +
'RichUser', user.handle, user.host)
ok = self._util.convert_db_user_to_follow_user(user,
resp.results.add())
if not ok:
self._logger.warning('Could not convert user %s@%s to ' +
'FollowUser', user.handle, user.host)
resp.result_type = follows_pb2.GetFollowsResponse.OK
return resp
def GetFollowing(self, request, context):
self._logger.debug('GetFollowing, username = %s', request.username)
return self._get_follows(request, context, self.RequestType.FOLLOWING)
def GetFollowers(self, request, context):
self._logger.debug('GetFollowers, username = %s', request.username)
return self._get_follows(request, context, self.RequestType.FOLLOWERS)
|
nilq/baby-python
|
python
|
from des import des
from code import apsDB
apscursor = apsDB.cursor()
insert = "INSERT INTO aps.aps_table (login, senha) VALUES (%s, %s)"
if __name__ == '__main__':
inputkey = open("key.txt", 'r')
key = inputkey.read()
user = input("Digite seu usuário: ")
textin= input("Digite sua mensagem de 8 digitos: ")
d = des()
r = d.encrypt(key,textin)
senhah = ("(r)", r)
passDB = """SELECT senha FROM aps_table WHERE login = '%s' """ (user)
apscursor.execute(passDB)
senhadb = apscursor.fetchone()
if str(senhadb) == str(senhah):
print("Login feito com sucesso!")
print("Sua senha cifrada é: ", senhah)
d = des()
r = d.encrypt(key,textin)
r2 = d.decrypt(key,r)
print("Sua mensagem decifrada é: ", r2)
|
nilq/baby-python
|
python
|
# --------------
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# path- variable storing file path
#Code starts here
df=pd.read_csv(path)
#displaying first five rows and column names
print(df.head(5))
print(df.columns[:5])
#distributing features
X=df.drop('Price',axis=1)
y=df['Price']
#splitting data
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=6)
#finding correlation
corr=X_train.corr()
print(corr)
#heatmap of correlation
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(12,8))
sns.heatmap(corr,annot=True,cmap='viridis')
# --------------
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# Code starts here
regressor=LinearRegression()
#fitting the model
regressor.fit(X_train,y_train)
#making prediction
y_pred=regressor.predict(X_test)
#checking R^2 score
r2=r2_score(y_test,y_pred)
print(r2)
# --------------
from sklearn.linear_model import Lasso
# Code starts here
#now using lasso
lasso=Lasso()
#fitting model using lasso
lasso.fit(X_train,y_train)
#making predictions
lasso_pred=lasso.predict(X_test)
#checking R^2 score
r2_lasso=r2_score(y_test,lasso_pred)
print(r2_lasso)
# --------------
from sklearn.linear_model import Ridge
# Code starts here
#now using ridge to improve model
ridge=Ridge()
#fitting model using ridge
ridge.fit(X_train,y_train)
#making predictions using ridge
ridge_pred=ridge.predict(X_test)
#checking R^2 score
r2_ridge=r2_score(y_test,ridge_pred)
print(r2_ridge)
# Code ends here
# --------------
from sklearn.model_selection import cross_val_score
import numpy as np
#Code starts here
regressor=LinearRegression()
#using cross validation
score = cross_val_score(regressor,X_train,y_train,cv=10)
#calculating mean of scores
mean_score=np.mean(score)
print(mean_score)
# --------------
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
#Code starts here
#now using polynomial features
model=make_pipeline(PolynomialFeatures(2), LinearRegression())
#fitting the model
model.fit(X_train,y_train)
#making predictions
y_pred=model.predict(X_test)
#checking r2_score
r2_poly=r2_score(y_test,y_pred)
print(r2_poly)
|
nilq/baby-python
|
python
|
"""
CS 156a: Final Exam
Anthony Bartolotta
Problems 13,14,15,16,17,18
"""
import numpy as np
import sklearn.svm as svm
def pseudoInverse(X):
# Calculate pseudo-inverse
tempM = np.linalg.inv(np.dot(np.transpose(X), X))
xPseudo = np.dot(tempM, np.transpose(X))
return xPseudo
def generateData(nSamples):
rerun = True
while rerun:
# Generate points in [-1,1]x[-1,1]
X_vec = np.random.uniform(-1,1,[nSamples,2])
# Classify each point
y_vec = np.sign(X_vec[:,1]-X_vec[:,0]+.25*np.sin(np.pi*X_vec[:,0]))
# If all points have same classification, generate new points
rerun = all( [ y_vec[i]==y_vec[0] for i in range(len(y_vec)) ] )
return [X_vec, y_vec]
def sampleError(y_true, y_fit):
# Find fraction of mismatch
fMismatch = float(np.count_nonzero(y_true != y_fit)) / len(y_true)
return fMismatch
def findNearestCenters(X_vec, centers):
# Calculate distances of sample points from centers
distances = np.array([[ np.linalg.norm(X_vec[i,:] - centers[j]) \
for j in range(len(centers))] for i in range(len(X_vec))])
# Find closest center for each point
closestCenter = np.argmin(distances, axis=1)
return closestCenter
def initializeLloyds(X_vec, K):
# Reinitialize algorithm until non-empty starting clusters are produced
reinit = True
while reinit:
# Choose K points at random uniformly from the space as initial
# centers
centers = [np.random.uniform(-1,1,[1,2]) for j in range(K)]
# Find the closest center for each sample point
closestCenters = findNearestCenters(X_vec, centers)
# Group sample points by their nearest center
groups = [ X_vec[closestCenters==j, :] for j in range(K)]
# Check that all groups are non-empty. If some are empty, repeat.
if all([len(g)!=0 for g in groups]):
reinit = False
return [centers, groups]
def iterationLloyds(centers, groups):
# Perform one iteration of Lloyd's algorithm
# Define new centers
newCenters = [np.average(g, axis=0) for g in groups]
# Return all sample points to a single group
X_vec = np.vstack(groups)
# Find the closest center for each sample point
closestCenters = findNearestCenters(X_vec, newCenters)
# Group sample points by their nearest center
newGroups = [ X_vec[closestCenters==j, :] for j in range(len(newCenters))]
return [newCenters, newGroups]
def lloydsAlgorithm(X_vec, K):
# Initialize boolean for if the iteration process should continue
iterate = True
# Initialize the algorithm
[centers, groups] = initializeLloyds(X_vec, K)
oldCenters = centers
# Iterate
while iterate:
# Perform one iteration of Lloyd's algorithm
[centers, groups] = iterationLloyds(oldCenters, groups)
# Check that groups are non empty
if any([len(g)==0 for g in groups]):
# If a cluster has become empty, the algorithm has failed and
# needs to be reinitialized
[centers, groups] = initializeLloyds(X_vec, K)
# Check if algorithm has converged
if all([np.linalg.norm(centers[i] - oldCenters[i]) <= 10**(-10) \
for i in range(K)]):
# If algorithm has converged, terminate.
iterate = False
else:
# If algorithm hasn't converged, continue iterating
oldCenters = centers
return centers
def trainRBF(X_vec, y_vec, gamma, K):
# Use Lloyd's algorithm to perform clustering
centers = lloydsAlgorithm(X_vec, K)
# Use linear regression to find appropriate weights for radial
# basis functions
phi = np.array( [ [ \
np.exp( -gamma*np.linalg.norm(X_vec[i,:]-centers[j])**2 ) \
for j in range(len(centers)) ] for i in range(len(X_vec)) ] )
# Augment matrix to account for constant bias term
phi_aug = np.hstack([np.ones((len(X_vec),1)), phi])
invPhi_aug = pseudoInverse(phi_aug)
w_vec = np.dot(invPhi_aug, y_vec)
return [w_vec, centers]
def classifyRBF(X_out, w_vec, centers, gamma):
# Classify points
phi = np.array( [ [ \
np.exp( -gamma*np.linalg.norm(X_out[i,:]-centers[j])**2 ) \
for j in range(len(centers))] for i in range(len(X_out))])
phi_aug = np.hstack([np.ones((len(X_out),1)), phi])
y_rbf = np.sign(np.dot(phi_aug, w_vec))
return y_rbf
def evaluateRBF(X_train, y_train, X_test, y_test, gamma, K):
# Fit data using RBF with K clusters
[w_vec, centers] = trainRBF(X_train, y_train, gamma, K)
# Classify in-sample points and find in-sample error
y_rbf_in = classifyRBF(X_train, w_vec, centers, gamma)
E_in = sampleError(y_train, y_rbf_in)
# Classify out-of-sample points and find out-of-sample error
y_rbf_out = classifyRBF(X_test, w_vec, centers, gamma)
E_out = sampleError(y_test, y_rbf_out)
return [E_in, E_out]
def evaluateSVM(X_train, y_train, X_test, y_test, g):
# Train the SVM
clf = svm.SVC(kernel='rbf', gamma=g, C=10**6)
clf.fit(X_train, y_train)
# Classify in-sample points and find in-sample error
y_in = clf.predict(X_train)
E_in = sampleError(y_train, y_in)
# Classify out-of-sample points and find out-of-sample error
y_out = clf.predict(X_test)
E_out = sampleError(y_test, y_out)
return [E_in, E_out]
def trial(n_in, n_out):
# Generate training data
[X_train, y_train] = generateData(n_in)
# Generate testing data
[X_test, y_test] = generateData(n_out)
# Evaluate performance of hard-margin RBF-kernel SVM with gamma = 1.5
[E_in_1, E_out_1] = evaluateSVM(X_train, y_train, X_test, y_test, 1.5)
# Evaluate performance of regular RBF with K = 9, gamma = 1.5
[E_in_2, E_out_2] = evaluateRBF(X_train, y_train, X_test, y_test, 1.5, 9)
# Evaluate performance of regular RBF with K = 9, gamma = 2
[E_in_3, E_out_3] = evaluateRBF(X_train, y_train, X_test, y_test, 2.0, 9)
# Evaluate performance of regular RBF with K = 12, gamma = 1.5
[E_in_4, E_out_4] = evaluateRBF(X_train, y_train, X_test, y_test, 1.5, 12)
# Compile results
trialResults = [E_in_1, E_out_1, E_in_2, E_out_2, E_in_3, E_out_3, \
E_in_4, E_out_4]
return trialResults
def main():
# Parameters for trials
nTrials = 1000
n_in = 10**2
n_out = 10**3
# Collect results
trialResults = np.array( [trial(n_in, n_out) for j in range(nTrials)] )
# Fraction of trials data can't be separated by hard-margin SVM
E_in_svm = trialResults[:,0]
badTrials = ( E_in_svm > 1.0/(2.0*n_in) )
f_failed = float(sum(badTrials)) / nTrials
print("Fraction of trials data was inseparable = "+repr(f_failed)+"\n")
# Find out-of-sample errors for trials with separable data
E_out_svm = trialResults[~badTrials, 1]
E_out_9_15 = trialResults[~badTrials, 3]
E_out_12_15 = trialResults[~badTrials, 7]
# Fraction of trials kernel SVM beat K=9, gamma=1.5 RBF
f_better_9 = float(sum(E_out_9_15 > E_out_svm)) / sum(~badTrials)
print("Fraction of trials kernel SVM beat K=9, gamma=1.5 RBF = " + \
repr(f_better_9)+"\n")
# Fraction of trials kernel SVM beat K=12, gamma=1.5 RBF
f_better_12 = float(sum(E_out_12_15 > E_out_svm)) / sum(~badTrials)
print("Fraction of trials kernel SVM beat K=12, gamma=1.5 RBF = " + \
repr(f_better_12)+"\n")
#
E_in_9 = trialResults[:, 2]
E_out_9 = trialResults[:, 3]
E_in_12 = trialResults[:, 6]
E_out_12 = trialResults[:, 7]
delta_E_in = (E_in_12 - E_in_9)
delta_E_out = (E_out_12 - E_out_9)
f_dec_E_in = float(sum(delta_E_in < 0)) / nTrials
f_dec_E_out = float(sum(delta_E_out < 0)) / nTrials
print("When going from K=9 to K=12 RBF with gamma=1.5: ")
print("Fraction of trials E_in decreased = " + repr(f_dec_E_in))
print("Fraction of trials E_out decreased = " + repr(f_dec_E_out) + "\n")
#
E_in_20 = trialResults[:, 4]
E_out_20 = trialResults[:, 5]
delta_E_in = (E_in_20 - E_in_9)
delta_E_out = (E_out_20 - E_out_9)
f_dec_E_in = float(sum(delta_E_in < 0)) / nTrials
f_dec_E_out = float(sum(delta_E_out < 0)) / nTrials
print("When going from gamma=1.5 to gamma=2.0 RBF with K=9: ")
print("Fraction of trials E_in decreased = " + repr(f_dec_E_in))
print("Fraction of trials E_out decreased = " + repr(f_dec_E_out) + "\n")
#
goodTrials = ( E_in_9 < 1.0/(2.0*n_in) )
f_good = float(sum(goodTrials)) / nTrials
print("Fraction of trials with E_in=0 for K=9 gamma=1.5 RBF : " \
+ repr(f_good))
return
main()
|
nilq/baby-python
|
python
|
"""get quadratures
Calculate the times of quadrature for a series of objects with given ephemerides
between two nights in a given observatory
Input file must be:
name ra(deg) dec(deg) epoch period
The output will be:
A per target list containing the times of quadratures
"""
import argparse
from datetime import datetime
from datetime import timedelta
from astropy import units as u
from astropy.coordinates import AltAz
from astropy.coordinates import EarthLocation
from astropy.coordinates import SkyCoord
from astropy.coordinates import get_moon
from astropy.coordinates import get_sun
from astropy.time import Time
from astropy.utils.iers import conf as iers_conf
iers_conf.iers_auto_url = 'https://datacenter.iers.org/data/9/finals2000A.all'
mir = 'ftp://cddis.gsfc.nasa.gov/pub/products/iers/finals2000A.all'
iers_conf.iers_auto_url_mirror = mir
def arg_parse():
"""Parse command line arguments."""
p = argparse.ArgumentParser()
p.add_argument('infile', help='Path to file containing targets.')
p.add_argument('n1', help='Night 1 in Y-m-d')
p.add_argument('n2', help='Night 2 in Y-m-d')
p.add_argument('observatory', help='Astropy name of the observatory')
return p.parse_args()
def read_ephem_file(infile):
"""Read the ephem file."""
name, ra, dec, epoch, period = [], [], [], [], []
with open(infile, 'r') as f:
for line in f:
data = line.split()
name.append(data[0])
ra.append(float(data[1]))
dec.append(float(data[2]))
epoch.append(float(data[3]))
period.append(float(data[4]))
return name, ra, dec, epoch, period
def sun_is_down(time, observatory) -> bool:
"""Check if the Sun is below -14 deg altitude."""
sun = get_sun(time).transform_to(AltAz(obstime=time, location=observatory))
return sun.alt.value <= -14
def moon_is_away(time, ra, dec, observatory) -> bool:
"""Check if the moon is 30 deg away or more."""
obj_coords = SkyCoord(ra=ra * u.deg, dec=dec * u.deg, frame='icrs')
moon = get_moon(time, location=observatory)
sep = obj_coords.separation(moon).degree
return sep >= 30
if __name__ == '__main__':
args = arg_parse()
# Observatory location.
observatory = EarthLocation.of_site(args.observatory)
# Read the ephem file.
names, ras, decs, epochs, periods = read_ephem_file(args.infile)
# Get times for the run.
n1 = datetime.strptime(args.n1, '%Y-%m-%d') + timedelta(hours=12)
# Add extra day so we have nights of each date.
n2 = datetime.strptime(args.n2, '%Y-%m-%d') + timedelta(hours=36)
n1_T = Time(n1, format='datetime', scale='utc', location=observatory)
n2_T = Time(n2, format='datetime', scale='utc', location=observatory)
# Loop over each object. Remember q1 is 0.25 phase and q2 is 0.75
for name, ra, dec, epoch, period in zip(names, ras, decs, epochs, periods):
print(f'Target: {name}')
epoch_start = 0
while epoch_start < n1_T.jd:
epoch_start += period
epoch_start -= period
# Pull useable epochs
current_epoch = epoch_start
while current_epoch < n2_T.jd:
q1 = current_epoch + 0.25 * period
q2 = current_epoch - 0.25 * period
q1_T = Time(q1, format='jd', scale='utc')
q2_T = Time(q2, format='jd', scale='utc')
if n1_T.jd <= q1 <= n2_T.jd:
if sun_is_down(q1_T, observatory) and \
moon_is_away(q1_T, ra, dec, observatory):
print(f'\t{str(q1_T.datetime)[:16]}\t0.25')
            if n1_T.jd <= q2 <= n2_T.jd:
                if sun_is_down(q2_T, observatory) and \
                        moon_is_away(q2_T, ra, dec, observatory):
                    print(f'\t{str(q2_T.datetime)[:16]}\t0.75')
current_epoch += period
|
nilq/baby-python
|
python
|
import os
from googlecloudsdk.core.updater import local_state
class Error(Exception):
"""Exceptions for the endpoints_util module."""
class ScriptNotFoundError(Error):
"""An error when the parser in appcfg fails to parse the values we pass."""
def __init__(self, error_str):
super(ScriptNotFoundError, self).__init__(error_str)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
Find new papers in XML tarball files and parse them.
"""
import csv
import multiprocessing as mp
import os
import pickle
import tarfile
from collections import Counter
from pathlib import Path
import lxml.etree as ET
import numpy as np
import pandas as pd
import spacy
from utils import set_read_only, updater_log
# Default pipelines: only "parser" and "ner" can be disabled.
# Disabling any other pipelines will affect the lemma functionality.
disabled_pipelines = ["parser", "ner"]
nlp = spacy.load("en_core_web_sm", disable=disabled_pipelines)
xpath = "//abstract/sec/*|//body/sec/*|//abstract/p|//body/sec/*|//body/p"
filter_tags = [
"sc",
"italic",
"xref",
"label",
"sub",
"sup",
"inline-formula",
"fig",
"disp-formula",
"bold",
"table-wrap",
"table",
"thead",
"tbody",
"caption",
"tr",
"td",
]
parser = ET.XMLParser(encoding="UTF-8", recover=True)
stop_words = nlp.Defaults.stop_words
def process_tarball(
tarball_filename,
prev_pmc_list_filename,
word_model_vector_filename,
new_pmc_list_filename,
new_embeddings_filename,
new_token_counts_filename,
):
"""
Search new papers in an input tarball file, and save the new papers
data on disk.
"""
updater_log(f"Processing '{tarball_filename}' ...")
# Load word model vector from input pickled filename
word_model_wv = pickle.load(open(word_model_vector_filename, "rb"))
# Read previously processed PMC IDs into a set
prev_pmc_list_df = pd.read_csv(prev_pmc_list_filename, sep="\t")
prev_pmc_ids = set()
for pmc_path in prev_pmc_list_df.file_path.tolist():
pmc_id = Path(pmc_path).stem
prev_pmc_ids.add(pmc_id)
tarball_basename = Path(tarball_filename).name
with tarfile.open(tarball_filename, "r:gz") as tar_fh:
# Write header lines into three output files
with open(new_pmc_list_filename, 'w', newline='') as pmc_list_fh, \
open(new_embeddings_filename, 'w', newline='') as embeddings_fh, \
open(new_token_counts_filename, 'w', newline='') as token_counts_fh:
pmc_list_writer = csv.DictWriter(
pmc_list_fh, delimiter="\t", fieldnames=["tarfile", "file_path"]
)
pmc_list_writer.writeheader()
embeddings_writer = csv.DictWriter(
embeddings_fh,
delimiter="\t",
fieldnames=["journal", "document"] + [f"feat_{idx}" for idx in range(300)],
)
embeddings_writer.writeheader()
token_counts_writer = csv.DictWriter(
token_counts_fh,
delimiter="\t",
fieldnames=["document", "lemma", "count"]
)
token_counts_writer.writeheader()
write_data(
word_model_wv, prev_pmc_ids, tarball_basename, tar_fh,
pmc_list_writer, embeddings_writer, token_counts_writer
)
# Set output files read-only
set_read_only(new_pmc_list_filename)
set_read_only(new_embeddings_filename)
set_read_only(new_token_counts_filename)
updater_log(f"'{tarball_filename}' is done")
def write_data(
word_model_wv, prev_pmc_ids, tarball_basename, tar_fh,
pmc_list_writer, embeddings_writer, token_counts_writer
):
"""Write new papers data to disk."""
for pmc_paper in tar_fh.getmembers():
paper_name = pmc_paper.name
pmc_id = Path(paper_name).stem
# Only process regular files that are new
if not pmc_paper.isfile() or pmc_id in prev_pmc_ids:
continue
        # Save a new paper's name to pmc_list whether or not it can be parsed
pmc_list_writer.writerow(
{"tarfile": tarball_basename, "file_path": paper_name}
)
paper_fh = tar_fh.extractfile(pmc_paper)
doc_vector, word_counter = generate_vector_counts(
word_model_wv, paper_fh
)
# If the paper doesn't include valid words, do not write
# embeddings and token count.
if word_counter is None:
continue
embeddings_writer.writerow(
{
"document": pmc_id,
"journal": str(Path(paper_name).parent),
**dict(
zip([f"feat_{idx}" for idx in range(300)], doc_vector)
),
}
)
for tok in word_counter:
token_counts_writer.writerow(
{
"document": pmc_id,
"lemma": tok,
"count": word_counter[tok],
}
)
def generate_vector_counts(word_model_wv, paper_fh):
"""
Parse a paper file (paper_fh) based on word model vector (word_model_wv).
"""
tree = ET.parse(paper_fh, parser=parser)
# Process xml without specified tags
ET.strip_tags(tree, *filter_tags)
root = tree.getroot()
# Skip non-research papers
if root.attrib['article-type'].strip() != 'research-article':
return [], None
all_text = root.xpath(xpath) # a list of 'xml.etree._Element' instances
all_text = list(map(lambda x: "".join(list(x.itertext())), all_text))
# all_text[idx].itertext() returns an instance of 'lxml.etree.ElementTextIterator';
# list(x.itertext()) returns a list of strings (including '\n');
# "".join(...) combines the list of strings into a single string;
# map(...) returns an iterable of single string for each entry in all_text;
# list(map(...)) converts the iterable of single string into a list of single string.
# Combine all single strings together into ONE single string.
all_text = " ".join(all_text)
# Optimization: Remove stop words from `all_text` before feeding it to nlp.
    # This optimization not only speeds up the data processing by 5%-10%, but also
# minimizes memory usage.
all_text = [x for x in all_text.split() if x not in stop_words]
all_text = " ".join(all_text)
# Set nlp.max_length dynamically
if nlp.max_length < len(all_text):
nlp.max_length = len(all_text)
updater_log(f"set nlp.max_length to: {nlp.max_length}")
all_tokens = list(
map(
lambda x: x.lemma_,
filter(
lambda tok: tok.lemma_ in word_model_wv and tok.lemma_ not in stop_words,
nlp(all_text),
)
)
)
# Skip wonky papers that have less than 20 tokens
if len(all_tokens) < 20:
return [], None
word_vectors = [word_model_wv[tok] for tok in all_tokens]
return np.stack(word_vectors).mean(axis=0), Counter(all_tokens)
def combine_new_papers(
pmc_list_subdir, new_pmc_list_filename,
embeddings_subdir, new_embeddings_filename,
token_counts_subdir, new_token_counts_filename
):
combine_new_pmc_list(pmc_list_subdir, new_pmc_list_filename)
num_new_papers = combine_new_embeddings(
embeddings_subdir, new_embeddings_filename
)
combine_new_token_counts(token_counts_subdir, new_token_counts_filename)
return num_new_papers
def combine_new_pmc_list(pmc_list_subdir, combined_pmc_list_filename):
"""
Combine PMC list output files generated by each process into a single one.
It doesn't matter if the combined file includes duplicates.
"""
sub_files = sorted(os.listdir(pmc_list_subdir))
with open(combined_pmc_list_filename, 'w') as ofh:
for idx, filename in enumerate(sub_files):
file_path = Path(pmc_list_subdir, filename)
with open(file_path) as ifh:
# If current input file is not the first one, skip header
if idx > 0:
ifh.readline()
# Copy input file into output file line by line
for line in ifh:
ofh.write(line)
# Set combined output file read-only
set_read_only(combined_pmc_list_filename)
def combine_new_embeddings(embeddings_subdir, combined_embeddings_filename):
"""
Combines embeddings files generated by each process into a single one.
Note: Some papers exist in both "comm_use.*.xml.tar.gz" and
"non_comm_use.*.xml.tar.gz" files, these duplicates must be removed.
Returns the number of new papers.
"""
sub_files = sorted(os.listdir(embeddings_subdir))
pmc_col = 1
merged_pmc = set()
with open(combined_embeddings_filename, 'w') as ofh:
for idx, filename in enumerate(sub_files):
file_path = Path(embeddings_subdir, filename)
with open(file_path) as ifh:
for line_num, line in enumerate(ifh):
# Only copy header line from the first file
if line_num == 0:
if idx == 0:
ofh.write(line)
continue
pmc_id = line.split('\t')[pmc_col]
if pmc_id not in merged_pmc:
ofh.write(line)
merged_pmc.add(pmc_id)
# Set combined output file read-only
set_read_only(combined_embeddings_filename)
# Return the number of new papers found
return len(merged_pmc)
def combine_new_token_counts(token_counts_subdir, combined_token_counts_filename):
"""
Combine token_counts files generated by each process into a single one.
    This is a little more complex, because each row's `document` column
    in the input files is NOT unique.
"""
sub_files = sorted(os.listdir(token_counts_subdir))
merged_pmc = set()
with open(combined_token_counts_filename, 'w', newline='') as ofh:
fieldnames = ['document', 'lemma', 'count']
writer = csv.DictWriter(ofh, fieldnames=fieldnames, delimiter='\t')
writer.writeheader()
for filename in sub_files:
file_path = Path(token_counts_subdir, filename)
with open(file_path, newline='') as ifh:
prev_pmc = None
csv_reader = csv.DictReader(ifh, delimiter='\t')
for row in csv_reader:
pmc_id = row['document']
if pmc_id in merged_pmc:
continue
if pmc_id != prev_pmc: # enter a new token count session
if prev_pmc:
merged_pmc.add(prev_pmc)
prev_pmc = pmc_id
writer.writerow(row)
# Set combined output file read-only
set_read_only(combined_token_counts_filename)
def parse_new_papers(
tarball_dir,
prev_pmc_list_filename,
word_model_vector_filename,
new_papers_dir,
new_pmc_list_basename,
new_embeddings_basename,
new_token_counts_basename,
parallel=4
):
"""Process tarball files and find new papers."""
all_filenames = os.listdir(tarball_dir)
tarball_files = [x for x in all_filenames if x.endswith(".xml.tar.gz")]
pmc_list_subdir = Path(new_papers_dir, 'pmc_list_sub')
os.makedirs(pmc_list_subdir, exist_ok=True)
embeddings_subdir = Path(new_papers_dir, 'embeddings_sub')
os.makedirs(embeddings_subdir, exist_ok=True)
token_counts_subdir = Path(new_papers_dir, 'token_counts_sub')
os.makedirs(token_counts_subdir, exist_ok=True)
pool = mp.Pool(parallel)
for basename in sorted(tarball_files):
tarball_filename = Path(tarball_dir, basename)
# Each process's output file basename is the tarball filename with an
# extra ".tsv" suffix.
output_basename = basename + ".tsv"
args = (
tarball_filename,
prev_pmc_list_filename,
word_model_vector_filename,
Path(pmc_list_subdir, output_basename),
Path(embeddings_subdir, output_basename),
Path(token_counts_subdir, output_basename)
)
pool.apply_async(process_tarball, args)
pool.close()
pool.join()
combined_pmc_path = Path(new_papers_dir, new_pmc_list_basename)
combined_embeddings_path = Path(new_papers_dir, new_embeddings_basename)
combined_token_counts_path = Path(new_papers_dir, new_token_counts_basename)
num_new_papers = combine_new_papers(
pmc_list_subdir, combined_pmc_path,
embeddings_subdir, combined_embeddings_path,
token_counts_subdir, combined_token_counts_path
)
return num_new_papers
# Test harness
if __name__ == "__main__":
input_dir = "./data/current_run/input/"
output_dir = "./data/current_run/output/"
tarball_dir = output_dir + "downloaded_files"
prev_pmc_list_filename = input_dir + "pmc_oa_file_list.tsv"
word_model_vector_filename = "./data/static/word_model.wv.pkl"
new_papers_dir = output_dir + "new_papers/"
new_pmc_list_basename = "pmc_list.tsv"
new_embeddings_basename = "embeddings.tsv"
new_token_counts_basename = "token_counts.tsv"
num_new_papers = parse_new_papers(
tarball_dir,
prev_pmc_list_filename,
word_model_vector_filename,
new_papers_dir,
new_pmc_list_basename,
new_embeddings_basename,
new_token_counts_basename,
parallel=6
)
print(f"{num_new_papers:,} new papers found and parsed")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : account.py
@Time : 2021/05/11
@Author : levonwoo
@Version : 0.1
@Contact :
@License : (C)Copyright 2020-2021
@Desc    :   Account module
'''
# here put the import lib
import uuid
from QuadQuanta.portfolio.position import Position
from QuadQuanta.data.mongodb_api import insert_mongodb
class Account():
"""[summary]
"""
def __init__(self,
username=None,
passwd=None,
model='backtest',
init_cash=100000,
account_id=None,
mongo_db='QuadQuanta',
mongo_col='account',
solid=False):
self.init_cash = init_cash
self.username = username
self.passwd = passwd
self.model = model
self.available_cash = init_cash
self.orders = {}
self.positions = {}
        # stamp duty (sell-side tax)
self.stamp_duty = 0.001
        # handling / commission fee
self.handle_fee = 0.0001
self.datetime = ""
self.account_id = str(
uuid.uuid4()) if account_id is None else account_id
        # MongoDB database name and collection name
self.mongo_db = mongo_db
self.mongo_col = mongo_col
        # whether to persist the account snapshot to MongoDB
self.solid = solid
def __repr__(self) -> str:
return 'print account'
@property
def total_cash(self):
return self.available_cash + self.frozen_cash
@property
def frozen_cash(self):
return sum(
[position.frozen_cash for position in self.positions.values()])
@property
def float_profit(self):
return sum(
[position.float_profit for position in self.positions.values()])
@property
def profit_ratio(self):
return round(
100 * (self.total_assets - self.init_cash) / self.init_cash, 2)
@property
def total_assets(self):
"""
        Total assets.
"""
return self.total_cash + self.total_market_value
@property
def total_market_value(self):
"""
        Total market value of stock positions.
"""
return sum(
[position.market_value for position in self.positions.values()])
def send_order(self,
code,
volume,
price,
order_direction,
order_id=None,
order_time=None):
"""[summary]
下单函数
Parameters
----------
code : str
六位数股票代码
volume : int
股票数量
price : float
价格
order_direction : [type]
买入/卖出
order_time : [type]
下单时间
"""
if order_time:
self.datetime = order_time
        order_id = str(uuid.uuid4()) if order_id is None else order_id
checked_order = self.order_check(code, volume, price, order_direction)
checked_order['order_time'] = order_time
checked_order['order_id'] = order_id
self.orders[order_id] = checked_order
return checked_order
def order_check(self, code, volume, price, order_direction):
"""
订单预处理, 账户逻辑,卖出数量小于可卖出数量,
买入数量对应的金额小于资金余额,买入价格
Parameters
----------
code : [type]
[description]
volume : [type]
[description]
price : [type]
[description]
order_direction : [type]
[description]
"""
pos = self.get_position(code)
pos.update_pos(price, self.datetime)
if order_direction == 'buy':
            if self.available_cash >= volume * price:  # available cash covers the buy
volume = volume
else:
volume = 100 * int(self.available_cash // (100 * price))
amount = volume * price * (1 + self.handle_fee)
pos.frozen_cash += amount
            # reduce available cash
self.available_cash -= amount
order = {
'instrument_id': code,
'price': price,
'volume': volume,
                'amount': amount,  # capital required for the order
'direction': order_direction,
'last_msg': "已报",
}
elif order_direction == 'sell':
            if pos.volume_long_history >= volume:  # sellable volume covers the sell order
volume = volume
else:
volume = pos.volume_long_history
amount = volume * price * (1 - self.handle_fee - self.stamp_duty)
            # historical holdings decrease, frozen sell volume increases
pos.volume_long_history -= volume
pos.volume_short_frozen += volume
order = {
'instrument_id': code,
'price': price,
'volume': volume,
'amount': amount,
'direction': order_direction,
                'last_msg': "已报",  # "submitted"
}
else:
raise NotImplementedError
return order
def cancel_order(self, order_id):
"""
撤单, 释放冻结
Parameters
----------
order_id : uuid
唯一订单id
"""
pass
def get_position(self, code=None) -> Position:
"""
获取某个标的持仓对象
Parameters
----------
code : str
标的代码
"""
if code is None:
return list(self.positions.values())[0]
try:
return self.positions[code]
except KeyError:
pos = Position(code)
self.positions[code] = pos
return self.positions[code]
def make_deal(self, order):
"""
撮合
Parameters
----------
order : [type]
[description]
"""
if isinstance(order, dict):
self.process_deal(code=order['instrument_id'],
trade_price=order['price'],
trade_volume=order['volume'],
trade_amount=order['amount'],
order_direction=order['direction'],
order_id=order['order_id'],
order_time=order['order_time'])
def process_deal(self,
code,
trade_price,
trade_volume,
trade_amount,
order_direction,
order_id=None,
order_time=None,
trade_id=None):
pos = self.get_position(code)
pos.update_pos(trade_price, order_time)
if order_id in self.orders.keys():
#
order = self.orders[order_id]
            # assume the order is fully filled by default
            # buy / sell bookkeeping
if order_direction == "buy":
                # frozen cash is converted into a position
pos.frozen_cash -= trade_amount
pos.volume_long_today += trade_volume
                # position cost increases
pos.position_cost += trade_amount
pos.open_cost += trade_amount
elif order_direction == "sell":
                # frozen holdings are converted into available cash
pos.volume_short_frozen -= trade_volume
pos.volume_short_today += trade_volume
self.available_cash += trade_amount
                # position cost decreases
pos.position_cost -= trade_amount
else:
raise NotImplementedError
@property
def account_info(self):
return {
'cash': self.total_cash,
'market_value': self.total_market_value,
'assert': self.total_assets
}
@property
def positions_msg(self):
return [
position.static_message for position in self.positions.values()
if position.volume_long + position.volume_short_today > 0
]
@property
def account_section(self):
return {
'account_id': self.account_id,
'date': self.datetime,
'account': self.account_info,
'positions': self.positions_msg,
'orders': self.orders,
}
def save_account_section(self):
insert_mongodb(self.mongo_db, self.mongo_col, self.account_section)
def settle(self):
if self.solid:
self.save_account_section()
self.orders = {}
for code in list(self.positions.keys()):
item = self.positions[code]
item.settle()
            # delete the position two days after it has been fully closed out
if item.volume_long == 0 and item.hold_days > 2:
del self.positions[code]
if __name__ == "__main__":
acc = Account('test', 'test')
od = acc.send_order('000001',
100,
12,
'buy',
order_time='2020-01-10 09:32:00')
acc.make_deal(od)
od2 = acc.send_order('000002',
100,
12,
'buy',
order_time='2020-01-10 09:33:00')
acc.make_deal(od2)
print(acc.positions_msg)
acc.settle()
# print(pos)
od3 = acc.send_order('000001',
100,
14,
'sell',
order_time='2020-01-11 09:34:00')
acc.make_deal(od3)
acc.settle()
print(acc.positions_msg)
# print(pos)
# print(acc.total_market_value)
|
nilq/baby-python
|
python
|
API_KEY=""
API_SEC=""
API_PHR=""
|
nilq/baby-python
|
python
|
class PycamBaseException(Exception):
pass
class AbortOperationException(PycamBaseException):
pass
class CommunicationError(PycamBaseException):
pass
class InitializationError(PycamBaseException):
pass
class InvalidDataError(PycamBaseException):
pass
class MissingAttributeError(InvalidDataError):
pass
class AmbiguousDataError(InvalidDataError):
pass
class UnexpectedAttributeError(InvalidDataError):
pass
class InvalidKeyError(InvalidDataError):
def __init__(self, invalid_key, choice_enum):
# retrieve the pretty name of the enum
enum_name = str(choice_enum).split("'")[1]
super().__init__("Unknown {}: {} (should be one of: {})".format(
enum_name, invalid_key, ", ".join([item.value for item in choice_enum])))
class LoadFileError(PycamBaseException):
pass
class MissingDependencyError(PycamBaseException):
""" a dependency (e.g. an external python module) is missing """
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class MergeModelsInputSpec(CommandLineInputSpec):
Model1 = File(position=-3, desc="Model", exists=True, argstr="%s")
Model2 = File(position=-2, desc="Model", exists=True, argstr="%s")
ModelOutput = traits.Either(
traits.Bool,
File(),
position=-1,
hash_files=False,
desc="Model",
argstr="%s")
class MergeModelsOutputSpec(TraitedSpec):
ModelOutput = File(position=-1, desc="Model", exists=True)
class MergeModels(SEMLikeCommandLine):
"""title: Merge Models
category: Surface Models
description: Merge the polydata from two input models and output a new model with the added polydata. Uses the vtkAppendPolyData filter. Works on .vtp and .vtk surface files.
version: $Revision$
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MergeModels
contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Daniel Haehn (SPL, BWH)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = MergeModelsInputSpec
output_spec = MergeModelsOutputSpec
_cmd = "MergeModels "
_outputs_filenames = {'ModelOutput': 'ModelOutput.vtk'}
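# A hypothetical usage sketch (not part of the autogenerated file): the SEM-like
# interfaces here are driven through nipype's standard inputs/run() pattern,
# assuming the corresponding Slicer CLI executable is available on PATH.
#
#   merger = MergeModels()
#   merger.inputs.Model1 = 'lh_pial.vtk'
#   merger.inputs.Model2 = 'rh_pial.vtk'
#   merger.inputs.ModelOutput = 'merged.vtk'
#   result = merger.run()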
class ModelToLabelMapInputSpec(CommandLineInputSpec):
distance = traits.Float(desc="Sample distance", argstr="--distance %f")
InputVolume = File(
position=-3, desc="Input volume", exists=True, argstr="%s")
surface = File(position=-2, desc="Model", exists=True, argstr="%s")
OutputVolume = traits.Either(
traits.Bool,
File(),
position=-1,
hash_files=False,
desc="The label volume",
argstr="%s")
class ModelToLabelMapOutputSpec(TraitedSpec):
OutputVolume = File(position=-1, desc="The label volume", exists=True)
class ModelToLabelMap(SEMLikeCommandLine):
"""title: Model To Label Map
category: Surface Models
description: Intersects an input model with an reference volume and produces an output label map.
version: 0.1.0.$Revision: 8643 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/PolyDataToLabelMap
contributor: Nicole Aucoin (SPL, BWH), Xiaodong Tao (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = ModelToLabelMapInputSpec
output_spec = ModelToLabelMapOutputSpec
_cmd = "ModelToLabelMap "
_outputs_filenames = {'OutputVolume': 'OutputVolume.nii'}
class GrayscaleModelMakerInputSpec(CommandLineInputSpec):
InputVolume = File(
position=-2,
desc="Volume containing the input grayscale data.",
exists=True,
argstr="%s")
OutputGeometry = traits.Either(
traits.Bool,
File(),
position=-1,
hash_files=False,
desc="Output that contains geometry model.",
argstr="%s")
threshold = traits.Float(
desc=
"Grayscale threshold of isosurface. The resulting surface of triangles separates the volume into voxels that lie above (inside) and below (outside) the threshold.",
argstr="--threshold %f")
name = traits.Str(desc="Name to use for this model.", argstr="--name %s")
smooth = traits.Int(
desc="Number of smoothing iterations. If 0, no smoothing will be done.",
argstr="--smooth %d")
decimate = traits.Float(
desc=
"Target reduction during decimation, as a decimal percentage reduction in the number of polygons. If 0, no decimation will be done.",
argstr="--decimate %f")
splitnormals = traits.Bool(
desc=
"Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affect measurements",
argstr="--splitnormals ")
pointnormals = traits.Bool(
desc=
"Calculate the point normals? Calculated point normals make the surface appear smooth. Without point normals, the surface will appear faceted.",
argstr="--pointnormals ")
class GrayscaleModelMakerOutputSpec(TraitedSpec):
OutputGeometry = File(
position=-1, desc="Output that contains geometry model.", exists=True)
class GrayscaleModelMaker(SEMLikeCommandLine):
"""title: Grayscale Model Maker
category: Surface Models
description: Create 3D surface models from grayscale data. This module uses Marching Cubes to create an isosurface at a given threshold. The resulting surface consists of triangles that separate a volume into regions below and above the threshold. The resulting surface can be smoothed and decimated. This model works on continuous data while the module Model Maker works on labeled (or discrete) data.
version: 3.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleModelMaker
license: slicer3
contributor: Nicole Aucoin (SPL, BWH), Bill Lorensen (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = GrayscaleModelMakerInputSpec
output_spec = GrayscaleModelMakerOutputSpec
_cmd = "GrayscaleModelMaker "
_outputs_filenames = {'OutputGeometry': 'OutputGeometry.vtk'}
class ProbeVolumeWithModelInputSpec(CommandLineInputSpec):
InputVolume = File(
position=-3,
desc="Volume to use to 'paint' the model",
exists=True,
argstr="%s")
InputModel = File(
position=-2, desc="Input model", exists=True, argstr="%s")
OutputModel = traits.Either(
traits.Bool,
File(),
position=-1,
hash_files=False,
desc="Output 'painted' model",
argstr="%s")
class ProbeVolumeWithModelOutputSpec(TraitedSpec):
OutputModel = File(position=-1, desc="Output 'painted' model", exists=True)
class ProbeVolumeWithModel(SEMLikeCommandLine):
"""title: Probe Volume With Model
category: Surface Models
description: Paint a model by a volume (using vtkProbeFilter).
version: 0.1.0.$Revision: 1892 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ProbeVolumeWithModel
contributor: Lauren O'Donnell (SPL, BWH)
acknowledgements: BWH, NCIGT/LMI
"""
input_spec = ProbeVolumeWithModelInputSpec
output_spec = ProbeVolumeWithModelOutputSpec
_cmd = "ProbeVolumeWithModel "
_outputs_filenames = {'OutputModel': 'OutputModel.vtk'}
class LabelMapSmoothingInputSpec(CommandLineInputSpec):
labelToSmooth = traits.Int(
desc=
"The label to smooth. All others will be ignored. If no label is selected by the user, the maximum label in the image is chosen by default.",
argstr="--labelToSmooth %d")
numberOfIterations = traits.Int(
desc="The number of iterations of the level set AntiAliasing algorithm",
argstr="--numberOfIterations %d")
maxRMSError = traits.Float(
desc="The maximum RMS error.", argstr="--maxRMSError %f")
gaussianSigma = traits.Float(
desc="The standard deviation of the Gaussian kernel",
argstr="--gaussianSigma %f")
inputVolume = File(
position=-2,
desc="Input label map to smooth",
exists=True,
argstr="%s")
outputVolume = traits.Either(
traits.Bool,
File(),
position=-1,
hash_files=False,
desc="Smoothed label map",
argstr="%s")
class LabelMapSmoothingOutputSpec(TraitedSpec):
outputVolume = File(position=-1, desc="Smoothed label map", exists=True)
class LabelMapSmoothing(SEMLikeCommandLine):
"""title: Label Map Smoothing
category: Surface Models
description: This filter smoothes a binary label map. With a label map as input, this filter runs an anti-alising algorithm followed by a Gaussian smoothing algorithm. The output is a smoothed label map.
version: 1.0
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LabelMapSmoothing
contributor: Dirk Padfield (GE), Josh Cates (Utah), Ross Whitaker (Utah)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. This filter is based on work developed at the University of Utah, and implemented at GE Research.
"""
input_spec = LabelMapSmoothingInputSpec
output_spec = LabelMapSmoothingOutputSpec
_cmd = "LabelMapSmoothing "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
class ModelMakerInputSpec(CommandLineInputSpec):
InputVolume = File(
position=-1,
desc=
"Input label map. The Input Volume drop down menu is populated with the label map volumes that are present in the scene, select one from which to generate models.",
exists=True,
argstr="%s")
color = File(
desc="Color table to make labels to colors and objects",
exists=True,
argstr="--color %s")
modelSceneFile = traits.Either(
traits.Bool,
InputMultiPath(File(), ),
hash_files=False,
desc=
"Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.",
argstr="--modelSceneFile %s...")
name = traits.Str(
desc=
"Name to use for this model. Any text entered in the entry box will be the starting string for the created model file names. The label number and the color name will also be part of the file name. If making multiple models, use this as a prefix to the label and color name.",
argstr="--name %s")
generateAll = traits.Bool(
desc=
"Generate models for all labels in the input volume. select this option if you want to create all models that correspond to all values in a labelmap volume (using the Joint Smoothing option below is useful with this option). Ignores Labels, Start Label, End Label settings. Skips label 0.",
argstr="--generateAll ")
labels = InputMultiPath(
traits.Int,
desc=
"A comma separated list of label values from which to make models. f you specify a list of Labels, it will override any start/end label settings. If you click Generate All Models it will override the list of labels and any start/end label settings.",
sep=",",
argstr="--labels %s")
start = traits.Int(
desc=
"If you want to specify a continuous range of labels from which to generate models, enter the lower label here. Voxel value from which to start making models. Used instead of the label list to specify a range (make sure the label list is empty or it will over ride this).",
argstr="--start %d")
end = traits.Int(
desc=
"If you want to specify a continuous range of labels from which to generate models, enter the higher label here. Voxel value up to which to continue making models. Skip any values with zero voxels.",
argstr="--end %d")
skipUnNamed = traits.Bool(
desc=
"Select this to not generate models from labels that do not have names defined in the color look up table associated with the input label map. If true, only models which have an entry in the color table will be generated. If false, generate all models that exist within the label range.",
argstr="--skipUnNamed ")
jointsmooth = traits.Bool(
desc=
"This will ensure that all resulting models fit together smoothly, like jigsaw puzzle pieces. Otherwise the models will be smoothed independently and may overlap.",
argstr="--jointsmooth ")
smooth = traits.Int(
desc=
"Here you can set the number of smoothing iterations for Laplacian smoothing, or the degree of the polynomial approximating the windowed Sinc function. Use 0 if you wish no smoothing. ",
argstr="--smooth %d")
filtertype = traits.Enum(
"Sinc",
"Laplacian",
desc=
"You can control the type of smoothing done on the models by selecting a filter type of either Sinc or Laplacian.",
argstr="--filtertype %s")
decimate = traits.Float(
desc=
"Chose the target reduction in number of polygons as a decimal percentage (between 0 and 1) of the number of polygons. Specifies the percentage of triangles to be removed. For example, 0.1 means 10% reduction and 0.9 means 90% reduction.",
argstr="--decimate %f")
splitnormals = traits.Bool(
desc=
"Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affects measurements.",
argstr="--splitnormals ")
pointnormals = traits.Bool(
desc=
"Turn this flag on if you wish to calculate the normal vectors for the points.",
argstr="--pointnormals ")
pad = traits.Bool(
desc=
"Pad the input volume with zero value voxels on all 6 faces in order to ensure the production of closed surfaces. Sets the origin translation and extent translation so that the models still line up with the unpadded input volume.",
argstr="--pad ")
saveIntermediateModels = traits.Bool(
desc=
"You can save a copy of the models after each of the intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation). These intermediate models are not saved in the mrml file, you have to load them manually after turning off deleting temporary files in they python console (View ->Python Interactor) using the following command slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff().",
argstr="--saveIntermediateModels ")
debug = traits.Bool(
desc=
"turn this flag on in order to see debugging output (look in the Error Log window that is accessed via the View menu)",
argstr="--debug ")
class ModelMakerOutputSpec(TraitedSpec):
modelSceneFile = OutputMultiPath(
File(exists=True),
desc=
"Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you."
)
class ModelMaker(SEMLikeCommandLine):
"""title: Model Maker
category: Surface Models
description: Create 3D surface models from segmented data.<p>Models are imported into Slicer under a model hierarchy node in a MRML scene. The model colors are set by the color table associated with the input volume (these colours will only be visible if you load the model scene file).</p><p><b>Create Multiple:</b></p><p>If you specify a list of Labels, it will over ride any start/end label settings.</p><p>If you click<i>Generate All</i>it will over ride the list of lables and any start/end label settings.</p><p><b>Model Maker Settings:</b></p><p>You can set the number of smoothing iterations, target reduction in number of polygons (decimal percentage). Use 0 and 1 if you wish no smoothing nor decimation.<br>You can set the flags to split normals or generate point normals in this pane as well.<br>You can save a copy of the models after intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation); these models are not saved in the mrml file, turn off deleting temporary files first in the python window:<br><i>slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff()</i></p>
version: 4.1
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ModelMaker
license: slicer4
contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Bill Lorensen (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = ModelMakerInputSpec
output_spec = ModelMakerOutputSpec
_cmd = "ModelMaker "
_outputs_filenames = {'modelSceneFile': 'modelSceneFile.mrml'}
|
nilq/baby-python
|
python
|
import os
import numpy as np
if os.environ.get("PYQUANT_DEV", False) == "True":
try:
import pyximport
pyximport.install(
setup_args={"include_dirs": np.get_include()}, reload_support=True
)
except Exception as e:
import traceback
traceback.print_exc()
pass
from .cpeaks import ( # noqa: F401
bigauss_jac,
bigauss_func,
bigauss_ndim,
find_nearest,
find_nearest_index,
find_nearest_indices,
gauss_func,
gauss_hess,
gauss_ndim,
gauss_jac,
get_ppm,
)
|
nilq/baby-python
|
python
|
#coding:utf-8
import tkinter
from tkinter import ttk
from Icon import ICON
from PIL import Image, ImageTk
import queue
import cv2
import numpy as np
import sys
import platform
OS = platform.system()
if OS == 'Windows':
import ctypes
def DisplayWorker(frame_shared, camera_width, camera_height, measure_params):
if OS == 'Windows':
ctypes.windll.shcore.SetProcessDpiAwareness(True)
root = tkinter.Tk()
disp = Display(root, frame_shared, camera_width, camera_height, measure_params)
class Display:
def __init__(self, root, frame_shared, camera_width, camera_height, measure_params):
self.root = root
iconimg = tkinter.PhotoImage(data=ICON)
root.iconphoto(True, iconimg)
self.root.title("ScaleSpeedCamera")
self.root.resizable(False, False)
s = ttk.Style()
if 'winnative' in s.theme_names():
s.theme_use('winnative')
else:
s.theme_use('alt')
root.protocol("WM_DELETE_WINDOW", self.on_close)
self.frame_shared = frame_shared
self.camera_width = camera_width
self.camera_height = camera_height
self.measure_params = measure_params
mainframe = ttk.Frame(self.root, padding="12 12 12 12")
mainframe.grid(column=0, row=0, sticky=(tkinter.N, tkinter.W, tkinter.E, tkinter.S))
self.canvas = tkinter.Canvas(mainframe)
self.canvas.configure(width=camera_width, height=camera_height)
self.canvas.grid(column=1, row=1, padx=10, pady=10, sticky=(tkinter.N, tkinter.W))
scales = ttk.Frame(mainframe, padding="12 12 12 12")
scales.grid(column=1, row=2)
        rect_frame = ttk.LabelFrame(scales, text='最小動体面積', padding="12 12 12 12")  # "minimum moving-object area"
self.rect_size = tkinter.IntVar()
rect_scale = tkinter.Scale(rect_frame, orient=tkinter.HORIZONTAL, length=200, from_=1.0, to=100.0, variable=self.rect_size)
rect_scale.set(15)
rect_scale.grid(column=0, row=0, sticky=tkinter.W)
rect_frame.grid(column=1, row=0, sticky=(tkinter.W))
        weight_frame = ttk.LabelFrame(scales, text='動体検知しきい値', padding="12 12 12 12")  # "motion-detection threshold"
self.weight = tkinter.IntVar()
weight_scale = tkinter.Scale(weight_frame, orient=tkinter.HORIZONTAL, length=200, from_=1.0, to=50.0, variable=self.weight)
weight_scale.set(25)
weight_scale.grid(column=0, row=0, sticky=tkinter.W)
weight_frame.grid(column=2, row=0, sticky=(tkinter.W))
        area_height_frame = ttk.LabelFrame(scales, text='検知域高さ', padding="12 12 12 12")  # "detection-zone height"
self.area_height = tkinter.IntVar()
area_height_scale = tkinter.Scale(area_height_frame, orient=tkinter.HORIZONTAL, length=200, from_=1.0, to=300.0, variable=self.area_height)
area_height_scale.set(150)
area_height_scale.grid(column=0, row=0, sticky=tkinter.W)
area_height_frame.grid(column=3, row=0, sticky=(tkinter.W))
        code_distance_frame = ttk.LabelFrame(scales, text='バーコード間隔(cm)', padding="12 12 12 12")  # "barcode spacing (cm)"
self.code_distance = tkinter.IntVar()
code_distance_scale = tkinter.Scale(code_distance_frame, orient=tkinter.HORIZONTAL, length=400, from_=15.0, to=75.0, variable=self.code_distance)
code_distance_scale.set(15)
code_distance_scale.grid(column=0, row=0, sticky=tkinter.W)
code_distance_frame.grid(column=4, row=0, sticky=(tkinter.W))
self.update()
self.root.mainloop()
def update(self):
frame = np.array(self.frame_shared, dtype=np.uint8).reshape(self.camera_height, self.camera_width, 3)
image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.photo = ImageTk.PhotoImage(image=Image.fromarray(image_rgb))
self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
self.measure_params[0] = self.rect_size.get()
self.measure_params[1] = self.weight.get()
self.measure_params[2] = self.area_height.get()
self.measure_params[4] = self.code_distance.get()
self.root.after(50, self.update)
def on_close(self):
self.root.destroy()
sys.exit()
|
nilq/baby-python
|
python
|
def oper(op, a, b):
op = str(op)
a = float(a)
b = float(b)
if op == "*":
return a * b
elif op == "/":
return a / b
elif op == "+":
return a + b
elif op == "-":
return a - b
elif op == "%":
return a % b
elif op == "^":
return a**b
operation = "*/+-%^"
instack = []
post = list(input().split())
for i in range(len(post)):
if post[i] in operation:
instack.append(oper(post[i], instack.pop(-2), instack.pop(-1)))
else:
instack.append(post[i])
print(float(instack[0]))
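# Worked example (hypothetical input): entering the postfix expression
#   3 4 + 2 *
# pushes 3 and 4, applies "+" to give 7.0, pushes 2, applies "*" and prints 14.0.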
|
nilq/baby-python
|
python
|
"""
Given an integer n, return the first n-line Yang Hui triangle.
Example 1:
Input : n = 4
Output :
[
[1]
[1,1]
[1,2,1]
[1,3,3,1]
]
Solution:
Construct pascal triangle line by line.
"""
class Solution:
"""
@param n: a Integer
@return: the first n-line Yang Hui's triangle
"""
def calcYangHuisTriangle(self, n):
# write your code here
if n == 0:
return []
elif n == 1:
return [[1]]
else:
res = [[1]]
for i in range(1, n):
line = [1 for j in range(i+1)]
for k in range(1, i):
line[k] = res[i-1][k-1] + res[i-1][k]
res.append(line)
return res
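# A minimal usage sketch (hypothetical driver, not part of the original snippet);
# the 4-line case reproduces the example from the docstring above.
if __name__ == "__main__":
    triangle = Solution().calcYangHuisTriangle(4)
    # Expected: [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
    for row in triangle:
        print(row)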
|
nilq/baby-python
|
python
|
import sys
a = sys.stdin.readline()
sys.stdout.write(a)
sys.stderr.write(a)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
from fn.core import main
ROOT = "http://seriesblanco.com"
URL_TREE = ROOT+"/serie/1653/rick-and-morty.html"
URL = ROOT+"/serie/1653/temporada-{}/capitulo-{}/rick-and-morty.html"
main(sys.argv, ROOT, URL_TREE, URL)
|
nilq/baby-python
|
python
|
class Partner:
    database = []  # shared class-level registry of all Partner instances
def __init__(self, name, age, likes_me):
self.database.append(self)
self.name = name
self.age = age
self.likes_me = likes_me
Maria = Partner("Maria", 21, False)
Florian = Partner("Florian", 116, False)
Eve = Partner("Eve", 22, True)
Fiona = Partner("Fiona", 55, True)
for partner in Partner.database:
if partner.age<25 and partner.likes_me==True:
print(partner.name +"(age " + str(partner.age) + ") likes you!")
|
nilq/baby-python
|
python
|
import os
import sqlalchemy as sa
from dotenv import load_dotenv
load_dotenv()
def connect():
user = os.environ.get("DB_USER")
db_name = os.environ.get("DB_NAME")
db_pass = os.environ.get("DB_PASS")
db_port = os.environ.get("DB_PORT")
db_host = os.environ.get("DB_HOST")
# print(user, db_name, db_pass)
# print(user, db_pass, db_host, db_port, db_name)
url = "postgresql://{}:{}@{}:{}/{}"
url = url.format(user, db_pass, db_host, db_port, db_name)
# The return value of create_engine() is our connection object
connection = sa.create_engine(url, client_encoding="utf8")
# We then bind the connection to MetaData()
metadata = sa.MetaData(bind=connection)
return connection, metadata
con, meta = connect()
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
import sys
path = '../resources/haarcascades/haarcascade_frontalface_default.xml'
video = cv2.VideoCapture('/dev/video0')
if not video.isOpened():
print('Open video device fail')
sys.exit()
def empty(p):
pass
cv2.namedWindow('Camera')
cv2.createTrackbar('Scale', 'Camera', 100, 1000, empty)
cv2.createTrackbar('Neig', 'Camera', 1, 10, empty)
cv2.createTrackbar('Min Area', 'Camera', 1000, 100000, empty)
# load the classifier
cascade = cv2.CascadeClassifier(path)
while True:
timer = cv2.getTickCount()
ok, img = video.read()
if not ok:
print('Image capture fail')
break
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
scale = 1 + cv2.getTrackbarPos('Scale', 'Camera') / 1000
minNeig = cv2.getTrackbarPos('Neig', 'Camera')
objects = cascade.detectMultiScale(imgGray, scale, minNeig)
fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
cv2.putText(img, 'FPS: {}'.format(int(fps)), (50,20), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,200), thickness=2)
for x, y, w, h in objects:
pt1 = x, y
pt2 = x + w, y + h
area = w * h
minArea = cv2.getTrackbarPos('Min Area', 'Camera')
if area >= minArea:
cv2.rectangle(img, pt1, pt2, (0,255,0), thickness=2)
cv2.putText(img, 'Face', (x,y-10), cv2.FONT_HERSHEY_PLAIN, 0.7, (0,255,0))
imgGray = cv2.cvtColor(imgGray, cv2.COLOR_GRAY2BGR)
cv2.imshow('Camera', img)
if cv2.waitKey(1) == (0xFF & ord('q')):
break
|
nilq/baby-python
|
python
|
# vim: set fenc=utf8 ts=4 sw=4 et :
import sys
import xml.sax
import imp
from os import path
from signal import signal, SIGINT
from shutil import copytree, ignore_patterns
from pkg_resources import resource_filename
from configparser import ConfigParser
from .logging import *
from .conf import Conf
from .plugin import *
from .pdmlhandler import PdmlHandler
def _add_common_arguments(argparser):
argparser.add_argument(
'-s',
dest='EXTRACT_SHOW',
action='store_true',
help='Extract show names, every data leaf will now look like {{ raw : [] , show: [] }} [default: {}]'.format(
Conf.EXTRACT_SHOW
)
)
argparser.add_argument(
'-d',
dest='DEBUG',
action='store_true',
help='Debug mode [default: {}]'.format(
Conf.DEBUG
)
)
def pdml2flow():
def add_arguments_cb(argparser):
argparser.add_argument(
'-f',
dest='FLOW_DEF_STR',
action='append',
help='Fields which define the flow, nesting with: \'{}\' [default: {}]'.format(
Conf.FLOW_DEF_NESTCHAR, Conf.FLOW_DEF_STR
)
)
argparser.add_argument(
'-t',
type=int,
dest='FLOW_BUFFER_TIME',
            help='Length (in seconds) to buffer a flow before writing the packets [default: {}]'.format(
Conf.FLOW_BUFFER_TIME
)
)
argparser.add_argument(
'-l',
type=int,
dest='DATA_MAXLEN',
            help='Maximum length of data in tshark pdml-field [default: {}]'.format(
Conf.DATA_MAXLEN
)
)
argparser.add_argument(
'-c',
dest='COMPRESS_DATA',
action='store_true',
help='Removes duplicate data when merging objects, will not preserve order of leaves [default: {}]'.format(
Conf.COMPRESS_DATA
)
)
argparser.add_argument(
'-a',
dest='FRAMES_ARRAY',
action='store_true',
help='Instead of merging the frames will append them to an array [default: {}]'.format(
Conf.FRAMES_ARRAY
)
)
_add_common_arguments(argparser)
def postprocess_conf_cb(conf):
"""Split each flowdef to a path."""
if conf['FLOW_DEF_STR'] is not None:
conf['FLOW_DEF'] = Conf.get_real_paths(
conf['FLOW_DEF_STR'],
Conf.FLOW_DEF_NESTCHAR
)
Conf.load(
'Aggregates wireshark pdml to flows',
add_arguments_cb,
postprocess_conf_cb
)
start_parser()
def pdml2frame():
def add_arguments_cb(argparser):
_add_common_arguments(argparser)
def postprocess_conf_cb(conf):
conf['DATA_MAXLEN'] = sys.maxsize
conf['FLOW_BUFFER_TIME'] = 0
conf['FLOW_DEF_STR'] = [ 'frame.number' ]
conf['FLOW_DEF'] = Conf.get_real_paths(
conf['FLOW_DEF_STR'],
Conf.FLOW_DEF_NESTCHAR
)
Conf.load(
'Converts wireshark pdml to frames',
add_arguments_cb,
postprocess_conf_cb
)
start_parser()
def start_parser():
# print config
for name, value in Conf.get().items():
debug('{} : {}'.format(name, value))
handler = PdmlHandler()
def sigint_handler(sig, frame):
handler.endDocument()
sys.exit(0)
signal(SIGINT, sigint_handler)
try:
xml.sax.parse(
Conf.IN,
handler
)
except xml.sax._exceptions.SAXParseException as e:
# this might happen when a pdml file is malformed
warning('Parser returned exception: {}'.format(e))
handler.endDocument()
def pdml2flow_new_plugin():
def add_arguments_cb(argparser):
argparser.add_argument(
'DST',
type=str,
nargs='+',
help='Where to initialize the plugin, basename will become the plugin name'
)
Conf.load(
'Initializes a new plugin',
add_arguments_cb
)
for dst in Conf.DST:
plugin_name = path.basename(dst)
plugin_conf = ConfigParser({
'plugin_name': plugin_name
})
copytree(
resource_filename(__name__, 'plugin-skeleton'),
dst,
ignore=ignore_patterns('__pycache__')
)
with open(path.join(dst, Conf.PLUGIN_CONF_NAME), mode='w') as fd:
plugin_conf.write(fd)
|
nilq/baby-python
|
python
|
from .CancerModel import CancerModel # , CancerModelIterator
from .ExperimentalCondition import ExperimentalCondition # , ExpCondIterator
from .TreatmentResponseExperiment import TreatmentResponseExperiment # , TREIterator
import os
import yaml
import shutil
from dl_playground.path import MODEL_ROOT
def load_and_save_config(config_path, model_path):
"""Loads the config and save a copy to the model folder."""
with open(config_path) as f:
config = yaml.safe_load(f)
model_path = os.path.expanduser(model_path)
# If `model_path` is absolute, os.path.join would return
# `model_path` (!!)
model_path = os.path.join(MODEL_ROOT, model_path)
# Save the config
if not os.path.isdir(model_path):
os.makedirs(model_path)
shutil.copyfile(
src=config_path,
dst=os.path.join(model_path, 'exp_config.yaml')
)
return config
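The warning in the comment above is worth spelling out: os.path.join discards everything before an absolute component, which is why an absolute model_path effectively bypasses MODEL_ROOT. A small standalone illustration (paths are made up):

import posixpath

# posixpath.join (the same rule os.path.join follows on Linux/macOS) drops
# earlier components once it sees an absolute path.
print(posixpath.join('/models', 'resnet/run1'))  # /models/resnet/run1
print(posixpath.join('/models', '/tmp/run1'))    # /tmp/run1

# Hypothetical usage: a relative model_path lands under MODEL_ROOT,
# an absolute one is used as-is.
# config = load_and_save_config('configs/exp.yaml', 'resnet/run1')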
# ******************************************************************************
# This file is part of the AaMakro5oul project
# (An OSC/MIDI controller for Ableton Live with DJ features)
#
# Full project source: https://github.com/hiramegl/AaMakro5oul
#
# License : Apache License 2.0
# Full license: https://github.com/hiramegl/AaMakro5oul/blob/master/LICENSE
#
# Copyright 2018, 2019 by Hiram Galicia (hiramegl@yahoo.com)
# http://www.unasystems.com
#
# All rights reserved.
# ******************************************************************************
from CoreHandler import CoreHandler
# ******************************************************************************
# Session commands handler
# ******************************************************************************
class SessionCmdHandler(CoreHandler):
def __init__(self, _oCtrlInstance, _oOscServer, _hConfig):
CoreHandler.__init__(self, _oCtrlInstance, _oOscServer, _hConfig)
bIgnoreRelease = True
bLogRxMsgs = False
self.config('/session/cmd', bIgnoreRelease, bLogRxMsgs)
self.add_callbacks(['reset','up','down','left','right', 'stop', 'arrange', 'toggle', 'pause', 'cueing','record','trackincr','sceneincr'])
self.reset_session_increments()
self.highlight_session()
def disconnect(self):
self.reset_session_increments()
def reset_session_increments(self):
self.m_nTrackIncr = 2 #self.gui_num_tracks()
self.m_nSceneIncr = 4 #self.gui_num_scenes()
self.send_msg('trackincr', self.m_nTrackIncr)
self.send_msg('sceneincr', self.m_nSceneIncr)
def handle(self, _aMessage):
if (self.m_sCmd == 'trackincr'):
self.m_nTrackIncr = int(_aMessage[2])
self.log('> new track increment: %d' % (self.m_nTrackIncr))
self.alert('> new track increment: %d' % (self.m_nTrackIncr))
return # nothing else to do here
elif (self.m_sCmd == 'sceneincr'):
self.m_nSceneIncr = int(_aMessage[2])
self.log('> new scene increment: %d' % (self.m_nSceneIncr))
self.alert('> new scene increment: %d' % (self.m_nSceneIncr))
return # nothing else to do here
elif (self.m_sCmd == 'reset'):
self.alert('Resetting %s' % (self.m_sProductName))
# TrackClipHandler: update track clips
# TrackCmdHandler : update track buttons
# TrackVolHandler : update track volumes
# SceneClipHandler: update scene launch buttons
self.alert('> %s reset' % (self.m_sProductName))
self.update_observers('session_reset')
elif (self.m_sCmd == 'left'):
if (self.gui_track_offset() - self.m_nTrackIncr >= 0):
self.gui_track_offset(self.gui_track_offset() - self.m_nTrackIncr)
else:
self.gui_track_offset(0)
self.highlight_session()
# TrackClipHandler: update track clips
# TrackCmdHandler : update track buttons
# TrackVolHandler : update track volumes
self.update_observers('new_tracks_sel')
elif (self.m_sCmd == 'right'):
if (self.gui_track_offset() + self.m_nTrackIncr < len(self.tracks())):
self.gui_track_offset(self.gui_track_offset() + self.m_nTrackIncr)
self.highlight_session()
# TrackClipHandler: update track clips
# TrackCmdHandler : update track buttons
# TrackVolHandler : update track volumes
self.update_observers('new_tracks_sel')
elif (self.m_sCmd == 'up'):
if (self.gui_scene_offset() - self.m_nSceneIncr >= 0):
self.gui_scene_offset(self.gui_scene_offset() - self.m_nSceneIncr)
else:
self.gui_scene_offset(0)
self.highlight_session()
# SceneClipHandler: update scene launch buttons
self.update_observers('new_scenes_sel')
elif (self.m_sCmd == 'down'):
if (self.gui_scene_offset() + self.m_nSceneIncr < len(self.scenes())):
self.gui_scene_offset(self.gui_scene_offset() + self.m_nSceneIncr)
self.highlight_session()
# SceneClipHandler: update scene launch buttons
self.update_observers('new_scenes_sel')
elif (self.m_sCmd == 'stop'):
self.song().stop_all_clips()
self.song().stop_playing()
self.alert('> %s stopping' % (self.m_sProductName))
elif (self.m_sCmd == 'record'):
bSessionRec = self.song().session_record
self.song().session_record = not bSessionRec
elif (self.m_sCmd == 'pause'):
return # handled by a ruby script
elif (self.m_sCmd == 'arrange'):
return # handled by a ruby script
elif (self.m_sCmd == 'toggle'):
return # handled by a ruby script
elif (self.m_sCmd == 'cueing'):
return # handled by a ruby script
def highlight_session(self):
bIncludeReturnTracks = False
self.m_oCtrlInstance.set_session_highlight(self.gui_track_offset(), self.gui_scene_offset(), self.gui_num_tracks(), self.gui_num_scenes(), bIncludeReturnTracks)
# Ableton Live events management *******************************************
def add_listeners(self):
self.remove_listeners()
if (self.song().session_record_has_listener(self.on_session_record_change) != 1):
self.song().add_session_record_listener(self.on_session_record_change)
def on_session_record_change(self):
bSessionRec = self.song().session_record
nRecord = 1.0 if (bSessionRec) else 0.0
self.send_msg('record', nRecord)
def remove_listeners(self):
if (self.song().session_record_has_listener(self.on_session_record_change) == 1):
self.song().remove_session_record_listener(self.on_session_record_change)
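The left/right and up/down branches of handle() all apply the same rule: shift the session window by the configured increment without running past either end of the track or scene list. A framework-free sketch of that clamping, with illustrative names that are not part of the Live API:

def step_offset(offset, increment, total, direction):
    """Move a window offset by +/- increment, clamped to [0, total)."""
    if direction < 0:
        return max(offset - increment, 0)
    if offset + increment < total:
        return offset + increment
    return offset

# Example: 16 tracks, window at track 14, increment of 4.
print(step_offset(14, 4, 16, +1))  # 14 -- stepping right would overshoot, stay put
print(step_offset(2, 4, 16, -1))   # 0  -- stepping left clamps at the start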
import sys, getopt, signal
from test_threads import *
from sdkcom import *
from network_delegation import *
# for local
def add_threads_dev(threads):
# Delegate, 2 threads
threads.add_threads(DelegateTxLoad.dev(2))
# UnDelegate, 2 threads
threads.add_threads(UnDelegateTxLoad.dev(2))
# WithdrawRewards, 2 threads
threads.add_threads(WithdrawRewardsTxLoad.dev(2))
# ReinvestRewards, 2 threads
threads.add_threads(ReinvestRewardsTxLoad.dev(2))
# for devnet
def add_threads_prod(threads):
# Delegate, 2 threads
threads.add_threads(DelegateTxLoad.prod(2))
# UnDelegate, 2 threads
threads.add_threads(UnDelegateTxLoad.prod(2))
# WithdrawRewards, 2 threads
threads.add_threads(WithdrawRewardsTxLoad.prod(2))
# ReinvestRewards, 2 threads
threads.add_threads(ReinvestRewardsTxLoad.prod(2))
def abort_loadtest(signal, frame):
threads.stop_threads()
sys.exit(0)
def parse_params(argv):
clean_run = False
txs_persec = TXS_PER_SEC_NORMAL
try:
opts, args = getopt.getopt(argv, "cs:", ["clean", "speed="])
except getopt.GetoptError:
print('run_tests.py [-c] [-s <speed>]')
sys.exit(-1)
for opt, arg in opts:
if opt in ("-c", "--clean"):
clean_run = True
if opt in ("-s", "--speed"):
txs_persec = arg
return clean_run, 1000 // int(txs_persec)
if __name__ == "__main__":
# parse options
clean_run, interval = parse_params(sys.argv[1:])
# configuration based on environment
if oltest == "1":
add_threads_dev(threads)
else:
add_threads_prod(threads)
# clean up test folder
if clean_run:
threads.clean()
# setup threads before run
threads.setup_threads(interval)
# run threads
signal.signal(signal.SIGINT, abort_loadtest)
threads.run_threads()
# join threads
threads.join_threads()
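parse_params turns a requested transactions-per-second rate into a per-transaction interval; with the integer division above, -s 50 yields 20 (presumably milliseconds, depending on how the thread helpers in sdkcom interpret it). A standalone Python 3 sketch of the same option handling, with hypothetical names and defaults:

import getopt

def parse_params(argv, default_txs_per_sec=100):
    clean_run = False
    txs_per_sec = default_txs_per_sec
    opts, _ = getopt.getopt(argv, "cs:", ["clean", "speed="])
    for opt, arg in opts:
        if opt in ("-c", "--clean"):
            clean_run = True
        if opt in ("-s", "--speed"):
            txs_per_sec = int(arg)
    return clean_run, 1000 // txs_per_sec

print(parse_params(["-c", "-s", "50"]))  # (True, 20)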
from functions import *
import subprocess
import os
import traceback
from sys import exit
if __name__ == '__main__':
try:
get_admin_permission()
if not os.popen("powershell.exe Get-AppXPackage MicrosoftCorporationII.WindowsSubsystemForAndroid").read().strip():
input("Windows Subsystem for Android is not installed. Press ENTER to exit.")
exit()
os.chdir(os.path.dirname(__file__))
choice = input("Uninstall Windows Subsystem for Android? [Y]es [N]o (default: no) > ")
if choice.casefold() in ["y", "yes"]:
a = subprocess.run("powershell.exe Get-AppXPackage MicrosoftCorporationII.WindowsSubsystemForAndroid |"
" Remove-AppXPackage -AllUsers")
if not a.returncode:
for _ in os.listdir("C:/Program Files/WSA_Advanced"):
remove(os.path.join("C:/Program Files/WSA_Advanced", _))
input("Windows Subsystem for Android uninstalled. Press ENTER to exit.")
else:
print("Windows Subsystem for Android failed to uninstall,"
" or has already been uninstalled, or uninstallation canceled.")
input("Press ENTER to exit.")
except Exception as e:
print(traceback.format_exc())
input("Press ENTER to exit.")