code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'window_history.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow
class History(QMainWindow):
    """History window built from 'window_history.ui' (pyuic5 output).

    Layout: a vertical layout containing a filter row (label, date edit,
    two buttons) above a read-only plain-text area.

    NOTE(review): the file header warns this is generated code; only
    comments/docstrings are added here.
    """
    def __init__(self):
        super().__init__()
        self.setObjectName("MainWindow")
        self.resize(758, 473)
        # Central widget hosting the single layout container below.
        self.centralwidget = QtWidgets.QWidget(self)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 741, 451))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        # Outer vertical layout: filter row on top, text area below.
        self.hverticalleyout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.hverticalleyout.setContentsMargins(0, 0, 0, 0)
        self.hverticalleyout.setObjectName("hverticalleyout")
        # Filter row: label | date edit | "Filter" button | "No Filter" button.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.hlabel = QtWidgets.QLabel(self.verticalLayoutWidget)
        font = QtGui.QFont()
        font.setPointSize(18)
        self.hlabel.setFont(font)
        self.hlabel.setStyleSheet("")
        self.hlabel.setAlignment(QtCore.Qt.AlignCenter)
        self.hlabel.setObjectName("hlabel")
        self.horizontalLayout.addWidget(self.hlabel)
        self.hdataedit = QtWidgets.QDateEdit(self.verticalLayoutWidget)
        self.hdataedit.setObjectName("hdataedit")
        self.horizontalLayout.addWidget(self.hdataedit)
        self.hbtn = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.hbtn.setObjectName("hbtn")
        self.horizontalLayout.addWidget(self.hbtn)
        self.hbtn1 = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.hbtn1.setObjectName("hbtn1")
        self.horizontalLayout.addWidget(self.hbtn1)
        self.hverticalleyout.addLayout(self.horizontalLayout)
        # Read-only text area that fills the rest of the window.
        self.plainTextEdit = QtWidgets.QPlainTextEdit(
            self.verticalLayoutWidget)
        self.plainTextEdit.setReadOnly(True)
        self.plainTextEdit.setObjectName("plainTextEdit")
        self.hverticalleyout.addWidget(self.plainTextEdit)
        self.setCentralWidget(self.centralwidget)
        self.retranslateUi()
        QtCore.QMetaObject.connectSlotsByName(self)
        # Window is shown immediately on construction.
        self.show()
    def retranslateUi(self):
        """Set all user-visible strings (translatable via Qt's translate)."""
        _translate = QtCore.QCoreApplication.translate
        self.setWindowTitle(_translate("MainWindow", "History"))
        self.hlabel.setText(_translate("MainWindow", "Filter"))
        self.hbtn.setText(_translate("MainWindow", "Filter"))
        self.hbtn1.setText(_translate("MainWindow", "No Filter"))
|
[
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QDateEdit",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtGui.QFont",
"PyQt5.QtWidgets.QPlainTextEdit",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtCore.QMetaObject.connectSlotsByName"
] |
[((466, 489), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self'], {}), '(self)\n', (483, 489), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((584, 621), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (601, 621), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((803, 851), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (824, 851), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1006, 1029), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1027, 1029), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1116, 1159), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (1132, 1159), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1175, 1188), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1186, 1188), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1469, 1515), 'PyQt5.QtWidgets.QDateEdit', 'QtWidgets.QDateEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (1488, 1515), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1642, 1690), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (1663, 1690), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1803, 1851), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (1824, 1851), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2037, 2088), 'PyQt5.QtWidgets.QPlainTextEdit', 'QtWidgets.QPlainTextEdit', (['self.verticalLayoutWidget'], {}), '(self.verticalLayoutWidget)\n', (2061, 2088), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2352, 2395), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', 
(['self'], {}), '(self)\n', (2389, 2395), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((668, 698), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(741)', '(451)'], {}), '(10, 10, 741, 451)\n', (680, 698), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.epoch_processing import (
run_epoch_processing_with
)
def run_process_historical_roots_update(spec, state):
    # Thin wrapper: run the named epoch-processing step through the shared
    # helper, re-yielding whatever test vectors the helper yields.
    yield from run_epoch_processing_with(spec, state, 'process_historical_roots_update')
@with_all_phases
@spec_state_test
def test_historical_root_accumulator(spec, state):
    """Epoch processing at a historical-roots period boundary appends one root."""
    # Jump to the last slot of the period (excl. the block before epoch processing).
    state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
    roots_before = len(state.historical_roots)
    yield from run_process_historical_roots_update(spec, state)
    assert len(state.historical_roots) == roots_before + 1
|
[
"eth2spec.test.helpers.epoch_processing.run_epoch_processing_with"
] |
[((223, 296), 'eth2spec.test.helpers.epoch_processing.run_epoch_processing_with', 'run_epoch_processing_with', (['spec', 'state', '"""process_historical_roots_update"""'], {}), "(spec, state, 'process_historical_roots_update')\n", (248, 296), False, 'from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with\n')]
|
import pytest
from caia.items.items_job_config import ItemsJobConfig
from caia.items.steps.get_last_timestamp import GetLastTimestamp
def test_get_last_timestamp_default_timestamp():
    """The step succeeds and returns the timestamp stored in the lookup file."""
    job_config = ItemsJobConfig(
        {
            'storage_dir': '/tmp',
            'last_success_lookup': 'tests/storage/items/items_last_success.txt'
        },
        'test'
    )
    step_result = GetLastTimestamp(job_config).execute()
    assert step_result.was_successful() is True
    assert step_result.get_result() == "202007021300"
def test_get_last_timestamp_no_timestamp_in_file():
    """A last-success file lacking a timestamp produces a failed step result."""
    job_config = ItemsJobConfig(
        {
            'storage_dir': '/tmp',
            'last_success_lookup': 'tests/storage/items/items_last_success.txt'
        },
        'test'
    )
    # Redirect "last_success_filepath" to a file with no timestamp in it.
    bad_filepath = 'tests/resources/items/no_timestamp_src_response.json'
    job_config['last_success_filepath'] = bad_filepath
    step_result = GetLastTimestamp(job_config).execute()
    assert step_result.was_successful() is False
    assert f"Could not find timestamp in {bad_filepath}" in step_result.get_errors()
def test_get_last_timestamp_bad_file():
    """A missing last-success file makes execute() raise FileNotFoundError."""
    job_config = ItemsJobConfig(
        {
            'storage_dir': '/tmp',
            'last_success_lookup': 'tests/storage/items/items_last_success.txt'
        },
        'test'
    )
    # Redirect "last_success_filepath" to a file that does not exist.
    job_config['last_success_filepath'] = 'tests/resources/items/non_existent_response.json'
    step = GetLastTimestamp(job_config)
    with pytest.raises(FileNotFoundError):
        step.execute()
|
[
"caia.items.items_job_config.ItemsJobConfig",
"caia.items.steps.get_last_timestamp.GetLastTimestamp",
"pytest.raises"
] |
[((332, 362), 'caia.items.items_job_config.ItemsJobConfig', 'ItemsJobConfig', (['config', '"""test"""'], {}), "(config, 'test')\n", (346, 362), False, 'from caia.items.items_job_config import ItemsJobConfig\n'), ((389, 417), 'caia.items.steps.get_last_timestamp.GetLastTimestamp', 'GetLastTimestamp', (['job_config'], {}), '(job_config)\n', (405, 417), False, 'from caia.items.steps.get_last_timestamp import GetLastTimestamp\n'), ((805, 835), 'caia.items.items_job_config.ItemsJobConfig', 'ItemsJobConfig', (['config', '"""test"""'], {}), "(config, 'test')\n", (819, 835), False, 'from caia.items.items_job_config import ItemsJobConfig\n'), ((1048, 1076), 'caia.items.steps.get_last_timestamp.GetLastTimestamp', 'GetLastTimestamp', (['job_config'], {}), '(job_config)\n', (1064, 1076), False, 'from caia.items.steps.get_last_timestamp import GetLastTimestamp\n'), ((1456, 1486), 'caia.items.items_job_config.ItemsJobConfig', 'ItemsJobConfig', (['config', '"""test"""'], {}), "(config, 'test')\n", (1470, 1486), False, 'from caia.items.items_job_config import ItemsJobConfig\n'), ((1645, 1673), 'caia.items.steps.get_last_timestamp.GetLastTimestamp', 'GetLastTimestamp', (['job_config'], {}), '(job_config)\n', (1661, 1673), False, 'from caia.items.steps.get_last_timestamp import GetLastTimestamp\n'), ((1684, 1716), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (1697, 1716), False, 'import pytest\n')]
|
from keras import backend as K
from keras import optimizers
from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda,Dropout
from keras.models import Model
from models.common import sse, bce, mmd, sampling, kl_regu
from keras.losses import mean_squared_error,binary_crossentropy
import numpy as np
from tensorflow import set_random_seed
class XVAE:
    """Two-branch ("X"-shaped) variational autoencoder.

    Encodes two input sources (s1, s2) into one shared latent space and
    decodes the latent code back into both sources.  The latent
    regulariser is chosen by ``args.distance``: "mmd" or "kl".

    FIX: an unrecognised ``args.distance`` previously left ``distance``
    unbound and crashed later with a NameError; it now raises a clear
    ValueError up front.  Valid configurations behave exactly as before.
    """

    def __init__(self, args):
        self.args = args
        self.vae = None      # full autoencoder; set by build_model()
        self.encoder = None  # encoder sub-model; set by build_model()

    def build_model(self):
        """Build and compile the encoder, decoder and combined VAE."""
        np.random.seed(42)
        set_random_seed(42)
        # ------------ Input -----------------
        s1_inp = Input(shape=(self.args.s1_input_size,))
        s2_inp = Input(shape=(self.args.s2_input_size,))
        inputs = [s1_inp, s2_inp]
        # ------------ Concat Layer -----------------
        x1 = Dense(self.args.ds, activation=self.args.act)(s1_inp)
        x1 = BN()(x1)
        x2 = Dense(self.args.ds, activation=self.args.act)(s2_inp)
        x2 = BN()(x2)
        x = Concatenate(axis=-1)([x1, x2])
        x = Dense(self.args.ds, activation=self.args.act)(x)
        x = BN()(x)
        # ------------ Embedding Layer --------------
        z_mean = Dense(self.args.ls, name='z_mean')(x)
        z_log_sigma = Dense(self.args.ls, name='z_log_sigma', kernel_initializer='zeros')(x)
        z = Lambda(sampling, output_shape=(self.args.ls,), name='z')([z_mean, z_log_sigma])
        self.encoder = Model(inputs, [z_mean, z_log_sigma, z], name='encoder')
        self.encoder.summary()
        # ------------ Decoder: dense trunk -----------------
        latent_inputs = Input(shape=(self.args.ls,), name='z_sampling')
        x = Dense(self.args.ds, activation=self.args.act)(latent_inputs)
        x = BN()(x)
        x = Dropout(self.args.dropout)(x)
        # ------------ Dense branches ------------
        x1 = Dense(self.args.ds, activation=self.args.act)(x)
        x1 = BN()(x1)
        x2 = Dense(self.args.ds, activation=self.args.act)(x)
        x2 = BN()(x2)
        # ------------ Out -----------------------
        s1_out = Dense(self.args.s1_input_size, activation='sigmoid')(x1)
        # Sigmoid output for 'Clin+CNA' integration (paired with BCE loss
        # below); linear output otherwise (paired with MSE loss).
        if self.args.integration == 'Clin+CNA':
            s2_out = Dense(self.args.s2_input_size, activation='sigmoid')(x2)
        else:
            s2_out = Dense(self.args.s2_input_size)(x2)
        decoder = Model(latent_inputs, [s1_out, s2_out], name='decoder')
        decoder.summary()
        outputs = decoder(self.encoder(inputs)[2])
        self.vae = Model(inputs, outputs, name='vae_x')
        # Latent regulariser, selected by args.distance.
        if self.args.distance == "mmd":
            true_samples = K.random_normal(K.stack([self.args.bs, self.args.ls]))
            distance = mmd(true_samples, z)
        elif self.args.distance == "kl":
            distance = kl_regu(z_mean, z_log_sigma)
        else:
            # Was: silent fall-through -> NameError on 'distance'.
            raise ValueError(
                "Unsupported distance '{0}' (expected 'mmd' or 'kl')".format(self.args.distance))
        # Reconstruction losses mirror the output activations chosen above.
        s1_loss = binary_crossentropy(inputs[0], outputs[0])
        if self.args.integration == 'Clin+CNA':
            s2_loss = binary_crossentropy(inputs[1], outputs[1])
        else:
            s2_loss = mean_squared_error(inputs[1], outputs[1])
        reconstruction_loss = s1_loss + s2_loss
        vae_loss = K.mean(reconstruction_loss + self.args.beta * distance)
        self.vae.add_loss(vae_loss)
        adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, amsgrad=False, decay=0.001)
        self.vae.compile(optimizer=adam, metrics=[mean_squared_error, mean_squared_error])
        self.vae.summary()

    def train(self, s1_train, s2_train, s1_test, s2_test):
        """Fit the VAE on paired (s1, s2) data; optionally save weights."""
        self.vae.fit([s1_train, s2_train], epochs=self.args.epochs, batch_size=self.args.bs, shuffle=True,
                     validation_data=([s1_test, s2_test], None))
        if self.args.save_model:
            self.vae.save_weights('./models/vae_xvae.h5')

    def predict(self, s1_data, s2_data):
        """Return the latent means (z_mean, encoder output 0) for paired data."""
        return self.encoder.predict([s1_data, s2_data], batch_size=self.args.bs)[0]
|
[
"keras.backend.stack",
"numpy.random.seed",
"keras.losses.binary_crossentropy",
"keras.layers.Dropout",
"keras.optimizers.Adam",
"keras.models.Model",
"tensorflow.set_random_seed",
"keras.layers.Concatenate",
"keras.layers.Dense",
"keras.backend.mean",
"keras.layers.Lambda",
"models.common.kl_regu",
"keras.layers.Input",
"models.common.mmd",
"keras.layers.BatchNormalization",
"keras.losses.mean_squared_error"
] |
[((531, 549), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (545, 549), True, 'import numpy as np\n'), ((558, 577), 'tensorflow.set_random_seed', 'set_random_seed', (['(42)'], {}), '(42)\n', (573, 577), False, 'from tensorflow import set_random_seed\n'), ((678, 717), 'keras.layers.Input', 'Input', ([], {'shape': '(self.args.s1_input_size,)'}), '(shape=(self.args.s1_input_size,))\n', (683, 717), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((735, 774), 'keras.layers.Input', 'Input', ([], {'shape': '(self.args.s2_input_size,)'}), '(shape=(self.args.s2_input_size,))\n', (740, 774), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1488, 1543), 'keras.models.Model', 'Model', (['inputs', '[z_mean, z_log_sigma, z]'], {'name': '"""encoder"""'}), "(inputs, [z_mean, z_log_sigma, z], name='encoder')\n", (1493, 1543), False, 'from keras.models import Model\n'), ((1687, 1734), 'keras.layers.Input', 'Input', ([], {'shape': '(self.args.ls,)', 'name': '"""z_sampling"""'}), "(shape=(self.args.ls,), name='z_sampling')\n", (1692, 1734), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2459, 2513), 'keras.models.Model', 'Model', (['latent_inputs', '[s1_out, s2_out]'], {'name': '"""decoder"""'}), "(latent_inputs, [s1_out, s2_out], name='decoder')\n", (2464, 2513), False, 'from keras.models import Model\n'), ((2611, 2647), 'keras.models.Model', 'Model', (['inputs', 'outputs'], {'name': '"""vae_x"""'}), "(inputs, outputs, name='vae_x')\n", (2616, 2647), False, 'from keras.models import Model\n'), ((2951, 2993), 'keras.losses.binary_crossentropy', 'binary_crossentropy', (['inputs[0]', 'outputs[0]'], {}), '(inputs[0], outputs[0])\n', (2970, 2993), False, 'from keras.losses import mean_squared_error, binary_crossentropy\n'), ((3267, 3322), 'keras.backend.mean', 'K.mean', 
(['(reconstruction_loss + self.args.beta * distance)'], {}), '(reconstruction_loss + self.args.beta * distance)\n', (3273, 3322), True, 'from keras import backend as K\n'), ((3375, 3473), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': 'None', 'amsgrad': '(False)', 'decay': '(0.001)'}), '(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, amsgrad=\n False, decay=0.001)\n', (3390, 3473), False, 'from keras import optimizers\n'), ((877, 922), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (882, 922), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((944, 948), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (946, 948), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((967, 1012), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (972, 1012), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1034, 1038), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (1036, 1038), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1056, 1076), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (1067, 1076), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1100, 1145), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (1105, 1145), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1161, 1165), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (1163, 1165), True, 'from 
keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1241, 1275), 'keras.layers.Dense', 'Dense', (['self.args.ls'], {'name': '"""z_mean"""'}), "(self.args.ls, name='z_mean')\n", (1246, 1275), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1301, 1368), 'keras.layers.Dense', 'Dense', (['self.args.ls'], {'name': '"""z_log_sigma"""', 'kernel_initializer': '"""zeros"""'}), "(self.args.ls, name='z_log_sigma', kernel_initializer='zeros')\n", (1306, 1368), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1384, 1440), 'keras.layers.Lambda', 'Lambda', (['sampling'], {'output_shape': '(self.args.ls,)', 'name': '"""z"""'}), "(sampling, output_shape=(self.args.ls,), name='z')\n", (1390, 1440), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1773, 1818), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (1778, 1818), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1834, 1838), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (1836, 1838), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1861, 1887), 'keras.layers.Dropout', 'Dropout', (['self.args.dropout'], {}), '(self.args.dropout)\n', (1868, 1887), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((1955, 2000), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (1960, 2000), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2017, 2021), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (2019, 
2021), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2039, 2084), 'keras.layers.Dense', 'Dense', (['self.args.ds'], {'activation': 'self.args.act'}), '(self.args.ds, activation=self.args.act)\n', (2044, 2084), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2101, 2105), 'keras.layers.BatchNormalization', 'BN', ([], {}), '()\n', (2103, 2105), True, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2179, 2231), 'keras.layers.Dense', 'Dense', (['self.args.s1_input_size'], {'activation': '"""sigmoid"""'}), "(self.args.s1_input_size, activation='sigmoid')\n", (2184, 2231), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2794, 2814), 'models.common.mmd', 'mmd', (['true_samples', 'z'], {}), '(true_samples, z)\n', (2797, 2814), False, 'from models.common import sse, bce, mmd, sampling, kl_regu\n'), ((2877, 2905), 'models.common.kl_regu', 'kl_regu', (['z_mean', 'z_log_sigma'], {}), '(z_mean, z_log_sigma)\n', (2884, 2905), False, 'from models.common import sse, bce, mmd, sampling, kl_regu\n'), ((3064, 3106), 'keras.losses.binary_crossentropy', 'binary_crossentropy', (['inputs[1]', 'outputs[1]'], {}), '(inputs[1], outputs[1])\n', (3083, 3106), False, 'from keras.losses import mean_squared_error, binary_crossentropy\n'), ((3142, 3183), 'keras.losses.mean_squared_error', 'mean_squared_error', (['inputs[1]', 'outputs[1]'], {}), '(inputs[1], outputs[1])\n', (3160, 3183), False, 'from keras.losses import mean_squared_error, binary_crossentropy\n'), ((2314, 2366), 'keras.layers.Dense', 'Dense', (['self.args.s2_input_size'], {'activation': '"""sigmoid"""'}), "(self.args.s2_input_size, activation='sigmoid')\n", (2319, 2366), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2405, 2435), 
'keras.layers.Dense', 'Dense', (['self.args.s2_input_size'], {}), '(self.args.s2_input_size)\n', (2410, 2435), False, 'from keras.layers import BatchNormalization as BN, Concatenate, Dense, Input, Lambda, Dropout\n'), ((2732, 2769), 'keras.backend.stack', 'K.stack', (['[self.args.bs, self.args.ls]'], {}), '([self.args.bs, self.args.ls])\n', (2739, 2769), True, 'from keras import backend as K\n')]
|
from pathlib import Path
import pytest
from code_scanner.enums import FileType
from code_scanner.file_info import FileInfo
@pytest.fixture
def file_info():
    """Fixture: a FileInfo for a sample python source file."""
    path = Path('/tmp/test.py')
    return FileInfo(path, FileType.SOURCE_CODE)
@pytest.fixture
def another_file_info():
    """Fixture: a second, independently constructed FileInfo for the same path."""
    path = Path('/tmp/test.py')
    return FileInfo(path, FileType.SOURCE_CODE)
def test_file_info_funcs(file_info, another_file_info):
    """Equal FileInfo objects share value semantics but not identity."""
    assert file_info.full_name == Path('/tmp/test.py')
    assert str(file_info) == 'SOURCE_CODE-/tmp/test.py'
    assert file_info == another_file_info
    assert hash(file_info) == hash(another_file_info)
    # 'is' tests identity, so only the very same object passes.
    assert file_info is file_info
    assert repr(file_info) == repr(another_file_info)
    # Containment relies on __eq__, so an equal object is found.
    assert file_info in [another_file_info]
    # Equal but distinct objects: their ids differ.
    with pytest.raises(AssertionError):
        assert id(file_info) == id(another_file_info)
@pytest.mark.parametrize("updated_name,expected_full_name", [
    ('/tmp/new.py', 'SOURCE_CODE-/tmp/new.py'),
    ('/tmp/tests.py', 'SOURCE_CODE-/tmp/tests.py')
])
def test_update_file_name(file_info, updated_name, expected_full_name):
    # Reassigning full_name is reflected in str(); the file type prefix stays.
    file_info.full_name = updated_name
    assert str(file_info) == expected_full_name
|
[
"pytest.raises",
"pytest.mark.parametrize",
"pathlib.Path"
] |
[((1003, 1164), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""updated_name,expected_full_name"""', "[('/tmp/new.py', 'SOURCE_CODE-/tmp/new.py'), ('/tmp/tests.py',\n 'SOURCE_CODE-/tmp/tests.py')]"], {}), "('updated_name,expected_full_name', [('/tmp/new.py',\n 'SOURCE_CODE-/tmp/new.py'), ('/tmp/tests.py', 'SOURCE_CODE-/tmp/tests.py')]\n )\n", (1026, 1164), False, 'import pytest\n'), ((232, 252), 'pathlib.Path', 'Path', (['"""/tmp/test.py"""'], {}), "('/tmp/test.py')\n", (236, 252), False, 'from pathlib import Path\n'), ((397, 417), 'pathlib.Path', 'Path', (['"""/tmp/test.py"""'], {}), "('/tmp/test.py')\n", (401, 417), False, 'from pathlib import Path\n'), ((533, 553), 'pathlib.Path', 'Path', (['"""/tmp/test.py"""'], {}), "('/tmp/test.py')\n", (537, 553), False, 'from pathlib import Path\n'), ((915, 944), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (928, 944), False, 'import pytest\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
from DictLearner import DictLearner
import scipy.sparse.linalg
"""The inference code was adapted from <NAME>'s sparsenet implementation,
available on github."""
class FISTALearner(DictLearner):
    """Dictionary learner whose sparse coefficients are inferred with FISTA.

    FIX: ``__init__`` previously hard-coded ``self.lam = 0.4``, silently
    ignoring the ``lam`` argument; it now honours the parameter (default
    unchanged at 0.4, so existing callers behave identically).
    """

    def __init__(self, data, learnrate, nunits, lam=0.4, niter=100, **kwargs):
        self.lam = lam          # was: self.lam = 0.4 (parameter ignored)
        self.niter = niter
        super().__init__(data, learnrate, nunits, **kwargs)

    def infer(self, data, max_iterations=None, display=False):
        """FISTA inference for the lasso (l1) problem.

        data: batch of data (dim x batch)
        max_iterations: overrides self.niter when given
        display: if truthy, print the l1 objective each iteration
        Returns (coefficients, 0, 0); the trailing zeros keep the
        return shape of other learners' infer().
        """
        lambdav = self.lam

        def prox_l1(v, t):
            """L1 proximal operator (soft-thresholding at t)."""
            return np.fmax(v - t, 0) + np.fmin(v + t, 0)

        x = np.zeros((self.Q.shape[0], data.shape[1]))
        c = self.Q.dot(self.Q.T)
        b = -2 * self.Q.dot(data)
        # Step size 1/L with L the largest eigenvalue of 2*Q*Q^T
        # (Lipschitz constant of the smooth part's gradient).
        L = scipy.sparse.linalg.eigsh(2 * c, 1, which='LM')[0]
        invL = 1 / float(L)
        y = x
        t = 1
        max_iterations = max_iterations or self.niter
        for i in range(max_iterations):
            g = 2 * c.dot(y) + b
            x2 = prox_l1(y - invL * g, invL * lambdav)
            # Nesterov momentum update of t and the extrapolation point y.
            t2 = (1 + np.sqrt(1 + 4 * (t ** 2))) / 2.0
            y = x2 + ((t - 1) / t2) * (x2 - x)
            x = x2
            t = t2
            if display:
                print("L1 Objective " + str(np.sum((data - self.Q.T.dot(x2)) ** 2) + lambdav * np.sum(np.abs(x2))))
        return x2, 0, 0
|
[
"numpy.fmin",
"numpy.fmax",
"numpy.abs",
"numpy.zeros",
"numpy.sqrt"
] |
[((940, 982), 'numpy.zeros', 'np.zeros', (['(self.Q.shape[0], data.shape[1])'], {}), '((self.Q.shape[0], data.shape[1]))\n', (948, 982), True, 'import numpy as np\n'), ((889, 906), 'numpy.fmax', 'np.fmax', (['(x - t)', '(0)'], {}), '(x - t, 0)\n', (896, 906), True, 'import numpy as np\n'), ((907, 924), 'numpy.fmin', 'np.fmin', (['(x + t)', '(0)'], {}), '(x + t, 0)\n', (914, 924), True, 'import numpy as np\n'), ((1356, 1379), 'numpy.sqrt', 'np.sqrt', (['(1 + 4 * t ** 2)'], {}), '(1 + 4 * t ** 2)\n', (1363, 1379), True, 'import numpy as np\n'), ((1571, 1581), 'numpy.abs', 'np.abs', (['x2'], {}), '(x2)\n', (1577, 1581), True, 'import numpy as np\n')]
|
from rest_framework import serializers
from drf_queryfields import QueryFieldsMixin
from django.db.models import Sum, F, DecimalField
import decimal
import database.models as models
# Customer
class CustomerSerializer(QueryFieldsMixin, serializers.ModelSerializer):
    # Plain model serializer; QueryFieldsMixin lets callers restrict the
    # returned fields via query parameters.
    class Meta:
        model = models.Customer
        fields = '__all__'
# Source
class SourceSerializer(QueryFieldsMixin, serializers.ModelSerializer):
    # Read-only aggregate; presumably annotated onto the queryset by the
    # view — verify against the view code.
    total_value = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    class Meta:
        model = models.Source
        fields = '__all__'
# Category
class CategorySerializer(QueryFieldsMixin, serializers.ModelSerializer):
    # Read-only aggregate, same pattern as SourceSerializer.total_value.
    total_value = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    class Meta:
        model = models.Category
        fields = '__all__'
# Product
class ProductSerializer(QueryFieldsMixin, serializers.ModelSerializer):
    class Meta:
        model = models.Product
        fields = '__all__'
# Supplier
class SupplierSerializer(QueryFieldsMixin, serializers.ModelSerializer):
    class Meta:
        model = models.Supplier
        fields = '__all__'
# Credit Payments
class InvoiceCreditPaymentSerializer(serializers.ModelSerializer):
    """Serializes a credit payment and rejects payments that overpay the invoice."""
    class Meta:
        model = models.InvoiceCreditPayment
        fields = ('invoice', 'payment', 'date_of_payment',)
    def validate(self, data):
        invoice = data["invoice"]
        # A missing/zero payments_total counts as nothing paid yet.
        paid_so_far = invoice.payments_total or decimal.Decimal(0.0)
        if not invoice.credit:
            raise serializers.ValidationError(
                "Invoice {0} is not a credit invoice.".format(invoice.id))
        if paid_so_far >= invoice.invoice_total:
            raise serializers.ValidationError(
                "Invoice {0} is already fully paid.".format(invoice.id))
        # The new payment may not exceed what is still owed.
        max_payment = invoice.invoice_total - paid_so_far
        if decimal.Decimal(data["payment"]) > max_payment:
            raise serializers.ValidationError(
                "Payment must be less than or equal to {0}.".format(max_payment))
        return data
# Invoice
class InvoiceProductSerializer(serializers.ModelSerializer):
    # Nested line-item serializer; invoice/id are supplied by the parent.
    class Meta:
        model = models.InvoiceProduct
        exclude = ('invoice', 'id',)
class InvoiceSerializer(QueryFieldsMixin, serializers.ModelSerializer):
    # Writable nested line items; totals are read-only aggregates
    # (presumably annotated by the view's queryset — verify there).
    products = InvoiceProductSerializer(many=True)
    invoice_total = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    payments_total = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    class Meta:
        model = models.Invoice
        fields = '__all__'
    @staticmethod
    def setup_eager_loading(queryset):
        # Prefetch the nested products relation in one extra query.
        queryset = queryset.prefetch_related('products')
        return queryset
    def validate(self, data):
        # Per-line checks are best-effort: a line missing the checked keys
        # is tolerated (KeyError is deliberately swallowed).
        products = data["products"]
        if not products:
            raise serializers.ValidationError("no products in invoice.")
        for product in products:
            try:
                if product["sell_price"] <= 0.0 or product["quantity"] <= 0:
                    raise serializers.ValidationError("Sell price and quantity must be greater than 0.")
            except KeyError:
                pass
            try:
                if product["returned_quantity"] > product["quantity"]:
                    raise serializers.ValidationError("Return quantity must be less than quantity sold.")
            except KeyError:
                pass
        return data
    def update(self, instance, validated_data):
        # Update path only edits returned_quantity per line, then adjusts
        # product stock by the delta from the previously stored value.
        products = validated_data.pop('products')
        for product in products:
            invoice_product = models.InvoiceProduct.objects.get(invoice=instance.id, product=product["product"])
            original_returned_quantity = invoice_product.returned_quantity
            invoice_product.returned_quantity = product["returned_quantity"]
            invoice_product.save(update_fields=["returned_quantity"])
            product_object = models.Product.objects.get(id=product["product"].id)
            product_object.stock += (product["returned_quantity"] - original_returned_quantity)
            product_object.save(update_fields=["stock"])
        return instance
    def create(self, validated_data):
        # Create the invoice, then each line item with the product's current
        # cost_price snapshotted, decrementing product stock by the quantity.
        products = validated_data.pop('products')
        invoice = models.Invoice.objects.create(**validated_data)
        for product in products:
            product_object = models.Product.objects.get(id=product["product"].id)
            models.InvoiceProduct.objects.create(invoice=invoice, cost_price=product_object.cost_price, **product)
            product_object.stock -= product["quantity"]
            product_object.save(update_fields=["stock"])
        return invoice
# Sales total
class SalesTotalSerializer(serializers.Serializer):
year = serializers.IntegerField(read_only=True, required=False)
month = serializers.IntegerField(read_only=True, required=False)
day = serializers.IntegerField(read_only=True, required=False)
sales = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
profit = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
# Category and source sales
class SalesCategorySourceSerializer(serializers.Serializer):
requested_type = serializers.IntegerField(read_only=True)
sales = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
profit = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
units = serializers.IntegerField(read_only=True)
customer = serializers.IntegerField(read_only=True, required=False)
year = serializers.IntegerField(read_only=True, required=False)
month = serializers.IntegerField(read_only=True, required=False)
day = serializers.IntegerField(read_only=True, required=False)
# Product sales total
class SalesProductsSerializer(serializers.Serializer):
sales = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
profit = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
units = serializers.IntegerField(read_only=True)
product = serializers.IntegerField(read_only=True, required=False)
customer = serializers.IntegerField(read_only=True, required=False)
year = serializers.IntegerField(read_only=True, required=False)
month = serializers.IntegerField(read_only=True, required=False)
day = serializers.IntegerField(read_only=True, required=False)
# Supplier sales total
class SalesSuppliersSerializer(serializers.Serializer):
sales = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
profit = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
units = serializers.IntegerField(read_only=True)
supplier = serializers.IntegerField(read_only=True, required=False)
customer = serializers.IntegerField(read_only=True, required=False)
year = serializers.IntegerField(read_only=True, required=False)
month = serializers.IntegerField(read_only=True, required=False)
day = serializers.IntegerField(read_only=True, required=False)
# Customer sales total
class SalesCustomersSerializer(serializers.Serializer):
    """Read-only sales/profit totals per customer."""
    # Monetary totals (up to 15 digits, 3 decimal places).
    sales = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    profit = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
    # Number of units in the group.
    units = serializers.IntegerField(read_only=True)
    # Optional grouping keys; only present when the data is grouped by them.
    customer = serializers.IntegerField(read_only=True, required=False)
    year = serializers.IntegerField(read_only=True, required=False)
    month = serializers.IntegerField(read_only=True, required=False)
    day = serializers.IntegerField(read_only=True, required=False)
# Cashflow total
class CashflowTotalSerializer(serializers.Serializer):
    """Cash-flow total for a period, keyed by type and optional date parts."""
    # NOTE(review): unlike every other field here, `type` is not read_only —
    # confirm whether it is really meant to accept input.
    type = serializers.CharField()
    # Optional grouping keys; only present when the data is grouped by them.
    year = serializers.IntegerField(read_only=True, required=False)
    month = serializers.IntegerField(read_only=True, required=False)
    day = serializers.IntegerField(read_only=True, required=False)
    # Cash total (up to 15 digits, 3 decimal places).
    cash = serializers.DecimalField(max_digits=15, decimal_places=3, read_only=True)
# Stock history
class StockSoldTotalSerializer(serializers.Serializer):
    """Read-only quantity of a product sold, grouped per month."""
    product = serializers.IntegerField(read_only=True)
    month = serializers.IntegerField(read_only=True)
    quantity = serializers.IntegerField(read_only=True)
|
[
"decimal.Decimal",
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.CharField",
"database.models.InvoiceProduct.objects.get",
"database.models.Invoice.objects.create",
"database.models.Product.objects.get",
"database.models.InvoiceProduct.objects.create",
"rest_framework.serializers.DecimalField",
"rest_framework.serializers.ValidationError"
] |
[((446, 519), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (470, 519), False, 'from rest_framework import serializers\n'), ((698, 771), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (722, 771), False, 'from rest_framework import serializers\n'), ((2405, 2478), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (2429, 2478), False, 'from rest_framework import serializers\n'), ((2500, 2573), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (2524, 2573), False, 'from rest_framework import serializers\n'), ((4836, 4892), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (4860, 4892), False, 'from rest_framework import serializers\n'), ((4905, 4961), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (4929, 4961), False, 'from rest_framework import serializers\n'), ((4972, 5028), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (4996, 5028), False, 'from rest_framework import serializers\n'), ((5041, 5114), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': 
'(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (5065, 5114), False, 'from rest_framework import serializers\n'), ((5128, 5201), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (5152, 5201), False, 'from rest_framework import serializers\n'), ((5314, 5354), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (5338, 5354), False, 'from rest_framework import serializers\n'), ((5367, 5440), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (5391, 5440), False, 'from rest_framework import serializers\n'), ((5454, 5527), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (5478, 5527), False, 'from rest_framework import serializers\n'), ((5540, 5580), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (5564, 5580), False, 'from rest_framework import serializers\n'), ((5596, 5652), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (5620, 5652), False, 'from rest_framework import serializers\n'), ((5664, 5720), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (5688, 5720), False, 'from rest_framework import serializers\n'), ((5733, 5789), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], 
{'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (5757, 5789), False, 'from rest_framework import serializers\n'), ((5800, 5856), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (5824, 5856), False, 'from rest_framework import serializers\n'), ((5948, 6021), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (5972, 6021), False, 'from rest_framework import serializers\n'), ((6035, 6108), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (6059, 6108), False, 'from rest_framework import serializers\n'), ((6121, 6161), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (6145, 6161), False, 'from rest_framework import serializers\n'), ((6176, 6232), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (6200, 6232), False, 'from rest_framework import serializers\n'), ((6248, 6304), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (6272, 6304), False, 'from rest_framework import serializers\n'), ((6316, 6372), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (6340, 6372), False, 'from rest_framework import serializers\n'), ((6385, 6441), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], 
{'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (6409, 6441), False, 'from rest_framework import serializers\n'), ((6452, 6508), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (6476, 6508), False, 'from rest_framework import serializers\n'), ((6602, 6675), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (6626, 6675), False, 'from rest_framework import serializers\n'), ((6689, 6762), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (6713, 6762), False, 'from rest_framework import serializers\n'), ((6775, 6815), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (6799, 6815), False, 'from rest_framework import serializers\n'), ((6831, 6887), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (6855, 6887), False, 'from rest_framework import serializers\n'), ((6903, 6959), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (6927, 6959), False, 'from rest_framework import serializers\n'), ((6971, 7027), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (6995, 7027), False, 'from rest_framework import serializers\n'), ((7040, 7096), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], 
{'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (7064, 7096), False, 'from rest_framework import serializers\n'), ((7107, 7163), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (7131, 7163), False, 'from rest_framework import serializers\n'), ((7257, 7330), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (7281, 7330), False, 'from rest_framework import serializers\n'), ((7344, 7417), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (7368, 7417), False, 'from rest_framework import serializers\n'), ((7430, 7470), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (7454, 7470), False, 'from rest_framework import serializers\n'), ((7486, 7542), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (7510, 7542), False, 'from rest_framework import serializers\n'), ((7554, 7610), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (7578, 7610), False, 'from rest_framework import serializers\n'), ((7623, 7679), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (7647, 7679), False, 'from rest_framework import serializers\n'), ((7690, 7746), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], 
{'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (7714, 7746), False, 'from rest_framework import serializers\n'), ((7832, 7855), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (7853, 7855), False, 'from rest_framework import serializers\n'), ((7867, 7923), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (7891, 7923), False, 'from rest_framework import serializers\n'), ((7936, 7992), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (7960, 7992), False, 'from rest_framework import serializers\n'), ((8003, 8059), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)', 'required': '(False)'}), '(read_only=True, required=False)\n', (8027, 8059), False, 'from rest_framework import serializers\n'), ((8071, 8144), 'rest_framework.serializers.DecimalField', 'serializers.DecimalField', ([], {'max_digits': '(15)', 'decimal_places': '(3)', 'read_only': '(True)'}), '(max_digits=15, decimal_places=3, read_only=True)\n', (8095, 8144), False, 'from rest_framework import serializers\n'), ((8233, 8273), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (8257, 8273), False, 'from rest_framework import serializers\n'), ((8286, 8326), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (8310, 8326), False, 'from rest_framework import serializers\n'), ((8342, 8382), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (8366, 8382), False, 'from rest_framework import serializers\n'), ((4339, 4386), 
'database.models.Invoice.objects.create', 'models.Invoice.objects.create', ([], {}), '(**validated_data)\n', (4368, 4386), True, 'import database.models as models\n'), ((1523, 1543), 'decimal.Decimal', 'decimal.Decimal', (['(0.0)'], {}), '(0.0)\n', (1538, 1543), False, 'import decimal\n'), ((1914, 1946), 'decimal.Decimal', 'decimal.Decimal', (["data['payment']"], {}), "(data['payment'])\n", (1929, 1946), False, 'import decimal\n'), ((2899, 2953), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""no products in invoice."""'], {}), "('no products in invoice.')\n", (2926, 2953), False, 'from rest_framework import serializers\n'), ((3666, 3753), 'database.models.InvoiceProduct.objects.get', 'models.InvoiceProduct.objects.get', ([], {'invoice': 'instance.id', 'product': "product['product']"}), "(invoice=instance.id, product=product[\n 'product'])\n", (3699, 3753), True, 'import database.models as models\n'), ((4001, 4053), 'database.models.Product.objects.get', 'models.Product.objects.get', ([], {'id': "product['product'].id"}), "(id=product['product'].id)\n", (4027, 4053), True, 'import database.models as models\n'), ((4450, 4502), 'database.models.Product.objects.get', 'models.Product.objects.get', ([], {'id': "product['product'].id"}), "(id=product['product'].id)\n", (4476, 4502), True, 'import database.models as models\n'), ((4516, 4623), 'database.models.InvoiceProduct.objects.create', 'models.InvoiceProduct.objects.create', ([], {'invoice': 'invoice', 'cost_price': 'product_object.cost_price'}), '(invoice=invoice, cost_price=\n product_object.cost_price, **product)\n', (4552, 4623), True, 'import database.models as models\n'), ((3108, 3186), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""Sell price and quantity must be greater than 0."""'], {}), "('Sell price and quantity must be greater than 0.')\n", (3135, 3186), False, 'from rest_framework import serializers\n'), ((3352, 3431), 
'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""Return quantity must be less than quantity sold."""'], {}), "('Return quantity must be less than quantity sold.')\n", (3379, 3431), False, 'from rest_framework import serializers\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import calendar
import datetime
import re
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from django.contrib.auth.decorators import login_required
from django.db.models import Sum, Q
from django.http.response import HttpResponse, Http404, JsonResponse
from django.shortcuts import render
from django.template import loader
from djanban.apps.base.auth import get_user_boards, user_is_member
from djanban.apps.boards.models import Label, Board
from djanban.apps.dev_times.models import DailySpentTime
from djanban.apps.members.models import Member
from django.template.loader import get_template
from djanban.apps.multiboards.models import Multiboard
# View spent time report
@login_required
def view_daily_spent_times(request):
    """Render the spent-time report; use the per-board template when a board is selected."""
    try:
        parameters = _get_daily_spent_times_replacements(request)
    except (Multiboard.DoesNotExist, Board.DoesNotExist):
        raise Http404
    replacements = parameters["replacements"]
    template = "daily_spent_times/list.html"
    if replacements.get("board"):
        template = "daily_spent_times/list_by_board.html"
    return render(request, template, replacements)
# Export daily spent report in CSV format
@login_required
def export_daily_spent_times(request):
    """Export the daily spent times selected by the request as a CSV attachment."""
    spent_times = _get_daily_spent_times_from_request(request)
    # Interval covered by the export; used only to build the file name.
    start_date = spent_times["start_date"]
    end_date = spent_times["end_date"]
    # Prefix the file name with the multiboard or board name when one is set
    # (a board filter takes precedence over a multiboard filter).
    name_str = ""
    if spent_times.get("multiboard"):
        name_str = (u"mb-{0}-".format(spent_times["multiboard"].name)).lower()
    if spent_times.get("board"):
        name_str = (u"{0}-".format(spent_times["board"].name)).lower()
    # HTTP response whose body is the rendered CSV template.
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="{0}export-daily-spent-times-from-{1}-to-{2}.csv"'.format(
        name_str, start_date.strftime("%Y-%m-%d"), end_date.strftime("%Y-%m-%d")
    )
    csv_body = loader.get_template('daily_spent_times/csv.txt').render({'spent_times': spent_times["all"]})
    response.write(csv_body)
    return response
# Send the daily spent-time report by email (text/HTML body plus CSV attachment)
@login_required
def send_daily_spent_times(request):
    """Email a daily spent-time report (text + HTML body and a CSV attachment).

    Expects a POST with an "email" recipient and optional filters:
    start_date, end_date, week, multiboard, label, member.
    Returns JSON when called with ?ajax=..., otherwise renders a result page.
    Raises Http404 for non-POST requests.
    """
    if request.method != "POST":
        raise Http404
    current_user = request.user
    current_user_boards = get_user_boards(current_user)
    recipient_email = request.POST.get("email")
    # Bug fix: guard against a missing "email" parameter — re.match raises
    # TypeError when given None.
    if not recipient_email or not re.match(r"[^@]+@[^@]+", recipient_email):
        return JsonResponse({"message": "Invalid email"})
    daily_spent_times_filter = {}
    # Start date
    start_date_str = request.POST.get("start_date")
    start_date = None
    if start_date_str:
        try:
            start_date = datetime.datetime.strptime(start_date_str, "%Y-%m-%d")
            daily_spent_times_filter["date__gte"] = start_date
        except ValueError:
            start_date = None
    # End date
    end_date_str = request.POST.get("end_date")
    end_date = None
    if end_date_str:
        try:
            end_date = datetime.datetime.strptime(end_date_str, "%Y-%m-%d")
            daily_spent_times_filter["date__lte"] = end_date
        except ValueError:
            end_date = None
    # Week. Bug fix: POST values are strings, so the old `week > 0`
    # comparison raised TypeError on Python 3 — parse to int first.
    week = None
    week_str = request.POST.get('week')
    if week_str:
        try:
            week = int(week_str)
        except ValueError:
            week = None
        else:
            if week <= 0:
                week = None
    if week:
        # NOTE(review): this filters on "week" while
        # _get_daily_spent_times_queryset filters on "week_of_year" —
        # confirm which is the actual model field name.
        daily_spent_times_filter["week"] = week
    # Default filter is None
    multiboard = None
    label = None
    board = None
    # Filter spent time by multiboard
    multiboard_str = request.POST.get("multiboard")
    if multiboard_str and hasattr(current_user, "member") and\
            current_user.member.multiboards.filter(id=multiboard_str).exists():
        multiboard = current_user.member.multiboards.get(id=multiboard_str)
        daily_spent_times_filter["board__multiboards"] = multiboard
    # Filter spent time by label or board
    else:
        # Label: "all_from_board_<id>" is a pseudo-label meaning "whole board".
        label_str = request.POST.get("label")
        # Bug fix: "label" may be absent; re.match(pattern, None) raises.
        matches = re.match(r"all_from_board_(?P<board_id>\d+)", label_str) if label_str else None
        if matches and current_user_boards.filter(id=matches.group("board_id")).exists():
            label = None
            board = current_user_boards.get(id=matches.group("board_id"))
            daily_spent_times_filter["board"] = board
        elif label_str and Label.objects.filter(id=label_str).exists():
            label = Label.objects.get(id=label_str)
            board = label.board
            daily_spent_times_filter["board"] = board
            daily_spent_times_filter["card__labels"] = label
    # Member: restrict the choice to members of the current user's boards
    # (plus the user's own member record when the user is a member).
    member = None
    if user_is_member(current_user):
        current_user_members = Member.objects.filter(Q(boards__in=current_user_boards)|Q(id=current_user.member.id)).distinct()
    else:
        current_user_members = Member.objects.filter(boards__in=current_user_boards).distinct()
    if request.POST.get("member") and current_user_members.filter(id=request.POST.get("member")).exists():
        member = current_user_members.get(id=request.POST.get("member"))
        daily_spent_times_filter["member"] = member
    daily_spent_times = DailySpentTime.objects.filter(**daily_spent_times_filter)
    replacements = {
        "email": recipient_email,
        "daily_spent_times": daily_spent_times,
        "week": week,
        "start_date": start_date,
        "end_date": end_date,
        "label": label,
        "board": board,
        "multiboard": multiboard,
        "member": member
    }
    # Render subject, plain-text and HTML bodies, and the CSV attachment.
    report_subject = get_template('daily_spent_times/emails/send_daily_spent_times_subject.txt').render(replacements)
    txt_message = get_template("daily_spent_times/emails/send_daily_spent_times.txt").render(replacements)
    html_message = get_template("daily_spent_times/emails/send_daily_spent_times.html").render(replacements)
    csv_report = get_template('daily_spent_times/csv.txt').render({"spent_times": daily_spent_times})
    csv_file_name = "custom_report_for_{0}.csv".format(recipient_email)
    try:
        message = EmailMultiAlternatives(report_subject, txt_message, settings.EMAIL_HOST_USER, [recipient_email])
        message.attach_alternative(html_message, "text/html")
        message.attach(csv_file_name, csv_report, 'text/csv')
        message.send()
        if request.GET.get("ajax"):
            return JsonResponse({"message": "Spent times sent successfully"})
        return render(request, "daily_spent_times/send_daily_spent_times_ok.html", replacements)
    except Exception:
        if request.GET.get("ajax"):
            return JsonResponse({"message": "Error when sending data"}, status=500)
        return render(request, "daily_spent_times/send_daily_spent_times_error.html", replacements)
# Return the filtered queryset and the replacements given the GET parameters
def _get_daily_spent_times_replacements(request):
    """Build the filtered queryset and template replacements from GET parameters.

    Reads member_id, start_date, end_date, multiboard_id/multiboard,
    label_id/label, board_id/board and week from request.GET.
    Returns {"queryset": <spent times>, "replacements": <template context>}.
    May raise Member.DoesNotExist / Multiboard.DoesNotExist / Label.DoesNotExist
    for ids that do not match (callers handle these as 404).
    """
    selected_member_id = request.GET.get("member_id")
    selected_member = None
    if selected_member_id:
        selected_member = Member.objects.get(id=selected_member_id)
    spent_times = _get_daily_spent_times_from_request(request)
    replacements = {
        "multiboards": request.user.member.multiboards.all() if user_is_member(request.user) else None,
        "member": request.user.member if user_is_member(request.user) else None,
        "boards": get_user_boards(request.user),
        "members": Member.objects.all()
    }
    # Start date
    start_date_str = request.GET.get("start_date")
    if start_date_str:
        try:
            start_date = datetime.datetime.strptime(start_date_str, "%Y-%m-%d")
            replacements["start_date"] = start_date
            replacements["date_interval"] = [start_date, timezone.now().date()]
        except ValueError:
            start_date = None
    # End date
    end_date_str = request.GET.get("end_date")
    if end_date_str:
        try:
            end_date = datetime.datetime.strptime(end_date_str, "%Y-%m-%d")
            replacements["end_date"] = end_date
            # Bug fix: "date_interval" only exists when a valid start_date was
            # also given; the old unconditional assignment raised KeyError when
            # end_date was passed on its own.
            if "date_interval" in replacements:
                replacements["date_interval"][1] = end_date
        except ValueError:
            end_date = None
    replacements["selected_member"] = selected_member
    multiboard_id = request.GET.get("multiboard_id", request.GET.get("multiboard"))
    multiboard = None
    if multiboard_id:
        multiboard = request.user.member.multiboards.get(id=multiboard_id)
        replacements["multiboard"] = multiboard
        replacements["selected_multiboard"] = multiboard
        replacements["board__multiboard"] = multiboard
    else:
        # If we are filtering by board, filter by board_id
        label_id = request.GET.get("label_id", request.GET.get("label"))
        label = None
        board = None
        if label_id:
            # "all_from_board_<id>" is a pseudo-label meaning "whole board".
            matches = re.match(r"all_from_board_(?P<board_id>\d+)", label_id)
            if matches:
                board = get_user_boards(request.user).get(id=matches.group("board_id"))
                label = None
                replacements["selected_label"] = label
                replacements["label"] = label
                replacements["selected_board"] = board
                replacements["board"] = board
            else:
                boards = get_user_boards(request.user)
                label = Label.objects.get(board__in=boards, id=label_id)
                replacements["selected_label"] = label
                replacements["label"] = label
                replacements["selected_board"] = label.board
                replacements["board"] = label.board
        board_id = request.GET.get("board_id", request.GET.get("board"))
        if not label_id and board_id:
            board = get_user_boards(request.user).get(id=board_id)
            label = None
            replacements["selected_label"] = label
            replacements["label"] = label
            replacements["selected_board"] = board
            replacements["board"] = board
    daily_spent_times = spent_times["all"]
    # Week. Bug fix: GET values are strings, so the old `week > 0` comparison
    # raised TypeError on Python 3 — validate via int() instead.
    week = None
    week_str = request.GET.get('week')
    if week_str:
        try:
            if int(week_str) > 0:
                week = week_str
        except ValueError:
            week = None
    replacements["week"] = week
    replacements["months"] = spent_times["per_month"]
    return {"queryset": daily_spent_times, "replacements": replacements}
# Return the daily spent times from a request
def _get_daily_spent_times_from_request(request):
    """Translate the request's GET parameters into the spent-time data set."""
    selected_member = None
    member_id = request.GET.get("member_id")
    if member_id:
        selected_member = Member.objects.get(id=member_id)
    # Optional multiboard filter.
    multiboard_id = request.GET.get("multiboard_id") or None
    # Label filter; a bare board_id is expressed through the pseudo-label
    # "all_from_board_<board_id>".
    label_id = request.GET.get("label_id") or None
    if label_id is None and request.GET.get("board_id"):
        label_id = "all_from_board_{0}".format(request.GET.get("board_id"))
    return _get_daily_spent_times_queryset(
        request.user, selected_member,
        request.GET.get("start_date"), request.GET.get("end_date"), request.GET.get('week'),
        label_id=label_id, multiboard_id=multiboard_id
    )
# Return the filtered queryset and the replacements given the GET parameters
def _get_daily_spent_times_queryset(current_user, selected_member, start_date_, end_date_, week, multiboard_id, label_id):
    """Filter DailySpentTime rows and aggregate them month by month.

    start_date_/end_date_ are "%Y-%m-%d" strings (invalid values are silently
    ignored). Returns a dict with keys "all" (date-descending queryset),
    "per_month" (list of per-month aggregate dicts), "start_date", "end_date",
    "board" and "multiboard".
    """
    daily_spent_time_filter = {}
    # Member filter
    if selected_member:
        daily_spent_time_filter["member_id"] = selected_member.id
    # Start date
    start_date = None
    if start_date_:
        try:
            start_date = datetime.datetime.strptime(start_date_, "%Y-%m-%d").date()
            daily_spent_time_filter["date__gte"] = start_date
        except ValueError:
            start_date = None
    # End date
    end_date = None
    if end_date_:
        try:
            end_date = datetime.datetime.strptime(end_date_, "%Y-%m-%d").date()
            daily_spent_time_filter["date__lte"] = end_date
        except ValueError:
            end_date = None
    # Week
    # NOTE(review): int(week) raises ValueError for non-numeric input;
    # callers pass the raw GET value — confirm it is always numeric or empty.
    if week and int(week) > 0:
        daily_spent_time_filter["week_of_year"] = week
    board = None
    multiboard = None
    # A multiboard filter (for users that are members) takes precedence over
    # any label/board filter.
    if multiboard_id and hasattr(current_user, "member"):
        multiboard = current_user.member.multiboards.get(id=multiboard_id)
        daily_spent_time_filter["board__multiboards"] = multiboard
    else:
        # Label
        label = None
        board = None
        current_user_boards = get_user_boards(current_user)
        if label_id:
            # "all_from_board_<id>" is a pseudo-label meaning "whole board".
            matches = re.match(r"all_from_board_(?P<board_id>\d+)", label_id)
            if matches:
                if current_user_boards.filter(id=matches.group("board_id")).exists():
                    label = None
                    board = current_user_boards.get(id=matches.group("board_id"))
                    daily_spent_time_filter["board"] = board
            else:
                if Label.objects.filter(id=label_id, board__in=current_user_boards).exists():
                    label = Label.objects.get(id=label_id)
                    board = label.board
                    daily_spent_time_filter["board"] = board
                    daily_spent_time_filter["card__labels"] = label
    # Daily Spent Times
    daily_spent_times = DailySpentTime.objects.filter(**daily_spent_time_filter).order_by("-date")
    months = []
    # Grouped by months
    if daily_spent_times.exists():
        # Default the interval bounds to the earliest/latest dates present.
        if start_date is None:
            start_date = daily_spent_times.order_by("date")[0].date
        if end_date is None:
            end_date = daily_spent_times[0].date
        # Walk month by month from the first day of the start month.
        date_i = datetime.date(start_date.year, start_date.month, 1)
        while date_i <= end_date:
            month_index = date_i.month
            year = date_i.year
            month_name = calendar.month_name[month_index]
            daily_spent_times_in_month_i = daily_spent_times.filter(date__year=year, date__month=month_index).order_by(
                "date")
            first_weekday, number_of_days_in_month = calendar.monthrange(year, month_index)
            # Plain DB sums plus member-adjusted sums (see _adjusted_*_sum).
            rate_amount_sum = daily_spent_times_in_month_i.aggregate(sum=Sum("rate_amount"))["sum"]
            adjusted_amount_sum = _adjusted_amount_sum(daily_spent_times_in_month_i)
            spent_time_sum = daily_spent_times_in_month_i.aggregate(sum=Sum("spent_time"))["sum"]
            adjusted_spent_time_sum = _adjusted_spent_time_sum(daily_spent_times_in_month_i)
            estimated_time_sum = daily_spent_times_in_month_i.aggregate(sum=Sum("estimated_time"))["sum"]
            diff_time_sum = daily_spent_times_in_month_i.aggregate(sum=Sum("diff_time"))["sum"]
            month = {
                "daily_spent_times": daily_spent_times_in_month_i,
                "values": {
                    "first_day": datetime.date(year, month_index, 1).isoformat(),
                    "last_day": datetime.date(year, month_index, number_of_days_in_month).isoformat(),
                    "name": month_name,
                    "number": month_index,
                    "year": year,
                    "i": month_index,
                    "rate_amount_sum": float(rate_amount_sum) if rate_amount_sum else None,
                    "adjusted_amount_sum": float(adjusted_amount_sum) if adjusted_amount_sum else None,
                    "spent_time_sum": float(spent_time_sum) if spent_time_sum else None,
                    'adjusted_spent_time_sum': float(adjusted_spent_time_sum) if adjusted_spent_time_sum else None,
                    "estimated_time_sum": float(estimated_time_sum) if estimated_time_sum else None,
                    "diff_time_sum": float(diff_time_sum) if diff_time_sum else None
                }
            }
            months.append(month)
            date_i = (date_i + relativedelta(months=1))
    replacements = {
        "all": daily_spent_times, "per_month": months,
        "start_date": start_date, "end_date": end_date,
        "board": board, "multiboard": multiboard
    }
    return replacements
# Computes the adjusted amount according to the factor each member has
def _adjusted_amount_sum(daily_spent_times):
    """Sum of rate amounts, adjusted by each member's personal factor."""
    return _adjusted_daily_spent_time_attribute_sum(daily_spent_times, "rate_amount")
# Computes the adjusted spent time according to the factor each member has
def _adjusted_spent_time_sum(daily_spent_times):
    """Sum of spent times, adjusted by each member's personal factor."""
    return _adjusted_daily_spent_time_attribute_sum(daily_spent_times, "spent_time")
# Computes the adjusted spent time according to the factor each member has
def _adjusted_daily_spent_time_attribute_sum(daily_spent_times, attribute="spent_time"):
adjusted_value_sum = 0
member_dict = {}
for daily_spent_time in daily_spent_times:
if not daily_spent_time.member_id in member_dict:
member_dict[daily_spent_time.member_id] = daily_spent_time.member
member = member_dict[daily_spent_time.member_id]
adjusted_value_sum += member.adjust_daily_spent_time(daily_spent_time, attribute)
return adjusted_value_sum
|
[
"djanban.apps.dev_times.models.DailySpentTime.objects.filter",
"django.utils.timezone.now",
"djanban.apps.base.auth.get_user_boards",
"dateutil.relativedelta.relativedelta",
"django.core.mail.EmailMultiAlternatives",
"django.shortcuts.render",
"django.template.loader.get_template",
"djanban.apps.base.auth.user_is_member",
"django.db.models.Sum",
"datetime.date",
"re.match",
"datetime.datetime.strptime",
"calendar.monthrange",
"djanban.apps.boards.models.Label.objects.filter",
"django.http.response.JsonResponse",
"django.db.models.Q",
"djanban.apps.members.models.Member.objects.all",
"djanban.apps.members.models.Member.objects.filter",
"djanban.apps.members.models.Member.objects.get",
"djanban.apps.boards.models.Label.objects.get",
"django.http.response.HttpResponse"
] |
[((1276, 1350), 'django.shortcuts.render', 'render', (['request', '"""daily_spent_times/list.html"""', "parameters['replacements']"], {}), "(request, 'daily_spent_times/list.html', parameters['replacements'])\n", (1282, 1350), False, 'from django.shortcuts import render\n'), ((2077, 2114), 'django.http.response.HttpResponse', 'HttpResponse', ([], {'content_type': '"""text/csv"""'}), "(content_type='text/csv')\n", (2089, 2114), False, 'from django.http.response import HttpResponse, Http404, JsonResponse\n'), ((2341, 2389), 'django.template.loader.get_template', 'loader.get_template', (['"""daily_spent_times/csv.txt"""'], {}), "('daily_spent_times/csv.txt')\n", (2360, 2389), False, 'from django.template import loader\n'), ((2745, 2774), 'djanban.apps.base.auth.get_user_boards', 'get_user_boards', (['current_user'], {}), '(current_user)\n', (2760, 2774), False, 'from djanban.apps.base.auth import get_user_boards, user_is_member\n'), ((4981, 5009), 'djanban.apps.base.auth.user_is_member', 'user_is_member', (['current_user'], {}), '(current_user)\n', (4995, 5009), False, 'from djanban.apps.base.auth import get_user_boards, user_is_member\n'), ((5502, 5559), 'djanban.apps.dev_times.models.DailySpentTime.objects.filter', 'DailySpentTime.objects.filter', ([], {}), '(**daily_spent_times_filter)\n', (5531, 5559), False, 'from djanban.apps.dev_times.models import DailySpentTime\n'), ((1181, 1269), 'django.shortcuts.render', 'render', (['request', '"""daily_spent_times/list_by_board.html"""', "parameters['replacements']"], {}), "(request, 'daily_spent_times/list_by_board.html', parameters[\n 'replacements'])\n", (1187, 1269), False, 'from django.shortcuts import render\n'), ((2835, 2875), 're.match', 're.match', (['"""[^@]+@[^@]+"""', 'recipient_email'], {}), "('[^@]+@[^@]+', recipient_email)\n", (2843, 2875), False, 'import re\n'), ((2893, 2935), 'django.http.response.JsonResponse', 'JsonResponse', (["{'message': 'Invalid email'}"], {}), "({'message': 'Invalid email'})\n", 
(2905, 2935), False, 'from django.http.response import HttpResponse, Http404, JsonResponse\n'), ((4384, 4440), 're.match', 're.match', (['"""all_from_board_(?P<board_id>\\\\d+)"""', 'label_str'], {}), "('all_from_board_(?P<board_id>\\\\d+)', label_str)\n", (4392, 4440), False, 'import re\n'), ((6402, 6503), 'django.core.mail.EmailMultiAlternatives', 'EmailMultiAlternatives', (['report_subject', 'txt_message', 'settings.EMAIL_HOST_USER', '[recipient_email]'], {}), '(report_subject, txt_message, settings.\n EMAIL_HOST_USER, [recipient_email])\n', (6424, 6503), False, 'from django.core.mail import EmailMultiAlternatives\n'), ((6776, 6861), 'django.shortcuts.render', 'render', (['request', '"""daily_spent_times/send_daily_spent_times_ok.html"""', 'replacements'], {}), "(request, 'daily_spent_times/send_daily_spent_times_ok.html',\n replacements)\n", (6782, 6861), False, 'from django.shortcuts import render\n'), ((7364, 7405), 'djanban.apps.members.models.Member.objects.get', 'Member.objects.get', ([], {'id': 'selected_member_id'}), '(id=selected_member_id)\n', (7382, 7405), False, 'from djanban.apps.members.models import Member\n'), ((7695, 7724), 'djanban.apps.base.auth.get_user_boards', 'get_user_boards', (['request.user'], {}), '(request.user)\n', (7710, 7724), False, 'from djanban.apps.base.auth import get_user_boards, user_is_member\n'), ((7745, 7765), 'djanban.apps.members.models.Member.objects.all', 'Member.objects.all', ([], {}), '()\n', (7763, 7765), False, 'from djanban.apps.members.models import Member\n'), ((12768, 12797), 'djanban.apps.base.auth.get_user_boards', 'get_user_boards', (['current_user'], {}), '(current_user)\n', (12783, 12797), False, 'from djanban.apps.base.auth import get_user_boards, user_is_member\n'), ((13919, 13970), 'datetime.date', 'datetime.date', (['start_date.year', 'start_date.month', '(1)'], {}), '(start_date.year, start_date.month, 1)\n', (13932, 13970), False, 'import datetime\n'), ((3124, 3178), 'datetime.datetime.strptime', 
'datetime.datetime.strptime', (['start_date_str', '"""%Y-%m-%d"""'], {}), "(start_date_str, '%Y-%m-%d')\n", (3150, 3178), False, 'import datetime\n'), ((3440, 3492), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['end_date_str', '"""%Y-%m-%d"""'], {}), "(end_date_str, '%Y-%m-%d')\n", (3466, 3492), False, 'import datetime\n'), ((5885, 5960), 'django.template.loader.get_template', 'get_template', (['"""daily_spent_times/emails/send_daily_spent_times_subject.txt"""'], {}), "('daily_spent_times/emails/send_daily_spent_times_subject.txt')\n", (5897, 5960), False, 'from django.template.loader import get_template\n'), ((6001, 6068), 'django.template.loader.get_template', 'get_template', (['"""daily_spent_times/emails/send_daily_spent_times.txt"""'], {}), "('daily_spent_times/emails/send_daily_spent_times.txt')\n", (6013, 6068), False, 'from django.template.loader import get_template\n'), ((6109, 6177), 'django.template.loader.get_template', 'get_template', (['"""daily_spent_times/emails/send_daily_spent_times.html"""'], {}), "('daily_spent_times/emails/send_daily_spent_times.html')\n", (6121, 6177), False, 'from django.template.loader import get_template\n'), ((6217, 6258), 'django.template.loader.get_template', 'get_template', (['"""daily_spent_times/csv.txt"""'], {}), "('daily_spent_times/csv.txt')\n", (6229, 6258), False, 'from django.template.loader import get_template\n'), ((6702, 6760), 'django.http.response.JsonResponse', 'JsonResponse', (["{'message': 'Spent times sent successfully'}"], {}), "({'message': 'Spent times sent successfully'})\n", (6714, 6760), False, 'from django.http.response import HttpResponse, Http404, JsonResponse\n'), ((7015, 7103), 'django.shortcuts.render', 'render', (['request', '"""daily_spent_times/send_daily_spent_times_error.html"""', 'replacements'], {}), "(request, 'daily_spent_times/send_daily_spent_times_error.html',\n replacements)\n", (7021, 7103), False, 'from django.shortcuts import render\n'), ((7556, 7584), 
'djanban.apps.base.auth.user_is_member', 'user_is_member', (['request.user'], {}), '(request.user)\n', (7570, 7584), False, 'from djanban.apps.base.auth import get_user_boards, user_is_member\n'), ((7637, 7665), 'djanban.apps.base.auth.user_is_member', 'user_is_member', (['request.user'], {}), '(request.user)\n', (7651, 7665), False, 'from djanban.apps.base.auth import get_user_boards, user_is_member\n'), ((7902, 7956), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['start_date_str', '"""%Y-%m-%d"""'], {}), "(start_date_str, '%Y-%m-%d')\n", (7928, 7956), False, 'import datetime\n'), ((8266, 8318), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['end_date_str', '"""%Y-%m-%d"""'], {}), "(end_date_str, '%Y-%m-%d')\n", (8292, 8318), False, 'import datetime\n'), ((9124, 9179), 're.match', 're.match', (['"""all_from_board_(?P<board_id>\\\\d+)"""', 'label_id'], {}), "('all_from_board_(?P<board_id>\\\\d+)', label_id)\n", (9132, 9179), False, 'import re\n'), ((12841, 12896), 're.match', 're.match', (['"""all_from_board_(?P<board_id>\\\\d+)"""', 'label_id'], {}), "('all_from_board_(?P<board_id>\\\\d+)', label_id)\n", (12849, 12896), False, 'import re\n'), ((13572, 13628), 'djanban.apps.dev_times.models.DailySpentTime.objects.filter', 'DailySpentTime.objects.filter', ([], {}), '(**daily_spent_time_filter)\n', (13601, 13628), False, 'from djanban.apps.dev_times.models import DailySpentTime\n'), ((14331, 14369), 'calendar.monthrange', 'calendar.monthrange', (['year', 'month_index'], {}), '(year, month_index)\n', (14350, 14369), False, 'import calendar\n'), ((4763, 4794), 'djanban.apps.boards.models.Label.objects.get', 'Label.objects.get', ([], {'id': 'label_str'}), '(id=label_str)\n', (4780, 4794), False, 'from djanban.apps.boards.models import Label, Board\n'), ((5180, 5233), 'djanban.apps.members.models.Member.objects.filter', 'Member.objects.filter', ([], {'boards__in': 'current_user_boards'}), '(boards__in=current_user_boards)\n', (5201, 5233), 
False, 'from djanban.apps.members.models import Member\n'), ((6935, 6999), 'django.http.response.JsonResponse', 'JsonResponse', (["{'message': 'Error when sending data'}"], {'status': '(500)'}), "({'message': 'Error when sending data'}, status=500)\n", (6947, 6999), False, 'from django.http.response import HttpResponse, Http404, JsonResponse\n'), ((9566, 9595), 'djanban.apps.base.auth.get_user_boards', 'get_user_boards', (['request.user'], {}), '(request.user)\n', (9581, 9595), False, 'from djanban.apps.base.auth import get_user_boards, user_is_member\n'), ((9620, 9668), 'djanban.apps.boards.models.Label.objects.get', 'Label.objects.get', ([], {'board__in': 'boards', 'id': 'label_id'}), '(board__in=boards, id=label_id)\n', (9637, 9668), False, 'from djanban.apps.boards.models import Label, Board\n'), ((16091, 16114), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(1)'}), '(months=1)\n', (16104, 16114), False, 'from dateutil.relativedelta import relativedelta\n'), ((4698, 4732), 'djanban.apps.boards.models.Label.objects.filter', 'Label.objects.filter', ([], {'id': 'label_str'}), '(id=label_str)\n', (4718, 4732), False, 'from djanban.apps.boards.models import Label, Board\n'), ((10015, 10044), 'djanban.apps.base.auth.get_user_boards', 'get_user_boards', (['request.user'], {}), '(request.user)\n', (10030, 10044), False, 'from djanban.apps.base.auth import get_user_boards, user_is_member\n'), ((11892, 11943), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['start_date_', '"""%Y-%m-%d"""'], {}), "(start_date_, '%Y-%m-%d')\n", (11918, 11943), False, 'import datetime\n'), ((12160, 12209), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['end_date_', '"""%Y-%m-%d"""'], {}), "(end_date_, '%Y-%m-%d')\n", (12186, 12209), False, 'import datetime\n'), ((13323, 13353), 'djanban.apps.boards.models.Label.objects.get', 'Label.objects.get', ([], {'id': 'label_id'}), '(id=label_id)\n', (13340, 13353), False, 'from 
djanban.apps.boards.models import Label, Board\n'), ((5064, 5097), 'django.db.models.Q', 'Q', ([], {'boards__in': 'current_user_boards'}), '(boards__in=current_user_boards)\n', (5065, 5097), False, 'from django.db.models import Sum, Q\n'), ((5098, 5126), 'django.db.models.Q', 'Q', ([], {'id': 'current_user.member.id'}), '(id=current_user.member.id)\n', (5099, 5126), False, 'from django.db.models import Sum, Q\n'), ((8066, 8080), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (8078, 8080), False, 'from django.utils import timezone\n'), ((9228, 9257), 'djanban.apps.base.auth.get_user_boards', 'get_user_boards', (['request.user'], {}), '(request.user)\n', (9243, 9257), False, 'from djanban.apps.base.auth import get_user_boards, user_is_member\n'), ((13220, 13284), 'djanban.apps.boards.models.Label.objects.filter', 'Label.objects.filter', ([], {'id': 'label_id', 'board__in': 'current_user_boards'}), '(id=label_id, board__in=current_user_boards)\n', (13240, 13284), False, 'from djanban.apps.boards.models import Label, Board\n'), ((14444, 14462), 'django.db.models.Sum', 'Sum', (['"""rate_amount"""'], {}), "('rate_amount')\n", (14447, 14462), False, 'from django.db.models import Sum, Q\n'), ((14629, 14646), 'django.db.models.Sum', 'Sum', (['"""spent_time"""'], {}), "('spent_time')\n", (14632, 14646), False, 'from django.db.models import Sum, Q\n'), ((14824, 14845), 'django.db.models.Sum', 'Sum', (['"""estimated_time"""'], {}), "('estimated_time')\n", (14827, 14845), False, 'from django.db.models import Sum, Q\n'), ((14925, 14941), 'django.db.models.Sum', 'Sum', (['"""diff_time"""'], {}), "('diff_time')\n", (14928, 14941), False, 'from django.db.models import Sum, Q\n'), ((15101, 15136), 'datetime.date', 'datetime.date', (['year', 'month_index', '(1)'], {}), '(year, month_index, 1)\n', (15114, 15136), False, 'import datetime\n'), ((15182, 15239), 'datetime.date', 'datetime.date', (['year', 'month_index', 'number_of_days_in_month'], {}), '(year, month_index, 
number_of_days_in_month)\n', (15195, 15239), False, 'import datetime\n')]
|
from nonebot import on_command
from nonebot.matcher import Matcher
from nonebot.adapters.onebot.v11 import GroupMessageEvent
from utils.utils import get_json
from utils.config_util import ConfigManager
# Register the chat command "status" with NoneBot.
status = on_command('status')
# Default configuration skeleton for this plugin.  The keys look like an
# MCSManager-style panel API (uuid / remote_uuid / apikey) -- TODO confirm
# against the deployed panel.
data = {
    "server_url": "",
    "uuid": "",
    "remote_uuid": "",
    "apikey": ""
}
# Load (or create) the persisted plugin configuration under "mc_status".
conf = ConfigManager.register("mc_status", data)
@status.handle()
async def mc_server_status(matcher: Matcher):
    """Handle the ``status`` command.

    Queries the configured game-server panel API and replies with the
    server's current state (name, version, player count, uptime).

    Args:
        matcher: NoneBot matcher used to send the reply.
    """
    query = {
        "uuid": conf["uuid"],
        "remote_uuid": conf["remote_uuid"],
        "apikey": conf["apikey"],
    }
    headers = {
        "accept": "application/json"
    }
    js = await get_json(conf["server_url"], query, headers)
    if not js:
        # Bug fix: previously execution fell through to ``js['data']``
        # after reporting the error, raising TypeError on a falsy
        # response.  Report the error and stop.
        await matcher.send('エラー発生')
        return
    data = js['data']
    msg = ''
    # Status codes: 0 appears to mean stopped, 3 running -- presumably
    # MCSManager conventions; verify against the panel docs.
    if data["status"] == 0:
        msg += f'服务器当前状态:关闭\n上次启动时间:{data["config"]["lastDatetime"]}'
    elif data["status"] == 3:
        if data["info"]["version"]:
            # ``elapsed`` is reported in milliseconds; convert to seconds
            # and then break it down into days/hours/minutes/seconds.
            elapsed = int(data["processInfo"]["elapsed"]) / 1000
            d = int(elapsed / (24 * 3600))
            h = int((elapsed / 3600) % 24)
            m = int((elapsed / 60) % 60)
            s = int(elapsed % 60)
            msg += f'服务器名称:{data["config"]["nickname"]}\n当前状态:开启\n' \
                   f'启动时间:{data["config"]["lastDatetime"]}\n服务端版本:{data["info"]["version"]}\n' \
                   f'在线人数:{data["info"]["currentPlayers"]}/{data["info"]["maxPlayers"]}\n已运行:{d}天{h}时{m}分{s}秒'
        else:
            # Version not reported yet: the server is still starting up.
            msg += '服务器正在启动...'
    await matcher.send(msg)
|
[
"nonebot.on_command",
"utils.config_util.ConfigManager.register",
"utils.utils.get_json"
] |
[((213, 233), 'nonebot.on_command', 'on_command', (['"""status"""'], {}), "('status')\n", (223, 233), False, 'from nonebot import on_command\n'), ((331, 372), 'utils.config_util.ConfigManager.register', 'ConfigManager.register', (['"""mc_status"""', 'data'], {}), "('mc_status', data)\n", (353, 372), False, 'from utils.config_util import ConfigManager\n'), ((640, 684), 'utils.utils.get_json', 'get_json', (["conf['server_url']", 'query', 'headers'], {}), "(conf['server_url'], query, headers)\n", (648, 684), False, 'from utils.utils import get_json\n')]
|
from datetime import timedelta
from LoggedSensor import LoggedSensor
class HumiditySensor(LoggedSensor):
def __init__(self, node):
super(HumiditySensor, self).__init__(type_id=2, max_measurements=500, holdoff_time=timedelta(minutes=1))
node.Humidity.subscribe_to_changes(self.on_property_changed)
def on_property_changed(self, name, value):
self.humidity = float(value)
self.add_measurement(self.humidity)
|
[
"datetime.timedelta"
] |
[((229, 249), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (238, 249), False, 'from datetime import timedelta\n')]
|
import string
SWAGGER_UI_TEMPLATE = string.Template(
"""
<!-- HTML for static distribution bundle build -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Swagger UI</title>
<link rel="stylesheet" type="text/css" href="./swagger_ui_static/swagger-ui.css" >
<link rel="icon" type="image/png" href="./swagger_ui_static/favicon-32x32.png" sizes="32x32" />
<link rel="icon" type="image/png" href="./swagger_ui_static/favicon-16x16.png" sizes="16x16" />
<style>
html
{
box-sizing: border-box;
overflow: -moz-scrollbars-vertical;
overflow-y: scroll;
}
*,
*:before,
*:after
{
box-sizing: inherit;
}
body
{
margin:0;
background: #fafafa;
}
</style>
</head>
<body>
<div id="swagger-ui"></div>
<script src="./swagger_ui_static/swagger-ui-bundle.js"> </script>
<script src="./swagger_ui_static/swagger-ui-standalone-preset.js"> </script>
<script>
window.onload = function() {
// Begin Swagger UI call region
const ui = SwaggerUIBundle({...{
url: "./swagger.json",
dom_id: '#swagger-ui',
deepLinking: true,
presets: [
SwaggerUIBundle.presets.apis,
SwaggerUIStandalonePreset
],
plugins: [
SwaggerUIBundle.plugins.DownloadUrl
],
layout: "StandaloneLayout"
}, ...${settings}});
// End Swagger UI call region
window.ui = ui
}
</script>
</body>
</html>
"""
)
REDOC_UI_TEMPLATE = string.Template(
"""
<!DOCTYPE html>
<html>
<head>
<title>ReDoc</title>
<!-- needed for adaptive design -->
<meta charset="utf-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" type="text/css" href="./redoc_ui_static/google-fonts.css" >
<link rel="shortcut icon" href="./redoc_ui_static/favicon.ico"/>
<link rel="icon" type="image/png" sizes="16x16" href="./redoc_ui_static/favicon-16x16.png"/>
<link rel="icon" type="image/png" sizes="32x32" href="./redoc_ui_static/favicon-32x32.png"/>
<!--
ReDoc doesn't change outer page styles
-->
<style>
body {
margin: 0;
padding: 0;
}
</style>
</head>
<body>
<div id='redoc-ui'></div>
<script src="./redoc_ui_static/redoc.standalone.js"> </script>
<script>
Redoc.init('./swagger.json', ${settings}, document.getElementById('redoc-ui'))
</script>
</body>
</html>
"""
)
RAPIDOC_UI_TEMPLATE = string.Template(
"""
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link rel="stylesheet" type="text/css" href="./rapidoc_ui_static/fonts.css" >
<script type="module" src="./rapidoc_ui_static/rapidoc-min.js"></script>
</head>
<body>
<rapi-doc
id='rapidoc-ui'
spec-url='./swagger.json'
regular-font='Rapidoc Regular'
mono-font='Roboto Mono'
> </rapi-doc>
<script>
const docEl = document.getElementById('rapidoc-ui');
const settings = ${settings}
for (const key in settings) {
docEl.setAttribute(key, settings[key]);
}
</script>
</body>
</html>
"""
)
|
[
"string.Template"
] |
[((37, 1578), 'string.Template', 'string.Template', (['"""\n<!-- HTML for static distribution bundle build -->\n<!DOCTYPE html>\n<html lang="en">\n <head>\n <meta charset="UTF-8">\n <title>Swagger UI</title>\n <link rel="stylesheet" type="text/css" href="./swagger_ui_static/swagger-ui.css" >\n <link rel="icon" type="image/png" href="./swagger_ui_static/favicon-32x32.png" sizes="32x32" />\n <link rel="icon" type="image/png" href="./swagger_ui_static/favicon-16x16.png" sizes="16x16" />\n <style>\n html\n {\n box-sizing: border-box;\n overflow: -moz-scrollbars-vertical;\n overflow-y: scroll;\n }\n\n *,\n *:before,\n *:after\n {\n box-sizing: inherit;\n }\n\n body\n {\n margin:0;\n background: #fafafa;\n }\n </style>\n </head>\n\n <body>\n <div id="swagger-ui"></div>\n\n <script src="./swagger_ui_static/swagger-ui-bundle.js"> </script>\n <script src="./swagger_ui_static/swagger-ui-standalone-preset.js"> </script>\n <script>\n window.onload = function() {\n // Begin Swagger UI call region\n const ui = SwaggerUIBundle({...{\n url: "./swagger.json",\n dom_id: \'#swagger-ui\',\n deepLinking: true,\n presets: [\n SwaggerUIBundle.presets.apis,\n SwaggerUIStandalonePreset\n ],\n plugins: [\n SwaggerUIBundle.plugins.DownloadUrl\n ],\n layout: "StandaloneLayout"\n }, ...${settings}});\n // End Swagger UI call region\n\n window.ui = ui\n }\n </script>\n </body>\n</html>\n"""'], {}), '(\n """\n<!-- HTML for static distribution bundle build -->\n<!DOCTYPE html>\n<html lang="en">\n <head>\n <meta charset="UTF-8">\n <title>Swagger UI</title>\n <link rel="stylesheet" type="text/css" href="./swagger_ui_static/swagger-ui.css" >\n <link rel="icon" type="image/png" href="./swagger_ui_static/favicon-32x32.png" sizes="32x32" />\n <link rel="icon" type="image/png" href="./swagger_ui_static/favicon-16x16.png" sizes="16x16" />\n <style>\n html\n {\n box-sizing: border-box;\n overflow: -moz-scrollbars-vertical;\n overflow-y: scroll;\n }\n\n *,\n *:before,\n *:after\n {\n box-sizing: inherit;\n 
}\n\n body\n {\n margin:0;\n background: #fafafa;\n }\n </style>\n </head>\n\n <body>\n <div id="swagger-ui"></div>\n\n <script src="./swagger_ui_static/swagger-ui-bundle.js"> </script>\n <script src="./swagger_ui_static/swagger-ui-standalone-preset.js"> </script>\n <script>\n window.onload = function() {\n // Begin Swagger UI call region\n const ui = SwaggerUIBundle({...{\n url: "./swagger.json",\n dom_id: \'#swagger-ui\',\n deepLinking: true,\n presets: [\n SwaggerUIBundle.presets.apis,\n SwaggerUIStandalonePreset\n ],\n plugins: [\n SwaggerUIBundle.plugins.DownloadUrl\n ],\n layout: "StandaloneLayout"\n }, ...${settings}});\n // End Swagger UI call region\n\n window.ui = ui\n }\n </script>\n </body>\n</html>\n"""\n )\n', (52, 1578), False, 'import string\n'), ((1596, 2572), 'string.Template', 'string.Template', (['"""\n<!DOCTYPE html>\n<html>\n <head>\n <title>ReDoc</title>\n <!-- needed for adaptive design -->\n <meta charset="utf-8"/>\n <meta name="viewport" content="width=device-width, initial-scale=1">\n <link rel="stylesheet" type="text/css" href="./redoc_ui_static/google-fonts.css" >\n <link rel="shortcut icon" href="./redoc_ui_static/favicon.ico"/>\n <link rel="icon" type="image/png" sizes="16x16" href="./redoc_ui_static/favicon-16x16.png"/>\n <link rel="icon" type="image/png" sizes="32x32" href="./redoc_ui_static/favicon-32x32.png"/>\n\n <!--\n ReDoc doesn\'t change outer page styles\n -->\n <style>\n body {\n margin: 0;\n padding: 0;\n }\n </style>\n </head>\n <body>\n <div id=\'redoc-ui\'></div>\n <script src="./redoc_ui_static/redoc.standalone.js"> </script>\n <script>\n Redoc.init(\'./swagger.json\', ${settings}, document.getElementById(\'redoc-ui\'))\n </script>\n </body>\n</html>\n"""'], {}), '(\n """\n<!DOCTYPE html>\n<html>\n <head>\n <title>ReDoc</title>\n <!-- needed for adaptive design -->\n <meta charset="utf-8"/>\n <meta name="viewport" content="width=device-width, initial-scale=1">\n <link rel="stylesheet" type="text/css" 
href="./redoc_ui_static/google-fonts.css" >\n <link rel="shortcut icon" href="./redoc_ui_static/favicon.ico"/>\n <link rel="icon" type="image/png" sizes="16x16" href="./redoc_ui_static/favicon-16x16.png"/>\n <link rel="icon" type="image/png" sizes="32x32" href="./redoc_ui_static/favicon-32x32.png"/>\n\n <!--\n ReDoc doesn\'t change outer page styles\n -->\n <style>\n body {\n margin: 0;\n padding: 0;\n }\n </style>\n </head>\n <body>\n <div id=\'redoc-ui\'></div>\n <script src="./redoc_ui_static/redoc.standalone.js"> </script>\n <script>\n Redoc.init(\'./swagger.json\', ${settings}, document.getElementById(\'redoc-ui\'))\n </script>\n </body>\n</html>\n"""\n )\n', (1611, 2572), False, 'import string\n'), ((2592, 3247), 'string.Template', 'string.Template', (['"""\n<!DOCTYPE html>\n<html>\n <head>\n <meta charset="utf-8">\n <link rel="stylesheet" type="text/css" href="./rapidoc_ui_static/fonts.css" >\n <script type="module" src="./rapidoc_ui_static/rapidoc-min.js"></script>\n </head>\n <body>\n <rapi-doc\n id=\'rapidoc-ui\'\n spec-url=\'./swagger.json\'\n regular-font=\'Rapidoc Regular\'\n mono-font=\'Roboto Mono\'\n > </rapi-doc>\n <script>\n const docEl = document.getElementById(\'rapidoc-ui\');\n const settings = ${settings}\n for (const key in settings) {\n docEl.setAttribute(key, settings[key]);\n }\n </script>\n </body>\n</html>\n"""'], {}), '(\n """\n<!DOCTYPE html>\n<html>\n <head>\n <meta charset="utf-8">\n <link rel="stylesheet" type="text/css" href="./rapidoc_ui_static/fonts.css" >\n <script type="module" src="./rapidoc_ui_static/rapidoc-min.js"></script>\n </head>\n <body>\n <rapi-doc\n id=\'rapidoc-ui\'\n spec-url=\'./swagger.json\'\n regular-font=\'Rapidoc Regular\'\n mono-font=\'Roboto Mono\'\n > </rapi-doc>\n <script>\n const docEl = document.getElementById(\'rapidoc-ui\');\n const settings = ${settings}\n for (const key in settings) {\n docEl.setAttribute(key, settings[key]);\n }\n </script>\n </body>\n</html>\n"""\n )\n', (2607, 3247), False, 
'import string\n')]
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Abstractions for low-level database DDL and DML operations."""
from edb.lang.schema import delta as sd
from edb.lang.schema import name as sn
from edb.lang.schema import objects as s_obj
from edb.lang.common import adapter
from edb.lang import edgeql
from edb.server.pgsql import common
from edb.server.pgsql import dbops
from edb.server.pgsql import metaschema
from edb.server.pgsql.dbops import catalogs as pg_catalogs
class SchemaDBObjectMeta(adapter.Adapter, type(s_obj.Object)):
    """Metaclass combining the adapter registry with Object's metaclass.

    Classes using this metaclass are both adapters (registered against
    the schema class they adapt) and valid schema Object subclasses.
    """

    def __init__(cls, name, bases, dct, *, adapts=None):
        # Run both metaclass initializers explicitly, in order: first
        # register the class in the adapter registry, then perform the
        # normal Object class setup.
        adapter.Adapter.__init__(cls, name, bases, dct, adapts=adapts)
        type(s_obj.Object).__init__(cls, name, bases, dct)
class SchemaDBObject(metaclass=SchemaDBObjectMeta):
    """Base mixin for database-level adapters of schema objects."""

    @classmethod
    def adapt(cls, obj):
        # Adapting an object simply copies it into the adapter class.
        return cls.copy(obj)

    @classmethod
    def get_canonical_class(cls):
        """Return the schema class this adapter wraps.

        That is the first base that is a schema Object but not itself a
        database adapter; fall back to *cls* if none is found.
        """
        candidates = (
            base for base in cls.__bases__
            if issubclass(base, s_obj.Object)
            and not issubclass(base, SchemaDBObject)
        )
        return next(candidates, cls)
class CallDeltaHook(dbops.Command):
    """Command that invokes a named delta hook of a schema operation."""

    def __init__(
            self, *, hook, stage, op, conditions=None, neg_conditions=None,
            priority=0):
        super().__init__(
            conditions=conditions,
            neg_conditions=neg_conditions,
            priority=priority,
        )
        self.hook = hook
        self.stage = stage
        self.op = op

    async def execute(self, context):
        """Run the hook; an undefined hook is silently skipped."""
        try:
            self.op.call_hook(
                context.session, stage=self.stage, hook=self.hook)
        except sd.DeltaHookNotFoundError:
            # Operations are not required to define every hook.
            pass
class ConstraintCommon:
    """Helpers shared by database representations of schema constraints."""

    def constraint_name(self, quote=True):
        """Return the PostgreSQL-mangled constraint name, quoted by default."""
        pg_name = common.edgedb_name_to_pg_name(self.raw_constraint_name())
        if not quote:
            return pg_name
        return common.quote_ident(pg_name)

    def schema_constraint_name(self):
        """Fully-qualified schema name of the underlying constraint."""
        return self._constraint.name

    def raw_constraint_name(self):
        """Schema name tagged so it is recognizable as a schema constraint."""
        return '{};{}'.format(self._constraint.name, 'schemaconstr')

    async def extra(self, context):
        # Record the raw schema name as a COMMENT on the DB object so
        # introspection can map it back to the schema constraint.
        return [dbops.Comment(object=self, text=self.raw_constraint_name())]

    def rename_extra(self, context, new_constraint):
        # On rename, refresh the COMMENT to carry the new raw name.
        return [
            dbops.Comment(
                object=new_constraint,
                text=new_constraint.raw_constraint_name(),
            )
        ]

    @property
    def is_abstract(self):
        return self._constraint.is_abstract
class SchemaConstraintDomainConstraint(
        ConstraintCommon, dbops.DomainConstraint):
    """A CHECK constraint on a domain backing a schema constraint."""

    def __init__(self, domain_name, constraint, exprdata):
        super().__init__(domain_name)
        self._exprdata = exprdata
        self._constraint = constraint

    async def extra(self, context):
        # There seems to be no direct way to COMMENT on a domain constraint.
        # See http://postgr.es/m/5310157.yWWCtg2qIU@klinga.prans.org
        # Work this around by updating pg_description directly.
        #
        # text = self.raw_constraint_name()
        # cmd = dbops.Comment(object=self, text=text)
        # return [cmd]

        table = pg_catalogs.PgDescriptionTable()
        rec = table.record()

        objoid = dbops.Query(
            '(SELECT oid FROM pg_constraint WHERE conname = $1)',
            [self.constraint_name(quote=False)], type='oid')

        classoid = dbops.Query(
            '''(SELECT c.oid
                FROM
                    pg_class c INNER JOIN pg_namespace ns
                        ON c.relnamespace = ns.oid
                WHERE
                    c.relname = 'pg_constraint' AND
                    ns.nspname = 'pg_catalog')
            ''', [], type='oid')

        rec.objoid = objoid
        rec.classoid = classoid
        rec.description = self.raw_constraint_name()
        rec.objsubid = 0

        cond = [('objoid', objoid), ('classoid', classoid)]
        cmd = dbops.Merge(table=table, record=rec, condition=cond)

        return [cmd]

    async def constraint_code(self, context):
        """Return the ``CHECK (...)`` clause for this domain constraint."""
        if len(self._exprdata) == 1:
            expr = self._exprdata[0]['exprdata']['plain']
        else:
            # Bug fix: iterate the exprdata list itself.  The original
            # indexed the list with a string key
            # (``self._exprdata['exprdata']``), raising TypeError for any
            # constraint with more than one expression.  This matches the
            # sibling SchemaConstraintTableConstraint implementation.
            exprs = [e['exprdata']['plain'] for e in self._exprdata]
            expr = '(' + ') AND ('.join(exprs) + ')'

        return 'CHECK ({})'.format(expr)

    def __repr__(self):
        # Bug fix: the original applied the ``%`` operator to a
        # str.format-style template with four arguments, raising
        # TypeError whenever repr() was taken.  Use str.format, matching
        # SchemaConstraintTableConstraint.__repr__.
        return '<{}.{} {!r} {!r}>'.format(
            self.__class__.__module__, self.__class__.__name__,
            self.domain_name, self._constraint)
class SchemaConstraintTableConstraint(ConstraintCommon, dbops.TableConstraint):
    """Table-level database representation of a schema constraint.

    Depending on ``scope``/``type``, the constraint is emitted either as
    a CHECK clause (row scope) or as UNIQUE/EXCLUDE clauses (set scope),
    the latter enforced across inheritance via trigger procedures.
    """

    def __init__(self, table_name, *, constraint, exprdata, scope, type):
        super().__init__(table_name, None)
        self._constraint = constraint
        self._exprdata = exprdata
        self._scope = scope
        self._type = type

    async def constraint_code(self, context):
        """Return the constraint clause(s) for ALTER TABLE.

        Returns a single string for row-scope CHECK constraints, or a
        list of strings (one per expression) for set-scope constraints.
        """
        if self._scope == 'row':
            if len(self._exprdata) == 1:
                expr = self._exprdata[0]['exprdata']['plain']
            else:
                # Multiple expressions are ANDed into a single CHECK.
                exprs = [e['exprdata']['plain'] for e in self._exprdata]
                expr = '(' + ') AND ('.join(exprs) + ')'

            expr = 'CHECK ({})'.format(expr)

        else:
            # Set scope: only uniqueness constraints are supported.
            if self._type != 'unique':
                raise ValueError(
                    'unexpected constraint type: {}'.format(self._type))

            constr_exprs = []

            for expr in self._exprdata:
                if expr['is_trivial']:
                    # A constraint that contains one or more
                    # references to columns, and no expressions.
                    #
                    expr = ', '.join(expr['exprdata']['plain_chunks'])
                    expr = 'UNIQUE ({})'.format(expr)
                else:
                    # Complex constraint with arbitrary expressions
                    # needs to use EXCLUDE.
                    #
                    chunks = expr['exprdata']['plain_chunks']
                    expr = ', '.join(
                        "{} WITH =".format(chunk) for chunk in chunks)
                    expr = 'EXCLUDE ({})'.format(expr)

                constr_exprs.append(expr)

            expr = constr_exprs

        return expr

    def numbered_constraint_name(self, i, quote=True):
        # When one schema constraint expands into multiple database
        # constraints, each one is addressed as <raw_name>#<i>.
        raw_name = self.raw_constraint_name()
        name = common.edgedb_name_to_pg_name('{}#{}'.format(raw_name, i))
        return common.quote_ident(name) if quote else name

    def get_trigger_procname(self):
        """Return (schema, name) of the trigger procedure for this
        constraint."""
        schema = common.edgedb_module_name_to_schema_name(
            self.schema_constraint_name().module)
        proc_name = common.edgedb_name_to_pg_name(
            self.raw_constraint_name() + '_trigproc')
        return schema, proc_name

    def get_trigger_condition(self):
        """WHEN condition for the UPDATE trigger: fire only if any of
        the constrained expressions actually changed."""
        chunks = []

        for expr in self._exprdata:
            condition = '{old_expr} IS DISTINCT FROM {new_expr}'.format(
                old_expr=expr['exprdata']['old'],
                new_expr=expr['exprdata']['new'])
            chunks.append(condition)

        if len(chunks) == 1:
            return chunks[0]
        else:
            return '(' + ') OR ('.join(chunks) + ')'

    def get_trigger_proc_text(self):
        """PL/pgSQL body of the trigger procedure that raises
        ``unique_violation`` when a duplicate key is inserted/updated."""
        chunks = []

        if self.is_multiconstraint():
            # Report the first numbered constraint in the error message.
            constr_name = self.numbered_constraint_name(0)
            raw_constr_name = self.numbered_constraint_name(0, quote=False)
        else:
            constr_name = self.constraint_name()
            raw_constr_name = self.constraint_name(quote=False)

        errmsg = 'duplicate key value violates unique ' \
                 'constraint {constr}'.format(constr=constr_name)

        subject_table = self.get_subject_name()

        for expr in self._exprdata:
            exprdata = expr['exprdata']

            text = '''
                      PERFORM
                            TRUE
                          FROM
                            {table}
                          WHERE
                            {plain_expr} = {new_expr};
                      IF FOUND THEN
                          RAISE unique_violation
                              USING
                                  TABLE = '{table[1]}',
                                  SCHEMA = '{table[0]}',
                                  CONSTRAINT = '{constr}',
                                  MESSAGE = '{errmsg}',
                                  DETAIL = 'Key ({plain_expr}) already exists.';
                      END IF;
                      '''.format(
                plain_expr=exprdata['plain'], new_expr=exprdata['new'],
                table=subject_table, constr=raw_constr_name, errmsg=errmsg)

            chunks.append(text)

        text = 'BEGIN\n' + '\n\n'.join(chunks) + '\nRETURN NEW;\nEND;'

        return text

    def is_multiconstraint(self):
        """Determine if multiple database constraints are needed."""
        return self._scope != 'row' and len(self._exprdata) > 1

    def is_natively_inherited(self):
        """Determine if this constraint can be inherited natively."""
        return self._type == 'check'

    def __repr__(self):
        return '<{}.{} {!r}>'.format(
            self.__class__.__module__, self.__class__.__name__,
            self._constraint)
class MultiConstraintItem:
    """One numbered member of a multi-constraint group.

    When a schema constraint expands into several database constraints,
    each part is addressed as ``<raw_name>#<index>``.
    """

    def __init__(self, constraint, index):
        self.constraint = constraint
        self.index = index

    def get_type(self):
        # Delegate to the wrapped constraint.
        return self.constraint.get_type()

    def get_id(self):
        """Return the qualified identifier of this numbered constraint."""
        numbered = '{}#{}'.format(
            self.constraint.raw_constraint_name(), self.index)
        quoted = common.quote_ident(common.edgedb_name_to_pg_name(numbered))

        subject_type = self.constraint.get_subject_type()
        subject_name = self.constraint.get_subject_name()
        return '{} ON {} {}'.format(quoted, subject_type, subject_name)
class AlterTableAddMultiConstraint(dbops.AlterTableAddConstraint):
    """ADD CONSTRAINT that may expand into several numbered constraints."""

    async def code(self, context):
        exprs = await self.constraint.constraint_code(context)

        if isinstance(exprs, list) and len(exprs) > 1:
            # One ADD CONSTRAINT clause per numbered sub-constraint.
            return ', '.join(
                'ADD CONSTRAINT {} {}'.format(
                    self.constraint.numbered_constraint_name(i), expr)
                for i, expr in enumerate(exprs))

        if isinstance(exprs, list):
            exprs = exprs[0]

        return 'ADD CONSTRAINT {} {}'.format(
            self.constraint.constraint_name(), exprs)

    async def extra(self, context, alter_table):
        """Produce COMMENT commands recording the raw constraint name."""
        exprs = await self.constraint.constraint_code(context)
        constr_name = self.constraint.raw_constraint_name()

        if isinstance(exprs, list) and len(exprs) > 1:
            targets = [
                MultiConstraintItem(self.constraint, i)
                for i in range(len(exprs))
            ]
        else:
            targets = [self.constraint]

        return [dbops.Comment(target, constr_name) for target in targets]
class AlterTableRenameMultiConstraint(
        dbops.AlterTableBaseMixin, dbops.CommandGroup):
    """Rename a constraint that may be split into numbered parts.

    Emits one RENAME CONSTRAINT per numbered sub-constraint, plus
    COMMENT updates mapping each part back to the new schema name.
    """

    def __init__(
            self, name, *, constraint, new_constraint, contained=False,
            conditions=None, neg_conditions=None, priority=0):
        # Initialize both bases explicitly; the mixin carries the table
        # name, the group carries the queued commands.
        dbops.CommandGroup.__init__(
            self, conditions=conditions, neg_conditions=neg_conditions,
            priority=priority)

        dbops.AlterTableBaseMixin.__init__(
            self, name=name, contained=contained)

        self.constraint = constraint
        self.new_constraint = new_constraint

    async def execute(self, context):
        c = self.constraint
        nc = self.new_constraint

        # The part count is derived from the *old* constraint's
        # expression list.
        exprs = await self.constraint.constraint_code(context)

        if isinstance(exprs, list) and len(exprs) > 1:
            # Rename each numbered part individually.
            for i, expr in enumerate(exprs):
                old_name = c.numbered_constraint_name(i, quote=False)
                new_name = nc.numbered_constraint_name(i, quote=False)

                ac = dbops.AlterTableRenameConstraintSimple(
                    name=self.name, old_name=old_name, new_name=new_name)

                self.add_command(ac)
        else:
            old_name = c.constraint_name(quote=False)
            new_name = nc.constraint_name(quote=False)

            ac = dbops.AlterTableRenameConstraintSimple(
                name=self.name, old_name=old_name, new_name=new_name)

            self.add_command(ac)

        return await super().execute(context)

    async def extra(self, context):
        """Produce COMMENT commands recording the new raw constraint
        name on every (numbered) part."""
        comments = []

        exprs = await self.new_constraint.constraint_code(context)
        constr_name = self.new_constraint.raw_constraint_name()

        if isinstance(exprs, list) and len(exprs) > 1:
            for i, expr in enumerate(exprs):
                constraint = MultiConstraintItem(self.new_constraint, i)

                comment = dbops.Comment(constraint, constr_name)
                comments.append(comment)
        else:
            comment = dbops.Comment(self.new_constraint, constr_name)
            comments.append(comment)

        return comments
class AlterTableDropMultiConstraint(dbops.AlterTableDropConstraint):
    """DROP CONSTRAINT that may cover several numbered constraints."""

    async def code(self, context):
        exprs = await self.constraint.constraint_code(context)

        if isinstance(exprs, list) and len(exprs) > 1:
            # Drop every numbered sub-constraint in a single statement.
            return ', '.join(
                'DROP CONSTRAINT {}'.format(
                    self.constraint.numbered_constraint_name(i))
                for i in range(len(exprs)))

        return 'DROP CONSTRAINT {}'.format(self.constraint.constraint_name())
class AlterTableInheritableConstraintBase(
        dbops.AlterTableBaseMixin, dbops.CommandGroup):
    """Shared machinery for adding/renaming/altering/dropping a table
    constraint that must behave as if inherited by descendant tables.
    Natively-inheritable constraints are handled with plain DDL; others
    are emulated with (disabled, inheritable) insert/update triggers.
    """
    def __init__(
            self, name, *, constraint, contained=False, conditions=None,
            neg_conditions=None, priority=0):
        # NOTE(review): the declared base is dbops.CommandGroup but this
        # calls dbops.CompositeCommandGroup.__init__ -- confirm the MRO
        # makes this intentional rather than a copy/paste slip.
        dbops.CompositeCommandGroup.__init__(
            self, conditions=conditions, neg_conditions=neg_conditions,
            priority=priority)
        dbops.AlterTableBaseMixin.__init__(
            self, name=name, contained=contained)
        self._constraint = constraint
    def create_constr_trigger(self, table_name, constraint, proc_name):
        """Return commands creating (and immediately disabling on self)
        the inheritable insert/update triggers that emulate *constraint*.
        """
        cmds = []
        cname = constraint.raw_constraint_name()
        ins_trigger_name = common.edgedb_name_to_pg_name(cname + '_instrigger')
        ins_trigger = dbops.Trigger(
            name=ins_trigger_name, table_name=table_name, events=('insert', ),
            procedure=proc_name, is_constraint=True, inherit=True)
        cr_ins_trigger = dbops.CreateTrigger(ins_trigger)
        cmds.append(cr_ins_trigger)
        # Disabled on this table only: descendants inherit it enabled.
        disable_ins_trigger = dbops.DisableTrigger(ins_trigger, self_only=True)
        cmds.append(disable_ins_trigger)
        upd_trigger_name = common.edgedb_name_to_pg_name(cname + '_updtrigger')
        condition = constraint.get_trigger_condition()
        upd_trigger = dbops.Trigger(
            name=upd_trigger_name, table_name=table_name, events=('update', ),
            procedure=proc_name, condition=condition, is_constraint=True,
            inherit=True)
        cr_upd_trigger = dbops.CreateTrigger(upd_trigger)
        cmds.append(cr_upd_trigger)
        disable_upd_trigger = dbops.DisableTrigger(upd_trigger, self_only=True)
        cmds.append(disable_upd_trigger)
        return cmds
    def rename_constr_trigger(self, table_name):
        """Return commands renaming the emulation triggers to match the
        new constraint name.
        """
        # NOTE(review): _new_constraint is set only by the Rename/Alter
        # subclasses; this method assumes one of them is the caller.
        constraint = self._constraint
        new_constr = self._new_constraint
        cname = constraint.raw_constraint_name()
        ncname = new_constr.raw_constraint_name()
        ins_trigger_name = common.edgedb_name_to_pg_name(cname + '_instrigger')
        new_ins_trg_name = common.edgedb_name_to_pg_name(
            ncname + '_instrigger')
        # procedure='null': only the trigger name matters for a rename.
        ins_trigger = dbops.Trigger(
            name=ins_trigger_name, table_name=table_name, events=('insert', ),
            procedure='null', is_constraint=True, inherit=True)
        rn_ins_trigger = dbops.AlterTriggerRenameTo(
            ins_trigger, new_name=new_ins_trg_name)
        upd_trigger_name = common.edgedb_name_to_pg_name(cname + '_updtrigger')
        new_upd_trg_name = common.edgedb_name_to_pg_name(
            ncname + '_updtrigger')
        upd_trigger = dbops.Trigger(
            name=upd_trigger_name, table_name=table_name, events=('update', ),
            procedure='null', is_constraint=True, inherit=True)
        rn_upd_trigger = dbops.AlterTriggerRenameTo(
            upd_trigger, new_name=new_upd_trg_name)
        return (rn_ins_trigger, rn_upd_trigger)
    def drop_constr_trigger(self, table_name, constraint):
        """Return commands dropping both emulation triggers."""
        cname = constraint.raw_constraint_name()
        ins_trigger_name = common.edgedb_name_to_pg_name(cname + '_instrigger')
        ins_trigger = dbops.Trigger(
            name=ins_trigger_name, table_name=table_name, events=('insert', ),
            procedure='null', is_constraint=True, inherit=True)
        drop_ins_trigger = dbops.DropTrigger(ins_trigger)
        upd_trigger_name = common.edgedb_name_to_pg_name(cname + '_updtrigger')
        upd_trigger = dbops.Trigger(
            name=upd_trigger_name, table_name=table_name, events=('update', ),
            procedure='null', is_constraint=True, inherit=True)
        drop_upd_trigger = dbops.DropTrigger(upd_trigger)
        return [drop_ins_trigger, drop_upd_trigger]
    def drop_constr_trigger_function(self, proc_name):
        """Return the command dropping the trigger procedure."""
        return [dbops.DropFunction(name=proc_name, args=())]
    def create_constraint(self, constraint):
        """Schedule commands adding *constraint* to the table, plus the
        trigger-based emulation when it is not natively inheritable.
        """
        # Add the constraint normally to our table
        #
        my_alter = dbops.AlterTable(self.name)
        add_constr = AlterTableAddMultiConstraint(constraint=constraint)
        my_alter.add_command(add_constr)
        self.add_command(my_alter)
        if not constraint.is_natively_inherited():
            # The constraint is not inherited by descendant tables natively,
            # use triggers to emulate inheritance.
            #
            # Create trigger function
            #
            proc_name = constraint.get_trigger_procname()
            proc_text = constraint.get_trigger_proc_text()
            proc = dbops.CreateFunction(
                dbops.Function(
                    name=proc_name, text=proc_text, volatility='stable',
                    returns='trigger', language='plpgsql'))
            self.add_command(proc)
            # Add a (disabled) inheritable trigger on self.
            # Trigger inheritance will propagate and maintain
            # the trigger on current and future descendants.
            #
            cr_trigger = self.create_constr_trigger(
                self.name, constraint, proc_name)
            self.add_commands(cr_trigger)
    def rename_constraint(self, old_constraint, new_constraint):
        """Schedule commands renaming the constraint and, if emulated,
        its trigger procedure and triggers.
        """
        # Rename the native constraint(s) normally
        #
        rename_constr = AlterTableRenameMultiConstraint(
            name=self.name, constraint=old_constraint,
            new_constraint=new_constraint)
        self.add_command(rename_constr)
        if not old_constraint.is_natively_inherited():
            # Alter trigger function
            #
            old_proc_name = old_constraint.get_trigger_procname()
            new_proc_name = new_constraint.get_trigger_procname()
            rename_proc = dbops.RenameFunction(
                name=old_proc_name, args=(), new_name=new_proc_name)
            self.add_command(rename_proc)
            # The procedure body embeds the constraint name; refresh it.
            new_proc_text = new_constraint.get_trigger_proc_text()
            alter_text = dbops.AlterFunctionReplaceText(
                name=new_proc_name, args=(), new_text=new_proc_text)
            self.add_command(alter_text)
            mv_trigger = self.rename_constr_trigger(self.name)
            self.add_commands(mv_trigger)
    def alter_constraint(self, old_constraint, new_constraint):
        """Schedule commands bringing db structures in line with the
        abstract/concrete state of the new constraint.
        """
        if old_constraint.is_abstract and not new_constraint.is_abstract:
            # No longer abstract, create db structures
            self.create_constraint(new_constraint)
        elif not old_constraint.is_abstract and new_constraint.is_abstract:
            # Now abstract, drop db structures
            self.drop_constraint(new_constraint)
        else:
            # Some other modification, drop/create
            self.drop_constraint(new_constraint)
            self.create_constraint(new_constraint)
    def drop_constraint(self, constraint):
        """Schedule commands dropping the constraint and, if emulated,
        its triggers and trigger procedure.
        """
        if not constraint.is_natively_inherited():
            self.add_commands(self.drop_constr_trigger(self.name, constraint))
            # Drop trigger function
            #
            # NOTE(review): built as (schema, name + '_trigproc') here while
            # create_constraint uses constraint.get_trigger_procname();
            # presumably both produce the same name -- confirm.
            proc_name = constraint.raw_constraint_name() + '_trigproc'
            proc_name = self.name[0], common.edgedb_name_to_pg_name(proc_name)
            self.add_commands(self.drop_constr_trigger_function(proc_name))
        # Drop the constraint normally from our table
        #
        my_alter = dbops.AlterTable(self.name)
        drop_constr = AlterTableDropMultiConstraint(constraint=constraint)
        my_alter.add_command(drop_constr)
        self.add_command(my_alter)
class AlterTableAddInheritableConstraint(AlterTableInheritableConstraintBase):
    """Add an inheritable constraint to a table."""
    def __repr__(self):
        return '<{}.{} {!r}>'.format(
            self.__class__.__module__, self.__class__.__name__,
            self._constraint)
    async def _execute(self, context, code, vars):
        # Abstract constraints have no backing db objects to create.
        if not self._constraint.is_abstract:
            self.create_constraint(self._constraint)
        await super()._execute(context, code, vars)
class AlterTableRenameInheritableConstraint(
        AlterTableInheritableConstraintBase):
    """Rename an inheritable constraint on a table."""
    def __init__(self, name, *, constraint, new_constraint, **kwargs):
        super().__init__(name, constraint=constraint, **kwargs)
        # The post-rename constraint descriptor; consumed by
        # rename_constr_trigger() in the base class.
        self._new_constraint = new_constraint
    def __repr__(self):
        return '<{}.{} {!r}>'.format(
            self.__class__.__module__, self.__class__.__name__,
            self._constraint)
    async def execute(self, context):
        # Abstract constraints have no backing db objects to rename.
        if not self._constraint.is_abstract:
            self.rename_constraint(self._constraint, self._new_constraint)
        await super().execute(context)
class AlterTableAlterInheritableConstraint(
        AlterTableInheritableConstraintBase):
    """Alter an inheritable constraint on a table."""

    def __init__(self, name, *, constraint, new_constraint, **kwargs):
        super().__init__(name, constraint=constraint, **kwargs)
        # The altered constraint descriptor used by alter_constraint().
        self._new_constraint = new_constraint

    def __repr__(self):
        cls = type(self)
        return '<{}.{} {!r}>'.format(
            cls.__module__, cls.__name__, self._constraint)

    async def execute(self, context):
        """Schedule the drop/create commands for the change, then run."""
        self.alter_constraint(self._constraint, self._new_constraint)
        await super().execute(context)
class AlterTableDropInheritableConstraint(AlterTableInheritableConstraintBase):
    """Drop an inheritable constraint from a table."""

    def __repr__(self):
        cls = type(self)
        return '<{}.{} {!r}>'.format(
            cls.__module__, cls.__name__, self._constraint)

    async def execute(self, context):
        """Schedule the drop commands (unless abstract), then run."""
        constraint = self._constraint
        if not constraint.is_abstract:
            self.drop_constraint(constraint)
        await super().execute(context)
class MappingIndex(dbops.Index):
    """Partial unique index enforcing link cardinality for a set of links.

    The index covers self.columns and applies only to rows whose
    ptr_item_id is one of the given links' class ids.
    """
    def __init__(self, name_prefix, cardinality, link_names, table_name):
        # Name is derived lazily in creation_code() (needs the class map),
        # hence None here; the index is always unique.
        super().__init__(None, table_name, True)
        self.link_names = link_names
        self.name_prefix = name_prefix
        self.cardinality = cardinality
    async def creation_code(self, context):
        """Return the CREATE UNIQUE INDEX statement for this index."""
        link_map = await context.get_class_map()
        # Sort ids so the generated name is deterministic.
        ids = tuple(sorted(list(link_map[n] for n in self.link_names)))
        id_str = '_'.join(str(i) for i in ids)
        name = '%s_%s_%s_cardinality_idx' % (
            self.name_prefix, id_str, self.cardinality)
        name = common.edgedb_name_to_pg_name(name)
        predicate = 'ptr_item_id IN (%s)' % ', '.join(str(i) for i in ids)
        # NOTE(review): '{table}s' appends a literal 's' to the quoted
        # table name -- looks suspicious but is preserved as-is; confirm
        # against the actual table naming scheme.
        code = '''
            CREATE {unique} INDEX {name} ON {table}s ({cols}) {predicate}
        '''.format(unique='UNIQUE',
                   name=common.qname(name),
                   table=common.qname(*self.table_name),
                   cols=', '.join(common.quote_ident(c) for c in self.columns),
                   predicate=('WHERE {}'.format(predicate)))
        return code
    def __repr__(self):
        # The real name needs the class map, so show a placeholder hash.
        name = '%s_%s_%s_cardinality_idx' % (
            self.name_prefix, '<HASH>', self.cardinality)
        predicate = 'ptr_item_id IN (%s)' % ', '.join(
            str(n) for n in self.link_names)
        # BUGFIX: the format string was '<{mod.{cls} ...' (unbalanced
        # braces), which raised AttributeError at runtime.
        return \
            '<{mod}.{cls} name="{name}" cols=({cols}) unique={uniq} ' \
            'predicate={pred}>'.format(
                mod=self.__class__.__module__,
                cls=self.__class__.__name__,
                name=name,
                cols=','.join(self.columns),
                uniq=self.unique,
                pred=predicate)
class MangleExprObjectRefs(dbops.Command):
    """Rewrite schema-object references inside a stored expression.

    Known schema names in *expr* are replaced with opaque
    ``__class__::<id>`` references, and the rewritten expression is
    written back to *field* of the metaschema row for *scls*.
    """
    def __init__(self, *, scls, field, expr,
                 conditions=None, neg_conditions=None, priority=0):
        super().__init__(
            conditions=conditions, neg_conditions=neg_conditions,
            priority=priority)
        self.name = scls.name
        self.table = metaschema.get_metaclass_table(scls.__class__)
        self.field = common.edgedb_name_to_pg_name(field)
        self.expr = expr
    async def execute(self, context):
        # NOTE(review): MappingIndex.creation_code uses
        # context.get_class_map() while this uses the underscored
        # variant -- confirm both are intended.
        class_map = await context._get_class_map()
        def _cb(name):
            # Replace names present in the class map with stable
            # class-id references; leave unknown names untouched.
            clsid = class_map.get(name)
            if clsid:
                return sn.Name(module='__class__', name=str(clsid))
            else:
                return name
        expr = edgeql.rewrite_refs(self.expr, _cb)
        rec = self.table.record()
        setattr(rec, self.field, expr)
        condition = [('name', str(self.name))]
        upd = dbops.Update(table=self.table, record=rec, condition=condition)
        await upd.execute(context)
|
[
"edb.server.pgsql.dbops.Trigger",
"edb.server.pgsql.dbops.Function",
"edb.server.pgsql.dbops.Query",
"edb.server.pgsql.common.edgedb_name_to_pg_name",
"edb.server.pgsql.dbops.CompositeCommandGroup.__init__",
"edb.server.pgsql.common.quote_ident",
"edb.server.pgsql.dbops.CreateTrigger",
"edb.lang.common.adapter.Adapter.__init__",
"edb.server.pgsql.dbops.AlterTriggerRenameTo",
"edb.server.pgsql.dbops.CommandGroup.__init__",
"edb.server.pgsql.dbops.DisableTrigger",
"edb.server.pgsql.common.qname",
"edb.server.pgsql.dbops.Merge",
"edb.server.pgsql.dbops.AlterTable",
"edb.server.pgsql.dbops.RenameFunction",
"edb.server.pgsql.dbops.DropFunction",
"edb.server.pgsql.dbops.Update",
"edb.server.pgsql.dbops.AlterTableBaseMixin.__init__",
"edb.server.pgsql.dbops.Comment",
"edb.lang.edgeql.rewrite_refs",
"edb.server.pgsql.dbops.AlterFunctionReplaceText",
"edb.server.pgsql.dbops.AlterTableRenameConstraintSimple",
"edb.server.pgsql.dbops.catalogs.PgDescriptionTable",
"edb.server.pgsql.dbops.DropTrigger",
"edb.server.pgsql.metaschema.get_metaclass_table"
] |
[((1239, 1301), 'edb.lang.common.adapter.Adapter.__init__', 'adapter.Adapter.__init__', (['cls', 'name', 'bases', 'dct'], {'adapts': 'adapts'}), '(cls, name, bases, dct, adapts=adapts)\n', (1263, 1301), False, 'from edb.lang.common import adapter\n'), ((2419, 2454), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (['name'], {}), '(name)\n', (2448, 2454), False, 'from edb.server.pgsql import common\n'), ((2808, 2845), 'edb.server.pgsql.dbops.Comment', 'dbops.Comment', ([], {'object': 'self', 'text': 'text'}), '(object=self, text=text)\n', (2821, 2845), False, 'from edb.server.pgsql import dbops\n'), ((2991, 3042), 'edb.server.pgsql.dbops.Comment', 'dbops.Comment', ([], {'object': 'new_constraint', 'text': 'new_name'}), '(object=new_constraint, text=new_name)\n', (3004, 3042), False, 'from edb.server.pgsql import dbops\n'), ((3807, 3839), 'edb.server.pgsql.dbops.catalogs.PgDescriptionTable', 'pg_catalogs.PgDescriptionTable', ([], {}), '()\n', (3837, 3839), True, 'from edb.server.pgsql.dbops import catalogs as pg_catalogs\n'), ((4047, 4393), 'edb.server.pgsql.dbops.Query', 'dbops.Query', (['"""(SELECT c.oid\n FROM\n pg_class c INNER JOIN pg_namespace ns\n ON c.relnamespace = ns.oid\n WHERE\n c.relname = \'pg_constraint\' AND\n ns.nspname = \'pg_catalog\')\n """', '[]'], {'type': '"""oid"""'}), '(\n """(SELECT c.oid\n FROM\n pg_class c INNER JOIN pg_namespace ns\n ON c.relnamespace = ns.oid\n WHERE\n c.relname = \'pg_constraint\' AND\n ns.nspname = \'pg_catalog\')\n """\n , [], type=\'oid\')\n', (4058, 4393), False, 'from edb.server.pgsql import dbops\n'), ((4611, 4663), 'edb.server.pgsql.dbops.Merge', 'dbops.Merge', ([], {'table': 'table', 'record': 'rec', 'condition': 'cond'}), '(table=table, record=rec, condition=cond)\n', (4622, 4663), False, 'from edb.server.pgsql import dbops\n'), ((10217, 10241), 'edb.server.pgsql.common.quote_ident', 'common.quote_ident', (['name'], {}), '(name)\n', (10235, 10241), False, 'from 
edb.server.pgsql import common\n'), ((11981, 12092), 'edb.server.pgsql.dbops.CommandGroup.__init__', 'dbops.CommandGroup.__init__', (['self'], {'conditions': 'conditions', 'neg_conditions': 'neg_conditions', 'priority': 'priority'}), '(self, conditions=conditions, neg_conditions=\n neg_conditions, priority=priority)\n', (12008, 12092), False, 'from edb.server.pgsql import dbops\n'), ((12122, 12194), 'edb.server.pgsql.dbops.AlterTableBaseMixin.__init__', 'dbops.AlterTableBaseMixin.__init__', (['self'], {'name': 'name', 'contained': 'contained'}), '(self, name=name, contained=contained)\n', (12156, 12194), False, 'from edb.server.pgsql import dbops\n'), ((14705, 14824), 'edb.server.pgsql.dbops.CompositeCommandGroup.__init__', 'dbops.CompositeCommandGroup.__init__', (['self'], {'conditions': 'conditions', 'neg_conditions': 'neg_conditions', 'priority': 'priority'}), '(self, conditions=conditions,\n neg_conditions=neg_conditions, priority=priority)\n', (14741, 14824), False, 'from edb.server.pgsql import dbops\n'), ((14855, 14927), 'edb.server.pgsql.dbops.AlterTableBaseMixin.__init__', 'dbops.AlterTableBaseMixin.__init__', (['self'], {'name': 'name', 'contained': 'contained'}), '(self, name=name, contained=contained)\n', (14889, 14927), False, 'from edb.server.pgsql import dbops\n'), ((15149, 15201), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (["(cname + '_instrigger')"], {}), "(cname + '_instrigger')\n", (15178, 15201), False, 'from edb.server.pgsql import common\n'), ((15224, 15363), 'edb.server.pgsql.dbops.Trigger', 'dbops.Trigger', ([], {'name': 'ins_trigger_name', 'table_name': 'table_name', 'events': "('insert',)", 'procedure': 'proc_name', 'is_constraint': '(True)', 'inherit': '(True)'}), "(name=ins_trigger_name, table_name=table_name, events=(\n 'insert',), procedure=proc_name, is_constraint=True, inherit=True)\n", (15237, 15363), False, 'from edb.server.pgsql import dbops\n'), ((15410, 15442), 
'edb.server.pgsql.dbops.CreateTrigger', 'dbops.CreateTrigger', (['ins_trigger'], {}), '(ins_trigger)\n', (15429, 15442), False, 'from edb.server.pgsql import dbops\n'), ((15510, 15559), 'edb.server.pgsql.dbops.DisableTrigger', 'dbops.DisableTrigger', (['ins_trigger'], {'self_only': '(True)'}), '(ins_trigger, self_only=True)\n', (15530, 15559), False, 'from edb.server.pgsql import dbops\n'), ((15629, 15681), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (["(cname + '_updtrigger')"], {}), "(cname + '_updtrigger')\n", (15658, 15681), False, 'from edb.server.pgsql import common\n'), ((15760, 15925), 'edb.server.pgsql.dbops.Trigger', 'dbops.Trigger', ([], {'name': 'upd_trigger_name', 'table_name': 'table_name', 'events': "('update',)", 'procedure': 'proc_name', 'condition': 'condition', 'is_constraint': '(True)', 'inherit': '(True)'}), "(name=upd_trigger_name, table_name=table_name, events=(\n 'update',), procedure=proc_name, condition=condition, is_constraint=\n True, inherit=True)\n", (15773, 15925), False, 'from edb.server.pgsql import dbops\n'), ((15979, 16011), 'edb.server.pgsql.dbops.CreateTrigger', 'dbops.CreateTrigger', (['upd_trigger'], {}), '(upd_trigger)\n', (15998, 16011), False, 'from edb.server.pgsql import dbops\n'), ((16079, 16128), 'edb.server.pgsql.dbops.DisableTrigger', 'dbops.DisableTrigger', (['upd_trigger'], {'self_only': '(True)'}), '(upd_trigger, self_only=True)\n', (16099, 16128), False, 'from edb.server.pgsql import dbops\n'), ((16449, 16501), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (["(cname + '_instrigger')"], {}), "(cname + '_instrigger')\n", (16478, 16501), False, 'from edb.server.pgsql import common\n'), ((16529, 16582), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (["(ncname + '_instrigger')"], {}), "(ncname + '_instrigger')\n", (16558, 16582), False, 'from edb.server.pgsql import common\n'), ((16619, 16755), 
'edb.server.pgsql.dbops.Trigger', 'dbops.Trigger', ([], {'name': 'ins_trigger_name', 'table_name': 'table_name', 'events': "('insert',)", 'procedure': '"""null"""', 'is_constraint': '(True)', 'inherit': '(True)'}), "(name=ins_trigger_name, table_name=table_name, events=(\n 'insert',), procedure='null', is_constraint=True, inherit=True)\n", (16632, 16755), False, 'from edb.server.pgsql import dbops\n'), ((16803, 16869), 'edb.server.pgsql.dbops.AlterTriggerRenameTo', 'dbops.AlterTriggerRenameTo', (['ins_trigger'], {'new_name': 'new_ins_trg_name'}), '(ins_trigger, new_name=new_ins_trg_name)\n', (16829, 16869), False, 'from edb.server.pgsql import dbops\n'), ((16911, 16963), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (["(cname + '_updtrigger')"], {}), "(cname + '_updtrigger')\n", (16940, 16963), False, 'from edb.server.pgsql import common\n'), ((16991, 17044), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (["(ncname + '_updtrigger')"], {}), "(ncname + '_updtrigger')\n", (17020, 17044), False, 'from edb.server.pgsql import common\n'), ((17081, 17217), 'edb.server.pgsql.dbops.Trigger', 'dbops.Trigger', ([], {'name': 'upd_trigger_name', 'table_name': 'table_name', 'events': "('update',)", 'procedure': '"""null"""', 'is_constraint': '(True)', 'inherit': '(True)'}), "(name=upd_trigger_name, table_name=table_name, events=(\n 'update',), procedure='null', is_constraint=True, inherit=True)\n", (17094, 17217), False, 'from edb.server.pgsql import dbops\n'), ((17265, 17331), 'edb.server.pgsql.dbops.AlterTriggerRenameTo', 'dbops.AlterTriggerRenameTo', (['upd_trigger'], {'new_name': 'new_upd_trg_name'}), '(upd_trigger, new_name=new_upd_trg_name)\n', (17291, 17331), False, 'from edb.server.pgsql import dbops\n'), ((17531, 17583), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (["(cname + '_instrigger')"], {}), "(cname + '_instrigger')\n", (17560, 17583), False, 'from 
edb.server.pgsql import common\n'), ((17606, 17742), 'edb.server.pgsql.dbops.Trigger', 'dbops.Trigger', ([], {'name': 'ins_trigger_name', 'table_name': 'table_name', 'events': "('insert',)", 'procedure': '"""null"""', 'is_constraint': '(True)', 'inherit': '(True)'}), "(name=ins_trigger_name, table_name=table_name, events=(\n 'insert',), procedure='null', is_constraint=True, inherit=True)\n", (17619, 17742), False, 'from edb.server.pgsql import dbops\n'), ((17792, 17822), 'edb.server.pgsql.dbops.DropTrigger', 'dbops.DropTrigger', (['ins_trigger'], {}), '(ins_trigger)\n', (17809, 17822), False, 'from edb.server.pgsql import dbops\n'), ((17851, 17903), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (["(cname + '_updtrigger')"], {}), "(cname + '_updtrigger')\n", (17880, 17903), False, 'from edb.server.pgsql import common\n'), ((17926, 18062), 'edb.server.pgsql.dbops.Trigger', 'dbops.Trigger', ([], {'name': 'upd_trigger_name', 'table_name': 'table_name', 'events': "('update',)", 'procedure': '"""null"""', 'is_constraint': '(True)', 'inherit': '(True)'}), "(name=upd_trigger_name, table_name=table_name, events=(\n 'update',), procedure='null', is_constraint=True, inherit=True)\n", (17939, 18062), False, 'from edb.server.pgsql import dbops\n'), ((18112, 18142), 'edb.server.pgsql.dbops.DropTrigger', 'dbops.DropTrigger', (['upd_trigger'], {}), '(upd_trigger)\n', (18129, 18142), False, 'from edb.server.pgsql import dbops\n'), ((18439, 18466), 'edb.server.pgsql.dbops.AlterTable', 'dbops.AlterTable', (['self.name'], {}), '(self.name)\n', (18455, 18466), False, 'from edb.server.pgsql import dbops\n'), ((21748, 21775), 'edb.server.pgsql.dbops.AlterTable', 'dbops.AlterTable', (['self.name'], {}), '(self.name)\n', (21764, 21775), False, 'from edb.server.pgsql import dbops\n'), ((24593, 24628), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (['name'], {}), '(name)\n', (24622, 24628), False, 'from 
edb.server.pgsql import common\n'), ((26020, 26066), 'edb.server.pgsql.metaschema.get_metaclass_table', 'metaschema.get_metaclass_table', (['scls.__class__'], {}), '(scls.__class__)\n', (26050, 26066), False, 'from edb.server.pgsql import metaschema\n'), ((26088, 26124), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (['field'], {}), '(field)\n', (26117, 26124), False, 'from edb.server.pgsql import common\n'), ((26455, 26490), 'edb.lang.edgeql.rewrite_refs', 'edgeql.rewrite_refs', (['self.expr', '_cb'], {}), '(self.expr, _cb)\n', (26474, 26490), False, 'from edb.lang import edgeql\n'), ((26626, 26689), 'edb.server.pgsql.dbops.Update', 'dbops.Update', ([], {'table': 'self.table', 'record': 'rec', 'condition': 'condition'}), '(table=self.table, record=rec, condition=condition)\n', (26638, 26689), False, 'from edb.server.pgsql import dbops\n'), ((2470, 2494), 'edb.server.pgsql.common.quote_ident', 'common.quote_ident', (['name'], {}), '(name)\n', (2488, 2494), False, 'from edb.server.pgsql import common\n'), ((7099, 7123), 'edb.server.pgsql.common.quote_ident', 'common.quote_ident', (['name'], {}), '(name)\n', (7117, 7123), False, 'from edb.server.pgsql import common\n'), ((11616, 11659), 'edb.server.pgsql.dbops.Comment', 'dbops.Comment', (['self.constraint', 'constr_name'], {}), '(self.constraint, constr_name)\n', (11629, 11659), False, 'from edb.server.pgsql import dbops\n'), ((13012, 13108), 'edb.server.pgsql.dbops.AlterTableRenameConstraintSimple', 'dbops.AlterTableRenameConstraintSimple', ([], {'name': 'self.name', 'old_name': 'old_name', 'new_name': 'new_name'}), '(name=self.name, old_name=old_name,\n new_name=new_name)\n', (13050, 13108), False, 'from edb.server.pgsql import dbops\n'), ((13711, 13758), 'edb.server.pgsql.dbops.Comment', 'dbops.Comment', (['self.new_constraint', 'constr_name'], {}), '(self.new_constraint, constr_name)\n', (13724, 13758), False, 'from edb.server.pgsql import dbops\n'), ((18268, 18311), 
'edb.server.pgsql.dbops.DropFunction', 'dbops.DropFunction', ([], {'name': 'proc_name', 'args': '()'}), '(name=proc_name, args=())\n', (18286, 18311), False, 'from edb.server.pgsql import dbops\n'), ((20153, 20226), 'edb.server.pgsql.dbops.RenameFunction', 'dbops.RenameFunction', ([], {'name': 'old_proc_name', 'args': '()', 'new_name': 'new_proc_name'}), '(name=old_proc_name, args=(), new_name=new_proc_name)\n', (20173, 20226), False, 'from edb.server.pgsql import dbops\n'), ((20379, 20467), 'edb.server.pgsql.dbops.AlterFunctionReplaceText', 'dbops.AlterFunctionReplaceText', ([], {'name': 'new_proc_name', 'args': '()', 'new_text': 'new_proc_text'}), '(name=new_proc_name, args=(), new_text=\n new_proc_text)\n', (20409, 20467), False, 'from edb.server.pgsql import dbops\n'), ((11500, 11538), 'edb.server.pgsql.dbops.Comment', 'dbops.Comment', (['constraint', 'constr_name'], {}), '(constraint, constr_name)\n', (11513, 11538), False, 'from edb.server.pgsql import dbops\n'), ((12719, 12815), 'edb.server.pgsql.dbops.AlterTableRenameConstraintSimple', 'dbops.AlterTableRenameConstraintSimple', ([], {'name': 'self.name', 'old_name': 'old_name', 'new_name': 'new_name'}), '(name=self.name, old_name=old_name,\n new_name=new_name)\n', (12757, 12815), False, 'from edb.server.pgsql import dbops\n'), ((13595, 13633), 'edb.server.pgsql.dbops.Comment', 'dbops.Comment', (['constraint', 'constr_name'], {}), '(constraint, constr_name)\n', (13608, 13633), False, 'from edb.server.pgsql import dbops\n'), ((19038, 19149), 'edb.server.pgsql.dbops.Function', 'dbops.Function', ([], {'name': 'proc_name', 'text': 'proc_text', 'volatility': '"""stable"""', 'returns': '"""trigger"""', 'language': '"""plpgsql"""'}), "(name=proc_name, text=proc_text, volatility='stable', returns\n ='trigger', language='plpgsql')\n", (19052, 19149), False, 'from edb.server.pgsql import dbops\n'), ((21546, 21586), 'edb.server.pgsql.common.edgedb_name_to_pg_name', 'common.edgedb_name_to_pg_name', (['proc_name'], {}), 
'(proc_name)\n', (21575, 21586), False, 'from edb.server.pgsql import common\n'), ((24860, 24878), 'edb.server.pgsql.common.qname', 'common.qname', (['name'], {}), '(name)\n', (24872, 24878), False, 'from edb.server.pgsql import common\n'), ((24905, 24935), 'edb.server.pgsql.common.qname', 'common.qname', (['*self.table_name'], {}), '(*self.table_name)\n', (24917, 24935), False, 'from edb.server.pgsql import common\n'), ((24971, 24992), 'edb.server.pgsql.common.quote_ident', 'common.quote_ident', (['c'], {}), '(c)\n', (24989, 24992), False, 'from edb.server.pgsql import common\n')]
|
#!/usr/bin/env python
import rospy
from math import cos, sin, atan, pi
import numpy as np
import yaml
import sys
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Float64
import pdb
# Publishes the computed wall-following error for the PID controller node.
pub = rospy.Publisher('pid_error', Float64, queue_size=10)
# You can define constants in Python as uppercase global names like these.
# NOTE(review): the four MIN/MAX constants are not referenced in the visible
# code; presumably intended for clamping ranges/angles -- confirm.
MIN_DISTANCE = 0.1
MAX_DISTANCE = 30.0
MIN_ANGLE = -45.0
MAX_ANGLE = 225.0
# Latest beam samples, kept in module globals (written by the follow*
# functions): a/b for single-wall following, al/bl and ar/br for centering.
a = 0.0
b = 0.0
al = 0.0
bl = 0.0
ar = 0.0
br = 0.0
# data: single message from topic /scan
# angle: between -45 to 225 degrees, where 0 degrees is directly to the right
# Outputs length in meters to object with angle in lidar scan field of view
def getRange(data, angle):
    """Return the LIDAR range (meters) at *angle* degrees.

    data: single LaserScan message from topic /scan.
    angle: between -45 and 225 degrees, where 0 degrees is directly to
        the right of the car.
    """
    ranges = np.asarray(data.ranges)
    # The Hokuyo UST-10LX has 0.25-degree resolution and the scan starts
    # at -45 degrees, so each degree spans 4 samples.  Round to int:
    # a float angle previously produced a float index, which NumPy
    # rejects with IndexError.
    angle_index = int(round((angle + 45) * 4))
    output_range = ranges[angle_index]
    return output_range
# data: single message from topic /scan
# desired_distance: desired distance to the left wall [meters]
# Outputs the PID error required to make the car follow the left wall.
def followLeft(data, desired_distance):
    """Return the PID error for following the left wall.

    data: single LaserScan message from topic /scan.
    desired_distance: desired distance to the left wall [meters].
    """
    global a, b
    # Lookahead distance along the car's heading (previous value: 0.025).
    L = 0.015
    a = getRange(data, 135)  # beam 45 deg off the left perpendicular
    b = getRange(data, 180)  # beam perpendicular to the left wall
    theta = 45 * pi / 180
    # Standard two-beam wall-following geometry: angle between the car's
    # heading and the wall.
    alpha = atan((a * cos(theta) - b) / (a * sin(theta)))
    current_dist = b * cos(alpha)
    # Project the distance L ahead; negative error means too close.
    # (Removed a no-op re-assignment and an unused next_dist variable.)
    error_t = -(current_dist - desired_distance + L * sin(alpha))
    return error_t
# data: single message from topic /scan
# desired_distance: desired distance to the right wall [meters]
# Outputs the PID error required to make the car follow the right wall.
def followRight(data, desired_distance):
    """Return the PID error for following the right wall.

    data: single LaserScan message from topic /scan.
    desired_distance: desired distance to the right wall [meters].
    """
    global a, b
    # Lookahead distance along the car's heading.
    L = 0.025
    a = getRange(data, 45)  # beam 45 deg off the right perpendicular
    b = getRange(data, 0)   # beam perpendicular to the right wall
    theta = 45 * pi / 180
    # Standard two-beam wall-following geometry: angle between the car's
    # heading and the wall.
    alpha = atan((a * cos(theta) - b) / (a * sin(theta)))
    current_dist = b * cos(alpha)
    # Project the distance L ahead; negative error means too close.
    # (Removed a no-op re-assignment and an unused next_dist variable.)
    error_t = -(current_dist - desired_distance + L * sin(alpha))
    return error_t
# data: single message from topic /scan
# Outputs the PID error required to make the car drive in the middle
# of the hallway.
def followCenter(data):
    """Return the PID error that keeps the car centered in the hallway.

    data: single LaserScan message from topic /scan.
    """
    global al, bl, ar, br
    # Lookahead distance along the car's heading.
    L = 0.025
    al = getRange(data, 135)  # left wall, 45 deg off perpendicular
    bl = getRange(data, 180)  # left wall, perpendicular beam
    ar = getRange(data, 0)    # right wall, perpendicular beam
    br = getRange(data, 45)   # right wall, 45 deg off perpendicular
    theta = 45 * pi / 180
    alpha_l = atan((al * cos(theta) - bl) / (al * sin(theta)))
    # BUGFIX: the right-wall angle must use the offset beam (br, 45 deg)
    # as the "a" term and the perpendicular beam (ar, 0 deg) as "b",
    # mirroring followRight(); the two beams were previously swapped.
    alpha_r = atan((br * cos(theta) - ar) / (br * sin(theta)))
    left_dist = bl * cos(alpha_l)
    right_dist = ar * cos(alpha_r)
    # Aim for the hallway midline.
    desired_distance = (left_dist + right_dist) / 2.0
    error_t = -(right_dist - desired_distance + L * sin(alpha_r))
    return error_t
# Callback for receiving LIDAR data on the /scan topic.
# data: the LIDAR data, published as a list of distances to the wall.
def scan_callback(data):
    """Compute the wall-following error for one LIDAR scan and publish it.

    data: the LIDAR scan, a list of distances to the wall.
    """
    # TODO: swap in followLeft or followRight if wall-hugging is wanted.
    pid_error = followCenter(data)
    error_msg = Float64()
    error_msg.data = pid_error
    pub.publish(error_msg)
# Boilerplate code to start this ROS node.
# DO NOT MODIFY!
if __name__ == '__main__':
    # Register the node, subscribe to LIDAR scans, and hand control to ROS.
    rospy.init_node('pid_error_node', anonymous = True)
    rospy.Subscriber("scan", LaserScan, scan_callback)
    rospy.spin()
|
[
"rospy.Subscriber",
"numpy.asarray",
"rospy.Publisher",
"math.sin",
"std_msgs.msg.Float64",
"rospy.init_node",
"math.cos",
"rospy.spin"
] |
[((203, 255), 'rospy.Publisher', 'rospy.Publisher', (['"""pid_error"""', 'Float64'], {'queue_size': '(10)'}), "('pid_error', Float64, queue_size=10)\n", (218, 255), False, 'import rospy\n'), ((712, 735), 'numpy.asarray', 'np.asarray', (['data.ranges'], {}), '(data.ranges)\n', (722, 735), True, 'import numpy as np\n'), ((3221, 3230), 'std_msgs.msg.Float64', 'Float64', ([], {}), '()\n', (3228, 3230), False, 'from std_msgs.msg import Float64\n'), ((3358, 3407), 'rospy.init_node', 'rospy.init_node', (['"""pid_error_node"""'], {'anonymous': '(True)'}), "('pid_error_node', anonymous=True)\n", (3373, 3407), False, 'import rospy\n'), ((3411, 3461), 'rospy.Subscriber', 'rospy.Subscriber', (['"""scan"""', 'LaserScan', 'scan_callback'], {}), "('scan', LaserScan, scan_callback)\n", (3427, 3461), False, 'import rospy\n'), ((3463, 3475), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3473, 3475), False, 'import rospy\n'), ((1330, 1340), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (1333, 1340), False, 'from math import cos, sin, atan, pi\n'), ((2004, 2014), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (2007, 2014), False, 'from math import cos, sin, atan, pi\n'), ((2704, 2716), 'math.cos', 'cos', (['alpha_l'], {}), '(alpha_l)\n', (2707, 2716), False, 'from math import cos, sin, atan, pi\n'), ((2737, 2749), 'math.cos', 'cos', (['alpha_r'], {}), '(alpha_r)\n', (2740, 2749), False, 'from math import cos, sin, atan, pi\n'), ((1374, 1384), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (1377, 1384), False, 'from math import cos, sin, atan, pi\n'), ((2048, 2058), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (2051, 2058), False, 'from math import cos, sin, atan, pi\n'), ((1296, 1306), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (1299, 1306), False, 'from math import cos, sin, atan, pi\n'), ((1437, 1447), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (1440, 1447), False, 'from math import cos, sin, atan, pi\n'), ((1970, 1980), 'math.sin', 'sin', (['theta'], {}), 
'(theta)\n', (1973, 1980), False, 'from math import cos, sin, atan, pi\n'), ((2111, 2121), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (2114, 2121), False, 'from math import cos, sin, atan, pi\n'), ((2611, 2621), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2614, 2621), False, 'from math import cos, sin, atan, pi\n'), ((2672, 2682), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (2675, 2682), False, 'from math import cos, sin, atan, pi\n'), ((2852, 2864), 'math.sin', 'sin', (['alpha_r'], {}), '(alpha_r)\n', (2855, 2864), False, 'from math import cos, sin, atan, pi\n'), ((1273, 1283), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1276, 1283), False, 'from math import cos, sin, atan, pi\n'), ((1947, 1957), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (1950, 1957), False, 'from math import cos, sin, atan, pi\n'), ((2586, 2596), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2589, 2596), False, 'from math import cos, sin, atan, pi\n'), ((2647, 2657), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (2650, 2657), False, 'from math import cos, sin, atan, pi\n')]
|
"""
Utility functions go here.
SpiceBucks
"""
# ------------------------------------------------------------------
import sys
import numpy as np
from util.message import message
# ------------------------------------------------------------------
# -----------------------------------------------------------------------------------------
# system functions
# ----------------------------------------z-------------------------------------------------
def exit(code):
    """
    Exit the program, 0 is failure, 1 is success.

    Parameters
    ----------
    code : int
        Exit status: 0 logs a failure, 1 logs success, any other int is
        reported as an unknown error status. Non-int values are logged as
        an error and the process exits with failure status.
    """
    if not isinstance(code, int):
        # Bug fix: the original message misspelled "integer".
        message.logError("Exit code must be an integer.")
        # Re-enter with a failure status so an invalid call still terminates.
        exit(0)
    if code == 0:
        message.logError("Exiting program with failure status.")
    elif code == 1:
        message.logDebug("Exiting program with success status.")
    else:
        message.logError(
            "Exiting program with unknown error status (" + str(code) + ")"
        )
    sys.exit()
|
[
"util.message.message.logError",
"util.message.message.logDebug",
"sys.exit"
] |
[((944, 954), 'sys.exit', 'sys.exit', ([], {}), '()\n', (952, 954), False, 'import sys\n'), ((583, 633), 'util.message.message.logError', 'message.logError', (['"""Exit code must be an interger."""'], {}), "('Exit code must be an interger.')\n", (599, 633), False, 'from util.message import message\n'), ((676, 732), 'util.message.message.logError', 'message.logError', (['"""Exiting program with failure status."""'], {}), "('Exiting program with failure status.')\n", (692, 732), False, 'from util.message import message\n'), ((761, 817), 'util.message.message.logDebug', 'message.logDebug', (['"""Exiting program with success status."""'], {}), "('Exiting program with success status.')\n", (777, 817), False, 'from util.message import message\n')]
|
import RPi.GPIO as GPIO
import time
from flask import Flask, render_template
import threading
# Module-level setup: web app, concurrency guard, and stepper-motor GPIO state.
app = Flask('dispener-server')
# Serializes motor commands so overlapping HTTP requests cannot interleave steps.
lock = threading.Lock()
# Timestamp (time.time()) of the last dispensed treat; used for rate limiting.
last_treat = 0
# Minimum number of seconds between two treat dispenses.
TREAT_MIN_SECONDS = 2
GPIO.setmode(GPIO.BOARD)
# Physical board pins driving the stepper motor coils.
control_pins = [7,11,13,15]
for pin in control_pins:
    GPIO.setup(pin, GPIO.OUT)
    GPIO.output(pin, 0)
# Half-step excitation sequence for the 4 coils; one row per half-step.
halfstep_seq = [
  [0,1,0,1],
  [0,0,0,1],
  [1,0,0,1],
  [1,0,0,0],
  [1,0,1,0],
  [0,0,1,0],
  [0,1,1,0],
  [0,1,0,0],
]
def get_step(n):
    """Return the half-step coil pattern for step index *n*, wrapping around."""
    return halfstep_seq[n % len(halfstep_seq)]
pos = 0
def stop():
    """Drive every control pin high to stop the motor."""
    for control_pin in control_pins:
        GPIO.output(control_pin, 1)
def step():
    """Advance the motor by 33 half-steps, at most once per TREAT_MIN_SECONDS."""
    with lock:  # serialize concurrent /step requests
        global pos
        global last_treat
        # Rate limit: silently ignore requests arriving too soon after the last one.
        if time.time() - last_treat < TREAT_MIN_SECONDS:
            return
        last_treat = time.time()
        for i in range(pos, pos+33):
            s = get_step(i)
            # Pins are active-low here: 1-s[pin] inverts the sequence bit.
            for pin in range(len(control_pins)):
                GPIO.output(control_pins[pin], 1-s[pin])
            time.sleep(.005)
        time.sleep(.1)
        stop()
        pos = pos + 33
        # Nudge pos off values ending in 99 — presumably avoids a bad
        # alignment of the mechanism; TODO confirm intent.
        if pos % 100 == 99:
            pos += 1
@app.route('/')
def index():
    """Serve the dispenser's control page."""
    return render_template('index.html')
@app.route('/step', methods=['POST'])
def call_step():
    """Trigger one motor step cycle; always responds with 'ok'."""
    step()
    return 'ok'
|
[
"RPi.GPIO.setmode",
"RPi.GPIO.setup",
"flask.Flask",
"time.time",
"threading.Lock",
"time.sleep",
"flask.render_template",
"RPi.GPIO.output"
] |
[((102, 126), 'flask.Flask', 'Flask', (['"""dispener-server"""'], {}), "('dispener-server')\n", (107, 126), False, 'from flask import Flask, render_template\n'), ((134, 150), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (148, 150), False, 'import threading\n'), ((189, 213), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (201, 213), True, 'import RPi.GPIO as GPIO\n'), ((272, 297), 'RPi.GPIO.setup', 'GPIO.setup', (['pin', 'GPIO.OUT'], {}), '(pin, GPIO.OUT)\n', (282, 297), True, 'import RPi.GPIO as GPIO\n'), ((302, 321), 'RPi.GPIO.output', 'GPIO.output', (['pin', '(0)'], {}), '(pin, 0)\n', (313, 321), True, 'import RPi.GPIO as GPIO\n'), ((1195, 1224), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (1210, 1224), False, 'from flask import Flask, render_template\n'), ((628, 661), 'RPi.GPIO.output', 'GPIO.output', (['control_pins[pin]', '(1)'], {}), '(control_pins[pin], 1)\n', (639, 661), True, 'import RPi.GPIO as GPIO\n'), ((834, 845), 'time.time', 'time.time', ([], {}), '()\n', (843, 845), False, 'import time\n'), ((1052, 1067), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1062, 1067), False, 'import time\n'), ((1027, 1044), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (1037, 1044), False, 'import time\n'), ((747, 758), 'time.time', 'time.time', ([], {}), '()\n', (756, 758), False, 'import time\n'), ((974, 1016), 'RPi.GPIO.output', 'GPIO.output', (['control_pins[pin]', '(1 - s[pin])'], {}), '(control_pins[pin], 1 - s[pin])\n', (985, 1016), True, 'import RPi.GPIO as GPIO\n')]
|
import os
from anasymod.generators.codegen import CodeGenerator
from anasymod.util import back2fwd, expand_searchpaths
from typing import Union
class ConfigFileObj(CodeGenerator):
    """
    Base container for source files referenced from a config file.

    :param files: Path(s) to sources; relative or absolute, may contain wildcards.
                  A single str is normalized to a one-element list.
    :param config_path: Path to the config file the sources were declared in;
                        used as the reference point when expanding relative paths.
    :param name: Name identifying this source collection.
    """
    def __init__(self, files, config_path, name):
        super().__init__()
        self.files = None
        """ type(str) : mandatory setting; defines the path to sources. The path can be relative, absolute and
        contain wildcards. """

        if isinstance(files, list):
            self.files = files
        elif isinstance(files, str):
            self.files = [files]
        else:
            # Bug fix: the original message blamed a "config_paths" variable and
            # the SubConfig class, which is misleading for every other subclass.
            raise TypeError(f"Type of files variable provided to ConfigFileObj is not a list or str, is:{type(files)} instead.")

        self.config_path = config_path
        self.name = name

    def expand_paths(self):
        """
        Expand environment variables in provided list of paths.
        Check if path is absolute or relative, in case of a relative path, it will be expanded to an absolute path,
        whereas the folder of the config_file will be used to complete the path.
        """
        self.files = expand_searchpaths(paths=self.files, rel_path_reference=os.path.dirname(self.config_path))
class SubConfig(ConfigFileObj):
    """Container for a nested config file referenced from a parent config file."""
    def __init__(self, files: Union[list, str], name, config_path=None):
        super().__init__(files=files, config_path=config_path, name=name)
class Sources(ConfigFileObj):
    """Base class for source files that belong to a named fileset."""
    def __init__(self, files: list, fileset, config_path, name):
        super().__init__(files=files, config_path=config_path, name=name)
        # Fileset the sources shall be associated with.
        self.fileset = fileset
    def generate(self):
        # Subclasses override this to emit their file list.
        pass
    def set_property(self, name, value, objects):
        # Emit a space-joined set_property command line.
        tokens = ['set_property', '-name', name, '-value', value, '-objects', objects]
        self.writeln(' '.join(tokens))
class VerilogSource(Sources):
    """
    Container for source of type Verilog/SystemVerilog.

    :param files: Path to source file, could be relative/absolute and contain wildcards
    :type files: str
    :param version: Verilog version to use when compiling the sources (optional).
    """
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None, version=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)
        self.version = version
        """ type(str) : Verilog version, that shall be used when compiling sources. """
    def generate(self):
        # Emit the list of files and return the rendered text.
        self.text = self.files
        return self.dump()
class VerilogHeader(Sources):
    """Container for Verilog header files (e.g. `define includes)."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)
    def set_header_files(self):
        # Build a Tcl-style list of quoted, forward-slash paths, then mark
        # those files as Verilog headers via a set_property command.
        file_list = '{ ' + ' '.join('"' + back2fwd(file) + '"' for file in self.files) + ' }'
        self.set_property('file_type', '{Verilog Header}', f'[get_files {file_list}]')
    def generate(self):
        self.dump()
class VHDLSource(Sources):
    """Container for source of type VHDL, with optional library and version."""
    def __init__(self, files: Union[list, str], name, library=None, fileset=r"default", config_path=None, version=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)
        self.library = library
        """ type(str) : Library, the source shall be associated with when compiling. """
        self.version = version
        """ type(str) : VHDL version, that shall be used when compiling sources. """
    def generate(self):
        self.dump()
class EDIFFile(Sources):
    """Container for a source of type EDIF."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)
class FirmwareFile(Sources):
    """Container for a firmware source file."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)
class XCIFile(Sources):
    """Container for a source of type XCI."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)
class TCLFile(Sources):
    """Container for a source of type TCL."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)
class XDCFile(Sources):
    """Container for a source of type XDC."""
    def __init__(self, files: Union[list, str], name, fileset=r"default", config_path=None):
        super().__init__(files=files, fileset=fileset, config_path=config_path, name=name)
class MEMFile(Sources):
    """Container for a MEM file source; the single path is stored as a one-element list."""
    def __init__(self, files: str, name, fileset=r"default", config_path=None):
        super().__init__(files=[files], fileset=fileset, config_path=config_path, name=name)
class BDFile(Sources):
    """Container for a BD file source; the single path is stored as a one-element list."""
    def __init__(self, files: str, name, fileset=r"default", config_path=None):
        super().__init__(files=[files], fileset=fileset, config_path=config_path, name=name)
class IPRepo(Sources):
    """Container for an IP repository path; the single path is stored as a one-element list."""
    def __init__(self, files: str, name, fileset=r"default", config_path=None):
        super().__init__(files=[files], fileset=fileset, config_path=config_path, name=name)
class FunctionalModel(Sources):
    """Container for a functional model source; tracks generated HDL files separately."""
    def __init__(self, files: str, name, fileset=r"default", config_path=None):
        super().__init__(files=[files], fileset=fileset, config_path=config_path, name=name)
        # Glob pattern(s) for HDL files produced by model generation;
        # populated via set_gen_files_path().
        self.gen_files = None
    def set_gen_files_path(self, hdl_dir_root):
        """
        Set the result HDL path, where generated files can be found after generation was conducted.
        :param hdl_dir_root: Root directory for gen_files, this is usually set in emu config.
        """
        # TODO: Have the model generator declare what files should be included in "gen_files"
        # It is possible that not everything in the hdl_dir_root is an HDL source (e.g.,
        # temporary files generated during processing, memory files that are included, etc.)
        self.gen_files = [os.path.join(hdl_dir_root, self.fileset, self.name, '*.*v')]
    def expand_gen_files_path(self):
        """
        Expand environment variables in provided list of paths.
        Check if path is absolute or relative, in case of a relative path, it will be expanded to an absolute path,
        whereas the folder of the config_file will be used to complete the path.
        """
        self.gen_files = expand_searchpaths(paths=self.gen_files, rel_path_reference=os.path.dirname(self.config_path))
|
[
"anasymod.util.back2fwd",
"os.path.dirname",
"os.path.join"
] |
[((6050, 6109), 'os.path.join', 'os.path.join', (['hdl_dir_root', 'self.fileset', 'self.name', '"""*.*v"""'], {}), "(hdl_dir_root, self.fileset, self.name, '*.*v')\n", (6062, 6109), False, 'import os\n'), ((1202, 1235), 'os.path.dirname', 'os.path.dirname', (['self.config_path'], {}), '(self.config_path)\n', (1217, 1235), False, 'import os\n'), ((6529, 6562), 'os.path.dirname', 'os.path.dirname', (['self.config_path'], {}), '(self.config_path)\n', (6544, 6562), False, 'import os\n'), ((2829, 2843), 'anasymod.util.back2fwd', 'back2fwd', (['file'], {}), '(file)\n', (2837, 2843), False, 'from anasymod.util import back2fwd, expand_searchpaths\n')]
|
import requests
def extra_metadata_helper(project_url, headers):
    """
    Build extra metadata dict to help with other integrations.

    Parameters
    ----------
    project_url: str
        The url to the project info
    headers: dict
        Figshare Authorization header

    Returns
    -------
    Extra metadata dictionary
    """
    response = requests.get(project_url, headers=headers)
    info = response.json()

    # Split each collaborator's display name on the first space.
    creators = []
    for collaborator in info['collaborators']:
        first, _, last = collaborator['name'].partition(' ')
        creators.append({
            "first_name": first,
            "last_name": last,
            'ORCID': None
        })

    # Prefer the published date when present, otherwise fall back to creation.
    publication_date = info.get('published_date', info['created_date'])

    return {
        "title": info['title'],
        "creators": creators,
        "publication_date": publication_date,
        "description": info['description'],
        "keywords": [],
        "license": None,
        "related_identifiers": [],
        "references": None,
        "notes": None
    }
|
[
"requests.get"
] |
[((368, 410), 'requests.get', 'requests.get', (['project_url'], {'headers': 'headers'}), '(project_url, headers=headers)\n', (380, 410), False, 'import requests\n')]
|
import os
import unittest
from copy import deepcopy
from uuid import uuid4
import pytest
from dotenv import load_dotenv
from culqi import __version__
from culqi.client import Culqi
from culqi.resources import Card
from .data import Data
class CardTest(unittest.TestCase):
    # pylint: disable = too-many-public-methods
    """Integration tests for the Culqi Card resource (HTTP exchanges replayed via pytest-vcr)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Credentials come from the environment (.env file supported).
        load_dotenv()
        self.version = __version__
        self.public_key = os.environ.get("API_PUBLIC_KEY")
        self.private_key = os.environ.get("API_PRIVATE_KEY")
        self.culqi = Culqi(self.public_key, self.private_key)
        self.card = Card(client=self.culqi)
        self.metadata = {"order_id": "0001"}
    def get_card_data(self, code, provider):
        """Create a fresh token and customer for *code*/*provider* and return their ids."""
        email = "<EMAIL>".format(uuid4().hex[:4])
        token_data = deepcopy(Data.CARD[code][provider])
        token_data["email"] = email
        token = self.culqi.token.create(data=token_data)
        customer_data = deepcopy(Data.CUSTOMER)
        customer_data["email"] = email
        customer = self.culqi.customer.create(data=customer_data)
        return {
            "token_id": token["data"]["id"],
            "customer_id": customer["data"]["id"],
        }
    def test_url(self):
        # pylint: disable=protected-access
        id_ = "sample_id"
        assert self.card._get_url() == "https://api.culqi.com/v2/cards"
        assert self.card._get_url(id_) == "https://api.culqi.com/v2/cards/{0}".format(
            id_
        )
    @pytest.mark.vcr()
    def test_card_create(self):
        card_data = self.get_card_data("successful", "visa")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "card"
    @pytest.mark.vcr()
    def test_card_retrieve(self):
        card_data = self.get_card_data("successful", "visa")
        created_card = self.card.create(data=card_data)
        retrieved_card = self.card.read(created_card["data"]["id"])
        assert created_card["data"]["id"] == retrieved_card["data"]["id"]
    @pytest.mark.vcr()
    def test_card_list(self):
        retrieved_card_list = self.card.list(
            headers={
                "Accept-Encoding": "identity",
            },
        )
        assert "items" in retrieved_card_list["data"]
    @pytest.mark.vcr()
    def test_card_update(self):
        card_data = self.get_card_data("successful", "visa")
        created_card = self.card.create(data=card_data)
        metadata = {"metadata": self.metadata}
        updated_card = self.card.update(id_=created_card["data"]["id"], data=metadata)
        # Bug fix: the original compared created_card to itself (tautology);
        # the update must return the same card id it was asked to modify.
        assert created_card["data"]["id"] == updated_card["data"]["id"]
        assert updated_card["data"]["metadata"] == self.metadata
    @pytest.mark.vcr()
    def test_card_delete(self):
        card_data = self.get_card_data("successful", "visa")
        created_card = self.card.create(data=card_data)
        deleted_card = self.card.delete(id_=created_card["data"]["id"])
        assert deleted_card["data"]["deleted"]
        assert deleted_card["data"]["id"] == created_card["data"]["id"]
        assert deleted_card["status"] == 200
    @pytest.mark.vcr()
    def test_card_create__successful__visa(self):
        card_data = self.get_card_data("successful", "visa")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "card"
    @pytest.mark.vcr()
    def test_card_create__successful__master_card(self):
        card_data = self.get_card_data("successful", "master_card")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "card"
    @pytest.mark.vcr()
    def test_card_create__successful__american_express(self):
        card_data = self.get_card_data("successful", "american_express")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "card"
    @pytest.mark.vcr()
    def test_card_create__successful__diners_club(self):
        card_data = self.get_card_data("successful", "diners_club")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "card"
    @pytest.mark.vcr()
    def test_card_create__stolen_card__visa(self):
        card_data = self.get_card_data("stolen_card", "visa")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == "stolen_card"
    @pytest.mark.vcr()
    def test_card_create__lost_card__visa(self):
        card_data = self.get_card_data("lost_card", "visa")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == "lost_card"
    @pytest.mark.vcr()
    def test_card_create__insufficient_funds__visa(self):
        card_data = self.get_card_data("insufficient_funds", "visa")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == "insufficient_funds"
    @pytest.mark.vcr()
    def test_card_create__contact_issuer__master_card(self):
        card_data = self.get_card_data("contact_issuer", "master_card")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == "contact_issuer"
    @pytest.mark.vcr()
    def test_card_create__incorrect_cvv__master_card(self):
        card_data = self.get_card_data("incorrect_cvv", "master_card")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == "incorrect_cvv"
    @pytest.mark.vcr()
    def test_card_create__issuer_not_available__american_express(self):
        card_data = self.get_card_data("issuer_not_available", "american_express")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == "issuer_not_available"
    @pytest.mark.vcr()
    def test_card_create__issuer_decline_operation__american_express(self):
        card_data = self.get_card_data("issuer_decline_operation", "american_express")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == "issuer_decline_operation"
    @pytest.mark.vcr()
    def test_card_create__invalid_card__diners_club(self):
        card_data = self.get_card_data("invalid_card", "diners_club")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == "invalid_card"
    @pytest.mark.vcr()
    def test_card_create__processing_error__diners_club(self):
        card_data = self.get_card_data("processing_error", "diners_club")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == "processing_error"
    @pytest.mark.vcr()
    def test_card_create__fraudulent__diners_club(self):
        card_data = self.get_card_data("fraudulent", "diners_club")
        card = self.card.create(data=card_data)
        assert card["data"]["object"] == "error"
        assert card["data"]["code"] == "card_declined"
        assert card["data"]["decline_code"] == "fraudulent"
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"copy.deepcopy",
"uuid.uuid4",
"pytest.mark.vcr",
"culqi.resources.Card",
"os.environ.get",
"dotenv.load_dotenv",
"culqi.client.Culqi"
] |
[((1550, 1567), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (1565, 1567), False, 'import pytest\n'), ((1763, 1780), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (1778, 1780), False, 'import pytest\n'), ((2080, 2097), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (2095, 2097), False, 'import pytest\n'), ((2328, 2345), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (2343, 2345), False, 'import pytest\n'), ((2774, 2791), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (2789, 2791), False, 'import pytest\n'), ((3184, 3201), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (3199, 3201), False, 'import pytest\n'), ((3415, 3432), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (3430, 3432), False, 'import pytest\n'), ((3660, 3677), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (3675, 3677), False, 'import pytest\n'), ((3915, 3932), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (3930, 3932), False, 'import pytest\n'), ((4160, 4177), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (4175, 4177), False, 'import pytest\n'), ((4510, 4527), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (4525, 4527), False, 'import pytest\n'), ((4854, 4871), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (4869, 4871), False, 'import pytest\n'), ((5225, 5242), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (5240, 5242), False, 'import pytest\n'), ((5598, 5615), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (5613, 5615), False, 'import pytest\n'), ((5968, 5985), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (5983, 5985), False, 'import pytest\n'), ((6369, 6386), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (6384, 6386), False, 'import pytest\n'), ((6782, 6799), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (6797, 6799), False, 'import pytest\n'), ((7149, 7166), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (7164, 7166), False, 
'import pytest\n'), ((7528, 7545), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (7543, 7545), False, 'import pytest\n'), ((7916, 7931), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7929, 7931), False, 'import unittest\n'), ((416, 429), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (427, 429), False, 'from dotenv import load_dotenv\n'), ((492, 524), 'os.environ.get', 'os.environ.get', (['"""API_PUBLIC_KEY"""'], {}), "('API_PUBLIC_KEY')\n", (506, 524), False, 'import os\n'), ((552, 585), 'os.environ.get', 'os.environ.get', (['"""API_PRIVATE_KEY"""'], {}), "('API_PRIVATE_KEY')\n", (566, 585), False, 'import os\n'), ((608, 648), 'culqi.client.Culqi', 'Culqi', (['self.public_key', 'self.private_key'], {}), '(self.public_key, self.private_key)\n', (613, 648), False, 'from culqi.client import Culqi\n'), ((669, 692), 'culqi.resources.Card', 'Card', ([], {'client': 'self.culqi'}), '(client=self.culqi)\n', (673, 692), False, 'from culqi.resources import Card\n'), ((857, 892), 'copy.deepcopy', 'deepcopy', (['Data.CARD[code][provider]'], {}), '(Data.CARD[code][provider])\n', (865, 892), False, 'from copy import deepcopy\n'), ((1011, 1034), 'copy.deepcopy', 'deepcopy', (['Data.CUSTOMER'], {}), '(Data.CUSTOMER)\n', (1019, 1034), False, 'from copy import deepcopy\n'), ((818, 825), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (823, 825), False, 'from uuid import uuid4\n')]
|
import math
import torch
import torch.nn as nn
from models.modules import (gaussian_likelihood, gaussian_sample, View)
class VAE(nn.Module):
    """
    Convolutional variational autoencoder.

    The encoder maps a 3-channel image to `2 * hidden_channels` values
    (mean and log-variance of the latent Gaussian); the decoder maps a
    latent sample back to per-pixel mean and log-variance. The Flatten ->
    Linear(8 * 8 * 16, ...) stage implies two stride-2 convs reduce the
    input to 8x8, i.e. 32x32 spatial input is expected.
    """
    def __init__(self, image_shape, hidden_channels):
        super().__init__()
        self.encoder = nn.Sequential(*[
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 16, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(8 * 8 * 16, hidden_channels),
            nn.BatchNorm1d(hidden_channels),
            nn.ReLU(),
            nn.Linear(hidden_channels, hidden_channels * 2),
        ])
        self.decoder = nn.Sequential(*[
            nn.Linear(hidden_channels, hidden_channels),
            nn.BatchNorm1d(hidden_channels),
            nn.ReLU(),
            nn.Linear(hidden_channels, 8 * 8 * 16),
            nn.BatchNorm1d(8 * 8 * 16),
            nn.ReLU(),
            View((-1, 16, 8, 8)),
            nn.ConvTranspose2d(16, 32, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.ConvTranspose2d(16, 3 * 2, kernel_size=3, stride=1, padding=1, bias=False),
        ])
    def forward(self, x):
        """Return (latent z, bits-per-dim, None, summary image grid)."""
        b, c, h, w = x.shape
        z_mu, z_logvar = torch.chunk(self.encoder(x),2,dim=1)
        z_var = torch.exp(z_logvar)
        z = z_mu
        if self.training:
            # Reparameterization trick.
            z_std = torch.exp(0.5 * z_logvar)
            eps = torch.randn_like(z_mu)
            # Bug fix: the original `z += z_std * eps` added noise IN PLACE,
            # mutating z_mu (aliased by z) so the KL term below was computed
            # from the noisy sample instead of the mean. Use an out-of-place add.
            z = z_mu + z_std * eps
        x_mu, x_logvar = torch.chunk(self.decoder(z),2,dim=1)
        x_var = torch.exp(x_logvar)
        # KL divergence between q(z|x) and the standard normal prior, per sample.
        loss_DKL = - 0.5 * (1 + z_logvar - z_mu**2 - z_var).sum(dim=1)
        # Gaussian negative log-likelihood of the reconstruction.
        # NOTE(review): the first term takes log of the SUM of variances rather
        # than the sum of log-variances — looks suspect, but preserved as-is.
        loss_rec = 0.5 * (c * h * w) * torch.log(2 * math.pi * x_var.view(b,-1).sum(dim=1)) + 0.5 * ((x - x_mu) ** 2 / x_var).view(b, -1).sum(dim=1)
        objective = -(loss_rec + loss_DKL)
        # Convert nats to bits per dimension.
        bpd = (-objective) / (math.log(2.) * c * h * w)
        # Side-by-side originals and reconstructions for logging.
        summary_image = torch.cat([x[:16], x_mu[:16]], dim=0)
        return z, bpd, None, summary_image
|
[
"torch.nn.ReLU",
"torch.nn.ConvTranspose2d",
"torch.randn_like",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.exp",
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"models.modules.View",
"math.log",
"torch.nn.Flatten"
] |
[((2049, 2068), 'torch.exp', 'torch.exp', (['z_logvar'], {}), '(z_logvar)\n', (2058, 2068), False, 'import torch\n'), ((2306, 2325), 'torch.exp', 'torch.exp', (['x_logvar'], {}), '(x_logvar)\n', (2315, 2325), False, 'import torch\n'), ((2687, 2724), 'torch.cat', 'torch.cat', (['[x[:16], x_mu[:16]]'], {'dim': '(0)'}), '([x[:16], x_mu[:16]], dim=0)\n', (2696, 2724), False, 'import torch\n'), ((2132, 2157), 'torch.exp', 'torch.exp', (['(0.5 * z_logvar)'], {}), '(0.5 * z_logvar)\n', (2141, 2157), False, 'import torch\n'), ((2176, 2198), 'torch.randn_like', 'torch.randn_like', (['z_mu'], {}), '(z_mu)\n', (2192, 2198), False, 'import torch\n'), ((276, 340), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n', (285, 340), True, 'import torch.nn as nn\n'), ((354, 372), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (368, 372), True, 'import torch.nn as nn\n'), ((386, 395), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (393, 395), True, 'import torch.nn as nn\n'), ((409, 474), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(32)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(16, 32, kernel_size=3, stride=2, padding=1, bias=False)\n', (418, 474), True, 'import torch.nn as nn\n'), ((488, 506), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (502, 506), True, 'import torch.nn as nn\n'), ((520, 529), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (527, 529), True, 'import torch.nn as nn\n'), ((543, 608), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(32, 32, kernel_size=3, stride=1, padding=1, bias=False)\n', (552, 608), True, 'import torch.nn as nn\n'), ((622, 640), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (636, 640), True, 'import torch.nn as nn\n'), ((654, 
663), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (661, 663), True, 'import torch.nn as nn\n'), ((677, 742), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(16)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(32, 16, kernel_size=3, stride=2, padding=1, bias=False)\n', (686, 742), True, 'import torch.nn as nn\n'), ((756, 774), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (770, 774), True, 'import torch.nn as nn\n'), ((788, 797), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (795, 797), True, 'import torch.nn as nn\n'), ((811, 823), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (821, 823), True, 'import torch.nn as nn\n'), ((837, 875), 'torch.nn.Linear', 'nn.Linear', (['(8 * 8 * 16)', 'hidden_channels'], {}), '(8 * 8 * 16, hidden_channels)\n', (846, 875), True, 'import torch.nn as nn\n'), ((889, 920), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_channels'], {}), '(hidden_channels)\n', (903, 920), True, 'import torch.nn as nn\n'), ((934, 943), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (941, 943), True, 'import torch.nn as nn\n'), ((957, 1004), 'torch.nn.Linear', 'nn.Linear', (['hidden_channels', '(hidden_channels * 2)'], {}), '(hidden_channels, hidden_channels * 2)\n', (966, 1004), True, 'import torch.nn as nn\n'), ((1069, 1112), 'torch.nn.Linear', 'nn.Linear', (['hidden_channels', 'hidden_channels'], {}), '(hidden_channels, hidden_channels)\n', (1078, 1112), True, 'import torch.nn as nn\n'), ((1126, 1157), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_channels'], {}), '(hidden_channels)\n', (1140, 1157), True, 'import torch.nn as nn\n'), ((1171, 1180), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1178, 1180), True, 'import torch.nn as nn\n'), ((1194, 1232), 'torch.nn.Linear', 'nn.Linear', (['hidden_channels', '(8 * 8 * 16)'], {}), '(hidden_channels, 8 * 8 * 16)\n', (1203, 1232), True, 'import torch.nn as nn\n'), ((1246, 1272), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(8 * 8 * 16)'], 
{}), '(8 * 8 * 16)\n', (1260, 1272), True, 'import torch.nn as nn\n'), ((1286, 1295), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1293, 1295), True, 'import torch.nn as nn\n'), ((1309, 1329), 'models.modules.View', 'View', (['(-1, 16, 8, 8)'], {}), '((-1, 16, 8, 8))\n', (1313, 1329), False, 'from models.modules import gaussian_likelihood, gaussian_sample, View\n'), ((1343, 1439), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(16)', '(32)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)', 'bias': '(False)'}), '(16, 32, kernel_size=3, stride=2, padding=1,\n output_padding=1, bias=False)\n', (1361, 1439), True, 'import torch.nn as nn\n'), ((1449, 1467), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (1463, 1467), True, 'import torch.nn as nn\n'), ((1481, 1490), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1488, 1490), True, 'import torch.nn as nn\n'), ((1504, 1578), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(32, 32, kernel_size=3, stride=1, padding=1, bias=False)\n', (1522, 1578), True, 'import torch.nn as nn\n'), ((1592, 1610), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (1606, 1610), True, 'import torch.nn as nn\n'), ((1624, 1633), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1631, 1633), True, 'import torch.nn as nn\n'), ((1647, 1743), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(32)', '(16)'], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)', 'output_padding': '(1)', 'bias': '(False)'}), '(32, 16, kernel_size=3, stride=2, padding=1,\n output_padding=1, bias=False)\n', (1665, 1743), True, 'import torch.nn as nn\n'), ((1753, 1771), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (1767, 1771), True, 'import torch.nn as nn\n'), ((1785, 1794), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1792, 1794), True, 'import torch.nn as nn\n'), 
((1808, 1885), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(16)', '(3 * 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(16, 3 * 2, kernel_size=3, stride=1, padding=1, bias=False)\n', (1826, 1885), True, 'import torch.nn as nn\n'), ((2628, 2641), 'math.log', 'math.log', (['(2.0)'], {}), '(2.0)\n', (2636, 2641), False, 'import math\n')]
|
import matplotlib.animation as animation
from matplotlib import style
import matplotlib.pyplot as plt
# Figure/axes used by the live-updating animation below.
plt.style.use('fivethirtyeight')
fig1=plt.figure()
ax1=fig1.add_subplot(1,1,1)
def animate(p):
    """Redraw ax1 from the comma-separated x,y pairs in test1.txt.

    Called periodically by FuncAnimation; *p* is the frame counter (unused).
    """
    # Bug fix: the original called open() without closing, leaking one file
    # handle per animation frame. A context manager closes it every tick.
    with open('test1.txt', 'r') as data_file:
        plot_data = data_file.read()
    line_data = plot_data.split('\n')
    x1 = []
    y1 = []
    for line in line_data:
        if len(line) > 1:
            x, y = line.split(',')
            x1.append(x)
            y1.append(y)
    ax1.clear()
    ax1.plot(x1, y1)
# Keep a reference to the animation object so it is not garbage-collected
# while the window is open; redraw every 500 ms.
anime_data=animation.FuncAnimation(fig1,animate,interval=500)
plt.show()
|
[
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.animation.FuncAnimation"
] |
[((107, 139), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (120, 139), True, 'import matplotlib.pyplot as plt\n'), ((148, 160), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (158, 160), True, 'import matplotlib.pyplot as plt\n'), ((533, 585), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig1', 'animate'], {'interval': '(500)'}), '(fig1, animate, interval=500)\n', (556, 585), True, 'import matplotlib.animation as animation\n'), ((585, 595), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (593, 595), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- mode: python; coding:utf-8; -*-
from __future__ import print_function
# A SCons tool to enable compilation of Erlang in SCons.
#
# Copyright © 2005 <NAME> "Pupeno" <NAME>
# Copyright © 2009, 2011, 2017 <NAME>
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
# Original this code was licenced under GPLv2. This fork is relicenced under GPLv3 as is permitted.
from SCons.Builder import Builder
from SCons.Scanner import Scanner
import os
import subprocess
def generate(env):
    """Attach Erlang support to the SCons environment *env*.

    Configures $ERLC/$ERL, registers an ``Erlang`` builder for .erl and .rel
    sources, a dependency scanner for release (.rel) files, and an ``EDoc``
    documentation builder.
    """
    env["ERLC"] = env.Detect("erlc") or "erlc"
    env["ERL"] = env.Detect("erl") or "erl"

    bugReport = '''
Please report this bug via the SCons Erlang tool project issue tracker on BitBucket ( cf. https://bitbucket.org/russel/scons_erlang)
or direct to Russel Winder <<EMAIL>>.'''

    def addTarget(target, source, env):
        """ Adds the targets (.beam, .script and/or .boot) according to source's extension, source's path and $OUTPUT. """
        # We should receive one and only one source.
        if len(source) > 1:
            print("Warning: unexpected internal situation.")
            print("This is a bug. {}".format(bugReport))
            print("addTarget received more than one source.")
            # Bug fix: report the arguments in signature order (target, source, env).
            print("addTarget({}, {}, {})".format(target, source, env))
        sourceStr = str(source[0])
        # Tear apart the source.
        filename = os.path.basename(sourceStr)
        extension = os.path.splitext(filename)[1]
        basename = os.path.splitext(filename)[0]
        # Use $OUTPUT or where the source is as the prefix.
        prefix = outputDir(sourceStr, env)
        # Generate the target according to the source.
        if extension == ".erl":
            # .erls generate a .beam.
            return ([prefix + basename + ".beam"], source)
        elif extension == ".rel":
            # .rels generate a .script and a .boot.
            return ([prefix + basename + ".script", prefix + basename + ".boot"], source)
        else:
            print("Warning: extension '{}' is unknown.".format(extension))
            print("If you feel this is a valid extension, then it might be a missing feature or a bug. {}".format(bugReport))
            print("addTarget({}, {}, {}).".format(target, source, env))
            return (target, source)

    def erlangGenerator(source, target, env, for_signature):
        """ Generate the erlc compilation command line. """
        # We should receive one and only one source.
        if len(source) > 1:
            print("Warning: unexpected internal situation.")
            print("This is a bug. {}".format(bugReport))
            print("erlangGenerator received more than one source.")
            print("erlangGenerator({}, {}, {}, {})".format(source, target, env, for_signature))
        source = str(source[0])
        # Start with the compiler.
        command = "$ERLC"
        # The output (-o) parameter.
        command += " -o " + outputDir(source, env)
        # Add the libpaths. "in" works on SCons Environments and, unlike
        # has_key, also under Python 3.
        if "ERLLIBPATH" in env:
            if not isinstance(env["ERLLIBPATH"], list):
                env["ERLLIBPATH"] = [env["ERLLIBPATH"]]
            for libpath in env["ERLLIBPATH"]:
                command += " -I " + libpath
        # At last, the source.
        return command + " " + source

    erlangBuilder = Builder(generator = erlangGenerator,
                            src_suffix = ".erl",
                            emitter = addTarget,
                            single_source = True)
    env.Append(BUILDERS = {"Erlang" : erlangBuilder})
    env.Append(ENV = {"HOME" : os.environ["HOME"]})  # erlc needs $HOME.

    def outputDir(source, env):
        """ Given a source and its environment, return the output directory. """
        if "OUTPUT" in env:
            return env["OUTPUT"]
        else:
            return dirOf(source)

    def libpath(env):
        """ Return a list of the libpath or an empty list. """
        if "ERLLIBPATH" in env:
            if isinstance(env["ERLLIBPATH"], list):
                return env["ERLLIBPATH"]
            else:
                return [env["ERLLIBPATH"]]
        else:
            return []

    def dirOf(filename):
        """ Returns the relative directory of filename. """
        directory = os.path.dirname(filename)
        if directory == "":
            return "./"
        else:
            return directory + "/"

    def relModules(node, env, path):
        """ Return a list of modules needed by a release (.rel) file. """
        # Run the function relApplications of erlangscanner to get the applications.
        command = "erl -noshell -s erlangscanner relApplications \"" + str(node) + "\" -s init stop"
        sp = subprocess.Popen(command,
                              shell = True,
                              stdin = None,
                              stdout = subprocess.PIPE,
                              stderr = subprocess.PIPE)
        sp.wait()
        if sp.returncode != 0:
            print("Warning: The scanner failed to scan your files, dependencies won't be calculated.")
            # Bug fix: the two {} placeholders were previously fed one tuple
            # argument, which raised IndexError instead of printing the report.
            print("If your file '{}' is correctly (syntactically and semantically), this is a bug. {}".format(node, bugReport))
            print("Command: {}.".format(command))
            print("Return code: {}.".format(sp.returncode))
            print("Output: \n{}\n".format(sp.stdout.read().strip()))
            print("Error: \n{}\n".format(sp.stderr.read().strip()))
            return []
        # Get the applications defined in the .rel. Decode so the names are
        # text on Python 3 (Popen pipes yield bytes there).
        appNames = sp.stdout.read().decode().split()
        # Build the search path.
        paths = set([outputDir(str(node), env)] + libpath(env))
        modules = []
        for path in paths:
            for appName in appNames:
                appFileName = path + appName + ".app"
                if os.access(appFileName, os.R_OK):
                    modules += appModules(appFileName, env, path)
        return modules

    def appModules(node, env, path):
        """ Return a list of modules needed by an application (.app) file. """
        # Run the function appModules of erlangscanner to get the modules.
        command = "erl -noshell -s erlangscanner appModules \"" + str(node) + "\" -s init stop"
        sp = subprocess.Popen(command,
                              shell = True,
                              stdin = None,
                              stdout = subprocess.PIPE,
                              stderr = subprocess.PIPE)
        sp.wait()
        if sp.returncode != 0:
            print("Warning: The scanner failed to scan your files, dependencies won't be calculated.")
            print("If your file '{}' is correctly (syntactically and semantically), this is a bug. {}".format(node, bugReport))
            print("Command: {}.".format(command))
            print("Return code: {}.".format(sp.returncode))
            print("Output: \n{}\n".format(sp.stdout.read().strip()))
            print("Error: \n{}\n".format(sp.stderr.read().strip()))
            return []
        # Get the modules defined in the .app (decoded for Python 3).
        moduleNames = sp.stdout.read().decode().split()
        # Build the search path.
        paths = set([outputDir(node, env)] + libpath(env))
        modules = []
        # When there are more than one application in a project, since we are scanning all paths against all files, we might end up with more dependencies that really exists. The worst is that we'll get recompilation of a file that didn't really needed it.
        for path in paths:
            for moduleName in moduleNames:
                modules.append(moduleName + ".beam")
        return modules

    relScanner = Scanner(function = relModules,
                         name = "RelScanner",
                         skeys = [".rel"],
                         recursive = False)
    env.Append(SCANNERS = relScanner)

    def edocGenerator(source, target, env, for_signature):
        """ Generate the command line to generate the documentation. """
        tdir = os.path.dirname(str(target[0])) + "/"
        command = "erl -noshell -run edoc_run files '[%s]' '[{dir, \"%s\"}]' -run init stop" % (
            ",".join(['"' + str(x) + '"' for x in source]),
            tdir)
        return command

    def documentTargets(target, source, env):
        """ Artificially create all targets that generating documentation will generate to clean them up later. """
        tdir = os.path.dirname(str(target[0])) + "/"
        newTargets = [str(target[0])]
        # TODO: What happens if two different sources have the same name in different directories?
        newTargets += [tdir + os.path.splitext(os.path.basename(filename))[0] + ".html"
                       for filename in map(str, source)]
        # NOTE(review): "overview-summary.html" appears twice in this list --
        # confirm whether the duplicate is intentional before removing it.
        newTargets += [tdir + filename for filename in
                       ["edoc-info", "modules-frame.html", "overview-summary.html", "overview-summary.html", "stylesheet.css", "packages-frame.html"]]
        return (newTargets, source)

    def edocScanner(node, env, path):
        """ Declare a dependency on overview.edoc when it exists next to the node. """
        overview = os.path.dirname(str(node)) + "/overview.edoc"
        if os.path.exists(overview):
            return ["overview.edoc"]
        else:
            return []

    edocBuilder = Builder(generator = edocGenerator,
                         emitter = documentTargets,
                         target_scanner = Scanner(function=edocScanner))
    env.Append(BUILDERS = {"EDoc" : edocBuilder})
def exists(env):
    """Report whether the Erlang compiler can be located in *env*."""
    detected = env.Detect(["erlc"])
    return detected
|
[
"subprocess.Popen",
"os.path.basename",
"os.path.dirname",
"os.path.exists",
"os.path.splitext",
"os.access",
"SCons.Scanner.Scanner",
"SCons.Builder.Builder"
] |
[((3896, 3992), 'SCons.Builder.Builder', 'Builder', ([], {'generator': 'erlangGenerator', 'src_suffix': '""".erl"""', 'emitter': 'addTarget', 'single_source': '(True)'}), "(generator=erlangGenerator, src_suffix='.erl', emitter=addTarget,\n single_source=True)\n", (3903, 3992), False, 'from SCons.Builder import Builder\n'), ((8392, 8477), 'SCons.Scanner.Scanner', 'Scanner', ([], {'function': 'relModules', 'name': '"""RelScanner"""', 'skeys': "['.rel']", 'recursive': '(False)'}), "(function=relModules, name='RelScanner', skeys=['.rel'], recursive=False\n )\n", (8399, 8477), False, 'from SCons.Scanner import Scanner\n'), ((1952, 1979), 'os.path.basename', 'os.path.basename', (['sourceStr'], {}), '(sourceStr)\n', (1968, 1979), False, 'import os\n'), ((4991, 5016), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (5006, 5016), False, 'import os\n'), ((5429, 5530), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdin': 'None', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, shell=True, stdin=None, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n', (5445, 5530), False, 'import subprocess\n'), ((6982, 7083), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdin': 'None', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, shell=True, stdin=None, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n', (6998, 7083), False, 'import subprocess\n'), ((9949, 9973), 'os.path.exists', 'os.path.exists', (['overview'], {}), '(overview)\n', (9963, 9973), False, 'import os\n'), ((2000, 2026), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2016, 2026), False, 'import os\n'), ((2049, 2075), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2065, 2075), False, 'import os\n'), ((10198, 10227), 'SCons.Scanner.Scanner', 'Scanner', ([], {'function': 'edocScanner'}), '(function=edocScanner)\n', (10205, 10227), 
False, 'from SCons.Scanner import Scanner\n'), ((6559, 6590), 'os.access', 'os.access', (['appFileName', 'os.R_OK'], {}), '(appFileName, os.R_OK)\n', (6568, 6590), False, 'import os\n'), ((9373, 9399), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (9389, 9399), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
# Two-letter USPS abbreviations for the 50 U.S. states plus DC; the spider
# issues one store-locator request per entry.
STATES = ['AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA',
          'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MD',
          'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ',
          'NM', 'NY', 'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
          'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV', 'WI', 'WY']
class MeijerSpider(scrapy.Spider):
    """Spider for Meijer store locations.

    Requests the store-locator page for each state in STATES and yields one
    GeojsonPointItem per store found in the JavaScript data embedded in the
    response.
    """
    name = 'meijer'
    allowed_domains = ['www.meijer.com']

    def start_requests(self):
        """Issue one locate-store-by-state request per state."""
        for state in STATES:
            yield scrapy.Request(
                'https://www.meijer.com/custserv/locate_store_by_state.cmd?form_state=locateStoreByStateForm&state={}'.format(state),
                callback = self.parse
            )

    def parse(self, response):
        """Extract stores from the JS array embedded in the page."""
        stores = response.css('div.records_inner>script::text').extract_first()
        # Bug fix: when extract_first() returned None the original fell
        # through and crashed iterating None below; bail out early instead.
        if not stores:
            return
        # The payload is a JS array of single-quoted arrays; rewrite it into
        # valid JSON before parsing.
        # NOTE(review): the [13:-1] slice assumes the exact prefix/suffix seen
        # at scrape time -- confirm against a live response if this breaks.
        stores = stores.strip()[13:-1]
        stores = stores.replace('\',\'', '","')
        stores = stores.replace('[\'', '["')
        stores = stores.replace('\']', '"]')
        stores = json.loads(stores)
        # Coordinates live in a separate inline script; lat/lon are paired
        # with the stores list positionally.
        loc_data = response.css('script').extract()[10]
        lat_matches = re.findall(r'(\"LAT\"), (\")([+-]?([0-9]*[.])?[0-9]+)(\")', loc_data)
        lon_matches = re.findall(r'(\"LNG\"), (\")([+-]?([0-9]*[.])?[0-9]+)(\")', loc_data)
        for n, store in enumerate(stores):
            # store[6] looks like "City, ST 12345".
            address1 = store[6].split(',')
            city = address1[0].strip()
            address2 = address1[1].strip().split(' ')
            state = address2[0]
            postcode = address2[1]
            properties = {
                'ref': store[0],
                'name': store[1],
                'phone': store[7],
                'opening_hours': self.hours(store[8]),
                'lat': lat_matches[n][2],  # regex group 3 holds the numeric string
                'lon': lon_matches[n][2],
                'street': store[2],
                'city': city,
                'state': state,
                'postcode': postcode
            }
            yield GeojsonPointItem(**properties)

    def hours(self, data):
        """Normalize the store-hours string; the 24-hour phrasing becomes '24/7'."""
        if data == 'Open 24 hrs a day, 364 days a year.':
            return '24/7'
        return data
|
[
"locations.items.GeojsonPointItem",
"re.findall",
"json.loads"
] |
[((1207, 1225), 'json.loads', 'json.loads', (['stores'], {}), '(stores)\n', (1217, 1225), False, 'import json\n'), ((1312, 1384), 're.findall', 're.findall', (['"""(\\\\"LAT\\\\"), (\\\\")([+-]?([0-9]*[.])?[0-9]+)(\\\\")"""', 'loc_data'], {}), '(\'(\\\\"LAT\\\\"), (\\\\")([+-]?([0-9]*[.])?[0-9]+)(\\\\")\', loc_data)\n', (1322, 1384), False, 'import re\n'), ((1408, 1480), 're.findall', 're.findall', (['"""(\\\\"LNG\\\\"), (\\\\")([+-]?([0-9]*[.])?[0-9]+)(\\\\")"""', 'loc_data'], {}), '(\'(\\\\"LNG\\\\"), (\\\\")([+-]?([0-9]*[.])?[0-9]+)(\\\\")\', loc_data)\n', (1418, 1480), False, 'import re\n'), ((2312, 2342), 'locations.items.GeojsonPointItem', 'GeojsonPointItem', ([], {}), '(**properties)\n', (2328, 2342), False, 'from locations.items import GeojsonPointItem\n')]
|
from url_shortener.form_filters import prepend_http, strip_value
def test_prepend_http():
    """prepend_http adds a scheme only when one is missing; falsy inputs pass through."""
    expectations = [
        ('example.com/', 'http://example.com/'),
        ('http://example.com/', 'http://example.com/'),
        ('https://example.com/', 'https://example.com/'),
        ('', ''),
    ]
    for given, wanted in expectations:
        assert prepend_http(given) == wanted
    assert prepend_http(None) is None
def test_strip_value():
    """strip_value trims surrounding whitespace; '' and None pass through unchanged."""
    expectations = [
        (' example ', 'example'),
        ('example', 'example'),
        ('', ''),
    ]
    for given, wanted in expectations:
        assert strip_value(given) == wanted
    assert strip_value(None) is None
|
[
"url_shortener.form_filters.prepend_http",
"url_shortener.form_filters.strip_value"
] |
[((103, 131), 'url_shortener.form_filters.prepend_http', 'prepend_http', (['"""example.com/"""'], {}), "('example.com/')\n", (115, 131), False, 'from url_shortener.form_filters import prepend_http, strip_value\n'), ((168, 203), 'url_shortener.form_filters.prepend_http', 'prepend_http', (['"""http://example.com/"""'], {}), "('http://example.com/')\n", (180, 203), False, 'from url_shortener.form_filters import prepend_http, strip_value\n'), ((240, 276), 'url_shortener.form_filters.prepend_http', 'prepend_http', (['"""https://example.com/"""'], {}), "('https://example.com/')\n", (252, 276), False, 'from url_shortener.form_filters import prepend_http, strip_value\n'), ((314, 330), 'url_shortener.form_filters.prepend_http', 'prepend_http', (['""""""'], {}), "('')\n", (326, 330), False, 'from url_shortener.form_filters import prepend_http, strip_value\n'), ((348, 366), 'url_shortener.form_filters.prepend_http', 'prepend_http', (['None'], {}), '(None)\n', (360, 366), False, 'from url_shortener.form_filters import prepend_http, strip_value\n'), ((412, 438), 'url_shortener.form_filters.strip_value', 'strip_value', (['""" example """'], {}), "(' example ')\n", (423, 438), False, 'from url_shortener.form_filters import prepend_http, strip_value\n'), ((463, 485), 'url_shortener.form_filters.strip_value', 'strip_value', (['"""example"""'], {}), "('example')\n", (474, 485), False, 'from url_shortener.form_filters import prepend_http, strip_value\n'), ((510, 525), 'url_shortener.form_filters.strip_value', 'strip_value', (['""""""'], {}), "('')\n", (521, 525), False, 'from url_shortener.form_filters import prepend_http, strip_value\n'), ((543, 560), 'url_shortener.form_filters.strip_value', 'strip_value', (['None'], {}), '(None)\n', (554, 560), False, 'from url_shortener.form_filters import prepend_http, strip_value\n')]
|
import time, os, sys
import logging
import json
import importlib
import threading
import pprint
import datetime
import gc
import re
import shutil
from configparser import ConfigParser
from pymongo import MongoClient
from bson.objectid import ObjectId
from subprocess import Popen
from threading import Lock, Thread
from lib.constants import FA_HOME
from lib.saq.client import Alert, AlertSubmitException
# Module-wide logger; uses the root logger so the host application's logging
# configuration applies.
log = logging.getLogger()
class Scheduler:
    """Drives the indicator-analysis pipeline.

    Loads the analysis modules named in the config, keeps a master list of
    'New' CRITS indicators synchronized from MongoDB on a background thread,
    fans each indicator out to every module that handles its type, and, once
    all modules have finished, updates the indicator status in CRITS and
    submits an alert to ACE for anything still 'In Progress'.
    """

    def __init__(self):
        """Read config, prepare shared state and instantiate the modules."""
        log.info('Initializing scheduler.')
        self.config = ConfigParser()
        self.config.read(os.path.join(FA_HOME, 'etc', 'config.ini'))
        self.working = self.config.get('general', 'working_dir')
        self.logging_dir = self.config.get('general', 'logging_dir')
        self.json_results_file = os.path.join(self.working, 'scan_results.json')
        self.running = True
        self.update_minutes = self.config.getint('general', 'update_minutes')
        # Thread that regularly refreshes the master list of indicators.
        self.update_list_thread = None
        # cid -> {'status', 'type', 'value', 'submitted'}; guarded by the lock.
        self.master_indicator_dict = {}
        self.master_indicator_lock = Lock()
        self.has_initial_data = False
        # Loaded module instances and their classes keyed by class name.
        self.modules = []
        self.module_classes = {}
        # Indicator types observed for which no loaded module exists.
        self.orphaned_types = []
        # Instantiate every module listed under general/load_modules.
        modules_to_load = self.config.get('general', 'load_modules').split(',')
        for m in modules_to_load:
            mcfg = 'module_{}'.format(m)
            if mcfg in self.config:
                log.info('Loading module {}'.format(m))
                try:
                    _class_name = self.config.get(mcfg, 'class_name')
                    _module_name = 'lib.modules.{}'.format(self.config.get(mcfg, 'module_name'))
                    log.debug('Loading module: {}'.format(_module_name))
                    _module = importlib.import_module(_module_name)
                    log.debug('Loading class: {}'.format(_class_name))
                    _class = getattr(_module, _class_name)
                    self.module_classes[_class_name] = _class
                    log.debug('Creating instance of module: {}'.format(_class_name))
                    _module_instance = _class()
                    self.modules.append(_module_instance)
                except ImportError as e:
                    log.error('Error importing module. {}'.format(e))
                except AttributeError as e:
                    log.error('Error importing class. {}'.format(e))
            else:
                log.error('Module {} configuration not found!'.format(m))

    def start(self):
        """Start the indicator feed thread and the modules, then loop until stopped."""
        # Start a new thread to hold a master list of all 'New' indicators and
        # update it regularly as new indicators are found in CRITS.
        self.update_list_thread = Thread(target = self.update_master_indicator_list, name='IndicatorMaster')
        self.update_list_thread.start()
        # Wait until our master_indicator_dict contains our first set of indicators.
        log.info('Obtaining initial indicator data.')
        while not self.has_initial_data:
            time.sleep(1)
        log.info('Initial data obtained. Starting modules.')
        # Get our indicator data and start the modules.
        for module in self.modules:
            self.start_module(module)
        # Finally, make sure our modules aren't broken.
        for module in self.modules:
            status = module.get_module_status()
            if not status:
                log.error('module_status field not found in module data for {}. Shutting down...'.format(module.getName()))
                self.running = False
                break
            if status == 'not initialized':
                log.error('Module {} not initialized! Cannot update indicators!'.format(module.getName()))
                self.running = False
                break
        # Main processing loop.
        sleeptime = 10
        while self.running:
            try:
                with self.master_indicator_lock:
                    all_cids = list(self.master_indicator_dict.keys())
                # Loop through all CRITS ids.
                for cid in all_cids:
                    with self.master_indicator_lock:
                        status = self.master_indicator_dict[cid]['status']
                        ctype = self.master_indicator_dict[cid]['type']
                        value = self.master_indicator_dict[cid]['value']
                    # Process indicators by their status. This may or may not
                    # update the overall status depending on whether all the
                    # modules have finished for that particular indicator.
                    if status == 'New':
                        self._process_new_indicator(cid, ctype, value)
                        self._process_indicator_status(cid, ctype)
                    # If the status is something other than 'New', we update
                    # the indicator and remove it from all of the modules.
                    if status == 'In Progress' or status == 'Analyzed':
                        # Update the CRITS status.
                        self.update_indicator_status(cid, status)
                        # Send alerts to ACE.
                        # NOTE(review): only the 'In Progress' branch marks the
                        # indicator as submitted, and clean_master_and_modules()
                        # removes only submitted entries -- confirm how
                        # 'Analyzed' indicators are meant to leave the list.
                        if status == 'In Progress':
                            self.send_alert_to_ace(cid)
                            with self.master_indicator_lock:
                                self.master_indicator_dict[cid]['submitted'] = True
                        # Remove submitted indicators from the master dict.
                        self.clean_master_and_modules()
                collected = gc.collect()
                if collected > 0:
                    log.debug('Garbage Collector: Collected {} objects.'.format(collected))
                # Sleep in one-second slices so a stop request is seen quickly.
                scount = 0
                while scount < sleeptime and self.running:
                    time.sleep(1)
                    scount += 1
            except KeyboardInterrupt:
                log.info('Keyboard interrupt caught in scheduler. Terminating...')
                self.stop()

    def _process_new_indicator(self, cid, ctype, value):
        """Fan a 'New' indicator out to every module that handles its type."""
        # First we will see if we need to add the indicator to the modules.
        has_module = False
        for module in self.modules:
            if ctype not in module.get_valid_indicator_types():
                continue
            if module.has_indicator(cid):
                has_module = True
                continue
            log.info('Adding new indicator to all the modules: {}'.format(value))
            has_module = True
            # NOTE(review): argument order here is (cid, value, ctype) but
            # add_indicator_to_modules() calls add_indicator(cid, ctype, cvalue)
            # -- confirm which order lib.modules actually expects.
            module.add_indicator(cid, value, ctype)
        # Report indicator types that have no module: this means we need to
        # write a module for that type.
        if not has_module:
            if ctype not in self.orphaned_types:
                self.orphaned_types.append(ctype)
                log.warning('No module for indicator type {} and indicator {}'.format(ctype, cid))

    def _process_indicator_status(self, cid, ctype):
        """Promote an indicator from 'New' once every relevant module is done."""
        # Can we update this indicator's overall status?
        _can_update = True
        # If even one module says "in progress", that is the overall status.
        _is_in_progress = False
        # At least one module must have analyzed the indicator before we mark
        # it 'Analyzed'; otherwise it stays 'New'.
        _at_least_one_analyzed = False
        for module in self.modules:
            # Now we process the results data.
            module_data = module.get_indicator_data(cid)
            if not module_data and ctype in module.get_valid_indicator_types():
                log.warning('Module {} can handle indicator type '\
                    '{} for {}, but it is not in the module data.'.format(module.getName(), ctype, cid))
                _can_update = False
                continue
            if not module_data:
                continue
            if module_data['status'] == 'New':
                _can_update = False
            if module_data['processing_results']:
                # We are still processing results.
                _can_update = False
            # One module says it is 'in progress', so that's what we mark it.
            # (A dead "_is_analyzed = False" assignment was removed here: that
            # variable was never read anywhere.)
            if module_data['status'] == 'In Progress':
                _is_in_progress = True
            if module_data['status'] == 'Analyzed':
                _at_least_one_analyzed = True
        # Now we check the results; 'In Progress' takes precedence over
        # 'Analyzed'. The result lands in master_indicator_dict, which is what
        # the mongo update function uses.
        if _can_update:
            if _is_in_progress:
                log.debug('Setting indicator {} to "In Progress"'.format(cid))
                with self.master_indicator_lock:
                    self.master_indicator_dict[cid]['status'] = 'In Progress'
            elif _at_least_one_analyzed:
                log.debug('Setting indicator {} to "Analyzed"'.format(cid))
                with self.master_indicator_lock:
                    self.master_indicator_dict[cid]['status'] = 'Analyzed'
            else:
                log.debug('Not updating indicator {}'.format(cid))

    def start_module(self, module):
        """Start a single analysis module (modules expose start()/stop())."""
        module.start()

    def stop(self):
        """Signal the scheduler loop and all modules to shut down."""
        self.running = False
        for module in self.modules:
            module.stop()

    def get_all_new_indicators(self):
        """Query CRITS MongoDB for all non-benign, non-whitelisted 'New' indicators."""
        mongo_host = self.config.get('database', 'host')
        mongo_port = int(self.config.get('database', 'port'))
        connection = None
        try:
            connection = MongoClient(host=mongo_host, port=mongo_port)
            db = connection['crits']
            whitelist_reg = re.compile('^whitelist:')
            collection = db.indicators.find( { 'status' : 'New',
                                               'confidence.rating' : { '$ne' : 'benign' },
                                               'impact.rating' : { '$ne' : 'benign' },
                                               'bucket_list' : { '$nin' : [ whitelist_reg ] }
                                             } )
            return list(collection)
        except Exception as e:
            sys.exit('Error retrieving data from mongo: {}'.format(str(e)))
        finally:
            # Bug fix: if MongoClient() itself raised, 'connection' was never
            # bound and this finally block raised a NameError.
            if connection is not None:
                connection.close()

    def update_indicator_status(self, cid, status):
        """Set an indicator's status in CRITS, but only if it is still 'New'.

        Returns True on success, False otherwise.
        """
        mongo_host = self.config.get('database', 'host')
        mongo_port = int(self.config.get('database', 'port'))
        connection = None
        try:
            connection = MongoClient(host=mongo_host, port=mongo_port)
            db = connection['crits']
            # Make sure the indicator is still New first.
            log.debug('Ensuring indicator {} is still New'.format(cid))
            indicator = db.indicators.find_one( { '_id' : ObjectId(cid) } )
            if indicator['status'] != 'New':
                log.warning('Tried to update indicator {} but status was not New. Status was {}'.format(cid, indicator['status']))
                return False
            # Now we can update the indicator.
            log.info('Updating indicator {} with status {}'.format(cid, status))
            db.indicators.update_one( { '_id' : ObjectId(cid)}, { '$set' : { 'status' : status } } )
            return True
        except Exception as e:
            log.error('Error retrieving data from mongo: {}'.format(e))
        finally:
            # Bug fix: guard against 'connection' being unbound when
            # MongoClient() itself raised.
            if connection is not None:
                connection.close()
        return False

    def send_alert_to_ace(self, cid):
        """Build and submit an ACE alert for an indicator, then retry any
        previously failed submissions saved under $FA_HOME/.saq_alerts."""
        # Create the basic alert data.
        with self.master_indicator_lock:
            ind_value = self.master_indicator_dict[cid]['value']
        _results = { 'indicator' : { 'crits_id' : cid, 'value' : ind_value } }
        _observables = []
        _observables.append( { 'type' : 'indicator', 'value' : cid } )
        total_hit_count = 0
        _at_least_one_module = False
        for module in self.modules:
            if module.has_indicator(cid):
                _at_least_one_module = True
                module_results = module.get_indicator_data(cid)
                _results[module.getName()] = module_results['results']
                for fa_result in module_results['results']:
                    total_hit_count += int(fa_result['total_hits'])
                if 'observables' in module_results:
                    obs_count = 0
                    for observable in module_results['observables']:
                        obs_count += 1
                        _observables.append( observable )
                        log.debug('Adding observable {} {}'.format(observable['type'], observable['value']))
        if not _at_least_one_module:
            log.warning('Tried to submit an alert to ACE, but no module has this indicator: {}'.format(cid))
            return False
        # Send results.
        log.info('Sending alert to ACE for indicator {}'.format(cid))
        alert = Alert(
            tool = 'faqueue',
            tool_instance = 'nakylexsec101',
            alert_type = 'faqueue',
            desc = 'FA Queue - Indicator {} - {} Hits'.format(ind_value, total_hit_count),
            event_time = datetime.datetime.now(),
            details = _results
        )
        for obs in _observables:
            alert.add_observable(obs['type'], obs['value'])
        try:
            alert.submit(self.config.get('general', 'ace_submit'), 'blah')
        except Exception as e:
            log.error('Error submitting alert to ACE: {}'.format(str(e)))
        # Mark submitted so the indicator is removed from all the modules and
        # our master list.
        # NOTE(review): this write is not guarded by master_indicator_lock like
        # the other accesses -- confirm whether that is intentional.
        self.master_indicator_dict[cid]['submitted'] = True
        # Check for alerts that failed submission and attempt to resubmit them.
        failed_alerts_path = os.path.join(FA_HOME, '.saq_alerts')
        if os.path.exists(failed_alerts_path):
            for alert_dir in os.listdir(failed_alerts_path):
                # Only directories named like UUIDs are saved alerts.
                if not re.match(r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$', alert_dir):
                    continue
                data_file = os.path.join(failed_alerts_path, alert_dir, 'data.json')
                alert_full_path = os.path.join(failed_alerts_path, alert_dir)
                alert = Alert()
                url = None
                key = None
                ok_to_delete = False
                try:
                    url, key = alert.load_saved_alert(data_file)
                    alert.submit(url, key, save_on_fail=False)
                    ok_to_delete = True
                except AlertSubmitException as e:
                    log.error("Failed to re-submit alert to ACE with the following error: {}".format(str(e)))
                except Exception as e:
                    # The saved alert is unreadable; it will never succeed, so
                    # allow its directory to be deleted.
                    log.error("Unable to load alert from {0}: {1}".format(data_file, str(e)))
                    ok_to_delete = True
                if ok_to_delete:
                    try:
                        shutil.rmtree(alert_full_path)
                    except Exception as e:
                        log.error("Unable to delete directory {0}: {1}".format(alert_full_path, str(e)))

    def update_master_indicator_list(self):
        """Background thread body: poll CRITS every update_minutes for new indicators."""
        update_time = self.update_minutes * 60
        # Start in the past so the first pass refreshes immediately.
        last_update_time = time.time() - update_time - 1
        while self.running:
            current_time = time.time()
            if current_time - last_update_time > update_time:
                indicators = self.get_all_new_indicators()
                new_indicator_count = 0
                total_indicator_count = 0
                with self.master_indicator_lock:
                    for indicator in indicators:
                        cid = str(indicator['_id'])
                        ctype = indicator['type']
                        cvalue = indicator['value']
                        if cid not in self.master_indicator_dict:
                            self.master_indicator_dict[cid] = { 'status' : 'New', 'type' : ctype, 'value' : cvalue, 'submitted' : False }
                            new_indicator_count += 1
                            self.add_indicator_to_modules(cid, ctype, cvalue)
                    total_indicator_count = len(self.master_indicator_dict.keys())
                if new_indicator_count > 0:
                    log.info('Found {} new indicators to analyze.'.format(new_indicator_count))
                    log.info('Master list size is now {}'.format(total_indicator_count))
                last_update_time = time.time()
                self.has_initial_data = True
            time.sleep(1)

    def add_indicator_to_modules(self, cid, ctype, cvalue):
        """Register an indicator with every module that supports its type."""
        for module in self.modules:
            if ctype in module.get_valid_indicator_types():
                log.debug('Adding indicator {} to module {}'.format(cid, module.getName()))
                # NOTE(review): argument order differs from the call in
                # _process_new_indicator() -- confirm the expected signature.
                module.add_indicator(cid, ctype, cvalue)

    # This should only be called after update_indicator_status().
    # It removes any indicator that has been marked submitted.
    def clean_master_and_modules(self):
        """Drop submitted indicators from the master list and every module."""
        ids_to_remove = []
        was_modified = False
        total_indicator_count = 0
        with self.master_indicator_lock:
            for cid in self.master_indicator_dict.keys():
                if self.master_indicator_dict[cid]['submitted']:
                    ids_to_remove.append(cid)
            for cid in ids_to_remove:
                self.master_indicator_dict.pop(cid)
                was_modified = True
            total_indicator_count = len(self.master_indicator_dict.keys())
        # Now remove from the modules.
        for module in self.modules:
            for cid in ids_to_remove:
                module.remove_indicator(cid)
        if was_modified:
            log.info('Master list size is now {}'.format(total_indicator_count))
|
[
"pymongo.MongoClient",
"threading.Thread",
"importlib.import_module",
"bson.objectid.ObjectId",
"lib.saq.client.Alert",
"os.path.exists",
"re.match",
"datetime.datetime.now",
"time.sleep",
"threading.Lock",
"time.time",
"gc.collect",
"shutil.rmtree",
"configparser.ConfigParser",
"os.path.join",
"os.listdir",
"logging.getLogger",
"re.compile"
] |
[((413, 432), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (430, 432), False, 'import logging\n'), ((541, 555), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (553, 555), False, 'from configparser import ConfigParser\n'), ((792, 839), 'os.path.join', 'os.path.join', (['self.working', '"""scan_results.json"""'], {}), "(self.working, 'scan_results.json')\n", (804, 839), False, 'import time, os, sys\n'), ((1152, 1158), 'threading.Lock', 'Lock', ([], {}), '()\n', (1156, 1158), False, 'from threading import Lock, Thread\n'), ((2962, 3034), 'threading.Thread', 'Thread', ([], {'target': 'self.update_master_indicator_list', 'name': '"""IndicatorMaster"""'}), "(target=self.update_master_indicator_list, name='IndicatorMaster')\n", (2968, 3034), False, 'from threading import Lock, Thread\n'), ((14245, 14281), 'os.path.join', 'os.path.join', (['FA_HOME', '""".saq_alerts"""'], {}), "(FA_HOME, '.saq_alerts')\n", (14257, 14281), False, 'import time, os, sys\n'), ((14293, 14327), 'os.path.exists', 'os.path.exists', (['failed_alerts_path'], {}), '(failed_alerts_path)\n', (14307, 14327), False, 'import time, os, sys\n'), ((581, 623), 'os.path.join', 'os.path.join', (['FA_HOME', '"""etc"""', '"""config.ini"""'], {}), "(FA_HOME, 'etc', 'config.ini')\n", (593, 623), False, 'import time, os, sys\n'), ((3308, 3321), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3318, 3321), False, 'import time, os, sys\n'), ((9893, 9938), 'pymongo.MongoClient', 'MongoClient', ([], {'host': 'mongo_host', 'port': 'mongo_port'}), '(host=mongo_host, port=mongo_port)\n', (9904, 9938), False, 'from pymongo import MongoClient\n'), ((10004, 10029), 're.compile', 're.compile', (['"""^whitelist:"""'], {}), "('^whitelist:')\n", (10014, 10029), False, 'import re\n'), ((10999, 11044), 'pymongo.MongoClient', 'MongoClient', ([], {'host': 'mongo_host', 'port': 'mongo_port'}), '(host=mongo_host, port=mongo_port)\n', (11010, 11044), False, 'from pymongo import MongoClient\n'), 
((14358, 14388), 'os.listdir', 'os.listdir', (['failed_alerts_path'], {}), '(failed_alerts_path)\n', (14368, 14388), False, 'import time, os, sys\n'), ((15839, 15850), 'time.time', 'time.time', ([], {}), '()\n', (15848, 15850), False, 'import time, os, sys\n'), ((17187, 17200), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (17197, 17200), False, 'import time, os, sys\n'), ((5841, 5853), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5851, 5853), False, 'import gc\n'), ((13628, 13651), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13649, 13651), False, 'import datetime\n'), ((14558, 14614), 'os.path.join', 'os.path.join', (['failed_alerts_path', 'alert_dir', '"""data.json"""'], {}), "(failed_alerts_path, alert_dir, 'data.json')\n", (14570, 14614), False, 'import time, os, sys\n'), ((14649, 14692), 'os.path.join', 'os.path.join', (['failed_alerts_path', 'alert_dir'], {}), '(failed_alerts_path, alert_dir)\n', (14661, 14692), False, 'import time, os, sys\n'), ((14717, 14724), 'lib.saq.client.Alert', 'Alert', ([], {}), '()\n', (14722, 14724), False, 'from lib.saq.client import Alert, AlertSubmitException\n'), ((15754, 15765), 'time.time', 'time.time', ([], {}), '()\n', (15763, 15765), False, 'import time, os, sys\n'), ((17056, 17067), 'time.time', 'time.time', ([], {}), '()\n', (17065, 17067), False, 'import time, os, sys\n'), ((1990, 2027), 'importlib.import_module', 'importlib.import_module', (['_module_name'], {}), '(_module_name)\n', (2013, 2027), False, 'import importlib\n'), ((6087, 6100), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6097, 6100), False, 'import time, os, sys\n'), ((11269, 11282), 'bson.objectid.ObjectId', 'ObjectId', (['cid'], {}), '(cid)\n', (11277, 11282), False, 'from bson.objectid import ObjectId\n'), ((11667, 11680), 'bson.objectid.ObjectId', 'ObjectId', (['cid'], {}), '(cid)\n', (11675, 11680), False, 'from bson.objectid import ObjectId\n'), ((14413, 14502), 're.match', 're.match', 
(['"""^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"""', 'alert_dir'], {}), "('^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$',\n alert_dir)\n", (14421, 14502), False, 'import re\n'), ((15422, 15452), 'shutil.rmtree', 'shutil.rmtree', (['alert_full_path'], {}), '(alert_full_path)\n', (15435, 15452), False, 'import shutil\n')]
|
from inferelator import amusr_workflow
from inferelator import workflow
from inferelator.regression.base_regression import _RegressionWorkflowMixin
from inferelator.postprocessing.results_processor import ResultsProcessor
from inferelator.tests.artifacts.test_data import TestDataSingleCellLike, TEST_DATA, TEST_DATA_SPARSE
from inferelator.utils import InferelatorData
import pandas as pd
import numpy as np
class NoOutputRP(ResultsProcessor):
    """Results processor that suppresses file output by forcing output_dir to None."""

    def summarize_network(self, output_dir, gold_standard, priors):
        # Deliberately ignore the requested output_dir so nothing is written to disk.
        return super().summarize_network(None, gold_standard, priors)
# Factory method to spit out a puppet workflow
def create_puppet_workflow(regression_class=_RegressionWorkflowMixin,
                           base_class=workflow.WorkflowBase,
                           result_processor_class=NoOutputRP):
    """Build and return a 'puppet' workflow class for testing.

    The returned class behaves like a normal inferelator workflow, except
    that all data is handed to __init__ as in-memory objects instead of
    being loaded from files or environment variables.
    """
    parent_cls = workflow._factory_build_inferelator(regression=regression_class, workflow=base_class)

    class PuppetClass(parent_cls):
        """
        Standard workflow except it takes all the data as references to __init__ instead of as filenames on disk or
        as environment variables, and returns the model AUPR and edge counts without writing files (unless told to)
        """
        write_network = True
        network_file_name = None
        pr_curve_file_name = None
        initialize_mp = False

        def __init__(self, data, prior_data, gs_data):
            # Stash the in-memory data before running the normal initializer.
            self.data = data
            self.priors_data = prior_data
            self.gold_standard = gs_data
            super().__init__()

        def startup_run(self):
            # Data was injected directly; only priors/gold standard need processing.
            self.process_priors_and_gold_standard()

        def create_output_dir(self, *args, **kwargs):
            # Nothing is written to disk, so no output directory is created.
            pass

    return PuppetClass
class TaskDataStub(amusr_workflow.create_task_data_class(workflow_class="single-cell")):
    """Single-cell task-data stub backed by the shared in-memory test fixtures."""
    priors_data = TestDataSingleCellLike.priors_data
    tf_names = TestDataSingleCellLike.tf_names
    meta_data_task_column = "Condition"
    tasks_from_metadata = True
    task_name = "TestStub"
    task_workflow_type = "single-cell"

    def __init__(self, sparse=False):
        # Copy the fixture so tests cannot mutate the shared module-level data.
        self.data = TEST_DATA_SPARSE.copy() if sparse else TEST_DATA.copy()
        super().__init__()

    def get_data(self):
        """Return per-condition tasks, or this stub itself when metadata splitting is off."""
        if not self.tasks_from_metadata:
            return [self]
        return self.separate_tasks_by_metadata()
class FakeDRD:
    """Test double for a DRD step: echoes the expression matrix back unchanged."""

    def __init__(self, *args, **kwargs):
        # Accept and discard any constructor arguments.
        pass

    def run(self, expr, meta):
        # Return the expression matrix three times in place of computed outputs.
        return expr, expr, expr

    def validate_run(self, meta):
        return True
class FakeWriter(object):
    """Writer-like test double whose writerow silently discards every row."""

    def writerow(self, *args, **kwargs):
        pass
class FakeRegressionMixin(_RegressionWorkflowMixin):
    """Regression mixin test double returning fixed 2x2 beta matrices instead of fitting."""

    def run_regression(self):
        genes = ['gene1', 'gene2']
        tfs = ['tf1', 'tf2']
        beta = [pd.DataFrame(np.array([[0, 1], [0.5, 0.05]]), index=genes, columns=tfs)]
        beta_resc = [pd.DataFrame(np.array([[0, 1], [1, 0.05]]), index=genes, columns=tfs)]
        return beta, beta_resc

    def run_bootstrap(self, bootstrap):
        return True
class FakeResultProcessor:
    """Results-processor test double that reports a fixed summary triple."""
    # Populated by real processors; the fake never sets it.
    network_data = None

    def __init__(self, *args, **kwargs):
        pass

    def summarize_network(self, *args, **kwargs):
        return 1, 0, 0
|
[
"inferelator.amusr_workflow.create_task_data_class",
"inferelator.tests.artifacts.test_data.TEST_DATA_SPARSE.copy",
"inferelator.tests.artifacts.test_data.TEST_DATA.copy",
"numpy.array",
"inferelator.workflow._factory_build_inferelator"
] |
[((1835, 1902), 'inferelator.amusr_workflow.create_task_data_class', 'amusr_workflow.create_task_data_class', ([], {'workflow_class': '"""single-cell"""'}), "(workflow_class='single-cell')\n", (1872, 1902), False, 'from inferelator import amusr_workflow\n'), ((867, 957), 'inferelator.workflow._factory_build_inferelator', 'workflow._factory_build_inferelator', ([], {'regression': 'regression_class', 'workflow': 'base_class'}), '(regression=regression_class, workflow=\n base_class)\n', (902, 957), False, 'from inferelator import workflow\n'), ((2203, 2219), 'inferelator.tests.artifacts.test_data.TEST_DATA.copy', 'TEST_DATA.copy', ([], {}), '()\n', (2217, 2219), False, 'from inferelator.tests.artifacts.test_data import TestDataSingleCellLike, TEST_DATA, TEST_DATA_SPARSE\n'), ((2239, 2262), 'inferelator.tests.artifacts.test_data.TEST_DATA_SPARSE.copy', 'TEST_DATA_SPARSE.copy', ([], {}), '()\n', (2260, 2262), False, 'from inferelator.tests.artifacts.test_data import TestDataSingleCellLike, TEST_DATA, TEST_DATA_SPARSE\n'), ((2850, 2881), 'numpy.array', 'np.array', (['[[0, 1], [0.5, 0.05]]'], {}), '([[0, 1], [0.5, 0.05]])\n', (2858, 2881), True, 'import numpy as np\n'), ((2968, 2997), 'numpy.array', 'np.array', (['[[0, 1], [1, 0.05]]'], {}), '([[0, 1], [1, 0.05]])\n', (2976, 2997), True, 'import numpy as np\n')]
|
"""blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.views.decorators.cache import cache_page
from django.contrib.sitemaps.views import sitemap
from post import views as post_views
from post.sitemaps import PostSitemap
from post.feeds import LatestPostFeed
from core import views as core_views
# Sitemap registry handed to django.contrib.sitemaps.views.sitemap below.
sitemaps = {
    "posts": PostSitemap
}
urlpatterns = [
    # Honeypot at /admin/ to log intrusion attempts; the real admin is /fake-admin/.
    url(r"^admin/",
        include("admin_honeypot.urls", namespace="admin_honeypot")),
    url(r'^fake-admin/', admin.site.urls),
    url(r"^$",
        post_views.homepage_view,
        name="homepage"),
    url(r"^query/$",
        post_views.post_search_view,
        name="post_search"),
    url(r"^post/(?P<slug>[\w-]+)/$",
        post_views.post_detail_view,
        name="post_detail"),
    # Tag listings are cached for one hour.
    url(r"^tag/(?P<tag_slug>[\w-]+)$",
        cache_page(60 * 60)(post_views.PostListByTag.as_view()),
        name="post_tagged"),
    url(r"^all/$",
        post_views.PostListView.as_view(),
        name="post_list"),
    # Placeholder image generator, e.g. /image/300/200/.
    url(r"^image/(?P<width>\d+)/(?P<height>\d+)/$",
        core_views.placeholder_view,
        name="placeholder"),
    url(r"^markdown/", include("django_markdown.urls")),
    url(r"^sitemap\.xml$", sitemap, {"sitemaps": sitemaps},
        name="django.contrib.sitemaps.views.sitemap"),
    url(r"^feed/$",
        LatestPostFeed(),
        name="post_feed"),
    url(r"^api/",
        include("post.api.urls", namespace="api")),
    url(r'^grappelli/', include('grappelli.urls')),
    # url(r'^silk/', include('silk.urls', namespace='silk')) # performance profiling
]
# Serve uploaded media in development (static() is a no-op when DEBUG is False).
urlpatterns += static(settings.MEDIA_URL,
                      document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    import debug_toolbar
    # Prepend the debug-toolbar route only in development.
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
|
[
"post.feeds.LatestPostFeed",
"django.conf.urls.include",
"post.views.PostListByTag.as_view",
"post.views.PostListView.as_view",
"django.views.decorators.cache.cache_page",
"django.conf.urls.url",
"django.conf.urls.static.static"
] |
[((2299, 2360), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (2305, 2360), False, 'from django.conf.urls.static import static\n'), ((1194, 1230), 'django.conf.urls.url', 'url', (['"""^fake-admin/"""', 'admin.site.urls'], {}), "('^fake-admin/', admin.site.urls)\n", (1197, 1230), False, 'from django.conf.urls import url, include\n'), ((1237, 1289), 'django.conf.urls.url', 'url', (['"""^$"""', 'post_views.homepage_view'], {'name': '"""homepage"""'}), "('^$', post_views.homepage_view, name='homepage')\n", (1240, 1289), False, 'from django.conf.urls import url, include\n'), ((1312, 1376), 'django.conf.urls.url', 'url', (['"""^query/$"""', 'post_views.post_search_view'], {'name': '"""post_search"""'}), "('^query/$', post_views.post_search_view, name='post_search')\n", (1315, 1376), False, 'from django.conf.urls import url, include\n'), ((1399, 1485), 'django.conf.urls.url', 'url', (['"""^post/(?P<slug>[\\\\w-]+)/$"""', 'post_views.post_detail_view'], {'name': '"""post_detail"""'}), "('^post/(?P<slug>[\\\\w-]+)/$', post_views.post_detail_view, name=\n 'post_detail')\n", (1402, 1485), False, 'from django.conf.urls import url, include\n'), ((1725, 1827), 'django.conf.urls.url', 'url', (['"""^image/(?P<width>\\\\d+)/(?P<height>\\\\d+)/$"""', 'core_views.placeholder_view'], {'name': '"""placeholder"""'}), "('^image/(?P<width>\\\\d+)/(?P<height>\\\\d+)/$', core_views.\n placeholder_view, name='placeholder')\n", (1728, 1827), False, 'from django.conf.urls import url, include\n'), ((1902, 2008), 'django.conf.urls.url', 'url', (['"""^sitemap\\\\.xml$"""', 'sitemap', "{'sitemaps': sitemaps}"], {'name': '"""django.contrib.sitemaps.views.sitemap"""'}), "('^sitemap\\\\.xml$', sitemap, {'sitemaps': sitemaps}, name=\n 'django.contrib.sitemaps.views.sitemap')\n", (1905, 2008), False, 'from django.conf.urls import url, include\n'), ((1129, 1187), 
'django.conf.urls.include', 'include', (['"""admin_honeypot.urls"""'], {'namespace': '"""admin_honeypot"""'}), "('admin_honeypot.urls', namespace='admin_honeypot')\n", (1136, 1187), False, 'from django.conf.urls import url, include\n'), ((1658, 1691), 'post.views.PostListView.as_view', 'post_views.PostListView.as_view', ([], {}), '()\n', (1689, 1691), True, 'from post import views as post_views\n'), ((1863, 1894), 'django.conf.urls.include', 'include', (['"""django_markdown.urls"""'], {}), "('django_markdown.urls')\n", (1870, 1894), False, 'from django.conf.urls import url, include\n'), ((2042, 2058), 'post.feeds.LatestPostFeed', 'LatestPostFeed', ([], {}), '()\n', (2056, 2058), False, 'from post.feeds import LatestPostFeed\n'), ((2114, 2155), 'django.conf.urls.include', 'include', (['"""post.api.urls"""'], {'namespace': '"""api"""'}), "('post.api.urls', namespace='api')\n", (2121, 2155), False, 'from django.conf.urls import url, include\n'), ((2183, 2208), 'django.conf.urls.include', 'include', (['"""grappelli.urls"""'], {}), "('grappelli.urls')\n", (2190, 2208), False, 'from django.conf.urls import url, include\n'), ((1545, 1564), 'django.views.decorators.cache.cache_page', 'cache_page', (['(60 * 60)'], {}), '(60 * 60)\n', (1555, 1564), False, 'from django.views.decorators.cache import cache_page\n'), ((1565, 1599), 'post.views.PostListByTag.as_view', 'post_views.PostListByTag.as_view', ([], {}), '()\n', (1597, 1599), True, 'from post import views as post_views\n'), ((2490, 2517), 'django.conf.urls.include', 'include', (['debug_toolbar.urls'], {}), '(debug_toolbar.urls)\n', (2497, 2517), False, 'from django.conf.urls import url, include\n')]
|
import curses
import colorama
from colorama import Fore, Back, Style
colorama.init()
class menuInterface:
    """Interactive curses checklist.

    Arrow keys move the selection, Enter toggles a task's status, and q
    quits, returning the (possibly modified) task list.
    """

    def __init__(self, menu):
        # menu: list of dicts with "task" (str) and "status" (bool) keys.
        self.info = menu

    def print_menu(self, stdscr, selected_row_idx):
        """Redraw the centered task list, highlighting the selected row."""
        tasks = self.info  # renamed from `list`, which shadowed the builtin
        stdscr.clear()
        h, w = stdscr.getmaxyx()
        for idx, row in enumerate(tasks):
            marker = ' ✓ ' if row["status"] else ' x '
            template = '{id} {char} {task}'
            # NOTE: centering uses the template's length (as the original did),
            # not the rendered text length.
            x = w // 2 - len(template) // 2
            y = h // 2 - len(tasks) // 2 + idx
            pair = 1 if row["status"] else 2  # 1: green (done), 2: red (pending)
            rendered = template.format(id=idx, char=marker, task=row["task"])
            if idx == selected_row_idx:
                stdscr.attron(curses.color_pair(pair))
                stdscr.addstr(y, x, rendered)
                stdscr.attroff(curses.color_pair(pair))
            else:
                stdscr.addstr(y, x, rendered)
        x = 0
        y = h // 2 + len(tasks) // 2 + 1
        footer = "↑ or ↓ to scroll the list \n ↵ (Enter) to toggle" + "\n" + "q to quit"
        stdscr.attron(curses.color_pair(3))
        stdscr.addstr(y, x, footer)
        # BUG FIX: the original called attron() a second time here, leaving the
        # footer color attribute permanently enabled; it must be turned off.
        stdscr.attroff(curses.color_pair(3))
        stdscr.refresh()

    def update(self, stdscr, rowid, status):
        """Flip the status of row `rowid` and redraw with it selected."""
        self.info[rowid]["status"] = not status
        self.print_menu(stdscr, rowid)
        return

    def main(self, stdscr):
        """Curses event loop (suitable for curses.wrapper). Returns the task list on q."""
        menu = self.info
        # Turn off cursor blinking.
        curses.curs_set(0)
        # Color schemes: done rows, pending rows, footer text.
        curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_GREEN)
        curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_RED)
        curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_BLACK)
        # Currently selected row.
        current_row = 0
        self.print_menu(stdscr, current_row)
        while 1:
            key = stdscr.getch()
            if key == curses.KEY_UP and current_row > 0:
                current_row -= 1
            elif key == curses.KEY_DOWN and current_row < len(menu) - 1:
                current_row += 1
            elif key == curses.KEY_ENTER or key in [10, 13]:
                s = menu[current_row]["status"]
                self.update(stdscr, current_row, s)
            # q exits the program and returns the task list
            elif key == ord("q"):
                return self.info
            self.print_menu(stdscr, current_row)
|
[
"colorama.init",
"curses.color_pair",
"curses.curs_set",
"curses.init_pair"
] |
[((69, 84), 'colorama.init', 'colorama.init', ([], {}), '()\n', (82, 84), False, 'import colorama\n'), ((1607, 1625), 'curses.curs_set', 'curses.curs_set', (['(0)'], {}), '(0)\n', (1622, 1625), False, 'import curses\n'), ((1659, 1718), 'curses.init_pair', 'curses.init_pair', (['(1)', 'curses.COLOR_BLACK', 'curses.COLOR_GREEN'], {}), '(1, curses.COLOR_BLACK, curses.COLOR_GREEN)\n', (1675, 1718), False, 'import curses\n'), ((1727, 1784), 'curses.init_pair', 'curses.init_pair', (['(2)', 'curses.COLOR_BLACK', 'curses.COLOR_RED'], {}), '(2, curses.COLOR_BLACK, curses.COLOR_RED)\n', (1743, 1784), False, 'import curses\n'), ((1793, 1852), 'curses.init_pair', 'curses.init_pair', (['(3)', 'curses.COLOR_WHITE', 'curses.COLOR_BLACK'], {}), '(3, curses.COLOR_WHITE, curses.COLOR_BLACK)\n', (1809, 1852), False, 'import curses\n'), ((1241, 1261), 'curses.color_pair', 'curses.color_pair', (['(3)'], {}), '(3)\n', (1258, 1261), False, 'import curses\n'), ((1318, 1338), 'curses.color_pair', 'curses.color_pair', (['(3)'], {}), '(3)\n', (1335, 1338), False, 'import curses\n'), ((652, 675), 'curses.color_pair', 'curses.color_pair', (['pair'], {}), '(pair)\n', (669, 675), False, 'import curses\n'), ((875, 898), 'curses.color_pair', 'curses.color_pair', (['pair'], {}), '(pair)\n', (892, 898), False, 'import curses\n')]
|
#
# MIT License
#
# Copyright (c) 2018 WillQ
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import pickle
from typing import TYPE_CHECKING, Dict, List, Union
from monkq.assets.order import ORDER_T, BaseOrder
from monkq.assets.trade import Trade
from monkq.utils.timefunc import utc_datetime
from pandas.tseries.frequencies import DateOffset, to_offset
if TYPE_CHECKING:
from monkq.context import Context
DAILY_STAT_TYPE = Dict[str, Union[float, datetime.datetime]]
class Statistic():
    """Collects capital snapshots, orders, and trades during a run and pickles a report."""

    def __init__(self, context: "Context"):
        self.context = context
        settings = context.settings
        # Report destination and capital sampling frequency, with defaults.
        self.report_file: str = getattr(settings, 'REPORT_FILE', 'result.pkl')
        self.collect_freq = getattr(settings, 'COLLECT_FREQ', '4H')
        self.daily_capital: List[DAILY_STAT_TYPE] = []
        self.order_collections: List[BaseOrder] = []
        self.trade_collections: List[Trade] = []
        self.collect_offset: DateOffset = to_offset(self.collect_freq)
        # Epoch sentinel guarantees the very first freq_collect_account() samples.
        self.last_collect_time: datetime.datetime = utc_datetime(1970, 1, 1)

    def collect_account_info(self) -> None:
        """Snapshot every account's total capital, stamped with the current time."""
        snapshot: DAILY_STAT_TYPE = {
            name: account.total_capital
            for name, account in self.context.accounts.items()
        }
        snapshot['timestamp'] = self.context.now
        self.daily_capital.append(snapshot)

    def freq_collect_account(self) -> None:
        """Sample account capital if at least one collect interval has elapsed."""
        if self.context.now - self.last_collect_time >= self.collect_offset.delta:
            self.collect_account_info()
            self.last_collect_time = self.context.now

    def collect_order(self, order: ORDER_T) -> None:
        """Record a submitted order."""
        self.order_collections.append(order)

    def collect_trade(self, trade: Trade) -> None:
        """Record an executed trade."""
        self.trade_collections.append(trade)

    def _pickle_obj(self) -> dict:
        """Bundle everything that goes into the report file."""
        return {
            "daily_capital": self.daily_capital,
            "orders": self.order_collections,
            "trades": self.trade_collections,
            "settings": self.context.settings
        }

    def report(self) -> None:
        """Pickle the collected statistics to self.report_file."""
        with open(self.report_file, 'wb') as f:
            pickle.dump(self._pickle_obj(), f)
|
[
"monkq.utils.timefunc.utc_datetime",
"pandas.tseries.frequencies.to_offset"
] |
[((1969, 1997), 'pandas.tseries.frequencies.to_offset', 'to_offset', (['self.collect_freq'], {}), '(self.collect_freq)\n', (1978, 1997), False, 'from pandas.tseries.frequencies import DateOffset, to_offset\n'), ((2050, 2074), 'monkq.utils.timefunc.utc_datetime', 'utc_datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (2062, 2074), False, 'from monkq.utils.timefunc import utc_datetime\n')]
|
from solvers.rigidity_solver.models import *
import numpy as np
_scale = lambda arr: arr * 15
v = lambda x, y, z: np.array([x, y, z], dtype=np.double)
p = lambda x, y, z: (_scale(np.array([x, y, z], dtype=np.double)))
def lerp(p, q, weight):
    """Linearly interpolate between p and q: weight 0 gives p, weight 1 gives q."""
    delta = q - p
    return p + delta * weight
def define(stage):
    """Build a staged table/scissor model of beams and joints.

    Constructs a triangular frame (corners A, B, C) with an upper ring of
    beams and crossing leg beams, adding progressively more bracing beams
    and joints as `stage` increases (1 through 4). Returns locals() so
    callers can pick out `model`, `_p`, `_bmap`, etc.
    """
    # Named reference points: lower-case a/b/c form the base triangle,
    # "X-u"/"X-d" are the upper/lower corner points (z = +1 / -1, scaled).
    _p = {
        "a": p(0, 0, 0),
        "b": p(1, 0, 0),
        "c": p(1 / 2, np.sqrt(3) / 2, 0),
        "A-u": p(3 / 2, np.sqrt(3) / 2, 1),
        "A-d": p(3 / 2, np.sqrt(3) / 2, -1),
        "B-u": p(-1 / 2, np.sqrt(3) / 2, 1),
        "B-d": p(-1 / 2, np.sqrt(3) / 2, -1),
        "C-u": p(1 / 2, -np.sqrt(3) / 2, 1),
        "C-d": p(1 / 2, -np.sqrt(3) / 2, -1),
    }
    # Interpolated points along the upper-ring edges (midpoints and 0.1/0.9 fractions).
    _p.update({
        "ab-mid": lerp(_p["A-u"], _p["B-u"], 0.5),
        "bc-mid": lerp(_p["B-u"], _p["C-u"], 0.5),
        "ca-mid": lerp(_p["C-u"], _p["A-u"], 0.5),
        "ab-0.1": lerp(_p["A-u"], _p["B-u"], 0.1),
        "bc-0.1": lerp(_p["B-u"], _p["C-u"], 0.1),
        "ca-0.1": lerp(_p["C-u"], _p["A-u"], 0.1),
        "ba-0.1": lerp(_p["B-u"], _p["A-u"], 0.1),
        "cb-0.1": lerp(_p["C-u"], _p["B-u"], 0.1),
        "ac-0.1": lerp(_p["A-u"], _p["C-u"], 0.1),
        "ab-0.9": lerp(_p["A-u"], _p["B-u"], 0.9),
        "bc-0.9": lerp(_p["B-u"], _p["C-u"], 0.9),
        "ca-0.9": lerp(_p["C-u"], _p["A-u"], 0.9),
        "ba-0.9": lerp(_p["B-u"], _p["A-u"], 0.9),
        "cb-0.9": lerp(_p["C-u"], _p["B-u"], 0.9),
        "ac-0.9": lerp(_p["A-u"], _p["C-u"], 0.9),
    })
    def beam_init(p, q, density=0.5):
        # Tetrahedral beam between points p and q with fixed thickness.
        return Beam.tetra(p, q, density=density, thickness=1)
    # Fractions along the corner-to-corner lines where stage 2/3 braces attach.
    stage_2_frac = 0.25
    stage_3_frac = 0.7
    normalize = lambda x: x / np.linalg.norm(x)
    # Unit direction vectors along the base-triangle edges, plus vertical z.
    _da = normalize(_p["c"] - _p["b"])
    _db = normalize(_p["a"] - _p["c"])
    _dc = normalize(_p["b"] - _p["a"])
    _dz = v(0, 0, 1)
    model = Model()
    # Stage-1 beams: the upper ring ("top-*") and the crossing legs ("X-y").
    _bmap = {
        "top-A": beam_init(_p["B-u"], _p["C-u"]),
        "top-B": beam_init(_p["C-u"], _p["A-u"]),
        "top-C": beam_init(_p["A-u"], _p["B-u"]),
        # "top-ab-bc": beam_init(_p["ab-mid"], _p["bc-mid"]),
        # "top-bc-ca": beam_init(_p["bc-mid"], _p["ca-mid"]),
        # "top-ca-ab": beam_init(_p["ca-mid"], _p["ab-mid"]),
        #
        # "core-ab": beam_init(_p['a'], _p["b"]),
        # "core-bc": beam_init(_p["b"], _p["c"]),
        # "core-ca": beam_init(_p["c"], _p["a"]),
        #
        "A-c": beam_init(_p["ca-0.9"], _p["C-d"]),
        "A-b": beam_init(_p["ab-0.1"], _p["B-d"]),
        "B-a": beam_init(_p["ab-0.9"], _p["A-d"]),
        "B-c": beam_init(_p["bc-0.1"], _p["C-d"]),
        "C-b": beam_init(_p["bc-0.9"], _p["B-d"]),
        "C-a": beam_init(_p["ca-0.1"], _p["A-d"]),
    }
    # Stage-1 joints: lower-corner hinges, upper-ring hinges, ring-to-leg
    # hinges, and mid-crossing hinges between paired legs.
    joints = [
        Joint(_bmap["B-a"], _bmap["C-a"], pivot=_p["A-d"], rotation_axes=_da),
        Joint(_bmap["C-b"], _bmap["A-b"], pivot=_p["B-d"], rotation_axes=_db),
        Joint(_bmap["A-c"], _bmap["B-c"], pivot=_p["C-d"], rotation_axes=_dc),
        Joint(_bmap["top-C"], _bmap["top-A"], pivot=_p["B-u"], rotation_axes=-v(0, 0, 1)),
        Joint(_bmap["top-A"], _bmap["top-B"], pivot=_p["C-u"], rotation_axes=-v(0, 0, 1)),
        Joint(_bmap["top-B"], _bmap["top-C"], pivot=_p["A-u"], rotation_axes=-v(0, 0, 1)),
        Joint(_bmap["top-B"], _bmap["A-b"], pivot=_p["ab-0.1"], rotation_axes=_da),
        Joint(_bmap["top-C"], _bmap["A-c"], pivot=_p["ca-0.9"], rotation_axes=_da),
        Joint(_bmap["top-C"], _bmap["B-c"], pivot=_p["bc-0.1"], rotation_axes=_db),
        Joint(_bmap["top-A"], _bmap["B-a"], pivot=_p["ab-0.9"], rotation_axes=_db),
        Joint(_bmap["top-A"], _bmap["C-a"], pivot=_p["ca-0.1"], rotation_axes=_dc),
        Joint(_bmap["top-B"], _bmap["C-b"], pivot=_p["bc-0.9"], rotation_axes=_dc),
        Joint(_bmap["A-b"], _bmap["B-a"],
              pivot=(_p["ab-0.1"] + _p["ab-0.9"] + _p["A-d"] + _p["B-d"]) / 4,
              rotation_axes=np.cross(_dc, _dz)),
        Joint(_bmap["B-c"], _bmap["C-b"],
              pivot=(_p["bc-0.1"] + _p["bc-0.9"] + _p["B-d"] + _p["C-d"]) / 4,
              rotation_axes=np.cross(_da, _dz)),
        Joint(_bmap["C-a"], _bmap["A-c"],
              pivot=(_p["ca-0.1"] + _p["ca-0.9"] + _p["C-d"] + _p["A-d"]) / 4,
              rotation_axes=np.cross(_db, _dz)),
    ]
    ax_z = v(0, 0, 1)
    # Stage 2: horizontal braces between the paired legs near their tops.
    if stage >= 2:
        _stage_2_points = {
            f"{a}-u-{b}-d-{stage_2_frac}": lerp(_p[f"{a.lower()}{b.lower()}-0.1"], _p[f"{b}-d"], stage_2_frac)
            for a in "ABC" for b in "ABC" if a != b
        }
        _p.update(_stage_2_points)
        _stage_2_beam = {
            f"s2-{a}{b}": beam_init(_p[f"{a}-u-{b}-d-{stage_2_frac}"], _p[f"{b}-u-{a}-d-{stage_2_frac}"])
            for a, b in ("AB", "BC", "CA")
        }
        _bmap.update(_stage_2_beam)
        _stage_2_joint = [
            Joint(_bmap[f"s2-{a}{b}"], _bmap[f"{a}-{b.lower()}"], pivot=_p[f"{a}-u-{b}-d-{stage_2_frac}"], rotation_axes=ax_z)
            for a, b in ("AB", "BC", "CA")
        ] + [
            Joint(_bmap[f"s2-{a}{b}"], _bmap[f"{b}-{a.lower()}"], pivot=_p[f"{b}-u-{a}-d-{stage_2_frac}"], rotation_axes=ax_z)
            for a, b in ("AB", "BC", "CA")
        ]
        joints.extend(_stage_2_joint)
    # Stage 3: a second set of braces lower down the legs (fraction 0.7).
    if stage >= 3:
        _stage_3_points = {
            f"{a}-u-{b}-d-{stage_3_frac}": lerp(_p[f"{a}-u"], _p[f"{b}-d"], stage_3_frac)
            for a in "ABC" for b in "ABC" if a != b
        }
        _p.update(_stage_3_points)
        _stage_3_beam = {
            f"s3-{a}{b}": beam_init(_p[f"{a}-u-{b}-d-{stage_3_frac}"], _p[f"{b}-u-{a}-d-{stage_3_frac}"])
            for a, b in ("AB", "BC", "CA")
        }
        _bmap.update(_stage_3_beam)
        _stage_3_joint = [
            Joint(_bmap[f"s3-{a}{b}"], _bmap[f"{a}-{b.lower()}"], pivot=_p[f"{a}-u-{b}-d-{stage_3_frac}"], rotation_axes=ax_z)
            for a, b in ("AB", "BC", "CA")
        ] + [
            Joint(_bmap[f"s3-{a}{b}"], _bmap[f"{b}-{a.lower()}"], pivot=_p[f"{b}-u-{a}-d-{stage_3_frac}"], rotation_axes=ax_z)
            for a, b in ("AB", "BC", "CA")
        ]
        joints.extend(_stage_3_joint)
    # Stage 4: vertical struts linking stage-2 brace midpoints to the upper-ring midpoints.
    if stage >= 4:
        _indices = ["AB", "BC", "CA"]
        _stage_4_points = {
            f"s4-{_indices[i % 3]}": lerp(_p[f"{a}-u-{b}-d-{stage_2_frac}"], _p[f"{b}-u-{a}-d-{stage_2_frac}"], 0.5)
            for i, (a, b) in enumerate(_indices)
        }
        _p.update(_stage_4_points)
        _stage_4_beam = {
            f"s4-{_indices[i % 3]}": beam_init(_p[f"s4-{_indices[i]}"], _p[f"{a.lower()}{b.lower()}-mid"])
            for i, (a, b) in enumerate(_indices)
        }
        _bmap.update(_stage_4_beam)
        _stage_4_joint = [
            Joint(_bmap[f"s4-{_indices[i % 3]}"], _bmap[f"s2-{_indices[i % 3]}"],
                  pivot=_p[f"s4-{_indices[i]}"],
                  rotation_axes=np.cross((_dc, _da, _db)[i], v(0, 0, 1))
                  )
            for i, (a, b) in enumerate(_indices)
        ] + [
            Joint(_bmap[f"s4-{_indices[i % 3]}"], _bmap[f"top-{'CAB'[i]}"],
                  pivot=_p[f"{a.lower()}{b.lower()}-mid"],
                  rotation_axes=np.cross((_dc, _da, _db)[i], v(0, 0, 1))
                  )
            for i, (a, b) in enumerate(_indices)
        ]
        joints.extend(_stage_4_joint)
    beams = list(_bmap.values())
    model.add_beams(beams)
    model.add_joints(joints)
    return locals()
if __name__ == "__main__":
    # Analyze the stage-1 model interactively.
    model = define(1)["model"]
    model.visualize(show_hinge=True)
    points = model.point_matrix()
    edges = model.edge_matrix()
    # BUG FIX: the original line ended with a trailing comma, which wrapped the
    # stiffness matrix in a 1-tuple before it was passed to
    # generalized_courant_fischer.
    stiffness = spring_energy_matrix_accelerate_3D(points, edges, abstract_edges=[])
    constraints = model.constraint_matrix()
    new_stiffness, B = generalized_courant_fischer(
        stiffness,
        constraints
    )
    # Report the 20 smallest eigenvalues of the constrained system.
    pairs = model.eigen_solve(num_pairs=20)
    print([e for e, v in pairs])

    # Export every stage's model for downstream rendering.
    for stage in range(1, 4 + 1):
        model = define(stage)["model"]
        model.save_json(f"output/table-stage{stage}.json")
|
[
"numpy.linalg.norm",
"numpy.cross",
"numpy.array",
"numpy.sqrt"
] |
[((115, 151), 'numpy.array', 'np.array', (['[x, y, z]'], {'dtype': 'np.double'}), '([x, y, z], dtype=np.double)\n', (123, 151), True, 'import numpy as np\n'), ((180, 216), 'numpy.array', 'np.array', (['[x, y, z]'], {'dtype': 'np.double'}), '([x, y, z], dtype=np.double)\n', (188, 216), True, 'import numpy as np\n'), ((1649, 1666), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (1663, 1666), True, 'import numpy as np\n'), ((379, 389), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (386, 389), True, 'import numpy as np\n'), ((424, 434), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (431, 434), True, 'import numpy as np\n'), ((468, 478), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (475, 478), True, 'import numpy as np\n'), ((515, 525), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (522, 525), True, 'import numpy as np\n'), ((560, 570), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (567, 570), True, 'import numpy as np\n'), ((3840, 3858), 'numpy.cross', 'np.cross', (['_dc', '_dz'], {}), '(_dc, _dz)\n', (3848, 3858), True, 'import numpy as np\n'), ((4010, 4028), 'numpy.cross', 'np.cross', (['_da', '_dz'], {}), '(_da, _dz)\n', (4018, 4028), True, 'import numpy as np\n'), ((4180, 4198), 'numpy.cross', 'np.cross', (['_db', '_dz'], {}), '(_db, _dz)\n', (4188, 4198), True, 'import numpy as np\n'), ((607, 617), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (614, 617), True, 'import numpy as np\n'), ((652, 662), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (659, 662), True, 'import numpy as np\n')]
|
from urllib.parse import urlparse
def url_checker(url_site,
                url_site_disallowed_path):
    """Return True when the URL's path contains none of the disallowed fragments.

    Returns False when any fragment of `url_site_disallowed_path` occurs in
    the URL's path, and also when the disallowed list is empty (preserving
    the original behavior).
    """
    # Hoist the parse out of the loop: the URL does not change per fragment.
    path = urlparse(url_site).path
    validator = False
    for site_path in url_site_disallowed_path:
        if site_path in path:
            return False
        validator = True
    return validator
|
[
"urllib.parse.urlparse"
] |
[((205, 223), 'urllib.parse.urlparse', 'urlparse', (['url_site'], {}), '(url_site)\n', (213, 223), False, 'from urllib.parse import urlparse\n')]
|
import ctypes
import time

# Load the shared library and declare concat's return type so ctypes
# converts the result to bytes instead of a raw pointer.
lib = ctypes.CDLL("./so7.so")
left = ("ěščř ЩжΛλ" * 10000).encode("utf-8")
right = ("<foobar>" * 10000).encode("utf-8")
lib.concat.restype = ctypes.c_char_p

# Repeatedly call the C concat routine and report the result length.
for _ in range(100000):
    combined = lib.concat(left, right)
    print(len(combined))
    time.sleep(0.001)
|
[
"ctypes.CDLL",
"time.sleep"
] |
[((33, 56), 'ctypes.CDLL', 'ctypes.CDLL', (['"""./so7.so"""'], {}), "('./so7.so')\n", (44, 56), False, 'import ctypes\n'), ((251, 268), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (261, 268), False, 'import time\n')]
|
import discord
import time
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
from discord.ext import commands
from selenium import webdriver
# Selenium setup
options = webdriver.ChromeOptions()
options.add_argument("headless")
options.add_argument("log-level=3")
# NOTE(review): chrome_options= is deprecated in newer Selenium releases in
# favor of options= — confirm the pinned Selenium version before changing.
driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)
# BUG FIX: the original first created a throwaway discord.Client() and then
# immediately rebound the name; commands.Bot is itself a Client subclass,
# so only the Bot is needed.
tab_bot = commands.Bot(command_prefix=".")
# Startup
@tab_bot.event
async def on_ready():
    """Report the bot's identity once the gateway connection is established."""
    print(f"Logged as {tab_bot.user}")
# On command
@tab_bot.command()
@commands.cooldown(1, 15, commands.BucketType.user)
# Search for requested tabulature on Songsterr
async def tab(ctx, *, arg):
    """Search Google for a Songsterr tab matching `arg` and post the details.

    Drives the shared module-level Selenium `driver`: googles
    "songsterr.com <arg>", follows the first result, and — depending on
    whether it lands on a tab page or an artist page — scrapes metadata
    (BPM, time signature, tuning, tracks, difficulty, chords link) and
    sends it as a Discord embed.

    NOTE(review): the scraped CSS class names (vs1qc, vscf, Cv3137, C8nsu,
    C6c2vy, Bpv319, Cgl126, Cae2ew, Beiqi) look like generated Songsterr
    class names and will break when the site is redeployed; the broad bare
    `except:` handlers below fall back to "?" in that case.
    """
    search = "songsterr.com " + arg
    driver.get("https://google.com")
    # Dismiss Google's cookie-consent dialog if it appears.
    try:
        driver.find_element_by_id("L2AGLb").click()
    except:
        pass
    driver.find_element_by_name("q").send_keys(search)
    driver.find_element_by_name("q").send_keys(Keys.ENTER)
    # Follow the first organic search result.
    driver.find_element_by_css_selector("div a h3").click()
    time.sleep(3)
    url = driver.current_url
    if url[:32] == "https://www.songsterr.com/a/wsa/":
        if "-tab-" in url:
            # Dismiss Songsterr's consent dialog if it appears.
            try:
                driver.find_element_by_id("accept").click()
            except:
                pass
            # BPM
            try:
                bpm = driver.find_element_by_class_name("vs1qc").text[1:]
            except:
                bpm = "?"
            # Time signature
            try:
                show_time_signature = True
                time_signature = []
                for name in driver.find_elements_by_class_name("vscf"):
                    time_signature.append(name.text)
                # Two-digit values indicate the scrape picked up non-signature
                # text; fall back to "?" in that case.
                for name in time_signature:
                    if int(name) > 9:
                        time_signature = "?"
                        show_time_signature = False
                if show_time_signature == True:
                    # Reformat the digit list into "n/m" pairs separated by spaces.
                    time_signature = str(time_signature)
                    time_signature = (
                        time_signature.replace("[", "")
                        .replace("]", "")
                        .replace("'", "")
                        .replace(",", "")
                    )
                    time_signature = time_signature.replace(" ", "")
                    length = len(time_signature) * 2
                    for i in range(1, len(time_signature) * 2, 3):
                        time_signature = time_signature[:i] + "/" + time_signature[i:]
                    time_signature = time_signature.replace(" ", "")
                    for i in range(3, len(time_signature) * 2, 4):
                        time_signature = time_signature[:i] + " " + time_signature[i:]
                    # Trim any trailing "/" left by the insertion loops.
                    for i in range(len(time_signature)):
                        if time_signature[len(time_signature) - 1] == "/":
                            time_signature = time_signature[: len(time_signature) - 1]
                    print(time_signature)
                    time_signature = time_signature[:length]
            except:
                time_signature = "?"
            # Number of tracks
            try:
                driver.find_element_by_id("control-mixer").click()
                tracks_number = len(driver.find_elements_by_class_name("Cv3137"))
            except:
                tracks_number = "?"
            # Tuning
            try:
                tuning = []
                for name in driver.find_elements_by_class_name("C8nsu"):
                    tuning.append(name.text)
                tuning = str(tuning)
                tuning = (
                    tuning.replace("[", " ")
                    .replace("]", " ")
                    .replace(",", " ")
                    .replace("'", "")
                )
                # Reverse so strings are listed low-to-high.
                tuning = tuning[::-1]
            except:
                tuning = "?"
            # Chords, if avaliable
            try:
                chords_url = driver.find_element_by_class_name("C6c2vy").get_attribute(
                    "href"
                )
            except:
                chords_url = "No chords for this particular song"
            # Artist and song name
            try:
                artist_name = driver.find_element_by_class_name("Bpv319").text
                song_title = driver.find_element_by_css_selector(
                    "span[aria-label='title']"
                ).text
            except:
                artist_name = "?"
                song_title = "?"
            # Difficulty
            try:
                driver.find_element_by_id("menu-search").click()
                time.sleep(1)
                driver.find_element_by_class_name("Cgl126").send_keys(
                    artist_name + " " + song_title
                )
                difficulty = driver.find_element_by_class_name("Cae2ew").get_attribute(
                    "title"
                )
            except:
                difficulty = "?"
            # Tab embed
            embed = discord.Embed(title="Requested tab", color=0x128DF6)
            embed.add_field(name="Artist name", value=artist_name, inline=False)
            embed.add_field(name="Song title", value=song_title, inline=False)
            embed.add_field(name="Url", value=url, inline=False)
            embed.add_field(name="Chords", value=chords_url, inline=False)
            embed.add_field(name="Difficulty", value=difficulty, inline=False)
            embed.add_field(name="BPM", value=bpm, inline=False)
            embed.add_field(name="Tuning", value=tuning, inline=False)
            embed.add_field(name="Time signature", value=time_signature, inline=False)
            embed.add_field(name="Number of tracks", value=tracks_number, inline=False)
            await ctx.send(embed=embed)
            tab.reset_cooldown(ctx)
        elif "-tabs-" in url:
            # Number of tabs for particular artist
            try:
                tabs_number = len(driver.find_elements_by_class_name("Beiqi"))
                artist_name = driver.find_element_by_id("top").text
            except:
                tabs_number = "?"
                artist_name = "?"
            # 50 results is the page maximum, so report it as a lower bound.
            if tabs_number == 50:
                tabs_number = "50+"
            # Tab embed
            embed = discord.Embed(title="Requested artist", color=0x128DF6)
            embed.add_field(name="Artist name", value=artist_name[:-4], inline=False)
            embed.add_field(name="Url", value=url, inline=False)
            embed.add_field(name="Number of tabs", value=tabs_number, inline=False)
            await ctx.send(
                "Unable to find requested tab - redirecting to band page", embed=embed
            )
            tab.reset_cooldown(ctx)
    else:
        await ctx.send("Unable to find requested tab or artist")
        tab.reset_cooldown(ctx)
# On command
@tab_bot.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def info(ctx):
    """Send an embed explaining how to use the .tab command."""
    usage_text = "Use .tab command in format \".tab artist name song name\" to find requested tablature. If the particular tablature couldn't be find or the song name won't be given, only the url to artist page will be sent. The whole tabulature finding process needs a few seconds."
    embed = discord.Embed(title="TabBot", color=0x128DF6)
    embed.add_field(name="Information", value=usage_text, inline=False)
    await ctx.send(embed=embed)
# Handling commands errors
@tab.error
async def tab_error(ctx, error):
    """Handle .tab errors: report missing arguments, silence cooldowns, re-raise the rest."""
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send("Command missing required argument")
        tab.reset_cooldown(ctx)
        # BUG FIX: return after handling — the original fell through and
        # re-raised the already-handled error via the final else branch.
        return
    if isinstance(error, commands.CommandOnCooldown):
        return
    raise error
@info.error
async def info_error(ctx, error):
    """Handle .info errors: silence cooldowns, re-raise everything else.

    BUG FIX: discord.py invokes command error handlers with (ctx, error);
    the original accepted only (error) and would fail with a TypeError when
    called. Also renamed from tab_error, which shadowed the .tab handler
    defined above.
    """
    if isinstance(error, commands.CommandOnCooldown):
        return
    raise error
# Bot's token
# NOTE(review): "token" is a placeholder — supply the real Discord bot token,
# preferably read from an environment variable rather than hard-coded.
tab_bot.run("token")
|
[
"discord.Embed",
"time.sleep",
"discord.ext.commands.cooldown",
"selenium.webdriver.ChromeOptions",
"webdriver_manager.chrome.ChromeDriverManager",
"discord.ext.commands.Bot",
"discord.Client"
] |
[((224, 249), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (247, 249), False, 'from selenium import webdriver\n'), ((452, 468), 'discord.Client', 'discord.Client', ([], {}), '()\n', (466, 468), False, 'import discord\n'), ((479, 511), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""."""'}), "(command_prefix='.')\n", (491, 511), False, 'from discord.ext import commands\n'), ((643, 693), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(15)', 'commands.BucketType.user'], {}), '(1, 15, commands.BucketType.user)\n', (660, 693), False, 'from discord.ext import commands\n'), ((6959, 7009), 'discord.ext.commands.cooldown', 'commands.cooldown', (['(1)', '(10)', 'commands.BucketType.user'], {}), '(1, 10, commands.BucketType.user)\n', (6976, 7009), False, 'from discord.ext import commands\n'), ((1110, 1123), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1120, 1123), False, 'import time\n'), ((7068, 7112), 'discord.Embed', 'discord.Embed', ([], {'title': '"""TabBot"""', 'color': '(1215990)'}), "(title='TabBot', color=1215990)\n", (7081, 7112), False, 'import discord\n'), ((345, 366), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (364, 366), False, 'from webdriver_manager.chrome import ChromeDriverManager\n'), ((5090, 5141), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Requested tab"""', 'color': '(1215990)'}), "(title='Requested tab', color=1215990)\n", (5103, 5141), False, 'import discord\n'), ((4704, 4717), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4714, 4717), False, 'import time\n'), ((6360, 6414), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Requested artist"""', 'color': '(1215990)'}), "(title='Requested artist', color=1215990)\n", (6373, 6414), False, 'import discord\n')]
|
import re
# Remove HTML/XML tags
def relace_tag(content):
    """Return *content* with every <...> tag removed (non-greedy match)."""
    tag_pattern = re.compile('<.*?>')
    return re.sub(tag_pattern, '', content)
|
[
"re.sub",
"re.compile"
] |
[((58, 77), 're.compile', 're.compile', (['"""<.*?>"""'], {}), "('<.*?>')\n", (68, 77), False, 'import re\n'), ((95, 123), 're.sub', 're.sub', (['cleaner', '""""""', 'content'], {}), "(cleaner, '', content)\n", (101, 123), False, 'import re\n')]
|
import glob
import pickle
import gensim
import torch
from tqdm import tqdm
import nltk
# Fetch WordNet data for the lemmatizer (cached after the first download).
nltk.download('wordnet')
# with open('F:/workspace/LdaSummarization/dictionary_large_2020_12_05.pkl', 'rb') as f:
#     tm_dictionary = pickle.load(f)
# NOTE(review): hard-coded Windows path — parameterize before reuse.
# SECURITY: pickle.load executes arbitrary code; only load trusted files.
with open('F:/workspace/LdaSummarization/lda_model_large_2020_12_08.pkl', 'rb') as f:
    lda_model, tm_dictionary = pickle.load(f)
# NOTE(review): stemmer is defined but never used in this script.
stemmer = nltk.SnowballStemmer('english')
def lemmatize(text):
    """Lemmatize *text* as a verb using the WordNet lemmatizer."""
    lemmatizer = nltk.WordNetLemmatizer()
    return lemmatizer.lemmatize(text, pos='v')
def preprocess(text):
    """Tokenise *text* and return lemmatized tokens.

    Stopwords and tokens of length <= 3 are discarded.
    """
    return [
        lemmatize(token)
        for token in gensim.utils.simple_preprocess(text)
        if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3
    ]
limit = 9999999  # effectively "process all shards"
pt_files = sorted(glob.glob('F:/workspace/LdaSummarization/bert_data/cnndm' + '.' + 'train' + '.[0-9]*.pt'))[:limit]
for pt in tqdm(pt_files):
    pt_result = []  # NOTE(review): unused — candidate for removal
    res = torch.load(pt)
    for r in res:
        # Infer the LDA topic distribution for the article source text and
        # attach it to the record, sorted by descending topic weight.
        bow_vector = tm_dictionary.doc2bow(preprocess(' '.join(r['src_txt'])))
        article_topic = sorted(lda_model[bow_vector], key=lambda tup: -1 * tup[1])
        r['topics'] = article_topic
    # Mirror the input tree into bert_data_with_topics (directory must exist).
    torch.save(res, pt.replace('bert_data', 'bert_data_with_topics'))
|
[
"tqdm.tqdm",
"nltk.WordNetLemmatizer",
"torch.load",
"gensim.utils.simple_preprocess",
"pickle.load",
"glob.glob",
"nltk.download",
"nltk.SnowballStemmer"
] |
[((90, 114), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (103, 114), False, 'import nltk\n'), ((386, 417), 'nltk.SnowballStemmer', 'nltk.SnowballStemmer', (['"""english"""'], {}), "('english')\n", (406, 417), False, 'import nltk\n'), ((888, 902), 'tqdm.tqdm', 'tqdm', (['pt_files'], {}), '(pt_files)\n', (892, 902), False, 'from tqdm import tqdm\n'), ((360, 374), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (371, 374), False, 'import pickle\n'), ((559, 595), 'gensim.utils.simple_preprocess', 'gensim.utils.simple_preprocess', (['text'], {}), '(text)\n', (589, 595), False, 'import gensim\n'), ((934, 948), 'torch.load', 'torch.load', (['pt'], {}), '(pt)\n', (944, 948), False, 'import torch\n'), ((778, 871), 'glob.glob', 'glob.glob', (["('F:/workspace/LdaSummarization/bert_data/cnndm' + '.' + 'train' + '.[0-9]*.pt'\n )"], {}), "('F:/workspace/LdaSummarization/bert_data/cnndm' + '.' + 'train' +\n '.[0-9]*.pt')\n", (787, 871), False, 'import glob\n'), ((452, 476), 'nltk.WordNetLemmatizer', 'nltk.WordNetLemmatizer', ([], {}), '()\n', (474, 476), False, 'import nltk\n')]
|
"""
Django settings for dts_test_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
DEFAULT_FILE_STORAGE = 'tenant_schemas.storage.TenantFileSystemStorage'
# Application definition
SHARED_APPS = (
'tenant_schemas', # mandatory
'customers', # you must list the app where your tenant model resides in
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
TENANT_APPS = (
'dts_test_app',
)
TENANT_MODEL = "customers.Client" # app.Model
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
INSTALLED_APPS = (
'tenant_schemas',
'dts_test_app',
'customers',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
ROOT_URLCONF = 'dts_test_project.urls'
WSGI_APPLICATION = 'dts_test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Read PG_PORT once instead of twice (the old expression queried the
# environment in both the condition and the conversion).
_pg_port = os.environ.get('PG_PORT')

DATABASES = {
    'default': {
        'ENGINE': 'tenant_schemas.postgresql_backend',
        'NAME': os.environ.get('PG_NAME', 'dts_test_project'),
        'USER': os.environ.get('PG_USER'),
        'PASSWORD': os.environ.get('PG_PASSWORD'),
        'HOST': os.environ.get('PG_HOST'),
        # Django accepts an int port or None (use the backend default).
        'PORT': int(_pg_port) if _pg_port else None,
    }
}
MIDDLEWARE = (
'tenant_tutorial.middleware.TenantTutorialMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'OPTIONS': {
'debug': DEBUG,
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
]
},
}
]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'tenant_schemas.storage.TenantStaticFilesStorage'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'tenant_context': {
'()': 'tenant_schemas.log.TenantContextFilter'
},
},
'formatters': {
'simple': {
'format': '%(levelname)-7s %(asctime)s %(message)s',
},
'tenant_context': {
'format': '[%(schema_name)s:%(domain_url)s] '
'%(levelname)-7s %(asctime)s %(message)s',
},
},
'handlers': {
'null': {
'class': 'logging.NullHandler',
},
'console': {
'class': 'logging.StreamHandler',
'filters': ['tenant_context'],
'formatter': 'tenant_context',
},
},
'loggers': {
'': {
'handlers': ['null'],
'level': 'DEBUG',
'propagate': True,
},
},
}
|
[
"os.environ.get",
"os.path.dirname"
] |
[((365, 390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (380, 390), False, 'import os\n'), ((1781, 1826), 'os.environ.get', 'os.environ.get', (['"""PG_NAME"""', '"""dts_test_project"""'], {}), "('PG_NAME', 'dts_test_project')\n", (1795, 1826), False, 'import os\n'), ((1844, 1869), 'os.environ.get', 'os.environ.get', (['"""PG_USER"""'], {}), "('PG_USER')\n", (1858, 1869), False, 'import os\n'), ((1891, 1920), 'os.environ.get', 'os.environ.get', (['"""PG_PASSWORD"""'], {}), "('PG_PASSWORD')\n", (1905, 1920), False, 'import os\n'), ((1938, 1963), 'os.environ.get', 'os.environ.get', (['"""PG_HOST"""'], {}), "('PG_HOST')\n", (1952, 1963), False, 'import os\n'), ((2015, 2040), 'os.environ.get', 'os.environ.get', (['"""PG_PORT"""'], {}), "('PG_PORT')\n", (2029, 2040), False, 'import os\n'), ((1985, 2010), 'os.environ.get', 'os.environ.get', (['"""PG_PORT"""'], {}), "('PG_PORT')\n", (1999, 2010), False, 'import os\n')]
|
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.net.buildbot import Build
from webkitpy.common.net.failuremap import *
from webkitpy.common.net.regressionwindow import RegressionWindow
from webkitpy.tool.mocktool import MockBuilder
class FailureMapTest(unittest.TestCase):
    """Unit tests for FailureMap's regression-window bookkeeping.

    Fix: ``assertEquals`` is a deprecated alias that was removed in
    Python 3.12; all assertions now use ``assertEqual``.
    """
    builder1 = MockBuilder("Builder1")
    builder2 = MockBuilder("Builder2")
    build1a = Build(builder1, build_number=22, revision=1233, is_green=True)
    build1b = Build(builder1, build_number=23, revision=1234, is_green=False)
    build2a = Build(builder2, build_number=89, revision=1233, is_green=True)
    build2b = Build(builder2, build_number=90, revision=1235, is_green=False)
    regression_window1 = RegressionWindow(build1a, build1b, failing_tests=[u'test1', u'test1'])
    regression_window2 = RegressionWindow(build2a, build2b, failing_tests=[u'test1'])

    def _make_failure_map(self):
        # Two builders, each contributing one regression window.
        failure_map = FailureMap()
        failure_map.add_regression_window(self.builder1, self.regression_window1)
        failure_map.add_regression_window(self.builder2, self.regression_window2)
        return failure_map

    def test_failing_revisions(self):
        failure_map = self._make_failure_map()
        self.assertEqual(failure_map.failing_revisions(), [1234, 1235])

    def test_new_failures(self):
        failure_map = self._make_failure_map()
        failure_map.filter_out_old_failures(lambda revision: False)
        self.assertEqual(failure_map.failing_revisions(), [1234, 1235])

    def test_new_failures_with_old_revisions(self):
        failure_map = self._make_failure_map()
        failure_map.filter_out_old_failures(lambda revision: revision == 1234)
        self.assertEqual(failure_map.failing_revisions(), [])

    def test_new_failures_with_more_old_revisions(self):
        failure_map = self._make_failure_map()
        failure_map.filter_out_old_failures(lambda revision: revision == 1235)
        self.assertEqual(failure_map.failing_revisions(), [1234])

    def test_tests_failing_for(self):
        failure_map = self._make_failure_map()
        self.assertEqual(failure_map.tests_failing_for(1234), [u'test1'])

    def test_failing_tests(self):
        failure_map = self._make_failure_map()
        self.assertEqual(failure_map.failing_tests(), set([u'test1']))
|
[
"webkitpy.common.net.regressionwindow.RegressionWindow",
"webkitpy.common.net.buildbot.Build",
"webkitpy.tool.mocktool.MockBuilder"
] |
[((1810, 1833), 'webkitpy.tool.mocktool.MockBuilder', 'MockBuilder', (['"""Builder1"""'], {}), "('Builder1')\n", (1821, 1833), False, 'from webkitpy.tool.mocktool import MockBuilder\n'), ((1849, 1872), 'webkitpy.tool.mocktool.MockBuilder', 'MockBuilder', (['"""Builder2"""'], {}), "('Builder2')\n", (1860, 1872), False, 'from webkitpy.tool.mocktool import MockBuilder\n'), ((1888, 1950), 'webkitpy.common.net.buildbot.Build', 'Build', (['builder1'], {'build_number': '(22)', 'revision': '(1233)', 'is_green': '(True)'}), '(builder1, build_number=22, revision=1233, is_green=True)\n', (1893, 1950), False, 'from webkitpy.common.net.buildbot import Build\n'), ((1965, 2028), 'webkitpy.common.net.buildbot.Build', 'Build', (['builder1'], {'build_number': '(23)', 'revision': '(1234)', 'is_green': '(False)'}), '(builder1, build_number=23, revision=1234, is_green=False)\n', (1970, 2028), False, 'from webkitpy.common.net.buildbot import Build\n'), ((2043, 2105), 'webkitpy.common.net.buildbot.Build', 'Build', (['builder2'], {'build_number': '(89)', 'revision': '(1233)', 'is_green': '(True)'}), '(builder2, build_number=89, revision=1233, is_green=True)\n', (2048, 2105), False, 'from webkitpy.common.net.buildbot import Build\n'), ((2120, 2183), 'webkitpy.common.net.buildbot.Build', 'Build', (['builder2'], {'build_number': '(90)', 'revision': '(1235)', 'is_green': '(False)'}), '(builder2, build_number=90, revision=1235, is_green=False)\n', (2125, 2183), False, 'from webkitpy.common.net.buildbot import Build\n'), ((2210, 2280), 'webkitpy.common.net.regressionwindow.RegressionWindow', 'RegressionWindow', (['build1a', 'build1b'], {'failing_tests': "[u'test1', u'test1']"}), "(build1a, build1b, failing_tests=[u'test1', u'test1'])\n", (2226, 2280), False, 'from webkitpy.common.net.regressionwindow import RegressionWindow\n'), ((2306, 2366), 'webkitpy.common.net.regressionwindow.RegressionWindow', 'RegressionWindow', (['build2a', 'build2b'], {'failing_tests': "[u'test1']"}), "(build2a, 
build2b, failing_tests=[u'test1'])\n", (2322, 2366), False, 'from webkitpy.common.net.regressionwindow import RegressionWindow\n')]
|
from random import shuffle
import os
from glob import glob
import shutil
import re
import tqdm
from multiprocessing import Pool
from normalise import normalise
months = {'jan.': 'January', 'feb.': 'February', 'mar.': 'March', 'apr.': 'April', 'may': 'May', 'jun.': 'June', 'jul.': 'July', 'aug.': 'August', 'sep.': 'September', 'oct.': 'October', 'nov.': 'November', 'dec.': 'December', 'jan': 'January', 'feb': 'February', 'mar': 'March', 'apr': 'April', 'jun': 'June', 'jul': 'July', 'aug': 'August', 'sep': 'September', 'oct': 'October', 'nov': 'November', 'dec': 'December'}
replace_words = {'&': 'and', '¡':'', 'r&b':'R and B', 'funtime':'fun time', 'español':'espanol', "'s":'s', 'palylist':'playlist'}
replace_vocab = {'ú':'u', 'ñ':'n', 'Ō':'O', 'â':'a'}
reservations = {'chyi':'chyi', 'Pre-Party':'pre party', 'Chu':'Chu', 'B&B':'B and B', '0944':'nine four four', 'Box':'Box', 'ain’t':'am not', 'Zon':'Zon', 'Yui':'Yui', 'neto':'neto', 'skepta':'skepta', '¡Fiesta':'Fiesta', 'Vue':'Vue', 'iheart':'iheart', 'disco':'disco'}
same = "klose la mejor música para tus fiestas dubstep dangles drejer listas".split(' ')
for word in same:
reservations[word] = word
def word_normalise(words):
    """Normalise a list of word tokens before text normalisation.

    Expands month abbreviations, applies word-level substitutions
    (replace_words), maps accented characters to their ASCII equivalents
    (replace_vocab), and strips sentence punctuation.
    """
    ret = []
    for word in words:
        if word.lower() in months:
            word = months[word.lower()]
        if word.lower() in replace_words:
            word = replace_words[word.lower()]
        # Bug fix: the original substituted '' here, *deleting* accented
        # characters instead of replacing them with the mapped ASCII form
        # (e.g. 'ú' -> 'u' per replace_vocab).
        for pattern, replacement in replace_vocab.items():
            word = re.sub(pattern, replacement, word)
        word = re.sub(r'[\.\,\!\?;\/]', '', word)
        ret.append(word)
    return ret
def sent_normalise(text, slots_split=None):
    """Text-normalise a sentence while keeping slot labels aligned per word.

    Returns (norm_slots, norm_texts); every output word carries the slot
    label of the source token it was expanded from, so the two lists stay
    the same length.
    """
    norm_slots, norm_texts = [], []
    text_split = text.split(' ')
    if slots_split is None:
        slots_split = ['O']*len(text_split)
    for idx in range(len(text_split)):
        # Drop tokens that are pure punctuation.
        if text_split[idx] in '.,!?;/]':
            continue
        # Reserved words bypass the normaliser and use their fixed expansion.
        if text_split[idx] in reservations:
            for word in reservations[text_split[idx]].split(' '):
                norm_texts.append(word)
                norm_slots.append(slots_split[idx])
            continue
        # Third-party `normalise` may expand one token into several words
        # (numbers, abbreviations, ...).
        norm_text = normalise(word_normalise([text_split[idx]]), variety="AmE", verbose=False)
        for phrase in norm_text:
            if phrase == '':
                continue
            # Split multi-word/hyphenated expansions; each piece inherits
            # the original token's slot label.
            for word in re.split(r' |\-', phrase):
                word = re.sub(r'[\.\,\!\?;\/]', '', word)
                if word == '':
                    continue
                norm_texts.append(word)
                norm_slots.append(slots_split[idx])
    return norm_slots, norm_texts
def process_raw_snips_file(file, out_f):
    """Convert a raw snips file of '<token>:<slot> ... <=> intent' lines into
    'id | BOS text EOS | O slots | intent' lines written to *out_f*."""
    with open(file) as src:
        lines = [raw.strip() for raw in src.readlines()]
    with open(out_f, 'w') as dst:
        for cnt, line in enumerate(lines):
            parts = line.split(' <=> ')
            text, intent = parts[0], parts[1]
            tokens, slots = [], []
            for chunk in text.split():
                # '::' guards tokens that themselves contain a colon
                pieces = chunk.replace('::', ':').split(':')
                tokens.append(pieces[0] if len(pieces) == 2 else ' ')
                slots.append(pieces[1])
            assert len(tokens) == len(slots), (tokens, slots)
            dst.write('%d | BOS %s EOS | O %s | %s\n'
                      % (cnt, ' '.join(tokens), ' '.join(slots), intent))
def remove_IBO_from_snipt_vocab_slot(in_f, out_f):
    """Strip the 2-char BIO prefix ('B-'/'I-') from every non-'O' slot name in
    *in_f* and write the unique resulting names, one per line, to *out_f*."""
    with open(in_f) as src:
        labels = [raw.strip() for raw in src.readlines()]
    unique_labels = {label if label == 'O' else label[len('B-'):] for label in labels}
    with open(out_f, 'w') as dst:
        for label in unique_labels:
            dst.write('%s\n' % label)
def process_daniel_snips_file(content):
    """Split combined snips TSV lines into per-split (text, slots, intent) dicts.

    Each tab-separated line holds uttid, text, slots and intent; the uttid
    embeds the split name (e.g. "snips-train-0").  Returns three tuples of
    (utt2text, utt2slots, utt2intent) dicts: train, valid, test.  The
    hard-coded asserts pin the expected dataset sizes (700/700/13084
    utterances, 7 intents, 40 unique slot labels per split).
    """
    content = [x.strip() for x in content]
    utt_ids = [x.split('\t', 1)[0] for x in content]
    valid_uttids = [x for x in utt_ids if x.split('-')[1] == 'valid']
    test_uttids = [x for x in utt_ids if x.split('-')[1] == 'test']
    train_uttids = [x for x in utt_ids if x.split('-')[1] == 'train']
    utt2text, utt2slots, utt2intent = {}, {}, {}
    assert len(utt_ids) == len(set(utt_ids))
    # create utt2text, utt2slots, utt2intent
    for line in content:
        uttid, text, slots, intent = line.split('\t')
        if len(text.split()) != len(slots.split()): # detect 'empty' in text
            # A double space produced one fewer text token than slot labels;
            # drop the slot aligned with the empty position and re-join.
            assert len(text.split('  ')) == 2
            empty_idx = text.split().index(text.split('  ')[0].split()[-1]) + 1
            slots_list = slots.split()
            del slots_list[empty_idx]
            cleaned_slots = ' '.join(slots_list)
            assert len(text.split()) == len(slots_list)
            cleaned_text = ' '.join(text.split())
            #print(cleaned_text, cleaned_slots)
        else:
            (cleaned_text, cleaned_slots) = (text, slots)
        # get rid of the 'intent/' from all slot values
        cleaned_slots = ' '.join([x.split('/')[1] if x != 'O' else x for x in cleaned_slots.split()])
        # strip the whitespaces before punctuations
        #cleaned_text = re.sub(r'\s([?.!,"](?:\s|$))', r'\1', cleaned_text)
        utt2text[uttid] = cleaned_text
        utt2slots[uttid] = cleaned_slots
        utt2intent[uttid] = intent
    test_utt2text, test_utt2slots, test_utt2intent = {}, {}, {}
    valid_utt2text, valid_utt2slots, valid_utt2intent = {}, {}, {}
    train_utt2text, train_utt2slots, train_utt2intent = {}, {}, {}
    for utt in valid_uttids:
        valid_utt2text[utt] = utt2text[utt]
        valid_utt2slots[utt] = utt2slots[utt]
        valid_utt2intent[utt] = utt2intent[utt]
    for utt in test_uttids:
        test_utt2text[utt] = utt2text[utt]
        test_utt2slots[utt] = utt2slots[utt]
        test_utt2intent[utt] = utt2intent[utt]
    for utt in train_uttids:
        train_utt2text[utt] = utt2text[utt]
        train_utt2slots[utt] = utt2slots[utt]
        train_utt2intent[utt] = utt2intent[utt]
    # Sanity checks against the published snips dataset statistics.
    assert len(set(valid_utt2intent.values())) == len(set(test_utt2intent.values())) == len(set(train_utt2intent.values())) == 7
    assert len(valid_utt2intent.keys()) == len(test_utt2intent.keys()) == 700
    assert len(train_utt2intent.keys()) == 13084
    def __return_set_of_slots(utt2slots):
        # Collect the unique slot labels used across a split.
        all_slots = []
        for slot in utt2slots.values():
            all_slots.extend(slot.split())
        unique_slots = set(all_slots)
        return unique_slots
    assert len(__return_set_of_slots(valid_utt2slots)) == len(__return_set_of_slots(test_utt2slots)) == \
        len(__return_set_of_slots(train_utt2slots)) == 40
    return (train_utt2text, train_utt2slots, train_utt2intent), \
        (valid_utt2text, valid_utt2slots, valid_utt2intent), \
        (test_utt2text, test_utt2slots, test_utt2intent)
def map_and_link_snips_audio(snips_audio_dir, link_dir):
    """Copy snips-*.mp3 files into link_dir/<split>/ with a speaker prefix."""
    mp3_paths = []
    for dirpath, _dirs, _files in os.walk(snips_audio_dir):
        mp3_paths.extend(glob(os.path.join(dirpath, '*.mp3')))
    for path in mp3_paths:
        # NOTE(review): relies on a fixed directory depth (path component 8)
        # and '<...>_<speaker>' naming — confirm against the dataset layout.
        person = path.split('/')[8].split('_')[1]
        filename = path.split('/')[-1]
        if not filename.startswith('snips'):
            continue
        uttid = filename.split('.')[0]
        new_uttid = person + '-' + filename
        partition = uttid.split('-')[1]
        shutil.copyfile(path, os.path.join(link_dir, partition, new_uttid))
def create_multispk_for_snips(output_dir):
    """Replicate the single-speaker IOB transcripts for every TTS speaker.

    Reads single-matched-snips.<split>.w-intent for each split and writes
    all.iob.snips.txt with one line per (speaker, utterance) pair.
    """
    # NOTE(review): "<NAME> <NAME>" looks like an anonymised placeholder for
    # the real speaker names — restore the actual list before use.
    speakers = "<NAME> <NAME>".split(' ')
    dataset_info = [{'split':'test', 'num_utts':700}, {'split':'valid', 'num_utts':700}, {'split':'train', 'num_utts':13084}]
    test_out_f = open(os.path.join(output_dir, 'all.iob.snips.txt'), 'w')
    for data in dataset_info:
        num_utts = data['num_utts']
        split = data['split']
        with open(os.path.join(output_dir, 'single-matched-snips.%s.w-intent'%split)) as f:
            content = f.readlines()
        # Map "snips-<split>-<n>" utterance ids to their full transcript line.
        utt2line = {x.strip().split()[0]:x.strip() for x in content}
        for spk in speakers:
            for num in range(num_utts):
                uttid = "%s-snips-%s-%d"%(spk, split, num) #mp3.split('/')[-1].split('.')[0]
                line = utt2line["snips-%s-%d"%(split, num)] #'-'.join(uttid.split('-')[1:])]
                text = line.split('\t')[1].upper()
                slots = line.split('\t')[2]
                intent = line.split('\t')[3]
                test_out_f.write('%s BOS %s EOS\tO %s %s\n' % (uttid, text, slots, intent))
    test_out_f.close()
def apply_text_norm_and_modify_slots(all_tsv, output_dir):
    """Text-normalise every split of the snips TSV data and write the results.

    Writes single-matched-snips.<split>.w-intent for test/valid/train plus a
    slots.txt vocabulary sorted by descending slot frequency.

    Refactor: the three identical per-split loops are folded into
    _normalise_split (output files and contents unchanged); the vocab file,
    previously left unclosed, is now closed via a context manager.
    """
    train_dirs, valid_dirs, test_dirs = process_daniel_snips_file(all_tsv)
    vocab_slot = {}  # slot label -> occurrence count, accumulated over all splits
    _normalise_split(test_dirs,
                     os.path.join(output_dir, 'single-matched-snips.test.w-intent'),
                     'Text Normalising on testing set', vocab_slot)
    _normalise_split(valid_dirs,
                     os.path.join(output_dir, 'single-matched-snips.valid.w-intent'),
                     'Text Normalising on validation set', vocab_slot)
    _normalise_split(train_dirs,
                     os.path.join(output_dir, 'single-matched-snips.train.w-intent'),
                     'Text Normalising on training set', vocab_slot)
    with open(os.path.join(output_dir, 'slots.txt'), 'w') as vocab_file:
        vocab_file.write('\n'.join(sorted(list(vocab_slot.keys()), key=lambda x: vocab_slot[x], reverse=True)))

def _normalise_split(split_dirs, out_path, desc, vocab_slot):
    """Normalise one split's utterances into out_path, counting slots into vocab_slot.

    split_dirs is a (utt2text, utt2slots, utt2intent) tuple; each output line
    is 'uttid<TAB>UPPERCASED TEXT<TAB>slots<TAB>intent'.
    """
    with open(out_path, 'w') as out_file:
        for uttid in tqdm.tqdm(split_dirs[0].keys(), desc=desc):
            text = split_dirs[0][uttid]
            slots = split_dirs[1][uttid]
            intent = split_dirs[2][uttid]
            slots_split = slots.split()
            for s in slots_split:
                vocab_slot.setdefault(s, 0)
                vocab_slot[s] += 1
            norm_slots, norm_texts = sent_normalise(text, slots_split)
            assert len(norm_texts) == len(norm_slots), (norm_texts, norm_slots)
            out_file.write('%s\t%s\t%s\t%s\n' % (uttid, ' '.join(norm_texts).upper(), ' '.join(norm_slots), intent))
def sox_func(inputs):
    """Worker: convert one speaker's .mp3 files to 16 kHz 16-bit mono .wav via sox.

    *inputs* is a (files, root, out_root, speaker) tuple (picklable for Pool.map).
    """
    import subprocess  # local import keeps the worker self-contained
    files, root, out_root, speaker = inputs
    for name in tqdm.tqdm(files, desc='Process for speaker: ' + speaker):
        if name.endswith(".mp3"):
            split = name.split('-')[1]
            out_dir = os.path.join(out_root, split)
            os.makedirs(out_dir, exist_ok=True)
            orig_file = os.path.join(root, name)
            new_file = os.path.join(out_dir, speaker + '-' + name.split('/')[-1].split('.')[0] + '.wav')
            # Fix: build the command as an argument list instead of a
            # concatenated shell string (os.popen) — robust to spaces/special
            # characters in paths and avoids shell injection.
            subprocess.run(
                ['sox', orig_file, '-t', 'wav', '-c', '1', '-r', '16000',
                 '-b', '16', '-e', 'signed-integer', new_file],
                stdout=subprocess.DEVNULL,
            )
def sox_mp3_to_wav(in_root, out_root):
    """Convert the snips mp3 tree under in_root to wav files under out_root,
    parallelised with one worker task per speaker directory."""
    os.makedirs(out_root, exist_ok=True)
    pool = Pool(16)  # 16 worker processes
    inputs = []
    for root, dirs, files in os.walk(in_root):
        print('[Processing] enter directory %s'%root)
        if not len(files):
            continue
        # Speaker name is encoded in the parent directory: .../<prefix>_<speaker>/...
        speaker = root.split('/')[-2].split('_')[1]
        print('[Processing] process %d audio files from speaker %s'%(len(files), speaker))
        inputs.append((files, root, out_root, speaker))
    pool.map(sox_func, inputs)
if __name__ == '__main__':
    import sys, os
    mode = sys.argv[1]  # 'text' or 'audio'
    if mode == 'text':
        # Text pipeline: normalise the NLU annotations and build the
        # multi-speaker IOB transcript file.
        repo_dir = sys.argv[2]
        dump_dir = sys.argv[3]
        os.makedirs(dump_dir, exist_ok=True)
        content = []
        # [1:] skips the header row of each annotation file.
        content += open(os.path.join(repo_dir, 'data/nlu_annotation/valid')).readlines()[1:]
        content += open(os.path.join(repo_dir, 'data/nlu_annotation/test')).readlines()[1:]
        content += open(os.path.join(repo_dir, 'data/nlu_annotation/train')).readlines()[1:]
        apply_text_norm_and_modify_slots(content, dump_dir)
        create_multispk_for_snips(dump_dir)
    elif mode == 'audio':
        audio_dir = sys.argv[2]
        dump_dir = sys.argv[3]
        # Step: sox the snips *.mp3 to the correct format
        sox_mp3_to_wav(audio_dir, dump_dir)
    else:
        print('Usage: python preprocess.py [text|audio] [data_path] [dump_path]')
|
[
"tqdm.tqdm",
"re.split",
"os.makedirs",
"os.path.join",
"os.walk",
"os.popen",
"multiprocessing.Pool",
"shutil.copyfile",
"re.sub"
] |
[((11438, 11494), 'tqdm.tqdm', 'tqdm.tqdm', (['files'], {'desc': "('Process for speaker: ' + speaker)"}), "(files, desc='Process for speaker: ' + speaker)\n", (11447, 11494), False, 'import tqdm\n'), ((12019, 12055), 'os.makedirs', 'os.makedirs', (['out_root'], {'exist_ok': '(True)'}), '(out_root, exist_ok=True)\n', (12030, 12055), False, 'import sys, os\n'), ((12067, 12075), 'multiprocessing.Pool', 'Pool', (['(16)'], {}), '(16)\n', (12071, 12075), False, 'from multiprocessing import Pool\n'), ((12121, 12137), 'os.walk', 'os.walk', (['in_root'], {}), '(in_root)\n', (12128, 12137), False, 'import sys, os\n'), ((1557, 1595), 're.sub', 're.sub', (['"""[\\\\.\\\\,\\\\!\\\\?;\\\\/]"""', '""""""', 'word'], {}), "('[\\\\.\\\\,\\\\!\\\\?;\\\\/]', '', word)\n", (1563, 1595), False, 'import re\n'), ((7576, 7620), 'os.path.join', 'os.path.join', (['link_dir', 'partition', 'new_uttid'], {}), '(link_dir, partition, new_uttid)\n', (7588, 7620), False, 'import sys, os\n'), ((7629, 7663), 'shutil.copyfile', 'shutil.copyfile', (['path', 'destination'], {}), '(path, destination)\n', (7644, 7663), False, 'import shutil\n'), ((7898, 7943), 'os.path.join', 'os.path.join', (['output_dir', '"""all.iob.snips.txt"""'], {}), "(output_dir, 'all.iob.snips.txt')\n", (7910, 7943), False, 'import sys, os\n'), ((8933, 8995), 'os.path.join', 'os.path.join', (['output_dir', '"""single-matched-snips.test.w-intent"""'], {}), "(output_dir, 'single-matched-snips.test.w-intent')\n", (8945, 8995), False, 'import sys, os\n'), ((9702, 9765), 'os.path.join', 'os.path.join', (['output_dir', '"""single-matched-snips.valid.w-intent"""'], {}), "(output_dir, 'single-matched-snips.valid.w-intent')\n", (9714, 9765), False, 'import sys, os\n'), ((10461, 10524), 'os.path.join', 'os.path.join', (['output_dir', '"""single-matched-snips.train.w-intent"""'], {}), "(output_dir, 'single-matched-snips.train.w-intent')\n", (10473, 10524), False, 'import sys, os\n'), ((11204, 11241), 'os.path.join', 'os.path.join', 
(['output_dir', '"""slots.txt"""'], {}), "(output_dir, 'slots.txt')\n", (11216, 11241), False, 'import sys, os\n'), ((12635, 12671), 'os.makedirs', 'os.makedirs', (['dump_dir'], {'exist_ok': '(True)'}), '(dump_dir, exist_ok=True)\n', (12646, 12671), False, 'import sys, os\n'), ((1458, 1481), 're.sub', 're.sub', (['regex', '""""""', 'word'], {}), "(regex, '', word)\n", (1464, 1481), False, 'import re\n'), ((2348, 2373), 're.split', 're.split', (['""" |\\\\-"""', 'phrase'], {}), "(' |\\\\-', phrase)\n", (2356, 2373), False, 'import re\n'), ((7188, 7212), 'os.walk', 'os.walk', (['snips_audio_dir'], {}), '(snips_audio_dir)\n', (7195, 7212), False, 'import sys, os\n'), ((11589, 11618), 'os.path.join', 'os.path.join', (['out_root', 'split'], {}), '(out_root, split)\n', (11601, 11618), False, 'import sys, os\n'), ((11631, 11666), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (11642, 11666), False, 'import sys, os\n'), ((11691, 11715), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (11703, 11715), False, 'import sys, os\n'), ((2398, 2436), 're.sub', 're.sub', (['"""[\\\\.\\\\,\\\\!\\\\?;\\\\/]"""', '""""""', 'word'], {}), "('[\\\\.\\\\,\\\\!\\\\?;\\\\/]', '', word)\n", (2404, 2436), False, 'import re\n'), ((7227, 7254), 'os.path.join', 'os.path.join', (['x[0]', '"""*.mp3"""'], {}), "(x[0], '*.mp3')\n", (7239, 7254), False, 'import sys, os\n'), ((8064, 8132), 'os.path.join', 'os.path.join', (['output_dir', "('single-matched-snips.%s.w-intent' % split)"], {}), "(output_dir, 'single-matched-snips.%s.w-intent' % split)\n", (8076, 8132), False, 'import sys, os\n'), ((11941, 11962), 'os.popen', 'os.popen', (['bashCommand'], {}), '(bashCommand)\n', (11949, 11962), False, 'import sys, os\n'), ((12718, 12769), 'os.path.join', 'os.path.join', (['repo_dir', '"""data/nlu_annotation/valid"""'], {}), "(repo_dir, 'data/nlu_annotation/valid')\n", (12730, 12769), False, 'import sys, os\n'), ((12811, 12861), 
'os.path.join', 'os.path.join', (['repo_dir', '"""data/nlu_annotation/test"""'], {}), "(repo_dir, 'data/nlu_annotation/test')\n", (12823, 12861), False, 'import sys, os\n'), ((12903, 12954), 'os.path.join', 'os.path.join', (['repo_dir', '"""data/nlu_annotation/train"""'], {}), "(repo_dir, 'data/nlu_annotation/train')\n", (12915, 12954), False, 'import sys, os\n')]
|
from cfiddle import *
from util import *
from fixtures import *
from cfiddle.source import FullyInstrumentedExecutable
from cfiddle.util import working_directory
import pytest
import tempfile
import os
def test_cfg(test_cpp):
assert isinstance(test_cpp, FullyInstrumentedExecutable)
with tempfile.TemporaryDirectory() as d:
png = os.path.join(d, "test.png")
svg = os.path.join(d, "test.svg")
test_cpp.cfg("four", output=png)
assert os.path.exists(png)
test_cpp.cfg("four", svg)
assert os.path.exists(svg)
|
[
"tempfile.TemporaryDirectory",
"os.path.join",
"os.path.exists"
] |
[((304, 333), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (331, 333), False, 'import tempfile\n'), ((354, 381), 'os.path.join', 'os.path.join', (['d', '"""test.png"""'], {}), "(d, 'test.png')\n", (366, 381), False, 'import os\n'), ((396, 423), 'os.path.join', 'os.path.join', (['d', '"""test.svg"""'], {}), "(d, 'test.svg')\n", (408, 423), False, 'import os\n'), ((480, 499), 'os.path.exists', 'os.path.exists', (['png'], {}), '(png)\n', (494, 499), False, 'import os\n'), ((549, 568), 'os.path.exists', 'os.path.exists', (['svg'], {}), '(svg)\n', (563, 568), False, 'import os\n')]
|
import os
import datetime
import json
import numpy as np
import itertools
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import transforms, datasets, models
import torch.nn.functional as F
from sklearn.metrics import roc_curve, auc
from matplotlib import pyplot as plt
from time import sleep
import datetime
import cv2
import shutil
from tqdm import tqdm
#
def transform_image(img):
min_size = min(img.shape[0],img.shape[1])
max_crop = min_size - 224 # 224 for ResNet50
pil_transform = transforms.ToPILImage()
resize_transform = transforms.Resize(224)
total_transform = transforms.Compose([
transforms.RandomApply([
transforms.ColorJitter(0.2, 0.2),
transforms.Pad((10,10))
], p=0.5),
transforms.RandomHorizontalFlip(),
transforms.RandomPerspective(),
transforms.RandomRotation(30),
transforms.RandomCrop(min_size - round(max_crop/10))
])
image = pil_transform(img)
if min_size < 224:
image = resize_transform(image)
return total_transform(image)
# About RANDOMCROP transformation
# ResNet50 would a 224x224 sized images
# Due to differente size of images in dataset, random crop must preserve at least
# 224 pixels for each dimensions. With max_crop I obtain the maximum crop to preserve 224 pixels
# on minimum size. Then I crop min_size - max_crop/10
def data_augmentation(workspace, data_dir, source_dirs):
augset_dir = os.path.join(workspace, 'Augmented_TrainSet')
if os.path.isdir(augset_dir) != True:
os.mkdir(augset_dir)
for c in source_dirs:
if (os.path.isdir(os.path.join(augset_dir, c)) != True):
os.mkdir(os.path.join(augset_dir, c))
imgs = [x for x in os.listdir(os.path.join(data_dir, c))]
for i, img in enumerate(imgs):
original_img = img
source_path = os.path.join(data_dir, c, original_img)
target_path = os.path.join(augset_dir, c)
shutil.copy(source_path, target_path)
img = cv2.imread(source_path)
for j in range(12):
new_img = np.array(transform_image(img))
new_img_name = "{}_copy{}.{}".format("".join(original_img.split(".")[:-1]),j,original_img.split(".").pop(-1))
cv2.imwrite(os.path.join(target_path, new_img_name), new_img)
print("Immagine {} trasformazione {} salvata".format(i, j), end="\r")
# DONT KEEP THIS CREATE_TRANSFORM FUNCTION ALREADY IN MODEL UTILITIES
def create_transform(model_name):
if model_name == 'Inception_v3':
transform = transforms.Compose([
transforms.Resize(299),
transforms.CenterCrop(299),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
else:
transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
return transform
|
[
"torchvision.transforms.ColorJitter",
"os.mkdir",
"torchvision.transforms.RandomHorizontalFlip",
"os.path.isdir",
"torchvision.transforms.RandomRotation",
"torchvision.transforms.ToPILImage",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomPerspective",
"cv2.imread",
"torchvision.transforms.Pad",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Normalize",
"os.path.join",
"shutil.copy",
"torchvision.transforms.Resize"
] |
[((577, 600), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (598, 600), False, 'from torchvision import transforms, datasets, models\n'), ((624, 646), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224)'], {}), '(224)\n', (641, 646), False, 'from torchvision import transforms, datasets, models\n'), ((1529, 1574), 'os.path.join', 'os.path.join', (['workspace', '"""Augmented_TrainSet"""'], {}), "(workspace, 'Augmented_TrainSet')\n", (1541, 1574), False, 'import os\n'), ((1582, 1607), 'os.path.isdir', 'os.path.isdir', (['augset_dir'], {}), '(augset_dir)\n', (1595, 1607), False, 'import os\n'), ((1625, 1645), 'os.mkdir', 'os.mkdir', (['augset_dir'], {}), '(augset_dir)\n', (1633, 1645), False, 'import os\n'), ((833, 866), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (864, 866), False, 'from torchvision import transforms, datasets, models\n'), ((876, 906), 'torchvision.transforms.RandomPerspective', 'transforms.RandomPerspective', ([], {}), '()\n', (904, 906), False, 'from torchvision import transforms, datasets, models\n'), ((916, 945), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (941, 945), False, 'from torchvision import transforms, datasets, models\n'), ((1949, 1988), 'os.path.join', 'os.path.join', (['data_dir', 'c', 'original_img'], {}), '(data_dir, c, original_img)\n', (1961, 1988), False, 'import os\n'), ((2015, 2042), 'os.path.join', 'os.path.join', (['augset_dir', 'c'], {}), '(augset_dir, c)\n', (2027, 2042), False, 'import os\n'), ((2055, 2092), 'shutil.copy', 'shutil.copy', (['source_path', 'target_path'], {}), '(source_path, target_path)\n', (2066, 2092), False, 'import shutil\n'), ((2111, 2134), 'cv2.imread', 'cv2.imread', (['source_path'], {}), '(source_path)\n', (2121, 2134), False, 'import cv2\n'), ((1698, 1725), 'os.path.join', 'os.path.join', (['augset_dir', 'c'], {}), '(augset_dir, c)\n', (1710, 1725), 
False, 'import os\n'), ((1758, 1785), 'os.path.join', 'os.path.join', (['augset_dir', 'c'], {}), '(augset_dir, c)\n', (1770, 1785), False, 'import os\n'), ((2711, 2733), 'torchvision.transforms.Resize', 'transforms.Resize', (['(299)'], {}), '(299)\n', (2728, 2733), False, 'from torchvision import transforms, datasets, models\n'), ((2747, 2773), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(299)'], {}), '(299)\n', (2768, 2773), False, 'from torchvision import transforms, datasets, models\n'), ((2787, 2808), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2806, 2808), False, 'from torchvision import transforms, datasets, models\n'), ((2822, 2888), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2842, 2888), False, 'from torchvision import transforms, datasets, models\n'), ((2963, 2985), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224)'], {}), '(224)\n', (2980, 2985), False, 'from torchvision import transforms, datasets, models\n'), ((2999, 3025), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (3020, 3025), False, 'from torchvision import transforms, datasets, models\n'), ((3039, 3060), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3058, 3060), False, 'from torchvision import transforms, datasets, models\n'), ((3074, 3140), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (3094, 3140), False, 'from torchvision import transforms, datasets, models\n'), ((736, 768), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.2)', '(0.2)'], {}), '(0.2, 0.2)\n', (758, 768), False, 'from torchvision import transforms, datasets, models\n'), ((782, 806), 'torchvision.transforms.Pad', 
'transforms.Pad', (['(10, 10)'], {}), '((10, 10))\n', (796, 806), False, 'from torchvision import transforms, datasets, models\n'), ((1825, 1850), 'os.path.join', 'os.path.join', (['data_dir', 'c'], {}), '(data_dir, c)\n', (1837, 1850), False, 'import os\n'), ((2378, 2417), 'os.path.join', 'os.path.join', (['target_path', 'new_img_name'], {}), '(target_path, new_img_name)\n', (2390, 2417), False, 'import os\n')]
|
from backend.services import services
from urllib.parse import urlparse
def parse_url(url):
'''
takes a validated url pointing to an item on
a supported streaming service and returns a
query to be used with other streaming services
Parameters:
url (str): the url to be parsed
Returns:
query (Query): the query object to be used
with other streaming services
'''
hygienic_url = urlparse(url)
loc = hygienic_url.netloc
for service in services.keys():
if (loc in services[service].NETLOCS):
return services[service].parse_link(url)
|
[
"backend.services.services.keys",
"urllib.parse.urlparse"
] |
[((412, 425), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (420, 425), False, 'from urllib.parse import urlparse\n'), ((471, 486), 'backend.services.services.keys', 'services.keys', ([], {}), '()\n', (484, 486), False, 'from backend.services import services\n')]
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import filecmp
import unittest
import zipfile
from bazel_tools.tools.python.runfiles import runfiles
from tests.zip import zip_test_lib
HELLO_CRC = 2069210904
LOREM_CRC = 2178844372
EXECUTABLE_CRC = 342626072
class ZipContentsTests(zip_test_lib.ZipContentsTestBase):
def test_empty(self):
self.assertZipFileContent("test_zip_empty.zip", [])
def test_basic(self):
self.assertZipFileContent("test_zip_basic.zip", [
{"filename": "foodir/", "isdir": True, "attr": 0o711},
{"filename": "hello.txt", "crc": HELLO_CRC},
{"filename": "loremipsum.txt", "crc": LOREM_CRC},
{"filename": "usr/bin/foo", "attr": 0o555, "data": "/usr/local/foo/foo.real"},
])
def test_timestamp(self):
self.assertZipFileContent("test_zip_timestamp.zip", [
{"filename": "hello.txt", "crc": HELLO_CRC, "timestamp": 1234567890},
])
def test_permissions(self):
self.assertZipFileContent("test_zip_permissions.zip", [
{
"filename": "executable.sh",
"crc": EXECUTABLE_CRC,
"timestamp": 1234567890,
"attr": 0o644,
}
])
def test_package_dir(self):
self.assertZipFileContent("test_zip_package_dir0.zip", [
{"filename": "abc/def/hello.txt", "crc": HELLO_CRC},
{"filename": "abc/def/loremipsum.txt", "crc": LOREM_CRC},
])
def test_zip_strip_prefix_empty(self):
self.assertZipFileContent("test-zip-strip_prefix-empty.zip", [
{"filename": "loremipsum.txt", "crc": LOREM_CRC},
])
def test_zip_strip_prefix_none(self):
self.assertZipFileContent("test-zip-strip_prefix-none.zip", [
{"filename": "loremipsum.txt", "crc": LOREM_CRC},
])
def test_zip_strip_prefix_zipcontent(self):
self.assertZipFileContent("test-zip-strip_prefix-zipcontent.zip", [
{"filename": "loremipsum.txt", "crc": LOREM_CRC},
])
def test_zip_strip_prefix_dot(self):
self.assertZipFileContent("test-zip-strip_prefix-dot.zip", [
{"filename": "zipcontent/loremipsum.txt", "crc": LOREM_CRC},
])
def test_zip_tree(self):
self.assertZipFileContent("test_zip_tree.zip", [
{"filename": "generate_tree/a/a"},
{"filename": "generate_tree/a/b/c"},
{"filename": "generate_tree/b/c/d"},
{"filename": "generate_tree/b/d"},
{"filename": "generate_tree/b/e"},
])
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main"
] |
[((3023, 3038), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3036, 3038), False, 'import unittest\n')]
|
# example3.py
#
# Cached instances
import weakref
class Cached(type):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__cache = weakref.WeakValueDictionary()
def __call__(self, *args):
if args in self.__cache:
return self.__cache[args]
else:
obj = super().__call__(*args)
self.__cache[args] = obj
return obj
class Spam(metaclass=Cached):
def __init__(self, name):
print('Creating Spam({!r})'.format(name))
self.name = name
if __name__ == '__main__':
a = Spam('foo')
b = Spam('bar')
print('a is b:', a is b)
c = Spam('foo')
print('a is c:', a is c)
|
[
"weakref.WeakValueDictionary"
] |
[((178, 207), 'weakref.WeakValueDictionary', 'weakref.WeakValueDictionary', ([], {}), '()\n', (205, 207), False, 'import weakref\n')]
|
import autoCorrection
import numpy as np
import unittest
class TestEndToEnd(unittest.TestCase):
def test_end_to_end(self):
counts = np.random.negative_binomial(n=20, p=0.2, size=(10, 8))
sf = np.ones((10, 8))
corrector = autoCorrection.correctors.AECorrector()
correction = corrector.correct(counts=counts, size_factors=sf)
self.assertEqual(counts.shape, correction.shape)
class TestSavingAndLoading(unittest.TestCase):
def test_loading(self):
self.test_saving()
counts = np.random.negative_binomial(n=20, p=0.2, size=(10, 8))
sf = np.ones((10, 8))
corrector = autoCorrection.correctors.AECorrector(model_name='test1', model_directory=".")
correction = corrector.correct(counts, sf, only_predict=True)
self.assertEqual(counts.shape, correction.shape)
def test_saving(self):
counts = np.random.negative_binomial(n=20, p=0.2, size=(10, 8))
sf = np.ones((10, 8))
corrector = autoCorrection.correctors.AECorrector(model_name='test1', model_directory=".", save_model=True)
correction = corrector.correct(counts, sf)
self.assertEqual(counts.shape, correction.shape)
class TestSetSeed(unittest.TestCase):
def test_setSeed(self):
# generate data
nsamples = 15
ngenes = 20
counts = np.random.negative_binomial(n=20, p=0.2, size=(ngenes, nsamples))
sf = np.random.uniform(0.8, 1.2, size=(ngenes, nsamples))
# run the autocorrection 2 times with seed and one without. it should deviate
ac = autoCorrection.correctors
correct1 = ac.AECorrector(model_name='test1', model_directory=".", save_model=True, verbose=0).correct(counts, sf)
correct2 = ac.AECorrector(model_name='test1', model_directory=".", save_model=True, verbose=0, seed=42).correct(counts, sf)
correct3 = ac.AECorrector(model_name='test1', model_directory=".", save_model=True, verbose=0, seed=42).correct(counts, sf)
# check if the results are similar. Due to randomness in the numbers we still have little changes
#self.assertTrue(sum(sum(np.round(correct2) == np.round(correct3))) > 0.9 * nsamples * ngenes)
self.assertTrue(sum(sum(np.round(correct1) == np.round(correct2))) < 0.3 * nsamples * ngenes)
self.assertTrue(sum(sum(np.round(correct1) == np.round(correct3))) < 0.3 * nsamples * ngenes)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.random.uniform",
"numpy.random.negative_binomial",
"numpy.ones",
"autoCorrection.correctors.AECorrector",
"numpy.round"
] |
[((2475, 2490), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2488, 2490), False, 'import unittest\n'), ((147, 201), 'numpy.random.negative_binomial', 'np.random.negative_binomial', ([], {'n': '(20)', 'p': '(0.2)', 'size': '(10, 8)'}), '(n=20, p=0.2, size=(10, 8))\n', (174, 201), True, 'import numpy as np\n'), ((215, 231), 'numpy.ones', 'np.ones', (['(10, 8)'], {}), '((10, 8))\n', (222, 231), True, 'import numpy as np\n'), ((252, 291), 'autoCorrection.correctors.AECorrector', 'autoCorrection.correctors.AECorrector', ([], {}), '()\n', (289, 291), False, 'import autoCorrection\n'), ((542, 596), 'numpy.random.negative_binomial', 'np.random.negative_binomial', ([], {'n': '(20)', 'p': '(0.2)', 'size': '(10, 8)'}), '(n=20, p=0.2, size=(10, 8))\n', (569, 596), True, 'import numpy as np\n'), ((610, 626), 'numpy.ones', 'np.ones', (['(10, 8)'], {}), '((10, 8))\n', (617, 626), True, 'import numpy as np\n'), ((647, 725), 'autoCorrection.correctors.AECorrector', 'autoCorrection.correctors.AECorrector', ([], {'model_name': '"""test1"""', 'model_directory': '"""."""'}), "(model_name='test1', model_directory='.')\n", (684, 725), False, 'import autoCorrection\n'), ((898, 952), 'numpy.random.negative_binomial', 'np.random.negative_binomial', ([], {'n': '(20)', 'p': '(0.2)', 'size': '(10, 8)'}), '(n=20, p=0.2, size=(10, 8))\n', (925, 952), True, 'import numpy as np\n'), ((966, 982), 'numpy.ones', 'np.ones', (['(10, 8)'], {}), '((10, 8))\n', (973, 982), True, 'import numpy as np\n'), ((1003, 1103), 'autoCorrection.correctors.AECorrector', 'autoCorrection.correctors.AECorrector', ([], {'model_name': '"""test1"""', 'model_directory': '"""."""', 'save_model': '(True)'}), "(model_name='test1', model_directory=\n '.', save_model=True)\n", (1040, 1103), False, 'import autoCorrection\n'), ((1371, 1436), 'numpy.random.negative_binomial', 'np.random.negative_binomial', ([], {'n': '(20)', 'p': '(0.2)', 'size': '(ngenes, nsamples)'}), '(n=20, p=0.2, size=(ngenes, nsamples))\n', (1398, 
1436), True, 'import numpy as np\n'), ((1450, 1502), 'numpy.random.uniform', 'np.random.uniform', (['(0.8)', '(1.2)'], {'size': '(ngenes, nsamples)'}), '(0.8, 1.2, size=(ngenes, nsamples))\n', (1467, 1502), True, 'import numpy as np\n'), ((2266, 2284), 'numpy.round', 'np.round', (['correct1'], {}), '(correct1)\n', (2274, 2284), True, 'import numpy as np\n'), ((2288, 2306), 'numpy.round', 'np.round', (['correct2'], {}), '(correct2)\n', (2296, 2306), True, 'import numpy as np\n'), ((2368, 2386), 'numpy.round', 'np.round', (['correct1'], {}), '(correct1)\n', (2376, 2386), True, 'import numpy as np\n'), ((2390, 2408), 'numpy.round', 'np.round', (['correct3'], {}), '(correct3)\n', (2398, 2408), True, 'import numpy as np\n')]
|
import os
import unittest
from unittest.mock import patch
import botocore.session
from botocore.stub import Stubber
from src.utils import secrets_manager_helper
session = botocore.session.get_session()
secretsmanager = session.create_client("secretsmanager", region_name=secrets_manager_helper.REGION)
class TestSecretsManagerHelper(unittest.TestCase):
def test_null_environment_value(self):
with self.assertRaises(ValueError):
secrets_manager_helper.retrieve_secret("variable")
@patch.dict(os.environ, {"variable": "some_secret_id"})
def test_retrieve_secret(self):
mock_secret = "SEKRET!"
secretsmanager_stubber = Stubber(secretsmanager)
request = {"SecretId": "some_secret_id"}
response = {"SecretString": mock_secret}
secretsmanager_stubber.add_response("get_secret_value", response, request)
secretsmanager_stubber.activate()
secret_value = secrets_manager_helper.retrieve_secret("variable", secretsmanager)
secretsmanager_stubber.assert_no_pending_responses()
self.assertEqual(mock_secret, secret_value)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"botocore.stub.Stubber",
"unittest.mock.patch.dict",
"src.utils.secrets_manager_helper.retrieve_secret"
] |
[((513, 567), 'unittest.mock.patch.dict', 'patch.dict', (['os.environ', "{'variable': 'some_secret_id'}"], {}), "(os.environ, {'variable': 'some_secret_id'})\n", (523, 567), False, 'from unittest.mock import patch\n'), ((1155, 1170), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1168, 1170), False, 'import unittest\n'), ((669, 692), 'botocore.stub.Stubber', 'Stubber', (['secretsmanager'], {}), '(secretsmanager)\n', (676, 692), False, 'from botocore.stub import Stubber\n'), ((940, 1006), 'src.utils.secrets_manager_helper.retrieve_secret', 'secrets_manager_helper.retrieve_secret', (['"""variable"""', 'secretsmanager'], {}), "('variable', secretsmanager)\n", (978, 1006), False, 'from src.utils import secrets_manager_helper\n'), ((456, 506), 'src.utils.secrets_manager_helper.retrieve_secret', 'secrets_manager_helper.retrieve_secret', (['"""variable"""'], {}), "('variable')\n", (494, 506), False, 'from src.utils import secrets_manager_helper\n')]
|
# Import all packages required
# Type annotation imports
from typing import Union
from typing import Tuple
# Other imports
import os
import glob
import h5py
from datetime import datetime
from datetime import timedelta
from datetime import timezone
import numpy as np
import pandas as pd
from scipy import interpolate
#-------------------------------------------------------------------------------------------------------#
# CREAM Data Utility class. Please refer to the docstring for details!
#-------------------------------------------------------------------------------------------------------#
class CREAM_Day():
"""
A class representing one particular day of the CREAM dataset.
The CREAM dataset has the following file structure:
|-CREAM
|------- 2018-08-23
| |--------- *.hdf5
| |--------- *.hdf5
| |--------- *.hdf5
| |--------- ......
|
|------- 2018-08-24
.......
This class corresponds to one of the subfolders, i.e. of the folders representing a particular day, such as, for
example, the first folder "2018-08-23". You have to create one CREAM_Day object per day folder in order to use the
data of the full dataset.
During initialization, the following attributes are set.
files_metadata_df (pandas.DataFrame): columns: Start_timestamp, End_timestamp, Filename to store start
end times of each file in this day
files (list): full path to every file in this day
minimum_request_timestamp (datetime.datetime): First timestamp of the day
maximum_request_timestamp (datetime.datetime): Last timestamp of the day
file_cache (dict): file cache for buffering already loaded files
day_date (datetime.datetime): day and date of the current object
This class also provides convenience functions to load the files of the CREAM dataset.
To load an arbitrary CREAM file, use the load_file method.
To load an arbitrary data window, based on the start_timestamp of the window to load, use the load_time_frame method.
To load the maintenance or product events as a pandas.DataFrame, use the load_machine_events method.
Via a parameter, one can also load the raw files that were generated by the coffee maker (they can be found in the
raw_coffee_maker_logs subfolder of the CREAM dataset).
To load the component events as a pandas.DataFrame, use the load_component_events_method.
To load information whether a specific day is a working day (German working day in the dataset), use the get_weekday_
information method.
Other self-explaining convenience functions are:
- get_datetime_from_filepath
- get_index_from_timestamp
- get_timestamp_from_index
Functions starting with an "_" underscore are private functions and are not intended for user usage.
"""
def __init__(self, cream_day_location: str, use_buffer : bool =False, buffer_size_files : int =5):
"""
Initialize the CREAM_Day object
Parameters
----------
cream_day_location (str): location of the root folder of the respective day in the CREAM dataset. Specify
a path to the respective day, not to the root of the overall CREAM datset!
use_buffer (boolean): default=False. In case it is set to True, files loaded via the load_file, or load_time_frame
method are stored in the cache of the CREAM_Day object. This speeds up streaming the dataset.
In case no buffer_size_file is provide, a default buffer_size_files of 5 is used.
Hence, the recent 5 files are stored in the cache. Old files are automatically removed from
the cache in case the buffer_size_files limit is exceeded.
buffer_size_files (int): Size of the file cache of the CREAM_Day object. Functionality of the cache is documented
in the use_buffer parameter description right above.
"""
self.dataset_location = cream_day_location
self.use_buffer = use_buffer
self.buffer_size_files = buffer_size_files
if self.buffer_size_files == 5 and use_buffer is True:
raise Warning("Buffer size was specified with size 5 (default value): a minimum buffer size of 5 files was set therefore")
# Initiate the file buffer dictionary
self.file_cache = {}
# Get all the files of the respective day
self.files = glob.glob(os.path.join(self.dataset_location, "*.hdf5"))
self.files.sort()
# We use the first file and the timestamps in the filenames in the dataset (of this day) to get the metadata information
# Get the timezone information from the filename timestamp
# Load Metadata from the first file of the respective device --> same for all of the device --> STATIC METADATA
with h5py.File(self.files[0], 'r', driver='core') as f:
self.sampling_rate = int(f.attrs['frequency']) # get the sampling rate
self.samples_per_file = len(f["voltage"]) # get the length of the signal
# get the start timestamp‚
start_timestamp = datetime(
year=int(f.attrs['year']),
month=int(f.attrs['month']),
day=int(f.attrs['day']),
hour=int(f.attrs['hours']),
minute=int(f.attrs['minutes']),
second=int(f.attrs['seconds']),
microsecond=int(f.attrs['microseconds']),
tzinfo=timezone(timedelta(hours=int(f.attrs['timezone'][1:4]), minutes=int(f.attrs['timezone'][4:]))))
self.file_duration_sec = 60 * 60 # each file, one hour --> seconds per file
self.number_of_files = len(self.files)
# Some file metadata for every file
file_start_times = [self.get_datetime_from_filepath(f) for f in self.files]
file_end_times = [timedelta(seconds=self.file_duration_sec) + ts for ts in file_start_times]
self.files_metadata_df = pd.DataFrame({"Start_timestamp": file_start_times,
"Filename": self.files,
"End_timestamp": file_end_times})
self.dataset_name = "CREAM"
# Compute the minimum and maximum time for this day, and the respective differences to the day before
self.minimum_request_timestamp = self.files_metadata_df.iloc[0].Start_timestamp
self.maximum_request_timestamp = self.files_metadata_df.iloc[-1].Start_timestamp + timedelta(seconds=self.file_duration_sec)
# Find the day of the dataset
folder_path = os.path.basename(os.path.normpath(self.dataset_location)) # name of the folder
date = folder_path.split("-")
self.day_date = datetime(year=int(date[0]), month=int(date[1]), day=int(date[2]))
# Initialize weekday information
self.weekday_information_df = None
def load_machine_events(self, file_path: str = None, filter_day : bool = False, raw_file=True) -> pd.DataFrame:
"""
Load the maintenance event file. The events are sorted by the time they occur.
Parameters
----------
file_path (str): path to the component events file (.csv) file
filter_day (boolean): default=False. If set to True, the DataFrame is filtered for the events belonging
to the CREAM_Day object
raw_file (boolean): default=True. If set to True, the user has to provide the path to the raw events file that
were generated by the coffee maker. They can be found in the raw_coffee_maker_logs subfolder
of the dataset.
Returns
-------
data (pd.DataFrame):
if raw_file=True: pd.DataFrame with columns "Timestamp", "Activity" (maintenance file) or
"Timestamp", "Product" (product file)
If raw_file=False: pd.DataFrame with columns
'Start_Timestamp', 'Automatic_Timestamp', 'Event_Type', 'End_Timestamp', 'Event_Duration_Seconds', 'Date',
Sorted descending by 'Start_Timestamp'.
"""
if file_path is None:
raise ValueError("Specify a file_path, containing the events file.")
if raw_file is True and "raw" not in file_path:
raise ValueError("In case you intend to load a raw_file, you also need to pass a path to a raw file to the "
"function!")
data = pd.read_csv(file_path)
# The timezone of the timestamps need to be from the same type
# We use the first file of the day_object to get
timezone = self.get_datetime_from_filepath(self.files[0]).tzinfo
if raw_file is True: # In case the raw product file is used
data.Timestamp = pd.to_datetime(data.Timestamp)
data = self._convert_timezone(data, "Timestamp", target_timezone=timezone)
data.sort_values("Timestamp", inplace=True)
data["Date"] = data.Timestamp.apply(lambda x: x.date())
else: # the manually adjusted and pre-processed product file is used
for column in data.columns:
# Convert all timestamp columns
if "Timestamp" in column:
data[column] = pd.to_datetime(data[column])
data = self._convert_timezone(data, column, target_timezone=timezone)
data["Date"] = data.End_Timestamp.apply(lambda x: x.date())
data.sort_values("Start_Timestamp", inplace=True)
if filter_day is True: # only return the event of the corresponding CREAM day
data = data[data["Date"] == self.day_date.date()]
return data
def load_component_events(self, file_path: str = None, filter_day : bool = False) -> pd.DataFrame:
"""
Load the labeled electrical events, i.e. the components events, file. The events are sorted by the time they occur.
Parameters
----------
file_path (str): path to the component events file (.csv) file
filter_day (boolean): default=False, if set to True, the DataFrame is filtered for the events belonging
to the CREAM_Day object
Returns
-------
data (pd.DataFrame): pd.DataFrame with columns:
'Start_Timestamp', 'Automatic_Timestamp', 'Event_Type', 'End_Timestamp', 'Event_Duration_Seconds', 'Date',
Sorted descending by 'Start_Timestamp'.
"""
if file_path is None:
raise ValueError("Specify a file_path, containing the events file.")
data = pd.read_csv(file_path)
# The timezone of the timestamps need to be from the same type
# We use the first file of the day_object to get
timezone = self.get_datetime_from_filepath(self.files[0]).tzinfo
for column in data.columns:
# Convert all timestamp columns
if "Timestamp" in column:
data[column] = pd.to_datetime(data[column])
data = self._convert_timezone(data, column, target_timezone=timezone)
data["Date"] = data.Timestamp.apply(lambda x: x.date())
data.sort_values("Timestamp", inplace=True)
if filter_day is True: # only return the event of the corresponding CREAM day
data = data[data["Date"] == self.day_date.date()]
return data
def load_file(self, file_path: str, return_noise: bool = False) -> Tuple[np.ndarray, np.ndarray]:
"""
Load a file of the CREAM dataset
If return_noise is specified, the noise channel is also returned. The current is 2-dimensional then.
The signals get pre-processed before they are returned by this function:
1. y-direction calibration: we center the signal around zero
2. calibration_factor: we calibrate the signal by the measurement device specific calibration_factor.
This calibration_factor is included in the metadata of the files.
Parameters
----------
file_path (string): path to the file to be loaded
return_noise (boolean): default=False. If set to True, the current of the noise socket is also returned.
Returns
-------
voltage (ndarray): voltage signal with shape=(1, file_length,). In case of an empty file None is returned.
current (ndarray): current signal either with shape (1, file_length) or (2, file_length)
In case of an empty file None is returned
"""
voltage = None
current = None
# Check if the file is already in the file cache
if self.use_buffer is True and file_path in self.file_cache:
voltage = self.file_cache[file_path]["voltage"]
current = self.file_cache[file_path]["current"]
return voltage, current
else:
# Check if the file is empty (zero bytes): if so return and empty current and voltage array
if os.stat(file_path).st_size > 0: # if not empty
with h5py.File(file_path, 'r', driver='core') as f:
voltage_offset, current_offset = self._adjust_amplitude_offset(f) # y value offset adjustment
for name in list(f):
signal = f[name][:] * 1.0
if name == 'voltage' and voltage_offset is not None: # the voltage signal
voltage = signal - voltage_offset
calibration_factor = f[name].attrs['calibration_factor']
voltage = np.multiply(voltage, calibration_factor)
elif "current1" in name and current_offset is not None: # the current signal of the coffee maker
current = signal - current_offset
calibration_factor = f[name].attrs['calibration_factor']
current = np.multiply(current, calibration_factor)
elif return_noise == True and "current6" in name and current_offset is not None: # the current signal of the noise channel
current_noise = signal - current_offset
calibration_factor = f[name].attrs['calibration_factor']
current_noise = np.multiply(current_noise, calibration_factor)
if return_noise is True:
current = np.array([current, current_noise])
voltage = np.array(voltage)
else:
current = np.array(current)
voltage = np.array(voltage)
# Before returning, check if we store the file in the cache and if we need to delete one instead from the cache
if self.use_buffer is True:
if len(self.file_cache) < self.buffer_size_files:
self.file_cache[file_path] = {"voltage" : np.array(voltage), "current": np.array(current)}
else:
sorted_filenames = list(self.file_cache.keys())
sorted_filenames.sort()
del self.file_cache[sorted_filenames[0]] #delete the oldest file
return np.array(voltage), np.array(current)
else: # if empty
return None, None
def load_file_metadata(self, file_path: str, attribute_list: list = None) -> dict:
    """
    Load the file metadata for a specific file.

    The metadata is stored in the HDF5 attributes, details are documented in the data descriptor.
    The following attributes are available:
    ["name", "first_trigger_id", "last_trigger_id", "sequence", "frequency", "year", "month", "day",
    "hours", "minutes", "seconds", "microseconds", "timezone", "calibration_factor", "removed_offset"]

    Parameters
    ----------
    file_path (str): path to the file to be loaded. Needs to be the full-path, as provided by the
        "files" attribute of the CREAM_Day object.
    attribute_list (list): default=None, specify specific attribute names to be loaded. If no
        dedicated attributes are specified, all attributes are returned.

    Returns
    -------
    attributes_dict (dict): dictionary with all HDF5 attributes of a specific file.

    Raises
    ------
    ValueError: if no file path is given or an unknown attribute name is requested.
    """
    if file_path is None:
        raise ValueError("Specify a file path!")
    all_attributes = ["name", "first_trigger_id", "last_trigger_id", "sequence", "frequency", "year", "month", "day",
                      "hours", "minutes", "seconds", "microseconds", "timezone", "calibration_factor", "removed_offset"]
    # None (or an empty list) means "load everything".
    # NOTE: the default used to be a mutable [] — replaced by the None sentinel.
    if not attribute_list:
        attribute_list = all_attributes
    else:
        # Validate user-specified attribute names before opening the file.
        for attr in attribute_list:
            if attr not in all_attributes:
                # BUG FIX: the placeholder was never interpolated before.
                raise ValueError("The attribute %s is not available!" % attr)
    attributes_dict = {}
    with h5py.File(file_path, 'r', driver='core') as f:
        for attr in attribute_list:
            if attr in ["calibration_factor", "removed_offset"]:  # not in the attribute root of the hdf5 file
                attributes_dict[attr] = f["voltage"].attrs[attr]
            else:  # attributes in the root of the hdf5 file
                attributes_dict[attr] = f.attrs[attr]
    return attributes_dict
def load_time_frame(self, start_datetime: datetime, duration: float, return_noise: bool = False) -> Tuple[np.ndarray, np.ndarray]:
    """
    Loads an arbitrary time-frame of the CREAM dataset. Can be also used for streaming the data fast: in case
    the caching parameter is enabled in the CREAM_Day object. Otherwise, the files will be reloaded every time
    this method is called, thus, slowing down the data retrieval.

    Parameters
    ----------
    start_datetime (datetime.datetime): start timestamp of the window to load.
    duration (float): duration of the window to load (window size) in seconds. ATTENTION: if not
        provided in seconds, wrong results are returned!
    return_noise (bool): default=False. If True, also returns the signal of the noise channel
        recorded in the CREAM dataset (from socket 6).

    Returns
    -------
    voltage (numpy.ndarray): voltage signal of the window.
    current (numpy.ndarray): current signal of the window. One dimensional if return_noise=False,
        two dimensional if return_noise=True (first element coffee-maker, second element noise).

    Raises
    ------
    ValueError: if the requested window is outside the day covered by this object.
    """
    # Sanity-check that the requested window lies within this day object.
    if start_datetime < self.minimum_request_timestamp:
        raise ValueError(
            "The requested Time window is smaller then the minimum_request_timestamp of the day object")
    end_datetime = start_datetime + timedelta(seconds=duration)
    if end_datetime > self.maximum_request_timestamp:
        raise ValueError("The requested Time window is bigger then the maximum_request_timestamp of the day object")
    # The last file that starts at/before the window start ...
    first_file_idx = self.files_metadata_df[self.files_metadata_df.Start_timestamp <= start_datetime].index[-1]
    # ... and the first file that ends at/after the window end.
    last_file_idx = self.files_metadata_df[self.files_metadata_df.End_timestamp >= end_datetime].index[0]
    relevant_files_df = self.files_metadata_df.loc[first_file_idx:last_file_idx]
    if len(relevant_files_df) == 0:
        raise ValueError("The timeframe requested does not lie within the current day!")
    relevant_voltage = []
    relevant_current = []
    relevant_current_noise = []
    for _, row in relevant_files_df.iterrows():
        voltage, current = self.load_file(row.Filename, return_noise=return_noise)
        relevant_voltage.append(voltage)
        if return_noise is True:
            # BUG FIX: previously the stacked (2, n) array returned by load_file was
            # appended to relevant_current AND its first row was appended again,
            # which broke the 1-D concatenation below. Only the split rows are kept.
            relevant_current.append(current[0])
            relevant_current_noise.append(current[1])
        else:
            relevant_current.append(current)
    # Stitch the per-file signals together along the sample axis.
    relevant_voltage = np.concatenate(relevant_voltage, axis=-1)
    relevant_current = np.concatenate(relevant_current, axis=-1)
    if return_noise is True and len(relevant_current_noise) > 0:
        relevant_current_noise = np.concatenate(relevant_current_noise, axis=-1)
    # Offsets of the requested window relative to the start of the first file.
    start_index = int(self.get_index_from_timestamp(relevant_files_df.iloc[0].Start_timestamp, start_datetime))
    end_index = int(self.get_index_from_timestamp(relevant_files_df.iloc[0].Start_timestamp, end_datetime))
    voltage = relevant_voltage[start_index:end_index]  # there is only one voltage channel
    if return_noise is True and len(relevant_current_noise) > 0:
        current = [relevant_current[start_index:end_index], relevant_current_noise[start_index:end_index]]
    else:
        current = relevant_current[start_index:end_index]
    return np.array(voltage), np.array(current)
def compute_average_sampling_rate(self) -> float:
    """
    Estimate the average sampling rate of the current day.

    Every file of the day covers one hour; per file the actual sampling rate is the
    number of voltage samples divided by 3600 seconds. The mean over all files is
    stored in the ``average_sampling_rate`` attribute and returned, so it can be
    compared against the nominal rate of 6400 Hz.

    Returns
    -------
    average_sampling_rate (float): average sampling rate per day (computed over the files)
    """
    seconds_per_file = 60 * 60  # one hour files
    per_file_rates = [
        len(self.load_file(file_path=file_name)[0]) / seconds_per_file
        for file_name in self.files
    ]
    self.average_sampling_rate = np.mean(per_file_rates)
    return self.average_sampling_rate
def get_datetime_from_filepath(self, filepath: str) -> datetime:
    """
    Extract the start timestamp encoded in the name of a CREAM file.

    The filename embeds the timestamp between the second and the last ``-``-separated
    fields, formatted as ``%Y-%m-%dT%H-%M-%S.%fT%z``.

    Parameters
    ----------
    filepath (str): path to a CREAM file

    Returns
    -------
    datetime_object (datetime): start timestamp of the file, parsed from the filename
    """
    file_name = os.path.basename(filepath)
    pieces = file_name.split("-")
    raw_stamp = "-".join(pieces[2:-1])
    return datetime.strptime(raw_stamp, '%Y-%m-%dT%H-%M-%S.%fT%z')
def get_index_from_timestamp(self, start_timestamp: datetime, event_timestamp: datetime) -> int:
    """
    Return the sample index of an event relative to a window start.

    Parameters
    ----------
    start_timestamp (datetime.datetime): start timestamp of the window the event is located at
    event_timestamp (datetime.datetime): timestamp of the event of interest

    Returns
    -------
    event_index (int): index of the event, i.e. elapsed seconds times the sampling rate
    """
    elapsed = event_timestamp - start_timestamp
    # seconds since the window start, scaled to samples per second
    return int(elapsed.total_seconds() * self.sampling_rate)
def get_timestamp_from_index(self, start_timestamp: datetime, event_index: int) -> datetime:
    """
    Return the timestamp of an event index relative to a window start.

    Parameters
    ----------
    start_timestamp (datetime.datetime): start timestamp of the window.
    event_index (int): index of the event of interest, relative to start_timestamp.

    Returns
    -------
    event_timestamp (datetime.datetime): the resulting timestamp
    """
    # 1 second / sampling_rate = duration of a single sample
    elapsed_seconds = event_index * (1 / self.sampling_rate)
    return start_timestamp + timedelta(seconds=elapsed_seconds)
def _adjust_amplitude_offset(self, file: h5py.File) -> Tuple[int, int]:
"""
Resembles the pre-processing functionality in the BLOND repository (one_second_data_summary_functions.py) by
<NAME>.
Computes the mean per period to get an estimate for the offset in each period.
This is done for the voltage signal.
The period length is computed using the nominal sampling rate. Tthis can deviate from the
actual period length. Therefore, we zero pad the voltage signal to get full periods again before computing
the mean.
Then we use the estimate per period, to linearly interpolate the mean values per period, to get an offset value
per sample point in the signal. We then use the offset of the voltage to compute the offset of the current by multiplying
it by the crest-coefficient of 1/sqrt(2), i.e., approx. 0.7 .
Parameters
----------
file (h5py.File): a h5py CREAM file.
Returns
-------
voltage_offset (int): the voltage offset to adjust for
current_offset (int): the current offset to adjust for
"""
length = len(file['voltage'])
# Compute the average period_length, using the nominal sampling rate
period_length = round(self.sampling_rate / 50)
# Get the missing samples, opposed to the optimal number of periods in the signal
remainder = divmod(length, period_length)[1]
voltage = np.pad(file['voltage'][:], (0, period_length - remainder), 'constant',
constant_values=0) # zero padding
voltage = voltage.reshape(-1, period_length) # the single periods, period wise reshape
mean_values_per_period = voltage.mean(axis=1) # compute the mean per period
# Create x values for the interpolation
x_per_period = np.linspace(1, length, len(mean_values_per_period), dtype=np.int) # number of periods
x_original = np.linspace(1, length, length, dtype=np.int)
# build a linear interpolation, that interpolates for each period witch offset it should have
# for each of the datapoints, interpolate the offset
voltage_offset = interpolate.interp1d(x_per_period, mean_values_per_period)(x_original)
current_offset = voltage_offset * 1 / np.sqrt(2) # roughly * 0.7
return voltage_offset, current_offset
def _convert_timezone(self, dataframe: pd.DataFrame, column_name : str, target_timezone:str) -> pd.DataFrame:
"""
Converts timezone in column_name column in dataframe to target_timezone
Parameters
----------
dataframe (pandas.DataFrame): DataFrame object, containing some time columns
column_name (str): Name of the column of interest, i.e. the name of a time column
target_timezone (str): datetime.datetime.tzinfo timezone information as a string. This is the target timezone.
Returns
-------
dataframe (pandas.DataFrame): DataFrame object, with the column_name column converted to the target_timezone
"""
ts_array = []
for i, row in dataframe.iterrows():
ts = row[column_name].tz_convert(target_timezone)
ts_array.append(ts)
dataframe[column_name] = ts_array
return dataframe
def get_weekday_information(self, date: Union[list, np.ndarray], file_path: str = None) -> pd.DataFrame:
    """
    For certain dates, get the day-related information from the file provided with the dataset.

    Parameters
    ----------
    date (list, np.ndarray): list of string dates to be checked, format: year-month-day
    file_path (str): default=None; if no path is provided, the default location of the file is assumed

    Returns
    -------
    day_information_df (pd.DataFrame): DataFrame with columns:
        Date (datetime.date), WorkingDay (boolean), Weekday (string).
        None when *date* is neither a list nor an ndarray.
    """
    if file_path is None:
        file_path = os.path.abspath(self.dataset_location + "/../" + "day_information.csv")
    day_information_df = None
    if self.weekday_information_df is None:  # lazily load the CSV on first use
        self.weekday_information_df = pd.read_csv(file_path)
    # isinstance instead of `type(...) in [...]` so subclasses are accepted too.
    if isinstance(date, (list, np.ndarray)):
        if not all(isinstance(n, str) for n in date):  # if not all dates are strings, convert them
            date = [str(n) for n in date]
        # .copy() so the Date conversion below does not mutate a view of the
        # cached frame (avoids SettingWithCopyWarning and shared-state bugs).
        day_information_df = self.weekday_information_df[self.weekday_information_df.Date.isin(date)].copy()
        day_information_df.Date = day_information_df.Date.apply(
            lambda x: pd.to_datetime(x, format='%Y-%m-%d')).dt.date
    return day_information_df
|
[
"pandas.read_csv",
"numpy.mean",
"scipy.interpolate.interp1d",
"os.path.join",
"pandas.DataFrame",
"numpy.pad",
"os.path.abspath",
"numpy.multiply",
"datetime.timedelta",
"os.path.normpath",
"numpy.linspace",
"h5py.File",
"os.stat",
"os.path.basename",
"datetime.datetime.strptime",
"pandas.to_datetime",
"numpy.concatenate",
"numpy.array",
"numpy.sqrt"
] |
[((6149, 6261), 'pandas.DataFrame', 'pd.DataFrame', (["{'Start_timestamp': file_start_times, 'Filename': self.files,\n 'End_timestamp': file_end_times}"], {}), "({'Start_timestamp': file_start_times, 'Filename': self.files,\n 'End_timestamp': file_end_times})\n", (6161, 6261), True, 'import pandas as pd\n'), ((8696, 8718), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (8707, 8718), True, 'import pandas as pd\n'), ((10855, 10877), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (10866, 10877), True, 'import pandas as pd\n'), ((21013, 21054), 'numpy.concatenate', 'np.concatenate', (['relevant_voltage'], {'axis': '(-1)'}), '(relevant_voltage, axis=-1)\n', (21027, 21054), True, 'import numpy as np\n'), ((21082, 21123), 'numpy.concatenate', 'np.concatenate', (['relevant_current'], {'axis': '(-1)'}), '(relevant_current, axis=-1)\n', (21096, 21123), True, 'import numpy as np\n'), ((22009, 22026), 'numpy.array', 'np.array', (['voltage'], {}), '(voltage)\n', (22017, 22026), True, 'import numpy as np\n'), ((22045, 22062), 'numpy.array', 'np.array', (['current'], {}), '(current)\n', (22053, 22062), True, 'import numpy as np\n'), ((23299, 23329), 'numpy.mean', 'np.mean', (['actual_sampling_rates'], {}), '(actual_sampling_rates)\n', (23306, 23329), True, 'import numpy as np\n'), ((23759, 23785), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (23775, 23785), False, 'import os\n'), ((23896, 23958), 'datetime.datetime.strptime', 'datetime.strptime', (['string_timestamp', '"""%Y-%m-%dT%H-%M-%S.%fT%z"""'], {}), "(string_timestamp, '%Y-%m-%dT%H-%M-%S.%fT%z')\n", (23913, 23958), False, 'from datetime import datetime\n'), ((27131, 27224), 'numpy.pad', 'np.pad', (["file['voltage'][:]", '(0, period_length - remainder)', '"""constant"""'], {'constant_values': '(0)'}), "(file['voltage'][:], (0, period_length - remainder), 'constant',\n constant_values=0)\n", (27137, 27224), True, 'import numpy as np\n'), 
((27628, 27672), 'numpy.linspace', 'np.linspace', (['(1)', 'length', 'length'], {'dtype': 'np.int'}), '(1, length, length, dtype=np.int)\n', (27639, 27672), True, 'import numpy as np\n'), ((4593, 4638), 'os.path.join', 'os.path.join', (['self.dataset_location', '"""*.hdf5"""'], {}), "(self.dataset_location, '*.hdf5')\n", (4605, 4638), False, 'import os\n'), ((4997, 5041), 'h5py.File', 'h5py.File', (['self.files[0]', '"""r"""'], {'driver': '"""core"""'}), "(self.files[0], 'r', driver='core')\n", (5006, 5041), False, 'import h5py\n'), ((6681, 6722), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.file_duration_sec'}), '(seconds=self.file_duration_sec)\n', (6690, 6722), False, 'from datetime import timedelta\n'), ((6801, 6840), 'os.path.normpath', 'os.path.normpath', (['self.dataset_location'], {}), '(self.dataset_location)\n', (6817, 6840), False, 'import os\n'), ((9021, 9051), 'pandas.to_datetime', 'pd.to_datetime', (['data.Timestamp'], {}), '(data.Timestamp)\n', (9035, 9051), True, 'import pandas as pd\n'), ((17398, 17438), 'h5py.File', 'h5py.File', (['file_path', '"""r"""'], {'driver': '"""core"""'}), "(file_path, 'r', driver='core')\n", (17407, 17438), False, 'import h5py\n'), ((19419, 19446), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'duration'}), '(seconds=duration)\n', (19428, 19446), False, 'from datetime import timedelta\n'), ((21231, 21278), 'numpy.concatenate', 'np.concatenate', (['relevant_current_noise'], {'axis': '(-1)'}), '(relevant_current_noise, axis=-1)\n', (21245, 21278), True, 'import numpy as np\n'), ((25572, 25607), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'time_since_start'}), '(seconds=time_since_start)\n', (25581, 25607), False, 'from datetime import timedelta\n'), ((27862, 27920), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['x_per_period', 'mean_values_per_period'], {}), '(x_per_period, mean_values_per_period)\n', (27882, 27920), False, 'from scipy import interpolate\n'), ((27979, 27989), 
'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (27986, 27989), True, 'import numpy as np\n'), ((29719, 29790), 'os.path.abspath', 'os.path.abspath', (["(self.dataset_location + '/../' + 'day_information.csv')"], {}), "(self.dataset_location + '/../' + 'day_information.csv')\n", (29734, 29790), False, 'import os\n'), ((29942, 29964), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (29953, 29964), True, 'import pandas as pd\n'), ((6041, 6082), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'self.file_duration_sec'}), '(seconds=self.file_duration_sec)\n', (6050, 6082), False, 'from datetime import timedelta\n'), ((11230, 11258), 'pandas.to_datetime', 'pd.to_datetime', (['data[column]'], {}), '(data[column])\n', (11244, 11258), True, 'import pandas as pd\n'), ((9512, 9540), 'pandas.to_datetime', 'pd.to_datetime', (['data[column]'], {}), '(data[column])\n', (9526, 9540), True, 'import pandas as pd\n'), ((13240, 13258), 'os.stat', 'os.stat', (['file_path'], {}), '(file_path)\n', (13247, 13258), False, 'import os\n'), ((13310, 13350), 'h5py.File', 'h5py.File', (['file_path', '"""r"""'], {'driver': '"""core"""'}), "(file_path, 'r', driver='core')\n", (13319, 13350), False, 'import h5py\n'), ((14707, 14741), 'numpy.array', 'np.array', (['[current, current_noise]'], {}), '([current, current_noise])\n', (14715, 14741), True, 'import numpy as np\n'), ((14772, 14789), 'numpy.array', 'np.array', (['voltage'], {}), '(voltage)\n', (14780, 14789), True, 'import numpy as np\n'), ((14842, 14859), 'numpy.array', 'np.array', (['current'], {}), '(current)\n', (14850, 14859), True, 'import numpy as np\n'), ((14890, 14907), 'numpy.array', 'np.array', (['voltage'], {}), '(voltage)\n', (14898, 14907), True, 'import numpy as np\n'), ((15526, 15543), 'numpy.array', 'np.array', (['voltage'], {}), '(voltage)\n', (15534, 15543), True, 'import numpy as np\n'), ((15545, 15562), 'numpy.array', 'np.array', (['current'], {}), '(current)\n', (15553, 15562), True, 'import 
numpy as np\n'), ((13850, 13890), 'numpy.multiply', 'np.multiply', (['voltage', 'calibration_factor'], {}), '(voltage, calibration_factor)\n', (13861, 13890), True, 'import numpy as np\n'), ((15217, 15234), 'numpy.array', 'np.array', (['voltage'], {}), '(voltage)\n', (15225, 15234), True, 'import numpy as np\n'), ((15247, 15264), 'numpy.array', 'np.array', (['current'], {}), '(current)\n', (15255, 15264), True, 'import numpy as np\n'), ((30345, 30381), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'format': '"""%Y-%m-%d"""'}), "(x, format='%Y-%m-%d')\n", (30359, 30381), True, 'import pandas as pd\n'), ((14200, 14240), 'numpy.multiply', 'np.multiply', (['current', 'calibration_factor'], {}), '(current, calibration_factor)\n', (14211, 14240), True, 'import numpy as np\n'), ((14588, 14634), 'numpy.multiply', 'np.multiply', (['current_noise', 'calibration_factor'], {}), '(current_noise, calibration_factor)\n', (14599, 14634), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import datetime as dt
import os
import discord
from __main__ import send_cmd_help
from box import Box, BoxList
from cogs.utils import checks
from cogs.utils.chat_formatting import pagify
from cogs.utils.dataIO import dataIO
from discord.ext import commands
PATH = os.path.join("data", "votemanager")  # data directory of this cog
JSON = os.path.join(PATH, "settings.json")  # persisted settings file
class Survey(Box):
    """A single survey: title, description, eligible roles, options and cast votes."""

    def __init__(self, title=None, description=None, role_ids=None, options=None, votes=None, timestamp=None):
        super().__init__()
        self.title = title
        self.description = description
        # Fresh containers are only created when the caller passed nothing,
        # so explicitly supplied (even empty) containers are kept as-is.
        self.role_ids = BoxList() if role_ids is None else role_ids
        self.options = BoxList() if options is None else options
        self.votes = Box() if votes is None else votes
        self.timestamp = timestamp
class VoteManager:
    """Vote Manager. Voting module for a Discord server (cog)."""

    def __init__(self, bot):
        """Init."""
        self.bot = bot
        # default_box=True makes missing keys resolve to empty Boxes instead of raising.
        self.settings = Box(dataIO.load_json(JSON), default_box=True)

    def save_settings(self):
        """Persist the in-memory settings to the JSON file on disk."""
        dataIO.save_json(JSON, self.settings)

    def add_survey(self, server, title, description, roles, options):
        """Add a new survey.

        :returns: ID of survey (1-based position in the server's survey list)
        """
        server_settings = self.settings[server.id]
        # First survey on this server: replace the empty default Box with a list.
        if server_settings.surveys == Box():
            server_settings.surveys = BoxList()
        survey = Survey(
            title=title,
            description=description,
            role_ids=[r.id for r in roles],
            options=options,
            timestamp=dt.datetime.utcnow().timestamp()
        )
        server_settings.surveys.append(survey)
        self.save_settings()
        # The new survey is the last element, so the list length is its 1-based ID.
        return len(server_settings.surveys)

    def get_surveys(self, server):
        """Return list of surveys on the server (creating an empty list if needed)."""
        server_settings = self.settings[server.id]
        if "surveys" not in server_settings:
            server_settings.surveys = BoxList()
        return server_settings.surveys

    def get_survey_by_id(self, server, id):
        """Return survey by ID, where ID is 0-based index of surveys (None if out of range)."""
        surveys = self.get_surveys(server)
        if id >= len(surveys):
            return None
        return surveys[id]

    def reset_server(self, server):
        """Reset server settings to an empty survey list and persist the change."""
        self.settings[server.id] = Box()
        self.settings[server.id].surveys = BoxList()
        self.save_settings()

    @commands.group(pass_context=True, aliases=['vm'])
    async def votemanager(self, ctx):
        """Settings."""
        # Group entry point: show help when invoked without a subcommand.
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)

    @checks.serverowner()
    @votemanager.command(name="reset", aliases=[], pass_context=True, no_pm=True)
    async def votemanager_reset(self, ctx):
        """Reset server settings"""
        server = ctx.message.server
        self.reset_server(server)
        await self.bot.say("Server settings reset.")

    @checks.mod_or_permissions()
    @votemanager.command(name="add", aliases=['a'], pass_context=True, no_pm=True)
    async def votemanager_add(self, ctx):
        """Add vote. Interactive."""
        author = ctx.message.author
        server = ctx.message.server
        await self.bot.say("Add a new survey. Continue? (y/n)")
        answer = await self.bot.wait_for_message(author=author)
        if answer.content.lower() != 'y':
            await self.bot.say("Aborted.")
            return
        #: Title
        await self.bot.say("Enter the title of the vote:")
        answer = await self.bot.wait_for_message(author=author)
        title = answer.content
        #: Description
        await self.bot.say("Enter the description of the vote:")
        answer = await self.bot.wait_for_message(author=author)
        description = answer.content
        #: Roles
        await self.bot.say("Enter list of roles who can vote for this, separated by `|`:")
        answer = await self.bot.wait_for_message(author=author)
        role_names = [a.strip() for a in answer.content.split('|')]
        roles = [discord.utils.get(server.roles, name=role_name) for role_name in role_names]
        for role in roles:
            if role is None:
                # NOTE(review): `role` is always None in this branch, so the
                # message prints "None" rather than the unmatched role name.
                await self.bot.say("Cannot find {} on server. Aborting…".format(role))
                return
        #: Options
        await self.bot.say("Enter a list of options, separated by `|`:")
        answer = await self.bot.wait_for_message(author=author)
        options = [a.strip() for a in answer.content.split('|')]
        survey_id = self.add_survey(server, title, description, roles, options)
        # Echo the freshly created survey back to the user.
        await ctx.invoke(self.votemanager_list, survey_id)

    @votemanager.command(name="list", aliases=['l'], pass_context=True, no_pm=True)
    async def votemanager_list(self, ctx, survey_number=None):
        """List votes."""
        server = ctx.message.server
        surveys = self.get_surveys(server)
        if len(surveys) == 0:
            await self.bot.say("No surveys found.")
            return
        if survey_number is None:
            # No survey selected: show an overview embed of all surveys.
            em = discord.Embed(
                title="Vote Manager",
                description="List of surveys"
            )
            for i, s in enumerate(surveys, 1):
                em.add_field(
                    name=str(i),
                    value=s.title,
                    inline=False
                )
            em.set_footer(
                text='[p]vm list 1 to see details about survey 1'
            )
            await self.bot.say(embed=em)
        else:
            # Detail view for one survey (1-based user input -> 0-based index).
            id = int(survey_number) - 1
            survey = self.get_survey_by_id(server, id)
            em = discord.Embed(
                title=survey.title,
                description=survey.description
            )
            em.add_field(
                name='Role(s)',
                value=', '.join([discord.utils.get(server.roles, id=rid).name for rid in survey.role_ids]),
            )
            em.add_field(
                name='Options',
                value='\n'.join(
                    ['`{}. ` {}'.format(number, option) for number, option in enumerate(survey.options, 1)]
                ),
                inline=False
            )
            em.set_footer(
                text='[p]vm vote {} [option_number] to cast your vote.'.format(survey_number)
            )
            await self.bot.say(embed=em)

    @votemanager.command(name="vote", pass_context=True, no_pm=True)
    async def votemanager_vote(self, ctx, survey_number, option_number=None):
        """Vote"""
        server = ctx.message.server
        author = ctx.message.author
        survey_id = int(survey_number) - 1
        survey = self.get_survey_by_id(server, survey_id)
        if survey is None:
            await self.bot.say("Invalid survey id.")
            return
        if option_number is None:
            # No option given: show the survey so the user can pick one.
            await self.bot.say("You didn’t enter your option number. Here are the options:")
            await ctx.invoke(self.votemanager_list, survey_number)
            return
        if not option_number.isdigit():
            await self.bot.say("Option number must be a number.")
            return
        option_number = int(option_number)
        if option_number > len(survey.options) or option_number < 1:
            await self.bot.say("That is not a valid options.")
            await ctx.invoke(self.votemanager_list, survey_number)
            return
        # Only members holding at least one of the survey's roles may vote.
        roles = [discord.utils.get(server.roles, id=id) for id in survey.role_ids]
        valid_roles = [x for x in roles if x in author.roles]
        if len(valid_roles) == 0:
            await self.bot.say("You do not have the required roles to vote for this survey.")
            return
        if author.id in survey.votes.keys():
            # Re-voting is allowed; tell the user what they voted for before.
            voted_option_id = survey.votes[author.id]
            await self.bot.say(
                "You have previously voted for option {}. {}".format(
                    voted_option_id + 1, survey.options[voted_option_id]
                )
            )
        self.add_vote(server, author, survey_number, option_number)
        await self.bot.say(
            "You have cast a vote for option {}. {}".format(
                option_number, survey.options[int(option_number) - 1]
            )
        )

    def add_vote(self, server, author, survey_number, option_number):
        """Add (or overwrite) a member's vote and persist the settings."""
        survey_id = int(survey_number) - 1
        option_id = int(option_number) - 1
        survey = self.get_survey_by_id(server, survey_id)
        # Votes are keyed by author id, so a member can only hold one vote.
        survey.votes[author.id] = option_id
        self.save_settings()
def check_folder():
    """Ensure the cog's data folder exists."""
    os.makedirs(PATH, exist_ok=True)
def check_file():
    """Create the settings file with an empty object if missing or invalid."""
    if not dataIO.is_valid_json(JSON):
        dataIO.save_json(JSON, {})
def setup(bot):
    """Cog entry point: prepare storage and register the cog with the bot."""
    check_folder()
    check_file()
    n = VoteManager(bot)
    bot.add_cog(n)
|
[
"discord.utils.get",
"box.Box",
"box.BoxList",
"os.makedirs",
"discord.Embed",
"cogs.utils.checks.mod_or_permissions",
"cogs.utils.dataIO.dataIO.is_valid_json",
"cogs.utils.dataIO.dataIO.load_json",
"datetime.datetime.utcnow",
"cogs.utils.checks.serverowner",
"__main__.send_cmd_help",
"discord.ext.commands.group",
"os.path.join",
"cogs.utils.dataIO.dataIO.save_json"
] |
[((1370, 1405), 'os.path.join', 'os.path.join', (['"""data"""', '"""votemanager"""'], {}), "('data', 'votemanager')\n", (1382, 1405), False, 'import os\n'), ((1413, 1448), 'os.path.join', 'os.path.join', (['PATH', '"""settings.json"""'], {}), "(PATH, 'settings.json')\n", (1425, 1448), False, 'import os\n'), ((3658, 3707), 'discord.ext.commands.group', 'commands.group', ([], {'pass_context': '(True)', 'aliases': "['vm']"}), "(pass_context=True, aliases=['vm'])\n", (3672, 3707), False, 'from discord.ext import commands\n'), ((3856, 3876), 'cogs.utils.checks.serverowner', 'checks.serverowner', ([], {}), '()\n', (3874, 3876), False, 'from cogs.utils import checks\n'), ((4169, 4196), 'cogs.utils.checks.mod_or_permissions', 'checks.mod_or_permissions', ([], {}), '()\n', (4194, 4196), False, 'from cogs.utils import checks\n'), ((9871, 9903), 'os.makedirs', 'os.makedirs', (['PATH'], {'exist_ok': '(True)'}), '(PATH, exist_ok=True)\n', (9882, 9903), False, 'import os\n'), ((2266, 2303), 'cogs.utils.dataIO.dataIO.save_json', 'dataIO.save_json', (['JSON', 'self.settings'], {}), '(JSON, self.settings)\n', (2282, 2303), False, 'from cogs.utils.dataIO import dataIO\n'), ((3564, 3569), 'box.Box', 'Box', ([], {}), '()\n', (3567, 3569), False, 'from box import Box, BoxList\n'), ((3613, 3622), 'box.BoxList', 'BoxList', ([], {}), '()\n', (3620, 3622), False, 'from box import Box, BoxList\n'), ((9958, 9984), 'cogs.utils.dataIO.dataIO.is_valid_json', 'dataIO.is_valid_json', (['JSON'], {}), '(JSON)\n', (9978, 9984), False, 'from cogs.utils.dataIO import dataIO\n'), ((9994, 10020), 'cogs.utils.dataIO.dataIO.save_json', 'dataIO.save_json', (['JSON', '{}'], {}), '(JSON, {})\n', (10010, 10020), False, 'from cogs.utils.dataIO import dataIO\n'), ((1679, 1688), 'box.BoxList', 'BoxList', ([], {}), '()\n', (1686, 1688), False, 'from box import Box, BoxList\n'), ((1740, 1749), 'box.BoxList', 'BoxList', ([], {}), '()\n', (1747, 1749), False, 'from box import Box, BoxList\n'), ((1797, 1802), 
'box.Box', 'Box', ([], {}), '()\n', (1800, 1802), False, 'from box import Box, BoxList\n'), ((2157, 2179), 'cogs.utils.dataIO.dataIO.load_json', 'dataIO.load_json', (['JSON'], {}), '(JSON)\n', (2173, 2179), False, 'from cogs.utils.dataIO import dataIO\n'), ((2538, 2543), 'box.Box', 'Box', ([], {}), '()\n', (2541, 2543), False, 'from box import Box, BoxList\n'), ((2583, 2592), 'box.BoxList', 'BoxList', ([], {}), '()\n', (2590, 2592), False, 'from box import Box, BoxList\n'), ((3163, 3172), 'box.BoxList', 'BoxList', ([], {}), '()\n', (3170, 3172), False, 'from box import Box, BoxList\n'), ((5284, 5331), 'discord.utils.get', 'discord.utils.get', (['server.roles'], {'name': 'role_name'}), '(server.roles, name=role_name)\n', (5301, 5331), False, 'import discord\n'), ((6297, 6363), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Vote Manager"""', 'description': '"""List of surveys"""'}), "(title='Vote Manager', description='List of surveys')\n", (6310, 6363), False, 'import discord\n'), ((6881, 6946), 'discord.Embed', 'discord.Embed', ([], {'title': 'survey.title', 'description': 'survey.description'}), '(title=survey.title, description=survey.description)\n', (6894, 6946), False, 'import discord\n'), ((8672, 8710), 'discord.utils.get', 'discord.utils.get', (['server.roles'], {'id': 'id'}), '(server.roles, id=id)\n', (8689, 8710), False, 'import discord\n'), ((3831, 3849), '__main__.send_cmd_help', 'send_cmd_help', (['ctx'], {}), '(ctx)\n', (3844, 3849), False, 'from __main__ import send_cmd_help\n'), ((2776, 2796), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (2794, 2796), True, 'import datetime as dt\n'), ((7084, 7123), 'discord.utils.get', 'discord.utils.get', (['server.roles'], {'id': 'rid'}), '(server.roles, id=rid)\n', (7101, 7123), False, 'import discord\n')]
|
#!/usr/bin/env python
# File: squares-enumerator.py
# Description: An SQL Synthesizer Using Query Reverse Engineering
# Author: <NAME>
# Created on: 22-02-2019 15:13:15
# Usage: python3 squaresEnumerator.py [flags|(-h for help)] specFile.in
# Python version: 3.6.4
from sys import argv
from string import *
import tyrell.spec as S
from tyrell.interpreter import PostOrderInterpreter, GeneralError
from tyrell.enumerator import *
from tyrell.decider import Example, ExampleConstraintDecider, ExampleConstraintPruningDecider
from tyrell.synthesizer import Synthesizer
from tyrell.logger import get_logger
import rpy2.robjects as robjects
from itertools import permutations
import warnings
from rpy2.rinterface import RRuntimeWarning
import sqlparse as sp
import re
import sys
import os
warnings.filterwarnings("ignore", category=RRuntimeWarning)
logger = get_logger('tyrell')
counter_ = 0
distinct = False
getProgram = False
final_program = ''
_tables = dict()
output_attrs = ""
attributes = []
robjects.r('''
library(dplyr)
library(dbplyr)
library(tidyr)
library(stringr)
options(warn=-1)
''')
## Common utils.
def get_collist(sel):
    """Identity helper: the column list is already in its final textual form."""
    return sel
def get_fresh_name():
    """Return a unique data-frame identifier of the form ``RET_DF<k>``.

    Bumps the module-wide ``counter_`` so every generated table gets a
    distinct name inside the embedded R session.
    """
    global counter_
    counter_ += 1
    return 'RET_DF{}'.format(counter_)
def get_fresh_col():
    """Return a unique column identifier of the form ``COL<k>``.

    Shares the same module-wide ``counter_`` as :func:`get_fresh_name`.
    """
    global counter_
    counter_ += 1
    return 'COL{}'.format(counter_)
def get_type(df, index):
    """Return the R class name of column *index* of the data frame named *df*.

    Evaluates ``sapply(df, class)[index]`` in the embedded R session and
    returns the first element of the result vector.
    """
    query = 'sapply({df_name}, class)[{pos}]'.format(df_name=df, pos=index)
    return robjects.r(query)[0]
# get the string format to be used in filter
def getConst(cons):
    """Render constant *cons* for interpolation into an R ``filter()`` call.

    Integer literals, the special token ``"max(n)"`` and known integer
    attribute names are returned verbatim; everything else is wrapped in
    double quotes so that R treats it as a string literal.

    Fixes a defect of the original version: a constant of ``"0"`` made the
    truthiness test ``if int(cons)`` fall through, so the function returned
    None (which was then interpolated as the text ``None`` into the script).
    """
    global attributes
    try:
        int(cons)  # any integer literal, zero included, stays unquoted
        return str(cons)
    except (TypeError, ValueError):
        pass
    if str(cons) == "max(n)" or cons in attributes:
        return str(cons)
    return "\"" + str(cons) + "\""
def getColsPermutations(cols, num):
    """Return comma-joined permutations of *cols*, for every length from
    *num* down to 1 (longest first), as a flat list of strings."""
    joined = []
    for length in range(num, 0, -1):
        joined.extend(", ".join(p) for p in permutations(cols, length))
    return joined
def eq_r(actual, expect):
    """True iff the two named R data frames compare equal after coercing
    every column to character (via ``lapply(..., as.character)``)."""
    global distinct
    script = 'all.equal(lapply({lhs}, as.character),lapply({rhs}, as.character))'.format(lhs=actual, rhs=expect)
    try:
        result = robjects.r(script)
    except:  # R raises when the frames are not comparable at all
        return False
    return result[0] == True
# find if there is one integer constant in the list of constants
def findConst(consts):
    """Return True iff some entry of *consts*, with its surrounding quote
    characters stripped (``c[1:-1]``), parses as an integer.

    Fixes two defects of the original recursive version: a constant of
    ``"0"`` made it fall off the end of the function (returning None), and
    it stopped scanning the remaining constants when that happened.
    """
    for quoted in consts:
        try:
            int(quoted[1:-1])
            return True
        except (TypeError, ValueError):
            continue
    return False
class SquaresInterpreter(PostOrderInterpreter):
    """Concrete interpreter for the SQUARES DSL.

    Every ``eval_<production>`` handler renders one dplyr/tidyr statement,
    executes it in the embedded R session and returns the name of the
    fresh data frame holding the intermediate result.  The ``apply_*``
    methods implement the abstract interpreter used for pruning.

    The per-node boilerplate (fresh table name, ``_tables`` bookkeeping,
    optional transcript recording, R execution and error translation) was
    duplicated verbatim in every handler of the original implementation;
    it now lives in ``_fresh_table`` and ``_run``.
    """

    # ----------------------------- shared plumbing -----------------------------

    def _fresh_table(self):
        """Mint a fresh result-table name and register it in ``_tables``."""
        name = get_fresh_name()
        _tables[name] = counter_
        return name

    def _run(self, script, ret_df_name):
        """Execute *script* in R and return *ret_df_name* on success.

        When the global ``getProgram`` flag is set, the script is also
        appended to the global ``final_program`` transcript.  Any R-side
        failure is translated into ``GeneralError`` so the synthesizer can
        discard the candidate program.
        """
        global final_program
        if getProgram:
            final_program += script + "\n"
        try:
            robjects.r(script)
            return ret_df_name
        except Exception:
            raise GeneralError()

    def _filter_operand(self, cond):
        """Render one operand of a composite filter condition."""
        if "str_detect" not in cond:
            col, op, const = cond.split(" ")
            # "max(n)" must stay un-quoted; everything else goes through getConst
            const = getConst(const) if const != "max(n)" else "max(n)"
            return col + " " + op + " " + const
        col, string = cond.split("|")
        return col + ", " + "\"" + string[:-1] + "\")"

    ## Concrete interpreter

    def eval_ColInt(self, v):
        return v

    def eval_ColList(self, v):
        return v

    def eval_const(self, node, args):
        return args[0]

    def eval_unused(self, node, args):
        return get_fresh_name()

    def eval_select(self, node, args):
        ret_df_name = self._fresh_table()
        _script = '{ret_df} <- {table} %>% ungroup() %>% select({cols})'.format(ret_df=ret_df_name, table=args[0], cols=get_collist(args[1]))
        if args[2] == "distinct":
            _script += ' %>% distinct()'
        return self._run(_script, ret_df_name)

    def eval_filter(self, node, args):
        ret_df_name = self._fresh_table()
        if "str_detect" not in args[1]:
            col, op, const = args[1].split(" ")
            if const != "max(n)":
                _script = '{ret_df} <- {table} %>% ungroup() %>% filter({col} {op} {const})'.format(ret_df=ret_df_name, table=args[0], op=op, col=col, const=getConst(const))
            else:
                # max(n) must be evaluated inside the current grouping
                _script = '{ret_df} <- filter({table}, {col} {op} {const})'.format(ret_df=ret_df_name, table=args[0], op=op, col=col, const="max(n)")
        else:
            col, string = args[1].split("|")
            _script = '{ret_df} <- {table} %>% ungroup() %>% filter({col}, {const}))'.format(ret_df=ret_df_name, table=args[0], col=col, const="\"" + string[:-1] + "\"")
        return self._run(_script, ret_df_name)

    def eval_filters(self, node, args):
        ret_df_name = self._fresh_table()
        arg1 = self._filter_operand(args[1])
        arg2 = self._filter_operand(args[2])
        _script = '{ret_df} <- {table} %>% ungroup() %>% filter({arg1} {Operator} {arg2})'.format(ret_df=ret_df_name, table=args[0], arg1=arg1, arg2=arg2, Operator=args[3])
        return self._run(_script, ret_df_name)

    def eval_summariseGrouped(self, node, args):
        ret_df_name = self._fresh_table()
        if "paste" in args[1]:
            args[1] = '{at} = paste({at}, collapse=:)'.format(at=args[1].split("|")[1])
        _script = '{ret_df} <- {table} %>% group_by({cols}) %>% summarise({cond})'.format(ret_df=ret_df_name, table=args[0], cols=get_collist(args[2]), cond=args[1].replace(":", "\":\""))
        return self._run(_script, ret_df_name)

    def eval_summarise(self, node, args):
        ret_df_name = self._fresh_table()
        if "paste" in args[1]:
            args[1] = '{at} = paste({at}, collapse=\":\")'.format(at=args[1].split("|")[1])
        _script = '{ret_df} <- {table} %>% summarise({cond})'.format(ret_df=ret_df_name, table=args[0], cond=args[1])
        return self._run(_script, ret_df_name)

    def eval_inner_join(self, node, args):
        ret_df_name = self._fresh_table()
        _script = '{ret_df} <- inner_join({t1}, {t2})'.format(ret_df=ret_df_name, t1=args[0], t2=args[1])
        return self._run(_script, ret_df_name)

    def eval_inner_join3(self, node, args):
        ret_df_name = self._fresh_table()
        _script = '{ret_df} <- inner_join(inner_join({t1}, {t2}), {t3})'.format(ret_df=ret_df_name, t1=args[0], t2=args[1], t3=args[2])
        return self._run(_script, ret_df_name)

    def eval_inner_join4(self, node, args):
        ret_df_name = self._fresh_table()
        _script = '{ret_df} <- inner_join(inner_join(inner_join({t1}, {t2}), {t3}), {t4})'.format(ret_df=ret_df_name, t1=args[0], t2=args[1], t3=args[2], t4=args[3])
        return self._run(_script, ret_df_name)

    def eval_anti_join(self, node, args):
        ret_df_name = self._fresh_table()
        _script = '{ret_df} <- anti_join(select({t1},{col}), select({t2}, {col}))'.format(ret_df=ret_df_name, t1=args[0], t2=args[1], col=get_collist(args[2]))
        return self._run(_script, ret_df_name)

    def eval_left_join(self, node, args):
        ret_df_name = self._fresh_table()
        _script = '{ret_df} <- left_join({t1}, {t2})'.format(ret_df=ret_df_name, t1=args[0], t2=args[1])
        return self._run(_script, ret_df_name)

    def eval_bind_rows(self, node, args):
        ret_df_name = self._fresh_table()
        _script = '{ret_df} <- bind_rows({t1}, {t2})'.format(ret_df=ret_df_name, t1=args[0], t2=args[1])
        return self._run(_script, ret_df_name)

    def eval_intersect(self, node, args):
        ret_df_name = self._fresh_table()
        _script = '{ret_df} <- intersect(select({t1},{col}), select({t2}, {col}))'.format(ret_df=ret_df_name, t1=args[0], t2=args[1], col=get_collist(args[2]))
        return self._run(_script, ret_df_name)

    def eval_unite(self, node, args):
        ret_df_name = self._fresh_table()
        _script = '{ret_df} <- unite({t1}, {col1}, which(colnames({t1})=="{col1}"), {col2}, which(colnames({t1})=="{col2}"), sep=":")'.format(ret_df=ret_df_name, t1=args[0], col1=get_collist(args[1]), col2=get_collist(args[2]))
        return self._run(_script, ret_df_name)

    ## Abstract interpreter

    def apply_row(self, val):
        """Row count of *val* (an R data frame, or the name of one)."""
        df = robjects.r(val) if isinstance(val, str) else val
        return df.nrow

    def apply_col(self, val):
        """Column count of *val* (an R data frame, or the name of one)."""
        df = robjects.r(val) if isinstance(val, str) else val
        return df.ncol

    def apply_name(self, val):
        """Creation order of table *val* (its counter value at creation)."""
        return _tables[val]
def divide_int_str_constants(const):
    """Partition *const* into ``(string_constants, integer_constants)``.

    A token counts as an integer when it is exactly ``"0"`` or parses to a
    non-zero int; tokens that do not parse go to the string list.  (Tokens
    that parse to zero but are not spelled ``"0"``, e.g. ``"00"``, are
    silently dropped — same as the original behaviour.)
    """
    str_const, int_const = [], []
    for token in const:
        try:
            if token == '0' or int(token):
                int_const.append(token)
        except (TypeError, ValueError):
            str_const.append(token)
    return str_const, int_const
def divide_int_str_attributes(files, attrs):
    """Classify each attribute in *attrs* as string- or integer-valued.

    For every attribute, each CSV file in *files* whose header contains a
    column of that name is inspected: the value in the first data row
    decides the type.  The pseudo-attribute ``"n"`` (row count) is always
    an integer.  Returns ``(str_attr, int_attr)``, two duplicate-free lists.

    Fixes a defect of the original version, which called ``f.readline()``
    twice inside the type test and therefore compared the ``'0'`` case and
    the ``int()`` case against two *different* rows — misclassifying
    integer columns whenever a file held only one data row.
    """
    str_attr, int_attr = [], []
    for a in attrs:
        if a == "n":
            if a not in int_attr:
                int_attr.append(a)
        for i in files:
            with open(i, 'r') as f:
                columns = f.readline()[:-1].split(",")
                if a not in columns:
                    continue
                ind = columns.index(a)
                try:
                    # read the first data row exactly once and reuse the value
                    value = f.readline()[:-1].split(",")[ind]
                    if value == '0' or int(value):
                        if a not in int_attr:
                            int_attr.append(a)
                except (TypeError, ValueError, IndexError):
                    if a not in str_attr:
                        str_attr.append(a)
    return str_attr, int_attr
def find_filter_conditions(str_const, int_const, str_attr, int_attr, new_int_attr, aggrs, files, necessary_conditions, summarise_conditions):
    """Enumerate every candidate filter() condition for the DSL.

    Pairs constants with attributes of the matching kind (string constants
    get ==/!=, integer constants get the full comparison set), appends one
    "necessary condition" group per constant to *necessary_conditions*
    (mutated in place, then re-filtered), and records (condition,
    summarise-condition) ordering constraints in *happens_before*.

    Returns (conditions, non_empty_necessary_condition_groups,
    happens_before_pairs).
    """
    conditions = []
    int_ops = ["==", ">", "<", ">=", "<="]
    str_ops = ["==", "!="]
    happens_before = []
    for sc in str_const + int_const:
        necessary_conditions.append([])
        for sa in str_attr:
            # att: does constant sc actually occur in column sa of some file?
            att = False
            for i in files:
                if att:
                    break
                with open(i, 'r') as f:
                    columns = f.readline()[:-1].split(",")
                    if sa in columns:
                        ind = columns.index(sa)
                        for l in f:
                            if l[:-1].split(",")[ind] == sc:
                                att = True
                                break
                        else:
                            # sc not found in this file: try the next one
                            continue
            if 'like' in aggrs:
                # substring matches are allowed even when sc never occurs verbatim
                conditions.append('str_detect({sa}|{sc})'.format(sa=sa, sc=sc))
                necessary_conditions[-1].append(conditions[-1])
            if not att:
                continue
            for so in str_ops:
                conditions.append('{sa} {so} {sc}'.format(sa=sa, so=so, sc=sc))
                necessary_conditions[-1].append(conditions[-1])
    for ic in int_const:
        necessary_conditions.append([])
        for ia in int_attr + new_int_attr:
            if ic == ia:
                continue
            for io in int_ops:
                conditions.append('{ia} {io} {ic}'.format(ia=ia, io=io, ic=ic))
                necessary_conditions[-1].append(conditions[-1])
                if ia == "n":
                    # filtering on n only makes sense after "n = n()" was computed
                    happens_before.append((conditions[-1],"n = n()"))
    for ic in new_int_attr:
        # comparisons against aggregation-derived columns (no necessary group)
        for ia in int_attr + new_int_attr:
            if ic == ia:
                continue
            for io in int_ops:
                conditions.append('{ia} {io} {ic}'.format(ia=ia, io=io, ic=ic))
                for sc in summarise_conditions:
                    if ic in sc:
                        happens_before.append((conditions[-1], sc))
    necessary_conditions = list(filter(lambda a: a != [], necessary_conditions))
    # if "max" in aggrs and "n" in aggrs or "max(n)" in aggrs:
    if "max(n)" in aggrs:
        conditions.append("n == max(n)")
        happens_before.append((conditions[-1],"n = n()"))
        necessary_conditions.append([conditions[-1]])
    return conditions, necessary_conditions, happens_before
def find_summarise_conditions(int_attr, str_attr, aggrs, necessary_conditions):
    """Build one summarise() condition group per aggregation in *aggrs*.

    ``"like"`` and ``"max(n)"`` produce no summarise conditions; ``"n"``
    becomes the row count ``n = n()``; ``concat`` expands to a ``paste``
    marker per attribute; every other aggregator is applied to each
    integer attribute, creating a new integer column named ``<agg><attr>``.
    Groups are appended to *necessary_conditions* (mutated in place).

    Returns (non-empty condition groups, new integer column names,
    all generated conditions).
    """
    conditions = []
    new_int_attr = []
    for agg in aggrs:
        if agg == "like":
            continue
        group = []
        necessary_conditions.append(group)
        if agg == "n":
            cond = '{a} = {a}()'.format(a=agg)
            conditions.append(cond)
            group.append(cond)
        elif 'concat' in agg:
            for attr in int_attr + str_attr:
                cond = 'paste|{at}'.format(at=attr)
                conditions.append(cond)
                group.append(cond)
        elif agg != "max(n)":
            for attr in int_attr:
                cond = '{a}{ia} = {a}({ia})'.format(ia=attr, a=agg)
                conditions.append(cond)
                group.append(cond)
                new_int_attr.append('{a}{ia}'.format(ia=attr, a=agg))
    return [g for g in necessary_conditions if g != []], new_int_attr, conditions
def find_conditions(files, const, attrs, aggrs, bools):
    """Derive all candidate filter and summarise conditions for the spec.

    Splits constants and attributes by type, generates the summarise
    conditions first (their new columns feed the filter conditions), and
    sets the module-global ``attributes`` to the full list of
    integer-valued attribute names.

    Returns (filter_conditions, summarise_conditions,
    necessary_condition_groups, happens_before_pairs).
    """
    global attributes
    str_const, int_const = divide_int_str_constants(const)
    str_attr, int_attr = divide_int_str_attributes(files, attrs)
    groups, new_int_attr, sum_cond = find_summarise_conditions(int_attr, str_attr, aggrs, [])
    filt_cond, groups, happens_before = find_filter_conditions(str_const, int_const, str_attr, int_attr, new_int_attr, aggrs, files, groups, sum_cond)
    attributes = int_attr + new_int_attr
    return filt_cond, sum_cond, groups, happens_before
def find_necessary_conditions(conds):
    """Render each condition group as a ``constant_occurs`` DSL predicate.

    Stops at the first empty group (same as the original).  Returns the
    concatenated predicate text.
    """
    predicates = ""
    for group in conds:
        if group == []:
            break
        predicates += "\npredicate constant_occurs(\"" + ",".join(group) + "\");"
    return predicates
def happensBefore(conds):
    """Render each (first, second) pair as a ``happens_before`` DSL predicate.

    Stops at the first empty tuple (same as the original).  Returns the
    concatenated predicate text.
    """
    predicates = ""
    for pair in conds:
        if pair == ():
            break
        predicates += '\npredicate happens_before("{0}","{1}");'.format(pair[0], pair[1])
    return predicates
def DSL():
    """Assemble the concrete Tyrell DSL text for the current benchmark.

    Parses the spec file named by the last command-line argument (lines:
    inputs, output, const, aggrs, attrs, [bools,] loc), loads every input
    table plus the expected output into the embedded R session, derives
    the candidate filter/summarise conditions, and instantiates the
    ``example/squares.tyrell`` template with them.

    Returns (dsl_text, input_table_names, r_setup_script, loc).
    """
    global counter_
    global _tables
    global output_attrs
    prog_out = ""
    Operators = ""
    concat = ""
    input_tables, ags, cns, ats, bls, db_columns = [], [], [], [], [], []
    # Optional DSL fragments — only spliced into the template when the
    # spec actually needs the corresponding component.
    filtersOne = "\nfunc filter: Table r -> Table a, FilterCondition f {\n row(r) <= row(a);\n col(r) == col(a);\n}"
    filters = filtersOne
    filterAndOr = "\nfunc filters: Table r -> Table a, FilterCondition f, FilterCondition g, Op o {\n row(r) <= row(a);\n col(r) == col(a);\n}"
    filterPredicateOne = "\npredicate is_not_parent(inner_join3, filter, 100);\npredicate is_not_parent(inner_join4, filter, 100);\npredicate is_not_parent(filter, filter, 100);\npredicate distinct_inputs(filter);\n"
    filterPredicate = filterPredicateOne
    filterPredicateTwo = "predicate distinct_filters(filters, 1, 2);\npredicate is_not_parent(filters, filters, 100);\npredicate is_not_parent(inner_join, filters, 100);\npredicate is_not_parent(inner_join3, filters, 100);\npredicate is_not_parent(inner_join4, filters, 100);\npredicate distinct_inputs(filters);"
    summarise = "\nfunc summariseGrouped: Table r -> Table a, SummariseCondition s, Cols b {\n row(r) <= row(a);\n col(r) <= 3;\n}\n\npredicate is_not_parent(inner_join4, summariseGrouped, 100);\npredicate is_not_parent(summariseGrouped, summariseGrouped, 100);"
    # (an alternative summarise fragment with an ungrouped summarise
    # production was removed here; see version history)
    # read the input and output files
    f_in = open(argv[-1], 'r')
    inputs = f_in.readline()[:-1].split(":")[1].replace(" ","").split(",")
    prog_out += "con <- DBI::dbConnect(RSQLite::SQLite(), \":memory:\")\n"
    for i in inputs:
        # load each input CSV into R and record it in the transcript script
        _script = 'input{cnt} <- read.table("{file}", sep =",", header=T)\ninput{cnt}\n'.format(file=i, cnt=counter_)
        prog_out += _script
        prog_out += 'input{cnt} <- copy_to(con,input{cnt})\n'.format(cnt=counter_)
        benchmark1_input = robjects.r(_script)
        input_tables.append('input{cnt}'.format(cnt=counter_))
        _tables[input_tables[-1]] = counter_
        counter_+=1
        with open(i, 'r') as f:
            db_columns = list(set(db_columns + f.readline()[:-1].split(",")))
    output = f_in.readline()[:-1].split(":")[1].replace(" ","")
    _script = 'expected_output <- read.table("{file}", sep =",", header=T)\nexpected_output\n'.format(file=output)
    prog_out += _script
    _tables['expected_output'] = counter_
    counter_+=1
    benchmark1_output = robjects.r(_script)
    # read the list of constants from the input
    consts = f_in.readline()[:-1].replace(" ","").split(":",1)
    intConst = findConst(consts[1].replace(" ","").split(","))
    filterFlag = 0
    if(consts[1]!=''):
        filterFlag = 1
        consts_temp = ""
        if len(consts[1].split(","))>1:
            # more than one constant: allow conjunctive/disjunctive filters
            filterFlag = 2
            filters = filterAndOr
            filterPredicate = filterPredicateTwo
            Operators = "enum Op{\n \"|\", \"&\"\n}"
        cns = consts[1].replace(" ","").replace("\"","").split(",")
    else:
        filterPredicate, filters, consts = "", "", ""
    # read the list of aggregation functions from the input file
    aggrs = f_in.readline()[:-1].replace(" ","").split(":")
    if aggrs[1]!='':
        ags = aggrs[1].replace(" ","").replace("\"","").split(",")
        for a in ags:
            if a == "concat":
                # concat is realised with tidyr's unite, not with summarise
                ags.remove(a)
                concat = "\nfunc unite: Table r -> Table a, Col c, Col d {\n row(r) <= row(a);\n col(r) < col(a);\n}"
        if (len(ags) == 1 and "like" in ags) or len(ags)==0:
            summarise = ""
    else:
        aggrs = ""
        summarise = ""
    if "\"max(n)\"" in aggrs:
        # max(n) is handled as a constant usable inside filter conditions
        cns.append("max(n)")
        aggrs = aggrs.replace(",\"max(n)\"", "")
    file_path = 'example/squares.tyrell'
    # read the list of attributes from the input file
    attrs = f_in.readline()[:-1].replace(" ","").split(":")
    if(attrs[1]!=''):
        ats = list(attrs[1].replace(" ","").replace("\"","").split(","))
        ats = ats + ["n"] if "n" in ags and intConst else ats
    elif "\"n\"" in aggrs:
        ats.append("n")
    else:
        attrs = ""
    # the "bools:" line is optional; when present, loc is on the next line
    hasBools = False
    bools = f_in.readline()[:-1].replace(" ","").split(":")
    if "bools" in bools:
        hasBools = True
    if not hasBools:
        loc = int(bools[1])
    else:
        loc = int(f_in.readline()[:-1].replace(" ","").split(":")[1])
    filterConditions, summariseConditions, necessary_conditions, happens_before = find_conditions(inputs, cns, ats, ags, bls)
    if filters == "" and filterConditions != []:
        # filter conditions were derived even though no constants were declared
        filters = filtersOne
        filterPredicate = "\npredicate is_not_parent(filter, filter, 100);"
    if len(necessary_conditions) > 1:
        filters = filtersOne + filterAndOr
        filterPredicate = "predicate distinct_filters(filters, 1, 2);\n\npredicate is_not_parent(filters, filter, 100);\npredicate is_not_parent(filter, filters, 100);\npredicate is_not_parent(filter, filter, 100);\npredicate is_not_parent(filters, filters, 100);"
        Operators = "enum Op{\n \"|\", \"&\"\n}"
    necessary_conditions = find_necessary_conditions(necessary_conditions)
    necessary_conditions += happensBefore(happens_before)
    # find which attributes are in the output table, and format the DSL
    with open(output, 'r') as f:
        cols = f.readline()
        output_attrs = cols[:-1]
    cols = str(getColsPermutations(str(db_columns)[1:-1].replace("'","").replace(" ","").split(","), 2))[1:-1].replace("'", "\"")
    oneColumn = str(getColsPermutations(str(db_columns)[1:-1].replace("'","").replace(" ","").split(","), 1))[1:-1].replace("'", "\"")
    with open(dir+file_path, 'r') as f:
        spec_str = f.read()
    fil_conditions = "enum FilterCondition{\n"+ str(filterConditions)[1:-1].replace("'","\"") +"\n}\n" if filterConditions!=[] else ""
    sum_conditions = "enum SummariseCondition{\n"+ str(summariseConditions)[1:-1].replace("'","\"") +"\n}\n" if summariseConditions != [] else ""
    return spec_str.format(cols=cols, Tables=str("Table, "*len(inputs))[:-2], summarise=summarise, filters=filters, filterPred=filterPredicate, FilterConditions=fil_conditions, SummariseConditions=sum_conditions, Op=Operators, necessaryConditions=necessary_conditions, SelectCols=str("\""+output_attrs+"\""), col=oneColumn, concat=concat), input_tables, prog_out, loc
index_table_aux = 0
def beautifier(sql):
    """Strip the dbplyr-generated ``TBL_LEFT``/``TBL_RIGHT``/``LHS``/``RHS``
    column-alias prefixes from *sql*, then pretty-print it with sqlparse
    (re-indented, upper-cased keywords)."""
    alias_patterns = (
        "\`TBL_LEFT\`\.\`[^,\`]*\` AS |\`LHS\`\.\`[^,\`]*\` AS ",
        "\`TBL_RIGHT\`\.\`[^,\`]*\` AS |\`RHS\`\.\`[^,\`]*\` AS ",
    )
    for pattern in alias_patterns:
        sql = re.sub(pattern, "", sql)
    return sp.format(sql, reindent=True, keyword_case='upper')
def main(seed=None):
    """Run the synthesis loop.

    Builds the DSL spec, then enumerates programs with an increasing
    number of lines of code until one reproduces the expected output
    table.  Returns (R program text, beautified SQL query).

    Note: *seed* is currently unused by the loop — TODO confirm whether it
    was meant to seed the enumerator.
    """
    global getProgram, final_program
    if not debug:
        # without -d, hide library noise by redirecting stderr to a log file
        sys.stderr = open(dir+'output.err', 'w+')
    warnings.filterwarnings("ignore", category=RRuntimeWarning)
    warnings.filterwarnings('ignore')
    logger.info('Parsing Spec...')
    dsl, input_tables, prog_out, loc = DSL()
    spec = S.parse(dsl)
    logger.info('Parsing succeeded')
    logger.info('Building synthesizer...')
    # the loc read from the spec file is ignored: enumeration always starts
    # at one line of code and grows until a program is found
    loc = 1
    while (True):
        logger.info("Lines of Code: "+str(loc))
        # choose the enumerator: "tree" uses the SMT tree encoding, otherwise
        # the line-based encoding with optional symmetry breaking (-on/-off)
        if argv[1]=="tree":
            enumerator = SmtEnumerator(spec, depth=loc+1, loc=loc)
        else:
            if "-off" in argv:
                enumerator = LinesEnumerator(spec, depth=loc+1, loc=loc)
            elif "-on" in argv:
                enumerator = LinesEnumerator(spec, depth=loc+1, loc=loc, break_sym_online=True)
            else:
                enumerator = LinesEnumerator(spec, depth=loc+1, loc=loc, sym_breaker=False)
        synthesizer = Synthesizer(
            #loc: # of function productions
            enumerator=enumerator,
            # decider=ExampleConstraintDecider(
            decider=ExampleConstraintPruningDecider(
                spec=spec,
                interpreter=SquaresInterpreter(),
                examples=[
                    Example(input=input_tables, output='expected_output'),
                ],
                equal_output=eq_r
            )
        )
        logger.info('Synthesizing programs...')
        prog = synthesizer.synthesize()
        if prog is not None:
            logger.info('Solution found: {}'.format(prog))
            # re-run the winning program with transcript recording on, so that
            # final_program captures the complete R script
            getProgram = True
            interpreter=SquaresInterpreter()
            evaluation = interpreter.eval(prog, input_tables)
            if dir == "./":
                print()
                if "-nr" not in argv:
                    print("------------------------------------- R Solution ---------------------------------------\n")
                    print(prog_out)
                    print(final_program)
                    print();print()
                print("+++++++++++++++++++++++++++++++++++++ SQL Solution +++++++++++++++++++++++++++++++++++++\n")
            robjects.r('{rscript}'.format(rscript=prog_out+final_program))
            sql_query = robjects.r('sql_render({result_table})'.format(result_table=evaluation))
            if dir == "./":
                print(beautifier(str(sql_query)[6:]))
                print()
            return final_program,beautifier(str(sql_query)[6:])
        else:
            logger.info('No more queries to be tested. Solution not found!')
            logger.info('Increasing the number of lines of code.')
            loc = loc + 1
debug=False  # set by the -d flag: keep stderr and enable DEBUG logging
# NOTE: shadows the builtin dir(); path prefix for all generated files
# ("./" when run directly, "../" when driven via the Squares wrapper class).
dir ="./"
if __name__ == '__main__':
    # (earlier stderr-redirection experiments, kept for reference)
    # sys.stderr = open('output.err', 'w')
    # sys.stderr.close()
    # sys.stderr = sys.__stderr__
    # -d enables verbose debug logging; otherwise only critical messages show
    if "-d" in argv:
        debug = True
        print("Hey")
        logger.setLevel('DEBUG')
    else:
        logger.setLevel('CRITICAL')
    seed = None
    if "-h" in argv:
        exit("Usage: python3 squaresEnumerator.py [tree|lines] [flags -h, ...] input.in\nflags:\n-on : computing symmetries online\n-off : computing symmetries offline\n-d : debug info\n\n-nr : only SQL solution\n\nDefault: lines enumerator and without symmetry breaking")
    # an optional leading integer argument is interpreted as a RNG seed
    if len(argv) > 1:
        try:
            seed = int(argv[1])
        except ValueError:
            pass
    prog = main(seed)
class Squares(object):
    """Programmatic front-end: persists the caller-supplied tables and spec
    options to ``users/`` files on disk, then drives the synthesizer via
    :func:`main`."""
    def __init__(self):
        super(Squares, self).__init__()
        # Template of the on-disk specification file consumed by DSL().
        self.template = "inputs: {inputs}\noutput: {output}\nconst: {const}\naggrs: {aggrs}\nattrs: {attrs}\nbools:\nloc: {loc}\n"
    def synthesize(self, inputs, output_ex, const="", aggrs="", attrs="", loc=0):
        """Write *inputs* (CSV texts) and *output_ex* to fresh files under
        users/tables, write the matching spec file under users/files, fake
        the command line, and return main()'s (R program, SQL) result."""
        global argv, dir
        dir = "../"
        ins = list([])
        temp = self.template
        # locate the users/ tree both from inside and outside the package root
        try:
            path, dirs, files = next(os.walk("../users/files"))
        except:
            path, dirs, files = next(os.walk("users/files"))
            dir="./"
        # number the new benchmark after the existing spec files
        file_count = str(len(files) +1)
        i_c = 0
        for i in inputs:
            input = open(dir+"users/tables/"+"i"+str(file_count)+str(i_c),"w+")
            input.write(i)
            input.close()
            ins.append(dir+"users/tables/"+"i"+str(file_count)+str(i_c))
            i_c += 1
        output = open(dir+"users/tables/"+"o"+str(file_count),"w+")
        output.write(output_ex)
        output.close()
        output = dir+"users/tables/o"+str(file_count)
        input_file_name = dir+"users/files/"+"f"+str(file_count)
        input_file = open(input_file_name, "w+")
        inputs=str(ins).replace("\'","").replace("]","").replace("[","")
        input_file.write(temp.format(inputs=inputs,output=output, const="\""+const.replace(",","\",\"").replace(" ","")+"\"", aggrs="\""+aggrs.replace(",","\",\"").replace(" ","")+"\"", attrs="\""+attrs.replace(",","\",\"").replace(" ","")+"\"", loc=str(loc)).replace("\"\"",""))
        input_file.close()
        # main() reads its options from the module-level argv, so fake it
        argv = []
        argv.append("lines")
        argv.append(input_file_name)
        return main()
# # not used
# def beautifier_aux(tokens):
# # print(tokens)
# global index_table_aux
# sub_query = ""
# left_index = right_index = None
# for t in tokens:
# if "(SELECT" in str(t):
# if "AS `TBL_RIGHT`" == str(t)[-13:]:
# right_index = index_table_aux
# index_table_aux += 1
# elif "AS `TBL_LEFT`" == str(t)[-13:]:
# left_index = index_table_aux
# index_table_aux += 1
# if "`TBL_LEFT`" in str(t):
# left_index = index_table_aux
# index_table_aux += 1
# if "`TBL_RIGHT`" in str(t):
# right_index = index_table_aux
# index_table_aux += 1
# for t in tokens:
# if "(SELECT" in str(t):
# # print(t)
# if "AS `TBL_RIGHT`" == str(t)[-13:]:
# aux_str = str(t).split("AS `TBL_RIGHT`")
# new_input = sp.parse(aux_str[0])[0]
# # print("RIGHT", t, "-->", new_input)
# sub_query += beautifier_aux(new_input) + " AS " + "table_"+str(right_index)
# elif "AS `TBL_LEFT`" == str(t)[-13:]:
# aux_str = str(t).split("AS `TBL_LEFT`")
# new_input = sp.parse(aux_str[0])[0]
# # print("LEFT", t, "-->", new_input)
# sub_query += beautifier_aux(new_input) + " AS " + "table_"+str(left_index)
# else:
# sub_query += beautifier_aux(t)
# else:
# sub_query += str(t).replace("`TBL_LEFT`", "table_"+str(left_index)).replace("`TBL_RIGHT`", "table_"+str(right_index))
# return sub_query
|
[
"tyrell.decider.Example",
"sqlparse.format",
"tyrell.interpreter.GeneralError",
"warnings.filterwarnings",
"sys.argv.append",
"itertools.permutations",
"tyrell.spec.parse",
"rpy2.robjects.r",
"os.walk",
"tyrell.logger.get_logger",
"re.sub"
] |
[((785, 844), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RRuntimeWarning'}), "('ignore', category=RRuntimeWarning)\n", (808, 844), False, 'import warnings\n'), ((855, 875), 'tyrell.logger.get_logger', 'get_logger', (['"""tyrell"""'], {}), "('tyrell')\n", (865, 875), False, 'from tyrell.logger import get_logger\n'), ((995, 1112), 'rpy2.robjects.r', 'robjects.r', (['"""\n\tlibrary(dplyr)\n\tlibrary(dbplyr)\n\tlibrary(tidyr)\n\tlibrary(stringr)\n\toptions(warn=-1)\n """'], {}), '(\n """\n\tlibrary(dplyr)\n\tlibrary(dbplyr)\n\tlibrary(tidyr)\n\tlibrary(stringr)\n\toptions(warn=-1)\n """\n )\n', (1005, 1112), True, 'import rpy2.robjects as robjects\n'), ((1508, 1528), 'rpy2.robjects.r', 'robjects.r', (['_rscript'], {}), '(_rscript)\n', (1518, 1528), True, 'import rpy2.robjects as robjects\n'), ((19191, 19210), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (19201, 19210), True, 'import rpy2.robjects as robjects\n'), ((23626, 23715), 're.sub', 're.sub', (['"""\\\\`TBL_LEFT\\\\`\\\\.\\\\`[^,\\\\`]*\\\\` AS |\\\\`LHS\\\\`\\\\.\\\\`[^,\\\\`]*\\\\` AS """', '""""""', 'sql'], {}), "('\\\\`TBL_LEFT\\\\`\\\\.\\\\`[^,\\\\`]*\\\\` AS |\\\\`LHS\\\\`\\\\.\\\\`[^,\\\\`]*\\\\` AS ',\n '', sql)\n", (23632, 23715), False, 'import re\n'), ((23707, 23797), 're.sub', 're.sub', (['"""\\\\`TBL_RIGHT\\\\`\\\\.\\\\`[^,\\\\`]*\\\\` AS |\\\\`RHS\\\\`\\\\.\\\\`[^,\\\\`]*\\\\` AS """', '""""""', 'sql'], {}), "('\\\\`TBL_RIGHT\\\\`\\\\.\\\\`[^,\\\\`]*\\\\` AS |\\\\`RHS\\\\`\\\\.\\\\`[^,\\\\`]*\\\\` AS ',\n '', sql)\n", (23713, 23797), False, 'import re\n'), ((23790, 23841), 'sqlparse.format', 'sp.format', (['sql'], {'reindent': '(True)', 'keyword_case': '"""upper"""'}), "(sql, reindent=True, keyword_case='upper')\n", (23799, 23841), True, 'import sqlparse as sp\n'), ((24058, 24117), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RRuntimeWarning'}), "('ignore', category=RRuntimeWarning)\n", 
(24081, 24117), False, 'import warnings\n'), ((24119, 24152), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (24142, 24152), False, 'import warnings\n'), ((24250, 24262), 'tyrell.spec.parse', 'S.parse', (['dsl'], {}), '(dsl)\n', (24257, 24262), True, 'import tyrell.spec as S\n'), ((2297, 2317), 'rpy2.robjects.r', 'robjects.r', (['_rscript'], {}), '(_rscript)\n', (2307, 2317), True, 'import rpy2.robjects as robjects\n'), ((18680, 18699), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (18690, 18699), True, 'import rpy2.robjects as robjects\n'), ((28437, 28457), 'sys.argv.append', 'argv.append', (['"""lines"""'], {}), "('lines')\n", (28448, 28457), False, 'from sys import argv\n'), ((28460, 28488), 'sys.argv.append', 'argv.append', (['input_file_name'], {}), '(input_file_name)\n', (28471, 28488), False, 'from sys import argv\n'), ((3274, 3293), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (3284, 3293), True, 'import rpy2.robjects as robjects\n'), ((4227, 4246), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (4237, 4246), True, 'import rpy2.robjects as robjects\n'), ((5284, 5303), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (5294, 5303), True, 'import rpy2.robjects as robjects\n'), ((5950, 5969), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (5960, 5969), True, 'import rpy2.robjects as robjects\n'), ((6545, 6564), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (6555, 6564), True, 'import rpy2.robjects as robjects\n'), ((7028, 7047), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (7038, 7047), True, 'import rpy2.robjects as robjects\n'), ((7524, 7543), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (7534, 7543), True, 'import rpy2.robjects as robjects\n'), ((8069, 8088), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (8079, 8088), True, 
'import rpy2.robjects as robjects\n'), ((8606, 8625), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (8616, 8625), True, 'import rpy2.robjects as robjects\n'), ((9087, 9106), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (9097, 9106), True, 'import rpy2.robjects as robjects\n'), ((9568, 9587), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (9578, 9587), True, 'import rpy2.robjects as robjects\n'), ((10104, 10123), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (10114, 10123), True, 'import rpy2.robjects as robjects\n'), ((10704, 10723), 'rpy2.robjects.r', 'robjects.r', (['_script'], {}), '(_script)\n', (10714, 10723), True, 'import rpy2.robjects as robjects\n'), ((10944, 10959), 'rpy2.robjects.r', 'robjects.r', (['val'], {}), '(val)\n', (10954, 10959), True, 'import rpy2.robjects as robjects\n'), ((11093, 11108), 'rpy2.robjects.r', 'robjects.r', (['val'], {}), '(val)\n', (11103, 11108), True, 'import rpy2.robjects as robjects\n'), ((1884, 1907), 'itertools.permutations', 'permutations', (['cols', 'num'], {}), '(cols, num)\n', (1896, 1907), False, 'from itertools import permutations\n'), ((3397, 3411), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (3409, 3411), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((4350, 4364), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (4362, 4364), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((5408, 5422), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (5420, 5422), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((6076, 6090), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (6088, 6090), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((6671, 6685), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (6683, 6685), False, 
'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((7154, 7168), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (7166, 7168), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((7651, 7665), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (7663, 7665), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((8196, 8210), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (8208, 8210), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((8732, 8746), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (8744, 8746), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((9213, 9227), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (9225, 9227), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((9694, 9708), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (9706, 9708), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((10230, 10244), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (10242, 10244), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((10830, 10844), 'tyrell.interpreter.GeneralError', 'GeneralError', ([], {}), '()\n', (10842, 10844), False, 'from tyrell.interpreter import PostOrderInterpreter, GeneralError\n'), ((27457, 27482), 'os.walk', 'os.walk', (['"""../users/files"""'], {}), "('../users/files')\n", (27464, 27482), False, 'import os\n'), ((27522, 27544), 'os.walk', 'os.walk', (['"""users/files"""'], {}), "('users/files')\n", (27529, 27544), False, 'import os\n'), ((25082, 25135), 'tyrell.decider.Example', 'Example', ([], {'input': 'input_tables', 'output': '"""expected_output"""'}), "(input=input_tables, output='expected_output')\n", (25089, 25135), False, 'from tyrell.decider import Example, 
ExampleConstraintDecider, ExampleConstraintPruningDecider\n')]
|
import PBRTv3Lex
import PBRTv3Yacc
from Directives import *
import sys
class PBRTv3Loader:
    """Loads a PBRT v3 scene file and converts the parsed structure into a
    Scene object (sensor/integrator directives plus the world block of
    materials, shapes, lights and textures)."""

    def importFile(self, filename):
        """Read *filename* and run it through the PBRTv3 parser.

        Returns the raw nested scene structure produced by PBRTv3Yacc.
        """
        # Use a context manager so the file handle is closed deterministically
        # (the original `open(filename).read()` leaked the handle).
        with open(filename) as source:
            data = source.read()
        sceneStructure = PBRTv3Yacc.parse(data)
        return sceneStructure

    def loadScene(self, sceneStructure):
        """Dispatch the parsed structure to directive and/or world loading.

        A one-element structure is either a directives-only or a world-only
        file; a two-element structure is (directives, world).
        """
        scene = Scene()
        if len(sceneStructure) == 1:
            if sceneStructure[0][0] in ['Integrator', 'Sampler', 'Film', 'Filter', 'Camera', 'Transform']:
                scene = self.loadDirectives(sceneStructure[0], scene)
            else:
                scene = self.loadWorld(sceneStructure[0], scene)
        else:
            scene = self.loadDirectives(sceneStructure[0], scene)
            scene = self.loadWorld(sceneStructure[1], scene)
        return scene

    def loadDirectives(self, directiveStructure, scene):
        """Populate scene.integrator and scene.sensor from the top-level
        directives (Integrator, Camera, Sampler, Film, PixelFilter,
        Transform)."""
        scene.sensor = Sensor()
        for struct in directiveStructure:
            directive = struct[0]
            if directive == 'Integrator':
                scene.integrator.type = struct[1]
                if struct[2] is not None:
                    scene.integrator.params = self.loadParams(struct[2])
            elif directive == 'Camera':
                scene.sensor.type = struct[1]
                if struct[2] is not None:
                    scene.sensor.params = self.loadParams(struct[2])
            elif directive == 'Sampler':
                scene.sensor.sampler.type = struct[1]
                if struct[2] is not None:
                    scene.sensor.sampler.params = self.loadParams(struct[2])
            elif directive == 'Film':
                scene.sensor.film.type = struct[1]
                if struct[2] is not None:
                    scene.sensor.film.params = self.loadParams(struct[2])
            elif directive == 'PixelFilter':
                scene.sensor.film.filter = struct[1]
            elif directive == 'Transform':
                scene.sensor.transform = Transform()
                if struct[2] is not None:
                    scene.sensor.transform.matrix = struct[2]
                # Reshape the flat matrix into 4-element rows. NOTE(review):
                # this runs even when struct[2] is None, relying on
                # Transform() providing a default matrix — confirm.
                scene.sensor.transform.matrix = [scene.sensor.transform.matrix[i:i + 4] for i in range(0, len(scene.sensor.transform.matrix), 4)]
        return scene

    def loadWorld(self, worldStructure, scene):
        """Populate scene.materials/.shapes/.lights/.textures from the world
        block directives."""
        materials = []
        shapes = []
        lights = []
        textures = []  # {}
        # Name of the last NamedMaterial directive; assigned to shapes.
        currentRefMaterial = ''
        for struct in worldStructure:
            directive = struct[0]
            if directive == 'Texture':
                name = struct[1]
                tex_type = struct[3]
                params = self.loadParams(struct[4])
                texture = Texture(name, tex_type)
                texture.params = params
                textures.append(texture)
                # textures[name] = texture
            elif directive == 'MakeNamedMaterial':
                material_id = struct[1]
                material_type = ''
                material = None
                if struct[2] is not None:
                    params = self.loadParams(struct[2])
                    # The material type travels as an ordinary 'type'
                    # parameter; promote it and drop it from the params.
                    if 'type' in params:
                        material_type = params['type'].value
                        params.pop('type')
                    material = Material(material_type, material_id)
                    material.params = params
                # NOTE(review): when struct[2] is None a None entry is
                # appended to materials — confirm downstream handles it.
                materials.append(material)
            elif directive == 'NamedMaterial':
                currentRefMaterial = struct[1]
            elif directive == 'Shape':
                # simple shape, no emitter, embed material or transform
                shape = Shape(struct[1])
                shape.params = self.loadParams(struct[2])
                shape.material = currentRefMaterial
                shapes.append(shape)
            elif directive == 'LightSource':
                # simple emitters, no transform or shape involved. they go into lights list
                emitter = Emitter(struct[1])
                emitter.transform = None
                emitter.params = self.loadParams(struct[2])
                lights.append(emitter)
            elif directive == 'AttributeBegin':
                material = None
                emitter = None
                transform = None
                for modifiedStruct in struct[1]:
                    modifiedDirective = modifiedStruct[0]
                    if modifiedDirective == 'AreaLightSource':
                        emitter = Emitter(modifiedStruct[1])
                        emitter.params = self.loadParams(modifiedStruct[2])
                    elif modifiedDirective == 'Transform':
                        transform = Transform()
                        transform.matrix = modifiedStruct[2]
                    elif modifiedDirective == 'Material':
                        material_type = modifiedStruct[1]
                        params = self.loadParams(modifiedStruct[2])
                        material = Material(material_type, '')
                        material.params = params
                    elif modifiedDirective == 'Shape':
                        # Shape inside an attribute block: gets the pending
                        # emitter and transform. NOTE(review): the inline
                        # Material built above is never attached to the shape
                        # (currentRefMaterial is used instead) — confirm.
                        shape = Shape(modifiedStruct[1])
                        shape.params = self.loadParams(modifiedStruct[2])
                        shape.emitter = emitter
                        shape.material = currentRefMaterial
                        shape.transform = transform
                        shapes.append(shape)
            elif directive == 'TransformBegin':
                transform = None
                for modifiedStruct in struct[1]:
                    modifiedDirective = modifiedStruct[0]
                    if modifiedDirective == 'Transform':
                        transform = Transform()
                        transform.matrix = modifiedStruct[2]
                        # Reshape the flat matrix into 4-element rows.
                        transform.matrix = [transform.matrix[i:i + 4] for i in range(0, len(transform.matrix), 4)]
                    elif modifiedDirective == 'Shape':
                        # simple shape, no emitter, embed material or transform
                        shape = Shape(modifiedStruct[1])
                        shape.params = self.loadParams(modifiedStruct[2])
                        shape.material = currentRefMaterial
                        shape.transform = transform
                        shapes.append(shape)
                    elif modifiedDirective == 'LightSource':
                        # simple emitters, no transform or shape involved. they go into lights list
                        emitter = Emitter(modifiedStruct[1])
                        emitter.transform = transform
                        emitter.params = self.loadParams(modifiedStruct[2])
                        lights.append(emitter)
        scene.materials = materials
        scene.lights = lights
        scene.shapes = shapes
        scene.textures = textures
        return scene

    def loadParams(self, paramStructure):
        """Convert (type, name, value) triples into a dict of Param objects
        keyed by parameter name."""
        params = {}
        # Renamed loop variable from 'tuple' to avoid shadowing the builtin.
        for entry in paramStructure:
            param = Param(entry[0], entry[1], entry[2])
            params[entry[1]] = param
        return params

    def __init__(self, filename):
        """Parse *filename* immediately and store the result in self.scene."""
        sceneStruct = self.importFile(filename)
        self.scene = self.loadScene(sceneStruct)
# if __name__ == '__main__':
# loader = PBRTv3Loader(sys.argv[1])
|
[
"PBRTv3Yacc.parse"
] |
[((191, 213), 'PBRTv3Yacc.parse', 'PBRTv3Yacc.parse', (['data'], {}), '(data)\n', (207, 213), False, 'import PBRTv3Yacc\n')]
|
import re
import json
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
BROWSER_WAIT = 10  # seconds Selenium waits implicitly for elements to appear
class APITest(LiveServerTestCase):
    """Browser-driven smoke test for the JSON API served by the live server."""

    fixtures = ['demo.json']

    def setUp(self):
        # A fresh Firefox instance per test, with an implicit element wait.
        browser = webdriver.Firefox()
        browser.implicitly_wait(BROWSER_WAIT)
        self.browser = browser

    def tearDown(self):
        self.browser.quit()

    def test_branch_api(self):
        self.assert_branches_reachable()

    def assert_branches_reachable(self):
        """Fetch the branch list endpoint and check a known branch name."""
        endpoint = '/api/v1/branch/?format=json'
        self.browser.get(self.live_server_url + endpoint)
        payload = extract_json(self.browser.page_source)
        self.assertEqual(payload['objects'][1]['name'], 'Capilano')
def extract_json(page_source):
    """Return the JSON payload embedded in the page's <pre> element.

    Returns an empty dict when no <pre> block is present.
    """
    found = re.search('<pre>(.*)</pre>', page_source)
    if found is None:
        return {}
    return json.loads(found.group(1))
|
[
"re.search",
"json.loads",
"selenium.webdriver.Firefox"
] |
[((793, 834), 're.search', 're.search', (['"""<pre>(.*)</pre>"""', 'page_source'], {}), "('<pre>(.*)</pre>', page_source)\n", (802, 834), False, 'import re\n'), ((917, 938), 'json.loads', 'json.loads', (['page_json'], {}), '(page_json)\n', (927, 938), False, 'import json\n'), ((274, 293), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (291, 293), False, 'from selenium import webdriver\n')]
|
from django.contrib.auth.models import User
from django.db import models
def generate_random_key():
    """Return a random 16-character key using Django's password generator."""
    return User.objects.make_random_password(length=16)
class TestingKey(models.Model):
    """A one-off 16-character key that can be marked as used."""
    # Auto-generated on first save, hence blank=True for forms.
    key = models.CharField(blank=True, max_length=16)
    is_used = models.BooleanField(default=False)
    def save(self, *args, **kwargs):
        # Assign a key only on the first save (no primary key yet).
        if not self.pk:
            self.key = generate_random_key()
        super(TestingKey, self).save(*args, **kwargs)
|
[
"django.db.models.CharField",
"django.contrib.auth.models.User.objects.make_random_password",
"django.db.models.BooleanField"
] |
[((113, 157), 'django.contrib.auth.models.User.objects.make_random_password', 'User.objects.make_random_password', ([], {'length': '(16)'}), '(length=16)\n', (146, 157), False, 'from django.contrib.auth.models import User\n'), ((202, 245), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(16)'}), '(blank=True, max_length=16)\n', (218, 245), False, 'from django.db import models\n'), ((260, 294), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (279, 294), False, 'from django.db import models\n')]
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2016 by <NAME>
:mail: <EMAIL>.
:license: Apache 2.0, see LICENSE for more details.
"""
from flask_admin.contrib.sqla import ModelView
from ..models import FoFModel,get_all_fof
from logging import getLogger
from ..extensions import cache
from wtforms import PasswordField
# Use a module-scoped logger instead of the root logger so log records are
# attributed to this module and can be filtered per-logger; records still
# propagate to root handlers.
logger = getLogger(__name__)
class CustomView(ModelView):
    """Shared base for admin views; override the template attributes below to
    customise every view that inherits from it."""
    pass
    # list_template = 'manager/list.html'
    # create_template = 'manager/create.html'
    # edit_template = 'manager/edit.html'
class FofAdmin(CustomView):
    """Admin view for fund-of-funds records: fixed strategy/rank choices,
    Chinese column captions and a restricted editable column set."""
    column_display_pk = True
    # Closed choice lists for the strategy type and the internal rating.
    form_choices = {'strategy_type': [
        ('股票多头策略','股票多头策略'),
        ('股票多空策略', '股票多空策略'),
        ('事件驱动策略', '事件驱动策略'),
        ('其他股票策略', '其他股票策略'),
        ('阿尔法策略', '阿尔法策略'),
        ('债券策略','债券策略'),
        ('货币市场策略', '货币市场策略'),
        ('管理期货策略', '管理期货策略'),
        ('套利策略', '套利策略'),
        ('宏观策略', '宏观策略'),
        ('组合基金策略','组合基金策略'),
        ('现金管理', '现金管理'),
    ],
    'rank':[
        ('0',"未评级"),
        ('1',"不关注"),
        ('2',"观察"),
        ('3',"备选"),
        ('4',"核心池")
    ]}
    # Fields exposed on the create/edit forms.
    form_columns = ['wind_code','sec_name','strategy_type','fund_setupdate','fund_maturitydate','fund_mgrcomp','fund_status','alias',
                    'fund_existingyear','fund_ptmyear','fund_type','fund_fundmanager','nav_acc_latest','nav_acc_mdd','sharpe',
                    'nav_date_latest','annual_return','scale_tot','scale_a','scale_b','priority_asset','inferior_asset',
                    'priority_interest_rate','rank','file','fh_inv_manager','fh_prod_manager','fh_channel_manager','nav_maintain_mode']
    # Human-readable (Chinese) captions for list/edit views.
    column_labels = dict(
        wind_code='基金代码',sec_name='基金名称',strategy_type='策略名称',fund_setupdate='成立时间',fund_maturitydate='终止日',
        fund_mgrcomp='基金经理',fund_status='基金状态',alias='别名',
        fund_existingyear='存在年限',fund_ptmyear='存续年限',fund_type='基金类型',fund_fundmanager='基金管理人员',
        nav_acc_latest='最新净值',nav_acc_mdd="最大回撤比",sharpe='夏普比',
        nav_date_latest="最新净值日期",annual_return="年化收益率",scale_tot="总规模",scale_a="A类份额规模",scale_b="B类份额规模",
        priority_asset="优先级资产规模",inferior_asset="劣后级资产规模",fh_inv_manager="投资负责人",fh_prod_manager="产品负责人",fh_channel_manager="渠道负责人",
        priority_interest_rate="优先级年化收益率",rank="基金评级信息",file="文件",nav_maintain_mode='净值模式')
    column_searchable_list = ('wind_code','sec_name')
    column_list = ('wind_code','sec_name','strategy_type','fund_setupdate','fund_maturitydate','fund_mgrcomp','fund_status')
    export_max_rows = 10
class PctAdmin(ModelView):
    """Admin view for sub-fund allocation records (scale per adjustment date)."""
    column_display_pk = True
    # Column captions shown in list/edit views.
    column_labels = {
        'invest_scale': '投资规模',
        'date_adj': '调整日期',
        'wind_code_s': '子基金',
        'fund_info': '母基金',
    }
    column_list = ('wind_code_s', 'fund_info', 'date_adj', 'invest_scale')
    # Resolve the parent fund via AJAX lookup on code, name or alias.
    form_ajax_refs = {
        'fund_info': {
            'fields': (FoFModel.wind_code, FoFModel.sec_name, FoFModel.alias),
        },
    }
class UserAdmin(ModelView):
    """Admin view for user accounts: masks the stored password hash and
    refreshes the per-user fund cache after saving."""
    column_display_pk = True
    column_labels = dict(
        username = "用户名",
        email="邮箱",
        password_hash = "密码",
        role = "角色",
        is_admin= "管理员",
        is_staff="复华",
        update_nav="净值修改",
        is_report="研究员",
        confirmed='已激活'
    )
    column_list = ('username', 'email','password_hash','is_admin','is_staff','update_nav','is_report')
    # Show only the last 6 characters of the hash in list views.
    column_formatters = dict(
        password_hash=lambda v, c, m, p: '*****' + m.password_hash[-6:],
    )
    # NOTE(review): ('password_hash') is a plain string, not a 1-tuple —
    # flask-admin may still accept it, but confirm this is intentional.
    form_excluded_columns = ('password_hash')
    # Extra form field for entering a new password in clear text.
    form_extra_fields = {
        'password2': PasswordField('密码哈希')
    }
    def on_model_change(self, form, User, is_created):
        # NOTE(review): the 'User' parameter is the model *instance* being
        # saved, shadowing the usual class name.
        if len(form.password2.data) > 0 :
            # NOTE(review): if set_password follows the Django convention of
            # mutating the instance and returning None, this assignment sets
            # password_hash to None — confirm against the model definition.
            User.password_hash = User.set_password(form.password2.data)
        # Refresh (or drop) the cached list of funds this user may manage.
        fof_list = get_all_fof(User)
        if fof_list is None:
            logger.warning("用户没有可管理的基金,删除缓存")
            cache.delete(str(User.id))
        else:
            logger.info("用户{}的缓存已更新".format(User.username))
            cache.set(str(User.id), fof_list)
class StgAdmin(ModelView):
    """Admin view for per-fund strategy allocation percentages."""
    # Closed choice list for the strategy code.
    form_choices = {'stg_code': [
        ('股票多头策略', '股票多头策略'),
        ('股票多空策略', '股票多空策略'),
        ('事件驱动策略', '事件驱动策略'),
        ('其他股票策略', '其他股票策略'),
        ('阿尔法策略', '阿尔法策略'),
        ('债券策略', '债券策略'),
        ('货币市场策略', '货币市场策略'),
        ('管理期货策略', '管理期货策略'),
        ('套利策略', '套利策略'),
        ('宏观策略', '宏观策略'),
        ('现金管理', '现金管理'),
    ]}
    column_labels = dict(
        fund_info='基金名称',
        stg_code='策略类型',
        trade_date='调整日期',
        stg_pct='策略比例'
    )
    column_list = ('wind_code','stg_code','trade_date','stg_pct')
    # Resolve the fund via AJAX lookup on code, name or alias.
    form_ajax_refs = {
        'fund_info': {
            'fields': (FoFModel.wind_code, FoFModel.sec_name, FoFModel.alias)
        }
    }
class RoleAdmin(ModelView):
    """Admin view for roles; refreshes the fund cache of every attached user
    after a role is changed."""
    column_display_pk = True
    column_labels = dict(
        name ='角色名称',
        permissions = '权限',
        file_type ='文件类型',
        fof = '母基金',
        user = "用户名"
    )
    column_list = ('name','fof', 'user','permissions','file_type')
    # Resolve associated funds via AJAX lookup on code, name or alias.
    form_ajax_refs = {
        'fof': {
            'fields': (FoFModel.wind_code,FoFModel.sec_name,FoFModel.alias)
        }
    }
    def after_model_change(self, form, model, is_created):
        # A role change may alter which funds each attached user can manage,
        # so rebuild (or drop) every affected user's cache entry.
        user = model.user
        if len(user) > 0 :
            for i in user:
                fof_list = get_all_fof(i)
                if fof_list is None:
                    logger.warning("用户没有可管理的基金,删除缓存")
                    cache.delete(str(i.id))
                else:
                    logger.info("用户{}的缓存已更新".format(i.username))
                    cache.set(str(i.id),fof_list)
class PerAdmin(ModelView):
    """Admin view for permission records."""
    column_display_pk = True
    # Column captions shown in list/edit views.
    column_labels = {
        'name': '权限名称',
        'action': '函数名称',
        'roles': '角色',
    }
    column_list = ('name', 'action', 'roles')
class FileTypeAdmin(ModelView):
    """Admin view for file-type records."""
    column_display_pk = True
    # Column captions shown in list/edit views.
    column_labels = {
        'file': '文件',
        'type_name': '类型',
        'role': '角色',
    }
    # NOTE(review): column_list names 'roles' while the label above is keyed
    # 'role' — that column will fall back to its default caption; confirm.
    column_list = ('file', 'type_name', 'roles')
    form_columns = ['type_name', 'file']
class FileAdmin(ModelView):
    """Admin view for files attached to funds."""
    column_display_pk = True
    # Column captions shown in list/edit views.
    column_labels = {
        'wind_code': '基金',
        'show_name': '文件名称',
        'type_name': '文件类型',
        'file_path': '文件路径',
        'upload_datetime': '上传时间',
        'fund_info': '母基金',
    }
    column_list = ('fund_info', 'show_name', 'type_name', 'file_path', 'upload_datetime')
    # Resolve the parent fund via AJAX lookup on code, name or alias.
    form_ajax_refs = {
        'fund_info': {
            'fields': (FoFModel.wind_code, FoFModel.sec_name, FoFModel.alias),
        },
    }
class AccAdmin(ModelView):
    """Admin view for fund NAV (net asset value) records."""
    column_display_pk = True
    # Editable fields on the create/edit forms.
    form_columns = [
        'wind_code',
        'nav_date',
        'nav',
        'nav_acc',
        'source_mark',
        'nav_tot',
    ]
    column_searchable_list = ('wind_code',)
class SecAdmin(ModelView):
    """Admin view for securities; default ModelView behaviour with PKs shown."""
    column_display_pk = True
class EventAdmin(ModelView):
    """Admin view for fund events and their reminder settings."""
    # Closed choice list for the event category.
    form_choices = {'event_type': [
        ('投资事项', '投资事项'),
        ('投后事项', '投后事项'),
        ('法务事项', '法务事项'),
        ('其他事项', '其他事项'),
    ]}
    # Column captions shown in list/edit views.
    column_labels = {
        'fund_info': '基金名称',
        'event_date': '到期时间',
        'event_type': '事件类型',
        'remind_date': '提醒日期',
        'handle_status': '提醒状态',
        'notation': '消息正文',
        'wind_code': '基金名称',
        'create_user': '用户',
    }
    column_list = ('wind_code', 'event_date', 'event_type', 'remind_date', 'handle_status', 'create_user')
    # Resolve the fund via AJAX lookup on code, name or alias.
    form_ajax_refs = {
        'fund_info': {
            'fields': (FoFModel.wind_code, FoFModel.sec_name, FoFModel.alias),
        },
    }
class ChildMapping(ModelView):
    """Admin view mapping sub-fund batches to their sub-fund codes and limits."""
    column_display_pk = True
    # Editable fields on the create/edit forms.
    form_columns = [
        'wind_code_s', 'wind_code', 'sec_name_s', 'date_start',
        'date_end', 'warning_line', 'winding_line',
    ]
    # Column captions shown in list/edit views.
    column_labels = {
        'wind_code_s': '批次代码',
        'wind_code': '子基金代码',
        'sec_name_s': '批次名称',
        'date_start': '开始时间',
        'date_end': '结束时间',
        'warning_line': '预警线',
        'winding_line': '清盘线',
    }
class Invest_corp_admin(ModelView):
    """Admin view for investment management companies."""
    column_display_pk = True
    form_columns = ['name', 'alias', 'review_status']
    column_searchable_list = ('name',)
class Invest_corp_file_admin(ModelView):
    """Admin view for files attached to investment management companies."""
    column_display_pk = True
    # Editable fields on the create/edit forms.
    form_columns = [
        'file_id',
        'mgrcomp_id',
        'file_type',
        'upload_user_id',
        'upload_datetime',
        'file_name',
    ]
|
[
"logging.getLogger",
"wtforms.PasswordField"
] |
[((327, 338), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (336, 338), False, 'from logging import getLogger\n'), ((3824, 3845), 'wtforms.PasswordField', 'PasswordField', (['"""密码哈希"""'], {}), "('密码哈希')\n", (3837, 3845), False, 'from wtforms import PasswordField\n')]
|
#!/usr/bin/env python3
import json
import os
import subprocess
import sys
from collections import OrderedDict
script_dir = os.path.dirname(os.path.abspath(__file__))  # directory containing this script
root_dir = os.path.dirname(script_dir)  # repository root (parent of the script dir)
sys.path.append(root_dir)  # make the top-level emsdk module importable
import emsdk # noqa
def version_to_list(version_string):
    """Split a dotted version string ('1.2.3') into a list of ints ([1, 2, 3])."""
    return list(map(int, version_string.split('.')))
def main(args):
    """Create a git branch that registers the newest emscripten-releases
    tip-of-tree as a new patch version in emscripten-releases-tags.txt.

    Aborts (sys.exit(1)) if the working tree is dirty. Returns 0 on success.
    """
    # Refuse to run on a dirty working tree so the commit is clean.
    if subprocess.check_output(['git', 'status', '--porcelain'], cwd=root_dir).strip():
        print('tree is not clean')
        sys.exit(1)
    release_info = emsdk.load_releases_info()
    new_version = version_to_list(release_info['latest'])
    # Bump the last (patch) component of the latest known version.
    new_version[-1] += 1
    branch_name = 'version_%s' % '_'.join(str(part) for part in new_version)
    # Create a new git branch
    subprocess.check_call(['git', 'checkout', '-b', branch_name], cwd=root_dir)
    new_version = '.'.join(str(part) for part in new_version)
    new_hash = emsdk.get_emscripten_releases_tot()
    print('Creating new release: %s -> %s' % (new_version, new_hash))
    release_info['releases'][new_version] = new_hash
    # Keep the releases map ordered newest-first.
    releases = [(k, v) for k, v in release_info['releases'].items()]
    releases.sort(key=lambda pair: version_to_list(pair[0]))
    release_info['releases'] = OrderedDict(reversed(releases))
    release_info['latest'] = new_version
    with open(os.path.join(root_dir, 'emscripten-releases-tags.txt'), 'w') as f:
        f.write(json.dumps(release_info, indent=2))
        f.write('\n')
    subprocess.check_call(os.path.join(script_dir, 'update_bazel_workspace.sh'), cwd=root_dir)
    # Create auto-generated changes to the new git branch
    subprocess.check_call(['git', 'add', '-u', '.'], cwd=root_dir)
    subprocess.check_call(['git', 'commit', '-m', new_version], cwd=root_dir)
    # Fixed typo in the user-facing message ('relase' -> 'release').
    print('New release created in branch: `%s`' % branch_name)
    return 0
if __name__ == '__main__':
    # Forward CLI args (minus the program name) and propagate the exit status.
    sys.exit(main(sys.argv[1:]))
|
[
"sys.path.append",
"os.path.abspath",
"emsdk.get_emscripten_releases_tot",
"os.path.dirname",
"subprocess.check_output",
"json.dumps",
"sys.exit",
"os.path.join",
"emsdk.load_releases_info",
"subprocess.check_call"
] |
[((179, 206), 'os.path.dirname', 'os.path.dirname', (['script_dir'], {}), '(script_dir)\n', (194, 206), False, 'import os\n'), ((207, 232), 'sys.path.append', 'sys.path.append', (['root_dir'], {}), '(root_dir)\n', (222, 232), False, 'import sys\n'), ((141, 166), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (156, 166), False, 'import os\n'), ((522, 548), 'emsdk.load_releases_info', 'emsdk.load_releases_info', ([], {}), '()\n', (546, 548), False, 'import emsdk\n'), ((734, 809), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'checkout', '-b', branch_name]"], {'cwd': 'root_dir'}), "(['git', 'checkout', '-b', branch_name], cwd=root_dir)\n", (755, 809), False, 'import subprocess\n'), ((884, 919), 'emsdk.get_emscripten_releases_tot', 'emsdk.get_emscripten_releases_tot', ([], {}), '()\n', (917, 919), False, 'import emsdk\n'), ((1565, 1627), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'add', '-u', '.']"], {'cwd': 'root_dir'}), "(['git', 'add', '-u', '.'], cwd=root_dir)\n", (1586, 1627), False, 'import subprocess\n'), ((1630, 1703), 'subprocess.check_call', 'subprocess.check_call', (["['git', 'commit', '-m', new_version]"], {'cwd': 'root_dir'}), "(['git', 'commit', '-m', new_version], cwd=root_dir)\n", (1651, 1703), False, 'import subprocess\n'), ((492, 503), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (500, 503), False, 'import sys\n'), ((1437, 1490), 'os.path.join', 'os.path.join', (['script_dir', '"""update_bazel_workspace.sh"""'], {}), "(script_dir, 'update_bazel_workspace.sh')\n", (1449, 1490), False, 'import os\n'), ((376, 447), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'status', '--porcelain']"], {'cwd': 'root_dir'}), "(['git', 'status', '--porcelain'], cwd=root_dir)\n", (399, 447), False, 'import subprocess\n'), ((1279, 1333), 'os.path.join', 'os.path.join', (['root_dir', '"""emscripten-releases-tags.txt"""'], {}), "(root_dir, 'emscripten-releases-tags.txt')\n", (1291, 1333), 
False, 'import os\n'), ((1358, 1392), 'json.dumps', 'json.dumps', (['release_info'], {'indent': '(2)'}), '(release_info, indent=2)\n', (1368, 1392), False, 'import json\n')]
|
import datetime
import xml.etree.ElementTree as ET
from pathlib import Path
from banking_analytics.celery import app
from apps.banks.models import BalanceAccount, Bank, Region
from apps.cbrf.views import CBRF
@app.task
def get_accounts():
    """Celery task: download the CBR Form 101 indicator list and upsert a
    BalanceAccount row per indicator. Returns 'OK' on completion."""
    cb = CBRF()
    response = cb.query(method='Form101IndicatorsEnumXML')
    data_directory = Path('apps/banks/cbr_data/Form101IndicatorsEnum')
    now_date = datetime.date.today()
    data_file = Path(f'{now_date.strftime("%Y_%m_%d")}.xml')
    file_name = data_directory / data_file
    # Persist the raw response so ElementTree can parse it from disk.
    with open(file_name, 'w+', encoding="utf-8") as file:
        file.write(response.text)
    parser = ET.XMLParser(encoding="utf-8")
    tree = ET.parse(file_name, parser=parser)
    # Namespace map and root XPath come from the CBRF client's method table.
    ns = cb.methods.get('Form101IndicatorsEnumXML').get('ns')
    root = tree.findall(
        cb.methods.get('Form101IndicatorsEnumXML').get('root'), ns)
    for child in root:
        # NOTE(review): str() turns a missing element's None text into the
        # literal string 'None' — confirm the feed always has these fields.
        indCode = str(child.find('IndCode').text)
        name = str(child.find('name').text)
        indType = str(child.find('IndType').text)
        indChapter = str(child.find('IndChapter').text)
        # Existing rows keyed by indCode are left unchanged (defaults only
        # apply on creation).
        bank, _ = BalanceAccount.objects.get_or_create(
            indCode=indCode,
            defaults={'name': name,
                      'indType': indType,
                      'indChapter': indChapter,
                      }
        )
    # Remove the temporary XML; raise if it unexpectedly disappeared.
    file_name.unlink(missing_ok=False)
    return 'OK'
@app.task
def get_regions():
    """Celery task: download the CBR region list and upsert a Region row per
    entry. Returns 'OK' on completion."""
    cb = CBRF()
    response = cb.query(method='EnumRegions')
    data_directory = Path('apps/banks/cbr_data/EnumRegions')
    now_date = datetime.date.today()
    data_file = Path(f'{now_date.strftime("%Y_%m_%d")}.xml')
    file_name = data_directory / data_file
    # Persist the raw response so ElementTree can parse it from disk.
    with open(file_name, 'w+', encoding="utf-8") as file:
        file.write(response.text)
    parser = ET.XMLParser(encoding="utf-8")
    tree = ET.parse(file_name, parser=parser)
    # Namespace map and root XPath come from the CBRF client's method table.
    ns = cb.methods.get('EnumRegions').get('ns')
    root = tree.findall(
        cb.methods.get('EnumRegions').get('root'), ns)
    for child in root:
        name = str(child.find('Name').text)
        code = int(child.find('rgn').text)
        # Existing rows keyed by code keep their stored name (defaults only
        # apply on creation).
        region, _ = Region.objects.get_or_create(
            code=code,
            defaults={'name': name}
        )
    # Remove the temporary XML; raise if it unexpectedly disappeared.
    file_name.unlink(missing_ok=False)
    return 'OK'
@app.task
def get_bics():
    """Celery task: download the CBR BIC directory and upsert a Bank row per
    entry, keyed by OGRN. Returns 'OK' on completion."""
    cb = CBRF()
    response = cb.query(method='EnumBIC')
    data_directory = Path('apps/banks/cbr_data/EnumBIC')
    now_date = datetime.date.today()
    data_file = Path(f'{now_date.strftime("%Y_%m_%d")}.xml')
    file_name = data_directory / data_file
    # Persist the raw response so ElementTree can parse it from disk.
    with open(file_name, 'w+', encoding="utf-8") as file:
        file.write(response.text)
    parser = ET.XMLParser(encoding="utf-8")
    tree = ET.parse(file_name, parser=parser)
    # Namespace map and root XPath come from the CBRF client's method table.
    ns = cb.methods.get('EnumBIC').get('ns')
    root = tree.findall(
        cb.methods.get('EnumBIC').get('root'), ns)
    for child in root:
        bic = str(child.find('BIC').text)
        reg_date = str(child.find('RC').text)
        # Registration date arrives as an ISO timestamp with offset.
        reg_date = datetime.datetime.strptime(
            reg_date, '%Y-%m-%dT%H:%M:%S%z').date()
        name = str(child.find('NM').text)
        ogrn = str(child.find('RB').text)
        cregnr = str(child.find('cregnr').text)
        internal_number = str(child.find('intCode').text)
        reg_number = str(child.find('RN').text)
        # Existing rows keyed by OGRN keep their stored data (defaults only
        # apply on creation).
        bank, _ = Bank.objects.get_or_create(
            ogrn=ogrn,
            defaults={'bic': bic,
                      'reg_date': reg_date,
                      'name': name,
                      'cregnr': cregnr,
                      'internal_number': internal_number,
                      'reg_number': reg_number,
                      }
        )
    # Remove the temporary XML; raise if it unexpectedly disappeared.
    file_name.unlink(missing_ok=False)
    return 'OK'
|
[
"xml.etree.ElementTree.parse",
"apps.cbrf.views.CBRF",
"apps.banks.models.BalanceAccount.objects.get_or_create",
"datetime.date.today",
"xml.etree.ElementTree.XMLParser",
"apps.banks.models.Region.objects.get_or_create",
"apps.banks.models.Bank.objects.get_or_create",
"pathlib.Path",
"datetime.datetime.strptime"
] |
[((252, 258), 'apps.cbrf.views.CBRF', 'CBRF', ([], {}), '()\n', (256, 258), False, 'from apps.cbrf.views import CBRF\n'), ((339, 388), 'pathlib.Path', 'Path', (['"""apps/banks/cbr_data/Form101IndicatorsEnum"""'], {}), "('apps/banks/cbr_data/Form101IndicatorsEnum')\n", (343, 388), False, 'from pathlib import Path\n'), ((404, 425), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (423, 425), False, 'import datetime\n'), ((635, 665), 'xml.etree.ElementTree.XMLParser', 'ET.XMLParser', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (647, 665), True, 'import xml.etree.ElementTree as ET\n'), ((677, 711), 'xml.etree.ElementTree.parse', 'ET.parse', (['file_name'], {'parser': 'parser'}), '(file_name, parser=parser)\n', (685, 711), True, 'import xml.etree.ElementTree as ET\n'), ((1430, 1436), 'apps.cbrf.views.CBRF', 'CBRF', ([], {}), '()\n', (1434, 1436), False, 'from apps.cbrf.views import CBRF\n'), ((1504, 1543), 'pathlib.Path', 'Path', (['"""apps/banks/cbr_data/EnumRegions"""'], {}), "('apps/banks/cbr_data/EnumRegions')\n", (1508, 1543), False, 'from pathlib import Path\n'), ((1559, 1580), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1578, 1580), False, 'import datetime\n'), ((1790, 1820), 'xml.etree.ElementTree.XMLParser', 'ET.XMLParser', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (1802, 1820), True, 'import xml.etree.ElementTree as ET\n'), ((1832, 1866), 'xml.etree.ElementTree.parse', 'ET.parse', (['file_name'], {'parser': 'parser'}), '(file_name, parser=parser)\n', (1840, 1866), True, 'import xml.etree.ElementTree as ET\n'), ((2317, 2323), 'apps.cbrf.views.CBRF', 'CBRF', ([], {}), '()\n', (2321, 2323), False, 'from apps.cbrf.views import CBRF\n'), ((2387, 2422), 'pathlib.Path', 'Path', (['"""apps/banks/cbr_data/EnumBIC"""'], {}), "('apps/banks/cbr_data/EnumBIC')\n", (2391, 2422), False, 'from pathlib import Path\n'), ((2438, 2459), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (2457, 2459), 
False, 'import datetime\n'), ((2669, 2699), 'xml.etree.ElementTree.XMLParser', 'ET.XMLParser', ([], {'encoding': '"""utf-8"""'}), "(encoding='utf-8')\n", (2681, 2699), True, 'import xml.etree.ElementTree as ET\n'), ((2711, 2745), 'xml.etree.ElementTree.parse', 'ET.parse', (['file_name'], {'parser': 'parser'}), '(file_name, parser=parser)\n', (2719, 2745), True, 'import xml.etree.ElementTree as ET\n'), ((1108, 1236), 'apps.banks.models.BalanceAccount.objects.get_or_create', 'BalanceAccount.objects.get_or_create', ([], {'indCode': 'indCode', 'defaults': "{'name': name, 'indType': indType, 'indChapter': indChapter}"}), "(indCode=indCode, defaults={'name':\n name, 'indType': indType, 'indChapter': indChapter})\n", (1144, 1236), False, 'from apps.banks.models import BalanceAccount, Bank, Region\n'), ((2126, 2190), 'apps.banks.models.Region.objects.get_or_create', 'Region.objects.get_or_create', ([], {'code': 'code', 'defaults': "{'name': name}"}), "(code=code, defaults={'name': name})\n", (2154, 2190), False, 'from apps.banks.models import BalanceAccount, Bank, Region\n'), ((3333, 3517), 'apps.banks.models.Bank.objects.get_or_create', 'Bank.objects.get_or_create', ([], {'ogrn': 'ogrn', 'defaults': "{'bic': bic, 'reg_date': reg_date, 'name': name, 'cregnr': cregnr,\n 'internal_number': internal_number, 'reg_number': reg_number}"}), "(ogrn=ogrn, defaults={'bic': bic, 'reg_date':\n reg_date, 'name': name, 'cregnr': cregnr, 'internal_number':\n internal_number, 'reg_number': reg_number})\n", (3359, 3517), False, 'from apps.banks.models import BalanceAccount, Bank, Region\n'), ((2997, 3056), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['reg_date', '"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "(reg_date, '%Y-%m-%dT%H:%M:%S%z')\n", (3023, 3056), False, 'import datetime\n')]
|
""" Merges netCDF files covering the same month into one file.
Currently depends a lot on the input files having the right structure and
naming, i.e. a lot of assumptions are hardwired into this script. Maybe I'll
get to generalizing it by pulling information out of every file found, but for
now, this will have to do.
Parameters
----------
sys.argv[1]: directory
    The script will look into every tar file in this directory, and merge any
    netCDF file the tar file contains with the others, provided the name
    contains the right variables and date.
sys.argv[2]: path prefix
The new netCDF file containing the merged data found will be written to
the file "sys.argv[2]-YEAR_MONTH.nc" where YEAR_MONTH is pulled out of
filenames considered for the merge.
sys.argv[3]: str, comma separated list of variables
The argument is split by occurrences of "," and the results have any
surrounding whitespace removed. The result is then treated as the list of
variables eligible for a merge.
sys.argv[4]: regular expression
A pattern to further limit the file names the script actually handles.
Useful during development to not act on more files than absolutely
necessary.
sys.argv[5]: filename
    A single file containing all merged data will be created using this name.
"""
from glob import iglob
from pprint import pprint as pp
from tempfile import TemporaryDirectory as TD
from subprocess import call
import os.path as osp
import re
import sys
import tarfile
from dask.diagnostics import ProgressBar
import xarray as xr
from add_dimension import add_dimension
""" Command lines:
python ../monthly.py '../' './no-height' 'ASWDIFD_S, ASWDIR_S, ASWDIR_NS2'
python ../monthly.py '../' './' \
'WSS_zlevel, T_zlevel, P_zlevel, WDIRlat_zlevel'
Special: Z0
(no height but weird time bounds due to mean/instantaneous measurement)
python ../../cli/code/monthly.py './' './import-test' \
'WSS_zlevel, P_zlevel' \
'2015' \
'2015.WSS.P.nc'
"""
def merge(variable, tar, store):
    """Extract every netCDF member of `tar` matching `variable` and merge them.

    Parameters
    ----------
    variable : str
        Only tar members whose name contains this string are handled.
    tar : tarfile.TarFile
        The archive to pull netCDF files out of.
    store : str
        Directory into which the merged file "<variable>.nc" is written.

    Returns
    -------
    str
        Path of the merged netCDF file.
    """
    # chunks={"time": 12, "rlat": 11, "rlon": 11}
    chunks = {}
    # Extraction happens into a throwaway directory that is removed on exit.
    with TD(dir="./_T_") as tmp:
        members = tar.getmembers()
        netcdfs = []
        for member in members:
            if variable not in member.name:
                continue
            print("Handling {}.".format(member.name))
            netcdfs.append(member.name)
            tar.extractall(tmp, members=[member])
            path = osp.join(tmp, member.name)
            # 10 m level files get an explicit "height" dimension (value
            # 10.0) inserted so they line up with the z-level files.
            fix_height = re.search("WSS_10M|WDIRlat_10M", member.name)
            if fix_height:
                print("Fixing height.")
                add_dimension(
                    source=path,
                    target=osp.join(tmp, "fixed"),
                    variable=fix_height.group(),
                    dimension="height",
                    position=2,
                    value=10.0,
                    new_name=fix_height.group()[:-4],
                )
                # Replace the extracted file with the fixed version.
                call(["mv", osp.join(tmp, "fixed"), path])
        netcdfs = [osp.join(tmp, f) for f in netcdfs]
        print("Merging:")
        pp(netcdfs)
        target = osp.join(store, "{}.nc".format(variable))
        print("--> {}".format(target))
        datasets = [
            xr.open_dataset(n, decode_cf=False, chunks=chunks) for n in netcdfs
        ]
        # The "rotated_pole" variable is deliberately excluded from the merge.
        merged = xr.merge(
            d[v] for d in datasets for v in d.data_vars if v != "rotated_pole"
        )
        # Deferred write: to_netcdf with compute=False returns a dask task
        # that is executed under a progress bar.
        computation = merged.to_netcdf(target, format="NETCDF4", compute=False)
        with ProgressBar():
            computation.compute()
    return target
if __name__ == "__main__":
    """ Variables:
    "WSS_zlevel", "T_zlevel", "P_zlevel", "Z0", "WDIRlat_zlevel",
    "ASWDIFD_S", "ASWDIR_S", "ASWDIR_NS2"
    """
    # Comma separated list of variables to merge, from the command line.
    variables = [s.strip() for s in sys.argv[3].split(",")]
    # chunks={"time": 12, "rlat": 11, "rlon": 11}
    chunks = {}
    with TD(dir="./_T_/") as tmp:
        # Open every tar archive under sys.argv[1] whose name matches the
        # filter pattern given as sys.argv[4].
        tars = list(
            tarfile.open(tar)
            for tar in iglob(osp.join(sys.argv[1], "*.tar"))
            if re.search(sys.argv[4], tar)
        )
        everything = []
        for tar in tars:
            # The month being processed, e.g. "2015_01", pulled out of the
            # archive's file name.
            year = re.search(r"(\d\d\d\d_\d\d)\.tar", tar.name).groups()[0]
            # One intermediate merged file per requested variable.
            merged = []
            for variable in variables:
                merged.append(merge(variable, tar, tmp))
            print("Merging/Compressing to:")
            pp(merged)
            mergetarget = "{}-{}.nc".format(sys.argv[2], year)
            print("--> {}".format(mergetarget))
            datasets = (
                xr.open_dataset(path, decode_cf=False, chunks=chunks)
                for path in merged
            )
            data_vars = [d[v] for d in datasets for v in d.data_vars]
            # Variables whose names start with an upper case letter get
            # their stored precision limited to 3 significant digits.
            for dv in data_vars:
                if dv.name[0].isupper():
                    dv.encoding["least_significant_digit"] = 3
            ds = xr.merge(data_vars)
            # Write the per-month file with maximum zlib compression;
            # compute=False defers the work to the ProgressBar block below.
            computation = ds.to_netcdf(
                mergetarget,
                format="NETCDF4",
                compute=False,
                encoding={
                    v: {"complevel": 9, "zlib": True}
                    for v in list(ds.variables)
                },
            )
            with ProgressBar():
                computation.compute()
            # The intermediate per-variable files are no longer needed.
            call(["rm", "-r"] + merged)
            ds.close()
            everything.append(mergetarget)
    """
    print("Compressing to {}.".format(sys.argv[5]))
    computation = (
        xr.merge(
            d[v]
            for d in [
                xr.open_dataset(p, chunks=chunks)
                for p in everything]
            for v in d.data_vars)
        .to_netcdf(sys.argv[5], format='NETCDF4',
                   encoding={
                       v: {'complevel': 9, 'zlib': True}
                       for v in list(ds.variables)},
                   compute=False))
    with ProgressBar():
        computation.compute()
    #call(["mv", tmpeverything, sys.argv[5]])
    """
    print("All done.")
|
[
"tempfile.TemporaryDirectory",
"os.path.join",
"xarray.open_dataset",
"dask.diagnostics.ProgressBar",
"xarray.merge",
"subprocess.call",
"pprint.pprint",
"tarfile.open",
"re.search"
] |
[((2211, 2226), 'tempfile.TemporaryDirectory', 'TD', ([], {'dir': '"""./_T_"""'}), "(dir='./_T_')\n", (2213, 2226), True, 'from tempfile import TemporaryDirectory as TD\n'), ((3206, 3217), 'pprint.pprint', 'pp', (['netcdfs'], {}), '(netcdfs)\n', (3208, 3217), True, 'from pprint import pprint as pp\n'), ((3444, 3520), 'xarray.merge', 'xr.merge', (["(d[v] for d in datasets for v in d.data_vars if v != 'rotated_pole')"], {}), "(d[v] for d in datasets for v in d.data_vars if v != 'rotated_pole')\n", (3452, 3520), True, 'import xarray as xr\n'), ((4004, 4020), 'tempfile.TemporaryDirectory', 'TD', ([], {'dir': '"""./_T_/"""'}), "(dir='./_T_/')\n", (4006, 4020), True, 'from tempfile import TemporaryDirectory as TD\n'), ((2554, 2580), 'os.path.join', 'osp.join', (['tmp', 'member.name'], {}), '(tmp, member.name)\n', (2562, 2580), True, 'import os.path as osp\n'), ((2606, 2651), 're.search', 're.search', (['"""WSS_10M|WDIRlat_10M"""', 'member.name'], {}), "('WSS_10M|WDIRlat_10M', member.name)\n", (2615, 2651), False, 'import re\n'), ((3137, 3153), 'os.path.join', 'osp.join', (['tmp', 'f'], {}), '(tmp, f)\n', (3145, 3153), True, 'import os.path as osp\n'), ((3349, 3399), 'xarray.open_dataset', 'xr.open_dataset', (['n'], {'decode_cf': '(False)', 'chunks': 'chunks'}), '(n, decode_cf=False, chunks=chunks)\n', (3364, 3399), True, 'import xarray as xr\n'), ((3636, 3649), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (3647, 3649), False, 'from dask.diagnostics import ProgressBar\n'), ((4496, 4506), 'pprint.pprint', 'pp', (['merged'], {}), '(merged)\n', (4498, 4506), True, 'from pprint import pprint as pp\n'), ((4986, 5005), 'xarray.merge', 'xr.merge', (['data_vars'], {}), '(data_vars)\n', (4994, 5005), True, 'import xarray as xr\n'), ((5384, 5411), 'subprocess.call', 'call', (["(['rm', '-r'] + merged)"], {}), "(['rm', '-r'] + merged)\n", (5388, 5411), False, 'from subprocess import call\n'), ((4062, 4079), 'tarfile.open', 'tarfile.open', (['tar'], {}), '(tar)\n', 
(4074, 4079), False, 'import tarfile\n'), ((4659, 4712), 'xarray.open_dataset', 'xr.open_dataset', (['path'], {'decode_cf': '(False)', 'chunks': 'chunks'}), '(path, decode_cf=False, chunks=chunks)\n', (4674, 4712), True, 'import xarray as xr\n'), ((5319, 5332), 'dask.diagnostics.ProgressBar', 'ProgressBar', ([], {}), '()\n', (5330, 5332), False, 'from dask.diagnostics import ProgressBar\n'), ((4156, 4183), 're.search', 're.search', (['sys.argv[4]', 'tar'], {}), '(sys.argv[4], tar)\n', (4165, 4183), False, 'import re\n'), ((2810, 2832), 'os.path.join', 'osp.join', (['tmp', '"""fixed"""'], {}), "(tmp, 'fixed')\n", (2818, 2832), True, 'import os.path as osp\n'), ((3087, 3109), 'os.path.join', 'osp.join', (['tmp', '"""fixed"""'], {}), "(tmp, 'fixed')\n", (3095, 3109), True, 'import os.path as osp\n'), ((4109, 4139), 'os.path.join', 'osp.join', (['sys.argv[1]', '"""*.tar"""'], {}), "(sys.argv[1], '*.tar')\n", (4117, 4139), True, 'import os.path as osp\n'), ((4262, 4312), 're.search', 're.search', (['"""(\\\\d\\\\d\\\\d\\\\d_\\\\d\\\\d)\\\\.tar"""', 'tar.name'], {}), "('(\\\\d\\\\d\\\\d\\\\d_\\\\d\\\\d)\\\\.tar', tar.name)\n", (4271, 4312), False, 'import re\n')]
|
import unittest
from abc import ABC, abstractmethod
from Builder_models import MainData, Report, LeaveRecord
from Builders import Builder, BuilderFI,BuilderIT
from Directors import Director, DirectorCEO
class UtBuilder(unittest.TestCase):
    """Unit tests for the Builder-pattern directors."""

    def test_director(self):
        """A single-builder Director yields the FI department data."""
        fi_builder = BuilderFI()
        result = Director(builder=fi_builder).construct()
        self.assertEqual(result.targetBU, "Financial Department")
        self.assertEqual(result.report.name, "ROI report")
        self.assertEqual(result.leaveRecord.weeks, 2)

    def test_directorCEO(self):
        """A two-builder DirectorCEO aggregates data for the CEO."""
        fi_builder = BuilderFI()
        it_builder = BuilderIT()
        ceo_director = DirectorCEO(builder1=fi_builder, builder2=it_builder)
        result = ceo_director.construct()
        self.assertEqual(result.targetBU, "CEO")
        self.assertEqual(result.report.name, "ROI report")
        self.assertEqual(result.leaveRecord.weeks, 4)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"Builders.BuilderFI",
"Directors.DirectorCEO",
"Directors.Director",
"Builders.BuilderIT"
] |
[((988, 1003), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1001, 1003), False, 'import unittest\n'), ((290, 301), 'Builders.BuilderFI', 'BuilderFI', ([], {}), '()\n', (299, 301), False, 'from Builders import Builder, BuilderFI, BuilderIT\n'), ((321, 348), 'Directors.Director', 'Director', ([], {'builder': 'myBuilder'}), '(builder=myBuilder)\n', (329, 348), False, 'from Directors import Director, DirectorCEO\n'), ((629, 640), 'Builders.BuilderFI', 'BuilderFI', ([], {}), '()\n', (638, 640), False, 'from Builders import Builder, BuilderFI, BuilderIT\n'), ((662, 673), 'Builders.BuilderIT', 'BuilderIT', ([], {}), '()\n', (671, 673), False, 'from Builders import Builder, BuilderFI, BuilderIT\n'), ((693, 746), 'Directors.DirectorCEO', 'DirectorCEO', ([], {'builder1': 'myBuilder1', 'builder2': 'myBuilder2'}), '(builder1=myBuilder1, builder2=myBuilder2)\n', (704, 746), False, 'from Directors import Director, DirectorCEO\n')]
|
# -*- coding: utf-8 -*-
'''preggy 'like' assertions. For use with `expect()` (see `preggy.core`).
'''
# preggy assertions
# https://github.com/heynemann/preggy
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2013 <NAME> <EMAIL>
from __future__ import absolute_import, print_function
import re
from datetime import datetime
import difflib
from uuid import UUID
try:
from six import string_types, binary_type
except ImportError: # pragma: no cover
import warnings
warnings.warn('Ignoring six. Probably setup.py installing package.')
import numbers
from preggy import assertion
from preggy import utils
__all__ = ('to_be_like', 'not_to_be_like')
#-------------------------------------------------------------------------------------------------
# CONSTANTS
#-------------------------------------------------------------------------------------------------
# Maximum allowed difference, in seconds, when comparing two datetimes.
DATE_THRESHOLD = 5.0
# ANSI terminal escape codes used to colorize diff output.
RESET = '\033[m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
# Matches ANSI color escape sequences so they can be stripped off
# before strings are compared.
REMOVE_COLORS_REGEX = re.compile(
    r'(\033|\x1b|\x03)'  # prefixes
    r'\['  # non-regex bracket
    r'([0-9]*[;])?'  # semi-colon
    r'[0-9]*m',  # suffix
    flags=re.UNICODE
)
# Matches any run of whitespace; used to normalize strings for comparison.
NORMALIZE_WHITESPACE_REGEX = re.compile(
    r'\s+',
    flags=re.UNICODE | re.MULTILINE | re.IGNORECASE
)
#-------------------------------------------------------------------------------------------------
# HELPERS
#-------------------------------------------------------------------------------------------------
# Canonical "filtered" form: lower case, all whitespace removed, trimmed.
_filter_str = lambda s: NORMALIZE_WHITESPACE_REGEX.sub('', s.lower()).strip()
def compare(first, second):
    """Build a SequenceMatcher over *first*/*second* and colorize both.

    Returns a tuple ``(matcher, first_colored, second_colored)`` where the
    two strings have their differing regions wrapped in ANSI colors.
    """
    matcher = difflib.SequenceMatcher(None, first, second)
    first = get_match_for_text(matcher, first, True)
    # BUG FIX: the second string must be sliced with the j-indices of the
    # opcodes (first=False); passing True here rendered the *first*
    # sequence's slices against the second string.
    second = get_match_for_text(matcher, second, False)
    return matcher, first, second
def get_match_for_text(matcher, text, first):
    """Render *text* with its diff regions wrapped in ANSI colors.

    ``first`` selects whether the i-indices (first sequence) or the
    j-indices (second sequence) of the matcher's opcodes slice *text*.
    """
    tag_colors = {'delete': RED, 'insert': GREEN, 'replace': YELLOW}
    pieces = []
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if first:
            chunk = text[i1:i2]
        else:
            chunk = text[j1:j2]
        color = tag_colors.get(tag)
        if color is not None:
            chunk = color + chunk + RESET
        pieces.append(chunk)
    return ''.join(pieces)
def _match_alike(expected, topic, diff=False):
    '''Dispatch the comparison of `expected` and `topic` on the type of `topic`.'''
    if topic is None:
        return expected is None
    # Ordered dispatch table -- the first entry whose types match wins.
    dispatch = (
        (UUID, _compare_uuid),
        (string_types + (binary_type, ), _compare_strings),
        (numbers.Number, _compare_numbers),
        ((list, tuple, set), _compare_lists),
        (dict, _compare_dicts),
        (datetime, _compare_datetime),
    )
    for type_spec, comparator in dispatch:
        if isinstance(topic, type_spec):
            return comparator(expected, topic)
    raise RuntimeError('Could not compare {expected} and {topic}'.format(expected=expected, topic=topic))
def _strip_string(text):
    """Normalize *text* for fuzzy comparison.

    Falsy inputs come back unchanged; otherwise the string is fixed up,
    stripped of ANSI color codes, lower-cased and whitespace-free.
    """
    if not text:
        return text
    fixed = utils.fix_string(text)
    without_colors = REMOVE_COLORS_REGEX.sub('', fixed)
    return _filter_str(without_colors)
def _compare_strings(expected, topic):
    '''Asserts the "like"-ness of `topic` and `expected` as strings.
    Allows some leeway. (Strings don't have to exactly match.)
    '''
    # Normalize both sides identically and compare.  The original applied
    # _filter_str a second time to the topic only -- redundant, since the
    # filter is idempotent, and asymmetric with the expected side.
    topic = _strip_string(topic)
    expected = _strip_string(expected)
    return expected == topic
def _compare_uuid(expected, topic):
    '''Asserts the "like"-ness of `topic` and `expected` as UUID.'''
    # Compare canonical string renderings so UUID objects and strings mix.
    return str(expected) == str(topic)
def __timedelta_to_seconds(timedelta):
    """Return the absolute length of *timedelta* in seconds (as a float).

    Delegates to ``datetime.timedelta.total_seconds`` instead of manually
    recombining days, seconds and microseconds, which is exactly what the
    previous hand-rolled arithmetic computed.
    """
    return abs(timedelta.total_seconds())
def _compare_datetime(expected, topic):
    '''Asserts the "like"-ness of `topic` and `expected` as datetimes.

    The two are alike when they differ by at most DATE_THRESHOLD seconds.
    '''
    return __timedelta_to_seconds(topic - expected) <= DATE_THRESHOLD
def _compare_numbers(expected, topic):
    '''Asserts the "like"-ness of `topic` and `expected` as Numbers.'''
    # Both sides must actually be numbers; compare after float coercion.
    both_numeric = (isinstance(topic, numbers.Number)
                    and isinstance(expected, numbers.Number))
    if not both_numeric:
        return False
    return float(expected) == float(topic)
def _compare_dicts(expected, topic):
    '''Asserts the "like"-ness of `topic` and `expected` as dicts.'''
    # Alike only when the match holds in both directions.
    if not _match_dicts(expected, topic):
        return False
    return _match_dicts(topic, expected)
def _match_dicts(expected, topic):
    '''Asserts the "like"-ness of all keys and values in `topic` and `expected`.'''
    return all(
        key in topic and _match_alike(topic[key], value)
        for key, value in expected.items()
    )
def _compare_lists(expected, topic):
    '''Asserts the "like"-ness of `topic` and `expected` as lists.'''
    # Alike only when each side's items can all be found in the other.
    if not _match_lists(expected, topic):
        return False
    return _match_lists(topic, expected)
def _match_lists(expected, topic):
    '''Asserts the "like"-ness each item in of `topic` and `expected` (as lists or tuples).'''
    for item in expected:
        if isinstance(item, (list, tuple)):
            # A nested sequence matches when ANY nested sequence in the
            # topic compares alike to it.
            nested_match = any(
                isinstance(candidate, (list, tuple))
                and _compare_lists(item, candidate)
                for candidate in topic
            )
            if not nested_match:
                return False
        elif item not in topic:
            return False
    return True
#-------------------------------------------------------------------------------------------------
# Assertions
#-------------------------------------------------------------------------------------------------
@assertion
def to_be_like(topic, expected, diff=True):
    '''Asserts that `topic` is like (similar to) `expected`. Allows some leeway.'''
    if _match_alike(expected, topic, diff=diff):
        return
    str_like = string_types + (binary_type,)
    if diff is True and isinstance(topic, str_like) and isinstance(expected, str_like):
        # Print a colorized diff of the normalized strings before failing.
        matcher, left, right = compare(_strip_string(topic), _strip_string(expected))
        print()
        print('Expected strings to be equal, but they were different:')
        print(left)
        print(right)
        print()
    raise AssertionError("Expected topic('{topic}') to be like '{expected}'".format(topic=topic, expected=expected))
@assertion
def not_to_be_like(topic, expected, diff=False):
    '''Asserts that `topic` is NOT like (NOT similar to) `expected`. Allows some leeway.'''
    if _match_alike(expected, topic, diff=diff):
        raise AssertionError("Expected topic('{topic}') not to be like '{expected}'".format(topic=topic, expected=expected))
|
[
"difflib.SequenceMatcher",
"warnings.warn",
"preggy.utils.fix_string",
"re.compile"
] |
[((1046, 1119), 're.compile', 're.compile', (['"""(\\\\033|\\\\x1b|\\\\x03)\\\\[([0-9]*[;])?[0-9]*m"""'], {'flags': 're.UNICODE'}), "('(\\\\033|\\\\x1b|\\\\x03)\\\\[([0-9]*[;])?[0-9]*m', flags=re.UNICODE)\n", (1056, 1119), False, 'import re\n'), ((1264, 1331), 're.compile', 're.compile', (['"""\\\\s+"""'], {'flags': '(re.UNICODE | re.MULTILINE | re.IGNORECASE)'}), "('\\\\s+', flags=re.UNICODE | re.MULTILINE | re.IGNORECASE)\n", (1274, 1331), False, 'import re\n'), ((1675, 1719), 'difflib.SequenceMatcher', 'difflib.SequenceMatcher', (['None', 'first', 'second'], {}), '(None, first, second)\n', (1698, 1719), False, 'import difflib\n'), ((3258, 3280), 'preggy.utils.fix_string', 'utils.fix_string', (['text'], {}), '(text)\n', (3274, 3280), False, 'from preggy import utils\n'), ((531, 599), 'warnings.warn', 'warnings.warn', (['"""Ignoring six. Probably setup.py installing package."""'], {}), "('Ignoring six. Probably setup.py installing package.')\n", (544, 599), False, 'import warnings\n')]
|
# Copyright 2017 Smartwaiver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import sys
sys.path.insert(0, '../')
import smartwaiver
import factory
class SmartwaiverCustomFieldTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverCustomField."""

    def test_required_keys(self):
        """A missing required key raises a descriptive ValueError."""
        data = factory.custom_field()
        del data['value']
        with self.assertRaises(ValueError) as context:
            smartwaiver.types.SmartwaiverCustomField(data)
        self.assertEqual(
            'Cannot create a SmartwaiverCustomField with missing field: value',
            str(context.exception))

    def test_success(self):
        """Attributes are populated from the input dict."""
        data = factory.custom_field()
        field = smartwaiver.types.SmartwaiverCustomField(data)
        self.assertEqual(data['value'], field.value)
        self.assertEqual(data['displayText'], field.display_text)
class SmartwaiverGuardianTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverGuardian."""

    def test_required_keys(self):
        """A missing required key raises a descriptive ValueError."""
        data = factory.guardian()
        del data['firstName']
        with self.assertRaises(ValueError) as context:
            smartwaiver.types.SmartwaiverGuardian(data)
        self.assertEqual(
            'Cannot create a SmartwaiverGuardian with missing field: firstName',
            str(context.exception))

    def test_success(self):
        """Every attribute is populated from the corresponding input key."""
        data = factory.guardian()
        guardian = smartwaiver.types.SmartwaiverGuardian(data)
        # (input key, attribute name) pairs to verify.
        fields = (('firstName', 'first_name'), ('middleName', 'middle_name'),
                  ('lastName', 'last_name'), ('phone', 'phone'),
                  ('relationship', 'relationship'))
        for key, attr in fields:
            self.assertEqual(data[key], getattr(guardian, attr))
class SmartwaiverParticipantTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverParticipant."""

    def test_required_keys(self):
        """A missing required key raises a descriptive ValueError."""
        participant_data = factory.participant()
        participant_data.pop('firstName')
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverParticipant(participant_data)
        self.assertEqual('Cannot create a SmartwaiverParticipant with missing field: firstName', str(cm.exception))

    def test_success(self):
        """Every attribute is populated from the corresponding input key."""
        participant_data = factory.participant()
        participant = smartwaiver.types.SmartwaiverParticipant(participant_data)
        self.assertEqual(participant_data['firstName'], participant.first_name)
        self.assertEqual(participant_data['middleName'], participant.middle_name)
        self.assertEqual(participant_data['lastName'], participant.last_name)
        self.assertEqual(participant_data['dob'], participant.dob)
        self.assertEqual(participant_data['isMinor'], participant.is_minor)
        self.assertEqual(participant_data['gender'], participant.gender)
        self.assertEqual(participant_data['phone'], participant.phone)
        self.assertEqual(participant_data['tags'], participant.tags)
        # BUG FIX: assertTrue(len(a), len(b)) treated the second length as
        # the failure *message* and never compared the two lengths.
        self.assertEqual(len(participant_data['customParticipantFields']),
                         len(participant.custom_participant_fields))
        for guid in participant.custom_participant_fields:
            self.assertIs(type(participant.custom_participant_fields[guid]), smartwaiver.types.SmartwaiverCustomField)
class SmartwaiverTemplateTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverTemplate."""

    def test_required_keys(self):
        """A missing required key raises a descriptive ValueError."""
        data = factory.template()
        del data['templateId']
        with self.assertRaises(ValueError) as context:
            smartwaiver.types.SmartwaiverTemplate(data)
        self.assertEqual(
            'Cannot create a SmartwaiverTemplate with missing field: templateId',
            str(context.exception))

    def test_success(self):
        """Every attribute is populated from the corresponding input key."""
        data = factory.template()
        template = smartwaiver.types.SmartwaiverTemplate(data)
        # (input key, attribute name) pairs to verify.
        fields = (('templateId', 'template_id'), ('title', 'title'),
                  ('publishedVersion', 'published_version'),
                  ('publishedOn', 'published_on'), ('webUrl', 'web_url'),
                  ('kioskUrl', 'kiosk_url'))
        for key, attr in fields:
            self.assertEqual(data[key], getattr(template, attr))
class SmartwaiverTypeTest(unittest.TestCase):
    """Tests for the SmartwaiverType base class."""

    def test_required_keys(self):
        """A missing required key raises a descriptive ValueError."""
        with self.assertRaises(ValueError) as context:
            smartwaiver.types.SmartwaiverType(
                {'key1': 'val1'}, ['key1', 'key2'], 'SmartwaiverType')
        self.assertEqual(
            'Cannot create a SmartwaiverType with missing field: key2',
            str(context.exception))
class SmartwaiverWaiverSummaryTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverWaiverSummary."""

    def test_required_keys(self):
        """A missing required key raises a descriptive ValueError."""
        data = factory.waiver_summary()
        del data['waiverId']
        with self.assertRaises(ValueError) as context:
            smartwaiver.types.SmartwaiverWaiverSummary(data)
        self.assertEqual(
            'Cannot create a SmartwaiverWaiverSummary with missing field: waiverId',
            str(context.exception))

    def test_success(self):
        """Every attribute is populated from the corresponding input key."""
        data = factory.waiver_summary()
        summary = smartwaiver.types.SmartwaiverWaiverSummary(data)
        # (input key, attribute name) pairs to verify.
        fields = (
            ('waiverId', 'waiver_id'), ('templateId', 'template_id'),
            ('title', 'title'), ('createdOn', 'created_on'),
            ('expirationDate', 'expiration_date'), ('expired', 'expired'),
            ('verified', 'verified'), ('kiosk', 'kiosk'),
            ('firstName', 'first_name'), ('middleName', 'middle_name'),
            ('lastName', 'last_name'), ('dob', 'dob'),
            ('isMinor', 'is_minor'), ('tags', 'tags'),
        )
        for key, attr in fields:
            self.assertEqual(data[key], getattr(summary, attr))
class SmartwaiverWaiverTest(unittest.TestCase):
    """Tests for smartwaiver.types.SmartwaiverWaiver."""

    def test_required_keys(self):
        """A missing required key raises a descriptive ValueError."""
        waiver_data = factory.waiver()
        waiver_data.pop('waiverId')
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverWaiver(waiver_data)
        self.assertEqual('Cannot create a SmartwaiverWaiver with missing field: waiverId', str(cm.exception))

    def test_success(self):
        """Every attribute is populated from the corresponding input key."""
        waiver_data = factory.waiver()
        waiver = smartwaiver.types.SmartwaiverWaiver(waiver_data)
        self.assertEqual(waiver_data['waiverId'], waiver.waiver_id)
        self.assertEqual(waiver_data['templateId'], waiver.template_id)
        self.assertEqual(waiver_data['title'], waiver.title)
        self.assertEqual(waiver_data['createdOn'], waiver.created_on)
        self.assertEqual(waiver_data['expirationDate'], waiver.expiration_date)
        self.assertEqual(waiver_data['expired'], waiver.expired)
        self.assertEqual(waiver_data['verified'], waiver.verified)
        self.assertEqual(waiver_data['kiosk'], waiver.kiosk)
        self.assertEqual(waiver_data['firstName'], waiver.first_name)
        self.assertEqual(waiver_data['middleName'], waiver.middle_name)
        self.assertEqual(waiver_data['lastName'], waiver.last_name)
        self.assertEqual(waiver_data['dob'], waiver.dob)
        self.assertEqual(waiver_data['isMinor'], waiver.is_minor)
        self.assertEqual(waiver_data['tags'], waiver.tags)
        # BUG FIX: assertTrue(len(a), len(b)) treated the second length as
        # the failure *message* and never compared the two lengths.
        self.assertEqual(len(waiver_data['participants']), len(waiver.participants))
        for participant in waiver.participants:
            self.assertIs(type(participant), smartwaiver.types.SmartwaiverParticipant)
        self.assertEqual(waiver_data['email'], waiver.email)
        self.assertEqual(waiver_data['marketingAllowed'], waiver.marketing_allowed)
        self.assertEqual(waiver_data['addressLineOne'], waiver.address_line_one)
        self.assertEqual(waiver_data['addressLineTwo'], waiver.address_line_two)
        self.assertEqual(waiver_data['addressCity'], waiver.address_city)
        self.assertEqual(waiver_data['addressState'], waiver.address_state)
        self.assertEqual(waiver_data['addressZip'], waiver.address_zip)
        self.assertEqual(waiver_data['addressCountry'], waiver.address_country)
        self.assertEqual(waiver_data['emergencyContactName'], waiver.emergency_contact_name)
        self.assertEqual(waiver_data['emergencyContactPhone'], waiver.emergency_contact_phone)
        self.assertEqual(waiver_data['insuranceCarrier'], waiver.insurance_carrier)
        self.assertEqual(waiver_data['insurancePolicyNumber'], waiver.insurance_policy_number)
        self.assertEqual(waiver_data['driversLicenseNumber'], waiver.drivers_license_number)
        self.assertEqual(waiver_data['driversLicenseState'], waiver.drivers_license_state)
        # BUG FIX: same assertTrue-with-message problem as above.
        self.assertEqual(len(waiver_data['customWaiverFields']), len(waiver.custom_waiver_fields))
        for guid in waiver.custom_waiver_fields:
            self.assertIs(type(waiver.custom_waiver_fields[guid]), smartwaiver.types.SmartwaiverCustomField)
        self.assertIs(type(waiver.guardian), smartwaiver.types.SmartwaiverGuardian)
        self.assertEqual(waiver_data['pdf'], waiver.pdf)

    def test_participant_not_list(self):
        """A non-list participants field raises a descriptive ValueError."""
        waiver_data = factory.waiver()
        waiver_data['participants'] = ''
        with self.assertRaises(ValueError) as cm:
            smartwaiver.types.SmartwaiverWaiver(waiver_data)
        self.assertEqual('Participants field must be a list', str(cm.exception))
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
[
"unittest.main",
"smartwaiver.types.SmartwaiverGuardian",
"smartwaiver.types.SmartwaiverCustomField",
"factory.template",
"smartwaiver.types.SmartwaiverParticipant",
"sys.path.insert",
"factory.waiver",
"smartwaiver.types.SmartwaiverType",
"smartwaiver.types.SmartwaiverTemplate",
"factory.participant",
"factory.waiver_summary",
"factory.custom_field",
"factory.guardian",
"smartwaiver.types.SmartwaiverWaiverSummary",
"smartwaiver.types.SmartwaiverWaiver"
] |
[((604, 629), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (619, 629), False, 'import sys\n'), ((10518, 10533), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10531, 10533), False, 'import unittest\n'), ((784, 806), 'factory.custom_field', 'factory.custom_field', ([], {}), '()\n', (804, 806), False, 'import factory\n'), ((1141, 1163), 'factory.custom_field', 'factory.custom_field', ([], {}), '()\n', (1161, 1163), False, 'import factory\n'), ((1187, 1246), 'smartwaiver.types.SmartwaiverCustomField', 'smartwaiver.types.SmartwaiverCustomField', (['custom_field_data'], {}), '(custom_field_data)\n', (1227, 1246), False, 'import smartwaiver\n'), ((1519, 1537), 'factory.guardian', 'factory.guardian', ([], {}), '()\n', (1535, 1537), False, 'import factory\n'), ((1862, 1880), 'factory.guardian', 'factory.guardian', ([], {}), '()\n', (1878, 1880), False, 'import factory\n'), ((1904, 1956), 'smartwaiver.types.SmartwaiverGuardian', 'smartwaiver.types.SmartwaiverGuardian', (['guardian_data'], {}), '(guardian_data)\n', (1941, 1956), False, 'import smartwaiver\n'), ((2462, 2483), 'factory.participant', 'factory.participant', ([], {}), '()\n', (2481, 2483), False, 'import factory\n'), ((2823, 2844), 'factory.participant', 'factory.participant', ([], {}), '()\n', (2842, 2844), False, 'import factory\n'), ((2867, 2925), 'smartwaiver.types.SmartwaiverParticipant', 'smartwaiver.types.SmartwaiverParticipant', (['participant_data'], {}), '(participant_data)\n', (2907, 2925), False, 'import smartwaiver\n'), ((3932, 3950), 'factory.template', 'factory.template', ([], {}), '()\n', (3948, 3950), False, 'import factory\n'), ((4277, 4295), 'factory.template', 'factory.template', ([], {}), '()\n', (4293, 4295), False, 'import factory\n'), ((4315, 4367), 'smartwaiver.types.SmartwaiverTemplate', 'smartwaiver.types.SmartwaiverTemplate', (['template_data'], {}), '(template_data)\n', (4352, 4367), False, 'import smartwaiver\n'), ((5280, 5304), 
'factory.waiver_summary', 'factory.waiver_summary', ([], {}), '()\n', (5302, 5304), False, 'import factory\n'), ((5655, 5679), 'factory.waiver_summary', 'factory.waiver_summary', ([], {}), '()\n', (5677, 5679), False, 'import factory\n'), ((5705, 5768), 'smartwaiver.types.SmartwaiverWaiverSummary', 'smartwaiver.types.SmartwaiverWaiverSummary', (['waiver_summary_data'], {}), '(waiver_summary_data)\n', (5747, 5768), False, 'import smartwaiver\n'), ((7038, 7054), 'factory.waiver', 'factory.waiver', ([], {}), '()\n', (7052, 7054), False, 'import factory\n'), ((7367, 7383), 'factory.waiver', 'factory.waiver', ([], {}), '()\n', (7381, 7383), False, 'import factory\n'), ((7401, 7449), 'smartwaiver.types.SmartwaiverWaiver', 'smartwaiver.types.SmartwaiverWaiver', (['waiver_data'], {}), '(waiver_data)\n', (7436, 7449), False, 'import smartwaiver\n'), ((10232, 10248), 'factory.waiver', 'factory.waiver', ([], {}), '()\n', (10246, 10248), False, 'import factory\n'), ((910, 969), 'smartwaiver.types.SmartwaiverCustomField', 'smartwaiver.types.SmartwaiverCustomField', (['custom_field_data'], {}), '(custom_field_data)\n', (950, 969), False, 'import smartwaiver\n'), ((1641, 1693), 'smartwaiver.types.SmartwaiverGuardian', 'smartwaiver.types.SmartwaiverGuardian', (['guardian_data'], {}), '(guardian_data)\n', (1678, 1693), False, 'import smartwaiver\n'), ((2590, 2648), 'smartwaiver.types.SmartwaiverParticipant', 'smartwaiver.types.SmartwaiverParticipant', (['participant_data'], {}), '(participant_data)\n', (2630, 2648), False, 'import smartwaiver\n'), ((4055, 4107), 'smartwaiver.types.SmartwaiverTemplate', 'smartwaiver.types.SmartwaiverTemplate', (['template_data'], {}), '(template_data)\n', (4092, 4107), False, 'import smartwaiver\n'), ((4963, 5055), 'smartwaiver.types.SmartwaiverType', 'smartwaiver.types.SmartwaiverType', (["{'key1': 'val1'}", "['key1', 'key2']", '"""SmartwaiverType"""'], {}), "({'key1': 'val1'}, ['key1', 'key2'],\n 'SmartwaiverType')\n", (4996, 5055), False, 'import 
smartwaiver\n'), ((5413, 5476), 'smartwaiver.types.SmartwaiverWaiverSummary', 'smartwaiver.types.SmartwaiverWaiverSummary', (['waiver_summary_data'], {}), '(waiver_summary_data)\n', (5455, 5476), False, 'import smartwaiver\n'), ((7155, 7203), 'smartwaiver.types.SmartwaiverWaiver', 'smartwaiver.types.SmartwaiverWaiver', (['waiver_data'], {}), '(waiver_data)\n', (7190, 7203), False, 'import smartwaiver\n'), ((10354, 10402), 'smartwaiver.types.SmartwaiverWaiver', 'smartwaiver.types.SmartwaiverWaiver', (['waiver_data'], {}), '(waiver_data)\n', (10389, 10402), False, 'import smartwaiver\n')]
|
# Payment rest api serializers
from rest_framework import serializers
from rest_framework.serializers import (
SerializerMethodField,
IntegerField
)
from datetime import datetime
from ...payment.models import MpesaPayment
class MpesaPaymentUpdateSerializer(serializers.ModelSerializer):
    """Serializer used to update the status of an existing MpesaPayment."""

    # Status is constrained to the values 0 and 1.
    status = IntegerField(max_value=1, min_value=0)

    class Meta:
        model = MpesaPayment
        fields = ('id',
                  'ref_number',
                  'status'
                  )

    def update(self, instance, validated_data):
        """Apply validated `id`/`status` values to `instance` and persist it."""
        for attr in ('id', 'status'):
            setattr(instance, attr,
                    validated_data.get(attr, getattr(instance, attr)))
        instance.save()
        return instance
class MpesaPaymentListSerializer(serializers.ModelSerializer):
    """Read-only serializer exposing MpesaPayment rows with a formatted time."""

    # Creation timestamp, rendered by get_time() below.
    time = SerializerMethodField()

    class Meta:
        model = MpesaPayment
        fields = ('id',
                  'ref_number',
                  'phone',
                  'amount',
                  'first_name',
                  'middle_name',
                  'last_name',
                  'time',
                  'status')

    def get_time(self, obj):
        """Format the record's creation timestamp for display.

        NOTE(review): the format mixes %H (24-hour clock) with %p (AM/PM);
        confirm whether %I was intended before changing it.
        """
        return obj.created.strftime("%d/%m/%Y %H:%M:%S %p")
|
[
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.SerializerMethodField"
] |
[((313, 351), 'rest_framework.serializers.IntegerField', 'IntegerField', ([], {'max_value': '(1)', 'min_value': '(0)'}), '(max_value=1, min_value=0)\n', (325, 351), False, 'from rest_framework.serializers import SerializerMethodField, IntegerField\n'), ((726, 749), 'rest_framework.serializers.SerializerMethodField', 'SerializerMethodField', ([], {}), '()\n', (747, 749), False, 'from rest_framework.serializers import SerializerMethodField, IntegerField\n')]
|
#!/usr/bin/python
from __future__ import print_function
from icalendar import Calendar
import sys
def get_key_value(a):
    """Return the sort key for calendar component *a*.

    The first property present wins, in priority order:
    UID, DTSTART, DESCRIPTION, SUMMARY.  Components carrying none of
    them sort first (empty-string key).
    """
    # Fixes two defects of the original: `has_key` is Python 2 only
    # (removed in Python 3; `in` works on both), and the SUMMARY branch
    # was duplicated (dead code).
    for key in ("UID", "DTSTART", "DESCRIPTION", "SUMMARY"):
        if key in a:
            return a[key]
    return ""
if len(sys.argv) < 3:
    print("Usage: sort_ics.py in.ics out.ics")
    sys.exit(1)
# Read the calendar, sort its components in place, and write the result.
# Context managers close both file handles; the original leaked the input
# handle (open(...).read() with no close).
with open(sys.argv[1], 'rb') as infile:
    cal = Calendar.from_ical(infile.read())
cal.subcomponents.sort(key=get_key_value)
with open(sys.argv[2], 'wb') as outfile:
    outfile.write(cal.to_ical())
|
[
"sys.exit"
] |
[((514, 525), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (522, 525), False, 'import sys\n')]
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, render_template, request
import googlemaps
from settings import API_KEY
# Module-level singletons shared by all requests.
app = Flask(__name__)
GMAPS = googlemaps.Client(key=API_KEY)  # Google Maps geocoding client
ADDRESS = '1600 Amphitheatre Pkwy 94043'  # default address pre-filled in the form
@app.route('/', methods=['GET', 'POST'])
def mapsgeo(gcf_request=None):
    """
    Main handler: render the form and, on POST, geocode the submitted
    address and list the matching locations.
    """
    # Cloud Functions passes its own Flask Request object; otherwise use
    # the regular flask.request proxy.
    req = gcf_request if gcf_request else request

    # GET (and the reset case) shows the default address with no results.
    address = ADDRESS
    results = []

    if req.method == 'POST':
        # Fall back to the default when the field is blank.
        address = req.form['address'].strip() or ADDRESS
        response = GMAPS.geocode(address)
        if response:
            for entry in response:
                if 'geometry' in entry and 'location' in entry['geometry']:
                    location = entry['geometry']['location']
                    results.append({
                        'full_addr': entry['formatted_address'],
                        'latlong': '%s, %s' % (location['lat'], location['lng']),
                    })

    return render_template('index.html', address=address, results=results)
if __name__ == '__main__':
    import os
    # Local development server; PORT can be overridden via the environment.
    server_port = int(os.environ.get('PORT', 8080))
    app.run(debug=True, threaded=True, host='0.0.0.0', port=server_port)
|
[
"googlemaps.Client",
"os.environ.get",
"flask.Flask",
"flask.render_template"
] |
[((678, 693), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (683, 693), False, 'from flask import Flask, render_template, request\n'), ((702, 732), 'googlemaps.Client', 'googlemaps.Client', ([], {'key': 'API_KEY'}), '(key=API_KEY)\n', (719, 732), False, 'import googlemaps\n'), ((1913, 1953), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html', **context)\n", (1928, 1953), False, 'from flask import Flask, render_template, request\n'), ((2073, 2101), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '(8080)'], {}), "('PORT', 8080)\n", (2087, 2101), False, 'import os\n')]
|
from pathlib import Path
from subprocess import run
def add_file(file_name):
    """Add *file_name* to the local IPFS node.

    Runs ``ipfs add -Q`` and returns a one-entry dict mapping the
    resulting IPFS hash to the file's resolved absolute path.
    """
    resolved_path = Path(file_name).resolve()
    proc = run(["ipfs", "add", "-Q", resolved_path],
               capture_output=True, text=True)
    return {proc.stdout.strip(): resolved_path}
def find_file_locations(ipfs_hash):
    """Query the IPFS DHT for peers providing *ipfs_hash*.

    Returns the stripped stdout of ``ipfs dht findprovs`` (one peer
    per line).
    """
    proc = run(["ipfs", "dht", "findprovs", ipfs_hash],
               capture_output=True, text=True)
    return proc.stdout.strip()
if __name__ == '__main__':
    # Demo: add a file, then look up which peers provide it.
    example_file_name = "hello"
    added = add_file(example_file_name)
    ipfs_hash_val, file_path = next(iter(added.items()))
    providers = find_file_locations(ipfs_hash_val)
    # NOTE(review): the split result is discarded — presumably it was
    # meant to be printed or returned; behavior kept as-is.
    providers.splitlines()
|
[
"subprocess.run",
"pathlib.Path"
] |
[((180, 224), 'subprocess.run', 'run', (['command'], {'capture_output': '(True)', 'text': '(True)'}), '(command, capture_output=True, text=True)\n', (183, 224), False, 'from subprocess import run\n'), ((403, 447), 'subprocess.run', 'run', (['command'], {'capture_output': '(True)', 'text': '(True)'}), '(command, capture_output=True, text=True)\n', (406, 447), False, 'from subprocess import run\n'), ((94, 109), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (98, 109), False, 'from pathlib import Path\n')]
|
import os
from wintermute import APP
def runserver():
    """Run the wintermute app on all interfaces.

    The port comes from the PORT environment variable (default 5000).
    """
    listen_port = int(os.environ.get('PORT', 5000))
    APP.run(host='0.0.0.0', port=listen_port, debug=True)


if __name__ == '__main__':
    runserver()
|
[
"os.environ.get",
"wintermute.APP.run"
] |
[((105, 151), 'wintermute.APP.run', 'APP.run', ([], {'host': '"""0.0.0.0"""', 'port': 'port', 'debug': '(True)'}), "(host='0.0.0.0', port=port, debug=True)\n", (112, 151), False, 'from wintermute import APP\n'), ((71, 99), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '(5000)'], {}), "('PORT', 5000)\n", (85, 99), False, 'import os\n')]
|
from typing import Dict, Any
from uuid import uuid4
from inventorycalculator.core.loaders.file_loader import FileLoader
from inventorycalculator.core.parsers.inventory_parser import InventoryParser
from inventorycalculator.core.repositories.dynamodb import DynamoDBTable
from inventorycalculator.core.storages.s3_storage import S3Storage
from inventorycalculator.core.workers.aws_lambda import AwsLambda
from inventorycalculator.errors import S3StorageError, DynamoDBError, AsyncWorkerError, InvalidInventoryDataFormatError
from inventorycalculator.settings import S3_BUCKET, TABLE_NAME, STATUSES, ASYNC_WORKER
from OneTicketLogging import elasticsearch_logger
# Module-level collaborators, created once per process / Lambda container.
_logger = elasticsearch_logger(__name__)
file_loader = FileLoader()  # fetches inventory files by URL (see crawl_job_handler)
storage = S3Storage(S3_BUCKET)  # holds uploaded job payloads
db_table = DynamoDBTable(TABLE_NAME)  # job status / total_value records
async_worker = AwsLambda(ASYNC_WORKER)  # background worker lambda
inventory_parser = InventoryParser()  # parses TSV inventory data
def crawl_job_handler(event: Dict[str, Any], _: Any) -> Dict:
    """Create an inventory-calculator job and hand it off for async processing.

    Downloads the inventory file named by ``event['url']``, stores it in S3
    under a freshly generated job id, triggers the async worker, and records
    the job as RUNNING with a zero total.  Returns ``{'job_id': <id>}``.
    """
    _logger.info(event)
    inventory_data = file_loader.by_url(event['url'])
    job = {'job_id': str(uuid4())}
    storage.upload(job['job_id'], inventory_data)
    async_worker.async_invoke(job)
    db_table.put(dict(job, status=STATUSES.RUNNING, total_value=0))
    return job
def async_worker_handler(event: Dict[str, Any], _: Any):
    """Process the tickets of a previously created job.

    Parses the TSV payload stored under ``event['job_id']``, sums the
    ticket values, and marks the job SUCCEEDED with the total.  On any
    storage / database / parse failure the job is marked FAILED and an
    AsyncWorkerError is raised.

    :raises AsyncWorkerError: when the job cannot be processed.
    """
    _logger.info(event)
    job_id = event.get('job_id')
    try:
        db_table.get(job_id)
        tickets = inventory_parser.from_tsv(storage.get(job_id))
        # Generator avoids materializing an intermediate list.
        total_value = sum(ticket.value for ticket in tickets)
        db_table.put({
            'job_id': job_id,
            'status': STATUSES.SUCCEEDED,
            'total_value': total_value
        })
    except (S3StorageError, DynamoDBError, InvalidInventoryDataFormatError) as e:
        _logger.error(e)
        db_table.put({
            'job_id': job_id,
            'status': STATUSES.FAILED
        })
        # Chain the original cause explicitly so the root failure survives.
        raise AsyncWorkerError(f'Unable to proceed job with "job_id":{job_id}') from e
def status_check_handler(event: Dict[str, Any], _: Any) -> Dict:
    """Report the processing status of the job named by ``event['job_id']``.

    Returns a dict with the job's ``status`` and ``total_value``.
    """
    _logger.info(event)
    record = db_table.get(event['job_id'])
    return {key: record[key] for key in ('status', 'total_value')}
|
[
"inventorycalculator.core.repositories.dynamodb.DynamoDBTable",
"uuid.uuid4",
"inventorycalculator.core.parsers.inventory_parser.InventoryParser",
"OneTicketLogging.elasticsearch_logger",
"inventorycalculator.core.storages.s3_storage.S3Storage",
"inventorycalculator.core.workers.aws_lambda.AwsLambda",
"inventorycalculator.errors.AsyncWorkerError",
"inventorycalculator.core.loaders.file_loader.FileLoader"
] |
[((673, 703), 'OneTicketLogging.elasticsearch_logger', 'elasticsearch_logger', (['__name__'], {}), '(__name__)\n', (693, 703), False, 'from OneTicketLogging import elasticsearch_logger\n'), ((718, 730), 'inventorycalculator.core.loaders.file_loader.FileLoader', 'FileLoader', ([], {}), '()\n', (728, 730), False, 'from inventorycalculator.core.loaders.file_loader import FileLoader\n'), ((741, 761), 'inventorycalculator.core.storages.s3_storage.S3Storage', 'S3Storage', (['S3_BUCKET'], {}), '(S3_BUCKET)\n', (750, 761), False, 'from inventorycalculator.core.storages.s3_storage import S3Storage\n'), ((773, 798), 'inventorycalculator.core.repositories.dynamodb.DynamoDBTable', 'DynamoDBTable', (['TABLE_NAME'], {}), '(TABLE_NAME)\n', (786, 798), False, 'from inventorycalculator.core.repositories.dynamodb import DynamoDBTable\n'), ((814, 837), 'inventorycalculator.core.workers.aws_lambda.AwsLambda', 'AwsLambda', (['ASYNC_WORKER'], {}), '(ASYNC_WORKER)\n', (823, 837), False, 'from inventorycalculator.core.workers.aws_lambda import AwsLambda\n'), ((857, 874), 'inventorycalculator.core.parsers.inventory_parser.InventoryParser', 'InventoryParser', ([], {}), '()\n', (872, 874), False, 'from inventorycalculator.core.parsers.inventory_parser import InventoryParser\n'), ((1096, 1103), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1101, 1103), False, 'from uuid import uuid4\n'), ((2008, 2073), 'inventorycalculator.errors.AsyncWorkerError', 'AsyncWorkerError', (['f"""Unable to proceed job with "job_id":{job_id}"""'], {}), '(f\'Unable to proceed job with "job_id":{job_id}\')\n', (2024, 2073), False, 'from inventorycalculator.errors import S3StorageError, DynamoDBError, AsyncWorkerError, InvalidInventoryDataFormatError\n')]
|
from dataclasses import dataclass
@dataclass(frozen=True)
class Conf:
    """Immutable application configuration for the FIN-PRTR API."""
    api_title: str
    api_description: str
    facilities_csv_fp: str
    releases_csv_fp: str
    waste_transfers_csv_fp: str
    api_version: str


_API_DESCRIPTION = (
    'The European Pollutant Release and Transfer Register (E-PRTR) '
    'data published as a national web service '
    '(serving only Finnish PRTR data).'
)

# The single configuration instance used throughout the API.
conf = Conf(
    api_title='FIN-PRTR',
    api_description=_API_DESCRIPTION,
    api_version='v1',
    facilities_csv_fp='api/assets/facilities.csv',
    releases_csv_fp='api/assets/releases.csv',
    waste_transfers_csv_fp='api/assets/waste_transfers.csv',
)
|
[
"dataclasses.dataclass"
] |
[((37, 59), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (46, 59), False, 'from dataclasses import dataclass\n')]
|
import unittest
import mock
import secretcrypt
from secretcrypt import StrictSecret, Secret
class TestSecret(unittest.TestCase):
    """Unit tests for StrictSecret (lazy) and Secret (eager) decryption."""

    @mock.patch('importlib.import_module')
    def test_decrypt(self, mock_import_module):
        """StrictSecret parses the secret string and dispatches to the crypter."""
        mock_crypter_module = mock.MagicMock()
        mock_crypter_module.__name__ = 'secretcrypt.mock_crypter'

        def mock_import_side_effect(*args, **kwargs):
            self.assertEqual(kwargs['package'], secretcrypt.__name__)
            if args[0] == '.mock_crypter':
                return mock_crypter_module
            raise Exception('Importing wrong module')
        mock_import_module.side_effect = mock_import_side_effect

        secret = StrictSecret('mock_crypter:key=value&key2=value2:myciphertext')
        self.assertEqual(secret._decrypt_params, dict(key='value', key2='value2'))
        self.assertEqual(secret._ciphertext, b'myciphertext')
        secret.decrypt()
        secret.decrypt()
        mock_crypter_module.decrypt.assert_called_with(
            b'myciphertext',
            key='value',
            key2='value2',
        )

    def test_decrypt_plain(self):
        """The 'plain' scheme returns the stored bytes unchanged."""
        secret = StrictSecret('plain::mypass')
        self.assertEqual(b'mypass', secret.decrypt())

    @mock.patch('importlib.import_module')
    def test_eager_decrypt(self, mock_import_module):
        """Secret decrypts at construction time and caches the plaintext."""
        mock_crypter_module = mock.MagicMock()
        mock_crypter_module.decrypt.side_effect = lambda *args, **kwargs: b'plaintext'
        mock_crypter_module.__name__ = 'secretcrypt.mock_crypter'

        def mock_import_side_effect(*args, **kwargs):
            self.assertEqual(kwargs['package'], secretcrypt.__name__)
            if args[0] == '.mock_crypter':
                return mock_crypter_module
            raise Exception('Importing wrong module')
        mock_import_module.side_effect = mock_import_side_effect

        secret = Secret('mock_crypter:key=value&key2=value2:myciphertext')
        mock_crypter_module.decrypt.assert_called_with(
            b'myciphertext',
            key='value',
            key2='value2',
        )
        mock_crypter_module.reset_mock()
        plaintext = secret.get()
        self.assertEqual(b'plaintext', plaintext)
        # BUG FIX: the original asserted `mock_crypter_module.assert_not_called()`,
        # which is vacuously true — the module object itself is never *called*.
        # The intent is that get() serves the cached plaintext without
        # decrypting a second time.
        mock_crypter_module.decrypt.assert_not_called()
|
[
"secretcrypt.StrictSecret",
"mock.MagicMock",
"secretcrypt.Secret",
"mock.patch"
] |
[((138, 175), 'mock.patch', 'mock.patch', (['"""importlib.import_module"""'], {}), "('importlib.import_module')\n", (148, 175), False, 'import mock\n'), ((1234, 1271), 'mock.patch', 'mock.patch', (['"""importlib.import_module"""'], {}), "('importlib.import_module')\n", (1244, 1271), False, 'import mock\n'), ((254, 270), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (268, 270), False, 'import mock\n'), ((685, 748), 'secretcrypt.StrictSecret', 'StrictSecret', (['"""mock_crypter:key=value&key2=value2:myciphertext"""'], {}), "('mock_crypter:key=value&key2=value2:myciphertext')\n", (697, 748), False, 'from secretcrypt import StrictSecret, Secret\n'), ((1144, 1173), 'secretcrypt.StrictSecret', 'StrictSecret', (['"""plain::mypass"""'], {}), "('plain::mypass')\n", (1156, 1173), False, 'from secretcrypt import StrictSecret, Secret\n'), ((1356, 1372), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1370, 1372), False, 'import mock\n'), ((1874, 1931), 'secretcrypt.Secret', 'Secret', (['"""mock_crypter:key=value&key2=value2:myciphertext"""'], {}), "('mock_crypter:key=value&key2=value2:myciphertext')\n", (1880, 1931), False, 'from secretcrypt import StrictSecret, Secret\n')]
|
from .conftest import GoProCameraTest
from goprocam import GoProCamera
from socket import timeout
class GetMediaTest(GoProCameraTest):
    """Tests for GoPro.getMedia() URL construction and error handling."""
    def test_get_media_FS(self):
        # Fusion cameras delegate to getMediaFusion().
        with self.monkeypatch.context() as m:
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'FS')
            m.setattr(GoProCamera.GoPro, 'getMediaFusion', lambda s: 'MF')
            assert self.goprocam.getMedia() == 'MF'
    def test_get_media_empty(self):
        # Empty media list: getMedia() returns a URL with no folder/file part.
        with self.monkeypatch.context() as m:
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'HD')
            # tut tut. this should raise an exception or return None
            assert self.goprocam.getMedia() ==\
                'http://10.5.5.9/videos/DCIM//'
    def test_get_media_empty_folder(self):
        # Folder exists but holds no files: URL ends at the folder.
        with self.monkeypatch.context() as m:
            self.responses['/gp/gpMediaList'] = {
                'media': [
                    {
                        'd': 'folder',
                        'fs': [
                        ]
                    }
                ]
            }
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'HD')
            assert self.goprocam.getMedia() ==\
                'http://10.5.5.9/videos/DCIM/folder/'
    def test_get_media(self):
        # Happy path: last folder/file from the media list form the URL.
        with self.monkeypatch.context() as m:
            self.responses['/gp/gpMediaList'] = {
                'media': [
                    {
                        'd': 'folder',
                        'fs': [
                            {'n': 'file'}
                        ]
                    }
                ]
            }
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'HD')
            assert self.goprocam.getMedia() ==\
                'http://10.5.5.9/videos/DCIM/folder/file'
    def test_get_media_timeout(self):
        # A socket timeout while fetching the media list yields ''.
        with self.monkeypatch.context() as m:
            self.responses['/gp/gpMediaList'] = timeout()
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'HD')
            assert self.goprocam.getMedia() == ''
    def test_get_media_httperror(self):
        # A missing endpoint (HTTP error) likewise yields ''.
        with self.monkeypatch.context() as m:
            del(self.responses['/gp/gpMediaList'])
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'HD')
            assert self.goprocam.getMedia() == ''
|
[
"socket.timeout"
] |
[((1937, 1946), 'socket.timeout', 'timeout', ([], {}), '()\n', (1944, 1946), False, 'from socket import timeout\n')]
|
from collections import deque
import itertools
def moving_average(iterable, n=3):
    """Yield the mean of each length-*n* sliding window of *iterable*.

    moving_average([40, 30, 50, 46, 39, 44]) --> 40.0 42.0 45.0 43.0
    See http://en.wikipedia.org/wiki/Moving_average
    """
    values = iter(iterable)
    # Seed the window with the first n-1 items plus a leading 0 so the
    # running total can be updated in O(1) per element thereafter.
    window = deque(itertools.islice(values, n - 1))
    window.appendleft(0)
    total = sum(window)
    for value in values:
        total += value - window.popleft()
        window.append(value)
        yield total / n
# Demo: advance the generator through the first three window averages.
ma = moving_average([40, 30, 50, 46, 39, 44])
for _ in range(3):
    next(ma)
|
[
"itertools.islice"
] |
[((291, 318), 'itertools.islice', 'itertools.islice', (['it', '(n - 1)'], {}), '(it, n - 1)\n', (307, 318), False, 'import itertools\n')]
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
import torch
import flash
from flash.core.classification import FiftyOneLabels, Labels
from flash.core.data.utils import download_data
from flash.core.integrations.fiftyone import visualize
from flash.image import ImageClassificationData, ImageClassifier
# 1. Download the hymenoptera (ants vs. bees) demo dataset.
download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip")
# 2. Build a datamodule from the per-split image folders.
datamodule = ImageClassificationData.from_folders(
    train_folder="data/hymenoptera_data/train/",
    val_folder="data/hymenoptera_data/val/",
    test_folder="data/hymenoptera_data/test/",
    predict_folder="data/hymenoptera_data/predict/",
)
# 3. Fine-tune a ResNet-18 classifier (labels as output).
model = ImageClassifier(
    backbone="resnet18",
    num_classes=datamodule.num_classes,
    output=Labels(),
)
# Single epoch with 1 batch per phase — a quick smoke-style run.
trainer = flash.Trainer(
    max_epochs=1,
    gpus=torch.cuda.device_count(),
    limit_train_batches=1,
    limit_val_batches=1,
)
# "freeze_unfreeze": backbone frozen for 1 epoch, then unfrozen.
trainer.finetune(
    model,
    datamodule=datamodule,
    strategy=("freeze_unfreeze", 1),
)
trainer.save_checkpoint("image_classification_model.pt")
# 4. Predict from a published checkpoint.
model = ImageClassifier.load_from_checkpoint(
    "https://flash-weights.s3.amazonaws.com/0.6.0/image_classification_model.pt"
)
model.output = FiftyOneLabels(return_filepath=True)  # output FiftyOne format
predictions = trainer.predict(model, datamodule=datamodule)
predictions = list(chain.from_iterable(predictions))  # flatten batches
# 5. Visualize predictions in the FiftyOne App.
# Optional: pass `wait=True` to block execution until App is closed
session = visualize(predictions)
|
[
"flash.core.classification.FiftyOneLabels",
"flash.core.data.utils.download_data",
"flash.core.classification.Labels",
"torch.cuda.device_count",
"flash.image.ImageClassifier.load_from_checkpoint",
"flash.core.integrations.fiftyone.visualize",
"itertools.chain.from_iterable",
"flash.image.ImageClassificationData.from_folders"
] |
[((890, 966), 'flash.core.data.utils.download_data', 'download_data', (['"""https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip"""'], {}), "('https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip')\n", (903, 966), False, 'from flash.core.data.utils import download_data\n'), ((995, 1223), 'flash.image.ImageClassificationData.from_folders', 'ImageClassificationData.from_folders', ([], {'train_folder': '"""data/hymenoptera_data/train/"""', 'val_folder': '"""data/hymenoptera_data/val/"""', 'test_folder': '"""data/hymenoptera_data/test/"""', 'predict_folder': '"""data/hymenoptera_data/predict/"""'}), "(train_folder=\n 'data/hymenoptera_data/train/', val_folder='data/hymenoptera_data/val/',\n test_folder='data/hymenoptera_data/test/', predict_folder=\n 'data/hymenoptera_data/predict/')\n", (1031, 1223), False, 'from flash.image import ImageClassificationData, ImageClassifier\n'), ((1687, 1811), 'flash.image.ImageClassifier.load_from_checkpoint', 'ImageClassifier.load_from_checkpoint', (['"""https://flash-weights.s3.amazonaws.com/0.6.0/image_classification_model.pt"""'], {}), "(\n 'https://flash-weights.s3.amazonaws.com/0.6.0/image_classification_model.pt'\n )\n", (1723, 1811), False, 'from flash.image import ImageClassificationData, ImageClassifier\n'), ((1823, 1859), 'flash.core.classification.FiftyOneLabels', 'FiftyOneLabels', ([], {'return_filepath': '(True)'}), '(return_filepath=True)\n', (1837, 1859), False, 'from flash.core.classification import FiftyOneLabels, Labels\n'), ((2139, 2161), 'flash.core.integrations.fiftyone.visualize', 'visualize', (['predictions'], {}), '(predictions)\n', (2148, 2161), False, 'from flash.core.integrations.fiftyone import visualize\n'), ((1965, 1997), 'itertools.chain.from_iterable', 'chain.from_iterable', (['predictions'], {}), '(predictions)\n', (1984, 1997), False, 'from itertools import chain\n'), ((1353, 1361), 'flash.core.classification.Labels', 'Labels', ([], {}), '()\n', (1359, 1361), False, 'from 
flash.core.classification import FiftyOneLabels, Labels\n'), ((1417, 1442), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1440, 1442), False, 'import torch\n')]
|
# coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class AddonApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def add_addon(self, name, base_prices, **kwargs):
"""
Create a new addon
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_addon(name, base_prices, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str name: Name of the type of addon (required)
:param int base_prices: Prices for addon (required)
:param str description: Description of the addon
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.add_addon_with_http_info(name, base_prices, **kwargs)
else:
(data) = self.add_addon_with_http_info(name, base_prices, **kwargs)
return data
    def add_addon_with_http_info(self, name, base_prices, **kwargs):
        """
        Create a new addon
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.add_addon_with_http_info(name, base_prices, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str name: Name of the type of addon (required)
        :param int base_prices: Prices for addon (required)
        :param str description: Description of the addon
        :return: InlineResponse2002
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['name', 'base_prices', 'description']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # Fold **kwargs into the locals() dict, rejecting unknown keywords.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_addon" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `add_addon`")
        # verify the required parameter 'base_prices' is set
        if ('base_prices' not in params) or (params['base_prices'] is None):
            raise ValueError("Missing the required parameter `base_prices` when calling `add_addon`")
        resource_path = '/addon/add'.replace('{format}', 'json')
        path_params = {}
        # All addon fields are sent as query parameters, not in the body.
        query_params = {}
        if 'name' in params:
            query_params['name'] = params['name']
        if 'description' in params:
            query_params['description'] = params['description']
        if 'base_prices' in params:
            query_params['base_prices'] = params['base_prices']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = []
        return self.api_client.call_api(resource_path, 'POST',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='InlineResponse2002',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'),
                                            _return_http_data_only=params.get('_return_http_data_only'))
def delete_addon(self, **kwargs):
"""
Delete an addon by ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_addon(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int body: ID of the Addon
:return: InlineResponse2003
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_addon_with_http_info(**kwargs)
else:
(data) = self.delete_addon_with_http_info(**kwargs)
return data
    def delete_addon_with_http_info(self, **kwargs):
        """
        Delete an addon by ID
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_addon_with_http_info(callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int body: ID of the Addon
        :return: InlineResponse2003
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # Fold **kwargs into the locals() dict, rejecting unknown keywords.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_addon" % key
                )
            params[key] = val
        del params['kwargs']
        resource_path = '/addon/delete'.replace('{format}', 'json')
        path_params = {}
        # The addon id is (unusually) passed as the 'body' query parameter.
        query_params = {}
        if 'body' in params:
            query_params['body'] = params['body']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = []
        return self.api_client.call_api(resource_path, 'DELETE',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='InlineResponse2003',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'),
                                            _return_http_data_only=params.get('_return_http_data_only'))
def get_addon(self, id, **kwargs):
"""
Retrieve an addon by ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_addon(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: ID of the addon to be retrieved (required)
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_addon_with_http_info(id, **kwargs)
else:
(data) = self.get_addon_with_http_info(id, **kwargs)
return data
    def get_addon_with_http_info(self, id, **kwargs):
        """
        Retrieve an addon by ID
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_addon_with_http_info(id, callback=callback_function)
        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int id: ID of the addon to be retrieved (required)
        :return: InlineResponse2001
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # Fold **kwargs into the locals() dict, rejecting unknown keywords.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_addon" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params) or (params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_addon`")
        resource_path = '/addon'.replace('{format}', 'json')
        path_params = {}
        query_params = {}
        if 'id' in params:
            query_params['id'] = params['id']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = []
        return self.api_client.call_api(resource_path, 'GET',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='InlineResponse2001',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'),
                                            _return_http_data_only=params.get('_return_http_data_only'))
def get_all_addons(self, **kwargs):
"""
Retrieve all addons
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_addons(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Addon]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_addons_with_http_info(**kwargs)
else:
(data) = self.get_all_addons_with_http_info(**kwargs)
return data
def get_all_addons_with_http_info(self, **kwargs):
"""
Retrieve all addons
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_addons_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Addon]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_addons" % key
)
params[key] = val
del params['kwargs']
resource_path = '/addon/all'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Addon]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_all_with_trashed_addons(self, **kwargs):
"""
Retrieve all addons including any deleted models
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_with_trashed_addons(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Addon]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_with_trashed_addons_with_http_info(**kwargs)
else:
(data) = self.get_all_with_trashed_addons_with_http_info(**kwargs)
return data
def get_all_with_trashed_addons_with_http_info(self, **kwargs):
"""
Retrieve all addons including any deleted models
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_with_trashed_addons_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: list[Addon]
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_with_trashed_addons" % key
)
params[key] = val
del params['kwargs']
resource_path = '/addon/all-with-trashed'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Addon]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def update_addon(self, id, **kwargs):
"""
Update an Addon
Updates the addon by id using the specified fields
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_addon(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: ID of the Addon to be updated (required)
:param str name: Name of the Addon
:param str description: Description of the Addon
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_addon_with_http_info(id, **kwargs)
else:
(data) = self.update_addon_with_http_info(id, **kwargs)
return data
def update_addon_with_http_info(self, id, **kwargs):
"""
Update an Addon
Updates the addon by id using the specified fields
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_addon_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: ID of the Addon to be updated (required)
:param str name: Name of the Addon
:param str description: Description of the Addon
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'name', 'description']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_addon" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_addon`")
resource_path = '/addon/edit'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'id' in params:
query_params['id'] = params['id']
if 'name' in params:
query_params['name'] = params['name']
if 'description' in params:
query_params['description'] = params['description']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2002',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
|
[
"six.iteritems"
] |
[((4022, 4049), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (4031, 4049), False, 'from six import iteritems\n'), ((8411, 8438), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (8420, 8438), False, 'from six import iteritems\n'), ((12214, 12241), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (12223, 12241), False, 'from six import iteritems\n'), ((16050, 16077), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (16059, 16077), False, 'from six import iteritems\n'), ((19747, 19774), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (19756, 19774), False, 'from six import iteritems\n'), ((23807, 23834), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (23816, 23834), False, 'from six import iteritems\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='tinydb-git',
version='0.2.dev1',
description='A git-based storage backend for tinydb.',
long_description=read('README.rst'),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/mbr/tinydb-git',
license='MIT',
packages=find_packages(exclude=['tests']),
install_requires=['dulwich', 'tinydb'],
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
]
)
|
[
"os.path.dirname",
"setuptools.find_packages"
] |
[((474, 506), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (487, 506), False, 'from setuptools import setup, find_packages\n'), ((150, 175), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (165, 175), False, 'import os\n')]
|
import dns
import subprocess
import dns.resolver
import traceback
import urllib3
urllib3.disable_warnings()
__domains = ['kh.ssl.ak.tiles.virtualearth.net', 'khstorelive.azureedge.net']
__default_ip = {
'kh.ssl.ak.tiles.virtualearth.net': '172.16.31.10',
'khstorelive.azureedge.net': '192.168.127.12'
}
host_path = "C:\\Windows\\System32\\drivers\\etc\\hosts"
host_entries = [f"\n127.0.0.1 {domain}\n" for domain in __domains]
def add_cert():
subprocess.run(["certutil", "-addstore", "-f", "root",
".\\certs\\cert.crt"], shell=True, check=True)
def get_hosts_origin_ips():
try:
origin_ips = {}
dns_resolver = dns.resolver.Resolver()
for d in __domains:
origin_ips[d] = dns_resolver.resolve(d)[0].to_text()
print(origin_ips)
return origin_ips
except dns.exception.Timeout:
traceback.print_exc()
return __default_ip
def override_hosts():
print("Overriding hosts")
with open(host_path, "a") as f:
f.writelines(host_entries)
def restore_hosts():
print("Restoring hosts")
with open(host_path, "r+") as f:
host = f.read()
for line in host_entries:
host = host.replace(line, "")
f.seek(0)
f.write(host)
f.truncate()
|
[
"subprocess.run",
"dns.resolver.Resolver",
"traceback.print_exc",
"urllib3.disable_warnings"
] |
[((88, 114), 'urllib3.disable_warnings', 'urllib3.disable_warnings', ([], {}), '()\n', (112, 114), False, 'import urllib3\n'), ((477, 583), 'subprocess.run', 'subprocess.run', (["['certutil', '-addstore', '-f', 'root', '.\\\\certs\\\\cert.crt']"], {'shell': '(True)', 'check': '(True)'}), "(['certutil', '-addstore', '-f', 'root', '.\\\\certs\\\\cert.crt'\n ], shell=True, check=True)\n", (491, 583), False, 'import subprocess\n'), ((692, 715), 'dns.resolver.Resolver', 'dns.resolver.Resolver', ([], {}), '()\n', (713, 715), False, 'import dns\n'), ((909, 930), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (928, 930), False, 'import traceback\n')]
|
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import Normalize
import json
with open('../tokamaks.json') as f:
tokamaks = json.load(f)
tokamaks_names = []
radii = []
fig, ax = plt.subplots()
for tokamak in tokamaks:
if tokamak["configuration"] in ["tokamak", "stellarator"]:
if "R" in tokamak:
tokamaks_names.append(tokamak["name"])
radii.append(float(tokamak["R"]))
tokamaks_names = [
x for _, x in sorted(zip(radii, tokamaks_names), reverse=True)
]
radii = sorted(radii, reverse=True)
pos_x = 0
pos_y = 0
max_x = 40
min_x = 1 + max(radii)
to_right = True
left_or_right = 1
radii_row = []
texts = []
switch = False
for name, radius in zip(tokamaks_names, radii):
if to_right and pos_x >= max_x:
offset_y = -(max(radii_row)*1.5 + 2)
pos_y += offset_y/2
# pos_x = max_x
to_right = False
left_or_right = -1
radii_row = []
switch = True
if not to_right and pos_x <= min_x:
offset_y = -(max(radii_row)*1.5 + 2)
pos_y += offset_y/2
# pos_x = min_x
to_right = True
left_or_right = 1
radii_row = []
switch = True
radii_row.append(radius)
if not switch:
pos_x += left_or_right*(1 + radius)
circle = plt.Circle(
(pos_x, pos_y), radius,
color=cm.viridis(radius/max(radii)), fill=True)
ax.add_patch(circle)
if radius > 1.7:
text = plt.text(
pos_x - 0.3*len(name), pos_y, name,
weight="bold", fontsize=12+radius**4/len(name)**3)
texts.append(text)
elif radius > 0.3:
text = plt.text(
pos_x - 0.15*len(name), pos_y + radius, name,
rotation=15)
texts.append(text)
elif radius > 0:
text = plt.text(
pos_x - 0.15*len(name), pos_y + radius, name,
rotation=30)
texts.append(text)
if switch:
pos_y += offset_y/2
switch = False
else:
pos_x += left_or_right*radius
# tweak text manually
texts[0].set_fontsize(28)
texts[0].set_position((5, -0.5))
texts[1].set_fontsize(15)
texts[1].set_position((16, -0.5))
texts[2].set_fontsize(16)
texts[2].set_position((29, -0.5))
# texts[5].set_fontsize(16)
# texts[5].set_position((29, -0.5))
ax.set_xlim((0, max_x + 7))
ax.set_ylim((pos_y*1.1, 10))
ax.set_aspect('equal', adjustable='box')
plt.axis('off')
plt.colorbar(
cm.ScalarMappable(norm=Normalize(0, max(radii)), cmap=cm.viridis),
label="Major radius (m)")
plt.tight_layout()
plt.show()
|
[
"json.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout"
] |
[((219, 233), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (231, 233), True, 'import matplotlib.pyplot as plt\n'), ((2428, 2443), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2436, 2443), True, 'import matplotlib.pyplot as plt\n'), ((2561, 2579), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2577, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2580, 2590), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2588, 2590), True, 'import matplotlib.pyplot as plt\n'), ((162, 174), 'json.load', 'json.load', (['f'], {}), '(f)\n', (171, 174), False, 'import json\n')]
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import csv
import datetime
import io
import json
import pkgutil
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import requests
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream, HttpSubStream
from airbyte_cdk.sources.streams.http.requests_native_auth import Oauth2Authenticator
from airbyte_cdk.sources.utils.transform import TransformConfig, TypeTransformer
class JobsResource(HttpStream):
"""
https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs
All YouTube Analytics streams require a created reporting job.
This class allows to `list` all existing reporting jobs or `create` new reporting job for a specific stream. One stream can have only one reporting job.
By creating a reporting job, you are instructing YouTube to generate stream data on a daily basis. If reporting job is removed YouTube removes all stream data.
On every connector invocation, it gets a list of all running reporting jobs, if the currently processed stream has a reporting job - connector does nothing,
but if the currently processed stream does not have a job connector immediately creates one. This connector does not store IDs of reporting jobs.
If the reporting job was created by the user separately, this connector just uses that job. This connector does not remove reporting jobs it can only create them.
After reporting job is created, the first data can be available only after up to 48 hours.
"""
name = None
primary_key = None
http_method = None
url_base = "https://youtubereporting.googleapis.com/v1/"
JOB_NAME = "Airbyte reporting job"
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
return [response.json()]
def path(self, **kwargs) -> str:
return "jobs"
def request_body_json(self, **kwargs) -> Optional[Mapping]:
if self.name:
return {"name": self.JOB_NAME, "reportTypeId": self.name}
def list(self):
"https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs/list"
self.name = None
self.http_method = "GET"
results = list(self.read_records(sync_mode=None))
result = results[0]
return result.get("jobs", {})
def create(self, name):
"https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs/create"
self.name = name
self.http_method = "POST"
results = list(self.read_records(sync_mode=None))
result = results[0]
return result["id"]
class ReportResources(HttpStream):
"https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs.reports/list"
name = None
primary_key = "id"
url_base = "https://youtubereporting.googleapis.com/v1/"
def __init__(self, name: str, jobs_resource: JobsResource, job_id: str, **kwargs):
self.name = name
self.jobs_resource = jobs_resource
self.job_id = job_id
super().__init__(**kwargs)
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
response_json = response.json()
reports = []
for report in response_json.get("reports", []):
report = {**report}
report["startTime"] = datetime.datetime.strptime(report["startTime"], "%Y-%m-%dT%H:%M:%S%z")
reports.append(report)
reports.sort(key=lambda x: x["startTime"])
date = kwargs["stream_state"].get("date")
if date:
reports = [r for r in reports if int(r["startTime"].date().strftime("%Y%m%d")) >= date]
if not reports:
reports.append(None)
return reports
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if not self.job_id:
self.job_id = self.jobs_resource.create(self.name)
self.logger.info(f"YouTube reporting job is created: '{self.job_id}'")
return "jobs/{}/reports".format(self.job_id)
class ChannelReports(HttpSubStream):
"https://developers.google.com/youtube/reporting/v1/reports/channel_reports"
name = None
primary_key = None
cursor_field = "date"
url_base = ""
transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization)
def __init__(self, name: str, dimensions: List[str], **kwargs):
self.name = name
self.primary_key = dimensions
super().__init__(**kwargs)
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
fp = io.StringIO(response.text)
reader = csv.DictReader(fp)
for record in reader:
yield record
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
if not current_stream_state:
return {self.cursor_field: latest_record[self.cursor_field]}
return {self.cursor_field: max(current_stream_state[self.cursor_field], latest_record[self.cursor_field])}
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return stream_slice["parent"]["downloadUrl"]
def read_records(self, *, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
parent = stream_slice.get("parent")
if parent:
yield from super().read_records(stream_slice=stream_slice, **kwargs)
else:
self.logger.info("no data from parent stream")
yield from []
class SourceYoutubeAnalytics(AbstractSource):
@staticmethod
def get_authenticator(config):
credentials = config["credentials"]
client_id = credentials["client_id"]
client_secret = credentials["client_secret"]
refresh_token = credentials["refresh_token"]
return Oauth2Authenticator(
token_refresh_endpoint="https://oauth2.googleapis.com/token",
client_id=client_id,
client_secret=client_secret,
refresh_token=refresh_token,
)
def check_connection(self, logger, config) -> Tuple[bool, any]:
authenticator = self.get_authenticator(config)
jobs_resource = JobsResource(authenticator=authenticator)
try:
jobs_resource.list()
except Exception as e:
return False, str(e)
return True, None
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
authenticator = self.get_authenticator(config)
jobs_resource = JobsResource(authenticator=authenticator)
jobs = jobs_resource.list()
report_to_job_id = {j["reportTypeId"]: j["id"] for j in jobs}
channel_reports = json.loads(pkgutil.get_data("source_youtube_analytics", "defaults/channel_reports.json"))
streams = []
for channel_report in channel_reports:
stream_name = channel_report["id"]
dimensions = channel_report["dimensions"]
job_id = report_to_job_id.get(stream_name)
parent = ReportResources(name=stream_name, jobs_resource=jobs_resource, job_id=job_id, authenticator=authenticator)
streams.append(ChannelReports(name=stream_name, dimensions=dimensions, parent=parent, authenticator=authenticator))
return streams
|
[
"pkgutil.get_data",
"io.StringIO",
"airbyte_cdk.sources.streams.http.requests_native_auth.Oauth2Authenticator",
"airbyte_cdk.sources.utils.transform.TypeTransformer",
"csv.DictReader",
"datetime.datetime.strptime"
] |
[((4705, 4764), 'airbyte_cdk.sources.utils.transform.TypeTransformer', 'TypeTransformer', (['TransformConfig.DefaultSchemaNormalization'], {}), '(TransformConfig.DefaultSchemaNormalization)\n', (4720, 4764), False, 'from airbyte_cdk.sources.utils.transform import TransformConfig, TypeTransformer\n'), ((5148, 5174), 'io.StringIO', 'io.StringIO', (['response.text'], {}), '(response.text)\n', (5159, 5174), False, 'import io\n'), ((5192, 5210), 'csv.DictReader', 'csv.DictReader', (['fp'], {}), '(fp)\n', (5206, 5210), False, 'import csv\n'), ((6514, 6683), 'airbyte_cdk.sources.streams.http.requests_native_auth.Oauth2Authenticator', 'Oauth2Authenticator', ([], {'token_refresh_endpoint': '"""https://oauth2.googleapis.com/token"""', 'client_id': 'client_id', 'client_secret': 'client_secret', 'refresh_token': 'refresh_token'}), "(token_refresh_endpoint=\n 'https://oauth2.googleapis.com/token', client_id=client_id,\n client_secret=client_secret, refresh_token=refresh_token)\n", (6533, 6683), False, 'from airbyte_cdk.sources.streams.http.requests_native_auth import Oauth2Authenticator\n'), ((3687, 3757), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["report['startTime']", '"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "(report['startTime'], '%Y-%m-%dT%H:%M:%S%z')\n", (3713, 3757), False, 'import datetime\n'), ((7394, 7471), 'pkgutil.get_data', 'pkgutil.get_data', (['"""source_youtube_analytics"""', '"""defaults/channel_reports.json"""'], {}), "('source_youtube_analytics', 'defaults/channel_reports.json')\n", (7410, 7471), False, 'import pkgutil\n')]
|
# -*- coding: utf-8 -*-
import json
import re
import mock
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from geotrek.authent.tests import AuthentFixturesTest
from geotrek.common.tests import CommonTest
from geotrek.common.utils import LTE
from geotrek.authent.factories import PathManagerFactory, StructureFactory
from geotrek.authent.models import default_structure
from geotrek.core.factories import (PathFactory, StakeFactory, TrailFactory, ComfortFactory)
from geotrek.core.models import Path, Trail
class PathViewsTest(CommonTest):
model = Path
modelfactory = PathFactory
userfactory = PathManagerFactory
def login(self):
user = PathManagerFactory(password='<PASSWORD>')
success = self.client.login(username=user.username, password='<PASSWORD>')
self.assertTrue(success)
def get_bad_data(self):
return {'geom': '{"geom": "LINESTRING (0.0 0.0, 1.0 1.0)"}'}, _("Linestring invalid snapping.")
def get_good_data(self):
return {
'name': '',
'structure': default_structure().pk,
'stake': '',
'comfort': ComfortFactory.create().pk,
'trail': '',
'comments': '',
'departure': '',
'arrival': '',
'source': '',
'valid': 'on',
'geom': '{"geom": "LINESTRING (99.0 89.0, 100.0 88.0)", "snap": [null, null]}',
}
def _post_add_form(self):
# Avoid overlap, delete all !
for p in Path.objects.all():
p.delete()
super(PathViewsTest, self)._post_add_form()
def test_structurerelated_filter(self):
def test_structure(structure, stake):
user = self.userfactory(password='<PASSWORD>')
p = user.profile
p.structure = structure
p.save()
success = self.client.login(username=user.username, password='<PASSWORD>')
self.assertTrue(success)
response = self.client.get(Path.get_add_url())
self.assertEqual(response.status_code, 200)
self.assertTrue('form' in response.context)
form = response.context['form']
self.assertTrue('stake' in form.fields)
stakefield = form.fields['stake']
self.assertTrue((stake.pk, unicode(stake)) in stakefield.choices)
self.client.logout()
# Test for two structures
s1 = StructureFactory.create()
s2 = StructureFactory.create()
st1 = StakeFactory.create(structure=s1)
StakeFactory.create(structure=s1)
st2 = StakeFactory.create(structure=s2)
StakeFactory.create(structure=s2)
test_structure(s1, st1)
test_structure(s2, st2)
def test_basic_format(self):
self.modelfactory.create()
self.modelfactory.create(name=u"ãéè")
super(CommonTest, self).test_basic_format()
def test_path_form_is_not_valid_if_no_geometry_provided(self):
self.login()
data = self.get_good_data()
data['geom'] = ''
response = self.client.post(Path.get_add_url(), data)
self.assertEqual(response.status_code, 200)
def test_manager_can_delete(self):
self.login()
path = PathFactory()
response = self.client.get(path.get_detail_url())
self.assertEqual(response.status_code, 200)
response = self.client.post(path.get_delete_url())
self.assertEqual(response.status_code, 302)
def test_elevation_area_json(self):
self.login()
path = self.modelfactory.create()
url = '/api/en/paths/{pk}/dem.json'.format(pk=path.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
class DenormalizedTrailTest(AuthentFixturesTest):
def setUp(self):
self.trail1 = TrailFactory(no_path=True)
self.trail2 = TrailFactory(no_path=True)
self.path = PathFactory()
self.trail1.add_path(self.path)
self.trail2.add_path(self.path)
def test_path_and_trails_are_linked(self):
self.assertIn(self.trail1, self.path.trails.all())
self.assertIn(self.trail2, self.path.trails.all())
def login(self):
user = PathManagerFactory(password='<PASSWORD>')
success = self.client.login(username=user.username, password='<PASSWORD>')
self.assertTrue(success)
def test_denormalized_path_trails(self):
PathFactory.create_batch(size=50)
TrailFactory.create_batch(size=50)
self.login()
with self.assertNumQueries(LTE(15)):
self.client.get(reverse('core:path_json_list'))
def test_trails_are_shown_as_links_in_list(self):
self.login()
response = self.client.get(reverse('core:path_json_list'))
self.assertEqual(response.status_code, 200)
paths_json = json.loads(response.content)
trails_column = paths_json['aaData'][0][6]
self.assertTrue(trails_column == u'%s, %s' % (self.trail1.name_display, self.trail2.name_display) or
trails_column == u'%s, %s' % (self.trail2.name_display, self.trail1.name_display))
class TrailViewsTest(CommonTest):
model = Trail
modelfactory = TrailFactory
userfactory = PathManagerFactory
def get_good_data(self):
path = PathFactory.create()
return {
'name': 't',
'departure': 'Below',
'arrival': 'Above',
'comments': 'No comment',
'structure': default_structure().pk,
'topology': '{"paths": [%s]}' % path.pk,
}
def test_detail_page(self):
self.login()
trail = TrailFactory()
response = self.client.get(trail.get_detail_url())
self.assertEqual(response.status_code, 200)
@mock.patch('mapentity.models.MapEntityMixin.get_attributes_html')
def test_document_export(self, get_attributes_html):
get_attributes_html.return_value = '<p>mock</p>'
trail = TrailFactory()
self.login()
with open(trail.get_map_image_path(), 'w') as f:
f.write('***' * 1000)
response = self.client.get(trail.get_document_url())
self.assertEqual(response.status_code, 200)
def test_add_trail_from_existing_topology_does_not_use_pk(self):
import bs4
self.login()
trail = TrailFactory(offset=3.14)
response = self.client.get(Trail.get_add_url() + '?topology=%s' % trail.pk)
soup = bs4.BeautifulSoup(response.content)
textarea_field = soup.find(id="id_topology")
self.assertIn('"kind": "TOPOLOGY"', textarea_field.text)
self.assertIn('"offset": 3.14', textarea_field.text)
self.assertNotIn('"pk": %s' % trail.pk, textarea_field.text)
def test_add_trail_from_existing_topology(self):
self.login()
trail = TrailFactory()
form_data = self.get_good_data()
form_data['topology'] = trail.serialize(with_pk=False)
response = self.client.post(Trail.get_add_url(), form_data)
self.assertEqual(response.status_code, 302) # success, redirects to detail view
p = re.compile(r"http://testserver/trail/(\d+)/")
m = p.match(response['Location'])
new_pk = int(m.group(1))
new_trail = Trail.objects.get(pk=new_pk)
self.assertIn(trail, new_trail.trails.all())
|
[
"django.core.urlresolvers.reverse",
"geotrek.core.models.Path.get_add_url",
"geotrek.core.factories.TrailFactory",
"geotrek.core.models.Trail.get_add_url",
"django.utils.translation.ugettext_lazy",
"geotrek.authent.factories.StructureFactory.create",
"json.loads",
"geotrek.core.factories.PathFactory",
"geotrek.authent.models.default_structure",
"geotrek.core.factories.TrailFactory.create_batch",
"mock.patch",
"geotrek.core.factories.PathFactory.create",
"geotrek.core.models.Trail.objects.get",
"geotrek.common.utils.LTE",
"bs4.BeautifulSoup",
"geotrek.core.factories.ComfortFactory.create",
"re.compile",
"geotrek.core.models.Path.objects.all",
"geotrek.core.factories.StakeFactory.create",
"geotrek.authent.factories.PathManagerFactory",
"geotrek.core.factories.PathFactory.create_batch"
] |
[((5928, 5993), 'mock.patch', 'mock.patch', (['"""mapentity.models.MapEntityMixin.get_attributes_html"""'], {}), "('mapentity.models.MapEntityMixin.get_attributes_html')\n", (5938, 5993), False, 'import mock\n'), ((719, 760), 'geotrek.authent.factories.PathManagerFactory', 'PathManagerFactory', ([], {'password': '"""<PASSWORD>"""'}), "(password='<PASSWORD>')\n", (737, 760), False, 'from geotrek.authent.factories import PathManagerFactory, StructureFactory\n'), ((1556, 1574), 'geotrek.core.models.Path.objects.all', 'Path.objects.all', ([], {}), '()\n', (1572, 1574), False, 'from geotrek.core.models import Path, Trail\n'), ((2482, 2507), 'geotrek.authent.factories.StructureFactory.create', 'StructureFactory.create', ([], {}), '()\n', (2505, 2507), False, 'from geotrek.authent.factories import PathManagerFactory, StructureFactory\n'), ((2521, 2546), 'geotrek.authent.factories.StructureFactory.create', 'StructureFactory.create', ([], {}), '()\n', (2544, 2546), False, 'from geotrek.authent.factories import PathManagerFactory, StructureFactory\n'), ((2561, 2594), 'geotrek.core.factories.StakeFactory.create', 'StakeFactory.create', ([], {'structure': 's1'}), '(structure=s1)\n', (2580, 2594), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((2603, 2636), 'geotrek.core.factories.StakeFactory.create', 'StakeFactory.create', ([], {'structure': 's1'}), '(structure=s1)\n', (2622, 2636), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((2651, 2684), 'geotrek.core.factories.StakeFactory.create', 'StakeFactory.create', ([], {'structure': 's2'}), '(structure=s2)\n', (2670, 2684), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((2693, 2726), 'geotrek.core.factories.StakeFactory.create', 'StakeFactory.create', ([], {'structure': 's2'}), '(structure=s2)\n', (2712, 2726), False, 'from geotrek.core.factories import 
PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((3299, 3312), 'geotrek.core.factories.PathFactory', 'PathFactory', ([], {}), '()\n', (3310, 3312), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((3959, 3985), 'geotrek.core.factories.TrailFactory', 'TrailFactory', ([], {'no_path': '(True)'}), '(no_path=True)\n', (3971, 3985), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((4008, 4034), 'geotrek.core.factories.TrailFactory', 'TrailFactory', ([], {'no_path': '(True)'}), '(no_path=True)\n', (4020, 4034), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((4055, 4068), 'geotrek.core.factories.PathFactory', 'PathFactory', ([], {}), '()\n', (4066, 4068), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((4352, 4393), 'geotrek.authent.factories.PathManagerFactory', 'PathManagerFactory', ([], {'password': '"""<PASSWORD>"""'}), "(password='<PASSWORD>')\n", (4370, 4393), False, 'from geotrek.authent.factories import PathManagerFactory, StructureFactory\n'), ((4564, 4597), 'geotrek.core.factories.PathFactory.create_batch', 'PathFactory.create_batch', ([], {'size': '(50)'}), '(size=50)\n', (4588, 4597), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((4606, 4640), 'geotrek.core.factories.TrailFactory.create_batch', 'TrailFactory.create_batch', ([], {'size': '(50)'}), '(size=50)\n', (4631, 4640), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((4983, 5011), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (4993, 5011), False, 'import json\n'), ((5447, 5467), 'geotrek.core.factories.PathFactory.create', 'PathFactory.create', ([], {}), '()\n', (5465, 5467), False, 'from geotrek.core.factories import 
PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((5796, 5810), 'geotrek.core.factories.TrailFactory', 'TrailFactory', ([], {}), '()\n', (5808, 5810), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((6124, 6138), 'geotrek.core.factories.TrailFactory', 'TrailFactory', ([], {}), '()\n', (6136, 6138), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((6491, 6516), 'geotrek.core.factories.TrailFactory', 'TrailFactory', ([], {'offset': '(3.14)'}), '(offset=3.14)\n', (6503, 6516), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((6616, 6651), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['response.content'], {}), '(response.content)\n', (6633, 6651), False, 'import bs4\n'), ((6991, 7005), 'geotrek.core.factories.TrailFactory', 'TrailFactory', ([], {}), '()\n', (7003, 7005), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((7279, 7324), 're.compile', 're.compile', (['"""http://testserver/trail/(\\\\d+)/"""'], {}), "('http://testserver/trail/(\\\\d+)/')\n", (7289, 7324), False, 'import re\n'), ((7420, 7448), 'geotrek.core.models.Trail.objects.get', 'Trail.objects.get', ([], {'pk': 'new_pk'}), '(pk=new_pk)\n', (7437, 7448), False, 'from geotrek.core.models import Path, Trail\n'), ((976, 1009), 'django.utils.translation.ugettext_lazy', '_', (['"""Linestring invalid snapping."""'], {}), "('Linestring invalid snapping.')\n", (977, 1009), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3145, 3163), 'geotrek.core.models.Path.get_add_url', 'Path.get_add_url', ([], {}), '()\n', (3161, 3163), False, 'from geotrek.core.models import Path, Trail\n'), ((4878, 4908), 'django.core.urlresolvers.reverse', 'reverse', (['"""core:path_json_list"""'], {}), "('core:path_json_list')\n", (4885, 4908), False, 'from django.core.urlresolvers 
import reverse\n'), ((7146, 7165), 'geotrek.core.models.Trail.get_add_url', 'Trail.get_add_url', ([], {}), '()\n', (7163, 7165), False, 'from geotrek.core.models import Path, Trail\n'), ((1106, 1125), 'geotrek.authent.models.default_structure', 'default_structure', ([], {}), '()\n', (1123, 1125), False, 'from geotrek.authent.models import default_structure\n'), ((1178, 1201), 'geotrek.core.factories.ComfortFactory.create', 'ComfortFactory.create', ([], {}), '()\n', (1199, 1201), False, 'from geotrek.core.factories import PathFactory, StakeFactory, TrailFactory, ComfortFactory\n'), ((2050, 2068), 'geotrek.core.models.Path.get_add_url', 'Path.get_add_url', ([], {}), '()\n', (2066, 2068), False, 'from geotrek.core.models import Path, Trail\n'), ((4697, 4704), 'geotrek.common.utils.LTE', 'LTE', (['(15)'], {}), '(15)\n', (4700, 4704), False, 'from geotrek.common.utils import LTE\n'), ((4735, 4765), 'django.core.urlresolvers.reverse', 'reverse', (['"""core:path_json_list"""'], {}), "('core:path_json_list')\n", (4742, 4765), False, 'from django.core.urlresolvers import reverse\n'), ((5639, 5658), 'geotrek.authent.models.default_structure', 'default_structure', ([], {}), '()\n', (5656, 5658), False, 'from geotrek.authent.models import default_structure\n'), ((6552, 6571), 'geotrek.core.models.Trail.get_add_url', 'Trail.get_add_url', ([], {}), '()\n', (6569, 6571), False, 'from geotrek.core.models import Path, Trail\n')]
|
import json
import coreapi
import coreschema
from django.db.utils import IntegrityError
from django.shortcuts import get_object_or_404
from django.utils.datastructures import MultiValueDictKeyError
from numpydoc import docscrape
from rest_framework import status, schemas
from rest_framework.generics import ListAPIView, RetrieveAPIView, \
ListCreateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from AnyTimeGridSearchCV.grids.anytime_search import ESTIMATORS_DICT, \
_convert_clf_param, ATGridSearchCV
from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, \
CVResultScore
from AnyTimeGridSearchCV.grids.serializers import GridSearchSerializer, \
CVResultSerializer, DatasetSerializer
class EstimatorsListView(APIView):
    """
    Returns a list of all available scikit-learn classifiers.
    """

    def get(self, request, *args, **kwargs):
        # The registry keys are the public classifier names exposed by the API.
        estimator_names = list(ESTIMATORS_DICT.keys())
        return Response(estimator_names, status=status.HTTP_200_OK)
class EstimatorDetailView(APIView):
    """
    Returns a detailed view of a scikit-learn classifier - all available arguments for the classifier.
    """

    schema = schemas.AutoSchema(manual_fields=[
        coreapi.Field(
            'clf',
            required=True,
            location='path',
            schema=coreschema.String(
                description='scikit-learn Estimator name'
            )
        ),
    ])

    def get(self, request, *args, **kwargs):
        # Resolve the estimator class from the registry; an unknown name
        # yields an empty (but still 200) payload, as before.
        clf_name = kwargs.get('clf', 'Not a valid scikit-learn estimator name')
        try:
            clf = ESTIMATORS_DICT[clf_name]
        except KeyError:
            return Response({'name': '', 'type': '', 'desc': ''},
                            status=status.HTTP_200_OK)
        # numpydoc parses the class docstring; each Parameters entry is a
        # (name, type, description) triple.
        parameters = docscrape.ClassDoc(clf)['Parameters']
        payload = [{'name': arg_name, 'type': arg_type, 'desc': arg_desc}
                   for arg_name, arg_type, arg_desc in parameters]
        return Response(payload, status=status.HTTP_200_OK)
class GridsListView(ListCreateAPIView):
    """
    get:
    Returns a list of all available grid searches.

    post:
    Creates a new grid search.
    """

    queryset = GridSearch.objects.all()
    serializer_class = GridSearchSerializer

    def post(self, request, *args, **kwargs):
        # Delegate straight to the generic create behaviour of the base view.
        return super().post(request, *args, **kwargs)
class GridDetailView(RetrieveAPIView):
    """
    Returns the specified grid (uuid, dataset name and scikit-learn classifier name).
    """
    queryset = GridSearch.objects.all()
    serializer_class = GridSearchSerializer
    # Grids are addressed by their `uuid` field in the URL, not by primary key.
    lookup_field = 'uuid'
class GridResultsListSchema(schemas.AutoSchema):
    """AutoSchema that adds the ``cv_data`` form field to POST operations."""

    def get_manual_fields(self, path, method):
        """Return the manual schema fields for *path*/*method*.

        POST additionally documents the ``cv_data`` form field carrying the
        cross-validation result.

        Bug fix: the original returned ``None`` (implicit fall-through) for
        any method other than GET or POST (e.g. OPTIONS); now the base
        manual fields are always returned.
        """
        manual_fields = schemas.AutoSchema.get_manual_fields(self, path, method)
        if method == 'POST':
            manual_fields = manual_fields + [
                coreapi.Field('cv_data', required=True, location='form',
                              schema=coreschema.Object(description='Cross validation result'))]
        return manual_fields
class GridResultsList(ListCreateAPIView):
    """
    get:
    Returns a list of all the results (CV classifications) for given grid.

    post:
    Creates a new result instance for specified grid.
    """
    queryset = CVResult.objects.all()
    serializer_class = CVResultSerializer
    schema = GridResultsListSchema(manual_fields=[
        coreapi.Field(
            'uuid',
            required=True,
            location='path',
            schema=coreschema.String(
                description='GridSearch UUID'
            )
        ),
    ])

    def get_queryset(self):
        # Restrict the listing to results of the grid named by the URL uuid;
        # 404 when no GridSearch matches.
        _gs = get_object_or_404(GridSearch, uuid=self.kwargs['uuid'])
        return _gs.results.all()

    def post(self, request, *args, **kwargs):
        # numpy is imported lazily so importing this module does not require it.
        import numpy
        _gs = get_object_or_404(GridSearch, uuid=self.kwargs['uuid'])
        multimetric_scores = json.loads(request.data['cv_data'])
        # Scorer names are recovered from keys shaped 'train_<scorer>' /
        # 'test_<scorer>'; 'fit_time' and 'score_time' are bookkeeping keys.
        # NOTE(review): split('_')[-1] keeps only the last token, so a scorer
        # name containing '_' (e.g. 'neg_mean_squared_error') is truncated —
        # confirm against the producers of cv_data.
        scorers = set(map(lambda j: j.split('_')[-1],
                          filter(lambda i: i != 'fit_time' and i != 'score_time',
                                 multimetric_scores)))
        cv_result, _ = CVResult.objects.get_or_create(gridsearch=_gs,
                                                       params=json.loads(request.data['params']))
        cv_result.fit_time = multimetric_scores['fit_time']
        cv_result.score_time = multimetric_scores['score_time']
        cv_result.save()
        # One CVResultScore per scorer; the scalar score is the mean of the
        # test scores rounded to 6 decimal places.
        CVResultScore.objects.bulk_create([CVResultScore(scorer=scorer, train_scores=multimetric_scores['train_%s' % scorer],
                                                         test_scores=multimetric_scores['test_%s' % scorer],
                                                         score=round(numpy.array(multimetric_scores[
                                                             'test_%s' % scorer]).mean(), 6),
                                                         cv_result=cv_result) for scorer in scorers])
        return Response(CVResultSerializer(cv_result).data, status=status.HTTP_201_CREATED)
class DataSetsList(ListCreateAPIView):
    """
    get:
    Returns a list of all the existing Datasets.

    post:
    Creates a new Dataset instance.
    """
    queryset = DataSet.objects.all()
    serializer_class = DatasetSerializer

    def post(self, request, *args, **kwargs):
        # numpy imported lazily: only needed to validate the uploaded CSVs.
        import numpy
        try:
            name = request.data['name']
        except MultiValueDictKeyError:
            return Response('Missing dataset name', status=status.HTTP_400_BAD_REQUEST)
        if not name:
            return Response('Missing dataset name', status=status.HTTP_400_BAD_REQUEST)
        try:
            examples, labels = request.FILES['examples'], request.FILES['labels']
        except MultiValueDictKeyError:
            return Response('Missing dataset files', status=status.HTTP_400_BAD_REQUEST)
        # The upload file names are part of the API contract.
        if examples.name != 'examples.csv':
            return Response('Bad name of examples file', status=status.HTTP_400_BAD_REQUEST)
        if labels.name != 'labels.csv':
            return Response('Bad name of labels file', status=status.HTTP_400_BAD_REQUEST)
        # Both CSVs must describe the same number of rows.
        # NOTE(review): genfromtxt consumes the uploaded file objects, which
        # are then re-used by DataSet.objects.create below — presumably the
        # storage layer rewinds/reads them independently; verify.
        if len(numpy.genfromtxt(examples, delimiter=',')) != len(numpy.genfromtxt(labels, delimiter=',')):
            return Response('Examples and labels are not the same length', status=status.HTTP_400_BAD_REQUEST)
        try:
            return Response(DatasetSerializer(DataSet.objects.create(name=name,
                                                              examples=examples,
                                                              labels=labels)).data,
                            status=status.HTTP_201_CREATED)
        except IntegrityError:
            # Raised when a DataSet with the same (unique) name already exists.
            return Response('Name already exists', status=status.HTTP_400_BAD_REQUEST)
class DataSetGridsListView(ListAPIView):
    """
    Returns all grid searches on the given Dataset.
    """

    queryset = GridSearch.objects.all()
    serializer_class = GridSearchSerializer
    schema = schemas.AutoSchema(manual_fields=[
        coreapi.Field(
            'name',
            required=True,
            location='path',
            schema=coreschema.String(description='Dataset name')
        ),
    ])

    def get_queryset(self):
        # Resolve the dataset by its unique name; 404 when absent.
        dataset = get_object_or_404(DataSet, name=self.kwargs['name'])
        return dataset.grid_searches.all()
class ATGridSearchCreateView(APIView):
    """
    Creates a new ATGridSearch instance (with the grid specified in the request) and starts it.
    """

    schema = schemas.AutoSchema(manual_fields=[
        coreapi.Field(
            'dataset',
            required=True,
            location='form',
            schema=coreschema.String(description='Dataset name')
        ),
        coreapi.Field(
            'clf',
            required=True,
            location='form',
            schema=coreschema.String(description='scikit-learn estimator name')
        ),
        coreapi.Field(
            'args',
            required=True,
            location='form',
            schema=coreschema.Object(description='Grid to search'),
        ),
    ])

    def post(self, request, *args, **kwargs):
        # Validate the dataset reference first.
        dataset_name = request.data['dataset']
        try:
            ds = DataSet.objects.get(name=dataset_name)
        except DataSet.DoesNotExist:
            return Response('No DataSet named {}'.format(dataset_name), status=status.HTTP_400_BAD_REQUEST)
        # Then the estimator reference.
        clf_name = request.data['clf']
        try:
            classifier = ESTIMATORS_DICT[clf_name]
        except KeyError:
            return Response('No sklearn classifier named {}'.format(clf_name), status=status.HTTP_400_BAD_REQUEST)
        # Convert each raw grid value into its concrete parameter list.
        clf_params = {key: _convert_clf_param(value)
                      for key, value in request.data['args'].items()}
        gs = ATGridSearchCV(classifier(), clf_params, dataset=ds.pk)
        gs.fit()
        return Response(gs._uuid, status=status.HTTP_201_CREATED)
|
[
"AnyTimeGridSearchCV.grids.models.DataSet.objects.get",
"json.loads",
"AnyTimeGridSearchCV.grids.models.GridSearch.objects.all",
"AnyTimeGridSearchCV.grids.anytime_search._convert_clf_param",
"AnyTimeGridSearchCV.grids.anytime_search.ESTIMATORS_DICT.keys",
"numpydoc.docscrape.ClassDoc",
"AnyTimeGridSearchCV.grids.models.DataSet.objects.create",
"AnyTimeGridSearchCV.grids.serializers.CVResultSerializer",
"numpy.genfromtxt",
"AnyTimeGridSearchCV.grids.models.CVResult.objects.all",
"rest_framework.generics.ListCreateAPIView.post",
"django.shortcuts.get_object_or_404",
"rest_framework.response.Response",
"numpy.array",
"coreschema.Object",
"AnyTimeGridSearchCV.grids.models.DataSet.objects.all",
"rest_framework.schemas.AutoSchema.get_manual_fields",
"coreschema.String"
] |
[((2201, 2225), 'AnyTimeGridSearchCV.grids.models.GridSearch.objects.all', 'GridSearch.objects.all', ([], {}), '()\n', (2223, 2225), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((2546, 2570), 'AnyTimeGridSearchCV.grids.models.GridSearch.objects.all', 'GridSearch.objects.all', ([], {}), '()\n', (2568, 2570), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((3347, 3369), 'AnyTimeGridSearchCV.grids.models.CVResult.objects.all', 'CVResult.objects.all', ([], {}), '()\n', (3367, 3369), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((5331, 5352), 'AnyTimeGridSearchCV.grids.models.DataSet.objects.all', 'DataSet.objects.all', ([], {}), '()\n', (5350, 5352), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((7037, 7061), 'AnyTimeGridSearchCV.grids.models.GridSearch.objects.all', 'GridSearch.objects.all', ([], {}), '()\n', (7059, 7061), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((2332, 2386), 'rest_framework.generics.ListCreateAPIView.post', 'ListCreateAPIView.post', (['self', 'request', '*args'], {}), '(self, request, *args, **kwargs)\n', (2354, 2386), False, 'from rest_framework.generics import ListAPIView, RetrieveAPIView, ListCreateAPIView\n'), ((2764, 2820), 'rest_framework.schemas.AutoSchema.get_manual_fields', 'schemas.AutoSchema.get_manual_fields', (['self', 'path', 'method'], {}), '(self, path, method)\n', (2800, 2820), False, 'from rest_framework import status, schemas\n'), ((3722, 3777), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['GridSearch'], {'uuid': "self.kwargs['uuid']"}), "(GridSearch, uuid=self.kwargs['uuid'])\n", (3739, 3777), False, 'from django.shortcuts import get_object_or_404\n'), ((3893, 3948), 'django.shortcuts.get_object_or_404', 'get_object_or_404', 
(['GridSearch'], {'uuid': "self.kwargs['uuid']"}), "(GridSearch, uuid=self.kwargs['uuid'])\n", (3910, 3948), False, 'from django.shortcuts import get_object_or_404\n'), ((3978, 4013), 'json.loads', 'json.loads', (["request.data['cv_data']"], {}), "(request.data['cv_data'])\n", (3988, 4013), False, 'import json\n'), ((7410, 7462), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['DataSet'], {'name': "self.kwargs['name']"}), "(DataSet, name=self.kwargs['name'])\n", (7427, 7462), False, 'from django.shortcuts import get_object_or_404\n'), ((8956, 9006), 'rest_framework.response.Response', 'Response', (['gs._uuid'], {'status': 'status.HTTP_201_CREATED'}), '(gs._uuid, status=status.HTTP_201_CREATED)\n', (8964, 9006), False, 'from rest_framework.response import Response\n'), ((5682, 5750), 'rest_framework.response.Response', 'Response', (['"""Missing dataset name"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Missing dataset name', status=status.HTTP_400_BAD_REQUEST)\n", (5690, 5750), False, 'from rest_framework.response import Response\n'), ((6037, 6110), 'rest_framework.response.Response', 'Response', (['"""Bad name of examples file"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Bad name of examples file', status=status.HTTP_400_BAD_REQUEST)\n", (6045, 6110), False, 'from rest_framework.response import Response\n'), ((6170, 6241), 'rest_framework.response.Response', 'Response', (['"""Bad name of labels file"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Bad name of labels file', status=status.HTTP_400_BAD_REQUEST)\n", (6178, 6241), False, 'from rest_framework.response import Response\n'), ((6368, 6464), 'rest_framework.response.Response', 'Response', (['"""Examples and labels are not the same length"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Examples and labels are not the same length', status=status.\n HTTP_400_BAD_REQUEST)\n", (6376, 6464), False, 'from rest_framework.response import Response\n'), ((8334, 8383), 
'AnyTimeGridSearchCV.grids.models.DataSet.objects.get', 'DataSet.objects.get', ([], {'name': "request.data['dataset']"}), "(name=request.data['dataset'])\n", (8353, 8383), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((8791, 8812), 'AnyTimeGridSearchCV.grids.anytime_search._convert_clf_param', '_convert_clf_param', (['v'], {}), '(v)\n', (8809, 8812), False, 'from AnyTimeGridSearchCV.grids.anytime_search import ESTIMATORS_DICT, _convert_clf_param, ATGridSearchCV\n'), ((963, 985), 'AnyTimeGridSearchCV.grids.anytime_search.ESTIMATORS_DICT.keys', 'ESTIMATORS_DICT.keys', ([], {}), '()\n', (983, 985), False, 'from AnyTimeGridSearchCV.grids.anytime_search import ESTIMATORS_DICT, _convert_clf_param, ATGridSearchCV\n'), ((1691, 1764), 'rest_framework.response.Response', 'Response', (["{'name': '', 'type': '', 'desc': ''}"], {'status': 'status.HTTP_200_OK'}), "({'name': '', 'type': '', 'desc': ''}, status=status.HTTP_200_OK)\n", (1699, 1764), False, 'from rest_framework.response import Response\n'), ((4336, 4370), 'json.loads', 'json.loads', (["request.data['params']"], {}), "(request.data['params'])\n", (4346, 4370), False, 'import json\n'), ((5085, 5114), 'AnyTimeGridSearchCV.grids.serializers.CVResultSerializer', 'CVResultSerializer', (['cv_result'], {}), '(cv_result)\n', (5103, 5114), False, 'from AnyTimeGridSearchCV.grids.serializers import GridSearchSerializer, CVResultSerializer, DatasetSerializer\n'), ((5573, 5641), 'rest_framework.response.Response', 'Response', (['"""Missing dataset name"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Missing dataset name', status=status.HTTP_400_BAD_REQUEST)\n", (5581, 5641), False, 'from rest_framework.response import Response\n'), ((5904, 5973), 'rest_framework.response.Response', 'Response', (['"""Missing dataset files"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Missing dataset files', status=status.HTTP_400_BAD_REQUEST)\n", (5912, 5973), False, 'from 
rest_framework.response import Response\n'), ((6257, 6298), 'numpy.genfromtxt', 'numpy.genfromtxt', (['examples'], {'delimiter': '""","""'}), "(examples, delimiter=',')\n", (6273, 6298), False, 'import numpy\n'), ((6307, 6346), 'numpy.genfromtxt', 'numpy.genfromtxt', (['labels'], {'delimiter': '""","""'}), "(labels, delimiter=',')\n", (6323, 6346), False, 'import numpy\n'), ((6842, 6909), 'rest_framework.response.Response', 'Response', (['"""Name already exists"""'], {'status': 'status.HTTP_400_BAD_REQUEST'}), "('Name already exists', status=status.HTTP_400_BAD_REQUEST)\n", (6850, 6909), False, 'from rest_framework.response import Response\n'), ((1338, 1398), 'coreschema.String', 'coreschema.String', ([], {'description': '"""scikit-learn Estimator name"""'}), "(description='scikit-learn Estimator name')\n", (1355, 1398), False, 'import coreschema\n'), ((1934, 1957), 'numpydoc.docscrape.ClassDoc', 'docscrape.ClassDoc', (['clf'], {}), '(clf)\n', (1952, 1957), False, 'from numpydoc import docscrape\n'), ((3582, 3630), 'coreschema.String', 'coreschema.String', ([], {'description': '"""GridSearch UUID"""'}), "(description='GridSearch UUID')\n", (3599, 3630), False, 'import coreschema\n'), ((6519, 6586), 'AnyTimeGridSearchCV.grids.models.DataSet.objects.create', 'DataSet.objects.create', ([], {'name': 'name', 'examples': 'examples', 'labels': 'labels'}), '(name=name, examples=examples, labels=labels)\n', (6541, 6586), False, 'from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, CVResultScore\n'), ((7273, 7318), 'coreschema.String', 'coreschema.String', ([], {'description': '"""Dataset name"""'}), "(description='Dataset name')\n", (7290, 7318), False, 'import coreschema\n'), ((7825, 7870), 'coreschema.String', 'coreschema.String', ([], {'description': '"""Dataset name"""'}), "(description='Dataset name')\n", (7842, 7870), False, 'import coreschema\n'), ((7999, 8059), 'coreschema.String', 'coreschema.String', ([], {'description': '"""scikit-learn 
estimator name"""'}), "(description='scikit-learn estimator name')\n", (8016, 8059), False, 'import coreschema\n'), ((8189, 8236), 'coreschema.Object', 'coreschema.Object', ([], {'description': '"""Grid to search"""'}), "(description='Grid to search')\n", (8206, 8236), False, 'import coreschema\n'), ((3063, 3119), 'coreschema.Object', 'coreschema.Object', ([], {'description': '"""Cross validation result"""'}), "(description='Cross validation result')\n", (3080, 3119), False, 'import coreschema\n'), ((4825, 4876), 'numpy.array', 'numpy.array', (["multimetric_scores['test_%s' % scorer]"], {}), "(multimetric_scores['test_%s' % scorer])\n", (4836, 4876), False, 'import numpy\n')]
|
# Copyright (c) 2021. Universidad de Pinar del Rio
# This file is part of SCEIBA (sceiba.cu).
# SCEIBA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
from typing import Dict
from flask_babelex import lazy_gettext as _
from flask_login import current_user
from invenio_access import Permission
from invenio_access.models import ActionUsers
from invenio_access.utils import get_identity
from invenio_accounts.models import User
from invenio_db import db
from sqlalchemy import exc as sqlalchemyExc
from iroko.sources.models import TermSources
from iroko.utils import string_as_identifier
from iroko.vocabularies.marshmallow import term_node_schema, term_schema, vocabulary_schema
from iroko.vocabularies.models import Term, TermClasification, Vocabulary
from iroko.vocabularies.permissions import ObjectVocabularyEditor, is_current_user_taxonomy_admin
# TODO: Review exception raising
class Vocabularies:
    '''Manage vocabularies'''

    @classmethod
    def get_vocabularies(cls):
        """Return every Vocabulary in the database."""
        return Vocabulary.query.all()

    @classmethod
    def get_vocabulary(cls, name, id=None) -> Dict[str, Vocabulary]:
        """Fetch a vocabulary by primary key (``id``) or, failing that, by
        its ``identifier`` (``name``).

        Returns ``('ok', vocab)`` when found, ``(error_message, None)``
        otherwise.
        """
        # Bug fix: `vocab` was previously unbound (UnboundLocalError) when
        # both `name` and `id` were None.
        vocab = None
        if id is not None:
            vocab = Vocabulary.query.filter_by(id=id).first()
        elif name is not None:
            vocab = Vocabulary.query.filter_by(identifier=name).first()
        if vocab:
            return 'ok', vocab
        msg = 'Vocabulary not exist id={0}'.format(id)
        return msg, None

    @classmethod
    def edit_vocabulary(cls, name, data) -> Dict[str, Vocabulary]:
        """Update human_name/description/data of the vocabulary named *name*.

        Returns (message, vocab); vocab is None when it does not exist.
        """
        msg, vocab = cls.get_vocabulary(name)
        if not vocab:
            return msg, vocab
        try:
            valid_data = vocabulary_schema.load(data)
            vocab.human_name = valid_data['human_name']
            vocab.description = valid_data['description']
            vocab.data = valid_data['data']
            db.session.commit()
            msg = 'New Vocabulary UPDATED name={0}'.format(vocab.identifier)
        except Exception as err:
            msg = 'ERROR {0} - {1}'.format(err, data)
        finally:
            # NOTE: return-in-finally deliberately swallows any exception
            # raised above; kept for backward compatibility.
            return msg, vocab

    @classmethod
    def new_vocabulary(cls, input_data) -> Dict[str, Vocabulary]:
        """Create a new vocabulary from *input_data*.

        Returns (message, vocab); vocab is None on error or when a
        vocabulary with the same identifier already exists.
        """
        msg = ''
        vocab = None
        try:
            data = vocabulary_schema.load(input_data)
            vocab = Vocabulary.query.filter_by(identifier=data['name']).first()
            if not vocab:
                vocab = Vocabulary()
                vocab.identifier = string_as_identifier(data['name'])
                vocab.human_name = data['human_name']
                vocab.description = data['description']
                vocab.data = data['data']
                db.session.add(vocab)
                db.session.commit()
                msg = 'New Vocabulary CREATED name={0}'.format(vocab.identifier)
            else:
                msg = 'Vocabulary already exist name={0}'.format(vocab.identifier)
                vocab = None
        except Exception as err:
            # Bug fix: format with input_data, not `data` — `data` was unbound
            # (NameError inside the handler) when the schema load itself raised.
            msg = 'ERROR {0} - {1}'.format(err, input_data)
            vocab = None
        finally:
            return msg, vocab

    @classmethod
    def grant_vocabulary_editor_permission(cls, user_id, vocabulary_id) -> Dict[str, bool]:
        """Allow *user_id* to edit the vocabulary whose identifier is
        *vocabulary_id*.

        Returns (message, done); done is True only on success.
        """
        done = False
        msg = ''
        try:
            vocabulary = Vocabulary.query.filter_by(identifier=vocabulary_id).first()
            user = User.query.filter_by(id=user_id).first()
            if not vocabulary:
                msg = 'Vocabulary not found'
            elif not user:
                msg = 'User not found'
            else:
                # The permission need is built on the vocabulary primary key.
                db.session.add(ActionUsers.allow(ObjectVocabularyEditor(vocabulary.id), user=user))
                db.session.commit()
                msg = 'Vocabulary Editor Permission granted over {0}'.format(vocabulary.name)
                done = True
        except Exception as e:
            msg = str(e)
            # print(str(e))
        return msg, done

    @classmethod
    def deny_vocabulary_editor_permission(cls, user_id, vocabulary_id) -> Dict[str, bool]:
        """Deny *user_id* the editor permission over *vocabulary_id*.

        Bug fixes: the method was declared @classmethod but missed the
        ``cls`` parameter, so every call with (user_id, vocabulary_id)
        raised TypeError; and the need was built on ``vocabulary.name``
        while grant uses ``vocabulary.id`` — a deny on a different need
        value would never match the granted one.
        """
        done = False
        msg = ''
        try:
            vocabulary = Vocabulary.query.filter_by(identifier=vocabulary_id).first()
            user = User.query.filter_by(id=user_id).first()
            if not vocabulary:
                msg = 'Vocabulary not found'
            elif not user:
                msg = 'User not found'
            else:
                db.session.add(ActionUsers.deny(ObjectVocabularyEditor(vocabulary.id), user=user))
                db.session.commit()
                msg = 'Editor Permission granted over {0}'.format(vocabulary.name)
                done = True
        except Exception as e:
            # print(str(e))
            msg = str(e)
        return msg, done

    @classmethod
    def check_user_vocabulary_editor_permission(cls, user_id, vocabulary_id) -> Dict[str, bool]:
        """Tell whether *user_id* may edit *vocabulary_id*.

        Taxonomy admins are always allowed.

        Bug fixes: missing ``cls`` parameter (same defect as deny above);
        the user query lacked ``.first()``, so an identity was built from a
        Query object instead of a User; and the need is now built on
        ``vocabulary.id`` to match what grant creates.
        """
        done = False
        msg = ''
        try:
            if is_current_user_taxonomy_admin():
                done = True
            else:
                vocabulary = Vocabulary.query.filter_by(identifier=vocabulary_id).first()
                user = User.query.filter_by(id=user_id).first()
                user_identity = get_identity(user)
                permission = Permission(ObjectVocabularyEditor(vocabulary.id))
                done = permission.allows(user_identity)
        except Exception as e:
            msg = str(e)
            # print(str(e))
        return msg, done
class Terms:
"""Manage Terms"""
    @classmethod
    def get_terms(cls):
        """Return every Term in the database."""
        return Term.query.all()
@classmethod
def get_terms_by_vocab(cls, vocabulary_id) -> Dict[str, Term]:
msg, vocab = Vocabularies.get_vocabulary(vocabulary_id)
if not vocab:
raise Exception(msg)
terms = vocab.terms
return 'ok', terms
@classmethod
def get_first_level_terms_by_vocabulary(cls, vocabulary_id) -> Dict[str, Term]:
msg, vocab = Vocabularies.get_vocabulary(vocabulary_id)
if not vocab:
raise Exception(msg)
terms = vocab.terms.filter_by(parent_id=None).all()
return 'ok', vocab, terms
@classmethod
def get_terms_tree_by_vocabulary(cls, vocabulary_id, level: int) -> [str, Vocabulary, list]:
"""If level < 0, means all the levels of the tree"""
msg, vocab = Vocabularies.get_vocabulary(vocabulary_id)
if not vocab:
raise Exception(msg)
msg, terms = Terms.get_first_level_terms_by_vocabulary(vocabulary_id)
terms_full = []
for term in terms:
terms_full.append(term_node_schema.dump_term_node(term, level, 0))
return 'ok', vocab, terms_full
@classmethod
def get_term(cls, uuid) -> Dict[str, Term]:
term = Term.query.filter_by(uuid=uuid).first()
if term:
return 'ok', term
else:
msg = 'Term not exist uuid={0}'.format(uuid)
return msg, None
@classmethod
def get_terms_by_uuid_list(cls, uuid_list):
terms = Term.query.filter(Term.uuid.in_(uuid_list)).all()
return terms
@classmethod
def get_terms_by_id_list(cls, id_list):
terms = Term.query.filter(Term.id.in_(id_list)).all()
return terms
@classmethod
def get_term_by_id(cls, id) -> Dict[str, Term]:
term = Term.query.filter_by(id=id).first()
if term:
return 'ok', term
else:
msg = 'Term not exist id={0}'.format(id)
return msg, None
# @classmethod
# def update_or_create_term(cls, input_data, term_id=None) -> Dict[str, Term]:
# """
# given a term data, try to update if id, uuid or name is present,
# otherwise create a new term.
# """
# data = term_schema.load(input_data)
# # print("****** LOADED term")
# term = None
# # if 'uuid' in data:
# # term = Term.query.filter_by(uuid=data['uuid']).first()
# # elif term is None and 'id' in data:
# # term = Term.query.filter_by(id=data['id']).first()
# if term_id:
# term = Term.query.filter_by(id=term_id).first()
# elif term is None and 'name' in data:
# term = Term.query.filter_by(identifier=data['name']).first()
# # print("********* term is {0}".format(term))
# if term is None and 'name' in data:
# # print('********IS NEW')
# return cls.new_term(data)
# if term and 'uuid' in data:
# # print('********IS UPDATE')
# return cls.edit_term(data['uuid'], data)
# return "error", None
    @classmethod
    def edit_term(cls, uuid, input_data) -> Dict[str, Term]:
        """Update an existing term (looked up by uuid) from raw input data.

        Returns (message, term) on success, (error message, None) on failure.
        """
        msg = ''
        try:
            data = term_schema.load(input_data)
            term = Term.query.filter_by(uuid=uuid).first()
            # NOTE(review): if no term matches the uuid, `term` is None and
            # the assignments below raise — swallowed by the outer except.
            # NOTE(review): this sets `term.name` while new_term sets
            # `term.identifier` — confirm which attribute the model defines.
            term.vocabulary_id = data['vocabulary_id']
            term.name = string_as_identifier(data['name'])
            term.description = data['description']
            term.parent_id = data['parent_id']
            term.data = data['data']
            # cls._update_term_data(term, data)
            # print(term.data)
            try:
                db.session.commit()
                # Clasification rows are rebuilt only after the term itself
                # has been committed.
                cls._update_term_clasification(term, data)
                msg = 'New Term UPDATED name={0}'.format(term.name)
                return msg, term
            except sqlalchemyExc.SQLAlchemyError as e:
                msg = 'sqlalthemy: {0}'.format(e)
                db.session.rollback()
                return msg, None
        except Exception as e:
            msg = 'ERROR {0} - {1}'.format(e, input_data)
            return msg, None
    @classmethod
    def new_term(cls, data) -> Dict[str, Term]:
        """Create a new Term from raw input data.

        Returns (message, term) on success; (error message, None) when a
        term with the same identifier already exists or the commit fails.
        """
        msg = ''
        # try:
        valid_data = term_schema.load(data)
        term = Term.query.filter_by(identifier=valid_data['name']).first()
        if not term:
            # print(valid_data)
            term = Term()
            term.vocabulary_id = valid_data['vocabulary_id']
            term.identifier = string_as_identifier(valid_data['name'])
            term.description = valid_data['description']
            term.parent_id = valid_data['parent_id']
            term.data = valid_data['data']
            # print(term.data)
            db.session.add(term)
            # print(term)
            try:
                db.session.commit()
                # Clasifications are rebuilt only after the term has an id.
                cls._update_term_clasification(term, valid_data)
                msg = 'New Term CREATED name={0}'.format(term.identifier)
                return msg, term
            except sqlalchemyExc.SQLAlchemyError as e:
                msg = 'sqlalthemy: {0}'.format(e)
                db.session.rollback()
                return msg, None
        else:
            msg = 'Term already exist name={0}'.format(valid_data['name'])
            return msg, None
        # except Exception as e:
        #     msg = 'ERROR {0} - {1}'.format(e, data)
        #     return msg, None
# @classmethod
# def _get_term_data(cls, term: Term, data):
# ''''''
# # return {
# # 'vocabulary_id': data['vocabulary_id'],
# # 'name': data['name'],
# # 'description': data['description'],
# # 'parent_id': data['parent_id'],
# # 'data': data['data'],
# # }
# # print(data)
# term.vocabulary_id = data['vocabulary_id']
# # print(data)
# term.name = data['name']
# # print(data)
# term.description = data['description']
# # print(data)
# term.parent_id = data['parent_id']
# # print(data)
# term.data = data['data']
# # print(data)
@classmethod
def _update_term_data(cls, term: Term, data):
''''''
# return {
# 'vocabulary_id': data['vocabulary_id'],
# 'name': data['name'],
# 'description': data['description'],
# 'parent_id': data['parent_id'],
# 'data': data['data'],
# }
# print(data)
term.vocabulary_id = data['vocabulary_id']
# print(data)
term.identifier = data['name']
# print(data)
term.description = data['description']
# print(data)
term.parent_id = data['parent_id']
# print(data)
term.data = data['data']
# print(data)
    @classmethod
    def _update_term_clasification(cls, term: Term, data):
        '''
        this search all clasification of the term, delete it, and then create new clasification
        based on params
        # TODO: This will be replaced by the graph database, when done....
        in data:
            class_ids: IDs of Terms that clasifies this term
            clasified_ids: IDs of Terms clasified by this term
        '''
        # print('_update_term_clasification', data)
        # delete all Clasifications in wich this term is envolved
        TermClasification.query.filter_by(term_class_id=term.id).delete()
        TermClasification.query.filter_by(term_clasified_id=term.id).delete()
        # Deletions are committed before re-creation, so a failure below
        # leaves the term unclassified rather than double-classified.
        db.session.commit()
        # print('_update_term_clasification', data)
        # Terms clasified by this term
        for clasified_ids in data['clasified_ids']:
            clasified = Term.query.filter_by(id=clasified_ids).first()
            # Unknown ids are silently skipped.
            if clasified:
                clasification = TermClasification()
                clasification.term_class_id = term.id
                clasification.term_clasified_id = clasified.id
                db.session.add(clasification)
        # Terms that clasifies this term
        for class_id in data['class_ids']:
            t_class = Term.query.filter_by(id=class_id).first()
            if t_class:
                clasification = TermClasification()
                clasification.term_class_id = t_class.id
                clasification.term_clasified_id = term.id
                db.session.add(clasification)
        db.session.commit()
        # print('_update_term_clasification', data)
    @classmethod
    def delete_term(cls, uuid) -> Dict[str, bool]:
        """Delete the Term identified by ``uuid``.

        Refuses to delete when other terms, clasifications or sources still
        depend on it.  Returns a ``(message, success_flag)`` tuple.
        NOTE(review): the ``Dict[str, bool]`` annotation does not match the
        tuple actually returned; also, when no term matches ``uuid`` the
        method falls through and implicitly returns ``None``.
        """
        try:
            term = Term.query.filter_by(uuid=uuid).first()
            if term:
                # A term that still has children cannot be removed.
                if len(term.children) > 0:
                    return _(
                        'No se puede eliminar el término cuando otros términos dependen de él'
                    ), False
                in_clasification = TermClasification.query.filter_by(term_class_id=term.id).first()
                if in_clasification:
                    return _(
                        'No se puede eliminar el término si clasificaciones dependen de él'
                    ), False
                in_source = TermSources.query.filter_by(term_id=term.id).first()
                if in_source:
                    return _('No se puede eliminar el término si fuentes dependen de él'), False
                # NOTE(review): filters on 'term_object_id', but the rest of
                # this file uses 'term_class_id'/'term_clasified_id' on
                # TermClasification -- confirm this field exists on the model.
                db.session.query(TermClasification).filter_by(term_object_id=term.id).delete()
                db.session.delete(term)
                db.session.commit()
                # NOTE(review): 'term.name' -- other methods set
                # 'term.identifier'; verify Term exposes a 'name' attribute.
                return 'Término: {0}, eliminado satisfactoriamente'.format(term.name), True
        except Exception as e:
            return str(e), False
@classmethod
def get_terms_by_vocabulary_name(cls, vocabulary_name):
try:
lista = Term.query.join(Term.vocabulary, aliased=True).filter_by(
name=vocabulary_name
).order_by(
Term.identifier
)
# print(lista[0].id)
return lista
except Exception as error:
return []
@classmethod
def get_term_tree_list(cls, term, result):
"""helper fuction to get all the children terms ids in a list
"""
result.append(term.id)
for child in term.children:
cls.get_term_tree_list(child, result)
@classmethod
def get_term_tree_list_by_level(cls, term, result, start_level=0, level=0):
"""
retornar una lista en result comenzando en el start_level abajo del term
recibido y debe avanzar level cantidad abajo de ese nivel
"""
new_start = 0
if start_level == 0:
result.append(term.id)
if start_level > 0:
new_start = start_level - 1
if level > 0:
for child in term.children:
cls.get_term_tree_list_by_level(child, result, new_start, level - 1)
# @classmethod
# def dump_term(cls, term:Term, level_to_reach: int, current_level: int):
# """ helper function to load terms children"""
# if current_level < level_to_reach:
# children = []
# for child in term.children:
# children.append(Terms.dump_term(child, level_to_reach, current_level+1))
# return {'term': term_schema.dump(term), 'children':children}
# else:
# return term_schema.dump(term)
def get_current_user_permissions() -> Dict[str, Dict[str, list]]:
    """Return the taxonomy permissions of ``current_user``.

    Taxonomy admins get full editor rights; otherwise the ids of the
    vocabularies granted through ``vocabulary_editor_actions`` are collected.
    NOTE(review): the annotation says Dict but a 2-tuple is returned.
    """
    if is_current_user_taxonomy_admin():
        return 'actions', {'vocabularies_full_editor_actions': None}
    granted = ActionUsers.query.filter_by(
        user=current_user,
        exclude=False,
        action='vocabulary_editor_actions'
    ).all()
    vocabularies_ids = [action.argument for action in granted]
    return 'actions', {'vocabulary_editor_actions': vocabularies_ids}
def get_current_user_described_permissions() -> Dict[str, Dict[str, list]]:
    """Return the taxonomy permissions of ``current_user``.

    NOTE(review): despite the original docstring's claim of "a dict of
    texts", this currently mirrors get_current_user_permissions exactly.
    """
    if is_current_user_taxonomy_admin():
        return 'actions', {'vocabularies_full_editor_actions': None}
    granted = ActionUsers.query.filter_by(
        user=current_user,
        exclude=False,
        action='vocabulary_editor_actions'
    ).all()
    vocabularies_ids = [action.argument for action in granted]
    return 'actions', {'vocabulary_editor_actions': vocabularies_ids}
|
[
"iroko.vocabularies.models.Term",
"iroko.vocabularies.models.Vocabulary.query.all",
"invenio_db.db.session.add",
"invenio_db.db.session.delete",
"iroko.vocabularies.models.Vocabulary",
"invenio_db.db.session.query",
"iroko.vocabularies.marshmallow.term_node_schema.dump_term_node",
"iroko.vocabularies.models.TermClasification",
"iroko.vocabularies.models.TermClasification.query.filter_by",
"iroko.vocabularies.marshmallow.term_schema.load",
"iroko.vocabularies.models.Term.uuid.in_",
"iroko.vocabularies.permissions.is_current_user_taxonomy_admin",
"invenio_access.models.ActionUsers.query.filter_by",
"iroko.vocabularies.permissions.ObjectVocabularyEditor",
"flask_babelex.lazy_gettext",
"iroko.vocabularies.models.Term.id.in_",
"invenio_access.utils.get_identity",
"iroko.vocabularies.models.Term.query.filter_by",
"iroko.vocabularies.marshmallow.vocabulary_schema.load",
"invenio_db.db.session.commit",
"invenio_db.db.session.rollback",
"iroko.vocabularies.models.Term.query.join",
"iroko.vocabularies.models.Vocabulary.query.filter_by",
"iroko.utils.string_as_identifier",
"iroko.sources.models.TermSources.query.filter_by",
"iroko.vocabularies.models.Term.query.all",
"invenio_accounts.models.User.query.filter_by"
] |
[((17524, 17556), 'iroko.vocabularies.permissions.is_current_user_taxonomy_admin', 'is_current_user_taxonomy_admin', ([], {}), '()\n', (17554, 17556), False, 'from iroko.vocabularies.permissions import ObjectVocabularyEditor, is_current_user_taxonomy_admin\n'), ((18409, 18441), 'iroko.vocabularies.permissions.is_current_user_taxonomy_admin', 'is_current_user_taxonomy_admin', ([], {}), '()\n', (18439, 18441), False, 'from iroko.vocabularies.permissions import ObjectVocabularyEditor, is_current_user_taxonomy_admin\n'), ((1105, 1127), 'iroko.vocabularies.models.Vocabulary.query.all', 'Vocabulary.query.all', ([], {}), '()\n', (1125, 1127), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((5694, 5710), 'iroko.vocabularies.models.Term.query.all', 'Term.query.all', ([], {}), '()\n', (5708, 5710), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((9961, 9983), 'iroko.vocabularies.marshmallow.term_schema.load', 'term_schema.load', (['data'], {}), '(data)\n', (9977, 9983), False, 'from iroko.vocabularies.marshmallow import term_node_schema, term_schema, vocabulary_schema\n'), ((13257, 13276), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (13274, 13276), False, 'from invenio_db import db\n'), ((14126, 14145), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (14143, 14145), False, 'from invenio_db import db\n'), ((2399, 2433), 'iroko.vocabularies.marshmallow.vocabulary_schema.load', 'vocabulary_schema.load', (['input_data'], {}), '(input_data)\n', (2421, 2433), False, 'from iroko.vocabularies.marshmallow import term_node_schema, term_schema, vocabulary_schema\n'), ((5075, 5107), 'iroko.vocabularies.permissions.is_current_user_taxonomy_admin', 'is_current_user_taxonomy_admin', ([], {}), '()\n', (5105, 5107), False, 'from iroko.vocabularies.permissions import ObjectVocabularyEditor, is_current_user_taxonomy_admin\n'), ((8918, 8946), 
'iroko.vocabularies.marshmallow.term_schema.load', 'term_schema.load', (['input_data'], {}), '(input_data)\n', (8934, 8946), False, 'from iroko.vocabularies.marshmallow import term_node_schema, term_schema, vocabulary_schema\n'), ((9085, 9119), 'iroko.utils.string_as_identifier', 'string_as_identifier', (["data['name']"], {}), "(data['name'])\n", (9105, 9119), False, 'from iroko.utils import string_as_identifier\n'), ((10132, 10138), 'iroko.vocabularies.models.Term', 'Term', ([], {}), '()\n', (10136, 10138), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((10230, 10270), 'iroko.utils.string_as_identifier', 'string_as_identifier', (["valid_data['name']"], {}), "(valid_data['name'])\n", (10250, 10270), False, 'from iroko.utils import string_as_identifier\n'), ((10467, 10487), 'invenio_db.db.session.add', 'db.session.add', (['term'], {}), '(term)\n', (10481, 10487), False, 'from invenio_db import db\n'), ((1755, 1783), 'iroko.vocabularies.marshmallow.vocabulary_schema.load', 'vocabulary_schema.load', (['data'], {}), '(data)\n', (1777, 1783), False, 'from iroko.vocabularies.marshmallow import term_node_schema, term_schema, vocabulary_schema\n'), ((1970, 1989), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1987, 1989), False, 'from invenio_db import db\n'), ((2564, 2576), 'iroko.vocabularies.models.Vocabulary', 'Vocabulary', ([], {}), '()\n', (2574, 2576), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((2612, 2646), 'iroko.utils.string_as_identifier', 'string_as_identifier', (["data['name']"], {}), "(data['name'])\n", (2632, 2646), False, 'from iroko.utils import string_as_identifier\n'), ((2815, 2836), 'invenio_db.db.session.add', 'db.session.add', (['vocab'], {}), '(vocab)\n', (2829, 2836), False, 'from invenio_db import db\n'), ((2853, 2872), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2870, 2872), False, 'from invenio_db import 
db\n'), ((5268, 5300), 'invenio_accounts.models.User.query.filter_by', 'User.query.filter_by', ([], {'id': 'user_id'}), '(id=user_id)\n', (5288, 5300), False, 'from invenio_accounts.models import User\n'), ((5333, 5351), 'invenio_access.utils.get_identity', 'get_identity', (['user'], {}), '(user)\n', (5345, 5351), False, 'from invenio_access.utils import get_identity\n'), ((6744, 6791), 'iroko.vocabularies.marshmallow.term_node_schema.dump_term_node', 'term_node_schema.dump_term_node', (['term', 'level', '(0)'], {}), '(term, level, 0)\n', (6775, 6791), False, 'from iroko.vocabularies.marshmallow import term_node_schema, term_schema, vocabulary_schema\n'), ((6914, 6945), 'iroko.vocabularies.models.Term.query.filter_by', 'Term.query.filter_by', ([], {'uuid': 'uuid'}), '(uuid=uuid)\n', (6934, 6945), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((7484, 7511), 'iroko.vocabularies.models.Term.query.filter_by', 'Term.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (7504, 7511), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((9367, 9386), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9384, 9386), False, 'from invenio_db import db\n'), ((10000, 10051), 'iroko.vocabularies.models.Term.query.filter_by', 'Term.query.filter_by', ([], {'identifier': "valid_data['name']"}), "(identifier=valid_data['name'])\n", (10020, 10051), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((10547, 10566), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (10564, 10566), False, 'from invenio_db import db\n'), ((13105, 13161), 'iroko.vocabularies.models.TermClasification.query.filter_by', 'TermClasification.query.filter_by', ([], {'term_class_id': 'term.id'}), '(term_class_id=term.id)\n', (13138, 13161), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((13179, 13239), 
'iroko.vocabularies.models.TermClasification.query.filter_by', 'TermClasification.query.filter_by', ([], {'term_clasified_id': 'term.id'}), '(term_clasified_id=term.id)\n', (13212, 13239), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((13549, 13568), 'iroko.vocabularies.models.TermClasification', 'TermClasification', ([], {}), '()\n', (13566, 13568), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((13702, 13731), 'invenio_db.db.session.add', 'db.session.add', (['clasification'], {}), '(clasification)\n', (13716, 13731), False, 'from invenio_db import db\n'), ((13937, 13956), 'iroko.vocabularies.models.TermClasification', 'TermClasification', ([], {}), '()\n', (13954, 13956), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((14088, 14117), 'invenio_db.db.session.add', 'db.session.add', (['clasification'], {}), '(clasification)\n', (14102, 14117), False, 'from invenio_db import db\n'), ((15175, 15198), 'invenio_db.db.session.delete', 'db.session.delete', (['term'], {}), '(term)\n', (15192, 15198), False, 'from invenio_db import db\n'), ((15215, 15234), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (15232, 15234), False, 'from invenio_db import db\n'), ((17655, 17757), 'invenio_access.models.ActionUsers.query.filter_by', 'ActionUsers.query.filter_by', ([], {'user': 'current_user', 'exclude': '(False)', 'action': '"""vocabulary_editor_actions"""'}), "(user=current_user, exclude=False, action=\n 'vocabulary_editor_actions')\n", (17682, 17757), False, 'from invenio_access.models import ActionUsers\n'), ((18540, 18642), 'invenio_access.models.ActionUsers.query.filter_by', 'ActionUsers.query.filter_by', ([], {'user': 'current_user', 'exclude': '(False)', 'action': '"""vocabulary_editor_actions"""'}), "(user=current_user, exclude=False, action=\n 'vocabulary_editor_actions')\n", (18567, 18642), False, 'from invenio_access.models 
import ActionUsers\n'), ((1263, 1296), 'iroko.vocabularies.models.Vocabulary.query.filter_by', 'Vocabulary.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (1289, 1296), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((2454, 2505), 'iroko.vocabularies.models.Vocabulary.query.filter_by', 'Vocabulary.query.filter_by', ([], {'identifier': "data['name']"}), "(identifier=data['name'])\n", (2480, 2505), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((3429, 3481), 'iroko.vocabularies.models.Vocabulary.query.filter_by', 'Vocabulary.query.filter_by', ([], {'identifier': 'vocabulary_id'}), '(identifier=vocabulary_id)\n', (3455, 3481), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((3509, 3541), 'invenio_accounts.models.User.query.filter_by', 'User.query.filter_by', ([], {'id': 'user_id'}), '(id=user_id)\n', (3529, 3541), False, 'from invenio_accounts.models import User\n'), ((3826, 3845), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3843, 3845), False, 'from invenio_db import db\n'), ((4259, 4311), 'iroko.vocabularies.models.Vocabulary.query.filter_by', 'Vocabulary.query.filter_by', ([], {'identifier': 'vocabulary_id'}), '(identifier=vocabulary_id)\n', (4285, 4311), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((4339, 4371), 'invenio_accounts.models.User.query.filter_by', 'User.query.filter_by', ([], {'id': 'user_id'}), '(id=user_id)\n', (4359, 4371), False, 'from invenio_accounts.models import User\n'), ((4657, 4676), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4674, 4676), False, 'from invenio_db import db\n'), ((5392, 5431), 'iroko.vocabularies.permissions.ObjectVocabularyEditor', 'ObjectVocabularyEditor', (['vocabulary.name'], {}), '(vocabulary.name)\n', (5414, 5431), False, 'from iroko.vocabularies.permissions import ObjectVocabularyEditor, 
is_current_user_taxonomy_admin\n'), ((7201, 7225), 'iroko.vocabularies.models.Term.uuid.in_', 'Term.uuid.in_', (['uuid_list'], {}), '(uuid_list)\n', (7214, 7225), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((7350, 7370), 'iroko.vocabularies.models.Term.id.in_', 'Term.id.in_', (['id_list'], {}), '(id_list)\n', (7361, 7370), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((8966, 8997), 'iroko.vocabularies.models.Term.query.filter_by', 'Term.query.filter_by', ([], {'uuid': 'uuid'}), '(uuid=uuid)\n', (8986, 8997), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((9668, 9689), 'invenio_db.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (9687, 9689), False, 'from invenio_db import db\n'), ((10860, 10881), 'invenio_db.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (10879, 10881), False, 'from invenio_db import db\n'), ((13444, 13482), 'iroko.vocabularies.models.Term.query.filter_by', 'Term.query.filter_by', ([], {'id': 'clasified_ids'}), '(id=clasified_ids)\n', (13464, 13482), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((13839, 13872), 'iroko.vocabularies.models.Term.query.filter_by', 'Term.query.filter_by', ([], {'id': 'class_id'}), '(id=class_id)\n', (13859, 13872), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((14299, 14330), 'iroko.vocabularies.models.Term.query.filter_by', 'Term.query.filter_by', ([], {'uuid': 'uuid'}), '(uuid=uuid)\n', (14319, 14330), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((1356, 1399), 'iroko.vocabularies.models.Vocabulary.query.filter_by', 'Vocabulary.query.filter_by', ([], {'identifier': 'name'}), '(identifier=name)\n', (1382, 1399), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((5184, 5236), 
'iroko.vocabularies.models.Vocabulary.query.filter_by', 'Vocabulary.query.filter_by', ([], {'identifier': 'vocabulary_id'}), '(identifier=vocabulary_id)\n', (5210, 5236), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((14430, 14503), 'flask_babelex.lazy_gettext', '_', (['"""No se puede eliminar el término cuando otros términos dependen de él"""'], {}), "('No se puede eliminar el término cuando otros términos dependen de él')\n", (14431, 14503), True, 'from flask_babelex import lazy_gettext as _\n'), ((14597, 14653), 'iroko.vocabularies.models.TermClasification.query.filter_by', 'TermClasification.query.filter_by', ([], {'term_class_id': 'term.id'}), '(term_class_id=term.id)\n', (14630, 14653), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((14726, 14796), 'flask_babelex.lazy_gettext', '_', (['"""No se puede eliminar el término si clasificaciones dependen de él"""'], {}), "('No se puede eliminar el término si clasificaciones dependen de él')\n", (14727, 14796), True, 'from flask_babelex import lazy_gettext as _\n'), ((14883, 14927), 'iroko.sources.models.TermSources.query.filter_by', 'TermSources.query.filter_by', ([], {'term_id': 'term.id'}), '(term_id=term.id)\n', (14910, 14927), False, 'from iroko.sources.models import TermSources\n'), ((14993, 15055), 'flask_babelex.lazy_gettext', '_', (['"""No se puede eliminar el término si fuentes dependen de él"""'], {}), "('No se puede eliminar el término si fuentes dependen de él')\n", (14994, 15055), True, 'from flask_babelex import lazy_gettext as _\n'), ((3759, 3796), 'iroko.vocabularies.permissions.ObjectVocabularyEditor', 'ObjectVocabularyEditor', (['vocabulary.id'], {}), '(vocabulary.id)\n', (3781, 3796), False, 'from iroko.vocabularies.permissions import ObjectVocabularyEditor, is_current_user_taxonomy_admin\n'), ((4588, 4627), 'iroko.vocabularies.permissions.ObjectVocabularyEditor', 'ObjectVocabularyEditor', (['vocabulary.name'], {}), 
'(vocabulary.name)\n', (4610, 4627), False, 'from iroko.vocabularies.permissions import ObjectVocabularyEditor, is_current_user_taxonomy_admin\n'), ((15503, 15549), 'iroko.vocabularies.models.Term.query.join', 'Term.query.join', (['Term.vocabulary'], {'aliased': '(True)'}), '(Term.vocabulary, aliased=True)\n', (15518, 15549), False, 'from iroko.vocabularies.models import Term, TermClasification, Vocabulary\n'), ((15080, 15115), 'invenio_db.db.session.query', 'db.session.query', (['TermClasification'], {}), '(TermClasification)\n', (15096, 15115), False, 'from invenio_db import db\n')]
|
import itertools
import ucd
# Four sample semicolon-separated UnicodeData.txt records ('@', 'A', 'B',
# 'C') used by the index tests below.
ABC_LINES = '''
0040;COMMERCIAL AT;Po;0;ON;;;;;N;;;;;
0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;
0042;LATIN CAPITAL LETTER B;Lu;0;L;;;;;N;;;;0062;
0043;LATIN CAPITAL LETTER C;Lu;0;L;;;;;N;;;;0063;
'''.strip()
def test_parse_line():
    """parse_line splits a record into (code, name, old_name, sorted words)."""
    code, name, old_name, words = ucd.parse_line(
        '0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;'
    )
    assert (code, name, old_name) == (65, 'LATIN CAPITAL LETTER A', '')
    assert words == ['A', 'CAPITAL', 'LATIN', 'LETTER']
def test_parse_line_with_hyphen_and_field_10():
    """Hyphenated names are split into words; field 10 supplies the old name."""
    cases = {
        '002D;HYPHEN-MINUS;Pd;0;ES;;;;;N;;;;;':
            (45, 'HYPHEN-MINUS', '', ['HYPHEN', 'MINUS']),
        '005F;LOW LINE;Pc;0;ON;;;;;N;SPACING UNDERSCORE;;;;':
            (95, 'LOW LINE', 'SPACING UNDERSCORE',
             ['LINE', 'LOW', 'SPACING', 'UNDERSCORE']),
        '0027;APOSTROPHE;Po;0;ON;;;;;N;APOSTROPHE-QUOTE;;;':
            (39, 'APOSTROPHE', 'APOSTROPHE-QUOTE', ['APOSTROPHE', 'QUOTE']),
    }
    for line, expected in cases.items():
        assert ucd.parse_line(line) == expected
def test_parser_top_3():
    """The parser yields records in code-point order, starting at SPACE."""
    expected = [
        (32, 'SPACE', '', ['SPACE']),
        (33, 'EXCLAMATION MARK', '', ['EXCLAMATION', 'MARK']),
        (34, 'QUOTATION MARK', '', ['MARK', 'QUOTATION']),
    ]
    actual = list(itertools.islice(ucd.parser(), 3))
    assert actual == expected
def test_index():
    """index() maps each name word to the list of code points using it."""
    record = ucd.parse_line('003E;GREATER-THAN SIGN;Sm;0;ON;;;;;Y;;;;;')
    assert ucd.index([record]) == {'GREATER': [62], 'SIGN': [62], 'THAN': [62]}
def test_index_abc():
    """Words shared by several records accumulate all their code points."""
    records = [ucd.parse_line(line) for line in ABC_LINES.splitlines()]
    expected = {
        'A': [65],
        'AT': [64],
        'B': [66],
        'C': [67],
        'CAPITAL': [65, 66, 67],
        'COMMERCIAL': [64],
        'LATIN': [65, 66, 67],
        'LETTER': [65, 66, 67],
    }
    assert ucd.index(records) == expected
|
[
"ucd.index",
"ucd.parser",
"ucd.parse_line"
] |
[((370, 392), 'ucd.parse_line', 'ucd.parse_line', (['line_A'], {}), '(line_A)\n', (384, 392), False, 'import ucd\n'), ((1502, 1522), 'ucd.parse_line', 'ucd.parse_line', (['line'], {}), '(line)\n', (1516, 1522), False, 'import ucd\n'), ((1533, 1552), 'ucd.index', 'ucd.index', (['[record]'], {}), '([record])\n', (1542, 1552), False, 'import ucd\n'), ((1722, 1740), 'ucd.index', 'ucd.index', (['records'], {}), '(records)\n', (1731, 1740), False, 'import ucd\n'), ((1068, 1088), 'ucd.parse_line', 'ucd.parse_line', (['line'], {}), '(line)\n', (1082, 1088), False, 'import ucd\n'), ((1656, 1676), 'ucd.parse_line', 'ucd.parse_line', (['line'], {}), '(line)\n', (1670, 1676), False, 'import ucd\n'), ((1194, 1206), 'ucd.parser', 'ucd.parser', ([], {}), '()\n', (1204, 1206), False, 'import ucd\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import re
import codecs
from path import Path
wordlist = []
def main():
    """Parse the command line and rename the given directory."""
    parser = argparse.ArgumentParser(
        description='rename dir based on files inside')
    parser.add_argument('dirname', help='path of directory to rename')
    args = parser.parse_args()
    rename(Path(args.dirname))
def rename(p):
    """Rename directory ``p`` after the large (>10 MiB) files it contains.

    Only acts when the current basename is gibberish (no dictionary words):
    the new name is either the single big file's stem or the longest common
    substring of all big files' stems, with release-style noise stripped.
    """
    big_files = [f for f in p.walkfiles()
                 if os.stat(f).st_size > 10 * 1024 * 1024]
    new_name = ''
    if is_gibberish(p.basename()):
        if len(big_files) == 1:
            new_name = big_files[0].basename().stripext()
        else:
            new_name = long_substr([f.namebase for f in big_files])
        # Strip trailing 'scene'/'cd' markers, then dots/underscores,
        # then a dangling hyphen.
        new_name = re.compile('(scene|cd)$', re.IGNORECASE).sub('', new_name.strip())
        new_name = re.sub('[._]', ' ', new_name)
        new_name = re.sub('- ?$', '', new_name).strip()
    if new_name != '':
        p = p.rename(Path.joinpath(p.dirname(), new_name))
        print(p.abspath())
def is_gibberish(dir):
    """Return True when no space-separated token of ``dir`` is a known word
    from the module-level ``wordlist`` (compared upper-cased).
    """
    return not any(w.upper() in wordlist for w in dir.split(' '))
def main_main():
    """Rename the first subdirectory of the hard-coded download folder."""
    candidates = Path('/media/truecrypt4/down/').dirs()
    for d in candidates[0:1]:
        rename(Path(d))
def long_substr(data):
    """Return the longest substring of ``data[0]`` present in every string
    of ``data``; '' when fewer than two strings or the first is empty.
    """
    best = ''
    if len(data) > 1 and len(data[0]) > 0:
        first = data[0]
        for start in range(len(first)):
            for length in range(len(first) - start + 1):
                if length > len(best):
                    candidate = first[start:start + length]
                    if all(candidate in s for s in data):
                        best = candidate
    return best
if __name__ == '__main__':
    # Load the dictionary once at startup; is_gibberish() compares tokens
    # upper-cased against this list.
    with codecs.open('/usr/share/dict/american-english-large', mode='r', encoding='utf-8') as wl:
        wordlist = [word.strip().upper() for word in wl.readlines()]
        # print('done loading wordlist')
        main()
|
[
"codecs.open",
"argparse.ArgumentParser",
"os.stat",
"path.Path",
"re.sub",
"re.compile"
] |
[((199, 270), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""rename dir based on files inside"""'}), "(description='rename dir based on files inside')\n", (222, 270), False, 'import argparse\n'), ((381, 399), 'path.Path', 'Path', (['args.dirname'], {}), '(args.dirname)\n', (385, 399), False, 'from path import Path\n'), ((857, 885), 're.sub', 're.sub', (['"""[._]"""', '""" """', 'dirname'], {}), "('[._]', ' ', dirname)\n", (863, 885), False, 'import re\n'), ((1704, 1790), 'codecs.open', 'codecs.open', (['"""/usr/share/dict/american-english-large"""'], {'mode': '"""r"""', 'encoding': '"""utf-8"""'}), "('/usr/share/dict/american-english-large', mode='r', encoding=\n 'utf-8')\n", (1715, 1790), False, 'import codecs\n'), ((743, 783), 're.compile', 're.compile', (['"""(scene|cd)$"""', 're.IGNORECASE'], {}), "('(scene|cd)$', re.IGNORECASE)\n", (753, 783), False, 'import re\n'), ((1263, 1294), 'path.Path', 'Path', (['"""/media/truecrypt4/down/"""'], {}), "('/media/truecrypt4/down/')\n", (1267, 1294), False, 'from path import Path\n'), ((1341, 1348), 'path.Path', 'Path', (['d'], {}), '(d)\n', (1345, 1348), False, 'from path import Path\n'), ((904, 931), 're.sub', 're.sub', (['"""- ?$"""', '""""""', 'dirname'], {}), "('- ?$', '', dirname)\n", (910, 931), False, 'import re\n'), ((472, 482), 'os.stat', 'os.stat', (['f'], {}), '(f)\n', (479, 482), False, 'import os\n')]
|
from collections import namedtuple
from itertools import islice
from ..errors import Error as DriverError
from .errors import InterfaceError, OperationalError, ProgrammingError
# PEP 249 seven-field column descriptor returned by Cursor.description.
Column = namedtuple(
    'Column',
    'name type_code display_size internal_size precision scale null_ok'
)
class Cursor(object):
    """Database cursor exposing the DB-API 2.0 (PEP 249) surface
    (execute*/fetch*/description/rowcount) plus driver-specific extensions.
    """

    class States(object):
        # Cursor lifecycle: no query yet -> query running -> query finished,
        # or closed permanently.
        (
            NONE,
            RUNNING,
            FINISHED,
            CURSOR_CLOSED
        ) = range(4)

    _states = States()
    def __init__(self, client, connection):
        """Bind the cursor to a driver *client* and its DB-API *connection*."""
        self._client = client
        self._connection = connection
        # Initialize all per-query state (rows, columns, settings, ...).
        self._reset_state()

        # Default number of rows returned by fetchmany() (PEP 249).
        self.arraysize = 1

        # Begin non-PEP attributes
        self._columns_with_types = None
        # End non-PEP attributes

        super(Cursor, self).__init__()
def __repr__(self):
is_closed = self._state == self._states.CURSOR_CLOSED
return '<cursor object at 0x{0:x}; closed: {1:}>'.format(
id(self), is_closed
)
# Iteration support.
def __iter__(self):
while True:
one = self.fetchone()
if one is None:
return
yield one
    # Context manager integrations.
    def __enter__(self):
        """Enter a ``with`` block; the cursor itself is the context value."""
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close the cursor on ``with`` block exit; exceptions propagate."""
        self.close()
@property
def description(self):
if self._state == self._states.NONE:
return None
columns = self._columns or []
types = self._types or []
return [
Column(name, type_code, None, None, None, None, True)
for name, type_code in zip(columns, types)
]
    @property
    def rowcount(self):
        """
        :return: the number of rows that the last .execute*() produced;
            -1 before any query (set by _reset_state).
        """
        return self._rowcount
def close(self):
"""
Close the cursor now. The cursor will be unusable from this point
forward; an :data:`~clickhouse_driver.dbapi.Error` (or subclass)
exception will be raised if any operation is attempted with the
cursor.
"""
self._client.disconnect()
self._state = self._states.CURSOR_CLOSED
try:
# cursor can be already closed
self._connection.cursors.remove(self)
except ValueError:
pass
def execute(self, operation, parameters=None):
"""
Prepare and execute a database operation (query or command).
:param operation: query or command to execute.
:param parameters: sequence or mapping that will be bound to
variables in the operation.
:return: None
"""
self._check_cursor_closed()
self._begin_query()
try:
execute, execute_kwargs = self._prepare()
response = execute(
operation, params=parameters, with_column_types=True,
**execute_kwargs
)
except DriverError as orig:
raise OperationalError(orig)
self._process_response(response)
self._end_query()
def executemany(self, operation, seq_of_parameters):
"""
Prepare a database operation (query or command) and then execute it
against all parameter sequences found in the sequence
`seq_of_parameters`.
:param operation: query or command to execute.
:param seq_of_parameters: sequences or mappings for execution.
:return: None
"""
self._check_cursor_closed()
self._begin_query()
try:
execute, execute_kwargs = self._prepare()
response = execute(
operation, params=seq_of_parameters, **execute_kwargs
)
except DriverError as orig:
raise OperationalError(orig)
self._process_response(response, executemany=True)
self._end_query()
def fetchone(self):
"""
Fetch the next row of a query result set, returning a single sequence,
or None when no more data is available.
:return: the next row of a query result set or None.
"""
self._check_query_started()
if self._stream_results:
return next(self._rows, None)
else:
if not self._rows:
return None
return self._rows.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of
sequences (e.g. a list of tuples). An empty sequence is returned when
no more rows are available.
:param size: amount of rows to return.
:return: list of fetched rows or empty list.
"""
self._check_query_started()
if size is None:
size = self.arraysize
if self._stream_results:
if size == -1:
return list(self._rows)
else:
return list(islice(self._rows, size))
if size < 0:
rv = self._rows
self._rows = []
else:
rv = self._rows[:size]
self._rows = self._rows[size:]
return rv
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a
sequence of sequences (e.g. a list of tuples).
:return: list of fetched rows.
"""
self._check_query_started()
if self._stream_results:
return list(self._rows)
rv = self._rows
self._rows = []
return rv
    def setinputsizes(self, sizes):
        # Required by PEP 249; this driver does not use input sizes.
        pass
    def setoutputsize(self, size, column=None):
        # Required by PEP 249; this driver does not use output sizes.
        pass
    # Begin non-PEP methods
    @property
    def columns_with_types(self):
        """
        :return: list of column names with corresponding types of the last
                 .execute*(). E.g. [('x', 'UInt64')].
                 Populated by _process_response; None before any query.
        """
        return self._columns_with_types
    def set_stream_results(self, stream_results, max_row_buffer):
        """
        Toggles results streaming from server. Driver will consume
        block-by-block of `max_row_buffer` size and yield row-by-row from each
        block.

        :param stream_results: enable or disable results streaming.
        :param max_row_buffer: specifies the maximum number of rows to buffer
               at a time. Applied as settings['max_block_size'] in _prepare().
        :return: None
        """
        self._stream_results = stream_results
        self._max_row_buffer = max_row_buffer
    def set_settings(self, settings):
        """
        Specifies settings for cursor. Passed through to the client on the
        next execute*() call.

        :param settings: dictionary of query settings
        :return: None
        """
        self._settings = settings
    def set_types_check(self, types_check):
        """
        Toggles type checking for sequence of INSERT parameters.
        Disabled by default.

        :param types_check: new types check value.
        :return: None
        """
        self._types_check = types_check
    def set_external_table(self, name, structure, data):
        """
        Adds external table to cursor context.

        If the same table is specified more than once the last one is used.

        :param name: name of external table
        :param structure: list of tuples (name, type) that defines table
                          structure. Example [(x, 'Int32')].
        :param data: sequence of rows of tuples or dicts for transmission.
        :return: None
        """
        self._external_tables[name] = (structure, data)
def set_query_id(self, query_id):
"""
Specifies the query identifier for cursor.
:param query_id: the query identifier.
:return: None
"""
self._query_id = query_id
# End non-PEP methods
# Private methods.
def _prepare(self):
external_tables = [
{'name': name, 'structure': structure, 'data': data}
for name, (structure, data) in self._external_tables.items()
] or None
execute = self._client.execute
if self._stream_results:
execute = self._client.execute_iter
self._settings = self._settings or {}
self._settings['max_block_size'] = self._max_row_buffer
execute_kwargs = {
'settings': self._settings,
'external_tables': external_tables,
'types_check': self._types_check,
'query_id': self._query_id
}
return execute, execute_kwargs
def _process_response(self, response, executemany=False):
if executemany:
self._rowcount = response
response = None
if not response or isinstance(response, int):
self._columns = self._types = self._rows = []
if isinstance(response, int):
self._rowcount = response
return
if self._stream_results:
columns_with_types = next(response)
rows = response
else:
rows, columns_with_types = response
self._columns_with_types = columns_with_types
# Only SELECT queries have columns_with_types.
# DDL and INSERT INTO ... SELECT queries have empty columns header.
# We need to obtain rows count only during non-streaming SELECTs.
if columns_with_types:
self._columns, self._types = zip(*columns_with_types)
if not self._stream_results:
self._rowcount = len(rows)
else:
self._columns = self._types = []
self._rows = rows
def _reset_state(self):
"""
Resets query state and get ready for another query.
"""
self._state = self._states.NONE
self._columns = None
self._types = None
self._rows = None
self._rowcount = -1
self._stream_results = False
self._max_row_buffer = 0
self._settings = None
self._query_id = None
self._external_tables = {}
self._types_check = False
def _begin_query(self):
self._state = self._states.RUNNING
def _end_query(self):
self._state = self._states.FINISHED
def _check_cursor_closed(self):
if self._state == self._states.CURSOR_CLOSED:
raise InterfaceError('cursor already closed')
def _check_query_started(self):
if self._state == self._states.NONE:
raise ProgrammingError('no results to fetch')
|
[
"collections.namedtuple",
"itertools.islice"
] |
[((189, 282), 'collections.namedtuple', 'namedtuple', (['"""Column"""', '"""name type_code display_size internal_size precision scale null_ok"""'], {}), "('Column',\n 'name type_code display_size internal_size precision scale null_ok')\n", (199, 282), False, 'from collections import namedtuple\n'), ((4985, 5009), 'itertools.islice', 'islice', (['self._rows', 'size'], {}), '(self._rows, size)\n', (4991, 5009), False, 'from itertools import islice\n')]
|
"""CMS Plugins for the ``event_rsvp`` app."""
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import Event
class CMSEventPlugin(CMSPluginBase):
    """django-cms plugin that renders the next few published future events."""

    name = _('Upcoming Events')
    render_template = 'event_rsvp/upcoming_events.html'
    # How many upcoming events to show.  Previously a hard-coded ``[:3]``
    # slice inside render(); as a class attribute a subclass can tune it
    # without copying the query logic.  Default keeps the old behavior.
    events_limit = 3

    def render(self, context, instance, placeholder):
        """Add upcoming published events and the placeholder to the context.

        :param context: template context to update.
        :param instance: the plugin instance (unused).
        :param placeholder: placeholder the plugin is rendered into.
        :return: the updated template context.
        """
        upcoming = Event.objects.filter(
            start__gt=now(), is_published=True)[:self.events_limit]
        context.update({
            'events': upcoming,
            'placeholder': placeholder,
        })
        return context
# Make the plugin available in django-cms's plugin registry.
plugin_pool.register_plugin(CMSEventPlugin)
|
[
"django.utils.timezone.now",
"cms.plugin_pool.plugin_pool.register_plugin",
"django.utils.translation.ugettext"
] |
[((654, 697), 'cms.plugin_pool.plugin_pool.register_plugin', 'plugin_pool.register_plugin', (['CMSEventPlugin'], {}), '(CMSEventPlugin)\n', (681, 697), False, 'from cms.plugin_pool import plugin_pool\n'), ((295, 315), 'django.utils.translation.ugettext', '_', (['"""Upcoming Events"""'], {}), "('Upcoming Events')\n", (296, 315), True, 'from django.utils.translation import ugettext as _\n'), ((505, 510), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (508, 510), False, 'from django.utils.timezone import now\n')]
|
import os
import sys
import json
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
import models
import worlds
class Executor(object):
    """Instruction-following navigation agent.

    Encodes a batch of tokenized instructions once, then decodes one
    navigation action per timestep over per-state action embeddings,
    supporting greedy evaluation, policy sampling, and behavior cloning.
    """
    # Action index interpreted as "stop navigating".
    STOP = 0
    def __init__(self, config):
        """Build the world simulator, model, optimizer and loss from config."""
        self.config = config
        self.device = config.device
        self.vocab = config.vocab
        self.world = worlds.load(config)
        model_config = config.executor.model
        model_config.device = config.device
        model_config.vocab_size = len(self.vocab)
        model_config.loc_embed_size = config.world.loc_embed_size
        model_config.max_instruction_length = config.executor.max_instruction_length
        model_config.pad_idx = self.vocab['<PAD>']
        self.model = models.load(model_config).to(self.device)
        logging.info('model: ' + str(self.model))
        self.optim = torch.optim.Adam(
            self.model.parameters(), lr=model_config.learning_rate)
        if hasattr(model_config, 'load_from'):
            self.load(model_config.load_from)
        # ignore_index=-1 masks loss at timesteps whose episode already
        # terminated (teacher labels are overwritten with -1 in act()).
        self.loss_fn = nn.CrossEntropyLoss(ignore_index=-1)
        self.max_instruction_length = config.executor.max_instruction_length
    def _to_tensor(self, x):
        # Wrap a Python value as a tensor on the configured device.
        return torch.tensor(x).to(self.device)
    def _to_tensor_from_numpy(self, x):
        # Convert a numpy array (shares memory) and move it to the device.
        return torch.from_numpy(x).to(self.device)
    def _index_and_pad(self, xs, vocab, reverse=True):
        """Turn token lists into padded index tensors plus padding masks.

        Each sequence is truncated to ``max_instruction_length``, gets an
        '<EOS>' marker, is optionally reversed, and is right-padded with
        '<PAD>' to the batch maximum.  Mask entries are True (1) at pad
        positions, False (0) at real tokens.
        """
        encodings = []
        masks = []
        for x in xs:
            x = x[:self.max_instruction_length] + ['<EOS>']
            encodings.append([vocab[w] for w in x])
            if reverse:
                encodings[-1] = list(reversed(encodings[-1]))
            masks.append([0] * len(encodings[-1]))
        # Padding
        max_len = max([len(encoding) for encoding in encodings])
        for i, encoding in enumerate(encodings):
            encoding.extend([vocab['<PAD>']] * (max_len - len(encoding)))
        for mask in masks:
            mask.extend([1] * (max_len - len(mask)))
        encodings = self._to_tensor(encodings).long()
        masks = self._to_tensor(masks).bool()
        return encodings, masks
    def _nav_action_variable(self, states):
        """Pack per-state navigable-action embeddings into one batch tensor.

        :return: (action_embeds, invalid) where ``invalid`` is a boolean
            mask marking padded (non-existent) action slots per state.
        """
        max_num_a = max(len(state.adj_loc_list) for state in states)
        invalid = np.zeros((self.batch_size, max_num_a), np.uint8)
        action_embed_size = states[0].action_embeddings.shape[-1]
        action_embeds = np.zeros(
            (self.batch_size, max_num_a, action_embed_size), dtype=np.float32)
        for i, state in enumerate(states):
            num_a = len(state.adj_loc_list)
            invalid[i, num_a:] = 1
            action_embeds[i, :num_a, :] = state.action_embeddings
        action_embeds = self._to_tensor_from_numpy(action_embeds).float()
        invalid = self._to_tensor_from_numpy(invalid).bool()
        return action_embeds, invalid
    def init(self, init_poses, instructions, is_eval):
        """Reset per-episode bookkeeping, encode the instructions, and
        return initial world states for a new batch.

        :param init_poses: initial poses passed to the world simulator.
        :param instructions: batch of tokenized instructions.
        :param is_eval: True for greedy evaluation, False for training.
        :return: initial states from ``self.world.init``.
        """
        if is_eval:
            self.model.eval()
        else:
            self.model.train()
        self.is_eval = is_eval
        self.batch_size = len(instructions)
        # Per-timestep recordings used by compute_loss()/get_action_seqs().
        self.state_seqs = []
        self.pred_action_seqs = [[] for _ in range(self.batch_size)]
        self.teacher_action_seqs = []
        self.action_logit_seqs = []
        self.logit_mask_seqs = []
        self.terminated = [False] * self.batch_size
        instr_encodings, instr_masks = self._index_and_pad(
            instructions, self.vocab)
        self.text_dec_h, self.state_dec_h, self.dec_time, self.instructions = \
            self.model.encode(instr_encodings, instr_masks)
        self.instruction_masks = instr_masks
        self.prev_action_embeds = self.model.init_action(self.batch_size)
        # Countdown; every episode is forced to terminate when it reaches 0.
        self.timer = self.config.executor.max_timesteps
        init_states = self.world.init(init_poses)
        return init_states
    def act(self, states, teacher_actions=None, bc=False):
        """Run one decoding step and pick the next action per episode.

        Eval mode takes the argmax of the logits; training either copies
        ``teacher_actions`` (behavior cloning, ``bc=True``) or samples from
        the policy distribution.  Records logits/masks/labels for the loss
        and updates termination flags and the step timer.

        :return: list of chosen action indices (one per episode).
        """
        curr_view_features = [state.curr_view_features for state in states]
        curr_view_features = self._to_tensor_from_numpy(
            np.stack(curr_view_features))
        all_action_embeds, logit_masks = self._nav_action_variable(states)
        self.text_dec_h, self.state_dec_h, self.dec_time, action_logits = \
            self.model.decode(
                self.text_dec_h,
                self.state_dec_h,
                self.dec_time,
                self.prev_action_embeds,
                all_action_embeds,
                self.instructions,
                self.instruction_masks,
                curr_view_features,
                logit_masks
            )
        self.action_logit_seqs.append(action_logits)
        self.logit_mask_seqs.append(logit_masks)
        self.state_seqs.append(states)
        if self.is_eval:
            # Greedy decoding; only record actions for live episodes.
            pred_actions = action_logits.max(dim=1)[1].tolist()
            self.prev_actions = pred_actions
            for i in range(self.batch_size):
                if not self.terminated[i]:
                    self.pred_action_seqs[i].append(pred_actions[i])
        else:
            if bc:
                pred_actions = teacher_actions
            else:
                pred_actions = D.Categorical(logits=action_logits).sample().tolist()
            self.prev_actions = pred_actions
            teacher_actions = self._to_tensor(teacher_actions).long()
            for i in range(self.batch_size):
                if self.terminated[i]:
                    # -1 matches the loss's ignore_index: finished
                    # episodes contribute no gradient.
                    teacher_actions[i] = -1
            self.teacher_action_seqs.append(teacher_actions)
        self.timer -= 1
        for i in range(self.batch_size):
            self.terminated[i] |= self.timer <= 0
            self.terminated[i] |= self.prev_actions[i] == self.STOP
        # Embedding of the action just taken feeds the next decode step.
        self.prev_action_embeds = all_action_embeds[np.arange(self.batch_size), pred_actions, :].detach()
        return self.prev_actions
    def has_terminated(self):
        # True once every episode in the batch has stopped or timed out.
        return all(self.terminated)
    def get_action_seqs(self):
        # Action sequences recorded during evaluation (see act()).
        return self.pred_action_seqs
    def predict(self, init_poses, instructions):
        """Greedy-decode full trajectories for a batch without gradients.

        :return: (paths, poses) per episode — visited viewpoint ids and
            (scan, viewpoint, heading, elevation) tuples.
        """
        with torch.no_grad():
            states = self.init(init_poses, instructions, True)
            paths = [[state.viewpoint] for state in states]
            poses = [[pose] for pose in init_poses]
            while not self.has_terminated():
                pred_actions = self.act(states)
                states = states.step(pred_actions)
                for i, state in enumerate(states):
                    pose = (state.scan, state.viewpoint, state.heading, state.elevation)
                    if not self.terminated[i]:
                        poses[i].append(pose)
                        if state.viewpoint != paths[i][-1]:
                            paths[i].append(states[i].viewpoint)
            return paths, poses
    def compute_loss(self):
        """Sum cross-entropy over all recorded decoding steps."""
        assert len(self.teacher_action_seqs) == len(self.action_logit_seqs)
        loss = 0
        zipped_info = zip(self.action_logit_seqs, self.teacher_action_seqs)
        for logits, refs in zipped_info:
            loss += self.loss_fn(logits, refs)
        return loss
    def learn(self):
        """Take one optimizer step; return the mean per-timestep loss."""
        loss = self.compute_loss()
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()
        return loss.item() / len(self.teacher_action_seqs)
    def save(self, name, trajectories=None):
        """Checkpoint model + optimizer to ``<experiment_dir>/<name>.ckpt``.

        ``trajectories`` is accepted for interface compatibility but unused.
        """
        file_path = os.path.join(self.config.experiment_dir, name + '.ckpt')
        ckpt = { 'model_state_dict': self.model.state_dict(),
            'optim_state_dict': self.optim.state_dict() }
        torch.save(ckpt, file_path)
        logging.info('Saved %s model to %s' % (name, file_path))
    def load(self, file_path):
        """Restore model and optimizer state from ``file_path``."""
        ckpt = torch.load(file_path, map_location=self.device)
        self.model.load_state_dict(ckpt['model_state_dict'])
        self.optim.load_state_dict(ckpt['optim_state_dict'])
        logging.info('Loaded model from %s' % file_path)
|
[
"numpy.stack",
"torch.distributions.Categorical",
"torch.load",
"torch.nn.CrossEntropyLoss",
"numpy.zeros",
"models.load",
"torch.save",
"logging.info",
"worlds.load",
"numpy.arange",
"torch.no_grad",
"os.path.join",
"torch.tensor",
"torch.from_numpy"
] |
[((391, 410), 'worlds.load', 'worlds.load', (['config'], {}), '(config)\n', (402, 410), False, 'import worlds\n'), ((1095, 1131), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (1114, 1131), True, 'import torch.nn as nn\n'), ((2302, 2350), 'numpy.zeros', 'np.zeros', (['(self.batch_size, max_num_a)', 'np.uint8'], {}), '((self.batch_size, max_num_a), np.uint8)\n', (2310, 2350), True, 'import numpy as np\n'), ((2441, 2516), 'numpy.zeros', 'np.zeros', (['(self.batch_size, max_num_a, action_embed_size)'], {'dtype': 'np.float32'}), '((self.batch_size, max_num_a, action_embed_size), dtype=np.float32)\n', (2449, 2516), True, 'import numpy as np\n'), ((7367, 7423), 'os.path.join', 'os.path.join', (['self.config.experiment_dir', "(name + '.ckpt')"], {}), "(self.config.experiment_dir, name + '.ckpt')\n", (7379, 7423), False, 'import os\n'), ((7557, 7584), 'torch.save', 'torch.save', (['ckpt', 'file_path'], {}), '(ckpt, file_path)\n', (7567, 7584), False, 'import torch\n'), ((7593, 7649), 'logging.info', 'logging.info', (["('Saved %s model to %s' % (name, file_path))"], {}), "('Saved %s model to %s' % (name, file_path))\n", (7605, 7649), False, 'import logging\n'), ((7697, 7744), 'torch.load', 'torch.load', (['file_path'], {'map_location': 'self.device'}), '(file_path, map_location=self.device)\n', (7707, 7744), False, 'import torch\n'), ((7875, 7923), 'logging.info', 'logging.info', (["('Loaded model from %s' % file_path)"], {}), "('Loaded model from %s' % file_path)\n", (7887, 7923), False, 'import logging\n'), ((4080, 4108), 'numpy.stack', 'np.stack', (['curr_view_features'], {}), '(curr_view_features)\n', (4088, 4108), True, 'import numpy as np\n'), ((6068, 6083), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6081, 6083), False, 'import torch\n'), ((776, 801), 'models.load', 'models.load', (['model_config'], {}), '(model_config)\n', (787, 801), False, 'import models\n'), ((1254, 1269), 'torch.tensor', 
'torch.tensor', (['x'], {}), '(x)\n', (1266, 1269), False, 'import torch\n'), ((1342, 1361), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1358, 1361), False, 'import torch\n'), ((5780, 5806), 'numpy.arange', 'np.arange', (['self.batch_size'], {}), '(self.batch_size)\n', (5789, 5806), True, 'import numpy as np\n'), ((5184, 5219), 'torch.distributions.Categorical', 'D.Categorical', ([], {'logits': 'action_logits'}), '(logits=action_logits)\n', (5197, 5219), True, 'import torch.distributions as D\n')]
|
import torch
from elegantrl.agent import AgentPPO
from elegantrl.run import Arguments
from finrl.neo_finrl.data_processor import DataProcessor
from ray.rllib.agents.ppo import PPOTrainer, ppo
from stable_baselines3 import PPO
def trade(
    start_date,
    end_date,
    ticker_list,
    data_source,
    time_interval,
    technical_indicator_list,
    drl_lib,
    env,
    agent,
    mode="backtesting",
    if_vix=True,
    **kwargs
):
    """Backtest a trained DRL agent over historical market data.

    Downloads/cleans data, builds the trading environment, loads the
    trained agent from a checkpoint, and replays it on the test data.

    :param drl_lib: one of 'elegantrl', 'rllib', 'stable_baselines3'.
    :param env: environment class (instantiated with the data arrays).
    :param agent: agent name; only 'ppo' is supported for elegantrl.
    :param mode: only 'backtesting' is implemented; 'paper_trading' is a
        recognized but unsupported mode.
    :param kwargs: extra options; 'net_dimension' (default 2**7) and
        'cwd' (checkpoint path, default './<agent>') are read here, the
        rest is forwarded to DataProcessor.
    :return: list of cumulative returns per step ('backtesting' mode).
    :raises ValueError: on unsupported agent/library/mode or load failure.
    """
    if mode == "backtesting":
        # Fetch and prepare the data, then convert to numpy arrays.
        DP = DataProcessor(data_source, **kwargs)
        data = DP.download_data(ticker_list, start_date, end_date, time_interval)
        data = DP.clean_data(data)
        data = DP.add_technical_indicator(data, technical_indicator_list)
        if if_vix:
            data = DP.add_vix(data)
        price_array, tech_array, risk_array = DP.df_to_array(data, if_vix)
        env_config = {
            "price_array": price_array,
            "tech_array": tech_array,
            "risk_array": risk_array,
            "if_train": False,
        }
        env_instance = env(config=env_config)
        net_dimension = kwargs.get("net_dimension", 2 ** 7)
        # Checkpoint directory/path for the trained agent.
        cwd = kwargs.get("cwd", "./" + str(agent))
        # test on elegantrl
        if drl_lib == "elegantrl":
            # select agent
            if agent == "ppo":
                args = Arguments(if_on_policy=True)
                args.agent = AgentPPO()
                args.env = env_instance
                args.agent.if_use_cri_target = True
            else:
                raise ValueError(
                    "Invalid agent input or the agent input is not \
                                supported yet."
                )
            # load agent
            try:
                state_dim = env_instance.state_dim
                action_dim = env_instance.action_dim
                agent = args.agent
                net_dim = net_dimension
                agent.init(net_dim, state_dim, action_dim)
                agent.save_or_load_agent(cwd=cwd, if_save=False)
                act = agent.act
                device = agent.device
            except BaseException:
                raise ValueError("Fail to load agent!")
            # test on the testing env
            _torch = torch
            state = env_instance.reset()
            episode_returns = list()  # the cumulative_return / initial_account
            with _torch.no_grad():
                for i in range(env_instance.max_step):
                    s_tensor = _torch.as_tensor((state,), device=device)
                    a_tensor = act(s_tensor)  # action_tanh = act.forward()
                    action = (
                        a_tensor.detach().cpu().numpy()[0]
                    )  # not need detach(), because with torch.no_grad() outside
                    state, reward, done, _ = env_instance.step(action)
                    # Portfolio value = cash + current stock holdings value.
                    total_asset = (
                        env_instance.amount
                        + (
                            env_instance.price_ary[env_instance.day]
                            * env_instance.stocks
                        ).sum()
                    )
                    episode_return = total_asset / env_instance.initial_total_asset
                    episode_returns.append(episode_return)
                    if done:
                        break
            print("Test Finished!")
            # return episode returns on testing data
            return episode_returns
        # test using rllib
        elif drl_lib == "rllib":
            # load agent
            config = ppo.DEFAULT_CONFIG.copy()
            config["env"] = env
            config["log_level"] = "WARN"
            config["env_config"] = {
                "price_array": price_array,
                "tech_array": tech_array,
                "risk_array": risk_array,
                "if_train": False,
            }
            trainer = PPOTrainer(env=env, config=config)
            try:
                trainer.restore(cwd)
                print("Restoring from checkpoint path", cwd)
            except BaseException:
                raise ValueError("Fail to load agent!")
            # test on the testing env
            state = env_instance.reset()
            episode_returns = list()  # the cumulative_return / initial_account
            done = False
            while not done:
                action = trainer.compute_single_action(state)
                state, reward, done, _ = env_instance.step(action)
                total_asset = (
                    env_instance.amount
                    + (
                        env_instance.price_ary[env_instance.day] * env_instance.stocks
                    ).sum()
                )
                episode_return = total_asset / env_instance.initial_total_asset
                episode_returns.append(episode_return)
                print("episode return: " + str(episode_return))
            print("Test Finished!")
            return episode_returns
        # test using stable baselines3
        elif drl_lib == "stable_baselines3":
            try:
                # load agent
                model = PPO.load(cwd)
                print("Successfully load model", cwd)
            except BaseException:
                raise ValueError("Fail to load agent!")
            # test on the testing env
            state = env_instance.reset()
            episode_returns = list()  # the cumulative_return / initial_account
            done = False
            while not done:
                # predict() returns (action, hidden_state); keep the action.
                action = model.predict(state)[0]
                state, reward, done, _ = env_instance.step(action)
                total_asset = (
                    env_instance.amount
                    + (
                        env_instance.price_ary[env_instance.day] * env_instance.stocks
                    ).sum()
                )
                episode_return = total_asset / env_instance.initial_total_asset
                episode_returns.append(episode_return)
                print("episode_return", episode_return)
            print("Test Finished!")
            return episode_returns
        else:
            raise ValueError("DRL library input is NOT supported yet. Please check.")
    elif mode == "paper_trading":
        print("Paper trading is NOT supported for now.")
    else:
        raise ValueError(
            "Invalid mode input! Please input either 'backtesting' or 'paper_trading'."
        )
if __name__ == "__main__":
    # fetch data
    from finrl.neo_finrl.neofinrl_config import FAANG_TICKER
    from finrl.neo_finrl.neofinrl_config import TECHNICAL_INDICATORS_LIST
    from finrl.neo_finrl.neofinrl_config import TRADE_START_DATE
    from finrl.neo_finrl.neofinrl_config import TRADE_END_DATE
    # construct environment
    from finrl.neo_finrl.env_stock_trading.env_stock_trading import StockTradingEnv
    env = StockTradingEnv
    # demo for elegantrl
    trade(
        start_date=TRADE_START_DATE,
        end_date=TRADE_END_DATE,
        ticker_list=FAANG_TICKER,
        data_source="yahoofinance",
        time_interval="1D",
        technical_indicator_list=TECHNICAL_INDICATORS_LIST,
        drl_lib="elegantrl",
        env=env,
        agent="ppo",
        cwd="./test_ppo",
        net_dimension=2 ** 9,
    )
    # demo for rllib
    trade(
        start_date=TRADE_START_DATE,
        end_date=TRADE_END_DATE,
        ticker_list=FAANG_TICKER,
        data_source="yahoofinance",
        time_interval="1D",
        technical_indicator_list=TECHNICAL_INDICATORS_LIST,
        drl_lib="rllib",
        env=env,
        agent="ppo",
        cwd="./test_ppo",
        net_dimension=2 ** 9,
    )
    # demo for stable-baselines3
    trade(
        start_date=TRADE_START_DATE,
        end_date=TRADE_END_DATE,
        ticker_list=FAANG_TICKER,
        data_source="yahoofinance",
        time_interval="1D",
        technical_indicator_list=TECHNICAL_INDICATORS_LIST,
        # Fixed: was the misspelled "stable_baseline3", which trade() does
        # not recognize and which raised ValueError instead of running the
        # stable-baselines3 branch.
        drl_lib="stable_baselines3",
        env=env,
        agent="ppo",
        cwd="./test_ppo",
        net_dimension=2 ** 9,
    )
|
[
"stable_baselines3.PPO.load",
"elegantrl.run.Arguments",
"ray.rllib.agents.ppo.PPOTrainer",
"ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG.copy",
"elegantrl.agent.AgentPPO",
"finrl.neo_finrl.data_processor.DataProcessor"
] |
[((486, 522), 'finrl.neo_finrl.data_processor.DataProcessor', 'DataProcessor', (['data_source'], {}), '(data_source, **kwargs)\n', (499, 522), False, 'from finrl.neo_finrl.data_processor import DataProcessor\n'), ((1329, 1357), 'elegantrl.run.Arguments', 'Arguments', ([], {'if_on_policy': '(True)'}), '(if_on_policy=True)\n', (1338, 1357), False, 'from elegantrl.run import Arguments\n'), ((1387, 1397), 'elegantrl.agent.AgentPPO', 'AgentPPO', ([], {}), '()\n', (1395, 1397), False, 'from elegantrl.agent import AgentPPO\n'), ((3571, 3596), 'ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG.copy', 'ppo.DEFAULT_CONFIG.copy', ([], {}), '()\n', (3594, 3596), False, 'from ray.rllib.agents.ppo import PPOTrainer, ppo\n'), ((3907, 3941), 'ray.rllib.agents.ppo.PPOTrainer', 'PPOTrainer', ([], {'env': 'env', 'config': 'config'}), '(env=env, config=config)\n', (3917, 3941), False, 'from ray.rllib.agents.ppo import PPOTrainer, ppo\n'), ((5145, 5158), 'stable_baselines3.PPO.load', 'PPO.load', (['cwd'], {}), '(cwd)\n', (5153, 5158), False, 'from stable_baselines3 import PPO\n')]
|
from django.utils import timezone
from django.utils.translation import ugettext as _
from drf_spectacular.utils import (
OpenApiResponse,
extend_schema,
extend_schema_view,
)
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from hacktheback.forms.models import Form
from hacktheback.rest.exceptions import ConflictError
from hacktheback.rest.forms.openapi import id_or_type_parameter
from hacktheback.rest.forms.serializers import FormSerializer
from hacktheback.rest.pagination import StandardResultsPagination
from hacktheback.rest.permissions import AdminSiteModelPermissions
class IdOrTypeLookupMixin:
    """Detail-route mixin accepting either a pk or the special form-type
    token ``"hacker_application"`` in the ``id_or_type`` URL kwarg."""

    lookup_field = None
    lookup_url_kwarg = "id_or_type"

    def get_object(self):
        """
        Return the object the view is displaying.
        """
        queryset = self.filter_queryset(self.get_queryset())
        # Perform the lookup filtering.
        assert self.lookup_url_kwarg in self.kwargs, (
            "Expected view %s to be called with a URL keyword argument "
            'named "%s". Fix your URL conf.'
            % (self.__class__.__name__, self.lookup_url_kwarg)
        )
        lookup_value = self.kwargs[self.lookup_url_kwarg]
        # The literal string "hacker_application" selects by form type;
        # anything else is treated as a primary key.
        filter_kwargs = (
            {"type": Form.FormType.HACKER_APPLICATION}
            if lookup_value == "hacker_application"
            else {"pk": lookup_value}
        )
        obj = get_object_or_404(queryset, **filter_kwargs)
        # May raise a permission denied
        self.check_object_permissions(self.request, obj)
        return obj
@extend_schema(tags=["Hacker APIs", "Forms"])
class FormsViewSet(
    mixins.RetrieveModelMixin,
    mixins.ListModelMixin,
    viewsets.GenericViewSet,
):
    # Public, read-only access to published (non-draft) forms.
    # NOTE(review): method docstrings below also claim a start_at/end_at
    # time window; that filtering is not visible here — presumably applied
    # by a filter backend or manager. TODO confirm.
    queryset = Form.objects.filter(
        is_draft=False,
    )
    # No authentication: these endpoints are anonymous/public.
    authentication_classes = ()
    serializer_class = FormSerializer
    @extend_schema(summary="List Forms")
    def list(self, request, *args, **kwargs):
        """
        List all forms that have been published between their `start_at` and
        `end_at` times.
        """
        return super().list(request, *args, **kwargs)
    @extend_schema(summary="Retrieve a Form")
    def retrieve(self, request, *args, **kwargs):
        """
        Retrieve a form that has been published between its `start_at` and
        `end_at` times.
        """
        return super().retrieve(request, *args, **kwargs)
    # Helper used by the hacker_application action to fetch the single
    # form of type HACKER_APPLICATION (404 if none is published).
    def get_hacker_application_form(self):
        queryset = self.filter_queryset(self.get_queryset())
        return get_object_or_404(
            queryset, type=Form.FormType.HACKER_APPLICATION
        )
    @extend_schema(summary="Retrieve the Hacker Application Form")
    @action(detail=False)
    def hacker_application(self, request, *args, **kwargs):
        """
        Retrieve the hacker application form that has been published between
        its `start_at` and `end_at` times.
        """
        # Swap get_object for the type-based lookup so the stock
        # retrieve() flow (serialization, permissions) can be reused.
        self.get_object = self.get_hacker_application_form
        return self.retrieve(request, *args, **kwargs)
@extend_schema(tags=["Admin APIs", "Forms"])
@extend_schema_view(
    list=extend_schema(summary="List Forms", description="List all forms."),
    retrieve=extend_schema(
        summary="Retrieve a Form",
        description="Retrieve a form.",
        parameters=[id_or_type_parameter()],
    ),
    create=extend_schema(
        summary="Create a Form", description="Create a form."
    ),
    update=extend_schema(
        summary="Update a Form",
        description="Update a form.",
        parameters=[id_or_type_parameter()],
    ),
    partial_update=extend_schema(
        summary="Partial Update a Form",
        description="Partial update a form.",
        parameters=[id_or_type_parameter()],
    ),
    destroy=extend_schema(
        summary="Delete a Form",
        description="Delete a form.",
        parameters=[id_or_type_parameter()],
    ),
    publish=extend_schema(
        summary="Publish a Form",
        description="Publish a form. This sets `is_draft` to `False`.",
        parameters=[id_or_type_parameter()],
        request=None,
        responses={
            "204": OpenApiResponse(description="Form published successfully."),
        },
    ),
    unpublish=extend_schema(
        summary="Unpublish a Form",
        description="Unpublish a form. This sets `is_draft` to `True`.",
        parameters=[id_or_type_parameter()],
        request=None,
        responses={
            "204": OpenApiResponse(
                description="Form unpublished successfully."
            ),
        },
    ),
)
class FormsAdminViewSet(IdOrTypeLookupMixin, viewsets.ModelViewSet):
    # Admin CRUD over all forms (drafts included), addressable either by
    # pk or by the special "hacker_application" type lookup (see mixin).
    queryset = Form.objects.all()
    serializer_class = FormSerializer
    pagination_class = StandardResultsPagination
    permission_classes = (AdminSiteModelPermissions,)

    def perform_create(self, serializer):
        """Create a form, allowing at most one hacker application form.

        :raises ConflictError: when creating a hacker application form
            while one already exists.
        """
        # Read from validated_data: it is the canonical post-validation
        # input inside perform_create, whereas serializer.data re-renders
        # the representation and is not meant to be read before save().
        type_to_create = serializer.validated_data.get("type", None)
        if type_to_create == Form.FormType.HACKER_APPLICATION:
            # Fixed: the previous code raised unconditionally for every
            # hacker application form, making it impossible to create the
            # first one; only conflict when one actually exists.
            if Form.objects.filter(
                type=Form.FormType.HACKER_APPLICATION
            ).exists():
                raise ConflictError(
                    detail=_("A hacker application form already exists.")
                )
        serializer.save()

    @action(detail=True, methods=["POST"])
    def publish(self, request, id_or_type=None):
        """Mark the form as published (`is_draft = False`)."""
        form = self.get_object()
        form.is_draft = False
        form.save()
        return Response(status=status.HTTP_204_NO_CONTENT)

    @action(detail=True, methods=["POST"])
    def unpublish(self, request, id_or_type=None):
        """Mark the form as a draft again (`is_draft = True`)."""
        form = self.get_object()
        form.is_draft = True
        form.save()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
[
"hacktheback.forms.models.Form.objects.filter",
"drf_spectacular.utils.OpenApiResponse",
"drf_spectacular.utils.extend_schema",
"rest_framework.response.Response",
"rest_framework.decorators.action",
"rest_framework.generics.get_object_or_404",
"hacktheback.rest.forms.openapi.id_or_type_parameter",
"hacktheback.forms.models.Form.objects.all",
"django.utils.translation.ugettext"
] |
[((1740, 1784), 'drf_spectacular.utils.extend_schema', 'extend_schema', ([], {'tags': "['Hacker APIs', 'Forms']"}), "(tags=['Hacker APIs', 'Forms'])\n", (1753, 1784), False, 'from drf_spectacular.utils import OpenApiResponse, extend_schema, extend_schema_view\n'), ((3200, 3243), 'drf_spectacular.utils.extend_schema', 'extend_schema', ([], {'tags': "['Admin APIs', 'Forms']"}), "(tags=['Admin APIs', 'Forms'])\n", (3213, 3243), False, 'from drf_spectacular.utils import OpenApiResponse, extend_schema, extend_schema_view\n'), ((1910, 1945), 'hacktheback.forms.models.Form.objects.filter', 'Form.objects.filter', ([], {'is_draft': '(False)'}), '(is_draft=False)\n', (1929, 1945), False, 'from hacktheback.forms.models import Form\n'), ((2037, 2072), 'drf_spectacular.utils.extend_schema', 'extend_schema', ([], {'summary': '"""List Forms"""'}), "(summary='List Forms')\n", (2050, 2072), False, 'from drf_spectacular.utils import OpenApiResponse, extend_schema, extend_schema_view\n'), ((2304, 2344), 'drf_spectacular.utils.extend_schema', 'extend_schema', ([], {'summary': '"""Retrieve a Form"""'}), "(summary='Retrieve a Form')\n", (2317, 2344), False, 'from drf_spectacular.utils import OpenApiResponse, extend_schema, extend_schema_view\n'), ((2791, 2852), 'drf_spectacular.utils.extend_schema', 'extend_schema', ([], {'summary': '"""Retrieve the Hacker Application Form"""'}), "(summary='Retrieve the Hacker Application Form')\n", (2804, 2852), False, 'from drf_spectacular.utils import OpenApiResponse, extend_schema, extend_schema_view\n'), ((2858, 2878), 'rest_framework.decorators.action', 'action', ([], {'detail': '(False)'}), '(detail=False)\n', (2864, 2878), False, 'from rest_framework.decorators import action\n'), ((4823, 4841), 'hacktheback.forms.models.Form.objects.all', 'Form.objects.all', ([], {}), '()\n', (4839, 4841), False, 'from hacktheback.forms.models import Form\n'), ((5425, 5462), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': 
"['POST']"}), "(detail=True, methods=['POST'])\n", (5431, 5462), False, 'from rest_framework.decorators import action\n'), ((5660, 5697), 'rest_framework.decorators.action', 'action', ([], {'detail': '(True)', 'methods': "['POST']"}), "(detail=True, methods=['POST'])\n", (5666, 5697), False, 'from rest_framework.decorators import action\n'), ((1574, 1618), 'rest_framework.generics.get_object_or_404', 'get_object_or_404', (['queryset'], {}), '(queryset, **filter_kwargs)\n', (1591, 1618), False, 'from rest_framework.generics import get_object_or_404\n'), ((2696, 2762), 'rest_framework.generics.get_object_or_404', 'get_object_or_404', (['queryset'], {'type': 'Form.FormType.HACKER_APPLICATION'}), '(queryset, type=Form.FormType.HACKER_APPLICATION)\n', (2713, 2762), False, 'from rest_framework.generics import get_object_or_404\n'), ((5610, 5653), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (5618, 5653), False, 'from rest_framework.response import Response\n'), ((5846, 5889), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (5854, 5889), False, 'from rest_framework.response import Response\n'), ((3274, 3340), 'drf_spectacular.utils.extend_schema', 'extend_schema', ([], {'summary': '"""List Forms"""', 'description': '"""List all forms."""'}), "(summary='List Forms', description='List all forms.')\n", (3287, 3340), False, 'from drf_spectacular.utils import OpenApiResponse, extend_schema, extend_schema_view\n'), ((3508, 3576), 'drf_spectacular.utils.extend_schema', 'extend_schema', ([], {'summary': '"""Create a Form"""', 'description': '"""Create a form."""'}), "(summary='Create a Form', description='Create a form.')\n", (3521, 3576), False, 'from drf_spectacular.utils import OpenApiResponse, extend_schema, extend_schema_view\n'), ((5332, 5378), 'django.utils.translation.ugettext', '_', (['"""A 
hacker application form already exists."""'], {}), "('A hacker application form already exists.')\n", (5333, 5378), True, 'from django.utils.translation import ugettext as _\n'), ((3465, 3487), 'hacktheback.rest.forms.openapi.id_or_type_parameter', 'id_or_type_parameter', ([], {}), '()\n', (3485, 3487), False, 'from hacktheback.rest.forms.openapi import id_or_type_parameter\n'), ((3709, 3731), 'hacktheback.rest.forms.openapi.id_or_type_parameter', 'id_or_type_parameter', ([], {}), '()\n', (3729, 3731), False, 'from hacktheback.rest.forms.openapi import id_or_type_parameter\n'), ((3882, 3904), 'hacktheback.rest.forms.openapi.id_or_type_parameter', 'id_or_type_parameter', ([], {}), '()\n', (3902, 3904), False, 'from hacktheback.rest.forms.openapi import id_or_type_parameter\n'), ((4032, 4054), 'hacktheback.rest.forms.openapi.id_or_type_parameter', 'id_or_type_parameter', ([], {}), '()\n', (4052, 4054), False, 'from hacktheback.rest.forms.openapi import id_or_type_parameter\n'), ((4217, 4239), 'hacktheback.rest.forms.openapi.id_or_type_parameter', 'id_or_type_parameter', ([], {}), '()\n', (4237, 4239), False, 'from hacktheback.rest.forms.openapi import id_or_type_parameter\n'), ((4303, 4362), 'drf_spectacular.utils.OpenApiResponse', 'OpenApiResponse', ([], {'description': '"""Form published successfully."""'}), "(description='Form published successfully.')\n", (4318, 4362), False, 'from drf_spectacular.utils import OpenApiResponse, extend_schema, extend_schema_view\n'), ((4540, 4562), 'hacktheback.rest.forms.openapi.id_or_type_parameter', 'id_or_type_parameter', ([], {}), '()\n', (4560, 4562), False, 'from hacktheback.rest.forms.openapi import id_or_type_parameter\n'), ((4626, 4687), 'drf_spectacular.utils.OpenApiResponse', 'OpenApiResponse', ([], {'description': '"""Form unpublished successfully."""'}), "(description='Form unpublished successfully.')\n", (4641, 4687), False, 'from drf_spectacular.utils import OpenApiResponse, extend_schema, extend_schema_view\n')]
|
import matplotlib.pyplot as plt
# import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import PySimpleGUI as sg
import matplotlib
import serial as ser
from serial import SerialException
from time import sleep
import glob
import os
# from pathlib import Path
from threading import Thread
from configparser import ConfigParser
import sys
import signal
import time
import datetime
def command(l):
    """Send command *l* (CRLF-terminated) over the serial port and collect
    the device's response via receive()."""
    print('SEND: ' + l)
    payload = l.encode('utf-8')
    s.write(payload)
    s.write(b'\r\n')
    return receive(1)
def send(l):
    """Write *l* to the serial port with no line terminator appended."""
    s.write(l.encode('utf-8'))
def receive(end): ## end will signal when to die
    # Worker-thread loop: accumulate serial lines into the global transcript
    # `recibido` until a line equal to `end` arrives, then dump the transcript
    # to the module-global path `file` and clear `busy` so the GUI event loop
    # goes back to blocking reads.
    global busy, recibido
    while True:
        line = s.readline().decode()
        recibido += line
        if line.strip() == end:
            busy = False
            with open(file,'w') as f:
                for i in recibido.splitlines():
                    if len(i)>1:
                        # Decimal comma, presumably for spreadsheet import --
                        # TODO confirm the intended locale.
                        f.write(i.replace('.',',')+'\n')
                # NOTE(review): redundant -- the `with` block already closes f.
                f.close()
            return
def update(): # send elon & force values to Arduino
    """Push the current limit settings to the Arduino and echo its replies.

    Sends the maximum elongation ('L<value>') and then the maximum push
    force ('F<value>'); each one-line acknowledgement is appended to the
    global transcript and shown in the GUI's 'box' widget.
    """
    global recibido
    for prefix, value in (('L', MaximumElongation), ('F', MaxPushForce)):
        send(prefix + str(value))
        reply = s.readline().decode()
        recibido += reply
        window['box'].update(recibido)
# --- Load persisted defaults ------------------------------------------------
config = ConfigParser()
found = config.read('defaults.ini')
if len(found):
    # Read config parameter from INI file
    print("INI file: "+str(found[0]))
    port = config.get('SerialPort','COM')
    ComSpeed = config.getint('SerialPort','BaudRate')
    CellScale = config.getfloat('General','CellScale')
    MaxPushForce = config.getfloat('General','MaxPushForce')
    MaximumElongation = config.getfloat('General','MaximumElongation')
    DataDir = config.get('General','DataDir')
# NOTE(review): if defaults.ini is missing, `port`, `ComSpeed`,
# `MaxPushForce` and `MaximumElongation` are never assigned and the layout
# below raises NameError -- the INI file is effectively required.
# --- Build the GUI ----------------------------------------------------------
layout=[[sg.Text("Serial Port to Arduino:"), sg.Input(port, size=(25, 1), enable_events=True, key="Port"), sg.Button('Connect'),sg.Button('Disconnect')],
    [sg.Text('MaxDisplacement (mm)'), sg.Input(MaximumElongation,size=(5,1),key="Elon"), sg.Text('MaxForce (N)'),sg.Input(MaxPushForce,size=(5,1),key="Force"), sg.Button('Set') ],
    [sg.Button('Start'), sg.Button('ResetCell'),sg.Button('ManualMeasurement'), sg.Button('STOP',button_color=(None,'red'))],
    [sg.Button('StartManualTest'), sg.Text('motor disabled')],
    [sg.Multiline('Last measures',size=(40,10),key='box', autoscroll=True,)]]
window = sg.Window('Push Device Control',layout, finalize=True)
window['Disconnect'].update(disabled=True)
recibido='Last measurements\n'
connected = False
busy = False
# --- Event loop -------------------------------------------------------------
# While a measurement thread is running (`busy`), poll with a timeout so the
# transcript keeps refreshing; otherwise block until the user does something.
while True:
    if not busy:
        windows, event, values = sg.read_all_windows()
    else:
        windows, event, values = sg.read_all_windows(timeout=200)
        window['box'].update(recibido) #values['box']+"Line")
    if not connected and event == 'Connect': #################CONNECT!!!!!!!
        connected = True
        window['Disconnect'].update(disabled=False)
        window['Connect'].update(disabled=True)
        try:
            port = values['Port']
            s = ser.Serial(port, baudrate=ComSpeed, timeout=2)
        except SerialException:
            print("ERROR Opening the Serial Port: "+values['Port'])
            event='Exit'
            s.close()
            break
        #sleep(1)
        # Handshake: the firmware is expected to print 'ready' shortly after
        # the port opens; give it up to three timed-out reads.
        ok = False
        for i in range(3):
            line=s.readline().strip()
            # print(line)
            if line == b'ready':
                ok = True
                break
        if not ok:
            print('NOT CONNECTED')
            event='Exit'
            s.close()
            break
        recibido = 'CONNECTED\n'
        window['box'].update(recibido)
        update()
    if connected and event == 'Disconnect': #######DISCONNECT
        connected = False
        window['Connect'].update(disabled=False)
        window['Disconnect'].update(disabled=True)
        s.close()
    if event == sg.WIN_CLOSED or event == 'Exit': break
    if connected and event == 'STOP': send('X')
    if connected and event == 'Start':
        # Start an automatic test: firmware streams lines until a lone '.'.
        file = sg.popup_get_file('Filename to store test data:', save_as = True)
        recibido = ''
        busy = True
        send('S')
        # NOTE(review): ('.') is NOT a tuple -- this only works because
        # Thread unpacks the 1-char string into one positional arg.
        # Should be args=('.',).
        thread = Thread(target=receive, args=('.'))
        thread.start()
    if connected and event == 'ManualMeasurement':
        send('?')
        line = s.readline().decode()
        recibido += line
        window['box'].update(recibido)
    if event == 'Set':
        # Values come from the GUI as strings, which is also what
        # ConfigParser requires for assignment below.
        MaximumElongation = values['Elon']
        MaxPushForce = values['Force']
        if connected: # if connected then push the values to the Arduino
            update()
        config['General']['MaxPushForce'] = MaxPushForce
        config['General']['MaximumElongation'] = MaximumElongation
        config['SerialPort']['COM'] = values['Port']
        with open('defaults.ini', 'w') as configfile:
            config.write(configfile)
    if connected and event == 'StartManualTest':
        file = sg.popup_get_file('Filename to store test data:', save_as = True)
        recibido = ''
        busy = True
        send('M')
        # NOTE(review): same non-tuple args issue as the 'Start' branch.
        thread = Thread(target=receive, args=('.'))
        thread.start()
window.close()
s.close()
|
[
"PySimpleGUI.Button",
"threading.Thread",
"serial.Serial",
"PySimpleGUI.Input",
"PySimpleGUI.Multiline",
"PySimpleGUI.Text",
"PySimpleGUI.Window",
"PySimpleGUI.popup_get_file",
"configparser.ConfigParser",
"PySimpleGUI.read_all_windows"
] |
[((1436, 1450), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1448, 1450), False, 'from configparser import ConfigParser\n'), ((2560, 2615), 'PySimpleGUI.Window', 'sg.Window', (['"""Push Device Control"""', 'layout'], {'finalize': '(True)'}), "('Push Device Control', layout, finalize=True)\n", (2569, 2615), True, 'import PySimpleGUI as sg\n'), ((1937, 1971), 'PySimpleGUI.Text', 'sg.Text', (['"""Serial Port to Arduino:"""'], {}), "('Serial Port to Arduino:')\n", (1944, 1971), True, 'import PySimpleGUI as sg\n'), ((1973, 2033), 'PySimpleGUI.Input', 'sg.Input', (['port'], {'size': '(25, 1)', 'enable_events': '(True)', 'key': '"""Port"""'}), "(port, size=(25, 1), enable_events=True, key='Port')\n", (1981, 2033), True, 'import PySimpleGUI as sg\n'), ((2035, 2055), 'PySimpleGUI.Button', 'sg.Button', (['"""Connect"""'], {}), "('Connect')\n", (2044, 2055), True, 'import PySimpleGUI as sg\n'), ((2056, 2079), 'PySimpleGUI.Button', 'sg.Button', (['"""Disconnect"""'], {}), "('Disconnect')\n", (2065, 2079), True, 'import PySimpleGUI as sg\n'), ((2092, 2123), 'PySimpleGUI.Text', 'sg.Text', (['"""MaxDisplacement (mm)"""'], {}), "('MaxDisplacement (mm)')\n", (2099, 2123), True, 'import PySimpleGUI as sg\n'), ((2125, 2177), 'PySimpleGUI.Input', 'sg.Input', (['MaximumElongation'], {'size': '(5, 1)', 'key': '"""Elon"""'}), "(MaximumElongation, size=(5, 1), key='Elon')\n", (2133, 2177), True, 'import PySimpleGUI as sg\n'), ((2176, 2199), 'PySimpleGUI.Text', 'sg.Text', (['"""MaxForce (N)"""'], {}), "('MaxForce (N)')\n", (2183, 2199), True, 'import PySimpleGUI as sg\n'), ((2200, 2248), 'PySimpleGUI.Input', 'sg.Input', (['MaxPushForce'], {'size': '(5, 1)', 'key': '"""Force"""'}), "(MaxPushForce, size=(5, 1), key='Force')\n", (2208, 2248), True, 'import PySimpleGUI as sg\n'), ((2247, 2263), 'PySimpleGUI.Button', 'sg.Button', (['"""Set"""'], {}), "('Set')\n", (2256, 2263), True, 'import PySimpleGUI as sg\n'), ((2277, 2295), 'PySimpleGUI.Button', 'sg.Button', 
(['"""Start"""'], {}), "('Start')\n", (2286, 2295), True, 'import PySimpleGUI as sg\n'), ((2297, 2319), 'PySimpleGUI.Button', 'sg.Button', (['"""ResetCell"""'], {}), "('ResetCell')\n", (2306, 2319), True, 'import PySimpleGUI as sg\n'), ((2320, 2350), 'PySimpleGUI.Button', 'sg.Button', (['"""ManualMeasurement"""'], {}), "('ManualMeasurement')\n", (2329, 2350), True, 'import PySimpleGUI as sg\n'), ((2353, 2398), 'PySimpleGUI.Button', 'sg.Button', (['"""STOP"""'], {'button_color': "(None, 'red')"}), "('STOP', button_color=(None, 'red'))\n", (2362, 2398), True, 'import PySimpleGUI as sg\n'), ((2409, 2437), 'PySimpleGUI.Button', 'sg.Button', (['"""StartManualTest"""'], {}), "('StartManualTest')\n", (2418, 2437), True, 'import PySimpleGUI as sg\n'), ((2439, 2464), 'PySimpleGUI.Text', 'sg.Text', (['"""motor disabled"""'], {}), "('motor disabled')\n", (2446, 2464), True, 'import PySimpleGUI as sg\n'), ((2477, 2549), 'PySimpleGUI.Multiline', 'sg.Multiline', (['"""Last measures"""'], {'size': '(40, 10)', 'key': '"""box"""', 'autoscroll': '(True)'}), "('Last measures', size=(40, 10), key='box', autoscroll=True)\n", (2489, 2549), True, 'import PySimpleGUI as sg\n'), ((2793, 2814), 'PySimpleGUI.read_all_windows', 'sg.read_all_windows', ([], {}), '()\n', (2812, 2814), True, 'import PySimpleGUI as sg\n'), ((2860, 2892), 'PySimpleGUI.read_all_windows', 'sg.read_all_windows', ([], {'timeout': '(200)'}), '(timeout=200)\n', (2879, 2892), True, 'import PySimpleGUI as sg\n'), ((4272, 4335), 'PySimpleGUI.popup_get_file', 'sg.popup_get_file', (['"""Filename to store test data:"""'], {'save_as': '(True)'}), "('Filename to store test data:', save_as=True)\n", (4289, 4335), True, 'import PySimpleGUI as sg\n'), ((4419, 4451), 'threading.Thread', 'Thread', ([], {'target': 'receive', 'args': '"""."""'}), "(target=receive, args='.')\n", (4425, 4451), False, 'from threading import Thread\n'), ((5203, 5266), 'PySimpleGUI.popup_get_file', 'sg.popup_get_file', (['"""Filename to store test 
data:"""'], {'save_as': '(True)'}), "('Filename to store test data:', save_as=True)\n", (5220, 5266), True, 'import PySimpleGUI as sg\n'), ((5350, 5382), 'threading.Thread', 'Thread', ([], {'target': 'receive', 'args': '"""."""'}), "(target=receive, args='.')\n", (5356, 5382), False, 'from threading import Thread\n'), ((3240, 3286), 'serial.Serial', 'ser.Serial', (['port'], {'baudrate': 'ComSpeed', 'timeout': '(2)'}), '(port, baudrate=ComSpeed, timeout=2)\n', (3250, 3286), True, 'import serial as ser\n')]
|
import requests
from bs4 import BeautifulSoup

# Fetch the IMDb "Top 250" chart and print the title anchor (<a>) of each row.
response = requests.get('https://www.imdb.com/chart/top/')
soup = BeautifulSoup(response.text, 'html.parser')

table_body = soup.find('tbody')
for row in table_body.findAll('tr'):
    title_cell = row.find('td', {'class': 'titleColumn'})
    print(title_cell.a)
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((52, 99), 'requests.get', 'requests.get', (['"""https://www.imdb.com/chart/top/"""'], {}), "('https://www.imdb.com/chart/top/')\n", (64, 99), False, 'import requests\n'), ((124, 158), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (137, 158), False, 'from bs4 import BeautifulSoup\n')]
|
# -*- coding: utf-8 -*-
import json
import onmt
import onmt.io
import torch
import torch.cuda
from torch.autograd import Variable
from rouge import Rouge
class Reward():
    """Self-critical RL reward provider for sequence generation.

    Computes per-sentence rewards (ROUGE-L F1 or an entailment score) for a
    sampled sequence against a greedy (max) baseline, and exposes the REINFORCE
    criterion that consumes them.
    """
    def __init__(self, reward):
        # reward: either "rouge" or a string containing "entailment"
        # (e.g. "entailment_src_hyp_sample") selecting the scorer to load.
        self.reward = reward
        print("Reward : {}".format(reward))
        if reward == "rouge":
            self.rouge = Rouge(metrics=["rouge-l"], stats=["f"])
        elif "entailment" in reward:
            # entail_model_path = "0617_entail_400k_model.tar.gz" # kor entail index:1
            entail_model_path = "allennlp_eng_model.tar.gz" # eng entail index:0, cotra, neu:1,2
            self.entailment = onmt.modules.Entailment_scorer(entail_model_path, 0) # model path and index of entailment
        # Small negative constant added to rewards in criterion().
        self.eps = -1e-5
        # Counter of out-of-range vocab indices seen while detokenizing.
        self.error = 0
    def get_rouge_reward(self, batch, sample_indices, max_indices, copy):
        """ROUGE-L F1 rewards for sampled vs. greedy outputs of one batch.

        Returns (baseline - sample) scores, the two score tensors, and the
        copy-attention alignment indices for the sampled tokens.
        """
        # print("Reward line:5 sample indices", sample_indices) # tgt_len * batch
        # print("Reward line:5 max indices", max_indices) # tgt_len * batch
        # print("rweard line:7 sample_indeices[:,0]", sample_indices[:,0])
        # print("rweard line:20 batch", batch)
        # print("rweard line:21 batch", batch.tgt)
        tgt_vocab = batch.dataset.fields['tgt'].vocab
        global_vocab = batch.dataset.fields['src'].vocab
        # print("batch src")
        # print(batch.src)
        # input()
        sample_scores = []
        max_scores = []
        alignments = [] # for efficiency, calculate alignments here
        for i in range(len(batch)):
            in_batch_index = batch.indices.data[i]
            # print("Reward line:11 in batch index",in_batch_index)
            # print("Reward line:29 in raw example", len(batch.dataset.examples))
            # print("Reward line:30 batch dataset fileds tgt", batch.dataset.fields['tgt'])
            # With copy attention each example carries its own src vocab.
            src_vocab = batch.dataset.src_vocabs[in_batch_index] if copy else global_vocab
            # raw_tgt = batch.dataset.examples[in_batch_index].tgt
            raw_tokens = self.build_target_tokens(src_vocab, tgt_vocab, batch.tgt.data[1:,i])
            # print("reward line:36 raw_tgt", raw_tgt)
            # print("reward line:37 raw_tokens", raw_tokens)
            sample_tokens = self.build_target_tokens(src_vocab, tgt_vocab, sample_indices[:,i])
            max_tokens = self.build_target_tokens(src_vocab, tgt_vocab, max_indices[:,i])
            # print("reward line:16 sample tokens",sample_tokens)
            sample_rouge_f1_s = self.calculate_rouge(sample_tokens, raw_tokens)
            max_rouge_f1_s = self.calculate_rouge(max_tokens, raw_tokens)
            # calculate alginemts
            mask = [0] + [src_vocab.stoi[w] for w in sample_tokens]
            alignments.append(mask)
            # print("reward line:37 sample_tokens", sample_tokens)
            # print("reward line:37 max_tokens", max_tokens)
            sample_scores.append(sample_rouge_f1_s['rouge-l']['f'])
            max_scores.append(max_rouge_f1_s['rouge-l']['f'])
            # Occasionally (~0.5% of examples) log a full example for debugging.
            if torch.rand(1)[0] <= 0.005:
                src_tokens = self.build_target_tokens(src_vocab, batch.dataset.fields['src'].vocab, batch.src[0].data[:,i])
                print("in batch index = {}".format(in_batch_index))
                print("\t src tokes")
                print("\t\t", src_tokens)
                print("\t target tokens")
                print("\t\t", raw_tokens)
                print("\tsampled tokens")
                print("\t\t", sample_scores[-1], sample_tokens)
                print("\t max tokens")
                print("\t\t", max_scores[-1], max_tokens)
        # print("Rewards line:72 alignments", alignments )
        # Zero-pad every alignment row to a common length before stacking.
        max_sample_len = max(len(x) for x in alignments)
        max_sample_len = max(sample_indices.size(0)+1, max_sample_len)
        # print("Reward line:75 sample_indices", sample_indices.size())
        # print("Reward line:76 max", max(len(x) for x in alignments))
        for i in range(len(alignments)):
            alignments[i] += [0] * max(0, max_sample_len - len(alignments[i]))
            alignments[i] = torch.LongTensor(alignments[i]).cuda()
        # print("Rewards line:77 alignments", alignments )
        sample_alignments = torch.stack(alignments).transpose(0,1)
        # print("reward line:29 rouge", sample_rouge_f1_s, max_rouge_f1_s)
        sample_scores = torch.Tensor(sample_scores).cuda()
        max_scores = torch.Tensor(max_scores).cuda()
        # Self-critical baseline: positive when greedy beats the sample.
        batch_scores = max_scores - sample_scores
        return batch_scores, sample_scores, max_scores, sample_alignments
    def get_entailment_reward(self, batch, sample_indices, max_indices, entail_type):
        """Entailment-based rewards; premise/hypothesis pairing depends on
        *entail_type* (src-vs-sample, src-vs-gold, or tgt-vs-sample)."""
        # print("Reward line:5 sample indices", sample_indices) # tgt_len * batch
        # print("Reward line:5 max indices", max_indices) # tgt_len * batch
        # print("rweard line:7 sample_indeices[:,0]", sample_indices[:,0])
        # print("rweard line:20 batch", batch)
        # print("rweard line:21 batch", batch.tgt)
        tgt_vocab = batch.dataset.fields['tgt'].vocab
        src_vocab = batch.dataset.fields['src'].vocab
        # print("batch src")
        # print(batch.src)
        # input()
        sample_scores = []
        max_scores = []
        alignments = [] # for efficiency, calculate alignments here
        for i in range(len(batch)):
            in_batch_index = batch.indices.data[i]
            # print("Reward line:11 in batch index",in_batch_index)
            # print("Reward line:29 in raw example", len(batch.dataset.examples))
            # print("Reward line:30 batch dataset fileds tgt", batch.dataset.fields['tgt'])
            # src_vocab = batch.dataset.src_vocabs[in_batch_index]
            # raw_tgt = batch.dataset.examples[in_batch_index].tgt
            raw_src_tokens = self.build_src_tokens(src_vocab, batch.src[0].data[:,i])
            # print("reward line:36 raw_tgt", raw_tgt)
            # print("reward line:37 raw_tokens", raw_tokens)
            sample_tokens = self.build_target_tokens(src_vocab, tgt_vocab, sample_indices[:,i])
            max_tokens = self.build_target_tokens(src_vocab, tgt_vocab, max_indices[:,i])
            # print("reward line:16 sample tokens",sample_tokens)
            # sample_entail_s = self.get_entailment_score(raw_src_tokens, sample_tokens)
            # max_entail_s = self.get_entailment_score(raw_src_tokens, max_tokens)
            # calculate alginemt
            instance_src_vocab = batch.dataset.src_vocabs[in_batch_index]
            raw_tokens = self.build_target_tokens(instance_src_vocab, tgt_vocab, batch.tgt.data[1:,i])
            # Select premise (hyp_tokens) / baseline per reward variant.
            if entail_type == "entailment_src_hyp_sample":
                hyp_tokens = raw_src_tokens
            elif entail_type == "entailment_src_hyp_gold":
                hyp_tokens = raw_src_tokens
                max_tokens = raw_tokens
            elif entail_type == "entailment_tgt_hyp":
                hyp_tokens = raw_tokens
            else:
                input("Parameter Error!")
            sample_entail_s = self.get_entailment_score(hyp_tokens, sample_tokens, True)
            max_entail_s = self.get_entailment_score(hyp_tokens, max_tokens, True) # use gold target to baseline
            mask = [0] + [instance_src_vocab.stoi[w] for w in sample_tokens]
            alignments.append(mask)
            # print("reward line:37 sample_tokens", sample_entail_s, sample_tokens)
            # print("reward line:37 max_tokens", max_entail_s, max_tokens)
            sample_scores.append(sample_entail_s)
            max_scores.append(max_entail_s)
            # Occasionally (~0.5% of examples) log a full example for debugging.
            if torch.rand(1)[0] <= 0.005:
                # src_tokens = self.build_target_tokens(src_vocab, batch.dataset.fields['src'].vocab, batch.src[0].data[:,i])
                src_tokens = raw_src_tokens
                print("in batch index = {}".format(in_batch_index))
                print("\t src tokes")
                print("\t\t", src_tokens)
                print("\t target tokens")
                print("\t\t", raw_tokens)
                print("\tsampled tokens")
                print("\t\t", sample_scores[-1], sample_tokens)
                print("\t max tokens")
                print("\t\t", max_scores[-1], max_tokens)
        # print("Rewards line:72 alignments", alignments )
        # Zero-pad every alignment row to a common length before stacking.
        max_sample_len = max(len(x) for x in alignments)
        max_sample_len = max(sample_indices.size(0)+1, max_sample_len)
        # print("Reward line:75 sample_indices", sample_indices.size())
        # print("Reward line:76 max", max(len(x) for x in alignments))
        for i in range(len(alignments)):
            alignments[i] += [0] * max(0, max_sample_len - len(alignments[i]))
            alignments[i] = torch.LongTensor(alignments[i]).cuda()
        # print("Rewards line:77 alignments", alignments )
        sample_alignments = torch.stack(alignments).transpose(0,1)
        # print("reward line:29 rouge", sample_rouge_f1_s, max_rouge_f1_s)
        sample_scores = torch.Tensor(sample_scores).cuda()
        max_scores = torch.Tensor(max_scores).cuda()
        batch_scores = max_scores - sample_scores
        return batch_scores, sample_scores, max_scores, sample_alignments
    def get_batch_reward(self, batch, sample_indices, max_indices, copy=None):
        """Dispatch to the configured reward function for one batch."""
        if self.reward == "rouge":
            assert copy is not None
            return self.get_rouge_reward(batch, sample_indices, max_indices, copy)
        elif "entailment" in self.reward:
            return self.get_entailment_reward(batch, sample_indices, max_indices, self.reward)
    def get_entailment_score(self, src_tokens, sample_tokens, length_penalty=False):
        """Entailment probability of sample_tokens given src_tokens, optionally
        scaled by the hypothesis/premise length ratio."""
        premise = " ".join(src_tokens)
        hypothesis = " ".join(sample_tokens)
        json_data = {"premise":premise, "hypothesis":hypothesis}
        # json_data = json.dumps(json_data, ensure_ascii=False)
        # print(json_data)
        score = self.entailment.predict_entailment(json_data)
        if length_penalty:
            penalty = len(sample_tokens) / len(src_tokens)
            score = penalty * score
        return score
    def calculate_rouge(self, hyp, ref):
        """ROUGE scores for one hypothesis/reference token-list pair."""
        hyp = " ".join(hyp)
        ref = " ".join(ref)
        score = self.rouge.get_scores(hyp, ref)
        return score[0]
    def build_src_tokens(self, src_vocab, indices):
        """Map source-side indices back to token strings ('<unk>' on error)."""
        tokens = []
        # print("reward line:18 onmt.io.EOS_WORD", onmt.io.EOS_WORD)
        for tok in indices:
            try:
                tokens.append(src_vocab.itos[tok])
            except IndexError:
                self.error += 1
                print("Reward line 82: Error index occured {}".format(self.error))
                tokens.append('<unk>')
        return tokens
    def build_target_tokens(self, src_vocab, tgt_vocab, pred):
        """Map predicted indices to tokens, resolving copy-vocab indices
        (>= len(tgt_vocab)) through src_vocab and truncating at EOS."""
        tokens = []
        # print("reward line:18 onmt.io.EOS_WORD", onmt.io.EOS_WORD)
        for tok in pred:
            try:
                if tok < len(tgt_vocab):
                    tokens.append(tgt_vocab.itos[tok])
                else:
                    tokens.append(src_vocab.itos[tok - len(tgt_vocab)])
                if tokens[-1] == onmt.io.EOS_WORD:
                    tokens = tokens[:-1]
                    break
            except IndexError:
                self.error += 1
                print("Reward line 82: Error index occured {}".format(self.error))
                tokens.append('<unk>')
        return tokens
    def criterion(self, input, seq, reward):
        """REINFORCE-style loss from per-token log-probs and rewards.

        NOTE(review): this appears to be left in a debugging state -- the
        reward/mask product is commented out (`output = - input`), the result
        is divided by a magic constant 7, and debug prints remain. Verify
        intent before reuse.
        """
        # print("reward line 69 input", input)
        # print("reward line 69 seq", seq)
        # print("reward line 69 reward", reward)
        # print("reward line 69 reward", reward.expand_as(input))
        reward = reward.expand_as(input) + self.eps
        print("reward line 76 reward", reward)
        def to_contiguous(tensor):
            if tensor.is_contiguous():
                return tensor
            else:
                return tensor.contiguous()
        input = to_contiguous(input).view(-1)
        reward = to_contiguous(reward).view(-1)
        mask = (seq>0).float()
        # Shift the mask right by one so the first step is always counted.
        mask = to_contiguous(torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1)).view(-1)
        print("reward line 89 input req", input.requires_grad)
        # output = - input * reward * Variable(mask)
        output = - input
        output = torch.sum(output) / torch.sum(mask) / 7
        # output = torch.sum(output)
        return output
|
[
"rouge.Rouge",
"torch.stack",
"torch.LongTensor",
"onmt.modules.Entailment_scorer",
"torch.Tensor",
"torch.rand",
"torch.sum"
] |
[((335, 374), 'rouge.Rouge', 'Rouge', ([], {'metrics': "['rouge-l']", 'stats': "['f']"}), "(metrics=['rouge-l'], stats=['f'])\n", (340, 374), False, 'from rouge import Rouge\n'), ((625, 677), 'onmt.modules.Entailment_scorer', 'onmt.modules.Entailment_scorer', (['entail_model_path', '(0)'], {}), '(entail_model_path, 0)\n', (655, 677), False, 'import onmt\n'), ((4329, 4352), 'torch.stack', 'torch.stack', (['alignments'], {}), '(alignments)\n', (4340, 4352), False, 'import torch\n'), ((4471, 4498), 'torch.Tensor', 'torch.Tensor', (['sample_scores'], {}), '(sample_scores)\n', (4483, 4498), False, 'import torch\n'), ((4527, 4551), 'torch.Tensor', 'torch.Tensor', (['max_scores'], {}), '(max_scores)\n', (4539, 4551), False, 'import torch\n'), ((9065, 9088), 'torch.stack', 'torch.stack', (['alignments'], {}), '(alignments)\n', (9076, 9088), False, 'import torch\n'), ((9207, 9234), 'torch.Tensor', 'torch.Tensor', (['sample_scores'], {}), '(sample_scores)\n', (9219, 9234), False, 'import torch\n'), ((9263, 9287), 'torch.Tensor', 'torch.Tensor', (['max_scores'], {}), '(max_scores)\n', (9275, 9287), False, 'import torch\n'), ((12725, 12742), 'torch.sum', 'torch.sum', (['output'], {}), '(output)\n', (12734, 12742), False, 'import torch\n'), ((12745, 12760), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (12754, 12760), False, 'import torch\n'), ((3098, 3111), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (3108, 3111), False, 'import torch\n'), ((4191, 4222), 'torch.LongTensor', 'torch.LongTensor', (['alignments[i]'], {}), '(alignments[i])\n', (4207, 4222), False, 'import torch\n'), ((7788, 7801), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (7798, 7801), False, 'import torch\n'), ((8927, 8958), 'torch.LongTensor', 'torch.LongTensor', (['alignments[i]'], {}), '(alignments[i])\n', (8943, 8958), False, 'import torch\n')]
|
from fairseq.models import (
register_model,
register_model_architecture,
FairseqModel,
)
from pytorch_translate import vocab_reduction
from pytorch_translate.rnn import (
torch_find,
LSTMSequenceEncoder,
RNNEncoder,
RNNDecoder,
)
from .word_predictor import WordPredictor
class FairseqWordPredictionModel(FairseqModel):
    """Encoder/decoder model augmented with a word-predictor head that
    scores target words directly from the encoder output."""

    def __init__(self, encoder, decoder, predictor):
        super().__init__(encoder, decoder)
        self.predictor = predictor

    def forward(self, src_tokens, src_lengths, prev_output_tokens):
        # Encode once; the predictor and the decoder both consume the
        # encoder output.
        enc_out = self.encoder(src_tokens, src_lengths)
        pred_out = self.predictor(enc_out)
        dec_out = self.decoder(prev_output_tokens, enc_out)
        return pred_out, dec_out

    def get_predictor_normalized_probs(self, pred_output, log_probs):
        """Delegate probability normalization to the predictor head."""
        return self.predictor.get_normalized_probs(pred_output, log_probs)

    def get_target_words(self, sample):
        """Target word ids the predictor is trained against."""
        return sample['target']
@register_model('rnn_wp')
class RNNWordPredictionModel(FairseqWordPredictionModel):
    # RNN translation model (registered as 'rnn_wp') with a WordPredictor head
    # on top of the encoder output.
    @staticmethod
    def add_args(parser):
        """Register all model hyperparameter flags on *parser*."""
        parser.add_argument(
            '--dropout',
            default=0.1,
            type=float,
            metavar='D',
            help='dropout probability',
        )
        parser.add_argument(
            '--encoder-embed-dim',
            type=int,
            metavar='N',
            help='encoder embedding dimension',
        )
        parser.add_argument(
            '--encoder-freeze-embed',
            default=False,
            action='store_true',
            help=('whether to freeze the encoder embedding or allow it to be '
                  'updated during training'),
        )
        parser.add_argument(
            '--encoder-hidden-dim',
            type=int,
            metavar='N',
            help='encoder cell num units',
        )
        parser.add_argument(
            '--encoder-layers',
            type=int,
            metavar='N',
            help='number of encoder layers',
        )
        parser.add_argument(
            '--encoder-bidirectional',
            action='store_true',
            help='whether the first layer is bidirectional or not',
        )
        parser.add_argument(
            '--averaging-encoder',
            default=False,
            action='store_true',
            help=(
                'whether use mean encoder hidden states as decoder initial '
                'states or not'
            ),
        )
        parser.add_argument(
            '--decoder-embed-dim',
            type=int,
            metavar='N',
            help='decoder embedding dimension',
        )
        parser.add_argument(
            '--decoder-freeze-embed',
            default=False,
            action='store_true',
            help=('whether to freeze the encoder embedding or allow it to be '
                  'updated during training'),
        )
        parser.add_argument(
            '--decoder-hidden-dim',
            type=int,
            metavar='N',
            help='decoder cell num units',
        )
        parser.add_argument(
            '--decoder-layers',
            type=int,
            metavar='N',
            help='number of decoder layers',
        )
        parser.add_argument(
            '--decoder-out-embed-dim',
            type=int,
            metavar='N',
            help='decoder output embedding dimension',
        )
        parser.add_argument(
            '--attention-type',
            type=str,
            metavar='EXPR',
            help='decoder attention, defaults to dot',
        )
        parser.add_argument(
            '--residual-level',
            default=None,
            type=int,
            help=(
                'First layer where to apply a residual connection. '
                'The value should be greater than 0 and smaller than the number of '
                'layers.'
            ),
        )
        parser.add_argument(
            '--cell-type',
            default='lstm',
            type=str,
            metavar='EXPR',
            help='cell type, defaults to lstm, values:lstm, milstm, layer_norm_lstm',
        )
        # Granular dropout settings (if not specified these default to --dropout)
        parser.add_argument(
            '--encoder-dropout-in',
            type=float,
            metavar='D',
            help='dropout probability for encoder input embedding',
        )
        parser.add_argument(
            '--encoder-dropout-out',
            type=float,
            metavar='D',
            help='dropout probability for encoder output',
        )
        parser.add_argument(
            '--decoder-dropout-in',
            type=float,
            metavar='D',
            help='dropout probability for decoder input embedding',
        )
        parser.add_argument(
            '--decoder-dropout-out',
            type=float,
            metavar='D',
            help='dropout probability for decoder output',
        )
        parser.add_argument(
            '--sequence-lstm',
            action='store_true',
            help='use nn.LSTM implementation for encoder',
        )
        # new arg
        parser.add_argument(
            '--predictor-hidden-dim',
            type=int,
            metavar='N',
            help='word predictor num units',
        )
        # Args for vocab reduction
        vocab_reduction.add_args(parser)

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        src_dict, dst_dict = task.source_dictionary, task.target_dictionary
        # Fill any architecture hyperparameters the user did not set.
        base_architecture_wp(args)
        if args.sequence_lstm:
            encoder_class = LSTMSequenceEncoder
        else:
            encoder_class = RNNEncoder
        encoder = encoder_class(
            src_dict,
            embed_dim=args.encoder_embed_dim,
            freeze_embed=args.encoder_freeze_embed,
            cell_type=args.cell_type,
            num_layers=args.encoder_layers,
            hidden_dim=args.encoder_hidden_dim,
            dropout_in=args.encoder_dropout_in,
            dropout_out=args.encoder_dropout_out,
            residual_level=args.residual_level,
            bidirectional=bool(args.encoder_bidirectional),
        )
        decoder = RNNDecoder(
            src_dict=src_dict,
            dst_dict=dst_dict,
            vocab_reduction_params=args.vocab_reduction_params,
            encoder_hidden_dim=args.encoder_hidden_dim,
            embed_dim=args.decoder_embed_dim,
            freeze_embed=args.decoder_freeze_embed,
            out_embed_dim=args.decoder_out_embed_dim,
            cell_type=args.cell_type,
            num_layers=args.decoder_layers,
            hidden_dim=args.decoder_hidden_dim,
            attention_type=args.attention_type,
            dropout_in=args.decoder_dropout_in,
            dropout_out=args.decoder_dropout_out,
            residual_level=args.residual_level,
            averaging_encoder=args.averaging_encoder,
        )
        predictor = WordPredictor(
            args.encoder_hidden_dim, args.predictor_hidden_dim, len(dst_dict)
        )
        return cls(encoder, decoder, predictor)

    def get_targets(self, sample, net_output):
        """Flatten the targets, remapping them into the reduced output
        vocabulary when vocab reduction produced one."""
        targets = sample['target'].view(-1)
        possible_translation_tokens = net_output[-1]
        if possible_translation_tokens is not None:
            targets = torch_find(
                possible_translation_tokens.data,
                targets.data,
                len(self.dst_dict),
            )
        return targets
@register_model_architecture('rnn_wp', 'rnn_wp')
def base_architecture_wp(args):
    """Fill in any 'rnn_wp' hyperparameter the user did not specify."""
    fallbacks = {
        'encoder_embed_dim': 512,
        'encoder_layers': 1,
        'encoder_hidden_dim': 512,
        'encoder_bidirectional': False,
        'encoder_dropout_in': args.dropout,
        'encoder_dropout_out': args.dropout,
        'decoder_embed_dim': 512,
        'decoder_layers': 1,
        'decoder_hidden_dim': 512,
        'decoder_out_embed_dim': 512,
        'attention_type': 'dot',
        'decoder_dropout_in': args.dropout,
        'decoder_dropout_out': args.dropout,
        'averaging_encoder': False,
        'encoder_freeze_embed': False,
        'decoder_freeze_embed': False,
        'cell_type': 'lstm',
    }
    for name, fallback in fallbacks.items():
        setattr(args, name, getattr(args, name, fallback))
    vocab_reduction.set_arg_defaults(args)
    args.sequence_lstm = getattr(args, 'sequence_lstm', False)
    args.predictor_hidden_dim = getattr(args, 'predictor_hidden_dim', 512)
|
[
"pytorch_translate.vocab_reduction.set_arg_defaults",
"pytorch_translate.vocab_reduction.add_args",
"fairseq.models.register_model",
"pytorch_translate.rnn.RNNDecoder",
"fairseq.models.register_model_architecture"
] |
[((1006, 1030), 'fairseq.models.register_model', 'register_model', (['"""rnn_wp"""'], {}), "('rnn_wp')\n", (1020, 1030), False, 'from fairseq.models import register_model, register_model_architecture, FairseqModel\n'), ((7616, 7663), 'fairseq.models.register_model_architecture', 'register_model_architecture', (['"""rnn_wp"""', '"""rnn_wp"""'], {}), "('rnn_wp', 'rnn_wp')\n", (7643, 7663), False, 'from fairseq.models import register_model, register_model_architecture, FairseqModel\n'), ((8955, 8993), 'pytorch_translate.vocab_reduction.set_arg_defaults', 'vocab_reduction.set_arg_defaults', (['args'], {}), '(args)\n', (8987, 8993), False, 'from pytorch_translate import vocab_reduction\n'), ((5433, 5465), 'pytorch_translate.vocab_reduction.add_args', 'vocab_reduction.add_args', (['parser'], {}), '(parser)\n', (5457, 5465), False, 'from pytorch_translate import vocab_reduction\n'), ((6324, 6903), 'pytorch_translate.rnn.RNNDecoder', 'RNNDecoder', ([], {'src_dict': 'src_dict', 'dst_dict': 'dst_dict', 'vocab_reduction_params': 'args.vocab_reduction_params', 'encoder_hidden_dim': 'args.encoder_hidden_dim', 'embed_dim': 'args.decoder_embed_dim', 'freeze_embed': 'args.decoder_freeze_embed', 'out_embed_dim': 'args.decoder_out_embed_dim', 'cell_type': 'args.cell_type', 'num_layers': 'args.decoder_layers', 'hidden_dim': 'args.decoder_hidden_dim', 'attention_type': 'args.attention_type', 'dropout_in': 'args.decoder_dropout_in', 'dropout_out': 'args.decoder_dropout_out', 'residual_level': 'args.residual_level', 'averaging_encoder': 'args.averaging_encoder'}), '(src_dict=src_dict, dst_dict=dst_dict, vocab_reduction_params=\n args.vocab_reduction_params, encoder_hidden_dim=args.encoder_hidden_dim,\n embed_dim=args.decoder_embed_dim, freeze_embed=args.\n decoder_freeze_embed, out_embed_dim=args.decoder_out_embed_dim,\n cell_type=args.cell_type, num_layers=args.decoder_layers, hidden_dim=\n args.decoder_hidden_dim, attention_type=args.attention_type, dropout_in\n 
=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out,\n residual_level=args.residual_level, averaging_encoder=args.\n averaging_encoder)\n', (6334, 6903), False, 'from pytorch_translate.rnn import torch_find, LSTMSequenceEncoder, RNNEncoder, RNNDecoder\n')]
|
import readline
# Make Tab cycle through completions (menu-complete) for all input() calls.
readline.parse_and_bind("tab: menu-complete")
import getch  # third-party: single-keypress input (getche echoes the key)
def rlinput(prompt, prefill=''):
    """input() with *prefill* already typed into the editable line buffer."""
    def _inject():
        readline.insert_text(prefill)
    readline.set_startup_hook(_inject)
    try:
        return input(prompt)
    finally:
        # Always clear the hook so later input() calls start empty.
        readline.set_startup_hook()
class Completer:
    """readline completion function over a fixed word list.

    The candidate list for the most recent prefix is cached, since readline
    calls complete() repeatedly with increasing indices for one prefix.
    """

    def __init__(self, words):
        self.words = words
        self.prefix = None

    def complete(self, prefix, index):
        """Return the index-th word starting with prefix, or None."""
        if prefix != self.prefix:
            # Prefix changed: recompute and cache the candidates.
            self.matching_words = [
                word for word in self.words if word.startswith(prefix)
            ]
            self.prefix = prefix
        matches = self.matching_words
        return matches[index] if index < len(matches) else None
def query_choice(choices,
                 prompt="Choice: ",
                 default=None, single_char=False,
                 allow_other=False,
                 case_insensitive=False):
    """Prompt the user to pick one of *choices* and return the answer.

    choices          -- accepted answers (a plain string works too; each
                        character then counts as a separate choice).
    prompt           -- text shown before reading input.
    default          -- prefill for line input; an empty answer is accepted
                        when a default exists so callers can map '' to it.
    single_char      -- read one keypress via getch instead of a full line.
    allow_other      -- accept any answer without validating it.
    case_insensitive -- compare the answer to *choices* ignoring case.

    Loops until a valid answer is entered (unless allow_other is set).
    """
    while True:
        if single_char:
            print(prompt, end=' ')
            choice = getch.getche()
            print()
        else:
            # Offer tab-completion over the valid choices while editing.
            completer = Completer(choices)
            readline.set_completer(completer.complete)
            choice = rlinput(prompt=prompt, prefill=default or None)
            readline.set_completer(None)
        if not allow_other:
            # BUG FIX: the two validation modes were swapped -- the exact
            # (case-sensitive) membership test ran when case_insensitive was
            # requested, and the lower-cased test ran when it was not.
            if case_insensitive:
                valid = choice.lower() in [c.lower() for c in choices]
            else:
                valid = choice in choices
            # Let an empty answer through when a default exists, so callers
            # (e.g. query_yes_no) can substitute the default for it.
            if not valid and not (choice == '' and default is not None):
                print("%s not one of: %s" % (choice, ','.join(choices)))
                continue
        return choice
def query_yes_no(prompt=">>> ", default="y", single_char=False):
    """Ask a yes/no question via input() and return the answer as a bool.

    prompt: text shown before the [y/n] choice hint.
    default: the presumed answer on bare <Enter> -- "yes"/"y", "no"/"n",
        or None to require an explicit answer.
    single_char: accept a single keystroke instead of a full line.
    Raises ValueError for an invalid default.
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:  # BUG FIX: was `== None`
        choices = " [y/n] "
    elif default in ["yes", "y"]:
        choices = " [Y/n] "
    elif default in ["no", "n"]:
        choices = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    choice = query_choice("yn", prompt=prompt + choices,
                          default=default,
                          single_char=single_char, case_insensitive=True)
    if default is not None and choice == '':
        return valid[default]
    # BUG FIX: query_choice accepts 'Y'/'N' case-insensitively, but the
    # lookup table is lowercase -- normalize so uppercase answers do not
    # silently fall through and return None.
    choice = choice.lower()
    if choice in valid:
        return valid[choice]
# Re-assert the Tab binding after completer use; the commented calls below
# are kept as manual smoke tests.
readline.parse_and_bind("tab: menu-complete")
# print query_yes_no(single_char=True)
# print query_choice(["foo", "bar", "baz"])
# print query_choice(["foo", "bar", "baz"], default="foo", allow_other=True)
|
[
"readline.parse_and_bind",
"readline.set_completer",
"readline.set_startup_hook",
"getch.getche",
"readline.insert_text"
] |
[((16, 61), 'readline.parse_and_bind', 'readline.parse_and_bind', (['"""tab: menu-complete"""'], {}), "('tab: menu-complete')\n", (39, 61), False, 'import readline\n'), ((2841, 2886), 'readline.parse_and_bind', 'readline.parse_and_bind', (['"""tab: menu-complete"""'], {}), "('tab: menu-complete')\n", (2864, 2886), False, 'import readline\n'), ((237, 264), 'readline.set_startup_hook', 'readline.set_startup_hook', ([], {}), '()\n', (262, 264), False, 'import readline\n'), ((149, 178), 'readline.insert_text', 'readline.insert_text', (['prefill'], {}), '(prefill)\n', (169, 178), False, 'import readline\n'), ((1086, 1100), 'getch.getche', 'getch.getche', ([], {}), '()\n', (1098, 1100), False, 'import getch\n'), ((1190, 1232), 'readline.set_completer', 'readline.set_completer', (['completer.complete'], {}), '(completer.complete)\n', (1212, 1232), False, 'import readline\n'), ((1314, 1342), 'readline.set_completer', 'readline.set_completer', (['None'], {}), '(None)\n', (1336, 1342), False, 'import readline\n')]
|
#
# Project SketchCNN
#
# Author: <NAME> (<EMAIL>),
# Copyright (c) 2018. All Rights Reserved.
#
# ==============================================================================
"""Convert LMDB to TFRecords
"""
import lmdb
import tensorflow as tf
import os
# Output TFRecords file and input LMDB root directory.
# Placeholder paths -- edit both before running.
tfrecord_fn = r'path_to_tfrecord\train_db.tfrecords'
data_dir = r'path_to_lmdb'
def __bytes_feature(value):
    """Wrap a raw bytes value in a tf.train.Feature holding a bytes_list."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def lmdb_to_TFRecords():
    """Merge the three source LMDBs under data_dir into one TFRecords file.

    Each (key, value) pair becomes a tf.train.Example with the raw key
    under 'name' and the raw value under 'block'.
    """
    writer = tf.python_io.TFRecordWriter(tfrecord_fn)
    # collect all lmdbs to write into one TFRecords (at least one lmdb)
    db_paths = [os.path.join(data_dir, 'lmdb_0'), os.path.join(data_dir, 'lmdb_1'), os.path.join(data_dir, 'lmdb_2')]
    try:
        for i in range(3):
            env = lmdb.open(db_paths[i], readonly=True)
            try:
                with env.begin() as txn:
                    with txn.cursor() as curs:
                        for key, value in curs:
                            print('put key: {} to train tfrecord'.format(key.decode('utf-8')))
                            feature = {
                                'name': __bytes_feature(key),
                                'block': __bytes_feature(value)
                            }
                            example = tf.train.Example(features=tf.train.Features(feature=feature))
                            writer.write(example.SerializeToString())
            finally:
                # BUG FIX: release the LMDB handle even if writing fails.
                env.close()
    finally:
        # BUG FIX: the writer was only closed on the success path; close it
        # in finally so the TFRecords file is flushed even on error.
        writer.close()
if __name__ == '__main__':
    # Restrict TensorFlow to GPU 0; harmless (and removable) on machines
    # without a GPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    lmdb_to_TFRecords()
|
[
"tensorflow.train.BytesList",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.Features",
"lmdb.open",
"os.path.join"
] |
[((487, 527), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['tfrecord_fn'], {}), '(tfrecord_fn)\n', (514, 527), True, 'import tensorflow as tf\n'), ((617, 649), 'os.path.join', 'os.path.join', (['data_dir', '"""lmdb_0"""'], {}), "(data_dir, 'lmdb_0')\n", (629, 649), False, 'import os\n'), ((651, 683), 'os.path.join', 'os.path.join', (['data_dir', '"""lmdb_1"""'], {}), "(data_dir, 'lmdb_1')\n", (663, 683), False, 'import os\n'), ((685, 717), 'os.path.join', 'os.path.join', (['data_dir', '"""lmdb_2"""'], {}), "(data_dir, 'lmdb_2')\n", (697, 717), False, 'import os\n'), ((757, 794), 'lmdb.open', 'lmdb.open', (['db_paths[i]'], {'readonly': '(True)'}), '(db_paths[i], readonly=True)\n', (766, 794), False, 'import lmdb\n'), ((412, 445), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (430, 445), True, 'import tensorflow as tf\n'), ((1215, 1249), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (1232, 1249), True, 'import tensorflow as tf\n')]
|
import aiohttp
from urllib.parse import urljoin
from extensions.archive.archive_result import ArchiveResult
class ArchiveHandler:
    """Submits links to web.archive.org's save API and parses the response."""
    archive_org_user_agent = "Durkabot (https://github.com/Durkastan/durkabot)"
    domain = "http://web.archive.org"
    save_url = urljoin(domain, 'save/')
    def __init__(self, loop):
        # One shared HTTP session for this handler's lifetime.
        self.session = aiohttp.ClientSession(loop=loop)
    async def _fetch(self, link, headers):
        async with self.session.get(link, headers=headers) as response:
            await response.read()  # no awaitable method for headers :/
            return response
    async def archive(self, link) -> ArchiveResult:
        """Archive *link* and return the parsed ArchiveResult.

        Raises Exception with the Wayback runtime-error header on the
        service's known error statuses (403/502).
        """
        response = await self._fetch(self.save_url + link,
                                     {'User-Agent': self.archive_org_user_agent})
        if response.status in [403, 502]:
            raise Exception(response.headers['X-Archive-Wayback-Runtime-Error'])
        return self.process_result(response.headers)
    @classmethod
    def process_result(cls, headers):
        """Build an ArchiveResult from the save-API response headers."""
        snapshot_link = urljoin(cls.domain, headers['Content-Location'])
        # 'X-Page-Cache: HIT' means the snapshot came from the page cache.
        was_cached = headers.get('X-Page-Cache') == 'HIT'
        return ArchiveResult(snapshot_link, headers['X-Archive-Orig-Date'], was_cached)
|
[
"aiohttp.ClientSession",
"urllib.parse.urljoin",
"extensions.archive.archive_result.ArchiveResult"
] |
[((266, 290), 'urllib.parse.urljoin', 'urljoin', (['domain', '"""save/"""'], {}), "(domain, 'save/')\n", (273, 290), False, 'from urllib.parse import urljoin\n'), ((345, 377), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'loop': 'loop'}), '(loop=loop)\n', (366, 377), False, 'import aiohttp\n'), ((1149, 1180), 'urllib.parse.urljoin', 'urljoin', (['cls.domain', 'archive_id'], {}), '(cls.domain, archive_id)\n', (1156, 1180), False, 'from urllib.parse import urljoin\n'), ((1348, 1392), 'extensions.archive.archive_result.ArchiveResult', 'ArchiveResult', (['link', 'archive_date', 'cache_hit'], {}), '(link, archive_date, cache_hit)\n', (1361, 1392), False, 'from extensions.archive.archive_result import ArchiveResult\n')]
|
import copy
from django_countries import countries
from faker import Faker
import random
import string
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from TWLight.resources.factories import PartnerFactory, StreamFactory, VideoFactory, SuggestionFactory
from TWLight.resources.models import Language, Partner, Stream, Suggestion, AccessCode
class Command(BaseCommand):
    # Development-data generator: creates `num` randomized partners, then
    # layers extra random state (tags, statuses, streams, suggestions,
    # access codes) on top of them.
    help = "Adds a number of example resources, streams, suggestions, and tags."
    def add_arguments(self, parser):
        # Only the first positional int is used (number of partners).
        parser.add_argument('num', nargs='+', type=int)
    def handle(self, *args, **options):
        """Create example partners and decorate them with random data."""
        num_partners = options['num'][0]
        tag_list = ["science", "humanities", "social science", "history",
                    "law", "video", "multidisciplinary"]
        fake = Faker()
        coordinators = User.objects.filter(groups__name='coordinators')
        for _ in range(num_partners):
            partner = PartnerFactory(
                company_location = random.choice(list(countries)),
                renewals_available = random.choice([True, False]),
                short_description = fake.paragraph(nb_sentences=4),
                send_instructions = fake.paragraph(nb_sentences=2),
                coordinator = random.choice(coordinators),
                real_name = self.chance(True, False, 40),
                country_of_residence = self.chance(True, False, 20),
                specific_title = self.chance(True, False, 10),
                specific_stream = self.chance(True, False, 10),
                occupation = self.chance(True, False, 10),
                affiliation = self.chance(True, False, 10),
                agreement_with_terms_of_use = self.chance(True, False, 10),
                mutually_exclusive = False
            )
            # ManyToMany relationships can't be set until the partner object has
            # been created.
            random_languages = random.sample(Language.objects.all(),
                random.randint(1,2)
            )
            for lang in random_languages:
                partner.languages.add(lang)
            partner.save()
        all_partners = Partner.even_not_available.all()
        # Give every partner between 1 and 4 random tags.
        for partner in all_partners:
            for tag in random.sample(tag_list, random.randint(1,4)):
                partner.tags.add(tag)
        # Set 5 partners to need a registration URL. We do this separately
        # because it requires both the account_email and registration_url
        # fields to be set concurrently.
        for registration_partner in random.sample(all_partners, 5):
            registration_partner.account_email = True
            registration_partner.registration_url = fake.uri()
            registration_partner.save()
        # While most fields can be set at random, we want to make sure we
        # get partners with certain fields set to particular values.
        # Set 5 random partners to be unavailable
        for unavailable_partner in random.sample(all_partners, 5):
            unavailable_partner.status = Partner.NOT_AVAILABLE
            unavailable_partner.save()
        # Set 10 random partners to have an excerpt limit in words
        for words in random.sample(all_partners, 10):
            words.excerpt_limit = random.randint(100, 250)
            words.save()
        # Set 10 random partners to have an excerpt limit in percentage
        for percentage in random.sample(all_partners, 10):
            percentage.excerpt_limit_percentage = random.randint(5, 50)
            percentage.save()
        # Set 1 random partner to have excerpt limits both in words and percentage
        for percentage_words in random.sample(all_partners, 1):
            percentage_words.excerpt_limit_percentage = random.randint(5, 50)
            percentage_words.excerpt_limit = random.randint(100, 250)
            percentage_words.save()
        available_partners = all_partners.exclude(status= Partner.NOT_AVAILABLE)
        # Set 10 random available partners to be waitlisted
        for waitlisted_partner in random.sample(available_partners, 10):
            waitlisted_partner.status = Partner.WAITLIST
            waitlisted_partner.save()
        # Set 25 random partners to have a long description
        for long_description in random.sample(all_partners, 25):
            long_description.description = fake.paragraph(nb_sentences = 10)
            long_description.save()
        # Set 10 random available partners to be featured
        for featured_partner in random.sample(available_partners, 10):
            featured_partner.featured = True
            featured_partner.save()
        # Give any specific_stream flagged partners streams.
        stream_partners = all_partners.filter(specific_stream=True)
        # Random number of accounts available for all partners without streams
        for accounts in all_partners:
            if not accounts.specific_stream:
                accounts.accounts_available = random.randint(10, 550)
                accounts.save()
        # If we happened to not create any partners with streams,
        # create one deliberately.
        if stream_partners.count() == 0:
            stream_partners = random.sample(all_partners, 1)
            stream_partners[0].specific_stream = True
            stream_partners[0].save()
        # Three streams per stream-flagged partner.
        for partner in stream_partners:
            for _ in range(3):
                stream = StreamFactory(
                    partner= partner,
                    name= fake.sentence(nb_words= 3)[:-1], # [:-1] removes full stop
                    description= fake.paragraph(nb_sentences=2)
                )
        # Set 15 partners to have somewhere between 1 and 5 video tutorial URLs
        for partner in random.sample(all_partners, 15):
            for _ in range(random.randint(1, 5)):
                VideoFactory(
                    partner = partner,
                    tutorial_video_url = fake.url()
                )
        # Random number of accounts available for all streams
        all_streams = Stream.objects.all()
        for each_stream in all_streams:
            each_stream.accounts_available = random.randint(10, 100)
            each_stream.save()
        # Generate a few suggestions with upvotes, all from a single author.
        all_users = User.objects.exclude(is_superuser=True)
        author_user = random.choice(all_users)
        for _ in range(random.randint(3, 10)):
            suggestion = SuggestionFactory(
                description = fake.paragraph(nb_sentences=10),
                author = author_user
            )
            suggestion.save()
            suggestion.upvoted_users.add(author_user)
            random_users = random.sample(all_users, random.randint(1, 10))
            suggestion.upvoted_users.add(*random_users)
        # Set 5 partners use the access code authorization method,
        # and generate a bunch of codes for each.
        for partner in random.sample(available_partners, 5):
            partner.authorization_method = Partner.CODES
            partner.save()
            for i in range(25):
                new_access_code = AccessCode()
                new_access_code.code = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
                new_access_code.partner = partner
                new_access_code.save()
        # NOTE(review): the loop below duplicates the access-code loop above
        # verbatim (another 5 sampled partners, 25 codes each) -- confirm
        # whether the repetition is intentional or a copy-paste leftover.
        # Set 5 partners use the access code authorization method,
        # and generate a bunch of codes for each.
        for partner in random.sample(available_partners, 5):
            partner.authorization_method = Partner.CODES
            partner.save()
            for i in range(25):
                new_access_code = AccessCode()
                new_access_code.code = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
                new_access_code.partner = partner
                new_access_code.save()
    def chance(self, selected, default, chance):
        # A percentage chance to select something, otherwise selects
        # the default option. Used to generate data that's more
        # in line with the live site distribution.
        roll = random.randint(0,100)
        if roll < chance:
            selection = selected
        else:
            selection = default
        return selection
|
[
"TWLight.resources.models.Language.objects.all",
"random.randint",
"faker.Faker",
"random.sample",
"django.contrib.auth.models.User.objects.filter",
"TWLight.resources.models.Stream.objects.all",
"random.choice",
"TWLight.resources.models.Partner.even_not_available.all",
"TWLight.resources.models.AccessCode",
"django.contrib.auth.models.User.objects.exclude"
] |
[((837, 844), 'faker.Faker', 'Faker', ([], {}), '()\n', (842, 844), False, 'from faker import Faker\n'), ((869, 917), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', ([], {'groups__name': '"""coordinators"""'}), "(groups__name='coordinators')\n", (888, 917), False, 'from django.contrib.auth.models import User\n'), ((2214, 2246), 'TWLight.resources.models.Partner.even_not_available.all', 'Partner.even_not_available.all', ([], {}), '()\n', (2244, 2246), False, 'from TWLight.resources.models import Language, Partner, Stream, Suggestion, AccessCode\n'), ((2618, 2648), 'random.sample', 'random.sample', (['all_partners', '(5)'], {}), '(all_partners, 5)\n', (2631, 2648), False, 'import random\n'), ((3037, 3067), 'random.sample', 'random.sample', (['all_partners', '(5)'], {}), '(all_partners, 5)\n', (3050, 3067), False, 'import random\n'), ((3269, 3300), 'random.sample', 'random.sample', (['all_partners', '(10)'], {}), '(all_partners, 10)\n', (3282, 3300), False, 'import random\n'), ((3489, 3520), 'random.sample', 'random.sample', (['all_partners', '(10)'], {}), '(all_partners, 10)\n', (3502, 3520), False, 'import random\n'), ((3752, 3782), 'random.sample', 'random.sample', (['all_partners', '(1)'], {}), '(all_partners, 1)\n', (3765, 3782), False, 'import random\n'), ((4157, 4194), 'random.sample', 'random.sample', (['available_partners', '(10)'], {}), '(available_partners, 10)\n', (4170, 4194), False, 'import random\n'), ((4384, 4415), 'random.sample', 'random.sample', (['all_partners', '(25)'], {}), '(all_partners, 25)\n', (4397, 4415), False, 'import random\n'), ((4621, 4658), 'random.sample', 'random.sample', (['available_partners', '(10)'], {}), '(available_partners, 10)\n', (4634, 4658), False, 'import random\n'), ((5893, 5924), 'random.sample', 'random.sample', (['all_partners', '(15)'], {}), '(all_partners, 15)\n', (5906, 5924), False, 'import random\n'), ((6224, 6244), 'TWLight.resources.models.Stream.objects.all', 'Stream.objects.all', 
([], {}), '()\n', (6242, 6244), False, 'from TWLight.resources.models import Language, Partner, Stream, Suggestion, AccessCode\n'), ((6473, 6512), 'django.contrib.auth.models.User.objects.exclude', 'User.objects.exclude', ([], {'is_superuser': '(True)'}), '(is_superuser=True)\n', (6493, 6512), False, 'from django.contrib.auth.models import User\n'), ((6535, 6559), 'random.choice', 'random.choice', (['all_users'], {}), '(all_users)\n', (6548, 6559), False, 'import random\n'), ((7132, 7168), 'random.sample', 'random.sample', (['available_partners', '(5)'], {}), '(available_partners, 5)\n', (7145, 7168), False, 'import random\n'), ((7685, 7721), 'random.sample', 'random.sample', (['available_partners', '(5)'], {}), '(available_partners, 5)\n', (7698, 7721), False, 'import random\n'), ((8347, 8369), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (8361, 8369), False, 'import random\n'), ((3336, 3360), 'random.randint', 'random.randint', (['(100)', '(250)'], {}), '(100, 250)\n', (3350, 3360), False, 'import random\n'), ((3572, 3593), 'random.randint', 'random.randint', (['(5)', '(50)'], {}), '(5, 50)\n', (3586, 3593), False, 'import random\n'), ((3840, 3861), 'random.randint', 'random.randint', (['(5)', '(50)'], {}), '(5, 50)\n', (3854, 3861), False, 'import random\n'), ((3907, 3931), 'random.randint', 'random.randint', (['(100)', '(250)'], {}), '(100, 250)\n', (3921, 3931), False, 'import random\n'), ((5329, 5359), 'random.sample', 'random.sample', (['all_partners', '(1)'], {}), '(all_partners, 1)\n', (5342, 5359), False, 'import random\n'), ((6330, 6353), 'random.randint', 'random.randint', (['(10)', '(100)'], {}), '(10, 100)\n', (6344, 6353), False, 'import random\n'), ((6583, 6604), 'random.randint', 'random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (6597, 6604), False, 'import random\n'), ((1989, 2011), 'TWLight.resources.models.Language.objects.all', 'Language.objects.all', ([], {}), '()\n', (2009, 2011), False, 'from 
TWLight.resources.models import Language, Partner, Stream, Suggestion, AccessCode\n'), ((2033, 2053), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (2047, 2053), False, 'import random\n'), ((2331, 2351), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (2345, 2351), False, 'import random\n'), ((5088, 5111), 'random.randint', 'random.randint', (['(10)', '(550)'], {}), '(10, 550)\n', (5102, 5111), False, 'import random\n'), ((5953, 5973), 'random.randint', 'random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (5967, 5973), False, 'import random\n'), ((6911, 6932), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (6925, 6932), False, 'import random\n'), ((7321, 7333), 'TWLight.resources.models.AccessCode', 'AccessCode', ([], {}), '()\n', (7331, 7333), False, 'from TWLight.resources.models import Language, Partner, Stream, Suggestion, AccessCode\n'), ((7874, 7886), 'TWLight.resources.models.AccessCode', 'AccessCode', ([], {}), '()\n', (7884, 7886), False, 'from TWLight.resources.models import Language, Partner, Stream, Suggestion, AccessCode\n'), ((1099, 1127), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (1112, 1127), False, 'import random\n'), ((1295, 1322), 'random.choice', 'random.choice', (['coordinators'], {}), '(coordinators)\n', (1308, 1322), False, 'import random\n'), ((7381, 7434), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (7394, 7434), False, 'import random\n'), ((7934, 7987), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (7947, 7987), False, 'import random\n')]
|
from pytest import raises
import npk
# Test fixtures: a prebuilt package, the plaintext it was built from,
# and the 4-part key that opens it.
filename = "testres/sample.npk"
sample = "testres/sample.txt"
key = (98521, 16322, 7163, 992)
class TestNpk(object):
    """Round-trip, iteration and error-path tests for the npk package API."""
    def _sample_text(self):
        # Read the reference plaintext with a context manager so the file
        # handle is closed promptly (the old bare open().read() leaked it).
        with open(sample) as f:
            return f.read()
    def test_open_package(self):
        pack = npk.package(filename, key)
        pack.close()
    def test_create_package(self):
        pack = npk.package()
        pack.add(sample)
        pack.save("test.npk")
        pack.close()
    def test_open_package_fail(self):
        # A wrong key must raise rather than return a broken package.
        with raises(npk.FailToOpenPackage):
            npk.package(filename, reversed(key))
    def test_iterate_entities(self):
        pack = npk.package(filename, key)
        entities = pack.all()
        entities_expected = ['sample.txt', 'tea.txt', 'zip.txt', 'zipntea.txt']
        assert len(entities) == 4
        # Compare sorted lists: sorting inside set() was redundant, and a
        # list comparison additionally catches duplicate names.
        assert sorted(str(x) for x in entities) == entities_expected
        pack.close()
    def test_get_entity(self):
        pack = npk.package(filename, key)
        expected = self._sample_text()
        for entity in pack.all():
            assert entity.read() == expected
        pack.close()
    def test_export_entity(self, tmpdir):
        pack = npk.package(filename, key)
        expected = self._sample_text()
        for entity in pack.all():
            export_filename = str(tmpdir.join(entity.name()))
            entity.export(export_filename)
            with open(export_filename) as f:
                assert f.read() == expected
        pack.close()
    def test_get_entity_fail(self):
        pack = npk.package(filename, key)
        with raises(npk.EntityNotFound):
            pack.get("notfound.42")
        pack.close()
|
[
"npk.package",
"pytest.raises"
] |
[((206, 232), 'npk.package', 'npk.package', (['filename', 'key'], {}), '(filename, key)\n', (217, 232), False, 'import npk\n'), ((305, 318), 'npk.package', 'npk.package', ([], {}), '()\n', (316, 318), False, 'import npk\n'), ((580, 606), 'npk.package', 'npk.package', (['filename', 'key'], {}), '(filename, key)\n', (591, 606), False, 'import npk\n'), ((908, 934), 'npk.package', 'npk.package', (['filename', 'key'], {}), '(filename, key)\n', (919, 934), False, 'import npk\n'), ((1104, 1130), 'npk.package', 'npk.package', (['filename', 'key'], {}), '(filename, key)\n', (1115, 1130), False, 'import npk\n'), ((1414, 1440), 'npk.package', 'npk.package', (['filename', 'key'], {}), '(filename, key)\n', (1425, 1440), False, 'import npk\n'), ((447, 476), 'pytest.raises', 'raises', (['npk.FailToOpenPackage'], {}), '(npk.FailToOpenPackage)\n', (453, 476), False, 'from pytest import raises\n'), ((1454, 1480), 'pytest.raises', 'raises', (['npk.EntityNotFound'], {}), '(npk.EntityNotFound)\n', (1460, 1480), False, 'from pytest import raises\n')]
|
#!/usr/bin/env python3
# wykys 2019
from numpy import ndarray
from awgn import awgn
WAV_PATH = '../wav/'
def wav(path: str, snr_db: float = None) -> tuple:
    """Load a WAV file from WAV_PATH, optionally add noise, and peak-normalize.

    path: file name relative to WAV_PATH.
    snr_db: if given, additive white Gaussian noise at this SNR (dB) is
        applied via awgn() before normalization.
    Returns (fs, s): sample rate and normalized signal array.
    (BUG FIX: the annotation said `-> list`, but a tuple is returned.)

    NOTE(review): normalization divides by s.max(), not abs(s).max() --
    a signal whose largest magnitude is negative is not scaled into
    [-1, 1]; confirm whether that is intended.
    """
    from scipy.io import wavfile
    fs, s = wavfile.read(WAV_PATH + path)
    if snr_db is not None:  # clearer than `not (snr_db is None)`
        s = awgn(s, snr_db)
    s = s / s.max()
    return fs, s
|
[
"awgn.awgn",
"scipy.io.wavfile.read"
] |
[((203, 232), 'scipy.io.wavfile.read', 'wavfile.read', (['(WAV_PATH + path)'], {}), '(WAV_PATH + path)\n', (215, 232), False, 'from scipy.io import wavfile\n'), ((275, 290), 'awgn.awgn', 'awgn', (['s', 'snr_db'], {}), '(s, snr_db)\n', (279, 290), False, 'from awgn import awgn\n')]
|
#%%
from frontmatter import Frontmatter
import markdown
# Notebook-style scratch cells: split frontmatter from a markdown file,
# render the body to HTML, and prepend the generated table of contents.
fp = 'testtemplate.md'
# %%
head_body = Frontmatter.read_file(fp)
type(head_body['body'])
# %%
# One-shot conversion; the commented variant adds syntax highlighting.
bodyhtml = markdown.markdown(head_body['body'], extensions=['toc', 'tables','fenced_code'])
# bodyhtml = markdown.markdown(head_body['body'], extensions=['toc', 'tables','fenced_code', 'codehilite'])
bodyhtml
# %%
ofp = 'test.html'
of = open(ofp,'w',encoding='utf-8',errors='xmlcharrefreplace')
of.write(bodyhtml)
of.close()
# %%
# Reusable converter object -- needed to access .toc after convert().
md = markdown.Markdown(extensions=['toc', 'tables','fenced_code'])
# need fenced_code here too
# %%
bodytoc = md.convert(head_body['body'])
# bodytoc
bodyhtml == bodytoc
# %%
md.toc
# %%
# Prepend the TOC to the already-written HTML file.
# NOTE(review): the explicit f.close() inside the with-block is redundant.
with open('test.html','r+',encoding='utf-8',errors='xmlcharrefreplace') as f:
    old = f.read()
    f.seek(0)
    f.write(md.toc)
    f.write(old)
    f.close()
#%%
from bs4 import BeautifulSoup
htmlfp = '../saturn-drmtest.github.io/layout/articletest.html'
soup = BeautifulSoup(open(htmlfp).read(), "html.parser")
soup.title
# %%
type(soup.title.string)
# %%
soup.title.string = 'new title'
soup.title
# %%
soup = BeautifulSoup('<div id="content"></div>', "html.parser")
targetdiv = soup.find(id='content')
targetdiv.insert(0, tempcontent[1])
targetdiv
# %%
html = '''
<div id="offsetheader">
<img src="/assets/img/covers/codingcover.jpg"/>
</div>
'''
headImgSrc = '/assests/img/covers/architecturecover.jpg'
soup = BeautifulSoup(html, "html.parser")
targetDiv = soup.find(id='offsetheader')
targetDiv.img['src'] = headImgSrc
targetDiv
# %%
import os
class filepaths():
    """Finds source files under *orifp* and maps each one that still needs
    converting to its destination .html path under *desfolder*.

    Attributes:
        path: root directory to scan.
        fileList: every file found under path (full paths).
        validFileList: files still to convert (.DS_Store excluded, and the
            destination .html does not exist yet).
        desFileDict: source path -> destination .html path.
    """
    def __init__(self, orifp, desfolder):
        self.path = orifp
        self.fileList = []
        self.validFileList = []
        self.desFileDict = {}
        self.desfolder = desfolder
    def getFiles(self):
        """Walk self.path and record every file found in fileList."""
        for root, subFolders, files in os.walk(self.path):
            for fileName in files:
                self.fileList.append(os.path.join(root, fileName))
    def validFiles(self):
        """Populate validFileList/desFileDict with not-yet-converted files."""
        for fileName in self.fileList:
            clearFileName = os.path.basename(fileName)
            # GENERALIZED: path relative to the scan root. Previously this
            # was fileName.split('/')[3:], which only worked when the base
            # path had exactly three POSIX components.
            relName = os.path.relpath(fileName, self.path)
            htmlBaseName = os.path.splitext(relName)[0] + '.html'
            if clearFileName == '.DS_Store':
                continue
            if os.path.exists(os.path.join(self.desfolder, htmlBaseName)):
                continue
            self.validFileList.append(fileName)
            self.desFileDict[fileName] = os.path.join(self.desfolder, htmlBaseName)
    def getFilePaths(self):
        return self.fileList
    def getValidFileNames(self):
        return self.validFileList
# Scratch cells exercising the filepaths class and its path manipulations
# step by step on a concrete file.
filepathclass = filepaths('../saturn-drmtest.github.io/posts', '../saturn-drmtest.github.io/postshtml')
filepathclass.getFiles()
filepathclass.validFiles()
# filepathclass.validFileList
dic = filepathclass.desFileDict
# %%
clearFileName = os.path.basename('../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-26-资治通鉴.md')
clearFileName
# %%
# Drop the first three path components to get the path below posts/.
subfolderFileName = '/'.join('../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-26-资治通鉴.md'.split('/')[3:])
subfolderFileName
# %%
# NOTE(review): this cell derives the .html name from the basename, while
# the class uses the subfolder-relative name -- the two differ.
htmlBaseName = os.path.splitext(clearFileName)[0] + '.html'
htmlBaseName
# %%
os.path.exists(os.path.join('../saturn-drmtest.github.io/postshtml', htmlBaseName))
# %%
print('Converting %s' % os.path.basename('../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-26-资治通鉴.md'))
# %%
def insertDiv(modifiedSoup, id=''):
    """Replace the contents of the element with the given id with
    *modifiedSoup*.

    NOTE(review): the element is looked up in the module-level global
    `soup`, not a parameter, and `id` shadows the builtin -- confirm
    both are intentional before reuse.
    """
    targetDiv = soup.find(id=id)
    targetDiv.clear()
    targetDiv.insert(0, modifiedSoup)
htmltxt = '''
<h1 class="anchor" id="head1">head1</h1>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et
dolore
magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu
fugiat
nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit
anim id est laborum.</p>
<h2 id="subhead1">subhead1</h2>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et
dolore
magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu
fugiat
nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit
anim id est laborum.</p>
'''
htmlfp = '../saturn-drmtest.github.io/layout/article.html'
soup = BeautifulSoup(open(htmlfp).read(), "html.parser")
insertDiv(BeautifulSoup(htmltxt, 'html.parser'), id='content')
soup
# %%
from bs4 import BeautifulSoup
soup = BeautifulSoup("<b>stop</b>")
tag = soup.new_tag('h1')
tag.string = "Don't"
soup.find('b').string.insert_before(tag)
soup.b
# %%
html = '''
<div id="content">
<div id="post"><h1>title</h1></div>
</div>
'''
soup = BeautifulSoup(html, 'html.parser')
tag = soup.new_tag('h1')
tag.string = 'title2'
targettag = soup.find(id='post')
targettag.insert(0, tag)
soup
# %%
|
[
"os.path.basename",
"os.walk",
"frontmatter.Frontmatter.read_file",
"markdown.markdown",
"os.path.splitext",
"markdown.Markdown",
"os.path.join",
"bs4.BeautifulSoup"
] |
[((99, 124), 'frontmatter.Frontmatter.read_file', 'Frontmatter.read_file', (['fp'], {}), '(fp)\n', (120, 124), False, 'from frontmatter import Frontmatter\n'), ((166, 251), 'markdown.markdown', 'markdown.markdown', (["head_body['body']"], {'extensions': "['toc', 'tables', 'fenced_code']"}), "(head_body['body'], extensions=['toc', 'tables',\n 'fenced_code'])\n", (183, 251), False, 'import markdown\n'), ((492, 554), 'markdown.Markdown', 'markdown.Markdown', ([], {'extensions': "['toc', 'tables', 'fenced_code']"}), "(extensions=['toc', 'tables', 'fenced_code'])\n", (509, 554), False, 'import markdown\n'), ((1097, 1153), 'bs4.BeautifulSoup', 'BeautifulSoup', (['"""<div id="content"></div>"""', '"""html.parser"""'], {}), '(\'<div id="content"></div>\', \'html.parser\')\n', (1110, 1153), False, 'from bs4 import BeautifulSoup\n'), ((1416, 1450), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1429, 1450), False, 'from bs4 import BeautifulSoup\n'), ((2902, 2995), 'os.path.basename', 'os.path.basename', (['"""../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-26-资治通鉴.md"""'], {}), "(\n '../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-26-资治通鉴.md')\n", (2918, 2995), False, 'import os\n'), ((5017, 5045), 'bs4.BeautifulSoup', 'BeautifulSoup', (['"""<b>stop</b>"""'], {}), "('<b>stop</b>')\n", (5030, 5045), False, 'from bs4 import BeautifulSoup\n'), ((5230, 5264), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (5243, 5264), False, 'from bs4 import BeautifulSoup\n'), ((3245, 3312), 'os.path.join', 'os.path.join', (['"""../saturn-drmtest.github.io/postshtml"""', 'htmlBaseName'], {}), "('../saturn-drmtest.github.io/postshtml', htmlBaseName)\n", (3257, 3312), False, 'import os\n'), ((4916, 4953), 'bs4.BeautifulSoup', 'BeautifulSoup', (['htmltxt', '"""html.parser"""'], {}), "(htmltxt, 'html.parser')\n", (4929, 4953), False, 'from bs4 import BeautifulSoup\n'), 
((1828, 1846), 'os.walk', 'os.walk', (['self.path'], {}), '(self.path)\n', (1835, 1846), False, 'import os\n'), ((3166, 3197), 'os.path.splitext', 'os.path.splitext', (['clearFileName'], {}), '(clearFileName)\n', (3182, 3197), False, 'import os\n'), ((3344, 3437), 'os.path.basename', 'os.path.basename', (['"""../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-26-资治通鉴.md"""'], {}), "(\n '../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-26-资治通鉴.md')\n", (3360, 3437), False, 'import os\n'), ((2044, 2070), 'os.path.basename', 'os.path.basename', (['fileName'], {}), '(fileName)\n', (2060, 2070), False, 'import os\n'), ((1920, 1948), 'os.path.join', 'os.path.join', (['root', 'fileName'], {}), '(root, fileName)\n', (1932, 1948), False, 'import os\n'), ((2164, 2199), 'os.path.splitext', 'os.path.splitext', (['subfolderFileName'], {}), '(subfolderFileName)\n', (2180, 2199), False, 'import os\n'), ((2311, 2353), 'os.path.join', 'os.path.join', (['self.desfolder', 'htmlBaseName'], {}), '(self.desfolder, htmlBaseName)\n', (2323, 2353), False, 'import os\n'), ((2492, 2534), 'os.path.join', 'os.path.join', (['self.desfolder', 'htmlBaseName'], {}), '(self.desfolder, htmlBaseName)\n', (2504, 2534), False, 'import os\n')]
|
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.hashers import make_password, check_password
import datetime
import time
import random
from decorator import *
from app_cart.views import *
from .models import *
def getOrderByUser(user_id=None, mode="all"):
    """Return the non-deleted orders of a user.

    user_id: primary key of the owning user; required.
    mode: "all" (newest first), "unfinished" (in-flight statuses),
        or "finished" (confirmed).
    Raises ParamException for a missing user_id or an unknown mode.
    """
    if user_id is None:  # BUG FIX: was `== None`
        raise ParamException()
    if mode == "all":
        return Order.objects.filter(
            user_id=user_id, isdelete='0').order_by("-id")
    if mode == "unfinished":
        return Order.objects.filter(
            user_id=user_id, isdelete='0',
            status__in=['processing', 'examining', 'preparing',
                        'delivering', 'delivered'])
    if mode == "finished":
        return Order.objects.filter(
            user_id=user_id, isdelete='0', status='confirmed')
    # BUG FIX: an unknown mode previously fell through and raised
    # NameError on the unbound `orders`; reject it explicitly.
    raise ParamException()
def getOrderByMode(mode=None):
    """Return non-deleted orders filtered by status.

    mode: one of the known order statuses, or "all" for every
        non-deleted order.
    Raises ParamException for a missing or unknown mode.
    """
    # FIX: removed a leftover debug print(mode).
    if mode is None:  # BUG FIX: was `== None`
        raise ParamException()
    if mode not in ['unprocessed', 'processing', 'examining', 'preparing',
                    'delivering', 'delivered', 'confirmed', 'all']:
        raise ParamException()
    if mode == "all":
        return Order.objects.filter(isdelete='0')
    return Order.objects.filter(isdelete='0', status=mode)
def getOrderByID(order_id=None, serialnumber=None):
    """Fetch a single non-deleted order by id or by serial number.

    Exactly one matching record must exist, otherwise an
    RFSException('50012') is raised.

    Raises:
        ParamException: if neither lookup key is usable.
    """
    if order_id is None and serialnumber is None:
        raise ParamException()
    if order_id:
        matches = Order.objects.filter(isdelete='0', id=order_id)
        if matches.count() == 1:
            return Order.objects.get(isdelete='0', id=order_id)
        raise RFSException('50012', '无效订单')
    if serialnumber:
        matches = Order.objects.filter(isdelete='0', serialnumber=serialnumber)
        if matches.count() == 1:
            return Order.objects.get(isdelete='0', serialnumber=serialnumber)
        raise RFSException('50012', '无效订单')
    raise ParamException()
def createOrder(user_id=None, discount=1, paymentname=None, address_id=None):
    """Create an order from the user's selected cart items.

    Checks stock for every selected item, totals the (discounted) price,
    creates the Order row plus one OrderDetail per item, then clears the
    selected cart entries.

    NOTE(review): stock (`goods.remain`) is checked but not decremented
    here — presumably that happens elsewhere; verify.

    Raises:
        ParamException: if any argument is missing.
        RFSException: '50112' when no cart item is selected,
            '50111' when an item is out of stock.
    """
    if None in [user_id, discount, paymentname, address_id]:
        raise ParamException()
    carts = getSelectedCart(user_id)
    if len(carts) == 0:
        raise RFSException('50112', '未选择任何商品或购物车为空')
    totalprice = 0
    for cart in carts:
        if cart.goods.remain < cart.amount:
            raise RFSException('50111', '商品余量不足')
        totalprice = totalprice + cart.goods.price * cart.amount * discount
    # Create the order record; serial number = timestamp + ms + random suffix.
    serialnumber = "{}{}{}".format(datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
                                   int(time.time()*1000),
                                   random.randint(1000, 9999))
    order = Order.objects.create(
        serialnumber=serialnumber,
        user_id=user_id,
        address_id=address_id,
        totalprice=int(totalprice),
        discount=discount,
        createtime=datetime.datetime.now(),
        paymentname=paymentname
    )
    # Create one order-detail record per cart item.
    for cart in carts:
        createOrderDetail(order.id, cart.goods_id,
                          cart.goods.price, cart.amount)
    deleteSelectedCart(user_id)
    return order
def paidOrder(order_id=None):
    """Placeholder for marking an order as paid — not implemented yet.

    See paidConfirm() below for the implemented payment-confirmation flow.
    """
    pass
def changeOrder(order_id=None, mode=None):
    """Advance an order one step along its lifecycle.

    `mode` must equal the order's *current* status; the order is then
    moved to the next status in the pipeline:
    unprocessed -> processing -> examining -> preparing -> delivering
    -> delivered -> confirmed.

    Raises:
        ParamException: if an argument is missing.
        RFSException: '50513' for an unknown mode or a mode that does not
            match the order's current status.
    """
    if None in [order_id, mode]:
        raise ParamException()
    # Transition table: current status -> next status.
    next_status = {
        'unprocessed': 'processing',
        'processing': 'examining',
        'examining': 'preparing',
        'preparing': 'delivering',
        'delivering': 'delivered',
        'delivered': 'confirmed',
    }
    if mode not in next_status:
        raise RFSException('50513', '订单状态非法')
    order = getOrderByID(order_id)
    if order.status != mode:
        raise RFSException('50513', '订单状态非法')
    order.status = next_status[mode]
    order.save()
def createOrderDetail(order_id=None, goods_id=None, price=None, amount=None):
    """Persist one order line item (snapshot of price at purchase time).

    Raises:
        ParamException: if any argument is missing.
    """
    if None in [order_id, goods_id, price, amount]:
        raise ParamException()
    return OrderDetail.objects.create(
        order_id=order_id,
        goods_id=goods_id,
        price=int(price),
        amount=amount,
    )
def deleteOrder(order_id=None):
    """Soft-delete an order (sets isdelete='1').

    Only orders in 'processing', 'examining' or 'confirmed' state may be
    deleted.

    Raises:
        ParamException: if order_id is missing.
        RFSException: '50301' when the order's state forbids deletion.
    """
    if order_id is None:
        raise ParamException()
    order = getOrderByID(order_id)
    # FIX: original referenced the undefined name `order_obj` here,
    # raising NameError on every call.
    if order.status in ['processing', 'examining', 'confirmed']:
        order.isdelete = '1'
        order.save()
    else:
        raise RFSException("50301", "删除失败")
def paidConfirm(order_id=None, serialnumber=None):
    """Record payment of an order and move it to 'examining'.

    Raises:
        RFSException: '90101' if the order was already paid (status is no
            longer 'unprocessed').
    """
    order = getOrderByID(order_id, serialnumber)
    if order.status != "unprocessed":
        raise RFSException("90101", "请勿重复支付")
    order.status = "examining"
    order.paidtime = datetime.datetime.now()
    order.save()
    return order
|
[
"datetime.datetime.now",
"random.randint",
"time.time"
] |
[((2532, 2558), 'random.randint', 'random.randint', (['(1000)', '(9999)'], {}), '(1000, 9999)\n', (2546, 2558), False, 'import random\n'), ((4817, 4840), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4838, 4840), False, 'import datetime\n'), ((2768, 2791), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2789, 2791), False, 'import datetime\n'), ((2389, 2412), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2410, 2412), False, 'import datetime\n'), ((2478, 2489), 'time.time', 'time.time', ([], {}), '()\n', (2487, 2489), False, 'import time\n')]
|
from django.db import models
from reviewsapp.celebrities.models import Celebrity
from reviewsapp.core.behaviors import Filmable, Timestampable
class Network(models.Model):
    # Broadcast network a TV show airs on; referenced by TvShow.network.
    name = models.CharField(max_length=100)
class TvShow(Filmable, Timestampable, models.Model):
    """A television show with its cast/crew, creators and airing network."""

    premiere_date = models.DateField()
    # FIX: Django's many-to-many field is `ManyToManyField`;
    # `models.ManytoMany` does not exist and crashed at import time.
    # FIX: the through model is `Crew` (defined below); the original named
    # a non-existent 'tvShowCastCrew' model — TODO confirm intent.
    cast_crew = models.ManyToManyField(
        Celebrity, related_name='tvshows', through='Crew')
    creators = models.ManyToManyField(Celebrity)
    network = models.ForeignKey(Network, on_delete=models.SET_NULL, null=True)

    def __str__(self):
        # NOTE(review): `title` presumably comes from the Filmable mixin —
        # not visible here; confirm.
        return self.title
class Crew(models.Model):
    # Join table linking a celebrity to a TV show they worked on.
    celebrity = models.ForeignKey(Celebrity, on_delete=models.CASCADE)
    tv_show = models.ForeignKey(TvShow, on_delete=models.CASCADE)
class TvSeason(models.Model):
    # TODO: model a season of a TvShow — currently an empty stub.
    pass
class TvEpisode(models.Model):
    # TODO: model an episode of a TvSeason — currently an empty stub.
    pass
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.DateField",
"django.db.models.ManytoMany"
] |
[((186, 218), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (202, 218), False, 'from django.db import models\n'), ((294, 312), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (310, 312), False, 'from django.db import models\n'), ((329, 407), 'django.db.models.ManytoMany', 'models.ManytoMany', (['Celebrity'], {'related_name': '"""tvshows"""', 'through': '"""tvShowCastCrew"""'}), "(Celebrity, related_name='tvshows', through='tvShowCastCrew')\n", (346, 407), False, 'from django.db import models\n'), ((423, 451), 'django.db.models.ManytoMany', 'models.ManytoMany', (['Celebrity'], {}), '(Celebrity)\n', (440, 451), False, 'from django.db import models\n'), ((466, 530), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Network'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), '(Network, on_delete=models.SET_NULL, null=True)\n', (483, 530), False, 'from django.db import models\n'), ((624, 678), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Celebrity'], {'on_delete': 'models.CASCADE'}), '(Celebrity, on_delete=models.CASCADE)\n', (641, 678), False, 'from django.db import models\n'), ((693, 744), 'django.db.models.ForeignKey', 'models.ForeignKey', (['TvShow'], {'on_delete': 'models.CASCADE'}), '(TvShow, on_delete=models.CASCADE)\n', (710, 744), False, 'from django.db import models\n')]
|
################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. <NAME> and TSRI 2016
##
################################################################################
#############################################################################
#
# Author: <NAME>
#
# Copyright: <NAME> TSRI 2014
#
#########################################################################
#
# $Header: /mnt/raid/services/cvs/PmvApp/__init__.py,v 1.2.4.1 2017/07/13 20:55:28 annao Exp $
#
# $Id: __init__.py,v 1.2.4.1 2017/07/13 20:55:28 annao Exp $
#
def mkPmvApp(eventHandler=None):
    """Create and return a PmvApp molecular application instance.

    Args:
        eventHandler: accepted for API compatibility; currently unused.
    """
    from PmvApp.Pmv import MolApp

    app = MolApp()
    # Let exceptions propagate to the caller instead of being trapped
    # inside the application.
    app.trapExceptions = False
    return app
def loadDefaultCommands(pmv):
    """Register the default set of commands on a PmvApp instance.

    Eagerly loads the MSMS surface commands, lazily loads cartoon and
    interaction commands, and wires default display/coloring commands to
    run whenever a Molecule is added.
    """
    from PmvApp.msmsCmds import ComputeMSMS, DisplayMSMS, UndisplayMSMS
    pmv.addCommand(ComputeMSMS(), 'computeMSMS')
    pmv.computeMSMS.loadCommand() # load the command
    #pmv.userpref.set('Compute cavities by default', 'yes')
    pmv.addCommand(DisplayMSMS(), 'displayMSMS')
    pmv.addCommand(UndisplayMSMS(), 'undisplayMSMS')
    #pmv.lazyLoad('displayHyperBallsCmds', package='PmvApp')
    pmv.lazyLoad('cartoonCmds', package='PmvApp')
    pmv.lazyLoad('interactionsCmds', package='PmvApp')
    #pmv.lazyLoad('coarseMolecularSurfaceCmds', package='PmvApp')
    # When a Molecule is added: display lines, color by atom type, then
    # color by molecule (carbons only).
    pmv.setOnAddObjectCmd('Molecule', [pmv.displayLines,
                                       pmv.colorByAtomType,
                                       pmv.colorByMolecules],
                          kwList=[{}, {}, {'carbonsOnly':True}])
    ## pmv.lazyLoad('bondsCmds', package='PmvApp')
    ## pmv.lazyLoad('fileCmds', package='PmvApp')
    ## pmv.lazyLoad('displayCmds', package='PmvApp')
    ## pmv.lazyLoad('editCmds', package='PmvApp')
    ## pmv.displayLines.loadCommand()
    ## pmv.lazyLoad("colorCmds", package="PmvApp")
    ## pmv.color.loadCommand()
    ## pmv.lazyLoad("selectionCmds", package="PmvApp")
    ## pmv.lazyLoad('deleteCmds', package='PmvApp')
    ## pmv.lazyLoad('labelCmds', package='PmvApp')
    ## pmv.lazyLoad('msmsCmds', package='PmvApp')
    ## pmv.lazyLoad('displayHyperBallsCmds', package='PmvApp')
    ## pmv.lazyLoad('interactionsCmds', package='PmvApp')
    ## pmv.lazyLoad('coarseMolecularSurfaceCmds', package='PmvApp')
    ## pmv.setOnAddObjectCmd('Molecule', [pmv.displayLines, pmv.colorByAtomType])
|
[
"PmvApp.msmsCmds.ComputeMSMS",
"PmvApp.msmsCmds.DisplayMSMS",
"PmvApp.Pmv.MolApp",
"PmvApp.msmsCmds.UndisplayMSMS"
] |
[((1421, 1429), 'PmvApp.Pmv.MolApp', 'MolApp', ([], {}), '()\n', (1427, 1429), False, 'from PmvApp.Pmv import MolApp\n'), ((1598, 1611), 'PmvApp.msmsCmds.ComputeMSMS', 'ComputeMSMS', ([], {}), '()\n', (1609, 1611), False, 'from PmvApp.msmsCmds import ComputeMSMS, DisplayMSMS, UndisplayMSMS\n'), ((1760, 1773), 'PmvApp.msmsCmds.DisplayMSMS', 'DisplayMSMS', ([], {}), '()\n', (1771, 1773), False, 'from PmvApp.msmsCmds import ComputeMSMS, DisplayMSMS, UndisplayMSMS\n'), ((1809, 1824), 'PmvApp.msmsCmds.UndisplayMSMS', 'UndisplayMSMS', ([], {}), '()\n', (1822, 1824), False, 'from PmvApp.msmsCmds import ComputeMSMS, DisplayMSMS, UndisplayMSMS\n')]
|
from typing import Protocol, Union
import dataclasses
import numpy as np
class Motion(Protocol):
    """Protocol of a 1D motion."""

    # Shift of the motion along the time axis.
    # FIX: this was a stray string literal placed *before* the attribute,
    # where it is a no-op statement, not documentation.
    offset: float

    def at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        """Returns the position at time(s)."""
        ...

    def d_at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        """Returns the velocity at time(s)."""
        ...
@dataclasses.dataclass
class PolynomialMotion(Motion):
    """One-dimensional motion represented by a polynomial of degree N.

    Args:
        offset: Global time offset of this motion
        coeffs: N+1 polynomial coefficients starting with the highest term.
    """

    offset: float
    coeffs: np.ndarray
    degree: int = dataclasses.field(init=False)

    def __post_init__(self):
        self.degree = len(self.coeffs) - 1
        # Keep coefficients as an (N+1)x1 column, as documented by the
        # class contract.
        self.coeffs = np.asarray(self.coeffs).reshape(-1, 1)

    def at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        """Returns the position at time(s)."""
        if np.isscalar(t):
            return float(np.polyval(self.coeffs.ravel(), t - self.offset))
        return np.polyval(self.coeffs.ravel(), np.asarray(t) - self.offset)

    def d_at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        """Returns the velocity at time(s)."""
        # Power rule: derivative coefficients, highest term first.
        c = self.coeffs.ravel()
        dc = c[:-1] * np.arange(self.degree, 0, -1)
        if np.isscalar(t):
            return float(np.polyval(dc, t - self.offset))
        return np.polyval(dc, np.asarray(t) - self.offset)
def poly_blend_3(m1: Motion, m2: Motion, tnow: float, h: float) -> PolynomialMotion:
    """Returns a third-degree polynomial function that blends two motions.

    Args:
        m1: First motion
        m2: Second motion
        tnow: Start of blend
        h: Horizon of blend

    Returns:
        mblend: Polynomial motion blending m1 and m2 in segment [tnow, tnow+h].
    """
    if h <= 0.0:
        raise ValueError("Horizon has to be > 0.0")
    # Four C1 boundary conditions on a cubic in local time s = t - tnow:
    # rows are position at s=0 (m1), position at s=h (m2),
    # velocity at s=0 (m1), velocity at s=h (m2).
    A = np.array(
        [
            [0.0, 0.0, 0.0, 1.0],
            [h ** 3, h ** 2, h, 1.0],
            [0.0, 0.0, 1.0, 0.0],
            [3 * h ** 2, 2 * h, 1.0, 0.0],
        ]
    )
    b = np.array(
        [m1.at(tnow), m2.at(tnow + h), m1.d_at(tnow), m2.d_at(tnow + h)]
    )
    coeffs = np.linalg.solve(A, b)  # TODO: handle singularities
    return PolynomialMotion(tnow, coeffs)
@dataclasses.dataclass
class PolynomialMotionBlend(Motion):
    """A piecewise blended motion with C1 smoothness.

    The blended motion consists of three pieces
    - m1 when t < start
    - blend when start <= t <= end of blending
    - m2 when end < t
    At joint points the positions and first order derivatives match up.

    If `flatten` is True, m1 and m2 will be simplified assuming that t is
    monotonically increasing and values of `t < start` are not of interest.
    Otherwise, recursive blending may lead to memory overflow.
    """

    m1: Motion
    m2: Motion
    offset: float
    horizon: float
    # Cubic blend covering [offset, offset + horizon]; built in __post_init__.
    blend: Motion = dataclasses.field(init=False)
    flatten: dataclasses.InitVar[bool] = False

    def __post_init__(self, flatten: bool):
        if flatten:
            # Drop history of m1/m2 that lies entirely before `offset`.
            self.m1 = _flatten(self.m1, self.offset)
            self.m2 = _flatten(self.m2, self.offset)
        self.blend = poly_blend_3(self.m1, self.m2, self.offset, self.horizon)

    def at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        """Returns the position at time(s)."""
        return self._compute(t, "at")

    def d_at(self, t: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
        """Returns the velocity at time(s)."""
        return self._compute(t, "d_at")

    @property
    def range(self):
        # (start, end) of the blend segment.
        return (self.offset, self.offset + self.horizon)

    def _compute(
        self, t: Union[float, np.ndarray], attr: str
    ) -> Union[float, np.ndarray]:
        """Evaluate `attr` ('at' or 'd_at') piecewise over the three motions."""
        scalar = np.isscalar(t)
        t = np.atleast_1d(t)
        low, high = self.range
        x = np.empty_like(t)
        # Before the blend: defer to m1.
        mask = t < low
        x[mask] = getattr(self.m1, attr)(t[mask])
        # After the blend: defer to m2.
        mask = t > high
        x[mask] = getattr(self.m2, attr)(t[mask])
        # Inside [low, high]: use the cubic blend.
        mask = np.logical_and(t >= low, t <= high)
        x[mask] = getattr(self.blend, attr)(t[mask])
        if scalar:
            return x.item()
        else:
            return x
def _flatten(m: Motion, offset: float) -> Motion:
    """Recursively simplify older motions to avoid stacking of blends.

    The resulting motion is identical for `t >= offset`, but may change
    for values less than offset.
    """
    if not isinstance(m, PolynomialMotionBlend):
        return m
    start, end = m.range
    if end < offset:
        # Blend finished before `offset`: only m2 matters from now on.
        return m.m2
    if start < offset:
        # Currently inside the blend segment: the blend polynomial suffices.
        return m.blend
    # Blend entirely in the future: m1 may itself be simplifiable.
    return _flatten(m.m1, offset)
|
[
"numpy.vander",
"numpy.logical_and",
"numpy.isscalar",
"numpy.asarray",
"numpy.zeros",
"numpy.empty_like",
"dataclasses.field",
"numpy.atleast_1d",
"numpy.linalg.solve"
] |
[((802, 831), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (819, 831), False, 'import dataclasses\n'), ((2260, 2276), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (2268, 2276), True, 'import numpy as np\n'), ((2285, 2296), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2293, 2296), True, 'import numpy as np\n'), ((2934, 2955), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (2949, 2955), True, 'import numpy as np\n'), ((3672, 3701), 'dataclasses.field', 'dataclasses.field', ([], {'init': '(False)'}), '(init=False)\n', (3689, 3701), False, 'import dataclasses\n'), ((1106, 1120), 'numpy.isscalar', 'np.isscalar', (['t'], {}), '(t)\n', (1117, 1120), True, 'import numpy as np\n'), ((1133, 1149), 'numpy.atleast_1d', 'np.atleast_1d', (['t'], {}), '(t)\n', (1146, 1149), True, 'import numpy as np\n'), ((1163, 1206), 'numpy.vander', 'np.vander', (['(t - self.offset)', '(self.degree + 1)'], {}), '(t - self.offset, self.degree + 1)\n', (1172, 1206), True, 'import numpy as np\n'), ((1490, 1504), 'numpy.isscalar', 'np.isscalar', (['t'], {}), '(t)\n', (1501, 1504), True, 'import numpy as np\n'), ((4448, 4462), 'numpy.isscalar', 'np.isscalar', (['t'], {}), '(t)\n', (4459, 4462), True, 'import numpy as np\n'), ((4475, 4491), 'numpy.atleast_1d', 'np.atleast_1d', (['t'], {}), '(t)\n', (4488, 4491), True, 'import numpy as np\n'), ((4536, 4552), 'numpy.empty_like', 'np.empty_like', (['t'], {}), '(t)\n', (4549, 4552), True, 'import numpy as np\n'), ((4715, 4750), 'numpy.logical_and', 'np.logical_and', (['(t >= low)', '(t <= high)'], {}), '(t >= low, t <= high)\n', (4729, 4750), True, 'import numpy as np\n'), ((1517, 1533), 'numpy.atleast_1d', 'np.atleast_1d', (['t'], {}), '(t)\n', (1530, 1533), True, 'import numpy as np\n'), ((927, 950), 'numpy.asarray', 'np.asarray', (['self.coeffs'], {}), '(self.coeffs)\n', (937, 950), True, 'import numpy as np\n')]
|
import subprocess
import argparse
from pathlib import Path
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def parse_TMscore(result):
    """Parse the stdout of the TMscore program.

    Args:
        result: decoded text output of the TMscore binary.

    Returns:
        (tmscore, gdtts, gdtha) as floats.  A score missing from the
        output is returned as None.

    FIXES: GDT-TS/GDT-HA were returned as strings while TM-score was a
    float (callers had to `astype('float')`); floats are backward
    compatible there.  Missing scores previously raised a confusing
    NameError on return.
    """
    tmscore = gdtts = gdtha = None
    for line in result.split('\n'):
        tokens = line.split()
        if not tokens:
            continue
        if tokens[0] == 'TM-score':
            tmscore = float(tokens[2])
        elif tokens[0] == 'GDT-TS-score=':
            gdtts = float(tokens[1])
        elif tokens[0] == 'GDT-HA-score=':
            gdtha = float(tokens[1])
    return tmscore, gdtts, gdtha
def run_TMscore(native_pdb, model_pdb):
    """Run the external `TMscore` binary and return its stdout as text.

    Requires `TMscore` on PATH; raises subprocess.CalledProcessError if
    the program exits non-zero.
    """
    cmd = ['TMscore', model_pdb, native_pdb, '-outfmt', '-1']
    result = subprocess.check_output(cmd)
    return result.decode('utf-8')
def get_gdt(native_pdb, model_pdb):
    """Score one model against the native structure.

    Returns:
        (tmscore, gdtts, gdtha) as parsed from the TMscore output.
    """
    return parse_TMscore(run_TMscore(native_pdb, model_pdb))
def get_gdt_for_target(native_pdb_path, model_pdb_dir, blast_xml_csv_path, out_gdt_path):
    """Score every model of a target and write the result table to CSV.

    FIX: this function duplicated get_gdt_for_target_df verbatim;
    delegating to it keeps the two from drifting apart.
    """
    df = get_gdt_for_target_df(native_pdb_path, model_pdb_dir, blast_xml_csv_path)
    df.to_csv(out_gdt_path)
def get_gdt_for_target_df(native_pdb_path, model_pdb_dir, blast_xml_csv_path) -> pd.DataFrame:
    """Score every model PDB of a target and return the merged table.

    Runs TMscore for each file in `model_pdb_dir`, then joins template
    metadata from `blast_xml_csv_path` on the template id parsed from the
    model file name.
    """
    scores = {}
    for model in model_pdb_dir.iterdir():
        scores[model.stem] = get_gdt(native_pdb_path, model)
    df = pd.DataFrame.from_dict(
        scores, orient='index', columns=['TMscore', 'GDT_TS', 'GDT_HA'])
    df = df.astype('float').sort_index()
    # Model name layout: <target>_..._<template>_<n>
    df['target'] = [idx.rsplit('_', 4)[0] for idx in df.index]
    df['template'] = [idx.split('_', 2)[2].rsplit('_', 1)[0] for idx in df.index]
    df_template = pd.read_csv(blast_xml_csv_path, index_col=0)
    return pd.merge(df, df_template, left_on='template', right_index=True, how='left')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('target', type=str, help='target name')
    parser.add_argument('--blastdb', '-b', type=str, help='blastdb name', default='pdbaa_20200712')
    parser.add_argument('--dataset_name', '-d', type=str, help='name of the dataset', default='target_10')
    args = parser.parse_args()
    # Layout (relative to the working directory):
    #   ../native_pdb/<dataset>/<target>.pdb   native structure
    #   ../pdb/<dataset>/<target>/             model PDBs
    #   ../blast_xml/<blastdb>/<dataset>/<target>.csv  template metadata
    #   ../tmscore/<dataset>/<target>.csv      output score table
    native_pdb = (Path('../native_pdb') / args.dataset_name / args.target).with_suffix('.pdb')
    model_pdb_dir = Path('../pdb')/args.dataset_name/args.target
    df_template_path = (Path('../blast_xml') / args.blastdb / args.dataset_name / args.target).with_suffix('.csv')
    out_dir = Path('../tmscore') / args.dataset_name
    out_dir.mkdir(parents=True, exist_ok=True)
    out_path = (out_dir/args.target).with_suffix('.csv')
    get_gdt_for_target(native_pdb, model_pdb_dir, df_template_path, out_path)
|
[
"pandas.DataFrame",
"argparse.ArgumentParser",
"pandas.read_csv",
"pandas.merge",
"subprocess.check_output",
"pathlib.Path",
"matplotlib.use"
] |
[((97, 118), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (111, 118), False, 'import matplotlib\n'), ((718, 746), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (741, 746), False, 'import subprocess\n'), ((1390, 1499), 'pandas.DataFrame', 'pd.DataFrame', (["{'TMscore': tmscore_array, 'GDT_TS': gdtts_array, 'GDT_HA': gdtha_array}"], {'index': 'model_array'}), "({'TMscore': tmscore_array, 'GDT_TS': gdtts_array, 'GDT_HA':\n gdtha_array}, index=model_array)\n", (1402, 1499), True, 'import pandas as pd\n'), ((1720, 1764), 'pandas.read_csv', 'pd.read_csv', (['blast_xml_csv_path'], {'index_col': '(0)'}), '(blast_xml_csv_path, index_col=0)\n', (1731, 1764), True, 'import pandas as pd\n'), ((1774, 1849), 'pandas.merge', 'pd.merge', (['df', 'df_template'], {'left_on': '"""template"""', 'right_index': '(True)', 'how': '"""left"""'}), "(df, df_template, left_on='template', right_index=True, how='left')\n", (1782, 1849), True, 'import pandas as pd\n'), ((2323, 2432), 'pandas.DataFrame', 'pd.DataFrame', (["{'TMscore': tmscore_array, 'GDT_TS': gdtts_array, 'GDT_HA': gdtha_array}"], {'index': 'model_array'}), "({'TMscore': tmscore_array, 'GDT_TS': gdtts_array, 'GDT_HA':\n gdtha_array}, index=model_array)\n", (2335, 2432), True, 'import pandas as pd\n'), ((2653, 2697), 'pandas.read_csv', 'pd.read_csv', (['blast_xml_csv_path'], {'index_col': '(0)'}), '(blast_xml_csv_path, index_col=0)\n', (2664, 2697), True, 'import pandas as pd\n'), ((2707, 2782), 'pandas.merge', 'pd.merge', (['df', 'df_template'], {'left_on': '"""template"""', 'right_index': '(True)', 'how': '"""left"""'}), "(df, df_template, left_on='template', right_index=True, how='left')\n", (2715, 2782), True, 'import pandas as pd\n'), ((2839, 2864), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2862, 2864), False, 'import argparse\n'), ((3456, 3474), 'pathlib.Path', 'Path', (['"""../tmscore"""'], {}), "('../tmscore')\n", (3460, 3474), False, 
'from pathlib import Path\n'), ((3282, 3296), 'pathlib.Path', 'Path', (['"""../pdb"""'], {}), "('../pdb')\n", (3286, 3296), False, 'from pathlib import Path\n'), ((3185, 3206), 'pathlib.Path', 'Path', (['"""../native_pdb"""'], {}), "('../native_pdb')\n", (3189, 3206), False, 'from pathlib import Path\n'), ((3351, 3371), 'pathlib.Path', 'Path', (['"""../blast_xml"""'], {}), "('../blast_xml')\n", (3355, 3371), False, 'from pathlib import Path\n')]
|
import cv2
def img2sketch(photo, k_size):
    """Render an image file as a pencil sketch.

    Saves the result to 'sketch.jpg' and shows it in a window (blocks
    until a key is pressed).

    Args:
        photo: path of the input image.
        k_size: Gaussian blur kernel size (should be odd).
    """
    grey = cv2.cvtColor(cv2.imread(photo), cv2.COLOR_BGR2GRAY)
    # Classic colour-dodge sketch: divide the grey image by the
    # re-inverted Gaussian blur of its inverse.
    blurred_inverse = cv2.GaussianBlur(cv2.bitwise_not(grey), (k_size, k_size), 0)
    sketch = cv2.divide(grey, cv2.bitwise_not(blurred_inverse), scale=256.0)
    cv2.imwrite('sketch.jpg', sketch)
    cv2.imshow('sketch image', sketch)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Demo invocation: sketch 'd.jpg' with a 111x111 blur kernel.
img2sketch(photo='d.jpg', k_size=111)
|
[
"cv2.GaussianBlur",
"cv2.bitwise_not",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"cv2.divide",
"cv2.imshow"
] |
[((71, 88), 'cv2.imread', 'cv2.imread', (['photo'], {}), '(photo)\n', (81, 88), False, 'import cv2\n'), ((138, 175), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (150, 175), False, 'import cv2\n'), ((214, 239), 'cv2.bitwise_not', 'cv2.bitwise_not', (['grey_img'], {}), '(grey_img)\n', (229, 239), False, 'import cv2\n'), ((304, 353), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['invert_img', '(k_size, k_size)', '(0)'], {}), '(invert_img, (k_size, k_size), 0)\n', (320, 353), False, 'import cv2\n'), ((399, 424), 'cv2.bitwise_not', 'cv2.bitwise_not', (['blur_img'], {}), '(blur_img)\n', (414, 424), False, 'import cv2\n'), ((494, 540), 'cv2.divide', 'cv2.divide', (['grey_img', 'invblur_img'], {'scale': '(256.0)'}), '(grey_img, invblur_img, scale=256.0)\n', (504, 540), False, 'import cv2\n'), ((567, 604), 'cv2.imwrite', 'cv2.imwrite', (['"""sketch.jpg"""', 'sketch_img'], {}), "('sketch.jpg', sketch_img)\n", (578, 604), False, 'import cv2\n'), ((634, 672), 'cv2.imshow', 'cv2.imshow', (['"""sketch image"""', 'sketch_img'], {}), "('sketch image', sketch_img)\n", (644, 672), False, 'import cv2\n'), ((677, 691), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (688, 691), False, 'import cv2\n'), ((697, 720), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (718, 720), False, 'import cv2\n')]
|