text stringlengths 38 1.54M |
|---|
import fitz
class PdfHandler():
    """Thin wrapper around a PyMuPDF document that caches per-page
    display lists so repeated renders of the same page are cheap."""

    def __init__(self, filename):
        """Open *filename* and pre-build a display list for every page.

        Args:
            filename (str): path to the PDF file to open.
        """
        self.pdf = fitz.open(filename)
        # One cached fitz display list per page (rebuilt lazily if cleared).
        self.pages = [self.pdf[pageNum].getDisplayList()
                      for pageNum in range(len(self.pdf))]

    def _displayList(self, pageNum):
        """Return the cached display list for *pageNum*, rebuilding it if
        the cache slot is empty."""
        if self.pages[pageNum] is None:  # was `== None`; identity test is correct
            self.pages[pageNum] = self.pdf[pageNum].getDisplayList()
        return self.pages[pageNum]

    def _pixmap(self, pageNum):
        """Render page *pageNum* at 1:1 scale, no alpha channel."""
        mat_0 = fitz.Matrix(1, 1)
        return self._displayList(pageNum).getPixmap(matrix=mat_0, alpha=False)

    def getImage(self, pageNum=0):
        """Return page *pageNum* rendered as PPM image bytes."""
        return self._pixmap(pageNum).getImageData("ppm")

    def savePageAsImage(self, filename, pageNum=0):
        """Render page *pageNum* and write it to *filename* as a PNG."""
        self._pixmap(pageNum).writePNG(filename)

    def saveAs(self, filename):
        """Save the (possibly modified) document to *filename*."""
        self.pdf.save(filename)
|
import FWCore.ParameterSet.Config as cms

# CMSSW job: generate single 100 GeV pions with a flat-pT particle gun and
# run FastSim tracking plus the high-purity track validation on them.
process = cms.Process("VALID")
# Include the RandomNumberGeneratorService definition
process.load("IOMC.RandomEngine.IOMC_cff")
# Famos sequences
process.load("FastSimulation.Configuration.CommonInputsFake_cff")
process.load("FastSimulation.Configuration.FamosSequences_cff")
process.load("FastSimulation.Validation.TrackValidation_HighPurity_cff")
# endpath outpath = { o1 }
# Keep the logging output to a nice level #
process.load("FWCore.MessageService.MessageLogger_cfi")
# Number of gun events to generate.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100000)
)
# Particle gun: pions (PDG 211) at fixed pT = 100 GeV (MinPt == MaxPt),
# flat in eta in [-3, 3] and in phi over the full circle.
process.source = cms.Source("FlatRandomPtGunSource",
    PGunParameters = cms.untracked.PSet(
        MaxPt = cms.untracked.double(100.0),
        MinPt = cms.untracked.double(100.0),
        PartID = cms.untracked.vint32(211),
        MaxEta = cms.untracked.double(3.0),
        MaxPhi = cms.untracked.double(3.14159265359),
        MinEta = cms.untracked.double(-3.0),
        MinPhi = cms.untracked.double(-3.14159265359) ## it must be in radians
    ),
    Verbosity = cms.untracked.int32(0), ## for printouts, set it to 1 (or greater)
    firstRun = cms.untracked.uint32(1)
)
# Event output file.
process.o1 = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('test_pi_100GeV.root')
)
process.p1 = cms.Path(process.famosWithTracks*process.valid)
#process.load("Configuration.StandardSequences.MagneticField_40T_cff")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
process.VolumeBasedMagneticFieldESProducer.useParametrizedTrackerField = True
# Tracking-only FastSim: calorimetry simulation is switched off.
process.famosSimHits.SimulateCalorimetry = False
process.famosSimHits.SimulateTracking = True
# Validation histograms and log destinations.
process.multiTrackValidator.outputFile = 'valid_pi_100GeV.root'
process.MessageLogger.destinations = ['detailedInfo_pi100.txt']
|
import numpy as np
def agregar_imagen(fondo, imagen, x, y):
    """Paste ``imagen`` onto ``fondo`` in place at offset (x, y).

    If ``imagen`` has a fourth (alpha) channel it is alpha-blended over the
    background; otherwise the background pixels are simply overwritten.
    Assumes ``imagen`` fits entirely inside ``fondo`` at that offset —
    TODO confirm with callers.
    """
    alto = imagen.shape[0]
    ancho = imagen.shape[1]
    destino = fondo[y:y + alto, x:x + ancho, :]
    if imagen.shape[-1] == 4:
        # Normalise the alpha channel to [0, 1].
        opacidad = imagen[:, :, 3] / 255
        # Foreground contribution: each RGB channel scaled by its alpha.
        primer_plano = np.zeros((alto, ancho, 3))
        for canal in range(3):
            primer_plano[:, :, canal] = imagen[:, :, canal] * opacidad
        # Classic alpha blending: (1 - a) * background + a * foreground.
        peso_fondo = 1 - np.stack([opacidad] * 3, axis=-1)
        destino[...] = peso_fondo * destino + primer_plano
    else:
        # No alpha information: replace the background region outright.
        destino[...] = imagen
#!/usr/bin/env python3
import numpy as np
import copy as cp
from tqdm import tqdm
try:
import lib.metrics as metrics
except ModuleNotFoundError:
import metrics
import sklearn.model_selection as sk_modsel
import sklearn.metrics as sk_metrics
__all__ = ["kFoldCrossValidation", "MCCrossValidation"]
class __CV_core:
    """Core class for performing k-fold cross validation.

    Holds deep copies of the data and the regression instance; subclasses
    implement cross_validate() and set self.beta_coefs / beta_coefs_var.
    """

    # Default until a regression instance is attached in __init__.
    _reg = None

    def __init__(self, X_data, y_data, reg):
        """Initializer for Cross Validation.

        Args:
            X_data (ndarray): Design matrix on the shape (N, p)
            y_data (ndarray): y data on the shape (N, 1). Data to be
                approximated.
            reg (Regression Instance): an initialized regression method
        """
        assert X_data.shape[0] == len(
            y_data), "x and y data not of equal lengths"
        assert hasattr(reg, "fit"), ("regression method must have "
                                     "attribute fit()")
        assert hasattr(reg, "predict"), ("regression method must have "
                                         "attribute predict()")
        # Deep copies so cross validation never mutates the caller's arrays.
        self.X_data = cp.deepcopy(X_data)
        self.y_data = cp.deepcopy(y_data)
        self._reg = reg

    @property
    def reg(self):
        """The regression instance fitted on each fold."""
        return self._reg

    @reg.setter
    def reg(self, reg):
        """Args:
            reg (regression class): regression class to use
        """
        self._reg = reg

    @property
    def coef_(self):
        # BUG FIX: the old fget returned the nonexistent `coef_coefs` and was
        # only saved by a redundant @coef_.getter override; return the mean
        # per-fold coefficients (set by cross_validate()) directly.
        return self.beta_coefs

    @property
    def coef_var(self):
        # Variance of the per-fold coefficient vectors.
        return self.beta_coefs_var
class kFoldCrossValidation(__CV_core):
    """Class for performing k-fold cross validation."""

    def cross_validate(self, k_splits=5, test_percent=0.2, shuffle=False,
                       X_test=None, y_test=None):
        """Runs k-fold CV; stores mse, bias, r2, var and coefficient
        statistics on the instance.

        Args:
            k_splits (int): number of folds the training data is split
                into. Default is 5.
            test_percent (float): size of test data in percent. Optional,
                default is 0.2.
            shuffle (bool): if True, shuffle before the train/test split.
                Optional, default is False.
            X_test (ndarray): design matrix for test values, shape (N, p),
                optional.
            y_test (ndarray): y test data on shape (N, 1), optional.
        """
        # Use the provided hold-out set when given, otherwise split one off.
        if X_test is None and y_test is None:
            # BUG FIX: `shuffle` was hard-coded to False here, silently
            # ignoring the caller's argument.
            X_train, X_test, y_train, y_test = \
                sk_modsel.train_test_split(self.X_data, self.y_data,
                                           test_size=test_percent,
                                           shuffle=shuffle)
        else:
            # If X_test and y_test are provided, use those as test values.
            X_train = self.X_data
            y_train = self.y_data
            X_test = cp.deepcopy(X_test)
            y_test = cp.deepcopy(y_test)
        test_size = y_test.shape[0]

        # Splits k-fold train data into k actual folds.
        X_subdata = np.array_split(X_train, k_splits, axis=0)
        y_subdata = np.array_split(y_train, k_splits, axis=0)

        beta_coefs = []
        self.y_pred_list = np.empty((test_size, k_splits))

        for ik in tqdm(range(k_splits), desc="k-fold Cross Validation"):
            # Train on every fold except fold ik.
            set_list = list(range(k_splits))
            set_list.pop(ik)
            k_X_train = np.concatenate([X_subdata[d] for d in set_list])
            k_y_train = np.concatenate([y_subdata[d] for d in set_list])
            self.reg.fit(k_X_train, k_y_train)
            # Each fold predicts on the same common hold-out set.
            self.y_pred_list[:, ik] = self.reg.predict(X_test).ravel()
            beta_coefs.append(self.reg.coef_)

        # Mean Square Error, mean((y - y_approx)**2)
        _mse = (y_test - self.y_pred_list)**2
        self.mse = np.mean(np.mean(_mse, axis=1, keepdims=True))
        # Bias, (y - mean(y_approx))^2
        _mean_pred = np.mean(self.y_pred_list, axis=1, keepdims=True)
        self.bias = np.mean((y_test - _mean_pred)**2)
        # R^2 of the fold-averaged prediction.
        self.r2 = sk_metrics.r2_score(y_test, self.y_pred_list.mean(axis=1))
        # Variance, var(y_predictions)
        self.var = np.mean(np.var(self.y_pred_list, axis=1, keepdims=True))

        beta_coefs = np.asarray(beta_coefs)
        # NOTE(review): axis=1 aggregates over coefficients rather than
        # folds; kkFoldCrossValidation uses axis=0 — confirm which is meant.
        self.beta_coefs_var = beta_coefs.var(axis=1)
        self.beta_coefs = beta_coefs.mean(axis=1)
        # Assumes column 1 of the design matrix holds the raw x values
        # (polynomial design matrix with bias column) — TODO confirm.
        self.x_pred_test = X_test[:, 1]
        self.y_pred = np.mean(self.y_pred_list, axis=1)
        self.y_pred_var = np.var(self.y_pred_list, axis=1)
class kkFoldCrossValidation(__CV_core):
    """A nested k fold CV for getting bias."""

    def cross_validate(self, k_splits=4, test_percent=0.2, X_test=None,
                       y_test=None, shuffle=False):
        """Nested (k x k)-fold CV; stores mse, bias, r2, var and
        coefficient statistics on the instance.

        Args:
            k_splits (float): Number of k folds to make in the data. Optional,
                default is 4 folds.
            test_percent (float): Percentage of data set to set aside for
                testing. Optional, default is 0.2.
            X_test (ndarray): design matrix test data, shape (N,p). Optional,
                default is using 0.2 percent of data as test data.
            y_test (ndarray): design matrix test data. Optional, default is
                default is using 0.2 percent of data as test data.
            shuffle (bool): if True, shuffle before the train/test split.
        """
        # Checks if we have provided test data or not
        if isinstance(X_test, type(None)) and \
                isinstance(y_test, type(None)):
            # Splits X data and design matrix data
            X_train, X_test, y_train, y_test = \
                sk_modsel.train_test_split(self.X_data, self.y_data,
                                           test_size=test_percent,
                                           shuffle=shuffle)
        else:
            # If X_test and y_test is provided, we simply use those as test
            # values.
            X_train = self.X_data
            y_train = self.y_data
            X_test = cp.deepcopy(X_test)
            y_test = cp.deepcopy(y_test)

        N_total_size = X_train.shape[0]
        # Splits dataset into a holdout test chuck to find bias, variance ect
        # on and one to perform k-fold CV on.
        holdout_test_size = N_total_size // k_splits
        # In case we have an uneven split: trim so np.split divides evenly.
        if (N_total_size % k_splits != 0):
            X_train = X_train[:holdout_test_size*k_splits]
            y_train = y_train[:holdout_test_size*k_splits]
        # Splits data
        X_data = np.split(X_train, k_splits, axis=0)
        y_data = np.split(y_train, k_splits, axis=0)

        # Sets up some arrays for storing the different MSE, bias, var, R^2
        # scores (one entry per outer holdout fold).
        mse_arr = np.empty(k_splits)
        r2_arr = np.empty(k_splits)
        var_arr = np.empty(k_splits)
        bias_arr = np.empty(k_splits)
        beta_coefs = []
        x_pred_test = []
        y_pred_mean_list = []
        y_pred_var_list = []

        for i_holdout in tqdm(range(k_splits),
                              desc="Nested k fold Cross Validation"):
            # Gets the testing holdout data to be used. Makes sure to use
            # every holdout test data once.
            # NOTE(review): X_holdout/y_holdout are never used afterwards —
            # predictions below are made on the global X_test instead.
            X_holdout = X_data[i_holdout]
            y_holdout = y_data[i_holdout]
            # Sets up indexes
            holdout_set_list = list(range(k_splits))
            holdout_set_list.pop(i_holdout)
            # Sets up new holdout data sets
            X_holdout_train = np.concatenate(
                [X_data[d] for d in holdout_set_list])
            y_holdout_train = np.concatenate(
                [y_data[d] for d in holdout_set_list])
            # Splits dataset into managable k fold tests
            # NOTE(review): test_size is computed but never used.
            test_size = X_holdout_train.shape[0] // k_splits
            # Splits kfold train data into k actual folds
            X_subdata = np.array_split(X_holdout_train, k_splits, axis=0)
            y_subdata = np.array_split(y_holdout_train, k_splits, axis=0)
            # Stores the test values from each k trained data set in an array
            # NOTE(review): r2_list is never filled or read.
            r2_list = np.empty(k_splits)
            y_pred_list = np.empty((X_test.shape[0], k_splits))
            # Loops over all k-k folds, ensuring every fold is used as a
            # holdout set.
            for ik in range(k_splits):
                # Sets up indexes
                set_list = list(range(k_splits))
                set_list.pop(ik)
                # Sets up new data set
                k_X_train = np.concatenate([X_subdata[d] for d in set_list])
                k_y_train = np.concatenate([y_subdata[d] for d in set_list])
                # Trains method bu fitting data
                self.reg.fit(k_X_train, k_y_train)
                # Appends prediction and beta coefs
                y_pred_list[:, ik] = self.reg.predict(X_test).ravel()
                beta_coefs.append(self.reg.coef_)
            # Mean Square Error, mean((y - y_approx)**2)
            _mse = (y_test - y_pred_list)**2
            mse_arr[i_holdout] = np.mean(np.mean(_mse, axis=1, keepdims=True))
            # Bias, (y - mean(y_approx))^2
            _mean_pred = np.mean(y_pred_list, axis=1, keepdims=True)
            _bias = y_test - _mean_pred
            bias_arr[i_holdout] = np.mean(_bias**2)
            # R^2 score, 1 - sum(y-y_approx)/sum(y-mean(y))
            r2_arr[i_holdout] = metrics.r2(
                y_test, y_pred_list.mean(axis=1, keepdims=True))
            # Variance, var(y_predictions)
            _var = np.var(y_pred_list, axis=1, keepdims=True)
            var_arr[i_holdout] = np.mean(_var)
            y_pred_mean_list.append(np.mean(y_pred_list, axis=1))
            y_pred_var_list.append(np.var(y_pred_list, axis=1))

        # Aggregate the per-holdout statistics.
        self.var = np.mean(var_arr)
        self.bias = np.mean(bias_arr)
        self.r2 = np.mean(r2_arr)
        # self.r2 = sk_metrics.r2_score(y_test, y_predict.mean(axis=1))
        self.mse = np.mean(mse_arr)
        beta_coefs = np.asarray(beta_coefs)
        self.beta_coefs_var = np.asarray(beta_coefs).var(axis=0)
        self.beta_coefs = np.asarray(beta_coefs).mean(axis=0)
        # Assumes column 1 of the design matrix holds the raw x values —
        # TODO confirm.
        self.x_pred_test = X_test[:, 1]
        self.y_pred = np.array(y_pred_mean_list).mean(axis=0)
        self.y_pred_var = np.array(y_pred_var_list).mean(axis=0)
class MCCrossValidation(__CV_core):
    """Monte Carlo cross validation.

    https://stats.stackexchange.com/questions/51416/k-fold-vs-monte-carlo-cross-validation
    """

    def cross_validate(self, N_mc, k_splits=4, test_percent=0.2, X_test=None,
                       y_test=None, shuffle=False):
        """Runs MC-CV; stores mse, bias, r2, var and coefficient statistics
        on the instance.

        Args:
            N_mc (int): Number of cross validations to perform
            k_splits (float): Number of k folds to make in the data. Optional,
                default is 4 folds.
            test_percent (float): Percentage of data set to set aside for
                testing. Optional, default is 0.2.
            X_test (ndarray): Design matrix test data, shape (N,p). Optional,
                default is using 0.2 percent of data as test data.
            y_test (ndarray): Design matrix test data. Optional, default is
                default is using 0.2 percent of data as test data.
            shuffle (bool): if True, will shuffle the data before splitting
        """
        # Use the provided hold-out set when given, otherwise split one off.
        if X_test is None and y_test is None:
            X_train, X_test, y_train, y_test = \
                sk_modsel.train_test_split(self.X_data, self.y_data,
                                           test_size=test_percent,
                                           shuffle=shuffle)
        else:
            # If X_test and y_test are provided, use those as test values.
            X_train = self.X_data
            y_train = self.y_data
            X_test = cp.deepcopy(X_test)
            y_test = cp.deepcopy(y_test)

        # Size of the random subset withheld from training on each MC draw.
        mc_test_size = X_train.shape[0] // k_splits
        # All possible indices available
        mc_indices = list(range(X_train.shape[0]))

        beta_coefs = []
        self.y_pred_list = np.empty((y_test.shape[0], N_mc))

        for i_mc in tqdm(range(N_mc), desc="Monte Carlo Cross Validation"):
            # BUG FIX: np.random.choice defaults to replace=True, which
            # contradicted the intended "no replacement" draw and made the
            # withheld set smaller than mc_test_size on duplicate draws.
            mccv_test_indexes = np.random.choice(mc_indices, mc_test_size,
                                                 replace=False)
            mccv_train_indices = np.array(
                list(set(mc_indices) - set(mccv_test_indexes)))
            # Train on the remaining indices, predict the common test set.
            self.reg.fit(X_train[mccv_train_indices],
                         y_train[mccv_train_indices])
            self.y_pred_list[:, i_mc] = self.reg.predict(X_test).ravel()
            beta_coefs.append(self.reg.coef_)

        # Mean Square Error, mean((y - y_approx)**2)
        _mse = (y_test - self.y_pred_list)**2
        self.mse = np.mean(np.mean(_mse, axis=1, keepdims=True))
        # Bias, (y - mean(y_approx))^2
        _mean_pred = np.mean(self.y_pred_list, axis=1, keepdims=True)
        self.bias = np.mean((y_test - _mean_pred)**2)
        # R^2 of the sample-averaged prediction.
        self.r2 = sk_metrics.r2_score(y_test, self.y_pred_list.mean(axis=1))
        # Variance, var(y_predictions)
        self.var = np.mean(np.var(self.y_pred_list, axis=1, keepdims=True))

        beta_coefs = np.asarray(beta_coefs)
        # NOTE(review): axis=1 aggregates over coefficients, not MC samples;
        # kkFoldCrossValidation uses axis=0 — confirm which is intended.
        self.beta_coefs_var = beta_coefs.var(axis=1)
        self.beta_coefs = beta_coefs.mean(axis=1)
        # Assumes column 1 of the design matrix holds the raw x values —
        # TODO confirm.
        self.x_pred_test = X_test[:, 1]
        self.y_pred = np.mean(self.y_pred_list, axis=1)
        self.y_pred_var = np.var(self.y_pred_list, axis=1)
def kFoldCVWrapper(X, y, reg, k=4, test_percent=0.4,
                   shuffle=False, X_test=None, y_test=None):
    """Convenience front-end for the manual k-fold cross validation.

    Args:
        X (ndarray): design matrix on the shape (N, p)
        y (ndarray): y data on the shape (N, 1), to be approximated.
        reg (Regression Instance): an initialized regression method
        k (int): optional, number of k folds. Default is 4.
        test_percent (float): optional, size of testing data. Default is 0.4.
        shuffle (bool): optional, if the data will be shuffled. Default is
            False.
        X_test (ndarray): design matrix for test values, shape (N, p).
        y_test (ndarray): y test data on shape (N, 1).

    Return:
        dictionary with r2, mse, bias, var, diff, coef, coef_var
    """
    cv = kFoldCrossValidation(X, y, reg)
    cv.cross_validate(k_splits=k, test_percent=test_percent,
                      shuffle=shuffle, X_test=X_test, y_test=y_test)
    summary = {
        "r2": cv.r2,
        "mse": cv.mse,
        "bias": cv.bias,
        "var": cv.var,
        "diff": cv.mse - cv.bias - cv.var,
        "coef": cv.beta_coefs,
        "coef_var": cv.beta_coefs_var,
    }
    # , "x_pred": cv.x_pred_test,
    # "y_pred": cv.y_pred, "y_pred_var": cv.y_pred_var}
    return summary
def SKLearnkFoldCV(X, y, reg, k=4, test_percent=0.4,
                   shuffle=False, X_test=None, y_test=None):
    """k-fold Cross Validation using SciKit Learn's KFold splitter.

    Args:
        X (ndarray): design matrix on the shape (N, p)
        y (ndarray): y data on the shape (N, 1). Data to be
            approximated.
        reg (Regression Instance): an initialized regression method
        k (int): number of k folds. Optional, default is 4.
        test_percent (float): size of testing data. Optional, default is 0.4.
        shuffle (bool): if True, shuffle before splitting. Default is False.
        X_test (ndarray): design matrix for test values, shape (N, p).
        y_test (ndarray): y test data on shape (N, 1).

    Return:
        dictionary with r2, mse, bias, var, diff, coef, coef_var
    """
    # Use the provided hold-out set when given, otherwise split one off.
    if X_test is None and y_test is None:
        # BUG FIX: shuffle was hard-coded to False, ignoring the argument.
        X_train, X_test, y_train, y_test = \
            sk_modsel.train_test_split(X, y, test_size=test_percent,
                                       shuffle=shuffle)
    else:
        X_train = X
        y_train = y  # BUG FIX: was the no-op `y = y`, so y_train was undefined

    # Preps lists to be filled
    y_pred_list = np.empty((y_test.shape[0], k))
    beta_coefs = []

    kfcv = sk_modsel.KFold(n_splits=k, shuffle=shuffle)
    for i, (train_index, test_index) in tqdm(
            enumerate(kfcv.split(X_train)), desc="SK-learn k-fold CV"):
        # Fit on this fold's training rows; predict the common test set.
        reg.fit(X_train[train_index], y_train[train_index])
        y_pred_list[:, i] = reg.predict(X_test).ravel()
        beta_coefs.append(reg.coef_)

    # Mean Square Error, mean((y - y_approx)**2)
    _mse = (y_test - y_pred_list)**2
    mse = np.mean(np.mean(_mse, axis=1, keepdims=True))
    # Bias, (y - mean(y_approx))^2
    _mean_pred = np.mean(y_pred_list, axis=1, keepdims=True)
    bias = np.mean((y_test - _mean_pred)**2)
    # Variance, var(y_predictions)
    var = np.mean(np.var(y_pred_list, axis=1, keepdims=True))
    # R^2 of the fold-averaged prediction (the per-fold r2 list the old
    # code built was dead — its mean was immediately overwritten).
    r2 = sk_metrics.r2_score(y_test, y_pred_list.mean(axis=1))

    beta_coefs = np.asarray(beta_coefs)
    # BUG FIX: "coef" and "coef_var" were swapped (coef held the variance),
    # inconsistent with kFoldCVWrapper.
    return {"r2": r2, "mse": mse, "bias": bias, "var": var,
            "diff": mse - bias - var,
            "coef": beta_coefs.mean(axis=1),
            "coef_var": beta_coefs.var(axis=1)}
def kkfoldCVWrapper(X, y, reg, k=4, test_percent=0.4,
                    shuffle=False, X_test=None, y_test=None):
    """Convenience front-end for the nested (kk) k-fold cross validation.

    Args:
        X (ndarray): design matrix on the shape (N, p)
        y (ndarray): y data on the shape (N, 1), to be approximated.
        reg (Regression Instance): an initialized regression method
        k (int): optional, number of k folds. Default is 4.
        test_percent (float): optional, size of testing data. Default is 0.4.
        shuffle (bool): optional, if the data will be shuffled. Default is
            False.
        X_test (ndarray): design matrix for test values, shape (N, p).
        y_test (ndarray): y test data on shape (N, 1).

    Return:
        dictionary with r2, mse, bias, var, diff, coef, coef_var
    """
    cv = kkFoldCrossValidation(X, y, reg)
    cv.cross_validate(k_splits=k, test_percent=test_percent,
                      shuffle=shuffle, X_test=X_test, y_test=y_test)
    return {
        "r2": cv.r2,
        "mse": cv.mse,
        "bias": cv.bias,
        "var": cv.var,
        "diff": cv.mse - cv.bias - cv.var,
        "coef": cv.beta_coefs,
        "coef_var": cv.beta_coefs_var,
    }
def MCCVWrapper(X, y, reg, N_mc, k=4, test_percent=0.4, shuffle=False,
                X_test=None, y_test=None):
    """Convenience front-end for Monte Carlo cross validation.

    Args:
        X (ndarray): design matrix on the shape (N, p)
        y (ndarray): y data on the shape (N, 1), to be approximated.
        reg (Regression Instance): an initialized regression method
        N_mc (int): number of MC samples to use.
        k (int): optional, number of k folds. Default is 4.
        test_percent (float): optional, size of testing data. Default is 0.4.
        shuffle (bool): optional, if the data will be shuffled. Default is
            False.
        X_test (ndarray): design matrix for test values, shape (N, p).
        y_test (ndarray): y test data on shape (N, 1).

    Return:
        dictionary with r2, mse, bias, var, diff, coef, coef_var
    """
    cv = MCCrossValidation(X, y, reg)
    cv.cross_validate(N_mc, k_splits=k, test_percent=test_percent,
                      X_test=X_test, y_test=y_test, shuffle=shuffle)
    return {
        "r2": cv.r2,
        "mse": cv.mse,
        "bias": cv.bias,
        "var": cv.var,
        "diff": cv.mse - cv.bias - cv.var,
        "coef": cv.beta_coefs,
        "coef_var": cv.beta_coefs_var,
    }
def SKLearnMCCV(X, y, reg, N_bs, k=4, test_percent=0.4):
    """Placeholder for Monte Carlo CV via SciKit-Learn; not implemented."""
    raise NotImplementedError("SKLearnMCCV")
def __compare_kfold_cv():
    """Runs a comparison between implemented method of k-fold Cross Validation
    and SK-learn's implementation of SK-learn. Since they both are
    deterministic, should the answer be exactly the same."""
    from regression import OLSRegression
    import sklearn.preprocessing as sk_preproc
    import sklearn.linear_model as sk_model
    import copy as cp
    # Polynomial design-matrix setup.
    deg = 2
    poly = sk_preproc.PolynomialFeatures(degree=deg, include_bias=True)
    k_splits = 4
    # N_bs = 10000
    # Initial values
    n = 100
    noise = 0.3
    np.random.seed(1234)
    test_percent = 0.35
    shuffle = False
    # Sets up random matrices
    x = np.random.rand(n, 1)
    # x = np.c_[np.linspace(0,1,n)]

    def func_excact(_x):
        # Noise-free target so both CV implementations see identical data.
        return 2*_x*_x + np.exp(-2*_x)  # + noise * \
        #np.random.randn(_x.shape[0], _x.shape[1])

    y = func_excact(x)
    X = poly.fit_transform(x)
    # Manual k-fold CV.
    kfcv_my = kFoldCVWrapper(
        cp.deepcopy(X), cp.deepcopy(y),
        sk_model.LinearRegression(fit_intercept=False), k=k_splits,
        test_percent=test_percent, shuffle=shuffle)
    print("Manual implementation:")
    print("r2:", kfcv_my["r2"], "mse:", kfcv_my["mse"],
          "var: {:.16f}".format(kfcv_my["var"]),
          "bias: {:.16f}".format(kfcv_my["bias"]),
          "diff: {:.16f}".format(
              abs(kfcv_my["mse"] - kfcv_my["var"] - kfcv_my["bias"])))
    # Same data through the SK-learn-based implementation.
    kfcv_sk = SKLearnkFoldCV(
        cp.deepcopy(X), cp.deepcopy(y),
        sk_model.LinearRegression(fit_intercept=False), k=k_splits,
        test_percent=test_percent, shuffle=shuffle)
    print("SK-Learn:")
    print("r2:", kfcv_sk["r2"], "mse:", kfcv_sk["mse"],
          "var: {:.16f}".format(kfcv_sk["var"]),
          "bias: {:.16f}".format(kfcv_sk["bias"]),
          "diff: {:.16f}".format(
              abs(kfcv_sk["mse"] - kfcv_sk["var"] - kfcv_sk["bias"])))
def __compare_mc_cv():
    """Placeholder: manual-vs-SK-learn MC CV comparison is not written yet."""
    raise NotImplementedError("__compare_mc_cv")
def __test_cross_validation_methods():
    """Smoke test: compares plain OLS against the k-fold, nested k-fold and
    Monte Carlo CV implementations on a known 1D polynomial target, printing
    the error decompositions and plotting the predictions."""
    # A small implementation of a test case
    from regression import OLSRegression
    import sklearn.preprocessing as sk_preproc
    import matplotlib.pyplot as plt
    # Initial values
    n = 100
    N_bs = 200
    deg = 2
    k_splits = 4
    test_percent = 0.35
    noise = 0.3
    np.random.seed(1234)
    # Sets up random matrices
    x = np.random.rand(n, 1)
    y = 2*x*x + np.exp(-2*x) + noise*np.random.randn(x.shape[0], x.shape[1])
    # Sets up design matrix
    poly = sk_preproc.PolynomialFeatures(degree=deg, include_bias=True)
    X = poly.fit_transform(x)
    # Performs regression
    reg = OLSRegression()
    reg.fit(X, y)
    y_predict = reg.predict(X)
    print("Regular linear regression")
    print("R2: {:-20.16f}".format(reg.score(X, y)))
    print("MSE: {:-20.16f}".format(metrics.mse(y, y_predict)))
    print("Bias^2:{:-20.16f}".format(metrics.bias(y, y_predict)))
    # Small plotter
    plt.plot(x, y, "o", label="data")
    plt.plot(x, y_predict, "o",
             label=r"Pred, $R^2={:.4f}$".format(reg.score(X, y)))
    print("k-fold Cross Validation")
    kfcv = kFoldCrossValidation(X, y, OLSRegression())
    kfcv.cross_validate(k_splits=k_splits,
                        test_percent=test_percent)
    print("R2: {:-20.16f}".format(kfcv.r2))
    print("MSE: {:-20.16f}".format(kfcv.mse))
    print("Bias^2:{:-20.16f}".format(kfcv.bias))
    print("Var(y):{:-20.16f}".format(kfcv.var))
    print("MSE = Bias^2 + Var(y) = ")
    print("{} = {} + {} = {}".format(kfcv.mse, kfcv.bias, kfcv.var,
                                     kfcv.bias + kfcv.var))
    print("Diff: {}".format(abs(kfcv.bias + kfcv.var - kfcv.mse)))
    plt.errorbar(kfcv.x_pred_test, kfcv.y_pred,
                 yerr=np.sqrt(kfcv.y_pred_var), fmt="o",
                 label=r"k-fold CV, $R^2={:.4f}$".format(kfcv.r2))
    print("kk Cross Validation")
    kkcv = kkFoldCrossValidation(X, y, OLSRegression())
    kkcv.cross_validate(k_splits=k_splits,
                        test_percent=test_percent)
    print("R2: {:-20.16f}".format(kkcv.r2))
    print("MSE: {:-20.16f}".format(kkcv.mse))
    print("Bias^2:{:-20.16f}".format(kkcv.bias))
    print("Var(y):{:-20.16f}".format(kkcv.var))
    print("MSE = Bias^2 + Var(y) = ")
    print("{} = {} + {} = {}".format(kkcv.mse, kkcv.bias, kkcv.var,
                                     kkcv.bias + kkcv.var))
    print("Diff: {}".format(abs(kkcv.bias + kkcv.var - kkcv.mse)))
    plt.errorbar(kkcv.x_pred_test, kkcv.y_pred,
                 yerr=np.sqrt(kkcv.y_pred_var), fmt="o",
                 label=r"kk-fold CV, $R^2={:.4f}$".format(kkcv.r2))
    print("Monte Carlo Cross Validation")
    mccv = MCCrossValidation(X, y, OLSRegression())
    mccv.cross_validate(N_bs, k_splits=k_splits,
                        test_percent=test_percent)
    print("R2: {:-20.16f}".format(mccv.r2))
    print("MSE: {:-20.16f}".format(mccv.mse))
    print("Bias^2:{:-20.16f}".format(mccv.bias))
    print("Var(y):{:-20.16f}".format(mccv.var))
    print("MSE = Bias^2 + Var(y) = ")
    print("{} = {} + {} = {}".format(mccv.mse, mccv.bias, mccv.var,
                                     mccv.bias + mccv.var))
    print("Diff: {}".format(abs(mccv.bias + mccv.var - mccv.mse)))
    print("\nCross Validation methods tested.")
    plt.errorbar(mccv.x_pred_test, mccv.y_pred,
                 yerr=np.sqrt(mccv.y_pred_var), fmt="o",
                 label=r"MC CV, $R^2={:.4f}$".format(mccv.r2))
    plt.xlabel(r"$x$")
    plt.ylabel(r"$y$")
    plt.title(r"$y=2x^2 + e^{-2x}$")
    # NOTE(review): this re-draws y but the value is never used before
    # show() — looks like a leftover; confirm before removing.
    y = 2*x*x + np.exp(-2*x) + noise*np.random.randn(x.shape[0], x.shape[1])
    plt.legend()
    plt.show()
def __test_bias_variance_kfcv():
    """Checks bias-variance relation.

    Runs the manual k-fold CV wrapper for polynomial degrees 1..30 and
    plots MSE, variance and bias against model complexity.
    """
    from regression import OLSRegression
    import sklearn.linear_model as sk_model
    import sklearn.preprocessing as sk_preproc
    import matplotlib.pyplot as plt
    # Initial values
    N_polynomials = 30
    deg_list = np.linspace(1, N_polynomials, N_polynomials, dtype=int)
    n = 500
    test_percent = 0.2
    noise = 0.1
    np.random.seed(2018)
    x = np.random.rand(n, 1)
    y = 2*x*x + np.exp(-2*x) + noise * \
        np.random.randn(x.shape[0], x.shape[1])
    # Fixed train/test split shared across all degrees.
    x_train, x_test, y_train, y_test = \
        sk_modsel.train_test_split(x, y,
                                   test_size=test_percent,
                                   shuffle=False)
    mse_list = np.empty(N_polynomials)
    var_list = np.empty(N_polynomials)
    bias_list = np.empty(N_polynomials)
    r2_list = np.empty(N_polynomials)
    # One k-fold CV run per polynomial degree.
    for i, deg in enumerate(deg_list):
        # Sets up design matrix
        poly = sk_preproc.PolynomialFeatures(degree=deg, include_bias=True)
        X = poly.fit_transform(x_train)
        results = kFoldCVWrapper(X, y_train, sk_model.LinearRegression(
            fit_intercept=False),
            X_test=poly.fit_transform(x_test),
            y_test=y_test)
        mse_list[i] = results["mse"]
        var_list[i] = results["var"]
        bias_list[i] = results["bias"]
        r2_list[i] = results["r2"]
    # Plot the decomposition as a function of complexity.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(deg_list, mse_list, "-*", label=r"$\mathrm{MSE}$")
    ax.plot(deg_list, var_list, "-x", label=r"$\mathrm{Var}$")
    ax.plot(deg_list, bias_list, "-.", label=r"$\mathrm{Bias}$")
    ax.set_xlabel(r"Polynomial degree")
    ax.set_ylabel(r"MSE/Var/Bias")
    ax.set_ylim(-0.01, 0.2)
    ax.legend()
    plt.show()
    plt.close(fig)
def __test_bias_variance_kkfcv():
    """Checks bias-variance relation.

    Same sweep as __test_bias_variance_kfcv but through the nested
    (kk) k-fold CV wrapper.
    """
    from regression import OLSRegression
    import sklearn.linear_model as sk_model
    import sklearn.preprocessing as sk_preproc
    import matplotlib.pyplot as plt
    # Initial values
    N_polynomials = 30
    deg_list = np.linspace(1, N_polynomials, N_polynomials, dtype=int)
    n = 500
    test_percent = 0.2
    noise = 0.1
    np.random.seed(2018)
    x = np.random.rand(n, 1)
    y = 2*x*x + np.exp(-2*x) + noise * \
        np.random.randn(x.shape[0], x.shape[1])
    # Fixed train/test split shared across all degrees.
    x_train, x_test, y_train, y_test = \
        sk_modsel.train_test_split(x, y,
                                   test_size=test_percent,
                                   shuffle=False)
    mse_list = np.empty(N_polynomials)
    var_list = np.empty(N_polynomials)
    bias_list = np.empty(N_polynomials)
    r2_list = np.empty(N_polynomials)
    # One nested CV run per polynomial degree.
    for i, deg in enumerate(deg_list):
        # Sets up design matrix
        poly = sk_preproc.PolynomialFeatures(degree=deg, include_bias=True)
        X = poly.fit_transform(x_train)
        results = kkfoldCVWrapper(X, y_train, sk_model.LinearRegression(
            fit_intercept=False),
            X_test=poly.fit_transform(x_test),
            y_test=y_test)
        mse_list[i] = results["mse"]
        var_list[i] = results["var"]
        bias_list[i] = results["bias"]
        r2_list[i] = results["r2"]
    # Plot the decomposition as a function of complexity.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(deg_list, mse_list, "-*", label=r"$\mathrm{MSE}$")
    ax.plot(deg_list, var_list, "-x", label=r"$\mathrm{Var}$")
    ax.plot(deg_list, bias_list, "-.", label=r"$\mathrm{Bias}$")
    ax.set_xlabel(r"Polynomial degree")
    ax.set_ylabel(r"MSE/Var/Bias")
    ax.set_ylim(-0.01, 0.2)
    ax.legend()
    plt.show()
    plt.close(fig)
def __test_bias_variance_mccv():
    """Checks bias-variance relation.

    Same sweep as __test_bias_variance_kfcv but through the Monte Carlo
    CV wrapper with N_bs samples.
    """
    from regression import OLSRegression
    import sklearn.linear_model as sk_model
    import sklearn.preprocessing as sk_preproc
    import matplotlib.pyplot as plt
    # Initial values
    N_polynomials = 30
    deg_list = np.linspace(1, N_polynomials, N_polynomials, dtype=int)
    n = 500
    N_bs = 200
    test_percent = 0.2
    noise = 0.1
    np.random.seed(2018)
    x = np.random.rand(n, 1)
    y = 2*x*x + np.exp(-2*x) + noise * \
        np.random.randn(x.shape[0], x.shape[1])
    # Fixed train/test split shared across all degrees.
    x_train, x_test, y_train, y_test = \
        sk_modsel.train_test_split(x, y,
                                   test_size=test_percent,
                                   shuffle=False)
    mse_list = np.empty(N_polynomials)
    var_list = np.empty(N_polynomials)
    bias_list = np.empty(N_polynomials)
    r2_list = np.empty(N_polynomials)
    # One MC-CV run per polynomial degree.
    for i, deg in enumerate(deg_list):
        # Sets up design matrix
        poly = sk_preproc.PolynomialFeatures(degree=deg, include_bias=True)
        X = poly.fit_transform(x_train)
        results = MCCVWrapper(X, y_train, sk_model.LinearRegression(
            fit_intercept=False), N_bs,
            X_test=poly.fit_transform(x_test), y_test=y_test)
        mse_list[i] = results["mse"]
        var_list[i] = results["var"]
        bias_list[i] = results["bias"]
        r2_list[i] = results["r2"]
    # Plot the decomposition as a function of complexity.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(deg_list, mse_list, "-*", label=r"$\mathrm{MSE}$")
    ax.plot(deg_list, var_list, "-x", label=r"$\mathrm{Var}$")
    ax.plot(deg_list, bias_list, "-.", label=r"$\mathrm{Bias}$")
    ax.set_xlabel(r"Polynomial degree")
    ax.set_ylabel(r"MSE/Var/Bias")
    ax.set_ylim(-0.01, 0.2)
    ax.legend()
    plt.show()
    plt.close(fig)
if __name__ == '__main__':
    # Run all CV smoke tests, the bias-variance sweeps, and the
    # manual-vs-SK-learn comparison.
    __test_cross_validation_methods()
    __test_bias_variance_kfcv()
    __test_bias_variance_kkfcv()
    __test_bias_variance_mccv()
    __compare_kfold_cv()
|
def cube_odd(arr):
    """Return the sum of the cubes of the odd numbers in *arr*.

    Args:
        arr (iterable): sequence of candidate values.

    Returns:
        The sum of k**3 for every odd numeric k, or None as soon as a
        non-numeric element is encountered (original contract preserved).
    """
    total = 0  # renamed from `sum`, which shadowed the builtin
    for value in arr:
        if not isinstance(value, (int, float, complex)):
            # Invalid input is reported via None rather than an exception.
            return None
        if value % 2 != 0:
            total += value ** 3
    return total
if __name__ == "__main__":
    # Smoke-run; the result (1**3 + 3**3 == 28) is discarded.
    cube_odd([1,2,3,4])
# Generated by Django 2.1.3 on 2018-11-30 09:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.1.3).

    Replaces the flat answer columns on Question (a, b, c, d, solution) and
    the StudentResponse model with dedicated Answer and QuestionResponse
    models, and repoints QuizResponse.responses at QuestionResponse.
    """

    dependencies = [
        ('users', '0003_auto_20181130_1459'),
        ('quiz', '0002_auto_20181125_1616'),
    ]

    operations = [
        # New model: one Answer row per answer option, flagged correct/not.
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('correct', models.BooleanField(default=False, help_text='Is this a correct answer?')),
            ],
        ),
        # New model: a student's chosen Answer for one question.
        migrations.CreateModel(
            name='QuestionResponse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('answer', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='quiz.Answer')),
            ],
        ),
        # Drop StudentResponse's FKs before deleting the model below.
        migrations.RemoveField(
            model_name='studentresponse',
            name='question',
        ),
        migrations.RemoveField(
            model_name='studentresponse',
            name='student',
        ),
        # Remove the old inline option columns from Question.
        migrations.RemoveField(
            model_name='question',
            name='a',
        ),
        migrations.RemoveField(
            model_name='question',
            name='b',
        ),
        migrations.RemoveField(
            model_name='question',
            name='c',
        ),
        migrations.RemoveField(
            model_name='question',
            name='d',
        ),
        migrations.RemoveField(
            model_name='question',
            name='solution',
        ),
        # QuizResponse now references the new QuestionResponse model.
        migrations.AlterField(
            model_name='quizresponse',
            name='responses',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz.QuestionResponse'),
        ),
        migrations.DeleteModel(
            name='StudentResponse',
        ),
        # Link QuestionResponse to its question and student.
        migrations.AddField(
            model_name='questionresponse',
            name='question',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='quiz.Question'),
        ),
        migrations.AddField(
            model_name='questionresponse',
            name='student',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='users.Student'),
        ),
        # Each Answer belongs to a Question.
        migrations.AddField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='quiz.Question'),
        ),
    ]
|
########## CHANGE THIS TO SET CONFIGURATION FILE NAME ###########
config_file_name = "config.txt"
from TwitterFollowBot import TwitterBot
from twitter import Twitter, OAuth, TwitterHTTPError
from random import randint
import time
from time import sleep
import colorama
from colorama import Fore, Style, Back, init
import subprocess
import os
# Clear the terminal: Windows uses 'cls', everything else 'clear'.
os.system('cls' if os.name=='nt' else 'clear')
# Reset colorama styling automatically after every print.
init(autoreset=True)
# Helper used to format elapsed-time values for display.
def truncate(x, d):
    """Truncate x to d decimal places (toward zero, no rounding)."""
    scale = 10.0 ** d
    return int(x * scale) / scale
##### Introduction ######
#Twitter logo banner (optional ASCII-art file)
print "\n"
if os.path.exists('twitterLogo.txt'):
    with open('twitterLogo.txt', 'r') as fin:
        print Fore.BLUE + Style.BRIGHT + fin.read()
#Title banner (optional ASCII-art file)
print "\n"
if os.path.exists('titleLogo.txt'):
    with open('titleLogo.txt', 'r') as fin:
        print Fore.BLUE + Style.BRIGHT + fin.read()
print "Copyright " + u"\u00a9" + " 2015 Brandon Jabr.\n"
#TO-DO: CHECK CONFIG FILE
#Load environment: connect the bot and mirror the follow lists locally
TwitterAPI = TwitterBot(config_file_name)
username = TwitterAPI.BOT_CONFIG["TWITTER_HANDLE"]
print Style.BRIGHT + "\nCurrent User: " + Fore.BLUE + str(username)
TwitterAPI.sync_follows()
# Session state
sessionTweets = 0    # retweets performed this run
totalTweets = 0      # size of the skip list after preloading
skipTweets = []      # tweet ids already handled; consulted before retweeting
currentID = None     # id of the last tweet acted on
maxID = None         # search pagination cursor
userMaxID = None     # user-timeline pagination cursor
tweet_type = ""      # recent|popular|mixed (collected below)
amount = 15          # batch size (collected below)
following = list(TwitterAPI.get_follows_list())
auto_follow = False  # follow the author of every retweeted post?
#Collect user input
# Prompt until the user supplies a search phrase wrapped in double quotes.
while True:
    search_term = raw_input(Style.BRIGHT + "Please enter a phrase to search (with quotes): ")
    if len(search_term) != 0 and search_term[0] == "\"" and search_term[-1] == "\"":
        break
    else:
        print(Fore.RED + "Invalid Input: Please put quotes around your search phrase. Example: \"#botTweet\"")
        continue
# Prompt for the result type understood by the Twitter search API.
while True:
    tweet_type = raw_input(Style.BRIGHT + "\nType of tweet to search (recent|popular|mixed): ")
    if tweet_type not in {'recent','popular','mixed'}:
        print(Fore.RED + "Invalid Input: Please type either recent, popular, or mixed. (Note: mixed means both popular and recent will be searched).")
        continue
    else:
        break
# Prompt for the batch size, clamped to the API maximum of 100.
# BUG FIX: the original compared the raw input *string* against 100, which
# in Python 2 is always True, so every entry was silently reset to 15.
# Convert to int before comparing.
while True:
    amount = raw_input(Style.BRIGHT + "\nNumber to search per batch. (1-100): ")
    if not amount.isdigit():
        print(Fore.RED + "Invalid Input: Enter a number between 1 and 100. (Note this is tweets per batch, not total tweets to search.")
        continue
    amount = int(amount)
    if amount > 100:
        amount = 15
    break
# Prompt for the auto-follow behaviour.
while True:
    auto_follow_str = raw_input(Style.BRIGHT + "\nAutomatically follow users you retweet (yes|no): ")
    if auto_follow_str in {'yes','YES','Yes','y'}:
        auto_follow = True
        break
    elif auto_follow_str in {'no','NO','No','n'}:
        auto_follow = False
        break
    else:
        print(Fore.RED + "Invalid input. Please enter yes or no.")
        continue
############# Get all user tweets/retweets to skip #################
print Fore.LIGHTBLUE_EX + Style.NORMAL + "\nLoading tweets to skip (user already tweeted/retweeted)..."
print Fore.LIGHTBLUE_EX + Style.NORMAL + "This may take a few seconds."
for page in range(0,16):
myTweets = None
if userMaxID == None:
myTweets = TwitterAPI.TWITTER_CONNECTION.statuses.user_timeline(screen_name=username,count=200)
else:
myTweets = TwitterAPI.TWITTER_CONNECTION.statuses.user_timeline(screen_name=username,count=200,max_id=userMaxID)
for skipTweet in myTweets:
if "retweeted_status" in skipTweet:
if skipTweet["retweeted_status"]["retweeted"] == True:
skipTweets.append(skipTweet["retweeted_status"]["id"])
skipTweets.append(str(skipTweet["id"]))
if skipTweet == myTweets[-1]:
userMaxID = skipTweet["id"]
totalTweets = len(skipTweets)
print Fore.LIGHTBLUE_EX + Style.NORMAL + "Done."
sleep(1)
################################################################
###START###
# Main loop: search -> filter -> retweet (and optionally follow) -> page
# backwards through older results via max_id, forever.
time_start = time.time()
if os.path.exists('start.txt'):
    with open('start.txt', 'r') as fin:
        print Fore.GREEN + Style.BRIGHT + fin.read()
while True:
    pageRetweets = 0
    print (Fore.LIGHTBLUE_EX + Style.NORMAL + "Loading new results for: " + "'" + search_term + "'...")
    sleep(1)
    print (Fore.LIGHTBLUE_EX + Style.NORMAL + "Applying filters...\n\n")
    result = TwitterAPI.TWITTER_CONNECTION.search.tweets(q=search_term, count=100, result_type = "mixed",max_id=maxID)
    print (Fore.BLACK + Style.BRIGHT + "Found ") + (Fore.GREEN + Style.BRIGHT + str(len(result["statuses"]))) + (Fore.BLACK + Style.BRIGHT + " matches on current page.")
    if len(result["statuses"]) == 0:
        print Fore.YELLOW + Style.BRIGHT + "\nNo new results found, waiting 15-20 minutes for additional tweets.\n"
        sleep(900)
        continue
    print (Fore.GREEN + Style.BRIGHT + "Start Retweeting:\n")
    sleep(1)
    finishedTweets = 0
    for tweet in result["statuses"]:
        try:
            # Only act on results that are themselves retweets; operate on
            # the underlying original tweet.
            if "retweeted_status" in tweet:
                originalTweet = tweet["retweeted_status"]
                checkFollow = originalTweet["retweeted"]
                checkFollow2 = tweet["retweeted"]
                checkID = str(originalTweet["id"])
                checkRepeat = False
                if checkID in skipTweets:
                    checkRepeat = True
                # Skip anything we (or the account) already retweeted.
                if checkFollow == False and checkFollow2 == False and checkRepeat == False:
                    currentID = originalTweet["id"]
                    TwitterAPI.TWITTER_CONNECTION.statuses.retweet(id=originalTweet["id"])
                    sessionTweets = sessionTweets + 1
                    print Fore.BLUE + Style.BRIGHT + "\nRetweeted: '" + Style.RESET_ALL + originalTweet["text"] + "'"
                    getUser = originalTweet["user"]["id"]
                    getHandle = originalTweet["user"]["screen_name"]
                    #Auto-Follow
                    if auto_follow is True:
                        TwitterAPI.TWITTER_CONNECTION.friendships.create(user_id=getUser, follow=True)
                        following.append(originalTweet["user"]["id"])
                        print Fore.MAGENTA + Style.BRIGHT + "Followed: " + Style.RESET_ALL + str(getHandle) + ""
                    print "\n"
                    print Style.BRIGHT + "Retweets: " + Fore.BLUE + str(sessionTweets) + Fore.BLACK
                    if auto_follow is True:
                        # NOTE(review): prints sessionTweets as the follow
                        # count; the numbers coincide only because every
                        # retweet above also follows -- a dedicated counter
                        # would be clearer.
                        print Style.BRIGHT + "Follows: " + Fore.MAGENTA + str(sessionTweets) + Fore.BLACK
                    print Style.BRIGHT + Fore.BLACK + "Runtime: " + Fore.GREEN + str(truncate((time.time() - time_start),2)) + "s\n"
                    pageRetweets = pageRetweets + 1
                    # Pause between retweets to stay under rate limits.
                    print Fore.LIGHTBLUE_EX + Style.NORMAL+ "\nWaiting for 45-60 seconds...\n"
                    sleep(randint(45, 60))
        except TwitterHTTPError as api_error:
            # API failures: back off on rate/status limits, remember tweets
            # the API says were already retweeted.
            if "rate limit" in str(api_error).lower():
                if os.path.exists('stop.txt'):
                    with open('stop.txt', 'r') as fin:
                        print Fore.RED + Style.BRIGHT + fin.read()
                print (Fore.RED + Style.BRIGHT + "\nYou have reached your rate limit. Sleeping for 15 minutes.\n")
                sleep(950)
            elif "status update limit" in str(api_error).lower():
                if os.path.exists('stop.txt'):
                    with open('stop.txt', 'r') as fin:
                        print Fore.RED + Style.BRIGHT + fin.read()
                print (Fore.RED + Style.BRIGHT + "\nYou have reached your status update limit. Sleeping for 15 minutes...\n")
                sleep(950)
            elif "already retweeted" in str(api_error).lower():
                skipTweets.append(str(currentID))
        finally:
            finishedTweets = finishedTweets + 1
            if finishedTweets == len(result["statuses"]):
                #load next page: search with max_id pages towards older tweets
                maxID = currentID
                print (Style.BRIGHT + "\n\n---- Page finished with ") + (Fore.GREEN + Style.BRIGHT + str(pageRetweets)) + (Style.BRIGHT + Fore.BLACK + " new retweets (" + (Fore.BLUE + Style.BRIGHT + str(sessionTweets)) + " total" + Style.BRIGHT + Fore.BLACK + "). Loading new page of tweets. ----\n\n")
                sleep(randint(2,5))
|
import logging
# NOTE(review): getLogger(__file__) keys the logger by file path; the usual
# convention is getLogger(__name__). Confirm nothing configures the
# path-based logger name before changing.
LOG = logging.getLogger(__file__)
import os
import tarfile
import shutil
def init_path(full_path):
    """Initialize the data folder for *full_path*, wiping any previous one.

    The parent directory of full_path is removed if it exists and then
    recreated empty.
    """
    LOG.info('Initializing data folder: %s ...', full_path)
    parent = os.path.dirname(full_path)
    if os.path.exists(parent):
        shutil.rmtree(parent)
    os.makedirs(parent)
    LOG.info('Completed setting up data folder: %s ...', full_path)
def extract(filename, output_dir):
    """Extract a tar archive into *output_dir*.

    Security hardening: rejects archive members that would escape
    output_dir (path traversal via '..' components or absolute paths)
    before any extraction happens, raising ValueError.
    """
    LOG.info('Extracting %s into folder %s', filename, output_dir)
    base = os.path.realpath(output_dir)
    with tarfile.open(filename) as f:
        # Validate every member first so a malicious archive extracts nothing.
        for member in f.getmembers():
            target = os.path.realpath(os.path.join(base, member.name))
            if target != base and not target.startswith(base + os.sep):
                raise ValueError(
                    'Archive member escapes output directory: %s' % member.name)
        f.extractall(path=output_dir)
    LOG.info('File extraction completed!')
|
print("how many cats do you have?")
reply = input()
try:
    # Parse once; a non-numeric answer raises ValueError.
    cat_count = int(reply)
except ValueError:
    print("You did not enter a number.")
else:
    if cat_count >= 4:
        print("That is a lot of cats.")
    elif cat_count < 0:
        print("That is no cat at all.")
    else:
        print("That is not that many cats.")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 16 16:35:39 2020
@author: Administrator
"""
import os
import pandas as pd
path = 'd:/Test'
os.chdir(path)

# (eNodeB id range, manufacturer label) segments, expanded one row per id.
segments = [
    (range(729600, 731648), '混合'),
    (range(1019136, 1019392), '爱立信'),
    (range(582656, 582912), '中兴'),
    (range(585216, 585472), '中兴'),
    (range(588288, 588416), '中兴'),
    (range(588416, 588544), '混合'),
    (range(591104, 591360), '中兴'),
]
list_df = [pd.DataFrame({'eNodeB': ids, 'manufacturers': label})
           for ids, label in segments]
df = pd.concat(list_df, axis=0)
df.sort_values(by='eNodeB', ascending=True, inplace=True)
# Append to the CSV (note: the header is re-written on every run).
with open('./eNodeB.csv', 'a', encoding='utf-8') as f:
    df.to_csv(f, index=False)
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QSize
class Player(QWidget):
    """Top-level music-player window: a progress bar and an 'add' button
    arranged inside a titled group box."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Music Player")
        # x, y, width, height of the window on screen
        self.setGeometry(450,150,600,800)
        self.UI()
        self.show()
    def UI(self):
        """Create all widgets, then arrange them in layouts."""
        self.widgets()
        self.layouts()
    def widgets(self):
        """Instantiate the individual widgets (no layout yet)."""
        ##### PROGRESS BAR #####
        self.progressBar=QProgressBar()
        ##### BUTTONS #####
        self.addButton=QToolButton()
        self.addButton.setIcon(QIcon("./icons/add.png"))
        self.addButton.setIconSize(QSize(64,64))
    def layouts(self):
        """Nest the widgets: progress bar on top, add button below,
        both inside a styled group box placed in the main layout."""
        ##### LAYOUTS #####
        self.mainLayout=QVBoxLayout()
        self.topMainLayout=QVBoxLayout()
        self.topGroupBox=QGroupBox("Music Player")
        self.topGroupBox.setStyleSheet('background-color:#00E19B;')
        self.topLayout=QHBoxLayout()
        self.middleLayout=QHBoxLayout()
        self.bottomLayout=QVBoxLayout()
        ##### ADDING WIDGETS #####
        ##### TOP LAYOUT WIDGETS #####
        self.topLayout.addWidget(self.progressBar)
        ##### Middle LAYOUT WIDGETS #####
        self.middleLayout.addWidget(self.addButton)
        self.topMainLayout.addLayout(self.topLayout)
        self.topMainLayout.addLayout(self.middleLayout)
        self.topGroupBox.setLayout(self.topMainLayout)
        self.mainLayout.addWidget(self.topGroupBox)
        self.mainLayout.addLayout(self.bottomLayout)
        self.setLayout(self.mainLayout)
def main():
    """Create the Qt application, show the player, and run the event loop."""
    application = QApplication(sys.argv)
    window = Player()
    sys.exit(application.exec_())


if __name__ == "__main__":
    main()
|
from bs4 import BeautifulSoup
# Read the list of HTML files to process, one path per line.
with open('listfiles.txt', 'r') as read_files:
    file_names = read_files.readlines()

# Append one line of whitespace-normalized text per input file.
with open('articles.txt', 'a') as write_file:
    for file_name in file_names:
        file_name = file_name.strip()
        try:
            with open(file_name, 'r') as f:
                soup = BeautifulSoup(f, 'html.parser')
            txt = soup.get_text()
            # Collapse all whitespace runs to single spaces; the trailing
            # space matches the original output format.
            print_txt = "".join(word + " " for word in txt.split())
            write_file.write(print_txt)
            write_file.write('\n')
        except Exception:
            # Best-effort batch job: report and move on. Narrowed from a
            # bare except so KeyboardInterrupt/SystemExit still propagate.
            print("error in", file_name)
            continue
class Solution:
    def numRookCaptures(self, board: "List[List[str]]") -> int:
        """Count pawns ('p') the white rook ('R') can capture on an 8x8 board.

        The rook attacks along its row and column; a bishop ('B') blocks
        the path. Fixes an off-by-one in the original: the scan in the
        negative direction ran only ``loc - 1`` steps, so a pawn sitting
        on row/column 0 was never counted.
        """
        # Locate the rook (the problem guarantees exactly one 'R').
        rook_row = rook_col = 0
        for i in range(8):
            for j in range(8):
                if board[i][j] == "R":
                    rook_row, rook_col = i, j
        captures = 0
        # Walk outward in each cardinal direction until a piece or the
        # board edge stops the rook.
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            r, c = rook_row + dr, rook_col + dc
            while 0 <= r < 8 and 0 <= c < 8:
                if board[r][c] == "p":
                    captures += 1
                    break
                if board[r][c] == "B":
                    break
                r, c = r + dr, c + dc
        return captures
|
import urllib
import urllib.request

from pyquery import PyQuery as py
# Target: the JD.com shopping-cart page.
url = 'https://cart.jd.com/cart.action#none'
# Browser-impersonating request headers.
# NOTE(review): the Cookie below is a captured session -- it expires, after
# which the request silently returns a logged-out page.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    # 'Accept-Encoding: gzip, deflate, br
    'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': 'user-key=fe3a107e-14ec-4040-895e-51ec415ab12a; cd=0; shshshfp=ff8f6fe1fe21e832c6282fe635fa8b9d; shshshfpa=6a620fb6-15b6-6487-25be-baaf96c141ea-1536455060; shshshfpb=04f84e982e7f1da5319769d4320b44bde9917ffdced25f7d15b9471956; __jda=122270672.15364550617311782657344.1536455062.1536455062.1536455062.1; __jdc=122270672; __jdv=122270672|direct|-|none|-|1536455061732; __jdu=15364550617311782657344; 3AB9D23F7A4B3C9B=ECPU5L4JJGLBV4P7QDVLOAEHPJRZKUMEPK24IF2SOV7CQH5OHIKEUWNI57OB3ZEUREHCQ4QJ34YFEABLFACUEPODNE; cart-main=xx; ipLoc-djd=1-72-2819; wlfstk_smdl=l69kfuqh3hclessm4bqep755voydw1kh; mt_xid=V2_52007VwMWUFpdVl4eThlaB2cDFFteX1ZcHUwcbAAyBEdaWw9SRk9LSlkZYlQTU0EIVl8XVRwIAGECFloOCFFZH3kaXQVuHxNaQVlaSx5BEl0BbAATYl9oUWocSB9UAGIzElRVUQ%3D%3D; cn=7; shshshsID=8b9bac675a5ac22ba1128f85f17c99be_4_1536455125814; __jdb=122270672.6.15364550617311782657344|1.1536455062',
    'Host': 'cart.jd.com',
    'Referer': 'https://cart.jd.com/cart.action',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
}
def cart():
    """Fetch the JD cart page and yield one details dict per item row."""
    page = urllib.request.urlopen(urllib.request.Request(url, headers=headers))
    document = py(page.read().decode('utf-8'))
    for entry in document('div.item-form').items():
        yield {
            'title': entry.find('div.item-msg').text(),
            'props-txt': entry.find('div.props-txt').text(),
            'price': entry.find('div.cell.p-price.p-price-new strong').text(),
            'sale': entry.find('a.sales-promotion.ml5').text(),
            'promotion': entry.find('ul li:eq(0)').text(),
            'weight': entry.find('span.weight').attr('data'),
            'image': entry.find('div.p-img a img').attr('src')
        }
# Print each cart entry followed by a blank separator line.
for entry in cart():
    print(entry)
    print()
import pytest
@pytest.fixture(params=['1','2','3'])
def data(request):
    # Parametrized fixture: any test requesting it runs once per value.
    return request.param
class Test_Add:
    """Demonstrates fixture scoping: class-scoped setup runs once,
    function-scoped setup runs before each parametrized case."""
    @pytest.fixture(scope='class', autouse=True)
    def click_con_list(self): # runs once for the whole class
        # Click the contact list
        print('11111')
    @pytest.fixture(autouse=True)
    def click_add_con(self): # runs before every test (three times here)
        # Click "add contact"
        print('2222')
    def test_add_con(self,data): # runs three times, once per data param
        # Add the contact
        print('33333')
#!/usr/bin/env python
import argparse
import os
import sys
import json
import random as rnd
import string
import math
import random
import subprocess
import time
import pwd
import os.path as op
from termcolor import colored
from boutiques.evaluate import evaluateEngine
from boutiques.logger import raise_error, print_info
class ExecutorOutput():
    """Everything produced by one tool execution: streams, exit code,
    error description, output-file bookkeeping, and the commands that
    were run. ``str()`` renders a colorized, human-readable report.
    """
    def __init__(self, stdout, stderr, exit_code, desc_err,
                 output_files, missing_files, shell_command,
                 container_command,
                 container_location):
        # stdout/stderr may arrive as bytes (from subprocess) or already as
        # str/None (e.g. streamed output); decode only when possible.
        # (Fixes the "utf=8" typo -- it only worked because Python
        # normalizes codec names.)
        try:
            self.stdout = stdout.decode("utf-8")
        except AttributeError:
            self.stdout = stdout
        try:
            self.stderr = stderr.decode("utf-8")
        except AttributeError:
            self.stderr = stderr
        self.exit_code = exit_code
        self.error_message = desc_err
        self.output_files = output_files
        self.missing_files = missing_files
        self.shell_command = shell_command
        self.container_command = container_command
        self.container_location = container_location

    def __str__(self):
        # Render "\t- item" bullet lists for the two file collections.
        formatted_output_files = os.linesep.join(
            "\t- " + str(f) for f in self.output_files)
        formatted_missing_files = os.linesep.join(
            "\t- " + str(f) for f in self.missing_files)

        def title(s):
            return colored(s + os.linesep, 'green')
        # Std out / Std err sections are omitted entirely when empty.
        out = (title("Shell command") +
               "{0}" + os.linesep +
               title("Container location") +
               "{1}" + os.linesep +
               title("Container command") +
               "{2}" + os.linesep +
               title("Exit code") +
               "{3}" + os.linesep +
               (title("Std out") +
                "{4}" + os.linesep if self.stdout else "") +
               (title("Std err") +
                colored("{5}", 'red') + os.linesep if self.stderr else "") +
               title("Error message") +
               colored("{6}", 'red') + os.linesep +
               title("Output files") +
               "{7}" + os.linesep +
               title("Missing files") +
               colored("{8}", 'red') +
               os.linesep).format(self.shell_command,
                                  self.container_location,
                                  self.container_command,
                                  self.exit_code,
                                  self.stdout,
                                  self.stderr,
                                  self.error_message,
                                  formatted_output_files,
                                  formatted_missing_files)
        return out
class FileDescription():
    """A produced-or-expected output file, labelled with its Boutiques id
    and whether the descriptor marks it optional."""
    def __init__(self, boutiques_name, file_name, optional):
        self.boutiques_name = boutiques_name
        self.file_name = file_name
        # Store the requirement level as display text.
        self.optional = 'Optional' if optional else 'Required'
    def __str__(self):
        return f"{self.file_name} ({self.boutiques_name}, {self.optional})"
class ExecutorError(Exception):
    """Raised when tool execution or container preparation fails."""
    pass
# Executor class
class LocalExecutor(object):
"""
This class represents a json descriptor of a tool, and can execute
various tasks related to it. It is constructed first via an
input json descriptor file, which is held in the desc_dict field.
An input can be added to it via the in_dict field, a dictionary from
param ids to values. The in_dict field should only be modified
via the public readInput method, which can either take
a file (json or csv) or a string written in the command line.
The field is always validated by checking the input parameters
with respect to the descriptor.
Other public methods include:
execute - attempts to execute the tool described by the descriptor
based on the current input (in in_dict)
printCmdLine - simply prints the generated command line
based on the current input values
generateRandomParams - fills in_dict with random values
(following the constraints of the descriptor schema)
"""
    # Constructor
    def __init__(self, desc, invocation, options={}):
        """Build an executor from a Boutiques descriptor.

        desc: path to the JSON descriptor file.
        invocation: path to the invocation (input values) file, or None.
        options: extra flags (e.g. forcePathType, debug) copied onto self.
        NOTE: the mutable default for ``options`` is only read here, never
        mutated, so it is safe.
        """
        # Initial parameters
        self.desc_path = desc  # Save descriptor path
        self.errs = []  # Empty errors holder
        self.invocation = invocation
        # Extra Options
        # Include: forcePathType and debug
        self.debug = False
        # Every provided option becomes an attribute on the executor.
        for option in list(options.keys()):
            setattr(self, option, options.get(option))
        # Parse JSON descriptor
        self.desc_dict = loadJson(desc, self.debug)
        # Set the shell
        self.shell = self.desc_dict.get("shell")
        if self.shell is None:
            self.shell = "/bin/sh"
        # Helpers Functions
        # The set of input parameters from the json descriptor
        self.inputs = self.desc_dict['inputs']  # Struct: [{id:}..,{id:}]
        # The set of output parameters from the json descriptor
        self.outputs = self.desc_dict.get('output-files') or []
        # The set of parameter groups, according to the json descriptor
        self.groups = self.desc_dict.get('groups') or []
        # Container-image Options
        self.con = self.desc_dict.get('container-image')
        self.launchDir = None
        if self.con is not None:
            # NOTE(review): the result of this get() is discarded --
            # launchDir is presumably meant to receive it; confirm intent.
            self.con.get('working-directory')
        # Container Implementation check
        conEngines = ['docker', 'singularity']
        if (self.con is not None) and self.con['type'] not in conEngines:
            msg = "Other container types than {0} (e.g. {1})"\
                  " are not yet supported"
            raise_error(ValueError, msg.format(", ".join(conEngines),
                                               self.con['type']))
        # Generate the command line
        if self.invocation:
            self.readInput(self.invocation)
# Retrieves the parameter corresponding to the given id
def byId(self, n):
return [v for v in self.inputs+self.outputs if v['id'] == n][0]
# Retrieves the group corresponding to the given id
def byGid(self, g):
return [v for v in self.groups if v['id'] == g][0]
# Retrieves the value of a field of an input
# from the descriptor. Returns None if not present.
def safeGet(self, i, k):
if k not in list(self.byId(i).keys()):
return None
return self.byId(i)[k]
# Retrieves the value of a field of a group from
# the descriptor. Returns None if not present.
def safeGrpGet(self, g, k):
if k not in list(self.byGid(g).keys()):
return None
return self.byGid(g)[k]
# Retrieves the group a given parameter id belongs to;
# otherwise, returns None
def assocGrp(self, i):
return ([g for g in self.groups if i in g["members"]] or [None])[0]
# Returns the required inputs of a given input id, or the empty string
def reqsOf(self, t):
return self.safeGet(t, "requires-inputs") or []
    # Attempt local execution of the command line
    # generated from the input values
    def execute(self, mount_strings):
        '''
        The execute method runs the generated command line
        (from either generateRandomParams or readInput)
        If docker is specified, it will attempt to use it, instead
        of local execution.
        After execution, it checks for output file existence.

        mount_strings: list of "host:container" path pairs to mount inside
        the container (may be None or empty).
        Returns an ExecutorOutput (streams, exit code, error message, and
        found/missing output files).
        '''
        command, exit_code, con = self.cmd_line[0], None, self.con or {}
        # Check for Container image
        conType, conImage = con.get('type'), con.get('image'),
        conIndex = con.get("index")
        conOpts = con.get("container-opts")
        conIsPresent = (conImage is not None)
        # Export environment variables, if they are specified in the descriptor
        envVars = {}
        if 'environment-variables' in list(self.desc_dict.keys()):
            variables = [(p['name'], p['value']) for p in
                         self.desc_dict['environment-variables']]
            for (envVarName, envVarValue) in variables:
                os.environ[envVarName] = envVarValue
                envVars[envVarName] = envVarValue
        # Container script constant name
        # Note that docker/singularity cannot do a local volume
        # mount of files starting with a '.', hence this one does not
        millitime = int(time.time()*1000)
        # Change launch (working) directory if desired
        launchDir = self.workDir
        if launchDir is None:
            launchDir = op.realpath('./')
        launchDir = op.realpath(launchDir)
        # Unique temp script name: random component avoids collisions when
        # several executions share the launch directory.
        dsname = (str(launchDir)+'/temp-' +
                  str(random.SystemRandom().randint(0, int(millitime))) +
                  "-" + str(millitime) + '.localExec.boshjob.sh')
        dsname = op.realpath(dsname)
        # If container is present, alter the command template accordingly
        container_location = ""
        container_command = ""
        if conIsPresent:
            # Pull the container
            (conPath, container_location) = self.prepare()
            # Generate command script
            # Get the supported shell by the docker or singularity
            cmdString = "#!"+self.shell+" -l"+os.linesep+str(command)
            with open(dsname, "w") as scrFile:
                scrFile.write(cmdString)
            # Ensure the script is executable
            self._localExecute("chmod 755 " + dsname)
            # Prepare extra environment variables
            envString = ""
            if envVars:
                for (key, val) in list(envVars.items()):
                    envString += "SINGULARITYENV_{0}='{1}' ".format(key, val)
            # Get the container options
            conOptsString = ""
            if conOpts:
                for opt in conOpts:
                    conOptsString += opt + ' '
            # Run it in docker
            mount_strings = [] if not mount_strings else mount_strings
            # Resolve the host side of each mount to a real path.
            mount_strings = [op.realpath(m.split(":")[0])+":"+m.split(":")[1]
                             for m in mount_strings]
            # The working directory is always mounted at the same path.
            mount_strings.append(launchDir + ':' + launchDir)
            if conType == 'docker':
                envString = " "
                if envVars:
                    for (key, val) in list(envVars.items()):
                        envString += " -e {0}='{1}' ".format(key, val)
                # export mounts to docker string
                docker_mounts = " -v ".join(m for m in mount_strings)
                # If --changeUser was desired, provides the current user id
                # and its group id as the user and group to be used instead
                # of the default root within the container.
                userchange = ''
                if self.changeUser:
                    userchange = ' -u $(id -u):$(id -g)'
                container_command = ('docker run' + userchange +
                                     ' --entrypoint=' + self.shell +
                                     ' --rm' + envString +
                                     ' -v ' + docker_mounts +
                                     ' -w ' + launchDir + ' ' +
                                     conOptsString +
                                     str(conImage) + ' ' + dsname)
            elif conType == 'singularity':
                envString = ""
                if envVars:
                    for (key, val) in list(envVars.items()):
                        envString += "SINGULARITYENV_{0}='{1}' ".format(key,
                                                                        val)
                # TODO: Singularity 2.4.6 default configuration binds: /proc,
                # /sys, /dev, ${HOME}, /tmp, /var/tmp, /etc/localtime, and
                # /etc/hosts. This means that any path down-stream shouldn't
                # be bound on the command-line, as this will currently raise
                # an exception. See:
                # https://github.com/singularityware/singularity/issues/1469
                #
                # Previous bind string:
                # singularity_mounts = " -B ".join(m for m in mount_strings)
                def_mounts = ["/proc", "/sys", "/dev", "/tmp", "/var/tmp",
                              "/etc/localtime", "/etc/hosts",
                              op.realpath(op.expanduser('~')),
                              op.expanduser('~')]
                # (A previous implementation deduplicated overlapping mount
                # paths here; it was removed because it broke mounting.)
                # Only adds mount points for those not already included
                singularity_mounts = ""
                for m in mount_strings:
                    if not any([d for d in def_mounts if m.startswith(d)]):
                        singularity_mounts += "-B {0} ".format(m)
                container_command = (envString + 'singularity exec '
                                     '--cleanenv ' +
                                     singularity_mounts +
                                     ' -W ' + launchDir + ' ' +
                                     conOptsString +
                                     str(conPath) + ' ' + dsname)
            else:
                raise_error(ExecutorError, 'Unrecognized container type: '
                            '\"%s\"' % conType)
            (stdout, stderr), exit_code = self._localExecute(container_command)
        # Otherwise, just run command locally
        else:
            (stdout, stderr), exit_code = self._localExecute(command)
        time.sleep(0.5)  # Give the OS a (half) second to finish writing
        # Destroy temporary docker script, if desired.
        # By default, keep the script so the dev can look at it.
        if conIsPresent and not self.debug:
            if os.path.isfile(dsname):
                os.remove(dsname)
        # Check for output files
        missing_files = []
        output_files = []
        all_files = evaluateEngine(self, "output-files")
        required_files = evaluateEngine(self, "output-files/optional=False")
        optional_files = evaluateEngine(self, "output-files/optional=True")
        for f in all_files.keys():
            file_name = all_files[f]
            fd = FileDescription(f, file_name, False)
            if op.exists(file_name):
                output_files.append(fd)
            else:  # file does not exist
                # Only required files count as missing.
                if f in required_files.keys():
                    missing_files.append(fd)
        # Set error messages
        desc_err = ''
        if 'error-codes' in list(self.desc_dict.keys()):
            for err_elem in self.desc_dict['error-codes']:
                if err_elem['code'] == exit_code:
                    desc_err = err_elem['description']
                    break
        return ExecutorOutput(stdout,
                              stderr,
                              exit_code,
                              desc_err,
                              output_files,
                              missing_files,
                              command,
                              container_command, container_location)
    # Looks for the container image locally and pulls it if not found
    # Returns a tuple containing the container filename (for Singularity)
    # and the container location (local or pulled)
    def prepare(self):
        con = self.con
        if con is None:
            return ("", "Descriptor does not specify a container image.")
        conType, conImage = con.get('type'), con.get('image'),
        conIndex = con.get("index")
        # If container is present, alter the command template accordingly
        conName = ""
        if conType == 'docker':
            # Pull the docker image; a nonzero return code from the pull is
            # treated as "the image must already be local".
            if self._localExecute("docker pull " + str(conImage))[1]:
                container_location = "Local copy"
            else:
                container_location = "Pulled from Docker"
            return (conName, container_location)
        if conType == 'singularity':
            # Normalize the registry index to end with "://" (default shub).
            if not conIndex:
                conIndex = "shub://"
            elif not conIndex.endswith("://"):
                conIndex = conIndex + "://"
            if self.imagePath:
                conName = op.basename(self.imagePath)
                imageDir = op.normpath(op.dirname(self.imagePath))
            else:
                conName = conImage.replace("/", "-").replace(":", "-") + ".simg"
                imageDir = op.normpath("")
            # Check if container already exists
            if self._singConExists(conName, imageDir):
                conPath = op.abspath(op.join(imageDir, conName))
                return conPath, "Local ({0})".format(conName)
            # Container does not exist, try to pull it.
            # A lock directory serializes concurrent pulls: os.mkdir is
            # atomic, so exactly one process acquires the lock; the others
            # retry every 5s for up to 36 attempts (~3 minutes).
            if self.imagePath:
                lockDir = self.imagePath + "-lock"
            else:
                lockDir = conName + "-lock"
            maxcount = 36
            count = 0
            while count < maxcount:
                count += 1
                try:
                    os.mkdir(lockDir)
                except OSError:
                    time.sleep(5)
                else:
                    try:
                        # Check if container was created while waiting
                        if self._singConExists(conName, imageDir):
                            conPath = op.abspath(op.join(imageDir, conName))
                            container_location = "Local ({0})".format(conName)
                        # Container still does not exist, so pull it
                        else:
                            conPath, container_location = self._pullSingImage(
                                conName, conIndex, conImage, imageDir, lockDir)
                        return conPath, container_location
                    finally:
                        self._cleanUpAfterSingPull(lockDir)
            # If loop times out, check again for existence, otherwise
            # raise an error
            if self._singConExists(conName, imageDir):
                conPath = op.abspath(op.join(imageDir, conName))
                return conPath, "Local ({0})".format(conName)
            raise_error(ExecutorError, "Unable to retrieve Singularity "
                        "image.")
        # Invalid container type
        raise_error(ExecutorError, 'Unrecognized container'
                    ' type: \"%s\"' % conType)
# Private method that checks if a Singularity image exists locally
def _singConExists(self, conName, imageDir):
return conName in os.listdir(imageDir)
    # Private method that pulls a Singularity image.
    # Returns (absolute image path, human-readable location description);
    # raises ExecutorError when the pull command fails.
    def _pullSingImage(self, conName, conIndex, conImage, imageDir, lockDir):
        # Give the file a temporary name while it's building
        conNameTmp = conName + ".tmp"
        # Set the pull directory to the specified imagePath
        if self.imagePath:
            os.environ["SINGULARITY_PULLFOLDER"] = imageDir
        pull_loc = "\"{0}\" {1}{2}".format(conNameTmp,
                                           conIndex,
                                           conImage)
        container_location = ("Pulled from {1}{2} ({0} not found "
                              "in current working "
                              "directory or specified "
                              "image path)").format(conName,
                                                    conIndex,
                                                    conImage)
        # Pull the singularity image
        sing_command = "singularity pull --name " + pull_loc
        (stdout, stderr), return_code = self._localExecute(
            sing_command)
        if return_code:
            message = ("Could not pull Singularity"
                       " image: " + os.linesep + " * Pull command: "
                       + sing_command + os.linesep + " * Error: "
                       + stderr.decode("utf-8"))
            raise_error(ExecutorError, message)
        # Atomically promote the finished download to its final name.
        os.rename(op.join(imageDir, conNameTmp), op.join(imageDir, conName))
        conPath = op.abspath(op.join(imageDir, conName))
        return conPath, container_location
# Removes the lock directory and environment variable
# created while pulling an image
def _cleanUpAfterSingPull(self, lockDir):
os.rmdir(lockDir)
if "SINGULARITY_PULLFOLDER" in os.environ:
del os.environ["SINGULARITY_PULLFOLDER"]
    # Private method that attempts to locally execute the given command.
    # Returns ((stdout, stderr), return_code); in streaming mode the
    # streams are (None, None) because output was already printed live.
    def _localExecute(self, command):
        # Note: invokes the command through the shell
        # (potential injection dangers)
        if self.debug:
            print_info("Running: {0}".format(command))
        try:
            if self.stream:
                # Streaming: merge stderr into stdout so both can be echoed
                # line by line as they arrive.
                process = subprocess.Popen(command, shell=True,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.STDOUT)
            else:
                process = subprocess.Popen(command, shell=True,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
        except OSError as e:
            sys.stderr.write('OS Error during attempted execution!')
            raise e
        except ValueError as e:
            sys.stderr.write('Input Value Error during attempted execution!')
            raise e
        if not self.stream:
            # communicate() waits for completion, so returncode is set.
            return process.communicate(), process.returncode
        # Streaming mode: echo output until the process exits.
        while True:
            if process.poll() is not None:
                break
            outLine = process.stdout.readline().decode()
            if outLine != '':
                sys.stdout.write(outLine)
        # Return (stdout, stderr) as (None, None) since it was already
        # printed in real time
        return (None, None), process.returncode
# Private method to generate a random input parameter set that follows
# the constraints from the json descriptor
# This method fills in the in_dict field of the object
# with constrained random values
def _randomFillInDict(self):
# Private helper functions for filling the dictionary
# Helpers for generating random numbers, strings, etc...
# Note: uses-absolute-path is satisfied for files by the
# automatic replacement in the _validateDict
# nd = number of random characters to use in generating strings,
# nl = max number of random list items
nd, nl = 2, 5
def randDigs():
# Generate random string of digits
return ''.join(rnd.choice(string.digits)
for _ in range(nd))
def randFile(id):
return ('f_' + id + '_' + randDigs() +
rnd.choice(['.csv', '.tex', '.j',
'.cpp', '.m', '.mnc',
'.nii.gz', '']))
def randStr(id):
return 'str_' + id + '_' + ''.join(rnd.choice(string.digits +
string.ascii_letters)
for _ in range(nd))
# A function for generating a number type parameter input
# p is a dictionary object corresponding to a parameter
# description in the json descriptor
# E.g. if p had no constraints from the json descriptor,
# the output would be a float in [defaultMin,defaultMax]
# if p had "integer": true, "minimum": 7,
# "maximum": 9, the output would be an int in [7,9]
def randNum(p):
param_id, defaultMin, defaultMax = p['id'], -50, 50
# Check if the input parameter should be an int
isInt = self.safeGet(param_id, 'integer')
def roundTowardsZero(x):
return int(math.copysign(1, x) * int(abs(x)))
# Assign random values to min and max,
# unless they have been specified
minv = self.safeGet(param_id, 'minimum')
maxv = self.safeGet(param_id, 'maximum')
if minv is None and maxv is None:
minv, maxv = defaultMin, defaultMax
elif minv is None and not (maxv is None):
minv = maxv + defaultMin
elif not (minv is None) and maxv is None:
maxv = minv + defaultMax
# Coerce the min/max to the proper number type
if isInt:
minv, maxv = roundTowardsZero(minv), roundTowardsZero(maxv)
else:
minv, maxv = float(minv), float(maxv)
# Apply exclusive boundary constraints, if any
if self.safeGet(param_id, 'exclusive-minimum'):
minv += 1 if isInt else 0.001
if self.safeGet(param_id, 'exclusive-maximum'):
maxv -= 1 if isInt else 0.001
# Returns a random int or a random float, depending on the type of p
return (rnd.randint(minv, maxv)
if isInt else round(rnd.uniform(minv, maxv), nd))
# Generate a random parameter value based on the input
# type (where prm \in self.inputs)
def paramSingle(prm):
    """Produce one random value appropriate for the parameter `prm`.

    An explicit value-choices list takes priority over the type;
    returns None for unrecognized types.
    """
    choices = self.safeGet(prm['id'], 'value-choices')
    if choices:
        return rnd.choice(choices)
    ptype = prm['type']
    if ptype == 'String':
        return randStr(prm['id'])
    if ptype == 'Number':
        return randNum(prm)
    if ptype == 'Flag':
        return rnd.choice([True, False])
    if ptype == 'File':
        return randFile(prm['id'])
# For this function, given prm (a parameter description),
# a parameter value is generated
# If prm is a list, a sequence of outputs is generated;
# otherwise, a single value is returned
def makeParam(prm):
    """Return one random value for `prm`, or a list of stringified
    values (random length within the descriptor's list bounds)."""
    lo = self.safeGet(prm['id'], 'min-list-entries') or 2
    hi = self.safeGet(prm['id'], 'max-list-entries') or nl
    if self.safeGet(prm['id'], 'list'):
        return [str(paramSingle(prm)) for _ in range(rnd.randint(lo, hi))]
    return paramSingle(prm)
# Returns a list of the ids of parameters that
# disable the input parameter
def disablersOf(inParam):
    """List the ids of every input whose 'disables-inputs' entry
    mentions `inParam`."""
    disablers = []
    for prm in self.inputs:
        disabled = self.safeGet(prm['id'], 'disables-inputs') or []
        if inParam['id'] in disabled:
            disablers.append(prm['id'])
    return disablers
# Returns the list of mutually requiring parameters of the target
def mutReqs(targetParam):
    """Return the parameter objects that `targetParam` requires and
    that also require `targetParam` back (mutual requirements)."""
    mutuals = []
    for reqId in self.reqsOf(targetParam['id']):
        # Mutual iff the requirement also requires the target
        if targetParam['id'] in self.reqsOf(reqId):
            mutuals.append(self.byId(reqId))
    return mutuals
# Returns whether targ (an input parameter) has a
# value or is allowed to have one
def isOrCanBeFilled(targ):
    """Return True if targ already has a value or could legally get
    one: no active disabler, no unmet non-mutual requirement, and no
    already-chosen member of its mutually-exclusive group."""
    # If it is already filled in, report so
    if targ['id'] in list(self.in_dict.keys()):
        return True
    # If a disabler or a disabled target is already active,
    # it cannot be filled
    for d in disablersOf(targ) + (self.safeGet(
                                      targ['id'],
                                      'disables-inputs') or []):
        if d in list(self.in_dict.keys()):
            return False
    # If at least one non-mutual requirement has
    # not been met, it cannot be filled
    for r in self.reqsOf(targ['id']):
        if r not in self.in_dict:  # If a requirement is not present
            # and it is not mutually required
            if targ['id'] not in self.reqsOf(r):
                return False
    # If it is in a mutex group with one target already chosen,
    # it cannot be filled
    # Get the group that the target belongs to, if any
    g = self.assocGrp(targ['id'])
    if (g is not None) and self.safeGrpGet(g['id'],
                                           'mutually-exclusive'):
        if len([x for x in g['members']
                if x in list(self.in_dict.keys())]) > 0:
            return False
    return True
def checkMutualRequirements(targ):
    """Expand `targ` into the full set of parameters tied to it by
    mutual requirements, walking the mutual-requirement graph.

    A mutual requirement of targ can have its own mutual requirements,
    so the whole connected component is traversed (LIFO stack, i.e.
    depth-first). Either every member can be given a value and the
    list of members is returned ([targ] if there are none), or at
    least one member cannot be filled (e.g. it has an active disabler)
    and False is returned so that none of them are added.
    """
    visited = []
    frontier = [targ]
    while frontier:
        node = frontier.pop()
        visited.append(node)
        if not isOrCanBeFilled(node):
            return False
        seen_ids = [v['id'] for v in visited]
        for req in mutReqs(node):
            if req['id'] not in seen_ids:
                frontier.append(req)
    return visited
# Start actual dictionary filling part
# Clear the dictionary
self.in_dict = {}
# Fill in the required parameters
for reqp in [r for r in self.inputs if not r.get('optional')]:
    self.in_dict[reqp['id']] = makeParam(reqp)
# Fill in a random choice for each one-is-required group
for grp in [g for g in self.groups
            if self.safeGrpGet(g['id'], 'one-is-required')]:
    # Loop to choose an allowed value,
    # in case a previous choice disabled that one
    while True:
        # Pick a random parameter
        choice = self.byId(rnd.choice(grp['members']))
        # see if it and its mutual requirements can be filled
        res = checkMutualRequirements(choice)
        if res is False:
            # Try again if the chosen group member is not permissible
            continue
        for r in res:
            self.in_dict[r['id']] = makeParam(r)
        break  # If we were allowed to add a parameter, we can stop
# Choose a random number of times to try to fill optional inputs
# BUG FIX: the lookup key here was '' (which safeGet can never find,
# so every input - required or not - was treated as an optional
# candidate); the intended descriptor key is 'optional'.
opts = [p for p in self.inputs
        if self.safeGet(p['id'], 'optional') in [None, True]]
# Loop a random number of times, each time
# attempting to fill a random parameter
for _ in range(rnd.randint(int(len(opts) / 2 + 1), len(opts) * 2)):
    targ = rnd.choice(opts)  # Choose an optional output
    # If it is already filled in, continue
    if targ['id'] in list(self.in_dict.keys()):
        continue
    # If it is a prohibited option, continue
    # (isFilled case handled above)
    if not isOrCanBeFilled(targ):
        continue
    # Now we handle the mutual requirements case: a mutual
    # requirement of targ can have its own mutual requirements,
    # ad nauseam, so we either fill all of them in at once or
    # none of them (e.g. if one of them is disabled).
    result = checkMutualRequirements(targ)
    # Leave if the mutreqs cannot be satisfied
    if result is False:
        continue
    # Fill in the target(s) otherwise
    for r in result:
        self.in_dict[r['id']] = makeParam(r)
# Function to generate random parameter values
# This fills the in_dict with random values, validates the input,
# and generates the appropriate command line
def generateRandomParams(self, n):
    '''
    The generateRandomParams method fills the in_dict field
    with randomly generated values following the schema.
    It then generates command line strings based on these
    values (more than 1 if -n was given).
    '''
    self.cmd_line = []
    for _ in range(n):
        # Draw a fresh random value for every input parameter
        self._randomFillInDict()
        # Look at generated input, if debugging
        if self.debug:
            print_info("Input: " + str(self.in_dict))
        # Validate as far as possible; on failure, dump the issues
        # collected so far before propagating the exception
        try:
            self._validateDict()
        except Exception as e:  # Avoid BaseExceptions like SystemExit
            sys.stderr.write("An error occurred in validation\n"
                             "Previously saved issues\n")
            for err in self.errs:
                sys.stderr.write("\t" + str(err) + "\n")
            raise e  # Pass on (throw) the caught exception
        # Record the command line built from this random input
        self.cmd_line.append(self._generateCmdLineFromInDict())
# Read in parameter input file or string
def readInput(self, infile):
    '''
    The readInput method sets the in_dict field of the executor
    object, based on a fixed input.
    It then generates a command line based on the input.
    infile: either the inputs in a file or
    the command-line string (from -s).
    stringInput: a boolean as to whether the method has
    been given a string or a file.
    '''
    # Quick check that the descriptor has already been read in
    assert self.desc_dict is not None
    # loadJson accepts a local path, a Zenodo ID or a raw JSON string
    self.in_dict = loadJson(infile)
    # Input dictionary
    if self.debug:
        print_info("Input: " + str(self.in_dict))
    # Add default values for required parameters,
    # if no value has been given
    addDefaultValues(self.desc_dict, self.in_dict)
    # Check results (as much as possible); on failure, report the
    # issues collected so far before propagating the exception
    try:
        self._validateDict()
    except Exception:  # Avoid catching BaseExceptions like SystemExit
        sys.stderr.write("An error occurred in validation\n"
                         "Previously saved issues\n")
        for err in self.errs:
            # Write any errors we found
            sys.stderr.write("\t" + str(err) + "\n")
        raise  # Raise the exception that caused failure
    # Build and save output command line (as a single-entry list)
    self.cmd_line = [self._generateCmdLineFromInDict()]
# Private method to replace the keys in template by input and output
# values. Input and output values are looked up in self.in_dict and
# self.out_dict
# * if use_flags is true, keys will be replaced by:
# * flag+flag-separator+value if flag is not None
# * value otherwise
# * if unfound_keys is "remove", unfound keys will be replaced by ""
# * if unfound_keys is "clear" then the template is cleared if it has
# unfound keys (useful for configuration files)
# * before being substituted, the values will be:
# * stripped from all the strings in stripped_extensions
# * escaped for special characters
def _replaceKeysInTemplate(self, template,
use_flags=False, unfound_keys="remove",
stripped_extensions=[],
escape_special_chars=True):
def escape_string(s):
try:
from shlex import quote
except ImportError as e:
from pipes import quote
return quote(s)
# Concatenate input and output dictionaries
in_out_dict = dict(self.in_dict)
in_out_dict.update(self.out_dict)
# Go through all the keys
for param_id in [x['id'] for x in self.inputs + self.outputs]:
escape = (escape_special_chars and
(self.safeGet(param_id, 'type') == 'String' or
self.safeGet(param_id, 'type') == 'File') or
param_id in self.out_dict.keys())
clk = self.safeGet(param_id, 'value-key')
if clk is None:
continue
if param_id in list(in_out_dict.keys()): # param has a value
val = in_out_dict[param_id]
if type(val) is list:
s_val = ""
list_sep = self.safeGet(param_id, 'list-separator')
if list_sep is None:
list_sep = " "
for x in val:
s = str(x)
if escape:
s = escape_string(str(x))
if val.index(x) == len(val)-1:
s_val += s
else:
s_val += s + list_sep
val = s_val
elif escape:
val = escape_string(val)
# Add flags and separator if necessary
flag = self.safeGet(param_id, 'command-line-flag')
if (use_flags and flag is not None):
sep = self.safeGet(param_id,
'command-line-flag-separator')
if sep is None:
sep = ' '
# special case for flag-type inputs
if self.safeGet(param_id, 'type') == 'Flag':
val = '' if val is False else flag
else:
val = flag + sep + str(val)
# Remove file extensions from input value
if (self.safeGet(param_id, 'type') == 'File' or
self.safeGet(param_id, 'type') == 'String'):
for extension in stripped_extensions:
val = val.replace(extension, "")
# Here val can be a number so we need to cast it
template = template.replace(clk, str(val))
else: # param has no value
if unfound_keys == "remove":
template = template.replace(clk, '')
elif unfound_keys == "clear":
if clk in template:
return ""
return template
# Private method to generate output file names.
# Output file names will be put in self.out_dict.
def _generateOutputFileNames(self):
    """Fill self.out_dict with one file name per declared output,
    expanding the descriptor's path templates."""
    if not hasattr(self, 'out_dict'):
        # a dictionary that will contain the output file names
        self.out_dict = {}
    for outputId in [x['id'] for x in self.outputs]:
        # Start from an already-computed name when there is one,
        # otherwise from the descriptor's path template
        if outputId in list(self.out_dict.keys()):
            name = self.out_dict[outputId]
        else:
            name = self.safeGet(outputId, 'path-template')
        exts = self.safeGet(outputId,
                            "path-template-stripped-extensions")
        if exts is None:
            exts = []
        # Keep unfound keys: they may be output keys that only get a
        # value on a second call to this method
        name = self._replaceKeysInTemplate(name,
                                           False,
                                           "keep",
                                           exts,
                                           False)
        if self.safeGet(outputId, 'uses-absolute-path'):
            name = os.path.abspath(name)
        self.out_dict[outputId] = name
# Private method to write configuration files
# Configuration files are output files that have a file-template
def _writeConfigurationFiles(self):
    """Render and write every output that declares a file-template.

    Each template line is substituted independently so that lines
    containing keys with no value (undefined optional params) can be
    cleared rather than left half-filled.
    """
    for outputId in [x['id'] for x in self.outputs]:
        fileTemplate = self.safeGet(outputId, 'file-template')
        if fileTemplate is None:
            continue  # this is not a configuration file
        stripped_extensions = self.safeGet(
            outputId,
            "path-template-stripped-extensions")
        if stripped_extensions is None:
            stripped_extensions = []
        # Substitute the keys line by line ("clear" empties any line
        # that still contains an unresolved key)
        newTemplate = [self._replaceKeysInTemplate(line,
                                                   False, "clear",
                                                   stripped_extensions,
                                                   True)
                       for line in fileTemplate]
        template = os.linesep.join(newTemplate)
        # Write the configuration file; 'with' guarantees the handle
        # is closed even if the write fails (the old open/close pair
        # leaked the handle on error and shadowed the builtin 'file')
        fileName = self.out_dict[outputId]
        with open(fileName, 'w') as config_file:
            config_file.write(template)
# Private method to build the actual command line by substitution,
# using the input data
def _generateCmdLineFromInDict(self):
    """Return the command line obtained by substituting the current
    input/output values into the descriptor's command-line template."""
    # Generate output file names twice: path templates may themselves
    # contain output keys that only resolve on the second pass
    self._generateOutputFileNames()
    self._generateOutputFileNames()
    # Write configuration files
    self._writeConfigurationFiles()
    # Substitute every given value into the command-line template
    # (incl. flags, flag-seps, ...)
    return self._replaceKeysInTemplate(self.desc_dict['command-line'],
                                       True, "remove", [], True)
# Print the command line result
def printCmdLine(self):
    """Print a heading followed by every generated command line."""
    label = 'Generated Command' + ('s' if len(self.cmd_line) > 1 else '')
    print(label + ':')
    for cmd in self.cmd_line:
        print(cmd)
# Private method for validating input parameters
def _validateDict(self):
    """Validate self.in_dict against the descriptor.

    Every problem found is appended to self.errs; if any were found,
    an ExecutorError is raised with the full list. As a side effect,
    File inputs may be rewritten to absolute/mounted paths when
    self.forcePathType is set.
    """
    # Holder for errors
    self.errs = []

    # Return whether s is a proper number; if intCheck is true,
    # also check if it is an int
    def isNumber(s, intCheck=False):
        try:
            int(s) if intCheck else float(s)
            return True
        except ValueError:
            return False
    # Check individual inputs
    for key in self.in_dict:
        isList = self.safeGet(key, "list")
        # Get current value and schema descriptor properties
        val, targ = self.in_dict[key], self.byId(key)

        # A little closure helper to check if input values satisfy
        # the json descriptor's constraints
        # Checks whether 'value' is appropriate for parameter
        # 'input' by running 'isGood' on it
        # If the input parameter is bad, it adds 'msg' to the list of errors
        def check(keyname, isGood, msg, value):  # Checks input values
            # No need to check constraints if they were not specified
            dontCheck = ((keyname not in list(targ.keys()))
                        or (targ[keyname] is False))
            # Keyname = None is a flag to check the type
            if (keyname is not None) and dontCheck:
                return
            # The input function is used to check whether
            # the input parameter is acceptable
            if not isGood(value, keyname):
                self.errs.append(key + ' (' + str(value) + ') ' + msg)
        # The id exists as an input and is not duplicate
        if len([k for k in self.inputs if k['id'] == key]) != 1:
            self.errs.append(key+' is an invalid id')
        # Types are correct
        if targ["type"] == "Number":
            # Number type and constraints are not violated
            # (note the lambda safety checks)
            # NOTE(review): the exclusive checks read targ['minimum']
            # / targ['maximum'] directly, so a descriptor declaring
            # only 'exclusive-minimum' without 'minimum' would raise
            # KeyError here -- confirm the schema forbids that.
            for v in (val if isList else [val]):
                check('minimum', lambda x, y: float(x) >= targ[y],
                      "violates minimum value", v)
                check('exclusive-minimum',
                      lambda x, y: float(x) > targ['minimum'],
                      "violates exclusive min value", v)
                check('maximum', lambda x, y: float(x) <= targ[y],
                      "violates maximum value", v)
                check('exclusive-maximum',
                      lambda x, y: float(x) < targ['maximum'],
                      "violates exclusive max value", v)
                check('integer', lambda x, y: isNumber(x, True),
                      "violates integer requirement", v)
                check(None, lambda x, y: isNumber(x), "is not a number", v)
        elif self.safeGet(targ['id'], 'value-choices'):
            # Value is in the list of allowed values
            if isinstance(val, list):
                fn = (lambda x, y: all([x1 in targ[y] for x1 in x]))
            else:
                fn = (lambda x, y: x in targ[y])
            check('value-choices', fn,
                  "is not a valid enum choice", val)
        elif targ["type"] == "Flag":
            # Should be 'true' or 'false' when lower-cased
            # (based on our transformations of the input)
            check(None,
                  lambda x, y: type(x) == bool,
                  "is not a valid flag value", val)
        elif targ["type"] == "File":
            # Check path-type (absolute vs relative)
            if not self.forcePathType:
                for ftarg in (str(val).split() if isList else [val]):
                    check('uses-absolute-path',
                          lambda x, y: os.path.isabs(x),
                          "is not an absolute path", ftarg)
            else:
                # Replace incorrectly specified paths if desired
                replacementFiles = []
                launchDir = os.getcwd()
                if self.launchDir is not None:
                    launchDir = self.launchDir
                for ftarg in (val if isList else [val]):
                    # Special case 1: launchdir is specified and we
                    # want to use absolute path
                    # Note: in this case, the pwd is mounted as the
                    # launchdir; we do not attempt to move files if they
                    # will not be mounted, currently
                    # That is, specified files that are not in the pwd or a
                    # subfolder will not be mounted to the container
                    if (targ.get('uses-absolute-path') is True and
                            self.launchDir is not None):
                        # relative path to target, from the pwd
                        relpath = os.path.relpath(ftarg, os.getcwd())
                        # absolute path in the container
                        mountedAbsPath = os.path.join(launchDir, relpath)
                        replacementFiles.append(
                            os.path.abspath(mountedAbsPath))
                    # If the input uses-absolute-path, replace
                    # the path with its absolute version
                    elif targ.get('uses-absolute-path') is True:
                        replacementFiles.append(os.path.abspath(ftarg))
                    else:
                        replacementFiles.append(ftarg)
                # Replace old val with the new one
                # (lists collapse into a space-separated string here)
                self.in_dict[key] = " ".join(replacementFiles)
        # List length constraints are satisfied
        if isList:
            check('min-list-entries',
                  lambda x, y: len(x) >= targ[y],
                  "violates min size", val)
        if isList:
            check('max-list-entries',
                  lambda x, y: len(x) <= targ[y],
                  "violates max size", val)
    # Required inputs are present
    for reqId in [v['id'] for v in self.inputs if not v.get('optional')]:
        if reqId not in list(self.in_dict.keys()):
            self.errs.append('Required input ' + str(reqId) +
                             ' is not present')
    # Disables/requires is satisfied
    for givenVal in [v for v in self.inputs
                     if v['id'] in list(self.in_dict.keys())]:
        # Check that requirements are present
        for r in self.reqsOf(givenVal['id']):
            if r not in list(self.in_dict.keys()):
                self.errs.append('Input ' + str(givenVal['id']) +
                                 ' is missing requirement '+str(r))
        for d in (givenVal['disables-inputs']
                  if 'disables-inputs' in list(givenVal.keys()) else []):
            # Check if a disabler is present
            if d in list(self.in_dict.keys()):
                self.errs.append('Input ' + str(d) +
                                 ' should be disabled by ' +
                                 str(givenVal['id']))
    # Group one-is-required/mutex is ok
    for group, mbs in [(x, x["members"]) for x in self.groups]:
        # Check that the set of parameters in mutually
        # exclusive groups have at most one member present
        if (("mutually-exclusive" in list(group.keys())) and
                group["mutually-exclusive"]):
            if len(set.intersection(set(mbs),
                                    set(self.in_dict.keys()))) > 1:
                self.errs.append('Group ' + str(group["id"]) +
                                 ' is supposed to be mutex')
        # Check that the set of parameters in one-is-required
        # groups have at least one member present
        if (("one-is-required" in list(group.keys())) and
                group["one-is-required"]):
            if len(set.intersection(set(mbs),
                                    set(self.in_dict.keys()))) < 1:
                self.errs.append('Group ' + str(group["id"]) +
                                 ' requires one member to be present')
    # Fast-fail if there was a problem with the input parameters
    if len(self.errs) != 0:
        message = "Problems found with prospective input:\n"
        for err in self.errs:
            message += ("\t" + err + "\n")
        raise_error(ExecutorError, message)
# Helper function that loads the JSON object coming from either a string,
# a local file or a file pulled from Zenodo
def loadJson(userInput, verbose=False):
    """Return the parsed JSON object for `userInput`.

    userInput may be a local file path, a Zenodo ID ("zenodo.XXXX"),
    or a raw JSON string; raises ExecutorError when none applies.
    """
    # Resolve to a JSON file, either local or fetched from Zenodo
    json_file = None
    if os.path.isfile(userInput):
        json_file = userInput
    elif userInput.split(".")[0].lower() == "zenodo":
        from boutiques.puller import Puller
        json_file = Puller(userInput, verbose).pull()
    if json_file is not None:
        with open(json_file, 'r') as f:
            return json.loads(f.read())
    # JSON file not found, so try to parse userInput as a JSON object
    e = ("Cannot parse input {}: file not found, "
         "invalid Zenodo ID, or invalid JSON object").format(userInput)
    # A bare number can only have been a (bad) Zenodo ID
    if userInput.isdigit():
        raise_error(ExecutorError, e)
    try:
        return json.loads(userInput)
    except ValueError:
        raise_error(ExecutorError, e)
# Adds default values to input dictionary
# for parameters whose values were not given
def addDefaultValues(desc_dict, in_dict):
    """Fill in_dict (in place) with each input's default-value when the
    user supplied none; returns in_dict for convenience."""
    for in_param in desc_dict['inputs']:
        default = in_param.get("default-value")
        if default is not None and in_dict.get(in_param['id']) is None:
            in_dict[in_param['id']] = default
    return in_dict
|
"""
Revision ID: 0338_add_notes_to_service
Revises: 0337_broadcast_msg_api
Create Date: 2021-01-13 11:50:06.333369
"""
import sqlalchemy as sa
from alembic import op
revision = "0338_add_notes_to_service"
down_revision = "0337_broadcast_msg_api"
def upgrade():
    """Add the nullable 'notes' text column to services and its history table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("services", sa.Column("notes", sa.Text(), nullable=True))
    op.add_column("services_history", sa.Column("notes", sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the 'notes' column from both tables (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("services_history", "notes")
    op.drop_column("services", "notes")
    # ### end Alembic commands ###
|
from flask import render_template, redirect, url_for, current_app, session
from app.main import bp
from app.main.forms import SearchForm
import pandas as pd
from datetime import datetime, timedelta
# authlib version 13 +
from authlib.integrations.requests_client import OAuth2Session
from app import client
from bs4 import BeautifulSoup
from app.decorators import *
# curl -X POST -u 'client_a2d1bf0b6857ae01a6f9ff352f3fbd39:secret_62134ebc40d8c82372c9021c75462529' -H "Content-Type: application/x-www-form-urlencoded" -d 'grant_type=client_credentials&scope=api_listings_read' 'https://auth.domain.com.au/v1/connect/token'
def get_token():
    """Fetch an OAuth2 client-credentials token from the Domain auth API.

    Returns the raw token dict (includes 'expires_in').
    NOTE(review): the local 'client' deliberately shadows the module-level
    'client' import inside this function only.
    """
    scope='api_listings_read'
    client = OAuth2Session(current_app.config['DOMAIN_CLIENT_ID'], current_app.config['DOMAIN_CLIENT_SECRET'], scope=scope)
    # token = client.fetch_access_token('https://auth.domain.com.au/v1/connect/token', grant_type='client_credentials')
    # now = datetime.now()
    # later = now + timedelta(seconds=token['expires_in']-900)
    resp = client.fetch_token('https://auth.domain.com.au/v1/connect/token')
    # current_app.logger.info(resp.items())
    return resp
def get_session(token):
    """Return an OAuth2 session authenticated with an existing token."""
    client_id = current_app.config['DOMAIN_CLIENT_ID']
    client_secret = current_app.config['DOMAIN_CLIENT_SECRET']
    return OAuth2Session(client_id, client_secret, token=token)
# In case key does not exist in JSON, then return an empty string
def get_data(data, key):
    """Return data[key], or '' when the key is absent."""
    return data[key] if key in data else ''
# Contact is returned in list of dictionaries, sometimes they are empty
# This function outputs the data in normal string format
def get_contact(contact):
    """Join the 'name' fields of a list of contact dicts into
    'A, B, C'; dicts without a 'name' key are skipped.

    BUG FIX: the previous loop decided whether to append a separator
    by element position, so when the *last* dict had no 'name' the
    result kept a trailing ', '. Joining only the present names
    handles every case, including an empty list.
    """
    return ', '.join(c['name'] for c in contact if 'name' in c)
def get_pet(data):
    """Return 'Yes' when the listing's features include 'PetsAllowed',
    otherwise ''."""
    if 'PetsAllowed' in data.get('features', []):
        return 'Yes'
    return ''
@bp.route('/')
@bp.route('/index', methods=['GET','POST'])
@login_required
def index():
    """Search Domain rental listings and render the results.

    On a valid search-form POST: obtains (or refreshes) an OAuth token,
    queries the Domain listings API with the form's filters, flattens
    the response into a DataFrame, saves it as a timestamped CSV and
    renders the results page. Otherwise renders the search form.
    """
    form=SearchForm()
    if form.validate_on_submit():
        # if first time
        if 'domain' not in session:
            session['domain'] = get_token()
        # get token again if expired
        # NOTE(review): expiry is recomputed from *now*, so it always
        # lies in the future and this refresh branch can never fire;
        # the token's acquisition time would need to be stored for the
        # check to work -- confirm intended behaviour.
        expiry = datetime.now() + timedelta(seconds=session['domain']['expires_in']-900)
        if datetime.now() >= expiry:
            session['domain'] = get_token()
        # build data from search filters to send to domain
        data = dict()
        min_bedrooms = int(form.min_bedrooms.data)
        min_bathrooms = int(form.min_bathrooms.data)
        max_price = int(form.max_price.data)
        #surround_suburbs = form.surround_suburbs.data
        postcode = form.postcode.data
        # -1 is the form's "no minimum" sentinel
        if (min_bedrooms != -1):
            data['minBedrooms'] = min_bedrooms
        if (min_bathrooms != -1):
            data['minBathrooms'] = min_bathrooms
        if (max_price):
            data['maxPrice'] = max_price
        if (postcode):
            loc_data = []
            loc_data.append({'postcode':postcode})
            data['locations'] = loc_data
        #if (postcode or surround_suburbs):
        #    loc_data = []
        #    if (postcode):
        #        loc_data.append({'postcode':postcode})
        #    if (surround_suburbs):
        #        loc_data.append({'includeSurroundingSuburbs':surround_suburbs})
        #    data['locations'] = loc_data
        data['page'] = 1
        data['pageSize'] = 200
        data['listingType'] = 'Rent'
        # send a post request to domain api with json data
        domain_session = get_session(session['domain'])
        resp=domain_session.post('https://api.domain.com.au/v1/listings/residential/_search',json=data)
        # construct pandas dataframe and save to csv
        # NOTE(review): DataFrame.append was removed in pandas >= 2.0;
        # building a list of dicts and calling pd.DataFrame once would
        # be both compatible and much faster.
        df = pd.DataFrame(columns=[
            'Property type', 'Price', 'Suburb','Postcode','Display address','Bedrooms',
            'Bathrooms','Carspaces','Pets allowed','Headline','Description',
            'url','Advert type','Advert name','Advert contact'
            ])
        json_resp = resp.json()
        for j in json_resp:
            df = df.append({
                'Property type': get_data(data=j['listing']['propertyDetails'], key='propertyType'),
                'Price': get_data(data=j['listing']['priceDetails'], key='displayPrice'),
                'Suburb': get_data(data=j['listing']['propertyDetails'], key='suburb'),
                'Postcode': get_data(data=j['listing']['propertyDetails'], key='postcode'),
                'Display address' : get_data(data=j['listing']['propertyDetails'], key='displayableAddress'),
                'Bedrooms': get_data(data=j['listing']['propertyDetails'], key='bedrooms'),
                'Bathrooms': get_data(data=j['listing']['propertyDetails'], key='bathrooms'),
                'Carspaces': get_data(data=j['listing']['propertyDetails'], key='carspaces'),
                'Pets allowed': get_pet(data=j['listing']['propertyDetails']),
                'Headline': get_data(data=j['listing'], key='headline'),
                # Strip HTML and newlines from the free-text description
                'Description': BeautifulSoup(get_data(data=j['listing'], key='summaryDescription'),'html.parser').
                    get_text().replace('\r','').replace('\n',' '),
                'url': 'http://www.domain.com.au/'+get_data(data=j['listing'], key='listingSlug'),
                'Advert type': get_data(data=j['listing']['advertiser'], key='type'),
                'Advert name': get_data(data=j['listing']['advertiser'], key='name'),
                'Advert contact': get_contact(get_data(data=j['listing']['advertiser'], key='contacts'))
                }, ignore_index=True)
        # get current date time for filename
        curr_dt = datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = curr_dt + '_domain.csv'
        df.to_csv(current_app.config['RENTAL_FOLDER'] / filename, index=False)
        return render_template('main/results.html',data=df, filename=filename)
    return render_template('main/index.html',form=form)
@bp.route('/other', methods=['GET'])
@login_required
def other():
    """Show the most recent scraped rental listings from Scrapy Cloud
    and save them to other.csv."""
    # project id 417245 is the rental project in scrapy cloud
    project = client.get_project(417245)
    # get the spider which is called rental
    spider = project.spiders.get('rental')
    # get the most recent job's key
    jobs_summary = spider.jobs.iter()
    key = [j['key'] for j in jobs_summary][0]
    # get the most recent job and retrieve its items
    job = client.get_job(key)
    items = job.items.iter()
    # construct pandas dataframe and save to csv
    # NOTE(review): DataFrame.append was removed in pandas >= 2.0;
    # collect rows in a list and build the frame once when upgrading.
    df = pd.DataFrame(columns=[
        'Suburb', 'Status', 'Price', 'Home Type', 'Available',
        'Occupants', 'Description', 'url'
    ])
    for item in items:
        # BUG FIX: the row key was 'Home_Type', which did not match the
        # 'Home Type' column declared above -- that column stayed empty
        # and a stray 'Home_Type' column was appended instead.
        df = df.append({
            'Suburb': item['suburb'],
            'Status': item['status'],
            'Price': item['price'],
            'Home Type': item['home_type'],
            'Available': item['available'],
            'Occupants': item['occupants'],
            'Description': item['description'],
            'url': item['url']
        }, ignore_index=True)
    df.to_csv(current_app.config['RENTAL_FOLDER'] / 'other.csv', index=False)
    return render_template('main/other.html', data=df)
@bp.route('/logout', methods=['GET'])
def logout():
    """Mark the session as logged out and redirect to the login page."""
    session['logged_in'] = False
    return redirect(url_for('auth.login'))
|
#!/home/cheshire/cheshire3/install/bin/python
# -*- coding: utf-8 -*-
"""Clean up the character encoding problems when moving from C2 to C3 (June 2009)"""
import sys
import os
import re
import time
sys.path.insert(1, '/home/cheshire/cheshire3/code')
from cheshire3.web import www_utils
from cheshire3.web.www_utils import *
from cheshire3.document import StringDocument
from cheshire3.baseObjects import Session
from cheshire3.server import SimpleServer
cheshirePath = "/home/cheshire"
# Build environment...
session = Session()
serv = SimpleServer(session, cheshirePath + "/cheshire3/configs/serverConfig.xml")
session.database = 'db_istc'
db = serv.get_object(session, 'db_istc')
print 'cleaningData'
#listFiles = os.listdir("encodingtest/")
listFiles = os.listdir("/home/cheshire/cheshire3/dbs/istc/newData/")
# Cheshire3 objects used by the cleanup pipeline
parser = db.get_object(session, 'LxmlParser')
marcTxr = db.get_object(session, 'dataTransformer')
indentingTxr = db.get_object(session, 'indentingTxr')
# Extracts the ISTC number (MARC field 001) for error reporting
istcnoregex = re.compile('<fld001>(\S*)</fld001>')
preparser = db.get_object(session, 'CharacterEntityPreParser')
# Patterns matching known mis-encoded place/person names
forliRE = re.compile('Forl.*grave;')
LokkosRE = re.compile('LQkk.*s')
mondoviRE = re.compile('Mondov.*grave;')
# Counters/ids for the summary printed at the end
errors = 0;
errorids = []
correct = 0;
for file in listFiles:
#print file
dataFile = open("/home/cheshire/cheshire3/dbs/istc/newData/" + file, 'r')
# dataFile = open("encodingtest" + "/" + file, 'r')
dataString = dataFile.read()
dataString = forliRE.sub('Forlì', dataString)
dataString = mondoviRE.sub('Mondovì', dataString)
dataString = LokkosRE.sub('Lőkkös', dataString)
dataString = dataString.replace('GdaDsk', 'Gdańsk')
dataString = dataString.replace('WrocBaw', 'Wrocław')
dataString = dataString.replace('WBocBawek', 'Włocławek')
dataString = dataString.replace('PoznaD', 'Poznań')
dataString = dataString.replace('&', '&')
dataString = dataString.replace('', 'ć').replace('', 'ę').replace('', '')
dataString = dataString.replace('&#', '&#')
dataString = dataString.replace('&amp;', '&')
#extrabits
doc = StringDocument(dataString)
try:
rec = parser.process_document(session, doc)
output = marcTxr.process_record(session, rec)
rec = parser.process_document(session, output)
output = indentingTxr.process_record(session, rec)
correct += 1
except:
dataFile = open("/home/cheshire/cheshire3/dbs/istc/newData/" + file, 'r')
dataString = dataFile.readlines()
newfile = []
for l in dataString:
l = forliRE.sub('Forlì', l)
l = mondoviRE.sub('Mondovì', l)
l = LokkosRE.sub('Lőkkös', l)
l = l.replace('GdaDsk', 'Gdańsk')
l = l.replace('WrocBaw', 'Wrocław')
l = l.replace('WBocBawek', 'Włocławek')
l = l.replace('PoznaD', 'Poznań')
l = l.replace('&', '&')
l = l.replace('', 'ć').replace('', 'ę').replace('', '')
l = l.replace('&#', '&#')
l = l.replace('&amp;', '&')
try:
l = l.decode('utf-8')
except:
l = l.decode('iso-8859-1')
newfile.append(l)
doc = StringDocument(' '.join(newfile))
try:
rec = parser.process_document(session, doc)
output = marcTxr.process_record(session, rec)
rec = parser.process_document(session, output)
output = indentingTxr.process_record(session, rec)
correct += 1
except:
try:
errorids.append(re.findall(istcnoregex, doc.get_raw(session))[0])
print re.findall(istcnoregex, doc.get_raw(session))[0]
except:
print doc.get_raw(session)
errors += 1
# dataWrite = open("encodingtest/all" + file, 'w')
dataWrite = open("/home/cheshire/cheshire3/dbs/istc/data/" + file, 'w')
dataWrite.write(output.get_raw(session))
dataWrite.close
dataFile.close
print 'Sucessful: %s' % correct
print 'Errors: %s' % errors
|
from random import randint
class Node(object):
    """A binary-search-tree node: a value plus left/right child links."""
    def __init__(self, v):
        self.value = v
        self.left = None   # subtree of values < value
        self.right = None  # subtree of values > value
def build_Bitree(n):
    """Build a BST of n distinct random values and return its root.

    Values are drawn uniformly from [0, max(29, n)] so the pool is
    always large enough to supply n distinct numbers.
    """
    assert n > 0
    lb = 0
    ub = 29 if n < 29 else n
    nums = []
    for _ in range(n):
        trial = randint(lb, ub)
        while trial in nums:  # re-draw until the value is unique
            trial = randint(lb, ub)
        nums.append(trial)
    root = Node(nums[0])
    print('nums:', nums)
    # Standard unbalanced BST insertion for the remaining values
    for v in nums[1:]:
        node = root
        while True:
            if v < node.value:
                if node.left is None:
                    node.left = Node(v)
                    break
                node = node.left
            elif v > node.value:
                if node.right is None:
                    node.right = Node(v)
                    break
                node = node.right
            else:
                break  # duplicate (cannot happen: nums are distinct)
    return root
def bitree_search(root, value):
    """Search the BST rooted at `root` for `value`.

    Returns the node containing value when present; otherwise returns
    the last node visited (the would-be parent of value).

    BUG FIX: the original returned the *parent* of the last visited
    node when the search fell off a left link, but the last visited
    node itself when it fell off a right link. Both directions now
    consistently return the last visited node.
    """
    assert isinstance(root, Node)
    p = root
    while True:
        if value < p.value:
            if p.left is None:
                return p
            p = p.left
        elif value > p.value:
            if p.right is None:
                return p
            p = p.right
        else:
            return p
def print_tree_node(node):
    """Visitor for print_tree_horizontally: print a single node's value."""
    assert isinstance(node, Node)
    print(node.value)
def print_bitree_simple(root, depth=0):
    """Print the tree pre-order, indenting each level by one space."""
    if root is None:
        return
    print(' ' * depth, root.value)
    for child in (root.left, root.right):
        print_bitree_simple(child, depth + 1)
def print_tree_horizontally(root, print_func, depth, branch, item_width=3):
    """Print the tree left-to-right with ASCII connector lines.

    branch is a mutable list of booleans, one per depth level; a True
    entry means an ancestor at that level still has a pending right
    child, so a vertical '|' connector must be drawn in that column.
    print_func is called once per node to render it.
    """
    if None == root:
        return
    # Draw the row prefix: '|' where an ancestor has a pending right
    # child, dashes on the final level, spaces elsewhere
    for i in range(depth):
        twig = '-' if i == depth - 1 else ' '
        if branch[i]:
            print("|",end='');
        else:
            print(twig,end='');
        for _ in range(item_width):
            print(twig,end='');
    print_func(root)
    # print the branch only when the tree has right children.
    if len(branch) <= depth:
        branch.append(False)
    if (root.right):
        branch[depth] = True
    print_tree_horizontally(root.left, print_func, depth+1, branch)
    # Left subtree done: clear this level's connector before recursing right
    branch[depth] = False;
    print_tree_horizontally(root.right, print_func, depth+1, branch)
def test():
    # Exercise the builder, both printers, and the search on a small
    # random tree.
    n = 13
    root = build_Bitree(n)
    print_bitree_simple(root)
    print_tree_horizontally(root, print_tree_node, 0, [])
    # NOTE(review): bitree_search may return a nearby node when value is
    # absent from the tree, so node.value need not equal value here.
    value = randint(2,29)
    node = bitree_search(root,value)
    print(value, ':', node.value)
if __name__ == '__main__':
test() |
import random
from bokeh.plotting import figure, show
def tirar_dado(num_tiros):
    """Simulate num_tiros rolls of a fair six-sided die.

    Returns the rolls as a list of ints in [1, 6].
    """
    return [random.choice([1, 2, 3, 4, 5, 6]) for _ in range(num_tiros)]
def simulacion(num_tiros, num_intentos):
    """Estimate P(at least one 1 in num_tiros throws of one die).

    Runs num_intentos simulated attempts and prints the estimate.

    Bug fix: the original counted attempts containing a 6, while both
    the counter name (tiros_con_1) and the printed message refer to a
    1 — the membership test now matches them.
    """
    tiros = [tirar_dado(num_tiros) for _ in range(num_intentos)]
    # Count the attempts that contain at least one 1.
    tiros_con_1 = sum(1 for tiro in tiros if 1 in tiro)
    probabilidad_tiros_1 = tiros_con_1 / num_intentos
    print(f'Probabilidad de obtener por lo menos un 1 en {num_tiros} tiros: {probabilidad_tiros_1}')
def simulacion_2(num_tiros, num_intentos):
    """Estimate P(at least one 12 with two dice in num_tiros throws).

    Approximated as the product of two independent estimates of
    P(at least one 6), one per die, each over num_intentos attempts.

    The original duplicated the estimation code verbatim; it is
    factored into a loop producing one estimate per die, preserving
    the number and order of random draws.
    """
    probabilidades = []
    for _ in range(2):
        tiros = [tirar_dado(num_tiros) for _ in range(num_intentos)]
        tiros_con_6 = sum(1 for tiro in tiros if 6 in tiro)
        probabilidades.append(tiros_con_6 / num_intentos)
    probabilidad_final = probabilidades[0] * probabilidades[1]
    print(f'Probabilidad de obtener por lo menos un 12 en {num_tiros} tiros: {probabilidad_final}')
def main():
    # Interactive entry point: ask for the simulation parameters and
    # dispatch to the one-die or two-dice estimator. Any other dice
    # count silently does nothing.
    num_tiros = int(input('Ingrese la cantidad de tiros que se realizaran: '))
    num_intentos = int(input('Cuantas veces correra la simulacion?: '))
    num_dados = int(input('Ingrese 1 para usar un dado o 2 para usar dos dados: '))
    if num_dados == 1:
        simulacion(num_tiros, num_intentos)
    elif num_dados == 2:
        simulacion_2(num_tiros, num_intentos,)
if __name__ == '__main__':
main() |
# Test that default values do not change.
# Test that settings can be imported without the DJANGO_SETTINGS_MODULE
# environment variable set.
# Test that default values can be changed by the DJANGO_SETTINGS_MODULE.
|
import numpy as np
from tools.utils import Helper, INFO, ERROR, NOTE
import sys
import argparse
def main(train_set: str):
    """Load and display the precomputed anchors for train_set.

    Reads data/{train_set}_anchor.npy. NOTE(review): other parts of
    this script advertise a different file name suffix — confirm which
    one is current.
    """
    centroids = np.load(f'data/{train_set}_anchor.npy')
    print(NOTE, f'Now anchors are :\n{centroids}')
def parse_arguments(argv):
    """Parse command-line arguments for the anchor display script.

    Bug fix: the help text advertised '{train_set}_anchors.list', but
    main() loads 'data/{train_set}_anchor.npy'; the help now names the
    file that is actually read.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('train_set', type=str, help=NOTE + 'this is train dataset name , the anchors will be loaded from data/{train_set}_anchor.npy')
    return parser.parse_args(argv)
if __name__ == '__main__':
    # CLI entry point: forward argv (minus the program name) to the parser.
    args = parse_arguments(sys.argv[1:])
    main(args.train_set)
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import neighbors
from sklearn.svm import SVR
from warnings import filterwarnings
filterwarnings('ignore')
# Load the Hitters data set and drop rows with missing values.
df= pd.read_csv("Hitters.csv")
df= df.dropna()
dms= pd.get_dummies(df[["League","Division","NewLeague"]]) # one-hot encode the categorical columns into dummy variables
y= df["Salary"] # dependent variable
X_= df.drop(['Salary','League','Division','NewLeague'], axis=1).astype('float64') # drop the target and the categorical columns to form the numeric predictors
X= pd.concat([X_, dms[['League_N','Division_W','NewLeague_N']]], axis=1) # rejoin the dummy columns with the numeric predictors
# The categorical variables were one-hot encoded and merged back with the
# remaining independent variables; below, the data is split into training
# and test sets.
X_train, X_test, y_train, y_test= train_test_split(X,
                                                   y,
                                                   test_size=0.25,
                                                   random_state=42)
print(df.head())
print(df.shape)
print(X_train.head())
# Reduce to a single predictor (Hits) for the 1-D visualisation below.
X_train= pd.DataFrame(X_train["Hits"])
X_test= pd.DataFrame(X_test["Hits"])
# Model and prediction
dtr= DecisionTreeRegressor(max_leaf_nodes=10)
cart_model= dtr.fit(X_train,y_train)
print(cart_model)
X_grid= np.arange(min(np.array(X_train)),max(np.array(X_train)), 0.01)
X_grid= X_grid.reshape((len(X_grid), 1))
plt.scatter(X_train, y_train, color='red')
plt.plot(X_grid,cart_model.predict(X_grid),color='blue')
plt.title('CART REGRESYON AĞACI')
plt.xlabel('Atış Sayısı (Hits)')
plt.ylabel('Maaş (Salary)')
plt.show()
# Single-variable prediction: test-set RMSE of the one-feature tree.
print(cart_model.predict(X_test)[0:5])
y_pred= cart_model.predict(X_test)
rmse= np.sqrt(mean_squared_error(y_test,y_pred))
print(rmse)
# Prediction using all variables: reload and re-split the full data set.
df= pd.read_csv("Hitters.csv")
df= df.dropna()
dms= pd.get_dummies(df[["League","Division","NewLeague"]]) # one-hot encode the categorical columns into dummy variables
y= df["Salary"] # dependent variable
X_= df.drop(['Salary','League','Division','NewLeague'], axis=1).astype('float64') # drop the target and the categorical columns to form the numeric predictors
X= pd.concat([X_, dms[['League_N','Division_W','NewLeague_N']]], axis=1) # rejoin the dummy columns with the numeric predictors
# Same preprocessing as above, now keeping every predictor; split into
# training and test sets.
X_train, X_test, y_train, y_test= train_test_split(X,
                                                   y,
                                                   test_size=0.25,
                                                   random_state=42)
dtr= DecisionTreeRegressor()
cart_model= dtr.fit(X_train,y_train)
y_pred= cart_model.predict(X_test)
rmse= np.sqrt(mean_squared_error(y_test,y_pred))
print(rmse)
# Model tuning
dtr= DecisionTreeRegressor(max_depth=5) # max_depth limits how deep the tree may grow
cart_model= dtr.fit(X_train,y_train)
y_pred= cart_model.predict(X_test)
rmse= np.sqrt(mean_squared_error(y_test,y_pred))
print(rmse)
# NOTE(review): min_samples_split lists 10 twice — possibly one of them
# was meant to be 100; confirm the intended grid.
cart_params= {"max_depth":[2,3,4,5,10,20],
              "min_samples_split":[2,10,5,30,50,10]}
# Recreate an unfitted estimator for the grid search.
cart_model= DecisionTreeRegressor()
gs= GridSearchCV(cart_model,cart_params,cv=10)
cart_cv_model= gs.fit(X_train,y_train)
print(cart_cv_model.best_params_)
# Final model, refit with the best parameters found above.
dtr= DecisionTreeRegressor(max_depth=4, min_samples_split=50)
cart_tuned= dtr.fit(X_train,y_train)
print(cart_tuned)
y_pred= cart_tuned.predict(X_test)
rmse= np.sqrt(mean_squared_error(y_test,y_pred))
print(rmse) |
# Generated by Django 3.0 on 2020-11-27 03:20
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete 'created' field from the contests model."""

    dependencies = [
        ('Contests', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='contests',
            name='created',
        ),
    ]
|
from setuptools import setup
setup(
    name="todo",
    version="0.3.1",
    author="hit9",
    author_email="nz2324@126.com",
    description="""
    Cli todo tool with readable storage.
    """,
    license="MIT",
    keywords="todo readable commandline cli",
    url='https://github.com/secreek/todo',
    packages=['todo'],
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'todo = todo.app:main'
        ]
    },
    # Read the long description and requirements with an explicit
    # encoding so installation does not depend on the system locale.
    long_description=open('README.md', encoding='utf-8').read(),
    install_requires=open("requirements.txt", encoding='utf-8').read().splitlines()
)
|
import numpy
import math
# S
def mag(vec):
# Pythagoras
return numpy.array(math.sqrt(vec[0] ** 2 + vec[1] ** 2))
# S
def mag1(vec):
    """Length of a vector via numpy's Euclidean norm."""
    length = numpy.linalg.norm(vec)
    return length
# T
def unit(vec):
    """Return the unit vector pointing in the direction of vec.

    Fix: the stray debug print of the length has been removed; callers
    that want the length should call mag directly.
    """
    x, y = vec[0], vec[1]
    length = mag(vec)
    return numpy.array([x / length, y / length])
# T
def unit1(vec):
    # Vectorised normalisation: divides elementwise by the magnitude.
    return vec/mag(vec)
# U
def rot90(vec):
    """Rotate a 2-D vector 90 degrees counter-clockwise."""
    return numpy.array([-vec[1], vec[0]])
# V
def scalar_multiply(scalar: float, vec):
    """Multiply every component of vec by scalar."""
    scaled = [scalar * component for component in vec]
    return numpy.array(scaled)
def add(vec1, vec2):
    """Componentwise sum of two 2-D vectors."""
    # Only the first two components are used, as in the original.
    return numpy.array([vec1[0] + vec2[0], vec1[1] + vec2[1]])
# W
def sub(vec1, vec2):
    """Componentwise difference vec1 - vec2 of two 2-D vectors."""
    # Only the first two components are used, as in the original.
    return numpy.array([vec1[0] - vec2[0], vec1[1] - vec2[1]])
# W
# Demo vectors; note that c is defined but never used below.
a = [3, 2]
b = [8,7]
c = [1,5]
print(mag1(a)) # S
print(unit1(a)) # T
print(rot90(a)) # U
print(scalar_multiply(2, a)) # V
print(sub(add(a, b), b)) # W  (adding then subtracting b yields a again)
print(numpy.dot(a, a)) # Y
print(mag(a) * mag(a)) # Y  (dot(a, a) equals |a|^2)
print(numpy.dot(a,b)) # Z
print(numpy.dot(a, rot90(a))) # Æ  (perpendicular vectors: dot product is 0)
|
class Jugada:
    """Base class for game moves; each subclass defines a 'mensaje' tag."""

    @classmethod
    def toInstance(cls, mensaje):
        """Instantiate the direct subclass whose tag equals mensaje."""
        match = next(
            (sub for sub in cls.__subclasses__() if sub.mensaje == mensaje),
            None,
        )
        if match is None:
            raise RuntimeError()
        return match()

    def __str__(self):
        return str(self.to_message())

    def to_message(self):
        """Serializable representation of this move."""
        return {"mensaje": self.mensaje}
# Marker subclasses: each move type is identified solely by its
# 'mensaje' tag, which Jugada.toInstance uses for lookup.
class Null(Jugada):
    mensaje = ""
class Truco(Jugada):
    mensaje = "truco"
class ReTruco(Jugada):
    mensaje = "retruco"
class Vale4(Jugada):
    mensaje = "vale 4"
class Envido(Jugada):
    mensaje = "envido"
class RealEnvido(Jugada):
    mensaje = "real envido"
class FaltaEnvido(Jugada):
    mensaje = "falta envido"
class Quiero(Jugada):
    mensaje = "quiero"
class NoQuiero(Jugada):
    mensaje = "no quiero"
class BajarCarta(Jugada):
    """Move that plays a card; serializes the card's suit and number."""

    mensaje = "carta"

    def __init__(self, carta=None):
        self.carta = carta

    def to_message(self):
        carta = self.carta
        message = {"mensaje": self.mensaje}
        message["carta"] = {"palo": carta.palo, "numero": carta.numero}
        return message
# Move that folds (goes to the deck) instead of playing on.
class IrseAlMazo(Jugada):
    mensaje = "irse al mazo"
|
# class Vehicle(object):
# """docstring"""
# def __init__(self, color, doors, tires, vtype):
# """Constructor"""
# self.color = color
# self.doors = doors
# self.tires = tires
# self.vtype = vtype
# def brake(self):
# """
# Stop the car
# """
# return "%s braking" % self.vtype
# def drive(self):
# """
# Drive the car
# """
# return "I'm driving a %s %s!" % (self.color, self.vtype)
# if __name__ == "__main__":
# car = Vehicle("blue", 5, 4, "car")
# print(car.brake())
# print(car.drive())
# truck = Vehicle("red", 3, 6, "truck")
# print(truck.drive())
# print(truck.brake())
class Point1(object):
    """A 2-D point that can describe itself in Ukrainian."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def picters(self):
        # Runtime string preserved byte-for-byte from the original.
        coords = (self.x, self.y)
        return "Дана точка з кординатами х = %s, y = %s" % coords
p1 = Point1(30, 40)
print(p1.picters()) |
#/###################/#
# Import modules
#
#import
import ShareYourSystem as SYS
#define
# Build a Lifer model (presumably a leaky integrate-and-fire neuron —
# TODO confirm against ShareYourSystem docs) with the given stationary
# external current mean and noise amplitudes.
MyLifer=SYS.LiferClass(
    ).lif(
        _StationaryExternalCurrentMeanFloat=15.,
        _StationaryExternalCurrentNoiseFloat=21.
    )
#print
print('MyLifer is')
SYS._print(MyLifer)
|
# -*- coding: utf8 -*-
# author: ronniecao
# time: 2021/03/22
# description: environment of drawing
import copy
import json
import math
import numpy
import cv2
class Env:
"""
环境类:控制环境
"""
    def __init__(self, option):
        # Store the configuration dict (scale, move_ratio, ... under
        # option['option']).
        self.option = option
        # Color palette per element category, used when rendering.
        self.color_dict = {'wall': (200, 50, 50), 'beam': (50, 200, 50),
                           'column': (50, 50, 200), 'other': (50, 50, 50),
                           'text': (50, 50, 50), 'ywlabel': (150, 150, 50)}
def reset(self, path):
"""
初始化一个平面图
"""
# 读取数据
data = json.load(open(path, 'r'))
# 获取components
component_dict = {}
pic_left, pic_top, pic_right, pic_bottom = 1e6, 1e6, 0, 0
for component in data['components']:
ctype = component['category']
color = self.color_dict[component['category']]
for i in range(len(component['contour_pts'])):
point = component['contour_pts'][i]
pic_left = min([pic_left, point[0]])
pic_top = min([pic_top, point[1]])
pic_right = max([pic_right, point[0]])
pic_bottom = max([pic_bottom, point[1]])
left = min([p[0] for p in component['contour_pts']])
top = min([p[1] for p in component['contour_pts']])
right = max([p[0] for p in component['contour_pts']])
bottom = max([p[1] for p in component['contour_pts']])
component_dict[component['id']] = {'ctype': ctype, 'color': color,
'points': component['contour_pts'], 'box': [left, top, right, bottom]}
# 获取drawings
drawing_dict = {}
for drawing in data['drawing_components']:
drawing_dict[drawing['id']] = {'cids': drawing['related_componentids']}
# 获取texts
text_dict = {}
max_h = 0
for text in data['texts']:
left = min([t[0] for t in text['boundingbox']])
top = min([t[1] for t in text['boundingbox']])
right = max([t[0] for t in text['boundingbox']])
bottom = max([t[1] for t in text['boundingbox']])
if (text['orientation'] // 90) % 2 == 0:
max_h = max(max_h, bottom - top + 2)
else:
max_h = max(max_h, right - left + 2)
text_dict[text['id']] = {'box': [left, top, right, bottom]}
pic_left -= 10 * max_h
pic_top -= 10 * max_h
pic_right += 10 * max_h
pic_bottom += 10 * max_h
# obtain jzlabels
jzlabel_dict = {}
for label in data['labels_jz']:
left = min([text_dict[textid]['box'][0] for textid in label['related_textids']])
top = min([text_dict[textid]['box'][1] for textid in label['related_textids']])
right = max([text_dict[textid]['box'][2] for textid in label['related_textids']])
bottom = max([text_dict[textid]['box'][3] for textid in label['related_textids']])
point1 = tuple([t for t in label['leading_line'][0]])
point2 = tuple([t for t in label['leading_line'][1]])
pic_left = min([pic_left, point1[0], point2[0], left])
pic_top = min([pic_top, point1[1], point2[1], top])
pic_right = max([pic_right, point1[0], point2[0], right])
pic_bottom = max([pic_bottom, point1[1], point2[1], bottom])
jzlabel_dict[label['id']] = {'box': [left, top, right, bottom],
'points': label['leading_line'], 'did': label['related_componentid']}
# obtain ywlabels
ywlabel_dict = {}
for label in data['labels_yw']:
left = text_dict[label['related_textid']]['box'][0]
top = text_dict[label['related_textid']]['box'][1]
right = text_dict[label['related_textid']]['box'][2]
bottom = text_dict[label['related_textid']]['box'][3]
pic_left = min([pic_left, point1[0], point2[0], left])
pic_top = min([pic_top, point1[1], point2[1], top])
pic_right = max([pic_right, point1[0], point2[0], right])
pic_bottom = max([pic_bottom, point1[1], point2[1], bottom])
ywlabel_dict[label['id']] = {'box': [left, top, right, bottom]}
# 获取页面边缘
pic_left = int(math.floor(pic_left))
pic_top = int(math.floor(pic_top))
pic_right = int(math.ceil(pic_right))
pic_bottom = int(math.ceil(pic_bottom))
# 调整component的坐标
for cid in component_dict:
component = component_dict[cid]
for i in range(len(component['points'])):
component['points'][i][0] -= pic_left
component['points'][i][1] -= pic_top
component['box'][0] -= pic_left
component['box'][1] -= pic_top
component['box'][2] -= pic_left
component['box'][3] -= pic_top
# 调整label的坐标
for lid in jzlabel_dict:
jzlabel = jzlabel_dict[lid]
jzlabel['box'][0] -= pic_left
jzlabel['box'][1] -= pic_top
jzlabel['box'][2] -= pic_left
jzlabel['box'][3] -= pic_top
jzlabel['points'][0][0] -= pic_left
jzlabel['points'][0][1] -= pic_top
jzlabel['points'][1][0] -= pic_left
jzlabel['points'][1][1] -= pic_top
jzlabel['orientation'] = 'vertical' if \
abs(jzlabel['points'][0][0] - jzlabel['points'][1][0]) <= 1 else 'horizontal'
# 调整label的坐标
for lid in ywlabel_dict:
ywlabel = ywlabel_dict[lid]
ywlabel['box'][0] -= pic_left
ywlabel['box'][1] -= pic_top
ywlabel['box'][2] -= pic_left
ywlabel['box'][3] -= pic_top
self.info = {
'pic_box': [pic_left, pic_top, pic_right, pic_bottom],
'component_dict': component_dict,
'drawing_dict': drawing_dict,
'jzlabel_dict': jzlabel_dict,
'ywlabel_dict': ywlabel_dict
}
# 获取重合面积
self.info['text_overlap_area'], _ = \
self.get_text_overlap_area(self.info)
self.info['yw_overlap_area'], _ = \
self.get_jz_and_yw_overlap_area(self.info)
self.info['line_overlap_area'], _ = \
self.get_line_and_beam_overlap_area(self.info)
self.info['overlap_area'] = self.info['text_overlap_area'] + \
self.info['yw_overlap_area'] + self.info['line_overlap_area']
return self.info
    def step(self, action=[]):
        """Apply an action; return (next_state, reward, is_end, is_valid).

        action is [jzlabel_id, move_type]. NOTE(review): the mutable
        default action=[] would be shared across calls; callers always
        pass an explicit action, so it is documented rather than changed.
        """
        # Compute the new state by moving the selected jzlabel.
        action_jz, action_move = action[0], action[1]
        jzlabel = self.info['jzlabel_dict'][action_jz]
        is_valid, new_jzlabel = self.move(jzlabel=jzlabel, move_type=action_move)
        if not is_valid:
            return None, None, None, False
        self.info['jzlabel_dict'][action_jz] = new_jzlabel
        # Reward is the reduction in total overlap area.
        text_overlap_area, _ = self.get_text_overlap_area(self.info)
        yw_overlap_area, _ = self.get_jz_and_yw_overlap_area(self.info)
        line_overlap_area, _ = self.get_line_and_beam_overlap_area(self.info)
        overlap_area = text_overlap_area + yw_overlap_area + line_overlap_area
        reward = self.info['overlap_area'] - overlap_area
        self.info['overlap_area'] = overlap_area
        # The episode ends once nothing overlaps any more.
        is_end = bool(self.info['overlap_area'] == 0)
        return self.info, reward, is_end, True
    def move(self, jzlabel, move_type):
        """Apply move_type to a copy of jzlabel; return (is_valid, new_jzlabel).

        Move types: 0 horizontal flip, 1 vertical flip, 2 left, 3 right,
        4 up, 5 down. Flips mirror the box (and, for lines not along the
        flip axis, the line's free end) about the leading line's anchor
        point points[0]; translations shift by box-size / move_ratio.
        The anchor end of the line moves only when the move direction is
        along the label's own orientation.
        """
        ratio = self.option['option']['move_ratio']
        new_jzlabel = copy.deepcopy(jzlabel)
        if move_type == 0:
            # Horizontal flip about points[0][0].
            if jzlabel['orientation'] == 'vertical':
                new_left = -jzlabel['box'][0] + 2 * jzlabel['points'][0][0]
                new_right = -jzlabel['box'][2] + 2 * jzlabel['points'][0][0]
                if new_right > new_left:
                    new_jzlabel['box'][0], new_jzlabel['box'][2] = new_left, new_right
                else:
                    new_jzlabel['box'][0], new_jzlabel['box'][2] = new_right, new_left
            else:
                new_left = -jzlabel['box'][0] + 2 * jzlabel['points'][0][0]
                new_right = -jzlabel['box'][2] + 2 * jzlabel['points'][0][0]
                if new_right > new_left:
                    new_jzlabel['box'][0], new_jzlabel['box'][2] = new_left, new_right
                else:
                    new_jzlabel['box'][0], new_jzlabel['box'][2] = new_right, new_left
                # Horizontal line: mirror the free end of the line too.
                new_jzlabel['points'][1][0] = \
                    -jzlabel['points'][1][0] + 2 * jzlabel['points'][0][0]
        elif move_type == 1:
            # Vertical flip about points[0][1].
            if jzlabel['orientation'] == 'horizontal':
                new_top = -jzlabel['box'][1] + 2 * jzlabel['points'][0][1]
                new_bottom = -jzlabel['box'][3] + 2 * jzlabel['points'][0][1]
                if new_bottom > new_top:
                    new_jzlabel['box'][1], new_jzlabel['box'][3] = new_top, new_bottom
                else:
                    new_jzlabel['box'][1], new_jzlabel['box'][3] = new_bottom, new_top
            else:
                new_top = -jzlabel['box'][1] + 2 * jzlabel['points'][0][1]
                new_bottom = -jzlabel['box'][3] + 2 * jzlabel['points'][0][1]
                if new_bottom > new_top:
                    new_jzlabel['box'][1], new_jzlabel['box'][3] = new_top, new_bottom
                else:
                    new_jzlabel['box'][1], new_jzlabel['box'][3] = new_bottom, new_top
                # Vertical line: mirror the free end of the line too.
                new_jzlabel['points'][1][1] = \
                    -jzlabel['points'][1][1] + 2 * jzlabel['points'][0][1]
        elif move_type == 2:
            # Translate left by width / ratio.
            if jzlabel['orientation'] == 'vertical':
                width_offset = int((jzlabel['box'][2] - jzlabel['box'][0]) / ratio)
                new_jzlabel['box'][0] -= width_offset
                new_jzlabel['box'][2] -= width_offset
                new_jzlabel['points'][0][0] -= width_offset
                new_jzlabel['points'][1][0] -= width_offset
            else:
                width_offset = int((jzlabel['box'][2] - jzlabel['box'][0]) / ratio)
                new_jzlabel['box'][0] -= width_offset
                new_jzlabel['box'][2] -= width_offset
                new_jzlabel['points'][1][0] -= width_offset
        elif move_type == 3:
            # Translate right by width / ratio.
            if jzlabel['orientation'] == 'vertical':
                width_offset = int((jzlabel['box'][2] - jzlabel['box'][0]) / ratio)
                new_jzlabel['box'][0] += width_offset
                new_jzlabel['box'][2] += width_offset
                new_jzlabel['points'][0][0] += width_offset
                new_jzlabel['points'][1][0] += width_offset
            else:
                width_offset = int((jzlabel['box'][2] - jzlabel['box'][0]) / ratio)
                new_jzlabel['box'][0] += width_offset
                new_jzlabel['box'][2] += width_offset
                new_jzlabel['points'][1][0] += width_offset
        elif move_type == 4:
            # Translate up by height / ratio.
            if jzlabel['orientation'] == 'horizontal':
                height_offset = int((jzlabel['box'][3] - jzlabel['box'][1]) / ratio)
                new_jzlabel['box'][1] -= height_offset
                new_jzlabel['box'][3] -= height_offset
                new_jzlabel['points'][0][1] -= height_offset
                new_jzlabel['points'][1][1] -= height_offset
            else:
                height_offset = int((jzlabel['box'][3] - jzlabel['box'][1]) / ratio)
                new_jzlabel['box'][1] -= height_offset
                new_jzlabel['box'][3] -= height_offset
                new_jzlabel['points'][1][1] -= height_offset
        elif move_type == 5:
            # Translate down by height / ratio.
            if jzlabel['orientation'] == 'horizontal':
                height_offset = int((jzlabel['box'][3] - jzlabel['box'][1]) / ratio)
                new_jzlabel['box'][1] += height_offset
                new_jzlabel['box'][3] += height_offset
                new_jzlabel['points'][0][1] += height_offset
                new_jzlabel['points'][1][1] += height_offset
            else:
                height_offset = int((jzlabel['box'][3] - jzlabel['box'][1]) / ratio)
                new_jzlabel['box'][1] += height_offset
                new_jzlabel['box'][3] += height_offset
                new_jzlabel['points'][1][1] += height_offset
        # Check whether the moved jzlabel is still valid.
        is_valid = self._judge_jzlabel_valid(new_jzlabel)
        return is_valid, new_jzlabel
    def render(self, index):
        """Render the current state to a BGR image and return it.

        NOTE(review): the canvas size divides page extent by a literal
        20, while element coordinates divide by option['option']['scale']
        — this only lines up when scale == 20; confirm.
        """
        scale = self.option['option']['scale']
        width = int((self.info['pic_box'][2] - self.info['pic_box'][0] + 1) / 20)
        height = int((self.info['pic_box'][3] - self.info['pic_box'][1] + 1) / 20)
        image = numpy.zeros((height, width, 3), dtype='uint8') + 255
        # Beams/columns are drawn as rectangles, other components as
        # closed polylines.
        for cid in self.info['component_dict']:
            component = self.info['component_dict'][cid]
            if component['ctype'] in ['beam', 'column']:
                left = int(component['box'][0] / scale)
                top = int(component['box'][1] / scale)
                right = int(component['box'][2] / scale)
                bottom = int(component['box'][3] / scale)
                cv2.rectangle(image, (left, top), (right, bottom), component['color'], 2)
            else:
                for i in range(-1, len(component['points'])-1, 1):
                    point1 = tuple([int(t/scale) for t in component['points'][i]])
                    point2 = tuple([int(t/scale) for t in component['points'][i+1]])
                    cv2.line(image, point1, point2, component['color'], 2)
        # ywlabels: boxes only.
        for lid in self.info['ywlabel_dict']:
            ywlabel = self.info['ywlabel_dict'][lid]
            left = int(ywlabel['box'][0] / scale)
            top = int(ywlabel['box'][1] / scale)
            right = int(ywlabel['box'][2] / scale)
            bottom = int(ywlabel['box'][3] / scale)
            cv2.rectangle(image, (left, top), (right, bottom), self.color_dict['ywlabel'], 2)
        # jzlabels: box plus leading line.
        for lid in self.info['jzlabel_dict']:
            jzlabel = self.info['jzlabel_dict'][lid]
            left = int(jzlabel['box'][0] / scale)
            top = int(jzlabel['box'][1] / scale)
            right = int(jzlabel['box'][2] / scale)
            bottom = int(jzlabel['box'][3] / scale)
            cv2.rectangle(image, (left, top), (right, bottom), self.color_dict['text'], 1)
            point1 = tuple([int(t/scale) for t in jzlabel['points'][0]])
            point2 = tuple([int(t/scale) for t in jzlabel['points'][1]])
            cv2.line(image, point1, point2, self.color_dict['text'], 2)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        # Draw the frame index in the top-left corner.
        image = cv2.putText(image, str(index), (10, 50),
            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0), 2, cv2.LINE_AA)
        return image
def _judge_jzlabel_valid(self, jzlabel):
"""
判断jzlabel是否符合规范
"""
is_valid = False
# 判断是否页面范围内
pic_width = self.info['pic_box'][2] - self.info['pic_box'][0]
pic_height = self.info['pic_box'][3] - self.info['pic_box'][1]
if 0 <= jzlabel['box'][0] < jzlabel['box'][2] < pic_width - 1 and \
0 <= jzlabel['box'][1] < jzlabel['box'][3] < pic_height - 1:
is_in_page = True
# 判断引线是否在梁的范围内
if jzlabel['orientation'] == 'horizontal':
pos = jzlabel['points'][0][1]
is_in_beam = False
for cid in self.info['drawing_dict'][jzlabel['did']]['cids']:
top = self.info['component_dict'][cid]['box'][1]
bottom = self.info['component_dict'][cid]['box'][3]
if top <= pos <= bottom:
is_in_beam = True
break
else:
pos = jzlabel['points'][0][0]
is_in_beam = False
for cid in self.info['drawing_dict'][jzlabel['did']]['cids']:
left = self.info['component_dict'][cid]['box'][0]
right = self.info['component_dict'][cid]['box'][2]
if left <= pos <= right:
is_in_beam = True
break
is_valid = bool(is_in_page and is_in_beam)
return is_valid
def get_text_overlap_area(self, info):
"""
获取jzlabel之间的重合面积
"""
# 寻找jzlabel之间的overlap
scale = self.option['option']['scale']
overlap_list = []
jzkeys = list(info['jzlabel_dict'].keys())
for i in range(len(jzkeys)):
[lefta, topa, righta, bottoma] = info['jzlabel_dict'][jzkeys[i]]['box']
for j in range(i+1, len(jzkeys)):
[leftb, topb, rightb, bottomb] = info['jzlabel_dict'][jzkeys[j]]['box']
lefti = max(lefta, leftb)
topi = max(topa, topb)
righti = min(righta, rightb)
bottomi = min(bottoma, bottomb)
if righti > lefti and bottomi > topi:
area = (righti - lefti) * (bottomi - topi) / (scale * scale)
# print(jzkeys[i], jzkeys[j], area)
overlap_list.append([jzkeys[i], jzkeys[j], area])
# 对重合的面积求和
overlap_area = sum([area for _, _, area in overlap_list])
return overlap_area, overlap_list
def get_jz_and_yw_overlap_area(self, info):
"""
获取jzlabel和ywlabel之间的重合面积
"""
scale = self.option['option']['scale']
overlap_list = []
jzkeys = list(info['jzlabel_dict'].keys())
ywkeys = list(info['ywlabel_dict'].keys())
for i in range(len(jzkeys)):
[lefta, topa, righta, bottoma] = info['jzlabel_dict'][jzkeys[i]]['box']
for j in range(len(ywkeys)):
[leftb, topb, rightb, bottomb] = info['ywlabel_dict'][ywkeys[j]]['box']
lefti = max(lefta, leftb)
topi = max(topa, topb)
righti = min(righta, rightb)
bottomi = min(bottoma, bottomb)
if righti > lefti and bottomi > topi:
area = (righti - lefti) * (bottomi - topi) / (scale * scale)
overlap_list.append([jzkeys[i], ywkeys[j], area])
# 对重合的面积求和
overlap_area = sum([area for _, _, area in overlap_list])
return overlap_area, overlap_list
    def get_line_and_beam_overlap_area(self, info):
        """Penalty area for jzlabel leading lines that cross foreign beams.

        A line 'crosses' a beam when its anchor (points[0]) and free end
        (points[1]) lie on opposite sides of one of the beam's edges and
        the anchor is within the beam's extent on the other axis; beams
        belonging to the label's own drawing are exempt. The area is a
        fixed 10-unit width times the crossing depth.
        """
        # Find overlaps between each jzlabel's leading line and beams.
        scale = self.option['option']['scale']
        overlap_list = []
        jzkeys = list(info['jzlabel_dict'].keys())
        for i in range(len(jzkeys)):
            jzlabel = info['jzlabel_dict'][jzkeys[i]]
            drawing = info['drawing_dict'][jzlabel['did']]['cids']
            [point1, point2] = jzlabel['points']
            for cid in info['component_dict']:
                component = info['component_dict'][cid]
                if component['ctype'] == 'beam' and cid not in drawing:
                    [cleft, ctop, cright, cbottom] = component['box']
                    if jzlabel['orientation'] == 'vertical':
                        # Anchor below the beam's bottom, free end above it:
                        # the line pierces the beam from below.
                        if point1[1] > cbottom and point2[1] < cbottom and \
                            cleft <= point1[0] <= cright:
                            area = 10 * (cbottom - point2[1])
                            overlap_list.append([jzkeys[i], cid, area])
                        # Anchor above the beam's top, free end below it:
                        # the line pierces the beam from above.
                        elif point1[1] < ctop and point2[1] > ctop and \
                            cleft <= point1[0] <= cright:
                            area = 10 * (point2[1] - ctop)
                            overlap_list.append([jzkeys[i], cid, area])
                    else:
                        # Anchor right of the beam, free end left of its
                        # right edge: the line pierces from the right.
                        if point1[0] > cright and point2[0] < cright and \
                            ctop <= point1[1] <= cbottom:
                            area = 10 * (cright - point2[0])
                            overlap_list.append([jzkeys[i], cid, area])
                        # Anchor left of the beam, free end right of its
                        # left edge: the line pierces from the left.
                        elif point1[0] < cleft and point2[0] > cleft and \
                            ctop <= point1[1] <= cbottom:
                            area = 10 * (point2[0] - cleft)
                            overlap_list.append([jzkeys[i], cid, area])
        # Sum the overlapped areas.
        overlap_area = sum([area for _, _, area in overlap_list])
        return overlap_area, overlap_list
def get_state_string(self, info):
"""
获取state string
"""
keys = sorted(list(info['jzlabel_dict'].keys()))
jz_strings = []
for key in keys:
jzlabel = info['jzlabel_dict'][key]
box_string = ','.join([str(round(t, 2)) for t in jzlabel['box']])
line_string = ','.join([str(round(t, 2)) for t in jzlabel['points'][0]] + \
[str(round(t, 2)) for t in jzlabel['points'][0]])
jz_strings.append('%s@%s&%s' % (key, box_string, line_string))
return ';'.join(jz_strings) |
from django.shortcuts import render, redirect
import serial
# Serial link to the Arduino, opened once at import time.
# NOTE(review): a hard-coded port and no error handling mean the whole
# module fails to import when COM3 is unavailable — confirm acceptable.
arduino = serial.Serial('COM3', 9600)
def home(request):
    """Handle the LED control page.

    Each supported GET parameter maps to the one-character command that
    is printed and sent to the Arduino over serial; after sending, the
    client is redirected back to this view (post/redirect/get). With no
    recognized parameter, the control page is rendered.

    The original repeated the same print/write/redirect stanza six
    times; the stanzas are folded into a single lookup table, preserving
    the original parameter precedence.
    """
    commands = (
        ('led1on', '1'),
        ('led1off', '2'),
        ('led2on', '3'),
        ('led2off', '4'),
        ('led3on', '5'),
        ('led3off', '6'),
    )
    for param, led in commands:
        if request.GET.get(param):
            print(led)
            arduino.write(led.encode('ascii'))
            return redirect(home)
    return render(request, 'home.html')
"""Extract games from 20 Questions game HITs.
See ``python extractgames.py --help`` for more information.
"""
import json
import logging
import click
from scripts import _utils
logger = logging.getLogger(__name__)
# main function
@click.command(
    context_settings={
        'help_option_names': ['-h', '--help']
    })
@click.argument(
    'xml_dir',
    type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument(
    'output_path',
    type=click.Path(exists=False, file_okay=True, dir_okay=False))
def extractgames(xml_dir, output_path):
    """Extract games from XML_DIR and write to OUTPUT_PATH.
    Extract the 20 Questions game data from a batch of 20 Questions
    HITs. XML_DIR should be the XML directory of one of the 20 Questions
    HIT batches, extracted with AMTI. OUTPUT_PATH is the location to
    which the data will be written.
    """
    # submissions : the form data submitted from the twentyquestions
    # HITs as a list of dictionaries mapping the question identifiers to
    # the free text, i.e.:
    #
    #     [{'gameRoomJson': game_room_json_string}, ...]
    #
    submissions = _utils.extract_xml_dir(xml_dir)
    # deduplicate the games because each crowdworker who participates in
    # the game submits a copy of the game data (set comprehension
    # replaces the redundant set([...]) construction).
    game_jsons = {
        submission['gameRoomJson']
        for submission in submissions
    }
    # write out the data, one game JSON per line
    with click.open_file(output_path, 'w') as output_file:
        output_file.write('\n'.join(game_jsons))
if __name__ == '__main__':
    # Click parses sys.argv itself.
    extractgames()
|
import os;
import types
from project.tools.spider.spider_text import *
from pathlib import Path;
from project.spider_enterprise_site.spider.SpiderCssFile import SpiderCssFile
from project.spider_enterprise_site.spider.SpiderJsFile import SpiderJsFile
from project.spider_enterprise_site.spider.SpiderImageFile import SpiderImageFile
import re
class SiteSpider :
    @staticmethod
    def setLevel(level):
        # Set the remaining crawl depth (class-wide, shared state).
        SiteSpider.level = level
    @staticmethod
    def getLevel():
        # Remaining crawl depth (class-wide, shared state).
        return SiteSpider.level
downingList = []
level = 0
cssRe = [
re.compile(r'link[\s\d\w"\'=]*href="([^"]+)"', re.I|re.M),
re.compile(r'link[\s\d\w"\'=]*href=\'([^\']+)\'', re.I)
]
cssDir = "static_source/css"
jsRe = [
#<script src="http://bk.st.styleweb.com.cn/editor/js/jquery.min.js"></script>
re.compile(r'script[\s\d\w"\']*src="([^"]+)"', re.I),
re.compile(r"script[\s\d\w\"']*src='([^']+)'", re.I)
]
jsDir = "static_source/js"
imageRe = [
#<script src="http://bk.st.styleweb.com.cn/editor/image/jquery.min.image"></script>
re.compile(r'<img[\s\d\w"\']*(?<!data-)src="([^"]+)"', re.I),
re.compile(r"<img[\s\d\w\"']*(?<!data-)src='([^']+)'", re.I)
]
imageDir = "static_source/image"
htmlRe = [
#<script src="http://bk.st.styleweb.com.cn/editor/html/jquery.min.html"></script>
#(?<!\.jpg) href不以'.jpg'结束
re.compile(r'<a[\s\d\w"\']*href="([^"]+)(?<!\.jpg)"', re.I),
re.compile(r"<a[\s\d\w\"']*href='([^']+)(?<!\.jpg)'", re.I)
]
htmlDir = "html"
htmlContent = ""
def __init__(self, domain, indexPage="", spiderChildLevel=False, directory='', saveToDirectory='down_site/'):
self.domain = domain
self.indexPage = indexPage if indexPage==None else ""
self.savePath = os.getcwd() + "/" + saveToDirectory + self.__getDomainName()
self.spiderChildLevel = spiderChildLevel
self.__checkAndCreateSaveDir()
self.directory = directory
def __checkAndCreateSaveDir(self):
if os.path.exists(self.savePath) == False :
os.makedirs(self.savePath)
    def run(self,url=''):
        # Skip URLs that were already downloaded (class-wide list).
        if url in SiteSpider.downingList :
            return False
        SiteSpider.downingList.append(url)
        # Download the requested page (the index page when url is '').
        self.__spiderSpecialPage(url)
        # Decrement the crawl depth so recursive calls know the level.
        level = self.getLevel()
        if level > 0 :
            SiteSpider.setLevel(level - 1)
    def __spiderSpecialPage(self, url=''):
        # Crawl either the site's index page (url == '') or the given url.
        if url=='' :
            indexPage = self.__getPageName()
            self.__spiderPage(indexPage)
        else :
            self.__spiderPage(url)
        pass
    def __spiderPage(self, url):
        # Crawl one page: fetch the html, localize its css, js and
        # images, optionally follow child links, then save the result.
        # 1. fetch the html
        self.__spiderHtml(url)
        # 2. fetch the css
        self.__spiderCss(self.htmlContent)
        # 3. fetch the js
        self.__spiderJs(self.htmlContent)
        # 4. fetch the images
        self.__spiderImage(self.htmlContent)
        if self.spiderChildLevel :
            self.__spiderJumpHtml(self.htmlContent)
        self.__saveHtml(url)
    def __spiderHtml(self, url):
        # Download the raw html for url into self.htmlContent.
        # NOTE(review): fileName/filePath are computed but never used
        # here — saving happens later in __saveHtml; confirm leftovers.
        try:
            spider = SpiderText(url)
            fileName = self.getPageFileName(url)
            filePath = self.savePath + "/" + self.directory + fileName
            spider.setUrlContent(url)
            self.__updateContent(str(spider.content, 'utf-8'))
        except Exception as e:
            print(str(e))
            pass;
def __saveHtml(self, url):
    """Persist the (rewritten) HTML for ``url`` under the save path."""
    try:
        writer = SpiderText()
        target = self.savePath + '/' + self.getPageFileName(url)
        writer.setContent(bytes(self.htmlContent, 'utf-8'))
        writer.save(target)
    except Exception as error:
        print(str(error))
def __spiderCss(self, htmlContent):
    """Download every referenced CSS file and relink it to the local copy."""
    handler = SpiderCssFile(htmlContent, self.cssRe, relativeDir=self.cssDir,
                            rootDir=self.savePath, domain=self.domain,
                            directory=self.directory)
    handler.run()
    self.__updateContent(handler.content)
def __updateContent(self, content):
    # Cache the latest (possibly rewritten) HTML for the next pipeline step.
    self.htmlContent = content
def __spiderJs(self, htmlContent):
    """Download every referenced JS file and relink it to the local copy."""
    handler = SpiderJsFile(htmlContent, self.jsRe, relativeDir=self.jsDir,
                           rootDir=self.savePath, domain=self.domain,
                           directory=self.directory)
    handler.run()
    self.__updateContent(handler.content)
def __spiderImage(self, htmlContent):
    """Download every referenced image and relink it to the local copy."""
    handler = SpiderImageFile(htmlContent, self.imageRe,
                              relativeDir=self.imageDir,
                              rootDir=self.savePath, domain=self.domain,
                              directory=self.directory)
    handler.run()
    self.__updateContent(handler.content)
def __spiderJumpHtml(self, htmlContent):
    """Mirror the pages this page links to (one extra level)."""
    handler = SpiderHtmlFile(htmlContent, self.htmlRe,
                             relativeDir=self.htmlDir,
                             rootDir=self.savePath, domain=self.domain)
    handler.run()
    self.__updateContent(handler.content)
def __getPageName(self, url='', isAbsolutPath=True):
    """Return the front-page URL (when ``url`` is empty) or a sanitized
    local save path for ``url``.

    Bug fix: the original tested ``url[0,1] != '/'`` -- indexing a str with
    a tuple raises TypeError; ``url[0:1]`` was intended.
    """
    if url == '':
        # Front-page address of the crawl.
        return self.domain + "/" + self.directory + self.indexPage
    r = re.compile('^http.*', re.I)
    if r.match(url) is None and url[0:1] != '/':
        url = self.domain + self.directory + url
    elif r.match(url) is None:
        url = self.domain + url
    # Flatten path separators and query characters into underscores.
    fileName = url.replace('/', '__')
    fileName = fileName.replace('\\', '___')
    fileName = fileName.replace(':', '____')
    fileName = fileName.replace('?', '_____')
    fileName = fileName.replace('&', '_______')
    fileName = fileName.replace('%', '________')
    fileName = fileName[0:120]  # keep the name filesystem-safe
    relativeDir = self.__getDomainName()
    if isAbsolutPath:
        saveDir = os.getcwd() + '/' + relativeDir + self.directory
        try:
            if not os.path.exists(saveDir):
                os.makedirs(saveDir)
            savePath = saveDir + '/' + fileName + '.html'
        except Exception as e:
            savePath = False
            print(str(e))
    else:
        savePath = relativeDir + "/" + self.directory + fileName + '.html'
    return savePath
def getPageFileName(self, url):
    """Derive a flat local file name for ``url`` (e.g. ``dir_page.html``)."""
    fileName = ""
    if isinstance(url, str):
        stripped = url.replace("http://", "").replace("https://", "")
        parts = stripped.split('/')
        last = parts[-1]
        # Give extension-less trailing segments an .html suffix.
        fileName = (last + '.html') if last != "" and '.' not in last else last
        fileName = '_'.join(parts[:-1]) + '_' + fileName
        domainName = self.__getDomainName()
        if fileName.find(domainName) != -1:
            fileName = fileName.replace(domainName + "_", "")
        elif fileName[0] == "_":
            fileName = fileName[1:]
    if fileName == "":
        fileName = 'index.html'
    return fileName
def __getDomainName(self):
    """Return the last path segment of the configured domain (its host)."""
    if isinstance(self.domain, str):
        return self.domain.split('/')[-1]
    return ""
class SpiderHtmlFile:
    """Extract, download and relink the HTML pages referenced by a page.

    ``content`` is the HTML being processed; ``htmlRe`` is a list of compiled
    regexes whose group 1 captures an href value.
    """
    # NOTE(review): kept for backward compatibility -- class-level mutable
    # attributes are shared across instances; the methods always rebind
    # ``self.urlList`` before use, so these act only as defaults.
    urlList = []
    rootDir = []

    def __init__(self, content, htmlRe, relativeDir, rootDir, domain):
        self.content = content
        self.htmlRe = htmlRe
        self.relativeDir = relativeDir
        self.rootDir = rootDir
        self.domain = domain

    def run(self):
        """Full pipeline: collect URLs, mirror the pages, rewrite the links."""
        self.getHtmlUrlList()
        self.saveHtmlFiles()
        self.replaceContent()

    def getHtmlUrlList(self):
        """Collect candidate page URLs from self.content into self.urlList.

        Bug fix: the original removed items from the list it was enumerating,
        which silently skipped the element following every rejected URL.
        """
        found = []
        for pattern in self.htmlRe:
            found.extend(pattern.findall(self.content))
        skip_exact = ['', '/', '/index.html', '/index.php', '/index',
                      'index', 'index.html', 'index.php']
        skip_substrings = ('mailto:', 'qq.com', 'baidu.com', 'javascript:;')
        kept = []
        for url in found:
            url = url.replace(' ', '')
            if url in skip_exact:
                continue
            if any(marker in url for marker in skip_substrings):
                continue
            if url not in kept:  # de-duplicate while preserving order
                kept.append(url)
        self.urlList = kept

    def saveHtmlFiles(self):
        """Download every collected URL via SiteSpider (one level deep).

        Failed/empty downloads are dropped from self.urlList so that
        replaceContent() never rewrites a link to a page we don't have.
        """
        if not self.urlList:
            return
        level = SiteSpider.getLevel()
        if level < 1:
            SiteSpider.setLevel(level + 1)
        # Iterate over a copy: failures remove entries from self.urlList.
        for oldUrl in list(self.urlList):
            url = oldUrl
            try:
                spider = SiteSpider(self.domain, spiderChildLevel=True)
                if url.find(self.domain) == -1 and url[0:3] != "http":
                    url = self.domain + '/' + url if url[0] != '/' else self.domain + url
                result = spider.run(url)
                if result != False and spider.htmlContent == "":
                    self.urlList.remove(oldUrl)
            except Exception:
                self.urlList.remove(oldUrl)
                print("failed url:", oldUrl)

    def replaceContent(self):
        """Rewrite every downloaded URL in the content to its local name."""
        if not self.urlList:
            return
        spider = SiteSpider(self.domain)
        for url in self.urlList:
            if isinstance(url, str):
                self.content = self.content.replace(url, spider.getPageFileName(url))

    def getNewFilePath(self, url, isAbsolutPath=True):
        """Map ``url`` to a sanitized local .html path (dir created on demand)."""
        r = re.compile('^http.*', re.I)
        url = url if r.match(url) is not None else self.domain + url
        # Flatten path separators and query characters into underscores.
        fileName = url.replace('/', '__')
        fileName = fileName.replace('\\', '___')
        fileName = fileName.replace(':', '____')
        fileName = fileName.replace('?', '_____')
        fileName = fileName.replace('&', '_______')
        fileName = fileName.replace('%', '________')
        fileName = fileName[0:120]  # keep the name filesystem-safe
        _tmpRelativeDir = self.relativeDir
        if isAbsolutPath:
            _tmpRelativeDir = self.rootDir + '/' + _tmpRelativeDir
            try:
                if not os.path.exists(_tmpRelativeDir):
                    os.makedirs(_tmpRelativeDir)
                savePath = _tmpRelativeDir + '/' + fileName + '.html'
            except Exception as e:
                savePath = False
                print(str(e))
        else:
            savePath = _tmpRelativeDir + "/" + fileName + '.html'
        return savePath
if __name__ == '__main__' :
    # Example crawls kept for reference:
    # domain = "http://chongwumoban.s5.cn.vc"
    # SiteSpider.setLevel(SiteSpider.getLevel()+1)
    # spider = SiteSpider("http://demo.cssmoban.com", directory='/cssthemes4/sbtp_60_sept/', spiderChildLevel=True)
    # spider.run()
    # NOTE(review): ``domain`` below is unused -- the spider is constructed
    # with a different site; presumably left over from an earlier run.
    domain = "http://www.haorooms.com"
    # Allow one extra level of link-following for this crawl.
    SiteSpider.setLevel(SiteSpider.getLevel()+1)
    spider = SiteSpider("http://chongwumoban.s5.cn.vc", directory='', spiderChildLevel=True)
    spider.run()
    # spider.run("http://chongwumoban.s5.cn.vc/fuwu")
|
from datetime import datetime
from voluptuous import Schema, Required, MultipleInvalid
import traceback
def Date(fmt='%Y-%m-%d'):
    """Return a validator that parses a date string using ``fmt``."""
    def _parse(value):
        return datetime.strptime(value, fmt)
    return _parse
# Voluptuous schema describing one ad-revenue event.
# NOTE(review): this rebinds the name ``Schema`` from the voluptuous *class*
# to a schema *instance*; the tests below rely on calling that instance.
Schema = Schema({Required('ad_network'): str,
                 Required('date'): Date(),
                 Required('app_name'): str,
                 Required('unit_id'): str,
                 Required('request'): int,
                 Required('revenue'): float,
                 Required('imp'): int})
# Target Python type for each field, used to coerce incoming string values
# before schema validation.
typeRule = {"ad_network": str,
            "date": str,
            "app_name": str,
            "unit_id": str,
            "request": int,
            "revenue": float,
            "imp": int}
def test_regularFormat():
    """A fully populated, well-typed event must pass schema validation."""
    event = {"ad_network": "FOO",
             "date": "2019-06-05",
             "app_name": "LINETV",
             "unit_id": "55665201314",
             "request": "100",
             "revenue": "0.00365325",
             "imp": "23"}
    try:
        # Drop falsy fields, then coerce the remaining strings to their types.
        result = {key: value for key, value in event.items() if event.get(key)}
        validateResult = {key: float(value) if typeRule[key] == float
                          else int(value) if typeRule[key] == int
                          else value
                          for key, value in result.items()}
        Schema(validateResult)
        print(result)
        assert True
    except Exception:
        # bug fix: the original bare ``except:`` also swallowed
        # SystemExit/KeyboardInterrupt; Exception is sufficient here.
        print('API format error')
        assert False
def test_keyloss():
    """Validate an event that is missing the required 'ad_network' field."""
    event = {
        "date": "2019-06-05",
        "app_name": "LINETV",
        "unit_id": "55665201314",
        "request": "100",
        "revenue": "0.00365325",
        "imp": "23"}
    try:
        # Drop falsy fields, then coerce the remaining strings to their types.
        result = {key: value for key, value in event.items() if event.get(key)}
        validateResult = {key: float(value) if typeRule[key] == float
                          else int(value) if typeRule[key] == int
                          else value
                          for key, value in result.items()}
        Schema(validateResult)
        print(result)
        assert True
    except Exception:
        # bug fix: narrowed the original bare ``except:`` to Exception.
        print('API format error')
        assert False
def test_valueTypeError():
    """Validate an event whose 'request' field is not numeric."""
    event = {"ad_network": "FOO",
             "date": "2019-06-05",
             "app_name": "LINETV",
             "unit_id": "55665201314",
             "request": "hi",
             "revenue": "0.00365325",
             "imp": "23"}
    try:
        # Drop falsy fields, then coerce the remaining strings to their types;
        # int("hi") raises ValueError, taken as a format error below.
        result = {key: value for key, value in event.items() if event.get(key)}
        validateResult = {key: float(value) if typeRule[key] == float
                          else int(value) if typeRule[key] == int
                          else value
                          for key, value in result.items()}
        Schema(validateResult)
        print(result)
        assert True
    except Exception:
        # bug fix: narrowed the original bare ``except:`` to Exception.
        print('API format error')
        assert False
def get_summ(one, two, delimiter='&'):
    """Capitalize both words and join them with ``delimiter``."""
    first = one.capitalize()
    second = two.capitalize()
    return f'{first} {delimiter} {second}'


print(get_summ("lEARN", "pyTHOn"))
# Inter-process communication
# Queue: first in, first out
from multiprocessing import Queue
from multiprocessing import Process
import time
# q.put(item)  # enqueue an element
# q.get()      # dequeue an element
# q.full()     # is the queue full?
# q.empty()    # is the queue empty?
# q.qsize()    # current queue length
def download(q):
    """Producer: pretend to download three images, pushing each onto ``q``."""
    for image in ('girl.jpg', 'boy.jpg', 'man.jpg'):
        print('正在下载:', image)
        time.sleep(0.5)  # simulate download latency
        q.put(image)
def getfile(q):
    """Consumer: drain ``q`` until no element arrives for 5 seconds.

    Bug fix: the original bare ``except:`` also swallowed
    KeyboardInterrupt/SystemExit; the expected failure is the queue timeout
    (queue.Empty, a subclass of Exception).
    """
    while True:
        try:
            file = q.get(timeout=5)
            print('{}保存成功'.format(file))
        # A 5 s timeout means the producer is done.
        except Exception:
            print('全部保存完毕!')
            break
if __name__ == '__main__':
    # Bounded queue shared by both processes (capacity 5).
    q = Queue(5)
    p1 = Process(target=download, args=(q,))
    p2 = Process(target=getfile, args=(q,))
    p1.start()
    # NOTE(review): joining p1 before starting p2 makes the run sequential;
    # it only works because the queue can buffer all three items.
    p1.join()
    p2.start()
    p2.join()
|
# Implement a function taking three positional arguments that returns the
# sum of the two largest of them.
def int_func(x, y, z):
    """Return the sum of the two largest of the three arguments.

    Bug fix: the original only compared x+y against y+z, missing the pair
    (x, z) -- e.g. int_func(20, 10, 30) returned 40 instead of 50.
    """
    # Sum of all three minus the smallest is exactly the two largest.
    return x + y + z - min(x, y, z)


print(int_func(20, 10, 30))
import nltk
from nltk import sent_tokenize
from nltk.tokenize import word_tokenize
import re
from num2words import num2words
from autocorrect import Speller
spell_corrector = Speller(lang='en')
def _processLines(lines):
    """Clean one file's lines: strip URLs, spell out integers, lower-case,
    then keep only adjective/noun/verb/adverb tokens per sentence."""
    url_pattern = r'https?://\S+|www\.\S+'
    contents = []
    for line in lines:
        # remove urls
        without_urls = re.sub(pattern=url_pattern, repl=" ", string=line)
        # convert standalone integers to words
        tokens = without_urls.split()
        for index in range(len(tokens)):
            # isdigit(): only pure-digit tokens are converted
            if tokens[index].isdigit():
                tokens[index] = num2words(tokens[index])
        numbers_to_words = " ".join(tokens)
        sentences = sent_tokenize(numbers_to_words.lower())
        kept_sentences = []
        for sentence in sentences:
            tagged_words = nltk.pos_tag(word_tokenize(sentence))
            # keep JJ*/NN*/VB*/RB* (adjectives, nouns, verbs, adverbs)
            kept = " ".join(w for (w, t) in tagged_words
                            if t.startswith(('JJ', 'NN', 'VB', 'RB')))
            kept_sentences.append(kept)
        contents.append(" ".join(kept_sentences))
    return contents


def getProcessedContentsByLineFromFiles(filep, filen):
    """Process the positive and negative review files line by line.

    Returns (all_documents, positive_contents, negative_contents).

    Fixes: the per-file processing was duplicated verbatim for both files
    (extracted into _processLines), and the redundant close() calls inside
    the ``with`` blocks were removed -- the context manager already closes.
    """
    with open(filep, encoding="utf-8_sig") as fp:
        lineContentsp = _processLines(fp.readlines())
    with open(filen, encoding="utf-8_sig") as fn:
        lineContentsn = _processLines(fn.readlines())
    input_documents = lineContentsp + lineContentsn
    return input_documents, lineContentsp, lineContentsn
"""change relationships for user and tags
Revision ID: 2094e9ee194d
Revises: 39ce44cc5447
Create Date: 2020-03-07 22:21:10.509935
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2094e9ee194d'
down_revision = '39ce44cc5447'
branch_labels = None
depends_on = None
def upgrade():
    """No-op upgrade: autogenerate produced no schema operations for this
    revision (the relationship change was model-side only -- TODO confirm)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """No-op downgrade matching the no-op upgrade above."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2018/8/16 2:03 PM
# @Author: jasmine sun
# @File : DNR.py
# PIL验证码图片预处理: https://www.jianshu.com/p/41127bf90ca9
from PIL import Image
# Grayscale conversion
def dnr_img():
    """Convert origin.jpg to grayscale ('L' mode), save it as 1.jpg and
    return the converted image."""
    # convert() returns a new image in the requested mode
    grayscale = Image.open('origin.jpg').convert('L')
    grayscale.save('1.jpg', 'jpeg')
    return grayscale
# Binarization
def binarizing(img, threshold):
    """Threshold a grayscale image in place: pixels below ``threshold``
    become 0 (black), the rest 255 (white).  Saves the result as 2.jpg."""
    pixdata = img.load()
    width, height = img.size
    for row in range(height):
        for col in range(width):
            pixdata[col, row] = 0 if pixdata[col, row] < threshold else 255
    img.save('2.jpg', 'jpeg')
    return img
# Denoising
# the img parameter must be a PIL Image object, e.g. Image.open('...')
def depoint(img):
    """Despeckle: force an interior pixel to white (255) when more than two
    of its 4-neighbours are bright (>10).  Saves the result as 123.jpg."""
    # load() exposes in-place pixel access for the image
    pixdata = img.load()
    width, height = img.size
    for row in range(1, height - 1):
        for col in range(1, width - 1):
            neighbours = (pixdata[col, row - 1], pixdata[col, row + 1],
                          pixdata[col - 1, row], pixdata[col + 1, row])
            bright = sum(1 for value in neighbours if value > 10)
            if bright > 2:
                pixdata[col, row] = 255
    img.save('123.jpg', 'jpeg')
    return img
if __name__ == '__main__':
    # Pipeline is chained through files saved by each step:
    # origin.jpg -> 1.jpg (grayscale) -> 2.jpg (binarized) -> 123.jpg (despeckled)
    dnr_img()
    binarizing(Image.open('1.jpg'), 100)
    depoint(Image.open('2.jpg'))
|
# -*- coding: utf-8 -*-
from phanterweb.helpers import (
DIV,
H3,
FORM,
I
)
from phanterweb.materialize import (
MaterializeInputText,
MaterializePreloaderCircle,
MaterializeInputHidden,
MaterializeButtonForm
)
# Password-recovery ("Recuperar Senha") modal built from phanterweb helpers:
# a close icon, a title, a form with CSRF + email inputs, an ajax captcha
# placeholder, a progress bar, and submit / login / register buttons.
html = DIV(
    DIV(
        DIV(
            I("close", _class="material-icons"),
            _class='fechar_modal_layout'),
        H3("Recuperar Senha", _class="titulo-user-form"),
        DIV(
            DIV(
                DIV(
                    FORM(
                        MaterializeInputHidden(
                            "csrf_token",
                            "csrf token",
                            default="",
                            error="",
                            _phanterwebformvalidator_isnotempty="",
                            _phanterwebformvalidator_group="request-password"),
                        DIV(
                            MaterializeInputText(
                                "email-request-password",
                                "Email",
                                default="",
                                error="",
                                _phanterwebformvalidator_isnotempty="",
                                _phanterwebformvalidator_isemail="",
                                _phanterwebformvalidator_group="request-password",
                                _class="col s12"
                            ),
                            _class="row reset-css-row"
                        ),
                        DIV(
                            MaterializePreloaderCircle('profile-ajax', "big"),
                            _class='captcha-ajax-container',
                            _id="captcha-request-password-container"),
                        DIV(
                            DIV(
                                DIV(
                                    DIV(_class="phantergallery_progressbar-movement"),
                                    _class="phantergallery_progressbar"),
                                _id="progressbar-form-request-password",
                                _class="progressbar-form-modal"),
                            _class="progressbar-container-form-modal"),
                        DIV(
                            DIV(
                                MaterializeButtonForm(
                                    "request-password-ajax-button-submit",
                                    "Requisitar Nova Senha",
                                    _class="waves-effect waves-teal",
                                    _phanterwebformvalidator_submit="",
                                    _phanterwebformvalidator_group="request-password"
                                ),
                                MaterializeButtonForm(
                                    "request-password-ajax-button-login",
                                    "Login",
                                    _class="waves-effect waves-teal"
                                ),
                                MaterializeButtonForm(
                                    "request-password-ajax-button-registrar",
                                    "Criar Conta",
                                    _class="waves-effect waves-teal"
                                ),
                                _class='buttons-form-container'
                            ),
                            _class="input-field col s12"
                        ),
                        _action="",
                        _phanterwebformvalidator_group="request-password",
                        _id="form-request-password",
                        _class="form-request-password",
                        _enctype="multipart/form-data",
                        _method="pt",
                        _autocomplete="off"
                    ),
                    _class="col-12"
                ),
                _class="row"
            ),
            _class='request-password-container'
        ),
        _class="subcontainer-request-password"
    ),
    _class="main-container-request-password"
)
|
__author__ = 'luocheng'
import os

# Generate run.sh: one CorrelationAnlysis.py invocation per file in ./bleu.
# Fix: the output file is now context-managed so it is closed even if
# listdir or write raises.
with open('run.sh', 'w') as fout:
    for name in os.listdir('./bleu'):
        fout.write('python CorrelationAnlysis.py ./bleu/' + name + "\n")
# coding=UTF-8
from cloudify import ctx
from fcntl import lockf, LOCK_EX, LOCK_UN
from os import getpid
from socket import getfqdn
import uuid
class FileLock(object):
    """A file lock to prevent race conditions."""

    def __init__(self, lock_file=None, log_file=None, lock_details=None):
        """
        Register lock and log file.

        :param lock_file: path to the lock file, does not have to exist
        :param log_file: path to the log file, does not have to exist
        :param lock_details: identification written into the lock file; may
            be a string, dict, list, or any stringifiable object
        """
        self.lock_file = lock_file or '/tmp/lock-' + str(uuid.uuid1())
        self.log_file = log_file or self.lock_file + '.log'
        # Bug fix: ``basestring`` does not exist on Python 3 (NameError as
        # soon as lock_details was a dict/list); resolve it compatibly.
        try:
            string_types = basestring  # Python 2
        except NameError:
            string_types = str  # Python 3
        if lock_details is None:
            self.lock_details = ''
        elif isinstance(lock_details, string_types):
            self.lock_details = lock_details
        elif isinstance(lock_details, dict):
            self.lock_details = '\n'.join('{} {}'.format(k, v)
                                          for k, v in lock_details.items())
        elif isinstance(lock_details, list):
            self.lock_details = '\n'.join(map(str, lock_details))
        else:
            self.lock_details = str(lock_details)
        self.acquired = False

    def __enter__(self):
        """
        Open lock and log files, write identification.

        :return: reference to instantiated lock
        """
        self._acquire()
        return self

    def __exit__(self, exc_type, _v, _tb):
        """
        Clean up and release any locks, close open files.

        :param exc_type: part of generic signature
        :param _v: part of generic signature
        :param _tb: part of generic signature
        :return:
        """
        self._release()

    def _acquire(self):
        """
        Open lock and log files, write identification details into lock.

        :return:
        """
        self.lock_fd = open(self.lock_file, 'w+')
        self.log_fd = open(self.log_file, 'a')
        # Blocks until the exclusive lock is granted.
        lockf(self.lock_fd, LOCK_EX)
        self.lock_fd.write('lock_pid {}\nlock_status locked\n{}'.format(
            getpid(), self.lock_details))
        self.lock_fd.flush()
        self.acquired = True

    def _release(self):
        """
        Update lock file, release lock and clean up.

        :return:
        """
        if self.acquired:
            # NOTE(review): seek(0) on an append-mode log fd has no effect,
            # and the "unlocked" record is appended after the "locked" one
            # without truncation -- presumably intentional audit trail.
            self.log_fd.seek(0)
            self.lock_fd.write('lock_pid {}\nlock_status unlocked\n{}'.format(
                getpid(), self.lock_details))
            self.lock_fd.flush()
            lockf(self.lock_fd, LOCK_UN)
            self.lock_fd.close()
            self.log_fd.close()

    def log(self, text):
        """
        Non-fancily log text to log file by writing out a line.

        :param text: message to log
        :return:
        """
        if self.acquired:
            self.log_fd.write(text + '\n')
        else:
            raise Exception('trying to write when unlocked')
# Cloudify relationship script: under an exclusive per-deployment file lock,
# copy the master's address into the source node's runtime properties.
lock_file = '{}-{}'.format('/tmp/lock', ctx.deployment.id)
lock_details = {'deployment': ctx.deployment.id,
                'node': ctx.source.instance.id}
with FileLock(lock_file, lock_details=lock_details) as lock:
    src = ctx.source.instance.runtime_properties
    tgt = ctx.target.instance.runtime_properties
    src['master_ip'] = tgt['ip']
    # NOTE(review): getfqdn() returns the FQDN of the machine running this
    # script, not necessarily of the target node -- confirm intended.
    src['master_fqdn'] = getfqdn()
    ctx.source.instance.update()
    lock.log('config_in_master: {}'.format(src['master_ip']))
    lock.log('config_in_master: {}'.format(src['master_fqdn']))
|
# Count how many of six numbers read from stdin are strictly positive.
positivos = 0
for _ in range(6):
    if float(input()) > 0:
        positivos = positivos + 1
print(f'{positivos} valores positivos')
|
import paho.mqtt.client as MQTT
import requests
import telepot
import time
import json
import os
import datetime
class DoSomething(object):
    """Per-resource MQTT message processors for the refrigerator service.

    One method per sensor resource (temperature, humidity, motion, barcode,
    weight); MicroServicePubSub dispatches on the resource name and injects
    the per-user state dicts right after construction.  All state is keyed
    by user (second topic segment) so one instance serves many users.
    """
    def __init__(self):
        self.fifo=None                # per-user FIFO of over-threshold bits
        self.timer=None               # per-user/per-resource last-forward time
        self.weights_vector=None      # per-user raw scale samples
        self.products=None            # per-user [barcode, timestamp] inventory
        self.remove_product=None      # per-user flag: next barcode is a removal
        self.add_product=None         # per-user flag: a barcode was scanned
        self.current_product=None
        self.received_products=None   # per-user history of scanned barcodes
        self.last=None                # per-user index of the last step sample
        self.answers=None             # per-user last ScaleChecker verdict
        self.last_barcode_msg={}      # per-user backup of last barcode message
        self.add_product_w={}         # per-user flag: positive weight step seen
        self.t_w={}                   # per-user timestamp of last weight sample
        self.scalechecker=ScaleChecker(50,0.9)
        self.last_stab={}             # per-user last stable scale reading

    def temperature(self,clientID,msg,flags):
        """Track over-threshold samples; alarm when the last 8 all exceeded."""
        # Obtain user and payload
        user,payload=self.user_payload(msg)
        # Subsample: forwards at most one message per 15 s (sets flags['timer'])
        flags,self.timer,msg=self.common(clientID,flags,msg)
        # Lazily create this user's FIFO
        if user not in list(self.fifo.keys()):
            self.fifo[user]=FIFO(8)
        # Shift in 1 when the sample exceeds its threshold, else 0
        if payload['e'][0]['v']>payload['e'][0]['tr']:
            self.fifo[user].insert(1)
        else:
            self.fifo[user].insert(0)
        # A full FIFO (8 consecutive exceedances) raises the alarm
        if self.fifo[user].check():
            flags['allarm']=True
        return msg,flags

    def humidity(self,clientID,msg,flags):
        """Humidity only needs the 15 s subsampling performed by common()."""
        flags,self.timer,msg=self.common(clientID,flags,msg)
        return msg,flags

    def motion(self,clientID,msg,flags):
        """Motion only needs the 15 s subsampling performed by common()."""
        flags,self.timer,msg=self.common(clientID,flags,msg)
        return msg,flags

    def barcode(self,clientID,msg,flags):
        """Handle a barcode scan: retarget the topic to the BarcodeService
        and either queue the product for addition (confirmed later by a
        positive weight step) or emit a removal event immediately."""
        # Obtain user and payload
        user,payload=self.user_payload(msg)
        resource=payload['e'][0]['n']
        topic_l=msg.topic.split('/')
        topic_l[0]='MicroServices/'+resource+'/BarcodeService'
        msg.topic='/'.join(topic_l)
        # Lazily create the per-user state on first contact
        if user not in list(self.products.keys()):
            self.products[user]=[]
            self.received_products[user]=[]
            self.remove_product[user]=False
            self.add_product[user]=False
            self.add_product_w[user]=False
            self.last[user]=0
            self.weights_vector[user]=[]
        # Backup message so weight() can publish it once the scale confirms
        self.last_barcode_msg[user]=msg
        # Record the scanned barcode with its timestamp
        self.received_products[user].append([payload['e'][0]['v'].encode(),payload['e'][0]['t']])
        if not self.remove_product[user]:
            self.add_product[user]=True
        if self.remove_product[user]:
            # A negative weight step preceded this scan: emit a removal event
            msg.payload=json.loads(msg.payload)
            try:
                out=[i for i,p in enumerate(self.products[user]) if self.received_products[user][-1][0]==p[0]]
                self.products[user].pop(out[0])
            except IndexError:
                # barcode not present in the inventory -- nothing to pop
                pass
            msg.payload['e'][0]['v']=self.received_products[user][-1][0]
            msg.payload['e'][0]['action']='remove'
            msg.payload['e'][0]['t']=time.strftime("%a %d %b %Y %H:%M:%S GMT", time.gmtime(self.received_products[user][-1][1]))
            msg.payload['e'][0]['user']=user
            msg.payload['bn']=msg.topic
            msg.payload=json.dumps(msg.payload)
            flags["bc"]=msg
            self.remove_product[user]=False
        return msg,flags

    def weight(self,clientID,msg,flags):
        """Handle a scale sample: detect add/remove steps and, when a scan
        and a positive step agree, publish the pending 'add' barcode event."""
        # Obtain user and payload
        user,payload=self.user_payload(msg)
        flags,self.timer,msg=self.common(clientID,flags,msg)
        # Lazily create the per-user state on first contact
        if user not in list(self.products.keys()):
            self.last[user]=0
            self.weights_vector[user]=[]
            self.remove_product[user]=False
            self.add_product[user]=False
            self.add_product_w[user]=False
            self.products[user]=[]
            self.received_products[user]=[]
            self.t_w[user]=0
            self.last_stab[user]=0
        # Append only strictly newer samples
        if payload['e'][0]['t']>self.t_w[user]:
            self.weights_vector[user].append(payload['e'][0]['v'])
            self.t_w[user]=payload['e'][0]['t']
        # Classify the samples since the last step: +1 add, -1 remove, 0 none
        self.answers[user],self.last_stab[user]=self.scalechecker.check(self.weights_vector[user][self.last[user]:],self.last_stab[user])
        if self.answers[user]==1:
            self.add_product_w[user]=True
            self.last[user]=len(self.weights_vector[user])-1
            print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"- "+user+': a barcode was detected before the removal of a product')
        elif self.answers[user]==-1:
            if self.products[user]!=[]:
                self.remove_product[user]=True
                self.last[user]=len(self.weights_vector[user])-1
                print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"- "+user+': a barcode was detected after the removal of a product')
        else:
            pass
        # Scan + positive step both seen: publish the pending 'add' event
        if self.add_product[user] and self.add_product_w[user]:
            last_barcode_msg=self.last_barcode_msg[user]
            last_barcode_msg.payload=json.loads(last_barcode_msg.payload)
            timestamp=time.strftime("%a %d %b %Y %H:%M:%S GMT", time.gmtime(self.received_products[user][-1][1]))
            self.products[user].append([self.received_products[user][-1][0],timestamp])
            last_barcode_msg.payload['e'][0]['v']=self.received_products[user][-1][0]
            last_barcode_msg.payload['e'][0]['action']='add'
            last_barcode_msg.payload['e'][0]['t']=time.strftime("%a %d %b %Y %H:%M:%S GMT", time.gmtime(self.received_products[user][-1][1]))
            last_barcode_msg.payload['e'][0]['user']=user
            last_barcode_msg.payload['bn']=msg.topic
            last_barcode_msg.payload=json.dumps(last_barcode_msg.payload)
            flags["bc"]=last_barcode_msg
            self.add_product[user]=False
            self.add_product_w[user]=False
        return msg,flags

    def user_payload(self,msg):
        """Extract (user, decoded payload) from an MQTT message."""
        # NOTE(review): .encode() yields a bytes key on Python 3; the state
        # dicts are keyed consistently, but printing/contacts lookups mix
        # str and bytes -- confirm on the target Python version.
        user=msg.topic.split('/')[1].encode()
        payload=json.loads(msg.payload)
        return user,payload

    def common(self,clientID,flags,msg):
        """Rate-limit forwarding to one message per resource per 15 s and
        retarget the topic toward the consuming microservice."""
        user,payload=self.user_payload(msg)
        resource=payload['e'][0]['n']
        if user not in list(self.timer.keys()):
            self.timer[user]={}
        if resource not in list(self.timer[user].keys()):
            self.timer[user][resource]=time.time()
        if time.time()-self.timer[user][resource]>15: # thingspeak maximum number of data points for hour is 240
            self.timer[user][resource]=time.time()
            topic_l=msg.topic.split('/')
            topic_l[0]='MicroServices/'+resource+'/'+clientID
            # Bug fix: the original condition ``resource == 'barcode' or
            # 'weight'`` was always true (non-empty string literal), so every
            # resource was routed to <Resource>Service and the clientID line
            # above was dead.  Membership test restores the intended special
            # case for barcode/weight only.
            if resource in ('barcode', 'weight'):
                topic_l[0]='MicroServices/'+resource+'/'+resource.capitalize()+'Service'
            msg.topic='/'.join(topic_l)
            payload['e'][0]['user']=user
            payload['bn']=msg.topic
            msg.payload=json.dumps(payload)
            flags["timer"]=True
        else:
            flags["timer"]=False
        return flags,self.timer,msg
class MicroServicePubSub:
    """MQTT pub/sub wrapper that routes sensor messages through DoSomething
    and forwards threshold alarms to users via a Telegram bot."""
    def __init__(self,clientID,TOKEN=None,WSC_URL=None,ADMIN=None,PASSWORD=None):
        self.clientID=clientID
        if TOKEN is not None:
            self.bot = telepot.Bot(token=TOKEN)
        if None not in [WSC_URL,ADMIN,PASSWORD]:
            self.clientSession=WebServiceClient(WSC_URL,ADMIN,PASSWORD)
            self.clientSession.start()
        else:
            self.clientSession=None
        self.doSomething=DoSomething()
        # create an instance of paho.mqtt.client
        self._paho_mqtt=MQTT.Client(self.clientID,clean_session=True)
        # register the callbacks
        self._paho_mqtt.on_connect=self.myOnConnect
        self._paho_mqtt.on_message=self.myOnMessageReceived
        # inject the per-user state containers used by the processors
        self.doSomething.timer={}
        self.doSomething.fifo={}
        self.doSomething.weights_vector={}
        self.doSomething.products={}
        self.doSomething.remove_product={}
        self.doSomething.add_product={}
        self.doSomething.current_product={}
        self.doSomething.received_products={}
        self.doSomething.last={}
        self.doSomething.answers={}
        self.QoS=2

    def start(self, url, port, sub_topic=None):
        """Connect to the broker, start the network loop, and optionally
        subscribe to ``sub_topic``."""
        self._paho_mqtt.connect(url,port)
        self._paho_mqtt.loop_start()
        if sub_topic is not None:
            self.mySubscribe(sub_topic)

    def stop(self, sub_topic=None):
        """Unsubscribe (if a topic is given) and disconnect cleanly.

        Bug fix: the original called ``self.myUnsubscribe(sub_topics)`` --
        an undefined name (the parameter is ``sub_topic``), raising
        NameError whenever a topic was passed.
        """
        if sub_topic is not None:
            self.myUnsubscribe(sub_topic)
        self._paho_mqtt.loop_stop()
        self._paho_mqtt.disconnect()

    def myPublish(self,topic,message,QoS):
        # publish a message on a certain topic
        self._paho_mqtt.publish(topic, message, QoS,retain=False)

    def mySubscribe(self,sub_topics):
        self._paho_mqtt.subscribe(sub_topics)

    def myUnsubscribe(self,sub_topics):
        self._paho_mqtt.unsubscribe(sub_topics)

    def myUpdate(self,new_sub_topics,old_sub_topics):
        """Diff the old/new subscription lists, unsubscribing the stale
        topics and subscribing the new ones."""
        topics_to_add=[]
        topics_to_remove=[]
        for topic in new_sub_topics:
            if topic not in old_sub_topics:
                for o in old_sub_topics:
                    if o[0] == topic[0] or (o not in new_sub_topics):
                        topics_to_remove.append(o[0])
                topics_to_add.append(topic)
        topics_to_remove=self.unique(topics_to_remove)
        if topics_to_remove!=[]:
            self.myUnsubscribe(topics_to_remove)
        if topics_to_add!=[]:
            self.mySubscribe(topics_to_add)

    def unique(self,duplicate):
        """Return ``duplicate`` without repeats, preserving order."""
        final_list = []
        for num in duplicate:
            if num not in final_list:
                final_list.append(num)
        return final_list

    def myOnConnect(self, paho_mqtt, userdata, flags, rc):
        print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"- "+"connected to message broker with rc" + str(rc))

    def myOnMessageReceived(self, paho_mqtt, userdata, msg):
        """Dispatch the message to the DoSomething handler named after its
        resource, then publish/alarm according to the returned flags."""
        print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"- "+"Received from Topic: " + msg.topic + " QoS: "+ str(msg.qos))
        # initialize timer, alarm and barcode flags
        flags={"timer":False,"allarm":False,"bc":False}
        # the resource name selects the handler method
        resource=json.loads(msg.payload)['e'][0]['n']
        user=msg.topic.split('/')[1]
        action=getattr(self.doSomething,resource)
        msg,flags=action(self.clientID,msg,flags)
        if flags['timer']:
            self.myPublish(msg.topic,msg.payload,self.QoS)
            print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"- "+"Publishing subsampled data to Topic: " + msg.topic + " QoS: "+ str(self.QoS))
        if flags['bc']:
            msg=flags['bc']
            self.myPublish(msg.topic,msg.payload,self.QoS)
            print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"- "+"Publishing received Barcode to Topic: " + msg.topic + " QoS: "+ str(self.QoS))
        if flags['allarm']:
            contacts=self.clientSession.get_contacts()
            print (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"- "+user+": sending allarm to user by RefrigeratorAllarm_bot")
            try:
                chat_id=contacts[user]['telegramID']
                tmsg='WARNING: '+resource+' is over threshold!!!'
                self.bot.sendMessage(chat_id=chat_id, text=tmsg)
            # bug fix: narrowed the original bare ``except:`` -- a missing
            # contact (KeyError) is the expected failure here
            except Exception:
                print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+"- "+"Missing telegram contact for "+user)
class ScaleChecker:
    """Detect add/remove steps in a stream of scale readings.

    ``noise`` is the tolerance below which readings count as equal;
    ``perc`` is the fraction of older samples the last one must clear
    for a step to be declared.
    """
    def __init__(self, noise, perc):
        self.noise = noise
        self.perc = perc

    def check(self, v, last_stab):
        """Classify ``v`` against ``last_stab``: returns (+1/-1/0, new_stab)."""
        # Require the last three readings to be stable before deciding.
        if max(self.diff(v[-3:])) >= self.noise:
            return 0, last_stab
        if self.added(v):
            return 1, self.mode(v)[0]
        if self.removed(v):
            return -1, self.mode(v)[0]
        # No clear step: compare the mean against the last stable value.
        mean = float(sum(v)) / len(v)
        if mean > last_stab + self.noise:
            return 1, self.mode(v)[0]
        if mean < last_stab - self.noise:
            return -1, self.mode(v)[0]
        return 0, last_stab

    def added(self, v):
        """True when the last reading exceeds most earlier ones by > noise."""
        above = sum(1 for sample in v[:-1] if v[-1] > sample + self.noise)
        return above > len(v) * self.perc

    def removed(self, v):
        """True when the last reading is below most earlier ones by > noise."""
        below = sum(1 for sample in v[:-1] if v[-1] < sample - self.noise)
        return below > len(v) * self.perc

    def diff(self, v):
        """All pairwise absolute differences between readings in ``v``."""
        return [abs(x - y) for y in v for x in v]

    def mode(self, array):
        """The most frequent value(s) of ``array`` as a list."""
        top = max(list(map(array.count, array)))
        return list({x for x in array if array.count(x) == top})
class WebServiceClient(object):
    """Thin session-based client for the catalogue web service."""
    def __init__(self, urlWebService, user, password):
        self.url = urlWebService
        self.user = user
        self.password = password
        self.loggedin = False

    def start(self):
        """Log in and remember whether authentication succeeded."""
        response = self.login()
        if response.status_code == 200:
            self.loggedin = True
        else:
            print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "- " + "Authentication Error")
        return response.status_code

    def login(self):
        """POST the credentials, keeping the session for later calls."""
        self.s = requests.Session()
        credentials = {'user': self.user, 'password': self.password}
        return self.s.post(self.url + '/login', data=json.dumps(credentials))

    def get_msgbroker(self):
        """Return (IP, PORT) of the broker, or (None, None) if not logged in."""
        ip = None
        port = None
        if self.loggedin:
            reply = self.s.get(self.url + '/msgbroker')
            broker = json.loads(reply.text)['msgbroker']
            ip = broker["IP"]
            port = broker["PORT"]
        return ip, port

    def get_topics(self, resource):
        """Endpoint topics of every device exposing ``resource`` (None if
        not logged in)."""
        if not self.loggedin:
            return None
        reply = self.s.get(self.url + '/devices', params={'resources': resource})
        devices = json.loads(reply.text)['devices']
        topics = []
        for user in list(devices.keys()):
            topics.extend(dev['endpoints'] for dev in devices[user]
                          if dev["resources"] == resource)
        return topics

    def get_contacts(self):
        """Return the contacts dict, or None when not logged in."""
        contacts = None
        if self.loggedin:
            reply = self.s.get(self.url + '/contacts')
            contacts = json.loads(reply.text)['contacts']
        return contacts

    def put_microservice(self, data):
        """Register this microservice; returns the HTTP status code
        (401 without a login)."""
        if not self.loggedin:
            return 401
        return self.s.put(self.url + '/newmicroservice', data=json.dumps(data)).status_code
class FIFO:
    """Fixed-width shift register with a saturation check."""
    def __init__(self, nbit=8):
        self.nbit = nbit
        self.array = [0] * nbit

    def insert(self, bit):
        """Shift in ``bit``, discarding the oldest entry."""
        self.array = self.array[1:] + [bit]

    def check(self):
        """True when the register is saturated (sum equals its width)."""
        return sum(self.array) == self.nbit

    def reset(self):
        """Clear every slot back to 0."""
        self.array = [0] * self.nbit
|
# next idea: look at assigning tons of work and letting the computer figure out what to do.
# lets make a directory with lots of little files that the process needs to open,
# write to, and close.
import multiprocessing as mp
import numpy as np
import glob
import time
def fileWriter(path):
    """Write one random normal sample (as text) to ``path``."""
    sample = np.random.randn()
    with open(path, 'w') as handle:
        handle.write('{}'.format(sample))
# Compare a 4-worker process pool against a plain serial loop for writing
# many small files.
files = glob.glob('./testFiles/*txt')
queue = mp.Queue()

# Pool-based run.
start = time.time()
with mp.Pool(4) as pool:
    pool.map(fileWriter, files)
stop = time.time()
# multiproc time
print(stop - start)

# Serial run over the same files.
start2 = time.time()
for path in files:
    fileWriter(path)
stop2 = time.time()
# single proc time
print(stop2 - start2)
## <compile_data2.py>
## Author: Kai Bonsol
## This program will access new cases per day, test count,
## acutely ill, hospitalized, recovered, deceased
##
##
import pickle
import bs4 as bs
import requests
from twilio.rest import Client
def update(text):
    """Print *text* and also send it as an SMS status update via Twilio."""
    print(text)
    # SECURITY: live Twilio credentials and phone numbers are hard-coded in
    # source.  These secrets should be rotated and loaded from the
    # environment/config instead of being committed.
    account_sid = "AC1360cea428da1fcb809f629507bc5774"
    auth_token = "5c0c45fb68782364b67b086701ec17ee"
    client = Client(account_sid, auth_token)
    message = client.messages.create(
        to="+18155662099",
        from_="+12408396909",
        body=text)
    print(message.sid)
# <saveAdditionalCovidData>
##data structure:
##
##data = {
## 'county/state 1' : {
## 'date10' : num_cases,
## 'population' : population,
## 'cases_per_day' : cases_per_day,
## 'deaths' : deaths,
## 'cases' : cases,
## 'tests' : tests
## }
##}
def saveAdditionalCovidDataState(site, covid_data, state):
    """Scrape citypopulation.de for one state and fill covid_data[state].

    Adds cases_per_day, deaths, the acutely_ill / hospitalized / recovered
    breakdown (when present on the page), and positive/negative test
    counts.  Missing hospitalized / acutely_ill figures default to 'NA'.
    """
    resp = requests.get('https://www.citypopulation.de/en/usa/covid/' + site)
    soup = bs.BeautifulSoup(resp.text, "lxml")
    tables = soup.findAll('table', {'class': 'data'})
    # Page layout: tables[1]=new cases/day, [2]=deaths, [3]=case breakdown,
    # [4]=tests.  Row 9 of the per-day tables is the most recent entry.
    cases_per_day_table = tables[1]
    death_table = tables[2]
    cases_table = tables[3]
    test_table = tables[4]
    # ISSUE: individual counties only have new cases per day & deaths
    covid_data[state]['cases_per_day'] = cases_per_day_table.findAll('tr')[9].findAll('td')[1].text.replace(',', '')
    covid_data[state]['deaths'] = death_table.findAll('tr')[9].findAll('td')[1].text.replace(',', '')
    for row in cases_table.findAll('tr')[1:]:
        # Read each cell once instead of re-running findAll per comparison.
        label = row.findAll('td')[0].text
        if label == 'Cases (total)' or label == 'Deceased':
            continue  # already captured elsewhere
        value = row.findAll('td')[1].text.replace(',', '')
        if label == 'Acutely ill':
            covid_data[state]['acutely_ill'] = value
        elif label == 'Hospitalized':
            covid_data[state]['hospitalized'] = value
        elif label == 'Recovered':
            covid_data[state]['recovered'] = value
    covid_data[state]['positive_tests'] = test_table.findAll('tr')[1].findAll('td')[1].text.replace(',', '')
    covid_data[state]['negative_tests'] = test_table.findAll('tr')[2].findAll('td')[1].text.replace(',', '')
    # Some states never report these figures; mark them explicitly instead
    # of leaving the keys absent (replaces the try/except KeyError probes).
    covid_data[state].setdefault('hospitalized', 'NA')
    covid_data[state].setdefault('acutely_ill', 'NA')
##}
def saveAdditionalCovidDataCounty(site, covid_data, county):
    """Scrape one county page and fill cases_per_day and deaths.

    Many county pages carry fewer tables than the state pages; whatever is
    missing is simply skipped.
    """
    resp = requests.get('https://www.citypopulation.de' + site)
    soup = bs.BeautifulSoup(resp.text, "lxml")
    tables = soup.findAll('table', {'class': 'data'})
    if len(tables) < 2:
        return  # page has no per-day table at all
    latest_cases = tables[1].findAll('tr')[9].findAll('td')[1].text
    covid_data[county]['cases_per_day'] = latest_cases.replace(',', '')
    if len(tables) < 3:
        return  # no deaths table on this page
    latest_deaths = tables[2].findAll('tr')[9].findAll('td')[1].text
    covid_data[county]['deaths'] = latest_deaths.replace(',', '')
def updateCovidData():
    """Refresh the pickled county covid data with freshly scraped numbers.

    Loads the cached per-state / per-county dictionaries, scrapes each
    county page on citypopulation.de, and pickles the augmented county
    data back out.  Progress is reported (and texted) via update().
    """
    with open('pickles/covid_data_by_state.pickle', 'rb') as f:
        covid_data_by_state = pickle.load(f)
    with open('pickles/covid_data_by_county.pickle', 'rb') as f:
        covid_data_by_county = pickle.load(f)
    with open('pickles/abrv_state.pickle', 'rb') as f:
        abrv_state = pickle.load(f)
## # states first
## for state in covid_data_by_state:
## site = state + '__' + abrv_state[state].replace(' ', '_')
## saveAdditionalCovidDataState(site, covid_data_by_state, state)
##
## with open('pickles/covid_data_by_state2.pickle', 'wb') as f:
## pickle.dump(covid_data_by_state,f)
    # now counties
    update('starting data compiler...')
    county_sites = getCountySites()
    #print(county_sites[0])
    index = 0
    # NOTE(review): counties are paired with county_sites purely by
    # position; this assumes the dict's iteration order matches the row
    # order scraped by getCountySites() -- fragile, verify upstream.
    for county in covid_data_by_county:
        try:
            county_sites[index]
        except IndexError:
            index += 1
            continue
        site = county_sites[index]
        # This D.C. page has a different layout; skip it entirely.
        if site == '/en/usa/covid/district_of_columbia/11001__district_of_columbia/':
            index += 1
            continue
        print('visiting ' + site + ': for county ' + county)
        saveAdditionalCovidDataCounty(site, covid_data_by_county, county)
        index += 1
    print(covid_data_by_county)
    with open('pickles/covid_data_by_county2.pickle', 'wb') as f:
        pickle.dump(covid_data_by_county, f)
    update('Data compiler 2 is finished')
## num = 0
## num1 = 1
## num2 = 1
## curState = 'AL'
## for county in covid_data_by_county:
## site = abrv_state[county[0:2]] + '/'
## if county[0:2] != curState:
## curState = county[0:2]
## num2 = 1
## num1 += 1
## if num1 < 10:
## site += '0'
## site += str(num1)
## if num2 < 10:
## site += '00'
## elif num2 < 100:
## site += '0'
## site += str(num2)
## num2 += 2
## site += '__' + county[2:].replace(' ','_')
## print(site)
## num += 1
## if num > 100:
## break
def getCountySites():
    """Return the relative URL of every county page linked from the
    national covid overview table."""
    resp = requests.get('https://www.citypopulation.de/en/usa/covid/')
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table', {'class': 'data'})
    sites = []
    for row in table.findAll('tr')[1:]:
        if row.findAll('td')[1].text == 'State':
            continue  # skip the state summary rows
        link = row.find('td', {'class': 'sc'}).find('a')
        if link is not None:
            sites.append(link['href'])
    return sites
def show():
    """Dump the pickled per-state covid data for a quick sanity check."""
    with open('pickles/covid_data_by_state2.pickle', 'rb') as fobj:
        loaded = pickle.load(fobj)
        print(loaded)
|
from django.db import models
class Card(models.Model):
    """A specific Hearthstone card, identified only by its name."""
    # A specific Hearthstone Card
    name = models.CharField(max_length=100, default="noName")
    @classmethod
    def create(cls, name):
        """Build (but do not save) a Card with the given name."""
        card = cls(name = name)
        return card
class Deck(models.Model):
    """A Hearthstone deck: metadata, a win/loss record, and its cards."""
    # A Hearthstone deck containing a bunch of cards
    def __str__(self):
        return self.name
    name = models.CharField(max_length = 128)
    created = models.DateTimeField(auto_now_add = True)  # set once on insert
    description = models.TextField(null = True)
    wins = models.PositiveIntegerField(null = True)
    losses = models.PositiveIntegerField(null = True)
    cards = models.ManyToManyField(Card)  # a card may belong to many decks
|
from src.models.user import User
from src.models.user_roles import UserRoles
from src.models.roles import Roles
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 06:45:38 2019
@author: night
"""
from collections import defaultdict
import pandas as pd
import networkx as nx
import itertools
import matplotlib.pyplot as plt
import heapq
import pydot
import dill
import graphviz
def save_pkl(df, filename):
    """Serialize *df* with dill into data/<filename>.pkl."""
    target = 'data/' + filename + '.pkl'
    with open(target, 'wb') as fobj:
        dill.dump(df, fobj)
def load_pkl(filename):
    """Load and return the dill-pickled object from data/<filename>.pkl."""
    source = 'data/' + filename + '.pkl'
    with open(source, 'rb') as fobj:
        return dill.load(fobj)
def get_genre_list(genre_str):
    """Extract genre names from a stringified Python list.

    e.g. "['Rock', 'Pop & Jazz']" -> ['Rock', 'Pop & Jazz'].  Only letters,
    spaces and '&' are recognised between the single quotes.
    """
    import re
    pattern = re.compile(r"'([A-Za-z &]+)'")
    return pattern.findall(genre_str)
def create_genre_groups(artist_info):
    """Collect each artist's genre list from the dataframe.

    Returns one genre-list per artist, in row order.
    """
    genre_groups = []
    genre_ct = defaultdict(int)  # per-genre occurrence count (not returned)
    for row_idx in range(len(artist_info)):
        genres = artist_info.iloc[row_idx]['genres']
        genre_groups.append(genres)
        for g in genres:
            genre_ct[g] += 1
    return genre_groups
def create_graph(genre_groups):
    """Build a weighted genre co-occurrence graph.

    Each pair of genres appearing together for one artist contributes 1.0
    to the weight of the edge between them.
    """
    multi = nx.MultiGraph()
    for genres in genre_groups:
        multi.add_nodes_from(genres)
        for left, right in itertools.combinations(genres, 2):
            multi.add_edge(left, right)
    # Collapse the parallel multigraph edges into a simple graph,
    # accumulating their weights.
    simple = nx.Graph()
    for u, v, attrs in multi.edges(data=True):
        w = attrs.get('weight', 1.0)
        if simple.has_edge(u, v):
            simple[u][v]['weight'] += w
        else:
            simple.add_edge(u, v, weight=w)
    return simple
def sort_nodes(G):
    """Return up to 100 node labels ordered by descending degree."""
    heap = [(-deg, node) for node, deg in G.degree()]
    # Most-negative entries first == highest degree first.
    largest = heapq.nsmallest(100, heap)
    return [node for _, node in largest]
def sort_edges(G):
    """Return up to 100 edge tuples ordered by descending weight."""
    heap = [(-w, (u, v)) for u, v, w in G.edges.data('weight')]
    largest = heapq.nsmallest(100, heap)
    return [edge for _, edge in largest]
def plot_network_graph(G, top_num, sorted_nodes):
    """Draw the subgraph induced by the highest-degree nodes.

    FIX: the original referenced the undefined global ``degree_largest``;
    the ranked node list is the ``sorted_nodes`` parameter.  ``top_num``
    (previously ignored) now controls how many of the top nodes are
    drawn, replacing the hard-coded 20.

    Returns the drawn subgraph.
    """
    bignodes = sorted_nodes[:top_num]
    G_small = G.subgraph(bignodes)
    pos_small = nx.nx_pydot.graphviz_layout(G_small)
    nx.draw(G_small, pos=pos_small, node_size=50, with_labels=True)
    return G_small
# Script entry: load the cached artist dataframe, build the genre
# co-occurrence graph and export it as GraphML for external analysis.
artist_info = load_pkl('artist_df')
genre_groups = create_genre_groups(artist_info)
G = create_graph(genre_groups)
#
nx.readwrite.graphml.write_graphml(G,'data/genregraph.graphml')
#
#weight_largest = sort_edges(S)
#degree_largest = sort_nodes(S)
#
#G_small = plot_network_graph(S, 100, degree_largest)
|
from rest_framework import serializers
from coupon.models import Coupon
class CouponSerializer(serializers.ModelSerializer):
    """DRF serializer for Coupon.

    ``id`` is declared explicitly with required=False so payloads may
    include it (e.g. for update-style requests) without failing validation.
    """
    id = serializers.IntegerField(required=False)
    class Meta:
        model = Coupon
        fields = ['code', 'discount_type', 'discount_value', 'valid_from', 'valid_till', 'status', 'id']
|
Boston, MA
509953 # all
41145 # pneumonia
20577 # < 1 yr old
309257 # 65+
Bridgeport, CT
113119
8921
2546
78218
Cambridge, MA
63708
6778
629
47786
Albany, NY
142275
6252
5699
94558
Hartford, CT
156735
8483
5700
98414
Lowell, MA
72723
5691
1131
52267
New Haven, CT
126447
7547
5626
79803
Providence, RI
163598
10816
5258
111321
DONE
-1
-1
-1
-1
|
from django.conf.urls import url, include
import xadmin
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
from cmdb.views import ZonesViewSet, ServersViewSet, BusinessViewSet
from databases.views import InstancesGroupViewSet, InstancesViewSet, MysqldbsViewSet, MysqlusersViewSet, PrivilegesViewSet
# REST API routing: one DRF router registration per resource viewset,
# plus the xadmin backend and auto-generated API docs.
router = DefaultRouter()
# NOTE(review): `base_name` was renamed to `basename` in newer versions of
# django-rest-framework; confirm the pinned DRF version before upgrading.
router.register(r'zones', ZonesViewSet, base_name='zones')
router.register(r'servers', ServersViewSet, base_name='servers')
router.register(r'business', BusinessViewSet, base_name='business')
router.register(r'instancegroups', InstancesGroupViewSet, base_name='instancegroups')
router.register(r'instances', InstancesViewSet, base_name='instances')
router.register(r'mysqldbs', MysqldbsViewSet, base_name='mysqldbs')
router.register(r'mysqlusers', MysqlusersViewSet, base_name='mysqlusers')
router.register(r'privileges', PrivilegesViewSet, base_name='privileges')
urlpatterns = [
    url(r'^xadmin/', xadmin.site.urls),
    url(r'^', include(router.urls)),
    url(r'docs/', include_docs_urls(title="zdb")),
]
|
import time
import scipy
import scipy.optimize
import scipy.sparse
import scipy.sparse.linalg
from scipy.spatial import cKDTree
import core
class BoundElementPoint:
    """Binding of a data source to a material point (element id + xi)."""

    def __init__(self, element_id, xi, data_label, data_index=None,
                 fields=None, weight=1):
        self._class_ = 'elem'  # discriminator used by Fit/Data
        self.eid = element_id
        self.xi = xi
        self.fields = fields
        self.data = data_label
        self.data_index = data_index
        self.bind_weight = weight
        # Filled in by update_from_mesh().
        self.param_ids = None
        self.param_weights = None
        self.num_fields = 0
        self.num_elem_fields = 0
        self.data_ids = None

    def get_field_id(self, field_index):
        """Map a local field index onto the mesh field id."""
        return self.fields[field_index]

    def get_bind_weight(self):
        """Weight applied to every row contributed by this binding."""
        return self.bind_weight

    def get_param_ids(self, field_index):
        """Parameter indices backing the given local field."""
        return self.param_ids[self.fields[field_index]]

    def get_param_weights(self, field):
        """Basis weights at xi, scaled by the binding weight."""
        return self.param_weights * self.bind_weight

    def update_from_mesh(self, mesh):
        """Refresh parameter indices and basis weights from the bound element."""
        element = mesh.elements[self.eid]
        self.param_ids = element._get_param_indicies()
        self.param_weights = element.weights(self.xi)[0]
        self.num_fields = len(self.param_ids)
        self.num_elem_fields = len(self.param_ids)
        if self.fields is None:
            self.fields = range(self.num_fields)
        else:
            self.num_fields = len(self.fields)

    def get_data(self, data, field, mesh):
        """Fetch the target value for *field* from the bound data set."""
        if self.data_index is None:
            return data[self.data].get_data(self.data_ids)[field]
        return data[self.data].values[self.data_index, field]
class BoundNodeValue:
    """Binding of a data source to one nodal parameter component."""

    def __init__(self, node_id, field_id, comp_id, data_label, index=None, weight=1):
        self._class_ = 'node'  # discriminator used by Fit/Data
        self.nid = node_id
        self.field_id = field_id
        self.comp_id = comp_id
        self.data = data_label
        self.data_index = index
        self.bind_weight = weight
        self.param_ids = None  # filled in by update_from_mesh()
        self.param_weights = 1
        self.num_fields = 1
        self.data_ids = None

    def get_field_id(self, field):
        """The mesh field id this binding targets (argument is ignored)."""
        return self.field_id

    def get_bind_weight(self):
        """Weight applied to the row contributed by this binding."""
        return self.bind_weight

    def get_param_ids(self, field):
        """Single-element list holding the bound parameter index."""
        return [self.param_ids]

    def get_param_weights(self, field):
        """Single-element list holding the scaled binding weight."""
        return [self.param_weights * self.bind_weight]

    def update_from_mesh(self, mesh):
        """Look up this node's parameter index for (field_id, comp_id)."""
        node = mesh.nodes[self.nid]
        self.param_ids = node._get_param_indicies()[self.field_id][self.comp_id]

    def get_data(self, data, field, mesh):
        """Target value: an explicit data row when data_index is set,
        otherwise the data point closest to the node's current position."""
        if self.data_index is not None:
            return data[self.data].values[self.data_index, field]
        x = mesh.nodes[self.nid].values[0]
        closest = data[self.data].find_closest(x, 1)
        if isinstance(closest, scipy.ndarray):
            return closest[field]
        return closest
class Data:
    """A labelled target point cloud used during fitting.

    When ``values`` is a 2-D scipy array with more than one row, a cKDTree
    is built for fast closest-point queries; other inputs are used directly.
    """
    def __init__(self, label, values):
        self.id = label
        self.values = values
        self.tree = None
        if isinstance(self.values, scipy.ndarray):
            if len(values.shape) == 2 and values.shape[0] > 1:
                self.tree = cKDTree(self.values)
            else:
                self.xc = values
        else:
            # Scalar input: replicate to a fixed-size vector.
            self.values = self.values * scipy.ones((10)) ### HACK ####
            self.xc = self.values * scipy.ones((10)) ### HACK ####
        self.row_ind = 0
        self.Phi = None  # sparse map from mesh params to bound point coords
        self.ii = None
        self.err_sqr_sum = None  # accumulated squared closest-point error
        self.num_err = None      # number of residuals behind err_sqr_sum
    def init_phi(self, M, N):
        """Allocate an empty M x N sparse matrix for point weights."""
        self.row_ind = 0
        self.Phi = scipy.sparse.lil_matrix((M, N))
    def add_point(self, point):
        """Append an element point's parameter weights as rows of Phi."""
        if point._class_ == 'elem' and self.tree != None:
            point.data_ids = []
            for pids in point.param_ids:
                self.Phi[self.row_ind, pids] = point.param_weights
                point.data_ids.append(self.row_ind)
                self.row_ind += 1
    def update_point_data(self, params):
        """Recompute closest data points for the current mesh parameters."""
        if self.Phi != None and self.tree != None:
            xd = self.Phi.dot(params)
            # NOTE(review): relies on Python 2 integer division here; under
            # Python 3 this would pass a float to reshape and raise.
            rr, ii = self.tree.query(xd.reshape((xd.size/self.values.shape[1], self.values.shape[1])))
            self.xc = self.values[ii, :].reshape(xd.size)
            self.num_err = rr.shape[0]
            self.err_sqr_sum = (rr*rr).sum()
    def get_data(self, ind):
        """Return the cached closest values, optionally a subset by index."""
        if ind == None:
            return self.xc
        return self.xc[ind]
    def find_closest(self, x, num=1):
        """Closest stored value(s) to x; falls back to the raw values
        when no KD-tree was built."""
        if self.tree:
            r, ii = self.tree.query(list(x))
            return self.values[ii]
        else:
            return self.values
class Fit:
    """Least-squares fit of mesh parameters to bound data points.

    Two modes: a linear solve (``solve``) built from the sparse binding
    matrix A, and nonlinear ``scipy.optimize.leastsq`` fits
    (``optimize``/``optimize2``) driven by a registered objective function.

    NOTE(review): this is Python 2 code (print statements, ``== None``
    comparisons) and must be run under a Python 2 interpreter.
    """
    def __init__(self, method='data_to_mesh_closest'):
        self.points = core.ObjectList()
        self.data = core.ObjectList()
        self.method = method
        # Objective-function registry; short aliases map onto the same
        # callables as the long names.
        self._objfns = {
            'd2mp': self.objfn_data_to_mesh_project,
            'd2mc': self.objfn_data_to_mesh_closest,
            'm2dc': self.objfn_mesh_to_data_closest,
            'data_to_mesh_project': self.objfn_data_to_mesh_project,
            'data_to_mesh_closest': self.objfn_data_to_mesh_closest,
            'mesh_to_data_closest': self.objfn_mesh_to_data_closest
            }
        if isinstance(method, str):
            self.objfn = self._objfns[method]
        # Optional user hooks used by optimize2().
        self.on_start = None
        self.objective_function = None
        self.on_stop = None
        self.X = None
        self.Xi = None
        self.A = None  # binding matrix: rows -> bound data values
        self.invA = None
        self.svd_UT, self.svd_S, self.svd_VT = None, None, None
        self.svd_invA = None  # optional SVD pseudo-inverse of A
        self.use_sparse = True
        self.param_ids = []
        self.num_dof = 0
        self.num_rows = 0
    def bind_element_point(self, element_id, xi, data,
        data_index=None, fields=None, weight=1):
        """Bind an element material point (eid, xi) to a data set.

        Raw (non-string) data is wrapped in a Data entry under a freshly
        generated unique label.
        """
        if not isinstance(data, str):
            label_not_in_data = True
            while label_not_in_data:
                data_label = '_' + str(element_id) + '_' + \
                    str(int(1000000 + 1000000 * scipy.rand()))
                if data_label not in self.data.keys():
                    label_not_in_data = False
            self.set_data(data_label, data)
            data = data_label
        self.points.add(BoundElementPoint(element_id, xi, data,
            data_index=data_index, fields=fields, weight=weight))
    def bind_node_value(self, node_id, field_id, comp_id,
        data, index=None, weight=1):
        """Bind one nodal parameter component (or a list of nodes) to a
        data set; raw data is auto-labelled as above."""
        if not isinstance(data, str):
            data_label = '_' + str(node_id) + '_' + str(field_id) + \
                '_' + str(comp_id) + '_' + \
                str(int(1000000 * scipy.rand()))
            self.set_data(data_label, data)
            data = data_label
        if isinstance(node_id, list):
            for nid in node_id:
                self.points.add(BoundNodeValue(
                    nid, field_id, comp_id, data, index=index,
                    weight=weight))
        else:
            self.points.add(BoundNodeValue(
                node_id, field_id, comp_id, data, index=index,
                weight=weight))
    def set_data(self, label, values):
        """Register a new Data entry under *label*."""
        self.data.add(Data(label, values))
    def get_data(self, mesh):
        """Assemble the right-hand-side vector Xd from all bound points."""
        Xd = scipy.zeros(self.num_rows)
        for ind, dm in enumerate(self.data_map):
            Xd[ind] = self.points[dm[0]].get_data(self.data, dm[1], mesh)
        return Xd
    def delete_all_data(self):
        """Drop every registered Data entry."""
        self.data.reset_object_list()
    def get_column_index(self, param_ids):
        """Map global parameter ids onto columns of A."""
        return [self.param_ids.index(pid) for pid in param_ids]
    def update_from_mesh(self, mesh):
        """Refresh every bound point from the mesh and rebuild A."""
        for point in self.points:
            point.update_from_mesh(mesh)
        self.generate_matrix()
    def generate_matrix(self):
        """Build the (sparse) binding matrix A, row weights W and the
        row -> (point, field) data_map from the bound points."""
        param_ids = []
        self.num_rows = 0
        # Collect the set of parameter ids touched by any bound point.
        for point in self.points:
            self.num_rows += point.num_fields
            if isinstance(point.param_ids, int):
                param_ids.extend([point.param_ids])
            elif isinstance(point.param_ids[0], list):
                param_ids.extend([item for sublist in point.param_ids
                    for item in sublist])
            else:
                param_ids.extend([item for item in point.param_ids])
        self.param_ids = [pid for pid in set(param_ids)]
        self.param_ids.sort()
        self.num_dof = len(self.param_ids)
        self.W = scipy.ones(self.num_rows)
        self.data_map = []
        if self.use_sparse:
            self.A = scipy.sparse.lil_matrix((self.num_rows, self.num_dof))
        else:
            self.A = scipy.zeros((self.num_rows, self.num_dof))
        row_ind = -1
        # One row of A per bound (point, field) pair.
        for pid, point in enumerate(self.points):
            bind_weight = point.get_bind_weight()
            for field_ind in range(point.num_fields):
                field = point.get_field_id(field_ind)
                weights = point.get_param_weights(field_ind)
                param_ids = point.get_param_ids(field_ind)
                cols = self.get_column_index(param_ids)
                row_ind += 1
                self.data_map.append([pid, field])
                for col, weight in zip(cols, weights):
                    self.A[row_ind, col] += weight
                self.W[row_ind] = bind_weight
        if self.use_sparse:
            self.A = self.A.tocsc()
    def generate_fast_data(self):
        """Pre-build each Data entry's Phi matrix so closest-point targets
        can be updated with a single sparse dot product."""
        num_rows = {}
        for point in self.points:
            if point._class_ == 'elem':
                if point.data_index == None:
                    if point.data not in num_rows.keys():
                        num_rows[point.data] = 0
                    num_rows[point.data] += point.num_elem_fields
        for key in num_rows.keys():
            self.data[key].init_phi(num_rows[key], self.num_dof)
        for point in self.points:
            if point._class_ == 'elem':
                if point.data_index == None:
                    self.data[point.data].add_point(point)
    def invert_matrix(self):
        """Precompute an SVD-based pseudo-inverse of A (needs sparsesvd)."""
        from sparsesvd import sparsesvd
        self.svd_UT, self.svd_S, self.svd_VT = sparsesvd(self.A, self.A.shape[1])
        self.svd_invA = scipy.dot(\
            scipy.dot(self.svd_VT.T,scipy.linalg.inv(scipy.diag(self.svd_S))),self.svd_UT)
    def solve(self, mesh, max_iterations=1000, drms=1e-9, output=False):
        """Iteratively solve A.x = Xd, re-targeting closest points each
        pass, until the RMS error change drops below *drms*.

        Returns (mesh, final_rms_error).
        """
        td, ts = 0, 0
        for data in self.data:
            data.update_point_data(mesh._core.P[self.param_ids])
        rms_err0 = self.compute_rms_err()
        drms_iter = 1e99
        niter = 0
        while drms_iter > drms and niter < max_iterations:
            niter += 1
            t0 = time.time()
            Xd = self.get_data(mesh) * self.W
            t1 = time.time()
            # Use the precomputed pseudo-inverse when available,
            # otherwise fall back to sparse LSQR.
            if self.svd_invA==None:
                self.lsqr_result = scipy.sparse.linalg.lsqr(self.A, Xd)
                solved_x = self.lsqr_result[0]
            else:
                solved_x = scipy.dot(self.svd_invA, Xd)
            mesh.update_parameters(self.param_ids, solved_x)
            t2 = time.time()
            for data in self.data:
                data.update_point_data(mesh._core.P[self.param_ids])
            rms_err1 = self.compute_rms_err()
            drms_iter = scipy.absolute(rms_err0 - rms_err1)
            rms_err0 = rms_err1
            t3 = time.time()
            td += (t1 - t0) + (t3 - t2)
            ts += t2 - t1
        if output:
            print 'Solve time: %4.2fs, (%4.2fs, %4.2fs)' % (ts+td, ts, td)
            if rms_err0 < 1e-2:
                print 'RMS err: %4.3e (iterations = %d)' % (rms_err0, niter)
            else:
                print 'RMS err: %4.3f (iterations = %d)' % (rms_err0, niter)
        return mesh, rms_err0
    def compute_rms_err(self):
        """RMS of all accumulated closest-point residuals."""
        err_sqr_sum = 0
        num_err = 0
        for data in self.data:
            if data.err_sqr_sum != None and data.num_err != None:
                err_sqr_sum += data.err_sqr_sum
                num_err += data.num_err
        if num_err > 0:
            return scipy.sqrt(err_sqr_sum/num_err)
        else:
            return scipy.sqrt(err_sqr_sum)
    def optimize(self, mesh, Xd, ftol=1e-9, xtol=1e-9, maxiter=0, output=True):
        """Nonlinear least-squares fit of the mesh variables to point
        cloud Xd using self.objfn."""
        mesh.generate()
        Td = cKDTree(Xd)
        x0 = mesh.get_variables()
        t0 = time.time()
        x, success = scipy.optimize.leastsq(self.objfn, x0,
            args=[mesh, Xd, Td], ftol=ftol, xtol=xtol,
            maxfev=maxiter)
        if output: print 'Fit Time: ', time.time()-t0
        mesh.set_variables(x)
        return mesh
    def optimize2(self, mesh, data, ftol=1e-9, xtol=1e-9, maxiter=0, output=True):
        """Like optimize(), but uses the user-supplied objective_function
        and the optional on_start/on_stop hooks."""
        mesh.generate()
        if self.on_start != None:
            mesh, data = self.on_start(mesh, data)
        x0 = mesh.get_variables()
        t0 = time.time()
        x, success = scipy.optimize.leastsq(self.objective_function,
            x0, args=[mesh, data], ftol=ftol, xtol=xtol,
            maxfev=maxiter)
        if output: print 'Fit Time: ', time.time()-t0
        mesh.set_variables(x)
        mesh.update()
        if self.on_stop != None:
            mesh, data = self.on_stop(mesh, data)
        return mesh
    def objfn_mesh_to_data_closest(self, x0, args):
        """Residuals: squared distance from sampled mesh points to their
        closest data points (data KD-tree Td)."""
        mesh, Xd, Td = args[0], args[1], args[2]
        mesh.set_variables(x0)
        NXi = self.Xi.shape[0]
        ind = 0
        for element in mesh.elements:
            self.X[ind:ind+NXi,:] = element.evaluate(self.Xi)
            ind += NXi
        err = Td.query(list(self.X))[0]
        return err*err
    def objfn_data_to_mesh_closest(self, x0, args):
        """Residuals: squared distance from each data point to the closest
        sampled mesh point (tree rebuilt every evaluation)."""
        mesh, Xd, Td = args[0], args[1], args[2]
        mesh.set_variables(x0)
        NXi = self.Xi.shape[0]
        ind = 0
        for element in mesh.elements:
            self.X[ind:ind+NXi,:] = element.evaluate(self.Xi)
            ind += NXi
        Tm = cKDTree(self.X)
        err = Tm.query(list(Xd))[0]
        self.err = err
        return err*err
    def objfn_data_to_mesh_project(self, x0, args):
        """Residuals: squared distance from each data point to its
        projection onto elements 1/2 (hard-coded two-element mesh)."""
        mesh, Xd, Td = args[0], args[1], args[2]
        mesh.set_variables(x0)
        err = scipy.zeros(Xd.shape[0])
        ind = 0
        for xd in Xd:
            xi1 = mesh.elements[1].project(xd)
            xi2 = mesh.elements[2].project(xd)
            if 0<=xi1<=1:
                xi = xi1
            elif 0<=xi2<=1:
                xi = xi2
            else:
                # Out of range on both: clamp to whichever end is nearest.
                Xi = scipy.array([xi1, xi1-1, xi2, xi2-1])
                Xi2 = Xi*Xi
                ii = Xi2.argmin()
                xi = Xi[ii]
            if ii < 2:
                elem = 1
            else:
                elem = 2
            dx = mesh.elements[elem].evaluate(scipy.array([xi]))[0] - xd
            err[ind] = scipy.sum(dx * dx)
            ind += 1
        return err
|
#!/usr/bin/python
### import guacamole libraries
import avango
import avango.gua
from avango.script import field_has_changed
class SceneScript(avango.script.Script):
    """Keyboard handler that cycles through the three scene rooms.

    Each key press advances office (1) -> labyrinth (2) -> classroom (3)
    -> office (1), toggling room visibility via "invisible" tags, and
    publishes the current room on the sf_room_number output field.
    """

    #input field
    sf_button8 = avango.SFBool() ##

    #output field
    sf_room_number = avango.SFInt()

    ## constructor
    def __init__(self):
        self.super(SceneScript).__init__()

        self.keyboard_sensor = avango.daemon.nodes.DeviceSensor(DeviceService = avango.daemon.DeviceService())
        self.keyboard_sensor.Station.value = "device-keyboard"

        ## init field connections
        self.sf_button8.connect_from(self.keyboard_sensor.Button21) # v for mono or stereo ##

        ### init variables
        self.CLASS = None

    def my_constructor(self, CLASS, ROOM_NUMBER):
        ### external reference
        self.CLASS = CLASS
        # BUGFIX: was `sf_room_number = ROOM_NUMBER`, which only bound a
        # local name -- the output field itself was never initialised.
        self.sf_room_number.value = ROOM_NUMBER

    @field_has_changed(sf_button8) ##
    def sf_button8_changed(self):
        if self.sf_button8.value == True: # key pressed
            if self.CLASS is not None:
                if self.CLASS.room_number != 3:
                    self.CLASS.room_number = self.CLASS.room_number + 1
                    if self.CLASS.room_number == 2:
                        self.CLASS.office_room.Tags.value = ["invisible"]
                        #self.CLASS.kinect_node.Tags.value = ["invisible"]
                        self.CLASS.labyrinth_room.Tags.value = []
                    else:
                        self.CLASS.labyrinth_room.Tags.value = ["invisible"]
                        self.CLASS.classroom_room.Tags.value = []
                else:
                    self.CLASS.room_number = 1
                    self.CLASS.office_room.Tags.value = []
                    #self.CLASS.kinect_node.Tags.value = []
                    self.CLASS.classroom_room.Tags.value = ["invisible"]
                # BUGFIX: was a bare local assignment; publish the new room
                # number on the output field so downstream connections fire.
                self.sf_room_number.value = self.CLASS.room_number
class Scene:
    """Static scene graph: two lights plus three switchable rooms (office,
    classroom, labyrinth) whose visibility is toggled via "invisible" tags
    by the companion SceneScript."""

    ## constructor
    def __init__( self
        , PARENT_NODE = None
        ):
        """Build all lights, geometries and room transform nodes and attach
        them under PARENT_NODE; the office room starts visible."""

        ### resources ###

        ## init scene light
        self.scene_light = avango.gua.nodes.LightNode(Name = "scene_light", Type = avango.gua.LightType.POINT)
        self.scene_light.Color.value = avango.gua.Color(0.9, 0.9, 0.9)
        self.scene_light.Brightness.value = 15.0
        self.scene_light.Falloff.value = 1.0 # exponent
        #self.scene_light.Softness.value = 2.0
        self.scene_light.EnableShadows.value = False
        self.scene_light.ShadowMapSize.value = 512
        #self.scene_light.ShadowOffset.value = 0.002
        #self.scene_light.ShadowMaxDistance.value = 5
        self.scene_light.Transform.value = avango.gua.make_trans_mat(0.0, 0.5, 0.0) * \
            avango.gua.make_rot_mat(-90.0, 1, 0, 0) * \
            avango.gua.make_scale_mat(2.0)
        PARENT_NODE.Children.value.append(self.scene_light)

        #SUN
        self.sun_light = avango.gua.nodes.LightNode(Name = "sun_light", Type = avango.gua.LightType.SUN)
        self.sun_light.Color.value = avango.gua.Color(0.9, 0.9, 0.9)
        self.sun_light.Brightness.value = 55.0
        self.sun_light.Falloff.value = 1.0 # exponent
        self.sun_light.EnableShadows.value = False
        self.sun_light.ShadowMapSize.value = 512
        self.sun_light.Transform.value = avango.gua.make_trans_mat(0.0, 2.1, 0.0) * \
            avango.gua.make_rot_mat(-90.0, 1, 0, 0)
        PARENT_NODE.Children.value.append(self.sun_light)

        ## init scene geometries
        _trimesh_loader = avango.gua.nodes.TriMeshLoader() # get trimesh loader to load external tri-meshes

        ## init office
        self.office_geometry = _trimesh_loader.create_geometry_from_file("office_geometry", "../objects/office/best-office.obj", avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.LOAD_MATERIALS | avango.gua.LoaderFlags.MAKE_PICKABLE)
        self.office_geometry.Transform.value = avango.gua.make_trans_mat(0.0, -0.55, -0.0) * \
            avango.gua.make_scale_mat(0.4, 0.4, 0.4)
        '''
        for _child in self.office_geometry.Children.value:
            _child.Material.value.set_uniform("Emissivity", 1.0)
            #_child.Material.value.set_uniform("Color", avango.gua.Vec4(1.0,0.0,0.0,1.0))
            #_child.Material.value.set_uniform("Roughness", 0.5)
            #_child.Material.value.set_uniform("Metalness", 0.5)
        '''

        ## init table
        self.table_geometry = _trimesh_loader.create_geometry_from_file("table_geometry", "../objects/office/round_table.obj", avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.MAKE_PICKABLE)
        self.table_geometry.Transform.value = avango.gua.make_trans_mat(0.0, -0.55, 0.0) * \
            avango.gua.make_scale_mat(6.0, 7.0, 7.0)
        #self.table_geometry.Material.value.set_uniform("ColorMap", "../objects/office/Texture-2.jpg")
        #self.table_geometry.Material.value.set_uniform("NormalMap", "../objects/office/round_table_normal.jpg")

        ## init dish 1
        self.transform_dish = avango.gua.nodes.TransformNode()
        self.dish_geometry = _trimesh_loader.create_geometry_from_file("dish_geometry", "../objects/dish/dish.obj", avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.MAKE_PICKABLE)
        self.dish_geometry.Transform.value = avango.gua.make_trans_mat(0.45, 0.14, 0.6) * \
            avango.gua.make_scale_mat(0.1, 0.1, 0.1) * \
            avango.gua.make_scale_mat(3.0, 3.0, 3.0)
        self.dish_geometry.Material.value.set_uniform("ColorMap", "../objects/dish/red_wood.jpg")
        self.transform_dish.Children.value = [self.dish_geometry]

        ## cubes
        self.transform_all_cubes = avango.gua.nodes.TransformNode()

        ## O_atom
        self.transform_O_atom = avango.gua.nodes.TransformNode()
        self.O_atom_geometry = _trimesh_loader.create_geometry_from_file("O_atom_geometry", "../objects/classroom/h2o_molecule.obj", avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.MAKE_PICKABLE)
        self.O_atom_geometry.Transform.value = avango.gua.make_trans_mat(0.45, 0.13, -0.95) * \
            avango.gua.make_scale_mat(0.1, 0.1, 0.1) * \
            avango.gua.make_scale_mat(0.5, 0.5, 0.5)
        self.O_atom_geometry.Material.value.set_uniform("ColorMap", "../textures/cube.png")
        self.O_atom_geometry.Tags.value = ["moveable"]
        self.transform_O_atom.Children.value = [self.O_atom_geometry]

        ## H_atom
        self.transform_H1_atom = avango.gua.nodes.TransformNode()
        self.H1_atom_geometry = _trimesh_loader.create_geometry_from_file("H1_atom_geometry", "../objects/classroom/h2o_molecule.obj", avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.MAKE_PICKABLE)
        self.H1_atom_geometry.Transform.value = avango.gua.make_trans_mat(0.15, 0.13, -0.85) * \
            avango.gua.make_scale_mat(0.1, 0.1, 0.1) * \
            avango.gua.make_scale_mat(0.3, 0.3, 0.3)
        self.H1_atom_geometry.Material.value.set_uniform("ColorMap", "../textures/cube.png")
        self.H1_atom_geometry.Tags.value = ["moveable"]
        self.transform_H1_atom.Children.value = [self.H1_atom_geometry]

        ## H_atom
        self.transform_H2_atom = avango.gua.nodes.TransformNode()
        self.H2_atom_geometry = _trimesh_loader.create_geometry_from_file("H2_atom_geometry", "../objects/classroom/h2o_molecule.obj", avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.MAKE_PICKABLE)
        self.H2_atom_geometry.Transform.value = avango.gua.make_trans_mat(0.35, 0.13, -0.80) * \
            avango.gua.make_scale_mat(0.1, 0.1, 0.1) * \
            avango.gua.make_scale_mat(0.3, 0.3, 0.3)
        self.H2_atom_geometry.Material.value.set_uniform("ColorMap", "../textures/cube.png")
        self.H2_atom_geometry.Tags.value = ["moveable"]
        self.transform_H2_atom.Children.value = [self.H2_atom_geometry]

        # NOTE(review): each pass rebinds self.transform_cube1 /
        # self.cube1_geometry, so after the loop only the LAST cube is
        # reachable through those attributes (all five share the node name
        # "cube1_geometry").  All five ARE kept under transform_all_cubes.
        for i in range(5):
            self.transform_cube1 = avango.gua.nodes.TransformNode()
            self.cube1_geometry = _trimesh_loader.create_geometry_from_file("cube1_geometry", "../objects/cube/cube.obj", avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.MAKE_PICKABLE)
            self.cube1_geometry.Transform.value = avango.gua.make_scale_mat(0.6, 0.6, 0.6)
            # avango.gua.make_scale_mat(0.05, 0.05, 0.05)
            self.cube1_geometry.Material.value.set_uniform("ColorMap", "../textures/cube.png")
            self.transform_cube1.Children.value = [self.cube1_geometry]
            self.cube1_geometry.Tags.value = ["moveable"]
            self.transform_all_cubes.Children.value.append(self.transform_cube1)

        ## init kinect video avatar
        '''
        _video_loader = avango.gua.nodes.Video3DLoader() # get video-3D loader
        #self.kinect_node = _video_loader.load("kinect_node", "/opt/kinect-resources/shot_lcd_KV2_X_5.ks")
        #self.kinect_node = _video_loader.load("kinect_node", "/opt/kinect-resources/rgbd-framework/recordings/stepptanz/stepptanz_from_charon.ks")
        self.kinect_node = _video_loader.load("kinect_node", "/opt/kinect-resources/calib_3dvc/surface_23_24_25_26.ks")
        self.kinect_node.Transform.value = avango.gua.make_trans_mat(0.0, 0.0, 0.0) * \
            avango.gua.make_rot_mat(180.0,0,1,0) * \
            avango.gua.make_scale_mat(0.12)
        PARENT_NODE.Children.value.append(self.kinect_node)
        '''

        ## init labyrinth
        self.labyrinth_geometry = _trimesh_loader.create_geometry_from_file("labyrinth_geometry", "../objects/labyrinth/labyrinth-textured2.obj", avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.LOAD_MATERIALS| avango.gua.LoaderFlags.MAKE_PICKABLE)
        self.labyrinth_geometry.Transform.value = avango.gua.make_scale_mat(6.0, 3.0, 6.0) * \
            avango.gua.make_trans_mat(0.0, -0.6, 0.0)

        ## init tables
        self.tables_geometry = _trimesh_loader.create_geometry_from_file("tables_geometry", "../objects/classroom/tables_distance.obj", avango.gua.LoaderFlags.DEFAULTS | avango.gua.LoaderFlags.LOAD_MATERIALS| avango.gua.LoaderFlags.MAKE_PICKABLE)
        self.tables_geometry.Transform.value = avango.gua.make_trans_mat(0.0, -0.55, -0.0) * \
            avango.gua.make_scale_mat(0.3, 0.3, 0.3)

        ## init rooms
        self.office_room = avango.gua.nodes.TransformNode()
        self.office_room.Children.value = [self.office_geometry, self.table_geometry, self.transform_dish, self.transform_all_cubes]
        PARENT_NODE.Children.value.append(self.office_room)

        # NOTE(review): office_geometry and transform_dish are attached to
        # BOTH office_room and classroom_room -- verify that sharing one
        # node under two parents is intended here.
        self.classroom_room = avango.gua.nodes.TransformNode()
        self.classroom_room.Children.value = [self.office_geometry, self.tables_geometry, self.transform_O_atom, self.transform_dish, self.transform_H1_atom, self.transform_H2_atom]
        PARENT_NODE.Children.value.append(self.classroom_room)
        self.classroom_room.Tags.value = ["invisible"]

        self.labyrinth_room = avango.gua.nodes.TransformNode()
        self.labyrinth_room.Children.value = [self.labyrinth_geometry]
        PARENT_NODE.Children.value.append(self.labyrinth_room)
        self.labyrinth_room.Tags.value = ["invisible"]

        self.room_number = 1  # office room is the initial view

        self.script = SceneScript()
        self.script.my_constructor(CLASS = self, ROOM_NUMBER = self.room_number)
|
import copy
from typing import List
class Solution:
    def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
        """Return every word from *words* that can be traced on *board*.

        A word is traced by moving between orthogonally adjacent cells,
        using each cell at most once per word.  Implemented as a trie of
        the candidate words plus DFS backtracking over the board.
        """
        if not board:
            return []
        rows = len(board)
        cols = len(board[0])
        cells = rows * cols

        # Build the trie; the '-' key marks a complete word and stores it,
        # so matches are reported without re-joining the letters.
        trie = {}
        for word in words:
            if len(word) > cells:  # cannot possibly fit on the board
                continue
            node = trie
            for ch in word + '-':
                if not node.get(ch):
                    node[ch] = {}
                if ch == '-':
                    node[ch] = word
                node = node[ch]

        found = {}

        def dfs(grid, node, pos):
            row, col = pos
            child = node.get(grid[row][col], False)
            if type(child) != dict:
                return  # cell letter is not on any remaining trie path
            if '-' in child:
                found[child['-']] = True  # complete word reached
            # Mark the cell as used, explore the four neighbours, restore.
            saved = grid[row][col]
            grid[row][col] = '0'
            for nr, nc in ((row + 1, col), (row - 1, col), (row, col + 1), (row, col - 1)):
                if 0 <= nr < rows and 0 <= nc < cols:
                    dfs(grid, child, (nr, nc))
            grid[row][col] = saved

        for row in range(rows):
            for col in range(cols):
                if type(trie.get(board[row][col])) == dict:  # first letter matches
                    dfs(board, trie, (row, col))
        return list(found)
# Trie, one pass across the whole board checking all words at once, with backtracking
# Board size: n x m
# Number of words: w
# Maximum length of a word: l; l < n*m
# Time: O(n*m*w*l + w*l)
# Space: O(w*l)
# Runtime: 460 ms, faster than 28.81% of Python3 online submissions for Word Search II.
# Memory Usage: 27.1 MB, less than 100.00% of Python3 online submissions for Word Search II.
from .views import home_view,detail_view,tagged,TagDetailView
from django.contrib import admin
from django.urls import include, path
# URL routes for the blog app:
#   ''               -> article list (home)
#   'article/<slug>' -> single-article detail page
#   'tagged/<slug>'  -> articles filtered by tag (class-based view replaced
#                       the commented-out function view)
urlpatterns = [
    path('',home_view),
    path('article/<slug:slug>',detail_view,name='detail'),
    # path('tagged/<slug:slug>',tagged,name='tagged')
    path('tagged/<slug:slug>',TagDetailView.as_view(),name='tagged')
]
|
from django.conf.urls import url
from django.contrib import admin
from API.views import *
from API.web_view import *
# API endpoints (mobile/device-facing) plus 'web/'-prefixed AJAX views
# used by the web dashboard.
urlpatterns = [
    url(r'^update_location_and_parameters/', update_location_and_parameters),
    url(r'^update_stop_location/', update_stop_location),
    url(r'^get_bus_data_current_time/', get_bus_data_current_time),
    url(r'^get_bus_data_from_time/', get_bus_data_from_time),
    url(r'^web/get_bus_locations/', get_bus_location_ajax),
    url(r'^web/marker_update/', marker_update),
    url(r'^web/get_fuel_data/', get_fuel_data),
    url(r'^get_bus_data_user/', get_bus_data_from_user),
    url(r'^get_stop_data_from_time/', get_stop_data_from_time),
    url(r'^update_status/', update_status),
    url(r'^get_json_from_csv/', get_json_from_csv),
    url(r'^get_bus_location_from_time/', get_bus_location_from_time),
    url(r'^get_all_bus_data/',get_all_bus_data),
]
|
""" Convenience wrapper to handle Oauth authentication
"""
import requests
class LiveAuth(object):
    """ Live Oauth authentication helper.

    Builds the authorization-dialog URL and exchanges an authorization
    code for tokens against the login.live.com endpoints.
    """
    _base_url = 'https://login.live.com/'
    _authorize_uri = 'oauth20_authorize.srf'
    _token_url = 'oauth20_token.srf'

    def __init__(self, client_id, client_secret, scope, redirect_uri):
        self._client_id = client_id
        self._client_secret = client_secret
        self._scope = scope
        self._redirect_uri = redirect_uri

    def generate_oauth_initiation_url(self, response_type):
        """ generate the oauth dialog initiation url
        The user must be redirected to the obtained url
        @param response_type: the type of response to ask for the server,
        can be 'token' for implicit grant flow or 'code' for Authorization code
        grant flow
        @return: the url to redirect the user to
        @rtype: str
        @raise ValueError: if response_type is neither 'code' nor 'token'
        """
        if response_type != 'code' and response_type != 'token':
            raise ValueError("response_type must be 'code' or 'token'")
        # Percent-encode the query string: scope and redirect_uri routinely
        # contain spaces, ':' and '/' which the previous raw interpolation
        # left unescaped, producing invalid/ambiguous URLs.
        from urllib.parse import urlencode
        query = urlencode({
            'client_id': self._client_id,
            'scope': self._scope,
            'response_type': response_type,
            'redirect_uri': self._redirect_uri,
        })
        return '{base}{authorize}?{query}'.format(
            base=self._base_url,
            authorize=self._authorize_uri,
            query=query)

    def exchange_oauth_code_for_token(self, code):
        """ Call Live API to exchange an authentication code for token(s)
        (second leg of the Authorization-code grant flow)
        @param code: authentication code obtained from the oauth dialog
        @return: API's raw response
        @rtype: Response
        """
        post_data = {
            'client_id': self._client_id,
            'client_secret': self._client_secret,
            'redirect_uri': self._redirect_uri,
            'grant_type': 'authorization_code',
            'code': code
        }
        return requests.post('{base}{token}'.format(
            base=self._base_url, token=self._token_url),
            data=post_data)
#This script has not been formatted for external use
import copy
import open3d as o3d
import numpy as np
import matplotlib.pyplot as plt
#Data Path
#path = "../../../Data/DATA_FROM_EARLIER_PHOTOS/Textured_OBJ/"
path = "../../../Data/DanielHess_Dataset2/"   # directory containing the input .obj meshes
detPath = "../../../Data/detrital_mesh2.obj"  # background ("detrital") mesh, rendered black
path_out = "out/class/"                       # output directory for rendered PNGs
from os import listdir
from os.path import isfile, join
# Collect every .obj in the dataset directory; base_name strips the ".obj"
# extension and is used to name each output image.
allfiles = [f for f in listdir(path) if isfile(join(path, f))]
allObjNames = [f for f in allfiles if (f.find(".obj") != -1)]
base_name = [allObjNames[a][:-4] for a in range(len(allObjNames))]
detMesh = o3d.io.read_triangle_mesh(detPath)
detMesh.paint_uniform_color([0,0,0])  # paint the background mesh black
def move_forward(vis):
    """One-shot Open3D animation callback: capture a screenshot of the
    current view, then unregister itself and tear the window down.

    Reads the module globals ``path_out``, ``base_name`` and ``index`` to
    build the output filename.
    """
    out_file = "{0}{1}_class.png".format(path_out, base_name[index])
    vis.capture_screen_image(out_file, True)
    vis.register_animation_callback(None)
    vis.destroy_window()
    return False
#Suppressing output
o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)
from tqdm import tqdm
# Render each mesh once: white foreground mesh plus black background mesh
# on a red canvas; the one-shot move_forward callback saves the image.
for a in tqdm(range(len(allObjNames))):
    index = a  # module global read by move_forward to name the output image
    mesh = o3d.io.read_triangle_mesh(path + allObjNames[index],False)
    mesh.paint_uniform_color([1,1,1])
    backMesh = copy.deepcopy(detMesh)
    #theta = -0.3233476477503846 #This is the theta we need... just trust me
    theta = -0.0187 #Value for new dataset
    c_center = mesh.get_center()
    mesh.rotate(mesh.get_rotation_matrix_from_xyz((0, 0, theta)),center=c_center)
    # NOTE(review): backMesh is rotated with *mesh*'s rotation matrix about
    # mesh's center -- presumably to keep the two meshes aligned; confirm.
    backMesh.rotate(mesh.get_rotation_matrix_from_xyz((0, 0, theta)),center=c_center)
    vis = o3d.visualization.Visualizer()
    vis.create_window(width=4096,height=4096,visible=False)#Max depth resolution in 1080, for some reason won't render properly for any higher
    vis.add_geometry(mesh)
    ctr = vis.get_view_control()
    ctr.set_zoom(0.5)
    ctr.change_field_of_view(step=-13)
    vis.add_geometry(backMesh,reset_bounding_box=False)
    opt = vis.get_render_option()
    opt.background_color = np.asarray([1, 0, 0]) #setting background to red
    vis.register_animation_callback(move_forward)
    vis.run()
    # Release per-iteration handles before the next window is created.
    del opt
    del ctr
    del vis
|
from tealight.net import connect, send
import tealight.utils
import random
# Pseudo-unique client id: current timestamp plus a random offset.
userId= int(tealight.utils.now()) +random.randint(0,1000000)
connect("racetracksix")
send("connected")
#def regisrtation_handler(message):
#for i in range (0, carNumber):
#call draw car function and increment placement
#horizontally? also change colour
def authenticated_send(data, to, type):
    # Wrap the payload in an envelope identifying sender, recipient and
    # message type, then transmit it.
    # NOTE(review): `type` shadows the builtin and `data` is rebound to the
    # envelope dict; both kept for interface compatibility. (Python 2 file.)
    data = {"to": to, "type": type, "payload": data, "user_id": userId}
    send(data)
    print "Just sent", data
# Register this client with the server, then send an initial heartbeat.
authenticated_send(userId, "server", "registration")
authenticated_send(tealight.utils.now(), "server", "heartbeat")
def client_handle_frame():
    # Per-frame hook: keep the connection alive with a heartbeat.
    authenticated_send(tealight.utils.now(), "server", "heartbeat")
lastSent = tealight.utils.now()
'''
Given an array nums of n integers where nums[i] is in the range [1, n], return an array of all the integers in the range [1, n] that do not appear in nums.
'''
numss = [4,3,2,7,8,2,3,1]  # sample input; expected missing numbers: [5, 6]
# Approach 1
# Intuitive: set difference between the full range [1, n] and the seen values.
# Time: O(n); Space: O(n) for the two sets.
def findDisappearedNumbers(nums):
    """Return the sorted list of values in [1, len(nums)] missing from nums.

    Fix: the problem statement asks for an array; the original returned the
    raw set, so the difference is now converted to a sorted list.
    """
    n = len(nums)
    return sorted(set(range(1, n + 1)) - set(nums))
# Approach 2
# O(1) extra space, O(n) time: negate the value stored at index |v|-1 to mark
# v as seen; afterwards, indices still holding positive values correspond to
# the missing numbers. NOTE: mutates the input list (signs are flipped).
def findDisappearedNumbers(nums):
    for value in nums:
        slot = abs(value) - 1
        nums[slot] = -abs(nums[slot])
    return [idx + 1 for idx, value in enumerate(nums) if value > 0]
findDisappearedNumbers(numss)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 19:53:40 2021
@author: joel
"""
# load the required functions
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy.random import seed
from numpy.random import multivariate_normal
import statsmodels.api as sm
from sklearn.ensemble import RandomForestRegressor
import seaborn as sns
import numpy as np
############# Functions for the DGP ##################
def dgp(b, mean, cov, u_sd, n):
    """One draw of a DGP with multivariate-normal covariates, a randomly
    assigned 0/1 treatment and additive normal noise.

    Inputs:
        - b: vector (1D-array) of true betas (first entry is the constant)
        - mean: vector (1D-array) of means of the multivariate normal
        - cov: covariance matrix (2D-array) of the multivariate normal
        - u_sd: standard deviation of the normal noise term
        - n: number of observations
    Outputs:
        - x: covariate matrix without the treatment column
        - x_t: covariate matrix with a random treatment indicator
          (1 with probability 0.4) prepended as the first column
        - y: outcome vector
    """
    x = multivariate_normal(mean, cov, n)
    treatment = np.random.binomial(1, 0.40, size=n)
    x_t = np.c_[treatment, x]
    noise = np.random.normal(0, u_sd, n)
    y = b[0] + x_t @ b[1:] + noise
    return x, x_t, y
def dgp2(b, mean, cov, u_sd, n):
    """One draw of a DGP where treatment is NOT random: a unit is treated
    (x_t = 1) exactly when its covariate value reaches 10, plus normal noise.

    Inputs:
        - b: vector (1D-array) of true betas (first entry is the constant)
        - mean: mean vector of the (multivariate) normal covariate
        - cov: covariance matrix of the (multivariate) normal covariate
        - u_sd: standard deviation of the normal noise term
        - n: number of observations
    Outputs:
        - x: covariate matrix without the treatment
        - x_t: 0/1 treatment indicator, same shape as x
        - y: outcome vector
    """
    x = multivariate_normal(mean, cov, n)
    # Deterministic threshold assignment: treated iff x >= 10.
    x_t = np.where(x < 10, 0, 1)
    noise = np.random.normal(0, u_sd, n)
    y = b[0] + x @ b[1:] + x_t @ b[1:] + noise
    return x, x_t, y
############# Functions for the estimators ##################
def ols(x,y):
    """OLS coefficients via the normal equations.

    Inputs:
        - x: covariate matrix (without intercept column)
        - y: outcome vector
    Output:
        - betas: regression coefficients (1D-array), constant first
    """
    n_obs = y.shape[0]
    design = np.c_[np.ones(n_obs), x]        # prepend the intercept column
    gram = design.T @ design
    return np.linalg.inv(gram) @ design.T @ y
def ipw(exog, t, y, reps):
    """Average treatment effect via inverse probability weighting (IPW).

    Inputs:
        - exog: covariate matrix (without the treatment)
        - t: binary treatment assignment vector
        - y: outcome vector
        - reps: number of bootstrap replications for the ATE std. deviation
    Output:
        - [ate, ate_std]: ATE point estimate and its bootstrap std. deviation
    """
    # convert all passed matrices into pandas data frame or Series
    exog = pd.DataFrame(exog)
    t = pd.Series(t)
    y = pd.Series(y)
    # Propensity scores: fitted probabilities from a logit of t on exog.
    pscores = sm.Logit(endog=t, exog=sm.add_constant(exog)).fit(
        disp=0).predict()
    # IPW (Horvitz-Thompson style) estimator of the ATE.
    ate = np.mean((t * y) / pscores - ((1 - t) * y) / (1 - pscores))
    ate_boot = []
    for rep in range(reps):
        boot_sample = np.random.choice(exog.index, size=exog.shape[0], replace=True)
        treat_boot = t.loc[boot_sample]
        y_boot = y.loc[boot_sample]
        # append the ate score per bootstrap run
        # NOTE(review): pscores is neither re-estimated nor resampled for the
        # bootstrap draw -- the resampled Series are combined with the ORIGINAL
        # propensity scores positionally. Confirm this is intended.
        ate_boot.append(np.mean((treat_boot * y_boot) / pscores - ((1 - treat_boot) * y_boot) / (1 - pscores)))
    ate_std = np.std(ate_boot)
    # t_val = ate / ate_std
    # df = pd.DataFrame({"ate": ate, "ate_std": ate_std, "t_value": t_val})
    return [ate, ate_std]
def OLS_DML(Y, X, D):
    """Approach in order to compare OLS and (naive) DML treatment estimators.
    Inspired by: https://github.com/jgitr/opossum/blob/master/double_machine_learning_example/dml.py
    Inputs:
        - Y: Regression vector (outcomes)
        - X: covariates matrix
        - D: Assignment (treatment) vector
    Output:
        - treatment_est: array [OLS estimate, naive-DML estimate]
    """
    # array to store OLS, naive and cross result
    treatment_est = np.zeros(2)
    N = len(Y)  # NOTE(review): unused
    num_trees = 50  # forest size for both nuisance models
    # Now run the different methods
    #
    # OLS --------------------------------------------------
    # Regression of Y on D alone (no covariates, no constant).
    OLS = sm.OLS(Y,D)
    results = OLS.fit()
    treatment_est[0] = results.params[0]
    # Naive double machine Learning ------------------------
    naiveDMLg =RandomForestRegressor(num_trees , max_depth=2)
    # Compute ghat: random-forest fit of the outcome on the covariates
    naiveDMLg.fit(X,Y)
    Ghat = naiveDMLg.predict(X)
    naiveDMLm =RandomForestRegressor(num_trees , max_depth=2)
    naiveDMLm.fit(X,D)
    Mhat = naiveDMLm.predict(X)
    # vhat as residual of the treatment equation
    Vhat = D-Mhat
    treatment_est[1] = np.mean(np.dot(Vhat,Y-Ghat))/np.mean(np.dot(Vhat,D))
    return treatment_est
############# Function for the simulations ##################
def simulation(b, mean, cov, u_sd, sz, mc):
    """Monte-Carlo simulation using dgp(), ols() and ipw().
    Inputs:
        - b: Vector (1D-array) of true betas (first one for constant)
        - mean: Vector (1D-array) of means from multivariate normal
        - cov: covariance matrix (2D-array) from multivariate normal
        - u_sd: standard deviation of the normally distributed noise variable
        - sz: iterable of sample sizes
        - mc: Number of simulations per sample size
    Output:
        - Dataframe of MSEs from the OLS and IPW estimators per sample size
    """
    # set up an empty data frame
    df_mse = pd.DataFrame(columns = ["OLS_MSE", "IPW_MSE", "IPW_std_ATE"])
    num_simulations = mc  # NOTE(review): unused; the loop below uses mc directly
    n_list = []
    for i in sz:
        # append the sample size
        n_list.append(i)
        # will become a list of lists, where each element holds three betas
        beta_list = []
        ate_list = []
        # estimate the betas
        for j in range(mc):
            # generate data
            x, x_t, y = dgp(b, mean, cov, u_sd, i)
            # estimate betas and store them in "beta_list"
            beta_list.append(ols(x_t, y))
            # np.choose([0]*i, x_t.T) selects row 0 of x_t.T, i.e. the
            # treatment column of x_t.
            ate_list.append(ipw(x, np.choose([0] * i, x_t.T), y, 100))
        # calculate the mean per beta coefficient
        aggr_betas = [np.array(beta_list)[:, j].mean() for j in range(len(beta_list[0]))]
        aggr_ate = [np.array(ate_list)[:, j].mean() for j in range(len(ate_list[0]))]
        # put the average squared deviation of the beta-estimation and the
        # originally set values into the data frame
        # PLUS the "ATE", its Standard Deviation and the t-value
        df_mse.loc[len(n_list) - 1] = [((b[1] - aggr_betas[1])**2) / i] + [((b[1] - aggr_ate[0])**2) / i] + aggr_ate[1:3]
    # insert a sample size column into the data frame
    df_mse.insert(loc = 0, column = "n", value = n_list)
    return df_mse
|
#!/usr/bin/env python
# Ipred1 script is intended to get kmers from fasta file of viral or human proteome
# and predict their binding affinity to MHC via netMHCpan in parallel
import os, sys, subprocess, argparse, tempfile, shutil
import pandas as pd
from tqdm import tqdm, trange
from Bio import SeqIO
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--length', '-l', help='The length of generated peptides')
    parser.add_argument('--file', '-f', help='Path to the file with fasta sequence to generate k-mers')
    parser.add_argument('--destination', '-d', default='CMV_HLA-A02:01_nmp.txt',
                        help='Path where to store output, full path is prefered')
    parser.add_argument('--hla', '-a', default="HLA-A02:01", help='HLA allele for prediction')
    args = parser.parse_args()
    l = args.length
    f = args.file
    d = args.destination
    a = args.hla
    length = int(l)
    fasta_file = str(f)
    destination = str(d)
    hla_allele = str(a).replace('*', '')  # netMHCpan allele names omit '*'
    # This is path to netMHCpan in your system. Don't forget to change it accordingly!
    netmhcpan = '/home/vcvetkov/Tools/netMHCpan-4.0/netMHCpan'
    ncores = 16
    # Reading fasta file with proteome sequence
    record_dict = SeqIO.index(fasta_file, "fasta")
    # Generating k-mer peptides dataframe containing origin protein info for each peptide
    # pdf is peptide dataframe
    pdf = pd.DataFrame()
    for key in tqdm(record_dict):
        lst = []
        df = pd.DataFrame()
        # Slide a window of `length` over the protein sequence.
        for i in range(len(record_dict[key].seq)-length):
            string = str(record_dict[key].seq[i:i+length])
            lst.append(string)
        df['Peptide'] = lst
        df['Origin_protein'] = key.split('|')[-1]
        pdf = pd.concat([pdf, df], axis=0)
    pdf = pdf.reset_index(drop=True)
    # Creating temporary directories for files
    tmpdir = tempfile.mkdtemp()
    parallel_tmpdir = tempfile.mkdtemp()
    nmp_tempdir = tempfile.mkdtemp()
    prefix = tmpdir + "/split_"
    midpoint = nmp_tempdir + '/unprocessed_file_nmp.txt'
    # Splitting into 5000 line files (netMHCpan constraint)
    for i in trange(1, len(pdf)//5000 + 2):
        if i > len(pdf)//5000:
            df = pdf.iloc[5000*(i-1):,:]
        else:
            df = pdf.iloc[5000*(i-1):5000*i,:]
        df['Peptide'].to_csv(prefix + "{0:04}".format(i) + '.txt', index=False, header=False)
    # Paralleling netMHCpan with gnu parallel
    process = subprocess.run('parallel --eta --jobs {} -k \
                             --tmpdir {} {} -p -BA -a {} \
                             -f ::: {} > {}'.format(ncores,
                                                    parallel_tmpdir,
                                                    netmhcpan,
                                                    hla_allele,
                                                    prefix + '*',
                                                    midpoint), shell=True)
    # print('Subprocess returned code {}'.format(process.returncode))
    if not process.returncode:
        print('.', end='')  # check 1 of 5: parallel netMHCpan run succeeded
    process = subprocess.run("sed -i '/^ /!d' {}".format(midpoint), shell=True)
    # print('Subprocess returned code {}'.format(process.returncode))
    if not process.returncode:
        print('.', end='')  # check 2 of 5: sed cleanup succeeded
    if os.path.isfile(midpoint):
        print('.', end='')  # check 3 of 5: intermediate file exists
    if not os.stat(midpoint).st_size == 0:
        print('.', end='')  # check 4 of 5: intermediate file is not empty
    # netMHCpan's output format is weird and pandas cannot read it properly.
    # Here we remove extra spaces and the last column. The latter could be used
    # but it is not necessary
    with open(midpoint, 'r+') as file:
        lines = []
        for line in file:
            lines.append(line.strip().split()[:14])
    columnlst = lines[0]
    # Drop repeated header rows emitted by each parallel chunk.
    lines = [line for line in lines if line != columnlst]
    tdf = pd.DataFrame(lines, columns=columnlst)
    tdf = tdf[['HLA', 'Peptide', '%Rank']]
    tdf['Origin_protein'] = pdf['Origin_protein'].values
    # Check that the order is preserved and each peptide matches with its origin protein
    lst1 = list(pdf['Peptide'].values)
    lst2 = list(tdf['Peptide'].values)
    diff = []
    for i in range(len(lst1)):
        if lst1[i] != lst2[i]:
            diff.append(i)
    if not len(diff):
        print('.')  # check 5 of 5: peptide order preserved
    # There are overall five checks in this script. If there are five dots in the output
    # then the final file should have been processed correctly
    flag = False
    if os.path.isdir(destination):
        # Destination is a directory: build a descriptive filename and avoid
        # clobbering existing outputs by appending a two-digit suffix.
        smart_name = destination + '/' + os.path.basename(fasta_file).split('.')[0] + '_' + hla_allele + '_nmp'
        if not os.path.isfile(smart_name + '.csv'):
            tdf.to_csv(smart_name + '.csv', index=False)
            flag = True
        else:
            for i in range(10):
                if not os.path.isfile(smart_name + '{0:02}.csv'.format(i)):
                    tdf.to_csv(smart_name + '{0:02}.csv'.format(i), index=False)
                    flag = True
                if flag:
                    break
    else:
        tdf.to_csv(destination, index=False)
        flag = True
    if not flag:
        # Fallback: all ten suffixed names were taken; overwrite destination.
        tdf.to_csv(destination, index=False)
        flag = True
    if flag:
        print("Predictions have been made correctly")
    else:
        print("Something went wrong")
    shutil.rmtree(tmpdir)
    shutil.rmtree(parallel_tmpdir)
    shutil.rmtree(nmp_tempdir)
|
import qsim
import qgates
from random import randint
from math import sqrt
# number of bits on which f operates
n = 6
# secret element for which f(x) = 1 iff x = c
c = randint(0, 2**n-1)
print("Secret:", c)
# define f(x) = 1 iff x = c
def f(x):
    return 1 if x == c else 0
# create unitary operator for f such that x -> x and y -> y XOR f(x)
Uf = qgates.Uf(n, f)
# create hadamard operator for n qubits
Hn = qgates.Hn(n)
# create mean inversion (diffusion) operator for n qubits
MI = qgates.mean_inversion(n)
# initialize quantum state: n query qubits |0> plus one ancilla qubit |1>
state = qsim.create_state([0 for _ in range(n)] + [1])
# apply hadamard to x (uniform superposition over the n query qubits)
state = qsim.apply_gate(state, Hn)
# apply hadamard to y (the ancilla at position n)
state = qsim.apply_gate(state, qgates.H, n)
# number of amplification rounds, ~sqrt(2**n)
loop = int(sqrt(2**n))
# repeat sqrt(2**n) times
for i in range(loop):
    # apply Uf to x (oracle marks the secret element)
    state = qsim.apply_gate(state, Uf)
    # apply mean inversion to x (amplitude amplification)
    state = qsim.apply_gate(state, MI)
# get results
result = qsim.measure_all_standard(state)
# print x values: with high probability this equals the secret c
print("Guess:", qsim.int_from_binary_list([result[i] for i in range(n)]))
""" FIDUCEO FCDR harmonisation
Author: Arta Dilo / NPL MM
Date created: 06-12-2016
Last update: 20-03-2017
Version: 10.0
Harmonisation functions for a pair-wise implementation and for all the sensors
together using odr package. Functions implement weighted ODR (an EIV method)
for a pair sensor-reference and for multiple pairs of type sensor-reference and
sensor-sensor. """
import scipy.odr as odr
from numpy import logical_not
# AVHRR measurement equation
def avhrrME(CE, Cs, Cict, Lict, To, a0, a1, a2, a3):
    """Earth radiance from Earth counts and calibration data.

    CE/Cs/Cict are Earth/space/ICT counts, Lict the ICT radiance, To the
    orbit temperature; a0..a3 are the calibration coefficients.
    """
    LE = a0 \
        + (0.98514 + a1) * Lict * (Cs - CE) / (Cs - Cict) \
        + a2 * (Cict - CE) * (Cs - CE) \
        + a3 * To
    return LE
# dictionary with measurement eq. function of each sensors' series
MEfunc = {'avhrr': avhrrME}  # extend with other series' equations as needed
""" Model function for re-calibration, i.e. fcn argument in ODR package """
def fcnP(coef, data, slabel='avhrr'):
a0 = coef[0] # AVHRR model coefficients
a1 = coef[1]
a2 = coef[2]
a3 = coef[3]
# transposed ndarrays
CE = data[2,:] # Earth counts
Cs = data[0,:] # space counts
Cict = data[1,:] # ICT counts
Lict = data[3,:] # ICT radiance
To = data[4,:] # orbit temperature
LE = MEfunc[slabel](CE,Cs,Cict,Lict,To,a0,a1,a2,a3)
#print 'Current iteration coefficients:', [a0,a1,a2, a3]
return LE # return Earth radiance
""" Perform LS fit for a sensor-reference pair with low-level odr function """
def odrP(Hdata, Hr, b0, fb=None, fx=None, Hs=None, rsp=1):
# extract variables Cs, Cict, CE, Lict, To from Hdata matrix
X = Hdata[:,1:6].transpose() # transpose data matrix
# Y is adjusted radiance: reference radiance + adjustment values
Y = Hdata[:,0] + Hdata[:,6]
# cacluate weights from uncertainty matrices
if Hs is not None: # weight on both random and systematic uncertainty data
#Hs = resetHs(Hs, rsp) # set sytematic equiv to Peter optimisation prob
VX = (Hr[:,1:6]**2 + Hs[:,1:6]**2).transpose() # sigma^2 of X variables
''' Y = Lref+K: assume independence of ref. radiance and K
K random: in Hr matchups uncertainty, in Hs SRF shifting uncertainty '''
VY = Hr[:,0]**2 + (Hr[:,6]**2+Hs[:,6]**2) # sigma^2 of Y
else: # weight on random uncertainty
VX = (Hr[:,1:6]**2).transpose() # sigma^2 of X
VY = Hr[:,0]**2 + Hr[:,6]**2 # Y sigma^2 (no shifting uncert.)
# perform odr fit (low level function)
if fb: # keep a3 coefficient fixed (defined by fb) and To var fixed (by fx)
fit = odr.odr(fcnP,b0,Y,X,we=1./VY,wd=1./VX,ifixb=fb,ifixx=fx,full_output=1)
else: # fit all coefficients
fit = odr.odr(fcnP,b0,Y,X,we=1./VY,wd=1./VX,full_output=1)
odrFit = odr.Output(fit) # get odr fit output
return odrFit # return odr output
""" Perform ODR over MC generated data with ODR best estimates from
real or simulated data and errors """
def odr4MC(Xdata, Ydata, Hr, b0, fb=None, fx=None, Hs=None, rsp=1):
X = Xdata.transpose()
# cacluate weights from uncertainty matrices
if Hs is not None: # weights from combined random & systematic uncertainty
VX = (Hr[:,1:6]**2 + Hs[:,1:6]**2).transpose() # sigma^2 of X variables
''' Y = Lref+K: assume independence of ref. radiance and K
K random: in Hr matchups uncertainty, in Hs SRF shifting uncertainty '''
VY = Hr[:,0]**2 + (Hr[:,6]**2+Hs[:,6]**2) # sigma^2 of Y
else: # weight on random uncertainty
VX = (Hr[:,1:6]**2).transpose() # sigma^2 of X
VY = Hr[:,0]**2 + Hr[:,6]**2 # Y sigma^2 (no shifting uncert.)
# ODR on new X,Y data, perturbed best estimates
if fb: # keep a3 coefficient fixed (defined by fb) and To var fixed (by fx)
fit = odr.odr(fcnP,b0,Ydata,X,we=1./VY,wd=1./VX,ifixb=fb,ifixx=fx,full_output=1)
else: # fit all coefficients
fit = odr.odr(fcnP,b0,Ydata,X,we=1./VY,wd=1./VX,full_output=1)
odrFit = odr.Output(fit) # get odr fit output
return odrFit # return odr output
""" Perform ODR fit for the whole series.
AVHRR measurement model to use for series harmonisation: two virtual sensors
for the data matrices, a block a rows has the specific sensors. """
def seriesODR(Hdata, Hunc2, b0, sensors, series, fb=None, fx=None):
# extract variables from Hdata matrix
X = Hdata[:,0:11].transpose() # X vars; transpose data matrix
Y = Hdata[:,11] # adjustment values K
bsens = sensors.transpose()
VX = Hunc2[:,0:11].transpose() # squared uncertainty X vars
VY = Hunc2[:,11] # K squared uncertainty
def fcnH(coef, Xdata, sp=bsens):
# read data to variable names; transpose ndarrays
Lr1 = Xdata[0,:] # reference radiance 1st sensor; 0 for sensor-sensor pair
Cs1 = Xdata[1,:] # space counts 1st sensor
Cict1 = Xdata[2,:] # ICT counts 1st sensor
CE1 = Xdata[3,:] # Earth counts 1st sensor
Lict1 = Xdata[4,:] # ICT radiance 1st sensor
To1 = Xdata[5,:] # orbit temperature 1st sensor
Cs2 = Xdata[6,:] # space counts 2nd sensor
Cict2 = Xdata[7,:] # ICT counts 2nd sensor
CE2 = Xdata[8,:] # Earth counts 2nd sensor
Lict2 = Xdata[9,:] # ICT radiance 2nd sensor
To2 = Xdata[10,:] # orbit temperature 2nd sensor
s1 = sp[0,:] # 1st sensor index in sensors list (&coeff arr)
s2 = sp[1,:] # 2nd sensor's index
switch = logical_not(s1).astype(int)
p = series.nocoefs # number of calibration coefficients
a01 = coef[s1*p + 0] # fit coefficients 1st sensor [s*p+0 for s in s1]
a11 = coef[s1*p + 1]
a21 = coef[s1*p + 2]
a31 = coef[s1*p + 3]
a02 = coef[s2*p + 0] # fit coefficients 2nd sensor
a12 = coef[s2*p + 1]
a22 = coef[s2*p + 2]
a32 = coef[s2*p + 3]
# fit model
K = avhrrME(CE2,Cs2,Cict2,Lict2,To2,a02,a12,a22,a32) -\
(1-switch) * avhrrME(CE1,Cs1,Cict1,Lict1,To1,a01,a11,a21,a31) -\
switch * Lr1
#print 'Current iteration coefficients:', [a01,a11,a21,a31,a02,a12,a22,a32]
return K
print '\nRunning ODR for multiple pairs\n'
# run low-level odr
if fb: # keep a3 coefficients fixed (fb) and To vars fixed (fx)
fit = odr.odr(fcnH,b0,Y,X,we=1./VY,wd=1./VX,ifixb=fb,ifixx=fx,full_output=1)
#fit = odr.odr(fcnH,b0,Y,X,ifixb=fb,ifixx=fx,iprint=3,rptfile='shFinal.rpt',maxit=20)
else: # fit all coefficients
fit = odr.odr(fcnH,b0,Y,X,we=1./VY,wd=1./VX,full_output=1)
mFit = odr.Output(fit)
return mFit # return ODR output
""" Model function for series harmonisation (fcn argument in ODR package).
This setup with fcnH outside seriesODR function is not working;
possibly the reading of sensors array sp. """
def fcnH2(coef, Xdata, sensors, series):
sp = sensors.transpose()
# read data to variable names; transpose ndarrays
Lr1 = Xdata[0,:] # reference radiance 1st sensor; 0 for sensor-sensor pair
Cs1 = Xdata[1,:] # space counts 1st sensor
Cict1 = Xdata[2,:] # ICT counts 1st sensor
CE1 = Xdata[3,:] # Earth counts 1st sensor
Lict1 = Xdata[4,:] # ICT radiance 1st sensor
To1 = Xdata[5,:] # orbit temperature 1st sensor
Cs2 = Xdata[6,:] # space counts 2nd sensor
Cict2 = Xdata[7,:] # ICT counts 2nd sensor
CE2 = Xdata[8,:] # Earth counts 2nd sensor
Lict2 = Xdata[9,:] # ICT radiance 2nd sensor
To2 = Xdata[10,:] # orbit temperature 2nd sensor
s1 = sp[0,:] # 1st sensor index in sensors list (&coeff arr)
s2 = sp[1,:] # 2nd sensor's index
switch = logical_not(s1).astype(int)
p = series.nocoefs # number of calibration coefficients
a01 = coef[s1*p + 0] # fit coefficients 1st sensor [s*p+0 for s in s1]
a11 = coef[s1*p + 1]
a21 = coef[s1*p + 2]
a31 = coef[s1*p + 3]
a02 = coef[s2*p + 0] # fit coefficients 2nd sensor
a12 = coef[s2*p + 1]
a22 = coef[s2*p + 2]
a32 = coef[s2*p + 3]
# fit model
K = avhrrME(CE2,Cs2,Cict2,Lict2,To2,a02,a12,a22,a32) -\
(1-switch) * avhrrME(CE1,Cs1,Cict1,Lict1,To1,a01,a11,a21,a31) -\
switch * Lr1
#print 'Current iteration coefficients:', [a01,a11,a21,a31,a02,a12,a22,a32]
return K
|
import torch
import os.path
def load_data(sys_name, file_index):
    """Load one trajectory file from '../data/<sys_name>/'.

    File format, one float per line:
        line 0: dim (spatial dimension)
        line 1: n_particles
        then, per frame and per particle: dim position values followed by
        dim velocity values.

    Returns data[frame][particle] == [x..., v...] (positions then
    velocities), or False when the index is out of range, the file is not a
    .txt, or the file length is inconsistent.

    NOTE(review): os.listdir order is unspecified, so file_index may map to
    different files across runs/platforms -- confirm callers sort or don't care.
    """
    filename_list = os.listdir('../data/{0}'.format(sys_name))
    if file_index >= len(filename_list):
        return False
    if filename_list[file_index][-3:] != 'txt':
        return False
    filepath = '../data/{0}/'.format(sys_name) + filename_list[file_index]
    # Context manager guarantees the handle is closed even if parsing raises.
    with open(filepath, 'r') as fd:
        data_raw = [float(line.strip()) for line in fd]
    dim = int(data_raw[0])
    n_particles = int(data_raw[1])
    # mass_list = data_raw[2:2+n_particles]
    # Bug fix: each frame holds 2*dim*n_particles values (position and
    # velocity per particle); the old check used only 2*dim and silently
    # accepted files with a truncated final frame.
    if (len(data_raw) - 2) % (2 * dim * n_particles):  # not an exact multiple
        print("invalid file length: file no{1} of {0}".format(sys_name, file_index))
        return False
    n_frames = (len(data_raw) - 2) // (2 * dim * n_particles)
    data = []
    for frame_index in range(n_frames):
        frame = []
        for particle_index in range(n_particles):
            start = 2 + frame_index * (dim * 2 * n_particles) + particle_index * (dim * 2)
            x = data_raw[start: start + dim]
            v = data_raw[start + dim: start + dim * 2]
            frame.append(x + v)
        data.append(frame)
    return data
from .models import Formador
from django_datatables_view.base_datatable_view import BaseDatatableView
from django.db.models import Q
from formacion.models import Grupo,ParticipanteEscuelaTic, SoporteEntregableEscuelaTic, Masivo, MasivoDocente, Actividad, EvidenciaEscuelaTic, Entregable
from formacion.models import GrupoDocentes, ParticipanteDocente, EvidenciaDocentes, EntregableDocentes
from random import randrange
def unique(seq):
    """Return the elements of *seq* de-duplicated, first occurrence wins,
    original order preserved."""
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
class FormadorTableView(BaseDatatableView):
    """Server-side DataTable of Formador rows, scoped to the region and
    trainer type given in the URL kwargs ('region', 'tipo')."""
    model = Formador
    # Model fields exposed to the table, in column order.
    columns = [
        'id',
        'nombre',
        'cedula',
        'celular',
        'correo',
        'cargo',
        'profesion',
        'banco',
        'tipo_cuenta',
        'numero_cuenta',
        'eps',
        'pension',
        'arl',
        'foto',
        'hv',
        'certificacion',
        'rut',
        'contrato',
        'fotocopia_cedula',
        'antecedentes_judiciales',
        'antecedentes_contraloria',
        'seguro_enero',
        'seguro_febrero',
        'seguro_marzo',
        'seguro_abril',
        'seguro_mayo',
        'seguro_junio',
        'seguro_julio',
        'seguro_agosto',
        'seguro_septiembre',
        'seguro_octubre',
        'seguro_noviembre',
        'seguro_diciembre',
        'fecha_contratacion',
        'fecha_terminacion',
        'contrato_plan_choque',
        'seguro_enero_1',
        'seguro_febrero_1',
        'seguro_marzo_1',
        'seguro_abril_1',
        'liquidacion'
    ]
    # Only the first four columns are orderable (all by 'nombre').
    # NOTE(review): shorter than `columns` (36 vs 41 entries), kept exactly
    # as in the original -- confirm the trailing columns need no ordering.
    order_columns = ['nombre'] * 4 + [''] * 32

    # File/date-valued fields that must be rendered via str() instead of the
    # base class's default serialization.
    _STR_COLUMNS = frozenset([
        'foto', 'hv', 'certificacion', 'rut', 'contrato',
        'fotocopia_cedula', 'antecedentes_judiciales',
        'antecedentes_contraloria', 'contrato_plan_choque', 'liquidacion',
        'seguro_enero', 'seguro_febrero', 'seguro_marzo', 'seguro_abril',
        'seguro_mayo', 'seguro_junio', 'seguro_julio', 'seguro_agosto',
        'seguro_septiembre', 'seguro_octubre', 'seguro_noviembre',
        'seguro_diciembre', 'seguro_enero_1', 'seguro_febrero_1',
        'seguro_marzo_1', 'seguro_abril_1',
    ])

    def get_initial_queryset(self):
        """Rows restricted to the region and trainer type from the URL."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(region__id=self.kwargs['region']).filter(tipo__id=self.kwargs['tipo'])

    def filter_queryset(self, qs):
        """Apply the DataTables global search box to nombre and cedula."""
        search = self.request.GET.get(u'search[value]', None)
        q = Q()
        if search:
            # nombre values are matched capitalized, hence .capitalize()
            q |= Q(**{'nombre__icontains': search.capitalize()})
            q |= Q(**{'cedula__icontains': search})
        qs = qs.filter(q)
        return qs

    def render_column(self, row, column):
        """Render file/date columns as plain strings; defer the rest to the
        base class.

        Replaces the original 26-branch if-chain, every branch of which
        returned str(row.<column>).
        """
        if column in self._STR_COLUMNS:
            return str(getattr(row, column))
        return super(FormadorTableView, self).render_column(row, column)
class FormadorCalificacionTableView(BaseDatatableView):
    """DataTable of trainers plus their group and participant counts; which
    models are counted depends on the trainer type ('id_tipo' URL kwarg)."""
    model = Formador
    columns = [
        'id',
        'nombre',
        'cedula',
        'celular',
        'correo',
        'cargo',
        'profesion',
        'foto',
        'fecha_contratacion',
        'fecha_terminacion',
    ]
    # Only the first four columns are orderable (all by 'nombre').
    # NOTE(review): longer than `columns` (35 vs 10 entries), kept exactly
    # as in the original.
    order_columns = ['nombre'] * 4 + [''] * 31

    # (group model, participant model) counted per trainer type.
    _MODELS_BY_TIPO = {
        1: (GrupoDocentes, ParticipanteDocente),
        2: (Grupo, ParticipanteEscuelaTic),
    }

    def get_initial_queryset(self):
        """Rows restricted to the region and trainer type from the URL."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(region__id=self.kwargs['region']).filter(tipo__id=self.kwargs['id_tipo'])

    def filter_queryset(self, qs):
        """Apply the DataTables global search box to nombre and cedula."""
        search = self.request.GET.get(u'search[value]', None)
        q = Q()
        if search:
            q |= Q(**{'nombre__icontains': search.capitalize()})
            q |= Q(**{'cedula__icontains': search})
        qs = qs.filter(q)
        return qs

    def render_column(self, row, column):
        """Render the photo field as a plain string; defer the rest."""
        if column == 'foto':
            return str(row.foto)
        return super(FormadorCalificacionTableView, self).render_column(row, column)

    def prepare_results(self, qs):
        """Serialize each trainer plus its group/participant counts.

        Deduplicates the original copy-pasted per-tipo loops; an unknown
        tipo yields an empty result, matching the original behavior.
        """
        tipo = int(self.kwargs['id_tipo'])
        json_data = []
        models = self._MODELS_BY_TIPO.get(tipo)
        if models is None:
            return json_data
        grupo_model, participante_model = models
        for item in qs:
            grupos = grupo_model.objects.filter(formador__id=item.id).count()
            participantes = participante_model.objects.filter(formador__id=item.id).count()
            json_data.append([
                item.id,
                item.nombre,
                item.cedula,
                item.celular,
                item.correo,
                item.cargo,
                item.profesion,
                str(item.foto),
                item.fecha_contratacion,
                item.fecha_terminacion,
                grupos,
                participantes
            ])
        return json_data
class FormadorGrupoTableView(BaseDatatableView):
    """Datatable of Grupo rows belonging to a single formador."""
    model = Grupo
    columns = ['id', 'formador', 'municipio', 'nombre', 'direccion', 'horario']
    order_columns = ['id', 'formador', 'municipio', 'nombre', 'direccion', 'horario']
    def get_initial_queryset(self):
        """Groups restricted to the formador id taken from the URL kwargs."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(formador__id=self.kwargs['id_formador'])
    def filter_queryset(self, qs):
        """Match the search box against the group name (capitalized)."""
        term = self.request.GET.get(u'search[value]', None)
        if not term:
            return qs
        return qs.filter(Q(nombre__icontains=term.capitalize()))
    def render_column(self, row, column):
        if column == 'formador':
            # NOTE(review): this returns the *group's* nombre, not the
            # formador's name -- looks like a possible bug; preserved as-is.
            return str(row.nombre)
        if column == 'municipio':
            return str(row.municipio.nombre)
        return super(FormadorGrupoTableView, self).render_column(row, column)
    def prepare_results(self, qs):
        """JSON rows: id, nombre, municipio, departamento, direccion, horario,
        participant count (EscuelaTic)."""
        json_data = []
        for grupo in qs:
            cuantos = ParticipanteEscuelaTic.objects.filter(grupo__id=grupo.id).count()
            json_data.append([
                grupo.id,
                grupo.nombre,
                grupo.municipio.nombre,
                grupo.municipio.departamento.nombre,
                grupo.direccion,
                grupo.horario,
                cuantos
            ])
        return json_data
class FormadorGrupoTipo1TableView(BaseDatatableView):
    """Datatable of GrupoDocentes rows belonging to a single formador
    (tipo-1 counterpart of FormadorGrupoTableView)."""
    model = GrupoDocentes
    columns = ['id', 'formador', 'municipio', 'nombre', 'direccion', 'horario']
    order_columns = ['id', 'formador', 'municipio', 'nombre', 'direccion', 'horario']
    def get_initial_queryset(self):
        """Docente groups restricted to the formador id from the URL kwargs."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(formador__id=self.kwargs['id_formador'])
    def filter_queryset(self, qs):
        """Match the search box against the group name (capitalized)."""
        term = self.request.GET.get(u'search[value]', None)
        if not term:
            return qs
        return qs.filter(Q(nombre__icontains=term.capitalize()))
    def render_column(self, row, column):
        if column == 'formador':
            # NOTE(review): returns the *group's* nombre, not the formador's
            # name -- same suspicious pattern as FormadorGrupoTableView.
            return str(row.nombre)
        if column == 'municipio':
            return str(row.municipio.nombre)
        return super(FormadorGrupoTipo1TableView, self).render_column(row, column)
    def prepare_results(self, qs):
        """JSON rows: id, nombre, municipio, departamento, direccion, horario,
        participant count (docentes)."""
        json_data = []
        for grupo in qs:
            cuantos = ParticipanteDocente.objects.filter(grupo__id=grupo.id).count()
            json_data.append([
                grupo.id,
                grupo.nombre,
                grupo.municipio.nombre,
                grupo.municipio.departamento.nombre,
                grupo.direccion,
                grupo.horario,
                cuantos
            ])
        return json_data
class FormadorListadoGrupoTableView(BaseDatatableView):
    """Datatable of EscuelaTic participants for one formador/grupo pair."""
    model = ParticipanteEscuelaTic
    columns = [
        'id', 'formador', 'grupo', 'numero', 'institucion', 'nombres',
        'apellidos', 'cedula', 'genero', 'nivel_educativo', 'telefono',
        'correo', 'poblacion', 'codigo_anspe', 'tipo_proyecto',
        'grupo_conformacion'
    ]
    # Only the first four columns are sortable, all by participant name.
    order_columns = ['nombres'] * 4
    def get_initial_queryset(self):
        """Participants restricted to the formador and grupo from URL kwargs."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(
            formador__id=self.kwargs['id_formador']
        ).filter(grupo__id=self.kwargs['id_grupo'])
    def filter_queryset(self, qs):
        """Match the search box against nombres, apellidos or cedula."""
        term = self.request.GET.get(u'search[value]', None)
        if not term:
            return qs
        condition = (
            Q(nombres__icontains=term)
            | Q(apellidos__icontains=term)
            | Q(cedula__icontains=term)
        )
        return qs.filter(condition)
    def render_column(self, row, column):
        """Render the related objects' names; defer the rest to the base class."""
        if column == 'formador':
            return str(row.formador.nombre)
        if column == 'grupo':
            return str(row.grupo.nombre)
        return super(FormadorListadoGrupoTableView, self).render_column(row, column)
class FormadorListadoGrupoDocentesTableView(BaseDatatableView):
    """Datatable of docente participants for one formador/grupo pair."""
    model = ParticipanteDocente
    columns = [
        'id', 'formador', 'grupo', 'radicado', 'nombres', 'apellidos',
        'cedula', 'correo', 'telefono_fijo', 'celular', 'area', 'grado',
        'tipo_beneficiario', 'genero', 'nombre_proyecto',
        'definicion_problema', 'area_proyecto', 'competencia',
        'grupo_poblacional'
    ]
    # Only the first four columns are sortable, all by participant name.
    order_columns = ['nombres'] * 4
    def get_initial_queryset(self):
        """Participants restricted to the formador and grupo from URL kwargs."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(
            formador__id=self.kwargs['id_formador']
        ).filter(grupo__id=self.kwargs['id_grupo'])
    def filter_queryset(self, qs):
        """Match the search box against nombres, apellidos or cedula."""
        term = self.request.GET.get(u'search[value]', None)
        if not term:
            return qs
        condition = (
            Q(nombres__icontains=term)
            | Q(apellidos__icontains=term)
            | Q(cedula__icontains=term)
        )
        return qs.filter(condition)
    def render_column(self, row, column):
        """Render related objects / choice fields as unicode text (Python 2)."""
        if column == 'formador':
            return unicode(row.formador.nombre)
        if column == 'grupo':
            return unicode(row.grupo.nombre)
        if column == 'radicado':
            return unicode(row.radicado.numero)
        if column in ('area', 'grado', 'genero', 'competencia', 'grupo_poblacional'):
            return unicode(getattr(row, column))
        return super(FormadorListadoGrupoDocentesTableView, self).render_column(row, column)
class FormadorListadoMasivoTableView(BaseDatatableView):
    """Datatable of Masivo (bulk upload) records for a single grupo."""
    model = Masivo
    columns = ['id', 'fecha', 'grupo', 'archivo', 'usuario', 'resultado']
    order_columns = ['id', 'fecha', 'grupo', 'archivo', 'usuario', 'resultado']
    def get_initial_queryset(self):
        """Uploads restricted to the grupo id from the URL kwargs."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(grupo__id=self.kwargs['id_grupo'])
    def filter_queryset(self, qs):
        # No search fields are defined for this table; applying an empty Q()
        # when a term is present is a deliberate no-op (original behavior).
        if self.request.GET.get(u'search[value]', None):
            qs = qs.filter(Q())
        return qs
    def render_column(self, row, column):
        """Render related objects and file fields as text."""
        if column == 'grupo':
            return row.grupo.nombre
        if column == 'archivo':
            return str(row.archivo)
        if column == 'resultado':
            return str(row.resultado)
        if column == 'usuario':
            return row.usuario.username
        return super(FormadorListadoMasivoTableView, self).render_column(row, column)
class FormadorTipo1ListadoMasivoTableView(BaseDatatableView):
    """Datatable of MasivoDocente (bulk upload) records for a single grupo
    (tipo-1 counterpart of FormadorListadoMasivoTableView)."""
    model = MasivoDocente
    columns = ['id', 'fecha', 'grupo', 'archivo', 'usuario', 'resultado']
    order_columns = ['id', 'fecha', 'grupo', 'archivo', 'usuario', 'resultado']
    def get_initial_queryset(self):
        """Uploads restricted to the grupo id from the URL kwargs."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(grupo__id=self.kwargs['id_grupo'])
    def filter_queryset(self, qs):
        # No search fields are defined for this table; applying an empty Q()
        # when a term is present is a deliberate no-op (original behavior).
        if self.request.GET.get(u'search[value]', None):
            qs = qs.filter(Q())
        return qs
    def render_column(self, row, column):
        """Render related objects and file fields as text."""
        if column == 'grupo':
            return row.grupo.nombre
        if column == 'archivo':
            return str(row.archivo)
        if column == 'resultado':
            return str(row.resultado)
        if column == 'usuario':
            return row.usuario.username
        return super(FormadorTipo1ListadoMasivoTableView, self).render_column(row, column)
class ParticipantesListadoTableView(BaseDatatableView):
    """Datatable of every EscuelaTic participant in a region."""
    model = ParticipanteEscuelaTic
    columns = [
        'id', 'formador', 'grupo', 'numero', 'institucion', 'nombres',
        'apellidos', 'cedula', 'genero', 'nivel_educativo', 'telefono',
        'correo', 'poblacion', 'codigo_anspe', 'tipo_proyecto',
        'grupo_conformacion'
    ]
    # Only the first four columns are sortable, all by participant name.
    order_columns = ['nombres'] * 4
    def get_initial_queryset(self):
        """Participants whose formador belongs to the region from URL kwargs."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(formador__region__id=self.kwargs['region'])
    def filter_queryset(self, qs):
        """Match the search box against cedula, or capitalized nombres/apellidos."""
        term = self.request.GET.get(u'search[value]', None)
        if not term:
            return qs
        condition = (
            Q(cedula__icontains=term)
            | Q(nombres__icontains=term.capitalize())
            | Q(apellidos__icontains=term.capitalize())
        )
        return qs.filter(condition)
    def render_column(self, row, column):
        """Render the related objects' names; defer the rest to the base class."""
        if column == 'formador':
            return str(row.formador.nombre)
        if column == 'grupo':
            return str(row.grupo.nombre)
        return super(ParticipantesListadoTableView, self).render_column(row, column)
class DocentesListadoTableView(BaseDatatableView):
    """Datatable of every docente participant in a region."""
    model = ParticipanteDocente
    columns = [
        'id', 'formador', 'grupo', 'radicado', 'nombres', 'apellidos',
        'cedula', 'correo', 'telefono_fijo', 'celular', 'area', 'grado',
        'tipo_beneficiario', 'genero', 'nombre_proyecto',
        'definicion_problema', 'area_proyecto', 'competencia',
        'grupo_poblacional'
    ]
    # Only the first four columns are sortable, all by participant name.
    order_columns = ['nombres'] * 4
    def get_initial_queryset(self):
        """Participants whose formador belongs to the region from URL kwargs."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(formador__region__id=self.kwargs['region'])
    def filter_queryset(self, qs):
        """Match the search box against cedula, or capitalized nombres/apellidos."""
        term = self.request.GET.get(u'search[value]', None)
        if not term:
            return qs
        condition = (
            Q(cedula__icontains=term)
            | Q(nombres__icontains=term.capitalize())
            | Q(apellidos__icontains=term.capitalize())
        )
        return qs.filter(condition)
    def render_column(self, row, column):
        """Render related objects / choice fields as unicode text (Python 2)."""
        if column == 'formador':
            return unicode(row.formador.nombre)
        if column == 'grupo':
            return unicode(row.grupo.nombre)
        if column == 'radicado':
            return unicode(row.radicado)
        if column in ('area', 'grado', 'genero', 'competencia', 'grupo_poblacional'):
            return unicode(getattr(row, column))
        return super(DocentesListadoTableView, self).render_column(row, column)
class EvidenciasDocentesListadoTableView(BaseDatatableView):
    """Datatable of EvidenciaDocentes rows for one participant, limited to a
    fixed whitelist of entregable ids and ordered by entregable."""
    model = EvidenciaDocentes
    columns = [
        'id',
        'soporte',
        'entregable',
        'participante'
    ]
    order_columns = [
        'entregable',
        'entregable',
        'entregable',
        'entregable'
    ]
    def get_initial_queryset(self):
        """Evidence for the participant from URL kwargs, restricted to the
        deliverables shown in this table."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        participante_id = self.kwargs['participante__id']
        # Whitelist of entregable ids displayed by this view.
        entregable_ids = [1, 3, 5, 7, 21, 23, 29, 31, 33, 41, 43, 45, 47, 49,
                          11, 13, 15, 17, 9, 27, 35, 37, 39, 60]
        return self.model.objects.filter(
            participante__id=participante_id,
            entregable__id__in=entregable_ids,
        ).order_by('entregable')
    def filter_queryset(self, qs):
        # No search fields are defined for this table; the search term is
        # intentionally ignored (same behavior as before, minus a no-op Q()).
        return qs
    def render_column(self, row, column):
        """Render related objects as text; defer the rest to the base class."""
        if column == 'soporte':
            return str(row.soporte.soporte)
        if column == 'entregable':
            return row.entregable.nombre
        if column == 'participante':
            return str(row.participante.cedula)
        return super(EvidenciasDocentesListadoTableView, self).render_column(row, column)
    def prepare_results(self, qs):
        """JSON rows: [id, actividad, entregable, soporte path, descripcion]."""
        json_data = []
        for item in qs:
            # Bug fix: a bare `except:` swallowed every exception here; narrow
            # it to AttributeError, which is what a missing/None soporte raises
            # (Django's RelatedObjectDoesNotExist subclasses AttributeError).
            try:
                s = item.soporte.soporte
            except AttributeError:
                s = ""
            if s == "":
                # NOTE(review): hard-coded sample document used when no
                # evidence file exists -- looks like leftover placeholder
                # data; confirm whether an empty string is intended instead.
                soporte = unicode('Formacion/Formadores Tipo 1/Region 4/Lilian Nayibe Villamizar Cote/F1Nort1-01/NIVEL 2/21/1_42.pdf')
            else:
                soporte = unicode(s)
            json_data.append([
                item.id,
                item.entregable.actividad.nombre,
                item.entregable.nombre,
                soporte,
                item.entregable.descripcion,
            ])
        return json_data
class EvidenciasListadoTableView(BaseDatatableView):
    """Datatable of EvidenciaEscuelaTic rows for one participant."""
    model = EvidenciaEscuelaTic
    columns = [
        'id',
        'soporte',
        'entregable',
        'participante'
    ]
    order_columns = [
        'entregable',
        'entregable',
        'entregable',
        'entregable'
    ]
    def get_initial_queryset(self):
        """Evidence restricted to the participant id from the URL kwargs."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(participante__id=self.kwargs['participante__id'])
    def filter_queryset(self, qs):
        # No search fields are defined for this table; the search term is
        # intentionally ignored (same behavior as before, minus a no-op Q()).
        return qs
    def render_column(self, row, column):
        """Render related objects as text; defer the rest to the base class."""
        if column == 'soporte':
            return str(row.soporte.soporte)
        if column == 'entregable':
            return row.entregable.nombre
        if column == 'participante':
            return str(row.participante.cedula)
        return super(EvidenciasListadoTableView, self).render_column(row, column)
    def prepare_results(self, qs):
        """JSON rows: [id, actividad, entregable, soporte path, descripcion]."""
        json_data = []
        for item in qs:
            # Idiom fix: compare against None with `is`, not `==`.
            if item.soporte is None:
                soporte = ""
            else:
                soporte = unicode(item.soporte.soporte)
            json_data.append([
                item.id,
                item.entregable.actividad.nombre,
                item.entregable.nombre,
                soporte,
                item.entregable.descripcion,
            ])
        return json_data
class ActividadesListadoTableView(BaseDatatableView):
    """Datatable of every Entregable; each JSON row also carries the number of
    non-empty EvidenciaEscuelaTic uploads for the requested region."""
    model = Entregable
    columns = ['id', 'actividad', 'nombre', 'descripcion']
    order_columns = ['id', 'actividad', 'nombre', 'descripcion']
    def get_initial_queryset(self):
        """All deliverables (no per-request restriction)."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.all()
    def filter_queryset(self, qs):
        # No search fields are defined for this table; applying an empty Q()
        # when a term is present is a deliberate no-op (original behavior).
        if self.request.GET.get(u'search[value]', None):
            qs = qs.filter(Q())
        return qs
    def render_column(self, row, column):
        """Render the parent activity's name; defer the rest to the base class."""
        if column == 'actividad':
            return str(row.actividad.nombre)
        return super(ActividadesListadoTableView, self).render_column(row, column)
    def prepare_results(self, qs):
        """JSON rows: id, actividad, nombre, descripcion, upload count."""
        json_data = []
        region_evidence = EvidenciaEscuelaTic.objects.filter(
            participante__formador__region__id=self.kwargs['region']
        )
        for entregable in qs:
            cantidad = region_evidence.filter(
                entregable__id=entregable.id
            ).exclude(soporte=None).exclude(soporte__soporte='').count()
            json_data.append([
                entregable.id,
                entregable.actividad.nombre,
                entregable.nombre,
                entregable.descripcion,
                cantidad
            ])
        return json_data
class ActividadesDocentesListadoTableView(BaseDatatableView):
    """Datatable of every EntregableDocentes (per-row upload counting is
    currently disabled)."""
    model = EntregableDocentes
    columns = ['id', 'actividad', 'nombre', 'descripcion']
    order_columns = ['id', 'actividad', 'nombre', 'descripcion']
    def get_initial_queryset(self):
        """All docente deliverables (no per-request restriction)."""
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.all()
    def filter_queryset(self, qs):
        # No search fields are defined for this table; applying an empty Q()
        # when a term is present is a deliberate no-op (original behavior).
        if self.request.GET.get(u'search[value]', None):
            qs = qs.filter(Q())
        return qs
    def render_column(self, row, column):
        """Render the parent activity's name; defer the rest to the base class."""
        if column == 'actividad':
            return str(row.actividad.nombre)
        return super(ActividadesDocentesListadoTableView, self).render_column(row, column)
    def prepare_results(self, qs):
        """JSON rows: id, actividad, nombre, descripcion.

        The evidence count present in the EscuelaTic counterpart view was
        commented out here and is intentionally not emitted.
        """
        json_data = []
        for entregable in qs:
            json_data.append([
                entregable.id,
                entregable.actividad.nombre,
                entregable.nombre,
                entregable.descripcion,
            ])
        return json_data
class ParticipantesActividadListadoTableView(BaseDatatableView):
    """Datatable of EscuelaTic participants in a region that have a non-empty
    evidence upload for one specific deliverable (URL kwarg 'id_actividad')."""
    model = ParticipanteEscuelaTic
    columns = [
        'id',
        'formador',
        'grupo',
        'numero',
        'institucion',
        'nombres',
        'apellidos',
        'cedula',
        'genero',
        'nivel_educativo',
        'telefono',
        'correo',
        'poblacion',
        'codigo_anspe',
        'tipo_proyecto',
        'grupo_conformacion'
    ]
    # Only the first four columns are sortable, all by participant name.
    order_columns = [
        'nombres',
        'nombres',
        'nombres',
        'nombres',
    ]
    def get_initial_queryset(self):
        # Participant ids that uploaded a non-empty soporte for this deliverable.
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        x = EvidenciaEscuelaTic.objects.filter(soporte__entregable__id=self.kwargs['id_actividad']).exclude(soporte__soporte="").values_list("participante__id",flat=True)
        return self.model.objects.filter(formador__region__id=self.kwargs['region']).filter(id__in = x)
    def filter_queryset(self, qs):
        # Match the search box against cedula, or capitalized nombres/apellidos.
        search = self.request.GET.get(u'search[value]', None)
        q = Q()
        if search:
            q |= Q(**{'cedula__icontains' : search})
            q |= Q(**{'nombres__icontains' : search.capitalize()})
            q |= Q(**{'apellidos__icontains' : search.capitalize()})
            qs = qs.filter(q)
        return qs
    def render_column(self, row, column):
        # Render the related objects' names; defer the rest to the base class.
        if column == 'formador':
            return str(row.formador.nombre)
        if column == 'grupo':
            return str(row.grupo.nombre)
        else:
            return super(ParticipantesActividadListadoTableView,self).render_column(row,column)
    def prepare_results(self, qs):
        # Positional JSON row: id, formador, grupo, numero, departamento,
        # municipio, institucion, personal fields, then the soporte path.
        # NOTE(review): .get() assumes exactly one evidence row per
        # participant/deliverable pair -- it raises otherwise; confirm.
        json_data = []
        for item in qs:
            soporte = EvidenciaEscuelaTic.objects.filter(participante__id=item.id).get(entregable__id=self.kwargs['id_actividad']).soporte
            json_data.append([
                item.id,
                item.formador.nombre,
                item.grupo.nombre,
                item.numero,
                item.grupo.municipio.departamento.nombre,
                item.grupo.municipio.nombre,
                item.institucion,
                item.nombres,
                item.apellidos,
                item.cedula,
                item.genero,
                item.nivel_educativo,
                item.telefono,
                item.correo,
                item.poblacion,
                item.codigo_anspe,
                item.tipo_proyecto,
                item.grupo_conformacion,
                unicode(soporte.soporte)
            ])
        return json_data
class ParticipantesDocentesActividadListadoTableView(BaseDatatableView):
    """Datatable of docente evidence rows in a region for one specific
    deliverable (URL kwarg 'id_actividad'), excluding empty uploads."""
    model = EvidenciaDocentes
    columns = [
        'id',
        'soporte',
        'entregable',
        'participante',
        'valor',
        'corte'
    ]
    # Only the first four columns are sortable, all by evidence id.
    order_columns = [
        'id',
        'id',
        'id',
        'id'
    ]
    def get_initial_queryset(self):
        # Evidence for the region whose soporte matches the deliverable and is
        # not an empty upload.
        if not self.model:
            raise NotImplementedError("Need to provide a model or implement get_initial_queryset!")
        return self.model.objects.filter(participante__formador__region__id=self.kwargs['region']).filter(soporte__entregable__id=self.kwargs['id_actividad']).exclude(soporte__soporte="")
    def filter_queryset(self, qs):
        # Match the search box against the participant's cedula/nombres/apellidos.
        search = self.request.GET.get(u'search[value]', None)
        q = Q()
        if search:
            q |= Q(**{'participante__cedula__icontains' : search})
            q |= Q(**{'participante__nombres__icontains' : search})
            q |= Q(**{'participante__apellidos__icontains' : search})
            qs = qs.filter(q)
        return qs
    def render_column(self, row, column):
        # Stringify every related/choice column; defer the rest to the base class.
        if column == 'soporte':
            return str(row.soporte)
        if column == 'entregable':
            return str(row.entregable)
        if column == 'participante':
            return str(row.participante)
        if column == 'valor':
            return str(row.valor)
        if column == 'corte':
            return str(row.corte)
        else:
            return super(ParticipantesDocentesActividadListadoTableView,self).render_column(row,column)
    def prepare_results(self, qs):
        # Positional JSON row built from the *participant* of each evidence
        # row, finishing with the evidence's soporte path.
        json_data = []
        for item in qs:
            #soporte = EvidenciaDocentes.objects.filter(participante__id=item.id).get(entregable__id=self.kwargs['id_actividad']).soporte
            json_data.append([
                item.participante.id,
                unicode(item.participante.formador),
                unicode(item.participante.grupo),
                unicode(item.participante.radicado),
                item.participante.nombres,
                item.participante.apellidos,
                item.participante.cedula,
                item.participante.correo,
                item.participante.telefono_fijo,
                item.participante.celular,
                unicode(item.participante.area),
                unicode(item.participante.grado),
                item.participante.tipo_beneficiario,
                unicode(item.participante.genero),
                item.participante.nombre_proyecto,
                item.participante.definicion_problema,
                unicode(item.participante.area_proyecto),
                unicode(item.participante.competencia),
                unicode(item.participante.grupo_poblacional),
                unicode(item.soporte.soporte)
            ])
        return json_data
def dest(mneumonic):
    """Return the 3-bit dest field (bits: A D M) of a Hack C-instruction."""
    dst = _destruct(mneumonic)[0]
    bits = 0
    # Each destination register sets one bit; an absent dest yields "000".
    for letter, mask in (("M", 0x1), ("D", 0x2), ("A", 0x4)):
        if dst and letter in dst:
            bits |= mask
    return f"{bits:03b}"
def comp(mneumonic):
    """Return the 7-bit comp field (a c1..c6) of a Hack C-instruction.

    Raises KeyError for an unknown computation mnemonic.
    """
    # Lookup table straight from the Hack machine-language specification;
    # the leading bit is the 'a' bit selecting A (0) or M (1).
    table = {
        "0": "0101010",
        "1": "0111111",
        "-1": "0111010",
        "D": "0001100",
        "A": "0110000",
        "M": "1110000",
        "!D": "0001101",
        "!A": "0110001",
        "!M": "1110001",
        "-D": "0001111",
        "-A": "0110011",
        "-M": "1110011",
        "D+1": "0011111",
        "A+1": "0110111",
        "M+1": "1110111",
        "D-1": "0001110",
        "A-1": "0110010",
        "M-1": "1110010",
        "D+A": "0000010",
        "D+M": "1000010",
        "D-A": "0010011",
        "D-M": "1010011",
        "A-D": "0000111",
        "M-D": "1000111",
        "D&A": "0000000",
        "D&M": "1000000",
        "D|A": "0010101",
        "D|M": "1010101",
    }
    return table[_destruct(mneumonic)[1]]
def jump(mneumonic):
    """Return the 3-bit jump field of a Hack C-instruction; "000" if absent."""
    table = {
        "JGT": "001",
        "JEQ": "010",
        "JGE": "011",
        "JLT": "100",
        "JNE": "101",
        "JLE": "110",
        "JMP": "111",
    }
    jmp = _destruct(mneumonic)[2]
    return table.get(jmp, "000")
def _destruct(mneumonic):
mneumonic = mneumonic.strip("")
"""
if _dest_ is empty, the "=" is omitted
if _jump_ is empty, the ";" is omitted
"""
if not "=" in mneumonic:
p = mneumonic.split(";")
return (None, p[0].strip(), p[1].strip())
else:
p = mneumonic.split("=")
return (p[0].strip(), p[1].strip(), None)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-02-05 08:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Adds pickup-time windows and a stop type to Stop, and relaxes the
    Vehicle coordinate and Stop waiting_people fields."""
    dependencies = [
        ('routeoptimizer', '0001_initial'),
    ]
    operations = [
        # timezone.now is a one-off default used to back-fill existing rows;
        # preserve_default=False drops it from the field definition afterwards.
        migrations.AddField(
            model_name='stop',
            name='pickupTime1',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='stop',
            name='pickupTime2',
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        # Stop role within a route; new stops default to 'midway'.
        migrations.AddField(
            model_name='stop',
            name='type',
            field=models.IntegerField(choices=[(0, 'midway'), (1, 'start'), (2, 'finish'), (3, 'delivery')], default=0),
        ),
        migrations.AlterField(
            model_name='stop',
            name='waiting_people',
            field=models.IntegerField(default=0),
        ),
        # Vehicle position becomes optional (nullable/blank) with 6 decimal
        # places of precision.
        migrations.AlterField(
            model_name='vehicle',
            name='current_lat',
            field=models.DecimalField(blank=True, decimal_places=6, max_digits=9, null=True),
        ),
        migrations.AlterField(
            model_name='vehicle',
            name='current_lng',
            field=models.DecimalField(blank=True, decimal_places=6, max_digits=9, null=True),
        ),
    ]
|
from flask import Flask, render_template, request
import helpers
# Init
# ----------------------------------
app = Flask(__name__)
# Blog settings come from the helpers config and live under app.config['blog'];
# SERVER_NAME and debug are then mirrored from that config.
app.config.update(blog=helpers.get_config())
app.config.update(SERVER_NAME=app.config['blog']['host'])
app.debug = app.config['blog']['debug']
# ----------------------------------
# Routes
# ----------------------------------
@app.route('/')
def index():
    """Render the home page with every available post."""
    return render_template('index.html', posts=helpers.get_posts())
@app.route('/<post_name>')
def show_post(post_name):
    """Render a single post looked up by its URL name."""
    data = helpers.get_post(post_name)
    return render_template('post.html', **data)
@app.route('/contact', methods=['GET', 'POST'])
def contact():
    """Contact page: GET renders the form, POST acknowledges the submission.

    Bug fix: the route previously accepted only GET, so Flask answered POST
    requests with 405 before this view ran and the POST branch was dead code.
    """
    if request.method == 'POST':
        return 'POSTED!'
    return render_template('contact.html', title='contact')
# ----------------------------------
# Run
# ----------------------------------
if __name__ == '__main__':
    # Start Flask's built-in development server (not for production use).
    app.run()
# ----------------------------------
|
import os
import sys
import tarfile
import time
import pyprind
import pandas as pd
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from matplotlib.colors import ListedColormap
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from scipy.spatial.distance import pdist, squareform
from scipy import exp
from scipy.linalg import eigh
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA
from hyperopt import hp, tpe, Trials, fmin
from sklearn import __version__ as sklearn_version
from distutils.version import LooseVersion
from sklearn import datasets
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
import numpy as np
from sklearn.metrics import explained_variance_score
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    """Plot a classifier's 2-D decision surface plus the labeled samples.

    :param X: (n_samples, 2) feature array -- only two features are plotted
    :param y: class labels, one per sample
    :param classifier: fitted estimator exposing .predict
    :param test_idx: optional index range of test samples to highlight
    :param resolution: mesh-grid step size for the decision surface
    """
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    # Predict every grid point, then reshape back to the grid for contourf.
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # One scatter call per class so each gets its own marker/color/legend entry.
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0],
                    y=X[y == cl, 1],
                    alpha=0.8,
                    c=colors[idx],
                    marker=markers[idx],
                    label=cl,
                    edgecolor='black')
    # highlight test samples
    if test_idx:
        # plot all samples
        X_test, y_test = X[test_idx, :], y[test_idx]
        # NOTE(review): c='' is rejected by newer matplotlib releases;
        # facecolors='none' is the supported spelling -- confirm the
        # matplotlib version in use before changing.
        plt.scatter(X_test[:, 0],
                    X_test[:, 1],
                    c='',
                    edgecolor='black',
                    alpha=1.0,
                    linewidth=1,
                    marker='o',
                    s=100,
                    label='test set')
# Synthetic two-circle dataset used as the KernelPCA reconstruction target.
X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2)
# Search space: integer gamma in [1, 4] chosen categorically.
hyperopt_parameters = { 'gamma': hp.choice("gamma",range(1,5))}
# Best score tracker used by f() below; note fmin() later rebinds this name.
best = 0
def objective(args):
    """Hyperopt loss: negative mean explained variance of reconstructing the
    module-level X after a KernelPCA round-trip with the sampled params.

    :param args: dict of KernelPCA keyword arguments sampled by hyperopt
    :return: value to minimize (lower is a better reconstruction)
    """
    # Bug fix: fit_inverse_transform was passed as the *string* "True", which
    # only worked because any non-empty string is truthy; use the boolean.
    scikit_kpca = KernelPCA(**args, fit_inverse_transform=True)
    X_skernpca = scikit_kpca.fit_transform(X)
    return -1 * explained_variance_score(X, scikit_kpca.inverse_transform(X_skernpca)).mean()
def f(params):
    """Objective wrapper that remembers the best score seen so far.

    NOTE(review): this wrapper is never passed to fmin() below (the raw
    `objective` is used instead), so it currently appears to be dead code.
    """
    global best
    # Bug fix: STATUS_OK was referenced without being imported anywhere, so
    # calling f() raised NameError; import it from hyperopt where it lives.
    from hyperopt import STATUS_OK
    acc = objective(params)
    if acc > best:
        best = acc
        print('new best:', best, params)
    return {'loss': -acc, 'status': STATUS_OK}
max_evals = 200
# Instance that records the history of the trials
trials = Trials()
# NOTE(review): this rebinds `best` (previously the best-score tracker) to
# fmin's best-parameter dict -- confirm that is intended.
best = fmin(
    # the function whose value is minimized
    objective,
    # dict (or list) of parameters to search over
    hyperopt_parameters,
    # which search logic to use; tpe.suggest is the usual choice
    algo=tpe.suggest,
    max_evals=max_evals,
    trials=trials,
    # print the progress of the trials
    verbose=1
)
print('best:')
print(best)
# plot_decision_regions(X_skernpca, y, classifier=lr)
# plt.xlabel('PC 1')
# plt.ylabel('PC 2')
# plt.legend(loc='lower left')
# plt.tight_layout()
# #plt.savefig('images/05_04.png', dpi=300)
# plt.show()
|
import sys
from PIL import Image
import math
import queue as Q
import time
# import matplotlib.pyplot as plt
import copy
'''
These variables are determined at runtime and should not be changed or mutated by you
'''
start = (0, 0) # a single (x,y) tuple, representing the start position of the search algorithm
end = (0, 0) # a single (x,y) tuple, representing the end position of the search algorithm
difficulty = "" # a string reference to the original import file
'''
These variables determine display color, and can be changed by you, I guess
'''
NEON_GREEN = (0, 255, 0)
RED = (255, 0, 0)
PURPLE = (85, 26, 139)
# NOTE(review): (0, 50, 50) is a dark teal in RGB, not yellow -- confirm.
YELLOW = (0, 50, 50)
DARK_GRAY = (100, 100, 100)
'''
These variables are determined and filled algorithmically, and are expected (and required) be mutated by you
'''
path = [] # an ordered list of (x,y) tuples, representing the path to traverse from start-->goal
expanded = {} # a dictionary of (x,y) tuples, representing nodes that have been expanded
frontier = {} # a dictionary of (x,y) tuples, representing nodes to expand to in the future
# Min-priority queue of (priority, (x, y)); priorities are negative Manhattan
# distances from cost(), so the node closest to the goal is popped first.
dirty = Q.PriorityQueue() # priority q for planning algorithim
def search(map):
    """
    This function is meant to use the global variables [start, end, path, expanded, frontier] to search through the
    provided map.
    :param map: A '1-concept' PIL PixelAccess object to be searched. (basically a 2d boolean array)

    Greedy best-first search driven by the `dirty` priority queue (priorities
    are negative Manhattan distances, so nearest-to-goal pops first).
    NOTE(review): `path` records the order of *expansion*, not the final
    start-->goal route, and `expanded`/`frontier` are never populated here,
    so visualize_search() only shows the path pixels.
    """
    # O is unoccupied (white); 1 is occupied (black)
    print("pixel value at start point ", map[start[0], start[1]])
    print("pixel value at end point ", map[end[0], end[1]])
    dirty.put((0, start))
    while len(dirty.queue) != 0:
        # Pop the queued node with the best (lowest) priority.
        current = dirty.get()
        current_pos = current[1]
        if current_pos == end:
            break
        path.append(current_pos)
        # look left
        explore(current_pos[0], current_pos[1] - 1, end, path, map)
        # look up
        explore(current_pos[0] - 1, current_pos[1], end, path, map)
        # look right
        explore(current_pos[0], current_pos[1] + 1, end, path, map)
        # look down
        explore(current_pos[0] + 1, current_pos[1], end, path, map)
    print(path)
    visualize_search("out.png") # see what your search has wrought (and maybe save your results)
#def revisit():
def cost(x, y, end):
    """Priority for the greedy search: the negated Manhattan distance from
    (x, y) to `end`, so closer cells sort first in the min-priority queue."""
    return -(abs(x - end[0]) + abs(y - end[1]))
def explore(x, y, end, path, map):
    """Queue the cell (x, y) for expansion if it is in bounds, unvisited and
    free; relies on module globals `rows`, `columns` and `dirty`.

    NOTE(review): `(x, y) in path` is an O(n) list scan, and cells that are
    queued but not yet expanded can be queued again -- confirm acceptable.
    """
    # Check for boundary
    if x < 0 or x > rows - 1 or y < 0 or y > columns - 1:
        return
    # Check if the node has already been explored
    elif (x, y) in path:
        return
    # check for obstacle
    elif map[x, y] == 1:
        return
    # Check if path clear
    elif map[x, y] == 0:
        h = cost(x, y, end)
        dirty.put((h, (x, y)))
        return
    else:
        # Pixel value is neither 0 nor 1 (e.g. other palette indices).
        print("Dead end")
def visualize_search(save_file="solved_trivial.gif"):
    """
    :param save_file: (optional) filename to save image to (no filename given means no save file)

    Recolors the maze image: frontier, expanded and path cells plus the start
    and end markers, then shows it and (unless told not to) saves it.
    """
    im = Image.open(difficulty).convert("RGB")
    pixel_access = im.load()
    # draw frontier pixels
    for pixel in frontier.keys():
        pixel_access[pixel[0], pixel[1]] = YELLOW
    # draw expanded pixels
    for pixel in expanded.keys():
        pixel_access[pixel[0], pixel[1]] = DARK_GRAY
    # draw path pixels
    for pixel in path:
        pixel_access[pixel[0], pixel[1]] = PURPLE
    # draw start and end pixels
    pixel_access[start[0], start[1]] = NEON_GREEN
    pixel_access[end[0], end[1]] = RED
    # display and (maybe) save results
    im.show()
    # Sentinel filename "do_not_save.png" suppresses writing to disk.
    if (save_file != "do_not_save.png"):
        im.save(save_file)
    im.close()
if __name__ == "__main__":
    # Throw Errors && Such
    # global difficulty, start, end
    # NOTE(review): asserts vanish under `python -O`; raising would be safer.
    assert sys.version_info[0] == 3  # require python 3 (instead of python 2)
    assert len(sys.argv) == 2, "Incorrect Number of arguments"  # require difficulty input
    # Parse input arguments
    function_name = str(sys.argv[0])
    difficulty = str(sys.argv[1])
    print("running " + function_name + " with " + difficulty + " difficulty.")
    # Hard code start and end positions of search for each difficulty level
    if difficulty == "trivial.gif":
        start = (8, 0)
        end = (20, 0)
        rows = 22
        columns = 22
    elif difficulty == "test.gif":
        start = (0, 1)
        end = (0, 0)
        rows = 10
        columns = 10
    else:
        assert False, "Incorrect difficulty level provided"
    # Perform search on given image
    # im.load() yields the PixelAccess object search() expects.
    im = Image.open(difficulty)
    search(im.load())
|
from bs4 import BeautifulSoup
import urllib.request,urllib.parse,urllib.error
from selenium import webdriver
import sqlite3
# Scrape the r/india front page with Selenium (the page is JS-rendered, so a
# plain urllib fetch would miss the content), then store posts in SQLite.
driver=webdriver.Firefox()
driver.get('https://www.reddit.com/r/india/')
# Scroll once to trigger lazy loading of additional posts.
driver.execute_script("window.scrollTo(0,document.body.scrollHeight);")
html=driver.execute_script("return document.documentElement.outerHTML")
soup=BeautifulSoup(html,'html.parser')
# NOTE(review): these obfuscated CSS class names are tied to a specific
# Reddit build and will break when the site is redeployed.
tag=soup.find_all('div',{'class':'lrzZ8b0L6AzLkQj5Ww7H1'})
news=soup.find_all('h3')
posted_by=soup.find_all('a',{'class':'_2tbHP6ZydRpjI44J3syuqC _23wugcdiaj44hdfugIAlnX oQctV4n0yUb0uiHDdGnmE'})
comment=soup.find_all('span',{'class':'FHCV02u6Cp2zYL0fhQPsO'})
conn=sqlite3.connect("reddit_db.sqlite")
cur=conn.cursor()
# Collect flair/tag text, skipping containers without a span.
tag_list=list()
for x in tag:
    he=x.find('span')
    if he is None:
        continue
    else :
        tag_list.append(he.contents[0])
new_list=list()
for y in news:
    new_list.append(y.contents[0])
posted_list=list()
for z in posted_by:
    posted_list.append(z.contents[0])
comment_list=list()
for w in comment:
    comment_list.append(w.contents[0])
driver.quit()
# Recreate the schema from scratch on every run.
cur.executescript('''
DROP TABLE IF EXISTS Main;
DROP TABLE IF EXISTS Tags;
CREATE TABLE Main(
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
    tag_id INTEGER,
    heading TEXT,
    comment INTEGER,
    posted_by TEXT
);
CREATE TABLE Tags(
    id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
    tag TEXT UNIQUE
)
''')
for item in tag_list:
    cur.execute(''' INSERT OR IGNORE INTO Tags (tag) VALUES(?)
    ''',(item,))
conn.commit()
# NOTE(review): this assumes the four scraped lists line up index-for-index
# and are at least as long as tag_list -- an IndexError otherwise; confirm.
for i in range(len(tag_list)):
    tags=tag_list[i]
    head=new_list[i]
    comments=comment_list[i]
    post=posted_list[i]
    cur.execute('''SELECT id FROM Tags WHERE tag = ?''',(tags,))
    t_id=cur.fetchone()[0]
    cur.execute('''INSERT OR IGNORE INTO Main (tag_id,heading,comment,posted_by) VALUES(?,?,?,?)''',(t_id,head,comments,post))
conn.commit()
|
import re
class LCDData:
    """Global state holder for the LCD plugin.

    Keeps the LCD handle, the custom bar-graph characters and a few
    string helpers shared across the plugin.
    """
    def __init__(self, lcd):
        # type (Adafruit_CharLCD)
        # Custom partial-bar glyphs stored in the LCD's character RAM
        # slots 1-4; a full bar is rendered with a plain '='.
        self.perc2 = unichr(1)
        self.perc4 = unichr(2)
        self.perc6 = unichr(3)
        self.perc8 = unichr(4)
        self.perc10 = '='
        self.lcd = lcd
        self.fileName = ""
        self.lcd_width = 16  # characters per LCD row

    def special_chars_to_num(self, string):
        #type (str) -> str
        """Replace LCD special characters with a '#<slot>' marker.

        Only slots 0-7 exist, since the LCD can store at most eight
        custom characters.
        :param string: string to convert
        """
        for slot in range(0, 8):
            glyph = unichr(slot)
            if glyph in string:
                string = string.replace(glyph, "#{}".format(slot))
        return string

    def get_diff(self, str1, str2):
        #type (str, str) -> list
        """Return the indexes where the two strings differ.

        Only the overlapping prefix is compared, so the strings do not
        have to be the same length.
        :param str1: string 1
        :param str2: string 2
        """
        overlap = min(len(str1), len(str2))
        return [idx for idx in xrange(overlap) if str1[idx] != str2[idx]]

    def clean_file_name(self, name):
        #type (str) -> str
        """Shorten a file name until it fits on the LCD row.

        Applies progressively more aggressive simplifications, returning
        as soon as the name fits:
        1. drop the extension
        2. drop spaces/underscores/dashes and capitalize words
        3. drop numbers of three or more digits
        4. drop trailing words (version tokens 'V<digits>' are kept)
        :param name: name to minify
        """
        if len(name) <= self.lcd_width:
            return name
        # -- 1. strip the extension --
        if name.find('.') != -1:
            name = name.split('.', 1)[0]
        if len(name) <= self.lcd_width:
            return name
        # Capitalize version tokens ('v2' -> 'V2') so step 4 can spot them.
        for token in re.findall(r'[v][\d]*', name):
            name = name.replace(token, token.capitalize())
        # -- 2. drop separators, CamelCase the words --
        pieces = re.findall(r'[a-zA-Z\d][^A-Z-_ ]*', name)
        name = ''.join(piece.capitalize() for piece in pieces)
        if len(name) <= self.lcd_width:
            return name
        # -- 3. remove long numbers while the name is still too wide --
        for number in re.findall(r'\d+', name):
            if len(number) > 2 and len(name) > self.lcd_width:
                name = name.replace(number, "")
        if len(name) <= self.lcd_width:
            return name
        # -- 4. drop trailing words, never touching version tokens --
        for word in reversed(re.findall(r'[\dA-Z][^A-Z]*', name)):
            if len(name) > self.lcd_width and len(re.findall(r'[V][\d]*', word)) == 0:
                name = name.replace(word, "")
        return name
# https://www.w3resource.com/python-exercises/geopy/python-geopy-nominatim_api-exercise-6.php
import geocoder
import socket
from geopy.geocoders import Nominatim
# Shared reverse-geocoding client; Nominatim requires a user_agent string.
geolocator = Nominatim(user_agent="geoapiExercises")
def get_city(host):
    """Geolocate *host* by IP and reverse-geocode its coordinates.

    NOTE(review): despite the name, this returns element 1 of
    city_state_country(), i.e. the *state* — confirm intended.
    """
    # The local dev server maps to geocoder's "me" sentinel (own public IP).
    if host == "127.0.0.1:8000":
        ip_address = "me"
    else:
        ip_address = socket.gethostbyname(str(host))
        ip_address = str(ip_address)
    located = geocoder.ip(ip_address)
    lat, lon = located.latlng
    place = city_state_country(str(lat) + ', ' + str(lon))
    return place[1]
def city_state_country(coord):
    """Reverse-geocode a 'lat, lon' string into (city, state, country).

    Components missing from the Nominatim response come back as ''.
    """
    location = geolocator.reverse(coord, exactly_one=True)
    address = location.raw['address']
    return (address.get('city', ''),
            address.get('state', ''),
            address.get('country', ''))
#!/usr/bin/env python
"""
Sensor class for the arudino_python package
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2012 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
from sensor_msgs.msg import Range, Imu, MagneticField
from geometry_msgs.msg import Twist, Quaternion, Vector3
from arduino_driver import CommandErrorCode, CommandException
from diagnostics import DiagnosticsUpdater
from saturnbot_msgs.msg import *
from saturnbot_msgs.srv import *
from math import pow, radians
from tf.transformations import quaternion_from_euler
import sys
# Digital logic levels written to / read from Arduino pins.
LOW = 0
HIGH = 1
# Pin direction constants passed to the *_pin_mode calls below.
INPUT = 0
OUTPUT = 1
class MessageType:
    """Enumeration of payload types a sensor can publish.

    Stored in each sensor's ``message_type`` attribute; values are
    plain ints for easy serialization.
    """
    ANALOG = 0
    DIGITAL = 1
    RANGE = 2
    FLOAT = 3
    INT = 4
    BOOL = 5
    IMU = 6
class Sensor(object):
    """Base class for all Arduino-attached sensors.

    Handles common configuration (pin, rate, direction, TF frame),
    diagnostics bookkeeping and the periodic poll/publish cycle.
    Subclasses override create_publisher, create_services, read_value
    and write_value.
    """
    def __init__(self, device, name, pin=None, rate=0, direction="input", frame_id="base_link", **kwargs):
        """
        :param device: Arduino driver used for all pin I/O
        :param name: sensor name (used in topic and service names)
        :param pin: Arduino pin number, if the sensor uses a single pin
        :param rate: publish rate in Hz; 0 disables periodic publishing
        :param direction: "input" (read) or "output" (write)
        :param frame_id: TF frame stamped on outgoing message headers
        """
        self.device = device
        self.name = name
        self.pin = pin
        self.rate = rate
        self.direction = direction
        self.frame_id = frame_id
        # Set min/max/offset/scale if specified
        self.min = self.get_kwargs(kwargs, 'min', 0)
        self.max = self.get_kwargs(kwargs, 'max', float('inf'))
        self.offset = self.get_kwargs(kwargs, 'offset', 0)
        self.scale = self.get_kwargs(kwargs, 'scale', 1)
        # Track diagnostics for this component
        diagnotics_error_threshold = self.get_kwargs(kwargs, 'diagnotics_error_threshold', 10)
        diagnostics_rate = float(self.get_kwargs(kwargs, 'diagnostics_rate', 1))
        # The DiagnosticsUpdater class is defined in the diagnostics.py module
        self.diagnostics = DiagnosticsUpdater(self, name + '_sensor', diagnotics_error_threshold, diagnostics_rate)
        # Initialize the component's value
        self.value = None
        # Create the default publisher only when periodic publishing is on
        if self.rate != 0:
            self.create_publisher()
        # Create any appropriate services
        self.create_services()
        # Initialize the next polling time stamp
        if self.rate != 0:
            self.t_delta = rospy.Duration(1.0 / self.rate)
            self.t_next = rospy.Time.now() + self.t_delta

    def get_kwargs(self, kwargs, arg, default):
        """Return kwargs[arg] if present, else *default*.

        Fix: uses dict.get instead of the previous bare try/except, which
        silently swallowed every exception, not just the missing key.
        """
        return kwargs.get(arg, default)

    def create_publisher(self):
        # Override per sensor type
        pass

    def create_services(self):
        # Override per sensor type
        pass

    def read_value(self):
        # Override per sensor type
        pass

    def write_value(self):
        # Override per sensor type
        pass

    def publish_message(self):
        """Read (or write) the sensor, then publish the current value.

        Override this method if necessary for particular sensor types.
        NOTE(review): subclasses define write_value(value); calling it here
        with no argument raises TypeError, which poll() catches and logs —
        confirm this is the intended behavior for output sensors.
        """
        if self.direction == "input":
            self.value = self.read_value()
        else:
            self.write_value()
        self.msg.value = self.value
        self.msg.header.stamp = rospy.Time.now()
        self.pub.publish(self.msg)

    def poll(self):
        """Publish at the configured rate and update diagnostics counters."""
        now = rospy.Time.now()
        if now > self.t_next:
            # Update read counters
            self.diagnostics.reads += 1
            self.diagnostics.total_reads += 1
            # Add a successful poll to the frequency status diagnostic task
            self.diagnostics.freq_diag.tick()
            try:
                self.publish_message()
            except CommandException as e:
                # Update error counter
                self.diagnostics.errors += 1
                rospy.logerr('Command Exception: ' + CommandErrorCode.ErrorCodeStrings[e.code])
                rospy.logerr("Invalid value read from sensor: " + str(self.name))
            except TypeError as e:
                # Update error counter
                self.diagnostics.errors += 1
                # Fix: str(e) works on both Python 2 and 3; e.message is
                # Python 2 only and raised AttributeError on Python 3.
                rospy.logerr('Type Error: ' + str(e))
            # Compute the next polling time stamp
            self.t_next = now + self.t_delta
class AnalogSensor(Sensor):
    """Analog pin sensor publishing integer Analog messages."""
    def __init__(self, *args, **kwargs):
        super(AnalogSensor, self).__init__(*args, **kwargs)
        self.message_type = MessageType.ANALOG
        self.msg = Analog()
        self.msg.header.frame_id = self.frame_id

    def create_publisher(self):
        self.pub = rospy.Publisher("~sensor/" + self.name, Analog, queue_size=5)

    def create_services(self):
        # Outputs get a write service; everything else is read-only.
        writable = self.direction == "output"
        self.device.analog_pin_mode(self.pin, OUTPUT if writable else INPUT)
        if writable:
            rospy.Service('~' + self.name + '/write', AnalogSensorWrite, self.sensor_write_handler)
        else:
            rospy.Service('~' + self.name + '/read', AnalogSensorRead, self.sensor_read_handler)

    def read_value(self):
        # Apply the configured offset/scale calibration to the raw reading.
        raw = self.device.analog_read(self.pin)
        return (raw - self.offset) * self.scale

    def write_value(self, value):
        return self.device.analog_write(self.pin, value)

    def sensor_read_handler(self, req=None):
        self.value = self.read_value()
        return AnalogSensorReadResponse(self.value)

    def sensor_write_handler(self, req):
        self.write_value(req.value)
        self.value = req.value
        return AnalogSensorWriteResponse()
class AnalogFloatSensor(AnalogSensor):
    """Analog pin sensor publishing floating-point AnalogFloat messages."""
    def __init__(self, *args, **kwargs):
        super(AnalogFloatSensor, self).__init__(*args, **kwargs)
        self.message_type = MessageType.ANALOG
        self.msg = AnalogFloat()
        self.msg.header.frame_id = self.frame_id

    def create_publisher(self):
        self.pub = rospy.Publisher("~sensor/" + self.name, AnalogFloat, queue_size=5)

    def create_services(self):
        # Same layout as AnalogSensor but with the float service types.
        writable = self.direction == "output"
        self.device.analog_pin_mode(self.pin, OUTPUT if writable else INPUT)
        if writable:
            rospy.Service('~' + self.name + '/write', AnalogFloatSensorWrite, self.sensor_write_handler)
        else:
            rospy.Service('~' + self.name + '/read', AnalogFloatSensorRead, self.sensor_read_handler)

    def read_value(self):
        # Apply the configured offset/scale calibration to the raw reading.
        raw = self.device.analog_read(self.pin)
        return (raw - self.offset) * self.scale

    def write_value(self, value):
        return self.device.analog_write(self.pin, value)

    def sensor_read_handler(self, req=None):
        self.value = self.read_value()
        return AnalogFloatSensorReadResponse(self.value)

    def sensor_write_handler(self, req):
        self.write_value(req.value)
        self.value = req.value
        return AnalogFloatSensorWriteResponse()
class DigitalSensor(Sensor):
    """Digital (boolean) pin sensor publishing Digital messages."""
    def __init__(self, *args, **kwargs):
        super(DigitalSensor, self).__init__(*args, **kwargs)
        self.message_type = MessageType.BOOL
        self.msg = Digital()
        self.msg.header.frame_id = self.frame_id
        # Seed the cached value with the pin's current state.
        self.value = self.read_value()

    def create_publisher(self):
        self.pub = rospy.Publisher("~sensor/" + self.name, Digital, queue_size=5)

    def create_services(self):
        writable = self.direction == "output"
        self.device.digital_pin_mode(self.pin, OUTPUT if writable else INPUT)
        if writable:
            rospy.Service('~' + self.name + '/write', DigitalSensorWrite, self.sensor_write_handler)
        else:
            rospy.Service('~' + self.name + '/read', DigitalSensorRead, self.sensor_read_handler)

    def read_value(self):
        return self.device.digital_read(self.pin)

    def write_value(self, value=None):
        # When publishing at a fixed rate the output simply toggles;
        # otherwise write the explicitly requested level.
        self.value = (not self.value) if self.rate != 0 else value
        return self.device.digital_write(self.pin, self.value)

    def sensor_read_handler(self, req=None):
        self.value = self.read_value()
        return DigitalSensorReadResponse(self.value)

    def sensor_write_handler(self, req):
        self.write_value(req.value)
        self.value = req.value
        return DigitalSensorWriteResponse()
class RangeSensor(Sensor):
    """Base class for distance sensors publishing sensor_msgs/Range."""
    def __init__(self, *args, **kwargs):
        super(RangeSensor, self).__init__(*args, **kwargs)
        self.message_type = MessageType.RANGE
        self.msg = Range()
        self.msg.header.frame_id = self.frame_id

    def create_publisher(self):
        self.pub = rospy.Publisher("~sensor/" + self.name, Range, queue_size=5)

    def create_services(self):
        # Range sensors are read-only; the float read service type is reused.
        rospy.Service('~' + self.name + '/read', AnalogFloatSensorRead, self.sensor_read_handler)

    def publish_message(self):
        # Overridden because Range messages carry the reading in .range,
        # not in a .value field like the base class assumes.
        self.value = self.read_value()
        self.msg.range = self.value
        self.msg.header.stamp = rospy.Time.now()
        self.pub.publish(self.msg)

    def sensor_read_handler(self, req=None):
        self.value = self.read_value()
        return AnalogFloatSensorReadResponse(self.value)
class SonarSensor(RangeSensor):
    """Range sensor whose messages are tagged as ultrasound."""
    def __init__(self, *args, **kwargs):
        super(SonarSensor, self).__init__(*args, **kwargs)
        self.msg.radiation_type = Range.ULTRASOUND
class IRSensor(RangeSensor):
    """Range sensor whose messages are tagged as infrared."""
    def __init__(self, *args, **kwargs):
        super(IRSensor, self).__init__(*args, **kwargs)
        self.msg.radiation_type = Range.INFRARED
class Ping(SonarSensor):
    """Parallax Ping))) ultrasonic rangefinder."""
    def __init__(self, *args, **kwargs):
        super(Ping, self).__init__(*args, **kwargs)
        # Field of view (rad) and usable range (m) reported to consumers.
        self.msg.field_of_view = 0.7
        self.msg.min_range = 0.02
        self.msg.max_range = 3.0

    def read_value(self):
        """Return the measured distance in meters.

        The Arduino Ping code reports centimeters, so divide by 100
        for ROS.
        """
        return self.device.ping(self.pin) / 100.0
class GP2D12(IRSensor):
    """Sharp GP2D12 analog IR rangefinder.

    (The GP2D12 has been replaced by the GP2Y0A21YK0F.)
    """
    def __init__(self, *args, **kwargs):
        super(GP2D12, self).__init__(*args, **kwargs)
        self.msg.field_of_view = 0.09
        self.msg.min_range = 0.10
        self.msg.max_range = 0.80

    def read_value(self):
        """Convert the raw ADC reading to a distance in meters, or NaN."""
        raw = self.device.analog_read(self.pin)
        # The sensor cannot provide a meaningful result at or below 3 counts.
        if raw <= 3.0:
            return float('NaN')
        try:
            # Linearisation fit for the GP2D12 response curve.
            #distance = pow(4187.8 / raw, 1.106)
            distance = (6787.0 / (float(raw) - 3.0)) - 4.0
        except:
            return float('NaN')
        # Convert centimeters to meters.
        distance /= 100.0
        # Report spurious out-of-band readings as NaN.
        if distance > self.msg.max_range or distance < self.msg.min_range:
            return float('NaN')
        return distance
class IMU(Sensor):
    """Publishes sensor_msgs/Imu plus a separate MagneticField topic."""
    def __init__(self, *args, **kwargs):
        super(IMU, self).__init__(*args, **kwargs)
        self.message_type = MessageType.IMU
        # IMUs are read-only regardless of the configured direction.
        self.direction = "input"
        self.msg = Imu()
        self.mag_msg = MagneticField()
        self.msg.header.frame_id = self.frame_id
        self.mag_msg.header.frame_id = self.frame_id
        # Diagonal covariances: 1e6 marks an axis as untrusted.
        # NOTE(review): linear_acceleration marks *x* as trusted while the
        # other two use z — confirm these match the remapping done below.
        self.msg.orientation_covariance = [1e6, 0, 0, 0, 1e6, 0, 0, 0, 1e-6]
        self.msg.angular_velocity_covariance = [1e6, 0, 0, 0, 1e6, 0, 0, 0, 1e-6]
        self.msg.linear_acceleration_covariance = [1e-6, 0, 0, 0, 1e6, 0, 0, 0, 1e6]

    def create_publisher(self):
        self.pub = rospy.Publisher("~sensor/" + self.name, Imu, queue_size=5)
        self.mag_pub = rospy.Publisher("imu/mag", MagneticField, queue_size=5)

    def read_value(self):
        '''
        IMU data is assumed to be returned in the following order:
        [ax, ay, az, gx, gy, gz, mx, my, mz, roll, pitch, ch]
        where a stands for accelerometer, g for gyroscope and m for magnetometer.
        The last value ch stands for "compensated heading" that some IMU's can
        compute to compensate magnetic heading for the current roll and pitch.

        NOTE(review): the list above names 12 fields, but exactly 9 are
        unpacked below — a 12-element payload would land in the except branch.
        '''
        data = self.device.get_imu_data()
        try:
            ax, ay, az, gx, gy, gz, mx, my, mz = data
        except:
            rospy.logerr("Invalid value read from sensor: " + str(self.name))
            return None
        # NOTE(review): x and y are swapped and z negated below — presumably a
        # board-to-ROS-frame remap; confirm against the mounting orientation.
        self.msg.linear_acceleration.x = ay
        self.msg.linear_acceleration.y = ax
        self.msg.linear_acceleration.z = -az
        # Gyro rates arrive in degrees/sec; ROS expects rad/sec.
        self.msg.angular_velocity.x = radians(gy)
        self.msg.angular_velocity.y = radians(gx)
        self.msg.angular_velocity.z = -radians(gz)
        # magnetic data (same axis remap as above)
        self.mag_msg.magnetic_field.x = my
        self.mag_msg.magnetic_field.y = mx
        self.mag_msg.magnetic_field.z = mz
        return data

    def publish_message(self):
        # read_value() fills self.msg / self.mag_msg in place.
        self.read_value()
        self.msg.header.stamp = rospy.Time.now()
        self.mag_msg.header.stamp = rospy.Time.now()
        self.pub.publish(self.msg)
        self.mag_pub.publish(self.mag_msg)
class Gyro(Sensor):
    """Single-axis analog gyro integrated into a yaw estimate.

    Reads raw ADC values, converts them to angular velocity using the
    configured sensitivity, integrates to an orientation and publishes
    sensor_msgs/Imu.  The constructor blocks for ``cal_start_interval``
    seconds collecting an initial zero-rate calibration.
    """
    def __init__(self, *args, **kwargs):
        super(Gyro, self).__init__(*args, **kwargs)
        # Optional link to the base controller, used to detect standstill.
        try:
            self.base_controller = kwargs['base_controller']
        except:
            self.base_controller = None
        self.message_type = MessageType.IMU
        self.direction = "input"
        # Sensitivity is mandatory (volts per deg/sec); no usable default.
        self.sensitivity = rospy.get_param('~sensors/' + self.name + '/sensitivity', None)
        self.voltage = rospy.get_param('~sensors/' + self.name + '/voltage', 5.0)
        self.gyro_scale_correction = rospy.get_param('~sensors/' + self.name + '/gyro_scale_correction', 1.0)
        # Time in seconds to collect initial calibration data at startup
        self.cal_start_interval = rospy.get_param('~sensors/' + self.name + '/cal_start_interval', 5.0)
        if self.sensitivity is None:
            rospy.logerr("Missing sensitivity parameter for gyro.")
            rospy.signal_shutdown("Missing sensitivity parameter for gyro.")
        # Conversion factor from one 10-bit ADC count to rad/sec.
        self.rad_per_sec_per_adc_unit = radians(self.voltage / 1023.0 / self.sensitivity)
        self.orientation = 0.0
        self.last_time = None
        # cal_offset stays None until update_calibration() has samples.
        self.cal_offset = None
        self.cal_drift_threshold = rospy.get_param('~sensors/' + self.name + '/cal_drift_threshold', 0.1)
        self.cal_buffer = []
        self.cal_drift_buffer = []
        self.cal_buffer_length = 1000
        self.cal_drift_buffer_length = 1000
        self.msg = Imu()
        self.msg.header.frame_id = self.frame_id
        # Only yaw (z) is measured; other axes get maximal variance.
        self.msg.orientation_covariance = [sys.float_info.max, 0, 0, 0, sys.float_info.max, 0, 0, 0, 0.05]
        self.msg.angular_velocity_covariance = [sys.float_info.max, 0, 0, 0, sys.float_info.max, 0, 0, 0, 0.05]
        self.msg.linear_acceleration_covariance = [0, 0, 0, 0, 0, 0, 0, 0, 0]
        print "\n*** DO NOT MOVE GYRO FOR", self.cal_start_interval, "SECONDS TO ALLOW OFFSET CALLIBRATION ***\n"
        # Blocking startup calibration: sample at the publish rate for
        # cal_start_interval seconds while the robot is assumed stationary.
        update_interval = 1.0 / self.rate
        cal_time = 0.0
        while cal_time < self.cal_start_interval:
            gyro_data = self.device.analog_read(self.pin)
            self.update_calibration(gyro_data)
            rospy.sleep(update_interval)
            cal_time += update_interval

    def create_publisher(self):
        self.pub = rospy.Publisher("~sensor/" + self.name, Imu, queue_size=5)

    def read_value(self):
        """Sample the gyro, integrate orientation and fill the Imu message.

        Returns the filled message, or None on the very first call (no
        time delta available yet).
        """
        gyro_data = self.device.analog_read(self.pin)
        # If the robot is not moving, update the gyro calibration
        if self.base_controller is not None and self.base_controller.current_speed == Twist():
            self.update_calibration(gyro_data)
        # If this is the first measurement, just record the current time
        if self.last_time is None:
            self.last_time = rospy.Time.now()
            return
        # Store the current time
        current_time = rospy.Time.now()
        # Compute the time since the last measurement
        dt = (current_time - self.last_time).to_sec()
        self.last_time = current_time
        # Compute angular velocity from the raw gyro data
        # NOTE(review): cal_offset may still be None if calibration never ran —
        # that would raise TypeError here; confirm poll()'s handler covers it.
        angular_velocity = self.gyro_scale_correction * self.rad_per_sec_per_adc_unit * (gyro_data - self.cal_offset)
        # Right-hand coordinate system
        angular_velocity = -1.0 * angular_velocity
        # Ignore small values that are likely due to drift
        if abs(angular_velocity) < self.cal_drift_threshold:
            angular_velocity = 0
        # Update the orientation by integrating angular velocity over time
        self.orientation += angular_velocity * dt
        # Fill in the Imu message
        self.msg.header.stamp = current_time
        self.msg.angular_velocity.z = angular_velocity
        (self.msg.orientation.x, self.msg.orientation.y, self.msg.orientation.z, self.msg.orientation.w) = quaternion_from_euler(0, 0, self.orientation)
        return self.msg

    def publish_message(self):
        self.read_value()
        self.msg.header.stamp = rospy.Time.now()
        self.pub.publish(self.msg)

    def update_calibration(self, gyro_data):
        """Fold one stationary sample into the offset/drift estimates.

        The bare except blocks below deliberately make this best-effort:
        cal_offset is None and the buffers are empty until enough samples
        have arrived.
        """
        # Collect raw analog values when stopped so we can compute the ADC offset
        self.cal_buffer.append(gyro_data)
        # Keep both buffers bounded to their configured lengths.
        if len(self.cal_buffer) > self.cal_buffer_length:
            del self.cal_buffer[:-self.cal_buffer_length]
        if len(self.cal_drift_buffer) > self.cal_drift_buffer_length:
            del self.cal_drift_buffer[:-self.cal_drift_buffer_length]
        try:
            # Collect angular velocity values when stopped to compute the drift estimate
            angular_velocity = self.gyro_scale_correction * self.rad_per_sec_per_adc_unit * (gyro_data - self.cal_offset)
            self.cal_drift_buffer.append(abs(angular_velocity))
        except:
            pass
        try:
            # Use the max absolute angular velocity when stopped as the drift estimated
            self.cal_drift_offset = max(self.cal_drift_buffer, key=lambda x: abs(x))
        except:
            pass
        try:
            self.cal_offset = sum(self.cal_buffer) / len(self.cal_buffer)
        except:
            pass

    def reset(self):
        """Zero the integrated orientation and clear the message fields."""
        self.orientation = 0
        self.msg.orientation = Quaternion()
        self.msg.orientation.w = 1.0
        self.msg.angular_velocity = Vector3()
        self.msg.linear_acceleration = Vector3()
class PololuMotorCurrent(AnalogFloatSensor):
    """Pololu motor-driver current sense channel."""
    def __init__(self, *args, **kwargs):
        super(PololuMotorCurrent, self).__init__(*args, **kwargs)

    def read_value(self):
        """Return the motor current in amps.

        34 mA per ADC count, per the Pololu source code.
        """
        milliamps = self.device.analog_read(self.pin) * 34
        return milliamps / 1000.0
class PhidgetsVoltage(AnalogFloatSensor):
    """Phidgets analog voltage sensor."""
    def __init__(self, *args, **kwargs):
        super(PhidgetsVoltage, self).__init__(*args, **kwargs)

    def read_value(self):
        """Return the measured voltage.

        Linear conversion (0.06 V per count around a 500-count midpoint),
        from the Phidgets documentation.
        """
        raw = self.device.analog_read(self.pin)
        return 0.06 * (raw - 500.)
class PhidgetsCurrent(AnalogFloatSensor):
    """Phidgets 20 amp DC current sensor."""
    def __init__(self, *args, **kwargs):
        super(PhidgetsCurrent, self).__init__(*args, **kwargs)

    def read_value(self):
        """Return the measured current in amps.

        Linear conversion (0.05 A per count around a 500-count midpoint),
        from the Phidgets documentation for the 20 amp DC sensor.
        """
        raw = self.device.analog_read(self.pin)
        return 0.05 * (raw - 500.)
class MaxEZ1Sensor(SonarSensor):
    """MaxBotix MaxSonar-EZ1 ultrasonic rangefinder.

    Requires 'trigger_pin' and 'output_pin' keyword arguments in
    addition to the base Sensor configuration.
    """
    def __init__(self, *args, **kwargs):
        super(MaxEZ1Sensor, self).__init__(*args, **kwargs)
        self.trigger_pin = kwargs['trigger_pin']
        self.output_pin = kwargs['output_pin']
        # Beam/range limits reported to consumers (0.785398163 rad = 45 deg).
        self.msg.field_of_view = 0.785398163
        self.msg.min_range = 0.02
        self.msg.max_range = 3.0

    def read_value(self):
        """Trigger a ping and return the device-reported distance."""
        return self.device.get_MaxEZ1(self.trigger_pin, self.output_pin)
|
'''
Created on 2 Oct 2016
@author: MetalInvest
'''
########################################## Trade param ########################################
# Stop-loss factors: sell when price falls below entry * factor.
stop_loss = dict(
    stop_loss_conservative = 0.98,
    stop_loss_aggressive = 0.93,
    stop_loss_normal = 0.95
)
# Stop-gain factors: take profit when price rises above entry * factor.
# NOTE: the 'aggressvie' misspelling is a runtime dict key — consumers look
# it up by this exact name, so it is preserved.
stop_gain = dict(
    stop_gain_conservative = 1.03,
    stop_gain_aggressvie = 1.13,
    stop_gain_normal = 1.07
)
# Margin thresholds used by the strategy logic ('advacne' misspelling also
# preserved as a runtime key).
margin = dict(
    minimum_advance_margin = 1.13,
    maximum_advacne_margin = 0.30,
    minimum_divergence_margin_upper = 1.02,
    minimum_divergence_margin_lower = 0.98,
    minimum_fallback_margin = 0.20
)
############################################# Trade control ##############################################
# Extra price slack applied when placing buy/sell orders.
trade_margin = dict(
    buy_extra_margin = 1.03,
    sell_extra_margin = 0.97
)
# Broker account selection and per-broker credential/verification files.
trade_acc = dict(
    use_xq = 'xq', #
    json_xq = 'xq.json', # two_eight
    json2_xq = 'xq_2.json', # macd
    json3_xq = 'xq_3.json', # backup
    use_yjb = 'yjb',
    use_ht = 'ht',
    json_yjb = 'yjb.json',
    json_ht = 'ht.json',
    jar_yjb = 'yjb_verify_code.jar',
    jar_ht = 'getcode_jdk1.5.jar'
)
# Flags enabling real (non-simulated) order placement per strategy.
real_action = dict(
    macd_real_action = True,
    two_eight_real_action = True
)
# Security codes on the Shenzhen (.XSHE) and Shanghai (.XSHG) exchanges —
# presumably structured/graded fund A-shares given the 150xxx/502xxx ranges;
# TODO confirm the universe this list is meant to cover.
fja5 = [
    u'150008.XSHE', u'150018.XSHE', u'150030.XSHE', u'150051.XSHE', u'150076.XSHE',
    u'150083.XSHE', u'150085.XSHE', u'150088.XSHE', u'150090.XSHE', u'150092.XSHE',
    u'150094.XSHE', u'150100.XSHE', u'150104.XSHE', u'150106.XSHE', u'150108.XSHE',
    u'150112.XSHE', u'150117.XSHE', u'150121.XSHE', u'150123.XSHE', u'150130.XSHE',
    u'150135.XSHE', u'150140.XSHE', u'150145.XSHE', u'150148.XSHE', u'150150.XSHE',
    u'150152.XSHE', u'150157.XSHE', u'150171.XSHE', u'150173.XSHE', u'150177.XSHE',
    u'150179.XSHE', u'150181.XSHE', u'150184.XSHE', u'150186.XSHE', u'150190.XSHE',
    u'150192.XSHE', u'150194.XSHE', u'150196.XSHE', u'150198.XSHE', u'150203.XSHE',
    u'150205.XSHE', u'150207.XSHE', u'150209.XSHE', u'150213.XSHE', u'150215.XSHE',
    u'150217.XSHE', u'150221.XSHE', u'150225.XSHE', u'150227.XSHE', u'150241.XSHE',
    u'150247.XSHE', u'150249.XSHE', u'150255.XSHE', u'150267.XSHE', u'150271.XSHE',
    u'150291.XSHE', u'150295.XSHE', u'150299.XSHE', u'502001.XSHG', u'502004.XSHG',
    u'502007.XSHG', u'502011.XSHG', u'502014.XSHG', u'502021.XSHG', u'502024.XSHG',
    u'502027.XSHG', u'502031.XSHG', u'502037.XSHG', u'502041.XSHG', u'502049.XSHG',
    u'502054.XSHG', u'502057.XSHG']
import random, time
def partition(arr, low, high):
    """Lomuto partition of arr[low:high+1] around the pivot arr[high].

    Moves the pivot into its final sorted position and returns that
    index; elements <= pivot end up to its left.
    """
    pivot = arr[high]
    i = low - 1  # boundary of the "<= pivot" region
    for j in range(low, high):
        if arr[j] <= pivot:
            i = i + 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i + 1], arr[high] = arr[high], arr[i + 1]
    return (i + 1)

def QuickSort(arr, low, high):
    """Sort arr[low:high+1] in place using quicksort.

    (Name kept for existing callers.)  The previous ``len(arr) == 1``
    early return was dead code: the recursive calls ignore the return
    value and ``low < high`` already guards the base case.
    """
    if low < high:
        pi = partition(arr, low, high)
        QuickSort(arr, low, pi - 1)
        QuickSort(arr, pi + 1, high)
def randomArr(arr_len):
    """Append *arr_len* random ints in [0, 1000] to the module-level
    list ``arr`` and return it.

    Fix: removed the pointless ``i = i + 1`` inside the for loop (the
    loop variable is reassigned by ``range`` each iteration anyway).
    NOTE(review): this mutates the global ``arr`` rather than building a
    local list — kept for compatibility with the driver script below.
    """
    for _ in range(arr_len):
        arr.append(random.randint(0, 1000))
    return arr
# --- driver script: fill a global list with 1000 random ints, then time
# the in-place quicksort and print the result ---
arr_len = 1000
arr = []  # populated in place by randomArr()
n = arr_len
randomArr(arr_len)
print(arr)
startTime = time.time()
QuickSort(arr, 0, n - 1)
executionTime = (time.time() - startTime)
print ("Sorted array is:")
# NOTE(review): the trailing comma after print(...) behaves differently on
# Python 2 (suppresses the newline) and Python 3 (builds a throwaway tuple);
# left untouched because the file's target version is ambiguous.
for i in range(len(arr)):
    print ("%d" %arr[i]),
print("Execution Time:", executionTime)
class Activator:
    """Minimal switchable device: tracks and announces an on/off state."""
    def __init__(self):
        # Devices start switched off.
        self.is_on = False

    def _switch(self, state):
        # Announce the transition, then record the new state.
        print("Setting {} {}".format(self.__class__.__name__, "ON" if state else "OFF"))
        self.is_on = state

    def on(self):
        """Switch the device on."""
        self._switch(True)

    def off(self):
        """Switch the device off."""
        self._switch(False)
|
from django.dispatch import receiver
from django.conf import settings
from django_rest_passwordreset.signals import reset_password_token_created
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
# Module-level client so the API connection setup happens once at import.
sg_client = SendGridAPIClient(settings.SENDGRID_API_KEY)

@receiver(reset_password_token_created)
def password_reset_token_created(sender, instance, reset_password_token, *args, **kwargs):
    """Email a password-reset link via a SendGrid dynamic template.

    Fired by django-rest-passwordreset whenever a reset token is created.
    Send failures are logged and swallowed so token creation still succeeds.
    """
    mail = Mail(
        from_email=settings.DEFAULT_FROM_EMAIL,
        to_emails=reset_password_token.user.email
    )
    mail.template_id = settings.SENDGRID_RESET_PASSWORD_TEMPLATE
    # The template interpolates the reset token under the 'key' variable.
    mail.dynamic_template_data = {'key': reset_password_token.key}
    try:
        sg_client.send(mail)
    except Exception as e:
        # Fix: Exception has no .message attribute on Python 3, so the old
        # print(e.message) raised AttributeError inside the handler itself.
        print(e)
|
#print('\tPrimul meu string pentru \ncurs 2') #\t pune un tab
#print('Primul meu string pentru curs 2. ' * 2)
#print(2+2*2-2/1) #ridicare putere: **
#a = "String"
#a += "String1"
#print(type(a))
#concatenare cu format
#a = "String1"
#b = "String2"
#c = "{1} {0} {1}".format(a, b)
#c = a + ' ' + b
#c = f"{a} {b}"
#print(c)
#a = "1"
#b = "2"
#c = int(a) + int(b)
#print(c)
# a = int(input("Primul nr: "))
# b = int(input("Al doilea nr: "))
# c = a + b
# print(c)
# Structuri
# if conditie:
# executie
# elif conditie;
# executie2
# else:
# executie3
# a = 2
# b = a
# a = 3
# print(a)
# print(b)
# if a is b:
# print("a este adevarat")
# elif a > b:
# print("a este mai mare")
# else:
# print("a este mai mic")
# while conditie:
# sintaxa1
# ...
# sintaxa2
# x = 10
# while x > 1:
# print("x este", x)
# #break
# pass
# x -= 1
# while True:
# euro = input("Valoare euro pentru convertire: ")
#
# if euro.isdigit():
# pass
# elif len(euro.split(".")) == 2:
# a = euro.split('.')[0]
# b = euro.split('.')[1]
#
# if type(euro) == float:
# euro = int(euro)
# print("Valoare convertita este: ", euro * 4.82, "RON")
# else:
# print("Valoarea nu este un numar")
# str = "abecedar"
# print(str[-1])
# print(str[::-1])
# print(str[::2])
# print(str[2::])
#
# for poz, char in enumerate(str):
# print(poz, char)
# ok = "1"
##Var 2
# while ok == "1":
# print("1.Faceti conversie")
# print("2.Iesiti din program")
# ok = input()
#
# if ok.isdigit() and int(ok) == 1:
# euro = input("Valoare euro pentru convertire: ")
# if len(euro) > 0:
# floatSign = 1
# if euro[0] == "-":
# floatSign = -1
# euro = euro[1:]
# a = None
# if euro.isdigit():
# euro = int(euro)
# elif "." in euro and len(a := euro.split(".")) == 2 and a[0].isdigit() and a[1].isdigit():
# euro = float(euro)
# else:
# print("Valoarea nu este un numar")
# continue
# print("Valoarea convertita este:", floatSign * euro * 4.87, " RON")
# Steps through -1, 1, 3, 5 (stride of 2 stops before 6).
for numar in range(-1, 6, 2):
    print(numar)
# The integers 0 through 9 as a list.
lista = [*range(10)]
print(lista)
import json
# Sentinel meaning "field not supplied" in serialized requests.
none = "d3043820717d74d9a17694c176d39733"
# region Subscription
class Subscription:
    """Plain data holder for an event subscription.

    # Arguments
    resource_id: str
    protocol: str
    endpoint: str
    event_type: str
    event_format: dict

    Every field defaults to the module-level ``none`` sentinel so the
    serializer can tell "unset" apart from a real value.
    """
    def __init__(self, resource_id=none, protocol=none, endpoint=none,
                 event_type=none, event_format=none):
        self.resource_id = resource_id
        self.protocol = protocol
        self.endpoint = endpoint
        self.event_type = event_type
        self.event_format = event_format
# endregion
class SubscriptionRequest:
    """Request envelope that serialises a Subscription payload."""
    def __init__(self, subscription):
        self.subscription = subscription

    def toJSON(self):
        """Serialise recursively: unknown objects fall back to __dict__."""
        return json.dumps(self, default=lambda obj: obj.__dict__,
                          sort_keys=True, indent=4)
|
"""Battles Models.
This module contains all the classes and methods regarding our
battles blueprint models.
"""
import pickle
from copy import deepcopy
from marshmallow import (Schema, fields, validates, post_dump, ValidationError)
from dino_extinction.infrastructure import redis
class BattleSchema(Schema):
    """BattleSchema Class.

    This class is responsible for handling all data regarding our
    battles schema.
    ...
    Attributes
    ----------
    id : int
        The ID of the battle that you're handling.
    board_size : int
        The size of the board that you are creating.
    """
    id = fields.Integer(required=True)
    board_size = fields.Integer(required=True)

    @validates('id')
    def validate_id(self, data):
        """Validate the length of battle ID.

        This validator checks if the number of digits of our current
        battle ID is 4. If not, it will raise an error.
        ...
        Raises
        ------
        ValidationError
            If the number of digits of the ID is different than 4.
        """
        digits = str(data)
        number_of_digits = len(digits)
        if number_of_digits != 4:
            raise ValidationError('The battle ID should be 4 digits long.')

    @post_dump
    def create_battle(self, data):
        """Create a new battle.

        This method will create a new battle if you try to dump a new
        data and all the attributes are valid.
        ...
        Parameters
        ----------
        data : dict
            A dict containing all the data that you are trying to insert into
            the new battle. Valid keys: board_size, size and state.
        """
        board_size = data['board_size']
        # Board cells hold entity IDs (or None); indexed [row][column].
        board_state = [[None] * board_size for _ in range(board_size)]
        board = dict()
        board['size'] = board_size
        board['state'] = board_state
        battle = dict()
        battle['board'] = board
        # Battles are persisted pickled in Redis, keyed by battle ID.
        pickled_battle = pickle.dumps(battle)
        redis.instance.set(data['id'], pickled_battle)

    def get_battle(self, battle_id):
        """Get the data from an existing battle.

        This method will get the data from an existing battle, return it data
        normalized or None if the data does not exist.
        ...
        Parameters
        ----------
        battle_id : str
            The ID of the battle that you are trying to get.

        Returns
        -------
        battle : dict
            The data of the desired battle (if exists) normalized as a
            Python dict. If there is no battle, it should return None.
        """
        raw_data = redis.instance.get(battle_id)
        if not raw_data:
            return None
        data = pickle.loads(raw_data)
        return data

    def update_battle(self, battle_id, new_data):
        """Update the data of an existing battle.

        This method will use the new data to update and overwrite an existing
        battle data.
        ...
        Parameters
        ----------
        battle_id : str
            The ID of the battle that you are trying to update.
        new_data : dict
            The entire battle new data that will overwrite the previous data.
        """
        raw_data = pickle.dumps(new_data)
        redis.instance.set(battle_id, raw_data)
        return True

    def robot_move(self, battle, robot_id, action):
        """Move the robot inside the battlefield.

        This method will move the desired robot inside the battlefield
        according to an specific action. It will not move if any other entity
        has already taken that position.
        ...
        Parameters
        ----------
        battle : dict
            The battle object that you are working on.
        robot_id : str
            The ID of the robot that you are trying to move.
        action : str
            The action that you are trying to do. It can be: move-forward or
            move-backwards.
        """
        # Which tuple index of a (y, x) position each facing moves along:
        # 0 = row (north/south), 1 = column (west/east).
        cardinal_points = dict()
        cardinal_points.setdefault('north', 0)
        cardinal_points.setdefault('south', 0)
        cardinal_points.setdefault('west', 1)
        cardinal_points.setdefault('east', 1)
        # Facings for which "forward" means decreasing the coordinate.
        reversed_directions = dict()
        reversed_directions.setdefault('north', True)
        reversed_directions.setdefault('west', True)
        reversed_directions.setdefault('south', False)
        reversed_directions.setdefault('east', False)
        robot = battle.get('entities').get(robot_id)
        facing_direction = robot.get('direction')
        cardinal_point = cardinal_points.get(facing_direction)
        is_reversed = reversed_directions.get(facing_direction)
        original_position = robot.get('position')
        position_to_change = original_position[cardinal_point]
        # Work on a copy so an invalid move leaves the input untouched.
        updated_battle = deepcopy(battle)
        changed_position = self._calculate_position(position_to_change,
                                                    action,
                                                    is_reversed)
        new_robot_position = [pos for pos in original_position]
        new_robot_position[cardinal_point] = changed_position
        new_robot_position = tuple(new_robot_position)
        # Positions are 1-based (y, x); the board matrix is 0-based.
        old_yPos = original_position[0]
        old_xPos = original_position[1]
        new_yPos = new_robot_position[0]
        new_xPos = new_robot_position[1]
        board = battle.get('board').get('state')
        if self._is_not_valid_index(new_xPos - 1, new_yPos - 1, board):
            return False
        # Destination already occupied by another entity: refuse the move.
        if board[new_yPos - 1][new_xPos - 1]:
            return False
        updated_battle.get('board').get('state')[old_yPos - 1][old_xPos - 1] = None
        updated_battle.get('board').get('state')[new_yPos - 1][new_xPos - 1] = robot_id
        new_position = dict()
        new_position.setdefault('position', new_robot_position)
        updated_battle.get('entities').get(robot_id).update(new_position)
        return updated_battle

    def robot_attack(self, battle, robot_id):
        """Attack all dinos close to an specific robot.

        This method will destroy all dinos close to an specific robot. It will
        not attack any other robot close to it.
        ...
        Parameters
        ----------
        battle : dict
            The battle object that you are working on.
        robot_id : str
            The ID of the robot that you are trying to move.
        """
        robot = battle.get('entities').get(robot_id)
        robot_position = robot.get('position')
        entities = battle.get('entities')
        robot_yPos = robot_position[0]
        robot_xPos = robot_position[1]
        yPositions = [robot_yPos + 1, robot_yPos - 1]
        xPositions = [robot_xPos + 1, robot_xPos - 1]
        corners = [(y, x) for x in xPositions for y in yPositions]
        # NOTE(review): these two comprehensions iterate over robot_position
        # itself (a (y, x) pair), so whenever y != x they also generate cells
        # whose column equals the robot's *row* (and vice versa) — looks like
        # a bug; confirm the intended "same row / same column" cells.
        same_ver_axis = [(y, x) for x in robot_position for y in yPositions]
        same_hor_axis = [(y, x) for x in xPositions for y in robot_position]
        positions_to_attack = list(set().union(corners,
                                               same_ver_axis,
                                               same_hor_axis))
        for yPos, xPos in positions_to_attack:
            # NOTE(review): no bounds check here — a robot on row/column 1
            # yields index -1, which wraps to the far edge of the board, and
            # positions past the edge raise IndexError; confirm callers
            # guarantee interior positions.
            entity = battle.get('board').get('state')[yPos - 1][xPos - 1]
            # Only entities whose ID starts with 'D-' (dinos) are destroyed.
            if entity and entity[:2] == 'D-':
                del entities[entity]
                battle.get('board').get('state')[yPos - 1][xPos - 1] = None
        return battle

    def _calculate_position(self, pos, act, rev):
        """Return pos shifted one cell for *act*, honoring axis reversal."""
        def move_forward(x, rev):
            return x + 1 if not rev else x - 1

        def move_backwards(x, rev):
            return x - 1 if not rev else x + 1

        dispatch = dict()
        dispatch.setdefault('move-forward', move_forward)
        dispatch.setdefault('move-backwards', move_backwards)
        return dispatch.get(act)(pos, rev)

    def _is_not_valid_index(self, x, y, board):
        # True when the 0-based (x, y) pair falls outside the board matrix.
        return x not in range(len(board[0])) or y not in range(len(board))
|
def digitsProduct(product):
    """Return the smallest positive integer whose digits multiply to *product*.

    Returns -1 when no such integer exists (i.e. *product* has a prime
    factor greater than 9).

    The previous brute-force scan started counting at 10, so single-digit
    answers (product 1..9 -> 1..9) were never found, and its ``res < 10000``
    stopping rule could abandon the search too early. This greedy digit
    factorisation is exact for any product.
    """
    if product == 0:
        return 10  # smallest positive integer containing a 0 digit
    if product == 1:
        return 1
    digits = []
    # Peel off the largest digit factors first: fewer digits means a
    # smaller number, and sorting ascending puts small digits in front.
    for digit in range(9, 1, -1):
        while product % digit == 0:
            digits.append(digit)
            product //= digit
    if product != 1:
        return -1  # a prime factor > 9 survived; impossible with digits
    return int(''.join(str(d) for d in sorted(digits)))
# Demo run: the smallest integer whose digits multiply to 450 is 2559.
product = 450
print(digitsProduct(product))
# (stale note from an earlier run: for product = 12 the result would be 26)
|
from graph import *
# Draws a cartoon face using the educational `graph` turtle-style library.
# Drawing order matters: later shapes are painted over earlier ones.
brushColor("yellow")
circle(250,250,200)  # head
brushColor("red")
circle(150,210,50)  # left eye, outer ring
brushColor("red")
circle(350,210,40)  # right eye, outer ring
brushColor("black")
circle(150,210,30)  # left pupil
brushColor("black")
circle(350,210,25)  # right pupil
# Two slanted quadrilaterals above the eyes (eyebrows).
a=[[55,100],[45,110],[190,210],[205,200]]
polygon(a)
b=[[400,100],[405,110],[300,210],[280,200]]
polygon(b)
rectangle(190,350,350,380)  # mouth
run()
# Python 2 Fabric deployment helper. Reads (or bootstraps) ep.cfg; if a HOST
# is already recorded there it is reused, otherwise a fresh EC2 instance is
# launched with boto and its DNS name is written back into the config.
__author__ = 'schien'
from fabric.api import *
import os, time, boto
import ConfigParser
CONFIG_FILE = "ep.cfg"
ep_ubuntu_14_04 = "ami-47a23a30"
config = ConfigParser.RawConfigParser()
# If there is no config file, let's write one.
if not os.path.exists(CONFIG_FILE):
    config.add_section('ec2')
    config.set('ec2', 'AMI', ep_ubuntu_14_04)
    config.set('ec2', 'INSTANCE_TYPE', 't2.micro')
    config.set('ec2', 'SECURITY_GROUP', 'ep')
    config.set('ec2', 'KEY_PATH', '~/.ssh/ep-host.pem')
    config.set('ec2', 'AWS_ACCESS_KEY_ID', '')
    config.set('ec2', 'AWS_SECRET_ACCESS_KEY', '')
    config.set('ec2', 'USER', 'ubuntu')
    # Writing our configuration file to CONFIG_FILE
    with open(CONFIG_FILE, 'wb') as configfile:
        config.write(configfile)
else:
    config.read(CONFIG_FILE)
MY_AMI = config.get('ec2', 'AMI')
SECURITY_GROUP = config.get('ec2', 'SECURITY_GROUP')
KEY_PATH = config.get('ec2', 'KEY_PATH')
INSTANCE_TYPE = config.get('ec2', 'INSTANCE_TYPE')
# boto picks up AWS credentials from the environment.
os.environ["AWS_ACCESS_KEY_ID"] = config.get('ec2', 'AWS_ACCESS_KEY_ID')
os.environ["AWS_SECRET_ACCESS_KEY"] = config.get('ec2', 'AWS_SECRET_ACCESS_KEY')
host = None
try:
    host = config.get('ec2', 'HOST')
except:
    # NOTE(review): bare except; a missing HOST option raises
    # ConfigParser.NoOptionError, which is the case expected here, but this
    # also swallows any other config error silently.
    pass
if host is not None and host != '':
    env.hosts = [host, ]
    env.user = config.get('ec2', 'USER')
    env.key_filename = KEY_PATH
    print "Instance already created, using it: %s" % host
else:
    conn = boto.connect_ec2()
    image = conn.get_image(MY_AMI)
    security_groups = conn.get_all_security_groups()
    try:
        [ep_group] = [x for x in security_groups if x.name == SECURITY_GROUP]
    except ValueError:
        # this probably means the security group is not defined
        # create the rules programatically to add access to ports 22, 80, 8000 and 8001
        ep_group = conn.create_security_group(SECURITY_GROUP, 'Cool EP rules')
        ep_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        ep_group.authorize('tcp', 80, 80, '0.0.0.0/0')
        ep_group.authorize('tcp', 8000, 8001, '0.0.0.0/0')
        ep_group.authorize('tcp', 8080, 8080, '0.0.0.0/0')
    try:
        [geonode_key] = [x for x in conn.get_all_key_pairs() if x.name == 'dan']
    except ValueError:
        # this probably means the key is not defined
        # get the first one in the belt for now:
        print "GeoNode file not found in the server"
        geonode_key = conn.get_all_key_pairs()[0]
    reservation = image.run(security_groups=[ep_group, ], key_name=geonode_key.name, instance_type=INSTANCE_TYPE)
    instance = reservation.instances[0]
    print "Firing up instance"
    # Give it 10 minutes to appear online (120 polls x 5 s).
    for i in range(120):
        time.sleep(5)
        instance.update()
        print instance.state
        if instance.state == "running":
            break
    if instance.state == "running":
        dns = instance.dns_name
        print "Instance up and running at %s" % dns
        # Persist the host so subsequent runs reuse this instance.
        config.set('ec2', 'HOST', dns)
        config.set('ec2', 'INSTANCE', instance.id)
        env.hosts = [dns, ]
        env.user = config.get('ec2', 'USER')
        env.key_filename = KEY_PATH
        with open(CONFIG_FILE, 'wb') as configfile:
            config.write(configfile)
        print "ssh -i %s ubuntu@%s" % (KEY_PATH, dns)
    print "Terminate the instance via the web interface %s" % instance
    time.sleep(20)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 16:24:59 2017
@author: yan
Load pre-trained network to segment a new image
Code v0.01
"""
# %% Resnet blocks in U-net
import argparse
import datetime
import nibabel as nib
import numpy as np
import os
from os import path
from scipy import ndimage
import SimpleITK as sitk
import time
import torch
from torch import cuda
from torch import optim
from torch.autograd import Variable
import torch.nn as nn
# from unet_context import UNet_ctx
#from u_net import UNet
# from model.concave_dps import ResUNet
from model.concave_dps_w import ResUNet
# from model.concave_res2 import ResUNet
# from model.concave_res_w3 import ResUNet
#from fcoordresu_net import ResUNet
#from resu_ctx import ResUNet
# %%
# Command-line interface for the segmentation script.
parser = argparse.ArgumentParser(description='ResUNet CT segmentation')
parser.add_argument('input_filename', type=str, metavar='input_filename',
                    help='File of image to be segmented')
parser.add_argument('output_filename', type=str, metavar='output_filename',
                    help='File to save the segmentation result')
# FIX: the help text previously claimed "(default: 5)" while the actual
# default is 3.
parser.add_argument('-s', '--slices', default=3, type=int,
                    help='number of slices (default: 3)')
parser.add_argument('--begin', default=0, type=int,
                    help='Beginning slice for segmentation')
parser.add_argument('--end', default=9999, type=int,
                    help='Ending slice for segmentation')
# NOTE(review): type=bool is an argparse pitfall -- any non-empty string
# (including "False") parses as True. Left unchanged to keep the CLI
# behaviour identical; consider action='store_true' in a future revision.
parser.add_argument('-c', '--cuda', default=True, type=bool, metavar='Use GPU CUDA',
                    help='Use GPU for computation')
parser.add_argument('-e', '--evaluating', default=False, type=bool,
                    metavar='evaluation after segmentation', help='Use GT label for evaluation after completing segmentation')
parser.add_argument('-l', '--label_filename', default=None, type=str,
                    metavar='label_filename',
                    help='File containing the ground truth segmentation label for evaluation')
parser.add_argument('--network_path', default='./', type=str,
                    metavar='path of network file',
                    help='File containing the pre-trained network')
parser.add_argument('--view', default='axial', type=str,
                    metavar='View', help='view for segmentation (default: axial)')
# %%
def load_image(image_filename, evaluating=False, label_filename=None):
    """Load the image to segment and, optionally, its ground-truth label.

    Parameters
    ----------
    image_filename : str
        Path of the NIfTI image to segment.
    evaluating : bool
        When True, also try to load the ground-truth label volume.
    label_filename : str or None
        Path of the label volume; ignored unless *evaluating* is True.

    Returns
    -------
    dict
        ``{'image': <nibabel image>, 'label': <nibabel image or None>}``.
    """
    image = nib.load(image_filename)
    label = None
    # FIX: guard against label_filename=None -- path.isfile(None) raises
    # TypeError, so evaluating without a label path used to crash here.
    if evaluating and label_filename and path.isfile(label_filename):
        label = nib.load(label_filename)
    return {'image': image, 'label': label}
# %%
def load_network(fn_network, gpu=True):
    """Restore a pre-trained ResUNet checkpoint from *fn_network*.

    Returns a ``(model, optimizer)`` pair; the optimizer slot is currently
    always None. Returns ``(None, None)`` when the checkpoint file does
    not exist.
    """
    if not path.isfile(fn_network):
        print("=> no checkpoint found at '{}'".format(fn_network))
        return None, None
    print("=> loading checkpoint '{}'".format(fn_network))
    if gpu:
        checkpoint = torch.load(fn_network)
    else:
        # Remap CUDA tensors onto CPU storage when no GPU is requested.
        checkpoint = torch.load(fn_network, map_location=lambda storage, loc: storage)
    # Currently only binary/organ segmentation with this architecture.
    model = ResUNet(3, 4)
    model.load_state_dict(checkpoint['state_dict'])
    if gpu:
        model.cuda()
    else:
        model.cpu()
    optimizer = None
    print("=> loaded checkpoint at epoch {}"
          .format(checkpoint['epoch']))
    return model, optimizer
# %%
def compute_dice(la, lb):
    """Dice overlap of two binary masks: 2|A∩B| / (|A| + |B|), smoothed."""
    overlap = np.sum(la * lb)
    combined = np.sum(la + lb)
    # The tiny epsilon keeps the ratio defined when both masks are empty.
    return 2 * overlap / (combined + 0.00001)
# %%
class SimpleITKAsNibabel(nib.Nifti1Image):
    """
    Minimal interface to use a SimpleITK image as if it were
    a nibabel object. Currently only supports the subset of the
    interface used by NiftyNet and is read only
    """
    def __init__(self, itk_image):
        # Wraps an already-loaded SimpleITK image; the pixel data is copied
        # into a Nifti1Image with an affine derived from the ITK geometry.
        #try:
        self._SimpleITKImage = itk_image
        #except RuntimeError as err:
        #    if 'Unable to determine ImageIO reader' in str(err):
        #        raise nibabel.filebasedimages.ImageFileError(str(err))
        #    else:
        #        raise
        # self._header = SimpleITKAsNibabelHeader(self._SimpleITKImage)
        affine = make_affine(self._SimpleITKImage)
        # super(SimpleITKAsNibabel, self).__init__(
        #     sitk.GetArrayFromImage(self._SimpleITKImage).transpose(), affine)
        # Base __init__ is called explicitly (instead of super()) with the
        # array transposed from ITK's z-first order to nibabel's x-first.
        nib.Nifti1Image.__init__(
            self,
            sitk.GetArrayFromImage(self._SimpleITKImage).transpose(), affine)
class SimpleITKAsNibabelHeader(nib.spatialimages.SpatialHeader):
    """Nibabel-style header synthesized from a SimpleITK image's dtype,
    array shape and voxel spacing."""
    def __init__(self, image_reference):
        super(SimpleITKAsNibabelHeader, self).__init__(
            data_dtype=sitk.GetArrayViewFromImage(image_reference).dtype,
            shape=sitk.GetArrayViewFromImage(image_reference).shape,
            zooms=image_reference.GetSpacing())
def make_affine(simpleITKImage):
    """Build a 4x4 RAS affine matrix from a SimpleITK image's geometry.

    SimpleITK reports physical coordinates in LPS; the first two axes are
    negated at the end to match nibabel's RAS convention.
    """
    # Physical positions of the three unit index steps and of the origin.
    probes = ((1, 0, 0),
              (0, 1, 0),
              (0, 0, 1),
              (0, 0, 0))
    points = np.array([simpleITKImage.TransformContinuousIndexToPhysicalPoint(p)
                       for p in probes])
    origin = points[3:]
    directions = points[0:3] - origin
    affine = np.concatenate([
        np.concatenate([directions, origin], axis=0),
        [[0.], [0.], [0.], [1.]]], axis=1)
    affine = np.transpose(affine)
    # Flip the x and y axes: LPS -> RAS.
    return np.matmul(np.diag([-1., -1., 1., 1.]), affine)
# %%
class Nifti_from_numpy(nib.Nifti1Image):
    """
    Minimal interface to use a SimpleITK image as if it were
    a nibabel object. Currently only supports the subset of the
    interface used by NiftyNet and is read only
    """
    def __init__(self, array, itk_image):
        # Like SimpleITKAsNibabel, but the voxel data comes from *array*
        # while the affine geometry is taken from *itk_image*.
        #try:
        self._SimpleITKImage = itk_image
        #except RuntimeError as err:
        #    if 'Unable to determine ImageIO reader' in str(err):
        #        raise nibabel.filebasedimages.ImageFileError(str(err))
        #    else:
        #        raise
        # self._header = SimpleITKAsNibabelHeader(self._SimpleITKImage)
        affine = make_affine(self._SimpleITKImage)
        # super(SimpleITKAsNibabel, self).__init__(
        #     sitk.GetArrayFromImage(self._SimpleITKImage).transpose(), affine)
        nib.Nifti1Image.__init__(
            self, array.transpose(), affine)
def extract_volume(volume):
    """Tile *volume* (C, H, W) into overlapping 224x224 patches on a 112 stride.

    Windows that would run past an edge are shifted back so they end exactly
    at the border (so edge patches overlap their neighbours by more than the
    usual 112 pixels).

    Returns (patches, x_offsets, y_offsets), where offsets are the top-left
    corners of each patch.
    """
    patches, x_offsets, y_offsets = [], [], []
    height, width = volume.shape[1], volume.shape[2]
    for x_start in range(0, height, 112):
        # Clamp the window so it never runs past the bottom edge.
        x = x_start if x_start + 224 <= height else height - 224
        for y_start in range(0, width, 112):
            y = y_start if y_start + 224 <= width else width - 224
            patches.append(volume[:, x:x + 224, y:y + 224])
            x_offsets.append(x)
            y_offsets.append(y)
            if y == width - 224:
                break
        if x == height - 224:
            break
    return patches, x_offsets, y_offsets
def construct_volume(volumes, x_coord, y_coord):
    """Stitch overlapping patch predictions back into one full-plane tensor.

    Each entry of *volumes* is a (1, C, 224, 224) prediction whose top-left
    corner on the full plane is (x_coord[i], y_coord[i]). Overlapping areas
    are averaged by dividing the summed predictions by the per-pixel patch
    count.

    Returns a (C, x_len, y_len) tensor; it is moved to the GPU only when
    CUDA is actually available.
    """
    x_len = max(x_coord) + 224
    y_len = max(y_coord) + 224
    seg_parts = []
    count_parts = []
    for i in range(len(volumes)):
        # Paste the patch onto a zero canvas, and mark its footprint in a
        # parallel coverage map used for averaging.
        canvas = torch.zeros([volumes[i].shape[0], volumes[i].shape[1], x_len, y_len], dtype=torch.float32)
        coverage = torch.zeros([volumes[i].shape[0], volumes[i].shape[1], x_len, y_len])
        x_start = x_coord[i]
        y_start = y_coord[i]
        canvas[:, :, x_start:x_start + 224, y_start:y_start + 224] = volumes[i]
        coverage[:, :, x_start:x_start + 224, y_start:y_start + 224] = torch.ones(volumes[i].shape)
        seg_parts.append(canvas)
        count_parts.append(coverage)
    seg_matrix = torch.sum(torch.cat(seg_parts, 0), 0)
    mul_matrix = torch.sum(torch.cat(count_parts, 0), 0)
    seg_final = torch.div(seg_matrix, mul_matrix)
    # FIX: the original unconditionally called .cuda(), which crashed on
    # CPU-only machines even though the script supports --cuda False.
    if cuda.is_available():
        seg_final = seg_final.cuda()
    return seg_final
# %%
if __name__ == "__main__":
    # --- CLI and device setup -------------------------------------------
    args = parser.parse_args()
    evaluating = args.evaluating
    use_cuda = args.cuda
    slice_begin = args.begin
    slice_end = args.end
    view = args.view
    if not cuda.is_available():
        print('No available GPU can be used for computation!')
        use_cuda = False
    num_channels = args.slices
    # num_channels = 3
    #fn_network = path.expanduser('~/tmp/resu-net3D/checkpoints/resu3d_checkpoint_ep0578.pth.tar')
    #fn_network = path.join(args.network_path, 'resu_best.pth.tar')
    #load the trained best 2D model
    # fn_network = path.join(args.network_path,'resunet_checkpoint_final.pth.tar')
    fn_network = path.join(args.network_path,'resu_best_' + view + '.pth.tar')
    print('Loading network from <{}>'.format(fn_network))
    if not path.isfile(fn_network):
        raise Exception('Missing network <{}>! File Not Found!'.format(fn_network))
    model_axial, optimizer = load_network(fn_network, gpu=use_cuda)
    # Set model to evaluation mode
    model_axial.eval()
    # --- Input loading: DICOM directory or NIfTI file -------------------
    #img_filename = path.expanduser(args.input_filename)
    #file in computer/home/data/ct_nih
    img_filename = args.input_filename
    print('Input image for segmentation:\t{}'.format(img_filename))
    dicom_input = False
    # Check if it is DICOM folder
    if path.isdir(img_filename):
        reader = sitk.ImageSeriesReader()
        dicom_names = reader.GetGDCMSeriesFileNames( img_filename )
        reader.SetFileNames(dicom_names)
        image = reader.Execute()
        dicom_input = True
        w, h, d = image.GetSize()
        img_data = sitk.GetArrayFromImage(image)
    else:
        volume = load_image(img_filename, evaluating, args.label_filename)
        image, label = volume['image'], volume['label']
        w, h, d = image.shape[:3]
        img_data = np.squeeze(image.get_data())
    print('Size of the input image: {}x{}x{}'.format(w, h, d))
    # --- Intensity clipping to [-200, 200] HU, scaled to [-1, 1] --------
    img_data = img_data.astype(np.float32)
    if view == 'axial':
        img_data = img_data
    elif view == 'coronal':
        img_data = img_data.transpose((2,0,1))
    else:
        img_data = img_data.transpose(2,1,0)
    img_data[img_data > 200] = 200.0
    img_data[img_data < -200] = -200.0
    img_data /= 200.0
    print('Segmenting image...')
    start_time = time.time()
    results = []
    num_half_channels = num_channels >> 1
    # Define the range of segmentation
    first = max(num_half_channels, slice_begin)
    last = min(d - num_half_channels - 1, slice_end)
    #last = min(d - num_channels + 1, slice_end)
    num_segmented_slices = last - first + 1
    print('Segmenting {} slices between [{}, {}]'.format(
        num_segmented_slices, first, last))
    # Slices before the segmented range are padded with empty masks.
    for i in range(first):
        #results.append(np.zeros((1,1,w,h)))
        results.append(np.zeros((1,h,w)))
    # --- Slice-by-slice inference: each step feeds num_channels adjacent
    # slices through the network and keeps the centre-slice prediction.
    #for depth in range(d - num_channels + 1):
    for depth in range(first - num_half_channels,
                       last - num_half_channels):
        if dicom_input:
            subvolume = img_data[depth:depth+num_channels,:,:]
        else:
            subvolume = img_data[:,:,depth:depth+num_channels]
            subvolume = subvolume.transpose((2, 1, 0))
        subvolumes, x_coor, y_coor = extract_volume(subvolume)
        outputs = []
        for volume in subvolumes:
            volume = volume[np.newaxis,:,:,:]
            volume = Variable(torch.from_numpy(volume), volatile=True).float()
            if use_cuda:
                volume = volume.cuda()
            #subs.append(subvolume)
            # output1, output2, output3, output4, output5 = model_axial(volume)
            output5 = model_axial(volume)
            # output_s = nn.Softmax2d()(output5)
            outputs.append(output5)
        # Stitch the patch predictions and take the argmax over classes.
        output = construct_volume(outputs, x_coor, y_coor)
        output = output.max(dim=0)[1].cpu().data.numpy()
        output = output[np.newaxis,:,:]
        results.append(output)
        #results.append(output.cpu().data.numpy())
    print('It took {:.1f}s to segment {} slices'.format(
        time.time() - start_time, num_segmented_slices))
    # Pad the tail of the volume with empty masks as well.
    #for i in range(num_half_channels):
    for i in range(d - last):
        #results.append(np.zeros((1,1,w,h)))
        results.append(np.zeros((1,h,w)))
    results = np.squeeze(np.asarray(results))
    #dsize = list(results.shape)
    c, h, w = results.shape
    #print('Segmentation result in CxHxW: {}x{}x{}'.format(c, h, w))
    # Restore the original axis order for NIfTI inputs.
    if not dicom_input:
        if view == 'axial':
            results = np.transpose(results, (2, 1, 0))
        elif view == 'coronal':
            results = np.transpose(results,(1, 0, 2))
        else:
            results = results
    print('Segmentation result in HxWxC: {}x{}x{}'.format(h, w, c))
    # results[results > 0.49] = 1
    # results[results < 0.5] = 0
    results = results.astype(np.uint8)
    if evaluating:
        label_data = label.get_data()
        # remove tumor label
        label_data[label_data > 1] = 1
        dice = compute_dice(results, label_data)
        print('Dice score of ResU-Net: {:.3f}'.format(dice))
    # print('Starting morphological post-processing...')
    # #print('no postprocess...')
    # # perform morphological operation
    # #remove small noisy segmentation
    # results = ndimage.binary_opening(results, iterations=5)
    # #Generate smooth segmentation
    # results = ndimage.binary_dilation(results, iterations=3)
    # results = ndimage.binary_fill_holes(results)
    # results = ndimage.binary_erosion(results, iterations=3)
    # perform largest connected component analysis
    # labeled_array, num_features = ndimage.label(results)
    # size_features = np.zeros((num_features))
    # for i in range(num_features):
    #     size_features[i] = np.sum(labeled_array == i+1)
    # results = np.zeros_like(labeled_array)
    # results[labeled_array == np.argmax(size_features) + 1] = 1
    # --- Organ-wise connected-component post-processing ------------------
    # Label values: 1 = liver, 2 = kidney, 3 = spleen.
    # NOTE(review): for i == 1 BOTH the liver branch and the trailing `else`
    # (spleen-style) branch execute, because the `else` is attached to
    # `if i == 2` only; the mask actually accumulated for label 1 therefore
    # comes from the else branch (with the min_co crop applied). The liver
    # branch's own mask is only used to derive min_co. Probably an `elif`
    # was intended -- confirm before changing.
    results_post = np.zeros_like(results)
    min_co = 0
    for i in range(1, 4):
        #liver
        if i ==1:
            results_i = np.zeros(results.shape)
            # results_i = results_i.cuda().clone()
            results_i[results == i] = 1
            labeled_array_i, num_features_i = ndimage.label(results_i)
            size_features_i = np.zeros((num_features_i))
            for j in range(num_features_i):
                size_features_i[j] = np.sum(labeled_array_i == j+1)
            results_i = np.zeros_like(labeled_array_i)
            results_i[labeled_array_i == np.argmax(size_features_i) + 1] = i
            results_i = results_i.astype(np.uint8)
            # min_co = 80% of the lowest slice index touched by the liver;
            # used below to crop kidney/spleen candidates.
            summed_1 = np.sum(results_i.sum(axis=0), axis=0)
            non0_list = np.asarray([i for i in range(summed_1.size)])
            non0_list = non0_list[summed_1 > 1]
            min_co = 0.8 * np.min(non0_list)
            min_co = int(min_co)
            print('min_co', min_co)
        #kidney
        if i == 2:
            results_i = np.zeros(results.shape)
            # results_i = results_i.cuda().clone()
            results_i[results == i] = 1
            results_i[:,:,:min_co] = 0
            labeled_array_i, num_features_i = ndimage.label(results_i)
            size_features_i = np.zeros((num_features_i))
            for j in range(num_features_i):
                size_features_i[j] = np.sum(labeled_array_i == j+1)
            results_i = np.zeros_like(labeled_array_i)
            # print('idx1:',np.argmax(size_features_i))
            # Keep the two largest components (left and right kidney).
            results_i[labeled_array_i == np.argmax(size_features_i) + 1] = i
            results1_i = np.zeros_like(labeled_array_i)
            idx2 = np.argsort(-size_features_i)[1]
            # print('idx2:',idx2)
            results1_i[labeled_array_i == idx2 + 1] = i
            results_i = results_i + results1_i
            results_i = results_i.astype(np.uint8)
        #spleen
        else:
            results_i = np.zeros(results.shape)
            # results_i = results_i.cuda().clone()
            results_i[results == i] = 1
            results_i[:,:,:min_co] = 0
            labeled_array_i, num_features_i = ndimage.label(results_i)
            size_features_i = np.zeros((num_features_i))
            for j in range(num_features_i):
                size_features_i[j] = np.sum(labeled_array_i == j+1)
            results_i = np.zeros_like(labeled_array_i)
            results_i[labeled_array_i == np.argmax(size_features_i) + 1] = i
            results_i = results_i.astype(np.uint8)
        results_post += results_i
    results = results_post
    # results = results.astype(np.uint8)
    # Create the segmentation image for saving
    if dicom_input:
        new_image = Nifti_from_numpy(results, image)
    else:
        header = image.header
        header.set_data_dtype(np.uint8)
        # if nifty1
        if header['sizeof_hdr'] == 348:
            new_image = nib.Nifti1Image(results, image.affine, header=header)
        # if nifty2
        elif header['sizeof_hdr'] == 540:
            new_image = nib.Nifti2Image(results, image.affine, header=header)
        else:
            raise IOError('Input image header problem')
    #seg_dir = path.expanduser('~/tmp/resu-net/segmentation')
    #fn_seg = path.join(seg_dir, 'segmentation.nii')
    fn_seg = path.expanduser(args.output_filename)
    print('Writing segmentation result into <{}>...'.format(fn_seg))
    #mu.write_mhd_file(fn_seg, results, meta_dict=header)
    nib.save(new_image, fn_seg)
    print('Segmentation result has been saved.')
    # Compute Dice for evaluating
    if evaluating:
        dice = compute_dice(results, label_data)
        print('Final Dice score: {:.3f}'.format(dice))
|
# Read an integer from the user; greet and echo it when it is positive,
# then print the farewell unconditionally.
num=int(input("Enter positive integer: "))
if num>0:
    print("Hello")
    print(num)
print("Welcome")
|
class MeasureList:
    """Holds one change metric sampled across a commit history.

    One slot is kept per commit transition (``num_commits - 1`` diffs);
    each slot is filled by calling *calc_metric_func* with the diff's
    added/deleted/total line counts and its bug-fix flag.
    """

    def __init__(self, num_commits, calc_metric_func, name):
        self._func = calc_metric_func
        self._values = [0] * (num_commits - 1)
        self._name = name

    def iterate_metric(self, index, added_lines, del_lines, total, is_bug_fix):
        """Evaluate the metric for one commit diff and store it at *index*."""
        self._values[index] = self._func(added_lines, del_lines, total, is_bug_fix)

    def summarize(self):
        # NOTE(review): self._summarize_func is never assigned anywhere in
        # this class, so calling this raises AttributeError -- dead code?
        return self._summarize_func(self._values)

    def get_sum(self):
        return sum(self._values)

    def get_avg(self):
        return sum(self._values) / float(len(self._values))

    def get_max(self):
        return max(self._values)

    def get_name(self):
        return self._name

    def set_sum_rank(self, buckets):
        """Rank = position of this metric's sum inside the sorted *buckets*."""
        self._sum_rank = buckets.index(self.get_sum())

    def get_sum_rank(self):
        return self._sum_rank

    def __str__(self):
        return '%s: AVG %s MAX %s' % (self._name, self.get_avg(), self.get_max())
def added_vs_total(added_lines, del_lines, total, is_bug_fix):
    """Fraction of the function's total LOC that this diff added."""
    return float(added_lines) / total
def del_vs_total(added_lines, del_lines, total, is_bug_fix):
    """Fraction of the function's total LOC that this diff deleted."""
    return float(del_lines) / total
def num_change(added_lines, del_lines, total, is_bug_fix):
    """1 when the diff touched any lines at all, else 0."""
    return int(abs(added_lines) + abs(del_lines) > 0)
def added_vs_deleted(added_lines, del_lines, total, is_bug_fix):
    """Negated added:deleted ratio; a non-positive delete count divides by 1."""
    divisor = del_lines if del_lines > 0 else 1
    return -(float(added_lines) / divisor)
def add_bug_commit(added_lines, del_lines, total, is_bug_fix):
    """1 when a bug-fix commit actually changed lines, else 0."""
    if is_bug_fix and (added_lines > 0 or del_lines > 0):
        return 1
    return 0
class NewFunction:
    """Change-history record for one source function, used to build a
    feature vector (churn ratios, author counts, bug-fix counts, ages).

    FIX: the debug output in add_datapoint used Python 2 ``print x``
    statements, which are syntax errors under Python 3; they now use the
    call form, which prints identically on both interpreters.
    """
    def __init__(self, name, file_path, init_timestamp):
        self._name = name
        self._path = file_path
        self._added = []         # lines added per touch
        self._deleted = []       # lines deleted per touch
        # binary vector, 0 = not bug commit, 1 = bug commit
        self._bugs = []
        self._total_loc = []     # total LOC after each touch
        # use this to track number of authors at each touch
        self._uniq_authors = []
        self._num_authors = []
        self._start_age = init_timestamp
        self._last_touched = init_timestamp
        self._future_bugs = 0
    def add_datapoint(self, added, deleted, new_total, author, commit_ts, is_bug_fix):
        """Record one commit's effect on this function."""
        self._added.append(added)
        self._deleted.append(deleted)
        if new_total == 0:
            # Debug aid: a zero LOC total is suspicious -- dump the datapoint.
            print(added)
            print(deleted)
            print(self._name)
        self._total_loc.append(new_total)
        if author not in self._uniq_authors:
            self._uniq_authors.append(author)
        self._num_authors.append(len(self._uniq_authors))
        self._last_touched = commit_ts
        self._bugs.append(1 if is_bug_fix else 0)
    def get_max_ratio(self, values1, values2):
        """Largest pairwise ratio values1[i]/values2[i]; -1 for empty input."""
        curr_max = -1
        for v1, v2 in zip(values1, values2):
            ratio = self.get_ratio(v1, v2)
            if ratio > curr_max:
                curr_max = ratio
        return curr_max
    def get_avg_ratio(self, values1, values2):
        """Mean pairwise ratio values1[i]/values2[i]."""
        ratio_values = [self.get_ratio(v1, v2) for v1, v2 in zip(values1, values2)]
        return sum(ratio_values) / float(len(ratio_values))
    def get_ratio(self, v1, v2):
        """v1/v2 with a zero divisor treated as 1 (avoids ZeroDivisionError)."""
        div_val = v2 if v2 != 0 else 1
        return v1 / float(div_val)
    def iterate_future_bugs(self):
        """Count one bug fix landing on this function after the sample window."""
        self._future_bugs += 1
    def get_data_vector(self):
        """Return the feature dict for this function (key names are stable)."""
        return {
            'Added/Total': sum(self._added) / float(sum(self._total_loc)),
            'Max Added/Total': self.get_max_ratio(self._added, self._total_loc),
            'Avg Added/Total': self.get_avg_ratio(self._added, self._total_loc),
            'Deleted/Total': sum(self._deleted) / float(sum(self._total_loc)),
            'Max Deleted/Total': self.get_max_ratio(self._deleted, self._total_loc),
            'Avg Deleted/Total': self.get_avg_ratio(self._deleted, self._total_loc),
            'Added/Deleted': self.get_ratio(sum(self._added), sum(self._deleted)),
            'Max Added/Deleted': self.get_max_ratio(self._added, self._deleted),
            'Avg Added/Deleted': self.get_avg_ratio(self._added, self._deleted),
            'bugs': sum(self._bugs),
            'authors': len(self._uniq_authors),
            'start_age': self._start_age,
            'last_touched': self._last_touched,
            'total_touches': len(self._total_loc),
            'future_bugs': (4 if self._future_bugs > 0 else 2)
        }
class Function:
    """Aggregates the per-commit change measures for one source function."""

    def __init__(self, name, num_commits, file_path):
        self._name = name
        self._make_delta_list(num_commits)
        self._ranks = []
        self._path = file_path

    def _make_delta_list(self, num_commits):
        """Create one MeasureList per tracked metric, preserving order."""
        self._added_measure = MeasureList(num_commits, added_vs_total, 'Added/Total LOC')
        self._deleted_measure = MeasureList(num_commits, del_vs_total, 'Del/Total LOC')
        self._changed_measure = MeasureList(num_commits, num_change, 'Num Changes')
        self._added_vs_del = MeasureList(num_commits, added_vs_deleted, 'Added Vs Deleted')
        self._bug_fix_measure = MeasureList(num_commits, add_bug_commit, 'Bug Fixes')
        self._measure_list = [
            self._added_measure,
            self._deleted_measure,
            self._changed_measure,
            self._added_vs_del,
            self._bug_fix_measure,
        ]

    def iterate_measures(self, commit_diff_num, added_lines, deleted_lines, total, is_bug_fix):
        """Feed one commit diff into every tracked metric."""
        for measure in self._measure_list:
            measure.iterate_metric(commit_diff_num, added_lines, deleted_lines, total, is_bug_fix)

    def get_added_metric(self):
        return self._added_measure

    def get_deleted_metric(self):
        return self._deleted_measure

    def get_changed_metric(self):
        return self._changed_measure

    def get_added_vs_del_metric(self):
        return self._added_vs_del

    def get_bugfix_metric(self):
        return self._bug_fix_measure

    def __str__(self):
        parts = ['%s::%s:' % (self._path, self._name)]
        for measure in self._measure_list:
            parts.append('\n\t\t%s: Rank: %s' % (measure.get_name(), measure.get_sum_rank() + 1))
        return ''.join(parts)

    def get_dict(self):
        """Serializable summary: path, name, and per-metric rank/value."""
        return {
            'path': self._path,
            'name': self._name,
            'measures': [{'name': measure.get_name(),
                          'rank': measure.get_sum_rank() + 1,
                          'value': measure.get_sum()}
                         for measure in self._measure_list],
        }

    def add_rank(self, value):
        self._ranks.append(value)

    def get_rank_sum(self):
        return sum(self._ranks) / float(len(self._ranks))

    def get_sum_rank_avg(self):
        """Mean of the per-metric sum ranks."""
        sum_ranks = [measure.get_sum_rank() for measure in self._measure_list]
        return sum(sum_ranks) / float(len(sum_ranks))
class ChangedClass:
    """Tracks every changed function inside one source file."""

    def __init__(self, num_commits, path):
        # Keyed on the function name.
        self._functions = {}
        self._num_commits = num_commits
        self._path = path

    def add_function_metric(self, func_name, index, added, deleted, total, is_bug_fix):
        """Route one commit's numbers to the right Function, creating it lazily."""
        try:
            func = self._functions[func_name]
        except KeyError:
            func = Function(func_name, self._num_commits, self._path)
            self._functions[func_name] = func
        func.iterate_measures(index, added, deleted, total, is_bug_fix)

    def get_func_count(self):
        return len(self._functions)

    def get_functions(self):
        return self._functions.values()

    def __str__(self):
        pieces = ['%s:' % self._path]
        pieces.extend('\n\t%s' % str(f) for f in self._functions.values())
        return ''.join(pieces)
|
import pytest
from faker import Faker
from tests.AbstractTest import AbstractTest
fake = Faker()
class TestCIText(AbstractTest):
    """Checks that the Student email column stores and matches values in
    lowercase regardless of the case supplied (citext-style behaviour --
    presumably a case-insensitive column type; confirm against the model)."""
    @classmethod
    @pytest.fixture(scope='class', autouse=True)
    def setup_class(cls, Student):
        # Persist one student whose email is deliberately upper-cased.
        super(TestCIText, cls).setup_class()
        cls.email_upper = fake.email().upper()
        student = Student(name=fake.name(), email=cls.email_upper)
        cls.checkin(student)
        cls.student = cls.persist(student)
    def test_always_lower(self, Student):
        # Stored value comes back lower-cased, even after re-writing the
        # upper-case variant through update().
        student = Student.filter().one()
        assert student.email.lower() == student.email
        student.update(email=self.email_upper).save()
        assert self.email_upper != student.email
        assert student.email == student.email.lower()
    def test_query_by_upper(self, Student):
        # A lookup with the upper-case email must still match the stored row.
        student = Student.filter({"email": self.email_upper}).one()
        assert student.email == student.email.lower()
|
# Read a count (not used below) and a space-separated number string.
n = int(input())
s = input().strip() # in a form "3 4 5 6 2"
def reverse(s, a, b):
    """Return the space-separated words of ``s[a:b+1]`` in reverse order.

    Recursive: peel the first and last word off the segment and wrap them
    around the reversed middle. Fixes two defects of the original:
    * a multi-character word in the middle of an odd-length list crashed
      with ValueError (``s[a]`` was only correct for 1-character tokens);
    * an even number of words produced a double space around the empty
      middle segment.
    """
    if a > b:
        return ''
    segment = s[a:b + 1]
    # A segment with no space is a single word -- return it whole.
    if ' ' not in segment:
        return segment
    left_number = segment[:segment.index(' ')]
    right_number = segment[segment.rindex(' ') + 1:]
    middle = reverse(s, a + len(left_number) + 1, b - len(right_number) - 1)
    if middle:
        return right_number + ' ' + middle + ' ' + left_number
    return right_number + ' ' + left_number
print(reverse(s, 0, len(s) - 1)) |
import logging
from django.contrib.auth.models import (AbstractBaseUser, PermissionsMixin,
BaseUserManager)
from django.core.mail import send_mail
from django.db import models
from django.db.models import signals
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
# from myStock.core import async_tasks
# from myStock.core import helpers
LOG = logging.getLogger('myStock.%s' % __name__)
# Create your models here.
class UserManager(BaseUserManager):
    # use_in_migrations = True
    """
    Manager handling creation of regular users and superusers.
    """
    def create_user(self, email, password=None, username=None, **extra_fields):
        # NOTE(review): every user created here gets is_staff=True, which
        # grants admin-site login; confirm this is intended for non-admins.
        now = timezone.now()
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(
            email=UserManager.normalize_email(email),
            is_staff=True, is_active=True, is_superuser=False,
            last_login=now, date_joined=now, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, email, password, **extra_fields):
        # Builds on create_user, then promotes the account to superuser.
        u = self.create_user(email, password=password, **extra_fields)
        u.is_staff = True
        u.is_active = True
        u.is_superuser = True
        u.save(using=self._db)
        return u
class User(AbstractBaseUser, PermissionsMixin):
"""
Users within the Django authentication system are represented by this
model.
Email and password are required. Other fields are optional.
"""
email = models.EmailField(_('email address'), blank=False, unique=True, max_length=254)
nick_name = models.CharField(_('nick name'), unique=False, max_length=30, help_text=_('Nick name of the user'))
full_name = models.CharField(_('full name'), max_length=30, blank=False)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_admin = models.BooleanField(_('admin status'), default=False,
help_text=_('Designates whether the user is admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'email'
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"""
Returns the short name for the user.
"""
return self.first_name
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
def has_changed(instance, field):
    """Return True when *field* on *instance* differs from the DB-stored value.

    Unsaved instances (no pk) are reported as unchanged. Issues one query
    per call. NOTE(review): an identical helper is defined again further
    down this module (after Profile); the later definition wins at import.
    """
    if not instance.pk:
        return False
    old_value = instance.__class__._default_manager.filter(pk=instance.pk).values(field).get()[field]
    return not getattr(instance, field) == old_value
class Profile(models.Model):
    """
    Profile model to save a user profile, linked to the auth user by a
    one-to-one field.
    """
    # One profile per auth user; deleting the user cascades to the profile.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    about_me = models.CharField(max_length=255, blank=True)
    profile_pic = models.FileField(upload_to='profile/', blank=True)
    class Meta:
        verbose_name = _('profile')
        verbose_name_plural = _('profiles')
    def __str__(self):
        return self.user.email
def has_changed(instance, field):
    """Duplicate of the module-level has_changed helper defined above;
    this later definition is the one that remains bound at import time."""
    if not instance.pk:
        return False
    old_value = instance.__class__._default_manager.filter(pk=instance.pk).values(field).get()[field]
    return not getattr(instance, field) == old_value
class Participants(models.Model):
    """Registration record for an event participant or volunteer.

    Holds contact, demographic, and fee information, optionally linked to a
    site User account.  A row may represent a volunteer, a participant, or
    both (see is_volunteer / is_participant).
    """
    # NOTE(review): most columns are free-form TextFields (phone, amount,
    # gender, fee status) -- confirm whether stricter types are wanted.
    name = models.TextField(_('name'), help_text=_('Name of the Participant'))
    email = models.EmailField(_('email address'), blank=False, unique=True, max_length=254)
    phoneNumber = models.TextField(_('phone number'), blank=False, unique=False,)
    college = models.TextField(_('college'), help_text=_('College of the participant'))
    stream = models.TextField(_('stream'), help_text=_('Stream of the Participant'))
    subregion = models.TextField(_('subregion'), help_text=_('Subregion of the Participant'))
    zone = models.TextField(_('zone'), help_text=_('Zone of the Participant'))
    # NOTE(review): help_text says 'event date' but the field is the
    # participant's date of birth -- likely a copy/paste slip; confirm.
    dob = models.DateField(_('date of birth'), blank=True, null=True, help_text=_('event date'))
    gender = models.TextField(_('gender'), blank=True, null=True, help_text=_('Gender of the Participant'))
    fee_status = models.TextField(_('fee status'), blank=True, null=True, help_text=_('Fee status of the Participant'))
    amount = models.TextField(_('amount'), blank=True, null=True, help_text=_('Amount paid by the Participant'))
    responsible_person = models.TextField(_('responsible person'), blank=True, null=True,
                                          help_text=_('Responsible Person of the Participant'))
    responsible_person_contact = models.TextField(_('responsible person contact'), blank=True, null=True,
                                                  help_text=_('Contact of the Responsible Person of the Participant'))
    is_volunteer = models.BooleanField()
    ministry = models.TextField(_('ministry of the volunteer'), blank=True, null=True,
                                help_text=_('Ministry of the volunteer'))
    is_participant = models.BooleanField()
    # Optional link to an auth user; deleting the user deletes this record.
    user = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)

    class Meta:
        verbose_name = _('Participant')
        verbose_name_plural = _('Participants')

    def __str__(self):
        # The unique email doubles as the display identifier.
        return self.email

    def save(self, *args, **kwargs):
        # NOTE(review): pass-through override with no added behavior; kept as
        # a hook point, but it could be removed with no effect.
        super(Participants, self).save(*args, **kwargs)
# 2. Given a non-empty array of digits representing a non-negative integer, add one to the integer.
# -- The digits are stored such that the most significant digit is at the head of the list, and each element in the array contains a single digit.
# -- You may assume the integer does not contain any leading zero, except the number 0 itself.
# -- LeetCode link: https://leetcode.com/explore/interview/card/top-interview-questions-easy/92/array/559/
def array_sequence(array, i):
    """Extend *array* in place with *i* consecutive increments of its last
    element and return it (e.g. [1, 2, 3] with i=2 -> [1, 2, 3, 4, 5]).

    Fixes over the original:
    - module-level globals (returned_array / number_of_elements) removed;
    - iterating the list while appending to it replaced by a while loop;
    - an empty input previously fell through the for loop and returned None --
      it is now returned unchanged.
    """
    if not array:
        return array
    target_length = i + len(array)
    while len(array) < target_length:
        array.append(array[-1] + 1)
    return array
print(array_sequence([1,15,13,50,43,-1,0,17,22,55],10))
# def incrementer(array):
# global new_array
# new_array = array
# number_at_head = array[len(array)-1]
# next_number = (number_at_head + 1)
# new_array.append(next_number)
# yield array
# # x = incrementer(list_of_ints)
# # print(x)
# # print(incrementer(list_of_ints))
# s = incrementer(list_of_ints)
# while len(list_of_ints) < 10:
# incrementer(list_of_ints)
# def firstN(n):
# num, nums = 0,[]
# while num < n:
# yield num
# num +=1
# # sum_of_first_n = sum(firstN(1000))
# # print(sum_of_first_n)
# array_of_sequence =[]
# s = firstN(1000)
# array_of_sequence.append(s)
# print(array_of_sequence) |
from parse.ast import Node
import re
from antlr4 import *
def graphviz(t, is_root_node, node_text, get_children, relations=None, labels=None, node_key=0):
    """Recursively collect graphviz edges and node labels for tree *t*.

    Bug fix: *relations* and *labels* previously used mutable default
    arguments ([] and {}), so successive top-level calls accumulated edges
    and labels from earlier trees.  Fresh containers are now created per call.

    :param t: current tree node
    :param is_root_node: predicate that stops recursion.  NOTE(review):
        despite its name it is used as a *leaf* test by callers -- confirm.
    :param node_text: node -> display text
    :param get_children: node -> iterable of child nodes
    :return: (last used key, relations list of (parent, child), labels dict)
    """
    if relations is None:
        relations = []
    if labels is None:
        labels = {}
    child_key = node_key
    labels[node_key] = sanitize_graphviz_label(node_text(t))
    if is_root_node(t):
        return node_key, relations, labels
    for child in get_children(t):
        child_key += 1
        relations.append((node_key, child_key))
        # NOTE(review): '+=' makes key numbering grow non-contiguously; keys
        # stay unique and strictly increasing, so behavior is preserved as-is.
        child_key += graphviz(child, is_root_node, node_text, get_children, relations, labels, child_key)[0]
    return child_key, relations, labels
def graphviz_output(relations, labels):
    """Render collected edges and node labels as graphviz 'digraph' source."""
    lines = ["digraph g {"]
    lines.extend("\t{} -> {}".format(parent, child) for parent, child in relations)
    lines.extend("\t{}[label=\"{}\"]".format(key, text) for key, text in labels.items())
    lines.append("}")
    return "\n".join(lines) + "\n"
def ctx_text(ctx, symbolic_names):
    """Human-readable label for an ANTLR parse-tree node.

    Terminals render as TOKENNAME(text) with whitespace stripped from the
    token text (empty after stripping -> empty string); rule contexts render
    as their class name minus the ANTLR 'Context' suffix.
    """
    if not isinstance(ctx, tree.Tree.TerminalNodeImpl):
        return ctx.__class__.__name__.replace("Context", "")
    stripped = re.sub(r'\s+', '', ctx.symbol.text)
    if stripped:
        return "{}({})".format(symbolic_names[ctx.symbol.type], stripped)
    return stripped
def sanitize_graphviz_label(text):
    """Escape double quotes so *text* is safe inside a quoted graphviz label."""
    return text.replace('"', '\\"')
def draw_syntax_tree(theTree, symbolic_names):
    """Dump the ANTLR parse tree rooted at *theTree* to the 'graphviz' file."""
    is_terminal = lambda node: isinstance(node, tree.Tree.TerminalNodeImpl)
    text_of = lambda node: ctx_text(node, symbolic_names)
    children_of = lambda node: node.getChildren()
    write_gv_output(graphviz(theTree, is_terminal, text_of, children_of))
def draw_ast(ast : Node):
    """Dump the AST rooted at *ast* to the 'graphviz' file.

    Bug fix: the leaf predicate previously closed over the root ('ast')
    instead of the node under inspection ('t'), so it tested the root's
    children for every node and never identified leaves.
    """
    out = graphviz(ast, lambda t: len(t.get_children()) == 0, lambda x: x.__str__(), lambda x: x.get_children())
    write_gv_output(out)
def write_gv_output(output):
    """Write a (key, relations, labels) triple from graphviz() to ./graphviz.

    Fix: the file handle was opened and closed manually, leaking on a write
    error; a context manager now guarantees it is closed.
    """
    gv = graphviz_output(output[1], output[2])
    with open("graphviz", "w+") as f:
        f.write(gv)
|
# Generated by Django 3.1.3 on 2020-11-14 02:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Second tickets migration: rename Ticket.name, add Ticket.ticket_id,
    and create the BookingTickets join model."""

    # Must run after the initial tickets migration and after whatever model
    # is configured as AUTH_USER_MODEL (swappable dependency).
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('tickets', '0001_initial'),
    ]

    operations = [
        # Ticket.name -> Ticket.username.
        migrations.RenameField(
            model_name='ticket',
            old_name='name',
            new_name='username',
        ),
        # Optional integer identifier; nullable so existing rows need no value.
        migrations.AddField(
            model_name='ticket',
            name='ticket_id',
            field=models.IntegerField(blank=True, null=True),
        ),
        # Records which user booked which ticket for which showing time;
        # rows are removed when either the ticket or the user is deleted.
        migrations.CreateModel(
            name='BookingTickets',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('movie_date', models.DateTimeField()),
                ('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tickets.ticket')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import filter
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os
from collections import OrderedDict, defaultdict
import seaborn as sns
plt.style.use('default')
# Fonttype 42 presumably keeps figure text as editable TrueType in saved
# PDF/PS output rather than outlined glyphs -- TODO confirm intent.
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
# Small text / arial: publication-figure defaults used throughout this module.
mpl.rcParams['font.size'] = 7
mpl.rcParams['font.family'] = 'arial'
def nice_names(key):
    """Translate an internal result/condition key into a display-ready label.

    Unknown keys are returned unchanged, so this is safe to call on any value.
    """
    nice_name_dict = {
        'trial': 'Trials',
        'lick_smoothed': 'Number of Licks',
        'boolean_smoothed': '% Trials with Licks',
        'lick': 'Number of Licks',
        'odor': 'Odor',
        'mouse': 'Mouse',
        'half_max': 'Learning Rate',
        'odor_standard': 'Odor',
        'condition_name': 'Experimental Condition',
        'OFC_JAWS': r'OFC$_{\rm INH}$',
        'BLA_JAWS': r'BLA$_{\rm INH}$',
        'OFC_LONGTERM': r'OFC$_{\rm LT}$',
        'BLA_LONGTERM': r'BLA$_{\rm LT}$',
        'odor_valence': 'Odor Valence',
        'csp_identity': 'CS+ ID',
        'csm_identity': 'CS- ID',
        'identity': 'ID',
        'valence': 'Valence',
        'False': '0',
        'True': '1',
        'day': 'Day',
        'BEHAVIOR_OFC_JAWS_PRETRAINING': 'PT IH',
        'BEHAVIOR_OFC_JAWS_DISCRIMINATION': 'DT IH',
        'BEHAVIOR_OFC_YFP': 'YFP',
    }
    # dict.get with a default replaces the explicit membership test + branch.
    return nice_name_dict.get(key, key)
def significance_str(x, y, val):
    """Annotate the current axes at (x, y) with a significance label for
    p-value *val*, and return the label text.

    Fixes: p-values exactly equal to .01 or .001 previously fell through all
    threshold branches into the generic 'P = %.2f' format (printing 'P = 0.00'
    for val == .001); the local name 'str' shadowed the builtin.
    """
    # Ordered thresholds make each elif unambiguous -- no boundary gaps.
    if val < .001:
        text = 'P < .001'
    elif val < .01:
        text = 'P < .01'
    elif val < .05:
        text = 'P < .05'
    else:
        text = 'P = {:.2f}'.format(val)
    plt.text(x, y, text)
    return text
def _easy_save(path, name, dpi=300, pdf=True, close=True):
    '''
    Convenience function for saving figures while taking care of making folders.

    :param path: save directory (created if missing)
    :param name: save name, without extension
    :param dpi: save dpi for .png format
    :param pdf: boolean, also save a transparent .pdf copy or not
    :param close: boolean, close the current figure after saving
    :return: None
    '''
    os.makedirs(path, exist_ok=True)
    figname = os.path.join(path, name)
    print('figure saved in {}'.format(figname))
    # The original wrapped these in single-argument os.path.join calls,
    # which are no-ops; the concatenated path is used directly.
    plt.savefig(figname + '.png', dpi=dpi)
    if pdf:
        plt.savefig(figname + '.pdf', transparent=True)
    if close:
        plt.close()
def _string_to_index(xdata):
    """Map an array of string labels to integer codes (numbered in order of
    first appearance) and return the codes plus prettified label names."""
    first_positions = np.unique(xdata, return_index=True)[1]
    ordered_labels = [xdata[pos] for pos in sorted(first_positions)]
    codes = np.zeros_like(xdata, dtype=int)
    for code, lab in enumerate(ordered_labels):
        codes[lab == xdata] = code
    pretty_labels = [nice_names(lab) for lab in ordered_labels]
    return codes, pretty_labels
def _plot(plot_function, x, y, color, label, plot_args, xjitter= 0):
if x.dtype == 'O' and y.dtype == 'O':
# print('plotted O')
for i in range(x.shape[0]):
# xj = x[i] + np.random.uniform(low=-xjitter,high=xjitter, size=x[i].shape)
plot_function(x[i], y[i], color=color, label=label, **plot_args)
else:
x = np.squeeze(x)
# xj = x + np.random.uniform(low=-xjitter, high=xjitter, size=x.shape)
y = np.squeeze(y)
plot_function(x, y, color=color, label=label, **plot_args)
def _plot_error(plot_function, x, y, err, color, label, plot_args):
if x.dtype == 'O' and y.dtype == 'O':
# print('plotted O')
for i in range(x.shape[0]):
plot_function(x[i], y[i], err[i], color=color, label=label, **plot_args)
else:
x_ = np.squeeze(x)
y_ = np.squeeze(y)
err_ = np.squeeze(err)
if y_.size == 1:
x_ = x
y_ = y
err_ = err
plot_function(x_, y_, err_, color=color, label=label, **plot_args)
def _plot_fill(plot_function, x, y, err, color, label, plot_args):
if x.dtype == 'O' and y.dtype == 'O':
# print('plotted O')
for i in range(x.shape[0]):
plot_function(x[i], y[i]-err[i], y[i] + err[i], color=color, label=label, **plot_args)
else:
x = np.squeeze(x)
y = np.squeeze(y)
err = np.squeeze(err)
plot_function(x, y-err, y+err, color=color, label=label, **plot_args)
def plot_results(res, x_key, y_key, loop_keys =None,
                 select_dict=None, path=None, colors= None, colormap='cool',
                 plot_function= plt.plot, ax_args={}, plot_args={},
                 xjitter = 0,
                 save = True, reuse = False, twinax = False, sort = False, error_key = '_sem',
                 fig_size = (2, 1.5), rect = (.25, .25, .6, .6), legend = True, name_str = ''):
    '''
    Plot y_key against x_key from a flattened results dict, drawing one
    colored line per unique combination of loop_keys, then save the figure.

    :param res: flattened dict of results (key -> numpy array; all arrays
        indexed in parallel)
    :param x_key: res key holding the x data
    :param y_key: res key holding the y data
    :param loop_keys: key or list of keys whose unique value combinations
        each get their own line, color, and legend entry
    :param select_dict: filter criteria applied to res before plotting
    :param path: save path
    :param colors: explicit color list; when given it overrides colormap
    :param colormap: colormap name used to spread line colors when colors is None
    :param plot_function: pyplot/seaborn callable used to draw (plt.plot,
        plt.errorbar, plt.fill_between, sns.swarmplot, sns.barplot)
    :param ax_args: additional args to pass to ax, such as ylim, etc. in dictionary format
    :param plot_args: extra kwargs forwarded to plot_function
    :param xjitter: NOTE(review): currently unused -- the jitter code in
        _plot is disabled
    :param save: if True save via _easy_save; if False return (save_path, name)
    :param reuse: draw into the current axes instead of creating a figure
    :param twinax: draw on a twin y-axis of the current axes (styled green)
    :param sort: sort all res arrays by the x data before plotting
    :param error_key: res key holding error values for errorbar/fill_between
    :param fig_size: figure size in inches when a new figure is created
    :param rect: axes rectangle for the new figure
    :param legend: whether to draw a legend
    :param name_str: suffix appended to the output file name
    :return: (save_path, name) when save is False, otherwise None
    '''
    # NOTE(review): ax_args/plot_args are mutable defaults; they are only
    # read here, so no state leaks, but confirm before relying on that.
    if select_dict is not None:
        res = filter.filter(res, select_dict)

    # Choose the target axes: reuse current, twin it, or make a new figure.
    if reuse:
        ax = plt.gca()
        if twinax:
            ax = ax.twinx()
    else:
        fig = plt.figure(figsize=fig_size)
        ax = fig.add_axes(rect)

    if sort:
        # Reorder every parallel array by ascending x so lines draw cleanly.
        ind_sort = np.argsort(res[x_key])
        for key, val in res.items():
            res[key] = val[ind_sort]

    if loop_keys != None:
        if isinstance(loop_keys, str):
            loop_keys = [loop_keys]
        # One line per unique combination of the loop key values.
        loop_combinations, loop_indices = filter.retrieve_unique_entries(res, loop_keys)
        if save:
            labels = [str(','.join(str(e) for e in cur_combination)) for cur_combination in loop_combinations]
        else:
            labels = [None] * len(loop_combinations)
        if colormap is None:
            cmap = plt.get_cmap('cool')
        else:
            cmap = plt.get_cmap(colormap)
        if colors is None:
            # Spread line colors evenly across the colormap.
            colors = [cmap(i) for i in np.linspace(0, 1, len(loop_combinations))]
        loop_lines = len(loop_combinations)
    else:
        # No looping: a single black (or caller-colored) unlabeled line.
        loop_lines = 1
        loop_indices = [np.arange(len(res[y_key]))]
        if colors is None:
            colors = ['black']
        else:
            colors = [colors]
        labels = [None]

    for i in range(loop_lines):
        color = colors[i]
        label = labels[i]
        plot_ix = loop_indices[i]
        x_plot = res[x_key][plot_ix]
        if type(x_plot[0]) != np.ndarray:
            # Scalar x values: map them through nice_names for display.
            x_plot = np.array([nice_names(x) for x in list(x_plot)])
        y_plot = res[y_key][plot_ix]
        # Dispatch on the identity of plot_function, since the error-style
        # and seaborn callables take different argument shapes.
        if plot_function == plt.errorbar:
            error_plot = res[error_key][plot_ix]
            _plot_error(plot_function, x_plot, y_plot, error_plot, color=color, label=label, plot_args=plot_args)
        elif plot_function == plt.fill_between:
            error_plot = res[error_key][plot_ix]
            _plot_fill(plot_function, x_plot, y_plot, error_plot, color=color, label=label, plot_args=plot_args)
        elif plot_function == sns.swarmplot:
            t = defaultdict(list)
            for k, v in res.items():
                t[k] = res[k][plot_ix]
            sns.swarmplot(x = x_key, y = y_key, hue=loop_keys[0], data=t, **plot_args)
            # Seaborn adds its own legend; drop it in favor of ours below.
            ax.get_legend().remove()
        elif plot_function == sns.barplot:
            import pandas as pd
            t = defaultdict(list)
            for k, v in res.items():
                t[k] = res[k][plot_ix]
            sns.barplot(x = x_key, y = y_key, data=t, **plot_args)
            ax.get_legend().remove()
        else:
            _plot(plot_function, x_plot, y_plot, color=color, label=label, plot_args=plot_args, xjitter=xjitter)

    ax.set(**ax_args)

    #format
    # plt.xticks(rotation=45)
    ax.set_ylabel(nice_names(y_key), fontsize = 7)
    ax.set_xlabel(nice_names(x_key), fontsize = 7)

    if x_key == 'time':
        # Time-course plots label the odor-on / odor-off / US events.
        xticks = res['xticks'][0]
        xticklabels = ['On', 'Off', 'US']
        ax.set_xticks(xticks)
        ax.set_xticklabels(xticklabels)

    if not twinax:
        # Standard despined look: only bottom/left spines.
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.xaxis.set_ticks_position('bottom')
        ax.yaxis.set_ticks_position('left')
    else:
        # Twin axis: color its labels/ticks green to distinguish the scale.
        ax.spines['top'].set_visible(False)
        ax.yaxis.label.set_color('green')
        ax.tick_params(axis='y', colors='green')

    if loop_keys and legend:
        nice_loop_str = '+'.join([nice_names(x) for x in loop_keys])
        # De-duplicate legend entries (multiple artists can share a label).
        handles, labels = ax.get_legend_handles_labels()
        by_label = OrderedDict(zip(labels, handles))
        l = ax.legend(by_label.values(), by_label.keys(), ncol = 2, fontsize = 7, frameon=False)
        try:
            # Shrink marker size in the legend; not all handle types support it.
            for handle in l.legendHandles:
                handle.set_sizes([5])
        except:
            pass

    if not loop_keys and legend:
        plt.legend(frameon=False)
    # l.set_title(nice_loop_str)
    # plt.setp(l.get_title(), fontsize=4)

    # Build the output file and folder names from the plotted keys/filters.
    if select_dict is None:
        name = 'figure'
    else:
        name = ''
        for k, v in select_dict.items():
            name += k + '_' + str(v) + '_'
    name += name_str
    folder_name = y_key + '_vs_' + x_key
    if loop_keys:
        loop_str = '+'.join(loop_keys)
        folder_name += '_vary_' + loop_str
    save_path = os.path.join(path, folder_name)
    if save:
        _easy_save(save_path, name, dpi=300, pdf=True)
    else:
        return save_path, name
def plot_weight(summary_res, x_key, y_key, val_key, title, vmin, vmax,
                xticklabel =None, yticklabel=None,
                label ='Accuracy', save_path = None, text='', mask=False,
                figsize = (1.626, 1.626), fontsize = 6):
    """Draw a value matrix (val_key over the x_key/y_key grid) as an annotated
    heatmap with a colorbar, then save it via _easy_save.

    :param summary_res: dict of parallel arrays; x_key/y_key are integer grid
        coordinates and val_key the cell values
    :param title: figure title
    :param vmin, vmax: color scale limits (also the colorbar ticks)
    :param xticklabel, yticklabel: explicit tick labels; when None, 1-based
        integer positions are used
    :param label: colorbar label
    :param save_path: base directory for the saved figure
    :param text: suffix appended to the output folder name
    :param mask: if True, hide the strict upper triangle of the matrix
    :param figsize, fontsize: figure size / annotation font size
    """
    x_len = len(np.unique(summary_res[x_key]))
    y_len = len(np.unique(summary_res[y_key]))
    x = summary_res[x_key]
    y = summary_res[y_key]
    z = summary_res[val_key]
    # Scatter the flat value list into a dense (row=y, col=x) matrix.
    w_plot = np.zeros((x_len, y_len))
    w_plot[y, x] = z

    rect = [0.2, 0.2, 0.6, 0.6]
    rect_cb = [0.82, 0.2, 0.02, 0.6]
    fig = plt.figure(figsize= figsize)
    ax = fig.add_axes(rect)

    if mask:
        m = np.tri(w_plot.shape[0], k=0).astype(bool)
        w_plot = np.ma.array(w_plot, mask=np.invert(m))  # mask out the lower triangle
        cmap = plt.get_cmap()
        cmap.set_bad('w')  # default value is 'k'

    # flipud puts row 0 at the top so the matrix reads like a table.
    im = plt.pcolor(np.flipud(w_plot), cmap='plasma', vmin=vmin, vmax=vmax)
    # im = plt.imshow(w_plot, cmap='jet', vmin=vmin, vmax=vmax, origin='upper')

    def _show_values(pc, fmt="%.2f", **kw):
        # Write each cell's value at its center, in black on bright cells and
        # white on dark cells for contrast.
        pc.update_scalarmappable()
        for p, color, value in zip(pc.get_paths(), pc.get_facecolors(), pc.get_array()):
            x, y = p.vertices[:-2, :].mean(0)
            if (value - vmin)/(vmax-vmin) > .5:
                color = (0.0, 0.0, 0.0)
            else:
                color = (1.0, 1.0, 1.0)
            ax.text(x, y, fmt % value, ha="center", va="center", color=color, fontsize= fontsize, **kw)
    _show_values(im)

    # for i, j, k in zip(x, y, z):
    #     plt.text(i-.15, j-.1, np.round(k,2))
    # import seaborn as sns
    # im = sns.heatmap(w_plot, annot=True)

    plt.title(title, fontsize=6)
    # ax.set_xlabel(x_key, labelpad=2)
    # ax.set_ylabel(y_key, labelpad=2)
    plt.axis('tight')

    # Remove all spines and tick marks for a clean matrix look.
    for loc in ['bottom', 'top', 'left', 'right']:
        ax.spines[loc].set_visible(False)
    ax.tick_params('both', length=0)

    # Ticks sit at cell centers; y is reversed to match the flipped matrix.
    xticks = np.arange(0, w_plot.shape[1]) + .5
    yticks = np.arange(0, w_plot.shape[0]) + .5
    ax.set_xticks(xticks)
    ax.set_yticks(yticks[::-1])
    if xticklabel == None:
        # Default labels: 1-based cell positions.
        ax.set_xticklabels((xticks + .5).astype(int), fontsize = 6)
        ax.set_yticklabels((yticks + .5).astype(int), fontsize = 6)
    else:
        ax.set_xticklabels(xticklabel, fontsize = 6)
        ax.set_yticklabels(yticklabel, fontsize = 6)
    plt.axis('tight')

    # Separate narrow axes on the right for the colorbar.
    ax = fig.add_axes(rect_cb)
    cb = plt.colorbar(cax=ax, ticks=[vmin, vmax])
    cb.outline.set_linewidth(0.5)
    cb.set_label(label, fontsize=6, labelpad=-5)
    plt.tick_params(axis='both', which='major', labelsize=6)
    plt.axis('tight')

    folder_name = x_key + '_and_' + y_key + '_vs_' + val_key + '_' + text
    p = os.path.join(save_path, folder_name)
    _easy_save(p, 'figure', dpi=300, pdf=True)
class Solution:
    def convertToTitle(self, n: int) -> str:
        """Convert a positive column number to its Excel-style column title
        (1 -> 'A', 26 -> 'Z', 27 -> 'AA', 703 -> 'AAA')."""
        letters = []
        while n > 0:
            # Bijective base-26: shift to 0-based before dividing.
            n, rem = divmod(n - 1, 26)
            letters.append(chr(ord('A') + rem))
        return ''.join(reversed(letters))
# Quick smoke test: column number 26 should map to 'Z'.
solver = Solution()
print(solver.convertToTitle(26))
|
# Experiment: boost contrast on a wheel image, then run Canny edge detection.
import cv2
import numpy as np

# Flag 0 loads the image as single-channel grayscale.
img = cv2.imread("image/wheel.jpg",0)
# cv2.imshow("img",img)
# kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)
# img = cv2.filter2D(img,-1,kernel)
# img = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
cv2.imshow("img1",img)
# Scale intensities by 5 (saturating to 8-bit) to exaggerate contrast
# before edge detection.
img = cv2.convertScaleAbs(img,alpha=5,beta=0)
cv2.imshow("abs",img)
# Earlier preprocessing attempts (blur, morphological close/open) left
# disabled:
# img = cv2.GaussianBlur(img,(5,5),1)
# k = cv2.getStructuringElement(cv2.MORPH_RECT,(5,5))
# img = cv2.morphologyEx(img,cv2.MORPH_CLOSE,k,iterations=1)
# img = cv2.morphologyEx(img,cv2.MORPH_OPEN,k,iterations=1)
# Canny with hysteresis thresholds 100/150 on the contrast-boosted image.
canny = cv2.Canny(img,100,150)
cv2.imshow("canny",canny)
# Block until a key press so the display windows stay open.
cv2.waitKey(0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.