text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 7 00:35:16 2018
@author: Magnus Tarle
Description:
Contains functions and simplified test functions for vrep_fastslam.py
"""
import numpy as np
import matplotlib.patches as mpatches # used for legend, ellipses and rectangles
from scipy.stats import norm, chi2 # used for the covariance
# USED IN SIM
def cov_ellipse(cov, q=None, nsig=None, **kwargs):
    """Confidence-ellipse parameters for a 2x2 covariance matrix.

    Adapted from https://stackoverflow.com/questions/12301071
    (answer by Syrtis Major, Sep 28 '16).

    Parameters
    ----------
    cov : (2, 2) array
        Covariance matrix.
    q : float, optional
        Confidence level, should be in (0, 1). Takes precedence over nsig.
    nsig : int, optional
        Confidence level as a number of standard deviations,
        e.g. 1 stands for 68.3% and 2 stands for 95.4%.

    Returns
    -------
    width, height, rotation
        Lengths of the two ellipse axes and the rotation angle in degrees.
    """
    if q is not None:
        q = np.asarray(q)
    elif nsig is not None:
        # Convert a sigma level into the equivalent two-sided confidence level.
        q = 2 * norm.cdf(nsig) - 1
    else:
        raise ValueError('One of `q` and `nsig` should be specified.')
    # Chi-square quantile (2 dof) scales the covariance eigenvalues.
    scale = chi2.ppf(q, 2)
    eigvals, eigvecs = np.linalg.eigh(cov)
    width, height = 2 * np.sqrt(eigvals[:, None] * scale)
    rotation = np.degrees(np.arctan2(*eigvecs[::-1, 0]))
    return width, height, rotation
# USED IN SIM
def add_ellipse_object(Sigma, mu):
    """Build matplotlib Ellipse patches for the particle-averaged feature covariances.

    Inputs:
        Sigma: per-particle feature covariances (N features, M particles),
               averaged over the particle axis here.
        mu:    per-particle feature positions, averaged over the last axis;
               indexing implies shape (N, 2, M) -- TODO confirm against caller.
    Returns:
        list of N matplotlib.patches.Ellipse objects at the 99% confidence level.
    """
    centers = np.mean(mu, axis=2)
    covariances = np.mean(Sigma, axis=1)
    patches = []
    for idx in range(len(covariances)):
        ell_w, ell_h, ell_deg = cov_ellipse(covariances[idx], q=0.99)
        patches.append(mpatches.Ellipse(
            xy=(centers[idx][0], centers[idx][1]),
            width=ell_w, height=ell_h, angle=ell_deg, fill=0))
    return patches
# USED IN SIM
def add_arrow_object(iter, carpos, featpos, feature_index):
    """Create a matplotlib Arrow from the true robot position to a feature.

    Inputs:
        iter:          simulation iteration index into carpos
        carpos:        list of true car positions (converted to an array)
        featpos:       observed feature positions [x, y] in world frame
        feature_index: 1-based feature index (converted to 0-based here)
    Returns:
        a matplotlib.patches.Arrow for plotting the observation.
    """
    positions = np.asarray(carpos)
    idx = feature_index - 1
    x0 = positions[iter, 0]
    y0 = positions[iter, 1]
    return mpatches.Arrow(x=x0, y=y0,
                          dx=featpos[idx, 0] - x0,
                          dy=featpos[idx, 1] - y0,
                          width=0.2, color='red')
# USED IN SIM
def id_feature(features, sensed_object):
    # Function identifies which feature index is observed given an object handle.
    # Inputs:
    #   features        array/list of feature object handles in the simulated world
    #   sensed_object   observed object handle (as given by V-REP)
    # Outputs:
    #   1-based index of the object handle within `features`
    # BUGFIX: np.asarray makes elementwise comparison work for list inputs too
    # (a plain list compared with == would yield a single bool), and indexing
    # the first match explicitly avoids int() on a numpy array, which is
    # deprecated since NumPy 1.25 and fails outright for multiple matches.
    matches = np.where(np.asarray(features) == sensed_object)[0]
    return int(matches[0]) + 1
# USED IN SIM
def init_mean_from_z(Xbar, z):
    """Initialize the feature mean position from a range-bearing observation.

    World coordinates are computed per particle from the observation z and
    the particle poses (cf. p.320 in Probabilistic Robotics, Thrun et al.).
    The bearing lies in [-pi, pi). Note the sin/cos convention: the heading
    angle is measured from the y-axis, matching observation_model_zhat.

    Inputs:
        Xbar: 3xM particle set [x, y, theta]'
        z:    2xM range-bearing observation [r, theta]' in the robot frame
    Outputs:
        2xM feature position per particle.
    """
    M = Xbar.shape[1]
    mu_init = np.empty((2, M))
    heading = z[1, :] + Xbar[2, :]      # bearing in the world frame
    # cos is even, so cos(-b - t) == cos(b + t): one heading term serves both.
    mu_init[0, :] = Xbar[0, :] + z[0, :] * np.sin(heading)
    mu_init[1, :] = Xbar[1, :] + z[0, :] * np.cos(heading)
    return mu_init
def test_init_mean_from_z():
    """Smoke test for init_mean_from_z with a single particle.

    BUGFIX: the observation 'deg:' field previously printed the robot heading
    (X1[2]) instead of the observation bearing (z[1]).
    """
    M = 1  # number of particles
    x = 0
    y = 0
    theta = np.pi / 4
    X1 = np.array([x, y, theta])  # assumed particle start pose
    X = np.repeat(X1[:, np.newaxis], M, axis=1)  # particle set
    zr = 1
    ztheta = np.pi / 2
    Z1 = np.array([zr, ztheta])  # range-bearing observation
    z = np.repeat(Z1[:, np.newaxis], M, axis=1)
    mu_init1 = init_mean_from_z(X, z)
    print('car pos x:', X1[0], 'pos y:', X1[1], 'theta:', X1[2], 'deg:', X1[2]*180/np.pi)
    print('observation r:', z[0], 'theta:', z[1], 'deg:', z[1]*180/np.pi)
    print('mean x:', mu_init1[0], 'mean y:', mu_init1[1])
# test_init_mean_from_z()
# USED IN SIM!
def observation_model_zhat(X, mu, k, Q):
    """Range-bearing observation model with additive Gaussian measurement noise.

    Predicts [r, theta]' for feature k relative to every particle. The bearing
    convention is theta = arctan2(dx, dy) - theta_robot (heading measured from
    the y-axis), wrapped to [-pi, pi).

    Inputs:
        X:  3xM particle set [x, y, theta]'
        mu: Nx2xM feature mean positions [x, y]'
        k:  index of the feature being matched to the measurement
        Q:  2x2 measurement noise covariance (diagonal entries are variances)
    Outputs:
        2xM noisy predicted observation [r, theta]'.
    """
    M = X.shape[1]
    dx = mu[k, 0, :] - X[0, :]
    dy = mu[k, 1, :] - X[1, :]
    rng = np.sqrt(dx ** 2 + dy ** 2).reshape(1, M)
    bearing = np.arctan2(dx, dy) - X[2, :]
    bearing = (((bearing + np.pi) % (2 * np.pi)) - np.pi).reshape(1, M)
    zhat = np.concatenate((rng, bearing), axis=0)
    # Diffusion: scale unit normals by the per-channel standard deviations.
    noise_std = np.diag(np.sqrt(Q)).reshape(2, 1)
    z = zhat + np.random.standard_normal((2, M)) * noise_std
    # Re-wrap the noisy bearing to [-pi, pi).
    z[1, :] = np.mod(z[1, :] + np.pi, 2 * np.pi) - np.pi
    return z
def test_observation_model_zhat():
    """Smoke test: one particle observing one of five identical landmarks."""
    M = 1  # number of particles
    pose = np.array([-0.5, -1.0, -np.pi / 2])  # particle start pose [x, y, theta]
    X = np.repeat(pose[:, np.newaxis], M, axis=1)
    landmark = np.array([-1.5, -1.0])  # landmark position [x, y]
    lm = np.repeat(landmark[:, np.newaxis], M, axis=1).reshape(1, 2, M)
    m = np.concatenate([lm] * 5, axis=0)  # Nx2xM feature block (N = 5)
    k = 3  # landmark index under test
    Q = np.eye(2) * 0.1
    zhat = observation_model_zhat(X, m, k, Q)
    thetadeg = zhat[1] * 180 / np.pi
    print('car pos x:', pose[0], 'pos y:', pose[1], 'theta', pose[2])
    print('mean x:', m[k, 0], 'y:', m[k, 1])
    print('calc range:', zhat[0], 'theta:', zhat[1], ', deg:', thetadeg)
#test_observation_model_zhat()
# USED IN SIM
def calculate_measurement_jacobian(X, mu, k):
    """Jacobian H of the range-bearing observation model w.r.t. the feature position.

    H is evaluated at feature k's mean for every particle and is later used in
    the measurement covariance Sigma = H Q H'. The derivative is taken w.r.t.
    the feature coordinates (x, y), not the robot pose.

    The bearing convention matches observation_model_zhat:
    theta = arctan2(dx, dy) - theta_robot, so
        d(theta)/d(mux) =  dy / q
        d(theta)/d(muy) = -dx / q
    BUGFIX: the original placed the two bearing derivatives in swapped
    positions (-dx/q, dy/q), inconsistent with the model's own arctan2(dx, dy).

    Inputs:
        X:  3xM particle set [x, y, theta]'
        mu: Nx2xM feature mean positions [x, y]'
        k:  index of the feature being matched
    Outputs:
        H: Mx2x2 Jacobian, rows = (range, bearing), cols = (d/dmux, d/dmuy).
    """
    M = np.size(X[0, :])
    dx = mu[k, 0, :] - X[0, :]
    dy = mu[k, 1, :] - X[1, :]
    q = dx ** 2 + dy ** 2          # squared range robot -> feature
    sqrt_q = np.sqrt(q)
    H = np.empty((M, 2, 2))
    # range row: d(r)/d(mux), d(r)/d(muy)
    H[:, 0, 0] = dx / sqrt_q
    H[:, 0, 1] = dy / sqrt_q
    # bearing row for theta = arctan2(dx, dy)
    H[:, 1, 0] = dy / q
    H[:, 1, 1] = -dx / q
    return H
# USED IN SIM
def z_from_detectection(X, observed_objects_pos):
    """Convert a detected object position (robot frame) into range and bearing.

    The bearing uses arctan2(x, y) (heading measured from the y-axis) and lies
    in [-pi, pi). The result is broadcast to one column per particle; only the
    particle count of X is used, not its poses.

    Inputs:
        X:                    3xM particle set [x, y, theta]'
        observed_objects_pos: 2x1 feature position [x, y]' in the robot frame
    Outputs:
        2xM observation [r, theta]'.
    """
    M = X.shape[1]
    ox = observed_objects_pos[0, 0]
    oy = observed_objects_pos[1, 0]
    rng = np.ones((1, M)) * np.sqrt(ox ** 2 + oy ** 2)
    bearing = np.ones((1, M)) * np.arctan2(ox, oy)
    bearing = ((bearing + np.pi) % (2 * np.pi)) - np.pi  # wrap to [-pi, pi)
    return np.concatenate((rng, bearing), axis=0)
def test_z_from_detection():
    """Smoke test for z_from_detectection with a single particle.

    BUGFIX: 'pos y:' previously printed X1[0] (the x coordinate) instead of
    X1[1].
    """
    M = 1  # number of particles
    x = 0
    y = 0
    theta = 0
    X1 = np.array([x, y, theta])  # assumed particle start pose
    X = np.repeat(X1[:, np.newaxis], M, axis=1)
    ox = 0
    oy = 1
    o1 = np.array([ox, oy])  # observed object position in the robot frame
    o = np.repeat(o1[:, np.newaxis], M, axis=1)
    z = z_from_detectection(X, o)
    thetadeg = z[1] * 180 / np.pi
    print('car pos x:', X1[0], 'pos y:', X1[1], 'theta', X1[2])
    print('observation x:', o[0], 'y:', o[1])
    print('calc range:', z[0], 'theta:', z[1], ', deg:', thetadeg)
#test_z_from_detection()
# USED IN SIM
def sample_pose(X, v, w, R, delta_t):
    """Prediction step / proposal distribution for the particle set.

    Applies the odometry motion model (with the project's sin-for-x,
    cos-for-y heading convention) and adds Gaussian diffusion drawn from the
    process noise R.

    Inputs:
        X:       3xM particle set [x, y, theta]' at time t-1
        v:       translational velocity at time t
        w:       angular velocity at time t
        R:       3x3 process noise covariance (diagonal entries are variances)
        delta_t: time step between t-1 and t
    Outputs:
        3xM predicted particle set, theta wrapped to [-pi, pi).
    """
    M = X.shape[1]
    heading = X[2, :]
    # Deterministic motion: [dx, dy] from the velocity, dtheta from w.
    motion = np.concatenate((
        delta_t * np.array([v * np.sin(heading), v * np.cos(heading)]),
        delta_t * w * np.ones((1, M)),
    ), axis=0)
    # Diffusion: unit normals scaled per state by sqrt of the noise variances.
    noise_std = np.diag(np.sqrt(R)).reshape(3, 1)
    diffusion = np.random.standard_normal((3, M)) * noise_std
    Xbar = X + motion + diffusion
    Xbar[2, :] = ((Xbar[2, :] + np.pi) % (2 * np.pi)) - np.pi
    return Xbar
# USED IN SIM
def calculate_odometry(delta_angle_R, delta_angle_L, B, R_R, R_L, delta_t, CALIB_ODOM):
    """Differential-drive odometry: wheel angle increments -> (v, w).

    Inputs:
        delta_angle_R: right wheel angle increment over the time step
        delta_angle_L: left wheel angle increment over the time step
        B:             wheel base (distance between wheel contact points)
        R_R, R_L:      right / left wheel radii
        delta_t:       length of the time step
        CALIB_ODOM:    calibration constant compensating slip etc.
    Outputs:
        (v, w): translational and angular velocity at time t.
    """
    omega_right = delta_angle_R / delta_t
    omega_left = delta_angle_L / delta_t
    # Sign convention: a faster right wheel yields a negative w here.
    w = (omega_left * R_L - omega_right * R_R) / B * CALIB_ODOM
    v = (omega_right * R_R + omega_left * R_L) / 2
    return v, w
def test_calculate_odometry():
    """Smoke test: right wheel turns 10% faster than the left one."""
    R_L = 0.1 / 2             # radius of the left wheel
    R_R = R_L                 # right wheel has the same radius
    B = 0.2                   # wheel base (front-to-back contact distance)
    CLBRTE_ODMTRY = 1 / 2.68  # odometry calibration (radius 0.5, B = 0.2)
    delta_t = 0.1
    angle_diff_L = np.pi
    angle_diff_R = np.pi * 1.1
    v, w = calculate_odometry(angle_diff_R, angle_diff_L, B, R_R, R_L,
                              delta_t, CLBRTE_ODMTRY)
    print('velocity is', v)
    print('angular velocity is', w)
# USED IN SIM
def predict_motion_xytheta(x, y, theta, v, w, delta_t):
    """Noise-free one-step motion prediction for a single pose.

    Uses the project's heading convention (theta measured from the y-axis,
    hence sin for x and cos for y). The predicted heading is wrapped to
    [-pi, pi).

    Inputs:
        x, y, theta: pose at time t-1
        v:           translational velocity at time t
        w:           angular velocity at time t
        delta_t:     time step between t-1 and t
    Outputs:
        (x, y, theta) predicted pose at time t.
    """
    x_predmotion = x + v * delta_t * np.sin(theta)
    y_predmotion = y + v * delta_t * np.cos(theta)
    theta_predmotion = (((theta + w * delta_t) + np.pi) % (2 * np.pi)) - np.pi
    return x_predmotion, y_predmotion, theta_predmotion
|
import segmentation_models
from segmentation_models import Unet, PSPNet
from segmentation_models import metrics
from segmentation_models.losses import (
dice_loss, jaccard_loss, categorical_focal_loss, categorical_crossentropy
)
from farmer.ncc.models import xception, mobilenet, Deeplabv3, Model2D
from ..model.task_model import Task
from tensorflow import keras
# Make segmentation_models build against tf.keras (not standalone Keras).
segmentation_models.set_framework('tf.keras')
# Composite loss functions. segmentation_models loss objects support `+`,
# which returns a new loss summing both terms; these names are looked up via
# globals() in BuildModelTask._do_compile_model_task, so they must match the
# `loss` strings used in the experiment config.
cce_dice_loss = categorical_crossentropy + dice_loss
cce_jaccard_loss = categorical_crossentropy + jaccard_loss
categorical_focal_dice_loss = categorical_focal_loss + dice_loss
categorical_focal_jaccard_loss = categorical_focal_loss + jaccard_loss
class BuildModelTask:
    """Builds, restores, parallelizes and compiles a Keras model according to
    the experiment config, optionally sampling hyper-parameters from an
    Optuna trial (the ``op_*`` config flags)."""

    def __init__(self, config):
        self.config = config

    def command(self, trial=None):
        """Build and compile the model described by ``self.config``.

        Returns (compiled_model, base_model). ``base_model`` is the
        single-GPU model and is the one to save when training on multi GPU.
        """
        base_model = self._do_make_model_task(
            task=self.config.task,
            model_name=self.config.model_name,
            nb_classes=self.config.nb_classes,
            height=self.config.height,
            width=self.config.width,
            backbone=self.config.backbone,
            activation=self.config.activation,
            trial=trial
        )
        base_model = self._do_load_model_task(
            base_model, self.config.trained_model_path
        )
        model = self._do_multi_gpu_task(
            base_model, self.config.multi_gpu, self.config.nb_gpu
        )
        compiled_model = self._do_compile_model_task(
            model,
            self.config.optimizer,
            self.config.learning_rate,
            self.config.task,
            self.config.loss,
            trial
        )
        return compiled_model, base_model

    def _do_make_model_task(
        self,
        task,
        model_name,
        nb_classes,
        width=299,
        height=299,
        backbone="resnet50",
        activation="softmax",
        trial=None
    ):
        """Instantiate the network architecture for the given task.

        Raises NotImplementedError for unknown tasks or segmentation models.
        """
        if task == Task.CLASSIFICATION:
            # Minimum input sizes required by the respective architectures.
            xception_shape_condition = height >= 71 and width >= 71
            mobilenet_shape_condition = height >= 32 and width >= 32
            if model_name == "xception" and xception_shape_condition:
                model = xception(nb_classes, height, width)
            elif model_name == "mobilenet" and mobilenet_shape_condition:
                model = mobilenet(nb_classes, height, width)
            else:
                # Fallback when the name is unknown or the input is too small.
                model = Model2D(nb_classes, height, width)
        elif task == Task.SEMANTIC_SEGMENTATION:
            if self.config.op_backbone:
                # Hyper-parameter search: pick the backbone from candidates.
                backbone = trial.suggest_categorical(
                    'backbone', self.config.backbone
                )
            print('------------------')
            print('Model:', model_name)
            print('Backbone:', backbone)
            print('------------------')
            if model_name == "unet":
                model = Unet(
                    backbone_name=backbone,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                )
            elif model_name == "deeplab_v3":
                model = Deeplabv3(
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                    backbone=backbone,
                    activation=activation
                )
            elif model_name == "pspnet":
                model = PSPNet(
                    backbone_name=backbone,
                    input_shape=(height, width, 3),
                    classes=nb_classes,
                )
            else:
                raise NotImplementedError
        else:
            # BUGFIX: an unknown task previously fell through to
            # `return model` with `model` unbound (UnboundLocalError).
            raise NotImplementedError
        return model

    def _do_load_model_task(self, model, trained_model_path):
        """Load pretrained weights into ``model`` when a path is configured."""
        if trained_model_path:
            model.load_weights(trained_model_path)
        return model

    def _do_multi_gpu_task(self, base_model, multi_gpu, nb_gpu):
        """Wrap ``base_model`` for multi-GPU training when requested."""
        if multi_gpu and self.config.framework == "tensorflow":
            model = keras.utils.multi_gpu_model(base_model, gpus=nb_gpu)
        else:
            # BUGFIX: the original left `model` unbound (UnboundLocalError)
            # when multi_gpu was requested with a non-tensorflow framework.
            model = base_model
        return model

    def _do_compile_model_task(
        self,
        model,
        optimizer,
        learning_rate,
        task_id,
        loss_func,
        trial
    ):
        """Attach optimizer, loss and metrics; return the compiled model."""
        if self.config.op_learning_rate:
            if len(self.config.learning_rate) == 2:
                # learning_rate = [10^(min), 10^(max)]
                # BUGFIX: the sampled rate was truncated with int(), which
                # turns any rate < 1 (i.e. every realistic learning rate)
                # into 0; keep it as a float.
                learning_rate = float(trial.suggest_loguniform(
                    'learning_rate', *self.config.learning_rate))
            elif len(self.config.learning_rate) == 3:
                # learning_rate = [min, max, step]
                learning_rate = float(trial.suggest_discrete_uniform(
                    'learning_rate', *self.config.learning_rate))
        else:
            learning_rate = self.config.learning_rate
        if self.config.framework == "tensorflow":
            if self.config.op_optimizer:
                # Hyper-parameter search: keras.compile accepts the optimizer
                # name as a string (with its default parameters).
                optimizer = trial.suggest_categorical(
                    'optimizer', self.config.optimizer
                )
                print('------------------')
                print('Optimizer:', optimizer)
                print('------------------')
            elif optimizer == "adam":
                optimizer = keras.optimizers.Adam(
                    lr=learning_rate, beta_1=0.9, beta_2=0.999, decay=0.001
                )
            else:
                optimizer = keras.optimizers.SGD(
                    lr=learning_rate, momentum=0.9, decay=0.001
                )
        if task_id == Task.CLASSIFICATION:
            model.compile(
                optimizer=optimizer,
                loss=keras.losses.categorical_crossentropy,
                metrics=["acc"],
            )
        elif task_id == Task.SEMANTIC_SEGMENTATION:
            print('------------------')
            print('Loss:', loss_func)
            print('------------------')
            # Looks up the loss by name among the module-level composite
            # losses; loss_func must match one of those names.
            model.compile(
                optimizer=optimizer,
                loss=globals()[loss_func],
                metrics=[metrics.iou_score,
                         categorical_crossentropy],
            )
        else:
            raise NotImplementedError
        return model
|
from ... import weather as rk_weather
from .solar_workflow_manager import SolarWorkflowManager
def openfield_pv_merra_ryberg2019(placements, merra_path, global_solar_atlas_ghi_path, module="WINAICO WSx-240P6", elev=300, tracking="fixed", inverter=None, inverter_kwargs=None, tracking_args=None, output_netcdf_path=None, output_variables=None):
    """
    openfield_pv_merra_ryberg2019(placements, merra_path, global_solar_atlas_ghi_path, module="WINAICO WSx-240P6", elev=300, tracking="fixed",
                                  inverter=None, inverter_kwargs=None, tracking_args=None, output_netcdf_path=None, output_variables=None)

    Simulation of an openfield PV system based on MERRA data.

    Parameters
    ----------
    placements: Pandas Dataframe
        Locations where to perform simulations at.
        Columns need to be lat (latitudes), lon (longitudes), tilt and capacity.
    merra_path: str
        Path to the MERRA data on your computer.
        Can be a single ".nc" file, or a directory containing many ".nc" files.
    global_solar_atlas_ghi_path: str
        Path to the global solar atlas GHI data on your computer.
    module: str
        Name of the module that you want to use for the simulation.
        Default is Winaico Wsx-240P6.
        See reskit.solar.SolarWorkflowManager.configure_cec_module for more usage information.
    elev: float
        Elevation that you want to model your PV system at.
    tracking: str
        Option 1 is 'fixed', meaning that the module does not have any tracking capabilities.
        Option 2 is 'single_axis', meaning that the module has single-axis tracking capabilities.
    inverter: str
        Determines whether or not you want to model your PV system with an inverter.
        Default is None, meaning no inverter is assumed.
        See reskit.solar.SolarWorkflowManager.apply_inverter_losses for more usage information.
    inverter_kwargs: dict, optional
        Extra keyword arguments forwarded to apply_inverter_losses.
    tracking_args: dict, optional
        Extra keyword arguments forwarded to permit_single_axis_tracking.
    output_netcdf_path: str
        Path to a file that you want to save your output NETCDF file at.
        Default is None.
    output_variables: str
        Output variables of the simulation that you want to save into your NETCDF output file.

    Returns
    -------
    A xarray dataset including all the output variables you defined as your output_variables.
    """
    # BUGFIX: avoid mutable default arguments ({} shared across calls).
    if inverter_kwargs is None:
        inverter_kwargs = {}
    if tracking_args is None:
        tracking_args = {}
    wf = SolarWorkflowManager(placements)
    wf.configure_cec_module(module)
    # Fill in placement columns the caller did not provide.
    if "tilt" not in wf.placements.columns:
        wf.estimate_tilt_from_latitude(convention="Ryberg2020")
    if "azimuth" not in wf.placements.columns:
        wf.estimate_azimuth_from_latitude()
    if "elev" not in wf.placements.columns:
        wf.apply_elevation(elev)
    wf.read(
        variables=['surface_wind_speed',
                   "surface_pressure",
                   "surface_air_temperature",
                   "surface_dew_temperature",
                   "global_horizontal_irradiance"],
        source_type="MERRA",
        source=merra_path,
        set_time_index=True,
        verbose=False
    )
    # Bias-correct GHI against the Global Solar Atlas long-run average.
    wf.adjust_variable_to_long_run_average(
        variable='global_horizontal_irradiance',
        source_long_run_average=rk_weather.MerraSource.LONG_RUN_AVERAGE_GHI,
        real_long_run_average=global_solar_atlas_ghi_path,
        real_lra_scaling=1000 / 24,  # cast to hourly average kWh
    )
    wf.determine_solar_position()
    wf.filter_positive_solar_elevation()
    wf.determine_extra_terrestrial_irradiance(model="spencer", solar_constant=1370)
    wf.determine_air_mass(model='kastenyoung1989')
    # Split GHI into direct/diffuse components via the DIRINT model.
    wf.apply_DIRINT_model()
    wf.diffuse_horizontal_irradiance_from_trigonometry()
    if tracking == "single_axis":
        wf.permit_single_axis_tracking(**tracking_args)
    wf.determine_angle_of_incidence()
    wf.estimate_plane_of_array_irradiances(transposition_model="perez")
    wf.apply_angle_of_incidence_losses_to_poa()
    wf.cell_temperature_from_sapm()
    wf.simulate_with_interpolated_single_diode_approximation(module=module)
    if inverter is not None:
        wf.apply_inverter_losses(inverter=inverter, **inverter_kwargs)
    # Flat 20% system-loss assumption from Ryberg (2019).
    wf.apply_loss_factor(0.20, variables=['capacity_factor', 'total_system_generation'])
    return wf.to_xarray(output_netcdf_path=output_netcdf_path, output_variables=output_variables)
def openfield_pv_era5_unvalidated(placements, era5_path, global_solar_atlas_ghi_path, global_solar_atlas_dni_path, module="WINAICO WSx-240P6", elev=300, tracking="fixed", inverter=None, inverter_kwargs=None, tracking_args=None, output_netcdf_path=None, output_variables=None):
    """
    openfield_pv_era5_unvalidated(placements, era5_path, global_solar_atlas_ghi_path, global_solar_atlas_dni_path, module="WINAICO WSx-240P6", elev=300, tracking="fixed", inverter=None, inverter_kwargs=None, tracking_args=None, output_netcdf_path=None, output_variables=None)

    Simulation of an openfield PV system based on ERA5 data.

    Parameters
    ----------
    placements: Pandas Dataframe
        Locations that you want to do the simulations for.
        Columns need to be lat (latitudes), lon (longitudes), tilt and capacity.
    era5_path: str
        Path to the ERA5 data on your computer.
        Can be a single ".nc" file, or a directory containing many ".nc" files.
    global_solar_atlas_ghi_path: str
        Path to the global solar atlas GHI data on your computer.
    global_solar_atlas_dni_path: str
        Path to the global solar atlas DNI data on your computer.
    module: str
        Name of the module that you want to use for the simulation.
        Default is Winaico Wsx-240P6.
    elev: float
        Elevation that you want to model your PV system at.
    tracking: str
        Determines whether your PV system is fixed or not. Default is 'fixed'.
        Option 1 is 'fixed', meaning that the module does not have any tracking capabilities.
        Option 2 is 'single_axis', meaning that the module has single-axis tracking capabilities.
    inverter: str
        Determines whether you want to model your PV system with an inverter or not.
        Default is None.
        See reskit.solar.SolarWorkflowManager.apply_inverter_losses for more usage information.
    inverter_kwargs: dict, optional
        Extra keyword arguments forwarded to apply_inverter_losses.
    tracking_args: dict, optional
        Extra keyword arguments forwarded to permit_single_axis_tracking.
    output_netcdf_path: str
        Path to a file that you want to save your output NETCDF file at.
        Default is None.
    output_variables: str
        Output variables of the simulation that you want to save into your NETCDF output file.

    Returns
    -------
    A xarray dataset including all the output variables you defined as your output_variables.
    """
    # BUGFIX: avoid mutable default arguments ({} shared across calls).
    if inverter_kwargs is None:
        inverter_kwargs = {}
    if tracking_args is None:
        tracking_args = {}
    wf = SolarWorkflowManager(placements)
    wf.configure_cec_module(module)
    # Fill in placement columns the caller did not provide.
    if "tilt" not in wf.placements.columns:
        wf.estimate_tilt_from_latitude(convention="Ryberg2020")
    if "azimuth" not in wf.placements.columns:
        wf.estimate_azimuth_from_latitude()
    if "elev" not in wf.placements.columns:
        wf.apply_elevation(elev)
    wf.read(
        variables=["global_horizontal_irradiance",
                   "direct_horizontal_irradiance",
                   "surface_wind_speed",
                   "surface_pressure",
                   "surface_air_temperature",
                   "surface_dew_temperature", ],
        source_type="ERA5",
        source=era5_path,
        set_time_index=True,
        verbose=False
    )
    wf.determine_solar_position()
    wf.filter_positive_solar_elevation()
    wf.direct_normal_irradiance_from_trigonometry()
    # Bias-correct GHI and DNI against the Global Solar Atlas long-run averages.
    wf.adjust_variable_to_long_run_average(
        variable='global_horizontal_irradiance',
        source_long_run_average=rk_weather.Era5Source.LONG_RUN_AVERAGE_GHI,
        real_long_run_average=global_solar_atlas_ghi_path,
        real_lra_scaling=1000 / 24,  # cast to hourly average kWh
    )
    wf.adjust_variable_to_long_run_average(
        variable='direct_normal_irradiance',
        source_long_run_average=rk_weather.Era5Source.LONG_RUN_AVERAGE_DNI,
        real_long_run_average=global_solar_atlas_dni_path,
        real_lra_scaling=1000 / 24,  # cast to hourly average kWh
    )
    wf.determine_extra_terrestrial_irradiance(model="spencer", solar_constant=1370)
    wf.determine_air_mass(model='kastenyoung1989')
    wf.diffuse_horizontal_irradiance_from_trigonometry()
    if tracking == "single_axis":
        wf.permit_single_axis_tracking(**tracking_args)
    wf.determine_angle_of_incidence()
    wf.estimate_plane_of_array_irradiances(transposition_model="perez")
    wf.apply_angle_of_incidence_losses_to_poa()
    wf.cell_temperature_from_sapm()
    wf.simulate_with_interpolated_single_diode_approximation(module=module)
    if inverter is not None:
        wf.apply_inverter_losses(inverter=inverter, **inverter_kwargs)
    # Flat 20% system-loss assumption, as in the validated MERRA workflow.
    wf.apply_loss_factor(0.20, variables=['capacity_factor', 'total_system_generation'])
    return wf.to_xarray(output_netcdf_path=output_netcdf_path, output_variables=output_variables)
def openfield_pv_sarah_unvalidated(placements, sarah_path, era5_path, module="WINAICO WSx-240P6", elev=300, tracking="fixed", inverter=None, inverter_kwargs=None, tracking_args=None, output_netcdf_path=None, output_variables=None):
    """
    openfield_pv_sarah_unvalidated(placements, sarah_path, era5_path, module="WINAICO WSx-240P6", elev=300, tracking="fixed", inverter=None, inverter_kwargs=None, tracking_args=None, output_netcdf_path=None, output_variables=None)

    Simulation of an openfield PV system based on SARAH and ERA5 data.

    Parameters
    ----------
    placements: Pandas Dataframe
        Locations that you want to do the simulations for.
        Columns need to be lat (latitudes), lon (longitudes), tilt and capacity.
    sarah_path: str
        Path to the SARAH data on your computer.
        Can be a single ".nc" file, or a directory containing many ".nc" files.
    era5_path: str
        Path to the ERA5 data on your computer.
        Can be a single ".nc" file, or a directory containing many ".nc" files.
    module: str
        Name of the module that you want to use for the simulation.
        Default is Winaico Wsx-240P6.
    elev: float
        Elevation that you want to model your PV system at.
    tracking: str
        Determines whether your PV system is fixed or not. Default is 'fixed'.
        Option 1 is 'fixed', meaning that the module does not have any tracking capabilities.
        Option 2 is 'single_axis', meaning that the module has single-axis tracking capabilities.
    inverter: str
        Determines whether you want to model your PV system with an inverter or not.
        Default is None.
        See reskit.solar.SolarWorkflowManager.apply_inverter_losses for more usage information.
    inverter_kwargs: dict, optional
        Extra keyword arguments forwarded to apply_inverter_losses.
    tracking_args: dict, optional
        Extra keyword arguments forwarded to permit_single_axis_tracking.
    output_netcdf_path: str
        Path to a file that you want to save your output NETCDF file at.
        Default is None.
    output_variables: str
        Output variables of the simulation that you want to save into your NETCDF output file.

    Returns
    -------
    A xarray dataset including all the output variables you defined as your output_variables.
    """
    # BUGFIX: avoid mutable default arguments ({} shared across calls).
    if inverter_kwargs is None:
        inverter_kwargs = {}
    if tracking_args is None:
        tracking_args = {}
    wf = SolarWorkflowManager(placements)
    wf.configure_cec_module(module)
    # Fill in placement columns the caller did not provide.
    if "tilt" not in wf.placements.columns:
        wf.estimate_tilt_from_latitude(convention="Ryberg2020")
    if "azimuth" not in wf.placements.columns:
        wf.estimate_azimuth_from_latitude()
    if "elev" not in wf.placements.columns:
        wf.apply_elevation(elev)
    # Irradiances come from SARAH (sets the time index) ...
    wf.read(
        variables=["direct_normal_irradiance",
                   "global_horizontal_irradiance"],
        source_type="SARAH",
        source=sarah_path,
        set_time_index=True,
        verbose=False
    )
    # ... meteorology comes from ERA5 on the SARAH time index.
    wf.read(
        variables=["surface_wind_speed",
                   "surface_pressure",
                   "surface_air_temperature",
                   "surface_dew_temperature", ],
        source_type="ERA5",
        source=era5_path,
        set_time_index=False,
        verbose=False
    )
    wf.determine_solar_position()
    wf.filter_positive_solar_elevation()
    wf.determine_extra_terrestrial_irradiance(model="spencer", solar_constant=1370)
    wf.determine_air_mass(model='kastenyoung1989')
    wf.diffuse_horizontal_irradiance_from_trigonometry()
    if tracking == "single_axis":
        wf.permit_single_axis_tracking(**tracking_args)
    wf.determine_angle_of_incidence()
    wf.estimate_plane_of_array_irradiances(transposition_model="perez")
    wf.apply_angle_of_incidence_losses_to_poa()
    wf.cell_temperature_from_sapm()
    wf.simulate_with_interpolated_single_diode_approximation(module=module)
    if inverter is not None:
        wf.apply_inverter_losses(inverter=inverter, **inverter_kwargs)
    # Flat 20% system-loss assumption, as in the validated MERRA workflow.
    wf.apply_loss_factor(0.20, variables=['capacity_factor', 'total_system_generation'])
    return wf.to_xarray(output_netcdf_path=output_netcdf_path, output_variables=output_variables)
|
# Answer to "Mod Divmod": read two integers and print their integer
# quotient, their remainder, and the (quotient, remainder) pair.
dividend = int(input())
divisor = int(input())
quotient, remainder = divmod(dividend, divisor)
print(quotient)
print(remainder)
print((quotient, remainder))
"""
>>> print divmod(177,10)
(17, 7)
Here, the integer division is 177/10 => 17 and the modulo operator is 177%10 => 7.
"""
import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
import random
from PIL import Image
from CNN import CNN
def get_data():
    """Load the handwritten-character dataset from ./TRAIN.

    Classes 1..14 each provide 256 bitmaps: indices 0..199 become the
    training set, indices 200..255 the evaluation set.

    Returns:
        train_data: list of [flat_pixel_list, one_hot_label] pairs.
        test_data:  numpy array of flattened evaluation images.
        test_label: numpy array of the matching one-hot labels.
    """
    def _flatten_image(path):
        # Flatten a bitmap row by row into one flat list of pixel values.
        image_array = numpy.array(Image.open(path))
        return [pixel for row in image_array for pixel in row]

    train_data = []
    for i in range(14):
        image_label = numpy.zeros(14)
        image_label[i] = 1
        for j in range(200):
            image_matrix = _flatten_image('./TRAIN/' + str(i + 1) + '/' + str(j) + '.bmp')
            train_data.append([image_matrix, image_label])

    test_data = []
    test_label = []
    for i in range(14):
        image_label = numpy.zeros(14)
        image_label[i] = 1
        for j in range(56):
            # NOTE(review): evaluation images are also read from ./TRAIN
            # (indices 200..255) — confirm this is intended and not ./TEST.
            image_matrix = _flatten_image('./TRAIN/' + str(i + 1) + '/' + str(200 + j) + '.bmp')
            test_data.append(image_matrix)
            test_label.append(image_label)
    return train_data, numpy.array(test_data), numpy.array(test_label)
class Instance(CNN):
    """Concrete CNN configuration: plain gradient descent, ReLU
    activations, SAME-padded stride-1 convolutions, 2x2 max pooling."""

    def optimizer(self, cross_entropy):
        # Minimize the loss with vanilla SGD at the instance's learning rate.
        return tf.train.GradientDescentOptimizer(self.rate).minimize(cross_entropy)

    def activate(self, mid_result, bias):
        # ReLU over the biased pre-activation.
        return tf.nn.relu(mid_result + bias)

    def convolution(self, x, weight_valve):
        # 2-D convolution, stride 1; zero padding keeps the spatial size.
        return tf.nn.conv2d(x, weight_valve, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool(self, x):
        # 2x2 max pooling with stride 2 halves each spatial dimension.
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')
# --- Training / evaluation driver (runs at import time) ---
train_data, test_data, test_label = get_data()
# Network: two 4x4 conv layers (32 then 64 channels), a 1024-wide FC
# layer, 14-way output; learning rate 1e-4, 28x28 single-channel input.
cnn_instance_1 = Instance(0.0001, 784, 14, [-1, 28, 28, 1],
                          [[4, 4, 1, 32], [4, 4, 32, 64], [7 * 7 * 64, 1024], [1024, 14]])
error_matrix = cnn_instance_1.train_random(1500, 150, train_data, test_data, test_label)
print("input your test file path:")
test_path = input()
print("test start:")
test_data = []
# Flatten each of the 763 external test bitmaps the same way get_data() does.
for i in range(763):
    image = Image.open(test_path + str(i) + '.bmp')
    image_array = numpy.array(image)
    image_matrix = []
    for a in range(len(image_array)):
        for b in range(len(image_array[a])):
            image_matrix.append(image_array[a][b])
    test_data.append(image_matrix)
cnn_instance_1.test(test_data)
|
#!/usr/bin/python
# SMTP VRFY user-enumeration helper (skeleton): connects to the target
# mail server on port 25; the VRFY exchange itself is not in this chunk.
import socket
import sys

if len(sys.argv) != 2:
    # Fixed: the original used Python 2 print-statement syntax, which is
    # a SyntaxError under Python 3.
    print("Usage: vrfy.py <username>")
    sys.exit(0)

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Create a socket
connect = s.connect(('192.168.15.215', 25))  # Connect to the server
|
import unittest
import textwrap
from pseudo import generate
from pseudo.pseudo_tree import Node
import suite
class TestRuby(unittest.TestCase, metaclass=suite.TestLanguage): # dark magic bitches
    """Expected Ruby output for every example in the shared `suite`.

    The suite.TestLanguage metaclass turns each attribute below into a
    test case for pseudo's Ruby generator. Values are either the expected
    string, a list of expected strings, or a (dependencies, expected)
    tuple. Multi-line expectations use textwrap.dedent.
    """
    _language = 'ruby'
    _import = 'require'
    _parse_import = lambda self, line: line[9:-1]
    # make declarative style great again
    # expected ruby translation for each example in suite:
    int_ = '42'
    float_ = '42.42'
    string = "'la'"
    boolean = 'true'
    null = 'nil'
    dictionary = "{la: 0}"
    list_ = "['la']"
    local = 'egg'
    set_ = (['set'], 'Set.new([2])')
    tuple_ = '[2, 42.2]'
    array = '[2, 4]'
    regex = '/[a-b]/'
    typename = 'Egg'
    instance_variable = '@egg'
    attr = 'e.egg'
    assignments = [
        'egg = ham',
        '@egg = ham',
        'T.egg = ham',
        "x[4] = 'String'"
    ]
    call = 'map(x)'
    method_call = 'e.filter(42)'
    standard_call = [
        'puts 42',
        'gets',
        'Math.log(ham)',
        "source = File.read('f.py')"
    ]
    # io
    io_display = "puts 2, 'z'"
    io_read = 'source = gets'
    io_read_file = "source = File.read('z.py')"
    io_write_file = "File.write('z.py', source)"
    # math
    math_ln = 'Math.log(z)'
    math_log = 'Math.log(z, 2.0)'
    math_tan = 'Math.tan(z)'
    math_sin = 'Math.sin(z)'
    math_cos = 'Math.cos(z)'
    # regexp
    regexp_compile = '/#{s}/'
    regexp_escape = 'Regexp.escape(s)'
    standard_method_call = [
        'l.length',
        "'l'[0...2]",
    ]
    # List
    list_push = "cpus.push('')"
    list_pop = 'cpus.pop'
    list_length = 'cpus.length'
    list_map = "cpus.map { |value| value + 'a' }"
    list_remove = "cpus.delete(s)"
    list_remove_at = "cpus.delete_at(0)"
    # NOTE(review): duplicate assignment — list_length is already defined
    # above with the same value; one of the two can be removed.
    list_length = 'cpus.length'
    list_slice = 'cpus[2...-1]'
    list_slice_from = 'cpus[2..-1]'
    list_slice_to = 'cpus[0...2]'
    list_filter = 'cpus.select { |value| value.length == 0 }'
    list_reduce = textwrap.dedent('''\
        cpus.reduce('') do |value, other|
          result = value + other
          result
        end''')
    list_any = 'cpus.any? { |value| value.length == 0 }'
    list_all = 'cpus.all? { |value| value.length == 0 }'
    list_find = 'cpus.index(s)'
    list_present = '!cpus.empty?'
    list_empty = 'cpus.empty?'
    list_contains = 'cpus.include?(s)'
    list_sort = 'cpus.sort!'
    # Hash
    dictionary_length = 'pointers.length'
    dictionary_contains = 'pointers.include?(s)'
    dictionary_keys = 'pointers.keys'
    dictionary_values = 'pointers.values'
    # Set
    set_length = (['set'], 'words.length')
    set_contains = (['set'], 'words.include?(s)')
    set_union = (['set'], 'words | words')
    set_intersection = (['set'], 'words.intersection(words)')
    # Tuple
    tuple_length = 'flowers.length'
    # Array
    array_length = 'cars.length'
    # String
    string_substr = 's[1...-1]'
    string_substr_from = 's[2..-1]'
    string_substr_to = 's[0...-2]'
    string_length = 's.length'
    string_find = 's.index(t)'
    string_find_from = 's.index(t, z)'
    string_count = 's.count(t)'
    string_concat = 's + t'
    string_partition = 's.partition(t)'
    string_split = 's.split(t)'
    string_trim = 's.trim'
    string_reversed = 's.reverse'
    string_center = 's.center(z, t)'
    string_present = '!s.empty?'
    string_empty = 's.empty?'
    string_contains = 's.include?(t)'
    string_to_int = 's.to_i'
    string_pad_left = 's.ljust(0, t)'
    string_pad_right = 's.rjust(0, t)'
    # Regexp
    regexp_match = 's.scan(r)'
    # RegexpMatch # result of s.scan is an array, fix regex in next versions
    regexp_match_group = 'm[2][0]'
    regexp_match_has_match = '!m.empty?'
    binary_op = 'ham + egg'
    unary_op = '-a'
    comparison = 'egg > ham'
    interpolation = '"#{s}la#{4}"'
    if_statement = textwrap.dedent('''\
        if egg == ham
          l[0...2]
        elsif egg == ham
          puts 4.2
        else
          z
        end''')
    for_statement = [
        textwrap.dedent('''\
            sequence.each do |a|
              log(a)
            end'''),
        textwrap.dedent('''\
            (0...42).step(2).each do |j|
              analyze(j)
            end'''),
        textwrap.dedent('''\
            z.each_with_index do |k, j|
              analyze(j, k)
            end'''),
        textwrap.dedent('''\
            z.each do |j, k|
              analyze(k, j)
            end'''),
        textwrap.dedent('''\
            z.zip(zz).each do |k, l|
              a(k, l)
            end''')
    ]
    while_statement = textwrap.dedent('''\
        while f >= 42
          b = g
        end''')
    function_definition = textwrap.dedent('''\
        def weird(z)
          fixed = fix(z)
          fixed
        end''')
    method_definition = textwrap.dedent('''\
        def parse(source)
          @ast = 0
          [source]
        end''')
    anonymous_function = [
        '-> source { ves(source.length) }',
        textwrap.dedent('''\
            -> source do
              puts source
              ves(source.length)
            end''')
    ]
    class_statement = [textwrap.dedent('''\
        class A < B
          def initialize(a)
            @a = a
          end
          def parse
            42
          end
        end''')]
    this = 'self'
    constructor = textwrap.dedent('''\
        def initialize(a, b)
          @a = a
          @b = b
        end''')
    neg_index = "'la'[-2]"
    index = "'la'[2]"
    try_statement = [
        textwrap.dedent('''\
            begin
              a
              h(-4)
            rescue StandardError => e
              puts e
            end'''),
        textwrap.dedent('''\
            class NeptunError < StandardError
            end
            begin
              a
              h(-4)
            rescue NeptunError => e
              puts e
            end''')
    ]
    throw_statement = textwrap.dedent('''\
        class NeptunError < StandardError
        end
        throw NeptunError.new(\'no tea\')''')
|
import os
# change to dir of script
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Read the puzzle input; fall back to empty data when the file is absent.
try:
    with open("input.txt") as f:
        data = f.read()  # entire file as one string
        lines = data.splitlines()
except OSError:
    # Fixed: was a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit; only file errors are expected here.
    print("no input.txt")
    data, lines = "", []
line_groups = data.split("\n\n")  # lines split by double newlines
print(lines)
print(len(lines), "lines in input.txt")
def ans(answer):
    """Print `answer` and copy it to the X clipboard via xclip.

    Fixed: the original built a shell command with an f-string
    (`os.system(f'echo "{answer}" | xclip ...')`), so an answer
    containing quotes or metacharacters would be interpreted by the
    shell. subprocess with an argument list avoids the shell entirely.
    """
    import subprocess
    print(answer, "| in clipboard")
    try:
        # echo appends a trailing newline, so keep that behavior.
        subprocess.run(["xclip", "-selection", "clipboard", "-in"],
                       input=(str(answer) + "\n").encode())
    except OSError:
        # xclip missing / no X display — the answer was still printed.
        pass
L, I, D, S = list, int, dict, set
P, E, R, M = print, enumerate, range, map
## end of boilerplate ##
def line_transform(line):
    """Per-line parsing hook; currently the identity (raw string).

    Swap in e.g. `int(line)` or `line.split()` when a puzzle needs it.
    """
    return line
lines = [line_transform(line) for line in lines] # apply line_transform to each line
for line in lines:
    pass
# --- part 1: find the bus with the shortest wait after `start`;
# the answer is wait_minutes * bus_id.
start = int(lines[0])
departs = []
waits = []
min_ans = []
small = 1000000  # best (smallest) wait seen so far
for d in lines[1].split(","):
    if d != "x":
        d = int(d)
        m = d - (start % d)  # minutes until bus d next departs
        print(d, m)
        if m < small:
            print("num small ", small)
            small = m
            print(small, d)
            min_ans.append(small * d)
        waits.append(m)
ans(min_ans[-1]) # 2995
## part 2 ##
departs = []
offsets = []
offsets2 = []
offsets3 = []
for i, d in enumerate(lines[1].split(",")):
    if d != "x":
        d = int(d)
        departs.append(d)
        offsets.append(i)
        # Reduce the raw list position i modulo d by repeated subtraction.
        # NOTE(review): `while i > d` leaves i in (0, d] rather than [0, d)
        # when i is a positive multiple of d — harmless for the CRT below
        # (residue d ≡ 0 mod d) but worth confirming.
        while i > d:
            i -= d
        offsets2.append(i)
        offsets3.append(d - i)  # residue the answer timestamp must have mod d
        print(i)
first = departs[0]
idx = 0
from math import gcd # Python versions 3.5 and above
# depart mod clocktime == offset
def allmod(m, l):
    """Return m reduced modulo each element of l, in order."""
    return [m % divisor for divisor in l]
def correct_time(time, L=departs):
    """Return one bool per bus: does bus L[k] depart exactly offsets[k]
    minutes after `time`?

    Fixed: the original accepted `L` but ignored it and always zipped
    the global `departs`; `L` is now used. The default keeps every old
    call site behaving identically.
    """
    return [(time + goal_minute) % dep == 0 for dep, goal_minute in zip(L, offsets)]
def quick_correct(time):
    """Return True iff every bus departs at its required offset after `time`."""
    return all((time + goal) % dep == 0 for dep, goal in zip(departs, offsets))
# Build a human-readable system of congruences (for a Wolfram Alpha query).
Z = list(zip(departs, offsets3))
eqn = "solve for x, "
# eqn = ""
for dep, offset in Z:
    eqn += f"x = {dep - offset} mod {dep}, "
print(eqn) # me trying to make a wolfram alpha query post
from functools import reduce
# https://rosettacode.org/wiki/Chinese_remainder_theorem#Python_3.6
def chinese_remainder(n, a):
    """Solve x ≡ a[k] (mod n[k]) for pairwise-coprime moduli `n`.

    Standard CRT construction: x = Σ a_i · N_i · inv(N_i, n_i) (mod Πn),
    where N_i = Πn / n_i. Fixed: the accumulator no longer shadows the
    builtin `sum`.
    """
    total = 0
    prod = reduce(lambda x, y: x * y, n)
    for n_i, a_i in zip(n, a):
        p = prod // n_i
        total += a_i * mul_inv(p, n_i) * p
    return total % prod
def mul_inv(a, b):
    """Return the multiplicative inverse of a modulo b (extended Euclid).

    Assumes gcd(a, b) == 1; for b == 1 the inverse is trivially 1.
    """
    modulus = b
    inv_prev, inv = 0, 1
    if b == 1:
        return 1
    while a > 1:
        # One Euclid step; quotient is taken before a and b are rotated.
        quotient, (a, b) = a // b, (b, a % b)
        inv_prev, inv = inv - quotient * inv_prev, inv_prev
    if inv < 0:
        inv += modulus
    return inv
# Part 2 answer: moduli are the bus ids, residues are the required
# timestamp remainders computed earlier.
n = departs[:]
a = offsets3[:]
# print("1068781, correct small_input")
cr = chinese_remainder(n, a)
ans(cr) # 1012171816131114
exit()
"""
wrong attempts below
"""
# 703
# a = [0, 1, 2, 3, 4, 5, 6, 7, 8]
# print(departs)
# print(allmod(first, departs))
# marked = dict()
# diffs = dict()
# for i in range(len(departs)):
# marked[i] = False
# while True:
# # CT = correct_time(idx, departs)
# # for i, v in enumerate(CT):
# # if v:
# # if not marked[i]:
# # print(f"{i} depart({departs[i]}) every {idx}")
# # diffs[i] = idx
# # marked[i] = True
# # if all(marked.values()):
# # break
# # if AM[2] == 2:
# # print(idx - l)
# # l = idx
# if quick_correct(idx):
# ans(idx) # this will theoretically finish
# exit()
# break
# # if all(correct_time(idx, departs)):
# # ans(idx)
# # break
# idx += first
# print(diffs.values())
# # from fractions import gcd # Python versions below 3.5
# from functools import reduce # Python version 3.x
# def lcm(denominators):
# return reduce(lambda a, b: a * b // gcd(a, b), denominators)
# dv = list(diffs.values())
# GOAL_SMALL = 1068781
# l = lcm(diffs.values())
# s = 1
# for i in diffs.values():
# s *= i
# print(GOAL_SMALL / i)
# print("multiplied: ", s)
# print(correct_time(s, departs))
# print("lcm: ", l)
# print(correct_time(l, departs))
# print("gcd: ", gcd(*dv))
# print(correct_time(l, departs))
# print("all true below? (1068781)")
# print(correct_time(1068781, departs))
|
#from hungaryCard import HungaryCard, GetCards
from hungaryCard import hungaryCard
from enum import Enum
import os,sys
from PIL import Image
from random import shuffle
import random
from django.http import HttpResponse
def index(request):
    """Render one Hungarian card's two values as a plain HTTP response.

    Fixed: the original passed three positional arguments to
    HttpResponse, but its second and third parameters are
    `content_type` and `status` — so "...." became the content type and
    the second card value the status code instead of response body.
    """
    list_tuple = tuple(hungaryCard.HungaryCard)
    body = "{} .... {}".format(list_tuple[1].value[0], list_tuple[1].value[1])
    return HttpResponse(body)
# Create your views here..
# --- module-level smoke test: runs once at import time ---
print(hungaryCard.HungaryCard)
g = hungaryCard.GetCards()
t = g.get_list_tuple()
d = g.get_list_digits()
print(t)
random.shuffle(d)  # shuffle the digit list in place
print(d)
list_tuple = tuple(hungaryCard.HungaryCard)
print(list_tuple[1].value[0], "....", list_tuple[1].value[1])
# Shared, module-level cart that all helpers below read and mutate.
shopping_list = []
def show_help():
    """Display the prompt and the list of recognised commands."""
    print("What should we pick up at the store?")
    commands = """
Enter 'DONE' to stop adding items.
Enter 'HELP' for this help.
Enter 'SHOW' to see your current shopping list.
"""
    print(commands)
def add_to_list(item):
    """Append `item` to the shared shopping_list and confirm the add."""
    shopping_list.append(item)
    count = len(shopping_list)
    print("You've just added {} to the shopping cart! You currently have {} items in your list.".format(item, count))
def show_list():
    """Print every item currently in the shared shopping_list."""
    print("Here's your list:")
    for entry in shopping_list:
        print(entry)
# Interactive loop: read items until the user types DONE.
show_help()
while True:
    new_item = input("> ")
    if new_item == "DONE":
        break
    elif new_item == "HELP":
        show_help()
        continue  # these continues say to go back to the while true part when done.
    elif new_item == "SHOW":
        show_list()
        continue
    add_to_list(new_item)
    # I can remove the continue lines if i move this into an else statement
    # else add_to_list(new_item)
show_list()
"""empty message
Revision ID: 0010_events_table
Revises: 0009_created_by_for_jobs
Create Date: 2016-04-26 13:08:42.892813
"""
# revision identifiers, used by Alembic.
revision = "0010_events_table"
down_revision = "0009_created_by_for_jobs"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the `events` table (UUID pk, type, timestamp, JSON payload)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "events",
        sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
        sa.Column("event_type", sa.String(length=255), nullable=False),
        sa.Column("created_at", sa.DateTime(), nullable=False),
        sa.Column("data", postgresql.JSON(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    ### end Alembic commands ###
def downgrade():
    """Drop the `events` table, reversing upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("events")
    ### end Alembic commands ###
|
from __future__ import unicode_literals
from django.contrib import admin
from models import Service, AmcReport, Status
class AmcReportAdmin(admin.ModelAdmin):
    """Admin customisation for AmcReport: show the key contract columns
    in the change-list view."""
    model = AmcReport
    list_display = ('customer_name', 'project_name', 'milestone_name', 'Due_date', 'AMC_end_date', 'status', 'notification')
# Register the models with the admin site; AmcReport gets its custom admin.
admin.site.register(Service)
admin.site.register(AmcReport, AmcReportAdmin)
admin.site.register(Status)
|
#!/usr/bin/env python
import os
# Run scaleCards for every mass point in [110, 135), skipping
# multiples of 5.
for mass in range(110, 135):
    if mass % 5 != 0:
        os.system('python scaleCards.py --xsbr --ddir . %i' % mass)
|
import unittest
from LogInfo import LogInfo
class LogInfoTests(unittest.TestCase):
def setUp(self):
mock_log = dict()
mock_log["ip_address"] = "222.64.146.118"
mock_log["datetime"] = "19/Jun/2005:06:44:17"
mock_log["zone"] = "+0200"
mock_log["method"] = "GET"
mock_log["resource"] = "/wximages/wxwidgets02-small.png"
mock_log["section"] = "wximages"
mock_log["protocol"] = "HTTP/1.1"
mock_log["status"] = "200"
mock_log["bytes"] = "12468"
self.mock_log = mock_log
mock_data = ('222.64.146.118 - - [19/Jun/2005:06:44:17 +0200] '
'"GET /wximages/wxwidgets02-small.png HTTP/1.1" 200 12468 '
'"http://blog.vckbase.com/bastet/" "Mozilla/4.0 '
'(compatible; MSIE 6.0; Windows NT 5.1; SV1; TencentTraveler )"'
)
self.mock_data = mock_data
def test_parse_data(self):
log = LogInfo(self.mock_data)
self.assertEqual(log.log_data, self.mock_log)
with self.assertRaises(ValueError):
LogInfo("")
def main():
    # Entry point: discover and run the tests in this module.
    unittest.main()
if __name__ == '__main__':
main()
|
"""
Given an array of ints length 3,
figure out which is larger between
the first and last elements in the
array, and set all the other elements
to be that value. Return the changed array.
"""
from test import Tester
def max_end3(nums):
    """Overwrite every element of `nums` (in place) with the larger of
    its first and last elements, then return `nums`."""
    dominant = max(nums[0], nums[-1])
    nums[:] = [dominant] * len(nums)
    return nums
# Exercise max_end3 against the CodingBat examples via the local Tester helper.
Tester(max_end3([1, 2, 3]), [3, 3, 3])
Tester(max_end3([11, 5, 9]), [11, 11, 11])
Tester(max_end3([2, 11, 3]), [3, 3, 3])
Tester(max_end3([11, 3, 3]), [11, 11, 11])
Tester(max_end3([3, 11, 11]), [11, 11, 11])
Tester(max_end3([2, 2, 2]), [2, 2, 2])
Tester(max_end3([2, 11, 2]), [2, 2, 2])
Tester(max_end3([0, 0, 1]), [1, 1, 1])
|
import json

filename = 'favorite_number.json'
# Read the previously stored favourite number; pin the encoding instead
# of relying on the platform/locale default.
with open(filename, encoding="utf-8") as f:
    fav_number = json.load(f)
# Fixed typos in the user-facing message ("you favorite", "its").
print(f"I know your favorite number, it's {fav_number}!")
# -*- coding: utf-8 -*-
# Demonstrates that a tuple is only shallowly immutable: the list it
# contains can still be modified element by element.
tuple1 = ('a','b',['A','B'])
print(tuple1)
a = input('替换tuple中的A元素')  # prompt: replacement for element 'A'
b = input('替换tuple中的B元素')  # prompt: replacement for element 'B'
tuple1[2][0] = a
tuple1[2][1] = b
print('修改后的tuple 为:%s'%str(tuple1))  # prints the modified tuple
from LibraryClass import *
if __name__ == "__main__":
    print("\nHey, Welcome to GG'LIBRARY'MANAGEMENT'SYSTEM.....\n")
    # Keyword-driven command loop: each branch matches loose phrases in
    # the user's request and dispatches to a Library operation.
    while True:
        choice = input("how may i help you? ")
        choice = choice.lower()
        if "help" in choice or "intstruct" in choice or "assist" in choice:
            print("hey, you can perform following operations.....\n")
            print("1. Add Book")
            print("2. Delete Book")
            print("3. Update Book")
            print("4. Issue Book")
            print("5. Return Book function when someone return's a Book of library.")
            print("6. Status of Books that are Issued")
            print("7. Show All Books Present in the system.")
            print("8. Exit to end up.")
            print("\nThat's it....\nThank You!!\n")
        elif "exit" in choice or "bye" in choice or "that's it" in choice:
            print("\nThank You!!\nHope you Like it...\n")
            break
        elif "add" in choice or "store" in choice or "load" in choice or "put on" in choice:
            # NOTE(review): only constructs a Library instance — presumably
            # Library() prompts for the new book's details in __init__; confirm.
            l = Library()
        elif "remove" in choice or "delete" in choice or "erase" in choice:
            print("\nhey, specify book-id to delete that book from the system.....")
            Library.delete_book_from_database(input("Enter Book-ID : "))
        elif ("show" in choice or "gather" in choice or "display" in choice or "extract" in choice or "generate" in choice or ("give" in choice and "details" in choice)) and "all" in choice and "status" not in choice and "availability" not in choice:
            print("\nhere, is the list of all the books present.....\n")
            Library.show_all_books()
        elif ("show" in choice or "gather" in choice or "display" in choice or "extract" in choice or "generate" in choice or ("give" in choice and "details" in choice)) and "book" in choice:
            print("\nhey, to get details about particular book, specify it's id.....")
            Library.gather_particular_book_details(input("Enter Book-ID : "))
        elif "status" in choice or "availability" in choice:
            print("\nhere, is the list of all the status of the books.....\n")
            Library.show_status_of_all_books()
        elif ("update" in choice or "change" in choice or "upgrade" in choice or "amend" in choice or "modify" in choice or "revise" in choice) and ("price" in choice or "cost" in choice or "rate" in choice or "expense" in choice or "fare" in choice):
            print("\nhey, specify book-id whose price you want to change.....")
            Library.update_price(input("Enter Book-ID : "))
        elif ("check" in choice or "show" in choice or "tell" in choice or "gather" in choice) and ("availi" in choice):
            print("\nhey, specify book-id whose availibility you want to check....")
            Library.check_book_availability(input("Enter Book-ID : "))
        elif ("show" in choice or "gather" in choice or "display" in choice or "extract" in choice or "generate" in choice or ("give" in choice and "details" in choice)) and ("id" in choice or "book_id" in choice):
            print("\nhey, specify the title of the book.....")
            Library.get_book_id(input("Enter Book-TITLE : "))
        elif ("get" in choice or "give" in choice or "show" in choice) and ("issued" in choice):
            print("\nhey, welcome.....")
            Library.gather_who_issued_book(input("Enter Book-ID : "))
        elif ("issue" in choice):
            print("\nhey, welcome please specify the book-d you want to issue.....")
            Library.issue_book(input("Enter Book-ID : "))
        elif ("return" in choice or "give back" in choice):
            print("\nhey, welcome please specify the book-id that you want to return.....")
            Library.return_book(input("Enter Book-ID : "))
        else:
            print("\nInvalid Functioning.\n")
from math import floor
import random
# Demo data: 100 random values in [0, 100), sorted, plus a random target.
# NOTE(review): `list` shadows the builtin of the same name in this script.
list = [random.randrange(0,100) for i in range(100)]
list.sort()
num = random.randrange(0,100)
def binaryS(list, num):
    """Binary-search `num` in the sorted, non-empty sequence `list`.

    Returns the index of `num` when found. When absent, returns a *value*
    from the list adjacent to the insertion point (preserving the
    original, admittedly odd, contract).

    Fixes:
    - `end` started at len(list), so `mid` could index one past the end
      and raise IndexError whenever `num` is greater than every element.
    - The not-found fallback indices are now clamped to valid positions.

    Note: the parameter name `list` (shadowing the builtin) is kept so
    keyword call sites remain compatible.
    """
    start = 0
    end = len(list) - 1
    while start <= end:
        mid = (start + end) // 2
        if list[mid] == num:
            return mid
        if list[mid] > num:
            end = mid - 1
        else:
            start = mid + 1
    # Not found: `start` is the insertion point, somewhere in [0, len].
    if start >= len(list):
        return list[-1]
    if list[start] < num:
        return list[min(start + 1, len(list) - 1)]
    return list[start]
# Show the data, the target, and the search result.
print(list)
print(num)
print(binaryS(list, num))
# try/except/else demo: the `else` branch runs only when the try block
# raised no exception.
try:
    x = 2
    y = 10
    z = y/x
    print(z)
except Exception as e:
    print(e)
else:
    m = x + y
    print(m)
import get_prices as hist
import tensorflow as tf
from preprocessing import DataProcessing
# import pandas_datareader.data as pdr if using the single test below
import pandas_datareader.data as pdr
import yfinance as fix
import numpy as np
#import matplotlib.pyplot as plt

fix.pdr_override()
start = "2003-01-01"
end = "2018-01-01"
hist.get_stock_data("AAPL", start_date=start, end_date=end)
process = DataProcessing("stock_prices.csv", 0.9)
process.gen_test(10)   # build test samples with a sliding window
process.gen_train(10)  # build training samples with a sliding window
# Normalise Adj Close (/200) and Volume (/1e9).
# NOTE(review): original author asks whether min-max scaling
# x' = (x - min) / (max - min) would be more robust (min=0, max=200).
X_train = process.X_train / np.array([200, 1e9])
X_train = X_train.reshape(X_train.shape[0], 20)
Y_train = process.Y_train / 200
X_test = process.X_test / np.array([200, 1e9])
X_test = X_test.reshape(X_test.shape[0], 20)
Y_test = process.Y_test / 200
# Small dense regression network on the 20 flattened features.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(32, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(64, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1, activation=tf.nn.relu))
model.compile(optimizer="adam", loss="mean_squared_error")
model.fit(X_train, Y_train, epochs=100)
print(model.evaluate(X_test, Y_test))
# If instead of a full backtest, you just want to see how accurate the model is for a particular prediction, run this:
data = pdr.get_data_yahoo("AAPL", "2017-12-19", "2018-01-03")
stock = data[["Adj Close", "Volume"]]  # adjusted close (split/dividend-adjusted price)
X_predict = np.array(stock)
X_predict = X_predict / np.array([200, 1e9])
X_predict = X_predict.reshape(1, 20)
print("predict:")
print(model.predict(X_predict)*200)
|
#-*- coding: utf-8 -*-
from selenium import webdriver
import os
import time
import unittest
import sys, traceback
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver import ActionChains
import telegram
def PATH(p):
    """Resolve `p` relative to this script's directory, as an absolute path.

    Converted from a lambda assignment to a def (PEP 8: assigning a
    lambda to a name defeats its purpose and hides the function name in
    tracebacks); behavior is unchanged.
    """
    return os.path.abspath(os.path.join(os.path.dirname(__file__), p))
class item_zzimTest(unittest.TestCase):
    """End-to-end Selenium test of Zigbang's "favorite (zzim) listing"
    flow: log in, verify the favorites count, clear all favorites, add
    new ones, and report repeated failures to a Telegram channel."""

    def __init__(self, x):
        # NOTE(review): non-standard TestCase constructor; `x` appears to
        # select a browser tab/window index — confirm against the runner.
        super().__init__()
        self.x = x

    def moveTab(self, x):
        # Switch the driver to window handle `x`, then give the page time
        # to settle.
        window_before = self.driver.window_handles[x]
        self.driver.switch_to.window(window_before)
        return time.sleep(2)

    def setUp(self):
        # NOTE(review): the argument is already an absolute path, so the
        # join inside PATH() is redundant here — confirm the intent.
        self.chromeDriver = PATH('/Users/cell/Downloads/chromedriver')
        self.driver = webdriver.Chrome(executable_path=self.chromeDriver)
        self.wait = WebDriverWait(self.driver, 5)

    def runTest(self):
        # Retry the whole scenario up to 3 times; only on the final
        # failure is the error pushed to Telegram.
        count = 0
        while True:
            try:
                # NOTE(review): hard-coded bot token and account password
                # are secrets and should live in environment variables or
                # a credentials store, not in source control.
                my_token = "729314656:AAFulVrBg4MQcEDBHi_oSUoIV1B2kPP0fIU"
                my_bot = telegram.Bot(token=my_token)
                zigbangUrl = "https://www.zigbang.com/"
                confirmAccount = "qa_test_account@gmail.com"
                confirmpwAccount = "Wlrqkd7905!"
                # 0. Open the Zigbang web page
                self.driver.get(zigbangUrl)
                time.sleep(1)
                self.driver.add_cookie({'name': 'cookie_sms_app_down', 'value': 'true'})
                # Pin the app-download popup cookie to true so it never shows
                self.driver.maximize_window()
                # 1. Open the favorited-listings page (hover nav, click link)
                element_to_hover_over = self.wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, "has_d2")))[2]
                hover = ActionChains(self.driver).move_to_element(element_to_hover_over)
                hover.perform()
                self.wait.until(EC.visibility_of_element_located((By.LINK_TEXT, "찜한 매물"))).click()
                time.sleep(1)
                # 2. Log in with the QA account
                self.wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "i_login"))).click()
                self.wait.until(EC.visibility_of_element_located((By.NAME, "username"))).send_keys(confirmAccount)
                self.wait.until(EC.visibility_of_element_located((By.XPATH, "//button[@class='btn btn-ok']"))).click()
                self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "input[type='password']"))).send_keys(confirmpwAccount)
                self.wait.until(EC.visibility_of_element_located((By.XPATH, "//button[@class='btn btn-ok']"))).click()
                # 3. Verify the rendered list length matches the "(n)" counter
                zzimList = len(self.wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, "list-item"))))
                zzimCount = self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".page-title > span"))).text
                zzimReplace = zzimCount.replace("(", "").replace(")", "")
                if not int(zzimList) == int(zzimReplace):
                    raise Exception("찜 개수가 상이함으로 자동화를 종료합니다.", "찜 리스트 : ", int(zzimList), "찜 개수 표시 : ", int(zzimReplace))
                # 4. Verify the favorites list loaded (exactly 1 item expected)
                if int(zzimReplace) != 1:
                    raise Exception("찜목록을 가져올 수 없습니다.")
                # 5. Favorite an arbitrary room
                self.wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, "has_d2")))[2].click()
                time.sleep(2)
                self.wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, "list-item")))[0].click()
                self.moveTab(1)
                time.sleep(1)
                self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.btn_bx > button.btn-zzim.off"))).click()
                time.sleep(1)
                self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.alert-layer > div > div > button"))).click()
                time.sleep(1)
                # 6. Select all favorites (now 2 expected) and delete them
                element_to_hover_over = self.wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, "has_d2")))[2]
                hover = ActionChains(self.driver).move_to_element(element_to_hover_over)
                hover.perform()
                self.wait.until(EC.visibility_of_element_located((By.LINK_TEXT, "찜한 매물"))).click()
                time.sleep(1)
                zzimList = len(self.wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, "list-item"))))
                zzimCount = self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".page-title > span"))).text
                zzimReplace = zzimCount.replace("(", "").replace(")", "")
                if not int(zzimList) == int(zzimReplace):
                    raise Exception("찜 개수가 상이함으로 자동화를 종료합니다.", "찜 리스트 : ", int(zzimList), "찜 개수 표시 : ", int(zzimReplace))
                if int(zzimReplace) != 2:
                    raise Exception("찜목록을 가져올 수 없습니다.")
                self.wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "allcheck_btn1"))).click()
                time.sleep(2)
                self.wait.until(EC.visibility_of_element_located((By.LINK_TEXT, u"삭제"))).click()
                time.sleep(2)
                self.wait.until(EC.alert_is_present()).accept()
                time.sleep(2)
                # 7. Favorite the dedicated test room (search, open, zzim)
                self.wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, "has_d2")))[2].click()
                self.wait.until(EC.visibility_of_element_located((By.ID, "rooms-textfield"))).send_keys("서도면")
                time.sleep(1)
                self.wait.until(EC.visibility_of_element_located((By.ID, "btn-room-search"))).click()
                time.sleep(3)
                self.wait.until(EC.visibility_of_all_elements_located((By.CLASS_NAME, "list-item")))[0].click()
                self.moveTab(2)
                time.sleep(1)
                self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.btn_bx > button.btn-zzim.off"))).click()
                time.sleep(1)
                self.wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.alert-layer > div > div > button"))).click()
                time.sleep(1)
                print("원룸/오피스탤 찜목록 완료")
                break
            except Exception as e:
                err_msg = u"원룸 찜목록 페이지 오류:" + "\n" + str(e) + "\n\n" + self.driver.current_url
                if count == 2:
                    # Third failure: notify the QA Telegram channel and stop.
                    my_bot.sendMessage(chat_id='@zigbang_qa_notification', text=err_msg)
                    break
                else:
                    # Otherwise log the traceback, restart the browser, retry.
                    traceback.print_exc(file=sys.stdout)
                    self.driver.quit()
                    self.setUp()
                    count += 1

    def tearDown(self):
        self.driver.quit()
import numpy as np
from collections import Counter
import pickle
import pathlib
import json
import os,sys
HERE = pathlib.Path().absolute().parent.__str__()
sys.path.append(os.path.join(pathlib.Path().absolute().parent,"card_db")) # Hax lol
import caffeinate
#pathlib.Path(__file__).parent.absolute()
import pandas as pd
import init_db
CONN = init_db.CONN
CURSOR = CONN.cursor()
# df = pd.read_sql_query("SELECT * FROM cards LIMIT 1;",CONN)
# print(df.iloc[0])
# for k,v in df.dtypes.items():
# print(k,v)
# HODOR
TYPES_TAG = "<TYPES>"
RARITY_TAG = "<RARITY>"
TEXT_TAG = "<TEXT>"
POWER_TAG = "<POWER>"
TOUGHNESS_TAG = "<TOUGHNESS>"
START_TAG = "<START>"
PAD_TAG = "<PAD>"
END_TAG = "<END>"
NULL_TAG = "<NULL>"
UNKNOWN_KEY = "<UNK>"
SPACER_KEYS = [UNKNOWN_KEY, START_TAG, END_TAG, TYPES_TAG, RARITY_TAG, TEXT_TAG,PAD_TAG, POWER_TAG, TOUGHNESS_TAG]
ONEHOT_SIZE = 512 #- len(SPACER_KEYS)
MAX_TARGET_LEN = 12 # Progenitus has the longest cmc - 10. We use 12 to account for start/end tags
INDEX_ONLY_INPUT = True
LSTM_SIZE = 32
BATCH_SIZE = 16
EPOCHS = 1000
LEARNING_RATE = 0.0001
def onehotify(word, onehot_lookup, index_only=False):
    """Map `word` to its one-hot index, or to a full one-hot vector.

    Unknown words fall back to the lookup table's <UNK> entry.

    Fixed: the fallback previously referenced the global UNKNOWN_INDEX,
    which is only assigned on one module-load path (the cache-miss
    branch), so a lookup miss could raise NameError instead of mapping
    to the unknown token; it also bound the exception (`as e`) without
    using it.
    """
    try:
        i = onehot_lookup[word]
    except KeyError:
        i = onehot_lookup[UNKNOWN_KEY]
    if index_only:
        return i
    o = np.zeros(len(onehot_lookup))
    o[i] = 1.0
    return o
# FETCH DATA
# Card data is cached as a pickle; on a cache miss the SQL query below
# rebuilds it from the card database.
ONE_HOT_DATA = "data/mana_pred_data.pkl"
MODEL_INPUT_DATA = "data/model_input_data.pkl"
try:
    with open(ONE_HOT_DATA,"rb") as f:
        df = pickle.load(f)
    print("loaded SQL data")
except:
    # NOTE(review): bare except treats *any* failure (including a corrupt
    # pickle or a typo above) as a cache miss; narrowing to
    # (OSError, pickle.UnpicklingError) would surface real errors.
    print("no pickled SQL data, recreating...")
    # Vintage-legal cards first printed 2003-2016, one row per card name.
    df = pd.read_sql_query("""SELECT c.name,
        c.text as text,
        c.min_text as min_text,
        c.rarity,
        c.convertedManaCost as cmc,
        c.type,
        c.types,
        c.printings,
        c.power,
        c.toughness,
        MIN(s.releaseDate) as first_print,
        c.manaCost as mana_cost,
        c.colorIdentity as color_id
        FROM cards c
        JOIN legalities l ON (l.uuid = c.uuid AND l.format = "vintage")
        JOIN sets s ON instr(c.printings, s.code) > 0
        WHERE s.releaseDate BETWEEN "2003-01-01" AND "2017-01-01"
        -- AND c.type LIKE "%Creature%"
        -- AND c.colorIdentity = "B"
        -- AND c.rarity = "common"
        GROUP BY c.name;""", CONN)
    print(f"Number of cards found: {len(df)}")
    with open(ONE_HOT_DATA,"wb+") as f:
        pickle.dump(df,f)
#https://stackoverflow.com/questions/31468117/python-3-can-pickle-handle-byte-objects-larger-than-4gb
class MacOSFile(object):
    """File-object proxy that chunks reads and writes into <2 GB pieces.

    Works around a macOS bug where fread/fwrite fail for buffers of 2 GB
    or more; every attribute not overridden here delegates to the
    wrapped file object.
    """

    # Largest chunk handed to the underlying file object. Fixed: the
    # original wrote `1 << 31 - 1`, which parses as `1 << 30` because
    # `-` binds tighter than `<<`; the intended limit is 2**31 - 1.
    _CHUNK = (1 << 31) - 1

    def __init__(self, f):
        self.f = f

    def __getattr__(self, item):
        # Delegate everything else (seek, close, ...) to the wrapped file.
        return getattr(self.f, item)

    def read(self, n):
        """Read n bytes; requests of 2 GB+ are assembled chunk by chunk."""
        if n >= (1 << 31):
            buffer = bytearray(n)
            idx = 0
            while idx < n:
                batch_size = min(n - idx, self._CHUNK)
                buffer[idx:idx + batch_size] = self.f.read(batch_size)
                idx += batch_size
            return buffer
        return self.f.read(n)

    def write(self, buffer):
        """Write `buffer` in chunks no larger than _CHUNK bytes."""
        n = len(buffer)
        idx = 0
        while idx < n:
            batch_size = min(n - idx, self._CHUNK)
            self.f.write(buffer[idx:idx + batch_size])
            idx += batch_size
def pickle_dump(obj, file_path):
    """Pickle `obj` to `file_path`, routing writes through MacOSFile so
    buffers of 2 GB or more avoid the macOS fwrite bug."""
    with open(file_path, "wb") as handle:
        return pickle.dump(obj, MacOSFile(handle), protocol=pickle.HIGHEST_PROTOCOL)
def pickle_load(file_path):
    """Unpickle and return the object stored at `file_path`, reading
    through MacOSFile so 2 GB+ reads are chunked."""
    with open(file_path, "rb") as handle:
        return pickle.load(MacOSFile(handle))
try:
input_data = pickle_load(MODEL_INPUT_DATA)
DECODER_TARGET_ARRAY = input_data["decoder_target_array"]
DECODER_INPUT_ARRAY = input_data["decoder_input_array"]
ENCODER_INPUT_ARRAY = input_data["encoder_input_array"]
#ENCODER_INPUT_ARRAY = np.flip(ENCODER_INPUT_ARRAY,axis=1)
#print("SHAPE:",ENCODER_INPUT_ARRAY.shape)
ONEHOT_WORD_LOOKUP = input_data["onehot_word_lookup"]
ONEHOT_INDEX_LOOKUP = input_data["onehot_index_lookup"]
ONEHOT_COST_LOOKUP= input_data["onehot_cost_lookup"]
ONEHOT_COST_INDEX_LOOKUP = input_data[ "onehot_cost_index_lookup"]
MAX_SEQ_LEN = input_data["max_seq_len"]
print("loaded MODEL INPUT data")
except Exception as e:
print(e)
print("no pickled MODEL INPUT data, recreating...")
# df = pd.read_sql_query("SELECT * FROM cards LIMIT 1;",CONN)
# print(df.iloc[0])
# for k,v in df.dtypes.items():
# print(k,v)
# HODOR
df["cleaned_text"] = ""#df["min_text"]
df["cleaned_cmc"] = ""#df["cmc"]
all_words = []
numbers = set()
one_hot_costs = set()
for i,row in df.iterrows():
if row["min_text"]: # account for lands and shit
cleaned_words = []
words = row["min_text"].replace(". "," . ").replace("\\"," ").strip().split(" ")
for w in words:
#print(s)
cleaned_word = w.lower().replace(",","")
all_words.append(cleaned_word)
cleaned_words.append(cleaned_word)
#row["cleaned_text"] = " ".join(cleaned_words)
df.loc[i, 'cleaned_text'] = " ".join(cleaned_words)
row["rarity"] = row["rarity"].strip().lower()
all_words.append(row["rarity"])
# Power/Toughness
if row["toughness"]:
numbers.add(str(row["toughness"]).strip())
if row["power"]:
numbers.add(str(row["power"]).strip())
types = row["types"]
type_tokens = []
if "," in types:
for t in row["types"].split(","):
type_tokens.append(t.strip().lower())
else:
type_tokens.append(types.strip().lower())
for t in type_tokens:
all_words.append(t)
df.loc[i,"types"] = " ".join(type_tokens)
# mana costs
if row["mana_cost"]:
mc = row["mana_cost"].replace("}{","},{")
df.loc[i, 'cleaned_cmc'] = mc
cost_list = mc.split(",") if "," in mc else [mc]
for c in cost_list:
one_hot_costs.add(c)
one_hot_words = set()
counter = Counter(all_words)
for k,v in counter.most_common(ONEHOT_SIZE):
#print(k,v)
#if v >= 10:
one_hot_words.add(k)
for s in SPACER_KEYS:
one_hot_words.add(s)
for n in numbers:
one_hot_words.add(n)
one_hot_words = tuple(sorted(one_hot_words))
print(f"word tokens: {one_hot_words[:10]}")
ONEHOT_LEN = len(one_hot_words)
print(f"Number of one-hot tokens: {ONEHOT_LEN}")
ONEHOT_WORD_LOOKUP = {o:i for i,o in enumerate(one_hot_words)}
ONEHOT_INDEX_LOOKUP = {i:o for i,o in enumerate(one_hot_words)}
UNKNOWN_INDEX = ONEHOT_WORD_LOOKUP[UNKNOWN_KEY]
PAD_INDEX = ONEHOT_WORD_LOOKUP[PAD_TAG]
one_hot_costs.add(START_TAG)
one_hot_costs.add(END_TAG)
one_hot_costs.add(NULL_TAG)
one_hot_costs.add(PAD_TAG)
ONEHOT_COST_LOOKUP = {o:i for i,o in enumerate(one_hot_costs)}
ONEHOT_COST_INDEX_LOOKUP = {i:o for i,o in enumerate(one_hot_costs)}
ONEHOT_COST_LEN = len(one_hot_costs)
print(f"Number of one_hot_cost tokens: {ONEHOT_COST_LEN}")
# for o in one_hot_words:
# print(f"{o}, Number of occurrences: {counter[o]}, index: {one_hot_words.index(o)}")
# print(len(one_hot_words))
INPUT_TEXT = []
INPUT_LABELS = []
#
ENCODER_INPUT_DATA = []
DECODER_INPUT_DATA = []
DECODER_TARGET_DATA = []
MAX_SEQ_LEN = max([len(row["cleaned_text"].split(" ")) for _i,row in df.iterrows()])
# for i,_ in enumerate(ENCODER_INPUT_DATA):
# #e = ENCODER_INPUT_DATA[i]
# while len(ENCODER_INPUT_DATA[i]) < MAX_SEQ_LEN:
# ENCODER_INPUT_DATA[i].append(PAD_INDEX)
MAX_SEQ_LEN = MAX_SEQ_LEN + 14 # determined ahead of time with above code
print("MAX SEQUENCE LENGTH: ",MAX_SEQ_LEN)
for i,row in df.iterrows():
keys = [START_TAG,TYPES_TAG] + row["types"].split(" ") + [RARITY_TAG,row["rarity"],TEXT_TAG] + row["cleaned_text"].split(" ") #+ [END_TAG]
if row["power"]:
keys += [POWER_TAG,row["power"]]
if row["toughness"]:
keys += [TOUGHNESS_TAG,row["toughness"]]
keys += [END_TAG]
# Padding
assert len(keys) <= MAX_SEQ_LEN-1, f"Uh Oh: {keys},{len(keys)}"
if len(keys) < MAX_SEQ_LEN:
diff = MAX_SEQ_LEN - len(keys)
keys += ([PAD_TAG] * diff)
#keys += [END_TAG]
x = " ".join(keys)
INPUT_TEXT.append(x)
y = row["cleaned_cmc"].split(",")
# Account for lands and cards with no cost like Living End
if len(y) == 0 or y[0] == "":
y = [NULL_TAG]
INPUT_LABELS.append(y)
y = [START_TAG] + y + [END_TAG]
if len(y) < (MAX_TARGET_LEN):
diff = MAX_TARGET_LEN - len(y)
y += ([PAD_TAG] * diff)
assert len(y) == MAX_TARGET_LEN,f"invalid sequnce: {len(y)},{y}"
tx = [onehotify(w,onehot_lookup=ONEHOT_WORD_LOOKUP,index_only=INDEX_ONLY_INPUT) for w in x.split(" ")]
ENCODER_INPUT_DATA.append(tx)
ty = [onehotify(w.strip(),onehot_lookup=ONEHOT_COST_LOOKUP,index_only=INDEX_ONLY_INPUT) for w in y if len(w.strip()) > 0]
assert len(ty) == MAX_TARGET_LEN,f"invalid sequnce: {len(ty)},{y}"
# if len(ty) > max_decoder_len:
# max_decoder_len = len(ty)
DECODER_INPUT_DATA.append(ty[:-1])
DECODER_TARGET_DATA.append(ty[1:])
#print(f"MAX DECODER SEQUENCE LENGTH: {max_decoder_len}")
# make sure everything is the right shape
for e in ENCODER_INPUT_DATA:
assert len(e) == MAX_SEQ_LEN,f"OH FUCK: Encoder axis 0 :: {len(e)}"
if not INDEX_ONLY_INPUT:
for i in e:
assert len(i) == ONEHOT_LEN,f"oh fuck: Encoder axis 1 :: {len(i)}"
# thank god
ENCODER_INPUT_ARRAY = np.array(ENCODER_INPUT_DATA)
if INDEX_ONLY_INPUT:
encoder_input_array_shape = (len(ENCODER_INPUT_DATA),MAX_SEQ_LEN)
else:
encoder_input_array_shape = (len(ENCODER_INPUT_DATA),MAX_SEQ_LEN,ONEHOT_LEN)
ENCODER_INPUT_ARRAY = np.reshape(ENCODER_INPUT_ARRAY,encoder_input_array_shape)
print(f"Encoder INPUT data formatted: {ENCODER_INPUT_ARRAY.shape}")
# print("ONHOT TARGET SHAPE")
# print(ONEHOT_COST_LOOKUP)
# print(len(ONEHOT_COST_LOOKUP))
#
# print("DECODER SHAPE")
# print(len(DECODER_INPUT_DATA))
# print(len(DECODER_INPUT_DATA[0]))
# print(len(DECODER_INPUT_DATA[0][0]))
# Format the decoder input data - this is
for d in DECODER_INPUT_DATA:
assert len(d) == MAX_TARGET_LEN-1,f"OH FUCK: Encoder axis 0 :: {len(d)}, {d}"
if not INDEX_ONLY_INPUT:
for i in d:
assert len(i) == ONEHOT_COST_LEN,f"oh fuck: Encoder axis 1 :: {len(i)}"
DECODER_INPUT_ARRAY = np.array(DECODER_INPUT_DATA)
if INDEX_ONLY_INPUT:
decoder_input_array_shape = (len(ENCODER_INPUT_DATA),MAX_TARGET_LEN-1)
else:
decoder_input_array_shape = (len(DECODER_INPUT_DATA),MAX_TARGET_LEN-1,ONEHOT_COST_LEN)
DECODER_INPUT_ARRAY = np.reshape(DECODER_INPUT_ARRAY,decoder_input_array_shape)
print(f"Decoder INPUT data formatted: {DECODER_INPUT_ARRAY.shape}")
# Format the decoder TARGET data
for d in DECODER_TARGET_DATA:
assert len(d) == MAX_TARGET_LEN-1,f"OH FUCK: Decoder axis 0 :: {len(d)}, {d}"
if not INDEX_ONLY_INPUT:
for i in d:
assert len(i) == ONEHOT_COST_LEN,f"oh fuck: Decoder axis 1 :: {len(i)}"
DECODER_TARGET_ARRAY = np.array(DECODER_TARGET_DATA)
DECODER_TARGET_ARRAY = np.reshape(DECODER_TARGET_ARRAY,decoder_input_array_shape)
print(f"Decoder TARGET data formatted: {DECODER_TARGET_ARRAY.shape}")
input_data_dict = {"decoder_target_array":DECODER_TARGET_ARRAY,
"decoder_input_array":DECODER_INPUT_ARRAY,
"encoder_input_array":ENCODER_INPUT_ARRAY,
"onehot_word_lookup": ONEHOT_WORD_LOOKUP,
"onehot_index_lookup": ONEHOT_INDEX_LOOKUP,
"onehot_cost_lookup":ONEHOT_COST_LOOKUP,
"onehot_cost_index_lookup":ONEHOT_COST_INDEX_LOOKUP,
"max_seq_len": MAX_SEQ_LEN
}
pickle_dump(input_data_dict,MODEL_INPUT_DATA)
UNKNOWN_INDEX = ONEHOT_WORD_LOOKUP[UNKNOWN_KEY]
PAD_INDEX = ONEHOT_WORD_LOOKUP[PAD_TAG]
ONEHOT_COST_LEN = len(ONEHOT_COST_LOOKUP)
print("BEGINNING MODEL")
ENCODER_SIZE = len(ONEHOT_WORD_LOOKUP)
DECODER_SIZE = len(ONEHOT_COST_LOOKUP)
print("ENCODER_INPUT_ARRAY:",ENCODER_INPUT_ARRAY.shape)
import tensorflow as tf
from tensorflow.keras.layers import Input, LSTM, Dense, Embedding
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
try:
model = tf.keras.models.load_model('models/mana_pred.h5')
print("loaded trained models...")
except Exception as e:
print(e)
print("Unable to load models, recreating & training....")
# Define an input sequence and process it.
if INDEX_ONLY_INPUT:
encoder_inputs = Input(shape=(MAX_SEQ_LEN,),name="encoder_inputs")
else:
encoder_inputs = Input(shape=(None, ENCODER_SIZE),name="encoder_inputs")
print("encoder_inputs:",encoder_inputs.shape)
# Set up the decoder, using `encoder_states` as initial state.
if INDEX_ONLY_INPUT:
decoder_inputs = Input(shape=(MAX_TARGET_LEN-1,),name="decoder_inputs")
else:
decoder_inputs = Input(shape=(None, DECODER_SIZE),name="decoder_inputs")
#encoder = LSTM(LSTM_SIZE, return_state=True)
encoder1 = LSTM(LSTM_SIZE, return_state=True, return_sequences=True,name="encoder1")
encoder2 = LSTM(LSTM_SIZE, return_state=True, return_sequences=True,name="encoder2")
encoder3 = LSTM(LSTM_SIZE, return_state=True,name="encoder3")#, return_sequences=False)
embed_layer = Embedding(input_dim=ENCODER_SIZE, output_dim=LSTM_SIZE)#,input_length=BATCH_SIZE)
embedding_output = embed_layer(encoder_inputs)
encoder_hidden_outputs, state_h, state_c = encoder1(embedding_output)
#encoder_hidden_outputs2, state_h, state_c = encoder2(encoder_hidden_outputs)
#encoder_outputs, state_h, state_c = encoder3(encoder_hidden_outputs2,initial_state=[hidden_state_h2,hidden_state_c2])
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_embed_layer = Embedding(input_dim=DECODER_SIZE, output_dim=LSTM_SIZE, name="decoder_embed")#,input_length=BATCH_SIZE)
decoder_lstm1 = LSTM(LSTM_SIZE, return_sequences=True, return_state=True,name="decoder1")
decoder_lstm2 = LSTM(LSTM_SIZE, return_sequences=True, return_state=True,name="decoder2")
decoder_lstm3 = LSTM(LSTM_SIZE, return_sequences=True, return_state=True,name="decoder3")#, return_state=False)
decoder_embedding_output = decoder_embed_layer(decoder_inputs)
print("decoder_embedding_output:",decoder_embedding_output.shape)
print("Len encoder_states: ", len(encoder_states))
print("encoder_states[0]:",encoder_states[0].shape)
print("encoder_states[1]:",encoder_states[1].shape)
decoder_hidden_outputs, _, _ = decoder_lstm1(decoder_embedding_output,initial_state=encoder_states)
#print("decoder_lstm_outputs:",decoder_lstm_outputs.shape)
decoder_lstm_outputs, _, _ = decoder_lstm2(decoder_hidden_outputs)
#decoder_outputs, _, _ = decoder_lstm3(decoder_hiddens2,initial_state=[decoder_hidden_state_h2, decoder_hidden_state_c2])
decoder_dense = Dense(DECODER_SIZE, activation='softmax',name="decoder_dense")
decoder_outputs = decoder_dense(decoder_lstm_outputs)
print("decoder_outputs:",decoder_outputs.shape)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
opt = Adam(learning_rate=LEARNING_RATE)
# Run training
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy',metrics='accuracy')
print("decoder_inputs:",decoder_inputs.shape)
print("DECODER_INPUT_ARRAY:",DECODER_INPUT_ARRAY.shape)
print("DECODER_TARGET_ARRAY:",DECODER_TARGET_ARRAY.shape)
model.fit([ENCODER_INPUT_ARRAY, DECODER_INPUT_ARRAY], DECODER_TARGET_ARRAY,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_split=0.2)
model.save('models/mana_pred.h5')
#
# print(model.layers)
# print(model.layers[1])
# print(model.layers[1].name)
# print(model.get_layer('encoder1'))
encoder_inputs = model.inputs[0]#"encoder_inputs")
decoder_inputs = model.inputs[1]#("decoder_inputs")
print("decoder_inputs:",decoder_inputs.shape)
encoder_outputs, state_h_enc, state_c_enc = model.get_layer("encoder1").output
encoder_model = Model(encoder_inputs, [state_h_enc, state_c_enc])
decoder_state_input_h = Input(shape=(LSTM_SIZE,), name="decoder_state_input_h")
decoder_state_input_c = Input(shape=(LSTM_SIZE,), name="decoder_state_input_c")
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_embed = model.get_layer("decoder_embed")
decoder_lstm1 = model.get_layer("decoder1")
decoder_lstm2 = model.get_layer("decoder2")
#decoder_lstm3 = model.get_layer("decoder3")
#d1,s1h,s1c = decoder_lstm1(decoder_inputs, initial_state=decoder_states_inputs)
#d2,s2h,s2c = decoder_lstm2(d1,initial_state=[s1h,s1c])
decoder_embedded_inputs = decoder_embed(decoder_inputs)
decoder_hidden_out1, _,_= decoder_lstm1(decoder_embedded_inputs, initial_state=decoder_states_inputs)
decoder_outputs, state_h_dec, state_c_dec = decoder_lstm2(decoder_hidden_out1)
decoder_states = [state_h_dec, state_c_dec]
decoder_dense = model.get_layer("decoder_dense")
decoder_outputs = decoder_dense(decoder_hidden_out1)
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states
)
def decode_sequence(input_seq):
# Encode the input as state vectors.
h,c = encoder_model.predict(input_seq)
states_value = [h,c]
# Generate empty target sequence of length 1.
target_seq = np.zeros(( 1, 1))
# Populate the first character of target sequence with the start character.
target_seq[0, 0] = ONEHOT_COST_LOOKUP[START_TAG]
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = []
while not stop_condition:
k = [target_seq] + states_value
output_tokens, h, c = decoder_model.predict(k)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :]) # <- ORIGINAL
#sampled_token_index = np.random.choice([_ for _ in range(ONEHOT_COST_LEN)],p=output_tokens[0, -1, :])
sampled_char = ONEHOT_COST_INDEX_LOOKUP[sampled_token_index]
#print(f"sampled_char : {sampled_char}")
#print(f" sampled_char: {sampled_char}")
decoded_sentence.append(sampled_char)
# Exit condition: either hit max length
# or find stop character.
if (sampled_char == END_TAG or len(decoded_sentence) > MAX_TARGET_LEN):
stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1, 1))
target_seq[0, 0] = sampled_token_index
# Update states
states_value = [h, c]
return " ".join(decoded_sentence)
# print("ENCODER_INPUT_ARRAY:",ENCODER_INPUT_ARRAY.shape)
# HODOR
#CHECK_INDEX = 90
for CHECK_INDEX in range(20,30):
E = ENCODER_INPUT_ARRAY[CHECK_INDEX]
#print(E.shape)
print("Encoder input: ")
if INDEX_ONLY_INPUT:
print(" ".join([ONEHOT_INDEX_LOOKUP[e] for e in E if ONEHOT_INDEX_LOOKUP[e] != PAD_TAG]))
else:
print(" ".join([ONEHOT_INDEX_LOOKUP[np.argmax(e)] for e in E if ONEHOT_INDEX_LOOKUP[np.argmax(e)] != PAD_TAG]))
# D = DECODER_INPUT_ARRAY[CHECK_INDEX]
# print("input:")
# print(" ".join([ONEHOT_COST_INDEX_LOOKUP[np.argmax(d)] for d in D]))
print(" prediction:")
print(decode_sequence(np.expand_dims(E,0)))
D = DECODER_INPUT_ARRAY[CHECK_INDEX]
print(" real:")
if INDEX_ONLY_INPUT:
print(" ".join([ONEHOT_COST_INDEX_LOOKUP[d] for d in D if ONEHOT_INDEX_LOOKUP[d] != PAD_TAG]))
else:
print(" ".join([ONEHOT_COST_INDEX_LOOKUP[np.argmax(d)] for d in D]))
print("")
|
__author__ = 'Administrator'
import re
class Grep_match:
def __init__(self, grep_exp, matched_str):
self.grep_exp = grep_exp
self.matched_str = matched_str
def exec_grep(self):
wordre = re.compile(self.grep_exp)
list = wordre.findall(self.matched_str)
return list |
import sys
import pygame
def check_keydown_events(event, ship):
"""response to the keydown"""
if event.key == pygame.K_UP:
ship.moving_up = True
elif event.key == pygame.K_DOWN:
ship.moving_down = True
def check_keyup_events(event, ship):
"""response to the keyup"""
if event.key == pygame.K_UP:
ship.moving_up = False
elif event.key == pygame.K_DOWN:
ship.moving_down = False
def check_events(ship):
"""response to the keyboard and the mouse"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ship)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
def update_screen(ai_settings, screen, ship):
"""update the screen's image, and change to the new screen"""
# Redraw the screen each time starts the loop
screen.fill(ai_settings.bg_color)
ship.blitme()
# show the lastest drawing screen
pygame.display.flip() |
def matchor(instance, association):
def takeTwo(elm):
return elm[1]
# count = 0
results = None
# for instance,association in zip(ins_predictions,ass_predictions):
# count +=1
objects = [ i for i,v in enumerate(instance.pred_classes) if v == 1]
shadows = [i for i,v in enumerate(instance.pred_classes) if v == 0]
boxes = []
for o in objects:
for s in shadows:
boxes.append(box_combine(o,s,instance.pred_boxes[o].tensor[0].numpy(),instance.pred_boxes[s].tensor[0].numpy()))
ass_boxes = association.pred_boxes.tensor.numpy()
pair = []
for i,ass_box in enumerate(ass_boxes):
scores = []
ass_box = [ass_box[0],ass_box[1],ass_box[2]-ass_box[0],ass_box[3]-ass_box[1]]
for box in boxes:
k,v = box
scores.append([str(i)+'_'+k,compute_iou(ass_box,v)])
if len(ass_boxes) == 1:
pair.append(sorted(scores,key=takeTwo,reverse=True)[:1])
else:
pair.append(sorted(scores,key=takeTwo,reverse=True)[:1])
if not sum([sc[1] > 0.7 for sc in pair[i]]):
pair[i] = [[0,0]]
O = {}
S = {}
for k,v in enumerate(pair):
if v != [[0,0]] and v != []:
r,o,s = v[0][0].split('_')
if o in O or s in S:
if o in O:
if v[0][1] > O[o][1]:
O[o] = v[0]
if s in S:
if v[0][1] > S[s][1]:
S[s] = s[0]
else:
O[o] = v[0]
S[s] = v[0]
for k,v in S.items():
try:
r,o,s = v[0].split('_')
if results == None:
results = [[int(o),int(s),int(r)]]
else:
results.append([int(o),int(s),int(r)])
except:
return None,None
for v in results:
ins_association = ins_predictions.pred_classes * 0
ass_association = ass_predictions.pred_classes * 0
association_id = 1
for i in v:
ins_association[i[0]] = association_id
ins_association[i[1]] = association_id
ass_association[i[2]] = association_id
association_id += 1
ins_predictions.pred_association = ins_association
ass_predictions.pred_association = ass_association
return ins_predictions,ass_predictions
if __name__ == "__main__":
pass
|
Python 3.7.0 (v3.7.0:1bf9cc5093, Jun 27 2018, 04:59:51) [MSC v.1914 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> x=int(input("Enter a number : "))
Enter a number : 56
>>> if x>17:
v=2*(x-17)
print(v)
else:
v1=(17-x)
print(v1)
78
>>> #Write a Python program to get the difference between a given number and 17, if the number is greater than 17 return double the absolute difference
|
import unittest
from lib.ebook import Ebook, InvalidPercentageError
"""
Naming convention: test_functionName_input/TestState_expectedResult
"""
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
self.ebook = Ebook("What a wonderful world", 73)
def test_pagesRead_getsPagesRead(self) -> None:
self.assertEqual(73, self.ebook.pages_read)
def test_percentComplete(self) -> None:
self.assertEqual(0.73, self.ebook.percent_complete)
def test_setTotalPages_raisesInvalidPercentageError(self):
with self.assertRaisesRegex(InvalidPercentageError,
"E-Book total pages cannot be any number "
"other than 100 per cent"):
self.ebook.total_pages = 122
def test_setPagesRead_moreThan100Pages_raisesInvalidPercentageError(self):
with self.assertRaisesRegex(ValueError,
"What a wonderful world is only 100 pages"
" long, your value should be less than "
"that."):
self.ebook.pages_read = 107
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
# """
# Created on Tue Jul 30 10:02:48 2019
# @author: Wenyang Lyu and Shibabrat Naik
# Compute unstable peridoic orbits at different energies using turning point method
# """
# For the DeLeon-Berne problem
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import math
from scipy import optimize
import matplotlib as mpl
from matplotlib import cm
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
import uposham.turning_point as tp
import uposham.deleonberne_hamiltonian as deleonberne
import os
path_to_data = os.path.join(os.path.dirname(os.path.dirname(__file__)), \
'data/')
path_to_saveplot = os.path.join(os.path.dirname(os.path.dirname(__file__)), \
'tests/plots/')
#%% Setting up parameters and global variables
save_final_plot = True
show_final_plot = False
show_itrsteps_plots = False # show iteration of the UPOs in plots
N = 4 # dimension of phase space
MASS_A = 8.0
MASS_B = 8.0 # De Leon, Marston (1989)
EPSILON_S = 1.0
D_X = 10.0
ALPHA = 1.00
LAMBDA = 1.5
parameters = np.array([MASS_A, MASS_B, EPSILON_S, D_X, LAMBDA, ALPHA])
eqNum = 1
eqPt = tp.get_eq_pts(eqNum, deleonberne.init_guess_eqpt_deleonberne, \
deleonberne.grad_pot_deleonberne, parameters)
#%%
#E_vals = [1.1, 2.00, 3.00, 5.00]
#linecolor = ['b','r','g','m','c']
E_vals = [1.1, 2.00]
linecolor = ['b','r']
n = 4 # number of intervals we want to divide
n_turn = 1 # nth turning point we want to choose.
for i in range(len(E_vals)):
e = E_vals[i] # total energy
deltaE = e - parameters[2]
#Trial initial Condition s.t. one initial condition is on the LHS of the UPO and the
#other one is on the RHS of the UPO
f1 = lambda x: deleonberne.get_coord_deleonberne(x,0.06,e,parameters)
x0_2 = optimize.newton(f1,-0.15)
state0_2 = [x0_2,0.06,0.0,0.0]
f2 = lambda x: deleonberne.get_coord_deleonberne(x,-0.05,e,parameters)
x0_3 = optimize.newton(f2,-0.15)
state0_3 = [x0_3,-0.05,0.0,0.0]
with open("x0_turningpoint_deltaE%s_deleonberne.dat"%(deltaE),'a+') as po_fam_file:
[x0po_1, T_1,energyPO_1] = tp.turningPoint(
state0_2, state0_3, deleonberne.get_coord_deleonberne, \
deleonberne.guess_coords_deleonberne, deleonberne.ham2dof_deleonberne, \
deleonberne.half_period_deleonberne, deleonberne.variational_eqns_deleonberne, \
deleonberne.pot_energy_deleonberne, \
deleonberne.plot_iter_orbit_deleonberne,
parameters, e, n, n_turn, show_itrsteps_plots, po_fam_file)
#%% Load periodic orbit data from ascii files
x0po = np.zeros((4,len(E_vals))) #each column is a different initial condition
for i in range(len(E_vals)):
e = E_vals[i]
deltaE = e - parameters[2]
with open("x0_turningpoint_deltaE%s_deleonberne.dat"%(deltaE),'a+') as po_fam_file:
print('Loading the periodic orbit family from data file',po_fam_file.name,'\n')
x0podata = np.loadtxt(po_fam_file.name)
x0po[:,i] = x0podata[-1,0:4]
#%% Plotting the family
TSPAN = [0,30]
plt.close('all')
axis_fs = 15
RelTol = 3.e-10
AbsTol = 1.e-10
f = lambda t,x : deleonberne.ham2dof_deleonberne(t,x,parameters)
ax = plt.gca(projection='3d')
for i in range(len(E_vals)):
e = E_vals[i]
deltaE = e - parameters[2]
soln = solve_ivp(f, TSPAN, x0po[:,i], method='RK45', dense_output=True, \
events = lambda t,x : deleonberne.half_period_deleonberne(t,x,parameters), \
rtol=RelTol, atol=AbsTol)
te = soln.t_events[0]
tt = [0,te[2]]
t,x,phi_t1,PHI = tp.state_transit_matrix(tt, x0po[:,i], parameters, \
deleonberne.variational_eqns_deleonberne)
ax.plot(x[:,0],x[:,1],x[:,2],'-',color=linecolor[i], \
label='$\Delta E$ = %.2f'%(deltaE))
ax.scatter(x[0,0],x[0,1],x[0,2],s=10,marker='*')
ax.plot(x[:,0], x[:,1], zs=0, zdir='z')
resX = 100
xVec = np.linspace(-1,1,resX)
yVec = np.linspace(-2,2,resX)
xMat, yMat = np.meshgrid(xVec, yVec)
cset1 = ax.contour(xMat, yMat, tp.get_pot_surf_proj(xVec, yVec, \
deleonberne.pot_energy_deleonberne, parameters), \
[0.01,0.1,1,2,4], zdir='z', offset=0, \
linewidths = 1.0, cmap=cm.viridis, \
alpha = 0.8)
ax.scatter(eqPt[0], eqPt[1], s = 50, c = 'r', marker = 'X')
ax.set_xlabel('$x$', fontsize=axis_fs)
ax.set_ylabel('$y$', fontsize=axis_fs)
ax.set_zlabel('$p_x$', fontsize=axis_fs)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
ax.set_zlim(-4, 4)
legend = ax.legend(loc='upper left')
plt.grid()
if show_final_plot:
plt.show()
if save_final_plot:
plt.savefig(path_to_saveplot + 'tp_deleonberne_upos.pdf', \
format='pdf', bbox_inches='tight')
|
#Biggest Number Using Array
def BigArr(arr):
x=arr[0]
for i in arr:
if(i>x):
x=i
return x
arr=[8,9,11,5,10,12]
print(BigArr(arr))
|
import os
import shutil
import MySQLdb as mysql
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from apps.achievements.models import Record, RecordProof
from apps.utils import upload_to
"""
`kmp_members` (
0 `id` int(11) NOT NULL AUTO_INCREMENT,
1 `mtype` tinyint(4) DEFAULT '0',
2 `email` varchar(60) DEFAULT '',
3 `pass` varchar(32) DEFAULT '',
4 `regdate` datetime DEFAULT '0000-00-00 00:00:00',
5 `lastvisit` datetime DEFAULT '0000-00-00 00:00:00',
6 `name` varchar(60) DEFAULT '',
7 `dateofbirth` date DEFAULT '0000-00-00',
8 `city` varchar(60) DEFAULT '0',
9 `sex` tinyint(1) DEFAULT '0',
10 `active` tinyint(1) DEFAULT '0',
11 `activekey` varchar(8) DEFAULT '',
12 `mainliga` int(11) DEFAULT '0',
13 `userliga` int(11) DEFAULT '0',
14 `userligapass` varchar(5) DEFAULT '',
15 `newrecords` tinyint(1) DEFAULT '0'
)
`kmp_members_records` (
0 `id` int(11) NOT NULL AUTO_INCREMENT,
1 `member_id` int(4) DEFAULT '0',
2 `record` int(11) DEFAULT '0',
3 `record_date` datetime DEFAULT '0000-00-00 00:00:00',
4 `record_stat_date` datetime DEFAULT '0000-00-00 00:00:00',
5 `record_photo_prev` varchar(100) DEFAULT '',
6 `record_photo` varchar(100) DEFAULT '',
7 `record_comment` varchar(200) DEFAULT '',
8 `record_show` tinyint(1) DEFAULT '0',
9 `record_cheat` tinyint(1) DEFAULT '0',
10 `record_liga` tinyint(1) DEFAULT '0',
11 `month_record` tinyint(1) DEFAULT '0'
)
"""
def file_copy(name):
img_to_name = upload_to(name)
img_from = os.path.join(settings.MEDIA_ROOT, 'uploads', 'userphotos',
name)
img_to = os.path.join(settings.MEDIA_ROOT, 'uploads', 'achievements',
img_to_name)
if os.path.isfile(img_from):
shutil.copyfile(img_from, img_to)
return "uploads/achievements/%s" % img_to_name
else:
return False
def import_proofs():
conn = mysql.connect(
host="localhost",
user="root",
passwd="chill",
charset="utf8",
use_unicode=True
)
cursor = conn.cursor()
records = Record.objects.all()
for i, record in enumerate(records):
query = "SELECT * FROM powerball.ru.kmp_members_records \
WHERE record=%s AND record_date='%s';" % (record.value, record.created_at)
cursor.execute(query)
proof = cursor.fetchall()[0]
image = file_copy(proof[6])
if image:
RecordProof.objects.create(
record=record,
image=image
)
print ("%s. copy %s to %s" % (i, proof[6], image))
cursor.close()
conn.close()
class Command(BaseCommand):
args = ''
help = 'import old powerball.ru'
can_import_settings = True
def handle(self, *args, **options):
import_proofs()
self.stdout.write('import')
|
'''
Take input a number ‘N’ and an array as given below.
Input:- N=2
Array =1,2,3,3,4,4
O/p : 2
Find the least number of unique elements after deleting N numbers of elements from the
array.
In the above example , after deleting N=2 elements from the array.
In above 1,2 will be deleted.
So 3,3,4,4 will be remaining so,
2 unique elements are in the array i.e 3 and 4.
So ,output will be 2.
'''
n = int(input())
arr = list(map(int,input().split(',')))
arr1 = []
for i in set(arr):
arr1.append(arr.count(i))
arr1.sort()
length = len(arr1)
for i in arr1:
if n <= 0:
break
n -= i
length -= 1
print(length) |
from __future__ import division
from collections import deque
from Event import Event
from ExponentialRandomVariableGenerator import ExponentialRandomVariableGenerator
from Packet import Packet
AVERAGE_PACKET_LENGTH = 2000
SIMULATION_TIME = 1000
TRANSMISSION_RATE = 1000000 # 1 Mbps
class DiscreteEventBufferSimulator:
def __init__(self, rho, buffer_length):
self.events = []
self.departures = deque()
self.packets = deque()
self.buffer = deque()
self.rho = rho
self.buffer_length = buffer_length
# Counters for metrics
self.arrival_count = 0
self.departure_count = 0
self.observer_count = 0
self.idle_count = 0
self.packet_sum = 0
self.packet_loss_count = 0
self.prevDepartureTime = 0
# Metrics
self.proportion_idle = 0
self.packet_loss = 0
self.average_packets_in_queue = 0
def run(self):
self.genEventsAndPackets()
self.processEvents()
self.printResults()
return self.average_packets_in_queue, self.packet_loss
def genEventsAndPackets(self):
arrival_time_lambda = self.rho * TRANSMISSION_RATE / AVERAGE_PACKET_LENGTH
packet_length_lambda = 1.0 / AVERAGE_PACKET_LENGTH
observer_time_lambda = 5 * arrival_time_lambda
# Exponential Random Variable Generators
arrivalTimeGenerator = ExponentialRandomVariableGenerator(lmbda=arrival_time_lambda)
packetLengthGenerator = ExponentialRandomVariableGenerator(lmbda=packet_length_lambda)
observationTimeGenerator = ExponentialRandomVariableGenerator(lmbda=observer_time_lambda)
currentTime = 0
# Generate Arrival, and if M/M/1, generate Departure
while currentTime < SIMULATION_TIME:
# Add inter-arrival time to arrive at current timestamp
interArrivalTime = arrivalTimeGenerator.genValue()
currentTime += interArrivalTime
# Generate packet and its length
packet = Packet(length=packetLengthGenerator.genValue())
self.packets.append(packet)
self.events.append(Event("Arrival", currentTime))
# Generate Observer Events
currentTime = 0
while currentTime < SIMULATION_TIME:
interArrivalTime = observationTimeGenerator.genValue()
currentTime += interArrivalTime
self.events.append(Event("Observer", currentTime))
# Sort Events
self.events.sort(key=lambda event: event.timestamp)
self.events = deque(self.events)
def processEvents(self):
while self.events or self.departures:
if self.departures:
eventsTime = self.events[0].timestamp
departureTime = self.departures[0].timestamp
if eventsTime > departureTime:
event = self.departures.popleft()
else:
event = self.events.popleft()
else:
event = self.events.popleft()
if event.event_type == "Arrival":
self.processArrival(event.timestamp)
elif event.event_type == "Departure":
self.processDeparture()
elif event.event_type == "Observer":
self.processObserver()
def processArrival(self, timestamp):
packet = self.packets.popleft()
self.arrival_count += 1
if len(self.buffer) < self.buffer_length:
# Generate its departure time based on queue status
transmissionTime = packet.getTransmissionTime()
if timestamp < self.prevDepartureTime:
departureTime = self.prevDepartureTime + transmissionTime
else:
departureTime = timestamp + transmissionTime
if departureTime < SIMULATION_TIME:
self.prevDepartureTime = departureTime
self.departures.append(Event("Departure", departureTime))
self.buffer.append(packet)
else:
self.packet_loss_count += 1
def processDeparture(self):
self.buffer.popleft()
self.departure_count += 1
def processObserver(self):
self.observer_count += 1
if not self.buffer:
self.idle_count += 1
buffer_size = len(self.buffer)
self.packet_sum += buffer_size
self.average_packets_in_queue = self.packet_sum / self.observer_count
self.proportion_idle = self.idle_count / self.observer_count
self.packet_loss = self.packet_loss_count / (1 + self.arrival_count)
def printResults(self):
print("Counts", self.arrival_count, self.departure_count, self.observer_count)
print("Average Packets In Queue ", self.average_packets_in_queue)
print("Idle Proportion ", self.proportion_idle)
print("Probability of Packet Loss ", self.packet_loss)
|
'''
This script uses the CCSD-LPNO response code
to compute the MP2-level energy correction
to the PNO/PNO++ method by including the
external (truncated) space
'''
import numpy as np
import psi4
import ccsd_lpno
import argparse
import time
import json
parser = argparse.ArgumentParser()
parser.add_argument("--j", default='output.json', type=str, help="Output json filename")
parser.add_argument("--m", default='h2_2', type=str, help="Molecule from mollib")
args = parser.parse_args()
cutoffs = [1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 5e-9, 5e-8, 5e-7, 5e-6, 5e-5, 5e-4]
#cutoffs =[0, 1e-6]
mp2_en_list = {}
for cut in cutoffs:
psi4.core.clean()
# Set memory
psi4.set_memory('2 GB')
psi4.core.set_output_file('optrot.dat', False)
np.set_printoptions(precision=12, threshold=np.inf, linewidth=200, suppress=True)
# Set Psi4 options
geom = ccsd_lpno.mollib.mollib["{}".format(args.m)]
mol = psi4.geometry(geom)
psi4.set_options({'basis': 'aug-cc-pvdz', 'scf_type': 'pk',
'freeze_core': 'false', 'e_convergence': 1e-12,
'd_convergence': 1e-12, 'save_jk': 'true'})
# Set for CCSD
E_conv = 1e-10
R_conv = 1e-8
maxiter = 40
compare_psi4 = False
# Set for linear response calculation
omega_nm = 589
# Compute RHF energy with psi4
psi4.set_module_options('SCF', {'E_CONVERGENCE': 1e-12})
psi4.set_module_options('SCF', {'D_CONVERGENCE': 1e-12})
e_scf, wfn = psi4.energy('SCF', return_wfn=True)
print('SCF energy: {}\n'.format(e_scf))
print('Nuclear repulsion energy: {}\n'.format(mol.nuclear_repulsion_energy()))
no_vir = wfn.nmo() - wfn.doccpi()[0] - wfn.frzcpi()[0]
# Set for LPNO
#localize=True
local=True
pert='mu'
pno_cut = cut
local = ccsd_lpno.HelperLocal(wfn.doccpi()[0], no_vir)
hcc = ccsd_lpno.HelperCCEnergy(wfn, local=local, pert=pert, pno_cut=pno_cut)
mp2_en_list['{}'.format(cut)] = hcc.pno_correct
#optrot_mvg_list['{}'.format(cut)] = optrot_mvg
with open("{}".format(args.j), "w") as write_file:
json.dump(mp2_en_list, write_file, indent=4)
print("List of MP2 energy corrections: {}".format(mp2_en_list))
|
import numpy as np
from tools import gauss
def calc_probaprio_gm(signal, w):
    """Estimate the prior probabilities of classes w1 and w2 from a clean signal.

    :param signal: noise-free two-class discrete signal (1D numpy array of ints)
    :param w: pair whose first entry is the value of class w1 and second the value of class w2
    :return: length-2 numpy array with the empirical prior probability of each class
    """
    # The empirical prior of a class is the fraction of samples equal to its value.
    return np.array([np.mean(signal == w[0]), np.mean(signal == w[1])])
def mpm_gm(signal_noisy, w, p, m1, sig1, m2, sig2):
    """Recover the original two-class signal from its noisy version with the MPM rule.

    For every sample the class value with the highest posterior probability is kept.

    :param signal_noisy: noisy signal (1D numpy array of floats)
    :param w: pair (value of class w1, value of class w2)
    :param p: length-2 vector of prior probabilities of the two classes
    :param m1: mean of the first Gaussian
    :param sig1: standard deviation of the first Gaussian
    :param m2: mean of the second Gaussian
    :param sig2: standard deviation of the second Gaussian
    :return: two-class discrete signal (1D numpy array of ints)
    """
    # Unnormalised posterior p(class | observation) = prior * likelihood,
    # then normalise per sample.
    likelihoods = gauss(signal_noisy, m1, sig1, m2, sig2)
    posterior = p * likelihoods
    posterior = posterior / (posterior.sum(axis=1)[..., np.newaxis])
    # Map each sample's argmax class index back to its class value.
    best_class = np.argmax(posterior, axis=1)
    return np.array([w[k] for k in best_class])
def simu_gm(n, w, p):
    """Simulate a discrete two-class signal of length n from the class priors.

    :param n: length of the signal
    :param w: pair (value of class w1, value of class w2)
    :param p: length-2 vector of appearance probabilities of the two classes
    :return: two-class discrete signal (1D numpy array of ints)
    """
    # One multinomial draw per sample picks which class value is emitted;
    # the draw order matches a simple per-sample loop.
    draws = [w[np.argmax(np.random.multinomial(1, p))] for _ in range(n)]
    return np.array(draws, dtype=int)
def calc_param_EM_gm(signal_noisy, p, m1, sig1, m2, sig2):
    """Perform one EM iteration and return the re-estimated model parameters.

    :param signal_noisy: noisy signal (1D numpy array of floats)
    :param p: length-2 vector of prior probabilities of the two classes
    :param m1: mean of the first Gaussian
    :param sig1: standard deviation of the first Gaussian
    :param m2: mean of the second Gaussian
    :param sig2: standard deviation of the second Gaussian
    :return: re-estimated parameters p, m1, sig1, m2, sig2
    """
    # E-step: posterior responsibility of each class for every sample.
    likelihoods = gauss(signal_noisy, m1, sig1, m2, sig2)
    responsibilities = p * likelihoods
    responsibilities = responsibilities / (responsibilities.sum(axis=1)[..., np.newaxis])

    # M-step helper: responsibility-weighted mean and std for one class column.
    def reestimate(resp_k):
        total = resp_k.sum()
        mean = (resp_k * signal_noisy).sum() / total
        std = np.sqrt((resp_k * ((signal_noisy - mean) ** 2)).sum() / total)
        return mean, std

    p = responsibilities.sum(axis=0) / responsibilities.shape[0]
    m1, sig1 = reestimate(responsibilities[:, 0])
    m2, sig2 = reestimate(responsibilities[:, 1])
    return p, m1, sig1, m2, sig2
def estim_param_EM_gm(iter, signal_noisy, p, m1, sig1, m2, sig2):
    """Run the EM algorithm for this Gaussian-mixture model.

    :param iter: number of EM iterations to perform
    :param signal_noisy: noisy signal (1D numpy array of floats)
    :param p: initial value of the probability vector
    :param m1: initial mean of the first Gaussian
    :param sig1: initial standard deviation of the first Gaussian
    :param m2: initial mean of the second Gaussian
    :param sig2: initial standard deviation of the second Gaussian
    :return: all re-estimated parameters after EM, i.e. p, m1, sig1, m2, sig2
    """
    params = (p, m1, sig1, m2, sig2)
    for _ in range(iter):
        params = calc_param_EM_gm(signal_noisy, *params)
        # Trace each iteration so convergence can be followed by eye.
        print(dict(zip(('p', 'm1', 'sig1', 'm2', 'sig2'), params)))
    return params
|
import os
def f(arr):
    """Return True iff `arr` is constant or not (weakly) monotone end-to-end.

    Equivalently: False exactly when the sequence is non-constant and is
    entirely non-decreasing or entirely non-increasing.
    """
    pairs = list(zip(arr, arr[1:]))
    if all(a == b for a, b in pairs):
        # Constant sequences (including a single element) count as True.
        return True
    never_drops = all(a <= b for a, b in pairs)
    never_rises = all(a >= b for a, b in pairs)
    return not (never_drops or never_rises)
# Manual smoke tests when TEST_MODE is set; otherwise judge-style stdin/stdout.
if os.getenv('TEST_MODE', False):
    print("------TEST MODE------")
    for sample, expected in (([3, 1, 5, 3], True),
                             ([3, 2, 1], False),
                             ([7331], True),
                             ([3, 1, 5, 6], True)):
        print(f"{f(sample)} = {expected}\n")
    print("---------------------")
elif __name__ == "__main__":
    for _ in range(int(input())):
        input()  # the array-length line is not needed
        arr = list(map(int, input().split()))
        print('YES' if f(arr) else 'NO')
|
from src import detect_faces, show_bboxes
from PIL import Image

# Earlier experiments on other office photos, kept disabled for reference.
# img = Image.open('images/office1.jpg')
# bounding_boxes, landmarks = detect_faces(img)
# im = show_bboxes(img, bounding_boxes, landmarks)
# img.show()
# im.show()
# img = Image.open('images/office2.jpg')
# bounding_boxes, landmarks = detect_faces(img)
# im = show_bboxes(img, bounding_boxes, landmarks)
# img.show()
# im.show()
# img = Image.open('images/office3.jpg')
# bounding_boxes, landmarks = detect_faces(img)
# im = show_bboxes(img, bounding_boxes, landmarks)
# img.show()
# im.show()
# img = Image.open('images/office4.jpg')
# bounding_boxes, landmarks = detect_faces(img, thresholds=[0.6, 0.7, 0.85])
# im = show_bboxes(img, bounding_boxes, landmarks)
# img.show()
# im.show()

# Detect faces on office5.jpg with a small minimum face size (10 px) and show
# the image annotated with bounding boxes and facial landmarks.
img = Image.open('images/office5.jpg')
bounding_boxes, landmarks = detect_faces(img, min_face_size=10)
im = show_bboxes(img, bounding_boxes, landmarks)
im.show()
|
import tkinter

# Minimal form: a pre-filled entry plus a button that echoes the entry's
# current text as a new label each time it is pressed.
screen = tkinter.Tk()

entry = tkinter.Entry(screen, width=50, bg="aquamarine", borderwidth=5)
entry.insert(0, "Enter your name")
entry.pack()


def echo_entry():
    """Append a label showing whatever is currently typed in the entry."""
    tkinter.Label(text=entry.get()).pack()


button = tkinter.Button(text="Submit", command=echo_entry)
button.pack()

screen.mainloop()
|
import os
from pathlib import Path
import numpy as np
import cv2 as cv
MIN_NUM_KEYPOINT_MATCHES = 50  # constant for minimum number of keypoint matches


def main():
    """loop through 2 folders with paired images, register & blink images."""
    # NOTE(review): the absolute paths below contain 'imag_ analysis' with an
    # embedded space -- confirm this really matches the directory name on disk.
    night1_files = sorted(os.listdir(
        'C:/Users/austi/Documents/pythonWork/findingz_pluto_with_open_cv_imag_ analysis/night_1'))
    # create sorted list of filenames for night 1 folder
    night2_files = sorted(os.listdir(
        'C:/Users/austi/Documents/pythonWork/findingz_pluto_with_open_cv_imag_ analysis/night_2'))
    # create sorted list of filenames for night 2 folder
    path1 = Path.cwd() / 'C:/Users/austi/Documents/pythonWork/findingz_pluto_with_open_cv_imag_ analysis/night_1'
    # assign path class name for input folder 1
    path2 = Path.cwd() / 'C:/Users/austi/Documents/pythonWork/findingz_pluto_with_open_cv_imag_ analysis/night_2'
    # assign path class name for input folder 2
    path3 = Path.cwd() / \
        'C:/Users/austi/Documents/pythonWork/findingz_pluto_with_open_cv_imag_ analysis/night_1_registered'
    # assign class path name for output folder
    # Pair the i-th file of night 1 with the i-th file of night 2 (assumes
    # both folders hold the same number of files in matching sorted order).
    for i, _ in enumerate(night1_files):  # create index for files in folder
        img1 = cv.imread(str(path1 / night1_files[i]), cv.IMREAD_GRAYSCALE)  # read image 1 in greyscale
        img2 = cv.imread(str(path2 / night2_files[i]), cv.IMREAD_GRAYSCALE)  # read image 2 in greyscale
        print("Comparing {} to {}.\n".format(night1_files[i], night2_files[i]))  # display status for comparing
        kp1, kp2, best_matches = find_best_matches(img1, img2)  # find keypoints and best matches with function
        img_match = cv.drawMatches(img1, kp1, img2, kp2,
                                   best_matches, outImg=None)  # draw lines to match image keypoints
        height, width = img1.shape  # get size of image 1
        cv.line(img_match, (width, 0), (width, height), (255, 255, 255), 1)  # draw a line on the right side of image 1
        QC_best_matches(img_match)  # comment out to ignore. displays best matches quality control
        img1_registered = register_image(img1, img2, kp1, kp2, best_matches)  # register first image to second
        blink(img1, img1_registered, 'Check Registration', num_loops=5)  # blink comparator function
        out_filename = '{}_registered.png'.format(night1_files[i][:-4])  # create file path for out image
        cv.imwrite(str(path3 / out_filename), img1_registered)  # Will overwrite! write image to file
        cv.destroyAllWindows()  # destroy all windows to remove clutter
        blink(img1_registered, img2, 'Blink Comparator', num_loops=15)  # call blink again to display only the comparing
def find_best_matches(img1, img2):
    """Return keypoints for both images plus their best keypoint matches.

    Matches are ranked by descriptor distance; only the top
    MIN_NUM_KEYPOINT_MATCHES are kept.
    """
    detector = cv.ORB_create(nfeatures=100)
    kp1, descriptors1 = detector.detectAndCompute(img1, mask=None)
    kp2, descriptors2 = detector.detectAndCompute(img2, mask=None)
    # Brute-force Hamming matcher with cross-checking for ORB descriptors.
    matcher = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
    ranked = sorted(matcher.match(descriptors1, descriptors2),
                    key=lambda m: m.distance)
    return kp1, kp2, ranked[:MIN_NUM_KEYPOINT_MATCHES]
def QC_best_matches(img_match):
    """Show the best keypoint matches, connected by colored lines, for visual QC."""
    title = 'Best {} Matches'.format(MIN_NUM_KEYPOINT_MATCHES)
    cv.imshow(title, img_match)
    cv.waitKey(2500)  # keep the window alive for 2.5 seconds
def register_image(img1, img2, kp1, kp2, best_matches):
    """Return img1 warped (registered) onto img2's coordinate frame.

    Falls back to returning img1 unchanged when there are too few keypoint
    matches to trust a homography.
    """
    if len(best_matches) < MIN_NUM_KEYPOINT_MATCHES:
        print("Warning: Number of keypoint matches < {}\n".format(MIN_NUM_KEYPOINT_MATCHES))
        return img1

    src_pts = np.zeros((len(best_matches), 2), dtype=np.float32)
    dst_pts = np.zeros((len(best_matches), 2), dtype=np.float32)
    for i, match in enumerate(best_matches):
        src_pts[i, :] = kp1[match.queryIdx].pt
        # BUG FIX: destination points must come from the *train* image's
        # keypoints; the original indexed kp2 with match.queryIdx, pairing
        # img2 keypoints by the wrong index and corrupting the homography.
        dst_pts[i, :] = kp2[match.trainIdx].pt

    # Robustly estimate the homography mapping img1 points onto img2 points.
    h_array, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC)
    height, width = img2.shape  # warp into image 2's dimensions
    return cv.warpPerspective(img1, h_array, (width, height))
def blink(image_1, image_2, window_name, num_loops):
    """Replicate a blink comparator: alternate the two images in one window."""
    for _ in range(num_loops):
        # Show each frame for ~1/3 s before switching to the other.
        for frame in (image_1, image_2):
            cv.imshow(window_name, frame)
            cv.waitKey(330)
# Run the blink-comparator pipeline only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
from .entity import Entity
from .vector import Vector
from .pickup import PickupType
from .sandbox import builtins
from .color import Color3
from .event import Event, EventType
from .util import *
from .path import Path
from .game_config import GameConfig as gc
from .enemy_type import EnemyType
from functools import partial
import math
import random
import sys
import inspect
import signal
import traceback
class Player(Entity):
    """A game entity controlled by a user-supplied Python script.

    The script is exec'd into a sandboxed scope (see try_apply_script); its
    `update` (and optional `respond`) functions are then called each tick
    against a "dummy" stand-in so the script cannot mutate the real player
    directly. Script runtime is bounded via SIGALRM.
    NOTE(review): `self.dummy` is never assigned in this class -- presumably
    it is created by the Entity base class; confirm.
    """

    def __init__(self, name, color, game, script, player_id):
        super().__init__()
        self.player_id = player_id
        self.name = name
        self.color = color
        # Random initial velocity so a freshly spawned player is moving.
        self.vel = Vector(random.random(), random.random())
        self.game = game
        # Combat / movement stats come from the game's config object (gc).
        self.speed_max = self.game.gc.P_SPEED
        self.range_visible = self.game.gc.P_RANGE_VISIBLE
        self.range_attackable = self.game.gc.P_RANGE_ATTACKABLE
        self.ammo = self.game.gc.P_AMMO
        self.damage = self.game.gc.P_DAMAGE
        self.attack_delay = self.game.gc.P_ATTACK_DELAY
        self.attack_timer = 0
        self.message_limit = self.game.gc.P_MESSAGE_LIMIT
        self.message_count = 0

        # Register the handler for timing out user scripts
        signal.signal(signal.SIGALRM, timeout_handler)

        # Fall back to the configured error script when the user script fails
        # to load or lacks a valid update function.
        if not self.try_apply_script(script, self.game):
            self.try_apply_script(self.game.gc.P_ERROR_SCRIPT, self.game)

    def reset_dummy(self):
        # Mirror the player's current public state onto the dummy that is
        # handed to the user script each tick.
        self.dummy.tag = self.tag
        self.dummy.pos = self.pos
        self.dummy.vel = self.vel
        self.dummy.speed_max = self.speed_max
        self.dummy.size = self.size
        self.dummy.health = self.health
        self.dummy.health_max = self.health_max
        self.dummy.name = self.name
        self.dummy.color = self.color
        self.dummy.range_visible = self.range_visible
        self.dummy.range_attackable = self.range_attackable
        self.dummy.damage = self.damage
        self.dummy.attack_delay = self.attack_delay
        self.dummy.attack_timer = self.attack_timer
        self.dummy.ammo = self.ammo
        self.dummy.message_limit = self.message_limit
        self.dummy.message_count = 0

    def get_in_range(self, entities, dist):
        # Return the dummies of all entities whose edge lies within `dist`
        # (entity size is added so range is measured to the entity's edge).
        in_range = []
        for p in entities:
            if distance_to(self, p) <= dist + p.size:
                in_range.append(p.dummy)
        return in_range

    def player_list(self):
        # Dummies for every player in the game (including this one).
        result = []
        for p in self.game.players:
            result.append(p.dummy)
        return result

    def pickups_visible(self):
        return self.get_in_range(self.game.pickups, self.range_visible)

    def enemies_visible(self):
        return self.get_in_range(self.game.enemies, self.range_visible)

    def enemies_attackable(self):
        return self.get_in_range(self.game.enemies, self.range_attackable)

    def try_apply_script(self, script, game):
        """Exec `script` in a fresh sandbox scope and bind its update/respond.

        Returns True when the script loads and defines a valid two-parameter
        `update` function; False otherwise (painting the bot red on error).
        """
        if script is None:
            return False

        # The sandbox scope: the only names the user script can see.
        # `__builtins__` is replaced by the restricted sandbox builtins.
        self.scope = {
            'math' : math,
            'game_width' : self.game.get_map_width(),
            'game_height' : self.game.get_map_height(),
            'Vector' : Vector,
            'PickupType' : PickupType,
            'EnemyType' : EnemyType,
            'Path' : Path,
            'core' : game.core.dummy,
            'random' : random,
            'sys' : sys,
            'say' : partial(say, self),
            'say_also_to_self' : partial(say_also_to_self, self),
            'chat' : partial(chat, self),
            'shoot' : partial(shoot, self),
            'move_to' : partial(move_to, self),
            'move_from' : partial(move_from, self),
            'get_nearest' : partial(get_nearest, self),
            'get_nearest_enemy' : partial(get_nearest_enemy, self),
            'get_nearest_pickup' : partial(get_nearest_pickup, self),
            'get_nearest_ammo': partial(get_nearest_ammo, self),
            'get_nearest_health': partial(get_nearest_health, self),
            'get_farthest' : partial(get_farthest, self),
            'distance_to' : partial(distance_to, self),
            'print' : partial(user_print, self),
            'follow_path' : partial(follow_path, self),
            '__builtins__' : builtins
        }

        # If the script throws an error, just give up
        try:
            # SIGALRM-based timeout guards against scripts that never return.
            signal.alarm(self.game.gc.SCRIPT_TIMEOUT)
            exec(script, self.scope)
            signal.alarm(0)
        except Exception:
            # Format traceback
            exp, val, tb = sys.exc_info()
            listing = traceback.format_exception(exp, val, tb)
            # Drop the first two entries (the traceback header and this exec
            # frame) so the user only sees their own script's frames.
            del listing[0]
            del listing[0]
            # Set color to red to signify the bot is broken
            self.game.events_add(Event(EventType.ERROR, listing, self))
            self.color = Color3(255,0,0)
            return False

        # Check update method existence and signature of update function
        self.script_respond = None
        if 'respond' in self.scope:
            respond = self.scope['respond']
            if callable(respond) and len(inspect.signature(respond).parameters) == 2:
                #Create dummy function in special scope
                self.script_respond = type(respond)(respond.__code__, self.scope)
        if 'update' in self.scope:
            update = self.scope['update']
            if callable(update) and len(inspect.signature(update).parameters) == 2:
                #Create dummy function in special scope
                self.script_update = type(update)(update.__code__, self.scope)
                return True
        # NOTE(review): unlike script_respond, script_update is not reset to
        # None on failure, so a previously bound update may linger here.
        return False

    def update(self, delta):
        """Advance the player one tick: run the user script, then apply motion."""
        # Update what the player knows about the world
        self.update_game_state_info()
        if self.attack_timer > 0:
            self.attack_timer -= 1
        self.message_count = 0
        # Reset dummy
        self.reset_dummy()
        # Execute on Dummy Entity
        try:
            signal.alarm(self.game.gc.SCRIPT_TIMEOUT)
            self.script_update(self.dummy, delta)
            signal.alarm(0)
        except Exception:
            # If script is broken, set color to red to signify the bot is broken
            # and reset to error script
            # Format traceback
            exp, val, tb = sys.exc_info()
            listing = traceback.format_exception(exp, val, tb)
            del listing[0]
            del listing[0]
            # Set color to red to signify the bot is broken
            self.game.events_add(Event(EventType.ERROR, listing, self))
            self.color = Color3(255,0,0)
            self.try_apply_script(self.game.gc.P_ERROR_SCRIPT, self.game)
        # Check for sanity (restrict velocity)
        if Vector.Length(self.dummy.vel) > self.speed_max:
            self.dummy.vel = Vector.Normalize(self.dummy.vel) * self.speed_max
        self.vel = self.dummy.vel
        # Apply Motion
        return super().update(delta)

    def update_game_state_info(self):
        # Refresh the world-state lists exposed inside the user script's scope.
        self.scope['enemies_visible'] = self.enemies_visible()
        self.scope['enemies_attackable'] = self.enemies_attackable()
        self.scope['pickups_visible'] = self.pickups_visible()
        self.scope['players'] = self.player_list()

    def __str__(self):
        return str(self.name) + ":" + str(self.pos)
class TimeoutException(Exception):
    """Raised by the SIGALRM handler when a user script exceeds its time budget."""
    pass
# Handler for SIGALRM for timing out user scripts.
def timeout_handler(signum, frame):
    """SIGALRM handler: abort a user script that ran past its time budget."""
    message = ('Function timed out. Allowed time for functions is ' +
               str(gc.SCRIPT_TIMEOUT) + ' second.')
    raise TimeoutException(message)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-23 15:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Template model, a piece of
    # response text attached to a conversation-tree Node.

    dependencies = [
        ('conversationtree', '0011_auto_20171023_1327'),
    ]

    operations = [
        migrations.CreateModel(
            name='Template',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),  # set once at insert time
                ('template', models.CharField(max_length=500)),
                # Deleting a Node cascades to its templates.
                ('node', mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, to='conversationtree.Node')),
            ],
        ),
    ]
|
def format_real(number_ext):
    """Append the Portuguese currency word to a number spelled out in words.

    'um' takes the singular 'real'; every other value takes the plural 'reais'.
    """
    unit = 'real' if number_ext == 'um' else 'reais'
    return '{} {}'.format(number_ext, unit)
def format_centavo(number_ext):
    """Append the Portuguese cent word to a number spelled out in words.

    'um' takes the singular 'centavo'; every other value takes 'centavos'.
    """
    unit = 'centavo' if number_ext == 'um' else 'centavos'
    return '{} {}'.format(number_ext, unit)
|
from utils import *
inp = get_input(2020, 3)  # raw puzzle input for AoC 2020 day 3 (via utils)
rows = [r for r in inp.split("\n") if r != ""]  # map rows, blank lines dropped
def get(x, y):
    """Return the map character at (x, y); the pattern repeats horizontally."""
    row = rows[y]
    return row[x % len(row)]
def calc(dx, dy):
    """Count trees ('#') hit while descending the map with slope (dx, dy)."""
    hits = 0
    x, y = 0, 0
    while y < len(rows):
        if get(x, y) == "#":
            hits += 1
        x, y = x + dx, y + dy
    return hits
# Part 1: trees hit on the (3, 1) slope.
print(calc(3, 1))

# Part 2: product of tree counts over all five required slopes.
slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
product = 1
for dx, dy in slopes:
    product *= calc(dx, dy)
print(product)
|
# Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import simplejson as json
import os
from base64 import b64encode
from nose.tools import nottest
from mimetypes import guess_type
from onlinelinguisticdatabase.tests import TestController, url
import onlinelinguisticdatabase.model as model
from onlinelinguisticdatabase.model.meta import Session
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.lib.SQLAQueryBuilder import SQLAQueryBuilder
try:
import Image
except ImportError:
try:
from PIL import Image
except ImportError:
Image = None
log = logging.getLogger(__name__)
class TestFilesController(TestController):
    def tearDown(self):
        # Clear the file-storage directories and the cached global application
        # settings after every test so tests remain independent.
        TestController.tearDown(self, del_global_app_set=True,
            dirs_to_clear=['files_path', 'reduced_files_path'])
    @nottest
    def test_index(self):
        """Tests that GET /files returns a JSON array of files with expected values.

        Exercises: the restricted tag / unrestricted-user access rules,
        pagination, order_by params, and validation errors for bad params.
        NOTE: Python 2 code (u''-literals, webtest-style self.app).
        """
        # Test that the restricted tag is working correctly.
        # First get the users.
        users = h.get_users()
        contributor_id = [u for u in users if u.role == u'contributor'][0].id

        # Then add a contributor and a restricted tag.
        restricted_tag = h.generate_restricted_tag()
        my_contributor = h.generate_default_user()
        my_contributor_first_name = u'Mycontributor'
        my_contributor.first_name = my_contributor_first_name
        Session.add_all([restricted_tag, my_contributor])
        Session.commit()
        # Re-query to get the persisted row (and its generated id).
        my_contributor = Session.query(model.User).filter(
            model.User.first_name == my_contributor_first_name).first()
        my_contributor_id = my_contributor.id
        restricted_tag = h.get_restricted_tag()

        # Then add the default application settings with my_contributor as the
        # only unrestricted user.
        application_settings = h.generate_default_application_settings()
        application_settings.unrestricted_users = [my_contributor]
        Session.add(application_settings)
        Session.commit()

        # Finally, issue two POST requests to create two default files with the
        # *default* contributor as the enterer. One file will be restricted and
        # the other will not be.
        extra_environ = {'test.authentication.id': contributor_id,
                         'test.application_settings': True}

        wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
        wav_file_base64_encoded = b64encode(open(wav_file_path).read())

        jpg_file_path = os.path.join(self.test_files_path, 'old_test.jpg')
        jpg_file_base64_encoded = b64encode(open(jpg_file_path).read())

        # Create the restricted file.
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': u'test_restricted_file.wav',
            'base64_encoded_file': wav_file_base64_encoded,
            'tags': [h.get_tags()[0].id]  # the restricted tag should be the only one
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers,
                                 extra_environ)
        resp = json.loads(response.body)
        restricted_file_id = resp['id']

        # Create the unrestricted file.
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': u'test_unrestricted_file.jpg',
            'base64_encoded_file': jpg_file_base64_encoded
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers,
                                 extra_environ)
        resp = json.loads(response.body)

        # Expectation: the administrator, the default contributor (qua enterer)
        # and the unrestricted my_contributor should all be able to view both files.
        # The viewer will only receive the unrestricted file.

        # An administrator should be able to view both files.
        extra_environ = {'test.authentication.role': 'administrator',
                         'test.application_settings': True}
        response = self.app.get(url('files'), headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp) == 2
        assert resp[0]['filename'] == u'test_restricted_file.wav'
        assert resp[1]['filename'] == u'test_unrestricted_file.jpg'
        assert response.content_type == 'application/json'

        # The default contributor (qua enterer) should also be able to view both
        # files.
        extra_environ = {'test.authentication.id': contributor_id,
                         'test.application_settings': True}
        response = self.app.get(url('files'), headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp) == 2

        # Mycontributor (an unrestricted user) should also be able to view both
        # files.
        extra_environ = {'test.authentication.id': my_contributor_id,
                         'test.application_settings': True}
        response = self.app.get(url('files'), headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp) == 2

        # A (not unrestricted) viewer should be able to view only one file.
        extra_environ = {'test.authentication.role': 'viewer',
                         'test.application_settings': True}
        response = self.app.get(url('files'), headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp) == 1

        # Remove Mycontributor from the unrestricted users list and access to
        # the second file will be denied.
        application_settings = h.get_application_settings()
        application_settings.unrestricted_users = []
        Session.add(application_settings)
        Session.commit()

        # Mycontributor (no longer an unrestricted user) should now *not* be
        # able to view the restricted file.
        extra_environ = {'test.authentication.id': my_contributor_id,
                         'test.application_settings': True,
                         'test.retain_application_settings': True}
        response = self.app.get(url('files'), headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp) == 1

        # Remove the restricted tag from the file and the viewer should now be
        # able to view it too.
        restricted_file = Session.query(model.File).get(restricted_file_id)
        restricted_file.tags = []
        Session.add(restricted_file)
        Session.commit()
        extra_environ = {'test.authentication.role': 'viewer',
                         'test.application_settings': True}
        response = self.app.get(url('files'), headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp) == 2

        # Clear all Files (actually, everything but the tags, users and languages)
        h.clear_all_models(['User', 'Tag', 'Language'])

        # Now add 100 files. The even ones will be restricted, the odd ones not.
        # These files will be deficient, i.e., have no binary data or MIME_type
        # but that's ok ...
        def create_file_from_index(index):
            # Minimal File row with only a numbered filename.
            file = model.File()
            file.filename = u'name_%d.jpg' % index
            return file
        files = [create_file_from_index(i) for i in range(1, 101)]
        Session.add_all(files)
        Session.commit()
        files = h.get_files()
        restricted_tag = h.get_restricted_tag()
        # Tag every even-numbered file (by its filename index) as restricted.
        for file in files:
            if int(file.filename.split('_')[1].split('.')[0]) % 2 == 0:
                file.tags.append(restricted_tag)
                Session.add(file)
        Session.commit()
        files = h.get_files()  # ordered by File.id ascending

        # An administrator should be able to retrieve all of the files.
        extra_environ = {'test.authentication.role': 'administrator',
                         'test.application_settings': True}
        response = self.app.get(url('files'), headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp) == 100
        assert resp[0]['filename'] == u'name_1.jpg'
        assert resp[0]['id'] == files[0].id

        # Test the paginator GET params.
        paginator = {'items_per_page': 23, 'page': 3}
        response = self.app.get(url('files'), paginator, headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp['items']) == 23
        # Page 3 at 23 items/page starts at index 46.
        assert resp['items'][0]['filename'] == files[46].filename

        # Test the order_by GET params.
        order_by_params = {'order_by_model': 'File', 'order_by_attribute': 'filename',
                           'order_by_direction': 'desc'}
        response = self.app.get(url('files'), order_by_params,
                                headers=self.json_headers, extra_environ=extra_environ)
        resp = json.loads(response.body)
        result_set = sorted([f.filename for f in files], reverse=True)
        assert result_set == [f['filename'] for f in resp]
        assert response.content_type == 'application/json'

        # Test the order_by *with* paginator.
        params = {'order_by_model': 'File', 'order_by_attribute': 'filename',
                  'order_by_direction': 'desc', 'items_per_page': 23, 'page': 3}
        response = self.app.get(url('files'), params,
                                headers=self.json_headers, extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert result_set[46] == resp['items'][0]['filename']

        # The default viewer should only be able to see the odd numbered files,
        # even with a paginator.
        items_per_page = 7
        page = 7
        paginator = {'items_per_page': items_per_page, 'page': page}
        extra_environ = {'test.authentication.role': 'viewer',
                         'test.application_settings': True}
        response = self.app.get(url('files'), paginator, headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp['items']) == items_per_page
        # The k-th visible (odd-numbered) file overall is file number 2k+1.
        assert resp['items'][0]['filename'] == u'name_%d.jpg' % (
            ((items_per_page * (page - 1)) * 2) + 1)

        # Expect a 400 error when the order_by_direction param is invalid
        order_by_params = {'order_by_model': 'File', 'order_by_attribute': 'filename',
                           'order_by_direction': 'descending'}
        response = self.app.get(url('files'), order_by_params, status=400,
                                headers=self.json_headers, extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert resp['errors']['order_by_direction'] == u"Value must be one of: asc; desc (not u'descending')"

        # Expect the default BY id ASCENDING ordering when the order_by_model/Attribute
        # param is invalid.
        order_by_params = {'order_by_model': 'Fileage', 'order_by_attribute': 'nom',
                           'order_by_direction': 'desc'}
        response = self.app.get(url('files'), order_by_params,
                                headers=self.json_headers, extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert resp[0]['id'] == files[0].id

        # Expect a 400 error when the paginator GET params are empty, not
        # specified or integers that are less than 1
        paginator = {'items_per_page': u'a', 'page': u''}
        response = self.app.get(url('files'), paginator, headers=self.json_headers,
                                extra_environ=extra_environ, status=400)
        resp = json.loads(response.body)
        assert resp['errors']['items_per_page'] == u'Please enter an integer value'
        assert resp['errors']['page'] == u'Please enter a value'

        paginator = {'items_per_page': 0, 'page': -1}
        response = self.app.get(url('files'), paginator, headers=self.json_headers,
                                extra_environ=extra_environ, status=400)
        resp = json.loads(response.body)
        assert resp['errors']['items_per_page'] == u'Please enter a number that is 1 or greater'
        assert resp['errors']['page'] == u'Please enter a number that is 1 or greater'
        assert response.content_type == 'application/json'
@nottest
def test_create(self):
"""Tests that POST /files correctly creates a new file."""
########################################################################
# base64-encoded file creation
########################################################################
# Pass some mal-formed JSON to test that a 400 error is returned.
params = u'"a' # Bad JSON
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['error'] == u'JSON decode error: the parameters provided were not valid JSON.'
# Create a test audio file.
wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
wav_file_size = os.path.getsize(wav_file_path)
params = self.file_create_params_base64.copy()
params.update({
'filename': u'old_test.wav',
'base64_encoded_file': b64encode(open(wav_file_path).read())
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert resp['filename'] == u'old_test.wav'
assert resp['MIME_type'] == u'audio/x-wav'
assert resp['size'] == wav_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert file_count == 1
assert response.content_type == 'application/json'
# Create a test image file.
jpg_file_path = os.path.join(self.test_files_path, 'old_test.jpg')
jpg_file_size = os.path.getsize(jpg_file_path)
jpg_file_base64 = b64encode(open(jpg_file_path).read())
params = self.file_create_params_base64.copy()
params.update({
'filename': u'old_test.jpg',
'base64_encoded_file': jpg_file_base64
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
file_id = an_image_id = resp['id']
assert resp['filename'] == u'old_test.jpg'
assert resp['MIME_type'] == u'image/jpeg'
assert resp['size'] == jpg_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert file_count == 2
# Create a test image file with many-to-many relations, i.e., tags and
# forms. First create a couple of tags.
tag1 = model.Tag()
tag1.name = u'tag 1'
tag2 = model.Tag()
tag2.name = u'tag 2'
restricted_tag = h.generate_restricted_tag()
Session.add_all([tag1, tag2, restricted_tag])
Session.commit()
tag1_id = tag1.id
tag2_id = tag2.id
restricted_tag_id = restricted_tag.id
# Then create a form to associate.
params = self.form_create_params.copy()
params.update({
'transcription': u'test',
'translations': [{'transcription': u'test', 'grammaticality': u''}]
})
params = json.dumps(params)
response = self.app.post(url('forms'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
form_id = resp['id']
# Now create the file with forms and tags
params = self.file_create_params_base64.copy()
params.update({
'filename': u'old_test.jpg',
'base64_encoded_file': jpg_file_base64,
'tags': [tag1_id, tag2_id],
'forms': [form_id]
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert resp['filename'][:9] == u'old_test_'
assert resp['MIME_type'] == u'image/jpeg'
assert resp['size'] == jpg_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert sorted([t['id'] for t in resp['tags']]) == sorted([tag1_id, tag2_id])
assert resp['forms'][0]['transcription'] == u'test'
assert file_count == 3
# Invalid input
wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
wav_file_size = os.path.getsize(wav_file_path)
params = self.file_create_params_base64.copy()
params.update({
'filename': u'', # empty; not allowed
'base64_encoded_file': '', # empty; not allowed
'utterance_type': u'l' * 1000, # too long
'date_elicited': u'31/12/2012', # wrong format
'speaker': 200 # invalid id
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert u'Value must be one of: None; Object Language Utterance; Metalanguage Utterance; Mixed Utterance' in \
resp['errors']['utterance_type']
assert resp['errors']['speaker'] == u'There is no speaker with id 200.'
assert resp['errors']['date_elicited'] == u'Please enter a month from 1 to 12'
assert resp['errors']['filename'] == u'Please enter a value'
assert resp['errors']['base64_encoded_file']== u'Please enter a value'
assert file_count == 3
assert response.content_type == 'application/json'
# Create an audio file with unicode characters. Show that spaces are
# replaced with underscores and that apostrophes and quotation marks are
# removed.
wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
wav_file_size = os.path.getsize(wav_file_path)
params = self.file_create_params_base64.copy()
params.update({
'filename': u'\u201Cold te\u0301st\u201D.wav',
'base64_encoded_file': b64encode(open(wav_file_path).read()),
'tags': [restricted_tag_id]
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
a_wav_file_id = resp['id']
file_count = Session.query(model.File).count()
assert u'\u201Cold_te\u0301st\u201D.wav' in os.listdir(self.files_path)
assert resp['filename'] == u'\u201Cold_te\u0301st\u201D.wav'
assert resp['name'] == resp['filename'] # name value set in files controller, user can't change this
assert resp['MIME_type'] == u'audio/x-wav'
assert resp['size'] == wav_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert file_count == 4
assert restricted_tag_id in [t['id'] for t in resp['tags']]
assert response.content_type == 'application/json'
# Attempt to create an illicit file type (.html) but with a valid
# extension (.wav). Expect an error, i.e., validation detects that the
# file is really html, despite the misleading extension.
# WARNING: this (type of) test will fail of python-magic (and its dependency libmagic) is
# not installed. This is because the file create validator will not recognize this
# file as HTML pretending to be WAV
files_dir_list = os.listdir(self.files_path)
html_file_path = os.path.join(self.test_files_path, 'illicit.html')
html_file_base64 = b64encode(open(html_file_path).read())
params = self.file_create_params_base64.copy()
params.update({
'filename': u'pretend_its_wav.wav',
'base64_encoded_file': html_file_base64
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
new_files_dir_list = os.listdir(self.files_path)
assert file_count == 4
assert resp['errors'] == u"The file extension does not match the file's true type (audio/x-wav vs. text/html, respectively)."
assert files_dir_list == new_files_dir_list
########################################################################
# multipart/form-data file creation
########################################################################
# Upload a file using the multipart/form-data Content-Type and a POST
# request to /files. Here we do not supply a filename POST param so the
# files controller creates one based on the path automatically included
# in filedata. The controller removes the path separators of its os
# when it creates the filename; however path separators from a foreign os
# may remain in the generated filename.
params = self.file_create_params_MPFD.copy()
response = self.app.post(url('/files'), params, extra_environ=self.extra_environ_admin,
upload_files=[('filedata', wav_file_path)])
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert resp['filename'] in os.listdir(self.files_path)
assert resp['filename'][:8] == u'old_test'
assert resp['name'] == resp['filename'] # name value set in files controller, user can't change this
assert resp['MIME_type'] == u'audio/x-wav'
assert resp['size'] == wav_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert file_count == 5
assert response.content_type == 'application/json'
# Upload a file using the multipart/form-data Content-Type and a POST
# request to /files. Here we do supply a filename and some metadata.
params = self.file_create_params_MPFD.copy()
params.update({
'filename': u'wavfile.wav',
'description': u'multipart/form-data',
'date_elicited': u'12/03/2011', # mm/dd/yyyy
'utterance_type': u'Mixed Utterance',
'tags-0': tag1_id,
'tags-1': tag2_id,
'forms-0': form_id
})
response = self.app.post(url('/files'), params, extra_environ=self.extra_environ_admin,
upload_files=[('filedata', wav_file_path)])
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert u'wavfile.wav' in os.listdir(self.files_path)
assert resp['filename'] == u'wavfile.wav'
assert resp['name'] == resp['filename'] # name value set in files controller, user can't change this
assert resp['MIME_type'] == u'audio/x-wav'
assert resp['size'] == wav_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert sorted([t['id'] for t in resp['tags']]) == sorted([tag1_id, tag2_id])
assert resp['forms'][0]['id'] == form_id
assert resp['utterance_type'] == u'Mixed Utterance'
assert resp['description'] == u'multipart/form-data'
assert resp['date_elicited'] == u'2011-12-03'
assert file_count == 6
assert response.content_type == 'application/json'
# Upload using multipart/form-data and attempt to pass a malicious
# filename; the path separator should be removed from the filename. If
# the separator were not removed, this filename could cause the file to
# be written to the parent directory of the files directory
params = self.file_create_params_MPFD.copy()
params.update({'filename': u'../wavfile.wav'})
response = self.app.post(url('/files'), params, extra_environ=self.extra_environ_admin,
upload_files=[('filedata', wav_file_path)])
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
binary_files_list = os.listdir(self.files_path)
binary_files_list_count = len(binary_files_list)
assert u'..wavfile.wav' in binary_files_list
assert resp['filename'] == u'..wavfile.wav'
assert resp['name'] == resp['filename'] # name value set in files controller, user can't change this
assert resp['MIME_type'] == u'audio/x-wav'
assert resp['size'] == wav_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert file_count == 7
assert response.content_type == 'application/json'
# Upload using multipart/form-data and attempt to pass an invalid file
# type (.html) but with a valid extension (.wav). Expect an error.
html_file_path = os.path.join(self.test_files_path, 'illicit.html')
files_dir_list = os.listdir(self.files_path)
params = self.file_create_params_MPFD.copy()
params.update({'filename': u'pretend_its_wav.wav'})
response = self.app.post(url('/files'), params, extra_environ=self.extra_environ_admin,
upload_files=[('filedata', html_file_path)], status=400)
resp = json.loads(response.body)
new_file_count = Session.query(model.File).count()
new_files_dir_list = os.listdir(self.files_path)
assert file_count == new_file_count
assert resp['errors'] == u"The file extension does not match the file's true type (audio/x-wav vs. text/html, respectively)."
assert files_dir_list == new_files_dir_list
# Try the same as above but instead of providing a deceitful filename in
# the POST params, upload a file with a false extension.
html_file_path = os.path.join(self.test_files_path, 'illicit.wav')
files_dir_list = new_files_dir_list
params = self.file_create_params_MPFD.copy()
response = self.app.post(url('/files'), params, extra_environ=self.extra_environ_admin,
upload_files=[('filedata', html_file_path)], status=400)
resp = json.loads(response.body)
new_file_count = Session.query(model.File).count()
new_files_dir_list = os.listdir(self.files_path)
assert file_count == new_file_count
assert resp['errors'] == u"The file extension does not match the file's true type (audio/x-wav vs. text/html, respectively)."
assert files_dir_list == new_files_dir_list
########################################################################
# Subinterval-Referencing File
########################################################################
# Create a subinterval-referencing audio file; reference one of the wav
# files created earlier.
params = self.file_create_params_sub_ref.copy()
params.update({
'parent_file': a_wav_file_id,
'name': u'subinterval_x',
'start': 1.3,
'end': 2.6
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
new_binary_files_list = os.listdir(self.files_path)
new_binary_files_list_count = len(new_binary_files_list)
subinterval_referencing_id = resp['id']
assert new_binary_files_list_count == binary_files_list_count
assert u'\u201Cold_te\u0301st\u201D.wav' in new_binary_files_list
assert u'subinterval_x' not in new_binary_files_list
assert resp['filename'] == None
assert resp['parent_file']['filename'] == u'\u201Cold_te\u0301st\u201D.wav'
assert resp['name'] == u'subinterval_x'
assert resp['MIME_type'] == u'audio/x-wav'
assert resp['size'] == None
assert resp['parent_file']['size'] == wav_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert resp['start'] == 1.3
assert type(resp['start']) is float
assert resp['end'] == 2.6
assert type(resp['end']) is float
assert file_count == 8
assert response.content_type == 'application/json'
# Attempt to create another subinterval-referencing audio file; fail
# because name is too long, parent_file is empty, start is not a number
# and end is unspecified
params = self.file_create_params_sub_ref.copy()
params.update({
'name': u'subinterval_x' * 200,
'start': u'a',
'end': None
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert file_count == 8 # unchanged
assert resp['errors']['parent_file'] == u'An id corresponding to an existing audio or video file must be provided.'
assert resp['errors']['start'] == u'Please enter a number'
assert resp['errors']['end'] == u'Please enter a value'
assert resp['errors']['name'] == u'Enter a value not more than 255 characters long'
# Attempt to create another subinterval-referencing audio file; fail
# because the contributor is not authorized to access the restricted parent_file.
params = self.file_create_params_sub_ref.copy()
params.update({
'parent_file': a_wav_file_id,
'name': u'subinterval_y',
'start': 3.75,
'end': 4.999
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_contrib, status=400)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert file_count == 8
assert resp['errors']['parent_file'] == u'You are not authorized to access the file with id %d.' % a_wav_file_id
# Create another subinterval-referencing audio file; this one's parent is
# restricted. Note that it does not itself become restricted. Note also
# that a name is not required.
params = self.file_create_params_sub_ref.copy()
params.update({
'parent_file': a_wav_file_id,
'start': 3.75,
'end': 4.999
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert file_count == 9
assert resp['parent_file']['id'] == a_wav_file_id
assert u'restricted' not in [t['name'] for t in resp['tags']]
assert resp['name'] == resp['parent_file']['name']
# Attempt to create another subinterval-referencing file; fail because
# the parent file is not an A/V file.
params = self.file_create_params_sub_ref.copy()
params.update({
'parent_file': an_image_id,
'name': u'subinterval_y',
'start': 3.75,
'end': 4.999
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert file_count == 9
assert resp['errors']['parent_file'] == u'File %d is not an audio or a video file.' % an_image_id
# Attempt to create another subinterval-referencing file; fail because
# the parent file id is invalid
bad_id = 1000009252345345
params = self.file_create_params_sub_ref.copy()
params.update({
'parent_file': bad_id,
'name': u'subinterval_y',
'start': 3.75,
'end': 4.999
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert file_count == 9
assert resp['errors']['parent_file'] == u'There is no file with id %d.' % bad_id
# Attempt to create another subinterval-referencing file; fail because
# the parent file id is itself a subinterval-referencing file
params = self.file_create_params_sub_ref.copy()
params.update({
'parent_file': subinterval_referencing_id,
'name': u'subinterval_y',
'start': 3.75,
'end': 4.999
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert file_count == 9
assert resp['errors']['parent_file'] == u'The parent file cannot itself be a subinterval-referencing file.'
# Attempt to create a subinterval-referencing audio file; fail because
# start >= end.
params = self.file_create_params_sub_ref.copy()
params.update({
'parent_file': a_wav_file_id,
'name': u'subinterval_z',
'start': 1.3,
'end': 1.3
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
file_count = Session.query(model.File).count()
assert response.content_type == 'application/json'
assert resp['errors'] == u'The start value must be less than the end value.'
########################################################################
# externally hosted file creation
########################################################################
# Create a valid externally hosted file
params = self.file_create_params_ext_host.copy()
url_ = u'http://vimeo.com/54144270'
params.update({
'url': url_,
'name': u'externally hosted file',
'MIME_type': u'video/mpeg',
'description': u'A large video file I didn\'t want to upload here.'
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert resp['description'] == u'A large video file I didn\'t want to upload here.'
assert resp['url'] == url_
# Attempt to create an externally hosted file with invalid params
params = self.file_create_params_ext_host.copy()
url_ = u'http://vimeo/541442705414427054144270541442705414427054144270' # Invalid url
params.update({
'url': url_,
'name': u'invalid externally hosted file',
'MIME_type': u'video/gepm', # invalid MIME_type
'description': u'A large video file, sadly invalid.'
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['MIME_type'] == u'The file upload failed because the file type video/gepm is not allowed.'
resp['errors']['url'] == u'You must provide a full domain name (like vimeo.com)'
# Attempt to create an externally hosted file with different invalid params
params = self.file_create_params_ext_host.copy()
params.update({
'url': u'', # shouldn't be empty
'name': u'invalid externally hosted file' * 200, # too long
'password': u'a87XS.1d9X837a001W2w3a87XS.1d9X837a001W2w3' * 200, # too long
'description': u'A large video file, sadly invalid.'
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['url'] == u'Please enter a value'
assert resp['errors']['password'] == u'Enter a value not more than 255 characters long'
assert resp['errors']['name'] == u'Enter a value not more than 255 characters long'
# Show that the name param is optional
params = self.file_create_params_ext_host.copy()
url_ = u'http://vimeo.com/54144270'
params.update({
'url': url_,
'MIME_type': u'video/mpeg',
'description': u'A large video file I didn\'t want to upload here.'
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
assert resp['name'] == u''
    @nottest
    def test_relational_restrictions(self):
        """Tests that the restricted tag works correctly with respect to relational attributes of files.

        That is, tests that (a) file.forms does not return restricted forms to
        restricted users and (b) a restricted user cannot append a restricted
        form to file.forms.

        Flow: create fixtures (a wav file, the restricted tag, one restricted
        and one unrestricted form), then verify create/update authorization
        as both a restricted contributor and an unrestricted admin, and
        finally that files indirectly restricted via their forms become
        invisible/forbidden to restricted users.
        """
        # Both environs enable test application settings so the restricted-tag
        # machinery is active for these requests.
        admin = self.extra_environ_admin.copy()
        admin.update({'test.application_settings': True})
        contrib = self.extra_environ_contrib.copy()
        contrib.update({'test.application_settings': True})

        # Create a test audio file.
        wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
        wav_file_size = os.path.getsize(wav_file_path)
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': u'old_test.wav',
            'base64_encoded_file': b64encode(open(wav_file_path).read())
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers,
                                 admin)
        resp = json.loads(response.body)
        file_count = Session.query(model.File).count()
        assert resp['filename'] == u'old_test.wav'
        assert resp['MIME_type'] == u'audio/x-wav'
        assert resp['size'] == wav_file_size
        assert resp['enterer']['first_name'] == u'Admin'
        assert file_count == 1
        assert response.content_type == 'application/json'

        # First create the restricted tag.
        restricted_tag = h.generate_restricted_tag()
        Session.add(restricted_tag)
        Session.commit()
        restricted_tag_id = restricted_tag.id

        # Then create two forms, one restricted and one not.
        params = self.form_create_params.copy()
        params.update({
            'transcription': u'restricted',
            'translations': [{'transcription': u'restricted', 'grammaticality': u''}],
            'tags': [restricted_tag_id]
        })
        params = json.dumps(params)
        response = self.app.post(url('forms'), params, self.json_headers,
                                 admin)
        resp = json.loads(response.body)
        restricted_form_id = resp['id']
        params = self.form_create_params.copy()
        params.update({
            'transcription': u'unrestricted',
            'translations': [{'transcription': u'unrestricted', 'grammaticality': u''}]
        })
        params = json.dumps(params)
        response = self.app.post(url('forms'), params, self.json_headers,
                                 admin)
        resp = json.loads(response.body)
        unrestricted_form_id = resp['id']

        # Now, as a (restricted) contributor, attempt to create a file and
        # associate it to a restricted form -- expect to fail.
        jpg_file_path = os.path.join(self.test_files_path, 'old_test.jpg')
        jpg_file_size = os.path.getsize(jpg_file_path)
        jpg_file_base64 = b64encode(open(jpg_file_path).read())
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': u'old_test.jpg',
            'base64_encoded_file': jpg_file_base64,
            'forms': [restricted_form_id]
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers,
                                 contrib, status=400)
        resp = json.loads(response.body)
        assert u'You are not authorized to access the form with id %d.' % restricted_form_id in \
            resp['errors']['forms']

        # Now, as a (restricted) contributor, attempt to create a file and
        # associate it to an unrestricted form -- expect to succeed.
        jpg_file_path = os.path.join(self.test_files_path, 'old_test.jpg')
        jpg_file_size = os.path.getsize(jpg_file_path)
        jpg_file_base64 = b64encode(open(jpg_file_path).read())
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': u'old_test.jpg',
            'base64_encoded_file': jpg_file_base64,
            'forms': [unrestricted_form_id]
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers,
                                 contrib)
        resp = json.loads(response.body)
        unrestricted_file_id = resp['id']
        assert resp['filename'] == u'old_test.jpg'
        assert resp['forms'][0]['transcription'] == u'unrestricted'

        # Now, as a(n unrestricted) administrator, attempt to create a file and
        # associate it to a restricted form -- expect (a) to succeed and (b) to
        # find that the file is now restricted.
        jpg_file_path = os.path.join(self.test_files_path, 'old_test.jpg')
        jpg_file_base64 = b64encode(open(jpg_file_path).read())
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': u'old_test.jpg',
            'base64_encoded_file': jpg_file_base64,
            'forms': [restricted_form_id]
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers, admin)
        resp = json.loads(response.body)
        indirectly_restricted_file_id = resp['id']
        # Filename gets a disambiguating suffix since old_test.jpg exists.
        assert resp['filename'][:8] == u'old_test'
        assert resp['forms'][0]['transcription'] == u'restricted'
        # Restriction propagates from the form to the file's tags.
        assert u'restricted' in [t['name'] for t in resp['tags']]

        # Now show that the indirectly restricted files are inaccessible to
        # unrestricted users.
        response = self.app.get(url('files'), headers=self.json_headers,
                                extra_environ=contrib)
        resp = json.loads(response.body)
        assert indirectly_restricted_file_id not in [f['id'] for f in resp]

        # Now, as a(n unrestricted) administrator, create a file.
        unrestricted_file_params = self.file_create_params_base64.copy()
        unrestricted_file_params.update({
            'filename': u'old_test.jpg',
            'base64_encoded_file': jpg_file_base64
        })
        params = json.dumps(unrestricted_file_params)
        response = self.app.post(url('files'), params, self.json_headers, admin)
        resp = json.loads(response.body)
        unrestricted_file_id = resp['id']
        assert resp['filename'][:8] == u'old_test'
        assert response.content_type == 'application/json'

        # As a restricted contributor, attempt to update the unrestricted file
        # just created by associating it to a restricted form -- expect to fail.
        unrestricted_file_params.update({'forms': [restricted_form_id]})
        params = json.dumps(unrestricted_file_params)
        response = self.app.put(url('file', id=unrestricted_file_id), params,
                                self.json_headers, contrib, status=400)
        resp = json.loads(response.body)
        assert u'You are not authorized to access the form with id %d.' % restricted_form_id in \
            resp['errors']['forms']
        assert response.content_type == 'application/json'

        # As an unrestricted administrator, attempt to update an unrestricted file
        # by associating it to a restricted form -- expect to succeed.
        response = self.app.put(url('file', id=unrestricted_file_id), params,
                                self.json_headers, admin)
        resp = json.loads(response.body)
        assert resp['id'] == unrestricted_file_id
        assert u'restricted' in [t['name'] for t in resp['tags']]

        # Now show that the newly indirectly restricted file is also
        # inaccessible to an unrestricted user.
        response = self.app.get(url('file', id=unrestricted_file_id),
                                headers=self.json_headers, extra_environ=contrib, status=403)
        resp = json.loads(response.body)
        assert resp['error'] == u'You are not authorized to access this resource.'
        assert response.content_type == 'application/json'
@nottest
def test_create_large(self):
"""Tests that POST /files correctly creates a large file.
WARNING 1: long-running test.
WARNING: 2: if a large file named old_test_long.wav does not exist in
``tests/data/files``, this test will pass vacuously. I don't want to
include such a large file in the code base so this file needs to be
created if one wants this test to run.
"""
file_count = new_file_count = Session.query(model.File).count()
# Try to create a file with a > 20 MB file as content using JSON/Base64
# encoding and expect to fail because the file is too big.
long_wav_filename = 'old_test_long.wav'
long_wav_file_path = os.path.join(self.test_files_path, long_wav_filename)
if os.path.exists(long_wav_file_path):
long_wav_file_size = os.path.getsize(long_wav_file_path)
params = self.file_create_params_base64.copy()
params.update({
'filename': long_wav_filename,
'base64_encoded_file': b64encode(open(long_wav_file_path).read())
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
new_file_count = Session.query(model.File).count()
assert file_count == new_file_count
assert resp['error'] == u'The request body is too large; use the multipart/form-data Content-Type when uploading files greater than 20MB.'
assert response.content_type == 'application/json'
# Try to create a file with a ~6MB .wav file as content using JSON/Base64
# encoding and expect to succeed because the file is < 20MB.
medium_wav_filename = u'old_test_medium.wav'
medium_wav_file_path = os.path.join(self.test_files_path, medium_wav_filename)
if os.path.exists(medium_wav_file_path):
old_reduced_dir_list = os.listdir(self.reduced_files_path)
medium_wav_file_size = os.path.getsize(medium_wav_file_path)
params = self.file_create_params_base64.copy()
params.update({
'filename': medium_wav_filename,
'base64_encoded_file': b64encode(open(medium_wav_file_path).read())
})
params = json.dumps(params)
response = self.app.post(url('files'), params, self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
file_count = new_file_count
new_file_count = Session.query(model.File).count()
new_reduced_dir_list = os.listdir(self.reduced_files_path)
lossy_filename = '%s.%s' % (os.path.splitext(medium_wav_filename)[0],
self.config.get('preferred_lossy_audio_format', 'ogg'))
assert file_count + 1 == new_file_count
assert resp['filename'] == medium_wav_filename
assert resp['MIME_type'] == u'audio/x-wav'
assert resp['size'] == medium_wav_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert response.content_type == 'application/json'
assert lossy_filename not in old_reduced_dir_list
if self.create_reduced_size_file_copies and h.command_line_program_installed('ffmpeg'):
assert resp['lossy_filename'] == lossy_filename
assert lossy_filename in new_reduced_dir_list
else:
assert resp['lossy_filename'] == None
assert lossy_filename not in new_reduced_dir_list
# Create the large (> 20MB) .wav file from above using the multipart/form-data
# POST method.
if os.path.exists(long_wav_file_path):
long_wav_file_size = os.path.getsize(long_wav_file_path)
params = self.file_create_params_MPFD.copy()
params.update({'filename': long_wav_filename})
response = self.app.post(url('/files'), params, extra_environ=self.extra_environ_admin,
upload_files=[('filedata', long_wav_file_path)])
resp = json.loads(response.body)
file_count = new_file_count
new_file_count = Session.query(model.File).count()
new_reduced_dir_list = os.listdir(self.reduced_files_path)
lossy_filename = '%s.%s' % (os.path.splitext(long_wav_filename)[0],
self.config.get('preferred_lossy_audio_format', 'ogg'))
assert file_count + 1 == new_file_count
assert resp['filename'] == long_wav_filename
assert resp['MIME_type'] == u'audio/x-wav'
assert resp['size'] == long_wav_file_size
assert resp['enterer']['first_name'] == u'Admin'
assert response.content_type == 'application/json'
assert lossy_filename not in old_reduced_dir_list
if self.create_reduced_size_file_copies and h.command_line_program_installed('ffmpeg'):
assert resp['lossy_filename'] == lossy_filename
assert lossy_filename in new_reduced_dir_list
else:
assert resp['lossy_filename'] == None
assert lossy_filename not in new_reduced_dir_list
@nottest
def test_new(self):
    """Tests that GET /file/new returns an appropriate JSON object for creating a new OLD file.

    The properties of the JSON object are 'tags', 'utterance_types',
    'speakers' and 'users' and their values are arrays/lists.
    """
    # A 'viewer' lacks the 'contributor'/'administrator' role required by
    # the new action; expect a 403 with a JSON error body.
    viewer_environ = {'test.authentication.role': 'viewer'}
    response = self.app.get(url('new_file'), extra_environ=viewer_environ,
                            status=403)
    resp = json.loads(response.body)
    assert resp['error'] == u'You are not authorized to access this resource.'
    assert response.content_type == 'application/json'

    # Seed the db with an application settings row, the restricted tag and
    # a default speaker.
    Session.add_all([
        h.generate_default_application_settings(),
        h.generate_restricted_tag(),
        h.generate_default_speaker()
    ])
    Session.commit()

    # Assemble what the response should contain given the data currently in
    # the db (see websetup.py for the test data), then round-trip it through
    # the JSON encoder so it compares equal to the parsed response body.
    expected = {
        'tags': h.get_mini_dicts_getter('Tag')(),
        'speakers': h.get_mini_dicts_getter('Speaker')(),
        'users': h.get_mini_dicts_getter('User')(),
        'utterance_types': h.utterance_types,
        'allowed_file_types': h.allowed_file_types
    }
    expected = json.loads(json.dumps(expected, cls=h.JSONOLDEncoder))

    # GET /file/new without params: every store comes back as a JSON array.
    response = self.app.get(url('new_file'),
                            extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    for key in ('tags', 'speakers', 'users', 'utterance_types',
                'allowed_file_types'):
        assert resp[key] == expected[key]
    assert response.content_type == 'application/json'

    # GET /new_file with params. Param values are treated as strings, not
    # JSON. A store's array comes back empty when its param is (1) absent,
    # (2) the empty string, or (3) an ISO 8601 UTC datetime string matching
    # that store's most recent datetime_modified value.
    params = {
        # Any string at all: 'speakers' will be in the response.
        'speakers': 'anything can go here!',
        # Datetime that does not match the most recent User.datetime_modified:
        # 'users' *will* be in the response.
        'users': datetime.datetime.utcnow().isoformat(),
        # Datetime matching the most recent Tag.datetime_modified:
        # 'tags' will *not* be in the response.
        'tags': h.get_most_recent_modification_datetime('Tag').isoformat()
    }
    response = self.app.get(url('new_file'), params,
                            extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['tags'] == []
    assert resp['speakers'] == expected['speakers']
    assert resp['users'] == expected['users']
    assert resp['utterance_types'] == expected['utterance_types']
    assert response.content_type == 'application/json'
@nottest
def test_update(self):
    """Tests that PUT /files/id correctly updates an existing file.

    Exercises, in order: authorization (viewer may not update a restricted
    file), a successful metadata update, the vacuous-update 400 error,
    many-to-one (speaker) and many-to-many (tags) updates, updating a
    "plain" (multipart-created) file, updating a subinterval-referencing
    file, and creating/updating externally hosted files (including
    validation failures).
    """
    file_count = Session.query(model.File).count()
    # Add the default application settings and the restricted tag.
    restricted_tag = h.generate_restricted_tag()
    application_settings = h.generate_default_application_settings()
    Session.add_all([application_settings, restricted_tag])
    Session.commit()
    restricted_tag = h.get_restricted_tag()
    restricted_tag_id = restricted_tag.id
    # Create a file to update.
    wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
    wav_file_size = os.path.getsize(wav_file_path)
    params = self.file_create_params_base64.copy()
    original_name = u'test_update_name.wav'
    params.update({
        'filename': original_name,
        'tags': [restricted_tag.id],
        'description': u'description',
        # NOTE(review): the handle from open() here is never closed, and the
        # wav is read in text mode — harmless on POSIX test runs, but
        # `with open(path, 'rb')` would be tidier.
        'base64_encoded_file': b64encode(open(wav_file_path).read())
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers,
                             self.extra_environ_admin)
    resp = json.loads(response.body)
    # NOTE(review): `id` shadows the builtin; kept as-is to avoid touching code.
    id = int(resp['id'])
    new_file_count = Session.query(model.File).count()
    assert resp['filename'] == original_name
    assert new_file_count == file_count + 1
    # As a viewer, attempt to update the restricted file we just created.
    # Expect to fail.
    extra_environ = {'test.authentication.role': 'viewer',
                     'test.application_settings': True}
    params = self.file_create_params_base64.copy()
    params.update({
        'description': u'A file that has been updated.',
    })
    params = json.dumps(params)
    response = self.app.put(url('file', id=id), params,
                            self.json_headers, extra_environ, status=403)
    resp = json.loads(response.body)
    assert resp['error'] == u'You are not authorized to access this resource.'
    assert response.content_type == 'application/json'
    # As an administrator now, update the file just created and expect to
    # succeed.
    params = self.file_create_params_base64.copy()
    params.update({
        'description': u'A file that has been updated.'
    })
    params = json.dumps(params)
    response = self.app.put(url('file', id=id), params,
                            self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    new_file_count = Session.query(model.File).count()
    assert resp['description'] == u'A file that has been updated.'
    # Omitting 'tags' from the update params clears the file's tags.
    assert resp['tags'] == []
    assert new_file_count == file_count + 1
    assert response.content_type == 'application/json'
    # Attempt an update with no new data. Expect a 400 error
    # and response['errors'] = {'no change': The update request failed
    # because the submitted data were not new.'}.
    # (`params` still holds the same serialized payload as the last PUT.)
    response = self.app.put(url('file', id=id), params, self.json_headers,
                            self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    assert u'the submitted data were not new' in resp['error']
    # Add a speaker and some tags to the db.
    speaker = h.generate_default_speaker()
    tag1 = model.Tag()
    tag1.name = u'tag 1'
    tag2 = model.Tag()
    tag2.name = u'tag 2'
    Session.add_all([speaker, tag1, tag2])
    Session.commit()
    speaker = h.get_speakers()[0]
    tag1_id = tag1.id
    tag2_id = tag2.id
    speaker_id = speaker.id
    # Now update our file by adding a many-to-one datum, viz. a speaker
    params = self.file_create_params_base64.copy()
    params.update({'speaker': speaker.id})
    params = json.dumps(params)
    response = self.app.put(url('file', id=id), params, self.json_headers,
                            extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['speaker']['first_name'] == speaker.first_name
    # Finally, update the file by adding some many-to-many data, i.e., tags
    params = self.file_create_params_base64.copy()
    params.update({'tags': [tag1_id, tag2_id]})
    params = json.dumps(params)
    response = self.app.put(url('file', id=id), params, self.json_headers,
                            extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert sorted([t['name'] for t in resp['tags']]) == [u'tag 1', u'tag 2']

    ########################################################################
    # Updating "Plain Files"
    ########################################################################

    # Create a file using the multipart/form-data POST method.
    params = self.file_create_params_MPFD.copy()
    params.update({'filename': u'multipart.wav'})
    response = self.app.post(url('/files'), params, extra_environ=self.extra_environ_admin,
                             upload_files=[('filedata', wav_file_path)])
    resp = json.loads(response.body)
    file_count = Session.query(model.File).count()
    plain_file_id = resp['id']
    assert resp['filename'] == u'multipart.wav'
    assert resp['filename'] in os.listdir(self.files_path)
    assert resp['name'] == resp['filename'] # name value set in files controller, user can't change this
    assert resp['MIME_type'] == u'audio/x-wav'
    assert resp['enterer']['first_name'] == u'Admin'
    assert response.content_type == 'application/json'
    # Update the plain file by adding some metadata.
    params = self.file_create_params_base64.copy()
    params.update({
        'tags': [tag1_id, tag2_id],
        'description': u'plain updated',
        'date_elicited': u'01/01/2000',
        'speaker': speaker_id,
        'utterance_type': u'Metalanguage Utterance'
    })
    params = json.dumps(params)
    response = self.app.put(url('file', id=plain_file_id), params, self.json_headers,
                            extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert sorted([t['name'] for t in resp['tags']]) == [u'tag 1', u'tag 2']
    assert resp['description'] == u'plain updated'
    assert resp['speaker']['id'] == speaker_id
    assert resp['filename'] == resp['name'] == u'multipart.wav'
    assert resp['MIME_type'] == u'audio/x-wav'
    assert resp['enterer']['first_name'] == u'Admin'
    assert response.content_type == 'application/json'

    ########################################################################
    # Update a subinterval-referencing file
    ########################################################################

    # Create a subinterval-referencing audio file; reference one of the wav
    # files created earlier.
    params = self.file_create_params_sub_ref.copy()
    params.update({
        'parent_file': plain_file_id,
        'name': u'anyname',
        'start': 13.3,
        'end': 26.89,
        'tags': [tag1_id],
        'description': u'subinterval-referencing file',
        'date_elicited': u'01/01/2000',
        'speaker': speaker_id,
        'utterance_type': u'Object Language Utterance'
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers,
                             self.extra_environ_contrib)
    resp = json.loads(response.body)
    subinterval_referencing_id = resp['id']
    # A subinterval-referencing file has no filename/size of its own; it
    # inherits MIME type from its parent.
    assert resp['filename'] == None
    assert resp['name'] == u'anyname'
    assert resp['parent_file']['filename'] == u'multipart.wav'
    assert resp['MIME_type'] == u'audio/x-wav'
    assert resp['size'] == None
    assert resp['parent_file']['size'] == wav_file_size
    assert resp['enterer']['first_name'] == u'Contributor'
    assert resp['start'] == 13.3
    assert type(resp['start']) is float
    assert resp['end'] == 26.89
    assert type(resp['end']) is float
    assert resp['tags'][0]['id'] == tag1_id
    assert response.content_type == 'application/json'
    # Update the subinterval-referencing file.
    params = self.file_create_params_base64.copy()
    params.update({
        'parent_file': plain_file_id,
        'start': 13.3,
        'end': 26.89,
        'tags': [],
        'description': u'abc to def',
        'date_elicited': u'01/01/2010',
        'utterance_type': u'Metalanguage Utterance'
    })
    params = json.dumps(params)
    response = self.app.put(url('file', id=subinterval_referencing_id), params, self.json_headers,
                            extra_environ=self.extra_environ_contrib)
    resp = json.loads(response.body)
    assert resp['parent_file']['id'] == plain_file_id
    # When no explicit name is supplied, the child takes the parent's name.
    assert resp['name'] == resp['parent_file']['name']
    assert resp['tags'] == []
    assert resp['description'] == u'abc to def'
    assert resp['speaker'] == None
    assert resp['MIME_type'] == u'audio/x-wav'
    assert response.content_type == 'application/json'
    # Attempt a vacuous update and expect an error message.
    response = self.app.put(url('file', id=subinterval_referencing_id), params, self.json_headers,
                            extra_environ=self.extra_environ_contrib, status=400)
    resp = json.loads(response.body)
    assert resp['error'] == u'The update request failed because the submitted data were not new.'
    # Now restrict the parent file and verify that the child file does not
    # thereby become restricted. This means that the metadata of a restricted
    # parent file may accessible to restricted users via the child file;
    # however, this is ok since the serve action still will not allow
    # the contents of the restricted file to be served to the restricted users.
    params = self.file_create_params_base64.copy()
    params.update({
        'tags': [tag1_id, tag2_id, restricted_tag_id],
        'description': u'plain updated',
        'date_elicited': u'01/01/2000',
        'speaker': speaker_id,
        'utterance_type': u'Metalanguage Utterance'
    })
    params = json.dumps(params)
    response = self.app.put(url('file', id=plain_file_id), params,
                            self.json_headers, extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert u'restricted' in [t['name'] for t in resp['tags']]
    SRFile = Session.query(model.File).get(subinterval_referencing_id)
    assert u'restricted' not in [t.name for t in SRFile.tags]

    ########################################################################
    # externally hosted file creation
    ########################################################################

    # Create a valid externally hosted file
    url_ = 'http://vimeo.com/54144270'
    params = self.file_create_params_ext_host.copy()
    params.update({
        'url': url_,
        'name': u'externally hosted file',
        'MIME_type': u'video/mpeg',
        'description': u'A large video file I didn\'t want to upload here.'
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['description'] == u'A large video file I didn\'t want to upload here.'
    assert resp['url'] == url_
    # Update the externally hosted file
    # NOTE(review): this "update" is issued as a POST to the collection, so it
    # actually creates a second externally hosted file — presumably
    # intentional here since only the response fields are asserted.
    params = self.file_create_params_ext_host.copy()
    params.update({
        'url': url_,
        'name': u'externally hosted file',
        'password': u'abc',
        'MIME_type': u'video/mpeg',
        'description': u'A large video file I didn\'t want to upload here.',
        'date_elicited': u'12/29/1987'
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['date_elicited'] == u'1987-12-29'
    assert resp['password'] == u'abc'
    # Attempt to update the externally hosted file with invalid params.
    params = self.file_create_params_ext_host.copy()
    params.update({
        'url': u'abc', # Invalid
        'name': u'externally hosted file' * 200, # too long
        'MIME_type': u'zooboomafoo', # invalid
        'description': u'A large video file I didn\'t want to upload here.',
        'date_elicited': u'1987/12/29' # wrong format
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers, self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    assert resp['errors']['MIME_type'] == u'The file upload failed because the file type zooboomafoo is not allowed.'
    assert resp['errors']['url'] == u'You must provide a full domain name (like abc.com)'
    assert resp['errors']['name'] == u'Enter a value not more than 255 characters long'
    assert resp['errors']['date_elicited'] == u'Please enter the date in the form mm/dd/yyyy'
@nottest
def test_delete(self):
    """Tests that DELETE /files/id deletes the file with id=id and returns a JSON representation.

    If the id is invalid or unspecified, then JSON null or a 404 status code
    are returned, respectively.

    Also verifies that deletion removes the file from disk, that related
    tags/speakers survive, that unicode filenames round-trip, and that
    deleting a parent file orphans (but does not delete) its
    subinterval-referencing children.
    """
    # Add some objects to the db: a default application settings, a speaker
    # and a tag.
    application_settings = h.generate_default_application_settings()
    speaker = h.generate_default_speaker()
    my_contributor = h.generate_default_user()
    my_contributor.username = u'uniqueusername'
    tag = model.Tag()
    tag.name = u'default tag'
    Session.add_all([application_settings, speaker, my_contributor, tag])
    Session.commit()
    my_contributor = Session.query(model.User).filter(
        model.User.username==u'uniqueusername').first()
    my_contributor_id = my_contributor.id
    tag_id = tag.id
    speaker_id = speaker.id
    # Count the original number of files
    file_count = Session.query(model.File).count()
    # First, as my_contributor, create a file to delete.
    jpg_file_path = os.path.join(self.test_files_path, 'old_test.jpg')
    extra_environ = {'test.authentication.id': my_contributor_id,
                     'test.application_settings': True}
    params = self.file_create_params_base64.copy()
    params.update({
        'filename': u'test_delete.jpg',
        # NOTE(review): file handle opened here is never closed; `with
        # open(path, 'rb')` would be tidier.
        'base64_encoded_file': b64encode(open(jpg_file_path).read()),
        'speaker': speaker_id,
        'tags': [tag_id]
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers,
                             extra_environ)
    resp = json.loads(response.body)
    to_delete_id = resp['id']
    to_delete_name = resp['filename']
    assert resp['filename'] == u'test_delete.jpg'
    assert resp['tags'][0]['name'] == u'default tag'
    # Now count the files
    new_file_count = Session.query(model.File).count()
    assert new_file_count == file_count + 1
    # Now, as the default contributor, attempt to delete the my_contributor-
    # entered file we just created and expect to fail.
    extra_environ = {'test.authentication.role': 'contributor',
                     'test.application_settings': True}
    response = self.app.delete(url('file', id=to_delete_id),
                               extra_environ=extra_environ, status=403)
    resp = json.loads(response.body)
    file_that_was_not_deleted = Session.query(model.File).get(to_delete_id)
    file_path = os.path.join(self.files_path, to_delete_name)
    assert os.path.exists(file_path)
    assert file_that_was_not_deleted is not None
    assert resp['error'] == u'You are not authorized to access this resource.'
    assert response.content_type == 'application/json'
    # As my_contributor, attempt to delete the file we just created and
    # expect to succeed.
    extra_environ = {'test.authentication.id': my_contributor_id,
                     'test.application_settings': True}
    response = self.app.delete(url('file', id=to_delete_id),
                               extra_environ=extra_environ)
    resp = json.loads(response.body)
    new_file_count = Session.query(model.File).count()
    # Deleting the file must not cascade-delete its tag or speaker.
    tag_of_deleted_file = Session.query(model.Tag).get(
        resp['tags'][0]['id'])
    speaker_of_deleted_file = Session.query(model.Speaker).get(
        resp['speaker']['id'])
    assert isinstance(tag_of_deleted_file, model.Tag)
    assert isinstance(speaker_of_deleted_file, model.Speaker)
    assert new_file_count == file_count
    # The deleted file will be returned to us, so the assertions from above
    # should still hold true.
    file_that_was_deleted = Session.query(model.File).get(to_delete_id)
    file_path = os.path.join(self.files_path, to_delete_name)
    assert not os.path.exists(file_path)
    assert 'old_test.jpg' not in os.listdir(self.files_path)
    assert file_that_was_deleted is None
    assert resp['filename'] == u'test_delete.jpg'
    # Delete with an invalid id
    # NOTE(review): `id` shadows the builtin; kept as-is to avoid touching code.
    id = 9999999999999
    response = self.app.delete(url('file', id=id),
        headers=self.json_headers, extra_environ=self.extra_environ_admin,
        status=404)
    assert u'There is no file with id %s' % id in json.loads(response.body)['error']
    assert response.content_type == 'application/json'
    # Delete without an id
    response = self.app.delete(url('file', id=''), status=404,
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    assert json.loads(response.body)['error'] == \
        'The resource could not be found.'
    # Create and delete a file with unicode characters in the file name
    extra_environ = {'test.authentication.id': my_contributor_id,
                     'test.application_settings': True}
    params = self.file_create_params_base64.copy()
    params.update({
        'filename': u'\u201Cte\u0301st delete\u201D.jpg',
        'base64_encoded_file': b64encode(open(jpg_file_path).read()),
        'speaker': speaker_id,
        'tags': [tag_id]
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers, extra_environ)
    resp = json.loads(response.body)
    to_delete_id = resp['id']
    to_delete_name = resp['filename']
    # The controller replaces the space with an underscore in the stored name.
    assert resp['filename'] == u'\u201Cte\u0301st_delete\u201D.jpg'
    assert resp['tags'][0]['name'] == u'default tag'
    assert u'\u201Cte\u0301st_delete\u201D.jpg' in os.listdir(self.files_path)
    response = self.app.delete(url('file', id=to_delete_id), extra_environ=extra_environ)
    resp = json.loads(response.body)
    assert u'\u201Cte\u0301st_delete\u201D.jpg' not in os.listdir(self.files_path)
    # Create a file, create a subinterval-referencing file that references
    # it and then delete the parent file. Show that the child files become
    # "orphaned" but are not deleted. Use case: user has uploaded an incorrect
    # parent file; must delete parent file, create a new one and then update
    # child files' parent_file attribute.
    # Create the parent WAV file.
    wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
    params = self.file_create_params_base64.copy()
    params.update({
        'filename': u'parent.wav',
        'base64_encoded_file': b64encode(open(wav_file_path).read())
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    parent_id = resp['id']
    parent_filename = resp['filename']
    parent_lossy_filename = resp['lossy_filename']
    # Create a subinterval-referencing audio file; reference one of the wav
    # files created earlier.
    params = self.file_create_params_sub_ref.copy()
    params.update({
        'parent_file': parent_id,
        'name': u'child',
        'start': 1,
        'end': 2,
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    child_id = resp['id']
    assert resp['parent_file']['id'] == parent_id
    # Show that the child file still exists after the parent has been deleted.
    assert parent_filename in os.listdir(self.files_path)
    # The reduced (lossy) copy only exists when ffmpeg is available and
    # reduced copies are enabled.
    if self.create_reduced_size_file_copies and h.command_line_program_installed('ffmpeg'):
        assert parent_lossy_filename in os.listdir(self.reduced_files_path)
    response = self.app.delete(url('file', id=parent_id), extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert parent_filename not in os.listdir(self.files_path)
    assert parent_lossy_filename not in os.listdir(self.reduced_files_path)
    assert resp['filename'] == u'parent.wav'
    parent = Session.query(model.File).get(parent_id)
    assert parent is None
    # The child survives the parent's deletion, but is orphaned.
    child = Session.query(model.File).get(child_id)
    assert child is not None
    assert child.parent_file is None
    # Delete the child file
    response = self.app.delete(url('file', id=child_id), extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['name'] == u'child'
@nottest
def test_show(self):
    """Tests that GET /files/id returns a JSON file object, null or 404
    depending on whether the id is valid, invalid or unspecified,
    respectively.

    Also verifies the file.forms backreference and that the restricted tag
    correctly gates access per user role/unrestrictedness.
    """
    # First create a test image file.
    jpg_file_path = os.path.join(self.test_files_path, 'old_test.jpg')
    jpg_file_size = os.path.getsize(jpg_file_path)
    params = self.file_create_params_base64.copy()
    # Read in binary mode inside a context manager: the original
    # open(path).read() leaked the file handle and used text mode on a
    # binary file (incorrect on Windows).
    with open(jpg_file_path, 'rb') as f:
        jpg_contents = f.read()
    params.update({
        'filename': u'old_test.jpg',
        'base64_encoded_file': b64encode(jpg_contents)
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers,
                             self.extra_environ_admin)
    resp = json.loads(response.body)
    file_count = Session.query(model.File).count()
    file_id = resp['id']
    assert resp['filename'] == u'old_test.jpg'
    assert resp['MIME_type'] == u'image/jpeg'
    assert resp['size'] == jpg_file_size
    assert resp['enterer']['first_name'] == u'Admin'
    assert file_count == 1
    # Then create a form associated to the image file just created and make sure
    # we can access the form via the file.forms backreference.
    params = self.form_create_params.copy()
    params.update({
        'transcription': u'test',
        'translations': [{'transcription': u'test', 'grammaticality': u''}],
        'files': [file_id]
    })
    params = json.dumps(params)
    response = self.app.post(url('forms'), params, self.json_headers,
                             self.extra_environ_admin)
    resp = json.loads(response.body)
    # isinstance is the idiomatic type check (was: type(resp) == type({})).
    assert isinstance(resp, dict)
    assert resp['transcription'] == u'test'
    assert resp['translations'][0]['transcription'] == u'test'
    # Identity comparison with None (was: == None).
    assert resp['morpheme_break_ids'] is None
    assert resp['enterer']['first_name'] == u'Admin'
    assert resp['files'][0]['filename'] == u'old_test.jpg'
    # GET the image file and make sure we see the associated form.
    response = self.app.get(url('file', id=file_id), headers=self.json_headers,
                            extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['forms'][0]['transcription'] == u'test'
    assert resp['filename'] == u'old_test.jpg'
    assert response.content_type == 'application/json'
    # Invalid id (renamed from `id`, which shadowed the builtin).
    bad_id = 100000000000
    response = self.app.get(url('file', id=bad_id),
        headers=self.json_headers, extra_environ=self.extra_environ_admin,
        status=404)
    resp = json.loads(response.body)
    # Use the already-parsed body instead of re-parsing response.body.
    assert u'There is no file with id %s' % bad_id in resp['error']
    assert response.content_type == 'application/json'
    # No id
    response = self.app.get(url('file', id=''), status=404,
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    assert json.loads(response.body)['error'] == \
        'The resource could not be found.'
    # Now test that the restricted tag is working correctly.
    # First get the default contributor's id.
    users = h.get_users()
    contributor_id = [u for u in users if u.role == u'contributor'][0].id
    # Then add another contributor and a restricted tag.
    restricted_tag = h.generate_restricted_tag()
    my_contributor = h.generate_default_user()
    my_contributor_first_name = u'Mycontributor'
    my_contributor.first_name = my_contributor_first_name
    my_contributor.username = u'uniqueusername'
    Session.add_all([restricted_tag, my_contributor])
    Session.commit()
    my_contributor = Session.query(model.User).filter(
        model.User.first_name == my_contributor_first_name).first()
    my_contributor_id = my_contributor.id
    # Then add the default application settings with my_contributor as the
    # only unrestricted user.
    application_settings = h.generate_default_application_settings()
    application_settings.unrestricted_users = [my_contributor]
    Session.add(application_settings)
    Session.commit()
    # Finally, issue a POST request to create the restricted file with
    # the *default* contributor as the enterer.
    wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
    extra_environ = {'test.authentication.id': contributor_id,
                     'test.application_settings': True}
    with open(wav_file_path, 'rb') as f:
        wav_contents = f.read()
    params = self.file_create_params_base64.copy()
    params.update({
        'filename': u'old_test.wav',
        'base64_encoded_file': b64encode(wav_contents),
        'tags': [h.get_tags()[0].id] # the restricted tag should be the only one
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers,
                             extra_environ)
    resp = json.loads(response.body)
    restricted_file_id = resp['id']
    # Expectation: the administrator, the default contributor (qua enterer)
    # and the unrestricted my_contributor should all be able to view the file.
    # The viewer should get a 403 error when attempting to view this file.
    # An administrator should be able to view this file.
    extra_environ = {'test.authentication.role': 'administrator',
                     'test.application_settings': True}
    response = self.app.get(url('file', id=restricted_file_id),
                            headers=self.json_headers, extra_environ=extra_environ)
    # The default contributor (qua enterer) should be able to view this file.
    extra_environ = {'test.authentication.id': contributor_id,
                     'test.application_settings': True}
    response = self.app.get(url('file', id=restricted_file_id),
                            headers=self.json_headers, extra_environ=extra_environ)
    # Mycontributor (an unrestricted user) should be able to view this
    # restricted file.
    extra_environ = {'test.authentication.id': my_contributor_id,
                     'test.application_settings': True}
    response = self.app.get(url('file', id=restricted_file_id),
                            headers=self.json_headers, extra_environ=extra_environ)
    # A (not unrestricted) viewer should *not* be able to view this file.
    extra_environ = {'test.authentication.role': 'viewer',
                     'test.application_settings': True}
    response = self.app.get(url('file', id=restricted_file_id),
                            headers=self.json_headers, extra_environ=extra_environ, status=403)
    # Remove Mycontributor from the unrestricted users list and access will be denied.
    application_settings = h.get_application_settings()
    application_settings.unrestricted_users = []
    Session.add(application_settings)
    Session.commit()
    # Mycontributor (no longer an unrestricted user) should now *not* be
    # able to view this restricted file.
    extra_environ = {'test.authentication.id': my_contributor_id,
                     'test.application_settings': True}
    response = self.app.get(url('file', id=restricted_file_id),
                            headers=self.json_headers, extra_environ=extra_environ, status=403)
    # Remove the restricted tag from the file and the viewer should now be
    # able to view it too.
    restricted_file = Session.query(model.File).get(restricted_file_id)
    restricted_file.tags = []
    Session.add(restricted_file)
    Session.commit()
    extra_environ = {'test.authentication.role': 'viewer',
                     'test.application_settings': True}
    response = self.app.get(url('file', id=restricted_file_id),
                            headers=self.json_headers, extra_environ=extra_environ)
    assert response.content_type == 'application/json'
@nottest
def test_edit(self):
    """Tests that GET /files/id/edit returns a JSON object of data necessary to edit the file with id=id.

    The JSON object is of the form {'file': {...}, 'data': {...}} or
    {'error': '...'} (with a 404 status code) depending on whether the id is
    valid or invalid/unspecified, respectively.
    """
    # Add the default application settings and the restricted tag.
    application_settings = h.generate_default_application_settings()
    restricted_tag = h.generate_restricted_tag()
    Session.add_all([restricted_tag, application_settings])
    Session.commit()
    restricted_tag = h.get_restricted_tag()
    contributor = [u for u in h.get_users() if u.role == u'contributor'][0]
    contributor_id = contributor.id
    # Create a restricted file.
    wav_file_path = os.path.join(self.test_files_path, 'old_test.wav')
    extra_environ = {'test.authentication.id': contributor_id,
                     'test.application_settings': True}
    params = self.file_create_params_base64.copy()
    params.update({
        'filename': u'old_test.wav',
        # NOTE(review): file handle opened here is never closed; `with
        # open(path, 'rb')` would be tidier.
        'base64_encoded_file': b64encode(open(wav_file_path).read()),
        'tags': [restricted_tag.id]
    })
    params = json.dumps(params)
    response = self.app.post(url('files'), params, self.json_headers,
                             self.extra_environ_admin)
    resp = json.loads(response.body)
    restricted_file_id = resp['id']
    # As a (not unrestricted) contributor, attempt to call edit on the
    # restricted form and expect to fail.
    extra_environ = {'test.authentication.role': 'contributor',
                     'test.application_settings': True}
    response = self.app.get(url('edit_file', id=restricted_file_id),
                            extra_environ=extra_environ, status=403)
    resp = json.loads(response.body)
    assert resp['error'] == u'You are not authorized to access this resource.'
    assert response.content_type == 'application/json'
    # Not logged in: expect 401 Unauthorized
    response = self.app.get(url('edit_file', id=restricted_file_id), status=401)
    resp = json.loads(response.body)
    assert resp['error'] == u'Authentication is required to access this resource.'
    # Invalid id
    # NOTE(review): `id` shadows the builtin; kept as-is to avoid touching code.
    id = 9876544
    response = self.app.get(url('edit_file', id=id),
        headers=self.json_headers, extra_environ=self.extra_environ_admin,
        status=404)
    assert u'There is no file with id %s' % id in json.loads(response.body)['error']
    assert response.content_type == 'application/json'
    # No id
    response = self.app.get(url('edit_file', id=''), status=404,
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    assert json.loads(response.body)['error'] == \
        'The resource could not be found.'
    # Valid id
    response = self.app.get(url('edit_file', id=restricted_file_id),
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['file']['filename'] == u'old_test.wav'
    assert response.content_type == 'application/json'
    # Valid id with GET params. Param values are treated as strings, not
    # JSON. If any params are specified, the default is to return a JSON
    # array corresponding to store for the param. There are three cases
    # that will result in an empty JSON array being returned:
    # 1. the param is not specified
    # 2. the value of the specified param is an empty string
    # 3. the value of the specified param is an ISO 8601 UTC datetime
    #    string that matches the most recent datetime_modified value of the
    #    store in question.
    # Add some test data to the database.
    application_settings = h.generate_default_application_settings()
    speaker = h.generate_default_speaker()
    tag = model.Tag()
    tag.name = u'name'
    Session.add_all([application_settings, speaker, tag])
    Session.commit()
    # Get the data currently in the db (see websetup.py for the test data).
    data = {
        'tags': h.get_mini_dicts_getter('Tag')(),
        'speakers': h.get_mini_dicts_getter('Speaker')(),
        'users': h.get_mini_dicts_getter('User')(),
        'utterance_types': h.utterance_types,
        'allowed_file_types': h.allowed_file_types
    }
    # JSON.stringify and then re-Python-ify the data. This is what the data
    # should look like in the response to a simulated GET request.
    data = json.loads(json.dumps(data, cls=h.JSONOLDEncoder))
    params = {
        # Value is a non-empty string: 'users' will be in response.
        'users': 'give me some users!',
        # Value is empty string: 'speakers' will not be in response.
        'speakers': '',
        # Value is ISO 8601 UTC datetime string that does not match the most
        # recent Tag.datetime_modified value: 'tags' *will* be in response.
        'tags': datetime.datetime.utcnow().isoformat(),
    }
    response = self.app.get(url('edit_file', id=restricted_file_id), params,
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert resp['data']['tags'] == data['tags']
    assert resp['data']['speakers'] == []
    assert resp['data']['users'] == data['users']
    assert resp['data']['utterance_types'] == data['utterance_types']
    assert response.content_type == 'application/json'
    # Invalid id with GET params. It should still return 'null'.
    params = {
        # If id were valid, this would cause a speakers array to be returned
        # also.
        'speakers': 'True',
    }
    response = self.app.get(url('edit_file', id=id), params,
                            extra_environ=self.extra_environ_admin, status=404)
    assert u'There is no file with id %s' % id in json.loads(response.body)['error']
    @nottest
    def test_serve(self):
        """Tests that GET /files/id/serve returns the file with name id from
        the permanent store, i.e., from onlinelinguisticdatabase/files/.

        Exercises, in order: authenticated retrieval, 401 without auth,
        403 for an unauthorized contributor, 400 for externally hosted
        files, parent-data serving for subinterval-referencing files,
        serve_reduced for wav/image/ogg files, and a 404 for a bad id.
        """
        extra_environ_admin = {'test.authentication.role': 'administrator',
                'test.application_settings': True}
        extra_environ_contrib = {'test.authentication.role': 'contributor',
                'test.application_settings': True}
        # Create a restricted wav file.
        restricted_tag = h.generate_restricted_tag()
        Session.add(restricted_tag)
        Session.commit()
        restricted_tag_id = restricted_tag.id
        test_files_path = self.test_files_path
        wav_filename = u'old_test.wav'
        wav_file_path = os.path.join(test_files_path, wav_filename)
        wav_file_size = os.path.getsize(wav_file_path)
        # NOTE(review): text-mode open of a binary file relies on
        # Python 2's str == bytes; would need 'rb' under Python 3.
        wav_file_base64 = b64encode(open(wav_file_path).read())
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': wav_filename,
            'base64_encoded_file': wav_file_base64,
            'tags': [restricted_tag_id]
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers, extra_environ_admin)
        resp = json.loads(response.body)
        wav_filename = resp['filename']
        wav_file_id = resp['id']
        # Retrieve the file data as the admin who entered it
        response = self.app.get(url(controller='files', action='serve', id=wav_file_id),
            headers=self.json_headers, extra_environ=extra_environ_admin)
        response_base64 = b64encode(response.body)
        assert wav_file_base64 == response_base64
        assert guess_type(wav_filename)[0] == response.headers['Content-Type']
        assert wav_file_size == int(response.headers['Content-Length'])
        # Attempt to retrieve the file without authentication and expect to fail (401).
        response = self.app.get(url(controller='files', action='serve', id=wav_file_id),
            headers=self.json_headers, status=401)
        resp = json.loads(response.body)
        assert resp['error'] == u'Authentication is required to access this resource.'
        assert response.content_type == 'application/json'
        # Attempt to retrieve the restricted file data as the contrib and expect to fail.
        response = self.app.get(url(controller='files', action='serve', id=wav_file_id),
            headers=self.json_headers, extra_environ=extra_environ_contrib, status=403)
        resp = json.loads(response.body)
        assert resp['error'] == u'You are not authorized to access this resource.'
        assert response.content_type == 'application/json'
        # Attempt to serve an externally hosted file and expect a 400 status response.
        # Create a valid externally hosted file
        params = self.file_create_params_ext_host.copy()
        url_ = 'http://vimeo.com/54144270'
        params.update({
            'url': url_,
            'name': u'externally hosted file',
            'MIME_type': u'video/mpeg',
            'description': u'A large video file I didn\'t want to upload here.'
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers, self.extra_environ_admin)
        resp = json.loads(response.body)
        eh_file_id = resp['id']
        # Attempt to retrieve the externally hosted file's "data" and expect a 400 response.
        response = self.app.get(url(controller='files', action='serve', id=eh_file_id),
            headers=self.json_headers, extra_environ=extra_environ_admin, status=400)
        resp = json.loads(response.body)
        assert resp['error'] == u'The content of file %s is stored elsewhere at %s' % (eh_file_id, url_)
        assert response.content_type == 'application/json'
        # Request the content of a subinterval-referencing file and expect to receive
        # the file data from its parent_file
        # Create a subinterval-referencing audio file; reference the wav created above.
        params = self.file_create_params_sub_ref.copy()
        params.update({
            'parent_file': wav_file_id,
            'name': u'subinterval_x',
            'start': 1.3,
            'end': 2.6
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers,
            self.extra_environ_admin)
        resp = json.loads(response.body)
        sr_file_id = resp['id']
        # Retrieve the parent file's file data when requesting that of the child.
        response = self.app.get(url(controller='files', action='serve', id=sr_file_id),
            headers=self.json_headers, extra_environ=extra_environ_admin)
        response_base64 = b64encode(response.body)
        assert wav_file_base64 == response_base64
        assert guess_type(wav_filename)[0] == response.headers['Content-Type']
        # Retrieve the reduced file data of the wav file created above.
        # Both branches depend on whether ffmpeg is available in this environment.
        if self.create_reduced_size_file_copies and h.command_line_program_installed('ffmpeg'):
            response = self.app.get(url(controller='files', action='serve_reduced', id=wav_file_id),
                headers=self.json_headers, extra_environ=extra_environ_admin)
            response_base64 = b64encode(response.body)
            assert len(wav_file_base64) > len(response_base64)
            assert response.content_type == h.guess_type('x.%s' % self.preferred_lossy_audio_format)[0]
        else:
            response = self.app.get(url(controller='files', action='serve_reduced', id=wav_file_id),
                headers=self.json_headers, extra_environ=extra_environ_admin, status=404)
            resp = json.loads(response.body)
            assert resp['error'] == u'There is no size-reduced copy of file %s' % wav_file_id
            assert response.content_type == 'application/json'
        # Retrieve the reduced file of the wav-subinterval-referencing file above
        if self.create_reduced_size_file_copies and h.command_line_program_installed('ffmpeg'):
            response = self.app.get(url(controller='files', action='serve_reduced', id=sr_file_id),
                headers=self.json_headers, extra_environ=extra_environ_admin)
            sr_response_base64 = b64encode(response.body)
            assert len(wav_file_base64) > len(sr_response_base64)
            # The child's reduced copy is byte-identical to the parent's.
            assert sr_response_base64 == response_base64
            assert response.content_type == h.guess_type('x.%s' % self.preferred_lossy_audio_format)[0]
        else:
            response = self.app.get(url(controller='files', action='serve_reduced', id=sr_file_id),
                headers=self.json_headers, extra_environ=extra_environ_admin, status=404)
            resp = json.loads(response.body)
            assert resp['error'] == u'There is no size-reduced copy of file %s' % sr_file_id
            assert response.content_type == 'application/json'
        # Create an image file and retrieve its contents and resized contents
        jpg_filename = u'large_image.jpg'
        jpg_file_path = os.path.join(test_files_path, jpg_filename)
        jpg_file_size = os.path.getsize(jpg_file_path)
        jpg_file_base64 = b64encode(open(jpg_file_path).read())
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': jpg_filename,
            'base64_encoded_file': jpg_file_base64
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers, extra_environ_admin)
        resp = json.loads(response.body)
        jpg_filename = resp['filename']
        jpg_file_id = resp['id']
        # Get the image file's contents
        response = self.app.get(url(controller='files', action='serve', id=jpg_file_id),
            headers=self.json_headers, extra_environ=extra_environ_admin)
        response_base64 = b64encode(response.body)
        assert jpg_file_base64 == response_base64
        assert guess_type(jpg_filename)[0] == response.headers['Content-Type']
        assert jpg_file_size == int(response.headers['Content-Length'])
        # Get the reduced image file's contents (requires PIL/Image).
        if self.create_reduced_size_file_copies and Image:
            response = self.app.get(url(controller='files', action='serve_reduced', id=jpg_file_id),
                headers=self.json_headers, extra_environ=extra_environ_admin)
            response_base64 = b64encode(response.body)
            # NOTE(review): this is a lexicographic string comparison, not a
            # length comparison — presumably len() was intended; confirm.
            assert jpg_file_base64 > response_base64
            assert guess_type(jpg_filename)[0] == response.headers['Content-Type']
        else:
            response = self.app.get(url(controller='files', action='serve_reduced', id=jpg_file_id),
                headers=self.json_headers, extra_environ=extra_environ_admin, status=404)
            resp = json.loads(response.body)
            assert resp['error'] == u'There is no size-reduced copy of file %s' % jpg_file_id
        # Attempt to get the reduced contents of a file that has none (i.e., no
        # lossy_filename value) and expect to fail.
        # Create a .ogg file and retrieve its contents and fail to retrieve its resized contents
        ogg_filename = u'old_test.ogg'
        ogg_file_path = os.path.join(test_files_path, ogg_filename)
        ogg_file_size = os.path.getsize(ogg_file_path)
        ogg_file_base64 = b64encode(open(ogg_file_path).read())
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': ogg_filename,
            'base64_encoded_file': ogg_file_base64
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers, extra_environ_admin)
        resp = json.loads(response.body)
        ogg_filename = resp['filename']
        ogg_file_id = resp['id']
        # Get the .ogg file's contents
        response = self.app.get(url(controller='files', action='serve', id=ogg_file_id),
            headers=self.json_headers, extra_environ=extra_environ_admin)
        response_base64 = b64encode(response.body)
        assert ogg_file_base64 == response_base64
        assert guess_type(ogg_filename)[0] == response.headers['Content-Type']
        assert ogg_file_size == int(response.headers['Content-Length'])
        # Attempt to get the reduced image file's contents and expect to fail
        response = self.app.get(url(controller='files', action='serve_reduced', id=ogg_file_id),
            headers=self.json_headers, extra_environ=extra_environ_admin, status=404)
        resp = json.loads(response.body)
        assert resp['error'] == u'There is no size-reduced copy of file %s' % ogg_file_id
        # Invalid id
        response = self.app.get(url(controller='files', action='serve', id=123456789012),
            headers=self.json_headers, extra_environ=extra_environ_admin, status=404)
        resp = json.loads(response.body)
        assert resp['error'] == u'There is no file with id 123456789012'
    @nottest
    def test_file_reduction(self):
        """Verifies that reduced-size copies of image and wav files are created in files/reduced_files
        and that the names of these reduced-size files is returned as the lossy_filename
        attribute.

        Note that this test will fail if create_reduced_size_file_copies is set
        to 0 in the config file.

        Cases covered: a small JPG (no reduction), large JPG/GIF/PNG (reduced
        when PIL is available), and a WAV transcoded to the preferred lossy
        audio format (when ffmpeg is available).
        """
        def get_size(path):
            # Size in bytes; used to verify the reduced copy is smaller.
            return os.stat(path).st_size
        # Create a JPG file that will not be reduced because it is already small enough
        jpg_file_path = os.path.join(self.test_files_path, 'old_test.jpg')
        jpg_file_size = os.path.getsize(jpg_file_path)
        # NOTE(review): text-mode open of binary data relies on Python 2 str == bytes.
        jpg_file_base64 = b64encode(open(jpg_file_path).read())
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': u'old_test.jpg',
            'base64_encoded_file': jpg_file_base64
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers,
            self.extra_environ_admin)
        resp = json.loads(response.body)
        file_count = Session.query(model.File).count()
        assert resp['filename'] == u'old_test.jpg'
        assert resp['MIME_type'] == u'image/jpeg'
        assert resp['size'] == jpg_file_size
        assert resp['enterer']['first_name'] == u'Admin'
        # Small file: no reduced copy is created.
        assert resp['lossy_filename'] == None
        assert file_count == 1
        assert len(os.listdir(self.reduced_files_path)) == 0
        # Create a large JPEG file and expect a reduced-size .jpg to be created in
        # files/reduced_files.
        filename = u'large_image.jpg'
        jpg_file_path = os.path.join(self.test_files_path, filename)
        jpg_reduced_file_path = os.path.join(self.reduced_files_path, filename)
        jpg_file_base64 = b64encode(open(jpg_file_path).read())
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': filename,
            'base64_encoded_file': jpg_file_base64
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers,
            self.extra_environ_admin)
        resp = json.loads(response.body)
        new_file_count = Session.query(model.File).count()
        assert new_file_count == file_count + 1
        assert resp['filename'] == filename
        assert resp['MIME_type'] == u'image/jpeg'
        assert resp['enterer']['first_name'] == u'Admin'
        if self.create_reduced_size_file_copies and Image:
            assert resp['lossy_filename'] == filename
            assert resp['lossy_filename'] in os.listdir(self.reduced_files_path)
            assert get_size(jpg_file_path) > get_size(jpg_reduced_file_path)
        else:
            assert resp['lossy_filename'] is None
            assert not os.path.isfile(jpg_reduced_file_path)
        # Create a large GIF file and expect a reduced-size .gif to be created in
        # files/reduced_files.
        filename = u'large_image.gif'
        gif_file_path = os.path.join(self.test_files_path, filename)
        gif_reduced_file_path = os.path.join(self.reduced_files_path, filename)
        gif_file_base64 = b64encode(open(gif_file_path).read())
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': filename,
            'base64_encoded_file': gif_file_base64
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers,
            self.extra_environ_admin)
        resp = json.loads(response.body)
        file_count = new_file_count
        new_file_count = Session.query(model.File).count()
        assert new_file_count == file_count + 1
        assert resp['filename'] == filename
        assert resp['MIME_type'] == u'image/gif'
        assert resp['enterer']['first_name'] == u'Admin'
        if self.create_reduced_size_file_copies and Image:
            assert resp['lossy_filename'] == filename
            assert resp['lossy_filename'] in os.listdir(self.reduced_files_path)
            assert get_size(gif_file_path) > get_size(gif_reduced_file_path)
        else:
            assert resp['lossy_filename'] is None
            assert not os.path.isfile(gif_reduced_file_path)
        # Create a large PNG file and expect a reduced-size .png to be created in
        # files/reduced_files.  This one uses multipart/form-data upload rather
        # than base64 to cover the other create path.
        filename = 'large_image.png'
        png_file_path = os.path.join(self.test_files_path, filename)
        png_reduced_file_path = os.path.join(self.reduced_files_path, filename)
        params = self.file_create_params_MPFD.copy()
        params.update({'filename': filename})
        response = self.app.post(url('/files'), params,
            extra_environ=self.extra_environ_admin,
            upload_files=[('filedata', png_file_path)])
        resp = json.loads(response.body)
        file_count = new_file_count
        new_file_count = Session.query(model.File).count()
        assert new_file_count == file_count + 1
        assert resp['filename'] == filename
        assert resp['MIME_type'] == u'image/png'
        assert resp['enterer']['first_name'] == u'Admin'
        if self.create_reduced_size_file_copies and Image:
            assert resp['lossy_filename'] == filename
            assert resp['lossy_filename'] in os.listdir(self.reduced_files_path)
            assert get_size(png_file_path) > get_size(png_reduced_file_path)
        else:
            assert resp['lossy_filename'] is None
            assert not os.path.isfile(png_reduced_file_path)
        # Test copying .wav files to .ogg/.mp3
        format_ = self.preferred_lossy_audio_format
        # Create a WAV file for which an .ogg/.mp3 Vorbis copy will be created in
        # files/reduced_files.
        filename = 'old_test.wav'
        lossy_filename = u'%s.%s' % (os.path.splitext(filename)[0], format_)
        lossy_file_path = os.path.join(self.reduced_files_path, lossy_filename)
        wav_file_path = os.path.join(self.test_files_path, filename)
        wav_file_size = os.path.getsize(wav_file_path)
        wav_file_base64 = b64encode(open(wav_file_path).read())
        params = self.file_create_params_base64.copy()
        params.update({
            'filename': filename,
            'base64_encoded_file': wav_file_base64
        })
        params = json.dumps(params)
        response = self.app.post(url('files'), params, self.json_headers,
            self.extra_environ_admin)
        resp = json.loads(response.body)
        file_count = new_file_count
        new_file_count = Session.query(model.File).count()
        assert resp['filename'] == filename
        assert resp['MIME_type'] == u'audio/x-wav'
        assert resp['size'] == wav_file_size
        assert resp['enterer']['first_name'] == u'Admin'
        assert new_file_count == file_count + 1
        if self.create_reduced_size_file_copies and h.command_line_program_installed('ffmpeg'):
            assert resp['lossy_filename'] == lossy_filename
            assert resp['lossy_filename'] in os.listdir(self.reduced_files_path)
            assert get_size(wav_file_path) > get_size(lossy_file_path)
        else:
            assert resp['lossy_filename'] is None
            assert not os.path.isfile(lossy_file_path)
    @nottest
    def test_new_search(self):
        """Tests that GET /files/new_search returns the search parameters for searching the files resource."""
        # The expected parameters are derived directly from the File query builder,
        # so the assertion stays valid if the File schema changes.
        query_builder = SQLAQueryBuilder('File')
        response = self.app.get(url('/files/new_search'), headers=self.json_headers,
            extra_environ=self.extra_environ_view)
        resp = json.loads(response.body)
        assert resp['search_parameters'] == h.get_search_parameters(query_builder)
|
"""
functional
----------
Functional programming utilities.
"""
from collections import deque
from operator import itemgetter
from .compat import viewkeys
def complement(f):
    """Return a predicate that is the logical negation of *f*."""
    def negated(*args, **kwargs):
        outcome = f(*args, **kwargs)
        return not outcome
    return negated
def keyfilter(f, d):
    """Return a new dict containing only the items of *d* whose key satisfies *f*."""
    kept = {}
    for key, value in d.items():
        if f(key):
            kept[key] = value
    return kept
def keysorted(d):
    """Return the items of *d* as a list of (key, value) pairs ordered by key."""
    return sorted(d.items(), key=lambda pair: pair[0])
def valfilter(f, d):
    """Return a new dict containing only the items of *d* whose value satisfies *f*."""
    kept = {}
    for key, value in d.items():
        if f(value):
            kept[key] = value
    return kept
def dzip(left, right):
    """Pair the values of *left* and *right* on the keys present in both dicts.

    Each shared key maps to a (left_value, right_value) tuple.
    """
    shared_keys = viewkeys(left) & viewkeys(right)
    paired = {}
    for key in shared_keys:
        paired[key] = (left.get(key), right.get(key))
    return paired
def sliding_window(iterable, n):
    """Yield overlapping length-*n* tuples over *iterable*.

    Yields nothing when the iterable holds fewer than *n* items.
    """
    source = iter(iterable)
    window = deque(maxlen=n)
    # Fill the initial window; bail out silently on a short iterable.
    while len(window) < n:
        try:
            window.append(next(source))
        except StopIteration:
            return
    yield tuple(window)
    # Slide one element at a time; deque(maxlen=n) drops the oldest item.
    for element in source:
        window.append(element)
        yield tuple(window)
def merge(dicts):
    """Merge an iterable of dicts into a single new dict.

    Later dicts win on key collisions; the inputs are never mutated.
    """
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from apicultur.service import Service
class AleatoriasNivel(Service):
    """Apicultur service wrapper for the MolinodeIdeas words-by-frequency API.

    Issues a GET against the molinodeideas/freq/es/words endpoint with a
    single required query argument, ``frecuencia``.
    """
    # http://apicultur.io/apis/info?name=WordsbyFreq_Word_Molino_es&version=1.0.0&provider=MolinodeIdeas
    version = '1.0.0'
    endpoint = 'molinodeideas/freq/es/words'
    method = 'GET'
    arguments = ['frecuencia',]
    func_name = 'aleatorias_nivel'
    def get_endpoint(self):
        # Builds <endpoint>/<version>?frecuencia=<value>; the %(frecuencia)s
        # placeholder is presumably filled in by the Service base class — confirm.
        return self._join_url(self.endpoint, self.version, '?frecuencia=%(frecuencia)s')
|
import sys
from math import sqrt
from math import floor
def h_ascii(key, n):
    """Hash *key* by summing its characters' ASCII values modulo *n*.

    Parameters:
    - key(str): The key we wish to hash
    - n(int): The size of the hash table

    Returns the hash value; exits the process with status 1 when n == 0.
    """
    total = sum(ord(character) for character in key)
    try:
        return total % n
    except ZeroDivisionError as inst:
        print("Run-Time Error:", type(inst))
        print("Cannot perform integer division or modulo by zero")
        sys.exit(1)
def h_rolling(key, n):
    """Hash *key* with a rolling polynomial (base 53, modulus 2**64).

    Parameters:
    - key(str): The key we wish to hash
    - n(int): The size of the hash table

    Returns the hash value; exits the process with status 1 when n == 0.
    """
    base = 53
    modulus = 2 ** 64
    value = sum(ord(ch) * base ** position for position, ch in enumerate(key))
    try:
        value = value % modulus
    except ZeroDivisionError as inst:
        print("Run-Time Error:", type(inst))
        print("Cannot perform integer division or modulo by zero")
        sys.exit(1)
    try:
        return value % n
    except ZeroDivisionError as inst:
        print("Run-Time Error:", type(inst))
        print("Cannot perform integer division or modulo by zero")
        sys.exit(1)
def h_mult(key, n):
    """Hash *key* with the multiplicative method (Cormen, Introduction to
    Algorithms, p. 264), using the golden-ratio constant A = (sqrt(5)-1)/2.

    Parameters:
    - key(str): The key we wish to hash
    - n(int): The size of the hash table

    Returns the hash value as an int in [0, n).
    """
    golden_fraction = (sqrt(5) - 1) / 2
    ascii_total = sum(ord(ch) for ch in key)
    fractional_part = (ascii_total * golden_fraction) % 1
    return floor(n * fractional_part)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
next step: R
'''
import codecs,re
def splitJointmorph(jointMorph):
    """Split a CoNLL-U morphology string into its individual features.

    Returns a (case, number, gender, verbform, tense) tuple; features that
    are absent stay '_'.  When no Case feature is present, case falls back
    to 'FV' (finite verb, Mood present), 'IV' (infinite verb, VerbForm
    present) or 'ind' (indeclinable).
    """
    cas = '_'
    num = '_'
    gen = '_'
    verbform = '_'
    tense = '_'
    if jointMorph != '_':
        stripped = jointMorph.strip()
        for feature in stripped.split('|'):
            if feature.startswith('Case='):
                cas = feature[5:]
            elif feature.startswith('Number='):
                num = feature[7:]
            elif feature.startswith('Gender='):
                gen = feature[7:]
            elif feature.startswith('VerbForm='):
                verbform = feature[9:]
            elif feature.startswith('Tense='):
                tense = feature[6:]
        if cas == '_':
            if 'Mood' in stripped:
                cas = 'FV'
            elif 'VerbForm' in stripped:
                cas = 'IV'
            else:
                cas = 'ind'
    return cas, num, gen, verbform, tense
# Convert a CoNLL-U file into a flat tab-separated .dat table, one word per
# row, with sentence/word numbering and split-out morphological features.
conlluPath = '../data/sanskrit.conllu'
senNo = 1   # running sentence counter (1-based)
wrdNo = 1   # running word position within the current sentence (1-based)
sep = '\t'
with codecs.open(conlluPath, 'r', 'UTF-8') as fIn, codecs.open('../data/conllu.dat','w','UTF-8') as fOut:
    hdr = ['text','chapter','layer','sen.id','wrd.position','head.position','word','lemma','lemma.id','pos','label','sublabel','cas','num','gen','verbform','tense','annotator']
    fOut.write(sep.join(hdr) + '\n')
    reSenId = re.compile('^# sent_id = (.+)$')
    reSenLayer = re.compile('^# layer=(.+)$')
    written = 0  # total token rows written so far
    for lnum,line in enumerate(fIn):
        line = line.strip()
        if not line:
            continue
        m = reSenId.match(line)
        if m:
            '''
            main headline of one sentence
            '''
            # Reset per-sentence counters, but only after at least one
            # token row has been written (keeps senNo == 1 for the first
            # sentence).
            if written>0:
                sen = []
                senNo+=1
                wrdNo =1
            # sent_id has the form <textname>_<chapter>.
            cit = m.group(1)
            ix = cit.find('_')
            txtname = cit[:ix]
            chapter = cit[(ix+1):]
            layer = ''
            continue
        if line.startswith('#'): # full text
            # Only the "# layer=..." comment is used; everything before the
            # first '-' is kept as the layer name.
            m = reSenLayer.match(line)
            if m:
                s = m.group(1)
                layer = s[:s.find('-')]
            continue
        '''
        this is a regular line
        '''
        toks = line.split('\t')
        if len(toks) < 12:
            # Malformed token line; report and skip.
            print('line with {0} tokens'.format(len(toks)))
            continue
        wrd = toks[2] # lexicon entry!
        # Fall back to the bracketed lemma when the surface form is absent.
        surfaceWrd = toks[1] if toks[1]!='_' else '[{0}]'.format(toks[2])
        lemmaId = toks[14]
        posTag = toks[3]
        headPosition = toks[6]
        # Dependency label may carry a ':'-separated sublabel.
        labtoks = toks[7].split(':')
        label = labtoks[0]
        sublabel = labtoks[1] if len(labtoks)==2 else ''
        annotator = toks[11]
        ''' morpho-syntax '''
        jointMorph = toks[5]
        cas,num,gen,verbform,tense = splitJointmorph(jointMorph)
        ''' write to the out file '''
        itms = [txtname,chapter,layer,senNo,wrdNo,headPosition,surfaceWrd,wrd,lemmaId,posTag,label,sublabel,cas,num,gen,verbform,tense,annotator]
        fOut.write(sep.join([str(x) for x in itms]) + '\n')
        written+=1
        wrdNo+=1
|
from django.contrib.auth.models import AbstractUser
from django.db import models
import decimal
class Costumer(AbstractUser):
    """Shop customer: Django's built-in user extended with contact/address fields.

    NOTE(review): "Costumer" is presumably a misspelling of "Customer";
    renaming would require a migration, so it is left as-is.
    """
    telephone = models.CharField(max_length=30,blank=True)
    street = models.CharField(max_length=30,blank=True)
    city = models.CharField(max_length=30,blank=True)
    district = models.CharField(max_length=30,blank=True)
    zipcode = models.CharField(max_length=30,blank=True)
    country = models.CharField(max_length=30,blank=True)
    class Meta:
        db_table = 'costumers'
class Product(models.Model):
    """Base catalogue product; category-specific models inherit from it
    (multi-table inheritance, one table per subclass)."""
    name = models.CharField(max_length=255, unique=True)
    ean = models.CharField(max_length=50)
    price = models.DecimalField(max_digits=8,decimal_places=2)
    # Previous price shown when the product is on sale; 0.0 means "never priced differently".
    old_price = models.DecimalField(max_digits=8,decimal_places=2,blank=True,default=0.0)
    image = models.CharField(max_length=255,blank=True)
    # Merchandising flags used for storefront sections.
    is_on_sale = models.BooleanField(default=False)
    is_bestseller = models.BooleanField(default=False)
    is_new = models.BooleanField(default=False)
    is_featured = models.BooleanField(default=False)
    quantity = models.IntegerField()
    description = models.TextField(blank=True)
    brand = models.CharField(max_length=255, blank=True)
    category = models.CharField(max_length=255, blank=True)
    #categories = models.ManyToManyField(Category)
    class Meta:
        db_table = 'products'
    def __unicode__(self):
        # Python 2 string representation (Django < 2.0 era code).
        return self.name
class Desktop(Product):
    """Desktop computer product with hardware-spec fields."""
    d_processor = models.CharField(max_length=255)
    gpu = models.CharField(max_length=255)
    ram = models.CharField(max_length=255)
    storage = models.CharField(max_length=255)
    os = models.CharField(max_length=255)
    dimensions = models.CharField(max_length=255)
    weight = models.CharField(max_length=255)
    class Meta:
        db_table = 'desktops'
class SmartPhone(Product):
    """Smartphone product with device-spec fields."""
    os = models.CharField(max_length=255)
    s_processor = models.CharField(max_length=255)
    storage = models.CharField(max_length=255)
    ram = models.CharField(max_length=255)
    display = models.CharField(max_length=255)
    camera = models.CharField(max_length=255)
    mobile_data = models.CharField(max_length=255)
    dimensions = models.CharField(max_length=255)
    weight = models.CharField(max_length=255)
    class Meta:
        db_table = 'smartphones'
class Processor(Product):
    """CPU product with processor-spec fields."""
    base_freq = models.CharField(max_length=255)
    turbo_freq = models.CharField(max_length=255)
    num_cores = models.CharField(max_length=255)
    num_threads = models.CharField(max_length=255)
    tdp = models.CharField(max_length=255)
    cache = models.CharField(max_length=255)
    socket = models.CharField(max_length=255)
    class Meta:
        db_table = 'processors'
class Monitor(Product):
    """Monitor product with display-spec fields."""
    size = models.CharField(max_length=255)
    aspect_ratio = models.CharField(max_length=255)
    resolution = models.CharField(max_length=255)
    response_time = models.CharField(max_length=255)
    conectivity = models.CharField(max_length=255)
    dimensions = models.CharField(max_length=255)
    weight = models.CharField(max_length=255)
    class Meta:
        db_table = 'monitors'
class Keyboard(Product):
    """Keyboard product with peripheral-spec fields."""
    # NOTE(review): 'Layout' breaks the lowercase field-name convention used
    # by every other model here; renaming requires a migration.
    Layout = models.CharField(max_length=255)
    conectivity = models.CharField(max_length=255)
    dimensions = models.CharField(max_length=255)
    weight = models.CharField(max_length=255)
    class Meta:
        db_table = 'keyboards'
    def __unicode__(self):
        # Redundant: Product already defines the identical __unicode__.
        return self.name
class SSD(Product):
    """Solid-state drive product with storage-spec fields."""
    # NOTE(review): 'Capacity' breaks the lowercase naming convention, and
    # 'format' shadows the builtin name at class scope (harmless for Django
    # fields, but worth knowing).
    Capacity = models.CharField(max_length=255)
    format = models.CharField(max_length=255)
    interface = models.CharField(max_length=255)
    seq_read_speed = models.CharField(max_length=255)
    seq_write_speed = models.CharField(max_length=255)
    random_read = models.CharField(max_length=255)
    random_write = models.CharField(max_length=255)
    class Meta:
        db_table = 'ssds'
class Router(Product):
    """Network router product with networking-spec fields."""
    wireless_norm = models.CharField(max_length=255)
    segment = models.CharField(max_length=255)
    data_rate = models.CharField(max_length=255)
    antena = models.CharField(max_length=255)
    segnal_freq = models.CharField(max_length=255)
    ports = models.CharField(max_length=255)
    dimensions = models.CharField(max_length=255)
    class Meta:
        db_table = 'routers'
class CartItem(models.Model):
    """One product line in a shopping cart, keyed by a session cart_id and
    optionally attached to an Order once checked out."""
    cart_id = models.CharField(max_length=50)
    date_added = models.DateTimeField(auto_now_add=True)
    quantity = models.IntegerField(default=1)
    product = models.ForeignKey('Product', on_delete=models.CASCADE, unique=False)
    # Null until the cart is converted into an order.
    order = models.ForeignKey('Order', on_delete=models.CASCADE, unique=False, null=True)
    class Meta:
        db_table = 'cart_items'
        ordering = ['date_added']
    def total(self):
        # Line total: unit price times quantity.
        return self.quantity * self.product.price
    def name(self):
        # Convenience passthrough to the related product's name.
        return self.product.name
    def price(self):
        # Convenience passthrough to the related product's unit price.
        return self.product.price
    def get_absolute_url(self):
        # Delegates to the product's URL; assumes Product defines
        # get_absolute_url elsewhere — not visible here, confirm.
        return self.product.get_absolute_url()
    def augment_quantity(self, quantity):
        # Increase the line quantity (quantity may arrive as a string) and persist.
        self.quantity = self.quantity + int(quantity)
        self.save()
class Order(models.Model):
    """A customer's checked-out order; its lines are the CartItems whose
    `order` foreign key points here."""
    date = models.DateTimeField(auto_now_add=True)
    # Stored total column.
    total = models.DecimalField(max_digits=8,decimal_places=2, null=True)
    costumer = models.ForeignKey('Costumer', on_delete=models.PROTECT, unique=False)
    class Meta:
        db_table = 'orders'
        ordering = ['date']
    # def get_items(self):
    #     items = CartItem.objects.filter(order=self)
    #     return items
    # NOTE(review): this method has the same name as the `total` field above,
    # so the method definition replaces the field attribute on the class —
    # almost certainly unintended. Confirm whether the stored column or the
    # computed sum is the desired behavior before renaming either.
    def total(self):
        # Sum of all related cart-item line totals as a Decimal.
        total = decimal.Decimal('0.00')
        items = self.cartitem_set.all()
        for item in items:
            total += item.total()
        return total
#! /bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
from harvester.couchdb_init import get_couchdb
from harvester.couchdb_sync_db_by_collection import delete_id_list
from harvester.post_processing.couchdb_runner import CouchDBCollectionFilter
def confirm_deletion(count, objChecksum, cid):
    """Ask the operator to confirm the deletion; True only on an exact 'yes'.

    (The original while-loop prompted at most once — both branches returned —
    so a single prompt is equivalent.)
    """
    prompt = "\nDelete {0} documents with object checksum {1} from Collection {2}? yes to confirm\n".format(count, objChecksum, cid)
    answer = raw_input(prompt).lower()
    return answer == "yes"
if __name__ == '__main__':
    # CLI entry point: collect all docs in the collection whose 'object'
    # field equals the given checksum, then delete them after confirmation.
    parser = argparse.ArgumentParser(
        description='Delete all documents in given collection matching given object checksum. ' \
        'Use for metadata-only records that can only be identified by value in object field ' \
        'USAGE: delete_couchdocs_by_obj_checksum.py [collection id] [object value]')
    parser.add_argument('cid', help='Collection ID')
    parser.add_argument('objChecksum', help='CouchDB "object" value of documents to delete')
    args = parser.parse_args(sys.argv[1:])
    # argparse already rejects missing positionals; belt-and-braces check.
    if not args.cid or not args.objChecksum:
        parser.print_help()
        sys.exit(27)
    ids = []
    _couchdb = get_couchdb()
    rows = CouchDBCollectionFilter(collection_key=args.cid, couchdb_obj=_couchdb)
    for row in rows:
        couchdoc = row.doc
        if 'object' in couchdoc and couchdoc['object'] == args.objChecksum:
            couchID = couchdoc['_id']
            ids.append(couchID)
    if not ids:
        print 'No docs found with object checksum matching {}'.format(args.objChecksum)
        sys.exit(27)
    if confirm_deletion(len(ids), args.objChecksum, args.cid):
        num_deleted, delete_ids = delete_id_list(ids, _couchdb=_couchdb)
        print 'Deleted {} documents'.format(num_deleted)
    else:
        print "Exiting without deleting"
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
import requests, bs4
import sys, os, subprocess
import tempfile
from termcolor import *
from urllib.parse import quote
from time import sleep
BASE_URL = "http://www.allitebooks.com/"
def send_request(url):
    """GET *url* and return the Response; on connection or HTTP errors,
    print a colored message and return None (callers must cope with None)."""
    try:
        r = requests.get(url)
        r.raise_for_status()
    except requests.exceptions.ConnectionError:
        cprint("Connection Error Occurred", 'red')
    except requests.exceptions.HTTPError:
        cprint(str(r.status_code) + ' Error', 'red')
    else:
        return r
def download_and_save_pdf(title_h2):
    """Download the PDF behind a search-result title and save it to the CWD.

    title_h2: the <h2 class="entry-title"> tag of a result; its <a> child
    holds the book-page URL and the display title (used as the filename).
    """
    title_text = title_h2.find('a').text
    cprint('Downloading ' + title_text, 'green')
    anchor = title_h2.find('a')['href']
    r = send_request(anchor)
    soup = bs4.BeautifulSoup(r.text, 'html.parser')
    # First download-links span on the book page carries the direct PDF link.
    dwnld_link = soup.find_all('span', {'class': 'download-links'})[0].find('a')['href']
    r = send_request(dwnld_link)
    # Fix: use a context manager so the handle is closed even if write() fails
    # (the previous open/close pair leaked the handle on error).
    with open(title_text + '.pdf', 'wb') as f:
        f.write(r.content)
    cprint('File %s has been saved in the current directory' % (title_text + '.pdf'), 'green')
def view_thumbnail(filepath):
    """Open *filepath* in the platform's default image viewer, then pause.

    The darwin check must precede the generic posix branch, since macOS is
    also posix.
    """
    cprint('Opening thumbnail..', 'yellow')
    cprint('The thumbnail will be deleted automatically', 'blue')
    if sys.platform.startswith('darwin'):
        subprocess.call(('open', filepath))
    elif os.name == 'nt':
        os.startfile(filepath)
    elif os.name == 'posix':
        subprocess.call(('xdg-open', filepath))
    # Give the viewer a moment to load the image before the caller deletes it.
    sleep(1)
def save_thumbnail(article):
    """Download an article's thumbnail image into a named temp file.

    Returns the temp file's path.  On download failure the partial file is
    deleted, so the returned path may no longer exist — callers see the
    printed error as the signal (behavior preserved from the original).
    """
    thumb = article.find('div', attrs={'class': 'entry-thumbnail'})
    src = thumb.find('img')['src']
    # Keep the image's own extension so viewers pick the right handler.
    suffix = '.' + src.split('.')[-1]
    temp = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
    try:
        r = send_request(src)
        temp.write(r.content)
        temp.close()
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception is the widest sensible net here.
    except Exception:
        cprint('Sorry, error occurred while saving thumbnail', 'red')
        # Fix: close before removing — Windows cannot unlink an open file.
        temp.close()
        os.remove(temp.name)
    return temp.name # filepath
def catalogue(soup):
    # Interactively walk the search-result articles on one page: show title,
    # authors and summary, then let the user save the PDF (exits the program),
    # preview the thumbnail first, or skip to the next result.
    main = soup.find('main', attrs={'id': 'main-content'})
    articles = main.find_all('article')
    books = []
    for article in articles:
        title = article.find('h2', attrs={'class': 'entry-title'})
        authors = article.find('h5', attrs={'class': 'entry-author'})
        description = article.select('div.entry-summary > p')[0]
        print('\n')
        cprint(title.find('a').text, 'cyan')
        cprint('By: ' + ', '.join([a.text for a in authors.find_all('a')]), 'yellow')
        cprint(description.text, 'magenta')
        inp = input('Save this PDF? (To view its thumbnail photo, enter \'p\') (y/n/p): ').lower()
        if inp.startswith('y'):
            # Saving ends the whole program by design.
            download_and_save_pdf(title)
            exit(0)
        elif inp.startswith('p'):
            filepath = save_thumbnail(article)
            try:
                view_thumbnail(filepath)
            finally:
                # Always clean up the temp thumbnail, viewer or not.
                os.remove(filepath)
            inp = input('Save this PDF? (y/n): ').lower()
            if inp.startswith('y'):
                download_and_save_pdf(title)
                exit(0)
        # NOTE(review): `books` is built up but never returned or read.
        info = {'title': title, 'authors': authors, 'description': description}
        books.append(info)
def paginate(page_soup, query):
    """Walk every results page for `query`, cataloguing the books on each.

    `page_soup` is the already-fetched soup of page 1. The total page count
    is read from the paginator's "... / N" label when a paginator exists.
    """
    paginator = page_soup.find('div', {'class': 'pagination'})
    last_page_number = 1
    if paginator:
        page_range = paginator.find('span', {'class': 'pages'}).get_text()
        # Fixed: previously only the first character of the stripped page
        # count was used (`.strip()[0]`), so e.g. 12 pages became 1.
        last_page_number = int(page_range.split('/')[-1].strip())
    for i in range(last_page_number):
        if i != 0:  # Because the soup for the first page is passed in
            cprint('\nRetrieving Page No. %s' % str(i + 1), 'blue')
            url = BASE_URL + ('page/%s/?s=' % str(i + 1)) + query
            print(url)
            r = send_request(url)
            page_soup = bs4.BeautifulSoup(r.text, 'html.parser')
        catalogue(page_soup)
    cprint('Sorry, couldn\'t find a relevant book', 'red')
if __name__ == '__main__':
    # URL-encode the query (safe='' also encodes '/'), fetch the first
    # results page, then walk every page of results.
    query = quote(input('Enter Search Query: '), safe='')
    url = BASE_URL + '?s=' + query
    r = send_request(url)
    soup = bs4.BeautifulSoup(r.text, 'html.parser')
    paginate(soup, query)
|
import math
def cos_sen_tan(angulo):
    """Print the sine, cosine and tangent of an angle given in degrees."""
    # Convert once; all three trig functions take radians.
    rad = math.radians(angulo)
    seno = math.sin(rad)
    print('O seno de {} é {:.2f}'.format(angulo, seno))
    cosseno = math.cos(rad)
    print('O cosseno {} é {:.2f}'.format(angulo, cosseno))
    tangente = math.tan(rad)
    print('A tangente {} é {:.2f}'.format(angulo, tangente))
# Read the angle in degrees and report its trig ratios.
angulo = float(input('Informe o ângulo: '))
cos_sen_tan(angulo)
# -*- coding: utf-8 -*-
from odoo.tests.common import TransactionCase
from odoo.tests.common import SingleTransactionCase
import datetime
from .commissiondata import CommissionData as cd
from .commissiondata import TestCase1 as t1
from .commissiondata import TestCase2 as t2
from .commissiondata import TestCase3 as t3
import time
class TestCommission(TransactionCase):
    """Integration tests for the commission module.

    Builds commission schemes/tiers/groups, a sales-team hierarchy, users,
    teams and sale orders from the `commissiondata` fixtures, runs the
    commission calculation, and verifies the summary amount per agent.
    """

    # Accumulators for records created during a test run.
    # NOTE(review): these are class attributes shared by all instances, so
    # state can leak between tests unless reassigned — confirm intended.
    arr_schemes = []
    arr_groups = []
    arr_users = []
    arr_leads = []
    arr_teams = []
    # Prefix for every record the tests create, to avoid demo-data clashes.
    prefix = 'UT_'

    # set basic data
    def setUp(self, *args, **kwargs):
        """Deactivate all existing groups, schemes and teams so each test
        starts from a clean configuration."""
        super(TestCommission, self).setUp(*args, **kwargs)
        # set all groups to inactive
        self.env['commission.group'] \
            .search([('active', '=', True)]) \
            .write({'active' : False})
        # set all schemes inactive
        self.env['commission.scheme'] \
            .search([('active', '=', True)]) \
            .write({'active': False})
        # set all teams inactive
        self.env['crm.team'] \
            .search([('active', '=', True)]) \
            .write({'active': False})

    # test the commission configuration
    def test_populate_commission(self):
        """Placeholder configuration test (population steps currently disabled)."""
        print('test run configuration')
        #self.populate_scheme()
        #self.populate_hierarchy()
        #self.populate_user()
        #self.populate_team()

    def test_calculate_commission(self):
        """End-to-end run: build TestCase3's fixture data and calculate."""
        self.calculate_commission(t3())
        #self.__create_sales(t1().salelines)
        #self.env['commission'].calculate()

    ###################################################################################################################################
    # private helper methods

    # create the tiers, schemes and groups
    def populate_scheme(self):
        """Create two agent schemes (two tiers each), two manager schemes and
        two commission groups, attach the fixture teams, and assert counts."""
        # use demo user
        demo_user = self.env.ref('base.user_demo')
        # create two schemes for two products with two tiers each
        scheme = self.env['commission.scheme']
        self.scheme1 = scheme.create(cd.scheme_a1)
        self.scheme2 = scheme.create(cd.scheme_a2)
        self.arr_schemes.append(self.scheme1.id)
        self.arr_schemes.append(self.scheme2.id)
        tier = self.env['commission.tier']
        count = len(scheme.search([('active', '=', True)]))
        self.assertEqual(count, 2, 'error creating schemes')
        count = len(tier.search([('scheme.id', 'in', [self.scheme1.id, self.scheme2.id])]))
        self.assertEqual(count, 4, 'error creating agent tiers')
        # create manager commission scheme
        self.scheme3 = scheme.create(cd.scheme_m1)
        self.scheme4 = scheme.create(cd.scheme_m2)
        self.arr_schemes.append(self.scheme3.id)
        # create two commission groups
        group = self.env['commission.group']
        self.group1 = group.create({
            'name' : 'manager',
            'active' : True
        })
        self.group2 = group.create({
            'name' : 'agent',
            'active' : True,
        })
        self.arr_groups.append(self.group1.id)
        self.arr_groups.append(self.group2.id)
        self.group2.scheme_ids += self.scheme1
        self.group2.scheme_ids += self.scheme2
        self.group1.scheme_ids += self.scheme3
        self.group1.scheme_ids += self.scheme4
        # assign team to groups
        self.team1 = self.env['crm.team'].search([('id', '=', cd.team_id1)])
        self.manager_team1 = self.env['crm.team'].search([('id', '=', cd.manager_team_id1)])
        self.group1.team_ids += self.manager_team1[0]
        self.group2.team_ids += self.team1[0]
        count = len(scheme.search([('active', '=', True)]))
        self.assertEqual(count, 4, 'error creating schemes')
        count = len(tier.search([('scheme.id', 'in', [self.scheme3.id])]))
        self.assertEqual(count, 1, 'error creating manager tiers')
        count = len(group.search([('active', '=', True)]) )
        self.assertEqual(count, 2, 'wrong group count')
        count = len(group.search([('team_ids', '=', cd.manager_team_id1)]))
        self.assertEqual(count, 1, 'manager group - team count')
        count = len(self.group2.team_ids)
        self.assertEqual(count, 1, 'agent group - team count')

    # create the commission hierarchy
    def populate_hierarchy(self, arr_teams=None, testcase=None):
        """Build the hierarchy tree.

        Without a testcase a fixed 4-node chain is created. With a testcase
        the nodes come from `testcase.nodes` in two passes — create all nodes
        first, then wire up parents — so fixture ordering does not matter.
        """
        hierarchy = self.env['commission.hierarchy']
        # default run
        if testcase == None:
            self.node1 = hierarchy.create({
                'name' : 'hq',
                'team' : 1
            })
            self.node2 = hierarchy.create({
                'name' : 'region1',
                'parent_id' : self.node1.id,
                'team' : 2
            })
            self.node3 = hierarchy.create({
                'name': 'team lead1',
                'parent_id' : self.node2.id,
                'team' : 3
            })
            self.node4 = hierarchy.create({
                'name' : 'team1',
                'parent_id' : self.node3.id,
                'team' : 1
            })
            count = len(hierarchy.search([('id', '>=', self.node1.id)]))
            self.assertEqual(count, 4, 'creating hierarchy')
            return
        # testcase run
        start_count = hierarchy.search_count([])
        # create all nodes first
        for node in testcase.nodes:
            self.node1 = hierarchy.create({
                'name' : self.prefix + node.get('name'),
            })
            # assign manager (only if the prefixed login already exists)
            temp = self.env['res.users'].search([('login', '=', self.prefix + node.get('manager'))])
            if len(temp) > 0:
                self.node1.manager = temp[0]
            # assign teams
            for team in node.get('teams'):
                tmp_team = self.env['crm.team'].search([('name', '=', self.prefix + team)])
                if len(tmp_team) > 0:
                    self.node1.team_ids += tmp_team[0]
        # add parent node
        for node in testcase.nodes:
            if node.get('parent') == '':
                continue
            parent = hierarchy.search([('name', '=', self.prefix + node.get('parent'))])
            self.assertEqual(len(parent), 1, 'parent length for ' + node.get('name'))
            child = hierarchy.search([('name', '=', self.prefix + node.get('name'))])
            self.assertEqual(len(child), 1, 'child length ' + node.get('name') )
            child[0].parent_id = parent[0]
        count = hierarchy.search_count([])
        self.assertEqual(count - start_count, len(testcase.nodes), 'wrong hierarchy count')

    # create new users
    def populate_user(self, arr_names=[]):
        """Create salesmen (and lead users on the default run); return the
        created users for testcase runs.

        NOTE(review): the mutable default `arr_names=[]` is only read, never
        mutated, so it is harmless here — but a known Python pitfall.
        """
        users = self.env['res.users']
        self.arr_users = []
        # default run
        if len(arr_names) == 0:
            for x in range(1, 21):
                name = 'user ' + str(x)
                self.user1 = users.create({
                    'company_id': self.env.ref("base.main_company").id,
                    'name': name,
                    'login': name,
                    'email': 'agent@kgk.vn',
                    'groups_id': [(6, 0, [self.ref('sales_team.group_sale_manager')])]
                })
                self.arr_users.append(self.user1)
            count = len(users.search([('email', '=', 'agent@kgk.vn')]))
            self.assertEqual(count, 20, 'create salesman')
            self.assertEqual(len(self.arr_users), 20, 'user count')
            # create leads
            for name in cd.arr_leads:
                self.lead1 = users.create({
                    'company_id': self.env.ref("base.main_company").id,
                    'name': name,
                    'login': name,
                    'email': 'lead@kgk.vn',
                    'groups_id': [(6, 0, [self.ref('sales_team.group_sale_manager')])]
                })
                self.arr_leads.append(self.lead1)
            count = len(users.search([('email', '=', 'lead@kgk.vn')]))
            self.assertEqual(len(self.arr_leads), count, 'lead count')
            return
        # create users based on the names provided
        before_count = users.search_count([])
        for user in arr_names:
            name = str(user)
            self.user1 = users.create({
                'company_id': self.env.ref("base.main_company").id,
                'name': name,
                'login': self.prefix + name,
                'email': 'agent_test@kgk.vn',
                'groups_id': [(6, 0, [self.ref('sales_team.group_sale_manager')])]
            })
            self.arr_users.append(self.user1)
        count = users.search_count([])
        self.assertEqual(len(arr_names), count - before_count, 'user setup count')
        return self.arr_users

    # create salesteams
    def populate_team(self, arr_tems):
        """Create sales teams (default run creates two fixed teams).

        NOTE(review): the testcase branch appears broken — the parameter is
        named `arr_tems` but the loop iterates `arr_teams` (NameError), and
        the loop variable `team` shadows the crm.team model before
        `team.create` is called. The testcase flow uses __setup_teams instead.
        """
        team = self.env['crm.team']
        # default run
        if len(self.arr_users) == 0:
            agents = [1,5]
            for x in agents:
                name = 'team ' + str(x)
                self.team1 = team.create({
                    'name' : name,
                    'active' : True,
                    'user_id' : x
                })
                #self.arr_teams.append(self.team1)
            name = 'team ' + str(agents[0])
            count = len(team.search([('name', '=', name)]))
            self.assertEqual(count, 1, 'create team')
            return
        # create teams for testcase
        for team in arr_teams:
            self.team1 = team.create({
                'name': self.prefix + team.name,
                'active' : True,
                'use_opportunities' : False,
                'use_quotations' : False,
            })
            # assign members
            for member in team.members:
                temp = self.env['res.users'].search([('login', '=', self.prefix + member)])
                if len(temp) > 0:
                    self.team1.member_ids += temp[0].id
            self.arr_teams.append(self.team1)
        count = len(team.search([('active', '=', True)]))
        self.assertEqual(count, len(self.arr_teams), 'wrong count of teams')

    # commission calculation
    def calculate_commission(self, testcase):
        """Build the testcase's configuration, run the calculation and check
        the expected amounts."""
        self.__setup_config(testcase)
        self.execute_calc(testcase.results)

    # setup configuration based on testcase
    def __setup_config(self, testcase):
        """Create users, schemes/groups, teams, hierarchy and sales for one
        testcase, in dependency order."""
        arr_agents = self.populate_user(testcase.users)
        arr_groups = self.__setup_scheme(testcase)
        arr_teams = self.__setup_teams(testcase.teams)
        self.__asign_teams(arr_groups, arr_teams, testcase)
        self.populate_hierarchy(arr_teams, testcase)
        self.__create_sales(testcase.salelines)

    # create sales order
    def create_sales(self, agents = [cd.agent_id1]):
        """Create two sale orders (one per fixture product) for each agent,
        acting as that agent via sudo, and verify the order/line counts."""
        for agent in agents:
            sales = self.env['sale.order'].sudo(agent)
            partners = self.env['res.partner'].search([('customer', '=', True)])
            partner = partners[0]
            product = self.env['product.product'].search([('id', '=', cd.prod_id1)])
            start_count = len(self.env['sale.order.line'].search([('salesman_id', '=', agent)]))
            order_count = len(sales.search([('partner_id', '=', partner.id)]))
            self.so = sales.create({
                'partner_id': partner.id,
                'partner_invoice_id': partner.id,
                'partner_shipping_id': partner.id,
                'order_line': [(0, 0, {'name': product.name, 'product_id': product.id,
                                       'product_uom_qty': 2,
                                       'product_uom': product.uom_id.id,
                                       'price_unit': product.list_price,
                                       'salesman_id' : agent
                                       })]
            })
            so_id = self.so.id
            count = len(sales.search([('id', '=', so_id)]))
            self.assertEqual(count, 1, 'verify order')
            count = len(sales.search([('partner_id', '=', partner.id)]))
            self.assertEqual(count - order_count, 1, 'order count')
            count = len(self.env['sale.order.line'].search([('salesman_id', '=', agent)]))
            self.assertEqual(count - start_count, 1, 'verify order line')
            product = self.env['product.product'].search([('id', '=', cd.prod_id2)])
            self.so1 = sales.create({
                'partner_id': partner.id,
                'partner_invoice_id': partner.id,
                'partner_shipping_id': partner.id,
                'order_line': [(0, 0, {'name': product.name, 'product_id': product.id,
                                       'product_uom_qty': 1,
                                       'product_uom': product.uom_id.id,
                                       'price_unit': product.list_price,
                                       'salesman_id' : agent
                                       })]
            })

    # create group, schemes and tiers
    def __setup_scheme(self, testcase):
        """Create the testcase's groups with their schemes and tiers
        (reusing an active scheme of the same name if one exists); return
        the created group records."""
        arr_groups = []
        groups = testcase.groups
        dict_schemes = testcase.dict_schemes
        dict_tiers = testcase.dict_tiers
        o_schemes = self.env['commission.scheme']
        o_groups = self.env['commission.group']
        o_tiers = self.env['commission.tier']
        group_count = o_groups.search_count([])
        scheme_count = o_schemes.search_count([])
        tier_count = o_tiers.search_count([])
        expected_tiers = 0
        num_schemes = 0
        for group in groups:
            _group1 = o_groups.create ({
                'name' : group['name'],
                'active' : True
            })
            for name in group['schemes']:
                arr_tiers = []
                scheme = dict_schemes[name]
                # NOTE(review): the inner loop reuses the name `name`,
                # clobbering the outer scheme name — works, but fragile.
                for name in scheme['tiers']:
                    tier = dict_tiers[name]
                    arr_tiers.append((0, 0, tier))
                # check if scheme exists
                temp = o_schemes.search([('name', '=', scheme['name']), ('active', '=', True)])
                _scheme1 = None
                if len(temp) > 0:
                    _scheme1 = temp[0]
                else:
                    _scheme1 = o_schemes.create({
                        'name' : scheme['name'],
                        'active' : True,
                        'product' : scheme['product'],
                        'points' : scheme['points'],
                        'aggregation' : scheme['aggregation'],
                        'tier_ids' : arr_tiers
                    })
                    expected_tiers += len(arr_tiers)
                    num_schemes += 1
                _group1.scheme_ids += _scheme1
            arr_groups.append(_group1)
        count = o_tiers.search_count([])
        self.assertEqual(expected_tiers, count - tier_count, 'setup schemes - wrong tier count')
        count = o_schemes.search_count([])
        self.assertEqual(num_schemes, count - scheme_count, 'setup schemes - wrong scheme count')
        count = o_groups.search_count([])
        self.assertEqual(len(groups), count - group_count, 'setup schemes - wrong group count')
        return arr_groups

    # call the calculation method
    def execute_calc(self, dict_results):
        """Run the commission calculation, then compare the newest summary
        amount per agent against `dict_results` (keys may be login names,
        which are prefixed, or raw user ids)."""
        self.env['commission'].calculate()
        o_summary = self.env['commission.summary']
        print(dict_results)
        for agent in list(dict_results.keys()):
            agent_id = 0
            if type(agent) == str:
                temp = self.env['res.users'].search([('login', '=', self.prefix + agent)])
                if len(temp) == 0:
                    print('agent not found %s' % agent)
                    continue
                agent_id = temp[0].id
            else:
                agent_id = agent
            # newest summary first
            result = o_summary.search([('sales_agent', '=', agent_id)], order='id desc')
            summary = []
            amount = 0
            if(len(result) == 0):
                amount = 0
            else:
                summary = result[0]
                amount = summary.amount
            print('number of summaries %d' % len(summary))
            expected = dict_results.get(agent)
            print('expected amount %d actual %d for agent %s' % (expected, amount, agent))
            if expected != amount:
                # dump full context before failing to ease debugging
                self.__dump_agent_info(agent_id)
            self.assertEqual(amount, expected, 'wrong amount for: ' +str(agent))

    # create teams for list of provided
    def __setup_teams(self, arr_teams):
        """Create one crm.team per testcase team dict (name + members);
        return the created team records."""
        arr_result = []
        o_team = self.env['crm.team']
        team_count = o_team.search_count([])
        # create teams for testcase
        for team in arr_teams:
            self.team1 = o_team.create({
                'name': self.prefix + team.get('name'),
                'active' : True,
                'use_opportunities' : False,
                'use_quotations' : False,
            })
            # assign members (skip logins that were not created)
            for member in team.get('members'):
                temp = self.env['res.users'].search([('login', '=', self.prefix + member)])
                if len(temp) > 0:
                    self.team1.member_ids += temp[0]
            arr_result.append(self.team1)
        count_after = o_team.search_count([])
        self.assertEqual(count_after - team_count, len(arr_teams), 'wrong team count')
        return arr_result

    # iterate through all teams and lookup team members in the dictionary
    def __asign_teammember(self, arr_teams, dict_teammembers):
        """Attach users (by id) to each team listed in `dict_teammembers`."""
        for team in arr_teams:
            members = dict_teammembers.get(team.name)
            if members is not None:
                for member in members:
                    user = self.env['res.users'].browse([member])
                    team.member_ids += user

    # create sales lines, pick default company and first customer
    def __create_sales(self, salelines):
        """Create one sale order per testcase sale line (one line each) and
        verify the resulting line/order counts."""
        o_sales = self.env['sale.order']
        o_user = self.env['res.users']
        customer = self.env['res.partner'].search([('customer', '=', True)])[0]
        o_product = self.env['product.product']
        line_count = self.env['sale.order.line'].search_count([])
        order_count = o_sales.search_count([])
        for saleline in salelines:
            product = o_product.browse([saleline.get('product_id')])[0]
            users = o_user.search([('login', '=', self.prefix + saleline.get('salesman_id'))])
            self.assertEqual(len(users), 1, 'sales line user not found')
            user_id = users[0].id
            _so1 = o_sales.create({
                'partner_id': customer.id,
                'partner_invoice_id': customer.id,
                'partner_shipping_id': customer.id,
                'order_line': [(0, 0, {'name': product.name, 'product_id': product.id,
                                       'product_uom_qty': saleline.get('qty'),
                                       'product_uom': product.uom_id.id,
                                       'price_unit': product.list_price,
                                       'salesman_id' : user_id,
                                       'price_total' : saleline.get('price_total')
                                       })]
            })
            # Re-write these fields after create, presumably because the ORM
            # recomputes them during order creation — TODO confirm.
            _so1.order_line.write({'salesman_id' : user_id})
            _so1.order_line.write({'price_total' : saleline.get('price_total')})
            sol = self.env['sale.order.line'].search([('product_id', '=', product.id), ('salesman_id', '=', user_id)], order='id desc')
            self.assertEqual(len(sol), 1, 'failed to create sales lines for ' + product.name)
            self.assertEqual(saleline['qty'], sol[0].product_uom_qty, 'wrong quantity')
            print('line product: %d amount: %d qty: %d agent %d' % (sol.product_id.id, sol.price_total, sol.product_uom_qty, sol.salesman_id))
        count = self.env['sale.order.line'].search_count([])
        self.assertEqual(len(salelines), count - line_count, 'create_sales - wrong line count')
        count = o_sales.search_count([])
        self.assertEqual(len(salelines), count - order_count, 'create_sales - wrong order count')

    # assign teams to the group
    def __asign_teams(self, arr_groups, arr_teams, testcase):
        """Attach each testcase group's teams (matched by prefixed name) and
        assert the per-group team counts."""
        tc_groups = testcase.groups
        dict_teams = dict()
        dict_groups = dict()
        o_group = self.env['commission.group']
        # index created records by name for quick lookup
        for team in arr_teams:
            dict_teams.update({team.name : team})
        for group in arr_groups:
            dict_groups.update({group.name : group})
        for group_name in tc_groups:
            team_names = group_name['teams']
            if team_names is None:
                continue
            for name in team_names:
                team = dict_teams.get(self.prefix + name)
                if team is None:
                    continue
                group = dict_groups.get(group_name['name'])
                group.team_ids += team
            _groups = o_group.search([('name', '=', group_name.get('name')), ('active', '=', True)])
            expect = len(group_name.get('teams'))
            count = len(_groups[0].team_ids)
            self.assertEqual(count, expect, 'wrong team count for ' + str(group_name.get('name')) )

    def __dump_agent_info(self, agent_id):
        """Print everything known about an agent (team, hierarchy node,
        reports if manager, groups/schemes/tiers, sale lines) — debugging
        aid used when an expected amount does not match."""
        print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++')
        print('agent info for - %d' % agent_id)
        node = None
        isManager = False
        # team
        temp = self.env['res.users'].browse(agent_id)
        team = temp[0].sale_team_id
        print('agent name %s' % temp[0].login)
        print('member of team %s - %d'% (team.name, team.id))
        # see if manager
        temp = self.env['commission.hierarchy'].search([('manager', '=', agent_id)])
        if len(temp) > 0:
            node = temp[0]
            isManager = True
            print('manages this node: %s' % node.name)
        # find node team belongs to
        temp = self.env['commission.hierarchy'].search([('team_ids', '=', team.id)])
        if len(temp) > 0:
            node = temp[0]
            print('her team belongs to the following node: %s' % node.name)
        # reports: all members of all teams in the manager's subtree
        arr_reports = []
        if node and isManager:
            nodes = self.env['commission.hierarchy'].child_nodes_deep(node.id)
            for _node in nodes:
                print('child nodes - %s' % _node.name)
                for _team in _node.team_ids:
                    print('team - %s' % _team.name)
                    arr_reports.extend(_team.member_ids)
            for _team in node.team_ids:
                print('team - %s' % _team.name)
                arr_reports.extend(_team.member_ids)
            print('reports - number %d - list %s' %(len(arr_reports), [user.login for user in arr_reports]) )
        # group
        temp = self.env['commission.group'].search([('team_ids', '=', team.id)])
        for group in temp:
            print('member of group: %s' % group.name)
            for scheme in group.scheme_ids:
                print('scheme applies: %s - product %s' % (scheme.name, scheme.product.name))
                for tier in scheme.tier_ids:
                    print('tier start %d - amount %d - percent %d' % (tier.tier_start, tier.amount, tier.percent))
        # saleslines
        if isManager:
            ids = [user.id for user in arr_reports]
        else:
            ids = [agent_id]
        temp = self.env['sale.order.line'].search([('salesman_id', 'in', ids)])
        for line in temp:
            print('product %s - qty %d - amount %d' %(line.name, line.product_uom_qty, line.price_total))
        print('+++++++++++++++++++++++++++++++++++++++++++++++++++++++')
|
# Python 2 script: extract a column slice from a whitespace-separated
# ".csv" file and write it to a ".txt" file, one value per line.
fname = raw_input("File Name")
# Use context managers so the input file is closed (it previously leaked).
with open(fname + '.csv', 'r') as f:
    lines = f.readlines()
array = []
# Skip the header row; split on whitespace (the file is space/tab
# separated despite the .csv suffix — TODO confirm).
for line in lines[1:]:
    data = line.split()
    # Keep characters from index 5 of the second-to-last field.
    array.append(data[len(data) - 2][5:])
with open(fname + '.txt', 'w') as fw:
    for j in array:
        fw.write(j + '\n')
|
#! /usr/bin/python
# Written by Dan Mandle http://dan.mandle.me September 2012
# License: GPL 2.0
# edited by estheim telkom institute teknologi
import os
from gps import *
from time import *
import time
import threading
GpsData = None #seting the global variable
class GpsPoller(threading.Thread):
    """Background thread that keeps the module-level `gpsd` session drained
    so its fix data stays current."""

    def __init__(self):
        threading.Thread.__init__(self)
        global gpsd  # bring it in scope
        gpsd = gps(mode=WATCH_ENABLE)  # starting the stream of info
        self.current_value = None
        self.running = True  # setting the thread running to true

    def run(self):
        global gpsd
        while self.running:  # loop while the thread flag is true
            gpsd.next()  # this will continue to loop

    def status(self):
        """Return True when more than 4 satellites are visible."""
        # Fixed: was `psd.satellites`, an undefined name (NameError).
        if (len(gpsd.satellites) > 4):
            return True
        else:
            return False

    def data(self):
        """Return the current fix as a tuple:
        (utc, lat, lon, alt, speed, track, mode, satellite count)."""
        return gpsd.utc,gpsd.fix.latitude,gpsd.fix.longitude,gpsd.fix.altitude, gpsd.fix.speed,gpsd.fix.track, gpsd.fix.mode, len(gpsd.satellites)
'''
if __name__ =="__main__":
gpsSensor=GpsPoller()
gpsSensor.start()
GpsStatus=gpsSensor.status()
for i in range(10):
print "1",gpsd.fix.latitude,gpsd.fix.longitude,gpsd.fix.altitude,len(gpsd.satellites)
print "2",gpsSensor.data()
time.sleep(1)
gpsSensor.running=False #kill thread
gpsSensor.join() #wait untill thread finish process
print "Finish"
#, GpsData.fix.longitude, GpsData.fix.altitude, GpsData.fix.speed,GpsData.fix.track, GpsData.fix.mode
'''
'''
GpsThread=GpsPoller()
GpsThread.start()
try:
print GpsData.fix.latitude, GpsData.fix.longitude, GpsData.fix.altitude, GpsData.fix.speed,GpsData.fix.track, GpsData.fix.mode
except (KeyboardInterrupt, SystemExit): #when you press ctrl+c
print "\nKilling Thread..."
GpsThread.running = False
GpsThread.join() # wait for the thread to finish what it's doing
print 'latitude ' , GpsData.fix.latitude
print 'longitude ' , GpsData.fix.longitude
print 'time utc ' , GpsData.utc,' + ', GpsData.fix.time
print 'altitude (m)' , GpsData.fix.altitude
print 'eps ' , GpsData.fix.eps
print 'epx ' , GpsData.fix.epx
print 'epv ' , GpsData.fix.epv
print 'ept ' , GpsData.fix.ept
print 'speed (m/s) ' , GpsData.fix.speed
print 'climb ' , GpsData.fix.climb
print 'track ' , GpsData.fix.track
print 'mode ' , GpsData.fix.mode
print
print 'sats ' , GpsData.satellites
'''
|
from sambandid import app
def datetimeformat(value):
    """Format a datetime as 'dd.mm.yy HH:MM' for use in Jinja templates."""
    formatted = value.strftime('%d.%m.%y %H:%M')
    return formatted
app.jinja_env.filters['datetimeformat'] = datetimeformat
|
import matplotlib.pyplot as plt
import pandas as pd
import statistics
# Simple linear regression of closing price on opening price, fitted with
# the closed-form slope/intercept formulas.
plt.style.use('seaborn-whitegrid')
df = pd.read_csv('NSE-TATAGLOBAL11.csv')
x = df['Open']
y = df['Close']
# Scatter plot of open vs close. (Fixed: several statements were jammed
# onto single lines, which is a SyntaxError in Python.)
plt.figure(figsize=(16, 8))
plt.title('Open vs Close Price History')
plt.plot(x, y, 'o')
plt.xlabel('Open Price INR(₹)', fontsize=14)
plt.ylabel('Close Price INR(₹)')
# The model
# finding slope: m = (E[x]E[y] - E[xy]) / (E[x]^2 - E[x^2])
# (Fixed: the bare `mean` was undefined; use pandas Series.mean.)
meanx = x.mean()
meany = y.mean()
print(meanx, meany)
meanxx = (x * x).mean()
meanxy = (x * y).mean()
m = ((meanx * meany) - meanxy) / ((meanx * meanx) - meanxx)
print('The slope is:' + str(m) + '.')
# finding intercept
b = meany - (m * meanx)
print('The intercept is:' + str(b))
# Prediction using input
predicx = 3000
predicy = ((m * predicx) + b)
# Fixed: the message hard-coded 2800 while predicx is 3000 — report the
# opening price actually used.
print('The prediction of closing prices for opening price of ' + str(predicx) + ' : ' + str(predicy))
import argparse
import os
from ase.db import connect
import subprocess
from ase.io import read
ATAT_GENERATED = 'atat_generated'
SCRIPT_NAME = '.str2cif_wrapper.sh'
def str2cif_sh_script():
    """Return the body of the shell wrapper around the ATAT `str2cif` tool
    ($1 = input structure file, $2 = output CIF file)."""
    script = "str2cif < $1 > $2\n"
    return script
def transfer_to_db(folder, db, structure_file):
    """Walk an ATAT project `folder`, convert every structure file to CIF and
    store the resulting atoms in the ASE database `db`.

    Folders already recorded in the DB (matched by folder path and
    struct_type) are skipped. A throwaway shell wrapper around `str2cif` is
    written, used for each conversion, and removed at the end.
    """
    print("Transferring structures from ATAT folder system to DB")
    num_transferred = 0
    with open(SCRIPT_NAME, 'w') as out:
        out.write(str2cif_sh_script())
    for root, _, files in os.walk(folder):
        if structure_file in files:
            # Check if the folder already exists in the DB
            exists = True
            try:
                db.get(folder=root, struct_type=ATAT_GENERATED)
            except KeyError:
                exists = False
            if not exists:
                str_file = root + f"/{structure_file}"
                cif_file = root + "/structure.cif"
                # Convert to a CIF file
                subprocess.run(['sh', SCRIPT_NAME, str_file, cif_file])
                atoms = read(cif_file)
                db.write(atoms, folder=root, struct_type=ATAT_GENERATED)
                num_transferred += 1
    print(f"Transferred {num_transferred} structures to the database")
    os.remove(SCRIPT_NAME)
def parse_prop(prop):
    """Split a property mapping of the form '<db name>:<atat name>'.

    Returns a (db_name, atat_name) tuple; a bare name maps to itself.

    Raises:
        ValueError: if more than one ':' separator is present.
    """
    if ':' not in prop:
        return prop, prop
    splitted = prop.split(':')
    # This is user-input validation, so raise instead of assert (asserts
    # are stripped under `python -O`).
    if len(splitted) != 2:
        raise ValueError("Expected '<db item>:<atat name>', got %r" % prop)
    # Return a tuple for consistency with the no-colon branch.
    return tuple(splitted)
def transfer_prop_from_db(db, prop):
    """Write the DB property `prop` back into each ATAT folder.

    `prop` may be '<db name>:<atat name>'; the value is written to a file
    named after the ATAT property inside the row's folder. Rows lacking the
    property are skipped.
    """
    num_prop_transferred = 0
    db_prop, atat_prop = parse_prop(prop)
    print(f"Writing DB prop {db_prop} to atat prop {atat_prop}")
    for row in db.select(struct_type=ATAT_GENERATED):
        folder= row.folder
        value = row.get(db_prop, None)
        if value is None:
            continue
        out_file = folder + f"/{atat_prop}"
        with open(out_file, 'w') as out:
            out.write(f"{value}\n")
        num_prop_transferred += 1
    print(f"Transferred {prop} for {num_prop_transferred} items")
def main():
    """CLI entry point: import structures into the DB, then export `--prop`
    back into the ATAT folders."""
    parser = argparse.ArgumentParser(description="Transfer an ATAT project to an ASE DB")
    parser.add_argument("folder", type=str, help="ATAT project folder")
    parser.add_argument("db", type=str, help="ASE database")
    parser.add_argument("--prop", type=str, default='energy',
                        help="Property to be exported. If the name expected by ATAT does not "
                             "match the key in the database, a mapping from database to ATAT "
                             "can be specified via <db item>:<atat name>. Thus, if the quantity "
                             "that ATAT want (say energy) is stored in the DB as for example "
                             "relaxed_energy, specify relaxed_energy:energy")
    parser.add_argument("--str", type=str, default="str.out", help="Name of the structure files")
    args = parser.parse_args()
    db = connect(args.db)
    transfer_to_db(args.folder, db, args.str)
    transfer_prop_from_db(db, args.prop)

if __name__ == '__main__':
    main()
|
from .context import Context
from .run_remote_script import Runner
from .service import Provision
from .service_util import adduser
class BuildUser(Provision):
    """Provision step: create the `build` user (no extra groups)."""
    name = "user(build)"
    deps = ["start"]  # runs once the host is up

    def __call__(self, ctx: Context) -> None:
        runner = Runner(ctx.root_conn)
        adduser(ctx, runner, "build", [])
        runner.execute()
class SerfUser(Provision):
    """Provision step: create the `serf` user (no extra groups)."""
    name = "user(serf)"
    deps = ["user(build)"]

    def __call__(self, ctx: Context) -> None:
        runner = Runner(ctx.root_conn)
        adduser(ctx, runner, "serf", [])
        runner.execute()
class StaticUser(Provision):
    """Provision step: create the `static` user (no extra groups)."""
    name = "user(static)"
    deps = ["user(build)"]

    def __call__(self, ctx: Context) -> None:
        runner = Runner(ctx.root_conn)
        adduser(ctx, runner, "static", [])
        runner.execute()
class PiUser(Provision):
    """Provision step: create the interactive `pi` user as a member of the
    service groups, and put the build outputs on its PATH."""
    name = "user(pi)"
    deps = ["user(build)", "user(serf)", "user(static)"]

    def __call__(self, ctx: Context) -> None:
        runner = Runner(ctx.root_conn)
        adduser(ctx, runner, "pi", ["build", "adm", "serf", "static"])
        # Idempotently append each PATH export to pi's .bashrc.
        for line in [
            "export PATH=$PATH:/home/build/builds/heads-cli/prod",
            "export PATH=$PATH:/home/build/builds/serf/prod",
        ]:
            runner.run_remote_rpc("ensure_line_in_file", params=dict(
                filename="/home/pi/.bashrc",
                line=line,
            ))
        runner.execute()
class RootUser(Provision):
    """Provision step: configure root — ensure .bash_aliases is sourced
    from .profile."""
    name = "user(root)"
    deps = ["user(pi)"]

    def __call__(self, ctx: Context) -> None:
        # NOTE(review): unlike the sibling provisions, the runner is stored
        # on self, and ".profile" is a relative path (root's home presumably
        # the remote CWD) — confirm both are intentional.
        self.runner = Runner(ctx.root_conn)
        adduser(ctx, self.runner, "root", [])
        self.runner.run_remote_rpc("ensure_line_in_file", params=dict(
            filename=".profile",
            line=". .bash_aliases",
        ))
        self.runner.execute()
|
"""
Let us call an integer sided triangle with sides a ≤ b ≤ c barely acute if the sides satisfy a² + b² = c² + 1.
How many barely acute triangles are there with perimeter ≤ 25,000,000?
""" |
from bokeh.plotting import figure, show
# add the data
x = [1, 2, 3, 4, 5, 6, 7]
y = [4, 9, 7, 2, 3, 1, 8]
# create a new plot with a title and axis labels
p = figure(title="Dans plot", x_axis_label='x', y_axis_label='y')
# add a line renderer with a legend and line thickness to the plot
p.line(x, y, legend_label="Temp", line_width=15)
# show the results
show(p)
|
# Generated by Django 2.0.5 on 2018-07-02 02:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make `Book.ISBN` an optional TextField."""

    dependencies = [
        ('books', '0010_auto_20180702_1058'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='ISBN',
            field=models.TextField(blank=True, verbose_name='ISBN'),
        ),
    ]
|
import machine
import micropython
import time
import cet_time
import display
import efa
import timer
micropython.alloc_emergency_exception_buf(100)
class View:
    """Screen model for the OLED departure board.

    Holds the current UI state (error text, status message, departure list,
    busy flag) and knows how to render itself onto a display object.
    """

    def __init__(self):
        self.error = None        # error text shown instead of departures
        self.departures = []     # most recently fetched departures
        self.processing = False  # busy indicator (bar at the screen bottom)
        self.message = None      # transient status message

    @staticmethod
    def format_departure(departure):
        """Render one departure as fixed-width columns: line, minutes
        remaining, optional '+delay', destination name."""
        now = time.time()
        remaining = departure.remaining_minutes(now)
        delay_column = '{:>3s}'.format('+' + str(departure.delay)) if departure.delay > 0 else '    '
        return '{:2s} {:2d}min {} {}'.format(departure.number, remaining, delay_column, departure.name)

    def show_error(self, error):
        """Switch to the error state; clears message and busy flag."""
        self.error = str(error)
        self.processing = False
        self.message = None

    def show_message(self, msg):
        """Switch to the status-message state; clears any error."""
        self.error = None
        self.message = msg

    def show_departures(self, departures):
        """Switch to the normal departure listing."""
        self.error = None
        self.message = None
        self.departures = departures

    def show_progress(self, boolean):
        """Toggle the busy indicator."""
        self.processing = boolean

    def paint(self, oled):
        """Draw the current state onto `oled` and flush it."""
        oled.fill(0)
        oled.invert(0)
        oled.text(cet_time.current_time_formatted(), 0, 0)
        if self.error:
            oled.text('ERROR', 0, 26)
            oled.text(self.error, 0, 38)
        elif self.message:
            oled.fill(0)
            oled.text(str(self.message), 0, 0)
            # Corner decoration: shrinking radii with alternating fill bit,
            # starting filled at radius 0.50.
            for step, radius in enumerate((0.50, 0.45, 0.40, 0.35, 0.30,
                                           0.25, 0.20, 0.15, 0.10, 0.05)):
                oled.corner_se_fill_circle(radius, (step + 1) % 2)
        else:
            # Up to four reachable departures on the fixed display rows.
            for departure, row in zip(self.reachable_departures(), (14, 26, 38, 50)):
                oled.text(self.format_departure(departure), 0, row)
        if self.processing:
            oled.fill_rect(0, 62, 128, 20, 1)
        oled.show()

    def reachable_departures(self):
        """Departures that can still be caught as of now."""
        moment = time.time()
        reachable = []
        for departure in self.departures:
            if departure.reachable(moment):
                reachable.append(departure)
        return reachable
# --- module-level wiring and main refresh loop ---

# Shared view instance, repainted periodically by the scheduler below.
view = View()
i2c = machine.I2C(scl=machine.Pin(4), sda=machine.Pin(5))
oled = display.Display(i2c)
scheduler = timer.default(lambda: view.paint(oled)).start()

def stop():
    """Stop the repaint timer on shutdown."""
    print("Shuting down")
    scheduler.stop()

counter = 0
try:
    view.show_message('Booting...')
    time.sleep(2)
    # NOTE(review): network_connect and ntp_time_sync are not defined or
    # imported in this module — presumably provided by the MicroPython boot
    # environment; confirm.
    network_connect(lambda: view.show_message('Network...'))
    ntp_time_sync(lambda: view.show_message('NTP...'))
    view.show_message('EFA...')
except Exception as e:
    # Boot failure: show it and leave the error on screen.
    view.show_error(e)
    view.paint(oled)
else:
    # Refresh departures every 90 seconds forever.
    while True:
        counter = counter + 1
        try:
            view.show_progress(True)
            network_connect(lambda: view.show_message('Network...'))
            departures = efa.departures()
            # Re-sync the clock every 100 refresh cycles.
            if counter % 100 == 0:
                ntp_time_sync(lambda: view.show_message('NTP...'))
            view.show_departures(departures)
        except Exception as e:
            print(e)
            view.show_error(e)
        finally:
            view.show_progress(False)
        time.sleep(90)
finally:
    stop()
|
import pandas as pd
import math
from sklearn import linear_model
# Home-price model: fill missing bedroom counts with the (floored) median,
# then fit a multivariate linear regression on area / bedrooms / age.
df = pd.read_csv('homeprices1.csv')
median_bedrooms = math.floor(df.bedrooms.median())
df.bedrooms = df.bedrooms.fillna(median_bedrooms)
print(df)
# create the model and train it.
model = linear_model.LinearRegression()
model.fit(df[['area', 'bedrooms', 'age']], df.price)
# predict the value according to the order which we specified during training
predicted = model.predict([[2500, 4, 5]])
print(predicted)
# finding the coefficients: one per independent variable; their magnitudes
# show which parameter matters more
coefficent = model.coef_
print(coefficent)
intercept = model.intercept_
print(intercept)
# Internal part happening inside the linear regression prediction:
# price = w1*area + w2*bedrooms + w3*age + intercept (weights hard-coded
# from a previous fit — TODO confirm they match model.coef_)
price = 137.25 * 2500 + -26025 * 4 + -6825 * 5 + intercept
print(price)
# Both predicted and price is same
|
"""Example script that demonstrates features of the Video4Linux device adapter.
Kyle M. Douglass, 2018
kyle.m.douglass@gmail.com
"""
import numpy as np
import MMCorePy
# Initialize the camera: (device label, adapter module, device name).
mmc = MMCorePy.CMMCore()
mmc.loadDevice("camera", "video4linux2", "Video4Linux2")
mmc.initializeAllDevices()
mmc.setCameraDevice("camera")
# Snap an image and store it in a NumPy array
mmc.snapImage()
img = mmc.getImage()
|
import requests
from key import BINANCE_API_KEY
session = requests.Session()
def get_coinbase_trade(symbol, after_id):
    """Fetch recent trades for `symbol` from Coinbase Pro; `after_id` pages
    backwards when non-zero."""
    if after_id != 0:
        url = "https://api.pro.coinbase.com/products/{symbol}/trades?after={after_id}".format(symbol=symbol, after_id=after_id)
    else:
        url = "https://api.pro.coinbase.com/products/{symbol}/trades".format(symbol=symbol)
    return session.get(url)
def get_okex_trade(symbol, after_id):
    """Fetch recent spot trades for `symbol` from OKEx; `after_id` pages
    backwards when non-zero."""
    if after_id != 0:
        url = "https://www.okex.com/api/spot/v3/instruments/{symbol}/trades?after={after_id}".format(symbol=symbol, after_id=after_id)
    else:
        url = "https://www.okex.com/api/spot/v3/instruments/{symbol}/trades".format(symbol=symbol)
    return session.get(url)
def get_binance_trade(symbol, from_id, size=100):
    """Fetch historical trades for `symbol` from Binance.

    :param symbol: market symbol, e.g. "BTCUSDT"
    :param from_id: trade id to start from; 0 means "most recent trades"
    :param size: page size (Binance caps this server-side)
    :return: the raw requests.Response
    """
    url = "https://api.binance.com/api/v3/historicalTrades"
    # Build the query via `params` so values are URL-encoded correctly and
    # the two hand-built URL variants collapse into one.
    params = {"symbol": symbol, "limit": size}
    if from_id != 0:
        params["fromId"] = from_id
    # This endpoint requires an API key header.
    session.headers.update({"X-MBX-APIKEY" : BINANCE_API_KEY})
    return session.get(url, params=params)
|
import cv2
import numpy as np
import math

# Count cars in a traffic video: difference of consecutive frames ->
# threshold -> morphology -> convex hulls -> count hulls whose bounding
# box crosses a virtual line at 5/8 of the frame height.
origen = cv2.VideoCapture('CarsDrivingUnderBridge.mp4')
ret, cam = origen.read()
ret2, cam2 = origen.read()
kernel = np.ones((5,5), np.uint8)
nCarros = 0
# Frame geometry is constant: compute it once instead of per iteration.
if ret:
    alto, ancho = cam.shape[:2]
while(origen.isOpened()):
    # BUG FIX: the original guard tested `ret`, but `ret` was clobbered by
    # cv2.threshold() below, and `ret2` was never refreshed (the next frame
    # was read into an unused variable `r`), so a failed read was never seen.
    if not ret or not ret2:
        break
    imgGris = cv2.cvtColor(cam, cv2.COLOR_BGR2GRAY)
    imgGris2 = cv2.cvtColor(cam2, cv2.COLOR_BGR2GRAY)
    imgBlurred1 = cv2.GaussianBlur(imgGris, (5, 5), 0)
    imgBlurred2 = cv2.GaussianBlur(imgGris2, (5, 5), 0)
    imgDif = cv2.absdiff(imgBlurred1, imgBlurred2)
    # Default OpenCV binary threshold; discard the returned threshold value
    # so the frame-read flag `ret` is not overwritten (original bug).
    _, imgTh = cv2.threshold(imgDif,30,255,cv2.THRESH_BINARY)
    # Dilate then erode to merge fragments of the same moving object.
    imgE = cv2.erode(cv2.dilate(imgTh, kernel, iterations=3), kernel, iterations=2)
    contours, hierarchy = cv2.findContours(imgE, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    blackImg = np.zeros((np.size(imgTh,0),np.size(imgTh,1)), np.uint8)
    cv2.drawContours(blackImg, contours, -1, (255,255,255), -1)
    # Convex hulls of the motion contours are the car candidates.
    hull = [cv2.convexHull(c, False) for c in contours]
    fondo1 = np.zeros((np.size(imgTh,0),np.size(imgTh,1)), np.uint8)
    for k in range(len(contours)):
        cv2.drawContours(fondo1, contours, k, (0,255,0), 1,8, hierarchy)
        cv2.drawContours(fondo1, hull, k, (255,255,255), 1, 8)
    cv2.imshow('ContornosYMas', imgE)
    for candidato in hull:
        x,y,w,h = cv2.boundingRect(candidato)
        perimetro = cv2.arcLength(candidato,True)
        area = cv2.contourArea(candidato)
        rAspecto = float(w)/h
        tamDiagonal = math.sqrt(w*w + h*h)
        areaRect = w*h
        # Heuristic filter for car-sized, car-shaped blobs.
        if perimetro > 100 and area > 1000 and 0.2 < rAspecto < 4.0 and w > 30 and h > 30 and tamDiagonal > 60 and area/areaRect > 0.5:
            # Count the car when its top edge enters the counting band.
            if alto*5/8 < y < alto*5/8 + 15:
                nCarros += 1
            cv2.rectangle(cam, (x,y), (x+w,y+h), (0,255,0),3)
    cv2.line(cam, (int(0), int(alto*5/8)),(int(ancho), int(alto*5/8)), (0,0,255), 2)
    cv2.imshow('Original', cam)
    # Advance one frame. BUG FIX: store the read flag in ret2 (the original
    # used an unused variable `r`, leaving ret2 permanently stale).
    cam = cam2
    if ((origen.get(cv2.CAP_PROP_POS_FRAMES) + 1) < origen.get(cv2.CAP_PROP_FRAME_COUNT)):
        ret2, cam2 = origen.read()
    k = cv2.waitKey(1) & 0xFF
    if k == 27:  # ESC quits
        break
# Deben ser 52 carros en el carril de la derecha para los carros debajo del puente
print (nCarros)
origen.release()
cv2.destroyAllWindows()
# Standard Library imports
# Core Flask imports
from flask import render_template, redirect, url_for, request
from flask_login import login_user, logout_user, current_user, login_required
# Third-party imports
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import select
def init_routes(app, db):
    """
    Register request hooks, error handlers and URL rules on the app.

    app: instance of flask app
    db: DatabaseManager.session
    """
    # App imports (placed here to avoid circular import)
    from flask_for_startups.views import error_views, account_management_views, static_views
    from flask_for_startups.utils import custom_errors
    from flask_for_startups import login_manager
    from flask_for_startups.models import User
    # Request management
    @app.before_request
    def before_request():
        # NOTE(review): presumably touches the scoped session so each request
        # gets a fresh one — confirm against DatabaseManager.session semantics.
        db()
    @app.teardown_appcontext
    def shutdown_session(response_or_exc):
        # Dispose of the session at the end of the application context.
        db.remove()
    @login_manager.user_loader
    def load_user(user_id):
        """Load user by ID; implicitly returns None (anonymous) otherwise."""
        if user_id and user_id != 'None':
            return User.query.filter_by(user_id=user_id).first()
    # Error views
    app.register_error_handler(404, error_views.not_found_error)
    app.register_error_handler(500, error_views.internal_error)
    app.register_error_handler(custom_errors.PermissionsDeniedError, error_views.permission_denied_error)
    app.register_error_handler(SQLAlchemyError, error_views.internal_db_error)
    # Public views
    app.add_url_rule('/', view_func=static_views.index)
    app.add_url_rule('/register', view_func=static_views.register)
    app.add_url_rule('/login', view_func=static_views.login)
    # Login required views
    app.add_url_rule('/settings', view_func=static_views.settings)
    # Public API
    app.add_url_rule('/api/login', view_func=account_management_views.login_account, methods=['POST'])
    app.add_url_rule('/logout', view_func=account_management_views.logout_account)
    app.add_url_rule('/api/register',
                     view_func=account_management_views.register_account,
                     methods=['POST'])
    # Login Required API
    app.add_url_rule('/api/user', view_func=account_management_views.user)
    app.add_url_rule('/api/email', view_func=account_management_views.email, methods=['POST'])
    return
|
#!/usr/bin/env python
import os, sys, cv2
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import argparse
import glob
import caffe, random
def showTattoo(folder, dataset):
    """Run Fast R-CNN tattoo detection on a random ~1% sample of images.

    :param folder: dataset root; images live in ``<folder>/images``
    :param dataset: name of the list file (one image filename per line)
    """
    prototxt = 'test.prototxt'
    caffemodel = 'tattc_voc.caffemodel'
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    dataset = os.path.join(folder, dataset)
    # Close the list file deterministically (the original leaked the handle).
    with open(dataset, 'r') as f:
        for line in f:
            # Sample roughly 1% of the lines at random.
            if random.randint(1, 1000) > 10:
                continue
            line = line[:-1]  # strip the trailing newline
            imgname = os.path.join(folder, 'images', line)
            im = cv2.imread(imgname)
            # BUG FIX: the original printed the undefined name `imgName`
            # (NameError); the variable is `imgname`.
            print(imgname)
            scores, boxes = im_detect(net, im)
            print(scores.shape, boxes.shape)
if __name__ == '__main__':
    showTattoo('flickr', 'flickr10000_group1.txt')
|
import py_vollib.black_scholes.implied_volatility as iv
import py_vollib.black_scholes as bs
import py_vollib.black_scholes.greeks.numerical as greek
import pandas as pd
import numpy as np
import time
def get_vol(option_price, spot, strike, T, r=0, option_type='p'):
    """
    Calculates the implied volatility of an option.
    Parameters
    -------------
    option_price: float
        The market price of the option
    spot: float
        The current price of the underlying asset (in this case ETH)
    strike: float
        The strike price of the option contract
    T: float
        The time to maturity of the option in years
    r: float
        The current interest rate (assuming zero by default)
    option_type: str
        Option can be either a call ('c') or put ('p')
    Returns
    -------------
    float:
        Returns the annualised implied volatility of the option
    Example
    -------------
    >>> get_vol(option_price=1.1031, spot=200,
                strike=150, T=0.034, r=0, option_type='p')
    1.0713281006448705
    """
    # Thin wrapper around py_vollib; argument order matches
    # implied_volatility(price, S, K, t, r, flag).
    return iv.implied_volatility(option_price, spot, strike, T, r, option_type)
def get_greeks(option_price, spot, strike, T, r=0, option_type='p'):
    """
    Calculates the option greek values.
    Parameters
    -------------
    option_price: float
        The market price of the option
    spot: float
        The current price of the underlying asset (in this case ETH)
    strike: float
        The strike price of the option contract
    T: float
        The time to maturity of the option in years
    r: float
        The current interest rate (assuming zero by default)
    option_type: str
        Option can be either a call ('c') or put ('p')
    Returns
    -------------
    list:
        Current time, implied volatility, gamma, theta, vega and delta
    Example
    -------------
    >>> get_greeks(option_price=1.1031, spot=200,
                   strike=150, T=0.034, r=0, option_type='p')
    [1590791181.920008, 1.0713281006448705, 0.0030137160868808216,
    -0.18953283576618232, 0.04391003074487654, -0.05996451476744491]
    """
    # BUG FIX: the implied vol must be computed for the *requested* option
    # type; the original omitted option_type, so calls ('c') used the put IV.
    # The local is named `vol` so it does not shadow the `iv` module import.
    vol = get_vol(option_price, spot, strike, T, r, option_type)
    gamma = greek.agamma(option_type, spot, strike, T, r, vol)
    theta = greek.atheta(option_type, spot, strike, T, r, vol)
    delta = greek.adelta(option_type, spot, strike, T, r, vol)
    vega = greek.avega(option_type, spot, strike, T, r, vol)
    return [time.time(), vol, gamma, theta, vega, delta]
|
# -*- coding: utf-8 -*-
def create(config_dict):
    """Instantiate a sorter from a configuration dict.

    Supported ``type`` values: ``bubble``, ``select``, ``insert``.
    Raises NotImplementedError for any other type.
    """
    sort_type = config_dict['type']
    if sort_type == 'bubble':
        return BubbleSort(config_dict['lower'])
    if sort_type == 'select':
        return SelectSort(config_dict['lower'])
    if sort_type == 'insert':
        return InsertSort(config_dict['lower'], config_dict['use_bin'])
    raise NotImplementedError(
        '{} is not implemented'.format(config_dict['type']))
class BubbleSort:
    """One-pass-at-a-time bubble sort.

    Each call to :meth:`sort` performs a single pass, swapping adjacent
    elements that violate the requested order.
    """
    def __init__(self, lower):
        """
        Args:
            lower: sort in descending order when True (bool)
        """
        # The comparator returns True when the adjacent pair must be swapped
        # (idiom fix: the comparison already yields a bool, no ternary needed).
        if lower:
            self.__check = lambda a, b: a <= b
        else:
            self.__check = lambda a, b: a > b
    def sort(self, data):
        """Run one bubble pass over *data* in place.

        Args:
            data: list of comparable values (mutated in place)
        Returns:
            True: finished (no swap occurred)
            False: another pass may still be needed
        """
        swapped = False
        for i in range(len(data) - 1):
            if self.__check(data[i], data[i + 1]):
                data[i], data[i + 1] = data[i + 1], data[i]
                swapped = True
        return not swapped
class SelectSort:
    """Selection sort performing one selection step per :meth:`sort` call."""
    def __init__(self, lower):
        # Next position to fill with the selected element.
        self.__index = 0
        # Descending order selects the maximum, ascending the minimum.
        self.__select = max if lower else min
    def sort(self, data):
        """Select one element into its final position (in place).

        Returns True once the whole list has been processed.
        """
        pick = self.__select(range(self.__index, len(data)), key=data.__getitem__)
        data[self.__index], data[pick] = data[pick], data[self.__index]
        self.__index += 1
        return self.__index == len(data)
class InsertSort:
    """Insertion sort, inserting one element per :meth:`sort` call.

    NOTE(review): the original accepted a ``use_bin`` flag that was never
    stored or used (no binary-search insertion is implemented). The flag is
    kept for interface compatibility and still has no effect.
    """
    def __init__(self, lower, use_bin=False):
        # Index of the next element to insert into the sorted prefix.
        self.__i = 1
        # True when the (left, right) pair is out of order and must be swapped.
        if lower:
            self.__check = lambda a, b: a < b
        else:
            self.__check = lambda a, b: a > b
    def sort(self, data):
        """Insert element ``i`` into the sorted prefix ``data[:i]`` (in place).

        Returns True when the whole list is sorted.
        """
        # BUG FIX: guard short lists and repeated calls after completion —
        # the original indexed data[self.__i] unconditionally and raised
        # IndexError for lists of length <= 1 (and when called again once done).
        if self.__i >= len(data):
            return True
        j = self.__i
        while j > 0 and self.__check(data[j - 1], data[j]):
            # Tuple swap instead of the original three-statement tmp swap.
            data[j - 1], data[j] = data[j], data[j - 1]
            j -= 1
        self.__i += 1
        return self.__i == len(data)
|
"""moonlight URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# NOTE(review): the star import pulls every view into this namespace; the
# names used below are the *ListView / *DetailView classes from .views.
from .views import *
from django.urls import path
# Read-only routes: a list view plus a pk-based detail view for budgets,
# wastebook entries and both category levels.
urlpatterns = [
    path('budgets/', BudgetListView.as_view(), name='budgets'),
    path('budgets/<int:pk>/', BudgetDetailView.as_view(), name='budget'),
    path('wastebook/', WasteBookListView.as_view(), name='wastebooks'),
    path('wastebook/<int:pk>/', WasteBookDetailView.as_view(), name='wastebook'),
    path('category1/', Category1ListView.as_view(), name='category1s'),
    path('category1/<int:pk>/', Category1DetailView.as_view(), name='category1'),
    path('category2/', Category2ListView.as_view(), name='category2s'),
    path('category2/<int:pk>/', Category2DetailView.as_view(), name='category2'),
]
|
import os
import requests
from flask import Flask, request
import logging
import json
import random
start = True  # NOTE(review): never read anywhere in this module
app = Flask(__name__)
# Per-user dialog state, keyed by Alice user_id (in-memory only, lost on restart).
sessionStorage = {}
logging.basicConfig(level=logging.INFO)
# NOTE(review): stale comment from the skill template — the original text
# described a dict of city names to picture ids, which this module never builds.
@app.route('/post', methods=['POST'])
def main():
    """Webhook entry point for the Yandex.Alice dialog API."""
    body = request.json
    logging.info(f'Request: {body!r}')
    # Skeleton response; handle_dialog fills in the text.
    response = {
        'session': body['session'],
        'version': body['version'],
        'response': {'end_session': False},
    }
    handle_dialog(response, body)
    logging.info(f'Response: {response!r}')
    return json.dumps(response)
def handle_dialog(res, req):
    """Fill ``res['response']['text']`` according to the dialog state in *req*.

    Flow: new session -> ask for the user's name; name unknown -> try to
    extract it; otherwise treat the utterance as "Переведи слово: <word>"
    and translate <word> via the MyMemory API.
    """
    user_id = req['session']['user_id']
    # First interaction: ask the user to introduce themselves.
    if req['session']['new']:
        res['response']['text'] = 'Привет! Назови свое имя!'
        # Create the per-user state; the name is filled in later.
        sessionStorage[user_id] = {
            'first_name': None
        }
        return
    # The user has not introduced themselves yet: look for a name
    # in the last utterance.
    if sessionStorage[user_id]['first_name'] is None:
        first_name = get_first_name(req)
        # Name not recognised: ask the user to repeat it.
        if first_name is None:
            res['response']['text'] = \
                'Не расслышала имя. Повтори, пожалуйста!'
        # Name found: greet the user and explain the expected phrasing.
        else:
            sessionStorage[user_id]['first_name'] = first_name
            res['response']['text'] = 'Привет, я переводчик\n' \
                'Испльзуйте конструкцию:\n "Переведи слово: (слово для перевода)"'
    else:
        # BUG FIX: the original did split(':')[1], which raised IndexError
        # (HTTP 500) whenever the utterance contained no colon. Use
        # partition + strip and fall back to the usage hint instead.
        utterance = req['request']["original_utterance"]
        _, sep, word = utterance.partition(':')
        word = word.strip()
        if not sep or not word:
            res['response']['text'] = 'Испльзуйте конструкцию:\n "Переведи слово: (слово для перевода)"'
            return
        url = "https://translated-mymemory---translation-memory.p.rapidapi.com/api/get"
        querystring = {"langpair": "ru|en", "q": word, "mt": "1", "onlyprivate": "0", "de": "a@b.c"}
        # SECURITY(review): hard-coded API key checked into source — move it
        # to an environment variable or a key module (cf. key.py usage above).
        headers = {
            'x-rapidapi-key': "ff62ce869dmsh19d414c170f39aep18e37ajsn5ee33a675609",
            'x-rapidapi-host': "translated-mymemory---translation-memory.p.rapidapi.com"
        }
        response = requests.request("GET", url, headers=headers, params=querystring).json()
        res['response']['text'] = response['responseData']['translatedText']
    return
def get_first_name(req):
    """Return the first name found in the request's NLU entities.

    Looks at the first 'YANDEX.FIO' entity only; returns its 'first_name'
    value, or None when no FIO entity (or no first name in it) is present.
    """
    entities = req['request']['nlu']['entities']
    fio = next((e for e in entities if e['type'] == 'YANDEX.FIO'), None)
    if fio is None:
        return None
    return fio['value'].get('first_name', None)
if __name__ == '__main__':
    # Heroku-style deployment: honour the PORT env var, default to 5000.
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port)
|
# coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
from bitmovin_api_sdk.models.retry_hint import RetryHint
import pprint
import six
class ErrorDetails(object):
    """Details of an error returned by the Bitmovin API.

    Auto-generated OpenAPI model class; its structure mirrors the other
    models in ``bitmovin_api_sdk.models`` and should stay consistent with them.
    """
    @poscheck_model
    def __init__(self,
                 code=None,
                 category=None,
                 text=None,
                 retry_hint=None):
        # type: (int, string_types, string_types, RetryHint) -> None
        # Backing fields, populated through the validating setters below.
        self._code = None
        self._category = None
        self._text = None
        self._retry_hint = None
        self.discriminator = None
        if code is not None:
            self.code = code
        if category is not None:
            self.category = category
        if text is not None:
            self.text = text
        if retry_hint is not None:
            self.retry_hint = retry_hint
    @property
    def openapi_types(self):
        # Maps attribute name -> declared OpenAPI type (used by to_dict()).
        types = {
            'code': 'int',
            'category': 'string_types',
            'text': 'string_types',
            'retry_hint': 'RetryHint'
        }
        return types
    @property
    def attribute_map(self):
        # Maps python attribute name -> JSON field name.
        attributes = {
            'code': 'code',
            'category': 'category',
            'text': 'text',
            'retry_hint': 'retryHint'
        }
        return attributes
    @property
    def code(self):
        # type: () -> int
        """Gets the code of this ErrorDetails.
        Specific error code (required)
        :return: The code of this ErrorDetails.
        :rtype: int
        """
        return self._code
    @code.setter
    def code(self, code):
        # type: (int) -> None
        """Sets the code of this ErrorDetails.
        Specific error code (required)
        :param code: The code of this ErrorDetails.
        :type: int
        """
        if code is not None:
            if not isinstance(code, int):
                raise TypeError("Invalid type for `code`, type has to be `int`")
        self._code = code
    @property
    def category(self):
        # type: () -> string_types
        """Gets the category of this ErrorDetails.
        Error group name (required)
        :return: The category of this ErrorDetails.
        :rtype: string_types
        """
        return self._category
    @category.setter
    def category(self, category):
        # type: (string_types) -> None
        """Sets the category of this ErrorDetails.
        Error group name (required)
        :param category: The category of this ErrorDetails.
        :type: string_types
        """
        if category is not None:
            if not isinstance(category, string_types):
                raise TypeError("Invalid type for `category`, type has to be `string_types`")
        self._category = category
    @property
    def text(self):
        # type: () -> string_types
        """Gets the text of this ErrorDetails.
        Detailed error message (required)
        :return: The text of this ErrorDetails.
        :rtype: string_types
        """
        return self._text
    @text.setter
    def text(self, text):
        # type: (string_types) -> None
        """Sets the text of this ErrorDetails.
        Detailed error message (required)
        :param text: The text of this ErrorDetails.
        :type: string_types
        """
        if text is not None:
            if not isinstance(text, string_types):
                raise TypeError("Invalid type for `text`, type has to be `string_types`")
        self._text = text
    @property
    def retry_hint(self):
        # type: () -> RetryHint
        """Gets the retry_hint of this ErrorDetails.
        Information if the encoding could potentially succeed when retrying. (required)
        :return: The retry_hint of this ErrorDetails.
        :rtype: RetryHint
        """
        return self._retry_hint
    @retry_hint.setter
    def retry_hint(self, retry_hint):
        # type: (RetryHint) -> None
        """Sets the retry_hint of this ErrorDetails.
        Information if the encoding could potentially succeed when retrying. (required)
        :param retry_hint: The retry_hint of this ErrorDetails.
        :type: RetryHint
        """
        if retry_hint is not None:
            if not isinstance(retry_hint, RetryHint):
                raise TypeError("Invalid type for `retry_hint`, type has to be `RetryHint`")
        self._retry_hint = retry_hint
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Serialize each declared attribute, recursing into nested models,
        # enums, lists and dicts; None values and empty lists are skipped.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if value is None:
                continue
            if isinstance(value, list):
                if len(value) == 0:
                    continue
                result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
            elif hasattr(value, "to_dict"):
                result[self.attribute_map.get(attr)] = value.to_dict()
            elif isinstance(value, Enum):
                result[self.attribute_map.get(attr)] = value.value
            elif isinstance(value, dict):
                result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
            else:
                result[self.attribute_map.get(attr)] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ErrorDetails):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://forge.epn-campus.eu/projects/azimuthal
#
# File: "$Id$"
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "22/09/2014"
__status__ = "production"
import os
import sys
import threading
import logging
import gc
import types
import array
import operator
from math import sqrt
import numpy
from .gui_utils import pylab, update_fig, maximize_fig, QtGui, backend
from . import gui_utils
import fabio
from .utils import deprecated, percentile
from .reconstruct import reconstruct
from .calibrant import Calibrant, ALL_CALIBRANTS
from .blob_detection import BlobDetection
from .massif import Massif
logger = logging.getLogger("pyFAI.peak_picker")
if os.name != "nt":
WindowsError = RuntimeError
################################################################################
# Toolbar widget
################################################################################
#class PeakPickerToolbar(backend.NavigationToolbar2QT):
# def __init__(self, canvas, parent, coordinates=True):
# backend.NavigationToolbar2QT.__init__(self, canvas, parent, False)
# self.append_mode = None
#
#
## def _init_toolbar(self):
## backend.NavigationToolbar2QT._init_toolbar(self)
## self.addSeparator()
## a = self.addAction('+pts', self.on_plus_pts_clicked)
## a.setToolTip('Add more points to group')
## a = self.addAction('-pts', self.on_minus_pts_clicked)
## a.setToolTip('Remove points from group')
#
# def pan(self):
# self.append_mode = None
# backend.NavigationToolbar2QT.pan(self)
#
# def zoom(self):
# self.append_mode = None
# backend.NavigationToolbar2QT.zoom(self)
#
#
# def on_plus_pts_clicked(self, *args):
# """
# callback function
# """
# self.append_mode = True
# print(self.append_mode)
#
# def on_minus_pts_clicked(self, *args):
# """
# callback function
# """
# self.append_mode = False
# print(self.append_mode)
################################################################################
# PeakPicker
################################################################################
class PeakPicker(object):
"""
This class is in charge of peak picking, i.e. find bragg spots in the image
Two methods can be used : massif or blob
"""
VALID_METHODS = ["massif", "blob"]
    def __init__(self, strFilename, reconst=False, mask=None,
                 pointfile=None, calibrant=None, wavelength=None, method="massif"):
        """
        @param strFilename: input image filename
        @param reconst: shall masked part or negative values be reconstructed (wipe out problems with pilatus gaps)
        @param mask: area in which keypoints will not be considered as valid
        @param pointfile: file with saved control points (forwarded to ControlPoints)
        @param calibrant: calibrant (forwarded to ControlPoints)
        @param wavelength: wavelength (forwarded to ControlPoints)
        @param method: peak-picking backend, one of VALID_METHODS ("massif" or "blob")
        """
        self.strFilename = strFilename
        self.data = fabio.open(strFilename).data.astype("float32")
        if mask is not None:
            mask = mask.astype(bool)
            view = self.data.ravel()
            flat_mask = mask.ravel()
            # Replace masked pixels by the minimum of the valid pixels so
            # they can never be picked as peaks.
            min_valid = view[numpy.where(flat_mask == False)].min()
            view[numpy.where(flat_mask)] = min_valid
        self.shape = self.data.shape
        self.points = ControlPoints(pointfile, calibrant=calibrant, wavelength=wavelength)
        # Matplotlib / Qt GUI state (populated lazily by gui()).
        self.fig = None
        self.fig2 = None
        self.fig2sp = None
        self.ax = None
        self.ct = None
        self.msp = None
        self.append_mode = None
        self.spinbox = None
        self.refine_btn = None
        self.ref_action = None
        self.sb_action = None
        self.reconstruct = reconst
        self.mask = mask
        self.massif = None  # used for massif detection
        self.blob = None  # used for blob detection
        self._sem = threading.Semaphore()
        # self._semGui = threading.Semaphore()
        self.mpl_connectId = None
        self.defaultNbPoints = 100
        self._init_thread = None
        self.point_filename = None
        self.callback = None
        # Fall back to the first valid method if an unknown one was requested.
        if method in self.VALID_METHODS:
            self.method = method
        else:
            logger.error("Not a valid peak-picker method: %s should be part of %s" % (method, self.VALID_METHODS))
            self.method = self.VALID_METHODS[0]
        if self.method == "massif":
            self.init_massif(False)
        elif self.method == "blob":
            self.init_blob(False)
def init(self, method, sync=True):
"""
Unified initializer
"""
assert method in ["blob", "massif"]
if method != self.method:
self.__getattribute__("init_" + method)(sync)
def sync_init(self):
if self._init_thread:
self._init_thread.join()
    def init_massif(self, sync=True):
        """
        Initialize PeakPicker for massif based detection
        @param sync: wait for the labeling thread to finish before returning
        """
        if self.reconstruct:
            if self.mask is None:
                self.mask = self.data < 0
            # Fill masked/negative regions (e.g. Pilatus gaps) before analysis.
            data = reconstruct(self.data, self.mask)
        else:
            data = self.data
        self.massif = Massif(data)
        # The heavy labeling step runs in a background thread; joined when sync.
        self._init_thread = threading.Thread(target=self.massif.getLabeledMassif, name="massif_process")
        self._init_thread.start()
        self.method = "massif"
        if sync:
            self._init_thread.join()
def init_blob(self, sync=True):
"""
Initialize PeakPicker for blob based detection
"""
if self.mask is not None:
self.blob = BlobDetection(self.data, mask=self.mask)
else:
self.blob = BlobDetection(self.data, mask=(self.data < 0))
self.method = "blob"
self._init_thread = threading.Thread(target=self.blob.process, name="blob_process")
self._init_thread.start()
if sync:
self._init_thread.join()
def peaks_from_area(self, mask, Imin, keep=1000, refine=True, method=None):
"""
Return the list of peaks within an area
@param mask: 2d array with mask.
@param Imin: minimum of intensity above the background to keep the point
@param keep: maximum number of points to keep
@param method: enforce the use of detection using "massif" or "blob"
@return: list of peaks [y,x], [y,x], ...]
"""
if not method:
method = self.method
else:
self.init(method, True)
obj = self.__getattribute__(method)
return obj.peaks_from_area(mask, Imin=Imin, keep=keep, refine=refine)
    def reset(self):
        """
        Reset control point and graph (if needed)
        """
        self.points.reset()
        if self.fig and self.ax:
            # empty annotation and plots
            # NOTE(review): assigning lists to ax.texts/ax.lines relies on the
            # old matplotlib API — newer versions make these read-only.
            if len(self.ax.texts) > 0:
                self.ax.texts = []
            if len(self.ax.lines) > 0:
                self.ax.lines = []
            # Redraw the image
            if not gui_utils.main_loop:
                self.fig.show()
            update_fig(self.fig)
    def gui(self, log=False, maximize=False, pick=True):
        """
        Display the image in a matplotlib figure with the picking toolbar.

        @param log: show z in log scale
        @param maximize: maximize the figure window after creation
        @param pick: add the ring spinbox + Refine button and connect clicks
        """
        if self.fig is None:
            self.fig = pylab.plt.figure()
            # add 3 subplots at the same position for debye-sherrer image, contour-plot and massif contour
            self.ax = self.fig.add_subplot(111)
            self.ct = self.fig.add_subplot(111)
            self.msp = self.fig.add_subplot(111)
            toolbar = self.fig.canvas.toolbar
            toolbar.addSeparator()
            a = toolbar.addAction('Opts', self.on_option_clicked)
            a.setToolTip('open options window')
            if pick:
                label = QtGui.QLabel("Ring #", toolbar)
                toolbar.addWidget(label)
                self.spinbox = QtGui.QSpinBox(toolbar)
                self.spinbox.setMinimum(0)
                self.sb_action = toolbar.addWidget(self.spinbox)
                a = toolbar.addAction('Refine', self.on_refine_clicked)
                a.setToolTip('switch to refinement mode')
                self.ref_action = a
                self.mpl_connectId = self.fig.canvas.mpl_connect('button_press_event', self.onclick)
        if log:
            showData = numpy.log1p(self.data - self.data.min())
            self.ax.set_title('Log colour scale (skipping lowest/highest per mille)')
        else:
            showData = self.data
            self.ax.set_title('Linear colour scale (skipping lowest/highest per mille)')
        # skip lowest and highest per mille of image values via vmin/vmax
        showMin = percentile(showData, .1)
        showMax = percentile(showData, 99.9)
        im = self.ax.imshow(showData, vmin=showMin, vmax=showMax, origin="lower", interpolation="nearest")
        self.ax.autoscale_view(False, False, False)
        self.fig.colorbar(im)  # , self.ax)
        update_fig(self.fig)
        if maximize:
            maximize_fig(self.fig)
        if not gui_utils.main_loop:
            self.fig.show()
    def load(self, filename):
        """
        load a filename and plot data on the screen (if GUI)

        @param filename: control-point file, forwarded to ControlPoints.load
        """
        self.points.load(filename)
        self.display_points()
    def display_points(self, minIndex=0):
        """
        display all points and their ring annotations
        @param minIndex: ring index to start with
        """
        if self.ax is not None:
            for idx, points in enumerate(self.points._points):
                if idx < minIndex:
                    continue
                if len(points) > 0:
                    # Annotate the group with its ring index near the first point.
                    pt0x = points[0][1]
                    pt0y = points[0][0]
                    self.ax.annotate("%i" % (idx), xy=(pt0x, pt0y), xytext=(pt0x + 10, pt0y + 10),
                                     color="white", arrowprops=dict(facecolor='white', edgecolor='white'))
                    # Points are stored as [y, x]: column 1 is x, column 0 is y.
                    npl = numpy.array(points)
                    self.ax.plot(npl[:, 1], npl[:, 0], "o", scalex=False, scaley=False)
    def onclick(self, event):
        """
        Matplotlib mouse callback managing point groups on the image.

        Right-click (button 3) adds peaks — the key modifier selects the
        variant (shift: nearest peak to current group; control: new group
        with one peak; m: more peaks into current group; none: new group).
        Middle-click (button 2) removes a point ('1' modifier) or a group.
        """
        def annontate(x, x0=None, idx=None):
            """
            Call back method to annotate the figure while calculation are going on ...
            @param x: coordinates
            @param x0: coordinates of the starting point
            """
            if x0 is None:
                self.ax.annotate(".", xy=(x[1], x[0]), color="black")
            else:
                self.ax.annotate("%i" % (len(self.points)), xy=(x[1], x[0]), xytext=(x0[1], x0[0]), color="white",
                                 arrowprops=dict(facecolor='white', edgecolor='white'),)
                update_fig(self.fig)
        with self._sem:
            x0 = event.xdata
            y0 = event.ydata
            ring = self.spinbox.value()
            if event.button == 3:  # right click: add points (1 or many) to new or existing group
                logger.debug("Button: %i, Key modifier: %s" % (event.button, event.key))
                if event.key == 'shift':  # if 'shift' pressed add nearest maximum to the current group
                    points = self.points.pop(ring) or []
                    # no, keep annotation! if len(self.ax.texts) > 0: self.ax.texts.pop()
                    if len(self.ax.lines) > 0:
                        self.ax.lines.pop()
                    update_fig(self.fig)
                    newpeak = self.massif.nearest_peak([y0, x0])
                    if newpeak:
                        if not points:
                            # if new group, need annotation (before points.append!)
                            annontate(newpeak, [y0, x0])
                        points.append(newpeak)
                        logger.info("x=%5.1f, y=%5.1f added to group #%i" % (newpeak[1], newpeak[0], len(self.points)))
                    else:
                        logger.warning("No peak found !!!")
                elif event.key == 'control':  # if 'control' pressed add nearest maximum to a new group
                    points = []
                    newpeak = self.massif.nearest_peak([y0, x0])
                    if newpeak:
                        points.append(newpeak)
                        annontate(newpeak, [y0, x0])
                        logger.info("Create group #%i with single point x=%5.1f, y=%5.1f" % (len(self.points), newpeak[1], newpeak[0]))
                    else:
                        logger.warning("No peak found !!!")
                elif event.key == 'm':  # if 'm' pressed add new group to current group ... ?
                    points = self.points.pop(ring) or []
                    # no, keep annotation! if len(self.ax.texts) > 0: self.ax.texts.pop()
                    if len(self.ax.lines) > 0:
                        self.ax.lines.pop()
                    update_fig(self.fig)
                    # need to annotate only if a new group:
                    localAnn = None if points else annontate
                    listpeak = self.massif.find_peaks([y0, x0], self.defaultNbPoints, localAnn, self.massif_contour)
                    if len(listpeak) == 0:
                        logger.warning("No peak found !!!")
                    else:
                        points += listpeak
                        logger.info("Added %i points to group #%i (now %i points)" % (len(listpeak), len(self.points), len(points)))
                else:  # create new group
                    points = self.massif.find_peaks([y0, x0], self.defaultNbPoints, annontate, self.massif_contour)
                    if not points:
                        logger.warning("No peak found !!!")
                    else:
                        logger.info("Created group #%i with %i points" % (len(self.points), len(points)))
                if not points:
                    return
                self.points.append(points, ring=ring)
                npl = numpy.array(points)
                self.ax.plot(npl[:, 1], npl[:, 0], "o", scalex=False, scaley=False)
                update_fig(self.fig)
                sys.stdout.flush()
            elif event.button == 2:  # center click: remove 1 or all points from current group
                logger.debug("Button: %i, Key modifier: %s" % (event.button, event.key))
                poped_points = self.points.pop(ring) or []
                # in case not the full group is removed, would like to keep annotation
                # _except_ if the annotation is close to the removed point... too complicated!
                if len(self.ax.texts) > 0:
                    self.ax.texts.pop()
                if len(self.ax.lines) > 0:
                    self.ax.lines.pop()
                if event.key == '1' and len(poped_points) > 1:  # if '1' pressed AND > 1 point left:
                    # delete single closest point from current group
                    dists = [sqrt((p[1] - x0) ** 2 + (p[0] - y0) ** 2) for p in poped_points]  # p[1],p[0]!
                    # index and distance of smallest distance:
                    indexMin = min(enumerate(dists), key=operator.itemgetter(1))
                    removedPt = poped_points.pop(indexMin[0])
                    logger.info("x=%5.1f, y=%5.1f removed from group #%i (%i points left)" % (removedPt[1], removedPt[0], len(self.points), len(poped_points)))
                    # annotate (new?) 1st point and add remaining points back
                    pt = (poped_points[0][0], poped_points[0][1])
                    annontate(pt, (pt[0] + 10, pt[1] + 10))
                    self.points.append(poped_points, ring=ring)
                    npl = numpy.array(poped_points)
                    self.ax.plot(npl[:, 1], npl[:, 0], "o", scalex=False, scaley=False)
                elif len(poped_points) > 0:  # not '1' pressed or only 1 point left: remove complete group
                    logger.info("Removing group #%i containing %i points" % (len(self.points), len(poped_points)))
                else:
                    logger.info("No groups to remove")
                update_fig(self.fig)
                sys.stdout.flush()
    def finish(self, filename=None, callback=None):
        """
        Ask the ring number for the given points
        @param filename: file with the point coordinates saved
        @param callback: when provided, switch to non-blocking GUI mode and
            remember the callback instead of prompting on the console
        @return: weighted point list in blocking mode, None otherwise
        """
        logging.info(os.linesep.join(["Please use the GUI and:",
                                      " 1) Right-click: try an auto find for a ring",
                                      " 2) Right-click + Ctrl: create new group with one point",
                                      " 3) Right-click + Shift: add one point to current group",
                                      " 4) Right-click + m: find more points for current group",
                                      " 5) Center-click: erase current group",
                                      " 6) Center-click + 1: erase closest point from current group"]))
        if not callback:
            # Blocking console flow. NOTE(review): raw_input is Python 2 —
            # this module predates a Python 3 port.
            raw_input("Please press enter when you are happy with your selection" + os.linesep)
            # need to disconnect 'button_press_event':
            self.fig.canvas.mpl_disconnect(self.mpl_connectId)
            self.mpl_connectId = None
            print("Now fill in the ring number. Ring number starts at 0, like point-groups.")
            self.points.readRingNrFromKeyboard()  # readAngleFromKeyboard()
            if filename is not None:
                self.points.save(filename)
            return self.points.getWeightedList(self.data)
        else:
            self.point_filename = filename
            self.callback = callback
            gui_utils.main_loop = True
            # MAIN LOOP
            pylab.show()
def contour(self, data):
    """
    Overlay a contour-plot

    @param data: 2darray with the 2theta values in radians...
    """
    if self.fig is None:
        logging.warning("No diffraction image available => not showing the contour")
    else:
        # Drop previous overlays, keeping only the base diffraction image.
        while len(self.msp.images) > 1:
            self.msp.images.pop()
        while len(self.ct.images) > 1:
            self.ct.images.pop()
        while len(self.ct.collections) > 0:
            self.ct.collections.pop()
        if self.points.calibrant:
            # Contour levels at the calibrant's known 2theta ring positions.
            angles = [i for i in self.points.calibrant.get_2th()
                      if i is not None]
        else:
            angles = None
        try:
            # Preserve the current zoom: contour() would reset the axis limits.
            xlim, ylim = self.ax.get_xlim(), self.ax.get_ylim()
            self.ct.contour(data, levels=angles)
            self.ax.set_xlim(xlim);self.ax.set_ylim(ylim);
            print("Visually check that the curve overlays with the Debye-Sherrer rings of the image")
            print("Check also for correct indexing of rings")
        except MemoryError:
            logging.error("Sorry but your computer does NOT have enough memory to display the 2-theta contour plot")
        update_fig(self.fig)
def massif_contour(self, data):
    """
    Overlays a mask over a diffraction image

    @param data: mask to be overlaid (boolean-like 2darray)
    """
    if self.fig is None:
        logging.error("No diffraction image available => not showing the contour")
    else:
        # Build a gray RGBA image: masked-out pixels become value 100 on all
        # channels (including alpha), unmasked pixels stay fully transparent.
        tmp = 100 * numpy.logical_not(data)
        mask = numpy.zeros((data.shape[0], data.shape[1], 4), dtype="uint8")
        mask[:, :, 0] = tmp
        mask[:, :, 1] = tmp
        mask[:, :, 2] = tmp
        mask[:, :, 3] = tmp
        # Drop any previous mask overlay, keeping only the base image.
        while len(self.msp.images) > 1:
            self.msp.images.pop()
        try:
            # Preserve the current zoom: imshow() would reset the axis limits.
            xlim, ylim = self.ax.get_xlim(), self.ax.get_ylim()
            self.msp.imshow(mask, cmap="gray", origin="lower", interpolation="nearest")
            self.ax.set_xlim(xlim);self.ax.set_ylim(ylim);
        except MemoryError:
            logging.error("Sorry but your computer does NOT have enough memory to display the massif plot")
        update_fig(self.fig)
def closeGUI(self):
    """Dispose of the matplotlib figure (if any) and reclaim its memory."""
    if self.fig is None:
        return
    self.fig.clear()
    self.fig = None
    gc.collect()
# def format_coord(self, x, y):
# """
# Print coordinated in matplotlib toolbar
# """
# col = int(x + 0.5)
# row = int(y + 0.5)
# if col >= 0 and col < self.shape[1] and row >= 0 and row < self.shape[0]:
# z = self.data[row, col]
# return 'x=%.2f \t y=%.2f \t I=%1.4f' % (x, y, z)
# else:
# return 'x=%.2f \t y=%.2f \t I=None' % (x, y)
def on_plus_pts_clicked(self, *args):
    """Toolbar callback: switch to append mode (new picks join the current group)."""
    self.append_mode = True
    print(self.append_mode)
def on_minus_pts_clicked(self, *args):
    """Toolbar callback: leave append mode (new picks start a new group)."""
    self.append_mode = False
    print(self.append_mode)
def on_option_clicked(self, *args):
    """Toolbar callback: placeholder for the options dialog (not implemented yet)."""
    print("Option!")
def on_refine_clicked(self, *args):
    """
    callback function: freeze the GUI, stop intercepting clicks and launch
    the refinement via the registered callback.
    """
    print("refine, now!")
    self.sb_action.setDisabled(True)
    self.ref_action.setDisabled(True)
    self.spinbox.setEnabled(False)
    # BUGFIX: disconnect the handler *before* discarding its id. The original
    # cleared self.mpl_connectId first and then called mpl_disconnect(None),
    # leaving the click handler active during refinement.
    self.fig.canvas.mpl_disconnect(self.mpl_connectId)
    self.mpl_connectId = None
    pylab.ion()
    if self.point_filename:
        self.points.save(self.point_filename)
    if self.callback:
        self.callback(self.points.getWeightedList(self.data))
################################################################################
# ControlPoints
################################################################################
class ControlPoints(object):
    """
    This class contains a set of control points with (optionally) their ring number
    hence d-spacing and diffraction 2Theta angle ...
    """
    def __init__(self, filename=None, calibrant=None, wavelength=None):
        """
        @param filename: optional control-point file to load
        @param calibrant: Calibrant instance, calibrant name, file path or d-spacing sequence
        @param wavelength: wavelength in meter
        """
        self._sem = threading.Semaphore()
        self._angles = []  # angles are enforced in radians, conversion from degrees or q-space nm-1 are done on the fly
        self._points = []
        self._ring = []  # ring number ...
        self.calibrant = Calibrant(wavelength=wavelength)
        if filename is not None:
            self.load(filename)
        # Only honour the `calibrant` argument when the file provided no d-spacing.
        have_spacing = False
        for i in self.dSpacing:
            have_spacing = have_spacing or i
        if (not have_spacing) and (calibrant is not None):
            if isinstance(calibrant, Calibrant):
                self.calibrant = calibrant
            elif type(calibrant) in types.StringTypes:
                if calibrant in ALL_CALIBRANTS:
                    self.calibrant = ALL_CALIBRANTS[calibrant]
                elif os.path.isfile(calibrant):
                    self.calibrant = Calibrant(calibrant)
                else:
                    logger.error("Unable to handle such calibrant: %s" % calibrant)
            elif isinstance(calibrant, (numpy.ndarray, list, tuple, array)):
                # BUGFIX: the original tested isinstance(self.dSpacing, ...),
                # ignoring the type of the `calibrant` argument it then used.
                self.calibrant = Calibrant(dSpacing=list(calibrant))
            else:
                logger.error("Unable to handle such calibrant: %s" % calibrant)
        if not self.calibrant.wavelength:
            self.calibrant.set_wavelength(wavelength)

    def __repr__(self):
        self.check()
        lstOut = ["ControlPoints instance containing %i group of point:" % len(self)]
        if self.calibrant:
            lstOut.append(self.calibrant.__repr__())
        for ring, angle, points in zip(self._ring, self._angles, self._points):
            lstOut.append("%s %s: %s" % (ring, angle, points))
        return os.linesep.join(lstOut)

    def __len__(self):
        return len(self._angles)

    def check(self):
        """
        check internal consistency of the class
        """
        if len(self._angles) != len(self._points):
            logger.error("in ControlPoints: length of the two arrays are not consistent!!! angle: %i points: %s ",
                         len(self._angles), len(self._points))

    def reset(self):
        """
        remove all stored values and resets them to default
        """
        with self._sem:
            self._angles = []  # angles are enforced in radians, conversion from degrees or q-space nm-1 are done on the fly
            self._points = []
            self._ring = []

    def append(self, points, angle=None, ring=None):
        """
        @param points: list of points
        @param angle: 2-theta angle in radians
        @param ring: ring number; deduced from `angle` when omitted
        """
        with self._sem:
            self._angles.append(angle)
            self._points.append(points)
            if ring is None:
                if angle in self.calibrant.get_2th():
                    self._ring.append(self.calibrant.get_2th().index(angle))
                else:
                    if angle and (angle not in self.calibrant.get_2th()):
                        self.calibrant.append_2th(angle)
                        # BUGFIX: the original assigned a stray `self.rings`
                        # list instead of appending to `_ring`, breaking the
                        # invariant len(_ring) == len(_angles) == len(_points).
                        self._ring.append(self.calibrant.get_2th_index(angle))
                    else:
                        self._ring.append(None)
            else:
                self._ring.append(ring)

    def append_2theta_deg(self, points, angle=None, ring=None):
        """
        @param points: list of points
        @param angle: 2-theta angle in degrees
        @param ring: ring number
        """
        if angle:
            self.append(points, numpy.deg2rad(angle), ring)
        else:
            self.append(points, None, ring)

    def pop(self, idx=None):
        """
        Remove the set of points at given index (by default the last)

        @param idx: number of the ring to remove
        @return: the removed list of points, or None if nothing was removed
        """
        out = None
        if (idx is None) or (idx not in self._ring):
            with self._sem:
                if self._angles:
                    self._angles.pop()
                    self._ring.pop()
                    out = self._points.pop()
        else:
            with self._sem:
                i = self._ring.index(idx)
                self._angles.pop(i)
                self._ring.pop(i)
                out = self._points.pop(i)
        return out

    def save(self, filename):
        """
        Save a set of control points to a file

        @param filename: name of the file
        @return: None
        """
        self.check()
        with self._sem:
            lstOut = ["# set of control point used by pyFAI to calibrate the geometry of a scattering experiment",
                      "#angles are in radians, wavelength in meter and positions in pixels"]
            if self.calibrant.wavelength is not None:
                lstOut.append("wavelength: %s" % self.calibrant.wavelength)
            lstOut.append("dspacing:" + " ".join([str(i) for i in self.calibrant.dSpacing]))
            for idx, angle, points, ring in zip(range(self.__len__()), self._angles, self._points, self._ring):
                lstOut.append("")
                lstOut.append("New group of points: %i" % idx)
                lstOut.append("2theta: %s" % angle)
                lstOut.append("ring: %s" % ring)
                for point in points:
                    # Points are stored (y, x) but written x-first for readability.
                    lstOut.append("point: x=%s y=%s" % (point[1], point[0]))
            with open(filename, "w") as f:
                f.write("\n".join(lstOut))

    def load(self, filename):
        """
        load all control points from a file
        """
        if not os.path.isfile(filename):
            logger.error("ControlPoint.load: No such file %s", filename)
            return
        self.reset()
        tth = None
        ring = None
        points = []
        for line in open(filename, "r"):
            if line.startswith("#"):
                continue
            elif ":" in line:
                key, value = line.split(":", 1)
                value = value.strip()
                key = key.strip().lower()
                if key == "wavelength":
                    try:
                        self.calibrant.set_wavelength(float(value))
                    except Exception as error:
                        logger.error("ControlPoints.load: unable to convert to float %s (wavelength): %s", value, error)
                elif key == "2theta":
                    if value.lower() == "none":
                        tth = None
                    else:
                        try:
                            tth = float(value)
                        except Exception as error:
                            logger.error("ControlPoints.load: unable to convert to float %s (2theta): %s", value, error)
                elif key == "dspacing":
                    self.dSpacing = []
                    for val in value.split():
                        try:
                            fval = float(val)
                        except Exception:
                            fval = None
                        self.calibrant.append_dSpacing(fval)
                elif key == "ring":
                    if value.lower() == "none":
                        ring = None
                    else:
                        try:
                            ring = int(value)
                        except Exception as error:
                            logger.error("ControlPoints.load: unable to convert to int %s (ring): %s", value, error)
                elif key == "point":
                    vx = None
                    vy = None
                    if "x=" in value:
                        vx = value[value.index("x=") + 2:].split()[0]
                    if "y=" in value:
                        vy = value[value.index("y=") + 2:].split()[0]
                    if (vx is not None) and (vy is not None):
                        try:
                            x = float(vx)
                            y = float(vy)
                        except Exception as error:
                            # BUGFIX: the original format string had one %s for
                            # two arguments, so the log call itself failed.
                            logger.error("ControlPoints.load: unable to convert to float %s (point): %s", value, error)
                        else:
                            points.append([y, x])
                elif key.startswith("new"):
                    # A "New group" header closes the group collected so far.
                    if len(points) > 0:
                        with self._sem:
                            self._angles.append(tth)
                            self._ring.append(ring)
                            self._points.append(points)
                            tth = None
                            points = []
                else:
                    logger.error("Unknown key: %s", key)
        # Flush the trailing group (files do not end with a "New group" marker).
        if len(points) > 0:
            self._angles.append(tth)
            self._points.append(points)
            self._ring.append(ring)

    @deprecated
    def load_dSpacing(self, filename):
        """
        Load a d-spacing file containing the inter-reticular plan distance in Angstrom

        DEPRECATED: use a calibrant object
        """
        self.calibrant.load_file(filename)
        return self.calibrant.dSpacing

    @deprecated
    def save_dSpacing(self, filename):
        """
        save the d-spacing to a file

        DEPRECATED: use a calibrant object
        """
        self.calibrant.save_dSpacing(filename)

    def getList2theta(self):
        """
        Retrieve the list of control points suitable for geometry refinement
        """
        lstOut = []
        for tth, points in zip(self._angles, self._points):
            lstOut += [[pt[0], pt[1], tth] for pt in points]
        return lstOut

    def getListRing(self):
        """
        Retrieve the list of control points suitable for geometry refinement with ring number
        """
        lstOut = []
        for ring, points in zip(self._ring, self._points):
            lstOut += [[pt[0], pt[1], ring] for pt in points]
        return lstOut
    getList = getListRing

    def getWeightedList(self, image):
        """
        Retrieve the list of control points suitable for geometry refinement with ring number and intensities

        @param image: 2D array from which the intensity at each point is read
        @return: a (x,4) array with pos0, pos1, ring nr and intensity
        """
        lstOut = []
        for ring, points in zip(self._ring, self._points):
            lstOut += [[pt[0], pt[1], ring, image[int(pt[0] + 0.5), int(pt[1] + 0.5)]] for pt in points]
        return lstOut

    def readAngleFromKeyboard(self):
        """
        Ask the 2theta values for the given points
        """
        last2Theta = None
        for idx, tth, point in zip(range(self.__len__()), self._angles, self._points):
            bOk = False
            while not bOk:
                if tth is not None:
                    last2Theta = numpy.rad2deg(tth)
                res = raw_input("Point group #%2i (%i points)\t (%6.1f,%6.1f) \t [default=%s] 2Theta= " % (idx, len(point), point[0][1], point[0][0], last2Theta)).strip()
                if res == "":
                    res = last2Theta
                try:
                    tth = float(res)
                except (ValueError, TypeError):
                    logging.error("I did not understand your 2theta value")
                else:
                    if tth > 0:
                        last2Theta = tth
                        self._angles[idx] = numpy.deg2rad(tth)
                        bOk = True

    def readRingNrFromKeyboard(self):
        """
        Ask the ring number values for the given points
        """
        lastRing = None
        for idx, ring, point in zip(range(self.__len__()), self._ring, self._points):
            bOk = False
            while not bOk:
                defaultRing = 0
                if ring is not None:
                    defaultRing = ring
                elif lastRing is not None:
                    defaultRing = lastRing + 1
                res = raw_input("Point group #%2i (%i points)\t (%6.1f,%6.1f) \t [default=%s] Ring# " % (idx, len(point), point[0][1], point[0][0], defaultRing)).strip()
                if res == "":
                    res = defaultRing
                try:
                    inputRing = int(res)
                except (ValueError, TypeError):
                    logging.error("I did not understand the ring number you entered")
                else:
                    if inputRing >= 0 and inputRing < len(self.calibrant.dSpacing):
                        # BUGFIX: remember the ring actually entered; the
                        # original stored the (possibly None) pre-existing
                        # value, breaking the lastRing+1 default for the next group.
                        lastRing = inputRing
                        self._ring[idx] = inputRing
                        self._angles[idx] = self.calibrant.get_2th()[inputRing]
                        bOk = True
                    else:
                        logging.error("Invalid ring number %i (range 0 -> %2i)" % (inputRing, len(self.calibrant.dSpacing) - 1))

    def setWavelength_change2th(self, value=None):
        # Changing the wavelength shifts the 2theta angles; d-spacings stay fixed.
        with self._sem:
            if self.calibrant is None:
                self.calibrant = Calibrant()
            self.calibrant.setWavelength_change2th(value)
            self._angles = [self.calibrant.get_2th()[i] for i in self._ring]

    def setWavelength_changeDs(self, value=None):
        """
        This is probably not a good idea, but who knows !
        """
        with self._sem:
            if value:
                if self.calibrant is None:
                    self.calibrant = Calibrant()
                self.calibrant.setWavelength_changeDs(value)

    def set_wavelength(self, value=None):
        with self._sem:
            if value:
                self.calibrant.set_wavelength(value)

    def get_wavelength(self):
        return self.calibrant._wavelength
    wavelength = property(get_wavelength, set_wavelength)

    def get_dSpacing(self):
        if self.calibrant:
            return self.calibrant.dSpacing
        else:
            return []

    def set_dSpacing(self, lst):
        if not self.calibrant:
            self.calibrant = Calibrant()
        self.calibrant.dSpacing = lst
    dSpacing = property(get_dSpacing, set_dSpacing)
|
import scrapy
import requests
from urllib.parse import urlparse
import news_crawler#如果找不到,至檔案夾將Mark Directory as改成root
from items import ScrapyCaseItem
class GoogleNewsSpider(scrapy.Spider):
    """Crawl Google News search results for a keyword and dispatch each article
    to a site-specific crawler framework (news_crawler)."""
    name = "googlenews"
    USER_AGENT = "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36"

    def __init__(self, words=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.words = words
        self.start_urls = [f'https://news.google.com/search?q={self.words}&hl=zh-TW&gl=TW&ceid=TW%3Azh-Hant']

    def start_requests(self):
        # Use a browser-like User-Agent so Google serves the normal page.
        for url in self.start_urls:
            yield scrapy.Request(url, headers={"User-Agent": self.USER_AGENT})

    def parse(self, response):
        # NOTE(review): this XPath is tied to Google News' generated markup and
        # will break whenever the page layout changes.
        news_xpath = response.xpath('//*[@id="yDmH0d"]/c-wiz/div/div[2]/div[2]/div/main/c-wiz/div[1]/div')
        count_true = 0
        count_false = 0
        # NOTE(review): range(len(...) - 1) skips the last node — confirm the
        # final div really is not an article before changing this.
        for i in range(len(news_xpath) - 1):
            try:
                title = news_xpath[i].xpath('div/article/h3/a/text()').extract_first()  # headline
                web = news_xpath[i].xpath('div/article/div[2]/div/a/text()').extract_first()  # source site
                url = 'https://news.google.com' + news_xpath[i].xpath('div/article/h3/a/@href').extract_first()  # redirect url
                origin_url = self.getOriginUrl(url)  # resolved article url
                key_domain = urlparse(origin_url).netloc  # domain
                # Hand over to the matching crawler framework.
                yield scrapy.Request(origin_url, meta={'resource': key_domain, 'origin_url': origin_url}, headers={"User-Agent": self.USER_AGENT}, callback=self.switch)
                count_true += 1
            # BUGFIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
            except Exception:
                print('錯誤:', news_xpath[i])
                count_false += 1
        print('總成功筆數:', count_true)
        print('總失敗筆數:', count_false)

    def getOriginUrl(self, googleUrl):
        """Follow the Google redirect to obtain the article's original URL."""
        return requests.get(googleUrl).url

    def switch(self, response):
        """Run the site-specific crawler on the page and yield an item if parsed."""
        source = response.meta['resource']
        origin_url = response.meta['origin_url']
        title, content, time, web = news_crawler.getCrawler(source, response)
        # BUGFIX: compare to None with `is`, not `==`.
        if title is None or content is None or time is None or web is None:
            # No framework matched this site: record the URL for later review.
            print('此網頁無爬蟲框架:' + origin_url)
            with open('無爬蟲框架集.csv', 'a', encoding='utf_8_sig') as f:
                f.write(origin_url)
                f.write('\n')
        else:
            print('存取成功:' + origin_url)
            item = ScrapyCaseItem()
            item['title'] = title
            item['content'] = content
            item['time'] = time
            item['web'] = web
            yield item
from django.db import models
# Create your models here.
class Categorie(models.Model):
    """A product category; category names are unique."""
    name = models.CharField(max_length=100, unique=True)

    def __str__(self):
        # Consistent with Formation.__str__: show the name in admin/shell listings.
        return str(self.name)
class Formation(models.Model):  # will inherit the feature of a model
    """A purchasable training course belonging to a Categorie."""
    name = models.CharField(max_length=100)
    img = models.ImageField(upload_to='pics')  # stored under MEDIA_ROOT/pics
    desc = models.TextField()
    price = models.IntegerField()
    etat = models.BooleanField(default=True)  # actived:true
    # Deleting a Categorie cascades to its Formations.
    categorie = models.ForeignKey(
        Categorie, on_delete=models.CASCADE, null=False)

    def __str__(self):
        return str(self.name)
|
"Python Program to Add Two Matrices"
a = [[1,2,3],
[4,5,6],
[7,8,9]]
b =[[1,4,3],
[5,2,7],
[9,6,8]]
r = [[0,0,0],
[0,0,0],
[0,0,0]]
for i in range(len(a)):
for j in range(len (a[0])):
r[i][j]=a[i][j]+b[i][j]
print r
for c in r:
print c
|
if __name__ == '__main__':
    # Read n students; each line is a name followed by space-separated scores.
    n = int(input())
    student_marks = {}
    for _ in range(n):
        name, *line = input().split()
        scores = list(map(float, line))
        student_marks[name] = scores
    student_name = input()
    scores = student_marks[student_name]
    # BUGFIX: average over the actual number of scores instead of a
    # hard-coded 3, so inputs with any score count are handled correctly.
    average = sum(scores) / len(scores)
    print("{0:.2f}".format(average))
|
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1456180775.315844
_enable_loop = True
_template_filename = '/Users/Jordan/Documents/BYU/0 - Senior Year/0 - Winter 2016/0 - 413/Colonial_Heritage_Foundation/manager/templates/products.edit.html'
_template_uri = 'products.edit.html'
_source_encoding = 'utf-8'
import os, os.path, re, json
_exports = ['top_content_area']
def _mako_get_namespace(context, name):
    # Auto-generated by Mako: return the cached namespace for this template,
    # building the namespace table lazily on first access.
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    # Auto-generated by Mako: this template declares no <%namespace> tags.
    pass
def _mako_inherit(template, context):
    # Auto-generated by Mako: wire up template inheritance from app_base.htm.
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, 'app_base.htm', _template_uri)
def render_body(context,**pageargs):
    # Auto-generated by Mako: render the template's top-level body.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        form = context.get('form', UNDEFINED)
        def top_content_area():
            return render_top_content_area(context._locals(__M_locals))
        product_type = context.get('product_type', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\n\n')
        # Only render the block inline when no parent template overrides it.
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'top_content_area'):
            context['self'].top_content_area(**pageargs)
        __M_writer('\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_top_content_area(context,**pageargs):
    # Auto-generated by Mako: render the <%block name="top_content_area">
    # containing the product-edit form.
    __M_caller = context.caller_stack._push_frame()
    try:
        form = context.get('form', UNDEFINED)
        def top_content_area():
            return render_top_content_area(context)
        product_type = context.get('product_type', UNDEFINED)
        __M_writer = context.writer()
        __M_writer('\n    <h3>Edit product info:</h3>\n    <hr />\n    <br />\n    <form method="POST">\n        <table>\n            ')
        __M_writer(str( form.as_table() ))
        __M_writer('\n        </table>\n        <br />\n        <input id="pType_saved" type="submit" class="btn btn-primary" value="Save Changes" data-pType="')
        __M_writer(str(product_type))
        __M_writer('"/>\n    </form>\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"uri": "products.edit.html", "source_encoding": "utf-8", "line_map": {"48": 3, "66": 60, "37": 1, "56": 3, "57": 9, "42": 14, "59": 12, "28": 0, "58": 9, "60": 12}, "filename": "/Users/Jordan/Documents/BYU/0 - Senior Year/0 - Winter 2016/0 - 413/Colonial_Heritage_Foundation/manager/templates/products.edit.html"}
__M_END_METADATA
"""
|
from flask_restful import Resource, marshal_with, fields
from flask import current_app, request
import base64
import re
import unittest
import random
#
from libs.cutoms import testtools
from layers.use_case_layer.actors import SomeoneActor
from libs.cutoms import ex_reqparse
from layers.ui_layer.rest import arguments
token_resource_fields = {
'access_token': fields.String,
'refresh_token': fields.String,
'token_type': fields.String,
'expire': fields.Integer
}
class TokenResource(Resource):
    """REST resource issuing access/refresh tokens from HTTP Basic credentials."""

    def __init__(self):
        self._someone_actor = SomeoneActor()

    @marshal_with(token_resource_fields)
    def post(self):
        """Create a token from the Authorization: Basic header; returns 201."""
        request_parser = ex_reqparse.ExRequestParser()
        request_parser.add_argument(arguments.BASIC_AUTH_HEADER)
        request_args = request_parser.parse_args()
        # Strip the "Basic " scheme prefix, leaving the base64 payload.
        b64client = re.sub('^Basic ', '', request_args['Authorization'])
        try:
            username, password = base64.b64decode(b64client).split(':')
        # BUGFIX: a bare `except:` also trapped SystemExit/KeyboardInterrupt;
        # malformed credentials fall back to empty strings, and the actor
        # decides whether that is an authentication failure.
        except Exception:
            username, password = ('', '')
        token = self._someone_actor.create_token(username, password)
        return token, 201
class TokenResourceTestCase(unittest.TestCase):
    """Integration test: POSTing Basic credentials to /api/token yields 201."""

    def setUp(self):
        # Requires an active Flask application context (see __main__ below).
        self._test_client = current_app.test_client()

    def test_post(self):
        # Register a fresh, randomly-named user, then request a token with
        # that user's credentials via HTTP Basic auth.
        username = "test_username_%d" % random.randint(99, 1000)
        password = 'admin'
        testtools.regist(self._test_client, username, password)
        auth_string = base64.b64encode("%s:%s" % (username, password))
        headers = {
            'Authorization': "Basic %s" % auth_string
        }
        data = {
            'type': "NORMAL"
        }
        rv = self._test_client.post('/api/token', headers=headers,data=data)
        self.assertTrue(
            rv.status_code == 201,
            msg="http code is %d not 201" % rv.status_code)
if __name__ == "__main__":
from app import App
from constants.enums import AppMode
app = App(AppMode.Test)
app.init()
unittest.main()
|
# Import the module
import sys

print(sys.argv)

# Iterate over the list of command line arguments
for i in sys.argv:
    print(i)

# Run from the command line and pass in three arguments
name = sys.argv[1]
age = sys.argv[2]
hobby = sys.argv[3]
print(name,age,hobby)

# List of paths searched when importing modules
print(sys.path)
import time
import sys
import select
class pumpkin_driver:
    """Drive a 4-pin motor rig via GPIO based on text commands.

    NOTE(review): `GPIO` is never imported in this file — presumably RPi.GPIO
    with pin setup done elsewhere; confirm before deploying.
    """

    def __init__(self):
        self.viper1 = [2, 3, 4, 17]  # GPIO pin numbers driving the motors
        self.action_time = 0  # timestamp of the last motion command

    def parse_line(self, line_in):
        '''
        This function takes in a command string `line_in`.

        Allowed strings are:
        - 'forward'
        - 'reverse'
        - 'left'
        - 'right'
        - 'stop'

        Only 'forward' and 'stop' are implemented so far.
        '''
        # BUGFIX: the original compared with `is` against a string literal and
        # never stripped the trailing newline from stdin lines, so the test
        # could never match and every command was silently ignored.
        command = line_in.strip()
        if command == 'forward':
            self.forward(self.viper1)
            self.action_time = time.time()
        elif command == 'stop':
            self.stop(self.viper1)

    def forward(self, array):
        # Energize the pin pattern that drives both motors forwards.
        GPIO.output(array[0], True)
        GPIO.output(array[1], False)
        GPIO.output(array[2], True)
        GPIO.output(array[3], False)

    def stop(self, array):
        # De-energize all drive pins.
        GPIO.output(array[0], False)
        GPIO.output(array[1], False)
        GPIO.output(array[2], False)
        GPIO.output(array[3], False)
if __name__ == '__main__':
    jim = pumpkin_driver()
    while True:
        # Non-blocking poll of stdin for a new command line.
        if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
            line = sys.stdin.readline()
            if line:
                jim.parse_line(line)
            else:
                # Empty read means EOF: stop polling and exit.
                # BUGFIX: `print "eof"` was a Python-2-only statement; the
                # parenthesized form works on both Python 2 and 3.
                print("eof")
                exit(0)
        else:
            # Safety timeout: stop the motors one second after the last command.
            if time.time() - jim.action_time > 1:
                jim.stop(jim.viper1)
            time.sleep(.01)
|
import requests
from bs4 import BeautifulSoup
from prac import create_category_folder
from detail_page import extract_img_info as extract_product_info
url = "https://marketb.kr/"
DataBASE = []
headers = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'}
r = requests.get(url,headers=headers)
soup = BeautifulSoup(r.text, 'html.parser')
def extract_product_url(URL,category_title):
    """Scrape the detail pages of up to 30 products on one category listing page.

    NOTE(review): only the value produced for the *last* product in the loop
    is returned — earlier results are discarded. Presumably a list of all
    products was intended; confirm before relying on the return value.
    """
    category_name = f"{category_title}"
    create_category_folder(category_name)
    r = requests.get(URL,headers=headers)
    soup = BeautifulSoup(r.text, 'html.parser')
    # First 30 product tiles on the listing page.
    prdLists = soup.find("ul",{"class":"prdList column4"}).find_all("li",{"class":"item"})[:30]
    for prd in prdLists:
        link = prd.find("div",{"class":"box"}).find("div",{"class":"thumbnail"}).find("a")["href"]
        product_info_result = extract_product_info(f'{url}{link}&sort_method=6#Product_ListMenu',category_name)
    return product_info_result
def extract_max_page(URL):
    """Read the pagination widget of a category page.

    NOTE(review): the real page count is computed into `max_page` but the
    function returns a constant 1, limiting the crawl to a single page per
    category — confirm whether `return max_page` was intended or whether
    this is a deliberate throttle.
    """
    r = requests.get(URL,headers=headers)
    soup = BeautifulSoup(r.text, 'html.parser')
    pages = soup.find("div",{"class":"xans-product-normalpaging"}).find("ol").find_all("li")
    max_page = int(pages[-1].text.strip("\n").strip(""))
    return 1
def extract_marketb(URL):
    """Walk the top-menu categories (items 2..8) and crawl each one's listings."""
    categories = soup.find("nav",{"id":"topMenu"}).find("ul",{"class":"topMenuLi_ul"}).find_all("li",{"class":"topMenuLi"})[2:9]
    for category in categories:
        category_URL = url+category.find("a",{"class":"menuLink"})["href"]
        extract_max_page_result = extract_max_page(category_URL)
        category_title = category.find("a",{"class":"menuLink"}).text
        # NOTE(review): page numbering starts at 0 here while shop pages are
        # usually 1-based — confirm `&page=0` is accepted by the site.
        for page in range(extract_max_page_result):
            URL = f"{category_URL}&page={page}"
            product_result = extract_product_url(URL,category_title)
            DataBASE.append({category_title:product_result})
    return DataBASE
# Kick off the full crawl at import time.
extract_marketb(url)
|
"""
Dataset/Corpus related modules
"""
from .auto import ONLINE_EVAL_DATA_REGISTRY, get_auto_dataset
|
# -*- coding: utf-8 -*-
__author__ = 'bert'
# WeChat (Weixin) API endpoint configuration.
WEIXIN_API_PROTOCAL = 'https'
WEIXIN_API_DOMAIN = 'api.weixin.qq.com'  # main API host
WEIXIN_API_V3_DOMAIN = 'api.mch.weixin.qq.com'  # merchant/payment (v3) host

# HTTP verbs used when dispatching API calls.
API_GET= 'get'
API_POST= 'post'

# Dotted import paths of the API wrapper classes, resolved dynamically by key.
API_CLASSES = {
    'query_order': 'pay.weixin.api.api_pay_queryorder.WeixinPayQueryOrderApi',
    'get_unifiedorder': 'pay.weixin.api.api_pay_unifiedorder.WeixinPayUnifiedOrderApi',
    'get_openid': 'pay.weixin.api.api_pay_openid.WeixinPayOpenidApi',
}
from random import seed, choice, randint

seed()

# Word pools, one file per category (e.g. "adjective.txt" holds one word per line).
list_of_sources = ["concrete_noun", "title", "adjective", "abstract_noun"]
dictionary = {}
for source in list_of_sources:
    with open("{}.txt".format(source)) as f:
        dictionary[source] = f.readlines()

# Sentence templates with {category} placeholders, one per line.
with open("patterns.txt") as f:
    patterns = f.readlines()

# Emit 20 random lines: pick a pattern, fill each placeholder with a random word.
for _ in range(20):
    values = {key: choice(words).strip() for key, words in dictionary.items()}
    print(choice(patterns).format(**values).strip())
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import estimators
from interfazbd import InterfazBD
def calcular_performances_intermedias(metodo_testeable, metodo_perfecto, valores_consultas):
    """Return the absolute estimation error for each query value."""
    return [abs(metodo_perfecto(valor) - metodo_testeable(valor))
            for valor in valores_consultas]
def calcular_performance_global(metodo_testeable, metodo_perfecto):
    """Mean absolute error over the fixed query sweep 1, 11, ..., 991."""
    valores = range(1, 1000, 10)
    errores = calcular_performances_intermedias(metodo_testeable, metodo_perfecto, valores)
    return sum(errores) / len(valores)
def calcular_error(archivo, tipo_estimador, selectividad):
    """Compute the mean estimation error for each histogram parameter value.

    @param archivo: path to the sqlite database
    @param tipo_estimador: "classic", "steps" o "propio"
    @param selectividad: debe ser "equal" o "greater"
    @return: diccionario 'p' -> error medio
    """
    errores = {}  # diccionario 'p' -> error medio
    for p in range(5, 200, 5):
        if tipo_estimador == "classic":
            estimador = estimators.ClassicHistogram(archivo, 'datos', 'c', p)
        elif tipo_estimador == "steps":
            estimador = estimators.DistributionSteps(archivo, 'datos', 'c', p)
        elif tipo_estimador == "propio":
            estimador = estimators.EstimadorGrupo(archivo, 'datos', 'c', p)
        else:
            # BUGFIX: an unknown estimator type previously fell through and
            # raised a confusing NameError on `estimador` below.
            raise ValueError("tipo_estimador desconocido: %r" % (tipo_estimador,))
        perfecto = estimators.EstimadorPerfecto(archivo, 'datos', 'c', p)
        bd = InterfazBD(archivo)  # NOTE(review): never used here — confirm it can be dropped
        if selectividad == "equal":
            errores[p] = calcular_performance_global(estimador.estimate_equal, perfecto.estimate_equal)
        elif selectividad == "greater":
            errores[p] = calcular_performance_global(estimador.estimate_greater, perfecto.estimate_greater)
    return errores
def graficar(archivo, datos):
    """Plot mean error vs. parameter S for every estimator and save to *archivo*.

    @param archivo: output image path
    @param datos: dict estimator-name -> {p: mean error}
    """
    plt.clf()
    for tipo_estimador in datos:
        serie = datos[tipo_estimador]
        # Sort by parameter so the curve is drawn left to right.
        x = sorted(serie.keys())
        y = [serie[p] for p in x]
        plt.errorbar(x, y, label=tipo_estimador.title())
    plt.xlabel("Parametro S")
    plt.ylabel("Error medio")
    plt.legend()
    plt.savefig(archivo)
def process():
    """Benchmark every distribution x selectivity x estimator combination
    and save one comparison plot per (distribution, selectivity) pair."""
    for distribucion in ("normal", "uniforme"):
        archivo = "datasets/" + distribucion + ".sqlite3"
        for seleccionador in ("equal", "greater"):
            performance = {}
            for tipo_estimador in ("classic", "steps", "propio"):
                performance[tipo_estimador] = calcular_error(archivo, tipo_estimador, seleccionador)
            grafico = "datasets/img/" + distribucion + seleccionador.title() + ".png"
            graficar(grafico, performance)
            plt.clf()
if __name__ == "__main__":
process()
|
import pygame
import os
class Paddle(pygame.sprite.Sprite):
    """Player paddle sprite that cycles through a list of animation frames."""

    def __init__(self, images, width, height):
        """Scale every frame in *images* to (width, height) and show the first."""
        pygame.sprite.Sprite.__init__(self)
        self.index = 0
        self.image = pygame.Surface([width, height])
        # Pre-scale all frames once so updateImage() is just a list lookup.
        self.transformed_images = [
            pygame.transform.scale(img, (width, height)) for img in images
        ]
        self.image = self.transformed_images[self.index]
        # The rect carries the sprite's position; movement() mutates rect.x.
        self.rect = self.image.get_rect()

    def updateImage(self):
        """Advance to the next animation frame, wrapping around at the end."""
        self.index = (self.index + 1) % len(self.transformed_images)
        self.image = self.transformed_images[self.index]

    def movement(self, keys_pressed, VEL, screen_Width):
        """Shift the paddle by VEL pixels per held arrow key, clamped on-screen."""
        if keys_pressed[pygame.K_LEFT] and self.rect.x > 0:  # LEFT
            self.rect.x -= VEL
        if keys_pressed[pygame.K_RIGHT] and self.rect.x + self.rect.width < screen_Width:  # RIGHT
            self.rect.x += VEL
from sys import stdin
from tqdm import tqdm
import numpy as np

# Read every instruction line from stdin and tokenize on whitespace,
# e.g. "turn on 0,0 through 2,2" -> ['turn', 'on', '0,0', 'through', '2,2'].
insts = stdin.readlines()
insts = [inst.strip().split() for inst in insts]
def compile(inst):
    """Parse a tokenized instruction into (kind, corner_a, corner_b).

    kind is 'turn on', 'turn off' or 'toggle'; corners are [x, y] int pairs.
    """
    if inst[1] in ('on', 'off'):
        kind = " ".join(inst[:2])
        token_a, token_b = inst[2], inst[4]
    else:
        kind = 'toggle'
        token_a, token_b = inst[1], inst[3]

    def parse_corner(token):
        return [int(part) for part in token.split(',')]

    return kind, parse_corner(token_a), parse_corner(token_b)
class Grid:
    """1000x1000 grid of light brightness levels (AoC 2015 day 6, part 2)."""

    def __init__(self):
        # Integer brightness per cell, all initially dark.
        self.grid = np.zeros((1000, 1000), dtype=int)

    def execute(self, kind, a, b):
        """Apply one instruction to the inclusive rectangle a..b.

        'turn on' adds 1, 'toggle' adds 2, 'turn off' subtracts 1 with a
        floor of 0 per cell — identical to the original per-cell loops, but
        vectorized: the original was accidentally O(area) in Python.
        """
        region = self.grid[a[0]:b[0] + 1, a[1]:b[1] + 1]  # view, not a copy
        if kind == 'turn on':
            region += 1
        elif kind == 'turn off':
            # Clamp at zero cell-wise; out= writes through the view.
            np.maximum(region - 1, 0, out=region)
        else:
            region += 2

    def __str__(self):
        # Total brightness of the whole grid.
        return str(int(self.grid.sum()))
# Apply every instruction to a fresh grid and print the total brightness.
grid = Grid()
for inst in tqdm(insts):
    kind, a, b = compile(inst)
    grid.execute(kind, a, b)
print(grid)
|
import json
from collections import OrderedDict
from brine.api import get_dataset_for_info
def info(dataset_name):
    """Pretty-print the name, description and versions of a dataset as JSON."""
    response = get_dataset_for_info(dataset_name)
    # Fixed field order so the output is stable and readable.
    ordered = OrderedDict(
        (field, response[field]) for field in ('name', 'description', 'versions')
    )
    print(json.dumps(ordered, indent=4))
|
def function_names(name):
    '''
    (str) -> list of str
    Will take in the name of a file, open it and read it.
    It will then return the function names in that file.
    REQ: File needs to be in PEP8 format
    '''
    names = []
    # BUGFIX: the original opened the file and never closed it (handle leak);
    # a with-statement closes it even on error. It also shadowed the builtin
    # `file` name — renamed to `source`.
    with open(name, "r") as source:
        for line in source:
            # A definition line contains "def"; the name sits between
            # "def " and the first "(" on that line.
            if "def" in line:
                start = line.index("def") + 4
                end = line.index("(")
                names.append(line[start:end])
    return names
def justified(file):
    '''
    (str) -> bool
    Will take in a file, open it and read it.
    It will then return False if at least one line of the file
    begins with a space or a tab, and True otherwise.
    REQ: File needs to be in PEP8 format
    '''
    result = True
    # BUGFIX: the original opened the file and never closed it (handle leak);
    # the with-statement guarantees the handle is released.
    with open(file, "r") as source:
        for line in source:
            # startswith accepts a tuple: one call covers both space and tab.
            if line.startswith((" ", "\t")):
                result = False
    return result
__author__ = 'mac'
class Solution():
    def longestPalindrome(self, s):
        """Return the longest palindromic substring of *s*.

        Strategy: for each index i, look at every later occurrence of s[i];
        the span between the two occurrences is a palindrome candidate.
        BUGFIX: the original printed the forward/backward strings on every
        candidate check — debug output left in the hot loop; removed, along
        with the unused `len_sub` local. Behaviour is otherwise unchanged.
        """
        length = len(s) - 1
        end_index = 0
        i = 0
        longest_sub = ""
        longest_len = 0
        if length == 0 or length == -1:
            # Zero- or one-character strings are trivially palindromes.
            return s
        while i < length:
            temp = i
            while s[temp + 1:].find(s[i]) != -1:
                end_index = s[temp + 1:].index(s[i]) + temp + 1
                sub = s[i: end_index + 1]
                if sub == sub[::-1]:
                    if len(sub) >= longest_len:
                        longest_len = len(sub)
                        longest_sub = sub
                temp = end_index
            i += 1
            if len(s[i:]) < longest_len:
                # No remaining suffix can beat the current best: stop early.
                return longest_sub
        if longest_sub == "":
            # No multi-character palindrome found; any single char qualifies.
            return s[length]
        return longest_sub
s1 = "babad"
s2 = "cbbd"
s3 = "a"
s4 = "abcdeecba"
s5 = "abcde"
f = Solution()
print(f.longestPalindrome(s5)) |
#!/usr/bin/env python
#
# Copyright 2012 Rafe Kaplan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from mox3 import mox
from sordid.props import prop_testutils
from sordid.props import proputils
class ConfigPropNameTest(mox.MoxTestBase):
  """Tests for proputils.config_prop_name: validation and __config__ dispatch."""

  def setUp(self):
    super(ConfigPropNameTest, self).setUp()
    # A fresh empty class per test so configuration cannot leak between cases.
    class TargetClass(object):
      pass
    self.target_class = TargetClass

  def testNotConfigurable(self):
    # Plain values with no __config__ hook are reported as non-properties.
    for unconfigurable in (1, None, [], {}, 'str'):
      self.assertFalse(
          proputils.config_prop_name(self.target_class, 'a', unconfigurable))

  def testConfigurable(self):
    # A value exposing __config__ is configured with (class, name) and
    # reported True.  The mock call below is the record phase.
    configurable = self.mox.CreateMockAnything()
    configurable.__config__(self.target_class, 'a')
    self.mox.ReplayAll()
    self.assertTrue(
        proputils.config_prop_name(self.target_class, 'a', configurable))

  def testClassNotType(self):
    # First argument must be an actual class.
    for not_class in (1, None, [], {}, 'str'):
      self.assertRaises(TypeError,
                        proputils.config_prop_name,
                        not_class, 'a', 'does not matter')

  def testNameNotString(self):
    # Property name must be a string.
    for not_string in (1, None, [], {}, object):
      self.assertRaises(TypeError,
                        proputils.config_prop_name,
                        self.target_class, not_string, 'does not matter')

  def testNameEmpty(self):
    # Property name must be non-empty.
    self.assertRaises(ValueError,
                      proputils.config_prop_name,
                      self.target_class, '', 'does not matter')
class ConfigPropTest(mox.MoxTestBase):
  """Tests for proputils.config_prop."""

  def DoCustomConfigPropTest(self, is_prop):
    # A class-level __config_prop__ hook takes precedence; config_prop must
    # return whatever the hook returns.
    class TargetClass(object):
      __config_prop__ = self.mox.CreateMockAnything()
    TargetClass.__config_prop__('a', 'prop1').AndReturn(is_prop)
    self.mox.ReplayAll()
    self.assertEquals(is_prop,
                      proputils.config_prop(TargetClass, 'a', 'prop1'))

  def testCustomConfigPropTrue(self):
    self.DoCustomConfigPropTest(True)

  def testCustomConfigPropFalse(self):
    self.DoCustomConfigPropTest(False)

  def testConfigProp(self):
    class TargetClass(object):
      pass
    # Test will pretend to be property.
    self.__config__ = self.mox.CreateMockAnything()
    self.__config__(TargetClass, 'a')  # expected call (record phase)
    self.mox.ReplayAll()
    self.assertEquals(True,
                      proputils.config_prop(TargetClass, 'a', self))

  def testJustAttributes(self):
    # Non-property values must be rejected and must not mutate the class.
    class TargetClass(object):
      name = 'do not overwrite'
      def a_method(self):
        pass
      @classmethod
      def a_class_method(cls):
        pass
    for not_property in (10, 'str', [], {}, None, TargetClass, TargetClass(),
                         TargetClass.a_method, TargetClass.a_class_method):
      self.assertFalse(proputils.config_prop(TargetClass, 'a', not_property))
      # TargetClass (and its instances) already carry 'name'; it must be
      # untouched.  No other candidate should have grown a 'name' attribute.
      if not_property == TargetClass or isinstance(not_property, TargetClass):
        self.assertEquals('do not overwrite', TargetClass.name)
      else:
        self.assertFalse(hasattr(not_property, 'name'),
                         'Value \'%r\' has a name' % (not_property,))
class ConfigPropsTest(mox.MoxTestBase):
  """Tests for proputils.config_props."""

  def testCustomConfigPropsMethod(self):
    # A class-level __config_props__ hook receives the whole attrs dict.
    class TargetClass(object):
      __config_props__ = self.mox.CreateMockAnything()
    attrs = {'a': 1}
    TargetClass.__config_props__(attrs)  # expected call (record phase)
    self.mox.ReplayAll()
    proputils.config_props(TargetClass, attrs)

  def testCustomConfigPropMethod(self):
    # Without __config_props__, each attr is offered to __config_prop__.
    # Dict iteration order is not relied upon, hence InAnyOrder().
    class TargetClass(object):
      __config_prop__ = self.mox.CreateMockAnything()
    mox.UnorderedGroup  # NOTE(review): no-op expression — likely leftover
    TargetClass.__config_prop__('a', 1).InAnyOrder().AndReturn(False)
    TargetClass.__config_prop__('b', 2).InAnyOrder().AndReturn(True)
    self.mox.ReplayAll()
    proputils.config_props(TargetClass, {'a': 1, 'b': 2})

  def testCustomConfig(self):
    # Each property's own __config__ is invoked with (class, attr name).
    prop1 = self.mox.CreateMockAnything()
    prop2 = self.mox.CreateMockAnything()
    class TargetClass(object):
      pass
    prop1.__config__(TargetClass, 'a')
    prop2.__config__(TargetClass, 'b')
    self.mox.ReplayAll()
    proputils.config_props(TargetClass, {'a': prop1, 'b': prop2})

  def testStillConstrains(self):
    # Name validation (non-string key) still applies on this code path.
    class TargetClass(object):
      pass
    self.mox.ReplayAll()
    self.assertRaises(TypeError,
                      proputils.config_props, TargetClass, {1: self})

  def testNotAttrs(self):
    # When attrs is omitted, the full dir() of the class is used.
    class TargetClass(object):
      __config_props__ = self.mox.CreateMockAnything()
      a = 1
      b = 'str'
    attrs = dict((n, getattr(TargetClass, n)) for n in dir(TargetClass))
    TargetClass.__config_props__(attrs)  # expected call (record phase)
    self.mox.ReplayAll()
    proputils.config_props(TargetClass)
class PropertiedTypeTest(mox.MoxTestBase):
  """Tests for the PropertiedType metaclass."""

  def testMetaClass(self):
    # Creating a class under the metaclass must forward the new class and
    # its attribute dict to config_props.
    self.mox.StubOutWithMock(proputils, 'config_props')
    proputils.config_props(mox.Func(lambda c: c.__name__ == 'MyClass'),
                           {'__module__': __name__,
                            '__qualname__': 'PropertiedTypeTest.testMetaClass.<locals>.MyClass',
                            'a': 'a',
                            'b': 'b',
                            })
    self.mox.ReplayAll()
    class MyClass(object, metaclass=proputils.PropertiedType):
      a = 'a'
      b = 'b'
class PropDesc(object):
  """Minimal property stand-in that records where it was configured."""

  # Both stay None until __config__ is called.
  cls = None
  name = None

  def __config__(self, cls, name):
    """Remember the owning class and the attribute name bound to this prop."""
    self.cls, self.name = cls, name
class PropertiedTest(mox.MoxTestBase):
  """Tests for the Propertied base class."""

  def testMetaClass(self):
    # Subclassing Propertied must route the new class through config_props.
    self.mox.StubOutWithMock(proputils, 'config_props')
    proputils.config_props(mox.Func(lambda c: c.__name__ == 'MySubClass'),
                           {'__module__': __name__,
                            '__qualname__': 'PropertiedTest.testMetaClass.<locals>.MySubClass',
                            'a': 1,
                            'b': 2,
                            })
    self.mox.ReplayAll()
    class MySubClass(proputils.Propertied):
      a = 1
      b = 2

  def testSimpleEndToEnd(self):
    # Real (unmocked) configuration: each PropDesc records its owner class
    # and the attribute name it was assigned to.
    prop1 = PropDesc()
    prop2 = PropDesc()
    class MySubClass(proputils.Propertied):
      a = 1
      b = 2
      p1 = prop1
      p2 = prop2
    self.assertEquals(MySubClass, prop1.cls)
    self.assertEquals('p1', prop1.name)
    self.assertEquals(MySubClass, prop2.cls)
    self.assertEquals('p2', prop2.name)
class HasPropsTest(mox.MoxTestBase):
  """Tests for proputils.HasProps property introspection."""

  def testHasNoProperties(self):
    class HasNoProps(proputils.HasProps):
      pass
    self.assertEquals(set(), set(HasNoProps.prop_names()))
    self.assertEquals(set(), set(HasNoProps.props()))

  def testHasOnlyAttrs(self):
    # Plain attributes are not reported as properties.
    class HasOnlyAttrs(proputils.HasProps):
      a = 1
      b = 2
    self.assertEquals(set(), set(HasOnlyAttrs.prop_names()))
    self.assertEquals(set(), set(HasOnlyAttrs.props()))

  def testHasProps(self):
    # Property descriptors show up in prop_names()/props().
    class HasProps(proputils.HasProps):
      a = PropDesc()
      b = PropDesc()
    self.assertEquals(set(['a', 'b']), set(HasProps.prop_names()))
    self.assertEquals(set([('a', HasProps.a),
                           ('b', HasProps.b)
                           ]),
                      set(HasProps.props()))
    # Instances still behave like ordinary attribute holders.
    hp = HasProps()
    hp.a = 1
    hp.b = 2
    self.assertEquals(1, hp.a)
    self.assertEquals(2, hp.b)

  def testHasPropsSubclass(self):
    # Subclass props are merged with (and may shadow) base-class props.
    class MyHasProps(proputils.HasProps):
      a = PropDesc()
      b = PropDesc()
    class MyHasPropsSub(MyHasProps):
      b = PropDesc()
      c = PropDesc()
    self.assertEquals(set(['a', 'b', 'c']), set(MyHasPropsSub.prop_names()))
    self.assertEquals(set([('a', MyHasPropsSub.a),
                           ('b', MyHasPropsSub.b),
                           ('c', MyHasPropsSub.c),
                           ]),
                      set(MyHasPropsSub.props()))
    # 'b' is shadowed: the subclass sees its own descriptor, not the base's.
    self.assertNotEquals(MyHasProps.b, MyHasPropsSub.b)
    hp = MyHasPropsSub()
    hp.a = 1
    hp.b = 2
    hp.c = 3
    self.assertEquals(1, hp.a)
    self.assertEquals(2, hp.b)
    self.assertEquals(3, hp.c)

  def testNoStrangeInternalState(self):
    # Corrupting the private __props registry must surface as an error when
    # the next subclass is created, not be silently swallowed.
    class BadDict(object):
      def iteritems(self):
        raise AttributeError('unexpected')
    class MyHasProps(proputils.HasProps):
      pass
    MyHasProps._HasProps__props = BadDict()
    def new_subclass():
      class MyHasPropsSubclass(MyHasProps):
        pass
    self.assertRaises(AttributeError, new_subclass)
class PropertyTest(prop_testutils.PropertyTestMixin, unittest.TestCase):
  """Tests for proputils.Property; shared cases come from PropertyTestMixin."""

  def new_class(self):
    # Factory consumed by the mixin's shared tests.
    class C(object):
      p = proputils.Property()
    return C

  def testOverridableGetProperty(self):
    # __get_property__ can be overridden; it is consulted on every read and
    # is not bypassed by a plain assignment to the attribute.
    class CalcProp(proputils.Property):
      def __init__(self):
        self.count = 0
      def __get_property__(self, instance):
        self.count += 1
        return 'I am calculated: %d' % self.count
    class HasCalc(proputils.Propertied):
      calc = CalcProp()
    instance = HasCalc()
    self.assertEquals('I am calculated: 1', instance.calc)
    self.assertEquals('I am calculated: 2', instance.calc)
    instance.calc = 'new val'
    self.assertEquals('I am calculated: 3', instance.calc)

  def testUnconfiguredClsReference(self):
    # Reading .cls before configuration must fail loudly.
    prop = proputils.Property()
    with self.assertRaisesRegex(AttributeError, 'Property not configured'):
      prop.cls

  def testConfigTwice(self):
    # A property may only be bound to one (class, name) pair; a second
    # config attempt raises and leaves the first binding intact.
    class Owner(proputils.Propertied):
      prop1 = proputils.Property()
    with self.assertRaisesRegex(TypeError,
                                'Property \'prop1\' is already configured '
                                'on class \'Owner\''):
      proputils.config_prop(Owner, 'prop2', Owner.prop1)
    self.assertEqual('prop1', Owner.prop1.name)
class ReadOnlyPropertyTest(prop_testutils.PropertyTestMixin, unittest.TestCase):
  """Tests for proputils.ReadOnlyProperty (write-once semantics)."""

  def new_class(self):
    # Factory consumed by the mixin's shared tests.
    class C(object):
      p = proputils.ReadOnlyProperty()
    return C

  def do_test_set_and_delete(self, c):
    # Overrides the mixin hook: the first assignment sticks; any further
    # set or delete raises AttributeError and leaves the value unchanged.
    c.p = 'x'
    self.assertEquals('x', c.p)
    self.assertRaises(AttributeError, setattr, c, 'p', 'y')
    self.assertEquals('x', c.p)
    self.assertRaises(AttributeError, delattr, c, 'p')
    self.assertEquals('x', c.p)
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
|
# Generated by Django 3.1.4 on 2020-12-09 17:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds the Category model and a Video.category FK."""

    dependencies = [
        ('App_Video', '0002_auto_20201209_1748'),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=20)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'verbose_name_plural': 'Categories',
            },
        ),
        # null=True keeps existing Video rows valid without a category.
        migrations.AddField(
            model_name='video',
            name='category',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='category', to='App_Video.category'),
        ),
    ]
|
from collections import deque
from sys import maxsize, stdin
def parse_node(line):
    """Parse one df-style node line into ((x, y), (used, avail))."""
    # Keep digit runs, blank out everything else, then split into the six
    # numeric fields: x, y, size, used, avail, use%.
    numbers = [int(tok) for tok in
               ''.join(ch if ch.isdigit() else ' ' for ch in line).split()]
    x, y, _size, used, avail, _pct = numbers
    return (x, y), (used, avail)
def main():
    """BFS over (hole position, goal-data position) states read from stdin;
    prints the minimum number of moves to bring the goal data to (0, 0)."""
    _, _, *node_lines = stdin.read().splitlines()
    nodes = dict(parse_node(line) for line in node_lines)
    # Exactly one node starts empty; it acts as the movable "hole".
    [(hole_x, hole_y)] = [pos for pos, (used, _) in nodes.items() if used == 0]
    goal_x, goal_y = max(x for x, _ in nodes), 0
    frontier = deque([(0, hole_x, hole_y, goal_x, goal_y)])
    seen = set()
    while frontier:
        step, hx, hy, gx, gy = frontier.popleft()
        state = (hx, hy, gx, gy)
        if state in seen:
            continue
        seen.add(state)
        if (gx, gy) == (0, 0):
            print(step)
            return
        hole_used, hole_avail = nodes[hx, hy]
        for dx, dy in ((0, -1), (0, 1), (-1, 0), (1, 0)):
            nx, ny = hx + dx, hy + dy
            if (nx, ny) not in nodes:
                continue
            neighbour_used, _ = nodes[nx, ny]
            # The neighbour's data must fit into the hole's total capacity.
            if neighbour_used > hole_used + hole_avail:
                continue
            # Moving the hole onto the goal node drags the goal data back.
            ngx, ngy = (hx, hy) if (nx, ny) == (gx, gy) else (gx, gy)
            frontier.append((step + 1, nx, ny, ngx, ngy))
if __name__ == '__main__':
    # Run only when executed as a script (reads the puzzle input from stdin).
    main()
|
import sys
import json
import datefinder
import requests
import datetime
from gcal_uplink import *
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 import Features, KeywordsOptions, ConceptsOptions, EntitiesOptions
def get_nlu_data(samples):
    """Query IBM NLU to get keyword data for each sample.

    Args:
        samples: iterable of text strings to analyze.

    Returns:
        Dict mapping each sample string to {'ent': {entity_text: entity_data}},
        keeping only entities that carry both 'relevance' and 'emotion'.
    """
    data = {}
    # SECURITY NOTE(review): credentials are hard-coded and committed to
    # source control — they should be rotated and loaded from the
    # environment or a secrets store instead.
    nlu = NaturalLanguageUnderstandingV1(
        version='2018-03-16',
        username='764f1427-efb8-41b7-96b5-ab585021e5da',
        password='GwnlOQ77YmGy')
    for s in samples:
        # Request up to five entities per text, with emotion scores.
        response = nlu.analyze(
            text=s,
            language='en',
            features=Features(
                entities=EntitiesOptions(
                    emotion=True,
                    limit=5)
            ))
        data[s] = {'ent' : {}}
        for ent_data in response.result['entities']:
            # Skip entities missing relevance/emotion data.
            if ('relevance' not in ent_data or 'emotion' not in ent_data):
                continue
            data[s]['ent'][ent_data['text']] = ent_data
    return data
# Expected RAW_DATA entry keys: {'time', 'subj', 'sender', 'body'}
# (the original had this as a stray no-op expression; kept as a comment).
def nlu_data_processor(data):
    """Turn raw message dicts into calendar-event dicts.

    Args:
        data: iterable of dicts with at least 'sender' and 'body' keys.

    Returns:
        List of event dicts with 'name', 'time' and, when a location-like
        entity is found, 'location'.
    """
    entities = []
    for entry in data:
        name = entry['sender']
        text = [entry['body']]
        # Take the last date found in the body; None when no date is present
        # (the original left `time` unbound and raised NameError in that case).
        time = None
        for found in datefinder.find_dates(text[0]):
            time = found
        # Crude relative-date handling: "tomorrow" shifts the date by one day.
        if time is not None and ('tomorrow' in text[0] or 'Tomorrow' in text[0]):
            time += datetime.timedelta(days=1)
        loc = get_nlu_data(text)[text[0]]['ent']
        # Renamed from `dict`/`type` to stop shadowing the builtins; debug
        # print spam removed.
        event = {'name': name, 'time': time}
        for key in loc:
            ent_type = loc[key]['type']
            # Keep the first location-like entity only.
            if ent_type in ('Facility', 'Organization', 'Location',
                            'Geographic Feature'):
                event['location'] = loc[key]['text']
                break
        entities.append(event)
    return entities
def run(RAW_DATA):
    """Process raw messages into events, printing and pushing each to the calendar."""
    for event in nlu_data_processor(RAW_DATA):
        print(event)
        pushToCal(event)
|
# Dynamic typing demo: the same name may be rebound to values of different
# types — here an int first, then a string.
for valor in (10, "Emanuel"):
    print(valor)
|
from django.contrib import admin
from models import Feed, Article, Tag
# Expose the feed models in the Django admin site.
for model in (Feed, Article, Tag):
    admin.site.register(model)
|
from django.db import models
class BookInfoManager(models.Manager):
    """Custom manager for the book model.

    Use 1: change the default query set (hide soft-deleted rows).
    Use 2: wrap table-level operations (create) behind helper methods.
    """

    def all(self):
        # Filter logically-deleted rows out of the inherited query set.
        return super().all().filter(bisDelete=False)

    def create_book(self, btitle, bpub_date):
        """Create, save and return a new book row."""
        # self.model is the model class this manager is attached to, so the
        # helper keeps working even if the model class is renamed.
        book = self.model()
        book.btitle = btitle
        book.bpub_date = bpub_date
        book.save()
        return book
# 一类对多类的关系
# 一类
# Create your models here.
# 建立模型类
class BookInfo(models.Model):
    """Book model class ("one" side of a one-to-many with HeroInfo)."""
    # Book title
    btitle = models.CharField(max_length=20)
    # Could also be unique, indexed, with an explicit column name:
    # btitle = models.CharField(max_length=20, unique=True, db_index=True, db_column='title')
    # Book price: up to 10 digits, 2 of them decimals
    # bprice = models.DecimalField(max_digits=10, decimal_places=2)
    # Publication date
    # DateTimeField() would include hours/minutes/seconds as well
    # bpub_date = models.DateField()
    # auto_now_add would set the field once, at creation time
    # bpub_date = models.DateField(auto_now_add=True)
    # auto_now refreshes the field on every save (last-modified time)
    bpub_date = models.DateField(auto_now=True)
    # Read count
    bread = models.IntegerField(default=0)
    # Comment count
    bcomment = models.IntegerField(default=0)
    # Logical (soft) delete flag
    bisDelete = models.BooleanField(default=False)
    # BookInfo.objects.all() ---> BookInfo.book.all()
    # book = models.Manager()  # a plain custom Manager instance would work too
    # Custom manager that hides soft-deleted rows (see BookInfoManager).
    objects = BookInfoManager()

    def __str__(self):
        return self.btitle

    # Alternate constructor as a class method.
    @classmethod
    def create_book(cls, btitle, bpub_date):
        """Create, save and return a new book."""
        book = cls()
        book.btitle = btitle
        book.bpub_date = bpub_date
        book.save()
        return book

    class Meta:
        db_table = 'bookinfo'  # explicit table name, independent of the app name
# 多类
class HeroInfo(models.Model):
    """Hero model class ("many" side of a one-to-many with BookInfo)."""
    # Hero name
    hname = models.CharField(max_length=20)
    # Gender; False is the default ("male" per the original comment)
    hgender = models.BooleanField(default=False)
    # Age
    hage = models.PositiveSmallIntegerField(default=30)
    # Remarks; may be NULL in the DB and blank in forms
    hcomment = models.CharField(max_length=30, null=True, blank=True)
    # Relation attribute lives on the "many" side.
    # NOTE(review): positional ForeignKey without on_delete only works on
    # Django < 2.0 — confirm the project's Django version.
    hbook = models.ForeignKey('BookInfo')
    # Logical (soft) delete flag
    hisDelete = models.BooleanField(default=False)

    def __str__(self):
        return self.hname
# 多类对多类的关系
# 新闻类型类
class NewsType(models.Model):
    """News type (many-to-many with NewsInfo)."""
    type_name = models.CharField(max_length=20)
# 新闻类
class NewsInfo(models.Model):
    """News article."""
    title = models.CharField(max_length=128)
    # Publication time, set automatically at creation
    pub_date = models.DateTimeField(auto_now_add=True)
    # Body content
    content = models.TextField()
    # Many-to-many relation attribute; could be declared on either side
    news_type = models.ManyToManyField('NewsType')
# 一类对一类的关系
# 员工基本信息类
class EmployeeBasicInfo(models.Model):
    """Employee basic info (one side of a one-to-one relation)."""
    # Name
    name = models.CharField(max_length=20)
    # Gender
    gender = models.BooleanField(default=False)
    # Age
    age = models.PositiveSmallIntegerField()
# 员工详细信息类
class EmployeeDetailInfo(models.Model):
    """Employee detail info (other side of the one-to-one relation)."""
    # Full address
    addr = models.CharField(max_length=256)
    # Phone number
    tel = models.CharField(max_length=13)
    # One-to-one relation attribute; could be declared on either side
    employee_basic = models.OneToOneField('EmployeeBasicInfo')
# 自关联
# 地区模型类
class AreaInfo(models.Model):
    """Area (province/city/county) with a self-referencing parent link."""
    # Area name
    atitle = models.CharField(max_length=20)
    # Parent area; NULL/blank for top-level areas
    aParent = models.ForeignKey('self', null=True, blank=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.