index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
21,400 | 5f8609b964783b9aca76455860e00341e948ba6d | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 20:00:57 2015
@author: Chauncey
"""
import pandas as pd
# yelp feature extraction
def feature_extract_yelp_list(json_list):
    """Build one feature DataFrame covering every Yelp restaurant in *json_list*.

    Each element is converted to a one-row frame individually and the
    per-restaurant frames are stacked with ``pandas.concat``.
    """
    frames = [feature_extract_yelp_json(item) for item in json_list]
    return pd.concat(frames)
def feature_extract_yelp_json(cur_json):
''' extract features from a single json element'''
#print(cur_json['name'])
feature_list = ('id', 'rating', 'categories', 'name', 'phone', 'review_count')
cur_dict = dict((x, cur_json[x]) for x in feature_list if x in cur_json.keys())
cur_dict['categories'] = [[x[0] for x in cur_dict['categories']]]
return pd.DataFrame(cur_dict).set_index('name')
# yelp feature extraction
def feature_extract_fs_list(fs_json_list):
    """Build one feature DataFrame covering every Foursquare venue in the list.

    Converts each venue record individually and stacks the resulting
    one-row frames with ``pandas.concat``.
    """
    frames = [feature_extract_fs_json(item) for item in fs_json_list]
    return pd.concat(frames)
def feature_extract_fs_json(cur_json):
    """Extract a one-row feature DataFrame from a Foursquare venue record.

    Parameters
    ----------
    cur_json : dict
        A Foursquare API record with a nested ``'venue'`` dict.

    Returns
    -------
    pandas.DataFrame
        One row, indexed by the venue ``name``; nested fields (price tier,
        check-in/tip/user counts, phone) are flattened into columns.
    """
    cur_venue = cur_json['venue']
    # Base features ('rating' was listed twice in the original tuple).
    feature_list = ('id', 'rating', 'categories', 'name', 'price', 'stats', 'contact')
    cur_dict = {key: cur_venue[key] for key in feature_list if key in cur_venue}
    # Flatten selected nested features.  Venues frequently omit 'price',
    # 'stats' or 'contact', so guard the outer key too (the original raised
    # KeyError on such venues).
    nest_features = [['price', 'tier'], ['stats', 'checkinsCount'], ['stats', 'tipCount'],
                     ['stats', 'usersCount'], ['contact', 'phone']]
    for key, nested_key in nest_features:
        if key in cur_dict and nested_key in cur_dict[key]:
            cur_dict[nested_key] = cur_dict[key][nested_key]
    # Keep only each category's short display name.
    cur_dict['categories'] = [cat['shortName'] for cat in cur_dict.get('categories', [])]
    # Drop the now-flattened container fields; ignore the ones that were absent
    # (the original pop() raised KeyError for missing keys).
    for key in ('contact', 'price', 'stats'):
        cur_dict.pop(key, None)
    return pd.DataFrame(cur_dict).set_index('name')
21,401 | acc4aa5808ca52a81b045bcf1758e6cd1857683f |
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
# Custom "white jet" colormap: like matplotlib's jet, but fading to white at
# the low end so zero-slip patches blend with the background.  Each channel is
# a sequence of (position, value_below, value_above) control points as
# required by LinearSegmentedColormap.
cdict = {'red': ((0., 1, 1),
                 (0.05, 1, 1),
                 (0.11, 0, 0),
                 (0.66, 1, 1),
                 (0.89, 1, 1),
                 (1, 0.5, 0.5)),
         'green': ((0., 1, 1),
                   (0.05, 1, 1),
                   (0.11, 0, 0),
                   (0.375, 1, 1),
                   (0.64, 1, 1),
                   (0.91, 0, 0),
                   (1, 0, 0)),
         'blue': ((0., 1, 1),
                  (0.05, 1, 1),
                  (0.11, 1, 1),
                  (0.34, 1, 1),
                  (0.65, 0, 0),
                  (1, 0, 0))}
# 256-level colormap instance used by the tile plots below.
whitejet = matplotlib.colors.LinearSegmentedColormap('whitejet',cdict,256)
def tile_slip(rupt,nstrike,ndip):
    '''
    Quick and dirty plot of a .rupt file.

    Parameters
    ----------
    rupt : str
        Path to the .rupt file; columns 0, 8 and 9 are read as subfault
        number, strike-slip and dip-slip respectively.
    nstrike : int
        Number of subfaults along strike.
    ndip : int
        Number of subfaults along dip.

    Displays a scatter plot of total slip with rake vectors; returns None.
    '''
    from numpy import genfromtxt,unique,where,zeros
    import matplotlib.pyplot as plt
    f=genfromtxt(rupt)
    num=f[:,0]
    all_ss=f[:,8]
    all_ds=f[:,9]
    #Now parse for multiple rupture speeds: a subfault can appear in several
    #rows (one per rupture window), so sum its slip contributions.
    unum=unique(num)
    ss=zeros(len(unum))
    ds=zeros(len(unum))
    for k in range(len(unum)):
        i=where(unum[k]==num)
        ss[k]=all_ss[i].sum()
        ds[k]=all_ds[i].sum()
    #Sum them: total slip magnitude per subfault
    slip=(ss**2+ds**2)**0.5
    #Get unit rake vector (NOTE(review): divides by zero for subfaults with
    #zero slip, producing NaNs that quiver then skips -- confirm acceptable)
    rakess=ss/slip
    rakeds=ds/slip
    #Get indices for plot; indices count downward so the fault renders in
    #along-strike / along-dip orientation
    istrike=zeros(nstrike*ndip)
    idip=zeros(nstrike*ndip)
    k=0
    for i in range(ndip):
        for j in range(nstrike):
            istrike[k]=nstrike-j
            idip[k]=ndip-i
            k+=1
    #Plot
    plt.figure()
    plt.scatter(istrike,idip,marker='o',c=slip,s=250,cmap=whitejet)
    plt.ylabel('Along-dip index')
    plt.xlabel('Along-strike index')
    cb=plt.colorbar()
    cb.set_label('Slip (m)')
    plt.axis('equal')
    plt.xlim(istrike.min()-1,istrike.max()+1)
    plt.ylim(idip.min()-1,idip.max()+1)
    plt.quiver(istrike,idip,rakess,rakeds,color='green',width=0.002)
    plt.grid()
    plt.title(rupt)
    plt.show()
def tile_slip_segs(rupt,nstrike,ndip):
    '''
    Quick and dirty plot of a .rupt file with multiple fault segments.

    Parameters
    ----------
    rupt : str
        Path to the .rupt file (columns 0, 8, 9: subfault number,
        strike-slip, dip-slip).
    nstrike : sequence of int
        Number of subfaults along strike for each segment (one figure is
        produced per segment).
    ndip : int
        Number of subfaults along dip (shared by all segments).
    '''
    from numpy import genfromtxt,unique,where,zeros
    import matplotlib.pyplot as plt
    f=genfromtxt(rupt)
    num=f[:,0]
    all_ss=f[:,8]
    all_ds=f[:,9]
    #Now parse for multiple rupture speeds: sum slip contributions per subfault
    unum=unique(num)
    ss=zeros(len(unum))
    ds=zeros(len(unum))
    for k in range(len(unum)):
        i=where(unum[k]==num)
        ss[k]=all_ss[i].sum()
        ds[k]=all_ds[i].sum()
    #Sum them
    slip=(ss**2+ds**2)**0.5
    #Get unit rake vector (NaN for zero-slip subfaults, see tile_slip)
    rakess=ss/slip
    rakeds=ds/slip
    #Get indices for plot
    for r in range(len(nstrike)):
        istrike=zeros(nstrike[r]*ndip)
        idip=zeros(nstrike[r]*ndip)
        k=0
        for i in range(ndip):
            for j in range(nstrike[r]):
                istrike[k]=nstrike[r]-j
                idip[k]=ndip-i
                k+=1
        #Plot
        #NOTE(review): slip/rakess/rakeds hold one entry per subfault over ALL
        #segments, while istrike/idip only cover segment r; scatter/quiver will
        #raise a size mismatch unless the totals coincide.  It looks like a
        #per-segment slice of slip was intended -- confirm.
        plt.figure()
        plt.scatter(istrike,idip,marker='o',c=slip,s=250,cmap=whitejet)
        plt.ylabel('Along-dip index')
        plt.xlabel('Along-strike index')
        cb=plt.colorbar()
        cb.set_label('Slip (m)')
        plt.axis('equal')
        plt.xlim(istrike.min()-1,istrike.max()+1)
        plt.ylim(idip.min()-1,idip.max()+1)
        plt.quiver(istrike,idip,rakess,rakeds,color='green',width=0.002)
        plt.grid()
        plt.title(rupt)
        plt.show()
|
21,402 | 1dcfc8cc570d156444780a70cb62ef36e1d844b9 | from dashboard.tradlablib.peakdetect import peakdetect
from dashboard.tradlablib.exec_trade import *
from dashboard.tradlablib.model_train import *
from dashboard.tradlablib import technicalindicator as tind
from dashboard.tradlablib import tradelib
import numpy as np
class KnowSureThingOscillator(object):
    """Trading-signal generator based on the Know Sure Thing (KST) oscillator.

    The KST series is obtained from the project's indicator display helper
    and zero-line crossings are turned into buy/sell signals.
    """
    def __init__(self, data, tii, ti):
        # data: price frame, expected to contain a 'Close' column
        # tii: indicator-input model instance (supplies indicator name/params)
        # ti:  trade-indicator model instance (its pk labels the signal series)
        self.data = data
        self.tii = tii
        self.ti = ti
        # Compute the indicator curves and keep only the 'KST' y-series.
        graphdata = tind.display_indicator(self.data, self.tii.indicator.name, self.tii)
        for pltdt in graphdata:
            if pltdt['name'] == 'KST':
                self.kst=pltdt['y']
                break
    def trigger(self):
        """Derive trade signals from KST zero-line crossings.

        Returns (signal_graph, signals, traderet): plot-ready signal point
        lists, a per-bar signal-state array (0 none / 1 / 2), and the trade
        result from trade_with_signals.
        """
        close = self.data['Close']
        kst = self.kst
        signals_trade_buy = []
        signals_trade_sell = []
        signal_graph = []
        signals = np.zeros(close.shape)
        prevsig = 0
        for i in range(1, len(kst)):
            if (kst[i-1] < 0 and kst[i] > 0):
                #sell
                # NOTE(review): an upward zero-crossing is recorded in the
                # SELL list (state 2) and a downward crossing in the BUY list
                # (state 1), which is the opposite of the usual KST
                # convention -- confirm the intended semantics.
                if prevsig != 2:
                    signals_trade_sell.append({'x': i, 'y': kst[i]})
                prevsig = 2
            elif kst[i-1] > 0 and kst[i] < 0:
                #overbought end, sell start
                if prevsig != 1:
                    signals_trade_buy.append({'x': i, 'y': kst[i]})
                prevsig = 1
            # carry the last signal state forward for every bar
            signals[i] = prevsig
        signal_graph.append({'data': signals_trade_buy, 'type': 'signal-trade-buy', 'name': 'signal-trade-buy', 'id': self.ti.pk})
        signal_graph.append({'data': signals_trade_sell, 'type': 'signal-trade-sell', 'name': 'signal-trade-sell', 'id': self.ti.pk})
        traderet = trade_with_signals(self.data, signals)
        return signal_graph, signals, traderet
    def train(self):
        """Optimise the KST parameters and re-run the trigger with them.

        Returns (psetb, pret, signals): the best parameter set, its training
        return, and the resulting per-bar signal array.
        """
        cols = []
        params1 = []
        # Collect the current parameter values declared for this indicator.
        for ii in self.tii.indicator.indicatorinputs.all():
            params1.append(get_input_value(self.tii, ii.parameter))
            cols.append(ii.parameter)
        psetb, pret = train_for_kst(self.data, *params1)
        # Recompute the KST curve with the optimised parameter set.
        graphdata = tind.display_indicator(self.data, self.tii.indicator.name, self.tii, True, *psetb)
        for pltdt in graphdata:
            if pltdt['name'] == 'KST':
                self.kst=pltdt['y']
                break
        signal_graph, signals, traderet = self.trigger()
        return psetb, pret, signals
|
21,403 | f8e6d58f0544884ef80beb62cd884c822c61cd07 | # https://docs.sympy.org/latest/tutorial/simplification.html
from sympy import *
from sys import exit
init_printing(use_unicode=True)
def p(s):
    """Draw a horizontal rule, then pretty-print the expression *s*."""
    rule = '--------------------------------------------------------------------------------'
    print(rule)
    pprint(s)
# --- basic simplification -------------------------------------------------
x, y, z = symbols('x y z')
p(simplify(sin(x) ** 2 + cos(x) ** 2))
p(simplify((x ** 3 + x ** 2 - x - 1) / (x ** 2 + 2 * x + 1)))
p(simplify(gamma(x) / gamma(x - 2)))
# --- polynomial expand / factor ------------------------------------------
p(expand((x + 1) ** 2))
p(expand((x + 2) * (x - 3)))
p(expand((x + 1) * (x - 2) - (x - 1) * x))
p(factor(x ** 3 - x ** 2 + x - 1))
p(factor(x ** 2 * z + 4 * x * y * z + 4 * y ** 2 * z))
p(factor_list(x ** 2 * z + 4 * x * y * z + 4 * y ** 2 * z))
p(expand((cos(x) + sin(x)) ** 2))
p(factor(cos(x) ** 2 + 2 * cos(x) * sin(x) + sin(x) ** 2))
# --- collect: gather coefficients of powers of x --------------------------
expr = x * y + x - 3 + 2 * x ** 2 - z * x ** 2 + x ** 3
p(expr)
collected_expr = collect(expr, x)
p(collected_expr)
p(collected_expr.coeff(x, 2))
# --- cancel / apart: rational-function manipulation -----------------------
p(cancel((x ** 2 + 2 * x + 1) / (x ** 2 + x)))
expr = 1 / x + (3 * x / 2 - 2) / (x - 4)
p(expr)
p(cancel(expr))
expr = (x * y ** 2 - 2 * x * y * z + x * z ** 2 + y ** 2 - 2 * y * z + z ** 2) / (x ** 2 - 1)
p(expr)
p(cancel(expr))
p(factor(expr))
expr = (4 * x ** 3 + 21 * x ** 2 + 10 * x + 12) / (x ** 4 + 5 * x ** 3 + 5 * x ** 2 + 4 * x)
p(expr)
p(apart(expr))
# --- trigonometric simplification -----------------------------------------
p(acos(x))
p(cos(acos(x)))
p(asin(1))
p(trigsimp(sin(x) ** 2 + cos(x) ** 2))
p(trigsimp(sin(x) ** 4 - 2 * cos(x) ** 2 * sin(x) ** 2 + cos(x) ** 4))
p(trigsimp(sin(x) * tan(x) / sec(x)))
p(trigsimp(cosh(x) ** 2 + sinh(x) ** 2))
p(trigsimp(sinh(x) / tanh(x)))
p(expand_trig(sin(x + y)))
p(trigsimp(sin(x) * cos(y) + sin(y) * cos(x)))
# --- powers: assumptions control which identities are valid ---------------
x, y = symbols('x y', positive=True)
a, b = symbols('a b', real=True)
z, t, c = symbols('z t c')
p(sqrt(x) == x ** Rational(1, 2))
p(powsimp(x ** a * x ** b))
p(powsimp(x ** a * y ** a))
# without positivity assumptions powsimp refuses unless force=True
p(powsimp(t ** c * z ** c))
p(powsimp(t ** c * z ** c, force=True))
p((z * t) ** 2)
p(sqrt(x * y))
p(powsimp(z ** 2 * t ** 2))
p(powsimp(sqrt(x) * sqrt(y)))
p(expand_power_exp(x ** (a + b)))
p(expand_power_base((x * y) ** a))
p(expand_power_base((z * t) ** c))
p(expand_power_base((z * t) ** c, force=True))
p(x ** 2 * x ** 3)
p(expand_power_exp(x ** 5))
p(powdenest((x ** a) ** b))
p(powdenest((z ** a) ** b))
p(powdenest((z ** a) ** b, force=True))
# --- logarithms: same assumption-driven behaviour -------------------------
x, y = symbols('x y', positive=True)
n = symbols('n', real=True)
p(expand_log(log(x * y)))
p(expand_log(log(x / y)))
p(expand_log(log(x ** 2)))
p(expand_log(log(x ** n)))
p(expand_log(log(z * t)))
p(expand_log(log(z ** 2)))
p(expand_log(log(z ** 2), force=True))
p(logcombine(log(x) + log(y)))
p(logcombine(n * log(x)))
p(logcombine(n * log(z)))
p(logcombine(n * log(z), force=True))
# --- special functions and rewriting --------------------------------------
x, y, z = symbols('x y z')
k, m, n = symbols('k m n')
p(factorial(n))
p(binomial(n, k))
p(gamma(z))
p(hyper([1, 2], [3], z))
p(tan(x).rewrite(sin))
p(factorial(x).rewrite(gamma))
p(expand_func(gamma(x + 3)))
p(hyperexpand(hyper([1, 1], [2], z)))
expr = meijerg([[1], [1]], [[1], []], -z)
p(expr)
p(hyperexpand(expr))
n, k = symbols('n k', integer=True)
p(combsimp(factorial(n) / factorial(n - 3)))
p(combsimp(binomial(n + 1, k + 1) / binomial(n, k)))
p(gammasimp(gamma(x) * gamma(1 - x)))
def list_to_frac(l):
    """Fold the sequence *l* into a continued fraction, outermost term first.

    list_to_frac([a, b, c]) == a + 1/(b + 1/c); a single-element list is
    returned unchanged (plus sympy's Integer(0)).
    """
    tail = Integer(0)
    # Walk the terms from the innermost outwards (reversed l[1:]).
    for term in l[:0:-1]:
        tail = 1 / (tail + term)
    return l[0] + tail
# --- continued fractions: build, flatten, then peel apart again -----------
p(list_to_frac([x, y, z]))
p(list_to_frac([1, 2, 3, 4]))
syms = symbols('a0:5')
p(syms)
a0, a1, a2, a3, a4 = syms
frac = list_to_frac(syms)
p(frac)
# cancel() flattens the nested fraction into a single rational expression
frac = cancel(frac)
p(frac)
# pull it back apart, but manually: repeatedly apart() and invert
l = []
frac = apart(frac, a0)
p(frac)
l.append(a0)
frac = 1 / (frac - a1)
p(frac)
l.append(a1)
frac = 1 / (frac - a2)
p(frac)
# etc...
exit()
|
21,404 | ce2fbff9a99f90850ed8c6fba36318e1bb325aca | from sqlmodel import Session, SQLModel, create_engine
import os
from dotenv import load_dotenv, find_dotenv
# Load environment variables from the nearest .env file.
load_dotenv(find_dotenv())
# Postgres connection URL comes from the environment; echo=True logs all SQL.
pg_url2 = os.environ.get("PG_URL")
engine = create_engine(pg_url2, echo=True)
# NOTE(review): this creates a single long-lived Session at import time;
# typical sqlmodel usage opens a short-lived Session per unit of work.
SessionLocal2 = Session(engine)
21,405 | bcd3da9a97c289efcc07469cae8cad319c9f0fee | ##
import numpy as np
from scipy.misc import logsumexp
from scipy.interpolate import interp1d
import lal
import lalsimulation as lalsim
import emcee
from emcee import PTSampler
import GenWaveform as wv
import os, sys
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import corner
sys.path.append('../')
import MonashGWTools.tools as tools
#-----------------------------------------#
## Minimum + maximum thresholds:
# Prior support bounds used by logPrior/logL.  Masses in solar masses,
# distance in Mpc, eccentricity sampled in log10-space, angles in radians.
m1_min = 1.0; m1_max = 100.0
m2_min = 1.0; m2_max = 100.0
dist_min = 10; dist_max = 1e4
ecc_min = np.log10(1.e-4); ecc_max = np.log10(0.5)
angle_min = 0.0; angle_max = np.pi*2.0
#thresh = [[m1_min, m1_max],
#          [m2_min, m2_max],
#          [dist_min, dist_max],
#          [ecc_min, ecc_max],
#          [angle_min, angle_max]]
#-----------------------------------------#
#-----------------------------------------#
def gen_waveform(deltaF, m1, m2, fmin, fmax, iota, dist, e_min):
    """Generate a frequency-domain eccentric waveform with lalsimulation.

    Parameters: deltaF frequency resolution (Hz); m1/m2 component masses in
    solar masses; fmin/fmax frequency band (Hz); iota inclination (rad);
    dist luminosity distance in Mpc; e_min eccentricity at fmin.

    Returns (hplus, hcross) as complex frequency-series arrays.
    """
    fref = 20.
    # Convert to SI units expected by lal: kg and metres.
    m1 *= lal.lal.MSUN_SI
    m2 *= lal.lal.MSUN_SI
    dist = 1e6*lal.lal.PC_SI*dist
    # Non-spinning system.
    S1 = [0,0,0]
    S2 = [0,0,0]
    phaseO = 1
    phiRef = np.pi
    meanPerAno = 0.0
    longAscNodes = 0.0
    approx = lalsim.GetApproximantFromString("EccentricFD")
    WFdict = lal.CreateDict()
    # Argument order follows the SimInspiralChooseFDWaveform C signature.
    H = lalsim.SimInspiralChooseFDWaveform(m1, m2, S1[0], S1[1], S1[2], S2[0], S2[1], S2[2], dist, iota, phiRef, longAscNodes, e_min, meanPerAno, deltaF, fmin, fmax, fref, WFdict, approx)
    hplus = H[0].data.data
    hcross = H[1].data.data
    return hplus, hcross
def make_arr_samesize(arr1, arr2):
    """Zero-pad the shorter of two 1-D arrays so both have equal length.

    Parameters
    ----------
    arr1, arr2 : array_like
        Input arrays; the shorter one is extended with trailing zeros.

    Returns
    -------
    (ndarray, ndarray)
        The two arrays, now the same length.  (The original carried dead
        ``len1 = ...`` re-assignments after padding; they are removed.)
    """
    len1 = np.size(arr1)
    len2 = np.size(arr2)
    if len1 < len2:
        arr1 = np.append(arr1, np.zeros(len2 - len1))
    elif len2 < len1:
        arr2 = np.append(arr2, np.zeros(len1 - len2))
    return arr1, arr2
def uniformPrior(var, varMin, varMax):
    """Map a unit-interval draw onto [varMin, varMax] and raise 10 to it.

    I.e. a draw uniform in the exponent, giving a log-uniform value.
    """
    exponent = varMin + var * (varMax - varMin)
    return 10 ** exponent
def logPrior(x):
    """Log of the (flat, unnormalised) prior distribution.

    Parameters
    ----------
    x : sequence
        Parameter vector [m1, m2, log10(e_min), dist, iota, RA, DEC].

    Returns
    -------
    float
        1. inside the prior support (flat prior; note the value is the
        original code's constant, not log(1)=0), -inf outside.
    """
    m1 = x[0]
    m2 = x[1]
    e_min = x[2]
    dist = x[3]
    iota = x[4]
    RA = x[5]
    DEC = x[6]
    # Support: component masses, distance and angles within their global
    # bounds; eccentricity within its log10 bounds OR exactly zero
    # (the circular model); and the mass ordering m1 > m2.
    in_support = (
        m1_min <= m1 <= m1_max
        and m2_min <= m2 <= m2_max
        and (ecc_min <= e_min <= ecc_max or e_min == 0.0)
        and dist_min <= dist <= dist_max
        and angle_min <= iota <= angle_max
        and angle_min <= RA <= angle_max
        and angle_min <= DEC <= angle_max
        and m1 > m2
    )
    if in_support:
        ##-- Flat Prior --### (the dead `eta` computation is removed; the
        # commented expression below is the Jacobian-weighted alternative)
        #np.log(((m1+m2)*(m1+m2))/((m1-m2)*pow(eta,3.0/5.0)))
        return 1.
    return -np.inf
def logL(x, dataH1, dataL1, PSD, fmin, fmax, deltaF):
    '''
    Generates the log likelihood.

    x: parameter vector [m1, m2, log10(e_min), dist, iota, RA, DEC];
    dataH1/dataL1: frequency-domain strain data (only H1 is actually used
    below); PSD: noise power spectral density; fmin/fmax/deltaF: band and
    resolution in Hz.  Returns the Gaussian log likelihood, or -inf when x
    lies outside the prior support.
    '''
    m1 = x[0]
    m2 = x[1]
    e_min = x[2]
    dist = x[3]
    iota = x[4]
    RA = x[5]
    DEC = x[6]
    # Fixed extrinsic/nuisance parameters (non-spinning, fixed polarisation).
    psi = 0.0
    S1=[0,0,0]
    S2=[0,0,0]
    phi = np.pi
    fRef = fmin
    # Same support test as logPrior (duplicated so the sampler never builds a
    # waveform for out-of-range parameters).
    if m1 >= m1_min and m1 <= m1_max and m2 <= m2_max and m2 >= m2_min and ((e_min <= ecc_max and e_min >= ecc_min) or e_min == 0.0) and dist <= dist_max and dist >= dist_min and iota <= angle_max and iota >= angle_min and RA <= angle_max and RA >= angle_min and DEC <= angle_max and DEC >= angle_min and m1 > m2 :
        # Generate the waveform:
        tc = 1000000008
        # Eccentricity is sampled in log10-space; convert back.
        e_min = 10**(e_min)
        hp, hc = wv.GenFDWaveform(fmin, fmax, deltaF, dist, m1, m2, S1, S2, e_min, fRef, iota, phi, waveform='EccentricFD')
        htildeH,f_arrH = wv.DetectorStrain(hp, hc, fmax, deltaF, RA, DEC, psi, tc, ifo='H1')
        ##-- Likelihood --### standard Gaussian noise inner product
        # NOTE(review): dataL1 is accepted but never used -- single-detector
        # likelihood only; confirm whether L1 was meant to contribute.
        logL = -0.5 * (4*deltaF*np.vdot(dataH1 - htildeH, (dataH1 - htildeH)/PSD)).real
        print(logL)
        return logL
def run_sampler(dataH1, dataL1, PSD, fmin, fmax, deltaF, ntemps, ndim, nsteps, nwalkers, job, ecc=True):
    '''
    Initialise walkers and run the parallel-tempered MCMC sampler.

    Returns (lnZ, dlnZ): the thermodynamic-integration log evidence and its
    uncertainty.  When ecc is True an eccentric model is sampled and corner
    plots are written; otherwise eccentricity is pinned to zero.
    '''
    # NOTE(review): these local bounds shadow the module-level *_min/*_max
    # globals used by logPrior, so walkers start in a narrower range than the
    # prior support -- confirm this is intentional.
    m1_min, m1_max = 5, 50
    m2_min, m2_max = 5, 50
    angle_min, angle_max = 0., np.pi*2.
    dist_min, dist_max = 50, 3000.
    # Draw initial positions, shape (ntemps, nwalkers, 1) per parameter.
    m1 = np.random.uniform(low=(m1_min+5), high=m1_max, size=(ntemps, nwalkers, 1))
    m2 = np.random.uniform(low=m2_min, high=m2_max, size=(ntemps, nwalkers, 1))
    if ecc == True:
        # Eccentricity initialised in log10-space.
        ecc_min, ecc_max = np.log10(1.e-4),np.log10(0.5)
        e_min = np.random.uniform(low=ecc_min, high=ecc_max, size=(ntemps, nwalkers, 1))
    else:
        e_min = np.zeros((ntemps, nwalkers, 1))
    dist = np.random.uniform(low=dist_min, high=dist_max, size=(ntemps, nwalkers, 1))
    iota = np.random.uniform(low=angle_min, high=0.5*angle_max, size=(ntemps, nwalkers, 1))
    RA = np.random.uniform(low=angle_min, high=angle_max, size=(ntemps, nwalkers, 1))
    DEC = np.random.uniform(low=angle_min, high=angle_max, size=(ntemps, nwalkers, 1))
    ## Ensure m1 > m2: redraw m2 below 0.9*m1 where the ordering is violated
    for i in range(0,ntemps):
        for k in range(0,nwalkers):
            if m2[i,k,0] >= m1[i,k,0]:
                m2[i,k,0] = np.random.uniform(low=m2_min,high=(0.9*m1[i,k,0]),size=1)
            else:
                pass
    ## Setting initial walker positions: reshape to (ntemps, nwalkers, ndim)
    p0 = np.array([m1, m2, e_min, dist, iota, RA, DEC])
    p0 = np.reshape(p0, (ndim,ntemps, nwalkers))
    p0 = np.swapaxes(p0, 2,1)
    p0 = np.swapaxes(p0, 0,2)
    ## Setting up the sampler: geometric inverse-temperature ladder
    betas = np.logspace(0, -ntemps, ntemps, base=10)
    sampler = PTSampler(ntemps, nwalkers, ndim, logL, logPrior, loglargs=[dataH1, dataL1, PSD, fmin, fmax, deltaF], threads=16, a=10., betas=betas)
    ## Running the sampler:  (Python 2 print statements -- this file is py2)
    print 'sampling underway...'
    (pos, lnprob, rstate) = sampler.run_mcmc(p0, nsteps)
    ## Get log evidence & log Bayes factor
    lnZ, dlnZ = get_Evidence(sampler, pos, lnprob, rstate)
    ## make corner plots:
    if ecc == True:
        print "making corner plots..."
        make_triangles(sampler, job, ndim)
    #return sampler, pos, lnprob, rstate
    return lnZ, dlnZ
def get_Evidence(sampler, pos, lnprob, rstate):
    """Return (lnZ, dlnZ) from thermodynamic integration over the ladder.

    pos/lnprob/rstate are unused but kept for call-site compatibility.
    """
    evidence, evidence_err = sampler.thermodynamic_integration_log_evidence(fburnin=0.5)
    return evidence, evidence_err
def make_triangles(sampler, job, ndim):
    ## Making corner plots of the cold-chain posterior; writes a PNG to
    ## posteriors/triangle_<job>.png and returns None.
    # Injection values marked on the plot (masses, log10 ecc, distance, angles).
    truths=[35.,30.,np.log10(0.1),220.,(90.*np.pi/180),(90*np.pi/180.),(90.*np.pi/180.)]
    # chain[0] is the coldest temperature; discard the first 100 steps as
    # burn-in and flatten walkers into one sample set.
    samples = sampler.chain[0]
    samples = samples[:, 100:, :].reshape(-1, ndim)
    fig = corner.corner(samples,labels=['m1', 'm2', 'log$_{10}$e', 'dist', 'iota', 'RA', 'DEC'],show_titles=True,quantiles=[0.16, 0.5, 0.84], truths=truths)
    fig.savefig("posteriors/triangle_"+str(job.filename)+".png")
|
21,406 | 9ecd7b940520f67b4e523d8c9e03606a2a5f821f | # coding=utf-8
# Copyright 2019, Rusty Bower, rustybower.com
import arrow
import requests
from sopel.formatting import bold
from sopel.module import commands, example
def parse_games(date):
    """Return human-readable status strings for MLB games.

    Parameters
    ----------
    date : str or None
        Optional YYYY-MM-DD date for the schedule query; None fetches
        today's schedule.

    Returns
    -------
    list[str]
        One formatted line per game (upcoming start time, or final score
        with the winner in bold).
    """
    if date:
        r = requests.get(
            "https://statsapi.mlb.com/api/v1/schedule?sportId=1&date={}".format(date)
        )
    else:
        r = requests.get("https://statsapi.mlb.com/api/v1/schedule?sportId=1")
    reply = []
    # The loop variable previously shadowed the `date` parameter; renamed.
    for day in r.json()["dates"]:
        # TODO - Figure out what events and matches are
        for game in day["games"]:
            away = game["teams"]["away"]
            home = game["teams"]["home"]
            state = game["status"]["abstractGameState"]
            # Game Is Not Started
            if state == "Preview":
                reply.append(
                    "{} @ {} {} Eastern".format(
                        away["team"]["name"],
                        home["team"]["name"],
                        # TODO - Allow users to specify timezone to return
                        arrow.get(game["gameDate"]).to("US/Eastern").format("HH:mm"),
                    )
                )
            elif state == "Final":
                # Away Team Win
                if int(away["score"]) > int(home["score"]):
                    reply.append(
                        "{} {} {} {} Final".format(
                            bold(away["team"]["name"]),
                            bold(str(away["score"])),
                            home["team"]["name"],
                            str(home["score"]),
                        )
                    )
                # Home Team Win
                elif int(home["score"]) > int(away["score"]):
                    reply.append(
                        "{} {} {} {} Final".format(
                            away["team"]["name"],
                            str(away["score"]),
                            bold(home["team"]["name"]),
                            bold(str(home["score"])),
                        )
                    )
                # Tie Game
                else:
                    reply.append(
                        "{} {} {} {} Final".format(
                            away["team"]["name"],
                            away["score"],
                            home["team"]["name"],
                            home["score"],
                        )
                    )
    return reply
@commands("mlb")
@example(".mlb")
@example(".mlb 2019-10-29")
def mlb(bot, trigger):
date = trigger.group(2) or None
# Get Game Data
reply = " | ".join(parse_games(date))
# Split if greater than 200 characters so we don't accidentally cut anything off
if len(reply) > 200:
length = int(len(reply.split(" | ")) / 2)
bot.reply(" | ".join(reply.split(" | ")[0:length]))
bot.reply(" | ".join(reply.split(" | ")[length:]))
return
else:
if reply:
return bot.reply(reply)
else:
return
|
21,407 | a5636e980b9f720ed26e0f7915ffb2c93c45dee0 | """Example of Jsonable classes."""
import jsonable
class Pillow(jsonable.Jsonable):
    """Minimal Jsonable leaf object used by the example databases below."""
    def __init__(self, is_soft=True):
        # is_soft: whether the pillow is soft (default True)
        self.is_soft = is_soft
# Name -> class catalogues; consumed via the `inner` mapping of container
# classes (see Bed) to resolve nested components by configuration name.
pillow_database = {'Default': Pillow, 'Pillow1': Pillow, 'Pillow2': Pillow}
cushion_database = {'Default': Pillow, 'Cushion1': Pillow, 'Cushion2': Pillow}
class Bed(jsonable.Jsonable):
    """Jsonable container with nested pillow/cushion components."""
    # Slot name -> database of candidate classes; read by
    # add_inner_defaults() (provided by jsonable.Jsonable).
    inner = {
        'pillow': pillow_database,
        'cushion': cushion_database
    }
    def __init__(self, length=2.0):
        # length: bed length (units not specified here -- presumably metres)
        self.length=length
        self.add_inner_defaults()
    @jsonable.add_to_methods_list
    def change_sheets(self,length = 1.0):
        """Registered (via add_to_methods_list) mutator: update the length."""
        self.length = length
# Name -> class catalogue of available beds, used by Room.inner below.
bed_database = {'Default': Bed,'Bed1': Bed, 'Bed2': Bed}
class Room(jsonable.Jsonable):
    """Top-level Jsonable container holding a nested bed component."""
    inner = {'bed': bed_database}
    def __init__(self, area=18.0):
        # area: floor area (units not specified -- presumably square metres)
        self.area=area
        self.add_inner_defaults()
# Name -> class catalogue of available rooms.
room_database = {'Room1':Room,'Room2':Room}
21,408 | 5aa4a8d4c10a67945e5d2258aa22bc143abafc8b | import os
from flask import Flask, g
from services import dao
from controllers import user, auth
# Flask application setup: create the app and wire up the blueprint-based
# controllers for user management and authentication.
app = Flask(__name__)
app.config.from_object(__name__)
app.register_blueprint(user.get_user)
app.register_blueprint(user.user_page)
app.register_blueprint(user.register_user)
app.register_blueprint(user.register_page)
app.register_blueprint(auth.login_page)
app.register_blueprint(auth.logout_user)
# Default configuration.  SECRET_KEY/USERNAME/PASSWORD are development
# placeholders -- override them via the APP_SETTINGS file in production.
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'app.db'),
    PHOTOS=os.path.join('static', 'uploads/'),
    SECRET_KEY='app-key',
    USERNAME='admin',
    PASSWORD='default'
))
# Optional environment-pointed settings file overrides the defaults above.
app.config.from_envvar('APP_SETTINGS', silent=True)
def init_db():
    """Create the database schema by executing schema.sql against the DB."""
    db = dao.get_db()
    # open_resource resolves schema.sql relative to the application package.
    with app.open_resource('schema.sql', mode='r') as f:
        db.cursor().executescript(f.read())
    db.commit()
@app.cli.command('initdb')
def init_db_command():
    """Initializes the database."""
    # CLI entry point: `flask initdb` recreates the schema.
    init_db()
    print('Initialized the database.')
@app.teardown_appcontext
def close_db(error):
    """Closes the database again at the end of the request."""
    # The connection is cached on flask.g by the dao layer; close it if set.
    if hasattr(g, 'sqlite_db'):
        g.sqlite_db.close()
# Development entry point: run the built-in Flask server.
if __name__ == '__main__':
    app.run()
|
21,409 | 2651cceac29bd19cfcc31b7099649a30cbec1f12 | import shared
## Data
# Puzzle input plus the worked example from the AoC 2021 day 3 statement
# (expected results: gamma '10110' = 22, epsilon '01001' = 9).
raw = shared.read_file('2021/input/day03.txt')
test = [
    '00100',
    '11110',
    '10110',
    '10111',
    '10101',
    '01111',
    '00111',
    '11100',
    '10000',
    '11001',
    '00010',
    '01010',
]
## Functions
def get_gamma_rate(report):
    """Return the gamma rate: per column, the most common bit (ties -> '1').

    *report* is a list of equal-length binary strings; the result is a
    binary string with one bit per column.
    """
    bits = []
    for column in zip(*report):
        zeros = column.count('0')
        ones = len(column) - zeros
        bits.append('0' if zeros > ones else '1')
    return ''.join(bits)
def get_epsilon_rate(report):
    """Return the epsilon rate: the bitwise complement of the gamma rate."""
    return swap_vals(get_gamma_rate(report))
def swap_vals(num):
    """Return *num* with every '0' and '1' character swapped."""
    return num.translate(str.maketrans('01', '10'))
def solve_puzzle(report, func1=get_gamma_rate, func2=get_epsilon_rate):
    """Multiply the decimal values of the two binary rates of *report*."""
    rate1 = int(func1(report), 2)
    rate2 = int(func2(report), 2)
    return rate1 * rate2
def get_ratings(report, rating_type):
    """Filter *report* column by column until a single number remains.

    rating_type 'o2' keeps the bucket with the most common bit at each
    position (ties keep the '1' bucket); 'co2' keeps the least common
    (ties keep the '0' bucket).  Returns the surviving binary string.
    """
    candidates = report
    for pos in range(len(report[0])):
        if len(candidates) == 1:
            break
        with_zero = [num for num in candidates if num[pos] == '0']
        with_one = [num for num in candidates if num[pos] != '0']
        zeros, ones = len(with_zero), len(with_one)
        keep_zeros = (rating_type == 'o2' and zeros > ones) or (
            rating_type == 'co2' and zeros <= ones)
        candidates = with_zero if keep_zeros else with_one
    return candidates[0]
def get_o2_gen_rating(report):
    """Oxygen generator rating: keep the most common bit at each position."""
    return get_ratings(report, 'o2')
def get_co2_scrubber_rating(report):
    """CO2 scrubber rating: keep the least common bit at each position."""
    return get_ratings(report, 'co2')
## Testing -- sanity checks against the worked example from the puzzle text
assert get_gamma_rate(test) == '10110'
assert get_epsilon_rate(test) == '01001'
assert int(get_gamma_rate(test), 2) == 22
assert solve_puzzle(test) == 198
assert get_o2_gen_rating(test) == '10111'
assert get_co2_scrubber_rating(test) == '01010'
assert solve_puzzle(test, get_o2_gen_rating, get_co2_scrubber_rating) == 230
## Solution for part 1
print(f'Part 1: Submarine power consumption is {solve_puzzle(raw)}')
## Solution for part 2
print(f'Part 2: Submarine life support rating is {solve_puzzle(raw, get_o2_gen_rating, get_co2_scrubber_rating)}')
21,410 | b8667935fb8da2f74a80c7bb42d5b50d1c4dd409 | import pygame
import pyscroll
import pytmx
import re
import pygame
from app.settings import *
from app.tools.functionTools import *
import os
from app.sprites.enemyFactory import EnemyFactory
from app.sprites.itemFactory import ItemFactory
import weakref
# from app.sound.soundPlayerController import *
# from app.sprites.player import *
class MapData:
    """Loads a Tiled (.tmx) map, builds its sprite groups and camera, and
    places the player's spawn point from the map's 'InZone' objects."""
    def __init__(self, mapName="WorldMap", nameInZone="StartPointWorld", screenSize=(SCREEN_WIDTH, SCREEN_HEIGHT)):
        self.nameMap = mapName
        # Load the TMX file and wrap it for pyscroll's renderer.
        self.tmxData = pytmx.util_pygame.load_pygame(self.reqImageName(self.nameMap))
        self.tiledMapData = pyscroll.data.TiledMapData(self.tmxData)
        self.cameraPlayer = pyscroll.BufferedRenderer(self.tiledMapData, screenSize, clamp_camera=True)
        # self.soundController = soundPlayerController()
        # Sprite groups used by the game loop for updates and collisions.
        self.allSprites = pygame.sprite.Group()
        self.enemyGroup = pygame.sprite.Group()
        self.itemGroup = pygame.sprite.Group()
        self.friendlyBullet = pygame.sprite.Group()
        self.enemyBullet = pygame.sprite.Group()
        self.spritesHUD = pygame.sprite.Group()
        self.notifySet = weakref.WeakSet() #Set of all object that needs to be notified of events. Weak references are used to prevent this set from keeping objects alive
        # Instantiate enemies declared in the map's object layer.
        eFactory = EnemyFactory()
        iFactory = ItemFactory()
        for obj in self.tmxData.objects:
            if obj.type == "enemy":
                enemy = eFactory.create(obj, self)
                if enemy is not None:
                    self.allSprites.add(enemy)
                    self.enemyGroup.add(enemy)
            # if obj.type == "item":
            #     item = iFactory.create(obj)
            #     self.allSprites.add(item)
            #     self.itemGroup.add(item)
        # Put camera in mapData
        self.camera = pyscroll.PyscrollGroup(map_layer=self.cameraPlayer, default_layer=SPRITE_LAYER)
        self.camera.add(self.allSprites)
        # Spawn point of the player: look for the 'InZone' object whose
        # StartPoint property matches nameInZone.
        # NOTE(review): the attribute names below are misspelled
        # ('spawmPoint...'), but they are part of this class's public surface
        # and are kept as-is.
        valBool = False
        for obj in self.tmxData.objects:
            if obj.name == "InZone":
                if obj.StartPoint == nameInZone:
                    self.spawmPointPlayerx = obj.x
                    self.spawmPointPlayery = obj.y
                    valBool = True
        # The game is not complete? -- abort if the map has no matching spawn
        if valBool == False:
            quitGame()
    def reqImageName(self, nameMap):
        """Return the path of the .tmx file for map *nameMap*."""
        return os.path.join('tiles_map', nameMap + ".tmx")
21,411 | 1467fc0a99725a5d1851676aa0ed89515747013c | """
A module that provides a manage.py command to wait for a database.
Our Django-application can only start once our database server accepts
incoming connections. Since we cannot always guarantee start-up order
in container-based deployment and development environments, the custom
manage.py command provided by this module waits for the database to
become available, with incremental back-off logic.
"""
import argparse
import logging
import os
import time
import psycopg2
from django.core.management import BaseCommand
log = logging.getLogger("simple_django_app.core.waitforpostgres")
def wait_for_postgres(max_attempts: int = 5, backoff_exponent: int = 1) -> None:
    """Wait for Postgres to be ready to accept connections.

    Retries up to *max_attempts* times, sleeping attempt**backoff_exponent
    seconds between failures; re-raises on the final failure.
    """
    log.info("Waiting for database to start up.")
    attempt = 0
    while attempt < max_attempts:
        attempt += 1
        log.info("Attempting to connect to database [%s/%s]", attempt, max_attempts)
        try:
            conn = psycopg2.connect(os.environ["DATABASE_URL"])
        except psycopg2.OperationalError:
            if attempt >= max_attempts:
                log.critical("Failed to connect to database.")
                raise
            delay = attempt ** backoff_exponent
            log.info("Connection failed, sleeping for %s second...", delay)
            time.sleep(delay)
        else:
            log.info("Connected successfully to the database.")
            conn.close()
            return
class Command(BaseCommand):
    """A command to wait for postgres to become available."""

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        """Register this command's extra command-line flags."""
        super().add_arguments(parser)
        parser.add_argument(
            "--database-attempts",
            action="store",
            type=int,
            default=5,
            dest="max_database_attempts",
            help="Maximum number of database connection attempts that are made (default=5).",
        )
        parser.add_argument(
            "--exponential-backoff",
            action="store",
            type=int,
            default=1,
            dest="backoff_exponent",
            help="Exponent applied to the incremental back-offs (default=1).",
        )

    def handle(self, *args, **options) -> None:
        """Read the parsed options and run the waiter."""
        attempts = options.pop("max_database_attempts", 5)
        exponent = options.pop("backoff_exponent", 1)
        wait_for_postgres(attempts, exponent)
|
21,412 | 9db52693775e170f77fa16a1be6ad3874dfa4d46 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.pkg.k4.key4hep_stack import Ilcsoftpackage
class Ddkaltest(CMakePackage, Ilcsoftpackage):
    """Interface between KalTest fitter and DD4hep based geometry"""

    homepage = "https://github.com/iLCSoft/DDKalTest"
    url = "https://github.com/iLCSoft/DDKalTest/archive/v01-06.tar.gz"
    git = "https://github.com/iLCSoft/DDKalTest.git"
    maintainers = ["vvolkl"]

    # Known versions with their release tarball checksums.
    version("master", branch="master")
    version(
        "1.7", sha256="5126404bcad2f6f669ef8f02c80de097196e346f5945e7f6249820f8cd5fd86c"
    )
    version(
        "1.6", sha256="e668242d84eb94e59edca18e524b1a928fcf7ae7c4b79f76f0338a0a4e835d8f"
    )
    version(
        "1.5", sha256="4ef6fea7527dbb5f9a12322e92e27d80f2c29b115aae13987f55cb6cf02f31f5"
    )

    depends_on("dd4hep")
    depends_on("root")
    depends_on("ilcutil")
    depends_on("lcio")
    depends_on("gsl")
    depends_on("kaltest")
    depends_on("aidatt")

    @run_after("install")
    def installheaders(self):
        # Copy the full source tree into the prefix so headers are available
        # to downstream iLCSoft packages.
        # make('install')
        install_tree(".", self.prefix)

    def cmake_args(self):
        # C++ Standard: match the standard ROOT was built with.
        return ["-DCMAKE_CXX_STANDARD=%s" % self.spec["root"].variants["cxxstd"].value]
|
def product(L1, L2):
    """Return the element-wise products of two sequences (shortest wins).

    The previous ``map``-based version returned a lazy map object on
    Python 3, so ``print(product(...))`` showed ``<map object ...>``
    instead of the values; a list is returned instead.
    """
    return [x * y for x, y in zip(L1, L2)]
# Demo inputs: pairwise products should be [2, 6, 12, 20].
L1 = [1, 2, 3, 4]
L2 = [2, 3, 4, 5]
print(product(L1, L2))
21,414 | d7d6320612e0352472266d4b5dbbae47987f62f5 | from django.shortcuts import render, redirect, get_list_or_404
from django.views import generic
from django.conf import settings
from newsapi import NewsApiClient
import requests
import json
from.models import Topics
class IndexView(generic.TemplateView):
    """List all news topics for the country/domain given in the URL."""
    template_name = "news/index.html"
    def get_context_data(self, **kwargs):
        """Add the topic list, domain name and news type to the context."""
        context = super(IndexView, self).get_context_data(**kwargs)
        # 404 when no Topics rows are tagged with the requested country.
        context['topic'] = get_list_or_404(Topics, domain_tags=self.kwargs.get("country_name"))
        context['domain'] = self.kwargs.get("country_name")
        context['news_type'] = 'all'
        return context
class TopIndexView(generic.TemplateView):
    """Like IndexView but restricted to topics flagged as top news."""
    template_name = "news/index.html"
    def get_context_data(self, **kwargs):
        """Add the top-news topic list, domain name and news type."""
        context = super(TopIndexView, self).get_context_data(**kwargs)
        context['topic'] = get_list_or_404(Topics, domain_tags=self.kwargs.get("country_name"), top_news="top")
        context['domain'] = self.kwargs.get("country_name")
        context['news_type'] = 'top'
        return context
class ForeignView(generic.TemplateView):
    """Render worldwide / per-country / Japan COVID-19 statistics."""
    template_name = "news/foreign.html"
    def get_context_data(self, **kwargs):
        """Fetch statistics from the public API and add them to the context."""
        context = super(ForeignView, self).get_context_data(**kwargs)
        # The world URL previously carried a stray leading space; trimmed.
        api_world = "https://coronavirus-19-api.herokuapp.com/all"
        api = "https://coronavirus-19-api.herokuapp.com/countries"
        api_japan = "https://coronavirus-19-api.herokuapp.com/countries/japan"
        # response.json() replaces the json.loads(r.text) round-trip.
        data_world = requests.get(api_world).json()
        data = requests.get(api).json()
        data_jp = requests.get(api_japan).json()
        # Countries sorted by confirmed case count, highest first.
        context['corona'] = sorted(data, key=lambda x: x['cases'], reverse=True)
        context['world_corona'] = data_world
        context['jp_corona'] = data_jp
        return context
# Create your views here.
|
21,415 | e413674117d9997306a869dd1cdcaf01efeb6382 | import zmq
import socket
import msgpack
import os
# Client script: request an encoder for the given mission over ZeroMQ,
# download it, run the local encoding step and report completion.
mission_dict = {"mission": "image classification", "image_size": [3,32,32]}
#send request
context = zmq.Context()
zmq_socket = context.socket(zmq.REQ)
zmq_socket.connect("tcp://127.0.0.1:60001")
zmq_socket.send(msgpack.dumps(mission_dict))
#get and download encoder
# recv() returns bytes on Python 3; decode so the wget URL does not become
# "b'...'".  (The original formatted the raw bytes object.)
file = zmq_socket.recv().decode()
# NOTE(review): the filename is interpolated into a shell command without
# quoting/escaping -- safe only if the server is trusted.
os.system("wget 127.0.0.1:8080/{}".format(file))
#data encoding
os.system("python -u user.py > user.log")
# zmq sockets require bytes on Python 3; the original sent a str, which
# raises TypeError there.
zmq_socket.send(b"complete")
|
21,416 | 35972c0314842f424088f85182615ef310592e47 | from rest_framework import viewsets, filters, status
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from ..models import Course
from ..serializers import CourseSerializer
from courses.api.permissions import CoursePermission
from courses.api.filters import CourseFilter
class CourseViewSet(viewsets.ModelViewSet):
    """CRUD + membership endpoints for courses, with selectable response
    fields (?fields=(a,b)) and ordering (?ordering=field)."""
    permission_classes = [CoursePermission]
    queryset = Course.objects.all()
    serializer_class = CourseSerializer
    filter_backends = [DjangoFilterBackend, filters.OrderingFilter]
    filterset_class = CourseFilter
    # Fields exposed both for ordering and as the default list columns.
    ordering_fields = ['id', 'title', 'subtitle', 'price']
    ordering = ['id']
    def get_displayed_fields(self, pk=None):
        """Return the serializer field names requested via ?fields=.

        NOTE(review): the [1:-1] slice assumes the value is wrapped in
        brackets, e.g. ?fields=(id,title) -- an unwrapped value loses its
        first and last characters; confirm the expected client format.
        Falls back to ordering_fields for lists, all fields (None) for
        detail views.
        """
        fields_string = self.request.query_params.get('fields')
        if fields_string is None:
            if pk is None:
                fields = self.ordering_fields
            else:
                fields = None
        else:
            fields_string = fields_string[1:-1]
            fields_list = fields_string.split(',')
            fields = tuple(fields_list)
        return fields
    def get_field_order(self):
        """Return a validated ?ordering= field, defaulting to 'id'."""
        order_field = self.request.query_params.get('ordering')
        if order_field:
            # Strip the descending prefix before validating the field name.
            field = order_field.replace("-", "")
            order_field = order_field if (field in self.ordering_fields) else self.ordering[0]
        else:
            order_field = self.ordering[0]
        return order_field
    def list(self, request, **kwargs):
        """Paginated course list restricted to the requested fields."""
        fields = self.get_displayed_fields()
        queryset = super().get_queryset()
        order_field = self.get_field_order()
        # NOTE(review): the explicit order_by is commented out, so
        # order_field is computed but unused here; ordering is left to the
        # OrderingFilter backend via filter_queryset -- confirm intended.
        # queryset = queryset.order_by(order_field)
        queryset = self.filter_queryset(queryset)
        page = self.paginate_queryset(queryset)
        serializer = self.serializer_class(page, many=True, fields=fields)
        return self.get_paginated_response(serializer.data)
    def retrieve(self, request, pk=None):
        """Single-course detail, optionally restricted to ?fields=."""
        fields = self.get_displayed_fields(pk=pk)
        data = self.get_object()
        serializer = self.serializer_class(data, fields=fields)
        return Response(serializer.data)
    def create(self, request, *args, **kwargs):
        """Create a course with the requesting user as its author."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save(author=request.user)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    @action(detail=True, methods=['post'], permission_classes=[IsAuthenticated])
    def join(self, request, pk=None, *args, **kwargs):
        """ action to join class """
        course = self.get_object()
        user = request.user
        serializer = self.serializer_class(course)
        # Course.objects.join returns falsy when the user already joined.
        res = Course.objects.join(course.id, user)
        if res:
            return Response({'status': True,
                             'message': 'Success Join Course',
                             'data': serializer.data}, status=status.HTTP_200_OK)
        else:
            return Response({'status': False,
                             'message': 'You have joined Course, Please check your dashboard',
                             'data': serializer.data},
                            status=status.HTTP_200_OK)
    @action(detail=False)
    def me(self, request, **kwargs):
        """Paginated list of courses authored by the requesting user."""
        fields = self.get_displayed_fields()
        queryset = super().get_queryset().filter(author=request.user)
        order_field = self.get_field_order()
        queryset = queryset.order_by(order_field)
        page = self.paginate_queryset(queryset)
        serializer = self.serializer_class(page, many=True, fields=fields)
        return self.get_paginated_response(serializer.data)
    @action(detail=False)
    def my_class(self, request, **kwargs):
        """Paginated list of courses the requesting user has joined."""
        fields = self.get_displayed_fields()
        user = request.user
        queryset = super().get_queryset().filter(members__id=user.id)
        order_field = self.get_field_order()
        queryset = queryset.order_by(order_field)
        page = self.paginate_queryset(queryset)
        serializer = self.serializer_class(page, many=True, fields=fields)
        return self.get_paginated_response(serializer.data)
|
21,417 | 7be06d29075c1eb18e619f234a70eb90233a828d | vp=0
# Cash-register script: cp = quantity, vp = unit price, iva = tax,
# des = user menu choice, st = running subtotal.
cp=0
iva=0
des=0
st=0
print("Programa que va a dar a conocer el total a pagar en una caja registradora")
cp=(int(input("Ingrese la cantidad de articulos:\n")))
vp=(int(input("Ingrese el valor de cada producto:\n")))
des=(int(input("¿Desea ingresar otro articulo? Si=1, No=2\n")))
st=vp*cp
# Keep adding line items until the user answers "No" (2).
while des == 1:
    cp=(int(input("Ingrese la cantidad de articulos:\n")))
    vp=(int(input("Ingrese el valor de cada producto:\n")))
    st=st+cp*vp
    des=(int(input("¿Desea ingresar otro articulo? Si=1, No=2\n")))
# 16% tax (IVA).  'des' is reused below for the payment method.
iva = st * 0.16
des=(int(input("Ingrese su forma de pago: Tarjeta=3, Efectivo=4\n")))
if (des==3):
    # Card: subtotal plus tax.
    print ("El valor total a pagar es: ",st+iva)
else:
    # Cash: subtotal MINUS iva — NOTE(review): presumably a cash discount;
    # confirm cash payments really deduct rather than add the tax.
    print("El valor total a pagar es: ",st-iva)
|
21,418 | de4e25bf7c8cea1911b69203721e0ad941a5b834 | from argparse import ArgumentParser
import pandas as pd
from sklearn.model_selection import train_test_split
import xgboost as xgb
import mlflow
import mlflow.xgboost
print("Tracking URI:", mlflow.tracking.get_tracking_uri())
print("MLflow Version:", mlflow.__version__)
print("XGBoost version:",xgb.__version__)
def build_data(data_path):
    """Load the wine-quality CSV and split it into features X and target y."""
    frame = pd.read_csv(data_path)
    y = frame["quality"]
    X = frame.drop(columns=["quality"])
    return X, y
if __name__ == "__main__":
    # CLI: load a trained model by MLflow URI and score the wine-quality
    # data through both the native XGBoost and generic pyfunc flavors.
    parser = ArgumentParser()
    parser.add_argument("--model_uri", dest="model_uri", help="model_uri", required=True)
    parser.add_argument("--data_path", dest="data_path", help="data_path", default="../../data/train/wine-quality-white.csv")
    args = parser.parse_args()
    print("Arguments:")
    for arg in vars(args):
        print(f"  {arg}: {getattr(args, arg)}")
    X,y = build_data(args.data_path)
    # Native flavor needs a DMatrix; pyfunc takes the raw DataFrame.
    X_xgb = xgb.DMatrix(X, label=y)
    print("\n=== mlflow.xgboost.load_model")
    model = mlflow.xgboost.load_model(args.model_uri)
    print("model:", model)
    predictions = model.predict(X_xgb)
    print("predictions.type:", type(predictions))
    print("predictions.shape:", predictions.shape)
    print("predictions:", predictions)
    print("\n=== mlflow.pyfunc.load_model")
    model = mlflow.pyfunc.load_model(args.model_uri)
    print("model:", model)
    predictions = model.predict(X)
    print("predictions.type:", type(predictions))
    print("predictions.shape:", predictions.shape)
    print("predictions:", predictions)
21,419 | 77c3f536944a96e1ae22c6d935c6b74879065b21 | import numpy as np
"""
MCESP for Game-Delayed Reinforcements
"""
class MCESP_D:
    """Two-action Q-learner over a discretized stress-observation space
    (MCESP for Game-Delayed Reinforcements)."""

    def __init__(self, observations, field = np.zeros((1,1))):
        """
        Build the Q and visit-count tables, then set the observation prior.

        Parameters
        ----------
        observations : int
            The allowed specificity of stress levels
        field : array, shape (H, W)
            A NumPy grayscale image (integration currently stubbed)
        """
        self.actions = 2
        self.observations = observations
        self.q_table = np.ones((self.observations, self.actions))
        self.c_table = np.zeros((self.observations, self.actions))
        self.set_prior(field)

    def set_prior(self, field):
        """Initialize uniform observation thresholds and reset the
        discretization sample count to 1."""
        levels = self.observations
        self.observation_thresholds = [step / levels for step in range(levels)]
        self.observation_samples = 1
        # TODO: For use after integrating image processing with MCESP for Game-Delayed Reinforcements
        # self.norm = field.max()

    def update_reward(self, observation, action, reward):
        """Blend a delayed reward into Q using the per-pair learning rate."""
        lr = self.count(observation, action)
        current = self.q_table[observation, action]
        self.q_table[observation, action] = (1 - lr) * current + lr * reward  # Canonical Q-update
        self.increment_count(observation, action)

    def count(self, observation, action):
        """Learning-rate schedule: 1 / (1 + visit count)."""
        return 1 / (1 + self.c_table[observation, action])

    def increment_count(self, observation, action):
        self.c_table[observation, action] += 1

    def act(self, observation):
        """Greedy action for `observation`; ties are broken uniformly at random."""
        row = self.q_table[observation]
        best = np.argwhere(row == row.max()).flatten()
        return np.random.choice(best)
21,420 | d4a6994e10c26c1352e130c8d84f0bf179bb6885 | import repeater as r
# Demo driver for the repeater module: read a string and a repeat count
# (prompts are in Korean), then exercise repeat() and once().
s = input("반복할 문자를 입력하세요 : ")
n = input("반봇회수를 입력하세요 : ")
r.repeat(s, int(n))
# Called without a count — relies on repeat() providing a default.
r.repeat(s)
r.once(s)
|
21,421 | d7f0295f028a07315eb4089841a9b968bccaf102 | """
我们有一个由平面上的点组成的列表 points。需要从中找出 K 个距离原点 (0, 0) 最近的点。
(这里,平面上两点之间的距离是欧几里德距离。)
你可以按任何顺序返回答案。除了点坐标的顺序之外,答案确保是唯一的。
示例 1:
输入:points = [[1,3],[-2,2]], K = 1
输出:[[-2,2]]
解释:
(1, 3) 和原点之间的距离为 sqrt(10),
(-2, 2) 和原点之间的距离为 sqrt(8),
由于 sqrt(8) < sqrt(10),(-2, 2) 离原点更近。
我们只需要距离原点最近的 K = 1 个点,所以答案就是 [[-2,2]]。
示例 2:
输入:points = [[3,3],[5,-1],[-2,4]], K = 2
输出:[[3,3],[-2,4]]
(答案 [[-2,4],[3,3]] 也会被接受。)
提示:
1 <= K <= points.length <= 10000
-10000 < points[i][0] < 10000
-10000 < points[i][1] < 10000
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/k-closest-points-to-origin
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
class Solution:
    def kClosest(self, points: list, K: int) -> list:
        """Return the K points closest to the origin (any order).

        Keeps a size-K max-heap keyed on squared distance; each later
        point closer than the heap root replaces it.
        """
        if len(points) == 0:
            return []
        res = [points[0]]
        for i in range(1, K):
            self.insert(res, points[i])
        max_val = res[0][0] ** 2 + res[0][1] ** 2
        for i in range(K, len(points)):
            val = points[i][0] ** 2 + points[i][1] ** 2
            if max_val > val:
                self.update(res, points[i], val)
                max_val = res[0][0] ** 2 + res[0][1] ** 2
        return res

    def update(self, heap, point, val):
        """Replace the root with `point` (squared distance `val`) and sift
        it down to restore the max-heap property."""
        heap[0] = point
        index = 0
        while True:
            left = index * 2 + 1
            right = left + 1
            if left >= len(heap):
                break
            left_val = heap[left][0] ** 2 + heap[left][1] ** 2
            if right < len(heap):
                right_val = heap[right][0] ** 2 + heap[right][1] ** 2
            else:
                right_val = None
            # Stop once val dominates both children.  BUG FIX: the old test
            # `(right_val and val >= right_val)` treated a child at the
            # origin (squared distance 0, falsy) as "keep sifting", which
            # broke the heap order and could return the wrong K points.
            if val >= left_val and (right_val is None or val >= right_val):
                break
            if right_val is None or left_val >= right_val:
                heap[index], heap[left] = heap[left], heap[index]
                index = left
            else:
                heap[index], heap[right] = heap[right], heap[index]
                index = right

    def insert(self, heap, val):
        """Append the point `val` and bubble it up."""
        heap.append(val)
        self.heapity(heap)

    def heapity(self, heap):
        """Sift the last element up until the max-heap property holds."""
        index = len(heap) - 1
        son_val = heap[index][0] ** 2 + heap[index][1] ** 2
        while index > 0:
            parent = (index - 1) // 2
            par_val = heap[parent][0] ** 2 + heap[parent][1] ** 2
            if son_val > par_val:
                heap[index], heap[parent] = heap[parent], heap[index]
                index = parent
            else:
                break
if __name__ == "__main__":
    # Ad-hoc smoke test: print the 6 points nearest the origin.
    solu = Solution()
    print(solu.kClosest([[-66,42],[-67,94],[46,10],[35,27],[-9,-6],[-84,-97],[38,-22],[3,-39],[71,-97],[-40,-86],[-45,56],[-91,59],[24,-11],[91,100],[-68,43],[34,27]], 6))
21,422 | 372070f2c902f31cc51044bd31e53c9bd5db99f9 | import os
import jwt
import datetime
import random
from app import db
from models import User, Otp
from flask import request, Blueprint
from helpers import token_required
from twilio.rest import Client
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy import and_
api = Blueprint('api', __name__)
@api.route('/signup', methods=['POST'])
def add_user():
    """Register a new user; rejects duplicate mobile number or email."""
    data = request.get_json()
    password = generate_password_hash(data['password'], method='sha256')
    existing_mobile = User.query.filter_by(mobile=data['mobile']).first()
    existing_mail = User.query.filter_by(email_address=data['email']).first()
    if existing_mobile:
        return {'message': "Mobile number already exists, try with another number!"}
    if existing_mail:
        return {'message': "Email address already exists, try with another email address!"}
    new_user = User(name=data['name'], mobile=data['mobile'], email_address=data['email'], password=password)
    db.session.add(new_user)
    db.session.commit()
    return {'message': 'new user added successfully'}
@api.route('/login')
def login():
    """Authenticate by email/password and issue a 15-minute JWT.

    NOTE(review): the route registers only GET (Flask's default) yet reads
    a JSON body — confirm whether this should be methods=['POST'].
    """
    data = request.get_json()
    email = data['email']
    password = data['password']
    user = User.query.filter_by(email_address=email).first()
    if not user:
        return {'message': 'user does not exist'}
    if not password or not check_password_hash(user.password, password):
        return {'message': 'please enter valid password'}
    # The token carries only the user id; expiry is enforced on decode.
    token = jwt.encode(
        {'id': user.id, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=15)},
        os.environ.get('SECRET_KEY'), algorithm="HS256")
    # The stored token lets token_required/logout invalidate sessions.
    user.jwt_token = token
    db.session.add(user)
    db.session.commit()
    return {'message': 'you have successfully logged in', 'token': token}
@api.route('/change-password')
@token_required
def change_password(current_user):
    """Change the authenticated user's password after re-verifying the
    current one and confirming the two new entries match."""
    data = request.get_json()
    existing_password = data['password']
    if not check_password_hash(current_user.password, existing_password):
        return {'message': 'wrong existing password'}
    new_password = data['new_password']
    new_password1 = data['new_password1']
    if new_password != new_password1:
        return {'message': 'passwords do not match'}
    current_user.password = generate_password_hash(new_password, method='sha256')
    db.session.add(current_user)
    db.session.commit()
    return {'message': 'password changed successfully'}
@api.route('/logout')
@token_required
def logout(current_user):
    """Invalidate the stored JWT so the session cannot be reused."""
    current_user.jwt_token = None
    db.session.add(current_user)
    db.session.commit()
    return {'message': 'you have successfully logged out'}
@api.route('/forgot-password-send-otp')
def send_otp():
    """Send a password-reset OTP over SMS (Twilio) and email (SendGrid).

    Reuses an existing unexpired, unverified OTP if one is pending.
    NOTE(review): both 'mobile' and 'email' keys are assumed present in
    the payload when sending; confirm clients always supply both.
    """
    data = request.get_json()
    # .get() avoids a KeyError (HTTP 500) when a key is absent entirely.
    if data.get('email'):
        user = User.query.filter_by(email_address=data['email']).first()
    elif data.get('mobile'):
        user = User.query.filter_by(mobile=data['mobile']).first()
    else:
        return {'message': 'please enter a valid email/mobile.'}
    if user is None:
        # Unknown account: previously this fell through to `user.id`
        # below and crashed with an AttributeError.
        return {'message': 'please enter a valid email/mobile.'}
    account_sid = os.environ.get('TWILIO_ACCOUNT_SID')
    auth_token = os.environ.get('TWILIO_AUTH_TOKEN')
    client = Client(account_sid, auth_token)
    # Reuse a still-valid OTP so repeated requests don't rotate the code.
    existing_otp = Otp.query.filter(and_(Otp.user_id == user.id, Otp.expiry > db.func.now(), Otp.verified == 0)).\
        order_by(Otp.created_at.desc()).first()
    if existing_otp:
        otp = existing_otp.code
    else:
        otp = random.randint(1000, 9999)
        user_otp = Otp(user_id=user.id, code=otp, expiry=datetime.datetime.utcnow() + datetime.timedelta(minutes=5))
        db.session.add(user_otp)
        db.session.commit()
    # The old code bound this result to `message` and immediately
    # overwrote it with the SendGrid Mail object; the assignment is gone.
    client.messages.create(
        body=f"Hello {user.name} Your otp for forgot password is {otp}", from_='+13347218537', to=f"+91{data['mobile']}")
    message = Mail(
        from_email='sameersetia17@gmail.com',
        to_emails=data['email'],
        subject='Forgot Password - OTP',
        html_content=f'Hello {user.name} your otp for forget password is {otp}')
    sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
    try:
        sg.send(message)
    except Exception as e:
        # Best-effort email: the SMS may still have gone out.
        print(e)
    return {'message': 'message sent successfully'}
@api.route('/forgot-password-validate-otp')
def validate_otp():
    """Validate a reset OTP and, on success, set the new password."""
    data = request.get_json()
    # .get() avoids a KeyError (HTTP 500) when a key is absent entirely.
    if data.get('email'):
        user = User.query.filter_by(email_address=data['email']).first()
    elif data.get('mobile'):
        user = User.query.filter_by(mobile=data['mobile']).first()
    else:
        return {'message': 'please enter a valid email/mobile.'}
    if user is None:
        # Previously an unknown account crashed on `user.id` below.
        return {'message': 'please enter a valid email/mobile.'}
    user_otp = Otp.query.filter(and_(Otp.user_id == user.id, Otp.expiry > db.func.now(), Otp.verified == 0)).\
        order_by(Otp.created_at.desc()).first()
    if user_otp and user_otp.code == data['otp']:
        pass1 = data['pass1']
        pass2 = data['pass2']
        if pass1 != pass2:
            return {'message': 'passwords do not match'}
        user.password = generate_password_hash(pass1, method='sha256')
        db.session.add(user)
        # Mark the OTP consumed so it cannot be replayed.
        user_otp.verified = 1
        db.session.add(user_otp)
        db.session.commit()
        return {'message': 'password reset successful'}
    else:
        return {'message': 'please enter a valid otp.'}
|
21,423 | 8a6a324dfb85827745e496c164bd388c62b33356 | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
class Questions(models.Model):
    """Multiple-choice quiz question: four options, one answer, grouped by
    category ("catagory" is a legacy misspelling; renaming the field would
    require a schema migration)."""
    CAT_CHOICES = (
        ('Dogs','dogs'),
        ('Cats','cats'),
    )
    question = models.CharField(max_length = 250)
    optiona = models.CharField(max_length = 100)
    optionb = models.CharField(max_length = 100)
    optionc = models.CharField(max_length = 100)
    optiond = models.CharField(max_length = 100)
    answer = models.CharField(max_length = 100)
    catagory = models.CharField(max_length=20, choices = CAT_CHOICES)
    class Meta:
        # Default listing: categories in reverse alphabetical order.
        ordering = ('-catagory',)
    def __str__(self):
        return self.question
class Users(models.Model):
    """Profile wrapper around the auth User.  ``type`` flags the account
    kind (default True) — NOTE(review): the flag's meaning is not visible
    here; confirm before relying on it."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    type = models.BooleanField(default=True)
    def __str__(self):
        return self.user.username
|
21,424 | 123f277a3ad0f537cbc161bf7b9b384c4d3ac9c7 | import discord
from src.on_message import execute
from src.helpers.discord_response import response
from src.enums import system_variables as sv
import os
client = discord.Client()
async def configure():
    """Ensure the working directories used by the bot exist.

    NOTE(review): mode 0o666 on a directory omits the execute bit needed
    to enter it — confirm 0o755/0o777 wasn't intended.
    """
    dirs = ["./csv", "./graphs", "./saved"]
    mode = 0o666
    for d in dirs:
        if not os.path.exists(d):
            os.mkdir(d, mode)
@client.event
async def on_ready():
    # Runs once the gateway connection is established.
    await configure()
    print(f'{client.user} has connected to Discord!')
@client.event
async def on_message(message):
    """Dispatch non-bot messages from the listening channel to the command
    executor; failures are reported back to the channel."""
    if message.author == client.user: # This line avoids loops
        return
    if message.author.bot:
        return
    if message.channel.name != sv.LISTENING_CHANNEL:
        return
    try:
        await response(await execute(message.content), message)
    except Exception as e:
        # Surface the failure to the user instead of crashing the bot.
        await response(e, message)
client.run(sv.TOKEN)
|
21,425 | 6b925a6c02d2415674e24160fc317565ef9d832a | from django.test import TransactionTestCase
from django.apps import apps
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from care.facility.models.patient_base import DiseaseStatusEnum
from care.utils.tests.test_base import TestBase
from care.users.models import District, State
class BasePatientRegistrationMigrationTest(TransactionTestCase):
    """
    Test specific migrations
    Make sure that `self.migrate_from` and `self.migrate_to` are defined.
    """
    migrate_from = None
    migrate_to = None
    @property
    def app(self):
        # Resolve the Django app label owning the concrete test class.
        return apps.get_containing_app_config(type(self).__module__).label
    def setUp(self):
        super().setUp()
        assert self.migrate_to and self.migrate_from, \
            f'TestCase {type(self).__name__} must define migrate_to and migrate_from properties'
        self.migrate_from = [(self.app, self.migrate_from)]
        self.migrate_to = [(self.app, self.migrate_to)]
        self.executor = MigrationExecutor(connection)
        self.old_apps = self.executor.loader.project_state(self.migrate_from).apps
        # revert to the original migration
        self.executor.migrate(self.migrate_from)
        # ensure return to the latest migration, even if the test fails
        self.addCleanup(self.force_migrate)
        self.setUpBeforeMigration(self.old_apps)
        # Apply the migration under test and capture the resulting state.
        self.executor.loader.build_graph()
        self.executor.migrate(self.migrate_to)
        self.new_apps = self.executor.loader.project_state(self.migrate_to).apps
    def setUpBeforeMigration(self, apps):
        """
        This method may be used to create stuff before the migrations.
        Something like creating an instance of an old model.
        """
        pass
    @property
    def new_model(self):
        # PatientRegistration as defined AFTER the migration under test.
        return self.new_apps.get_model(self.app, 'PatientRegistration')
    @property
    def old_model(self):
        # PatientRegistration as defined BEFORE the migration under test.
        return self.old_apps.get_model(self.app, 'PatientRegistration')
    def force_migrate(self, migrate_to=None):
        """Migrate forward to `migrate_to`, defaulting to the app's latest."""
        self.executor.loader.build_graph()  # reload.
        if migrate_to is None:
            # get latest migration of current app
            migrate_to = [key for key in self.executor.loader.graph.leaf_nodes() if key[0] == self.app]
        self.executor.migrate(migrate_to)
class DiseaseStatusMigrationTest(BasePatientRegistrationMigrationTest):
    """Verify migration 0224 rewrites disease_status RECOVERY -> RECOVERED."""
    migrate_from = '0223_merge_20210427_1419'
    migrate_to = '0224_change_disease_status_from_recover_to_recovered'
    def create_patient(self):
        # Build a patient against the pre-migration model; drop the keys
        # the historical model cannot accept directly (ids are passed
        # separately as state_id/district_id).
        data = self.data.copy()
        data.pop('medical_history', [])
        data.pop('state', '')
        data.pop('district', '')
        return self.old_model.objects.create(**data)
    def setUpBeforeMigration(self, apps):
        """Create a patient in the old RECOVERY state before migrating."""
        _state = State.objects.create(name='bihar')
        _district = District.objects.create(state=_state, name='dharbhanga')
        self.data = TestBase.get_patient_data(state=_state, district=_district)
        self.data.update({
            'disease_status': DiseaseStatusEnum.RECOVERY.value,
            'state_id': _state.id,
            'district_id': _district.id,
        })
        self.patient = self.create_patient()
    def test_recover_changed_to_recovered(self):
        patient = self.new_model.objects.get(id=self.patient.id)
        self.assertEqual(patient.disease_status, DiseaseStatusEnum.RECOVERED.value)
|
21,426 | 0ec3619cec0dc520dabfe5fe4317acc9f95312fb | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import re
"""
String Util
"""
def checkURI(str):
    """Return 1 when `str` matches the URI pattern, else 0.

    NOTE(review): the pattern is currently the empty string, which matches
    every input (including ''), so this always returns 1 — the intended
    URI regex appears to be missing.  The parameter also shadows the
    builtin ``str``.
    """
    if re.search('',str,re.S):
        return 1
    else:
        return 0
def delete_space(obj):
    """Remove every empty string from `obj` in place and return it.

    Single O(n) pass; the previous `while '' in obj: obj.remove('')`
    rescanned the list for each removal (O(n^2) overall).
    """
    obj[:] = [item for item in obj if item != '']
    return obj
def fromat_list(obj):
    """Return a copy of `obj` with each entry stripped and every newline
    and space character removed."""
    return [entry.strip().replace("\n", "").replace(" ", "") for entry in obj]
def toString(result,result1):
ls1 = delete_space(fromat_list(result))
ls2 = delete_space(fromat_list(result1))
for i in range(0,len(ls1)):
print ls1[i],ls2[i]
def toString(result,result1,result2):
ls1 = delete_space(fromat_list(result))
ls2 = delete_space(fromat_list(result1))
ls3 = delete_space(fromat_list(result2))
for i in range(0,len(ls1)):
print ls1[i],ls2[i],ls3[i] |
21,427 | 328352e49941de01d48537891c30a81c63e993df | from os import getenv
from typing import Type
from flask import Flask
from flask_cors import CORS
from flask_restful import Api
from flask_jwt import JWT
from util.db import db
from util.security import authenticate, identify
from resources.index import Index
from resources.task import Task, TaskList, TaskCreate
def create_app():
# Flask setup
app = Flask(__name__)
app.secret_key = getenv('FLASK_SECRET_KEY', 'default')
cors = CORS(app, resources={r"*": {"origins": "*"}})
api = Api(app)
# Database setup
app.config['SQLALCHEMY_DATABASE_URI'] = getenv(
'MYSQL_URI', 'mysql+pymysql://queue_user:queue_password@localhost/queue_db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
# Authentication JWT
JWT(
app=app,
authentication_handler=authenticate,
identity_handler=identify
)
# Create Routes
api.add_resource(Index, '/')
api.add_resource(TaskCreate, '/user/task')
api.add_resource(Task, '/user/task/<int:task_id>')
api.add_resource(TaskList, '/user/tasks')
# Create database tables
with app.app_context():
db.create_all()
return app
if __name__ == '__main__':
create_app().run()
|
21,428 | 7b0b668bf77027471e80996fda597a7e077b9db0 | import sys
import time
from datetime import datetime
nl = "\n" # Save newline as variable since f-string cannot take backslash
SAVEFILE = "record.csv"
class Workout:
    """A single exercise session: parsed from CLI args, optionally timed
    per set, and appended to the CSV record file."""

    def SaveWorkout(self):
        """Append one CSV row: timestamp, exercise, sets, reps, then one
        duration (or "x" when untimed) per set."""
        with open(SAVEFILE, "a+") as f:
            f.write(f"{datetime.now().strftime('%d/%m/%Y %H:%M:%S')},{self.excercise},{self.sets},{self.reps},")
            for i in range(self.sets):
                f.write(f"{self.times[i]}{',' if i < self.sets-1 else nl}")
        # The with-block closes the file; the old explicit close() inside
        # it was redundant.

    def GetTimes (self):
        """Interactively time each set (seconds between two ENTER presses)."""
        for i in range(self.sets):
            input(f"Press [ENTER] to start set {i+1}")
            start = time.time()
            input(f"Press [ENTER] to end set {i+1}")
            end = time.time()
            self.times[i] = (int(end-start))
            print(f"Set {i+1} completed in {self.times[i]} seconds\n")

    def __init__(self, argv, excerciseList):
        """Parse CLI flags: -e exercise (name, or index into excerciseList),
        -s sets (default 1), -r reps, -mr max reps, -nt skip timing.
        Exits with a message on missing or invalid values."""
        self.excercise, self.sets, self.reps = None, None, None
        self.ignoreTime = False
        # Take args
        for i in range(len(argv)):
            arg = argv[i].lower()
            try:
                if arg == "-e":
                    excercise = argv[i+1]
                    if excercise.isnumeric():
                        excercise = int(excercise)
                        # BUG FIX: the old bounds check (len < excercise - 1)
                        # let indices len and len+1 through and crashed on
                        # the list lookup below with an IndexError.
                        if excercise >= len(excerciseList):
                            print(f"Excercise {excercise} is out of bounds")
                            sys.exit()
                        self.excercise = excerciseList[excercise]  # excercise from list
                    else:
                        self.excercise = excercise  # new excercise
                elif arg == "-s":
                    self.sets = int(argv[i+1])
                elif arg == "-r":
                    self.reps = int(argv[i+1])
                elif arg == "-nt":
                    self.ignoreTime = True
                elif arg == "-mr":
                    self.reps = "MAX"
            except IndexError:
                print(f"Missing value for {arg}")
                sys.exit()
            except ValueError:
                print(f"Value given for {arg} is NaN")
                sys.exit()
        # Check args
        if self.excercise is None:
            print("excercise not specified")
            sys.exit()
        if self.sets is None:
            self.sets = 1
        if self.reps is None:
            print("Reps not specified")
            sys.exit()
        # Show brief
        msg = f"{self.excercise} - {self.sets} x {self.reps}"
        print(f"{'='*len(msg)}\n{msg}\n{'='*len(msg)}")
        # Init times table
        self.times = ["x"] * self.sets
def GetExcerciseList():
    """Return the distinct exercise names recorded in SAVEFILE (second CSV
    column), in first-seen order.

    The local accumulator no longer shadows the builtin ``list``, and the
    file is streamed line by line instead of read wholesale.
    """
    excercises = []
    with open(SAVEFILE, "r") as f:
        for line in f:
            name = line.split(",")[1]
            if name not in excercises:
                excercises.append(name)
    return excercises
if __name__ == "__main__":
    excerciseList = GetExcerciseList()
    # --list / -l: show known exercises with their selectable indices.
    if "--list" in sys.argv or "-l" in sys.argv:
        for i in range(len(excerciseList)):
            print(f"{i}\t{excerciseList[i]}")
        sys.exit()
    workout = Workout(sys.argv, excerciseList)
    if not workout.ignoreTime:
        workout.GetTimes()
    workout.SaveWorkout()
|
21,429 | 156b41cc5a3b517facf8f51a1c32e5f4d89a219d | # This script will parse the current directory recursively and find any .h and .cpp files.
# In any file it finds, it will look for #include "anyfile.h" strings, find the file referenced and replace
# the whole #include statement with #include <Path/From/Current/Directory/anyfile.h>
# This will not work if there are files with the same name in the hierarchy. Then the first file w/ a given name in a depth-first search will be used.
import re
import os
import os.path
import string
# Returns the full path to a specific file or none if no such file
# Returns the full path to a specific file or none if no such file
def FindFile(filename):
    """Depth-first search under the CWD; return the first matching path,
    or None when the file does not exist anywhere in the tree."""
    for dirpath, _dirnames, filenames in os.walk("."):
        if filename in filenames:
            return os.path.join(dirpath, filename)
    return None
# Find #include directives and manipulate them
def ChangeFile(filename, root):
    # Rewrite each `#include "x.h"` in `filename` as `#include <full/path/x.h>`
    # relative to the scan root; unresolved includes are annotated in place.
    # (Python 2 source: print statements and the string module.)
    file = open(filename, "r+")
    lines = file.readlines()
    file.close()
    # Find include directives and rewrite them
    newlines = []
    for line in lines:
        if line.startswith('#include "'):
            lastquote = string.rfind(line, '"')
            includename = line[10 : lastquote]
            print lastquote
            fullpath = FindFile(includename)
            if (fullpath == None):
                # Keep the original line but flag it for manual follow-up.
                line = line[:-1] + " // Relative path not found\n"
                print "Relative path not found in file", filename
            else:
                # fullpath begins with "./"; strip it for the <...> form.
                line = "#include <%s>\n" % fullpath[2:]
        newlines.append(line)
    # Write the new, modified, lines back
    file = open(filename, "w+")
    file.writelines(newlines)
    file.close()
# Traverse the current directory recursively
# Walk every directory under the CWD and rewrite the includes of each
# C++ source/header found (Python 2 prints).
for root, dirs, files in os.walk("."):
    print "Directory", root
    print "Directories: ", dirs
    print "Files: ", files
    print "\n"
    for filename in files:
        if filename.endswith(".cpp") or filename.endswith(".h"):
            # parse file
            filename = os.path.join(root, filename)
            print filename
            ChangeFile(filename, root)
    print "\n"
21,430 | 9cec215f9eebb88bdaaba771efc1c43d2594bac9 | #https://modcom.co.ke/datascience/sales.csv
#Get sales.csv
import pandas
# Load the sales data, preview it, and report missing values per column.
df=pandas.read_csv('sales.csv')
print(df)
print(df.isnull().sum())
import matplotlib.pyplot as plt
#histograms of ext price
#pie chart for show category distribution
#Which category brought more cash?
#Which company brought the highest cash?
#Which month,year has highest sales
#What is the correlation between unit price and ext price
#jupyterlab online free editor;jupyter.org
|
21,431 | 6014b9933f9d786eff819fd904b48c699efcf7c5 | """
Use Azure Blob as a Pillar source.
.. versionadded:: 3001
:maintainer: <devops@eitr.tech>
:maturity: new
:depends:
* `azure-storage-blob <https://pypi.org/project/azure-storage-blob/>`_ >= 12.0.0
The Azure Blob ext_pillar can be configured with the following parameters:
.. code-block:: yaml
ext_pillar:
- azureblob:
container: 'test_container'
connection_string: 'connection_string'
multiple_env: False
environment: 'base'
blob_cache_expire: 30
blob_sync_on_update: True
:param container: The name of the target Azure Blob Container.
:param connection_string: The connection string to use to access the specified Azure Blob Container.
:param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.
Defaults to false.
:param environment: Specifies which environment the container represents when in single environment mode. Defaults
to 'base' and is ignored if multiple_env is set as True.
:param blob_cache_expire: Specifies expiration time of the Azure Blob metadata cache file. Defaults to 30s.
:param blob_sync_on_update: Specifies if the cache is synced on update. Defaults to True.
"""
import logging
import os
import pickle
import time
from copy import deepcopy
import salt.utils.files
import salt.utils.hashutils
from salt.pillar import Pillar
HAS_LIBS = False
try:
# pylint: disable=no-name-in-module
from azure.storage.blob import BlobServiceClient
# pylint: enable=no-name-in-module
HAS_LIBS = True
except ImportError:
pass
__virtualname__ = "azureblob"
# Set up logging
log = logging.getLogger(__name__)
def __virtual__():
    """Only expose this pillar when azure-storage-blob >= 12.0.0 imported."""
    if not HAS_LIBS:
        return (
            False,
            "The following dependency is required to use the Azure Blob ext_pillar: "
            "Microsoft Azure Storage Blob >= 12.0.0 ",
        )
    return __virtualname__
def ext_pillar(
    minion_id,
    pillar,  # pylint: disable=W0613
    container,
    connection_string,
    multiple_env=False,
    environment="base",
    blob_cache_expire=30,
    blob_sync_on_update=True,
):
    """
    Compile pillar data from files cached out of an Azure Blob container.
    (The previous docstring, "Execute a command and read the output as
    YAML.", was copy-pasted from another pillar module.)
    :param container: The name of the target Azure Blob Container.
    :param connection_string: The connection string to use to access the specified Azure Blob Container.
    :param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.
        Defaults to false.
    :param environment: Specifies which environment the container represents when in single environment mode. Defaults
        to 'base' and is ignored if multiple_env is set as True.
    :param blob_cache_expire: Specifies expiration time of the Azure Blob metadata cache file. Defaults to 30s.
    :param blob_sync_on_update: Specifies if the cache is synced on update. Defaults to True.
    """
    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(
        os.path.join(_get_cache_dir(), environment, container)
    )
    # If pillar_roots already points at our cache there is nothing to do.
    if __opts__["pillar_roots"].get(environment, []) == [pillar_dir]:
        return {}
    metadata = _init(
        connection_string, container, multiple_env, environment, blob_cache_expire
    )
    log.debug("Blob metadata: %s", metadata)
    if blob_sync_on_update:
        # sync the containers to the local cache
        log.info("Syncing local pillar cache from Azure Blob...")
        for saltenv, env_meta in metadata.items():
            for container, files in _find_files(env_meta).items():
                for file_path in files:
                    cached_file_path = _get_cached_file_name(
                        container, saltenv, file_path
                    )
                    log.info("%s - %s : %s", container, saltenv, file_path)
                    # load the file from Azure Blob if not in the cache or too old
                    _get_file_from_blob(
                        connection_string,
                        metadata,
                        saltenv,
                        container,
                        file_path,
                        cached_file_path,
                    )
        log.info("Sync local pillar cache from Azure Blob completed.")
    opts = deepcopy(__opts__)
    opts["pillar_roots"][environment] = (
        [os.path.join(pillar_dir, environment)] if multiple_env else [pillar_dir]
    )
    # Avoid recursively re-adding this same pillar
    opts["ext_pillar"] = [x for x in opts["ext_pillar"] if "azureblob" not in x]
    pil = Pillar(opts, __grains__, minion_id, environment)
    compiled_pillar = pil.compile_pillar(ext=False)
    return compiled_pillar
def _init(connection_string, container, multiple_env, environment, blob_cache_expire):
    """
    .. versionadded:: 3001
    Connect to Blob Storage and download the metadata for each file in all containers specified and
    cache the data to disk.
    :param connection_string: The connection string to use to access the specified Azure Blob Container.
    :param container: The name of the target Azure Blob Container.
    :param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.
        Defaults to false.
    :param environment: Specifies which environment the container represents when in single environment mode. Defaults
        to 'base' and is ignored if multiple_env is set as True.
    :param blob_cache_expire: Specifies expiration time of the Azure Blob metadata cache file. Defaults to 30s.
    """
    cache_file = _get_containers_cache_filename(container)
    exp = time.time() - blob_cache_expire
    # Check if cache_file exists and its mtime
    if os.path.isfile(cache_file):
        cache_file_mtime = os.path.getmtime(cache_file)
    else:
        # If the file does not exist then set mtime to 0 (aka epoch)
        cache_file_mtime = 0
    expired = cache_file_mtime <= exp
    log.debug(
        "Blob storage container cache file %s is %sexpired, mtime_diff=%ss,"
        " expiration=%ss",
        cache_file,
        "" if expired else "not ",
        cache_file_mtime - exp,
        blob_cache_expire,
    )
    if expired:
        # Stale (or missing) cache: pull fresh metadata from Azure.
        pillars = _refresh_containers_cache_file(
            connection_string, container, cache_file, multiple_env, environment
        )
    else:
        pillars = _read_containers_cache_file(cache_file)
    log.debug("Blob container retrieved pillars %s", pillars)
    return pillars
def _get_cache_dir():
    """
    .. versionadded:: 3001
    Get pillar cache directory. Initialize it if it does not exist.
    """
    cache_dir = os.path.join(__opts__["cachedir"], "pillar_azureblob")
    if not os.path.isdir(cache_dir):
        log.debug("Initializing Azure Blob Pillar Cache")
        os.makedirs(cache_dir)
    return cache_dir
def _get_cached_file_name(container, saltenv, path):
    """
    .. versionadded:: 3001
    Return the cached file name for a container path file.
    :param container: The name of the target Azure Blob Container.
    :param saltenv: Specifies which environment the container represents.
    :param path: The path of the file in the container.
    """
    file_path = os.path.join(_get_cache_dir(), saltenv, container, path)
    # make sure container and saltenv directories exist
    if not os.path.exists(os.path.dirname(file_path)):
        os.makedirs(os.path.dirname(file_path))
    return file_path
def _get_containers_cache_filename(container):
    """
    .. versionadded:: 3001
    Return the filename of the cache for container contents. Create the path if it does not exist.
    :param container: The name of the target Azure Blob Container.
    """
    cache_dir = _get_cache_dir()
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    return os.path.join(cache_dir, "{}-files.cache".format(container))
def _refresh_containers_cache_file(
    connection_string, container, cache_file, multiple_env=False, environment="base"
):
    """
    .. versionadded:: 3001
    Downloads the entire contents of an Azure storage container to the local filesystem.
    :param connection_string: The connection string to use to access the specified Azure Blob Container.
    :param container: The name of the target Azure Blob Container.
    :param cache_file: The path of where the file will be cached.
    :param multiple_env: Specifies whether the pillar should interpret top level folders as pillar environments.
    :param environment: Specifies which environment the container represents when in single environment mode. This is
        ignored if multiple_env is set as True.
    """
    try:
        # Create the BlobServiceClient object which will be used to create a container client
        blob_service_client = BlobServiceClient.from_connection_string(
            connection_string
        )
        # Create the ContainerClient object
        container_client = blob_service_client.get_container_client(container)
    except Exception as exc:  # pylint: disable=broad-except
        log.error("Exception: %s", exc)
        return False
    metadata = {}

    def _walk_blobs(saltenv="base", prefix=None):
        # Walk the blobs in the container with a generator
        blob_list = container_client.walk_blobs(name_starts_with=prefix)
        # Iterate over the generator
        while True:
            try:
                blob = next(blob_list)
            except StopIteration:
                break
            log.debug("Raw blob attributes: %s", blob)
            # Directories end with "/".
            if blob.name.endswith("/"):
                # BUG FIX: recurse with the CURRENT saltenv.  The old call
                # `_walk_blobs(prefix=blob.name)` dropped it, so in
                # single-environment mode every blob inside a subdirectory
                # was filed under "base" instead of the configured
                # environment.
                _walk_blobs(saltenv=saltenv, prefix=blob.name)
                continue
            if multiple_env:
                saltenv = "base" if (not prefix or prefix == ".") else prefix[:-1]
            if saltenv not in metadata:
                metadata[saltenv] = {}
            if container not in metadata[saltenv]:
                metadata[saltenv][container] = []
            metadata[saltenv][container].append(blob)

    _walk_blobs(saltenv=environment)
    # write the metadata to disk
    if os.path.isfile(cache_file):
        os.remove(cache_file)
    log.debug("Writing Azure blobs pillar cache file")
    with salt.utils.files.fopen(cache_file, "wb") as fp_:
        pickle.dump(metadata, fp_)
    return metadata
def _read_containers_cache_file(cache_file):
    """
    .. versionadded:: 3001
    Return the contents of the containers cache file.
    :param cache_file: The path for where the file will be cached.
    """
    log.debug("Reading containers cache file")
    with salt.utils.files.fopen(cache_file, "rb") as cache_fh:
        return pickle.load(cache_fh)
def _find_files(metadata):
"""
.. versionadded:: 3001
Looks for all the files in the Azure Blob container cache metadata.
:param metadata: The metadata for the container files.
"""
ret = {}
for container, data in metadata.items():
if container not in ret:
ret[container] = []
# grab the paths from the metadata
file_paths = [k["name"] for k in data]
# filter out the dirs
ret[container] += [k for k in file_paths if not k.endswith("/")]
return ret
def _find_file_meta(metadata, container, saltenv, path):
"""
.. versionadded:: 3001
Looks for a file's metadata in the Azure Blob Container cache file.
:param metadata: The metadata for the container files.
:param container: The name of the target Azure Blob Container.
:param saltenv: Specifies which environment the container represents.
:param path: The path of the file in the container.
"""
env_meta = metadata[saltenv] if saltenv in metadata else {}
container_meta = env_meta[container] if container in env_meta else {}
for item_meta in container_meta:
item_meta = dict(item_meta)
if "name" in item_meta and item_meta["name"] == path:
return item_meta
def _get_file_from_blob(
    connection_string, metadata, saltenv, container, path, cached_file_path
):
    """
    .. versionadded:: 3001
    Downloads the entire contents of an Azure storage container to the local filesystem.
    :param connection_string: The connection string to use to access the specified Azure Blob Container.
    :param metadata: The metadata for the container files.
    :param saltenv: Specifies which environment the container represents when in single environment mode. This is
        ignored if multiple_env is set as True.
    :param container: The name of the target Azure Blob Container.
    :param path: The path of the file in the container.
    :param cached_file_path: The path of where the file will be cached.
    """
    # Serve from the local cache when its MD5 matches the hash derived from
    # the blob's etag (etag stripped to alphanumerics).
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, container, saltenv, path)
        file_md5 = None
        if file_meta:
            file_md5 = "".join(ch for ch in file_meta["etag"] if ch.isalnum())
        cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, "md5")
        log.debug(
            "Cached file: path=%s, md5=%s, etag=%s",
            cached_file_path,
            cached_md5,
            file_md5,
        )
        if cached_md5 == file_md5:
            # Cache hit -- nothing to download.
            return
    try:
        # Service client -> container client -> blob client for the target path.
        service_client = BlobServiceClient.from_connection_string(
            connection_string
        )
        blob_client = service_client.get_container_client(
            container
        ).get_blob_client(path)
    except Exception as exc:  # pylint: disable=broad-except
        log.error("Exception: %s", exc)
        return False
    # Overwrite (or create) the cached copy with the blob's full contents.
    with salt.utils.files.fopen(cached_file_path, "wb") as outfile:
        outfile.write(blob_client.download_blob().readall())
|
21,432 | da720fc5ff44d50ab8bc01e346e2a307424b8d79 | from models.order import *
# Seed data: three sample orders used elsewhere in the app.
# NOTE(review): argument meaning (customer, date, product, quantity) is
# inferred from the values -- confirm against the Order constructor in
# models/order.py.
order1 = Order("Niall", "April 14th", "Food", 1)
order2 = Order("Lina", "May 15th", "Books", 1)
order3 = Order("Bob", "June 20th", "Paint", 1)
orders_list = [order1, order2, order3]  # the collection the app iterates over
21,433 | bac14154352643bca26972fe407fb89238722740 | import htkmfc
import sys
import numpy as np
import os
import re
from six.moves import cPickle
import h5py
from fuel.datasets.hdf5 import H5PYDataset
# count the number of chunks in a file
# bkg set
#
# NOTE(review): this is a Python 2 script (print statements below). It builds
# padded, context-stacked filterbank features for the RSR2015 DEVBKG lists and
# writes them into a Fuel-compatible HDF5 dataset.
f1=open('/misc/data15/reco/bhattgau/Rnn/code/Code_/rsr/newshit/lists/seq2seq/rsr-p1-mf-devbkg.trn')
tfeats = [l.strip() for l in f1.readlines()]
f2=open('/misc/data15/reco/bhattgau/Rnn/code/Code_/rsr/newshit/lists/seq2seq/rsr-p1-mf-devbkg.val')
vfeats = [l.strip() for l in f2.readlines()]
# Root directory holding the HTK filterbank feature files.
dpth='/misc/scratch02/reco/alamja/workshop_pdt_2015/features/Nobackup/mel_fbank_fb40_fs8k_cmvn/rsr2015/VQ_VAD_HO_EPD/'
# File basenames extracted from each list entry (3rd '/'- or '.'-field).
tfinames = [re.split('[/.]',l)[2] for l in tfeats]
# Unique speaker_phrase class labels built from basename fields 0 and 2.
tspk_phr = list(set([l.split('_')[0]+'_'+l.split('_')[2] for l in tfinames]))
all_feats = tfeats+vfeats
frame_count=[]
LABELS=[]
NAMES=[]
MASKS=[]
# find maxlen recording in training set
# (the scan below is disabled; its result is hard-coded as maxl = 382)
"""
for l in all_feats:
    spk_id = re.split('[/_.]',l)[2]+'_'+re.split('[/_.]',l)[4]
    lab = tspk_phr.index(spk_id)
    LABELS.append(lab)
    fname = re.split('[/.]',l)[2]
    NAMES.append(fname)
    fbfeat = os.path.join(dpth,l)
    if os.path.exists(fbfeat):
        ff = htkmfc.HTKFeat_read(fbfeat)
        data = ff.getall()
        nframes=data.shape[0]
        frame_count.append(nframes)
maxl = max(frame_count)
print maxl
"""
maxl = 382  # longest utterance (frames) found by the disabled scan above
slc=10      # stack 10 consecutive 40-dim frames -> 400-dim vectors
### pad and store in matrices
rsr_train_data = []
for l in all_feats:
    fbfeat = os.path.join(dpth,l)
    spk_id = re.split('[/_.]',l)[2]+'_'+re.split('[/_.]',l)[4]
    lab = tspk_phr.index(spk_id)
    fname = re.split('[/.]',l)[2]
    if os.path.exists(fbfeat):
        LABELS.append(lab)
        NAMES.append(fname)
        mask = np.zeros((382,),dtype='float32')
        ff = htkmfc.HTKFeat_read(fbfeat)
        data = ff.getall()
        nframes=data.shape[0]
        mask[:nframes] = 1.0  # 1.0 marks real frames, 0.0 marks padding
        MASKS.append(mask)
        padl = maxl - nframes
        pad = np.zeros((padl,40),dtype='float32')
        datapad = np.vstack((data,pad))  # pad every utterance to maxl frames
        nframes = datapad.shape[0]
        ptr=0
        # give each frame a forward-backward context of 5 frames
        cnnstack = np.empty((400,nframes),dtype='float32')  # 400 = slc * 40
        while ptr < nframes:
            if ptr+slc >= nframes:
                # Window runs off the end: zero-pad the tail of the slice.
                padl = ptr+slc-nframes + 1
                pad=np.zeros((padl,40),dtype='float32')
                dslice = datapad[ptr: ptr+slc-padl]
                dslice = np.vstack((dslice,pad))
                dslice = dslice.flatten()
            else:
                dslice = datapad[ptr:ptr+slc,:]
                dslice = dslice.flatten()
            cnnstack[:,ptr]=dslice
            ptr+=1
        rsr_train_data.append(cnnstack)
rsr_train_data = np.asarray(rsr_train_data,dtype='float32')
nlen = len(rsr_train_data)
rsr_train_data = np.reshape(rsr_train_data,(nlen,382,400))
LABELS = np.asarray(LABELS,dtype='int32')
mlen = len(MASKS)
if mlen != nlen:
    # sanity check: one mask per feature matrix
    print 'shit'
MASKS = np.asarray(MASKS,dtype='float32')
MASKS = np.reshape(MASKS,(mlen,382))
hdf5file='/misc/scratch03/reco/bhattaga/data/RSR/rsr-DEVBKG-cldnn-train.hdf5'
nspk_trn=47284  # number of training examples
nspk_val=5000   # number of validation examples
ndp = nspk_trn+nspk_val
if os.path.exists(hdf5file):
    print 'HDF5 fie exits. Deleting...'
    # NOTE(review): `subprocess` is never imported in this file -- this branch
    # raises NameError if the HDF5 file already exists. Confirm and add the
    # import (or simply use os.remove) before relying on it.
    command00 = "rm -r" +" "+ hdf5file
    process0 = subprocess.check_call(command00.split())
f=h5py.File(hdf5file, mode='w')
features = f.create_dataset('features', (ndp,382,400), dtype='float32')
masks = f.create_dataset('masks',(ndp,382), dtype='float32')
names = f.create_dataset('names', (ndp,), dtype='S150')
labels = f.create_dataset('labels',(ndp,), dtype='int32')
# label the dimensions
for i, label in enumerate(('batch','timesteps', 'dimension')):
    f['features'].dims[i].label = label
for i, label in enumerate(('batch','timesteps')):
    f['masks'].dims[i].label = label
for i, label in enumerate(('batch',)):
    f['labels'].dims[i].label = label
for i, label in enumerate(('batch',)):
    f['names'].dims[i].label = label
nspk_all = nspk_trn+nspk_val
# Fuel split: first nspk_trn rows are 'train', the remainder 'valid'.
split_dict = {'train': {'features': (0, nspk_trn), 'labels': (0, nspk_trn),
                        'masks':(0,nspk_trn), 'names': (0, nspk_trn)},
              'valid': {'features': (nspk_trn, nspk_all), 'labels': (nspk_trn, nspk_all),
                        'masks':(nspk_trn,nspk_all),'names': (nspk_trn,nspk_all)}}
# NOTE(review): the arrays computed above are discarded here and replaced by
# previously saved copies loaded from disk -- confirm this is intentional.
DATA=np.load('/misc/scratch03/reco/bhattaga/data/RSR/rsr-mf-DEVBKG-cldnn.npy')
LABELS = np.load('/misc/scratch03/reco/bhattaga/data/RSR/rsr-mf-spkphr-labels-cldnn.npy')
fname = open('/misc/scratch03/reco/bhattaga/data/RSR/rsr-mf-BKG-names-cldnn.pkl')
NAMES = cPickle.load(fname)
MASKS = np.load('/misc/data15/reco/bhattgau/Rnn/code/Code_/rsr/newshit/DATA/rsr-mf-p1-DEVBKG-masks.npy')
features[...] = DATA
labels[...] = LABELS
masks[...] = MASKS
names[...] = NAMES
f.attrs['split'] = H5PYDataset.create_split_array(split_dict)
f.flush()
f  # NOTE(review): no-op expression; probably meant f.close()
|
21,434 | a0a21b83fdd9738ae004940037e498d18a38055c | import numpy as np
import matplotlib.pyplot as plt
def pressure(surface_pressure, fluid_density, g, depth):
    """Return the hydrostatic pressure p = p0 + rho * g * depth.

    :param surface_pressure: pressure at zero depth (p0)
    :param fluid_density: fluid density rho
    :param g: gravitational acceleration
    :param depth: scalar or array-like depth; when array-like (list, tuple or
        numpy array) the pressure profile is also plotted against depth as a
        side effect.
    :return: the pressure at ``depth`` (same shape as ``depth``)
    """
    # Compute once instead of duplicating the formula in the plot branch,
    # and avoid shadowing the function name with a local variable.
    result = surface_pressure + fluid_density * g * depth
    if isinstance(depth, (list, tuple, np.ndarray)):
        plt.plot(depth, result)
        plt.xlabel("depth [m]")
        plt.ylabel("pressure [_]")
        plt.show()
    return result
|
21,435 | 7a719d8c122644a76dec9b1a866e4c7d92b50097 | from controller_generator import generate_controller
from controller_generator import add_controller
from database_operations import db_create, db_migrate, db_upgrade
from database_operations import db_downgrade, db_version
from help_operations import help
from model_generator import add_model
from package_operations import install_package
from server_operations import run_tornado, run_testrun
# end of file |
21,436 | 32e0099353cd41be52b81d77a040f0910e361deb | # -*- coding: utf-8 -*-
"""
Created on Mon May 14 21:21:48 2012
@author: Philipp
"""
import numpy as np
from matplotlib import dates
import calculate_data as calculate_data
import trefferquoten as tq
def main(cp,conn,cursor):
    """Collect price history, open analyst forecasts and hit-rate statistics
    for company *cp* and bundle them for plotting.

    :param cp: numeric company id (interpolated into the SQL below; assumed
        to be an int -- %d formatting is not injection-safe for other types)
    :param conn: open database connection
    :param cursor: cursor on *conn*
    :return: list of three elements:
        [dates, closing prices],
        buy/sell/neutral counts + percentages + mean hit rates,
        colored trend prognosis
    """
    # Closing prices ordered by date.
    sql = "SELECT close , `datum` FROM kursdaten WHERE unternehmen =%d ORDER BY `datum`" %(cp)
    # Open (future-dated) price targets per analyst.
    sql4 = """SELECT neues_kursziel, zieldatum, analyst,neue_einstufung FROM prognose
              WHERE unternehmen = %d
              AND `zieldatum`>(SELECT CURDATE()) AND neues_kursziel >0
              ORDER BY zieldatum""" %(cp)
    # Open ratings (buy/sell/neutral) per analyst.
    sql5 = """SELECT neue_einstufung,analyst FROM prognose
              WHERE unternehmen = %d
              AND `zieldatum`>(SELECT CURDATE()) AND neues_kursziel >0
              ORDER BY zieldatum""" %(cp)
    # Per-analyst hit rates for this company.
    trefferquoten_dict = tq.start_company(cp,conn,cursor)
    avg_kurse = calculate_data.get_select(sql,cursor,conn)
    prognose = calculate_data.get_select(sql4,cursor,conn)
    einstufung = calculate_data.get_select(sql5,cursor,conn)
    # Split (close, date) rows and convert dates to matplotlib date numbers.
    avg = [q[0] for q in avg_kurse]
    datum_avg = [q[1] for q in avg_kurse]
    datum_avg =dates.date2num(datum_avg)
    datum_prognose = [q[1] for q in prognose]
    datum_prognose = dates.date2num(datum_prognose)
    # Group forecasts by analyst: analyst -> [[target, date_num, rating], ...]
    analysten_prognosen_dict ={}
    for row in prognose:
        analysten_prognosen_dict[row[2]] = []
    for row in prognose:
        value = analysten_prognosen_dict[row[2]]
        value.append([row[0],dates.date2num(row[1]), row[3]])
    buy_sell_neutral_count_percent_and_mittlere_trefferquoten = calculate_data.get_buy_sell_neutral_count_percent_and_mittlere_trefferquoten(einstufung,trefferquoten_dict,cp)
    # [buy,prozent_buy,tr_qt_buy],[sell,prozent_sell,tr_qt_sell],[neutral,prozent_neutral,tr_qt_neutral]
    colored_trend_prognosis = calculate_data.get_colored_trend_prognosis(analysten_prognosen_dict,datum_prognose)
    tats_kurse_datum = [datum_avg,avg]
    result_set = []
    result_set.append(tats_kurse_datum)
    result_set.append(buy_sell_neutral_count_percent_and_mittlere_trefferquoten)
    result_set.append(colored_trend_prognosis)
    return result_set
|
21,437 | f0561646573f3e5c02f63bbc03350c15a758fc9d | #!/usr/bin/env python
import random
def print_M(matrix_name, M):
    """Pretty-print matrix *M*, one row per line, under the label *matrix_name*."""
    print(f'{matrix_name} = ')
    for current_row in M:
        print(' ', current_row)
    print('\n')
def generate_new_matrix(cities, weights, arcs=None):
    """Generate a random weighted adjacency matrix for *cities* nodes.

    Weights are drawn uniformly from ``weights[0]..weights[1]`` inclusive; the
    main diagonal is zero. When *arcs* is given, arcs are removed (set to 0)
    until exactly *arcs* off-diagonal arcs remain, while guaranteeing at least
    one outgoing arc per row.

    NOTE(review): do NOT restructure the random.* calls -- the asserts in
    __main__ pin the exact RNG consumption order under a fixed seed.

    :param cities: number of nodes (matrix is cities x cities)
    :param weights: two-element [low, high] inclusive weight range
    :param arcs: desired number of remaining off-diagonal arcs, or None to
        keep the matrix fully connected
    :return: the generated matrix as a list of lists
    """
    M = []
    rows = cities
    cols = cities
    # Fill the matrix with random weights; zero the diagonal.
    for i in range(rows):
        row = [random.randrange(weights[0], weights[1] + 1) for _ in range(cols)]
        row[i] = 0  # main diagonal element
        M.append(row)
    if arcs:
        max_arcs_no = rows * (rows - 1)
        # Reserve one random outgoing arc per row; these are never removed, so
        # every row keeps at least one non-zero off-diagonal entry. Each chosen
        # column is retired so the reserved arcs use distinct columns.
        non_removable_arcs = []
        col_numbers = [j for j in range(cols)]
        for i in range(rows):
            col_numbers_except_main_diagonal = [(col_numbers_index, j) for col_numbers_index, j in
                                                enumerate(col_numbers) if j != i]
            tmp = col_numbers_except_main_diagonal[random.choice(range(len(col_numbers_except_main_diagonal)))]
            col_numbers_index = tmp[0]
            j = tmp[1]
            non_removable_arcs.append([i, j])
            col_numbers = col_numbers[:col_numbers_index] + col_numbers[col_numbers_index + 1:]
        # All remaining off-diagonal arcs are candidates for removal.
        removable_arcs = []
        for i in range(rows):
            for j in range(cols):
                if i != j and [i, j] not in non_removable_arcs:
                    removable_arcs.append([i, j])
        # Randomly zero arcs until only *arcs* remain.
        removed_arcs_no = max_arcs_no - arcs
        for _ in range(removed_arcs_no):
            removed_arc_index = random.choice(range(len(removable_arcs)))
            i_j = removable_arcs[removed_arc_index]
            i = i_j[0]
            j = i_j[1]
            M[i][j] = 0
            removable_arcs = removable_arcs[:removed_arc_index] + removable_arcs[removed_arc_index + 1:]
    return M
if __name__ == '__main__':
    # Fixed seed: the asserts below pin the exact matrices produced by the
    # CPython random stream for these arguments (regression check).
    random.seed(0)
    # all zeroes except 10 elements such that every
    # row and every column have exactly one element
    M_1 = generate_new_matrix(10, [1, 10], arcs=10)
    assert (M_1 == [[0, 0, 0, 0, 0, 8, 0, 0, 0, 0],
                    [0, 0, 0, 0, 5, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 6],
                    [0, 6, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 4, 0],
                    [0, 0, 0, 0, 0, 0, 0, 8, 0, 0],
                    [0, 0, 8, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 8, 0, 0, 0],
                    [0, 0, 0, 5, 0, 0, 0, 0, 0, 0],
                    [8, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    # 5 zeroes not in the main diagonal
    M_2 = generate_new_matrix(10, [1, 10], arcs=85)
    assert (M_2 == [[0, 1, 3, 3, 6, 9, 5, 2, 10, 8],
                    [3, 0, 8, 7, 10, 9, 5, 6, 0, 5],
                    [3, 9, 0, 8, 2, 0, 1, 9, 5, 3],
                    [4, 8, 6, 0, 5, 6, 10, 10, 3, 5],
                    [7, 7, 2, 1, 0, 4, 6, 0, 4, 4],
                    [8, 7, 10, 7, 1, 0, 0, 7, 1, 3],
                    [8, 2, 5, 3, 8, 9, 0, 9, 10, 1],
                    [1, 8, 6, 5, 8, 1, 7, 0, 9, 2],
                    [3, 1, 0, 7, 6, 1, 4, 1, 0, 9],
                    [10, 2, 4, 2, 10, 4, 5, 5, 3, 0]])
    # 50 zeroes not in the main diagonal
    M_3 = generate_new_matrix(10, [1, 10], arcs=40)
    assert (M_3 == [[0, 0, 1, 4, 5, 0, 0, 0, 0, 0],
                    [10, 0, 0, 7, 6, 0, 0, 4, 0, 0],
                    [5, 0, 0, 3, 0, 0, 6, 6, 0, 0],
                    [10, 1, 1, 0, 0, 0, 10, 0, 0, 0],
                    [0, 3, 0, 0, 0, 0, 0, 5, 3, 9],
                    [0, 5, 7, 0, 5, 0, 0, 0, 0, 8],
                    [8, 6, 0, 0, 0, 0, 0, 0, 1, 5],
                    [0, 0, 3, 10, 7, 2, 0, 0, 0, 0],
                    [1, 0, 0, 2, 0, 0, 0, 5, 0, 8],
                    [0, 4, 7, 0, 0, 4, 5, 0, 3, 0]])
21,438 | 04160cd3118481562ed08d39634478210d7ee7de | from bs4 import BeautifulSoup as bs
import urllib.request
import re
import os
import xml.etree.ElementTree as et
# Scrape three news sources for mentions of favorite authors / stock tickers
# and collect matching headlines into output.txt. The placeholder strings
# 'your favorite author here' / 'your stock here' are meant to be customised.
output = open('output.txt', 'w+')  # collected matches; read by the mail step below
email = False  # flips True when anything of interest is found

# --- The New York Times books section ---
output.write('THE NEW YORK TIMES\n')
nyt_page = urllib.request.urlopen('https://www.nytimes.com/section/books')
nyt_html = nyt_page.read()
nyt_page.close()
beau = bs(nyt_html, 'html.parser')
for article in beau.findAll('div', {'class':'story-body'}):
    if 'Dostoevsky' in article.find('p', 'summary').get_text() or 'your favorite author here' in article.find('p', 'summary').get_text():
        # Only record articles that actually have a linked headline.
        if article.h2.a != None:
            email = True
            output.write(article.h2.a.get_text() + '\n')
            output.write(article.p.get_text() + '\n')
            output.write(article.h2.a.get('href') + '\n')

# --- The New Yorker books section (CSS-module class names are matched by prefix) ---
output.write('\nTHE NEW YORKER\n')
nyer_page = urllib.request.urlopen('https://www.newyorker.com/books')
nyer_html = nyer_page.read()
nyer_page.close()
beau = bs(nyer_html, 'html.parser')
class_name = re.compile('Card__content__')
for article in beau.findAll('div', {'class':class_name}):
    if 'Steinbeck' in article.find('p', re.compile('Card__dek___')).get_text() or 'your favorite author here' in article.find('p', re.compile('Card__dek___')).get_text():
        email = True
        output.write(article.find('a', re.compile('Link__link___')).get_text() + '\n')
        output.write(article.p.get_text() + '\n')
        output.write('https://www.newyorker.com/' + article.find('a', re.compile('Link__link___')).get('href') + '\n')

# --- Seeking Alpha news sitemap (XML) filtered by stock ticker ---
output.write('\nSEEKING ALPHA\n')
request = urllib.request.Request('https://seekingalpha.com/sitemap_news.xml', headers={'User-Agent': 'Mozilla/5.0'})
sa_page = urllib.request.urlopen(request)
sa_unformed = sa_page.read()
sa_formed = sa_unformed.decode('utf-8')
root = et.fromstring(sa_formed)
sa_page.close()
for url in root.findall('{http://www.sitemaps.org/schemas/sitemap/0.9}url'):
    news = url.find('{http://www.google.com/schemas/sitemap-news/0.9}news')
    tickers = news.find('{http://www.google.com/schemas/sitemap-news/0.9}stock_tickers')
    # NOTE(review): both sides of the final `or` test the same placeholder
    # string -- presumably two different tickers were intended here.
    if (tickers != None) and tickers.text != None and ('your stock here' in tickers.text or 'your stock here' in tickers.text):
        email = True
        output.write(news.find('{http://www.google.com/schemas/sitemap-news/0.9}title').text + '\n')
        output.write(tickers.text + '\n')
        output.write(url.find('{http://www.sitemaps.org/schemas/sitemap/0.9}loc').text + '\n\n')
output.close()
# Disabled follow-up step: mail the collected matches when anything was found.
#if email:
#    os.system('cat output.txt | mail "')
|
21,439 | 65fb1cdfffe1072713d4cb8d3bf58ac0fab3d9de | import torch
# Training configuration constants shared by the rest of the project.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # prefer GPU when available
max_length = 60       # maximum sequence length
embedding_size = 64   # token embedding dimension
hidden_size = 64      # RNN/hidden layer width
total_epoch = 20      # number of training epochs
batch_size = 16
learning_rate = 0.001
# Print the multiplication table for the row the user asks for.
dan = int(input('몇 단을 출력할까요?'))
for times in range(1, 10):
    print(f'{dan} * {times} = {dan*times}')
21,441 | a6b9d29bb67c4e775fe594a15868f3f492b8360b | # ПЕРЕДАЧА СПИСКА.
# Функция greet_users() рассчитывает получить список имен,
# который сохраняется в парамете nemes. Функция перебирает
# полученный список и выводит приветсвие для каждого пользователя.
# В (1) мы определяем список пользователей usernames, который
# затем передается greet_users() в вызове функциии.
#
#
def greet_users(names):
    """Print a simple greeting for every name in *names*."""
    for person in names:
        print(f"Hello, {person.title()}!")
usernames = ['hannah', 'ty', 'margot'] # (1) the list handed to greet_users()
greet_users(usernames)
21,442 | 65d0bb7c1ece6bacccf7cd2ef31acbef95434906 | import unittest
from python.nowcoder.climb import Solution
class TestClimb(unittest.TestCase):
    """Exercise every resolve_v* variant of the stair-climbing Solution.

    All variants must agree on the Fibonacci-like sequence:
    f(1)=1, f(2)=2, f(3)=3, f(4)=5.
    """

    # Expected (input, output) pairs shared by all implementations.
    CASES = ((1, 1), (2, 2), (3, 3), (4, 5))

    def _check(self, resolver):
        """Assert that *resolver* returns the expected value for every case."""
        for n, expected in self.CASES:
            self.assertEqual(resolver(n), expected)

    def test_resolve_v1(self):
        self._check(Solution().resolve_v1)

    def test_resolve_v2(self):
        self._check(Solution().resolve_v2)

    def test_resolve_v3(self):
        self._check(Solution().resolve_v3)

    def test_resolve_v4(self):
        self._check(Solution().resolve_v4)
if __name__ == '__main__':
    # Discover and run the tests when executed as a script.
    unittest.main()
|
# Scratch script exploring list operations.
# NOTE(review): the name `list` shadows the builtin; kept because later code
# in this file may rely on it.
list = ['abcd', 789, 2.23, 'johon', 70.2]
tinylist = [123, 'john']

#print(list);
#print(list[0]);
#print(list[1:3:1]);#[)左闭右开  (slice: start inclusive, stop exclusive)
#print(list, list);

i = 1
while i <= 2 :
    #print(list);
    i += 1

# BUG FIX: the original loop was `while True: list.append(tinylist[k])` with
# k never advancing -- it appended tinylist[0] forever (infinite loop).
# Bound the loop so each element of tinylist is appended exactly once.
k = 0
while k < len(tinylist):
    list.append(tinylist[k])
    print(list)
    k += 1
21,444 | 7a949b8345306da09d54b475e0ba3fd011af069b | import types
def escape_text(text=''):
    """Coerce *text* to str and backslash-escape single quotes, so the value
    can be embedded inside a single-quoted JavaScript string literal."""
    return str(text).replace("'", "\\'")
class GaqHub(object):
    """Accumulates Google Analytics (ga.js / _gaq) commands and renders them
    as an HTML ``<script>`` block in the order required by the GA API.

    Commands are stored in ``data_struct`` by the ``set*``/``add*``/``track*``
    methods; :meth:`as_html` serialises them, optionally as one combined
    ``_gaq.push(...)`` call ("single push") and optionally mirrored to
    additional account ids.
    """

    # Per-instance command store; populated in __init__.
    data_struct = None

    def __init__(self, account_id, single_push=False):
        """Sets up self.data_struct dict which we use for storage.
        You'd probably have something like this in your base controller:
            class Handler(object):
                def __init__(self, request):
                    self.request = request
                    h.gaq_setup(self.request, 'AccountId')
        All of the other commands in the module accept an optional 'request' kwarg.
        If no 'request' is submitted, it will call pyramid.threadlocal.get_current_request()
        This should allow you to easily and cleanly call this within templates, and not just handler methods.
        """
        self.data_struct = {
            '__singlePush': single_push,
            '__setAccountAdditional': set({}),
            '_setAccount': account_id,
            # Slots 1-5 are the custom-variable indexes GA supports.
            '_setCustomVar': dict((i, None) for i in range(1, 6)),
            '_setDomainName': False,
            '_setAllowLinker': False,
            '_addTrans': [],
            '_addItem': [],
            '_trackTrans': False,
            '_trackEvent': [],
        }

    def setAccount(self, account_id):
        """This should really never be called, best to setup during __init__, where it is required"""
        self.data_struct['_setAccount'] = account_id

    def setAccountAdditional_add(self, account_id):
        """add an additional account id to send the data to. please note - this is only tested to work with the async method.
        """
        self.data_struct['__setAccountAdditional'].add(account_id)

    def setAccountAdditional_del(self, account_id):
        # Removing an id that was never added is a silent no-op.
        try:
            self.data_struct['__setAccountAdditional'].remove(account_id)
        except KeyError:
            pass

    def setSinglePush(self, bool_value):
        """GA supports a single 'push' event. """
        self.data_struct['__singlePush'] = bool_value

    def trackEvent(self, track_dict):
        """'Constructs and sends the event tracking call to the Google Analytics Tracking Code. Use this to track visitor behavior on your website that is not related to a web page visit, such as interaction with a Flash video movie control or any user event that does not trigger a page request. For more information on Event Tracking, see the Event Tracking Guide.
        You can use any of the following optional parameters: opt_label, opt_value or opt_noninteraction. If you want to provide a value only for the second or 3rd optional parameter, you need to pass in undefined for the preceding optional parameter.'
        -- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEventTracking.html#_gat.GA_EventTracker_._trackEvent
        """
        # Positional args: missing entries become the JS literal `undefined`.
        clean = []
        for i in ['category', 'actions', 'opt_label', 'opt_value', 'opt_noninteraction']:
            if i in track_dict:
                clean.append("'%s'" % track_dict[i])
            else:
                clean.append('undefined')
        self.data_struct['_trackEvent'].append("""['_trackEvent',%s]""" % ','.join(clean))

    def setCustomVar(self, index, name, value, opt_scope=None):
        """_setCustomVar(index, name, value, opt_scope)
        'Sets a custom variable with the supplied name, value, and scope for the variable. There is a 64-byte character limit for the name and value combined.'
        -- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiBasicConfiguration.html#_gat.GA_Tracker_._setCustomVar
        """
        # name/value are quote-escaped; opt_scope is emitted only when set.
        self.data_struct['_setCustomVar'][index] = (escape_text(name), escape_text(value), opt_scope)

    def setDomainName(self, domain_name):
        """_setDomainName(newDomainName)
        -- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setDomainName
        """
        self.data_struct['_setDomainName'] = domain_name

    def setAllowLinker(self, bool_allow):
        """_setAllowLinker(bool)
        http://code.google.com/apis/analytics/docs/gaJS/gaJSApiDomainDirectory.html#_gat.GA_Tracker_._setAllowLinker
        """
        self.data_struct['_setAllowLinker'] = bool_allow

    def addTrans(self, track_dict):
        """'Creates a transaction object with the given values. As with _addItem(), this method handles only transaction tracking and provides no additional ecommerce functionality. Therefore, if the transaction is a duplicate of an existing transaction for that session, the old transaction values are over-written with the new transaction values. Arguments for this method are matched by position, so be sure to supply all parameters, even if some of them have an empty value.'
        -- from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addTrans
        """
        for i in ['order_id', 'total']: # fix required ; let javascript show errors if null
            if i not in track_dict:
                track_dict[i] = ''
        for i in ['opt_affiliation', 'opt_tax', 'opt_shipping', 'opt_city', 'opt_state', 'opt_country']: # fix optionals for positioning
            if i not in track_dict:
                track_dict[i] = ''
        self.data_struct['_addTrans'].append("""['_addTrans',%(order_id)s,'%(opt_affiliation)s','%(total)s','%(opt_tax)s','%(opt_shipping)s','%(opt_city)s','%(opt_state)s','%(opt_country)s']""" % track_dict)

    def addItem(self, track_dict):
        """'Use this method to track items purchased by visitors to your ecommerce site. This method tracks individual items by their SKU. This means that the sku parameter is required. This method then associates the item to the parent transaction object via the orderId argument'
        --from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._addItem
        """
        for i in ['order_id', 'sku', 'name', 'price', 'quantity']: # fix required ; let javascript show errors if null
            if i not in track_dict:
                track_dict[i] = ''
        for i in ['category']: # fix optionals for positioning
            if i not in track_dict:
                track_dict[i] = ''
        self.data_struct['_addItem'].append("""['_addItem',%(order_id)s,'%(sku)s','%(name)s','%(category)s','%(price)s','%(quantity)s']""" % track_dict)

    def trackTrans(self):
        """gaq_trackTrans(request=None)- You merely have to call this to enable it. I decided to require this, instead of automatically calling it if a transaction exists, because this must be explicitly called in the ga.js API and its safer to reinforce this behavior.
        'Sends both the transaction and item data to the Google Analytics server. This method should be called after _trackPageview(), and used in conjunction with the _addItem() and addTrans() methods. It should be called after items and transaction elements have been set up.'
        --from http://code.google.com/apis/analytics/docs/gaJS/gaJSApiEcommerce.html#_gat.GA_Tracker_._trackTrans
        """
        self.data_struct['_trackTrans'] = True

    def _inner_render(self, single_push, single_pushes, script, account_id, is_secondary_account=False):
        """Serialise the stored commands for one account id.

        Appends either to *single_pushes* (combined-push mode) or directly to
        *script* (one _gaq.push per command), and returns both lists.
        """
        # start the single push if we elected
        if single_push:
            script.append(u"""_gaq.push(""")
        # according to GA docs, the order to submit via javascript is:
        # # _setAccount
        # # _setDomainName
        # # _setAllowLinker
        # #
        # # cross domain tracking reference
        # # http://code.google.com/apis/analytics/docs/tracking/gaTrackingSite.html
        # _setAccount
        if single_push:
            single_pushes.append(u"""['_setAccount', '%s']""" % account_id)
        else:
            script.append(u"""_gaq.push(['_setAccount', '%s']);""" % account_id)
        # _setDomainName
        if self.data_struct['_setDomainName']:
            if single_push:
                single_pushes.append(u"""['_setDomainName', '%s']""" % self.data_struct['_setDomainName'])
            else:
                script.append(u"""_gaq.push(['_setDomainName', '%s']);""" % self.data_struct['_setDomainName'])
        # _setAllowLinker (Python bool rendered as lowercase JS true/false)
        if self.data_struct['_setAllowLinker']:
            if single_push:
                single_pushes.append(u"""['_setAllowLinker', %s]""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
            else:
                script.append(u"""_gaq.push(['_setAllowLinker', %s]);""" % ("%s" % self.data_struct['_setAllowLinker']).lower())
        # _setCustomVar is next
        for index in self.data_struct['_setCustomVar'].keys():
            _payload = self.data_struct['_setCustomVar'][index]
            if not _payload: continue
            _payload = (index, ) + _payload
            # opt_scope is optional; emit the 3-arg form when it is unset.
            if _payload[3]:
                formatted = u"""['_setCustomVar',%s,'%s','%s',%s]""" % _payload
            else:
                formatted = u"""['_setCustomVar',%s,'%s','%s']""" % _payload[:3]
            if single_push:
                single_pushes.append(formatted)
            else:
                script.append(u"""_gaq.push(%s);""" % formatted)
        if single_push:
            single_pushes.append(u"""['_trackPageview']""")
        else:
            script.append(u"""_gaq.push(['_trackPageview']);""")
        # according to GA docs, the order to submit via javascript is:
        # # _trackPageview
        # # _addTrans
        # # _addItem
        # # _trackTrans
        for category in ['_addTrans', '_addItem']:
            for i in self.data_struct[category]:
                if single_push:
                    single_pushes.append(i)
                else:
                    script.append(u"""_gaq.push(%s);""" % i)
        if self.data_struct['_trackTrans']:
            if single_push:
                single_pushes.append(u"""['_trackTrans']""")
            else:
                script.append(u"""_gaq.push(['_trackTrans']);""")
        # events seem to be on their own.
        for category in ['_trackEvent']:
            for i in self.data_struct[category]:
                if single_push:
                    single_pushes.append(i)
                else:
                    script.append(u"""_gaq.push(%s);""" % i)
        return single_pushes, script

    def as_html(self):
        """helper function. prints out GA code for you, in the right order.
        You'd probably call it like this in a Mako template:
            <head>
                ${h.as_html()|n}
            </head>
        Notice that you have to escape under Mako. For more information on mako escape options - http://www.makotemplates.org/docs/filtering.html
        """
        single_push = self.data_struct['__singlePush']
        single_pushes = []
        script = [
            u"""<script type="text/javascript">""",
            u"""var _gaq = _gaq || [];""",
        ]
        # Render the primary account first, then any mirrored accounts.
        (single_pushes, script) = self._inner_render(single_push, single_pushes, script, self.data_struct['_setAccount'], is_secondary_account=False)
        for account_id in self.data_struct['__setAccountAdditional']:
            (single_pushes, script) = self._inner_render(single_push, single_pushes, script, account_id, is_secondary_account=True)
        # close the single push if we elected
        if single_push:
            script.append(u""",\n""".join(single_pushes))
            script.append(u""");""")
        # Standard async ga.js loader snippet.
        script.append(u"""(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl': 'http://www') + '.google-analytics.com/analytics.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();""")
        script.append(u"""</script>""")
        return u"""\n""".join(script)
|
21,445 | 6bde4dd73ee9acdeaaafd1e64182c7ceb707f4d9 | from typing import Generator
from fastapi import Depends, HTTPException, Security, status
from fastapi.security import (
OAuth2PasswordBearer,
SecurityScopes,
)
from jose import jwt
from pydantic import ValidationError
from sqlalchemy.orm import Session
from app import crud, models, schemas
from app.core import security
from app.core.config import settings
from app.db import database
# OAuth2 "password" flow: tokens are issued by the auth access-token endpoint;
# the declared scopes gate per-route permissions (see get_current_user).
oauth2_scheme = OAuth2PasswordBearer(
    tokenUrl=f"{settings.API_V1_STR}/auth/access-token",
    scopes={
        "me": "All information about me",
        "super": "Superuser"
    }
)
async def get_current_user(
    security_scopes: SecurityScopes,
    token: str = Depends(oauth2_scheme)
) -> models.User:
    """Decode the bearer JWT, load its user, and enforce the route's scopes.

    :param security_scopes: scopes required by the route being served.
    :param token: bearer token extracted by the OAuth2 scheme.
    :raises HTTPException: 401 when the token is invalid, the user does not
        exist, or a required scope is missing from the token.
    :return: the authenticated user model.
    """
    # Advertise the required scopes in the WWW-Authenticate challenge header.
    if security_scopes.scopes:
        authenticate_value = f'Bearer scope="{security_scopes.scope_str}"'
    else:
        authenticate_value = f"Bearer"
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": authenticate_value},
    )
    try:
        payload = jwt.decode(
            token, settings.SECRET_KEY, algorithms=[security.ALGORITHM]
        )
        token_data = schemas.TokenPayload(**payload)
        # "sub" carries the username/email the token was issued for.
        if token_data.sub is None:
            raise credentials_exception
    except (jwt.JWTError, ValidationError):
        raise credentials_exception
    user = await crud.user.get_by_email_or_username(username=token_data.sub)
    if not user:
        raise credentials_exception
    # Token scopes are stored space-separated (OAuth2 convention).
    token_scopes = token_data.scopes.split(" ")
    for scope in security_scopes.scopes:
        if scope not in token_scopes:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Not enough permissions",
                headers={"WWW-Authenticate": authenticate_value},
            )
    return user
async def get_current_active_user(
    current_user: models.User = Security(get_current_user, scopes=["me"]),
) -> models.User:
    """Return the authenticated user, rejecting inactive accounts with 400."""
    if crud.user.is_active(current_user):
        return current_user
    raise HTTPException(status_code=400, detail="Inactive user")
async def get_current_active_superuser(
    current_user: models.User = Security(get_current_user, scopes=["super"]),
) -> models.User:
    """Return the authenticated user, rejecting non-superusers with 400."""
    if crud.user.is_superuser(current_user):
        return current_user
    raise HTTPException(
        status_code=400, detail="The user doesn't have enough privileges"
    )
|
class TreeNode:
    """Plain binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def add_one_row(self, root: TreeNode, v: int, d: int) -> TreeNode:
        """Insert a full row of nodes with value *v* at depth *d* (1-based).

        Depth 1 makes a new root whose left child is the old tree. Otherwise
        every node at depth d-1 receives new left/right children holding *v*;
        the original left subtree hangs off the new left child and the
        original right subtree off the new right child.
        """
        if d == 1:
            fresh = TreeNode(v)
            fresh.left = root
            return fresh
        # Breadth-first walk down to the nodes at depth d-1.
        frontier = [root]
        for _ in range(d - 2):
            nxt = []
            for node in frontier:
                if node.left is not None:
                    nxt.append(node.left)
                if node.right is not None:
                    nxt.append(node.right)
            frontier = nxt
        # Splice the new row beneath every frontier node.
        for node in frontier:
            old_left, old_right = node.left, node.right
            node.left = TreeNode(v)
            node.left.left = old_left
            node.right = TreeNode(v)
            node.right.right = old_right
        return root
|
21,447 | 3f8cf2f46a1d4369a3a7dd92ecb3fdc24848436e | #!/usr/bin/env python
# !-*- coding:utf-8 -*-
import projectupdate
import FormatPrint
import getopt
import sys
import TomcatFunc
import HealthCheck
import platformFunc
import MailFunc
class Controler(object):
    """Command-line controller for project update / tomcat / health-check tasks.

    NOTE(review): only ``__init__`` appears to live on the class; the task
    functions and the option parsing below must be module-level for their
    references to ``projectName`` etc. to resolve at call time — confirm
    against the original (indentation was lost in this copy).
    """
    def __init__(self):
        pass


# Globals filled in from the command-line options parsed below.
method = None          # name of the task to run (key into the `operator` dict)
projectName = None     # target project (-P/--Project)
projectVersion = None  # version for update/rollback (-v/--version)
tomcatTag = None       # tomcat instance tag; never assigned by the parser below
path = None            # filesystem path (-p/--path)
command = None         # shell command (-c/--command); parsed but unused here
time = None            # update timestamp (-t/--Time); shadows nothing imported
# Refresh the mail configuration for a project.
# Usage: -m updateMailConf -P <project>
def updateMailConf():
    FormatPrint.printDebug("更新邮件配置信息")  # log: "updating mail configuration"
    MailFunc.updateMailConf(projectName)


# Platform project update: restart the project with health checks paused.
# Usage: -m platformupdate -P <project>
def platformupdate():
    FormatPrint.printInfo("平台更新" + str(projectName))  # log: "platform update <project>"
    stopHealthCheck()   # pause health checks during the restart
    restartProject()    # restart the project
    startHealthCheck()  # resume health checks


# Send the platform-update notification mail for the project.
def platformSendMail():
    updateInfo = platformFunc.getUpdateInfo(projectName)
    projectVersion = updateInfo.updateVersion  # local; shadows the CLI global
    subject = str(projectName) + "项目" + str(projectVersion) + "版本更新"
    messages = str(projectName) + "项目" + str(projectVersion) + "版本更新,成功"
    level = 50  # per original comment: level 50 == send to everyone
    MailFunc.sendMails(projectName,subject, messages, level)


# Delete the recorded platform update-info file.
def platformDelUpdateFile():
    platformFunc.delUpdateFile()


# Replace resources for a platform update, taking version/time from the
# recorded update info rather than from command-line arguments.
def platformReplaceResource():
    FormatPrint.printInfo("平台更新替换资源" + str(projectName))  # log: "platform update, replacing resources"
    updateInfo = platformFunc.getUpdateInfo(projectName)
    time = updateInfo.updateTime               # local; shadows the CLI global
    projectVersion = updateInfo.updateVersion  # local; shadows the CLI global
    projectupdate.replaceResource(projectName, projectVersion, time)


# Replace resources using the globally parsed project name / version / time.
def replaceResource():
    FormatPrint.printInfo("更新替换资源" + str(projectName) + "替换版本" + str(projectVersion))
    projectupdate.replaceResource(projectName, projectVersion, time)
# Full project update: replace resources, then redeploy with health checks paused.
def update():
    FormatPrint.printInfo("更新" + str(projectName) + "更新版本" + str(projectVersion))
    replaceResource()   # swap in the new resources
    stopHealthCheck()   # pause health checks
    updateProject()     # redeploy the project
    startHealthCheck()  # resume health checks


# Roll back: identical steps to update(), pointed at the rollback version.
def rollback():
    FormatPrint.printInfo("更新回滚" + str(projectName) + "回滚版本" + str(projectVersion))
    replaceResource()   # swap in the rollback resources
    stopHealthCheck()   # pause health checks
    updateProject()     # redeploy the project
    startHealthCheck()  # resume health checks


# Redeploy the project in place.
def updateProject():
    FormatPrint.printInfo("项目更新" + str(projectName))
    projectupdate.updateProject(projectName)


# Restart the project process.
def restartProject():
    FormatPrint.printInfo("重启项目" + str(projectName))
    projectupdate.restartProject(projectName)


# Start the tagged tomcat instance under `path`.
# NOTE(review): the log strings say "tocmat" (sic) and nothing in this file
# assigns `tomcatTag` — confirm how this is invoked.
def startTomcat():
    FormatPrint.printDebug("startTomcat")
    FormatPrint.printInfo("启动tocmat" + str(tomcatTag))
    TomcatFunc.startTomcat(path, tomcatTag)


# Kill the tagged tomcat instance under `path` ("tomatKill" name kept: it is
# a dispatch-table target).
def tomatKill():
    FormatPrint.printDebug("tomatKill")
    FormatPrint.printInfo("关闭tocmat" + str(tomcatTag))
    TomcatFunc.killTomcat(path, tomcatTag)


# Start the health-check service; logs success/failure.
def startHealthCheck():
    FormatPrint.printDebug("startHealCheck")
    if HealthCheck.startHealthCheck(projectName):
        FormatPrint.printInfo("启动健康检查服务成功")
    else:
        FormatPrint.printInfo("启动健康检查服务失败")


# Stop the health-check service; logs success/failure.
def stopHealthCheck():
    FormatPrint.printDebug("stopHealthCheck")
    if HealthCheck.stopHealthCheck(projectName):
        FormatPrint.printInfo("关闭健康检查服务成功")
    else:
        FormatPrint.printInfo("关闭健康检查服务失败")


# Restart the health-check service; logs success/failure.
def restartHealthCheck():
    FormatPrint.printDebug("restartHealthCheck")
    if HealthCheck.restartHealthCheck(projectName):
        FormatPrint.printInfo("重启健康检查服务成功")
    else:
        FormatPrint.printInfo("重启健康检查服务失败")


# Run a single one-shot health check; logs success/failure.
def healthCheckOnce():
    FormatPrint.printDebug("startHealthCheckOnce")
    if HealthCheck.checkOnce(projectName):
        FormatPrint.printInfo("一次性健康检查服务成功")
    else:
        FormatPrint.printInfo("一次性健康检查服务失败")


# Run the repeated health check; logs success/failure.
def healthCheckAll():
    FormatPrint.printDebug("healthCheckAll")
    if HealthCheck.checkAllTime(projectName):
        FormatPrint.printInfo("多次性健康检查服务成功")
    else:
        FormatPrint.printInfo("多次性健康检查服务失败")


# Report current health-check status for the project.
def healthCheckStatus():
    FormatPrint.printDebug("healthCheckStatus")
    HealthCheck.healthCheckStatus(projectName)
# Print command-line usage.
# NOTE: intentionally shadows the `help` builtin — the dispatch table below
# maps the method name 'help' to this function.
def help():
    """Print usage for every supported option and method name."""
    print ("-h,--help")
    # BUGFIX: the method list advertised 'rollBack', but the dispatch-table
    # key (and the function) is 'rollback' — users copying the help text got
    # an unknown method.  '--roolback=' typo fixed the same way.
    print (
        "-m:,--method=,will be run method:platformReplaceResource|platformupdate|platformSendMail|platformDelUpdateFile|update|updateMailConf|rollback|starttomcat|killtomcat|startHealthCheck|stopHealthCheck|restartHealthCheck|healthCheckOnce|healthCheckAll|healthCheckStatus")
    print ("-u:,--update=,specify update project name")
    print ("-r:,--rollback=,specify rollback project name")
    print ("-v:,--version=,specify projectupdate version number")
    print ("-k:,--killtomcat=,specify close tomcattag")
    print ("-s:,--starttomcat=,specify start tomcattag ")
    print ("-p:,--path=,specify a detail path")
    print ("-c:,--command=,shell command")
    print ("-P:,--Project=,project name")
    print ("-t:,--Time=,project update time")
# Dispatch table: value of -m/--method  ->  task function to run.
operator = \
    {
        'platformReplaceResource': platformReplaceResource,
        'platformupdate': platformupdate,
        'platformSendMail': platformSendMail,
        'platformDelUpdateFile': platformDelUpdateFile,
        'update': update,
        'rollback': rollback,
        'help': help,
        'starttomcat': startTomcat,
        'killtomcat': tomatKill,
        'startHealthCheck': startHealthCheck,
        'stopHealthCheck': stopHealthCheck,
        'restartHealthCheck': restartHealthCheck,
        'healthCheckOnce': healthCheckOnce,
        'healthCheckAll': healthCheckAll,
        'healthCheckStatus': healthCheckStatus,
        'replaceResource': replaceResource,
        'updateMailConf': updateMailConf
    }

options, args = getopt.getopt(sys.argv[1:], "hv:p:c:m:P:t:",
                              ["help", "version=", "path=", "command=", "method=", "Project=", "Time="])
if len(options) <= 0:
    # No options at all: fall back to printing usage.
    if method is not None:
        FormatPrint.printFalat("已经指定方法,请使用正确方法")
    else:
        method = "help"
else:
    for name, value in options:
        # BUGFIX: getopt returns long options as e.g. '--version' WITHOUT the
        # trailing '=' (the '=' in the spec list only marks "takes an
        # argument"), so the original comparisons against '--version=' etc.
        # could never match and every long-form option fell through to the
        # final else, silently turning the run into `help`.
        if name in ['-h', '--help']:
            if method is not None:
                FormatPrint.printFalat("已经指定方法,请使用正确方法")
            else:
                method = "help"
        elif name in ['-v', '--version']:
            if value is None or str(value).startswith("-"):
                FormatPrint.printInfo("-v:--version需要参数projectVersion")
                sys.exit(1)
            projectVersion = value
        elif name in ['-p', '--path']:
            if value is None or str(value).startswith("-"):
                FormatPrint.printInfo("-p:--path需要参数filepath")
                sys.exit(1)
            path = value
        elif name in ['-c', '--command']:
            if value is None or str(value).startswith("-"):
                FormatPrint.printInfo("-c:--command需要参数command")
                sys.exit(1)
            command = value
        elif name in ['-m', '--method']:
            if value is None or str(value).startswith("-"):
                FormatPrint.printInfo("-m:--method需要参数method")
                sys.exit(1)
            method = value
        elif name in ['-P', '--Project']:
            if value is None or str(value).startswith("-"):
                FormatPrint.printInfo("-P:--Project需要参数projectname")
                sys.exit(1)
            projectName = value
        elif name in ['-t', '--Time']:
            if value is None or str(value).startswith("-"):
                FormatPrint.printInfo("-t:--Time需要参数timestamp")
                sys.exit(1)
            time = value
        else:
            method = "help"

# Run the selected task.  Defaulting to `help` keeps an unknown method name
# from crashing with "'NoneType' object is not callable".
operator.get(method, help)()
|
21,448 | 37d1cc29e280d91b983cb1730ece661b918f4051 |
Don't show this again.
Questions List
C++
Go
Java
Javascript
Python
Theme:
algoexpert
algoexpert
blackboard
cobalt
lucario
midnight
night
oceanic-next
rubyblue
white
Keymaps:
default
default
emacs
vim
00:00
×Don't forget to scroll to the bottom of the page for the video explanation!
Question:_
No changes made.
Longest Increasing Subsequence
Given a non-empty array of integers, write a function that returns the longest strictly-increasing subsequence of the array. A subsequence is defined as a set of numbers that are not necessarily adjacent but that are in the same order as they appear in the array. Assume that there will only be one longest increasing subsequence.
Sample input: [5, 7, -24, 12, 10, 2, 3, 12, 5, 6, 35]
Sample output: [-24, 2, 3, 5, 6, 35]
Input:Your Solution
Our Solution
No changes made.
Run Code
Solution #1Solution #2
1
# Copyright © 2019 AlgoExpert, LLC. All rights reserved.
2
3
# O(nlogn) time | O(n) space
4
def longestIncreasingSubsequence(array):
5
sequences = [None for x in array]
6
indices = [None for x in range(len(array) + 1)]
7
length = 0
8
for i, num in enumerate(array):
9
newLength = binarySearch(1, length, indices, array, num)
10
sequences[i] = indices[newLength - 1]
11
indices[newLength] = i
12
length = max(length, newLength)
13
return buildSequence(array, sequences, indices[length])
14
15
def binarySearch(startIdx, endIdx, indices, array, num):
16
if startIdx > endIdx:
17
return startIdx
18
middleIdx = (startIdx + endIdx) // 2
19
if array[indices[middleIdx]] < num:
20
startIdx = middleIdx + 1
21
else:
22
endIdx = middleIdx - 1
23
return binarySearch(startIdx, endIdx, indices, array, num)
24
25
def buildSequence(array, sequences, currentIdx):
26
sequence = []
27
while currentIdx is not None:
28
sequence.append(array[currentIdx])
29
currentIdx = sequences[currentIdx]
30
return list(reversed(sequence))
31
Help:Hide
Show
Hint #1Hint #2Hint #3Optimal Space & Time Complexity
Try building an array of the same length as the input array. At each index in this new array, store the length of the longest increasing subsequence ending with the number found at that index in the input array.
Output:Custom Output
Raw Output
Run your code when you feel ready.
Tests:Our Tests
Your Tests
Hide
Show
No changes made.
1
import program
2
import unittest
3
4
5
class TestProgram(unittest.TestCase):
6
7
def test_case_1(self):
8
self.assertEqual(program.longestIncreasingSubsequence([-1]), [-1])
9
10
def test_case_2(self):
11
self.assertEqual(program.longestIncreasingSubsequence([-1, 2]), [-1, 2])
12
13
def test_case_3(self):
14
self.assertEqual(program.longestIncreasingSubsequence([-1, 2, 1, 2]), [-1, 1, 2])
15
16
def test_case_4(self):
17
self.assertEqual(program.longestIncreasingSubsequence([1, 5, -1, 10]), [1, 5, 10])
18
19
def test_case_5(self):
Video ExplanationGo to Conceptual OverviewGo to Code WalkthroughQuestions List
Copyright © 2019 AlgoExpert, LLC. All rights reserved.
Become An Affiliate
Contact Us
FAQ
Legal Stuff
Privacy Policy
|
21,449 | c105fee366b7c8a15aefaef3f8b63eb4e8ec1b26 | from String import first_occurence_substr
def test_find_first_occurrence():
    """Exercise find_first_occurrence_substr on a hit, a miss and an exact match."""
    # Found: "ll" first occurs at index 2 of "hello".
    # (local renamed from `str`, which shadowed the builtin)
    text = 'hello'
    needle = 'll'
    assert 2 == first_occurence_substr.find_first_occurrence_substr(text, needle)
    # Not found.  BUGFIX: the original assigned `haystack = "aaaaa"` but then
    # kept searching the previous string, so this case never actually ran
    # against the intended input.
    haystack = "aaaaa"
    needle = "bba"
    assert -1 == first_occurence_substr.find_first_occurrence_substr(haystack, needle)
    # Single-character exact match at index 0.
    text = 'q'
    needle = 'q'
    assert 0 == first_occurence_substr.find_first_occurrence_substr(text, needle)
|
21,450 | 5edc48ae71a50114f71df99f1b878218230b0b72 | from scipy.io import wavfile
import math
import re
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter
def moving_average(interval, window_size):
    """Smooth ``interval`` with a centred box filter of ``window_size`` samples.

    Returns the 'same'-mode convolution, so the output has the same length
    as the input (edges are effectively zero-padded).
    """
    kernel = np.ones(int(window_size)) * (1.0 / float(window_size))
    return np.convolve(interval, kernel, 'same')
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter; returns (b, a) coefficients.

    ``lowcut``/``highcut`` are in Hz and are normalised to the Nyquist
    frequency before being handed to :func:`scipy.signal.butter`.
    """
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, band, btype='band')


def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Apply the band-pass filter from :func:`butter_bandpass` to ``data``
    (single forward pass, so the output is phase-shifted)."""
    b, a = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(b, a, data)


def filter_bank(o_data, low_pass, high_pass, fs, order_of_filter, window_dur, hop_dur):
    """Band-pass ``o_data`` and compute its normalised short-term energy.

    ``window_dur`` and ``hop_dur`` are in milliseconds.  Returns
    ``(st_energy, padded)`` where ``st_energy`` is the Hann-windowed
    frame energy scaled so its maximum is 1, and ``padded`` is the filtered
    signal with ``window_size`` zeros appended so the last frames are full.
    """
    filtered = butter_bandpass_filter(o_data, low_pass, high_pass, fs, order_of_filter)
    window_size = int(window_dur * fs * 0.001)  # ms -> samples
    hop_size = int(hop_dur * fs * 0.001)        # ms -> samples
    window_type = np.hanning(window_size)
    no_frames = int(math.ceil(len(filtered) / (float(hop_size))))
    filtered = np.concatenate((filtered, np.zeros(window_size)))  # zero-pad tail
    st_energy = [
        sum((filtered[k * hop_size:k * hop_size + window_size] * window_type) ** 2)
        for k in range(no_frames)
    ]
    peak = max(st_energy)
    st_energy = [e / peak for e in st_energy]  # normalise the curve to [0, 1]
    return st_energy, filtered
# --- Script configuration ---------------------------------------------------
file_no = '27'  # recording id; selects both the wav file and its TextGrid
# NOTE(review): absolute Windows paths — machine-specific, not portable.
audio_file ='F:\Projects\Active Projects\Project Intern_IITB\Vowel Evaluation PE V3\Analyze\Vowel_Evaluation_V3_I6_M12\\' + file_no + '.wav'
textgridFA = 'F:\Projects\Active Projects\Project Intern_IITB\Vowel Evaluation PE V3\Analyze\Vowel_Evaluation_V3_I6_M12\\' + file_no + 'FA.TextGrid'
window = 50  # analysis window length, ms
hop = 7      # hop length, ms
fs, data = wavfile.read(audio_file)  # Reading data from wav file in an array
data = data / float(2 ** 15)  # Normalizing it to [-1,1] range from [-2^15,2^15]
# Short-term energy in seven band-pass channels (band edges in Hz,
# 6th-order Butterworth each, same window/hop as below).
st_energy_1, f_data_1 = filter_bank(data, 200,400,fs,6,window,hop)
st_energy_2, f_data_2 = filter_bank(data, 400,630,fs,6,window,hop)
st_energy_3, f_data_3 = filter_bank(data, 630,920,fs,6,window,hop)
st_energy_4, f_data_4 = filter_bank(data, 920,1270,fs,6,window,hop)
st_energy_5, f_data_5 = filter_bank(data, 1270,1720,fs,6,window,hop)
st_energy_6, f_data_6 = filter_bank(data, 1720,2320,fs,6,window,hop)
st_energy_7, f_data_7 = filter_bank(data, 2320,3200,fs,6,window,hop)
# st_energy_sum = [0] * len(st_energy_1)
# for i in range(len(st_energy_1)):
#     st_energy_sum[i] = st_energy_1[i] + st_energy_2[i] + st_energy_3[i] + st_energy_4[i] + st_energy_5[i] + st_energy_6[i] + st_energy_7[i]
# max_st_energy_sum = max(st_energy_sum) # Maximum value of Short term energy curve
# for i in range(len(st_energy_sum)):
#     st_energy_sum[i] = st_energy_sum[i] / max_st_energy_sum # Normalizing the curve
# Wideband short-term energy of the unfiltered signal, same framing as above.
window_size = int(window * fs * 0.001)  # Converting window length to samples
hop_size = int(hop * fs * 0.001)  # Converting hop length to samples
window_type = np.hanning(window_size)  # Window type: Hanning (by default)
no_frames = int(math.ceil(len(data) / (float(hop_size))))  # Determining the number of frames
zero_array = np.zeros(window_size)  # Appending appropriate number of zeros
data = np.concatenate((data, zero_array))
x_values = np.arange(0, len(data), 1) / float(fs)  # sample times in seconds
st_energy = []
for i in range(no_frames):  # Calculating frame wise short term energy
    frame = data[i * hop_size:i * hop_size + window_size] * window_type  # Multiplying each frame with a hamming window
    st_energy.append(sum(frame ** 2))  # Calculating the short term energy
max_st_energy = max(st_energy)  # Maximum value of Short term energy curve
for i in range(no_frames):
    st_energy[i] = st_energy[i] / max_st_energy  # Normalizing the curve
# plt.subplot(8,1,1)
# plt.plot(data)
# plt.subplot(8,1,2)
# plt.plot(f_data_1)
# plt.subplot(8,1,3)
# plt.plot(f_data_2)
# plt.subplot(8,1,4)
# plt.plot(f_data_3)
# plt.subplot(8,1,5)
# plt.plot(f_data_4)
# plt.subplot(8,1,6)
# plt.plot(f_data_5)
# plt.subplot(8,1,7)
# plt.plot(f_data_6)
# plt.subplot(8,1,8)
# plt.plot(f_data_7)
# plt.show()
# plt.subplot(111)
# plt.plot(st_energy_1,'red',label='[200-400]')
# plt.plot(st_energy_2,'orange',label='[400-630]')
# plt.plot(st_energy_3,'yellow',label='[630-920]')
# plt.plot(st_energy_4,'green',label='[920-1270]')
# plt.plot(st_energy_5,'blue',label='[1270-1720]')
# plt.plot(st_energy_6,'indigo',label='[1720-2320]')
# plt.plot(st_energy_7,'violet',label='[2320-3200]')
# plt.legend()
# plt.show()
#----------------------------------------------------------------------------------------------------------------------#
# --- Parse syllable boundaries and labels from the forced-alignment TextGrid
# NOTE(review): the file handle is never closed, and the parser below reads
# xmin/xmax by FIXED character offsets relative to each `text = "` marker —
# it assumes the TextGrid fields are exactly 7/8 chars (xmin) and 9 chars
# (xmax) wide; any other formatting silently mis-parses.
text_grid_1 = open(textgridFA, 'r')  # Open the FA TextGrid
data_FA = text_grid_1.read()  # Read and assign the content of the FA TextGrid to data_1
time_1 = []  # flat list: [xmin, xmax, label, index] appended per interval
counter = 0  # running interval index
#----------------------------------------------------------------------------------------------------------------------#
for m in re.finditer('text = "', data_FA):
    if data_FA[m.start() - 33] == '=':
        # Short xmin field (7 chars, directly preceded by '='), then the
        # 9-char xmax field.
        time_1.append(float(
            data_FA[m.start() - 32] + data_FA[m.start() - 31] + data_FA[m.start() - 30] + data_FA[m.start() - 29] +
            data_FA[m.start() - 28] + data_FA[m.start() - 27] + data_FA[m.start() - 26]))
        time_1.append(float(
            data_FA[m.start() - 13] + data_FA[m.start() - 12] + data_FA[m.start() - 11] + data_FA[m.start() - 10] +
            data_FA[m.start() - 9] + data_FA[m.start() - 8] + data_FA[m.start() - 7] + data_FA[m.start() - 6] +
            data_FA[m.start() - 5]))
    else:
        # Long xmin field (8 chars), then the 9-char xmax field.
        time_1.append(float(
            data_FA[m.start() - 33] + data_FA[m.start() - 32] + data_FA[m.start() - 31] + data_FA[m.start() - 30] +
            data_FA[m.start() - 29] + data_FA[m.start() - 28] + data_FA[m.start() - 27] + data_FA[m.start() - 26]))
        time_1.append(float(
            data_FA[m.start() - 13] + data_FA[m.start() - 12] + data_FA[m.start() - 11] + data_FA[m.start() - 10] +
            data_FA[m.start() - 9] + data_FA[m.start() - 8] + data_FA[m.start() - 7] + data_FA[m.start() - 6] +
            data_FA[m.start() - 5]))
    #------------------------------------------------------------------------------------------------------------------#
    # Interval label: 1, 2 or 3 characters between the quotes after the marker.
    if data_FA[m.start() + 9] == '"':
        time_1.append((data_FA[m.start() + 8]))
    elif data_FA[m.start() + 10] == '"':
        time_1.append((data_FA[m.start() + 8] + data_FA[m.start() + 9]))
    else:
        time_1.append((data_FA[m.start() + 8] + data_FA[m.start() + 9] + data_FA[m.start() + 10]))
    time_1.append(counter)  # store the interval's sequence number
    counter += 1
#---------------------------------------------------------------------------------------------------------------------#
# --- Plot: waveform with syllable boundaries (top) and the seven band-wise
# energy curves (bottom).  time_1 holds 4 entries per interval:
# [xmin, xmax, label, index], hence the stride-4 loops below.
plt.subplot(211)
plt.plot(x_values,data)  # The Original Data
plt.xlim(0,x_values[-1])  # Limiting it to fixed range for representational purposes
for j in range(0, len(time_1), 4):
    plt.vlines(time_1[j], min(data)+0.30*min(data), max(data), 'black')  # Syllable Boundaries
for j in range(2, len(time_1), 4):
    plt.text(time_1[j - 2], min(data)+0.28*min(data), time_1[j], fontsize=15, color='green', rotation=0)  # Syllable Labels
for j in range(0,len(time_1),4):  # Bounding arrows for Syllable
    plt.arrow(time_1[j], min(data)+0.30*min(data), (time_1[j + 1] - time_1[j])-0.01, 0, head_width=0.005, head_length=0.01)
    plt.arrow(time_1[j+1], min(data)+0.30*min(data), -(time_1[j + 1] - time_1[j]) + 0.01, 0, head_width=0.005, head_length=0.01)
plt.xlabel('Time (In seconds)')
plt.ylabel('Amplitude')
plt.title('Sound Waveform',color='blue')
plt.subplot(212)
# One normalised energy curve per filter-bank channel; labels give the band in Hz.
plt.plot(st_energy_1,'red',label='[200-400]')
plt.plot(st_energy_2,'orange',label='[400-630]')
plt.plot(st_energy_3,'yellow',label='[630-920]')
plt.plot(st_energy_4,'green',label='[920-1270]')
plt.plot(st_energy_5,'blue',label='[1270-1720]')
plt.plot(st_energy_6,'indigo',label='[1720-2320]',ls='dotted')
plt.plot(st_energy_7,'violet',label='[2320-3200]',ls='dashed')
plt.xlim(0,len(st_energy_1))
plt.legend()
plt.xlabel('No. of frames')
plt.ylabel('Normalised Magnitude')
plt.title('Short Term Energy')
plt.show()
|
21,451 | 7e411bc6a2ceb1696c3b09817dfb51082c54588f | """Line RFI flagging.
Inheritance diagram
-------------------
.. inheritance-diagram:: Flag
:parts: 2
"""
import warnings
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
import timestream_task
from tlpipe.container.raw_timestream import RawTimestream
from tlpipe.container.timestream import Timestream
from tlpipe.utils.sg_filter import savitzky_golay
from tlpipe.utils.path_util import output_path
import tlpipe.plot
import matplotlib.pyplot as plt
class Flag(timestream_task.TimestreamTask):
    """Line RFI flagging.

    This task flags the line RFI along time (then frequency) by first integrate
    data along frequency (and correspondingly time) axis, and mask values that
    exceeds the given threshold.
    """

    params_init = {
        'freq_window': 15,   # smoothing window (channels) along frequency
        'time_window': 15,   # smoothing window (samples) along time
        'freq_sigma': 2.0,   # MAD threshold along frequency
        'time_sigma': 5.0,   # MAD threshold along time
        'plot_fit': False,   # plot the smoothing fit
        'freq_fig_name': 'rfi_freq',
        'time_fig_name': 'rfi_time',
    }

    prefix = 'lf_'

    def process(self, ts):
        """Validate window sizes and dispatch :meth:`flag` over all baselines.

        A direction (frequency or time) is skipped entirely when there are
        fewer than two windows' worth of points to smooth.
        """
        freq_window = self.params['freq_window']
        time_window = self.params['time_window']

        ts.redistribute('baseline')

        time = ts.time[:]
        nt = len(time)
        freq = ts.freq[:]
        nfreq = len(freq)
        # bl = ts.local_bl[:]

        # freq_window = min(nfreq/2, freq_window)
        # ensure window_size is an odd number
        if freq_window % 2 == 0:
            freq_window += 1
        if nfreq < 2*freq_window:
            warnings.warn('Not enough frequency points to do the smoothing')
            freq_flag = False
        else:
            freq_flag = True

        # time_window = min(nt/2, time_window)
        # ensure window_size is an odd number
        if time_window % 2 == 0:
            time_window += 1
        if nt < 2*time_window:
            warnings.warn('Not enough time points to do the smoothing')
            time_flag = False
        else:
            time_flag = True

        if isinstance(ts, RawTimestream):
            func = ts.bl_data_operate
        elif isinstance(ts, Timestream):
            func = ts.pol_and_bl_data_operate

        func(self.flag, full_data=True, keep_dist_axis=False, freq_flag=freq_flag, time_flag=time_flag)

        return super(Flag, self).process(ts)

    def flag(self, vis, vis_mask, li, gi, bl, ts, **kwargs):
        """Function that does the actual flag.

        For each direction, the band-/time-averaged amplitude is iteratively
        smoothed with a Savitzky-Golay filter; points whose deviation from the
        smooth curve exceeds ``sigma`` times the MAD-estimated noise are
        replaced by the smooth value and re-fit (up to 10 passes).  The final
        outlier set is written into ``vis_mask`` in place.
        """
        freq_window = self.params['freq_window']
        time_window = self.params['time_window']
        freq_sigma = self.params['freq_sigma']
        time_sigma = self.params['time_sigma']
        plot_fit = self.params['plot_fit']
        freq_fig_prefix = self.params['freq_fig_name']
        time_fig_prefix = self.params['time_fig_name']
        tag_output_iter = self.params['tag_output_iter']
        iteration = self.iteration
        freq_flag = kwargs.get('freq_flag')
        time_flag = kwargs.get('time_flag')

        # BUGFIX: mirror the odd-window adjustment done in process().  The
        # values re-read from params here stayed even when the user supplied
        # an even window, inconsistent with the size checks done in process().
        if freq_window % 2 == 0:
            freq_window += 1
        if time_window % 2 == 0:
            time_window += 1

        time = ts.time[:]
        nt = len(time)
        freq = ts.freq[:]
        nfreq = len(freq)

        if isinstance(ts, Timestream):  # for Timestream
            pol = bl[0]
            bl = tuple(bl[1])
        elif isinstance(ts, RawTimestream):  # for RawTimestream
            pol = None
            bl = tuple(bl)
        else:
            raise ValueError('Need either a RawTimestream or Timestream')

        if freq_flag:
            # time integration
            tm_vis = np.ma.mean(np.ma.array(vis, mask=vis_mask), axis=0)
            abs_vis = np.abs(tm_vis)
            if np.ma.count_masked(tm_vis) > 0: # has masked value
                # fill masked channels by spline interpolation over valid ones
                abs_vis_valid = abs_vis[~abs_vis.mask]
                inds_valid = np.arange(nfreq)[~abs_vis.mask]
                itp = InterpolatedUnivariateSpline(inds_valid, abs_vis_valid)
                abs_vis_itp = itp(np.arange(nfreq))
                abs_vis1 = abs_vis_itp.copy()
            else:
                abs_vis1 = abs_vis.copy()

            # BUGFIX: `range` instead of py2-only `xrange` (identical here,
            # and keeps the task importable under Python 3).
            for cnt in range(10):
                if cnt != 0:
                    abs_vis1[inds] = smooth[inds]
                smooth = savitzky_golay(abs_vis1, freq_window, 3)

                # flage RFI
                diff = abs_vis1 - smooth
                median = np.median(diff)
                abs_diff = np.abs(diff - median)
                mad = np.median(abs_diff) / 0.6745  # robust sigma estimate
                inds = np.where(abs_diff > freq_sigma*mad)[0] # masked inds
                if len(inds) == 0:
                    break

            # final outlier selection against the ORIGINAL (un-replaced) curve
            diff = abs_vis - smooth
            median = np.median(diff)
            abs_diff = np.abs(diff - median)
            mad = np.median(abs_diff) / 0.6745
            inds = np.where(abs_diff > freq_sigma*mad)[0] # masked inds
            vis_mask[:, inds] = True # set mask

            if plot_fit:
                plt.figure()
                plt.plot(freq, abs_vis, label='data')
                plt.plot(freq[inds], abs_vis[inds], 'ro', label='flag')
                plt.plot(freq, smooth, label='smooth')
                plt.xlabel(r'$\nu$ / MHz')
                plt.legend(loc='best')
                if pol is None:
                    fig_name = '%s_%d_%d.png' % (freq_fig_prefix, bl[0], bl[1])
                else:
                    fig_name = '%s_%d_%d_%s.png' % (freq_fig_prefix, bl[0], bl[1], pol)
                if tag_output_iter:
                    fig_name = output_path(fig_name, iteration=iteration)
                else:
                    fig_name = output_path(fig_name)
                plt.savefig(fig_name)
                plt.close()

        if time_flag:
            # freq integration
            fm_vis = np.ma.mean(np.ma.array(vis, mask=vis_mask), axis=1)
            abs_vis = np.abs(fm_vis)
            if np.ma.count_masked(fm_vis) > 0: # has masked value
                abs_vis_valid = abs_vis[~abs_vis.mask]
                inds_valid = np.arange(nt)[~abs_vis.mask]
                itp = InterpolatedUnivariateSpline(inds_valid, abs_vis_valid)
                abs_vis_itp = itp(np.arange(nt))
                abs_vis1 = abs_vis_itp.copy()
            else:
                abs_vis1 = abs_vis.copy()

            # BUGFIX: `range` instead of py2-only `xrange` (see above).
            for cnt in range(10):
                if cnt != 0:
                    abs_vis1[inds] = smooth[inds]
                smooth = savitzky_golay(abs_vis1, time_window, 3)

                # flage RFI
                diff = abs_vis1 - smooth
                median = np.median(diff)
                abs_diff = np.abs(diff - median)
                mad = np.median(abs_diff) / 0.6745
                inds = np.where(abs_diff > time_sigma*mad)[0] # masked inds
                if len(inds) == 0:
                    break

            diff = abs_vis - smooth
            median = np.median(diff)
            abs_diff = np.abs(diff - median)
            mad = np.median(abs_diff) / 0.6745
            inds = np.where(abs_diff > time_sigma*mad)[0] # masked inds
            # Addtional threshold
            # inds1 = np.where(np.abs(diff[inds]) > 1.0e-2*np.abs(smooth[inds]))[0]
            # inds = inds[inds1]
            vis_mask[inds] = True # set mask

            if plot_fit:
                plt.figure()
                plt.plot(time, abs_vis, label='data')
                plt.plot(time[inds], abs_vis[inds], 'ro', label='flag')
                plt.plot(time, smooth, label='smooth')
                plt.xlabel(r'$t$ / Julian Date')
                plt.legend(loc='best')
                if pol is None:
                    fig_name = '%s_%d_%d.png' % (time_fig_prefix, bl[0], bl[1])
                else:
                    fig_name = '%s_%d_%d_%s.png' % (time_fig_prefix, bl[0], bl[1], pol)
                if tag_output_iter:
                    fig_name = output_path(fig_name, iteration=iteration)
                else:
                    fig_name = output_path(fig_name)
                plt.savefig(fig_name)
                plt.close()
|
21,452 | 1e8ba3f34e91976a7f3af313faec85a16de6ddb1 | # https://deeplearningcourses.com/c/cluster-analysis-unsupervised-machine-learning-python
# https://www.udemy.com/cluster-analysis-unsupervised-machine-learning-python
from __future__ import print_function, division
from future.utils import iteritems
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
import networkx as nx
import nltk
import numpy as np
import matplotlib.pyplot as plt
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, LocallyLinearEmbedding as LLE
from sklearn.feature_extraction.text import TfidfTransformer
wordnet_lemmatizer = WordNetLemmatizer()

# Book titles, one per line.  BUGFIX: use context managers so the file
# handles are closed deterministically (the originals were never closed and
# relied on garbage collection).
with open('../nlp_class/all_book_titles.txt') as f:
    titles = [line.rstrip() for line in f]

# copy tokenizer from sentiment example
with open('../nlp_class/stopwords.txt') as f:
    stopwords = set(w.rstrip() for w in f)

# add more stopwords specific to this problem
stopwords = stopwords.union({
    'introduction', 'edition', 'series', 'application',
    'approach', 'card', 'access', 'package', 'plus', 'etext',
    'brief', 'vol', 'fundamental', 'guide', 'essential', 'printed',
    'third', 'second', 'fourth', })
def my_tokenizer(s):
    """Lower-case, tokenize, lemmatize and filter one title string.

    Drops tokens of length <= 2, stopwords (checked after lemmatization),
    and anything containing a digit (e.g. "3rd edition").
    """
    kept = []
    for raw in nltk.tokenize.word_tokenize(s.lower()):
        if len(raw) <= 2:  # short words are probably not useful
            continue
        lemma = wordnet_lemmatizer.lemmatize(raw)  # put word into base form
        if lemma in stopwords:
            continue
        if any(ch.isdigit() for ch in lemma):
            continue
        kept.append(lemma)
    return kept
# create a word-to-index map so that we can create our word-frequency vectors later
# let's also save the tokenized versions so we don't have to tokenize again later
word_index_map = {}   # token -> column index in the vocabulary
current_index = 0     # next free index; ends up equal to the vocab size
all_tokens = []       # tokenized form of every successfully-decoded title
all_titles = []       # titles that survived the ascii round-trip
index_word_map = []   # inverse of word_index_map: index -> token
print("num titles:", len(titles))
print("first title:", titles[0])
for title in titles:
    try:
        title = title.encode('ascii', 'ignore')  # this will throw exception if bad characters
        title = title.decode('utf-8')
        all_titles.append(title)
        tokens = my_tokenizer(title)
        all_tokens.append(tokens)
        for token in tokens:
            if token not in word_index_map:
                word_index_map[token] = current_index
                current_index += 1
                index_word_map.append(token)
    except Exception as e:
        # skip titles that fail the encode/decode round-trip; keep going
        print(e)
# now let's create our input matrices - just indicator variables for this example - works better than proportions
def tokens_to_vector(tokens):
    """Map a token list onto a raw count vector over the global vocabulary.

    Indicator-style counts (each occurrence adds 1) indexed by the
    module-level ``word_index_map``.
    """
    counts = np.zeros(len(word_index_map))
    for tok in tokens:
        counts[word_index_map[tok]] += 1
    return counts
# Build the raw term-document count matrix from the tokenized titles.
N = len(all_tokens)       # number of documents (book titles)
D = len(word_index_map)   # vocabulary size
X = np.zeros((D, N))      # terms will go along rows, documents along columns
for col, tokens in enumerate(all_tokens):
    X[:, col] = tokens_to_vector(tokens)
def d(u, v):
    """Squared Euclidean distance between vectors ``u`` and ``v``."""
    delta = u - v
    return delta.dot(delta)
def cost(X, R, M):
    """Soft k-means objective: responsibility-weighted sum of squared distances.

    X : (N, D) data points, R : (N, K) responsibilities, M : (K, D) means.
    """
    total = 0
    for k in range(len(M)):
        # vectorized per-cluster squared distances, weighted by R[:, k]
        delta = X - M[k]
        sq_dist = (delta * delta).sum(axis=1)
        total += (R[:, k] * sq_dist).sum()
    return total
def plot_k_means(X, K, index_word_map, max_iter=20, beta=1.0, show_plots=True):
    """Soft k-means over the rows of X; optionally plot and annotate.

    X : (N, D) array of points (here: 2-D word embeddings).
    K : number of clusters.
    index_word_map : row index -> word, used for annotation and the printout.
    beta : stiffness of the soft assignments (larger -> harder).
    Returns (M, R): cluster means (K, D) and responsibilities (N, K).
    Side effects: saves "test.png" when show_plots is true, and prints the
    words grouped by their argmax cluster.
    """
    N, D = X.shape
    M = np.zeros((K, D))        # cluster means
    R = np.zeros((N, K))        # soft responsibilities
    exponents = np.empty((N, K))
    # initialize M to random data points
    for k in range(K):
        M[k] = X[np.random.choice(N)]
    costs = np.zeros(max_iter)
    for i in range(max_iter):
        # step 1: determine assignments / resposibilities
        # is this inefficient?
        for k in range(K):
            for n in range(N):
                # R[n,k] = np.exp(-beta*d(M[k], X[n])) / np.sum( np.exp(-beta*d(M[j], X[n])) for j in range(K) )
                exponents[n,k] = np.exp(-beta*d(M[k], X[n]))
        R = exponents / exponents.sum(axis=1, keepdims=True)
        # step 2: recalculate means (responsibility-weighted averages)
        for k in range(K):
            M[k] = R[:,k].dot(X) / R[:,k].sum()
        costs[i] = cost(X, R, M)
        if i > 0:
            # stop once the objective has effectively converged
            if np.abs(costs[i] - costs[i-1]) < 10e-5:
                break
    if show_plots:
        # plt.plot(costs)
        # plt.title("Costs")
        # plt.show()
        # color each point by its responsibility-weighted blend of cluster colors
        random_colors = np.random.random((K, 3))
        colors = R.dot(random_colors)
        plt.figure(figsize=(80.0, 80.0))
        plt.scatter(X[:,0], X[:,1], s=300, alpha=0.9, c=colors)
        annotate1(X, index_word_map)
        # plt.show()
        plt.savefig("test.png")
    # print out the clusters
    hard_responsibilities = np.argmax(R, axis=1) # is an N-size array of cluster identities
    # let's "reverse" the order so it's cluster identity -> word index
    cluster2word = {}
    for i in range(len(hard_responsibilities)):
        word = index_word_map[i]
        cluster = hard_responsibilities[i]
        if cluster not in cluster2word:
            cluster2word[cluster] = []
        cluster2word[cluster].append(word)
    # print out the words grouped by cluster
    for cluster, wordlist in cluster2word.items():
        print("cluster", cluster, "->", wordlist)
    return M, R
# def annotate2(X, index_word_map, k=0.1):
# N, D = X.shape
# # create graph
# G = nx.DiGraph()
# data_nodes = []
# init_pos = {}
# for i in range(N):
# x, y = X[i]
# label = index_word_map[i]
# data_str = 'data_{0}'.format(label)
# G.add_node(data_str)
# G.add_node(label)
# G.add_edge(label, data_str)
# data_nodes.append(data_str)
# init_pos[data_str] = (x, y)
# init_pos[label] = (x, y)
# pos = nx.spring_layout(G, pos=init_pos, fixed=data_nodes, k=k)
# # undo spring_layout's rescaling
# pos_after = np.vstack([pos[d] for d in data_nodes])
# pos_before = np.vstack([init_pos[d] for d in data_nodes])
# scale, shift_x = np.polyfit(pos_after[:,0], pos_before[:,0], 1)
# scale, shift_y = np.polyfit(pos_after[:,1], pos_before[:,1], 1)
# shift = np.array([shift_x, shift_y])
# for key, val in pos.items():
# pos[key] = (val*scale) + shift
# for label, data_str in G.edges():
# plt.annotate(
# label,
# xy=pos[data_str], xycoords='data',
# xytext=pos[label], textcoords='data',
# arrowprops=dict(arrowstyle="->", color='black'),
# )
# # expand limits
# all_pos = np.vstack(pos.values())
# x_span, y_span = np.ptp(all_pos, axis=0)
# mins = np.min(all_pos-x_span*0.15, 0)
# maxs = np.max(all_pos+y_span*0.15, 0)
# plt.xlim([mins[0], maxs[0]])
# plt.ylim([mins[1], maxs[1]])
def annotate1(X, index_word_map, eps=0.1):
    """Annotate each point X[i] with its word, jittering labels randomly
    (up to 3 retries) when a label would land within squared distance ``eps``
    of an already-placed one, so dense regions stay readable."""
    N, D = X.shape
    placed = np.empty((N, D))  # label positions chosen so far
    for i in range(N):
        x, y = X[i]
        # if x, y is too close to something already plotted, move it
        close = []
        x, y = X[i]
        for retry in range(3):
            for j in range(i):
                diff = np.array([x, y]) - placed[j]
                # if something is close, append it to the close list
                if diff.dot(diff) < eps:
                    close.append(placed[j])
            if close:
                # then the close list is not empty: jitter by ~|N(0.5, 1)| in
                # a random direction on each axis and re-check
                x += (np.random.randn() + 0.5) * (1 if np.random.rand() < 0.5 else -1)
                y += (np.random.randn() + 0.5) * (1 if np.random.rand() < 0.5 else -1)
                close = [] # so we can start again with an empty list
            else:
                # nothing close, let's break
                break
        placed[i] = (x, y)
        plt.annotate(
            # NOTE(review): the `s=` keyword was renamed to `text=` in
            # matplotlib 3.3 — confirm the pinned matplotlib version.
            s=index_word_map[i],
            xy=(X[i,0], X[i,1]),
            xytext=(x, y),
            arrowprops={
                'arrowstyle' : '->',
                'color' : 'black',
            }
        )
print("vocab size:", current_index)
# TF-IDF weight the term-document matrix, then embed each TERM (row) in a
# low-dimensional space with t-SNE.
transformer = TfidfTransformer()
X = transformer.fit_transform(X).toarray()
reducer = TSNE()
Z = reducer.fit_transform(X)
# Cluster the embedded terms' first two components into vocab_size // 10
# soft clusters, plotting and printing the result.
plot_k_means(Z[:,:2], current_index//10, index_word_map, show_plots=True)
|
21,453 | ed0e60d5089aaa21f476fa08fff256153627cf2e | # -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-09 03:36
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial migration: custom User model plus the app's
    core tables (reports, follow requests, file parts, last-seen log,
    profiles)."""

    # First migration of this app — creates the tables from scratch.
    initial = True

    dependencies = [
        # Needed because the custom User references auth.Group/auth.Permission.
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        # Custom user model keyed by UUID instead of an integer id.
        migrations.CreateModel(
            name='User',
            fields=[
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('is_admin', models.BooleanField(default=False)),
                ('date_of_birth', models.DateField()),
                ('date_joined', models.DateTimeField(auto_now_add=True)),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 60 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=60, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
        ),
        # A user-filed report against an account.
        migrations.CreateModel(
            name='AccountReport',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('brief', models.TextField()),
                ('violation', models.CharField(max_length=25)),
            ],
        ),
        # Follow/friend request between two users (requester -> requested).
        migrations.CreateModel(
            name='AccountRequest',
            fields=[
                ('accept', models.BooleanField(default=False)),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('userFrom', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='af_requester', to=settings.AUTH_USER_MODEL)),
                ('userTo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='af_requested', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='FilePart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to='')),
            ],
        ),
        # Audit log of a user's last seen IP and timestamp.
        migrations.CreateModel(
            name='LastSeen',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip_of_seen', models.GenericIPAddressField(default=None, editable=False)),
                ('time_and_date', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # One-to-one profile extension of User, keyed by its own UUID.
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('bio', models.CharField(blank=True, max_length=255, null=True)),
                ('link1', models.URLField(blank=True, null=True)),
                ('link2', models.URLField(blank=True, null=True)),
                ('location', models.CharField(blank=True, max_length=150, null=True)),
                ('pic', models.FileField(blank=True, null=True, upload_to='')),
                ('banner', models.FileField(blank=True, null=True, upload_to='')),
                ('strikes', models.IntegerField(blank=True, default=0)),
                ('suspended', models.BooleanField(default=False)),
                ('private', models.BooleanField(default=True)),
                ('celeb', models.BooleanField(default=False)),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
                ('tier', models.PositiveIntegerField(default=1)),
                ('content_requires_18', models.BooleanField(default=False)),
                ('percentage', models.PositiveIntegerField(default=25)),
                ('blocked', models.ManyToManyField(blank=True, related_name='blocked_by', to=settings.AUTH_USER_MODEL)),
                ('following', models.ManyToManyField(blank=True, related_name='followed_by', to=settings.AUTH_USER_MODEL)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
21,454 | 587c7ff31e74d6569a1a8772ef3c7a3acd15a7cd | #!/usr/bin/env python3
# Q: http://www.pythonchallenge.com/pc/ring/bell.html
# A: http://www.pythonchallenge.com/pc/ring/guido.html
import urllib.request
from PIL import Image
import PC_Util
def whodunnit():
    """Return the answer to the riddle, lower-cased."""
    answer = 'Guido van Rossum'
    return answer.lower()
def main():
    """Download bell.png and decode the hidden message.

    Pixels are read in horizontal pairs; the absolute difference of the
    pair's green channels spells a character, except that a difference of
    42 is a separator and is skipped.
    """
    PC_Util.configure_auth()
    png_path = urllib.request.urlretrieve('http://www.pythonchallenge.com/pc/ring/bell.png')[0]
    img = Image.open(png_path)
    w, h = img.size
    decoded = []
    for row in range(h):
        for col in range(0, w, 2):
            delta = abs(img.getpixel((col, row))[1] - img.getpixel((col + 1, row))[1])
            if delta != 42:
                decoded.append(chr(delta))
    print(''.join(decoded))
    print(whodunnit().split()[0])


if __name__ == '__main__':
    main()
|
21,455 | 348cfad9af798a45f66cf83b1f1745b2ea13c0dc |
def unique_sort(lst):
    """Return a new sorted list of the distinct elements of ``lst``."""
    return sorted(set(lst))
|
21,456 | a92a83c663159ffe76ab5e3d85d9e498191313bd | # -*- coding: utf-8 -*-
# @Time : 2021/3/7 16:06
# @Author : cyx
# @function: 该功能描述
# @gitbub :
from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
# 主页
class MainPage(object):
def __init__(self,driver:webdriver):
self.driver = driver
def goto_addresslist(self):
|
21,457 | 18c267d78a175b65fdf40a028b0f8be35d5dfdde | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for Chi quantum channel representation class."""
import unittest
import numpy as np
from qiskit import QiskitError
from qiskit.quantum_info.operators.channel import Chi
from .channel_test_case import ChannelTestCase
class TestChi(ChannelTestCase):
    """Tests for Chi channel representation."""
    # Fixtures (chiI/chiX/chiY/chiZ/chiH, depol_chi, rand_* helpers and the
    # assertAllClose helper) come from ChannelTestCase.

    def test_init(self):
        """Test initialization"""
        # 1-qubit channel: a 4x4 chi matrix.
        mat4 = np.eye(4) / 2.0
        chan = Chi(mat4)
        self.assertAllClose(chan.data, mat4)
        self.assertEqual(chan.dim, (2, 2))
        # 2-qubit channel: a 16x16 chi matrix.
        mat16 = np.eye(16) / 4
        chan = Chi(mat16)
        self.assertAllClose(chan.data, mat16)
        self.assertEqual(chan.dim, (4, 4))
        # Wrong input or output dims should raise exception
        self.assertRaises(QiskitError, Chi, mat16, input_dims=2, output_dims=4)
        # Non multi-qubit dimensions should raise exception
        self.assertRaises(
            QiskitError, Chi, np.eye(6) / 2, input_dims=3, output_dims=2)

    def test_circuit_init(self):
        """Test initialization from a circuit."""
        circuit, target = self.simple_circuit_no_measure()
        op = Chi(circuit)
        target = Chi(target)
        self.assertEqual(op, target)

    def test_circuit_init_except(self):
        """Test initialization from circuit with measure raises exception."""
        circuit = self.simple_circuit_with_measure()
        self.assertRaises(QiskitError, Chi, circuit)

    def test_equal(self):
        """Test __eq__ method"""
        mat = self.rand_matrix(4, 4, real=True)
        self.assertEqual(Chi(mat), Chi(mat))

    def test_copy(self):
        """Test copy method"""
        mat = np.eye(4)
        orig = Chi(mat)
        cpy = orig.copy()
        # Mutating the copy must not affect the original (deep copy).
        cpy._data[0, 0] = 0.0
        self.assertFalse(cpy == orig)

    def test_evolve(self):
        """Test evolve method."""
        # Both statevector and density-matrix inputs are accepted,
        # as list or as ndarray.
        input_psi = [0, 1]
        input_rho = [[0, 0], [0, 1]]
        # Identity channel
        chan = Chi(self.chiI)
        target_rho = np.array([[0, 0], [0, 1]])
        self.assertAllClose(chan._evolve(input_psi), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_psi)), target_rho)
        self.assertAllClose(chan._evolve(input_rho), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho)
        # Hadamard channel
        chan = Chi(self.chiH)
        target_rho = np.array([[1, -1], [-1, 1]]) / 2
        self.assertAllClose(chan._evolve(input_psi), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_psi)), target_rho)
        self.assertAllClose(chan._evolve(input_rho), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho)
        # Completely depolarizing channel
        chan = Chi(self.depol_chi(1))
        target_rho = np.eye(2) / 2
        self.assertAllClose(chan._evolve(input_psi), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_psi)), target_rho)
        self.assertAllClose(chan._evolve(input_rho), target_rho)
        self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho)

    def test_is_cptp(self):
        """Test is_cptp method."""
        self.assertTrue(Chi(self.depol_chi(0.25)).is_cptp())
        # Non-CPTP should return false
        self.assertFalse(
            Chi(1.25 * self.chiI - 0.25 * self.depol_chi(1)).is_cptp())

    def test_compose_except(self):
        """Test compose different dimension exception"""
        self.assertRaises(QiskitError, Chi(np.eye(4)).compose, Chi(np.eye(16)))
        self.assertRaises(QiskitError, Chi(np.eye(4)).compose, 2)

    def test_compose(self):
        """Test compose method."""
        # Random input test state
        rho = self.rand_rho(2)
        # UnitaryChannel evolution: X then Y equals Z (up to phase).
        chan1 = Chi(self.chiX)
        chan2 = Chi(self.chiY)
        chan = chan1.compose(chan2)
        targ = Chi(self.chiZ)._evolve(rho)
        self.assertAllClose(chan._evolve(rho), targ)
        # 50% depolarizing channel composed with itself is 75% depolarizing.
        chan1 = Chi(self.depol_chi(0.5))
        chan = chan1.compose(chan1)
        targ = Chi(self.depol_chi(0.75))._evolve(rho)
        self.assertAllClose(chan._evolve(rho), targ)
        # Compose random
        chi1 = self.rand_matrix(4, 4, real=True)
        chi2 = self.rand_matrix(4, 4, real=True)
        chan1 = Chi(chi1, input_dims=2, output_dims=2)
        chan2 = Chi(chi2, input_dims=2, output_dims=2)
        targ = chan2._evolve(chan1._evolve(rho))
        chan = chan1.compose(chan2)
        self.assertEqual(chan.dim, (2, 2))
        self.assertAllClose(chan._evolve(rho), targ)
        # The @ operator must match compose().
        chan = chan1 @ chan2
        self.assertEqual(chan.dim, (2, 2))
        self.assertAllClose(chan._evolve(rho), targ)

    def test_compose_front(self):
        """Test front compose method."""
        # front=True reverses the composition order.
        # Random input test state
        rho = self.rand_rho(2)
        # UnitaryChannel evolution
        chan1 = Chi(self.chiX)
        chan2 = Chi(self.chiY)
        chan = chan2.compose(chan1, front=True)
        targ = Chi(self.chiZ)._evolve(rho)
        self.assertAllClose(chan._evolve(rho), targ)
        # Compose random
        chi1 = self.rand_matrix(4, 4, real=True)
        chi2 = self.rand_matrix(4, 4, real=True)
        chan1 = Chi(chi1, input_dims=2, output_dims=2)
        chan2 = Chi(chi2, input_dims=2, output_dims=2)
        targ = chan2._evolve(chan1._evolve(rho))
        chan = chan2.compose(chan1, front=True)
        self.assertEqual(chan.dim, (2, 2))
        self.assertAllClose(chan._evolve(rho), targ)

    def test_expand(self):
        """Test expand method."""
        # Pauli channels
        paulis = [self.chiI, self.chiX, self.chiY, self.chiZ]
        targs = 4 * np.eye(16)  # diagonals of Pauli channel Chi mats
        for i, chi1 in enumerate(paulis):
            for j, chi2 in enumerate(paulis):
                chan1 = Chi(chi1)
                chan2 = Chi(chi2)
                chan = chan1.expand(chan2)
                # Target for diagonal Pauli channel
                targ = Chi(np.diag(targs[i + 4 * j]))
                self.assertEqual(chan.dim, (4, 4))
                self.assertEqual(chan, targ)
        # Completely depolarizing
        rho = np.diag([1, 0, 0, 0])
        chan_dep = Chi(self.depol_chi(1))
        chan = chan_dep.expand(chan_dep)
        targ = np.diag([1, 1, 1, 1]) / 4
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho), targ)

    def test_tensor(self):
        """Test tensor method."""
        # tensor() is expand() with the operand order swapped.
        # Pauli channels
        paulis = [self.chiI, self.chiX, self.chiY, self.chiZ]
        targs = 4 * np.eye(16)  # diagonals of Pauli channel Chi mats
        for i, chi1 in enumerate(paulis):
            for j, chi2 in enumerate(paulis):
                chan1 = Chi(chi1)
                chan2 = Chi(chi2)
                chan = chan2.tensor(chan1)
                # Target for diagonal Pauli channel
                targ = Chi(np.diag(targs[i + 4 * j]))
                self.assertEqual(chan.dim, (4, 4))
                self.assertEqual(chan, targ)
                # Test overload
                chan = chan2 ^ chan1
                self.assertEqual(chan.dim, (4, 4))
                self.assertEqual(chan, targ)
        # Completely depolarizing
        rho = np.diag([1, 0, 0, 0])
        chan_dep = Chi(self.depol_chi(1))
        chan = chan_dep.tensor(chan_dep)
        targ = np.diag([1, 1, 1, 1]) / 4
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho), targ)
        # Test operator overload
        chan = chan_dep ^ chan_dep
        self.assertEqual(chan.dim, (4, 4))
        self.assertAllClose(chan._evolve(rho), targ)

    def test_power(self):
        """Test power method."""
        # 10% depolarizing channel
        p_id = 0.9
        depol = Chi(self.depol_chi(1 - p_id))
        # Compose 3 times: identity probability multiplies.
        p_id3 = p_id**3
        chan3 = depol.power(3)
        targ3 = Chi(self.depol_chi(1 - p_id3))
        self.assertEqual(chan3, targ3)

    def test_power_except(self):
        """Test power method raises exceptions."""
        chan = Chi(self.depol_chi(1))
        # Non-integer power raises error
        self.assertRaises(QiskitError, chan.power, 0.5)

    def test_add(self):
        """Test add method."""
        mat1 = 0.5 * self.chiI
        mat2 = 0.5 * self.depol_chi(1)
        targ = Chi(mat1 + mat2)
        chan1 = Chi(mat1)
        chan2 = Chi(mat2)
        self.assertEqual(chan1.add(chan2), targ)
        self.assertEqual(chan1 + chan2, targ)

    def test_add_except(self):
        """Test add method raises exceptions."""
        # Mismatched dimensions or non-channel operands must raise.
        chan1 = Chi(self.chiI)
        chan2 = Chi(np.eye(16))
        self.assertRaises(QiskitError, chan1.add, chan2)
        self.assertRaises(QiskitError, chan1.add, 5)

    def test_subtract(self):
        """Test subtract method."""
        mat1 = 0.5 * self.chiI
        mat2 = 0.5 * self.depol_chi(1)
        targ = Chi(mat1 - mat2)
        chan1 = Chi(mat1)
        chan2 = Chi(mat2)
        self.assertEqual(chan1.subtract(chan2), targ)
        self.assertEqual(chan1 - chan2, targ)

    def test_subtract_except(self):
        """Test subtract method raises exceptions."""
        chan1 = Chi(self.chiI)
        chan2 = Chi(np.eye(16))
        self.assertRaises(QiskitError, chan1.subtract, chan2)
        self.assertRaises(QiskitError, chan1.subtract, 5)

    def test_multiply(self):
        """Test multiply method."""
        # Scalar multiplication works via method and both operator orders.
        chan = Chi(self.chiI)
        val = 0.5
        targ = Chi(val * self.chiI)
        self.assertEqual(chan.multiply(val), targ)
        self.assertEqual(val * chan, targ)
        self.assertEqual(chan * val, targ)

    def test_multiply_except(self):
        """Test multiply method raises exceptions."""
        chan = Chi(self.chiI)
        self.assertRaises(QiskitError, chan.multiply, 's')
        self.assertRaises(QiskitError, chan.multiply, chan)

    def test_negate(self):
        """Test negate method"""
        chan = Chi(self.chiI)
        targ = Chi(-self.chiI)
        self.assertEqual(-chan, targ)


if __name__ == '__main__':
    unittest.main()
|
21,458 | 216f1887aa041a8efe00447ecb85a17d3cabc259 | import os
import re
import json
import time
import numpy as np
import pandas as pd
# Config
PATH = os.getcwd()
path_n = re.split(pattern=r"/|\\", string=PATH)[1:]
if os.name == "posix":
path_n = "/" + os.path.join(*path_n)
else:
drive = PATH[0:3]
path_n = drive + os.path.join(*path_n)
RUNS = 100
def infer_column_cats(dir: "Path to working directoty.") -> tuple:
    """Derive the available dataset sizes from the file names in ``dir``/data.

    File names follow ``sim_data_<cols>_<rows>.csv``; the first returned set
    holds the row counts, the second the column counts (both as strings).
    """
    names = os.listdir(os.path.join(dir, "data"))
    cats = {re.match(pattern=".*_(.*).csv$", string=name).group(1) for name in names}
    cols = {re.match(pattern=".*_(.*)_.*.csv$", string=name).group(1) for name in names}
    return cats, cols
def time_function(func: "Function call to be evaluted as str.") -> float:
    """Time a single evaluation of ``func`` and return the elapsed seconds.

    The statement is executed with ``eval`` in this module's namespace, so
    it may reference module-level globals (e.g. ``temp`` in the driver loop).
    NOTE: ``eval`` on arbitrary strings is unsafe in general; acceptable
    here only because the strings come from the local benchmark config.
    """
    # perf_counter is monotonic and higher-resolution than time.time(),
    # which can jump backwards on system clock adjustments.
    start = time.perf_counter()
    eval(func)
    return time.perf_counter() - start
def create_stats(measures: "List of function timings.",
                 col: "Current Column.", row: "Current Row",
                 scenario: "Current Scenario.") -> dict:
    """Summarise a list of timings into one result row (min/max/mean/median)."""
    stats = {
        "scenario": scenario,
        "no_column": col,
        "data_length": row,
    }
    stats["min"] = np.min(measures)
    stats["max"] = np.max(measures)
    stats["avg"] = np.mean(measures)
    stats["q50"] = np.median(measures)
    return stats
# ---- Benchmark driver -----------------------------------------------------
# Loads the scenario config, times `sort_values` for every (columns, rows,
# scenario) combination RUNS times, and writes the aggregated timings to CSV.
scenarios = json.load(open(os.path.join(path_n, "output", "arrange.JSON")))
operations = scenarios.keys()  # NOTE(review): unused below — dead variable?
nrows, ncols = infer_column_cats(path_n)
timings, results = [], []
for col in ncols:
    print(f"-Column: {col}--")
    for row in nrows:
        print(f"--Row: {row}")
        data = pd.read_csv(os.path.join(path_n, "data", f"sim_data_{col}_{row}.csv"))
        for i, scenario in enumerate(scenarios[col]):
            print(f"---Scenario {i+1}: {scenario}---")
            sel = scenarios[col][scenario]
            print(sel)
            for j in range(RUNS):
                # `temp` must be a module-level global: time_function evals
                # the statement string in this module's namespace.
                temp = data
                timings.append(time_function(func=f"temp.sort_values({sel})"))
                temp = None
            results.append(create_stats(measures=timings, col=col, row=row, scenario=scenario))
            print(results[-1])
            timings = []
results_df = pd.DataFrame(results)
results_df[["data_length", "no_column"]] = results_df[["data_length", "no_column"]].apply(pd.to_numeric,
                                                                                          axis=1,
                                                                                          downcast="integer")
# NOTE(review): return value discarded — sort has no effect; was
# `inplace=True` (or re-assignment) intended?
results_df.sort_values(["data_length", "no_column"])
# Convert seconds to milliseconds, rounded to 2 decimals.
results_df[["min", "max", "q50", "avg"]] = round(results_df[["min", "max", "q50", "avg"]] * 1000, 2)
# results_df["sel_col"] = results_df["scenario"].apply(lambda x: re.search(pattern="([13])", string=x).group(1))
# results_df["pos_col"] = results_df["scenario"].apply(lambda x: re.search(pattern="[13](.*)$", string=x).group(1))
results_df.to_csv(os.path.join(path_n, "output", "arrange_results_pandas.csv"), index=False)
21,459 | 318cb708f1ede5c992e4c93620b2f12f7ed0d584 | from randcam.randcam import *
|
21,460 | 2a9c6a9cffe1b2f60d9f2b802a7350202e881d9d | from django.urls import path
from . import views
# Route table for the TV app.  Each view name is deliberately reused by the
# parameterised and non-parameterised variants so templates can reverse
# either form of the URL.
urlpatterns = [
    path('', views.home, name='tv-home'),
    path('about/', views.about, name='tv-about'),
    path('show/', views.show, name='tv-show'),
    path('show/<int:showid>/', views.show, name='tv-show'),
    path('list/', views.showlist, name='tv-showlist'),
    path('list/<str:username>/', views.showlist, name='tv-showlist'),
    path('search/', views.search, name='tv-search'),
    path('search/<str:query>/', views.search, name='tv-search')
]
|
21,461 | 28c41814a9838c18beadc1de8d5a08ca7920104f | ######################################################ABOUT######################################################
# #
# This is the code used for training the model for the self driving car. #
# #
# This code was written by Kishore Kumar from Madras Institute of Technology. #
# #
# Please give due credit while using it in your project. #
# #
##########################################IMPORTING ESSENTIAL LIBRARIES##########################################
import tensorflow as tf #Deep learning library.
import numpy as np #For processing matrices and array.
import cv2 #For processing images.
from tensorflow.keras.layers import Flatten,Dense,Conv2D,MaxPool2D,Activation,Dropout #Necessary layers for constructing the model.
from tensorflow.keras.callbacks import ModelCheckpoint #This is used for checkpointing the model.
keras=tf.keras #Keras model of tensorflow.
###############################################DATASET AUGMENTATION##############################################
def augment(img):
    """Randomly rotate a 128x128 image about (64, 64) by -10..10 degrees."""
    angle = np.random.randint(-10, 10)
    rotation = cv2.getRotationMatrix2D((64, 64), angle, 1)
    return cv2.warpAffine(img, rotation, (128, 128))
##############################################PREPARING THE DATASET##############################################
##############################################PREPARING THE DATASET##############################################
# NOTE(review): np.load with allow_pickle=True must only be used on trusted
# local files — pickled arrays can execute arbitrary code when loaded.
data=np.load("Datasets/TRAIN_BALANCED_STEER.npy",allow_pickle=True) #Load the balanced dataset.
X=np.array([augment(x[0]) for x in data ]) #Training input with augmented images.
Y=np.array([np.array(x[1]) for x in data]) #Training targets (normalised steering angle in [0, 1]).
###############################################DEVELOPING THE MODEL##############################################
model=keras.Sequential() #Blank model with no layers.
model.add(keras.layers.Lambda(lambda x:x/255,input_shape=(128,128,3))) #This layer normalises the input images.
model.add(Conv2D(32,(3,3),padding='same')) #Convolutional layer with 32 feature maps and 3x3 kernels.
model.add(Activation('relu')) #ReLU activation function.
model.add(Dropout(0.3)) #Neuron dropout probability is 30%.
model.add(MaxPool2D(pool_size=(2,2))) #Maxpooling layer reduces the size of the feature maps by half.
model.add(Conv2D(64,(3,3),padding='same')) #Convolutional layer with 64 feature maps and 3x3 kernels.
model.add(Activation('relu')) #ReLU activation function.
model.add(Dropout(0.3)) #Neuron dropout probability is 30%.
model.add(MaxPool2D(pool_size=(2,2))) #Maxpooling layer reduces the size of the feature maps by half.
model.add(Conv2D(128,(3,3),padding='same')) #Convolutional layer with 128 feature maps and 3x3 kernels.
model.add(Activation('relu')) #ReLU activation function.
model.add(Dropout(0.3)) #Neuron dropout probability is 30%.
model.add(MaxPool2D(pool_size=(2,2))) #Maxpooling layer reduces the size of the feature maps by half.
model.add(Conv2D(256,(3,3),padding='same')) #Convolutional layer with 256 feature maps and 3x3 kernels.
model.add(Activation('relu')) #ReLU activation function.
model.add(Dropout(0.3)) #Neuron dropout probability is 30%.
model.add(MaxPool2D(pool_size=(2,2))) #Maxpooling layer reduces the size of the feature maps by half.
model.add(Flatten()) #Flatten the feature maps to a 1-D vector.
model.add(Dense(128)) #Fully connected layer with 128 neurons.
model.add(Activation("relu")) #ReLU activation function.
model.add(Dropout(0.3)) #Neuron dropout probability is 30%.
model.add(Dense(1)) #Output neuron to predict normalised steering angle.
model.add(Activation("sigmoid")) #Sigmoid activation function returns a value between 0-1.
################################################TRAINING THE MODEL###############################################
#model=keras.models.load_model("Model.model") #Uncomment this line to retrain a model(Change the file path).
model.compile(loss="mse",optimizer=keras.optimizers.Adam(learning_rate=0.0003),metrics=['accuracy']) #Using Adam optimiser and mean square error to optimise the model.
# NOTE(review): the checkpoint file name embeds val_accuracy while the
# callback monitors val_loss — intentional? Accuracy is also a questionable
# metric for this regression output.
filepath="Models/Models HDF5/Regression/weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5" #Path to save the checkpoints.
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min') #Save the models with the lowest validation loss upto that point.
callbacks_list = [checkpoint] #Used in model.fit.
model.fit(X, Y, validation_split=0.30, epochs=30, batch_size=10, callbacks=callbacks_list, verbose=1) #Train the model for 30 epochs using 30% of the data as validation data.
#################################################################################################################
|
def replace_all(first, second):
    """Repeatedly delete occurrences of ``first`` from ``second`` until none
    remain, then return the result.

    Deleting one occurrence can create a new one (e.g. removing "ab" from
    "aabb" yields "ab"), so a single ``str.replace`` is not equivalent: the
    scan must be repeated until ``first`` no longer appears.
    """
    if not first:
        # An empty pattern is found everywhere but removes nothing; the
        # original recursed forever here. Return the text unchanged.
        return second
    # Iterative rewrite of the original recursion, whose depth grew with the
    # number of occurrences and hit RecursionError on large inputs.
    while first in second:
        second = second.replace(first, "", 1)
    return second
|
21,463 | 126dd6696fdb287ae261683d77152918c2d607d8 | import unittest
from Build.Objects.grid_controller import GridController
from Build.Simulation_Operation.event import Event
from Build.Simulation_Operation.queue import PriorityQueue
from Build.Simulation_Operation.supervisor import Supervisor
@unittest.skip("TODO: DO NOT USE. HAS NOT BEEN UPDATED.")
class TestEventModel(unittest.TestCase):
    """Tests for events dispatched between grid controllers.

    The whole class is skipped (see decorator): it has not been kept in
    sync with the current device APIs.
    """

    def setUp(self):
        # Two grid controllers registered with a shared supervisor.
        self.sup = Supervisor()
        self.gc1 = GridController("gc1", self.sup)
        self.gc2 = GridController("gc2", self.sup)
        self.sup.register_device(self.gc1)
        self.sup.register_device(self.gc2)

    def test_event_queue(self):
        # Equal-looking events must still be queued as distinct entries.
        pq = PriorityQueue()
        pq.add(Event(self.gc1.set_power_in, 20), 1)
        pq.add(Event(self.gc1.set_power_in, 20), 2)  # to make sure they are treated as distinct entities.
        event1, time_stamp = pq.pop()
        event1.run_event()
        self.assertEqual(self.gc1._power_in, 20)
        event2, time_stamp = pq.pop()
        event2.run_event()
        self.assertEqual(self.gc1._power_in, 20)

    def test_single_argument_event(self):
        self.gc1.add_event(Event(self.gc1.set_power_in, 20), 1)
        self.gc1.update_time(1)
        self.gc1.process_events()
        self.assertEqual(self.gc1._power_in, 20)

    def test_multiple_argument_event(self):
        self.gc1.add_event(Event(self.gc1.on_allocated, "gc2", 20), 1)
        self.gc1.update_time(1)
        self.gc1.process_events()
        self.assertEqual(self.gc1._allocated["gc2"], 20)

    def test_no_argument_event(self):
        # NOTE(review): asserts 10 — presumably add_power_in()'s default
        # increment; verify against GridController.
        self.gc1.add_event(Event(self.gc1.add_power_in), 1)
        self.gc1.update_time(1)
        self.gc1.process_events()
        self.assertEqual(self.gc1._power_in, 10)

    """More Tests Here are Highest Priority"""


if __name__ == '__main__':
    unittest.main()
21,464 | 9d405b3d070615cbd25f66a0d00b595778549e66 | import os
import exifread
currentDirectory = "/home/dale/Projects/filetest"
for root, dirs, files in os.walk(currentDirectory):
for picFile in files:
if picFile.endswith(".jpg") or picFile.endswith(".JPG"):
picFileDirectory = os.path.join(root, picFile)
print picFileDirectory
f = open(picFileDirectory, 'rb')
tags = exifread.process_file(f)
#print tags
print len(tags)
#for key in tags:
# print "key: %s, value: %s" % (key, tags[key])
if tags.get('Image DateTime'):
print "it is here"
elif tags.get('EXIF DateTimeDigitized'):
print "no it is here"
else:
print "it isn't anywhere"
value = tags.get('Image DateTime', "empty")
print value
#if tags.get(['Image DateTime']):
# print len(tags['Image DateTime'])
if len(tags) > 10:
if tags['EXIF DateTimeDigitized']:
dateTakenexif = str(tags['EXIF DateTimeDigitized'])
yearTaken = dateTakenexif[0:4]
monthTaken = dateTakenexif[5:7]
print monthTaken |
21,465 | 765a7eafd8dffbc18b9b49d3604aace470011665 | # pylint: disable=no-self-use,invalid-name
import numpy
from deep_qa.common.params import Params
from deep_qa.data import DataGenerator, IndexedDataset
from ..common.test_case import DeepQaTestCase
class TestDataGenerator(DeepQaTestCase):
    """Tests for DataGenerator batching: dynamic padding, adaptive batch
    sizes, epoch-to-epoch consistency and noise, and batch size caps.

    FakeInstance(index, a, b, c) encodes its sorting keys directly, so the
    expected groupings below can be read off the lengths in setUp.
    """

    def setUp(self):
        super(TestDataGenerator, self).setUp()
        self.text_trainer = FakeTextTrainer()
        # (index, a_length, b_length, c_length)
        self.instances = [
                FakeInstance(0, 5, 3, 2),
                FakeInstance(1, 4, 3, 2),
                FakeInstance(2, 4, 1, 2),
                FakeInstance(3, 9, 3, 2),
                FakeInstance(4, 8, 3, 2),
                FakeInstance(5, 2, 1, 2),
                FakeInstance(6, 3, 3, 2),
                FakeInstance(7, 3, 3, 3),
                FakeInstance(8, 1, 1, 2),
                FakeInstance(9, 1, 1, 3),
                ]

    def test_instances_are_sorted_by_sorting_keys(self):
        params = Params({
                'dynamic_padding': True,
                'padding_noise': 0.0,
                })
        generator = DataGenerator(self.text_trainer, params)
        batches = generator.create_generator(IndexedDataset(self.instances))
        assert generator.last_num_batches == 4
        one_epoch_arrays = [next(batches) for _ in range(4)]
        # Batch order varies, so sort by the first index for stable asserts.
        one_epoch_arrays.sort(key=lambda x: x[0][0])
        assert self.as_list(one_epoch_arrays[0][0]) == [1, 0, 4]
        assert self.as_list(one_epoch_arrays[1][0]) == [3]
        assert self.as_list(one_epoch_arrays[2][0]) == [6, 7, 2]
        assert self.as_list(one_epoch_arrays[3][0]) == [8, 9, 5]

    def test_batches_are_consistent_with_no_repermuting(self):
        # With sort_every_epoch=False, two epochs must produce identical batches.
        params = Params({
                'padding_noise': 0.0,
                'sort_every_epoch': False,
                'dynamic_padding': True,
                })
        generator = DataGenerator(self.text_trainer, params)
        batches = generator.create_generator(IndexedDataset(self.instances))
        assert generator.last_num_batches == 4
        first_epoch_arrays = [next(batches) for _ in range(4)]
        second_epoch_arrays = [next(batches) for _ in range(4)]
        first_epoch_arrays.sort(key=lambda x: x[0][0])
        second_epoch_arrays.sort(key=lambda x: x[0][0])
        first_epoch = [self.as_list(x[0]) for x in first_epoch_arrays]
        second_epoch = [self.as_list(x[0]) for x in second_epoch_arrays]
        assert first_epoch == second_epoch

    def test_biggest_batch_first(self):
        # The two most memory-hungry batches must be yielded first.
        params = Params({
                'padding_noise': 0.0,
                'dynamic_padding': True,
                'biggest_batch_first': True,
                })
        generator = DataGenerator(self.text_trainer, params)
        batches = generator.create_generator(IndexedDataset(self.instances))
        biggest_batches = [next(batches) for _ in range(2)]
        assert self.as_list(biggest_batches[0][0]) == [3]
        assert self.as_list(biggest_batches[1][0]) == [1, 0, 4]

    def test_adaptive_grouping(self):
        # Batch sizes adapt so that a*b*c stays under the memory constant.
        params = Params({
                'padding_noise': 0.0,
                'dynamic_padding': True,
                'adaptive_batch_sizes': True,
                'adaptive_memory_usage_constant': 130,
                })
        generator = DataGenerator(self.text_trainer, params)
        batches = generator.create_generator(IndexedDataset(self.instances))
        assert generator.last_num_batches == 4
        one_epoch_arrays = [next(batches) for _ in range(4)]
        one_epoch_arrays.sort(key=lambda x: x[0][0])
        assert self.as_list(one_epoch_arrays[0][0]) == [0, 4]
        assert self.as_list(one_epoch_arrays[1][0]) == [3]
        assert self.as_list(one_epoch_arrays[2][0]) == [7, 2, 1]
        assert self.as_list(one_epoch_arrays[3][0]) == [8, 9, 5, 6]

    def test_sort_every_batch_actually_adds_noise_every_batch(self):
        # We're just going to get two epoch's worth of batches, and make sure that they're
        # different.
        params = Params({
                'padding_noise': 0.8,
                'sort_every_epoch': True,
                'dynamic_padding': True,
                })
        generator = DataGenerator(self.text_trainer, params)
        batches = generator.create_generator(IndexedDataset(self.instances))
        assert generator.last_num_batches == 4
        first_epoch_arrays = [next(batches) for _ in range(4)]
        second_epoch_arrays = [next(batches) for _ in range(4)]
        first_epoch_arrays.sort(key=lambda x: x[0][0])
        second_epoch_arrays.sort(key=lambda x: x[0][0])
        first_epoch = [self.as_list(x[0]) for x in first_epoch_arrays]
        second_epoch = [self.as_list(x[0]) for x in second_epoch_arrays]
        # NOTE(review): with 80% noise an identical permutation is unlikely
        # but not impossible — this assert is probabilistic.
        assert first_epoch != second_epoch

    def test_maximum_batch_size_is_actually_a_maximum(self):
        params = Params({
                'padding_noise': 0.0,
                'dynamic_padding': True,
                'adaptive_batch_sizes': True,
                'adaptive_memory_usage_constant': 50,
                'maximum_batch_size': 2,
                })
        generator = DataGenerator(self.text_trainer, params)
        batches = generator.create_generator(IndexedDataset(self.instances))
        assert generator.last_num_batches == 7
        one_epoch_arrays = [next(batches) for _ in range(7)]
        one_epoch_arrays.sort(key=lambda x: x[0][0])
        print([self.as_list(x[0]) for x in one_epoch_arrays])
        assert self.as_list(one_epoch_arrays[0][0]) == [0]
        assert self.as_list(one_epoch_arrays[1][0]) == [2, 1]
        assert self.as_list(one_epoch_arrays[2][0]) == [3]
        assert self.as_list(one_epoch_arrays[3][0]) == [4]
        assert self.as_list(one_epoch_arrays[4][0]) == [5, 6]
        assert self.as_list(one_epoch_arrays[5][0]) == [7]
        assert self.as_list(one_epoch_arrays[6][0]) == [8, 9]

    def as_list(self, array):
        # Strip the trailing length-1 axis and return plain Python scalars.
        return list(numpy.squeeze(array, axis=-1))
class FakeInstance:
    """Minimal dataset-instance stand-in: an index plus three fixed
    padding lengths keyed 'a', 'b' and 'c'."""

    def __init__(self, index, a_length, b_length, c_length):
        self.index = index
        self.a_length = a_length
        self.b_length = b_length
        self.c_length = c_length

    def get_padding_lengths(self):
        # Expose the three padding dimensions under their short key names.
        return {key: getattr(self, key + '_length') for key in ('a', 'b', 'c')}

    def pad(self, lengths):
        # Padding is a no-op for the fake.
        pass

    def as_training_data(self):
        # Both the input and the target are just the instance's index.
        return numpy.asarray([self.index]), numpy.asarray([self.index])
class FakeTextTrainer:
    """TextTrainer stand-in: fixed batch size and configurable padding
    lengths for the three keys 'a', 'b' and 'c'."""

    batch_size = 3
    a_length = None
    b_length = None
    c_length = None

    def get_instance_sorting_keys(self):
        # Sort instances by 'a' first, then 'b', then 'c'.
        return ['a', 'b', 'c']

    def get_padding_lengths(self):
        return {key: getattr(self, key + '_length') for key in ('a', 'b', 'c')}

    def get_padding_memory_scaling(self, lengths):
        # Memory cost is modelled as the product of the three dimensions.
        scaling = 1
        for key in ('a', 'b', 'c'):
            scaling *= lengths[key]
        return scaling
|
21,466 | eb8943bf3373ec99a4d20516c4205224fd0d15c3 | import logging
from collections import defaultdict
from collections.abc import Iterator
from importlib import import_module
from types import ModuleType
from typing import TYPE_CHECKING, Any, Optional
from rotkehlchen.db.constants import BINANCE_MARKETS_KEY, KRAKEN_ACCOUNT_TYPE_KEY
from rotkehlchen.errors.misc import InputError
from rotkehlchen.exchanges.binance import BINANCE_BASE_URL, BINANCEUS_BASE_URL
from rotkehlchen.exchanges.exchange import ExchangeInterface, ExchangeWithExtras
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import (
ApiKey,
ApiSecret,
ExchangeApiCredentials,
ExchangeAuthCredentials,
Location,
LocationEventMappingType,
)
from rotkehlchen.user_messages import MessagesAggregator
from .constants import SUPPORTED_EXCHANGES
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.exchanges.kraken import KrakenAccountType
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class ExchangeManager:
    def __init__(self, msg_aggregator: MessagesAggregator) -> None:
        """Initialize with an empty location -> connected-exchanges mapping.

        NOTE(review): ``iterate_exchanges`` reads ``self.database``, which
        is not assigned here — presumably attached later by the owner
        before any iteration happens; verify.
        """
        self.connected_exchanges: dict[Location, list[ExchangeInterface]] = defaultdict(list)
        self.msg_aggregator = msg_aggregator
    @staticmethod
    def _get_exchange_module_name(location: Location) -> str:
        """Map a location to the python module name implementing it."""
        # Binance US shares the binance module implementation.
        if location == Location.BINANCEUS:
            return str(Location.BINANCE)
        return str(location)
def connected_and_syncing_exchanges_num(self) -> int:
return len(list(self.iterate_exchanges()))
def get_exchange(self, name: str, location: Location) -> Optional[ExchangeInterface]:
"""Get the exchange object for an exchange with a given name and location
Returns None if it can not be found
"""
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return None
for exchange in exchanges_list:
if exchange.name == name:
return exchange
return None
def iterate_exchanges(self) -> Iterator[ExchangeInterface]:
"""Iterate all connected and syncing exchanges"""
with self.database.conn.read_ctx() as cursor:
excluded = self.database.get_settings(cursor).non_syncing_exchanges
for exchanges in self.connected_exchanges.values():
for exchange in exchanges:
# We are not yielding excluded exchanges
if exchange.location_id() not in excluded:
yield exchange
def edit_exchange(
self,
name: str,
location: Location,
new_name: Optional[str],
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
kraken_account_type: Optional['KrakenAccountType'],
binance_selected_trade_pairs: Optional[list[str]],
) -> tuple[bool, str]:
"""Edits both the exchange object and the database entry
Returns True if an entry was found and edited and false otherwise
"""
exchangeobj = self.get_exchange(name=name, location=location)
if not exchangeobj:
return False, f'Could not find {location!s} exchange {name} for editing'
# First validate exchange credentials
edited = exchangeobj.edit_exchange_credentials(ExchangeAuthCredentials(
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
))
if edited is True:
try:
credentials_are_valid, msg = exchangeobj.validate_api_key()
except Exception as e: # pylint: disable=broad-except
msg = str(e)
credentials_are_valid = False
if credentials_are_valid is False:
exchangeobj.reset_to_db_credentials()
return False, f'New credentials are invalid. {msg}'
# Then edit extra properties if needed
if isinstance(exchangeobj, ExchangeWithExtras):
success, msg = exchangeobj.edit_exchange_extras({
KRAKEN_ACCOUNT_TYPE_KEY: kraken_account_type,
BINANCE_MARKETS_KEY: binance_selected_trade_pairs,
})
if success is False:
exchangeobj.reset_to_db_credentials()
return False, f'Failed to edit exchange extras. {msg}'
try:
with self.database.user_write() as cursor:
self.database.edit_exchange(
cursor,
name=name,
location=location,
new_name=new_name,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
kraken_account_type=kraken_account_type,
binance_selected_trade_pairs=binance_selected_trade_pairs,
)
except InputError as e:
exchangeobj.reset_to_db_credentials() # DB is already rolled back at this point
if isinstance(exchangeobj, ExchangeWithExtras):
exchangeobj.reset_to_db_extras()
return False, f"Couldn't update exchange properties in the DB. {e!s}"
# Finally edit the name of the exchange object
if new_name is not None:
exchangeobj.name = new_name
return True, ''
def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:
"""
Deletes an exchange with the specified name + location from both connected_exchanges
and the DB.
"""
if self.get_exchange(name=name, location=location) is None:
return False, f'{location!s} exchange {name} is not registered'
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return False, f'{location!s} exchange {name} is not registered'
if len(exchanges_list) == 1: # if is last exchange of this location
self.connected_exchanges.pop(location)
else:
self.connected_exchanges[location] = [x for x in exchanges_list if x.name != name]
with self.database.user_write() as write_cursor: # Also remove it from the db
self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501
self.database.delete_used_query_range_for_exchange(
write_cursor=write_cursor,
location=location,
exchange_name=name,
)
return True, ''
def delete_all_exchanges(self) -> None:
"""Deletes all exchanges from the manager. Not from the DB"""
self.connected_exchanges.clear()
def get_connected_exchanges_info(self) -> list[dict[str, Any]]:
exchange_info = []
for location, exchanges in self.connected_exchanges.items():
for exchangeobj in exchanges:
data = {'location': str(location), 'name': exchangeobj.name}
if location == Location.KRAKEN: # ignore type since we know this is kraken here
data[KRAKEN_ACCOUNT_TYPE_KEY] = str(exchangeobj.account_type) # type: ignore
exchange_info.append(data)
return exchange_info
def _get_exchange_module(self, location: Location) -> ModuleType:
module_name = self._get_exchange_module_name(location)
try:
module = import_module(f'rotkehlchen.exchanges.{module_name}')
except ModuleNotFoundError:
# This should never happen
raise AssertionError(
f'Tried to initialize unknown exchange {location!s}. Should not happen',
) from None
return module
def setup_exchange(
self,
name: str,
location: Location,
api_key: ApiKey,
api_secret: ApiSecret,
database: 'DBHandler',
passphrase: Optional[str] = None,
**kwargs: Any,
) -> tuple[bool, str]:
"""
Setup a new exchange with an api key, an api secret.
For some exchanges there is more attributes to add
"""
if location not in SUPPORTED_EXCHANGES: # also checked via marshmallow
return False, f'Attempted to register unsupported exchange {name}'
if self.get_exchange(name=name, location=location) is not None:
return False, f'{location!s} exchange {name} is already registered'
api_credentials = ExchangeApiCredentials(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
)
exchange = self.initialize_exchange(
module=self._get_exchange_module(location),
credentials=api_credentials,
database=database,
**kwargs,
)
try:
result, message = exchange.validate_api_key()
except Exception as e: # pylint: disable=broad-except
result = False
message = str(e)
if not result:
log.error(
f'Failed to validate API key for {location!s} exchange {name}'
f' due to {message}',
)
return False, message
self.connected_exchanges[location].append(exchange)
return True, ''
def initialize_exchange(
self,
module: ModuleType,
credentials: ExchangeApiCredentials,
database: 'DBHandler',
**kwargs: Any,
) -> ExchangeInterface:
maybe_exchange = self.get_exchange(name=credentials.name, location=credentials.location)
if maybe_exchange:
return maybe_exchange # already initialized
module_name = module.__name__.split('.')[-1]
exchange_ctor = getattr(module, module_name.capitalize())
if credentials.passphrase is not None:
kwargs['passphrase'] = credentials.passphrase
elif credentials.location == Location.BINANCE:
kwargs['uri'] = BINANCE_BASE_URL
elif credentials.location == Location.BINANCEUS:
kwargs['uri'] = BINANCEUS_BASE_URL
exchange_obj = exchange_ctor(
name=credentials.name,
api_key=credentials.api_key,
secret=credentials.api_secret,
database=database,
msg_aggregator=self.msg_aggregator,
# remove all empty kwargs
**{k: v for k, v in kwargs.items() if v is not None},
)
return exchange_obj
def initialize_exchanges(
self,
exchange_credentials: dict[Location, list[ExchangeApiCredentials]],
database: 'DBHandler',
) -> None:
log.debug('Initializing exchanges')
self.database = database
# initialize exchanges for which we have keys and are not already initialized
for location, credentials_list in exchange_credentials.items():
module = self._get_exchange_module(location)
for credentials in credentials_list:
extras = database.get_exchange_credentials_extras(
name=credentials.name,
location=credentials.location,
)
exchange_obj = self.initialize_exchange(
module=module,
credentials=credentials,
database=database,
**extras,
)
self.connected_exchanges[location].append(exchange_obj)
log.debug('Initialized exchanges')
def get_user_binance_pairs(self, name: str, location: Location) -> list[str]:
is_connected = location in self.connected_exchanges
if is_connected:
return self.database.get_binance_pairs(name, location)
return []
def query_history_events(self) -> None:
"""Queries all history events for exchanges that need it
May raise:
- RemoteError if any exchange's remote query fails
"""
for exchange in self.iterate_exchanges():
exchange.query_history_events()
def get_exchange_mappings(self) -> LocationEventMappingType:
"""Collect event mappings from each exchange"""
mappings: LocationEventMappingType = {}
for location, exchanges in self.connected_exchanges.items():
for exchange in exchanges:
if len(exchange_mapping := exchange.get_event_mappings()) != 0:
mappings[location] = exchange_mapping
break
return mappings
|
21,467 | c8172acc58b409d2016dd441c2a0f31d33cffef1 | #!/usr/bin/python
import wx
from ..core.gk_node import GKNode, GK_SHAPE_TYPE
class GKUINodeEditDialog(wx.Dialog):
    """Modal dialog for editing a GKNode's label, description, style and image.

    The node is only written back to in OnSave; cancelling leaves it untouched.
    """

    def __init__(self, parent, node):
        """Build the dialog layout.

        Args:
            parent: parent wx window.
            node: the GKNode instance being edited.
        """
        wx.Dialog.__init__(self, parent, id=wx.ID_ANY, title=u"Edit Node", pos=wx.DefaultPosition, size=wx.DefaultSize,
                           style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        self.m_node = node
        self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
        bSizer4 = wx.BoxSizer(wx.VERTICAL)
        # Two-column grid: static labels on the left, editors on the right.
        fgSizer1 = wx.FlexGridSizer(0, 2, 0, 0)
        fgSizer1.AddGrowableCol(1)
        fgSizer1.AddGrowableRow(1)
        fgSizer1.SetFlexibleDirection(wx.BOTH)
        fgSizer1.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
        # --- Label row ---
        self.m_staticText1 = wx.StaticText(self, wx.ID_ANY, u"Label:", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_staticText1.Wrap(-1)
        fgSizer1.Add(self.m_staticText1, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.m_label_ctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0)
        fgSizer1.Add(self.m_label_ctrl, 0, wx.ALL | wx.EXPAND, 5)
        # --- Description row (multi-line) ---
        self.m_staticText2 = wx.StaticText(self, wx.ID_ANY, u"Description:", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_staticText2.Wrap(-1)
        fgSizer1.Add(self.m_staticText2, 0, wx.ALL, 5)
        self.m_desc_ctrl = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(250, 100),
                                       wx.TE_MULTILINE)
        fgSizer1.Add(self.m_desc_ctrl, 1, wx.ALL | wx.EXPAND, 5)
        bSizer4.Add(fgSizer1, 1, wx.EXPAND, 5)
        # --- Shape style radio box (one entry per GK_SHAPE_TYPE) ---
        self.m_style_ctrl = wx.RadioBox(self, wx.ID_ANY, u"Style", wx.DefaultPosition, wx.DefaultSize, GK_SHAPE_TYPE, 1,
                                        wx.RA_SPECIFY_COLS)
        self.m_style_ctrl.SetSelection(0)
        bSizer4.Add(self.m_style_ctrl, 0, wx.ALL | wx.EXPAND, 5)
        # --- Image picker row (enabled only for the "image" style) ---
        bSizer5 = wx.BoxSizer(wx.HORIZONTAL)
        self.m_staticText3 = wx.StaticText(self, wx.ID_ANY, u"Image: ", wx.DefaultPosition, wx.DefaultSize, 0)
        self.m_staticText3.Wrap(-1)
        bSizer5.Add(self.m_staticText3, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        self.m_img_ctrl = wx.FilePickerCtrl(self, wx.ID_ANY, wx.EmptyString, u"Select a file", u"*.*",
                                            wx.DefaultPosition, wx.DefaultSize, wx.FLP_DEFAULT_STYLE)
        bSizer5.Add(self.m_img_ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        bSizer4.Add(bSizer5, 0, wx.EXPAND, 5)
        # --- Save / Cancel buttons ---
        m_buttons_ctrl = wx.StdDialogButtonSizer()
        self.m_buttons_ctrlSave = wx.Button(self, wx.ID_SAVE)
        m_buttons_ctrl.AddButton(self.m_buttons_ctrlSave)
        self.m_buttons_ctrlCancel = wx.Button(self, wx.ID_CANCEL)
        m_buttons_ctrl.AddButton(self.m_buttons_ctrlCancel)
        m_buttons_ctrl.Realize()
        bSizer4.Add(m_buttons_ctrl, 0, wx.ALL | wx.EXPAND, 5)
        self.SetSizer(bSizer4)
        self.Layout()
        bSizer4.Fit(self)
        self.Centre(wx.BOTH)

        # Connect Events
        self.m_img_ctrl.Bind(wx.EVT_UPDATE_UI, self.OnUpdateUIImage)
        self.m_buttons_ctrlSave.Bind(wx.EVT_BUTTON, self.OnSave)

    def __del__(self):
        # Kept for wxFormBuilder compatibility; nothing to clean up.
        pass

    def OnUpdateUIImage(self, event):
        """Enable the image picker only when the "image" shape style is selected."""
        # BUG FIX: previously computed an unused local and re-evaluated the
        # index inside the condition; compute it once and pass the bool.
        image_index = GK_SHAPE_TYPE.index("image")
        event.Enable(self.m_style_ctrl.GetSelection() == image_index)

    def TransferDataToWindow(self):
        """Populate the controls from the node before the dialog is shown."""
        if self.m_node.m_name:
            self.m_label_ctrl.SetValue(self.m_node.m_name)
        if self.m_node.m_description:
            self.m_desc_ctrl.SetValue(self.m_node.m_description)
        self.m_style_ctrl.SetSelection(GK_SHAPE_TYPE.index(self.m_node.m_shapetype))
        if self.m_node.m_external_link:
            self.m_img_ctrl.SetPath(self.m_node.m_external_link)
        return True

    def OnSave(self, event):
        """Copy the control values back into the node and close with ID_SAVE."""
        self.m_node.m_name = self.m_label_ctrl.GetValue()
        self.m_node.m_description = self.m_desc_ctrl.GetValue()
        self.m_node.m_shapetype = GK_SHAPE_TYPE[self.m_style_ctrl.GetSelection()]
        self.m_node.m_external_link = self.m_img_ctrl.GetPath()
        self.EndModal(wx.ID_SAVE)
        event.Skip()
|
21,468 | a4b8ae86e3ede7acf2af316da6eab1b22b9d1d4f | #!/usr/bin/python3.6
from create import createRandomArr
import time
def mergeSort(arr):
    """Return a new, ascending-sorted copy of ``arr``.

    Standard top-down, stable merge sort. Replaces the previous convoluted
    version whose ``mid = len(arr) // 2 if ... else 1`` hack and mutual
    recursion with mergeLists re-sorted sublists redundantly; results are
    identical.
    """
    if len(arr) <= 1:
        return list(arr)
    mid = len(arr) // 2
    return _merge(mergeSort(arr[:mid]), mergeSort(arr[mid:]))

def _merge(left, right):
    """Merge two sorted lists into one sorted list (stable: ties favor left)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def mergeLists(arr1, arr2):
    """Merge the two halves of a split array into one ascending list.

    Each half longer than one element is first sorted via mergeSort; the
    sorted halves are then merged with the usual two-pointer walk.
    """
    # Fast path: two singletons merge directly.
    if len(arr1) == 1 == len(arr2):
        lo, hi = (arr1[0], arr2[0]) if arr1[0] <= arr2[0] else (arr2[0], arr1[0])
        return [lo, hi]

    left = arr1 if len(arr1) <= 1 else mergeSort(arr1)
    right = arr2 if len(arr2) <= 1 else mergeSort(arr2)

    merged = []
    li = ri = 0
    while li < len(left) or ri < len(right):
        # Take from the left when the right side is exhausted, or when the
        # left element is smaller or equal (keeps the merge stable).
        take_left = ri >= len(right) or (li < len(left) and left[li] <= right[ri])
        if take_left:
            merged.append(left[li])
            li += 1
        else:
            merged.append(right[ri])
            ri += 1
    return merged
# Benchmark entry point: sort a large random array and report elapsed time.
if __name__ == "__main__":
    sample_size = 100000
    unsorted = createRandomArr(sample_size)
    started = time.time()
    mergeSort(unsorted)
    elapsed = time.time() - started
    print("Top Down Merge Sort Time Taken :", elapsed, "s for", sample_size, "numbers")
21,469 | ec18d0d47c9dec444556e758b70ecb9ccb3308eb | from keras import activations
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.engine.base_layer import InputSpec
from keras.engine.base_layer import Layer
from keras.layers import *
from keras.models import Model
from keras import backend as K
import keras
import tensorflow as tf
def root_mean_squared_log_error(y_true, y_pred):
    """RMSLE: sqrt of the mean squared difference of log(x + 1) values."""
    log_diff = K.log(y_pred + 1) - K.log(y_true + 1)
    return K.sqrt(K.mean(K.square(log_diff), axis=-1))
def root_mean_squared_error(y_true, y_pred):
    """RMSE over the last axis."""
    squared_error = K.square(y_pred - y_true)
    return K.sqrt(K.mean(squared_error, axis=-1))
def pearson_correlation(y_true, y_pred):
    """Pearson correlation coefficient between y_true and y_pred.

    Computed over all elements of the tensors (a single global scalar).
    Mathematically identical to the previous version, but reuses the
    centered tensors instead of recomputing ``y - mean`` twice, and names
    the denominators as the norms they actually are (the old
    ``diff_*_squared`` names held square roots, not squares).
    """
    diff_true = y_true - tf.reduce_mean(y_true)
    diff_pred = y_pred - tf.reduce_mean(y_pred)
    # L2 norms of the centered vectors form the denominator.
    norm_true = tf.sqrt(tf.reduce_sum(tf.square(diff_true)))
    norm_pred = tf.sqrt(tf.reduce_sum(tf.square(diff_pred)))
    correlation = tf.reduce_sum(diff_true * diff_pred) / (norm_true * norm_pred)
    return correlation
def get_additiveAttention_model(total_seq_length,
                                mode,
                                num_classes = 2,
                                num_motifs=32,
                                motif_size=10,
                                adjacent_bp_pool_size=10,
                                attention_dim=10,
                                attention_hops=1,
                                dropout_rate=0.1):
    """Build and compile a CNN + additive (Bahdanau-style) self-attention model.

    Args:
        total_seq_length: length of the one-hot (x4) encoded input sequence.
        mode: 'classification', 'signal_regression' or 'fold_regression';
            selects the output activation, loss, metrics and optimizer.
        num_classes: number of output units.
        num_motifs / motif_size: number and width of convolutional filters.
        adjacent_bp_pool_size: unused here; kept for interface parity with
            the other model builders in this module.
        attention_dim: hidden size of the attention tanh projection.
        attention_hops: number of attention hops (rows of attention).
        dropout_rate: dropout applied to the attention weights.

    Returns:
        A compiled keras Model with one input 'input_fwd' of shape
        (total_seq_length, 4), or None for an unknown ``mode``.
    """
    # set model training settings
    if mode == 'classification':
        mode_activation = 'sigmoid'
        mode_loss = keras.losses.categorical_crossentropy
        mode_metrics = ['categorical_accuracy']
        mode_optimizer = keras.optimizers.Adam()
    elif mode == 'signal_regression':
        mode_activation = 'relu'
        mode_loss = keras.losses.mean_squared_logarithmic_error
        mode_metrics = [pearson_correlation]
        mode_optimizer = keras.optimizers.RMSprop()
    elif mode == 'fold_regression':
        mode_activation = 'linear'
        mode_loss = keras.losses.mean_absolute_error
        mode_metrics = [pearson_correlation]
        mode_optimizer = keras.optimizers.RMSprop()
    else:
        # BUG FIX: an unknown mode previously crashed later with
        # UnboundLocalError; return None like get_convolution_model does.
        return None

    input_fwd = Input(shape=(total_seq_length, 4), name='input_fwd')

    ### find motifs ###
    convolution_layer = Conv1D(filters=num_motifs,
                               kernel_size=motif_size,
                               activation='relu',
                               input_shape=(total_seq_length, 4),
                               name='convolution_layer',
                               padding = 'same',
                               use_bias = False
                               )
    forward_motif_scores = convolution_layer(input_fwd)

    ### attention tanh layer ###
    attention_tanh_layer = Dense(attention_dim,
                                 activation='tanh',
                                 use_bias=False,
                                 name = 'attention_tanh_layer')
    attention_tanh_layer_out = attention_tanh_layer(forward_motif_scores)

    ### outer layer: project tanh features down to one score per hop ###
    attention_outer_layer = Dense(attention_hops,
                                  activation='linear',
                                  use_bias=False,
                                  name = 'attention_outer_layer')
    attention_outer_layer_out = attention_outer_layer(attention_tanh_layer_out)

    ### apply softmax over the sequence positions ###
    softmax_layer = Softmax(axis=1, name='attention_softmax_layer')
    attention_softmax_layer_out = softmax_layer(attention_outer_layer_out)

    ### attention dropout ###
    attention_dropout_layer = Dropout(dropout_rate, name='attention_dropout')
    attention_dropout_layer_out = attention_dropout_layer(attention_softmax_layer_out)

    ### attend to hidden states: weighted sum of motif scores per hop ###
    attending_layer = Dot(axes=(1, 1),
                          name='attending_layer')
    attended_states = attending_layer([attention_dropout_layer_out, forward_motif_scores])

    # make prediction
    flattened = Flatten(name='flatten')(attended_states)
    predictions = Dense(num_classes,
                        name='predictions',
                        activation = mode_activation
                        )(flattened)

    # define and compile model
    model = Model(inputs=[input_fwd], outputs=predictions)
    model.compile(loss=mode_loss,
                  optimizer=mode_optimizer,
                  metrics=mode_metrics)
    return model
def element_multiply(x, y):
    """Element-wise multiply ``x`` against the column vector ``y``.

    Reshapes x to (-1, last_dim, 1) and y to (rows, 1) before multiplying,
    broadcasting y across x's leading dimensions. The duplicated
    static/dynamic shape resolution for x and y is deduplicated into a
    helper; behavior is unchanged.
    """
    def _resolved_shape(tensor):
        # Prefer statically known dims; fall back to dynamic ones per axis.
        dims = []
        for static_dim, dynamic_dim in zip(K.int_shape(tensor), tf.unstack(tf.shape(tensor))):
            dims.append(static_dim if static_dim is not None else dynamic_dim)
        return tuple(dims)

    x_shape = _resolved_shape(x)
    y_shape = _resolved_shape(y)
    xt = tf.reshape(x, [-1, x_shape[-1], 1])
    yt = tf.reshape(y, [y_shape[-2], 1])
    return tf.multiply(xt, yt)
class Projection(Layer):
    """Learn a linear transform (trainable per-feature weighting) of the input.

    Applies ``element_multiply`` between the input tensor and a trainable
    (input_dim, units) kernel; no bias, linear activation.
    """

    def __init__(self, units,
                 kernel_initializer='glorot_uniform',
                 kernel_constraint=None,
                 **kwargs):
        # Accept the legacy `input_dim` kwarg by translating it to input_shape.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        self.units = units
        self.activation = activations.linear
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.input_spec = InputSpec(min_ndim=2)
        self.supports_masking = True
        super(Projection, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create the trainable kernel once the input feature size is known."""
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]
        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      constraint=self.kernel_constraint)
        # Lock the last axis to the build-time feature size.
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        super(Projection, self).build(input_shape)

    def call(self, inputs):
        output = element_multiply(inputs, self.kernel)
        output = self.activation(output)  # linear activation: identity
        return output

    def get_config(self):
        """Return the serialization config.

        BUG FIX: now serializes ``kernel_constraint`` and merges in the base
        Layer config (name, trainable, ...), both of which were previously
        dropped and broke model save/load round-tripping.
        """
        config = {
            'units': self.units,
            'kernel_initializer': initializers.serialize(self.kernel_initializer),
            'kernel_constraint': constraints.serialize(self.kernel_constraint),
        }
        base_config = super(Projection, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        # NOTE(review): mirrors element_multiply's reshape; assumes this
        # layer is wrapped in TimeDistributed with units == 1 as done in
        # this module -- confirm before reusing in other configurations.
        output_shape = (self.units, input_shape[1])
        return output_shape
def get_dotProductAttention_model(total_seq_length,
                                  mode,
                                  num_classes = 1,
                                  num_motifs=150,
                                  motif_size=10,
                                  adjacent_bp_pool_size=10,
                                  num_dense_neurons=10,
                                  dropout_rate=0.75):
    """Build and compile a CNN + dot-product self-attention model.

    Motif scores are max-pooled along the sequence, projected into query /
    key / value spaces via ``Projection`` layers, combined with scaled-free
    dot-product attention, and fed through a dense head.

    Args:
        total_seq_length: length of the one-hot (x4) encoded input sequence.
        mode: 'classification', 'signal_regression' or 'fold_regression';
            selects the output activation, loss, metrics and optimizer.
        num_classes: number of output units.
        num_motifs / motif_size: number and width of convolutional filters.
        adjacent_bp_pool_size: pool width/stride along the sequence axis.
        num_dense_neurons: size of the tanh dense head.
        dropout_rate: dropout applied to the attention weights.

    Returns:
        A compiled keras Model, or None for an unknown ``mode``.
    """
    # set model training settings
    if mode == 'classification':
        mode_activation = 'sigmoid'
        mode_loss = keras.losses.categorical_crossentropy
        mode_metrics = ['categorical_accuracy']
        mode_optimizer = keras.optimizers.Adam()
    elif mode == 'signal_regression':
        mode_activation = 'relu'
        mode_loss = keras.losses.mean_squared_logarithmic_error
        mode_metrics = [pearson_correlation]
        mode_optimizer = keras.optimizers.RMSprop()
    elif mode == 'fold_regression':
        mode_activation = 'linear'
        mode_loss = keras.losses.mean_absolute_error
        mode_metrics = [pearson_correlation]
        mode_optimizer = keras.optimizers.RMSprop()
    else:
        # BUG FIX: an unknown mode previously crashed later with
        # UnboundLocalError; return None like get_convolution_model does.
        return None

    input_fwd = Input(shape=(total_seq_length, 4), name='input_fwd')

    ### find motifs ###
    convolution_layer = Conv1D(filters=num_motifs,
                               kernel_size=motif_size,
                               activation='relu',
                               input_shape=(total_seq_length, 4),
                               name='convolution_layer',
                               padding = 'same',
                               use_bias=False,
                               )
    forward_motif_scores = convolution_layer(input_fwd)
    # (removed a no-op self-assignment of forward_motif_scores)

    ### pool across length of sequence ###
    sequence_pooling_layer = MaxPool1D(pool_size=adjacent_bp_pool_size,
                                       strides=adjacent_bp_pool_size,
                                       name='sequence_pooling_layer')
    pooled_scores = sequence_pooling_layer(forward_motif_scores)

    ### compute attention ###
    ### weight queries ###
    query_transformer = TimeDistributed(Projection(units=1),
                                        input_shape=(int(total_seq_length/adjacent_bp_pool_size), num_motifs*2),
                                        name='query_transformer'
                                        )
    weighted_queries = query_transformer(pooled_scores)

    ### weight keys ###
    key_transformer = TimeDistributed(Projection(units=1),
                                      input_shape=(int(total_seq_length/adjacent_bp_pool_size), num_motifs*2),
                                      name = 'key_transformer')
    weighted_keys = key_transformer(pooled_scores)

    # Unscaled dot-product attention scores between positions.
    dot_product = Dot(axes=(2, 2), name='dot_product')
    attention_weights = dot_product([weighted_queries, weighted_keys])

    ### apply softmax over the sequence positions ###
    softmax_layer = Softmax(axis=1, name='attention_softmax_layer')
    attention_softmax_layer_out = softmax_layer(attention_weights)

    attention_dropout_layer = Dropout(dropout_rate, name='attention_dropout')
    attention_dropout_layer_out = attention_dropout_layer(attention_softmax_layer_out)

    ### weight values ###
    value_transformer = TimeDistributed(Projection(units=1),
                                        input_shape=(int(total_seq_length/adjacent_bp_pool_size), num_motifs*2),
                                        name='value_transformer'
                                        )
    weighted_values = value_transformer(pooled_scores)

    ### attend to hidden states ###
    attending_layer = Dot(axes=(1, 1),
                          name='attending_layer')
    attended_states = attending_layer([attention_dropout_layer_out, weighted_values])

    # make prediction
    dense_layer = TimeDistributed(
        Dense(
            units=num_dense_neurons,
            activation = 'tanh'),
        name='dense_layer')
    dense_out = dense_layer(attended_states)
    flattened = Flatten(name='flatten')(dense_out)
    predictions = Dense(num_classes,
                        name='predictions',
                        activation = mode_activation,
                        )(flattened)

    # define and compile model
    model = Model(inputs=[input_fwd], outputs=predictions)
    model.compile(loss=mode_loss,
                  optimizer=mode_optimizer,
                  metrics=mode_metrics)
    return model
def get_convolution_model(
        total_seq_length,
        mode,
        num_classes = 1,
        num_motifs = 150,
        motif_size = 10,
        num_dense_neurons = 50,
        dropout_rate = 0.75
):
    '''
    Implementation of DeepBind model adapted to also do regression
    in addition to classification of regulatory sequences (enhancers)

    Takes both strands ('input_fwd' and 'input_rev', each of shape
    (total_seq_length, 4)), scans them with a shared Conv1D motif layer,
    max-pools each strand over the whole sequence and keeps the per-motif
    maximum across the two strands.

    Returns a compiled keras Model, or None if ``mode`` is not one of
    'classification', 'signal_regression', 'fold_regression'.
    '''
    # set model training settings
    if mode == 'classification':
        mode_activation = 'sigmoid'
        mode_loss = keras.losses.categorical_crossentropy
        mode_metrics = ['categorical_accuracy']
        mode_optimizer = keras.optimizers.Adam()
    elif mode == 'signal_regression':
        mode_activation = 'relu'
        mode_loss = keras.losses.mean_squared_logarithmic_error
        mode_metrics = [pearson_correlation]
        mode_optimizer = keras.optimizers.RMSprop()
    elif mode == 'fold_regression':
        mode_activation = 'linear'
        mode_loss = keras.losses.mean_absolute_error
        mode_metrics = [pearson_correlation]
        mode_optimizer = keras.optimizers.RMSprop()
    else:
        return None

    input_fwd = Input(shape=(total_seq_length, 4), name='input_fwd')
    input_rev = Input(shape=(total_seq_length, 4), name='input_rev')

    # find motifs -- the SAME layer instance scans both strands (shared weights)
    convolution_layer = Conv1D(filters=num_motifs,
                               kernel_size=motif_size,
                               activation='relu',
                               input_shape=(total_seq_length, 4),
                               name='convolution_layer',
                               padding = 'same',
                               use_bias = False,
                               )
    forward_motif_scores = convolution_layer(input_fwd)
    reverse_motif_scores = convolution_layer(input_rev)

    # calculate max scores for each orientation (global max over the sequence)
    seq_pool_layer = MaxPool1D(pool_size=total_seq_length)
    max_fwd_scores = seq_pool_layer(forward_motif_scores)
    max_rev_scores = seq_pool_layer(reverse_motif_scores)

    # calculate max score for strand
    orientation_max_layer = Maximum()
    max_seq_scores = orientation_max_layer([max_fwd_scores, max_rev_scores])

    # fully connected layer
    dense_out = Dense(num_dense_neurons, activation='relu',
                      )(max_seq_scores)

    # drop out
    drop_out = Dropout(dropout_rate)(dense_out)

    # make prediction
    flattened = Flatten()(drop_out)
    predictions = Dense(num_classes,
                        activation = mode_activation,
                        )(flattened)

    # define and compile model
    model = Model(inputs=[input_fwd, input_rev], outputs=predictions)
    model.compile(loss=mode_loss,
                  optimizer=mode_optimizer,
                  metrics=mode_metrics)
    return model
|
21,470 | 48f8ea999f47e98df6228b0cb99ca605d28786c3 | from core.model import ModelWrapper
from flask_restplus import fields
from werkzeug.datastructures import FileStorage
from maxfw.core import MAX_API, PredictAPI
# Set up parser for input data (http://flask-restplus.readthedocs.io/en/stable/parsing.html)
input_parser = MAX_API.parser()
# Single required multipart file field named 'image' carrying the raw image bytes.
input_parser.add_argument('image', type=FileStorage, location='files', required=True, help="An image file (RGB/HWC)")

# Swagger schema describing one predicted class label.
label_prediction = MAX_API.model('LabelPrediction', {
    'label_id': fields.String(required=False, description='Class label identifier'),
    'label': fields.String(required=True, description='Class label'),
    'probability': fields.Float(required=True, description='Predicted probability for the class label')
})

# Swagger schema for the full /predict response envelope.
predict_response = MAX_API.model('ModelPredictResponse', {
    'status': fields.String(required=True, description='Response status message'),
    'predictions': fields.List(fields.Nested(label_prediction), description='Predicted class labels and probabilities')
})
class ModelPredictAPI(PredictAPI):
    """POST endpoint that runs the wrapped model on an uploaded image."""

    # One shared model instance for all requests.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        # Status stays 'error' until prediction succeeds; any exception
        # propagating out of the steps below leaves it that way.
        result = {'status': 'error'}

        args = input_parser.parse_args()
        input_data = args['image'].read()
        image = self.model_wrapper._read_image(input_data)
        preds = self.model_wrapper._predict(image)

        # Modify this code if the schema is changed.
        # (Fixed: dropped a redundant inner `[x for x in preds]` pass.)
        result['predictions'] = [
            {'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in preds
        ]
        result['status'] = 'ok'

        return result
|
21,471 | 97357ef123dc439939d631f318b41e6c7e67c07d | import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #AF_INET表示ipv4协议,sock_stream表示面向流的tcp协议
s.connect(('www.baidu.com', 80)) #参数是一个tuple 包含地址和端口号
s.send(b'GET / HTTP/1.1\r\nHost: www.baidu.com\r\nConnection: close\r\n\r\n')
buffer = []
while True:
d = s.recv(1024)
if d:
buffer.append(d)
else:
break
data = b''.join(buffer)
s.close()
header, html = data.split(b'\r\n\r\n',1)
print(header.decode("utf-8"))
print('\n')
print(html.decode("utf-8"))
with open('baidu.html', 'wb') as f:
f.write(html) |
21,472 | 8d9d3d8beb155afaa684520977001391091a11ce | # day 7 puzzle 2
import itertools
from collections import deque
def is_aba(s):
    """Return True if the 3-char sequence ``s`` is an ABA pattern (xyx, x != y)."""
    # `True if cond else False` was redundant; return the boolean directly.
    return s[0] != s[1] and s[0] == s[2]
def is_bab_of_aba(bab, aba):
    """Return True if ``bab`` is the inverse of ``aba`` (e.g. 'bab' for 'aba')."""
    # `True if cond else False` was redundant; return the boolean directly.
    return bab[0] == aba[1] and bab[1] == aba[0]
# Sliding window size: ABA/BAB patterns are exactly 3 characters long.
buffer_length = 3
# Number of addresses that support "SSL" (have an outside ABA with a
# matching inside BAB).
total_ssl = 0
with open('day_7_puzzle_1.txt') as fh:
    for line in fh:
        line = line.strip()
        # 3-char sliding window over the current address.
        buffer = deque(maxlen = buffer_length)
        inside_square = False
        # ABA triples seen outside / inside square-bracketed sections.
        all_aba_outside = set()
        all_aba_inside = set()
        # print(line)
        for char in line:
            if '[' == char:
                # print('[')
                inside_square = True
                # Reset the window so a pattern cannot straddle a bracket.
                buffer = deque(maxlen = buffer_length)
                continue
            if ']' == char:
                # print(']')
                inside_square = False
                buffer = deque(maxlen = buffer_length)
                continue
            buffer.append(char)
            if len(buffer) < buffer_length:
                continue
            # Record the triple in the set matching the current region.
            if is_aba(buffer):
                if inside_square:
                    all_aba_inside.add(''.join(buffer))
                    # print('ABA inside:', ''.join(buffer))
                else:
                    all_aba_outside.add(''.join(buffer))
                    # print('ABA outside:', ''.join(buffer))
        # The address counts once if ANY outside ABA pairs with an inside BAB.
        if len(all_aba_inside) > 0 and len(all_aba_outside) > 0:
            for aba, bab in itertools.product(all_aba_outside, all_aba_inside):
                if is_bab_of_aba(bab, aba):
                    # print(bab, 'is a BAB of ABA', aba)
                    total_ssl += 1
                    break
    # print('COUNTER: ', total_ssl)
print('Total SSL: ', total_ssl)
|
21,473 | 4a350addcd19e064daf56e32d4001c32f9027155 | # encoding: utf-8
"""
@project:data_structure_and_algorithm
@author: Jiang Hui
@language:Python 3.7.2 [GCC 7.3.0] :: Anaconda, Inc. on linux
@time: 2019/8/21 15:25
@desc:
"""
"""
这一题和判断二叉树是否为平衡二叉树的思路近似,都需要用到求解二叉树的深度的模板代码。
在本题中,二叉树的直径即为左右子树深度和最大的值。
"""
class TreeNode:
    """Binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        self.val = x
        # New nodes start as leaves.
        self.left = None
        self.right = None
class Solution:
    """Tree diameter via the depth-computation template.

    The diameter is the maximum, over all nodes, of the sum of the left and
    right subtree depths (counted in edges).
    """

    res = 0

    def diameterOfBinaryTree(self, root: TreeNode) -> int:
        """Return the diameter (longest path, in edges) of the tree."""
        if root is None:
            return 0
        self.res = 0
        self.dfs(root)
        return self.res

    def dfs(self, root):
        """Return subtree depth while tracking the best left+right sum seen."""
        if root is None:
            return 0
        left_depth = self.dfs(root.left)
        right_depth = self.dfs(root.right)
        # A path through this node uses both subtrees' depths.
        if left_depth + right_depth > self.res:
            self.res = left_depth + right_depth
        return 1 + max(left_depth, right_depth)
|
21,474 | b0e5154a930169b65bfc2e4db0734ee27c7199ad | """Реализовать класс Road (дорога), в котором определить атрибуты: length (длина), width (ширина).
Значения данных атрибутов должны передаваться при создании экземпляра класса. Атрибуты сделать защищенными.
Определить метод расчета массы асфальта, необходимого для покрытия всего дорожного полотна. Использовать формулу:
длина*ширина*масса асфальта для покрытия одного кв метра дороги асфальтом, толщиной в 1 см*число см толщины полотна.
Проверить работу метода.
Например: 20м*5000м*25кг*5см = 12500 т"""
class Road:
    """A road of given length and width (meters), both protected attributes.

    ``mass()`` reports the asphalt mass needed to cover the roadbed using:
    length * width * (kg of asphalt per m^2 per 1 cm of thickness) * depth_cm.
    """

    def __init__(self, length, width):
        self._length = length
        self._width = width
        self.__weight = 25  # kg of asphalt per square meter per 1 cm of thickness
        self.__depth = 10   # asphalt layer thickness, cm

    def mass(self):
        """Print the asphalt mass (in tons) needed for this road."""
        mass_ = (self._length * self._width * self.__weight * self.__depth) / 1000
        # Message fixes: 'deprh' -> 'depth', and the second label said
        # 'weight' while printing the road's width.
        print(f'Mass of asphalt required to cover the roadbed in depth - {self.__depth}, length - {self._length}, '
              f'width - {self._width} equal {mass_:.2f} tons')
# Demo: a 7000 m long, 30 m wide road; prints the required asphalt mass.
mass_asphalt = Road(7000,30)
mass_asphalt.mass()
|
21,475 | abb5e2614e047018705cbee79017e51a13d8db0f | # Q. Given two (singly) linked list, determine if the two lists intersect.
# Return the intersecting node. Note that the intersection is defined based on
# reference, not value. That is, the kth node of the first linked list is the
# exact same node (by reference) as the jth node of the second linked list,
# then they are intersecting.
# Time complexity: O(N+M); N and M are the length of the linked list
# Space complexity: O(1)
import operator
import unittest
from dataclasses import dataclass, field
@dataclass
class Node:
    # Singly linked list node. `next` is excluded from __init__ and __repr__
    # (repr=False also avoids infinite recursion on cyclic/shared tails).
    data: object
    next: object = field(default=None, init=False, repr=False)
@dataclass
class LinkedList:
    """Singly linked list with an intersection operator.

    ``ll1 & ll2`` returns the first node shared *by reference* between the
    two lists, or None. Time O(N+M), space O(1).
    """

    head: Node = None

    @classmethod
    def create_from_list(cls, iter):
        """Build a linked list from a sequence and return it.

        NOTE: the parameter name shadows the builtin ``iter``; kept
        unchanged for backward compatibility with keyword callers.
        """
        ll = LinkedList()
        if not iter:
            return ll
        ll.head = Node(iter[0])
        current = ll.head
        for index in range(1, len(iter)):
            current.next = Node(iter[index])
            current = current.next
        return ll  # BUG FIX: was missing, so non-empty input returned None

    def __len__(self):
        """Number of nodes (O(n) walk from head)."""
        current = self.head
        counter = 0
        while current:
            counter += 1
            current = current.next
        return counter

    def get_list(self):
        """Yield each node's data from head to tail."""
        current = self.head
        while current:
            yield current.data
            current = current.next

    def __and__(self, other):
        """Return the first node shared by reference with ``other``, else None.

        Advances the longer list's pointer so both walks start the same
        distance from the end, then steps both pointers in lockstep until
        they meet (identity comparison) or fall off the end.
        """
        if not isinstance(other, LinkedList):
            raise NotImplementedError
        l1 = len(self)
        l2 = len(other)
        p1 = self.head
        p2 = other.head

        # Bring both to the same starting point
        if l1 > l2:
            for _ in range(l1 - l2):
                p1 = p1.next
        elif l2 > l1:
            for _ in range(l2 - l1):
                p2 = p2.next

        while p1:
            if p1 is p2:
                return p1
            p1 = p1.next
            p2 = p2.next
        return None
class TestIntersection(unittest.TestCase):
    """Tests for LinkedList.__and__ (reference-based intersection)."""

    def test_intersecting(self):
        # ll1: 5 -> 4 -> 2 -> 3, ll2: 1 -> 2 -> 3; they share node n2 onward.
        n1 = Node(1)
        n2 = Node(2)
        n1.next = n2
        n3 = Node(3)
        n2.next = n3
        n4 = Node(4)
        n5 = Node(5)
        n5.next = n4
        n4.next = n2
        ll1 = LinkedList()
        ll1.head = n5
        ll2 = LinkedList()
        ll2.head = n1
        # The intersection must be the exact shared node object.
        self.assertIs(n2, operator.and_(ll1, ll2))

    def test_not_intersecting(self):
        # ll1: 5 -> 4 and ll2: 1 -> 2 -> 3 share no node objects.
        n1 = Node(1)
        n2 = Node(2)
        n1.next = n2
        n3 = Node(3)
        n2.next = n3
        n4 = Node(4)
        n5 = Node(5)
        n5.next = n4
        ll1 = LinkedList()
        ll1.head = n5
        ll2 = LinkedList()
        ll2.head = n1
        self.assertIsNone(operator.and_(ll1, ll2))
# Run the unit tests when executed directly.
if __name__ == "__main__":
    unittest.main()
|
21,476 | a1d9ebc9c3b3d6095549b313c62d34c4c2502750 | # Generated by Django 3.1 on 2021-05-09 13:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the AuthorBird through-model and
    wires Bird.authors as a many-to-many field routed through it."""

    dependencies = [
        ('birds', '0022_auto_20210509_0852'),
    ]
    operations = [
        migrations.CreateModel(
            name='AuthorBird',
            # Both FKs are nullable and SET_NULL so deleting an Author or a
            # Bird keeps the join row instead of cascading.
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='authors_bird', to='birds.author')),
                ('bird', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='authors_bird', to='birds.bird')),
            ],
        ),
        migrations.AddField(
            model_name='bird',
            name='authors',
            field=models.ManyToManyField(related_name='bird_authors', through='birds.AuthorBird', to='birds.Author'),
        ),
    ]
|
21,477 | fd02aa9c4fe03f1f502e2f8cdbe380ae08245c90 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import argparse
import numpy as np
import time
from Robot import Robot
from trayectorias.trayectorias import *
def main(args):
    """Run the selected robot trajectory.

    args carries: radioD (must be >= 0), trayectoria (1, 2, 3 or 4),
    control ("tiempo" for time-based or "odometria" for odometry-based
    execution) and stop (1 = just stop the motors and exit).
    """
    # Bug fix: robot must pre-exist so the KeyboardInterrupt handler does
    # not raise NameError when Robot() itself fails or is interrupted.
    robot = None
    try:
        if args.radioD < 0:
            print('d must be a positive value')
            exit(1)

        # Instantiate Odometry. Default value will be 0,0,0
        # robot = Robot(init_position=args.pos_ini)
        robot = Robot()
        print("X value at the beginning from main X= %.2f" % (robot.x.value))

        # 1. launch updateOdometry Process()
        robot.startOdometry()

        # 2. perform trajectory (args.stop == 1 aborts immediately)
        if args.stop == 1:
            robot.setSpeed(0, 0)
            robot.stopOdometry()
            return 0

        if args.control == "tiempo":
            # Time-based trajectory variants.
            if args.trayectoria == 1:    # figure-eight
                trajectory = Trayectoria1(0.2)
            elif args.trayectoria == 3:  # straight line
                trajectory = Trayectoria3(1)
            else:                        # trajectory 2: two radii
                trajectory = Trayectoria2(0.2, 0.3, 1)
            robot.setTrajectory(trajectory)
            robot.executeTrajectory_time()
        else:
            # Odometry-based trajectory variants.
            if args.trayectoria == 1:    # figure-eight
                trajectory = Trayectoria1Velocidades(0.2)
            elif args.trayectoria == 3:  # straight line
                trajectory = Trayectoria3Posiciones(1)
            elif args.trayectoria == 4:
                trajectory = TrayectoriaTrabajo(0.38)
            else:                        # trajectory 2: two radii
                trajectory = Trayectoria2Velocidades(0.2, 0.3, 1)
            robot.setTrajectory(trajectory)
            robot.executeTrajectory()

        robot.setSpeed(0, 0)
        robot.stopOdometry()
    except KeyboardInterrupt:
        # THIS IS IMPORTANT if we want the motors to STOP on Ctrl+C.
        if robot is not None:
            robot.stopOdometry()
if __name__ == "__main__":
    # get and parse arguments passed to main
    # Add as many args as you need ...
    parser = argparse.ArgumentParser()
    # Radius used by the figure-eight; only validated (>= 0) in main().
    parser.add_argument("-d", "--radioD", help="Radio to perform the 8-trajectory (mm)",
                        type=float, default=40.0)
    parser.add_argument("-t", "--trayectoria", help="Elige la trayectoria a realizar (1, 2 o 3)",
                        type=int, default=1)
    parser.add_argument("-c", "--control", help="Elige el control (<tiempo> o <odometria>)",
                        type=str, default="odometria")
    # -s 1 stops the motors and exits without running any trajectory.
    parser.add_argument("-s", "--stop", help="Para",
                        type=int, default=0)
    args = parser.parse_args()
    main(args)
|
21,478 | 7b4974c8a0f369b47139b3c616d789b2762ee04e | import configparser
import time
import boto3
# Read the ElasticMQ (SQS-compatible) endpoint and credentials from
# config.ini and build both a high-level resource and a low-level client.
config = configparser.ConfigParser()
config.sections()
config.read('./config.ini')
elastic = config['elasticmq']
params = {'endpoint_url': elastic['host'] + ':' + elastic['port'],
          'region_name': 'elasticmq',
          'aws_secret_access_key': elastic['aws_secret_access_key'],
          'aws_access_key_id': elastic['aws_access_key_id']}
# Resource API: used for queue discovery/lookup.
client = boto3.resource('sqs',
                        endpoint_url=params['endpoint_url'],
                        region_name=params['region_name'],
                        aws_secret_access_key=params['aws_secret_access_key'],
                        aws_access_key_id=params['aws_access_key_id'],
                        use_ssl=False)
# Client API: used for send/receive/delete message calls.
sqs = boto3.client('sqs',
                   endpoint_url=params['endpoint_url'],
                   region_name=params['region_name'],
                   aws_secret_access_key=params['aws_secret_access_key'],
                   aws_access_key_id=params['aws_access_key_id'],
                   use_ssl=False)
class Queue:
    """Display-oriented snapshot of an SQS queue."""

    def __init__(self, name, url, numMes, invisMes, tst):
        # NOTE(review): the *name* argument is ignored — the shown name is
        # always derived from the last path segment of the URL.
        self.name = url.rsplit('/', 1)[-1]
        self.url = url
        self.numMes = numMes        # ApproximateNumberOfMessages
        self.invisMes = invisMes    # ApproximateNumberOfMessagesNotVisible
        # Creation time formatted from the epoch-seconds attribute.
        self.timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(tst)))
class Message:
    """Pairs a message body with the receipt handle needed to delete it."""

    def __init__(self, body, receiptHandle):
        self.body = body
        self.receiptHandle = receiptHandle
def cleint_get_queues():
    """Return (queues, error): every queue sorted by name, or an error string.

    On connection failure the first element is whatever was collected so
    far (usually empty) and *error* is a human-readable message.
    NOTE(review): the name looks like a typo for "client_get_queues"; kept
    unchanged because callers may rely on it.
    """
    urls = []
    error = None
    try:
        for queue in client.queues.all():
            # 'XXX' is a placeholder: Queue.__init__ derives the displayed
            # name from the URL and ignores this argument.
            urls.append(Queue('XXX', queue.url,
                              queue.attributes.get('ApproximateNumberOfMessages'),
                              queue.attributes.get('ApproximateNumberOfMessagesNotVisible'),
                              queue.attributes.get('CreatedTimestamp')))
        urls.sort(key=lambda q: q.name)
    except Exception:
        error = "Cannot connect to " + params['endpoint_url']
    # Bug fix: the old version returned from both the except branch and a
    # finally block; a return inside finally silently swallows exceptions
    # and made the earlier return dead code.  A single exit point suffices.
    return urls, error
def client_get_queu_by_name(name):
    """Return the boto3 Queue resource whose name is *name*."""
    return client.get_queue_by_name(QueueName=name)
def client_send_message(name, body):
    """Send *body* to the queue called *name*; return the new MessageId."""
    queue_url = client_get_queu_by_name(name).url
    response = sqs.send_message(QueueUrl=queue_url, MessageBody=body)
    return response['MessageId']
def client_get_messages(name):
    """Drain up to ApproximateNumberOfMessages messages from queue *name*.

    Messages are fetched in batches of 10 (the SQS per-call maximum) and
    returned as a list of Message objects.
    """
    result = []
    queue = client_get_queu_by_name(name)
    messages_count = int(queue.attributes['ApproximateNumberOfMessages'])
    for _ in range(messages_count):
        response = sqs.receive_message(QueueUrl=queue.url,
                                       MaxNumberOfMessages=10)
        for message in response.get('Messages', []):
            result.append(Message(message['Body'], message['ReceiptHandle']))
        # Bug fix: the old code compared len(response) — a dict with ~2 top
        # level keys — against the queue depth, so the loop stopped after a
        # single batch whenever the queue held more than two messages.
        if len(result) >= messages_count:
            break
    return result
def client_delete_message(name, receiptHandle):
    """Delete one message (by receipt handle) from queue *name*."""
    response = sqs.delete_message(QueueUrl=client_get_queu_by_name(name).url, ReceiptHandle=receiptHandle)
    return response
def client_purge(name):
    """Delete every currently-visible message from queue *name*."""
    for message in client_get_messages(name):
        client_delete_message(name, message.receiptHandle)
|
21,479 | 872f7d1ccdac8295aa36eff0817f14ff4e6d7234 | #!/usr/bin/env python3
# Name: man_deblob.sh
# Main: jadedctrl
# Lisc: ISC
# Desc: Deblobbing OBSD man pages for use in LBSD.
#
# Usage: man_deblob.sh $SRC_DIR
#
# NOTE(review): this is a ksh script (it uses `set -A` arrays), but the file
# carried a stray python3 shebang, an `import sys` line and a python
# triple-quoted docstring — none of which are valid shell.  They have been
# converted to comments / removed; the shebang should read `#!/bin/ksh`.

. ./libdeblob.sh

# Work area for generated patches; recreate it fresh on every run.
PATCH_DIR=/tmp/man_deblob

if [ -e $PATCH_DIR ]
then
	self_destruct_sequence $PATCH_DIR
	mkdir $PATCH_DIR
else
	mkdir $PATCH_DIR
fi

# Default the source tree to /usr/src when no argument is given.
if test -z $1
then
	SRC_DIR=/usr/src
else
	SRC_DIR=$1
fi

# man4: firmware-related manual pages to strip from the build lists.
set -A fw_list acx adw adv athn bnx bwi drm fxp inteldrm ips ipw iwi iwm \
	iwn kue malo myx neo otus pgt ral radeondrm rsu rtwn rum siop tht \
	thtc ti uath udl ulpt upgt urtwn uvideo wpi yds zyd

for man_blob in "${fw_list[@]}"
do
	strdel "\<${man_blob}.4\>" share/man/man4/Makefile
	linedel "\<${man_blob}.4\>" distrib/sets/lists/man/mi
done

linedel "./usr/share/man/man1/fw_update.1" distrib/sets/lists/man/mi
|
21,480 | 78fd608b63af5a6647833d7bf48d9c2ad2f2fa6d | import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# Contour plot of four Gaussian bumps (three unit-square corners plus the
# centre) evaluated on a 0.025-spaced grid over [0, 1) x [0, 1).
delta = 0.025
x = np.arange(0, 1.0, delta)
y = np.arange(0, 1.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y)**2)
Z3 = np.exp(-(X)**2 - (Y - 1)**2)
Z4 = np.exp(-(X - 0.5)**2 - (Y - 0.5)**2)
Z = (Z1 + Z2 + Z3 + Z4)
fig, ax = plt.subplots()
CS = ax.contour(X, Y, Z)
# Label the contour lines in place.
ax.clabel(CS, fontsize=10)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.show()
from matplotlib import pyplot as plt
import numpy as np
# Linear least-squares position estimate from range-difference measurements
# at four corner sensors, overlaid on a contour plot of the Gaussian field.
# generate random x
# x = np.random.random(2)
x = np.array([.35, .39])                 # true position
noise = np.random.normal(0, 0, size=2)   # sigma=0: measurements are noiseless
sensors = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])
# assumes a TDOA-style model: ||x - s_i|| - ||x|| — TODO confirm derivation
distance_diffs = np.linalg.norm(x + noise - sensors, axis=1) - np.linalg.norm(x)
phi = np.concatenate((distance_diffs[:, np.newaxis], sensors), axis=1)
b = (np.linalg.norm(sensors, axis=1)**2 - distance_diffs**2) / 2
# compute least squares solution (normal equations); x_hat[1:3] is the position
x_hat = np.linalg.inv(phi.T @ phi) @ phi.T @ b
delta = 0.025
x = np.arange(0, 1.0, delta)   # NOTE: rebinds x, clobbering the position vector
y = np.arange(0, 1.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = np.exp(-X**2 - Y**2)
Z2 = np.exp(-(X - 1)**2 - (Y)**2)
Z3 = np.exp(-(X)**2 - (Y - 1)**2)
Z4 = np.exp(-(X - 0.5)**2 - (Y - 0.5)**2)
Z = (Z1 + Z2 + Z3 + Z4)
fig, ax = plt.subplots()
CS = ax.contour(X, Y, Z)
plt.plot(sensors[:, 0], sensors[:, 1], 'k*')
# plt.plot(x[0], x[1], 'ro')
plt.plot(x_hat[1], x_hat[2], 'bo')
plt.xlim([-.1, 1.1])
plt.ylim([-.1, 1.1])
plt.legend(('Sensors', 'Position'))
plt.show()
|
21,481 | e91c8a40ea09ef4139a8eab3c0e0e2a29ce863fe | import unittest
import random
from dramakul.sites import SITES
QUERY = "beauty"
class TestSite(unittest.TestCase):
    """Smoke-tests every registered site: search, pick a hit, fetch its info."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One scraper instance per registered site class.
        self.sites = [site_cls() for site_cls in SITES]

    def test_site_functions(self):
        for site in self.sites:
            hits = site.search(QUERY)
            assert hits, site.name
            chosen = random.choice(hits)
            drama = chosen.get_info()
            assert drama, site.name
            assert drama.episodes, site.name
|
21,482 | 0793c81a25ab8e500d4cfea6dbc05ff27b343fe6 | # Generated by Django 2.2.6 on 2020-03-28 16:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the nullable ``day`` DateField to
    UserProgress."""

    dependencies = [
        ('sanskrit', '0018_auto_20200326_2050'),
    ]
    operations = [
        migrations.AddField(
            model_name='userprogress',
            name='day',
            field=models.DateField(null=True),
        ),
    ]
|
21,483 | 504f3ebcf1404e251d1b9b5e0eb01fed9bf25ac7 | #! /usr/bin/env python3
import unittest
import day08
class TestDay08(unittest.TestCase):
    """Tests for the day08 Screen simulation (AoC 2016 day 8 pixel display)."""

    def setUp(self):
        # A fresh 7x3 screen before every test.
        self.screen = day08.Screen(7, 3)

    def test_rect(self):
        # rect(w, h) lights a w-by-h block in the top-left corner.
        self.screen.rect(3, 2)
        self.assertEqual(self.screen.count_on(), 6)
        self.assertEqual(self.screen.show(), ['###....', '###....', '.......'])

    def test_rotate_column(self):
        # Rotating a column shifts its pixels down, wrapping at the bottom.
        self.screen.rect(3, 2)
        self.screen.rotate_column(1, 1)
        self.assertEqual(self.screen.count_on(), 6)
        self.assertEqual(self.screen.show(), ['#.#....', '###....', '.#.....'])

    def test_rotate_row(self):
        # Rotating a row shifts its pixels right, wrapping at the edge.
        self.screen.rect(3, 2)
        self.screen.rotate_column(1, 1)
        self.screen.rotate_row(0, 4)
        self.assertEqual(self.screen.count_on(), 6)
        self.assertEqual(self.screen.show(), ['....#.#', '###....', '.#.....'])
if __name__ == '__main__':
    # Run the suite when invoked directly.
    unittest.main()
|
21,484 | d5a99ec16cc61085139b89eb7ea98001c23745fa | # -*- coding: utf-8 -*-
"""model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1WxDHqzntD8pMeuFAwogYLLtTpSo8cxwL
"""
import tensorflow as tf
tf.test.gpu_device_name()
# https://keras.io/
!pip install -q keras
import keras
# Import libraries and modules
import numpy as np
np.random.seed(123) # for reproducibility
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Lambda, add, Input
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.datasets import mnist
def residual_block(y):
    """Apply two 3x3/32 conv+BN layers and add the input back (identity skip).

    ReLU follows the first batch-norm and the post-addition sum, matching
    the standard ResNet v1 block layout.
    """
    identity = y
    out = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(y)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding='same')(out)
    out = BatchNormalization()(out)
    out = keras.layers.add([identity, out])
    out = Activation('relu')(out)
    return out
# 4. Load pre-shuffled MNIST data into train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# 5. Preprocess input data
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# 6. Preprocess class labels
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
# define model architecture
model = Sequential()
# input convolution
model.add(Conv2D(32, (3, 3), input_shape=(28,28,1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
# residual blocks
# NOTE(review): wrapping residual_block in Lambda creates new layers at call
# time inside a Sequential model; presumably that is why the functional
# Model built below supersedes this one — confirm before reusing.
model.add(Lambda(residual_block))
model.add(Lambda(residual_block))
model.add(Lambda(residual_block))
model.add(Lambda(residual_block))
model.add(Lambda(residual_block))
# output layer
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(10, activation='softmax'))
# splitting model
# Functional API variant with two heads (AlphaZero-style policy + value);
# this rebinds `model`, replacing the Sequential network above.
inp = Input(shape=(28,28,1,))
# input convolution
y = Conv2D(32, (3, 3), input_shape=(28,28,1))(inp)
y = BatchNormalization()(y)
y = Activation('relu')(y)
# residual blocks
y = residual_block(y)
y = residual_block(y)
y = residual_block(y)
y = residual_block(y)
y = residual_block(y)
# policy
y_policy = Conv2D(2, (1, 1))(y)
y_policy = BatchNormalization()(y_policy)
y_policy = Flatten()(y_policy)
y_policy = Activation('relu')(y_policy)
policy_out = Dense(26, activation='softmax')(y_policy)
# value
y_value = Conv2D(1, (1, 1))(y)
y_value = BatchNormalization()(y_value)
y_value = Activation('relu')(y_value)
y_value = Dense(64, activation='relu')(y_value)
value_out = Dense(1, activation='tanh')(y_value)
model = Model(inp, [policy_out,value_out])
# next - custom loss function!
# 8. Compile model
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# 9. Fit model on training data
# NOTE(review): nb_epoch is the deprecated Keras 1 spelling; Keras 2 uses
# epochs=.  Also, this two-output model is fit with single-output labels.
model.fit(X_train, Y_train,
          batch_size=32, nb_epoch=10, verbose=1)
# 10. Evaluate model on test data
score = model.evaluate(X_test, Y_test, verbose=0)
print(score)
|
21,485 | 935287f3a399425d7d6df65b184b28737f21188d | from sys import stdin
# Train-yard sorting ("Rails"-style problem): carriages arrive in the order
# read from stdin and must leave in ascending order; a single LIFO side
# branch may temporarily hold carriages.  Prints Y/N per test case.
res = []
for x in range(int(stdin.readline())):
    maxv = int(stdin.readline())
    data = []
    for i in range(maxv):
        data.append(int(stdin.readline()))
    # Fast path: input already in descending order is trivially sortable.
    # Bug fix: the original compared a reversed() iterator object against a
    # list, which is always False, so this shortcut never fired.
    if list(reversed(data)) == sorted(data):
        res.append("Y")
        continue
    branch = []   # LIFO holding track
    lake = []     # carriages already dispatched (newest prepended)
    i = 1         # next carriage number we want to dispatch
    while (len(lake) != maxv) or (len(data) == 0):
        if len(branch) != 0 and (branch[0] == i):
            # Take the wanted carriage back off the holding track.
            lake = [branch.pop(0)] + lake
            i += 1
        elif len(data) == 0:
            break
        elif data[-1] == i:
            # The next arriving carriage is the one we want: send it on.
            lake = [data.pop(data.index(data[-1]))] + lake
            i += 1
        else:
            # Park the arriving carriage on the holding track.
            branch = [data.pop(data.index(data[-1]))] + branch
    if list(reversed(lake)) == sorted(list(reversed(lake))) and len(lake) == maxv:
        res.append('Y')
    else:
        res.append('N')
for b in res:
    # print(b) is valid in both Python 2 and 3 for a single argument.
    print(b)
|
21,486 | 65ae2f7f4c03d85cb60a41ca8859436d7c83ec6b | import re
import requests
from bs4 import BeautifulSoup
p = re.compile('[a-z]+')
## 1) ip address check
## 2) access log 에서 IP 뽑아내기
## 3) access log ip list count
'''
ip="241.1.1.112343434"
#aa=re.match(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}[^0-9]",ip)
#aa.group()
aa=re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$",ip)
if aa:
ip = aa.group()
print("IP:",ip)
#
#ip_candidates = re.findall(r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b", ip)
#
#print(ip_candidates )
'''
import requests
def url_status_check(url):
    """GET *url* and return a tuple of response fields.

    Returns (status_code, final_url, content_type, json_method, history,
    headers, body_text).
    NOTE(review): ``r.json`` is returned without being called, so callers
    receive the bound method rather than parsed JSON — confirm whether
    ``r.json()`` was intended.
    """
    target = url
    r = requests.get(target)
    result=r.status_code
    headers = r.headers
    header = r.headers['content-type']
    url = r.url            # rebinds the parameter with the post-redirect URL
    json = r.json          # bound method, not decoded data (see note above)
    history=r.history
    r.text
    '''
    for i in open("url.txt",'r'):
        #print(i.strip('\n'))
        url = i.strip('\n')
        for reg in open("regs.txt",'r'):
            target=url +"/" + reg.strip('\n')
            print( target )
            r = requests.get(target)
            print(r.status_code)
    '''
    return result,url,header,json,history, headers,r.text
# Fetch a page, then scan each anchor href for an absolute URL and the whole
# document for e-mail-like strings.
url='http://www.naver.com'
url = url_status_check(url)   # rebinds url: now the returned tuple
#print( url[6] )
plain_text = url[6]           # element 6 is the response body text
soup = BeautifulSoup(plain_text,'lxml')
for link in soup.find_all('a'):
    #print(link.get('href')) #ranks = soup.find("dl", {"class": "blind"})
    #url_list = re.findall('[a-zA-Z0-9]\S*@\S*[a-zA-Z]', link.get('href'))
    # NOTE(review): link.get('href') can be None for anchors without href,
    # which would make re.match raise TypeError — confirm inputs.
    url_list = re.match(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', link.get('href'))
    #mail_list = re.match('\\[^@]+@[^@]+\.[^@]+',link )
    # Both scans run over the whole document, so they are recomputed
    # identically on every anchor iteration.
    emails = re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}", str(soup))
    emails_1 = re.findall(r"[^@]+@[^@]+\.[^@]+",str(soup))
    if (url_list!=None):
        print("url:",url_list.group())
        print("mail:",emails)
        print("mail:",emails_1)
    else:
        pass
        #print('No data !!')
#for u in
|
21,487 | c96e98779c2bb13f425128c9b8b914722932c26c | //:: import os, sys
//:: import importlib
import importlib
import grpc
import os
import sys
from concurrent import futures
from infra.common.logging import logger
import ${proxy_handler}
asic = os.environ.get('ASIC', 'capri')
def grpc_server_start():
    """Create and return the proxy gRPC server (single worker thread)."""
    return grpc.server(futures.ThreadPoolExecutor(max_workers=1))
def grpc_client_connect():
    """Open an insecure channel to HAL, honouring $HAL_GRPC_PORT (default 50054)."""
    port = os.environ.get('HAL_GRPC_PORT', '50054')
    return grpc.insecure_channel('localhost:%s' % (port))
def set_grpc_forward_channel(grpc_channel):
    """Install *grpc_channel* as the module-global channel used by the proxy stubs."""
    global channel
    channel = grpc_channel
proxyServer = grpc_server_start()
//:: ws_top = ws_top or os.path.abspath(os.path.dirname(sys.argv[0]) + '/..')
//:: os.environ['WS_TOP'] = ws_top
//:: fullpath = ws_top + '/nic/build/x86_64/iris/' + asic + '/gen/proto/'
//:: sys.path.insert(0, fullpath)
if 'WS_TOP' in os.environ:
ws_top = os.environ['WS_TOP']
else:
ws_top = os.path.dirname(sys.argv[0]) + '/..'
ws_top = os.path.abspath(ws_top)
os.environ['WS_TOP'] = ws_top
fullpath = ws_top + '/nic/build/x86_64/iris/' + asic + '/gen/proto/'
sys.path.insert(0, fullpath)
//:: grpc_service_reg_str = ''
//:: # Iterate through all files in the gen/proto/hal folder, and add create
//:: # python classes for the grpc proxy server
//:: for fileFullName in os.listdir(fullpath):
//:: if not fileFullName.endswith(".py"):
//:: continue
//:: #endif
//:: if 'grpc' in fileFullName:
//:: continue
//:: #endif
//:: fileName = fileFullName[:-3]
//:: # fileGrpcName = fileName[:fileName.find('_')] + '_pb2_grpc'
//:: fileGrpcName = fileName[:fileName.rfind('_')] + '_pb2_grpc'
//:: if not os.path.isfile(fullpath + fileGrpcName + ".py"):
//:: continue
//:: #endif
//:: fileModule = importlib.import_module(fileName)
//:: fileGrpcModule = importlib.import_module(fileGrpcName)
//:: for service in fileModule.DESCRIPTOR.services_by_name.items():
//:: stubName = service[0] + 'Stub'
//:: servicerName = service[0] + 'Servicer'
${fileName} = importlib.import_module('${fileName}')
${fileGrpcName} = importlib.import_module('${fileGrpcName}')
class ${service[0]}(${fileGrpcName}.${servicerName}):
//:: for table in service[1].methods_by_name.items():
def ${table[0]}(self, request, context):
stub = ${fileGrpcName}.${stubName}(channel)
response, err = ${proxy_handler}.CreateConfigFromDol(request, '${table[0]}')
if err:
#logger.info("Sending DOL message for message type %s\n%s\n"%(type(request), request))
response = stub.${table[0]}(request)
#logger.info("Received HAL response \n%s\n" %(response))
return response
//:: #endfor
//:: # Add the service to the proxy server.
pass
//::
//:: service_reg = fileGrpcName + '.add_' + servicerName + '_to_server(' + service[0] + '(), proxyServer)'
//:: grpc_service_reg_str += ' ' + service_reg + '\n'
//::
//:: #endfor
//:: #endfor
//::
def grpc_service_register(proxyServer):
${grpc_service_reg_str}
//::
grpc_service_register(proxyServer)
|
21,488 | c274c02bf98938e8ddc7fb1c401b967da437409b | #!/usr/bin/env python3
# This script walks through the revision history of the
# legislators-social-media.yaml file to construct a historical view of social
# media accounts for legislators.
import git
import rtyaml
import datetime
def main():
    """Replay the git history of legislators-social-media.yaml into social.yaml.

    Loads all historical and current legislators, then walks every commit
    that touched the social-media file so each account gets tagged with the
    earliest commit date at which it appears.
    """
    repo = git.Repo('..')
    print('loading all legislators')
    legis = rtyaml.load(open('../legislators-historical.yaml'))
    legis.extend(rtyaml.load(open('../legislators-current.yaml')))
    # examine each commit to the social yaml file and merge into results
    for commit in repo.iter_commits(paths=['legislators-social-media.yaml']):
        created = datetime.datetime.fromtimestamp(commit.committed_date)
        print('examining', created)
        for blob in commit.tree.blobs:
            if blob.path == 'legislators-social-media.yaml':
                try:
                    social = rtyaml.load(blob.data_stream)
                    merge(social, legis, created)
                except rtyaml.yaml.error.YAMLError as e:
                    # Skip snapshots that never parsed as valid YAML.
                    print("yaml in commit didn't parse: {}".format(commit))
    output_file = 'social.yaml'
    print('writing {}'.format(output_file))
    open(output_file, 'w').write(rtyaml.dump(legis))
def merge(social, legis, committed):
    """Fold one snapshot of social accounts into *legis*, keeping the earliest date."""
    if type(social) is not list:
        # Snapshot did not parse into the expected list shape; ignore it.
        return
    snapshot_date = committed.strftime('%Y-%m-%d')
    for entry in social:
        # Locate the matching legislator record.
        person = find(entry['id']['bioguide'], legis)
        accounts = person.setdefault('social', {})
        for platform, profile_id in entry['social'].items():
            platform_map = accounts.setdefault(platform, {})
            recorded = platform_map.get(profile_id)
            # Record the date on first sight; afterwards keep the minimum.
            if recorded is None or snapshot_date < recorded:
                platform_map[profile_id] = snapshot_date
def find(id, legis):
    """Return the single legislator in *legis* whose bioguide id equals *id*."""
    matches = [person for person in legis if person['id']['bioguide'] == id]
    assert len(matches) == 1
    return matches[0]
if __name__ == "__main__":
    # Entry point: rebuild social.yaml from the repository history.
    main()
|
21,489 | 421aa3a10e4c22ccdee7fe256b5a8650cbe429a1 | """Program F17 - Exit
Fungsi ini akan menawarkan pengguna untuk menyimpan data. Jika
pengguna ingin menyimpan data, fungsi akan memanggil fungsi save.
Setelahnya, fungsi akan memberhentikan keseluruhan program.
Akses : -
"""
# KAMUS
# Daftar library standar
import sys
# Daftar library lokal
from save import save
from constant import active_account
# Daftar Konstanta
# active_account : int
# Daftar variabel
# save_option : str
# ALGORITMA
def exit(databases):
    """Offer to save changed data, optionally run save(), then terminate.

    NOTE: shadows the builtin ``exit``; kept because callers use this name.
    """
    # Fetch the active user's display name; fall back to the generic
    # pronoun "Anda" ("you") when no account is active.
    try:
        username = databases[active_account][1]
    except IndexError:
        username = "Anda"
    # Validate the user's answer: must be a single Y/y/N/n character.
    while True:
        save_option = input("(^.^) : Apakah "
                            + username
                            + " mau melakukan penyimpanan file "
                            + "yang sudah diubah? (Y/N) ")
        # Valid input: exactly one of Y/y/N/n
        if save_option in "YyNn" and len(save_option) == 1:
            break
        # Invalid input: show an error and prompt again
        print("\nm(>.<)m : Input tidak sesuai. Ulangi! \n")
    # User chose to save: run the save routine
    if save_option in "Yy":
        print()
        save(databases)
    # Done: terminate the main program loop with a farewell message
    sys.exit("\n(^O^)/ : Sampai jumpa!")
|
21,490 | 5877a3775320f63c9ecab34c4d3ad380f1ac2a5a | import pymysql
# connect mysql database
# MYSQLdb.connect("localhost", testuser", "test123", "testDB", charset='utf8')
db = pymysql.connect("localhost", "root", "abc", "trycatch", charset="utf8")
cursor = db.cursor()
cursor.execute("select VERSION()")
data = cursor.fetchone()
print "db version : %s " % data
db.close()
#create a table
db = pymysql.connect("localhost", "root", "abc", "trycatch", charset="utf8")
cursor = db.cursor()
cursor.execute("drop table if exists steaf")
sql = "create table steaf(" \
"name varchar(100) NOT NULL," \
"age int," \
"sex char(1)," \
"image_path varchar(200)," \
"serialize_file varchar(200))"
cursor.execute(sql)
db.close()
print "create table done."
#insert some values
db = pymysql.connect("localhost", "root", "abc", "trycatch", charset="utf8")
cursor = db.cursor()
age = 20
image_path = "/home/jack/Desktop/target/lidongdong.jpg"
sql = "insert into steaf values(" \
"'lidongdong'," \
"%d," \
"'m'," \
"'%s'," \
"'%s')" % (age, image_path, "")
try:
cursor.execute(sql)
db.commit()
except:
print "error"
db.rollback()
db.close()
print "insert data done."
#select
db = pymysql.connect("localhost", "root", "abc", "trycatch", charset="utf8")
cursor = db.cursor()
sql = "select * from steaf"
try:
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
name = row[0]
age = row[1]
sex = row[2]
image_path = row[3]
serialize_file = row[4]
print name, age, sex, image_path, serialize_file
except:
print "error"
db.close()
print "select done."
|
21,491 | a6d152f4616d42096eb24c8e1cbb00a054ef69e7 |
# import required libraries
import random
from tkinter import *
class Start:
    """Title screen: shows the quiz heading and a PLAY button that opens Game."""

    def __init__(self, root):
        # NOTE(review): the *root* parameter is never used; widgets attach
        # to the default Tk root implicitly.
        # set up and grid first displayed frame
        self.frame = Frame()
        self.frame.grid()
        # creates design for the first frame
        self.heading_frame = Frame(self.frame, width=500, height=18, bg="#CCE5FF")
        self.under_heading_frame = Frame(self.frame, width=500, height=6, bg="#BDDEFF")
        self.blank_frame = Frame(self.frame, height=40, width=500)
        self.label_background_frame = Frame(self.frame, height=150, width=500, bg="white")
        self.heading_label = Label(self.label_background_frame, height=3, width=41, text="Math Quiz", font=("Helvetica 15 bold"),
                                   fg="#6C6C6C", justify=CENTER, bg="white")
        self.under_label_frame = Frame(self.frame, width=500, height=7, bg="#E8E8E8")
        self.start_game_button = Button(self.frame, text="PLAY", font=("Helvetica 15 bold"), fg="#747475", border=0, command=lambda: self.toGame())
        # grid all created objects above
        self.heading_label.grid()
        self.heading_frame.grid(row=0)
        self.under_heading_frame.grid(row=1)
        self.blank_frame.grid(row=2)
        self.label_background_frame.grid(row=3)
        self.under_label_frame.grid(row=4)
        self.start_game_button.grid(row=6, pady=50)

    # function to send user to Game class
    def toGame(self, ):
        """Destroy this screen and open the Game screen."""
        # deletes the current frame
        self.frame.destroy()
        Game()
class Game:
    """Main quiz screen: poses random integer arithmetic questions, checks
    answers, and tracks right/wrong counts in Tk variables."""

    def __init__(self):
        """Build the quiz frame, its widgets, and the score counters."""
        # set up variables
        self.round_number = IntVar()
        self.round_number.set(1)
        self.correctly_answered = IntVar()
        self.correctly_answered.set(0)
        self.incorrectly_answered = IntVar()
        self.incorrectly_answered.set(0)
        # NOTE(review): this local StringVar is never used; newQuestion()
        # stores the equation as a plain str in self.current_equation.
        current_equation = StringVar()
        # set up and grid first displayed frame
        self.frame = Frame(width=500, height=600)
        self.frame.grid()
        # creates design for the main Game frame
        self.heading_frame = Frame(self.frame, width=500, height=65, bg="#CCE5FF")
        self.under_heading_frame = Frame(self.frame, width=500, height=6, bg="#BDDEFF")
        self.heading_label = Label(self.heading_frame, text="Math Quiz", font=("Helvetica 15"),
                                   fg="#6C6C6C", bg="#CCE5FF", width=45, height=2, justify=CENTER)
        self.blank_frame = Frame(self.frame, width=500, height=30)
        self.label = Label(self.frame, text="Round {}\n\nEnter your answer in the entry\nbox below and click Check Answer.".format(self.round_number.get()), font=("Helvetica 12"), justify=CENTER)
        self.second_blank_frame = Frame(self.frame, width=500, height=30)
        self.question_info_frame = Frame(self.frame, width=500, height=30, bg="white")
        self.question_label = Label(self.question_info_frame, bg="white", width=45, height=3, text=self.newQuestion(), font=("Helvetica 15"), justify=CENTER)
        self.under_question_info_frame = Frame(self.frame, width=500, height=5, bg="#E8E8E8")
        self.third_blank_frame = Frame(self.frame, width=500, height=40)
        self.user_input = Entry(self.frame, width=21, font=("Helvetica 15"))
        self.check_answer_button = Button(self.frame, text="Check Answer", font=("Helvetica 12"), width=15, command=lambda: self.checkAnswer())
        self.fourth_blank_frame = Frame(self.frame, width=500, height=40)
        self.help_button = Button(self.frame, text="Help", font=("Helvetica 12"), width=15, command=lambda: self.toHelp())
        self.stats_button = Button(self.frame, text="Statistics", font=("Helvetica 12"), width=15, command=lambda: self.toStats())
        self.footer_blank = Frame(self.frame, width=500, height=20)
        self.footer = Frame(self.frame,width=500, height=6, bg="#CCE5FF")
        # grid all created objects above
        self.heading_label.grid(row=1)
        self.heading_frame.grid(row=1)
        self.under_heading_frame.grid(row=2)
        self.blank_frame.grid(row=3)
        self.label.grid(row=4)
        self.second_blank_frame.grid(row=5)
        self.question_info_frame.grid(row=6)
        self.question_label.grid(row=7)
        self.under_question_info_frame.grid(row=8)
        self.third_blank_frame.grid(row=9)
        self.user_input.grid(row=10)
        self.check_answer_button.grid(row=11)
        self.fourth_blank_frame.grid(row=12)
        self.help_button.grid(row=13)
        self.stats_button.grid(row=14)
        self.footer_blank.grid(row=15)
        self.footer.grid(row=16)

    # function to generate a new question
    def newQuestion(self):
        """Generate, store, and return a new random equation string."""
        # creates a random question using operators
        # from list variable and the random library
        operators = ["+", "-", "*"]
        equation = "{} {} {}".format(random.randint(0,12), random.choice(operators), random.randint(0,12))
        self.current_equation = equation
        # returns newly generated question to question label
        return equation

    # function to check if the user's answer is correct
    def checkAnswer(self):
        """Compare the entry value with the current equation's result.

        Feedback is given via the entry's background colour; an empty or
        non-integer entry is marked red without advancing the round.
        """
        equation = self.current_equation
        try:
            # checks if user input is empty
            if self.user_input.get() != "":
                # compare user's answer with correct answer
                # (eval is safe here: the equation is built internally from
                # digits and + - * only)
                if (int(self.user_input.get())) == eval(equation):
                    # sets user input background to green
                    # and adds 1 to correctly_answered
                    self.user_input.configure(bg="#90EE90")
                    is_right = self.correctly_answered.get()
                    is_right += 1
                    self.correctly_answered.set(is_right)
                else:
                    # sets user input background to red
                    # and adds 1 to incorrectly_answer
                    self.user_input.configure(bg="#FF5733")
                    is_wrong = self.incorrectly_answered.get()
                    is_wrong += 1
                    self.incorrectly_answered.set(is_wrong)
            else:
                # if user input is empty set background to red
                # doesn't skip round.
                self.user_input.configure(bg="#FF5733")
                return
            # adds 1 to round counter and updates
            # label on main Game frame
            self.question_label.config(text=self.newQuestion())
            rnum = self.round_number.get()
            rnum += 1
            self.round_number.set(rnum)
            self.label.config(text="Round {}\n\nEnter your answer in the entry\nbox below and click Check Answer.".format(rnum))
        # parses user input as integer, if not
        # sets user input background to red
        except ValueError:
            self.user_input.configure(bg="#FF5733")

    # functions to send user to Stats and Help classes
    def toStats(self):
        """Destroy this screen and open Stats with the current score."""
        num_correct = self.correctly_answered.get()
        num_wrong = self.incorrectly_answered.get()
        # deletes the current frame
        # and sends user to Stats
        self.frame.destroy()
        Stats(num_correct, num_wrong)

    def toHelp(self):
        """Destroy this screen and open the Help screen."""
        # deletes the current frame
        # and sends user to Help
        self.frame.destroy()
        Help()
class Stats:
    """Score screen: shows right/wrong counts with a button back to Game."""

    def __init__(self, correctly_answered, incorrectly_answered):
        # set up and grid first displayed frame
        self.frame = Frame()
        self.frame.grid()
        # creates design for the main Stats frame
        self.heading_frame = Frame(self.frame, width=500, height=20, bg="#CCE5FF")
        self.under_heading_frame = Frame(self.frame, width=500, height=6, bg="#BDDEFF")
        self.blank_frame = Frame(self.frame, height=50)
        self.heading_label = Label(self.frame, text="Statistics", font=("Helvetica 15 bold"), fg="#6C6C6C", justify=CENTER)
        self.second_blank_frame = Frame(self.frame, height=50)
        self.instructions_text = Label(self.frame, text="Answered correctly: {}\nAnswered incorrectly: {}\n".format(correctly_answered, incorrectly_answered), font=("Helvetica 12"), justify=CENTER)
        self.third_blank_frame = Frame(self.frame, width=500, height=50)
        self.dismiss_button = Button(self.frame, text="Dismiss", font=("Helvetica 12"), width=15, command=lambda: self.dismissStats())
        self.fourth_blank_frame = Frame(self.frame, width=500, height=35)
        # grid all created objects above
        self.heading_frame.grid()
        self.under_heading_frame.grid()
        self.blank_frame.grid()
        self.heading_label.grid()
        self.second_blank_frame.grid()
        self.instructions_text.grid()
        self.third_blank_frame.grid()
        self.dismiss_button.grid()
        self.fourth_blank_frame.grid()

    # function to send user back to Game class
    def dismissStats(self):
        """Destroy this screen and return to a fresh Game screen."""
        # deletes the current frame
        # and sends user to Game
        self.frame.destroy()
        Game()
class Help:
    """Instructions screen with a button back to Game."""

    def __init__(self):
        # set up and grid first displayed frame
        self.frame = Frame()
        self.frame.grid()
        # creates design for the main Help frame
        self.heading_frame = Frame(self.frame, width=500, height=20, bg="#CCE5FF")
        self.under_heading_frame = Frame(self.frame, width=500, height=6, bg="#BDDEFF")
        self.blank_frame = Frame(self.frame, height=50)
        self.heading_label = Label(self.frame, text="Help and Instructions", font=("Helvetica 15 bold"), fg="#6C6C6C", justify=CENTER)
        self.second_blank_frame = Frame(self.frame, height=50)
        self.instructions_text = Label(self.frame, text="Begin the quiz by clicking the Play button.\n"
                                       "Type your answer in the entry form below\n"
                                       "the question and click the button to submit it.", font=("Helvetica 12"), justify=CENTER)
        self.third_blank_frame = Frame(self.frame, width=500, height=50)
        self.dismiss_button = Button(self.frame, text="Dismiss", font=("Helvetica 12"), width=15, command=lambda: self.dismissHelp())
        self.fourth_blank_frame = Frame(self.frame, width=500, height=35)
        # grid all created objects above
        self.heading_frame.grid()
        self.under_heading_frame.grid()
        self.blank_frame.grid()
        self.heading_label.grid()
        self.second_blank_frame.grid()
        self.instructions_text.grid()
        self.third_blank_frame.grid()
        self.dismiss_button.grid()
        self.fourth_blank_frame.grid()

    # function to send user back to Game class
    def dismissHelp(self):
        """Destroy this screen and return to a fresh Game screen."""
        # deletes the current frame
        # and sends user to Game
        self.frame.destroy()
        Game()
# tkinter
# Application entry point: build the root window and show the start screen.
gui = Tk()
gui.title("Math Quiz")
# Keep a reference to the start screen so it is not garbage-collected.
var = Start(gui)
gui.mainloop()
21,492 | b8084026cae1f98bf40cac1ff643224a40c55834 | class Solution:
def distanceBetweenBusStops(self, distance: List[int], start: int, destination: int) -> int:
if start > destination:
start, destination = destination, start
clockwise = sum(distance[start:destination])
counterclockwise = sum(distance) - clockwise
return min(clockwise, counterclockwise) |
21,493 | 893901a590937bda99ac38493bc6472f7ec589a3 | from flask import jsonify
from dao.buyer import BuyerDAO
class BuyerHandler:
    """Request handlers for buyer resources.

    Translates positional DB rows (as returned by BuyerDAO) into
    JSON-friendly dicts and implements the CRUD/search endpoints.
    """

    # Column order of a buyer row as returned by BuyerDAO.
    _BUYER_KEYS = ('b_id', 'u_name', 'u_lastname', 'u_email', 'u_password',
                   'u_address', 'u_city', 'u_region', 'u_phone', 'u_age')

    # Column order of a transaction row.
    _TRANSACTION_KEYS = ('t_id', 's_id', 'b_id', 'ba_id', 'c_id', 'r_id',
                         't_qty', 't_total', 't_date', 't_donation',
                         't_reservation')

    # Resource categories whose type label equals the category label.
    _SIMPLE_CATEGORIES = {3: "Baby Food", 4: "Medications", 5: "Canned Food",
                          6: "Dry Food", 7: "Ice", 8: "Medical Devices",
                          9: "Heavy Equipment", 10: "Tools", 11: "Clothing",
                          12: "Batteries"}

    # Subtype labels for water (no default: unknown water subtypes set no
    # r_type, matching the original behavior) and for fuel ("Propane" default).
    _WATER_TYPES = {1: "Bottled Water", 2: "1 Gallon Water"}
    _FUEL_TYPES = {1: "Diesel", 2: "Gasoline"}

    def build_buyer_dict(self, row):
        """Map a positional buyer row to a dict keyed by column name.

        Assumes the row carries all ten buyer columns in order.
        """
        return dict(zip(BuyerHandler._BUYER_KEYS, row))

    def build_buyer_attributes(self, b_id, u_name, u_lastname, u_email, u_password, u_address, u_city, u_region, u_phone, u_age):
        """Build a buyer dict from individual attribute values."""
        values = (b_id, u_name, u_lastname, u_email, u_password, u_address,
                  u_city, u_region, u_phone, u_age)
        return dict(zip(BuyerHandler._BUYER_KEYS, values))

    def build_transaction_dict(self, row):
        """Map a positional transaction row to a dict keyed by column name."""
        return dict(zip(BuyerHandler._TRANSACTION_KEYS, row))

    def build_resource_dict(self, row):
        """Map a resource row to a dict, decoding category/type codes.

        Row layout: (r_id, r_name, category_code, type_code, rq_qty, rq_date).
        """
        result = {'r_id': row[0], 'r_name': row[1]}
        category, subtype = row[2], row[3]
        if category == 1:
            result['r_category'] = "Water"
            # NOTE: unknown water subtypes leave r_type unset (original quirk).
            if subtype in BuyerHandler._WATER_TYPES:
                result['r_type'] = BuyerHandler._WATER_TYPES[subtype]
        elif category == 2:
            result['r_category'] = "Fuel"
            result['r_type'] = BuyerHandler._FUEL_TYPES.get(subtype, "Propane")
        elif category in BuyerHandler._SIMPLE_CATEGORIES:
            label = BuyerHandler._SIMPLE_CATEGORIES[category]
            result['r_category'] = label
            result['r_type'] = label
        else:
            # Any other category code is treated as a power generator.
            result['r_category'] = "Power Generators"
            fuel = BuyerHandler._FUEL_TYPES.get(subtype, "Propane")
            result['r_type'] = fuel + " Power Generator"
        result['rq_qty'] = row[4]
        result['rq_date'] = row[5]
        return result

    def getAllBuyers(self):
        """Return all buyers as JSON, or 404 if there are none."""
        dao = BuyerDAO()
        buyer_list = dao.getAllBuyers()
        if not buyer_list:
            return jsonify(Error="Buyer Not Found"), 404
        return jsonify(Buyers=[self.build_buyer_dict(row) for row in buyer_list])

    def getBuyerById(self, b_id):
        """Return a single buyer by id, or 404 if it does not exist."""
        dao = BuyerDAO()
        buyer = dao.getBuyerById(b_id)
        if not buyer:
            return jsonify(Error="Buyer Not Found"), 404
        return jsonify(Buyer=self.build_buyer_dict(buyer))

    def getResourcesByBuyerId(self, b_id):
        """Return the resources requested by a buyer, or 404."""
        dao = BuyerDAO()
        rows = dao.getResourcesByBuyerId(b_id)
        if not rows:
            return jsonify(Error="Buyer Not Found"), 404
        return jsonify(Resources=[self.build_resource_dict(row) for row in rows])

    def searchBuyers(self, args):
        """Search buyers by any combination of name, lastname and region.

        ``args`` is the request query-string mapping; any other combination
        of parameters is rejected as malformed (400).
        """
        name = args.get("name")
        lastname = args.get("lastname")
        region = args.get("region")
        dao = BuyerDAO()
        if (len(args) == 3) and name and lastname and region:
            buyer_list = dao.getBuyerByRegionNameAndLastName(region, name, lastname)
        elif (len(args) == 2) and name and lastname:
            buyer_list = dao.getBuyerByNameandLastName(name, lastname)
        elif (len(args) == 2) and name and region:
            buyer_list = dao.getBuyerByNameandRegion(name, region)
        elif (len(args) == 2) and lastname and region:
            buyer_list = dao.getBuyerByLastNameandRegion(lastname, region)
        elif (len(args) == 1) and name:
            buyer_list = dao.getBuyerByName(name)
        elif (len(args) == 1) and lastname:
            buyer_list = dao.getBuyerByLastName(lastname)
        elif (len(args) == 1) and region:
            buyer_list = dao.getBuyerByRegion(region)
        else:
            return jsonify(Error="Malformed query string"), 400
        if not buyer_list:
            return jsonify(Error="Buyer Not Found"), 404
        return jsonify(Buyers=[self.build_buyer_dict(row) for row in buyer_list])

    def getTransactionsByBuyerId(self, b_id):
        """Return the transactions made by a buyer, or 404."""
        dao = BuyerDAO()
        rows = dao.getTransactionsByBuyerId(b_id)
        if not rows:
            return jsonify(Error="Transaction Not Found"), 404
        return jsonify(Transactions=[self.build_transaction_dict(row) for row in rows])

    def _extract_buyer_form(self, form):
        """Pull the nine buyer attributes (all but b_id) out of a form.

        Raises KeyError when an expected field is missing, mirroring the
        original direct-indexing behavior.
        """
        return tuple(form[key] for key in BuyerHandler._BUYER_KEYS[1:])

    def insertBuyer(self, form):
        """Create a buyer from a 9-field form; 201 on success, 400 otherwise."""
        if len(form) != 9:
            return jsonify(Error="Malformed post request"), 400
        values = self._extract_buyer_form(form)
        # All nine attributes must be truthy, as in the original check.
        if not all(values):
            return jsonify(Error="Unexpected attributes in post request"), 400
        dao = BuyerDAO()
        b_id = dao.insert(*values)
        result = self.build_buyer_attributes(b_id, *values)
        return jsonify(Buyer=result), 201

    def deleteBuyer(self, b_id):
        """Delete a buyer by id; 404 if it does not exist."""
        dao = BuyerDAO()
        if not dao.getBuyerById(b_id):
            return jsonify(Error="Buyer not found."), 404
        dao.delete(b_id)
        return jsonify(DeleteStatus="OK"), 200

    def updateBuyer(self, b_id, form):
        """Update an existing buyer from a 9-field form.

        Returns 404 for an unknown id, 400 for a malformed form, and 201
        with the updated record on success.
        """
        dao = BuyerDAO()
        if not dao.getBuyerById(b_id):
            return jsonify(Error="Buyer not found."), 404
        if len(form) != 9:
            return jsonify(Error="Malformed update request"), 400
        values = self._extract_buyer_form(form)
        if not all(values):
            return jsonify(Error="Unexpected attributes in update request"), 400
        # Reuse the DAO created above (the original constructed a second one).
        dao.update(b_id, *values)
        result = self.build_buyer_attributes(b_id, *values)
        return jsonify(Buyer=result), 201
21,494 | 1eadecdab730f2944e3c059a3ca8954fe0e0de5f | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
时间:2021/3/29 23:19
LeetCode原题链接:https://leetcode-cn.com/problems/increasing-decreasing-string/
"""
class Solution:
    """Approach 1: repeatedly sweep the alphabet.

    Walk a-z picking one occurrence of each letter still present, then walk
    z-a, and repeat until the input is exhausted (per the problem's
    "increasing decreasing string" rule).
    """
    def sortString(self, s: str) -> str:
        alphabet = [chr(code) for code in range(ord("a"), ord("z") + 1)]
        remaining = list(s)
        pieces = []
        while remaining:
            # One ascending pass, then one descending pass.
            for direction in (alphabet, alphabet[::-1]):
                for letter in direction:
                    if letter in remaining:
                        pieces.append(letter)
                        remaining.remove(letter)
        return "".join(pieces)
class Solution2:
    """Approach 2: counting sort over a 26-slot bucket array.

    Count each letter once, then alternate ascending and descending sweeps
    over the buckets, emitting one letter per non-empty bucket per sweep.
    """
    def sortString(self, s: str) -> str:
        counts = [0] * 26
        base = ord("a")
        for ch in s:
            counts[ord(ch) - base] += 1
        out = []
        total = len(s)
        while len(out) < total:
            # Ascending sweep followed by a descending sweep.
            for idx in list(range(26)) + list(range(25, -1, -1)):
                if counts[idx]:
                    out.append(chr(idx + base))
                    counts[idx] -= 1
        return "".join(out)
if __name__ == "__main__":
    # Exercise both implementations against the LeetCode sample cases,
    # printing the expected and actual result for each.
    cases = [("aaaabbbbcccc", "abccbaabccba"), ("leetcode", "cdelotee")]
    for solver in (Solution(), Solution2()):
        for s, expected in cases:
            print(f"预期:{expected}")
            print(f"实际:{solver.sortString(s)}")
21,495 | c157b9ba8bdf3fac8af6c8288b1005853d44b4b3 | """
The metainterpreter and metabuiltins.

There are seven Forth registers: W, IP, PSP, RSP, X, UP, and TOS. They are
assigned to hardware registers as follows:

+---+--+
|IP |J |
|PSP|SP|
|RSP|Y |
|TOS|Z |
+---+--+

To start the metainterpreter, set RSP to point to a safe area of return stack,
put the address of QUIT into IP, and then call IP.
"""
from StringIO import StringIO
from struct import pack
from cauliflower.assembler import (A, ADD, AND, B, BOR, C, I, IFE, IFN, J,
MUL, PEEK, PC, POP, PUSH, SET, SP, SUB, X,
XOR, Y, Z, Absolute, assemble, call, until)
from cauliflower.utilities import library, read, write
class EvenStringIO(StringIO):
    """A StringIO addressed in 16-bit words: one word is two bytes."""
    def seek(self, value, *args, **kwargs):
        # Translate a word offset into the underlying byte offset.
        StringIO.seek(self, value * 2, *args, **kwargs)
    def tell(self):
        # Report the position in words; a stray odd byte offset means the
        # image has become misaligned, which is a hard error.
        byte_offset = StringIO.tell(self)
        if byte_offset % 2:
            raise Exception("Offset %d is odd!" % byte_offset)
        return byte_offset // 2
# Flag bits OR'd into a dictionary entry's length word (see create/hidden/
# immediate below).
IMMEDIATE = 0x4000
HIDDEN = 0x8000
def PUSHRSP(register):
    """
    Emit machine code pushing *register* onto the return stack.
    """
    # Pre-decrement RSP (Y), then store through the new pointer.
    return assemble(SUB, Y, 0x1) + assemble(SET, [Y], register)
def POPRSP(register):
    """
    Emit machine code popping the top of the return stack into *register*.
    """
    # Load through RSP (Y), then post-increment the pointer.
    return assemble(SET, register, [Y]) + assemble(ADD, Y, 0x1)
def _push(register):
    """
    Emit code pushing *register* onto the data stack (TOS cached in Z).
    """
    # Spill the cached TOS to memory, then cache the new value in Z.
    return assemble(SET, PUSH, Z) + assemble(SET, Z, register)
def _pop(register):
    """
    Emit code popping the data stack into *register* (TOS cached in Z).
    """
    # Hand the cached TOS to the caller, then refill the cache from memory.
    return assemble(SET, register, Z) + assemble(SET, Z, POP)
class MetaAssembler(object):
    """
    Assembler which pulls threads together to form a Forth core.

    Owns the output image (``self.space``), the dictionaries mapping word
    names to addresses (``asmwords``/``codewords``/``datawords``), and the
    helpers that lay down primitive, assembly-level, and threaded words.
    """
    # Pointer to the previous word defined, used to chain all words onto a
    # linked list.
    previous = 0x0
    # Workspace address.
    workspace = 0x7000
    def __init__(self):
        # Hold codewords for threads as we store them.
        # asmwords: raw primitive entry points; codewords: word code
        # addresses; datawords: word header addresses.
        self.asmwords = {}
        self.codewords = {}
        self.datawords = {}
        # Initialize the space.
        self.space = EvenStringIO()
        self.bootloader()
        self.lib()
    def bootloader(self):
        """
        Set up the bootloader.

        Lays down the reset code, reserves the core variables (QUIT slot,
        STATE, HERE, LATEST, FB), and writes the NEXT/EXIT/ENTER primitives
        that drive the inner interpreter.
        """
        # Point RSP (Y) at the return stack, IP (J) at the QUIT slot, and
        # jump through it.
        self.space.write(assemble(SET, Y, 0xd000))
        self.space.write(assemble(SET, J, 0x5))
        self.space.write(assemble(SET, PC, [J]))
        # Allocate space for the address of QUIT.
        self.space.write("\x00\x00")
        # Allocate space for STATE.
        self.STATE = self.space.tell()
        self.space.write("\x00\x00")
        # And HERE.
        self.HERE = self.space.tell()
        self.space.write("\x00\x00")
        # And LATEST, too.
        self.LATEST = self.space.tell()
        self.space.write("\x00\x00")
        # Don't forget FB.
        self.FB = self.space.tell()
        self.space.write("\x80\x00")
        # NEXT. Increment IP and move through it.
        ucode = assemble(ADD, J, 0x1)
        ucode += assemble(SET, PC, [J])
        self.prim("next", ucode)
        # EXIT. Pop RSP into IP and then call NEXT.
        ucode = POPRSP(J)
        ucode += assemble(SET, PC, self.asmwords["next"])
        self.prim("exit", ucode)
        # ENTER. Save IP to RSP, dereference IP to find the caller, enter the
        # new word, call NEXT.
        ucode = PUSHRSP(J)
        ucode += assemble(SET, J, [J])
        ucode += assemble(SET, PC, self.asmwords["next"])
        self.prim("enter", ucode)
    def lib(self):
        """
        Copy the utility library routines into the core, recording where
        each one lands in ``self.library``.
        """
        self.library = {}
        for name in library:
            print "Adding library function", name
            self.library[name] = self.space.tell()
            self.space.write(library[name]())
    def finalize(self):
        """
        Back-patch HERE, LATEST and the QUIT pointer now that every word
        has been laid down, then restore the file pointer.
        """
        # Write HERE and LATEST.
        location = self.space.tell()
        here = pack(">H", location)
        latest = pack(">H", self.previous)
        self.space.seek(self.HERE)
        self.space.write(here)
        self.space.seek(self.LATEST)
        self.space.write(latest)
        self.space.seek(0x5)
        self.space.write(pack(">H", self.codewords["quit"]))
        # Reset file pointer.
        self.space.seek(location)
    def prim(self, name, ucode):
        """
        Write primitive assembly directly into the core.
        """
        self.asmwords[name] = self.space.tell()
        self.space.write(ucode)
    def create(self, name, flags):
        """
        Write a header into the core and update the previous header marker.

        The header is (link, length|flags, UTF-16-BE name); the code word
        address is recorded immediately after it.
        """
        location = self.space.tell()
        self.datawords[name] = location
        print "Creating data word", name, "at 0x%x" % location
        length = len(name)
        if flags:
            length |= flags
        header = pack(">HH", self.previous, length)
        # Swap locations.
        self.previous = location
        self.space.write(header)
        self.space.write(name.encode("utf-16-be"))
        location = self.space.tell()
        print "Creating code word", name, "at 0x%x" % location
        self.codewords[name] = location
    def asm(self, name, ucode, flags=None):
        """
        Write an assembly-level word into the core.
        Here's what the word looks like:
        |prev|len |name|asm |NEXT|
        """
        print "Adding assembly word %s" % name
        self.create(name, flags)
        self.space.write(ucode)
        self.space.write(assemble(SET, PC, self.asmwords["next"]))
    def thread(self, name, words, flags=None):
        """
        Assemble a thread of words into the core.
        Here's what a thread looks like:
        |prev|len |name|ENTER|word|EXIT|
        """
        print "Adding Forth thread %s" % name
        self.create(name, flags)
        # ENTER/DOCOL bytecode.
        ucode = assemble(SET, PC, self.asmwords["enter"])
        self.space.write(ucode)
        for word in words:
            if isinstance(word, int):
                # Bare integers are written as literal cells.
                self.space.write(pack(">H", word))
            elif word in self.codewords:
                self.space.write(pack(">H", self.codewords[word]))
            else:
                raise Exception("Can't reference unknown word %r" % word)
        self.space.write(pack(">H", self.asmwords["exit"]))
ma = MetaAssembler()
# Deep primitives.
ma.prim("read", read(A))
ma.prim("write", write(A))
# Top of the line: Go back to the beginning of the string.
ucode = assemble(SET, B, 0x0)
ucode += assemble(SET, C, ma.workspace)
# Read a character into A.
ucode += call(ma.asmwords["read"])
ucode += assemble(SET, [C], A)
ucode += assemble(ADD, B, 0x1)
ucode += assemble(ADD, C, 0x1)
# If it's a space, then we're done. Otherwise, go back to reading things from
# the keyboard.
ucode = until(ucode, (IFN, 0x20, [C]))
ucode += assemble(SET, C, ma.workspace)
ma.prim("word", ucode)
preamble = assemble(SET, C, 0x0)
ucode = assemble(MUL, C, 10)
ucode += assemble(SET, X, [A])
ucode += assemble(SUB, X, ord("0"))
ucode += assemble(ADD, C, X)
ucode += assemble(ADD, A, 0x1)
ucode += assemble(SUB, B, 0x1)
ucode = until(ucode, (IFE, B, 0x0))
ma.prim("snumber", preamble + ucode)
# Compiling words.
ucode = _push([J])
ucode += assemble(ADD, J, 0x1)
ma.asm("literal", ucode)
ma.asm("'", ucode)
ucode = assemble(SET, PC, Z)
ma.asm("call", ucode)
# Low-level memory manipulation.
ucode = assemble(SET, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("!", ucode)
# TOS lets us cheat hard.
ucode = assemble(SET, Z, [Z])
ma.asm("@", ucode)
ucode = assemble(ADD, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("+!", ucode)
ucode = assemble(SUB, [Z], PEEK)
# Move the stack back, and then pop the next word into TOS.
ucode += assemble(ADD, SP, 0x1)
ucode += _pop(Z)
ma.asm("-!", ucode)
# Low-level branching.
ucode = assemble(ADD, J, [J + 0x1])
ma.asm("branch", ucode)
# Ugh.
ucode = assemble(IFN, Z, 0x0)
ucode += assemble(ADD, J, [J + 0x1])
ucode += assemble(IFE, Z, 0x0)
ucode += assemble(ADD, J, 0x1)
ma.asm("0branch", ucode)
# Goddammit DCPU!
ucode = assemble(SUB, J, [J + 0x1])
ma.asm("nbranch", ucode)
ucode = assemble(IFN, Z, 0x0)
ucode += assemble(SUB, J, [J + 0x1])
ucode += assemble(IFE, Z, 0x0)
ucode += assemble(ADD, J, 0x1)
ma.asm("0nbranch", ucode)
# Low-level tests.
# I bet there's a trick to this. I'll revisit this later.
ucode = assemble(IFN, J, 0x0)
ucode += assemble(SET, A, 0x1)
ucode += assemble(IFE, J, 0x0)
ucode += assemble(SET, A, 0x0)
ucode += assemble(SET, J, A)
ma.asm("0=", ucode)
def IF(then, otherwise=None):
    """
    Build the threaded-code skeleton for an IF/ELSE.

    ``then`` and ``otherwise`` are lists of word names; the result tests the
    flag on TOS, skips over ``then`` when it is false, and (when an else
    branch is present) appends a forward jump over it.

    Fixes two hazards in the original: a mutable default argument
    (``otherwise=[]``) and the in-place ``then += ...`` that mutated the
    caller's list.
    """
    if otherwise is None:
        otherwise = []
    if otherwise:
        # Jump over the else branch once the then branch has run.
        then = then + ["branch", len(otherwise)]
    return ["0=", "0branch", len(then)] + then + otherwise
def UNTIL(loop):
    """
    Build a post-test loop: run ``loop``, then branch back to its start
    while the flag on TOS is zero.
    """
    backjump = ["0nbranch", len(loop)]
    return loop + backjump
# Main stack manipulation.
ucode = assemble(SET, PUSH, Z)
ma.asm("dup", ucode)
# Return stack manipulation.
ucode = _push(0xd000)
ma.asm("r0", ucode)
ucode = _push(Y)
ma.asm("rsp@", ucode)
ucode = _pop(Y)
ma.asm("rsp!", ucode)
ucode = _push([Y])
ucode += assemble(ADD, Y, 0x1)
ma.asm("r>", ucode)
ucode = assemble(SUB, Y, 0x1)
ucode += _pop([Y])
ma.asm(">r", ucode)
ucode = _push([Y])
ma.asm("r@", ucode)
ucode = _pop([Y])
ma.asm("r!", ucode)
ucode = assemble(ADD, Y, 0x1)
ma.asm("rdrop", ucode)
# Arithmetic.
ucode = assemble(ADD, Z, POP)
ma.asm("+", ucode)
# Low-level input.
ucode = assemble(SET, PUSH, Z)
ucode += assemble(SET, A, Z)
ucode += call(ma.asmwords["word"])
ma.asm("key", ucode)
# High-level input.
ucode = call(ma.asmwords["word"])
ucode += _push(B)
ucode += _push(C)
ma.asm("word", ucode)
ucode = _pop(A)
ucode += _pop(B)
ucode += call(ma.asmwords["snumber"])
ucode += _push(C)
ma.asm("snumber", ucode)
# Output.
ucode = assemble(SET, A, [ma.FB])
ucode += _pop([A])
ucode += assemble(ADD, [ma.FB], 0x1)
ma.asm("emit", ucode)
# Global access.
# This could be done in Forth, but it's so small in assembly!
ucode = _pop([ma.HERE])
ucode += assemble(ADD, [ma.HERE], 0x1)
ma.asm(",", ucode)
ucode = assemble(SET, [ma.STATE], 0x0)
ma.asm("[", ucode)
ucode = assemble(SET, [ma.STATE], 0x1)
ma.asm("]", ucode)
ucode = _push([ma.LATEST])
ma.asm("latest", ucode)
# Compiler stuff.
ucode = call(ma.asmwords["read"])
ucode += _push([C])
ma.asm("char", ucode)
# Pop the target address (below TOS) into a working register. Leave length on
# TOS.
preamble = assemble(SET, A, POP)
# Use B as our linked list pointer.
preamble += assemble(SET, B, ma.LATEST)
# Top of the loop. Dereference B to move along the list.
ucode = assemble(SET, B, [B])
# Compare lengths; if they don't match, go to the next one.
ucode = until(ucode, (IFN, [B + 0x1], Z))
# memcmp() the strings.
ucode += assemble(ADD, B, 0x1)
ucode += assemble(SET, C, A)
ucode += assemble(SET, A, Z)
ucode += call(ma.library["memcmp"])
ucode += assemble(SUB, B, 0x1)
# If it succeeded, push the address back onto the stack and then jump out.
ucode += assemble(IFN, A, 0x0)
ucode += assemble(SET, Z, B)
ucode += assemble(IFN, A, 0x0)
ucode += assemble(ADD, PC, 0x4)
# Loop until we hit NULL.
ucode = until(ucode, (IFE, B, 0x0))
# We finished the loop and couldn't find anything. Guess we'll just set Z to
# 0x0 and exit.
ucode += assemble(SET, Z, 0x0)
ma.asm("find", preamble + ucode)
ma.thread("+1", ["literal", 0x1, "+"])
ma.thread(">cfa", ["+1", "dup", "@", "+", "+1"])
# Grab HERE. It's going to live in A for a bit.
preamble = assemble(SET, A, [ma.HERE])
# Write LATEST to HERE, update LATEST.
preamble += assemble(SET, [A], [ma.LATEST])
preamble += assemble(SET, [ma.LATEST], A)
# Move ahead, write length.
preamble += assemble(ADD, A, 0x1)
preamble += assemble(SET, [A], Z)
# Set the hidden flag.
preamble += assemble(BOR, [A], HIDDEN)
# SP is nerfed, so grab the source address and put it in B.
preamble += assemble(SET, B, PEEK)
# Loop. Copy from the source address to the target address.
ucode = assemble(SUB, Z, 0x1)
ucode += assemble(SET, [A], [B])
ucode += assemble(ADD, A, 0x1)
ucode += assemble(ADD, B, 0x1)
# Break when we have no more bytes to copy.
ucode = until(ucode, (IFE, Z, 0x0))
# Write out the new HERE.
ucode += assemble(SET, [ma.HERE], A)
# Get the stack to be sane again. Shift it down and then pop, same as 2drop.
ucode += assemble(ADD, SP, 0x1)
ucode += assemble(SET, Z, POP)
ma.asm("create", preamble + ucode)
# The stack points to the top of the header. Move forward one...
ucode = assemble(ADD, Z, 0x1)
# Now XOR in the hidden flag.
ucode += assemble(XOR, [Z], HIDDEN)
# And pop the stack.
ucode += assemble(SET, Z, POP)
ma.asm("hidden", ucode)
# We get to grab LATEST ourselves. On the plus side, no stack touching.
ucode = assemble(SET, A, ma.LATEST)
# XOR that flag!
ucode += assemble(XOR, [A + 0x1], IMMEDIATE)
ma.asm("immediate", ucode)
ucode = assemble(AND, Z, IMMEDIATE)
ma.asm("immediate?", ucode)
ma.thread(":", [
"word",
"create",
"literal", ma.asmwords["enter"],
",",
"latest",
"@",
"hidden",
"]",
])
ma.thread(";", [
"literal", ma.asmwords["exit"],
",",
"latest",
"@",
"hidden",
"[",
], flags=IMMEDIATE)
ma.thread("interpret-found", [
"dup",
"+1",
"immediate?",
] + IF([
">cfa",
"call",
], [
">cfa",
",",
])
)
ma.thread("interpret", [
"word",
"find",
"dup",
] + IF([
"interpret-found",
])
)
ma.thread("quit", ["r0", "rsp!", "interpret", "nbranch", 0x2])
ma.finalize()
|
21,496 | c4c94b4b03513be075531eaca7a9963a99f62144 | # The code in this file is modified after
# https://github.com/dvolgyes/zenodo_get/blob/master/zenodo_get/zget.py
import sys
import json
import signal
import time
import hashlib
from contextlib import contextmanager
from .utils import download
from ai4water.backend import requests, os
# Shared state mutated by the SIGINT handler below.
abort_signal = False  # set True after the first Ctrl+C; download loop checks it
abort_counter = 0  # number of Ctrl+C presses seen so far
exceptions = False  # if True, a second Ctrl+C raises instead of sys.exit(1)
def ctrl_c(func):
    """Decorator: install *func* as the SIGINT (Ctrl+C) handler.

    Returns the function unchanged so it stays callable under its own name.
    """
    signal.signal(signal.SIGINT, func)
    return func
@ctrl_c
def handle_ctrl_c(*args, **kwargs):
    """SIGINT handler: flag the download loop to stop.

    The first Ctrl+C only sets ``abort_signal``; a second one aborts hard,
    either by raising (when ``exceptions`` is set) or via ``sys.exit(1)``.
    """
    global abort_signal
    global abort_counter
    global exceptions
    abort_signal = True
    abort_counter += 1
    if abort_counter < 2:
        return
    if exceptions:
        raise Exception('\n Immediate abort. There might be unfinished files.')
    sys.exit(1)
#see https://stackoverflow.com/questions/431684/how-do-i-change-the-working-directory-in-python/24176022#24176022
@contextmanager
def cd(newdir):
    """Context manager: chdir into *newdir*, restoring the old cwd on exit."""
    olddir = os.getcwd()
    os.chdir(os.path.expanduser(newdir))
    try:
        yield
    finally:
        # Always return to where we started, even on error.
        os.chdir(olddir)
def check_hash(filename, checksum):
    """Compare a file against an ``algorithm:hexdigest`` checksum string.

    Returns ``(expected, actual)`` digests; ``actual`` is the literal string
    ``'invalid'`` when the file does not exist.
    """
    algorithm, expected = checksum.split(':')
    if not os.path.exists(filename):
        return expected, 'invalid'
    digest = hashlib.new(algorithm)
    with open(filename, 'rb') as handle:
        # Stream the file in 4 KiB chunks to keep memory flat.
        for chunk in iter(lambda: handle.read(4096), b''):
            digest.update(chunk)
    return expected, digest.hexdigest()
def download_from_zenodo(
        outdir,
        doi,
        cont=False,
        tolerate_error=False,
        include: list = None,
        **kwargs
):
    """Download the files of a Zenodo record into *outdir*.

    Fixes over the original: corrected the misspelled assertion message
    ("invlid" -> "invalid") and dropped an unused exception binding.

    :param outdir: Output directory, created if necessary. Default: current directory.
    :param doi: str, Zenodo DOI
    :param cont: True, Do not continue previous download attempt. (Default: continue.)
    :param tolerate_error: False, Continue with next file if error happens.
    :param include : files to download. Files which are not in include will not be
        downloaded.
    :param kwargs:
        sandbox: bool, Use Zenodo Sandbox URL.
        timeout: int, Connection time-out. Default: 15 [sec].
        pause: float, Seconds to wait before retry attempt, e.g. 0.5
        retry: int, Number of times to Retry on error.
    """
    _wget = kwargs.get('wget', None)
    md5 = kwargs.get('md5', False)
    keep = kwargs.get('keep', False)
    timeout = kwargs.get('timeout', 15)
    sandbox = kwargs.get('sandbox', False)
    pause = kwargs.get('pause', 0.5)
    retry = kwargs.get('retry', 0)
    with cd(outdir):
        # Resolve the DOI (or accept a full URL) to a Zenodo record URL.
        url = doi
        if not url.startswith('http'):
            url = 'https://doi.org/' + url
        try:
            r = requests.get(url, timeout=timeout)
        except requests.exceptions.ConnectTimeout:
            raise TimeoutError("Connection timeout.")
        except Exception:
            raise ConnectionError
        if not r.ok:
            raise ValueError(f'DOI {doi} could not be resolved. Try again, or use record ID.')
        recordID = r.url.split('/')[-1]
        if not sandbox:
            url = 'https://zenodo.org/api/records/'
        else:
            url = 'https://sandbox.zenodo.org/api/records/'
        # Fetch the record metadata (file list, sizes, checksums).
        try:
            r = requests.get(url + recordID, timeout=timeout)
        except requests.exceptions.ConnectTimeout:
            raise TimeoutError('Connection timeout during metadata reading.')
        except Exception:
            raise ConnectionError('Connection error during metadata reading.')
        if r.ok:
            js = json.loads(r.text)
            files = js['files']
            if include:
                assert isinstance(include, list)
                filenames = [f['key'] for f in files]
                assert all([file in filenames for file in include]), f"invalid {include}"
                # only consider those files which are in include
                files = [file for file in files if file['key'] in include]
            total_size = sum(f['size'] for f in files)
            # NOTE(review): md5 defaults to False above, so `md5 is not None`
            # is always True and md5sums.txt is always written — confirm intent.
            if md5 is not None:
                with open('md5sums.txt', 'wt') as md5file:
                    for f in files:
                        fname = f['key']
                        checksum = f['checksum'].split(':')[-1]
                        md5file.write(f'{checksum} {fname}\n')
            if _wget is not None:
                # Only emit the download links (to stdout or a file).
                if _wget == '-':
                    for f in files:
                        link = f['links']['self']
                        print(link)
                else:
                    with open(_wget, 'wt') as wgetfile:
                        for f in files:
                            fname = f['key']
                            link = 'https://zenodo.org/record/{}/files/{}'.format(
                                recordID, fname
                            )
                            wgetfile.write(link + '\n')
            else:
                print('Title: {}'.format(js['metadata']['title']))
                print('Keywords: ' +
                      (', '.join(js['metadata'].get('keywords', []))))
                print('Publication date: ' + js['metadata']['publication_date'])
                print('DOI: ' + js['metadata']['doi'])
                print('Total size: {:.1f} MB'.format(total_size / 2 ** 20))
                for f in files:
                    if abort_signal:
                        print('Download aborted with CTRL+C.')
                        print('Already successfully downloaded files are kept.')
                        break
                    link = f['links']['self']
                    size = f['size'] / 2 ** 20
                    print()
                    print(f'Link: {link} size: {size:.1f} MB')
                    fname = f['key']
                    checksum = f['checksum']
                    remote_hash, local_hash = check_hash(fname, checksum)
                    if remote_hash == local_hash and cont:
                        # Skip files that already match their checksum.
                        print(f'{fname} is already downloaded correctly.')
                        continue
                    # Retry the download up to `retry` extra times.
                    for _ in range(retry + 1):
                        try:
                            filename = download(link)
                        except Exception:
                            print(' Download error.')
                            time.sleep(pause)
                        else:
                            break
                    else:
                        print(' Too many errors.')
                        if not tolerate_error:
                            raise Exception('Download is aborted. Too many errors')
                        print(f' Ignoring (unknown) and downloading the next file.')
                        continue
                    h1, h2 = check_hash(filename, checksum)
                    if h1 == h2:
                        print(f'Checksum is correct. ({h1})')
                    else:
                        print(f'Checksum is INCORRECT!({h1} got:{h2})')
                        if not keep:
                            print(' File is deleted.')
                            os.remove(filename)
                        else:
                            print(' File is NOT deleted!')
                        if not tolerate_error:
                            sys.exit(1)
                else:
                    # for/else: the loop finished without a break.
                    print('All files have been downloaded.')
        else:
            raise Exception('Record could not get accessed.')
21,497 | 9e546b5224db9e18aab937169702f9ec5a1f406d | import logging
from .handlers import ChannelHandler
from .formatters import TextFormatter
__all__ = ['ChannelHandler', 'TextFormatter']
__version__ = '0.1.0'
def get_channel_logger(channel_id: str, auth_token: str, name='', timeout=10) -> logging.Logger:
    """Return a logger whose records are forwarded through a ChannelHandler.

    Note: a new handler is appended on every call, so calling this twice
    with the same *name* attaches two handlers to the same logger.
    """
    logger = logging.getLogger(name)
    logger.addHandler(ChannelHandler(
        channel_id=channel_id,
        bot_token=auth_token,
        level=logging.INFO,
        timeout=timeout,
    ))
    return logger
21,498 | 9fa02b914ab9edcfb9738b3796f75f3f0478ec55 | import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import c as clight
import scipy.optimize
from . import lumi_guido
# Frequently used angular constants.
twopi = 2 * np.pi
fourpi = 4 * np.pi
def crossing_spec(x1=0, x2=0, y1=0, y2=0, px1=0, px2=0, py1=0, py2=0, degree=False):
    """Convert the two beams' orbit coordinates at the IP into crossing
    parameters.

    Returns (half crossing angle, half separation, half common angle,
    half common offset, crossing-plane angle phi); phi is in degrees when
    ``degree=True``, otherwise radians.
    """
    # Coordinate differences give crossing/separation; sums give the common
    # angle/offset of the two beams.
    dpx, dpy = px1 - px2, py1 - py2
    dx, dy = x1 - x2, y1 - y2
    spx, spy = px1 + px2, py1 + py2
    sx, sy = x1 + x2, y1 + y2
    phi = np.arctan2(dpy, dpx)
    xing = np.sqrt(dpx**2 + dpy**2)
    sep = np.sqrt(dx**2 + dy**2)
    ang = np.sqrt(spx**2 + spy**2)
    off = np.sqrt(sx**2 + sy**2)
    if degree:
        phi *= 180 / np.pi
    return xing / 2, sep / 2, ang / 2, off / 2, phi
def orbit_spec(xing=0, sep=0, ang=0, off=0, phi=0, degree=False):
    """Inverse of crossing_spec: build the two beams' orbit coordinates
    (x1, x2, y1, y2, px1, px2, py1, py2) from the half crossing angle,
    separation, common angle, common offset and crossing-plane angle phi
    (degrees when ``degree=True``).
    """
    if degree:
        phi *= np.pi / 180
    cos_phi, sin_phi = np.cos(phi), np.sin(phi)
    # The crossing lies in the phi plane; separation, offset and common
    # angle are rotated 90 degrees from it.
    xing_h, xing_v = cos_phi * xing, sin_phi * xing
    off_h, off_v = -sin_phi * off, cos_phi * off
    sep_h, sep_v = -sin_phi * sep, cos_phi * sep
    ang_h, ang_v = -sin_phi * ang, cos_phi * ang
    return (
        sep_h + off_h,
        -sep_h + off_h,
        sep_v + off_v,
        -sep_v + off_v,
        xing_h + ang_h,
        -xing_h + ang_h,
        xing_v + ang_v,
        -xing_v + ang_v,
    )
class IP:
"""Interaction Point class
name: name of the IP
betx [m]: beta function at the IP in the horizontal plane
bety [m]: beta function at the IP in the vertical plane
sepx [m]: half horizontal separation at the IP
sepy [m]: half vertical separation at the IP
px [rad]: half horizontal crossing angle at the IP
py [rad]: half vertical crossing angle at the IP
dx[m]: half horizontal dispersion at the IP
dy[m]: half vertical dispersion at the IP
ccvx[rad]: half horizontal crab cavity voltage at the IP
ccvy[rad]: half vertical crab cavity voltage at the IP
ccrf[rad]: crab cavity RF frequency at the IP
cclag[rad]: crab cavity RF phase lag at the IP
visible_cross_section[mb]: visible cross section at the IP
total_cross_section[mb]: total cross section at the IP
"""
def set_crossing(self, xing, sep, ang, off, phi, degree=True):
(
self.x1,
self.x2,
self.y1,
self.y2,
self.px1,
self.px2,
self.py1,
self.py2,
) = orbit_spec(xing, sep, ang, off, phi, degree=degree)
def get_crossing(self, degree=True):
return crossing_spec(
self.x1,
self.x2,
self.y1,
self.y2,
self.px1,
self.px2,
self.py1,
self.py2,
degree=degree,
)
def get_phi(self, degree=True):
phi = np.arctan2(self.py, self.px)
if degree:
phi *= 180 / np.pi
return phi
@property
def phi(self):
return self.get_phi()
@phi.setter
def phi(self, phi):
self.set_crossing(*self.get_crossing(degree=True), phi=phi, degree=True)
@property
def sep(self):
return np.sqrt(self.sepx**2 + self.sepy**2)
@property
def sigx(self):
return np.sqrt(self.betx * self.emitx + self.dx**2)
@property
def sigy(self):
return np.sqrt(self.bety * self.emity + self.dy**2)
def sep_(self, sep):
cc = np.cos(self.phi)
ss = np.sin(self.phi)
self.sepx = -ss * sep
self.sepy = cc * sep
return self
    def __init__(
        self,
        name="ip5",
        betx=1,
        bety=1,
        alfx=0,
        alfy=0,
        sepx=0,
        sepy=0,
        px=0,
        py=0,
        dx=0,
        dy=0,
        ccx=0,
        ccy=0,
        # ccr12=23.3305,
        # ccr34=23.3305,
        ccrf=400e6,
        cclag=0,
        visible_cross_section=81,
        total_cross_section=81,
    ):
        """Store the IP optics, orbit and crab-cavity settings.

        See the class docstring for the meaning and units of each argument.
        """
        self.name = name
        self.betx = betx
        self.bety = bety
        self.alfx = alfx
        self.alfy = alfy
        self.sepx = sepx
        self.sepy = sepy
        self.px = px
        self.py = py
        self.dx = dx
        self.dy = dy
        # Dispersion slopes are fixed to zero here; luminosity() reads them.
        self.dpx = 0
        self.dpy = 0
        self.ccx = ccx
        self.ccy = ccy
        # self.ccr12 = ccr12
        # self.ccr34 = ccr34
        self.ccrf = ccrf
        self.cclag = cclag
        self.visible_cross_section = visible_cross_section
        self.total_cross_section = total_cross_section
def __repr__(self) -> str:
out = []
for kk in ["name", "betx", "bety", "sepx", "sepy", "px", "py", "ccx", "ccy"]:
vv = getattr(self, kk)
if vv != 0:
out.append(f"{kk}={vv!r}")
return f"IP({', '.join(out)})"
def clone(self, **kwargs):
ip = IP(
name=self.name,
betx=self.betx,
bety=self.bety,
alfx=self.alfx,
alfy=self.alfy,
sepx=self.sepx,
sepy=self.sepy,
px=self.px,
py=self.py,
dx=self.dx,
dy=self.dy,
ccx=self.ccx,
ccy=self.ccy,
# ccr12=self.ccr12,
# ccr34=self.ccr34,
ccrf=self.ccrf,
cclag=self.cclag,
visible_cross_section=self.visible_cross_section,
total_cross_section=self.total_cross_section,
)
for k, v in kwargs.items():
setattr(ip, k, v)
return ip
def pileup(self, bunch):
"""Pile-up"""
l = self.luminosity(bunch)
return l * self.visible_cross_section * 1e-31 / (bunch.nb * bunch.frev)
def normalized_crossing_angle(self, bunch):
"""Normalized crossing angle"""
phix = (self.px) / (np.sqrt(bunch.emitx / self.betx))
phiy = (self.py) / (np.sqrt(bunch.emity / self.bety))
return phix, phiy
def normalized_separation(self, bunch):
"""Normalized separation"""
nsepx = self.sepx / (np.sqrt(bunch.emitx * self.betx))
nsepy = self.sepy / (np.sqrt(bunch.emity * self.bety))
return nsepx, nsepy
# def crabing_angles(self, bunch):
# """Crabbing angles"""
# phix = self.ccr12 * self.ccvx / bunch.energy * twopi * self.ccrf / clight
# phiy = self.ccr34 * self.ccvy / bunch.energy * twopi * self.ccrf / clight
# return phix, phiy
def geometric_factor(self, bunch):
"""Geometric factor"""
sigx = np.sqrt(self.betx * bunch.emitx)
sigy = np.sqrt(self.bety * bunch.emity)
effx = self.px + self.ccx
effy = self.py + self.ccy
geox = 1 / np.sqrt(1 + ((bunch.sigz * effx) / sigx) ** 2)
geoy = 1 / np.sqrt(1 + ((bunch.sigz * effy) / sigy) ** 2)
return geox, geoy
def separation_factor(self, bunch):
"""Separation factor"""
sigx = np.sqrt(self.betx * bunch.emitx)
sigy = np.sqrt(self.bety * bunch.emity)
fx = np.exp(-self.sepx**2 / sigx**2)
fy = np.exp(-self.sepy**2 / sigy**2)
return fx, fy
    def lumi_headon(self, bunch):
        """Head-on luminosity: L0 = ppb^2 * nb * frev / (4*pi*sigx*sigy).

        No crossing-angle or separation reduction is applied here; those
        factors are composed on top of L0 in lumi_simple.
        ``fourpi`` is a module-level constant defined elsewhere in the file.
        """
        sigx = np.sqrt(self.betx * bunch.emitx)
        sigy = np.sqrt(self.bety * bunch.emity)
        L0 = (bunch.ppb**2 * bunch.nb * bunch.frev) / (fourpi * sigx * sigy)
        return L0
def lumi_simple(self, bunch):
L0 = self.lumi_headon(bunch)
geox, geoy = self.geometric_factor(bunch)
fx, fy = self.separation_factor(bunch)
L = L0 * geox * geoy * fx * fy
return L
    def luminosity(self, bunch, verbose=False):
        """Full luminosity of this IP for *bunch*, delegated to ``lumi_guido``.

        Both beams are taken as identical and anti-symmetric: beam 2 gets
        the opposite separation, crossing angle, alpha and crab voltage.
        ``lumi_guido``, ``twopi`` and ``clight`` are defined elsewhere in
        this file; argument units are assumed to match the attributes
        stored on IP/Bunch — TODO confirm against lumi_guido's docs.
        """
        # Crab transfer-matrix terms are pinned to 1 so that the voltages
        # below are a direct re-encoding of the crabbing angles ccx/ccy
        # (see the inverse conversion ccx = R12*V/E * 2*pi*f/c).
        ccr12 = 1
        ccr34 = 1
        ccvx = self.ccx / ccr12 * bunch.energy / twopi / self.ccrf * clight
        ccvy = self.ccy / ccr34 * bunch.energy / twopi / self.ccrf * clight
        return lumi_guido.luminosity(
            f=bunch.frev,
            nb=bunch.nb,
            N1=bunch.ppb,
            N2=bunch.ppb,
            # anti-symmetric separations and crossing angles for the two beams
            x_1=self.sepx,
            x_2=-self.sepx,
            y_1=self.sepy,
            y_2=-self.sepy,
            px_1=self.px,
            px_2=-self.px,
            py_1=self.py,
            py_2=-self.py,
            # both beams share energy, momentum spread, emittances, bunch length
            energy_tot1=bunch.energy,
            energy_tot2=bunch.energy,
            deltap_p0_1=bunch.delta,
            deltap_p0_2=bunch.delta,
            epsilon_x1=bunch.emitnx,
            epsilon_x2=bunch.emitnx,
            epsilon_y1=bunch.emitny,
            epsilon_y2=bunch.emitny,
            sigma_z1=bunch.sigz,
            sigma_z2=bunch.sigz,
            beta_x1=self.betx,
            beta_x2=self.betx,
            beta_y1=self.bety,
            beta_y2=self.bety,
            # mirror optics: alpha flips sign between the beams
            alpha_x1=self.alfx,
            alpha_x2=-self.alfx,
            alpha_y1=self.alfy,
            alpha_y2=-self.alfy,
            dx_1=self.dx,
            dx_2=self.dx,
            dy_1=self.dy,
            dy_2=self.dy,
            dpx_1=self.dpx,
            dpx_2=self.dpx,
            dpy_1=self.dpy,
            dpy_2=self.dpy,
            # crab cavities: equal and opposite voltages, same RF frequency
            CC_V_x_1=ccvx,
            CC_f_x_1=self.ccrf,
            CC_phase_x_1=0,
            CC_V_x_2=-ccvx,
            CC_f_x_2=self.ccrf,
            CC_phase_x_2=0,
            CC_V_y_1=ccvy,
            CC_f_y_1=self.ccrf,
            CC_phase_y_1=0,
            CC_V_y_2=-ccvy,
            CC_f_y_2=self.ccrf,
            CC_phase_y_2=0,
            # transfer-matrix terms consistent with ccr12/ccr34 above
            R12_1=ccr12,
            R22_1=0,
            R34_1=ccr34,
            R44_1=0,
            R12_2=ccr12,
            R22_2=0,
            R34_2=ccr34,
            R44_2=0,
            verbose=verbose,
            sigma_integration=3,
        )
def betastar_from_lumi(self, bunch, target, betaratio=1):
"""Solve for the betastar that give a target luminosity"""
ip = self.clone()
def ftosolve(beta):
ip.betx = beta
ip.bety = beta * betaratio
return ip.luminosity(bunch) - target
res = scipy.optimize.root(ftosolve, ip.betx)
if res.success:
ftosolve(res.x[0])
return ip
else:
print(res)
raise ValueError("Could not find betastar")
def betastar_from_pileup(self, bunch, target, betaratio=1):
"""Solve for the betastar that give a target pileup"""
ip = self.clone()
def ftosolve(beta):
ip.betx = beta
ip.bety = beta * betaratio
return ip.pileup(bunch) - target
res = scipy.optimize.root(ftosolve, ip.betx)
if res.success:
beta = res.x[0]
ip.betx = beta
ip.bety = beta * betaratio
return ip
else:
print(res)
raise ValueError("Could not find betastar")
    def sep_from_lumi(self, bunch, target):
        """Solve for the separation giving *target* luminosity (full model).

        NOTE(review): relies on ``self.sep_`` (a setter-like method) and
        ``self.sep`` (used as the initial guess), neither of which is
        defined in this part of the file.  Presumably ``sep_`` sets both
        planes and records ``self.sep`` — confirm; otherwise the
        ``ip.sep`` access below raises AttributeError.
        """
        ip = self.clone()
        def ftosolve(sep):
            ip.sep_(sep)
            return ip.luminosity(bunch) - target
        res = scipy.optimize.root(ftosolve, ip.sep)
        if res.success:
            # leave the clone at the converged separation
            ip.sep_(res.x[0])
            return ip
        else:
            print(res)
            raise ValueError("Could not find separation")
    def sep_from_lumi_simple(self, bunch, target):
        """Analytically level to *target* luminosity by transverse separation.

        Inverts the Gaussian separation factor of ``lumi_simple``.  The
        closed form ``sqrt(-log(factor) * sigx * sigy)`` is only the exact
        inverse for a single plane / round beams — the author's "small
        error" note below; the exact two-plane inverse would be
        ``sqrt(-log(factor) / (1/sigx**2 + 1/sigy**2))``.
        ``sep_`` is defined elsewhere in the file and appears to be a
        fluent setter (it is chained here) — TODO confirm.
        """
        # luminosity with zero separation, used as the reference
        l_nosep = self.clone().sep_(0).lumi_simple(bunch)
        factor = target / l_nosep
        sigx = np.sqrt(self.betx * bunch.emitx)
        sigy = np.sqrt(self.bety * bunch.emity)
        if factor > 1:
            sep = 0  # no levelling possible: target above unseparated lumi
        else:
            sep = np.sqrt(-np.log(factor) * sigx * sigy)  # small error
        return self.clone().sep_(sep)
    def burnoff(self, bunch):
        """Proton burn-off rate from collisions at this IP.

        NOTE(review): despite the original docstring ("Burnoff time in
        seconds"), callers use this as a rate in protons per second:
        ``bb.ppb -= ip.burnoff(bb) * dt`` in BetaStarLeveling.  The 1e-31
        factor converts the cross-section units (presumably mb) to match
        the luminosity units — confirm against the luminosity model.
        """
        return self.luminosity(bunch) * self.total_cross_section * 1e-31
def info(self, bunch):
print(f"sigma_x : {np.sqrt(self.betx * bunch.emitx)}")
print(f"sigma_y : {np.sqrt(self.bety * bunch.emity)}")
print(f"Normalized crosing angles: {self.normalized_crossing_angle(bunch)}")
print(f"Normalized separations : {self.normalized_separation(bunch)}")
class Bunch:
    """A set of identical bunches circulating in the machine.

    Attributes
    ----------
    nb : number of bunches (pileup divides the event rate by nb * frev,
        i.e. the crossing rate) — the original docstring mislabelled this
        as "particles per bunch"
    ppb : protons per bunch
    emitnx, emitny [m.rad] : normalized transverse emittances
    sigz [m] : RMS bunch length
    sigdpp : RMS relative energy spread
    energy [eV] : beam energy
    ips : interaction points this bunch collides at
    long_dist : longitudinal distribution name (stored as ``longdist``)
    frev [Hz] : revolution frequency
    pmass [eV] : proton mass
    delta : momentum offset forwarded to the luminosity model
    """

    def __init__(
        self,
        nb=1960,
        ppb=2.2e11,
        emitnx=2.5e-6,
        emitny=2.5e-6,
        sigz=7.61e-2,
        sigdpp=1.2e-4,
        energy=7e12,
        ips=(),
        long_dist="gaussian",
        frev=11245.5,
        pmass=0.9382720813e9,
        delta=1,
    ):
        self.nb = nb
        self.ppb = ppb
        self.emitnx = emitnx
        self.emitny = emitny
        self.sigz = sigz
        self.sigdpp = sigdpp
        self.energy = energy
        self.ips = ips
        self.longdist = long_dist
        self.frev = frev
        self.pmass = pmass
        self.delta = delta

    def __repr__(self) -> str:
        out = []
        for kk in ["energy", "nb", "ppb", "emitnx", "emitny", "sigz"]:
            vv = getattr(self, kk)
            if vv != 0:
                out.append(f"{kk}={vv!r}")
        # bug fix: the original printed "IP(...)" (copy-paste from IP.__repr__)
        return f"Bunch({', '.join(out)})"

    @property
    def gamma(self):
        """Relativistic gamma = E / m_p."""
        return self.energy / self.pmass

    @property
    def emitx(self):
        """Geometric horizontal emittance [m.rad]."""
        return self.emitnx / self.gamma

    @property
    def emity(self):
        """Geometric vertical emittance [m.rad]."""
        return self.emitny / self.gamma

    def luminosity(self, verbose=False):
        """Luminosity of this bunch at each of its IPs."""
        return np.array([ip.luminosity(self, verbose=verbose) for ip in self.ips])

    def ip_info(self):
        """Print a summary for every IP this bunch collides at."""
        for ip in self.ips:
            ip.info(self)

    def clone(self, **kwargs):
        """Return a copy of this bunch; keyword arguments override attributes."""
        bb = Bunch(
            nb=self.nb,
            ppb=self.ppb,
            emitnx=self.emitnx,
            emitny=self.emitny,
            sigz=self.sigz,
            sigdpp=self.sigdpp,
            energy=self.energy,
            ips=self.ips,
            long_dist=self.longdist,
            frev=self.frev,
            pmass=self.pmass,
            delta=self.delta,
        )
        for k, v in kwargs.items():
            setattr(bb, k, v)
        return bb
class BetaStarLeveling:
    """Beta*-levelling fill simulation.

    IP1/IP5 are ramped to ``lumi_lev`` and then levelled by reducing
    beta* down to ``betastar_end``; IP2/IP8 are levelled by separation at
    ``lumi2``/``lumi8``; finally the luminosity decays from burn-off.
    """

    def __init__(
        self,
        bunches,
        ips,
        lumi_start,
        lumi_lev,
        lumi_ramp_time,
        betastar_end=0.15,
        lumi2=1.4e35,
        lumi8=2e37,
    ):
        self.bunches = bunches  # list of Bunch objects; bb.ips holds IP numbers
        self.ips = ips  # mapping {1, 2, 5, 8} -> IP
        self.lumi_start = lumi_start
        self.lumi_lev = lumi_lev
        self.lumi_ramp_time = lumi_ramp_time
        self.betastar_end = betastar_end
        self.lumi2 = lumi2
        self.lumi8 = lumi8

    def _burnoff_step(self, bunches, ips, dt):
        """Clone all bunches and subtract one dt of proton burn-off per IP."""
        bunches = [b.clone() for b in bunches]
        for bb in bunches:
            for ipn in bb.ips:
                bb.ppb -= ips[ipn].burnoff(bb) * dt
        return bunches

    def betastar_leveling(self, fillduration=15 * 3600, dt=60, verbose=False):
        """Integrate the fill over time; return a StableBeam history.

        Bug fixes relative to the original:
        * the reference bunch is ``self.bunches[0]`` — ``self.bunch`` was
          never set and raised AttributeError;
        * ``betastar_from_lumi`` / ``sep_from_lumi_simple`` return clones,
          so their results are re-assigned (the original discarded them,
          meaning the optics never changed and the beta*-levelling loop
          could only exit via the fillduration guard);
        * every history entry is the same 6-tuple, as required by
          ``StableBeam(*zip(*out))`` (the first entry was a 3-tuple).
        """
        # number of colliding bunches at each IP
        nc = {1: 0, 2: 0, 5: 0, 8: 0}
        for ipn in (1, 2, 5, 8):
            for bb in self.bunches:
                if ipn in bb.ips:
                    nc[ipn] += bb.nb
        if verbose:
            print(nc)
        # initial working points
        ref = self.bunches[0]
        ip1 = self.ips[1].betastar_from_lumi(ref.clone(nb=nc[1]), self.lumi_start)
        ip5 = self.ips[5].betastar_from_lumi(ref.clone(nb=nc[5]), self.lumi_start)
        ip2 = self.ips[2].sep_from_lumi_simple(ref.clone(nb=nc[2]), self.lumi2)
        ip8 = self.ips[8].sep_from_lumi_simple(ref.clone(nb=nc[8]), self.lumi8)
        bunches = [b.clone() for b in self.bunches]
        tt = 0  # time in luminosity [s]
        out = [(tt, bunches, ip1, ip5, ip2, ip8)]
        # luminosity ramp in IP1/IP5
        lumi15 = self.lumi_start
        dlumi = (self.lumi_lev - self.lumi_start) / self.lumi_ramp_time * dt
        while lumi15 < self.lumi_lev and tt <= fillduration:
            bunches = self._burnoff_step(bunches, {1: ip1, 2: ip2, 5: ip5, 8: ip8}, dt)
            lumi15 += dlumi
            ip1 = ip1.betastar_from_lumi(bunches[0].clone(nb=nc[1]), lumi15)
            ip5 = ip5.betastar_from_lumi(bunches[0].clone(nb=nc[5]), lumi15)
            ip2 = ip2.sep_from_lumi_simple(bunches[0].clone(nb=nc[2]), self.lumi2)
            ip8 = ip8.sep_from_lumi_simple(bunches[0].clone(nb=nc[8]), self.lumi8)
            out.append((tt, bunches, ip1, ip5, ip2, ip8))
            tt += dt
        # beta* levelling at constant lumi_lev
        lumi15 = self.lumi_lev  # small error here (ramp may overshoot)
        while ip1.betx > self.betastar_end and tt <= fillduration:
            bunches = self._burnoff_step(bunches, {1: ip1, 2: ip2, 5: ip5, 8: ip8}, dt)
            ip1 = ip1.betastar_from_lumi(bunches[0].clone(nb=nc[1]), lumi15)
            ip5 = ip5.betastar_from_lumi(bunches[0].clone(nb=nc[5]), lumi15)
            ip2 = ip2.sep_from_lumi_simple(bunches[0].clone(nb=nc[2]), self.lumi2)
            ip8 = ip8.sep_from_lumi_simple(bunches[0].clone(nb=nc[8]), self.lumi8)
            out.append((tt, bunches, ip1, ip5, ip2, ip8))
            tt += dt
        # natural decay until end of fill (IP2/IP8 still levelled)
        while tt < fillduration:
            bunches = self._burnoff_step(bunches, {1: ip1, 2: ip2, 5: ip5, 8: ip8}, dt)
            ip2 = ip2.sep_from_lumi_simple(bunches[0].clone(nb=nc[2]), self.lumi2)
            ip8 = ip8.sep_from_lumi_simple(bunches[0].clone(nb=nc[8]), self.lumi8)
            out.append((tt, bunches, ip1, ip5, ip2, ip8))
            tt += dt
        return StableBeam(*zip(*out))
class StableBeam:
    """History of a fill: parallel per-time-step sequences of bunches and IPs."""

    def __init__(self, tt, bunches, ip1, ip5, ip2, ip8):
        # store each parallel sequence under its own attribute
        fields = ("tt", "bunches", "ip1", "ip5", "ip2", "ip8")
        for attr, value in zip(fields, (tt, bunches, ip1, ip5, ip2, ip8)):
            setattr(self, attr, value)
# pip install Pillow opencv-python numpy
import tkinter as tk
import cv2 as cv
import PIL.Image
import PIL.ImageTk
# GUI root window for the webcam viewer (module-level script state).
root = tk.Tk()
# Candidate V4L2 device nodes; only the first is opened below.
# NOTE(review): "ADRESSES" is a typo for "ADDRESSES" (kept: it is a name).
CAM_ADRESSES = ["/dev/video0", "/dev/video1", "/dev/video2"]
camera = cv.VideoCapture(CAM_ADRESSES[0])
# Label widget that displays the current video frame, filling the window.
lbimage = tk.Label()
lbimage.pack(side="top", anchor="center",expand=True, fill="both")
def capture():
    """Grab one frame from the camera, show it in the label, and reschedule.

    Bug fix: the original only called ``root.after`` when a frame was read
    successfully and no exception occurred, so a single failed read (or any
    error) stopped the video loop permanently.  The next capture is now
    scheduled unconditionally so the stream can recover.
    """
    try:
        res, frame = camera.read()
        if res:
            # OpenCV delivers BGR; PIL/Tk expect RGB
            frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
            img = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
            # keep a reference on the widget so the image isn't garbage-collected
            lbimage.imgtk = img
            lbimage.configure(image=img)
    except Exception as exc:
        print(f"Hata: {exc}")
    # reschedule even on failure so one bad frame doesn't kill the loop
    root.after(1, capture)
# Initial window size.
root.geometry("800x600")
# Start the capture loop after 1 s (gives the camera time to initialize),
# then hand control to Tk's event loop.
root.after(1000, capture)
root.mainloop()
# (extraction residue — dataset-viewer boilerplate, not part of this script)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.