text stringlengths 38 1.54M |
|---|
#!/usr/env/bin python
"""
Given an unsorted array of integers, find the length of the longest consecutive elements sequence.
For example,
Given [100, 4, 200, 1, 3, 2],
The longest consecutive elements sequence is [1, 2, 3, 4]. Return its length: 4.
Your algorithm should run in O(n) complexity.
"""
class Solution:
    def longestConsecutive(self, nums):
        """Return the length of the longest consecutive-integer run in *nums*.

        Uses a hash set so each number is examined O(1) times, meeting the
        required O(n) bound. (The previous sort-based version was
        O(n log n) and incorrectly returned 1 for an empty input.)

        :type nums: List[int]
        :rtype: int  (0 for an empty input)
        """
        values = set(nums)  # dedupe; duplicates never extend a run
        longest = 0
        for value in values:
            # Only start counting at the left end of a run, so each run
            # is walked exactly once.
            if value - 1 in values:
                continue
            end = value
            while end + 1 in values:
                end += 1
            longest = max(longest, end - value + 1)
        return longest
if __name__ == "__main__":
    # Manual smoke test: the longest run in [1, 2, 0, 1] is [0, 1, 2] -> 3.
    solver = Solution()
    print(solver.longestConsecutive([1, 2, 0, 1]))
# content of test_assert1.py
'''
def f():
return 3
def test_function():
assert f() == 4
'''
import pytest
def test_zero_division():
    """Integer division by zero must raise ZeroDivisionError."""
    with pytest.raises(ZeroDivisionError):
        1 / 0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Rule strategies: actively respond with an SMS, or passively listen to and
# log SMS & calls.
RULE_STRATEGY_RES_SMS = 'RES_SMS'
RULE_STRATEGY_LISTEN_LOG = 'LIS_LOG'
# (stored value, human-readable label) pairs -- presumably used as a Django
# model/form field `choices` option; verify against the model definitions.
RULE_STRATEGY_CHOICES = (
    (RULE_STRATEGY_RES_SMS, 'Respond SMS'),
    (RULE_STRATEGY_LISTEN_LOG, 'Listen SMS & CALL'),
)
# Rule target kinds: a device-level rule vs. an action-level rule.
RULE_TYPE_DEVICE = 'DEV'
RULE_TYPE_ACTION = 'ACT'
RULE_TYPE_CHOICES = (
    (RULE_TYPE_DEVICE, 'Device'),
    (RULE_TYPE_ACTION, 'Action'),
)
|
from typing import Iterable
from xml.etree import ElementTree as ET
from upnpavcontrol.core import didllite
# Register the conventional prefixes so serialised documents use readable
# names instead of ElementTree's auto-generated ns0/ns1 prefixes.
ET.register_namespace('upnp', 'urn:schemas-upnp-org:metadata-1-0/upnp/')
ET.register_namespace('dc', 'http://purl.org/dc/elements/1.1/')
ET.register_namespace('avt-event', 'urn:schemas-upnp-org:metadata-1-0/AVT/')
ET.register_namespace('rcs', 'urn:schemas-upnp-org:metadata-1-0/RCS/')
ET.register_namespace('event', 'urn:schemas-upnp-org:event-1-0')
ET.register_namespace('didl', 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/')
# The default (empty) prefix maps to DIDL-Lite, so DIDL elements serialise
# without a prefix.
ET.register_namespace('', 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/')
# Prefix -> namespace URI lookup used when building qualified tag names.
NS = {
    "soap_envelope": "http://schemas.xmlsoap.org/soap/envelope/",
    "device": "urn:schemas-upnp-org:device-1-0",
    "service": "urn:schemas-upnp-org:service-1-0",
    "event": "urn:schemas-upnp-org:event-1-0",
    "control": "urn:schemas-upnp-org:control-1-0",
    'upnp': 'urn:schemas-upnp-org:metadata-1-0/upnp/',
    'dc': 'http://purl.org/dc/elements/1.1/',
    'didl': 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/'
}
def add_subelement_if(parent, tag, ns, text):
    """Append a namespaced child element carrying *text* to *parent*.

    When *text* is None nothing is created and None is returned; otherwise
    the new element (text set to str(text)) is returned.  *ns* is a key
    into the module-level NS prefix table.
    """
    if text is None:
        return
    qualified_tag = '{%s}%s' % (NS[ns], tag)
    child = ET.SubElement(parent, qualified_tag)
    child.text = str(text)
    return child
def format_didllite(elements: Iterable[didllite.MusicTrack]):
    """Serialise *elements* into a DIDL-Lite XML document string."""
    root = ET.Element('{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}DIDL-Lite')
    root.extend(format_music_item(entry) for entry in elements)
    return ET.tostring(root).decode('utf-8')
def format_music_item(didlelement: didllite.MusicTrack):
    """Render one MusicTrack as a DIDL-Lite <item> element."""
    item = ET.Element('{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item')
    item.set('id', didlelement.id)
    item.set('parentID', didlelement.parentID)
    item.set('restricted', '1')
    # Optional metadata fields: add_subelement_if skips None values.
    optional_fields = (
        ('class', 'upnp', didlelement.upnpclass),
        ('title', 'dc', didlelement.title),
        ('album', 'upnp', didlelement.album),
        ('artist', 'upnp', didlelement.artist),
        ('originalTrackNumber', 'upnp', didlelement.originalTrackNumber),
    )
    for tag, prefix, value in optional_fields:
        add_subelement_if(item, tag, prefix, value)
    # One <res> child per resource, carrying its protocolInfo and URI.
    didl_uri = NS['didl']
    for res in didlelement.res:
        res_element = ET.SubElement(item, '{%s}res' % didl_uri)
        res_element.set('protocolInfo', res.protocolInfo)
        res_element.text = res.uri
    return item
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 20 09:51:20 2018
@author: philippe
"""
# import bempp modules
import bempp.api
from bempp.api.operators.potential import helmholtz as helmholtz_potential
# import usual python librairies
bempp.api.global_parameters.assembly.boundary_operator_assembly_type = 'hmat' #dense,hmat
bempp.api.global_parameters.assembly.potential_operator_assembly_type = 'hmat'
#bempp.api.global_parameters.assembly.enable_interpolation_for_oscillatory_kernels
bempp.api.global_parameters.hmat.admissibility = 'weak'
bempp.api.global_parameters.hmat.eps = 0.001
bempp.api.global_parameters.hmat.max_block_size = 10000
import numpy as np
import time
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from matplotlib.patches import Circle
import sys
#SparseDiscreteBoundaryOperator
# import special functions
from scipy.special import spherical_jn as jn
from scipy.special import spherical_yn as yn
from scipy.special import eval_legendre
from scipy.sparse.linalg import gmres
# define analytical solution: scattering from the unit sphere
def analytic_sol(R,a,theta,Ampli,k,N_terms):
result = 0
for n in range(N_terms):
tosum = 0
cn = -Ampli*(2*n+1)*(-1j)**(n)*(jn(n,k*a,derivative=True)/ (jn(n,k*a,derivative=True) - 1j*yn(n,k*a,derivative=True)))
tosum = cn*(jn(n,k*R,derivative=False) - 1j*yn(n,k*R,derivative=False))*eval_legendre(n,np.cos(theta))
result = result + tosum
return result
def pressure_db(u, p0):
    """Sound pressure level in dB: 10*log10(|u|^2 / p0^2)."""
    intensity_ratio = np.abs(u) ** 2 / p0 ** 2
    return 10 * np.log10(intensity_ratio)
class gmres_counter(object):
    """Callback object for scipy's gmres: counts iterations and keeps the
    residual reported at each one, optionally printing progress."""

    def __init__(self, disp=True):
        self._disp = disp       # print a line per iteration when True
        self.niter = 0          # number of callback invocations so far
        self.residuals = []     # residual value passed at each iteration

    def __call__(self, rk=None):
        self.niter = self.niter + 1
        self.residuals.append(rk)
        if not self._disp:
            return
        print('iteration %3i: residual = %s' % (self.niter, str(rk)))
def run_bempp_sphere(freq):
    """Solve plane-wave scattering by the unit sphere with BEM (bempp).

    Assembles a regularized Burton-Miller formulation of the exterior
    Helmholtz problem at frequency *freq* (Hz), solves it with GMRES,
    compares the polar response at R=3 against the analytical series, and
    optionally plots/saves the results.

    :param freq: excitation frequency in Hz (converted to float).
    :return: (gmres_iterations, gmres_time_s, assembly_time_s,
              max_polar_error, dof_count)
    """
    # Parameters
    #freq = 546.2197646913849
    #freq = 1638.6592940741543
    path = "/home/philippe/Documents/ESA/Python_SC/bempp/"
    name = "sphere"
    freq = float(freq)
    omega = 2*np.pi*freq
    c = 343.2  # speed of sound in air (m/s)
    #rho = 1.204
    k = omega/c  # wavenumber
    Ampli = 1    # incident plane-wave amplitude
    p0 = 2e-5    # reference pressure (20 uPa) for dB levels
    # 0/1 toggles: show the mesh, show result figures, export data files
    vis_mesh = 0
    vis_figs = 1
    save_data = 0
    # linear solver parameters
    tol_gmres = 1e-5
    # define input boundary condition: plane wave in the x-direction
    def dirichlet_fun(x, n, domain_index, result):
        result[0] = np.exp(1j * k * x[0])
    def neumann_fun(x, n, domain_index, result):
        result[0] = 1j * k * n[0] * np.exp(1j * k * x[0])
    # create the grid with approximately 10 elements per wavelength
    h = 2*np.pi/(10*k)
    grid = bempp.api.shapes.sphere(h=h)
    space = bempp.api.function_space(grid, "P", 1) #P,1 = continuous piecewise-linear
    print("Mesh successfully loaded !")
    print("The space has {0} dofs".format(space.global_dof_count))
    if vis_mesh == 1:
        grid.plot()
    # define the operators to set up the bem problem
    identity = bempp.api.operators.boundary.sparse.identity(
        space, space, space)
    dlp = bempp.api.operators.boundary.helmholtz.double_layer(
        space, space, space, k)
    hyp = bempp.api.operators.boundary.helmholtz.hypersingular(
        space, space, space, k, use_slp=True)
    ntd = bempp.api.operators.boundary.helmholtz.osrc_ntd(space, k)
    # Regularized Burton-Miller formulation: the OSRC NtD map plays the
    # role of the coupling parameter (commented variant uses constant eta).
    burton_miller = .5 * identity - dlp - ntd * hyp
    #eta = 0.03*1j
    #burton_miller = .5 * identity - dlp - eta*hyp
    dirichlet_grid_fun = bempp.api.GridFunction(space, fun=dirichlet_fun)
    neumann_grid_fun = bempp.api.GridFunction(space, fun=neumann_fun)
    rhs_fun = dirichlet_grid_fun - ntd * neumann_grid_fun
    # bem assembling
    print("Assembling bem operator...")
    t = time.time()
    discrete_op = burton_miller.strong_form()
    coeffs = rhs_fun.coefficients
    elapsed_hm = time.time() - t
    print("Assembling bem operator: %1.1f sec" %elapsed_hm)
    # solve the linear system (warm-started from the rhs coefficients)
    t = time.time()
    counter = gmres_counter()
    x, info = gmres(discrete_op, coeffs,x0 = coeffs, maxiter = 100,tol = tol_gmres,callback=counter)
    elapsed_gmres = time.time() - t
    print("Gmres solving time: %1.1f sec" %elapsed_gmres)
    It = counter.niter
    Residuals = np.asarray(counter.residuals)
    # NOTE(review): GridFunction receives the discrete operator here rather
    # than the function space -- presumably accepted by this bempp version;
    # confirm against the bempp API in use.
    total_field = bempp.api.GridFunction(discrete_op, coefficients=x)
    if vis_figs == 1:
        # GMRES convergence history vs. the requested tolerance
        plt.figure(figsize=(9, 3))
        plt.semilogy(range(0,It), Residuals,"b-*",linewidth=2.0,label="residuals",markersize=10,markeredgecolor="k")
        plt.semilogy(range(0,It), tol_gmres*np.ones(It),"k--",linewidth=1.5,label="gmres tolerance")
        plt.legend()
        plt.grid(True,which="both",ls="--",alpha=0.5)
        plt.title("Gmres convergence")
    if save_data == 1:
        np.savetxt("Residuals_gmres.txt",Residuals)
    ####################################################################################
    # plot polar pressure response in the xy-plane
    theta = np.linspace(0, 2 * np.pi, 200)
    xc = 0  # observation-circle centre
    yc = 0
    z_cut=0
    R = 3   # observation radius
    # define the bempp solution (incident wave + scattered field)
    points = np.array([(R-xc)*np.cos(theta), (R-yc)*np.sin(theta), z_cut*np.ones(len(theta))])
    slp_pot_polar = helmholtz_potential.double_layer(
        space, points, k)
    res_polar = np.exp(1j * k * points[0]) + slp_pot_polar.evaluate(total_field)
    # magnitude normalised by the incident field
    u_polar = np.abs(res_polar.flat/(np.exp(1j *k * points[0])))
    # define the analytical solution
    u_in = np.exp(-1j * k * points[0])
    u_polar_ex = np.abs(analytic_sol(R,1,theta,Ampli,k,100) + u_in)
    if vis_figs == 1:
        plt.figure(figsize=(10, 8))
        plt.polar(theta, u_polar,"r-.",linewidth=2.0,label="bempp")
        plt.polar(theta, u_polar_ex,"b:",linewidth=2.0,label="analytical")
        plt.legend()
    if save_data == 1:
        np.savetxt("polar_data"+str(freq)+"Hz.txt",u_polar)
        np.savetxt("polar_data_ex"+str(freq)+"Hz.txt",u_polar_ex)
    # max deviation between BEM and analytical solution on the circle
    err = np.max(np.abs(u_polar - u_polar_ex))
    print("max error polar plot = %1.3e" %err)
    # compute the numerical solution on a grid (xy-plane slice)
    t = time.time()
    Nx = 60
    Ny = 60
    xmin, xmax, ymin, ymax = [-3, 3, -3, 3]
    plot_grid = np.mgrid[xmin:xmax:Nx * 1j, ymin:ymax:Ny * 1j]
    points_grid = np.vstack((plot_grid[0].ravel(),
                             plot_grid[1].ravel(),
                             z_cut*np.ones(plot_grid[0].size)))
    u_evaluated = np.zeros(points_grid.shape[1], dtype=np.complex128)
    u_evaluated[:] = np.nan
    x, y, z = points_grid
    slp_pot = helmholtz_potential.double_layer(
        space, points_grid, k)
    res = np.exp(1j * k * points_grid[0]) + slp_pot.evaluate(total_field)
    u_evaluated = res.reshape((Nx, Ny))
    elapsed = time.time() - t
    print("Computing the external field: %1.1f sec" %elapsed)
    # visualize results: filled dB contours with white iso-lines, sphere
    # footprint masked by a white circle
    if vis_figs == 1:
        fig,ax = plt.subplots(1,figsize=(10, 8))
        ax.set_aspect('equal')
        cmap = cm.magma
        #p = ax.imshow(pressure_db(u_evaluated.T,p0), interpolation='bilinear',extent=[xmin, xmax, ymin, ymax],
        #cmap=cmap, vmin = 80, vmax =100,origin='lower')
        mini = 80   # dB colour range
        maxi = 100
        levels = np.linspace(mini,maxi,maxi-mini+1)
        Z = pressure_db(u_evaluated,p0)
        p = ax.contourf(x.reshape((Nx, Ny)), y.reshape((Nx, Ny)), Z, levels,
                        cmap=cm.get_cmap(cmap, len(levels)))
        p2 = ax.contour(x.reshape((Nx, Ny)), y.reshape((Nx, Ny)), Z, p.levels, colors='white',linewidths=0.5,linestyles='solid',alpha=0.5) #dashed, dashdot
        circ = Circle((0,0),1.05,color="white")
        ax.add_patch(circ)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        cbar = plt.colorbar(p,ticks=levels)
        cbar.set_label('Pressure (dB)', rotation=90)
        ax.set_title("total pressure field unit sphere, solution in plane z=%1.1f" %z_cut)
        plt.show()
    if save_data == 1:
        np.savetxt("grid_data_real"+str(freq)+"Hz.txt",np.real(u_evaluated))
        np.savetxt("grid_data_imag"+str(freq)+"Hz.txt",np.imag(u_evaluated))
        #np.savetxt("grid_points_x.txt",x)
        #np.savetxt("grid_points_y.txt",y)
        #np.savetxt("grid_points_z.txt",z)
        p_real = np.real(res[0,:])
        p_imag = np.imag(res[0,:])
        to_save = np.array([x,y,z,p_real,p_imag])
        to_save = np.transpose(to_save)
        np.savetxt(path+"data/2d_slice"+name+str(freq)+"Hz_XY.csv", to_save, delimiter=',', header="x , y , z , data_r, data_i", comments="")
    #del discrete_op, coeffs, x, total_field, u_polar, slp_pot_polar, slp_pot, res, u_evaluated, to_save, p_real,p_imag
    # encourage release of the large BEM objects before the next frequency
    import gc
    gc.collect()
    return It, elapsed_gmres, elapsed_hm, err, space.global_dof_count
if __name__ == "__main__":
    # Frequency-sweep driver: runs the sphere benchmark at
    # argv[1], argv[1]+50, argv[1]+100 and reports per-run statistics.
    #freq = 163.86592940741542
    import resource  # POSIX-only; used for peak-memory reporting

    # Per-frequency result accumulators.
    data_It = []   # GMRES iteration counts
    Err = []       # max polar-plot errors
    T_gmres = []   # solve times (s)
    T_hmat = []    # H-matrix assembly times (s)
    N_dofs = []    # dof counts
    U_mem = []     # peak RSS after each run (kB on Linux)
    if len(sys.argv) > 1:
        print("***************************************************************")
        print("************* bempp for high frequency scattering *************")
        print("***************************************************************")
        for i in range(0,150,50): # change this range to modify the frequency loop
            tmp = float(sys.argv[1]) + i
            print("Running frequency " +str(tmp) + " Hz")
            NbIt, t_gmres, t_hmat, err, n_dofs = run_bempp_sphere(tmp)
            print("frequency " +str(tmp) + " Hz finished")
            used_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            print("used memory: " +str(used_mem/1000)+ " Mb")
            print("---------------------------------------------------------------")
            print("---------------------------------------------------------------")
            U_mem.append(used_mem)
            data_It.append(NbIt)
            Err.append(err)
            T_gmres.append(t_gmres)
            T_hmat.append(t_hmat)
            N_dofs.append(n_dofs)
        print("Number of Iterations gmres: ", data_It)
        print("Error polar plot at R = 3: ", Err)
        print("Time gmres: ", T_gmres)
        print("Time H mat assembly: ", T_hmat)
        print("Number of dofs: ", N_dofs)
        print("Used mem (kB): ", U_mem)
    else:
        # sys.exit() itself raises SystemExit; the former
        # "raise sys.exit(...)" wrapped it in a redundant raise that
        # could never execute.
        sys.exit("usage: python " +str(sys.argv[0])+ " <frequency>")
|
#!C:\Users\amham\AppData\Local\Programs\Python\Python38-32\python.exe
#!python
# Minimal CGI demo: print an HTTP header, then sample Python values so the
# literal results appear in the HTML response body.
print("Content-Type: text/html")
print()  # blank line terminates the CGI headers
# number
print(1)
# string
print('hello world')
# booleans
print(True)
print(False)
# expressions are evaluated before printing
print(1+1)
print('hello'+'world') # helloworld (concatenation adds no space)
# comparison operators yield booleans
print(1==1)
print(1<2)
print(2<1)
# membership operator (note the leading space inside ' hello world')
print('world' in ' hello world')
import os.path
print(os.path.exists('boolean.py'))  # True only if boolean.py sits in the CWD
#!/usr/bin/env python
import tkinter as tk
import seaborn as sns
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
def create_plot(color):
    """Build and return a seaborn heatmap figure for the palette keyed by
    *color* (a key of the module-level `values` dict)."""
    sns.set(style = 'white')
    data = np.random.rand(10, 10)
    figure, _axes = plt.subplots(figsize = (6, 6))
    palette_name = values[color]
    # Keys "1" and "2" name discrete palettes; the rest are continuous
    # colormaps and need as_cmap=True.
    if int(color) < 3:
        cmap = sns.color_palette(palette_name)
    else:
        cmap = sns.color_palette(palette_name, as_cmap = True)
    sns.heatmap(data, cmap = cmap, center = 0, linewidths = 0.5)
    return figure
def redraw():
    """Rebuild the heatmap for the currently selected palette and repaint
    the embedded canvas (radio-button callback)."""
    selected = v.get()
    print(selected)
    canvas.figure = create_plot(selected)
    canvas.draw()
def init(root):
    """Populate *root* with a caption, one radio button per palette, and a
    Quit button; selects palette "1" as the default."""
    tk.Label(root, text = "Choose a color palette:").pack(anchor = tk.W)
    for palette_key, palette_label in values.items():
        radio = tk.Radiobutton(root,
                               text = palette_label,
                               padx = 20,
                               variable = v,
                               command = redraw,
                               value = palette_key)
        radio.pack(anchor=tk.W)
    v.set("1")
    quit_button = tk.Button(root, text="Quit", command=root.quit)
    quit_button.pack()
# Application bootstrap: create the Tk window, the palette controls and the
# initial heatmap, then hand control to the Tk event loop.
sns.set()
root = tk.Tk()
root.wm_title("Heatmap")
v = tk.StringVar()  # currently selected palette key ("1".."5")
# palette key -> seaborn palette/colormap name
values = {"1": "pastel", "2": "hls",
          "3": "Blues", "4": "YlOrBr",
          "5": "icefire"}
init(root)
# initial plot with the default palette
fig = create_plot("1")
canvas = FigureCanvasTkAgg(fig, master=root)  # embed the figure in Tk
canvas.draw()
canvas.get_tk_widget().pack()
tk.mainloop()
|
from django.shortcuts import render
from products.models import *
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# All catalog views share one shape: filter active main product images,
# paginate by 20, render catalog/catalog.html with locals().  The helpers
# below centralise that logic.  Each public view keeps its original name,
# signature and local-variable names (products_list, main_category /
# category, page, paginator, products) because the template context is
# built from locals().

def _active_images(**extra_filters):
    """Base queryset: main images of active products, plus view filters."""
    return ProductImage.objects.filter(
        is_active=True, is_main=True, product__is_active=True,
        **extra_filters)

def _paginate(products_list, page):
    """Return (paginator, requested page), tolerating bad page values."""
    paginator = Paginator(products_list, 20)
    try:
        products = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        products = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        products = paginator.page(paginator.num_pages)
    return paginator, products

def catalog_all(request):
    """Whole catalog, newest products first."""
    products_list = _active_images().order_by("-id")
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def kruzhevo(request):
    """Category 3."""
    products_list = _active_images(product__category=3)
    main_category = products_list[:1]  # single image used as the header
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def elastichnoe(request):
    """Category 3, subcategory 15."""
    products_list = _active_images(product__category=3, product__subcategories=15)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def na_setke(request):
    """Category 3, subcategory 1."""
    products_list = _active_images(product__category=3, product__subcategories=1)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def s_resnichkami(request):
    """Category 3, subcategory 2."""
    products_list = _active_images(product__category=3, product__subcategories=2)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def polotno(request):
    """Category 3, subcategory 3."""
    products_list = _active_images(product__category=3, product__subcategories=3)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def shantili(request):
    """Category 3, subcategory 4."""
    products_list = _active_images(product__category=3, product__subcategories=4)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def elastichnaja_tesma(request):
    """Category 4."""
    products_list = _active_images(product__category=4)
    main_category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def breteli(request):
    """Category 4, subcategory 5."""
    products_list = _active_images(product__category=4, product__subcategories=5)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def kosaja_bejka(request):
    """Category 4, subcategory 6."""
    products_list = _active_images(product__category=4, product__subcategories=6)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def tonnelnaja_lenta(request):
    """Category 4, subcategory 7."""
    products_list = _active_images(product__category=4, product__subcategories=7)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def believye_rezinki(request):
    """Category 4, subcategory 8."""
    products_list = _active_images(product__category=4, product__subcategories=8)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def furnitura(request):
    """Category 5."""
    products_list = _active_images(product__category=5)
    main_category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def krjuchki(request):
    """Category 5, subcategory 9."""
    products_list = _active_images(product__category=5, product__subcategories=9)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def zastezhki(request):
    """Category 5, subcategory 10."""
    products_list = _active_images(product__category=5, product__subcategories=10)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def kostochki(request):
    """Category 5, subcategory 11."""
    products_list = _active_images(product__category=5, product__subcategories=11)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def believoj_paralon(request):
    """Category 5, subcategory 12."""
    products_list = _active_images(product__category=5, product__subcategories=12)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def prochee(request):
    """Category 6."""
    products_list = _active_images(product__category=6)
    main_category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())

def dekorativnye_elementy(request):
    """Category 6, subcategory 13."""
    products_list = _active_images(product__category=6, product__subcategories=13)
    category = products_list[:1]
    page = request.GET.get('page')
    paginator, products = _paginate(products_list, page)
    return render(request, 'catalog/catalog.html', locals())
# Demo driver for the project-local Stack class (LIFO).
from Stack import Stack
my_stack = Stack()
my_stack.push(1)
my_stack.push(3)
print(my_stack)        # relies on Stack defining __str__/__repr__
print(my_stack.pop())  # 3 (last pushed comes off first)
print(my_stack.peek()) # 1, left on the stack
print(my_stack.pop())  # 1 -> stack is now empty
# NOTE(review): this pops an empty stack; the outcome depends entirely on
# Stack's implementation (exception, None, ...) -- confirm intended.
print(my_stack.pop())
|
#!/user/bin/even Python3
# -*- coding:utf-8 -*-
# utils.py
# 工具类
# author:zhaohexin
# time:2020/2/19 3:11 下午
import datetime
from django.db.models import Count
from testcase.models import Testcases
from configures.models import Configures
def get_count_by_project(datas):
    """Enrich interface records with related testcase/configure counts.

    For every record: reformat the ISO "T"-separated create_time string to
    "YYYY-MM-DD HH:MM:SS" (fractional seconds stripped) and attach the
    number of Testcases and Configures rows referencing the interface.

    :param datas: iterable of dicts with "id" and "create_time" keys
    :return: list of the (mutated in place) record dicts
    """
    enriched = []
    for record in datas:
        # "2020-02-19T15:11:03.123456" -> "2020-02-19 15:11:03"
        stamp_parts = record["create_time"].split("T")
        record["create_time"] = stamp_parts[0] + " " + stamp_parts[1].split(".")[0]
        interface_pk = record["id"]
        # counts of related rows for this interface
        record["testcases"] = Testcases.objects.filter(interface_id=interface_pk).count()
        record["configures"] = Configures.objects.filter(interface_id=interface_pk).count()
        enriched.append(record)
    return enriched
|
#Given an open file that maps group names to object names, get maps of group -> object, object -> group
#Each line of the file should be a tab-separated list of names, where object names follow group name
#For example:
#[GROUP NAME 1]\t[OBJECT NAME 1]\t[OBJECT NAME 2]
#[GROUP NAME 2]\t[OBJECT NAME 3]
def read_split_file(split_file):
    """Parse a tab-separated group/object file into two lookup dicts.

    Each non-empty line is "[GROUP]\\t[OBJ1]\\t[OBJ2]...".  Returns
    (group_to_object, object_to_group) where group_to_object maps a group
    id to its list of object ids and object_to_group maps each object id
    back to its group.

    :param split_file: open file or any iterable of lines
    :raises FeatureMapFileFormatError: on a duplicate group or object id
    """
    group_to_object = {}
    object_to_group = {}
    for line in split_file:
        line = line.strip()
        if not line:
            continue
        # A list is required here: on Python 3 map() returns a lazy
        # iterator, which the original code tried to index/slice.
        entries = [e.strip() for e in line.split('\t')]
        group_id = entries[0]
        obj_ids = entries[1:]
        if group_id in group_to_object:
            raise FeatureMapFileFormatError("The feature with ID '%s' was "
                "already found in the feature map file. Feature IDs must "
                "be unique." % group_id)
        group_to_object[group_id] = obj_ids
        for obj_id in obj_ids:
            if obj_id in object_to_group:
                raise FeatureMapFileFormatError("The object with ID '%s' was "
                    "already found in the feature map file (mapped to "
                    "feature '%s'). Object IDs must be unique." % (obj_id,
                    object_to_group[obj_id]))
            object_to_group[obj_id] = group_id
    return group_to_object, object_to_group
def read_mapping_file(mapping_file):
    """Parse a tab-separated sample mapping file into a nested dict.

    The first non-empty row is the header; every later row maps its first
    field (sample id) to a {column_name: value} dict built from the
    remaining fields.

    :param mapping_file: open file or any iterable of lines
    :raises MappingFileFormatError: on a column-count mismatch or a
        duplicate sample id
    """
    samplemap = {}
    headerfields = None
    n_fields = None
    for row in mapping_file:
        row = row.strip()
        if not row:
            continue
        # A list is required here: on Python 3 map() returns a lazy
        # iterator, which the original code indexed and len()-ed.
        fields = [e.strip() for e in row.split('\t')]
        if headerfields is None:
            # First data-bearing row defines the column names.
            headerfields = fields
            n_fields = len(headerfields)
            continue
        if len(fields) != n_fields:
            raise MappingFileFormatError("row does not have the same number of columns (=%s) as the first row" % n_fields)
        sample_id = fields[0]
        sample_md = fields[1:]
        if sample_id in samplemap:
            raise MappingFileFormatError("The sample with ID '%s' was already "
                "found in the mapping file. Sample IDs must be unique."
                % sample_id)
        # Lengths are equal by the check above, so zip loses nothing.
        samplemap[sample_id] = dict(zip(headerfields[1:], sample_md))
    return samplemap
class MappingFileFormatError(Exception):
    """Raised when a mapping file violates the expected tab-separated
    format (wrong column count or duplicate sample IDs)."""
    pass
class FeatureMapFileFormatError(Exception):
    """Raised when a feature map file contains duplicate feature or
    object IDs."""
    pass
|
"""
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import yaml
from programy.clients.events.console.config import ConsoleConfiguration
from programy.clients.events.tcpsocket.config import SocketConfiguration
from programy.clients.polling.slack.config import SlackConfiguration
from programy.clients.polling.telegram.config import TelegramConfiguration
from programy.clients.polling.twitter.config import TwitterConfiguration
from programy.clients.polling.xmpp.config import XmppConfiguration
from programy.clients.restful.config import RestConfiguration
from programy.clients.restful.flask.facebook.config import FacebookConfiguration
from programy.clients.restful.flask.kik.config import KikConfiguration
from programy.clients.restful.flask.line.config import LineConfiguration
from programy.clients.restful.flask.twilio.config import TwilioConfiguration
from programy.clients.restful.flask.viber.config import ViberConfiguration
from programy.clients.restful.sanic.config import SanicRestConfiguration
from programy.utils.console.console import outputLog
class ConfigurationWriter:
    """Writes a skeleton programy YAML configuration file.

    For each requested client type a client configuration section is emitted,
    followed by the default bot and brain sections, and the result is dumped
    to the requested YAML file.
    """

    def __init__(self):
        pass  # pragma: no cover

    def add_to_config(self, config_data, configuration, defaults=True):
        """Serialise *configuration* into *config_data* under its own id key.

        :param config_data: dict the YAML document is being assembled in
        :param configuration: programy configuration object (has .id / .to_yaml)
        :param defaults: whether to include default values for every setting
        """
        config_data[configuration.id] = {}
        configuration.to_yaml(config_data[configuration.id], defaults)

    def execute(self, args):
        """Build the configuration dict requested by *args* and write it to disk.

        :param args: parsed argparse namespace with .file, .clients, .defaults
        :raises Exception: if args is None or no clients were specified
        """
        config_data = {}
        if args is None:
            raise Exception("Args empty")
        if args.clients is None:
            raise Exception("No clients defined")
        if 'all' in args.clients or 'console' in args.clients:
            self.add_to_config(config_data, ConsoleConfiguration(), args.defaults)
        if 'all' in args.clients or 'socket' in args.clients:
            self.add_to_config(config_data, SocketConfiguration(), args.defaults)
        if 'all' in args.clients or 'slack' in args.clients:
            self.add_to_config(config_data, SlackConfiguration(), args.defaults)
        if 'all' in args.clients or 'telegram' in args.clients:
            self.add_to_config(config_data, TelegramConfiguration(), args.defaults)
        if 'all' in args.clients or 'twitter' in args.clients:
            self.add_to_config(config_data, TwitterConfiguration(), args.defaults)
        if 'all' in args.clients or 'xmpp' in args.clients:
            self.add_to_config(config_data, XmppConfiguration(), args.defaults)
        if 'all' in args.clients or 'rest' in args.clients:
            # Consistency fix: 'rest' (and 'sanic' below) previously ignored
            # args.defaults and always emitted defaults, unlike every other client.
            self.add_to_config(config_data, RestConfiguration(name="rest"), args.defaults)
        if 'all' in args.clients or 'facebook' in args.clients:
            self.add_to_config(config_data, FacebookConfiguration(), args.defaults)
        if 'all' in args.clients or 'kik' in args.clients:
            self.add_to_config(config_data, KikConfiguration(), args.defaults)
        if 'all' in args.clients or 'line' in args.clients:
            self.add_to_config(config_data, LineConfiguration(), args.defaults)
        if 'all' in args.clients or 'twilio' in args.clients:
            self.add_to_config(config_data, TwilioConfiguration(), args.defaults)
        if 'all' in args.clients or 'viber' in args.clients:
            self.add_to_config(config_data, ViberConfiguration(), args.defaults)
        if 'all' in args.clients or 'sanic' in args.clients:
            self.add_to_config(config_data, SanicRestConfiguration(name="sanic"), args.defaults)
        # The console client config nests the default bot config, which nests
        # the default brain config - emit both as top-level sections too.
        client_config = ConsoleConfiguration()
        bot_config = client_config.configurations[0]
        self.add_to_config(config_data, bot_config, args.defaults)
        brain_config = bot_config.configurations[0]
        self.add_to_config(config_data, brain_config, args.defaults)
        self.write_yaml(args.file, config_data)

    def write_yaml(self, filename, data):
        """Dump *data* to *filename* as YAML; failures are logged, not raised."""
        outputLog(self, "Writing new config file to [%s]" % filename)
        try:
            with open(filename, 'w') as outfile:
                yaml.dump(data, outfile, default_flow_style=False)
        except Exception as excep:
            outputLog(self, "Failed to write new config file [%s] - [%s]" % (filename, str(excep)))

    @staticmethod
    def create_arguments():
        """Build the argparse parser for the configuration-writer CLI."""
        parser = argparse.ArgumentParser(description='Program-Y Configuration Writer')
        parser.add_argument('-f', '--file', default="config.yaml", help="Name of file to create")
        parser.add_argument('-c', '--clients', nargs='+',
                            help="Name of client config to create, use multiple times or all")
        parser.add_argument('-d', '--defaults', action='store_true',
                            help="Create all config settings with default values")
        return parser

    @staticmethod
    def run(args=None):
        """CLI entry point: parse sys.argv (unless *args* is given) and execute."""
        parser = ConfigurationWriter.create_arguments()
        try:
            app = ConfigurationWriter()
            if args is None:
                app.execute(parser.parse_args())
            else:
                app.execute(args)
        except Exception as excep:
            outputLog(None, "Error: [%s]" % str(excep))
            parser.print_help()
# Script entry point: build and write the requested configuration file.
if __name__ == '__main__': # pragma: no cover
    ConfigurationWriter.run() # pragma: no cover
|
import tensorflow as tf
def weight_variable(name, shape):
    """
    Create a weight variable with appropriate initialization
    :param name: weight name
    :param shape: weight shape
    :return: initialized weight variable
    """
    # Xavier (Glorot) initialisation, normal variant.
    xavier = tf.contrib.layers.xavier_initializer(uniform=False)
    return tf.get_variable(
        'W_' + name,
        shape=shape,
        dtype=tf.float32,
        initializer=xavier)
def bias_variable(name, shape):
    """
    Create a bias variable with appropriate initialization
    :param name: bias variable name
    :param shape: bias variable shape
    :return: initial bias variable
    """
    # Biases start at zero; shape is baked into the initial constant.
    zeros = tf.constant(0., shape=shape, dtype=tf.float32)
    return tf.get_variable('b_' + name, dtype=tf.float32, initializer=zeros)
def PReLU(x, scope):
    """Parametric ReLU: PReLU(x) = x if x > 0, alpha * x otherwise."""
    # One learnable slope per scope, initialised to 0 (i.e. starts as ReLU).
    alpha = tf.get_variable(scope + "/alpha", shape=[1],
                            initializer=tf.constant_initializer(0), dtype=tf.float32)
    # (x - abs(x)) * 0.5 is x on the negative side and 0 on the positive side.
    positive_part = tf.nn.relu(x)
    negative_part = alpha * (x - abs(x)) * 0.5
    return positive_part + negative_part
# function for 2D spatial dropout:
def dropout(x, keep_prob):
    """Apply dropout to *x*, keeping each element with probability keep_prob.

    :param x: tensor of shape [batch_size, height, width, channels]
    :param keep_prob: probability of keeping each element

    NOTE(review): tf.nn.dropout without a noise_shape drops elements
    independently, not whole channels - confirm whether true 2D spatial
    dropout (a per-channel noise_shape) was intended, as the comment above says.
    """
    # x is a tensor of shape [batch_size, height, width, channels]
    return tf.nn.dropout(x, keep_prob)
# function for unpooling max_pool:
def max_unpool(inputs, pooling_indices, output_shape=None, k_size=[1, 2, 2, 1]):
    """Scatter *inputs* back to the positions recorded by a max-pool's argmax.

    :param inputs: pooled tensor of shape [batch_size, height, width, channels]
    :param pooling_indices: flattened argmax indices of the matching max-pool
    :param output_shape: shape of the unpooled result; it is indexed below, so
        despite the None default it must be supplied - TODO confirm callers do
    :param k_size: unused in this body; kept for call-site compatibility
    """
    # NOTE! this function is based on the implementation by kwotsin in
    # https://github.com/kwotsin/TensorFlow-ENet
    pooling_indices = tf.cast(pooling_indices, tf.int32)
    input_shape = tf.shape(inputs, out_type=tf.int32)
    one_like_pooling_indices = tf.ones_like(pooling_indices, dtype=tf.int32)
    # Batch coordinate: broadcast each element's batch index over the tensor.
    batch_shape = tf.concat([[input_shape[0]], [1], [1], [1]], 0)
    batch_range = tf.reshape(tf.range(input_shape[0], dtype=tf.int32), shape=batch_shape)
    b = one_like_pooling_indices * batch_range
    # Decompose the flat argmax index into row/column coordinates.
    # NOTE(review): this assumes indices were flattened over
    # (height * width * channels) per batch element - confirm against the
    # argmax flattening convention of the pooling op used.
    y = pooling_indices // (output_shape[2] * output_shape[3])
    x = (pooling_indices // output_shape[3]) % output_shape[2]
    # Channel coordinate: position within the innermost dimension.
    feature_range = tf.range(output_shape[3], dtype=tf.int32)
    f = one_like_pooling_indices * feature_range
    # Assemble (b, y, x, f) index tuples and scatter the pooled values into
    # an all-zero tensor of the requested output shape.
    inputs_size = tf.size(inputs)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, inputs_size]))
    values = tf.reshape(inputs, [inputs_size])
    ret = tf.scatter_nd(indices, values, output_shape)
    return ret
def drop_connect(w, keep_prob):
    """DropConnect: randomly zero elements of the weight tensor *w*.

    tf.nn.dropout scales surviving elements by 1/keep_prob; multiplying by
    keep_prob afterwards cancels that rescaling, so surviving weights keep
    their original magnitude.
    """
    return tf.nn.dropout(w, keep_prob=keep_prob) * keep_prob
|
import sys
import os
import re
import numpy as np
import adagram
import multiprocessing
from functools import partial
import warnings
import argparse
warnings.filterwarnings("ignore")
WINDOW_SIZE = 3
def find_context(ind, words, window=WINDOW_SIZE):
    """Return up to *window* words on each side of position *ind* in *words*.

    The word at *ind* itself is excluded from the result.
    """
    # Clamp only the left edge explicitly: a negative slice start would wrap
    # around, while an end index past the list is harmlessly truncated.
    start = max(ind - window, 0)
    left = words[start:ind]
    right = words[ind + 1:ind + window + 1]
    return left + right
def find_sense(word, context, model):
    """Pick the most probable AdaGram sense index for *word* given *context*.

    :param word: token, typically "lemma_POS"
    :param context: list of surrounding tokens (see find_context)
    :param model: loaded adagram.VectorModel (anything with .disambiguate)
    :return: sense index as a string; "0" for determiners/pronouns or on failure
    """
    try:
        # Determiners and pronouns are never disambiguated - always sense 0.
        # NOTE(review): the checks are asymmetric ('_DET' vs 'PRON') - confirm
        # whether 'PRON' should also require the underscore prefix.
        if '_DET' in word or 'PRON' in word:
            sense = 0
        else:
            probs = model.disambiguate(word, context)
            sense = np.argmax(probs, axis=0)
    except Exception:
        # Unknown words or any model failure fall back to sense 0. Narrowed
        # from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        sense = 0
    return str(sense)
def sense_line(line, model, window=WINDOW_SIZE):
    """Append an AdaGram sense id ("word_N") to every token of *line*."""
    tokens = line.split()
    tagged = []
    for position, token in enumerate(tokens):
        neighbours = find_context(position, tokens, window=window)
        sense_id = find_sense(token, neighbours, model=model)
        tagged.append(token + '_' + sense_id)
    return ' '.join(tagged)
def sense_file(file, model, folder, folder_sense, window=WINDOW_SIZE):
    """Disambiguate every line of folder/file, appending to folder_sense/sense_<file>.

    :param file: file name inside *folder* (note: shadows the Py2 builtin `file`)
    :param model: loaded adagram.VectorModel
    :param folder: directory containing the input file
    :param folder_sense: directory receiving the "sense_" output file
    :param window: context window size passed through to sense_line
    :return: the processed file name (echoed so the multiprocessing driver can log progress)
    """
    # Output is opened in append mode, so re-running without cleaning
    # folder_sense will duplicate content.
    with open('{}/{}'.format(folder, file), 'r') as file_r, open('{}/sense_'.format(folder_sense) + file, 'a') as file_w:
        for line in file_r:
            new_file = sense_line(line, model=model, window=window)
            file_w.write(new_file + '\n')
    return file
def main():
    """CLI driver: sense-disambiguate every lemma_pos file of a language pair.

    Expects ../languages/<l1>_<l2>/<l1>/<l1>_lemma_pos/ to hold the inputs and
    writes sense_* files into a sibling <l1>_sense folder, fanning the work
    out over a pool of 8 worker processes.
    """
    # NOTE(review): the description mentions UDPipe but this script performs
    # AdaGram sense disambiguation - likely copy/pasted from a sibling script.
    parser = argparse.ArgumentParser(description='Parsing corpus using UDPipe.')
    parser.add_argument('lang_1', type=str, help='name of the language to parse as in downloaded corpus \
                        (the name must match the .txt file name from the corpus)')
    parser.add_argument('lang_2', type=str, help='name of the second language in pair as in downloaded corpus \
                        (the name must match the .txt file name from the corpus)')
    parser.add_argument('vm', type=str, help='Adagram model')
    args = parser.parse_args()
    lang_1 = args.lang_1
    lang_2 = args.lang_2
    vm = args.vm
    # The pair folder may be named either <l1>_<l2> or <l2>_<l1>.
    folder = '../languages/' + lang_1 + '_' + lang_2 + '/' + lang_1
    if not os.path.isdir(folder):
        folder = '../languages/' + lang_2 + '_' + lang_1 + '/' + lang_1  # where small files with lemma_pos
    folder_sense = folder + '/' + lang_1 + '_sense'  # new folder for disambiguated files
    folder += '/' + lang_1 + '_lemma_pos'
    vm = adagram.VectorModel.load(vm)
    files = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f)) and 'lemma_pos' in f]
    # Largest numeric suffix first (files are assumed to carry a number).
    files.sort(key=lambda x: int(re.findall('[0-9]+', x)[0]), reverse=True)
    pool = multiprocessing.Pool(8)
    if not os.path.exists(folder_sense):
        os.mkdir(folder_sense)
    partial_sense = partial(sense_file, model=vm, folder=folder, folder_sense=folder_sense, window=WINDOW_SIZE)
    # imap yields each finished file name as workers complete it.
    for i in pool.imap(partial_sense, files):
        print(i)
# Script entry point.
if __name__ == "__main__":
    main()
|
import re

# Advent of Code day 8 (part 1): execute the hand-held console program until
# some instruction is about to run a second time, then report the accumulator.
with open('8.in') as f:
    instructions = [line.strip() for line in f]

accumulator = 0
pointer = 0
seen = set()
while pointer not in seen:
    seen.add(pointer)
    op, arg = re.match(r'(acc|jmp|nop) ([+\-][\d]+)', instructions[pointer]).groups()
    arg = int(arg)
    if op == 'jmp':
        pointer += arg
    else:
        if op == 'acc':
            accumulator += arg
        pointer += 1
print(accumulator)
|
# -*- coding: utf-8 -*-
from core.views.helpers.Helper import Helper
class Html(Helper):
    """Html Helper Class for rendering well formatted html elements.

    Templates in ``tags`` are str.format strings; by convention {0} is the
    attribute/options string, {1} the inner text, and {2} (where present) a
    third component such as link text.
    """
    charset = "UTF-8"
    tags = {
        'meta' : '<meta{0}/>',
        'metalink' : '<link href="{0}"{1}/>',
        # Fixed: second placeholder was a duplicated {0}, which dropped the
        # attributes and repeated the href.
        'link' : '<a href="{0}"{1}>{2}</a>',
        'mailto' : '<a href="mailto:{0}" {1}>{2}</a>',
        'form' : '<form {1}>',
        'formend' : '</form>',
        'input' : '<input name="{0}" {1}/>',
        'textarea' : '<textarea name="{0}" {1}>{2}</textarea>',
        'hidden' : '<input type="hidden" name="{0}" {1}/>',
        'checkbox' : '<input type="checkbox" name="{0}" {1}/>',
        'checkboxmultiple' : '<input type="checkbox" name="{0}[]"{1} />',
        'radio' : '<input type="radio" name="{0}" id="{2}" {3} />{4}',
        'selectstart' : '<select name="{0}"{1}>',
        'selectmultiplestart' : '<select name="{0}[]"{1}>',
        'selectempty' : '<option value=""{0}> </option>',
        'selectoption' : '<option value="{0}"{1}>{2}</option>',
        'selectend' : '</select>',
        'optiongroup' : '<optgroup label="{0}"{1}>',
        'optiongroupend' : '</optgroup>',
        'checkboxmultiplestart' : '',
        'checkboxmultipleend' : '',
        'password' : '<input type="password" name="{0}" {1}/>',
        'file' : '<input type="file" name="{0}" {1}/>',
        'file_no_model' : '<input type="file" name="{0}" {1}/>',
        'submit' : '<input {0}/>',
        'submitimage' : '<input type="image" src="{0}" {1}/>',
        'button' : '<button type="{0}"{1}>{2}</button>',
        'image' : '<img src="{0}" {1}/>',
        # Fixed: content placeholder was a duplicated {0}, which printed the
        # attribute string as the cell content.
        'tableheader' : '<th{0}>{1}</th>',
        'tableheaderrow' : '<tr{0}>{1}</tr>',
        'tablecell' : '<td{0}>{1}</td>',
        'tablerow' : '<tr{0}>{1}</tr>',
        'block' : '<div{0}>{1}</div>',
        # Fixed: used {1} (the text slot) for attributes instead of {0}.
        'blockstart' : '<div{0}>',
        'blockend' : '</div>',
        'tag' : '<{0}{1}>{2}</{3}>',
        'tagstart' : '<{0}{1}>',
        'tagend' : '</{0}>',
        'para' : '<p{0}>{1}</p>',
        'parastart' : '<p{0}>',
        'label' : '<label for="{0}"{1}>{2}</label>',
        'fieldset' : '<fieldset{0}>{1}</fieldset>',
        'fieldsetstart' : '<fieldset><legend>{0}</legend>',
        'fieldsetend' : '</fieldset>',
        'legend' : '<legend>{0}</legend>',
        'css' : '<link rel="{0}" type="text/css" href="{1}" {2}/>',
        'style' : '<style type="text/css"{0}>{1}</style>',
        'charset' : '<meta http-equiv="Content-Type" content="text/html; charset={0}" />',
        'ul' : '<ul{0}>{1}</ul>',
        'ol' : '<ol{0}>{1}</ol>',
        'li' : '<li{0}>{1}</li>',
        'error' : '<div{0}>{1}</div>',
        'javascriptblock' : '<script type="text/javascript"{0}>{1}</script>',
        'javascriptstart' : '<script type="text/javascript">',
        'javascriptlink' : '<script type="text/javascript" src="{0}"{1}></script>',
        'javascriptend' : '</script>'
    }

    def __init__(self, charset = "UTF-8"):
        self.charset = charset

    def tag(self, name, text = None, options = None):
        """
        Render an Html.tag that matches the name variable

        :param name: key into Html.tags
        :param text: inner text, substituted as {1} (defaults to "")
        :param options: attribute string, substituted as {0} (defaults to "")

        NOTE(review): only two positional arguments are supplied, so templates
        using {2} or higher (e.g. 'link', 'mailto', 'radio') raise IndexError
        through this method; the result is printed rather than returned.
        """
        if text is None:
            text = ""
        if options is None:
            options = ""
        if name in self.tags:
            print(self.tags[name].format(options, text))
|
import numpy as np
def conv_forward(input_data, filters, bias, padding=0, stride=1):
    """Convolution layer forward pass followed by a ReLU activation.

    :param input_data: inputs of shape (m, nc, iw, ih)
    :param filters: filters of shape (nf, nc, fw, fh); nf is the filter count
    :param bias: one scalar bias per filter, shape (nf, 1)
    :param padding: zero-padding applied to both spatial dimensions
    :param stride: convolution stride
    :return: (activations of shape (m, nf, ow, oh), cache for the backward pass)
    """
    m = input_data.shape[0]
    iw, ih = input_data.shape[2], input_data.shape[3]
    nf, _, fw, fh = filters.shape
    # Output spatial size from the usual convolution arithmetic.
    ow = int((iw + 2 * padding - fw) / stride + 1)
    oh = int((ih + 2 * padding - fh) / stride + 1)
    pad_spec = ((0, 0), (0, 0), (padding, padding), (padding, padding))
    padded = np.pad(input_data, pad_spec, 'constant')
    z = np.zeros((m, nf, ow, oh))
    for b_idx in range(m):  # every sample in the batch
        sample = padded[b_idx]
        for col in range(ow):
            ws = col * stride
            for row in range(oh):
                hs = row * stride
                # The input window this output position convolves over.
                window = sample[:, ws:ws + fw, hs:hs + fh]
                for f_idx in range(nf):  # every filter
                    z[b_idx, f_idx, col, row] = np.sum(window * filters[f_idx]) + bias[f_idx][0]
    cache = (z, input_data, filters, bias, padding, stride)
    return np.maximum(z, 0), cache
def conv_backword(da, cache):
    """Convolution-layer backward pass (matching conv_forward, incl. its ReLU).

    :param da: gradient w.r.t. this layer's activations, shape (m, nf, ow, oh);
        combined with the cached pre-activation z to form dz
    :param cache: (z, input_data, filters, bias, padding, stride) from conv_forward
    :return: (da_1, dw, db) - gradients w.r.t. the layer input, the filters
        and the biases
    """
    # da: m (batch) x nf (number of filters) x ow x oh
    z, input_data, filters, bais, padding, stride = cache
    padding_a_1 = np.pad(input_data, ((0, 0), (0, 0), (padding, padding), (padding, padding)), 'constant')
    # print(z.shape,padding_a_1.shape)
    # ReLU derivative: 1 where z > 0, else 0.
    # NOTE(review): this mutates the cached z array in place.
    z[z <= 0] = 0
    z[z > 0] = 1
    dz = da * z
    m,nf,w,h = dz.shape
    f = filters.shape[3]
    # Gradient w.r.t. the (unpadded) input: m x nc x iw x ih
    da_1 = np.zeros(input_data.shape)
    dw = np.zeros(filters.shape)
    db = np.zeros(bais.shape)
    padding_da_1 = np.pad(da_1, ((0, 0), (0, 0), (padding, padding), (padding, padding)), 'constant')
    for index_m in range(m):  # every sample in the batch
        for index_c in range(nf):  # every dz channel, i.e. every filter
            for index_w in range(w):
                for index_h in range(h):
                    w_start = index_w * stride
                    w_end = index_w * stride + f
                    h_start = index_h * stride
                    h_end = index_h * stride + f
                    # Scatter this output gradient back over its input window
                    # and accumulate the filter/bias gradients.
                    padding_da_1[index_m,:,w_start:w_end,h_start:h_end] += dz[index_m,index_c,index_w,index_h] * filters[index_c]
                    dw[index_c] += dz[index_m,index_c,index_w,index_h] * padding_a_1[index_m,:,w_start:w_end,h_start:h_end]
                    db[index_c] += dz[index_m,index_c,index_w,index_h]
        # Crop the padded gradient back to the original input size (a plain
        # copy when padding is 0, since [0:-0] would be an empty slice).
        if padding:
            da_1[index_m, :, :, :] = padding_da_1[index_m,:,padding:-padding,padding:-padding]
        else:
            da_1[index_m, :, :, :] = padding_da_1[index_m]
    return da_1, dw, db
# def test_conv_2():
# # np.random.seed(1)
# input = np.random.randn(10,1,28,28).astype(np.float64)
# w = np.random.randn(6,1,5,5).astype(np.float64)
# b = np.random.randn(6,1).astype(np.float64)
# y = np.ones((10,6,24,24)).astype(np.float64)
# for i in range(50):
# print('-'*50)
# z, cache = conv_forward(input, w, b)
# loss = np.mean(np.square(z - y))
# print(loss)
# dz = z-y
# _, dw, db = conv_backword(dz, cache)
# # print(dw)
# # print(b)
# # print(db)
# w -= dw * 0.001
# b -= db * 0.001
# test_conv_2()
# def test_conv():
# np.random.seed(1)
# x = np.array([x for x in range(1,17)] + [x for x in range(1,17)]).reshape(1,2,4,4)
# w = np.ones((1, 2, 4, 4)) * 0.2
# b = np.ones((1, 1))
# print(x)
# print("\n")
#
# a, cache_conv = conv_forward(x, w, b)
#
# print(a.shape)
# print(a)
# print("\n")
# testa = np.ones((1,1,3,3))
# print("\n")
# da_1, dw, db = conv_backword(testa, cache_conv)
# print(db)
# print("\n")
#
# print(dw)
# print("\n")
# test_conv()
def pool_forward(input_data, pool_size, stride, mode='max'):
    """Pooling-layer forward pass; keeps (m, nc), shrinks the spatial dims.

    :param input_data: inputs of shape (m, nc, iw, ih) from the previous layer
    :param pool_size: pooling window edge length
    :param stride: pooling stride
    :param mode: 'max' or 'avg'
    :return: (pooled output of shape (m, nc, ow, oh), cache for backprop)
    """
    m, nc, iw, ih = input_data.shape
    ow = int((iw - pool_size) / stride) + 1
    oh = int((ih - pool_size) / stride) + 1
    pooled = np.zeros((m, nc, ow, oh))
    for b_idx in range(m):  # every sample in the batch
        sample = input_data[b_idx]
        for c_idx in range(len(sample)):  # every channel
            for col in range(ow):
                ws = col * stride
                for row in range(oh):
                    hs = row * stride
                    window = sample[c_idx, ws:ws + pool_size, hs:hs + pool_size]
                    if mode == 'max':
                        window = np.max(window)
                    elif mode == 'avg':
                        window = np.mean(window)
                    pooled[b_idx, c_idx, col, row] = window
    cache = (input_data, pool_size, stride, mode)
    return pooled, cache
def pool_backward(da, cache):
    """Pooling-layer backward pass.

    :param da: gradient w.r.t. the pooled output, shape (m, nc, ow, oh)
    :param cache: (input_data, pool_size, stride, mode) from pool_forward
    :return: gradient w.r.t. the pooling layer's input, same shape as that input
    """
    # da is the gradient arriving from the next layer, to be propagated
    # through the pooling operation back to the previous layer's output.
    # a_prev is the previous layer's output, i.e. this pooling layer's input.
    a_prev, pool_size, stride, mode = cache
    da_m, da_nc, da_w, da_h = da.shape
    da_prev = np.zeros(a_prev.shape)
    # Visit every element of da and accumulate its contribution into da_prev.
    for index_m in range(da_m):
        for index_c in range(da_nc):
            for index_w in range(da_w):
                for index_h in range(da_h):
                    w_start = index_w * stride
                    w_end = index_w * stride + pool_size
                    h_start = index_h * stride
                    h_end = index_h * stride + pool_size
                    a_slice = a_prev[index_m,index_c,w_start:w_end,h_start:h_end]
                    if mode == 'max':
                        # Only the max position(s) of the window get the gradient.
                        mask = a_slice == np.max(a_slice)
                        da_prev[index_m,index_c,w_start:w_end,h_start:h_end] += \
                            da[index_m,index_c,index_w,index_h] * mask
                    elif mode == 'avg':
                        # Spread the gradient uniformly over the window.
                        da_prev[index_m, index_c, w_start:w_end, h_start:h_end] += \
                            da[index_m,index_c,index_w,index_h] * np.ones((pool_size,pool_size)) / (pool_size*pool_size)
    return da_prev
# def test_pool():
# np.random.seed(1)
# # A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
# # A_prev = np.random.randn(2, 4, 4, 3)
# A_prev = np.random.randn(1, 1, 6, 6)
# A_prev = np.array(range(1,37)).reshape(1, 1, 6, 6)
#
# pool = 2
# stride = 2
# res, cache = pool_forward(A_prev,pool,stride,'avg')
# print(res)
# print('-'*50)
# # res = np.ones((1, 1, 3, 3))
# res = pool_backward(res,cache)
# print(res)
# print(res.shape)
# test_pool()
def fc_forward(ai_1, w, b, mode):
    """Fully connected forward pass with a ReLU or column-wise softmax.

    :param ai_1: previous layer's activation, shape (n_in, batch)
    :param w: weights, shape (n_out, n_in)
    :param b: biases, shape (n_out, 1)
    :param mode: 'relu' for ReLU; anything else applies softmax per column
    :return: (activation, cache (ai_1, z, w) for the backward pass)
    """
    z = np.dot(w, ai_1) + b
    if mode == 'relu':
        activation = np.maximum(z, 0)
    elif z.shape[1] == 1:
        # Single sample: subtract the global max for numerical stability.
        shifted = np.exp(z - np.max(z))
        activation = shifted / np.sum(shifted)
    else:
        # Batch: stabilise and normalise each column independently.
        shifted = np.exp(z - np.max(z, axis=0, keepdims=True))
        activation = shifted / np.sum(shifted, axis=0, keepdims=True)
    return activation, (ai_1, z, w)
def fc_backward(da, cache, dz=None):
    """Fully-connected layer backward pass.

    :param da: dai, gradient w.r.t. this layer's activation; multiplied with
        z.T below, so it is expected transposed relative to z
    :param cache: (ai_1, z, w) from fc_forward
    :param dz: optional precomputed dzi (e.g. from softmax + cross-entropy);
        when given, da and the ReLU derivative are skipped entirely
    :return: (dw, db, dai_1) where dai_1 feeds the previous layer's backward pass
    """
    ai_1, z, w = cache
    if dz is None:
        # ReLU derivative: 1 where z > 0, else 0.
        # NOTE(review): mutates the cached z array in place.
        z[z <= 0] = 0
        z[z > 0] = 1
        dz = da * z.T
    dw = np.dot(dz.T, ai_1.T)
    db = np.sum(dz.T, axis=1, keepdims=True)
    dai_1 = np.dot(dz, w)
    return dw,db,dai_1
def get_data():
    """Load the MNIST training CSV and preprocess it.

    :return: (x, y) where x is (784, m): pixels scaled into [0.01, 1.0] and
        then standardised per pixel across the dataset, and y is (10, m)
        one-hot labels.
    """
    with open('../data/mnist_train.csv', 'r') as f:
    # with open('../data/mnist_test.csv', 'r') as f:
        data = [x.strip().split(',') for x in f]
    data = np.asarray(data, dtype='float').T
    # print(data[0][:10])
    # Row 0 holds the labels; expand them into one-hot columns.
    y = np.zeros((10, len(data[0])))
    for i, x in enumerate(data[0]):
        y[:, i][int(x)] = 1
    x = np.delete(data, 0, 0)
    x = (x / 255.0 * 0.99) + 0.01
    # Standardise each pixel row (zero mean, unit variance across samples).
    x = x - x.mean(axis=1, keepdims=True)
    x = x / np.sqrt(x.var(axis=1, keepdims=True) + 10e-8)
    return x, y
def get_test_data():
    """Load the MNIST test CSV and preprocess it like get_data.

    :return: (x, y) where x is (784, m) preprocessed pixels and y is the raw
        label vector (NOT one-hot, unlike get_data).
    """
    with open('../data/mnist_test.csv', 'r') as f:
        data = [x.strip().split(',') for x in f]
    data = np.asarray(data, dtype='float').T
    # print(data[0][:10])
    y = data[0]
    x = np.delete(data, 0, 0)
    x = ((x / 255.0) * 0.99) + 0.01
    x = x - x.mean(axis=1, keepdims=True)
    x = x / np.sqrt(x.var(axis=1, keepdims=True) + 10e-8)
    return x, y
class LeNet:
    """LeNet-5-style CNN built on the from-scratch numpy layers above.

    Architecture: conv(6@5x5) -> avg pool -> conv(16@5x5) -> avg pool ->
    conv(120@4x4) -> FC(84, ReLU) -> FC(10, softmax).
    Training uses mini-batch gradient descent with momentum-style smoothing
    of the gradients (beta = 0.9).
    """

    def __init__(self,learning_rate):
        """Initialise parameters/momentum buffers and eagerly load the data.

        :param learning_rate: initial learning rate (decayed inside train())
        """
        self.batch_size = 64
        # self.w_a = np.random.randn(6, 1, 5, 5)/10
        # self.b_a = np.random.randn(6, 1)/10
        self.w_a = np.random.uniform(-1/6, 1/6, (6, 1, 5, 5))
        self.b_a = np.random.uniform(-1, 1, (6, 1))
        self.vdw_a = np.zeros(self.w_a.shape)
        self.vdb_a = np.zeros(self.b_a.shape)
        # self.w_c = np.random.randn(16, 6, 5, 5)/10
        # self.b_c = np.random.randn(16, 1)/10
        self.w_c = np.random.uniform(-1/6, 1/6, (16, 6, 5, 5))
        self.b_c = np.random.uniform(-1, 1,(16, 1))
        self.vdw_c = np.zeros(self.w_c.shape)
        self.vdb_c = np.zeros(self.b_c.shape)
        # self.w_e = np.random.randn(120, 16, 4, 4)/10
        # self.b_e = np.random.randn(120, 1)/10
        self.w_e = np.random.uniform(-1/10, 1/10, (120, 16, 4, 4))
        self.b_e = np.random.uniform(-1, 1,(120, 1))
        self.vdw_e = np.zeros(self.w_e.shape)
        self.vdb_e = np.zeros(self.b_e.shape)
        #
        # self.w_f = np.random.randn(84, 120)
        # self.b_f = np.zeros((84, 1))
        self.w_f = np.random.randn(84, 120)
        self.b_f = np.random.randn(84, 1)
        self.vdw_f = np.zeros(self.w_f.shape)
        self.vdb_f = np.zeros(self.b_f.shape)
        self.w_g = np.random.randn(10, 84)
        self.b_g = np.random.randn(10, 1)
        # self.w_g = np.random.uniform(-2.4/85, 2.4/85, (10, 84))
        # self.b_g = np.random.uniform(-2.4/85, 2.4/85, (10, 1))
        self.vdw_g = np.zeros(self.w_g.shape)
        self.vdb_g = np.zeros(self.b_g.shape)
        # Training data is loaded and batched eagerly at construction time.
        self.x, self.y = self.get_batch_data()
        self.learning_rate = learning_rate
        self.beta = 0.9
        print('learning_rate',self.learning_rate)

    @staticmethod
    def cross_entropy(p, y, epsilon=1e-12):
        """Mean cross-entropy of predictions p (10, m) against one-hot y (10, m).

        Predictions are clipped away from 0/1 to avoid log(0).
        """
        p = np.clip(p, epsilon, 1. - epsilon)
        m = p.shape[1]
        ce = -np.sum(y * np.log(p)) / m
        return ce

    def get_batch_data(self):
        """Split the training data into mini-batches of self.batch_size.

        :return: (x_batches, y_batches); each x batch is reshaped to
            (batch, 1, 28, 28). A final partial batch is kept.
        """
        x, y = get_data()
        m = x.shape[1]
        batch_split_index = []
        index = self.batch_size
        while index <= m:
            batch_split_index.append(index)
            index += self.batch_size
        x_batches = np.split(x, batch_split_index, axis=1)
        y_batches = np.split(y, batch_split_index, axis=1)
        for index in range(len(x_batches)):
            batch_size = x_batches[index].shape[1]
            x_batches[index] = x_batches[index].T.reshape(batch_size,1,28,28)
        return x_batches,y_batches

    def test(self):
        """Evaluate the network on the MNIST test CSV, printing accuracy (%)."""
        x, y = get_test_data()
        m = x.shape[1]
        x = x.T
        x = x.reshape(m,1,28,28)
        true = 0
        for i in range(m):
            features = x[i].reshape(1,1,28,28)
            label = y[i]
            p = self.predict(features)
            print(p)
            # p is a (10, 1) column; tolist() yields one-element lists, which
            # max() compares element-wise, so index(max(p)) is the argmax row.
            p = p.tolist()
            pn = p.index(max(p))
            print(f'predict : {pn}, Ture: {label}')  # NOTE(review): "Ture" is a typo for "True" in this output
            print('------')
            if pn == label:
                true += 1
        print(true/m *100)
        # break

    def predict(self,x):
        """Forward pass only; x is (batch, 1, 28, 28), returns (10, batch) softmax."""
        batch_size = x.shape[0]
        a_a, cache_a = conv_forward(x, self.w_a, self.b_a, padding=0, stride=1)
        a_b, cache_b = pool_forward(a_a, 2, 2, mode='avg')
        a_c, cache_c = conv_forward(a_b, self.w_c, self.b_c, padding=0, stride=1)
        a_d, cache_d = pool_forward(a_c, 2, 2, mode='avg')
        a_e, cache_e = conv_forward(a_d, self.w_e, self.b_e, padding=0, stride=1)
        a_e_reshape = a_e.reshape(batch_size, 120).T
        a_f, cache_f = fc_forward(a_e_reshape, self.w_f, self.b_f, 'relu')
        a_g, cache_g = fc_forward(a_f, self.w_g, self.b_g, 'softmax')
        return a_g

    def train(self,iteration):
        """Train for *iteration* epochs of mini-batch gradient descent with momentum.

        :param iteration: number of epochs over all mini-batches
        """
        beta = self.beta
        for epoch in range(1,iteration+1):
            print('-'*50,epoch)
            # NOTE(review): this divides the already-decayed rate by the epoch
            # number every epoch (lr, lr/2, lr/6, lr/24, ... - factorial decay);
            # confirm whether the intended schedule was initial_lr / epoch.
            self.learning_rate = self.learning_rate/epoch
            for x, y in zip(self.x, self.y):
                # print('=='*20)
                batch_size = x.shape[0]
                # --- forward -----
                # layer a convolution layer
                # input : (batch-size, 1, 28, 28)
                # padding : 1
                # stride : 1
                # filters : (6, 1, 5, 5)
                # b : (6, 1)
                # output : (batch-size, 6, 24, 24)
                a_a, cache_a = conv_forward(x, self.w_a, self.b_a, padding=0, stride=1)
                # print('a',np.mean(a_a))
                # layer b pooling layer
                # input : (batch-size, 6, 24, 24)
                # stride : 2
                # filter : (2,2)
                # output : (batch-size, 6, 12, 12)
                # pool_forward(input_data, pool_size, stride, mode='max')
                a_b, cache_b = pool_forward(a_a, 2, 2, mode='avg')
                # layer c convolution layer
                # input : (batch-size, 6, 12, 12)
                # padding : 1
                # stride : 1
                # filters : (16, 6, 5, 5)
                # b : (16, 1)
                # output : (batch-size, 16, 8, 8)
                a_c, cache_c = conv_forward(a_b, self.w_c, self.b_c, padding=0, stride=1)
                # print('c',np.mean(a_c))
                # layer d pooling layer
                # input : (batch-size, 16, 8, 8)
                # stride : 2
                # filter : (2,2)
                # output : (batch-size, 16, 4, 4)
                a_d, cache_d = pool_forward(a_c, 2, 2, mode='avg')
                # layer e convolution layer
                # input : (batch-size, 16, 4, 4)
                # padding : 1
                # stride : 1
                # filters : (120, 16, 4, 4)
                # b : (120, 1)
                # output : (batch-size, 120, 1, 1)
                # reshape to (120*batch-size)
                a_e, cache_e = conv_forward(a_d, self.w_e, self.b_e, padding=0, stride=1)
                a_e_reshape = a_e.reshape(batch_size,120).T
                # print('e',np.mean(a_e))
                # layer f fully connected layer
                # input : (120, batch_size)
                # w : (84, 120)
                # b : (84, 1)
                # output : (84,batch-size)
                a_f, cache_f = fc_forward(a_e_reshape, self.w_f, self.b_f,'relu')
                # print('f',np.mean(a_f))
                # layer g output with softmax
                # input : (batch-size, 84)
                # w : (10, 84)
                # b : (10, 1)
                # output : (batch-size, 10)
                a_g, cache_g = fc_forward(a_f, self.w_g, self.b_g, 'softmax')
                loss = self.cross_entropy(a_g, y)
                # print("\n")
                print(f'loss : {loss}')
                # print("\n")
                # --- backward ---
                # Softmax + cross-entropy gradient, passed to fc_backward as a
                # precomputed dz (so the ReLU-derivative path is skipped).
                dz_g = (a_g - y).T / batch_size
                dw_g, db_g, da_f = fc_backward(None, cache_g, dz_g)
                # print('dwg',np.abs(dw_g).mean())
                # print('dbg',np.abs(db_g).mean())
                # self.w_g -= dw_g * self.learning_rate
                # self.b_g -= db_g * self.learning_rate
                # Momentum update: exponential moving average of the gradients.
                vdw_g = beta * self.vdw_g + (1 - beta) * dw_g
                vdb_g = beta * self.vdb_g + (1 - beta) * db_g
                self.vdw_g = vdw_g
                self.vdb_g = vdb_g
                self.w_g -= vdw_g * self.learning_rate
                self.b_g -= vdb_g * self.learning_rate
                # layer f fully connected layer
                # input : (120.batch_size)
                # w : (84, 120)
                # b : (84, 1)
                # output : (84,batch-size)
                dw_f, db_f, da_e = fc_backward(da_f, cache_f, None)
                # print('dwf',np.abs(dw_f).mean())
                # print('dbf',np.abs(db_f).mean())
                # self.w_f -= dw_f * self.learning_rate
                # self.b_f -= db_f * self.learning_rate
                vdw_f = beta * self.vdw_f + (1 - beta) * dw_f
                vdb_f = beta * self.vdb_f + (1 - beta) * db_f
                self.vdw_f = vdw_f
                self.vdb_f = vdb_f
                self.w_f -= vdw_f * self.learning_rate
                self.b_f -= vdb_f * self.learning_rate
                # layer e convolution layer
                # input : (batch-size, 16, 4, 4)
                # padding : 1
                # stride : 1
                # filters : (120, 16, 4, 4)
                # b : (120, 1)
                # output : (batch-size, 120, 1, 1)
                # reshape to (120*batch-size)
                da_e_reshape = da_e.reshape(batch_size, 120, 1, 1)
                da_d, dw_e, db_e = conv_backword(da_e_reshape, cache_e)
                # print('dwe',np.abs(dw_e).mean())
                # print('dbe',np.abs(db_e).mean())
                # self.w_e -= dw_e * self.learning_rate
                # self.b_e -= db_e * self.learning_rate
                vdw_e = beta * self.vdw_e + (1 - beta) * dw_e
                vdb_e = beta * self.vdb_e + (1 - beta) * db_e
                self.vdw_e = vdw_e
                self.vdb_e = vdb_e
                self.w_e -= vdw_e * self.learning_rate
                self.b_e -= vdb_e * self.learning_rate
                # layer d pooling layer
                # input : (batch-size, 16, 8, 8)
                # stride : 2
                # filter : (2,2)
                # output : (batch-size, 16, 4, 4)
                da_c = pool_backward(da_d,cache_d)
                # layer c convolution layer
                # input : (batch-size, 6, 12, 12)
                # padding : 1
                # stride : 1
                # filters : (16, 6, 5, 5)
                # b : (16, 1)
                # output : (batch-size, 16, 8, 8)
                da_b,dw_c,db_c = conv_backword(da_c,cache_c)
                # print('dwc',np.abs(dw_c).mean())
                # print('dbc',np.abs(db_c).mean())
                # self.w_c -= dw_c * self.learning_rate
                # self.b_c -= db_c * self.learning_rate
                vdw_c = beta * self.vdw_c + (1 - beta) * dw_c
                vdb_c = beta * self.vdb_c + (1 - beta) * db_c
                self.vdw_c = vdw_c
                self.vdb_c = vdb_c
                self.w_c -= vdw_c * self.learning_rate
                self.b_c -= vdb_c * self.learning_rate
                # layer b pooling layer
                # input : (batch-size, 6, 24, 24)
                # stride : 2
                # filter : (2,2)
                # output : (batch-size, 6, 12, 12)
                # pool_forward(input_data, pool_size, stride, mode='max')
                da_a = pool_backward(da_b, cache_b)
                # layer a convolution layer
                # input : (batch-size, 1, 28, 28)
                # padding : 1
                # stride : 1
                # filters : (6, 1, 5, 5)
                # b : (6, 1)
                # output : (batch-size, 6, 24, 24)
                da_x, dw_a, db_a = conv_backword(da_a, cache_a)
                # self.w_a -= dw_a * self.learning_rate
                # self.b_a -= db_a * self.learning_rate
                # print('dwa',np.abs(dw_a).mean())
                # print('dba',np.abs(db_a).mean())
                # print(dw_a)
                vdw_a = beta * self.vdw_a + (1 - beta) * dw_a
                vdb_a = beta * self.vdb_a + (1 - beta) * db_a
                self.vdw_a = vdw_a
                self.vdb_a = vdb_a
                self.w_a -= vdw_a * self.learning_rate
                self.b_a -= vdb_a * self.learning_rate
                # return
# Script entry point: train the from-scratch LeNet for 5 epochs at learning
# rate 1e-4, then evaluate accuracy on the MNIST test CSV.
if __name__ == '__main__':
    net = LeNet(0.0001)
    net.train(5)
    net.test()
|
"""
《邢不行-2020新版|Python数字货币量化投资课程》
无需编程基础,助教答疑服务,专属策略网站,一旦加入,永续更新。
课程详细介绍:https://quantclass.cn/crypto/class
邢不行微信: xbx9025
本程序作者: 邢不行/西蒙斯
# 课程内容
- 列表介绍
- 列表常见操作
功能:本程序主要介绍python的常用内置数据结构,如list、dict、str等。希望以后大家只要看这个程序,就能回想起相关的基础知识。
"""
# =====list介绍
# 使用[]中括号就可以新建一个数组。
# list_var = [] # 这是一个空list
# print(list_var, type(list_var))
# list是具有顺序的一组对象,其中的元素不需要是同类型
# list_var = [1, '2', 3, 4.0, 5, 6, 'seven', [8], '九'] # list举例,其中包含了整数、小数、字符串、数组
# print(list_var)
# =====list常见操作:索引,选取list中的某个元素
# list_var = [1, '2', 3, 4.0, 5, 6, 'seven', [8], '九'] # list举例
# print(list_var[0]) # 输出排在第1个位置的元素。位置的计数是从0开始的。
# print(list_var[3]) # 输出排在第4个位置的元素。
# print(list_var[8]) # 输出排在第9个位置的元素。也就是最后一个元素。
# print(list_var[-1]) # 输出最后一个元素的另外一种方式。
# print(list_var[-2]) # 输出最后第二个位置的元素。
# print(list_var[9]) # 超出长度会报错 IndexError: list index out of range
# print(list_var[-10]) # 超出长度会报错 IndexError: list index out of range
# list_var[3] = 100 # 可以根据索引,直接修改list中对应位置的元素
# print(list_var)
# =====list常见操作:切片,选取list中的一连串元素
# list_var = [1, '2', 3, 4.0, 5, 6, 'seven', [8], '九'] # list举例
# print(list_var[3:8]) # list[a:b],从第a个位置开始,一直到第b个位置之前的那些元素
# print(list_var[:4]) # list[:b],从头开始,一直到第b个位置之前的那些元素
# print(list_var[3:]) # list[a:],从第a个位置开始,一直到最后一个元素
# print(list_var[1:7:3]) # list[a:b:c],每c个元素,选取其中的第一个
# =====list常见操作:两个list相加
# list_var1 = [1, '2', 3, 4.0, 5]
# list_var2 = [6, 'seven', [8], '九']
# print(list_var1 + list_var2) # 两个list相加
# =====list常见操作:判断一个元素是否在list当中
# list_var = [1, '2', 3, 4.0, 5]
# print(1 in list_var) # 判断1元素,是否在list_var中出现
# print(100 in list_var) # 判断100元素,是否在list_var中出现
# =====list常见操作:len,max,min
# list_var = [1, 2, 3, 4, 5]
# print(len(list_var)) # list中元素的个数,或者说是list的长度
# print(len([])) # 空list的长度是?
# print(max(list_var)) # 这个list中最大的元素,
# print(min(list_var)) # 最小的元素
# =====list常见操作:删除其中的一个元素
# list_var = [1, 2, 3, 4, 5]
# del list_var[0] # 删除位置0的那个元素
# print(list_var)
# =====list常见操作:如何查找一个元素的在list中的位置
# list_var = [3, 5, 1, 2, 4] # 如何才能知道1这个元素,在list中的位置是什么?
# 不知道的话,直接搜索
# =====list常见操作:append,在后方增加一个元素
# list_var = [1, '2', 3, 4.0, 5]
# list_var.append(6)
# print(list_var)
# list_var.append(['seven', [8], '九'])
# print(list_var)
# =====list常见操作:两个list合并
# list_var = [1, '2', 3, 4.0, 5]
# list_var.extend([6, 'seven', [8], '九'])
# print(list_var)
# =====list常见操作:逆序、排序、
# list_var = [3, 5, 1, 2, 4]
# list_var.reverse()
# print(list_var)
# list_var = [3, 5, 1, 2, 4]
# list_var.sort()
# print(list_var)
# list_var = [3, 5, 1, 2, 4]
# print(sorted(list_var))
# print(list_var)
|
from django.test import TestCase, Client
from student.models import Student
class StudentTestCase(TestCase):
    """Unit tests for the Student model and the /student/index/ view."""

    def setUp(self):
        # One known record available to every test in this case.
        Student.objects.create(
            name='wei',
            sex=1,
            email='wei@123.com',
            profession='程序员',
            qq='333',
            phone='3222',
        )

    def test_create_and_sex_show(self):
        """sex_show should render sex=1 as its display string '男'."""
        student = Student.objects.create(
            name='dd',
            sex=1,
            email='qq@qq.com',
            profession='程序员',
            qq='3333',
            phone='3221',
        )
        self.assertEqual(student.sex_show, '男', '性别字段内容跟展示不一样!')

    def test_filter(self):
        """Filtering by name should match only the single setUp record."""
        Student.objects.create(
            name='dd',
            sex=1,
            email="qq@qq.com",
            profession='程序员',
            qq='3333',
            phone='3221',
        )
        name = 'wei'
        students = Student.objects.filter(name = name)
        self.assertEqual(students.count(), 1, '应该只存在一个名称为{}的记录'.format(name))

    def test_get_index(self):
        # Test that the index page is reachable.
        client = Client()
        response = client.get('/student/index/')
        self.assertEqual(response.status_code, 200, 'status code must be 200!')

    def test_post_student(self):
        """POSTing a new student should redirect, then appear on the index page."""
        client = Client()
        data = dict(
            name = 'test_for_post',
            sex = 1,
            email ='33@dd.com',
            profession = '程序员',
            qq = '3333',
            phone = '3222',
        )
        response = client.post('/student/index/', data)
        self.assertEqual(response.status_code, 302, 'status code must be 302!')
        response = client.get('/student/index/')
        self.assertTrue(b'test_for_post' in response.content,
                        "response content must contain 'test_for_post' ")
# Create your tests here.
|
import os
import SCons
#Env = Environment()
# Build configuration for the SMFC4B0 SCons project: output locations,
# folder scan lists and the list of software components to build.
ProjectName="SMFC4B0"
OutFile="./Out\\"+ProjectName # name of the final executable.
LibFile="./Lib\\" # output directory for static libraries.
DllFile="./Dll\\" # output directory for DLLs.
ObjDir=str("./Obj/") # Directory for the obj files.
IncDir=str("./Inc/")
RecursiveSourceFolders=[]#["./../../../../../04_Engineering/01_Source_Code"] #Folders which will be scanned recursivly for the extensions *.c
SourceFolders=[] #Folders which will be scanned for the extensions *.c
SourceFiles=[] #specific files to be built
RecursiveHeaderFolders=["./../../../../../04_Engineering/01_Source_Code/common"] #Folders which will be scanned recursivly for the extensions *.h
HeaderFolders=[] #Folders which will be scanned for the extensions *.h
RecursiveLibFolders=[] #Folders which will be scanned recursivly for the extensions *.lib
LibFolders=[] #Folders which will be scanned for the extensions *.lib
LibFiles=[] #specific files to be linked
# Algorithm components included in this build (commented entries are disabled).
ComponentsList = \
[ \
#"00_Custom",\
"cb",\
#"cct",\
#"cipp",\
"cml",\
"ecm",\
"em",\
"fct",\
"fex",\
"fsd",\
"ftrc",\
"gen",\
"hla",\
"ld",\
"memo",\
"ofc",\
"pc",\
"ped",\
"pv",\
"rtw",\
"sac",\
"semo",\
"sib",\
"sr",\
"tsa",\
"vcl",\
"vdy",\
"vodca"\
]
# Maps each component to the processor/target it is built for.
Targetdict = {\
#"00_Custom":"MCU",\
"cb":"VME_MONO",\
"cct":"VME_STEREO",\
"cipp":"MCU",\
"cml":"VME_STEREO",\
"ecm":"VME_STEREO",\
"em":"VME_STEREO",\
"fct":"VME_MONO",\
"fex":"VME_FPGA",\
"fsd":"VME_STEREO",\
"ftrc":"VME_STEREO",\
"gen":"VME_STEREO",\
"hla":"VME_MONO",\
"ld":"VME_MONO",\
"memo":"MCU",\
"ofc":"VME_STEREO",\
"pc":"VME_STEREO",\
"ped":"VME_STEREO",\
"pv":"VME_STEREO",\
"rtw":"VME_STEREO",\
"sac":"VME_STEREO",\
"semo":"VME_STEREO",\
"sib":"VME_STEREO",\
"sr":"VME_MONO",\
"tsa":"VME_STEREO",\
"vcl":"VME_STEREO",\
"vdy":"VME_MONO",\
"vodca":"VME_STEREO",\
}
# Extra per-component include paths (absolute paths are machine-specific;
# NOTE(review): consider making these relative or configurable).
Includedict = {\
#"00_Custom":[],\
"cb":["D:/Sandboxs/Algorithm/CB_CameraBlockage/04_Engineering/02_Development_Tools/ti_tools"],\
"cct":[],\
"cipp":["D:/Sandboxs/Algorithm/CIPP_CommonImagePreProcessing/04_Engineering/02_Development_Tools/ti_tools"],\
"cml":[],\
"ecm":[],\
"em":[],\
"fct":[],\
"fex":[],\
"fsd":[],\
"ftrc":[],\
"gen":[],\
"hla":[],\
"ld":[],\
"memo":[],\
"ofc":[],\
"pc":[],\
"ped":[],\
"pv":[],\
"rtw":[],\
"sac":[],\
"semo":[],\
"sib":[],\
"sr":[],\
"tsa":[],\
"vcl":[],\
"vdy":[],\
"vodca":[],\
}
import sys
from PyQt5 import QtWidgets, QtCore, QtGui
from GuiLayOut2 import Ui_MainWindow
from SerialConnect import SerialConnect
class AppWindow(QtWidgets.QMainWindow):
    """Main window of the Arduino-based power-manager GUI.

    Mirrors the device's KL15/KL30/overall power state in coloured status
    labels and forwards button clicks to the serial link.

    BUG FIX: status values are now compared with ``==`` (value equality).
    The previous ``is``/``is not`` checks compared object identity and only
    worked while both sides happened to be the same interned object.
    """

    # Shared serial connection to the Arduino power-control board.
    serialCom = SerialConnect()

    def __init__(self):
        super(AppWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Button wiring.
        self.ui.btn_kl15.clicked.connect(self.btnKl15Clicked)
        self.ui.btn_SSM_A.clicked.connect(self.btnSSM_AClicked)
        self.ui.btn_Power.clicked.connect(self.btnPowerClicked)
        self.ui.btn_connect.clicked.connect(self.btnReconnectClicked)
        # Raw strings keep the Windows path backslashes literal.
        self.happyDog = QtGui.QPixmap(r'C:\ssm\Tools\PowerManager\images\ONMEME.jpg')
        self.sadDog = QtGui.QPixmap(r'C:\ssm\Tools\PowerManager\images\sleepMEME.jpg')
        self.serialCom.getArduinoIOstatus()
        self.initLblStatus()

    def initLblStatus(self):
        """Initialise every status label from the current Arduino IO state."""
        if self.serialCom.serialStatus == self.serialCom.comOn:
            # KL15 state.
            if self.serialCom.kl15Status == self.serialCom.io_status_on:
                self.ui.lbl_kl15.setText(self.serialCom.io_status_on)
                self.ui.lbl_kl15.setStyleSheet('background-color:green')
            else:
                self.ui.lbl_kl15.setText(self.serialCom.io_status_off)
                self.ui.lbl_kl15.setStyleSheet('background-color:red')
            # KL30 (SSM A) state.
            if self.serialCom.kl30Status == self.serialCom.io_status_on:
                self.ui.lbl_SSM_A.setText(self.serialCom.io_status_on)
                self.ui.lbl_SSM_A.setStyleSheet('background-color:green')
            else:
                self.ui.lbl_SSM_A.setText(self.serialCom.io_status_off)
                self.ui.lbl_SSM_A.setStyleSheet('background-color:red')
            # Overall power is off only when both rails are off.
            if (self.serialCom.kl15Status == self.serialCom.io_status_off
                    and self.serialCom.kl30Status == self.serialCom.io_status_off):
                self.ui.lbl_Power.setText(self.serialCom.io_status_off)
                self.ui.lbl_Power.setStyleSheet('background-color:red')
                self.serialCom.rbtStatus = self.serialCom.io_status_off
            else:
                self.ui.lbl_Power.setText(self.serialCom.io_status_on)
                self.ui.lbl_Power.setStyleSheet('background-color:green')
        else:
            # Serial link is down: everything is unknown.
            self.setALLlable('red', 'UNKNOWN')
            self.setComStatus()

    def btnKl15Clicked(self):
        """Toggle KL15 and reflect the new state in its label."""
        colr, text = self.serialCom.get_kl_15_Status()
        self.ui.lbl_kl15.setStyleSheet('background-color:' + colr)
        self.ui.lbl_kl15.setText(text)

    def btnSSM_AClicked(self):
        """Toggle KL30 (SSM A) and reflect the new state in its label."""
        colr, text = self.serialCom.get_kl_30_SSM_A_Status()
        self.ui.lbl_SSM_A.setText(text)
        self.ui.lbl_SSM_A.setStyleSheet('background-color:' + colr)

    def btnPowerClicked(self):
        """Toggle overall power and update every label."""
        colr, text = self.serialCom.getRebootStatus()
        self.setALLlable(colr, text)

    def btnReconnectClicked(self):
        """Try to (re)open the serial link and refresh the whole UI."""
        colr, comStat = self.serialCom.reconnectSerial()
        text = 'ON' if comStat == self.serialCom.comOn else 'OFF'
        self.setALLlable(colr, text)
        self.setComStatus()

    def setALLlable(self, colr, text):
        """Set all three status labels to the same colour/text pair."""
        for label in (self.ui.lbl_kl15, self.ui.lbl_SSM_A, self.ui.lbl_Power):
            label.setText(text)
            label.setStyleSheet('background-color:' + colr)
        if text == self.serialCom.io_status_on:
            self.ui.lbl_miri.setPixmap(self.happyDog)
        elif text == self.serialCom.io_status_off:
            self.ui.lbl_miri.setPixmap(self.sadDog)

    def setComStatus(self):
        """Update the connect button and progress bar from the link state."""
        if self.serialCom.serialStatus == self.serialCom.comOn:
            self.ui.bar_ComStatus.setValue(100)
            self.ui.btn_connect.setText('CONNECTED')
            self.ui.lbl_miri.setPixmap(self.happyDog)
        else:
            self.ui.bar_ComStatus.setValue(0)
            self.ui.btn_connect.setText('RECONNECT')
            self.ui.lbl_miri.setPixmap(self.sadDog)

    def updateComStatus(self):
        """Periodic poll: if the link dropped, mark all labels UNKNOWN."""
        colr, comStat = self.serialCom.comSerialStatus()
        if comStat == self.serialCom.comOff:
            self.setALLlable(colr, 'UNKNOWN')
            self.setComStatus()
if __name__ == '__main__':
    app = QtWidgets.QApplication([])
    application = AppWindow()
    # Poll the serial link so the UI tracks unplug/replug events.
    timer = QtCore.QTimer()
    timer.timeout.connect(application.updateComStatus)
    timer.start(1000)  # every 1,000 milliseconds (1 s)
    application.show()
    sys.exit(app.exec())
#Anna Wójcik
import numpy as np
import math
# Pairwise-comparison matrices (AHP style): entry (i, j) is the relative
# preference of object i over object j; the diagonal is 1 by construction.
A =np.matrix([[1, 2/3, 2, 5/2, 5/3, 5],
[3/2,1,3,10/3,3,9],
[1/2,1/3,1,4/3,7/8,5/2],
[2/5,3/10,3/4,1,5/6,12/5],
[3/5,1/3,8/7,6/5,1,3],
[1/5,1/9,2/5,5/12,1/3,1]])
B= np.matrix([[1, 2/5,3,7/3,1/2,1],
[5/2,1,4/7,5/8,1/3,3],
[1/3,7/4,1,1/2,2,1/2],
[3/7,8/5,2,1,4,2],
[2,3,1/2,1/4,1,1/2],
[1,1/3,2,1/2,2,1]])
C = np.matrix([[1,17/4,17/20,8/5,23/6,8/3],
[4/17,1,1/5,2/5,9/10,2/3],
[20/17,5,1,21/10,51/10,10/3],
[5/8,5/2,10/21,1,5/2,11/6],
[6/23,10/9,10/51,2/5,1,19/30],
[3/8,3/2,3/10,6/11,30/19,1]])
# C with rows/columns permuted so the known objects come in a fixed order.
C_new = np.matrix([[1,17/20,23/6,8/3,17/4,8/5],
[20/17,1,51/10,10/3,5,21/10],
[6/23,10/51,1,19/30,10/9,2/5],
[3/8,3/10,30/19,1,3/2,6/11],
[4/17,1/5,9/10,2/3,1,2/5],
[5 / 8, 10 / 21, 5 / 2, 11 / 6, 5 / 2, 1]])
RA = np.array([[3],[1]]) # known ranking of objects 5 and 6
RB = np.array([[2], [1/2], [1]]) # known ranking of objects 4, 5 and 6
RC = np.array([[2], [5]]) # known ranking of objects 2 and 4
def rank_geo(matrix, k, known_obj_rank, last=True):
    """Recover unknown ranking weights of a pairwise-comparison matrix by the
    geometric-mean method, given ``k`` weights that are already known.

    matrix         : square pairwise-comparison matrix (np.matrix).
    k              : number of objects whose weight is already known.
    known_obj_rank : column vector of the known weights.
    last           : True when the known objects are the trailing ones;
                     otherwise the result interleaves known and computed
                     weights in a fixed order (expects n - k == 4).
    """
    n = len(matrix)
    num_known = len(known_obj_rank)
    # Linear system matrix: (n - 1) on the diagonal, -1 everywhere else.
    lhs = np.full((n - k, n - k), -1)
    np.fill_diagonal(lhs, n - 1)
    rhs = []
    for row in range(n - k):
        prod = 1
        for idx in range(num_known):
            prod = prod * known_obj_rank[idx]
        for col in range(n):
            prod = prod * matrix[row, col]
        rhs.append(math.log10(prod))
    solution = np.linalg.solve(lhs, rhs)
    unknown = [[10 ** s] for s in solution]
    if last:
        return np.concatenate((unknown, known_obj_rank))
    return np.array([unknown[0], known_obj_rank[0].tolist(),
                     unknown[1], known_obj_rank[1].tolist(),
                     unknown[2], unknown[3]])
# Compute the geometric-mean rankings for every comparison matrix and print them.
rankingA_geo = rank_geo(A,2,RA)
rankingB_geo = rank_geo(B,3,RB)
rankingC_geo = rank_geo(C_new,2,RC,False)
names = ["A", "B", "C"]
matrix = [A, B, C_new]
print(" ")
ranking =[rankingA_geo,rankingB_geo,rankingC_geo]
for i in range(0,3):
    # NOTE(review): `temp` is never used afterwards.
    temp = ranking[i]
    print("Ranking macierzy "+ names[i] + " m. geom. : ")
    print(ranking[i] )
    print(" ")
|
#!/usr/bin/env python
import os, sys
import time
import devicemanagerSUT as devicemanager
from sut_lib import clearFlag, setFlag, checkDeviceRoot, stopProcess, checkStalled, waitForDevice
if (len(sys.argv) <> 2):
print "usage: cleanup.py <ip address>"
sys.exit(1)
cwd = os.getcwd()
pidDir = os.path.join(cwd, '..')
flagFile = os.path.join(pidDir, 'proxy.flg')
errorFile = os.path.join(pidDir, 'error.flg')
processNames = [ 'org.mozilla.fennec',
'org.mozilla.fennec_aurora',
'org.mozilla.fennec_unofficial',
'org.mozilla.firefox',
'org.mozilla.firefox_beta',
'org.mozilla.roboexample.test',
]
if os.path.exists(flagFile):
print "Warning proxy.flg found during cleanup"
clearFlag(flagFile)
print "Connecting to: " + sys.argv[1]
dm = devicemanager.DeviceManagerSUT(sys.argv[1])
dm.debug = 5
devRoot = checkDeviceRoot(dm)
if not str(devRoot).startswith("/mnt/sdcard"):
setFlag(errorFile, "Remote Device Error: devRoot from devicemanager [%s] is not correct" % str(devRoot))
sys.exit(1)
if dm.dirExists(devRoot):
status = dm.removeDir(devRoot)
print "removeDir() returned [%s]" % status
if status is None or not status:
setFlag(errorFile, "Remote Device Error: call to removeDir() returned [%s]" % status)
sys.exit(1)
if not dm.fileExists('/system/etc/hosts'):
print "restoring /system/etc/hosts file"
try:
dm.sendCMD(['exec mount -o remount,rw -t yaffs2 /dev/block/mtdblock3 /system'])
data = "127.0.0.1 localhost"
dm.verifySendCMD(['push /mnt/sdcard/hosts ' + str(len(data)) + '\r\n', data], newline=False)
dm.verifySendCMD(['exec dd if=/mnt/sdcard/hosts of=/system/etc/hosts'])
except devicemanager.DMError, e:
print "Exception hit while trying to restore /system/etc/hosts: %s" % str(e)
setFlag(errorFile, "failed to restore /system/etc/hosts")
sys.exit(1)
if not dm.fileExists('/system/etc/hosts'):
setFlag(errorFile, "failed to restore /system/etc/hosts")
sys.exit(1)
else:
print "successfully restored hosts file, we can test!!!"
errcode = checkStalled(os.environ['SUT_NAME'])
if errcode > 1:
if errcode == 2:
print "processes from previous run were detected and cleaned up"
elif errocode == 3:
setFlag(errorFile, "Remote Device Error: process from previous test run present")
sys.exit(2)
for p in processNames:
if dm.dirExists('/data/data/%s' % p):
print dm.uninstallAppAndReboot(p)
waitForDevice(dm)
|
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 17 206)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class MyFrame1
###########################################################################
class MyFrame1 ( wx.Frame ):
    """Two-player tic-tac-toe board.

    Left-click places an X, right-click places an O.  The board state is
    mirrored in ``self.matrix`` (1 = X, 0 = O, None = empty).

    BUG FIX: the generated ``test()`` declared a winner whenever all three
    cells of a diagonal (or column) were merely *filled* — it compared
    one-element sets and mixed up row/column indices.  Win detection now
    requires three equal, non-empty cells on a line.
    """

    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Tic Tac Toe", pos = wx.DefaultPosition, size = wx.Size( 500,525 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
        self.matrix = [[None, None, None], [None, None, None], [None, None, None]]
        gSizer2 = wx.GridSizer( 4, 3, 0, 0 )
        # Create the nine board buttons; keep the m_button1..9 attribute
        # names so code written against the wxFormBuilder output still works.
        self.board_buttons = []
        for idx in range(1, 10):
            btn = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( -1, 90 ), 0 )
            btn.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_BACKGROUND ) )
            btn.SetFont( wx.Font( 28, 74, 90, 90, False, "Arial" ) )
            gSizer2.Add( btn, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 5 )
            setattr(self, 'm_button%d' % idx, btn)
            self.board_buttons.append(btn)
        # Bottom row: winner caption, spacer text and the reset button.
        self.m_staticText2 = wx.StaticText( self, wx.ID_ANY, u"Winner: ", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText2.Wrap( -1 )
        self.m_staticText2.SetFont( wx.Font( 20, 74, 90, 90, False, "Arial" ) )
        gSizer2.Add( self.m_staticText2, 0, wx.ALL|wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL, 5 )
        self.m_staticText1 = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText1.Wrap( -1 )
        gSizer2.Add( self.m_staticText1, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_CENTER_HORIZONTAL, 5 )
        self.m_button11 = wx.Button( self, wx.ID_ANY, u"Reset", wx.DefaultPosition, wx.DefaultSize, 0 )
        gSizer2.Add( self.m_button11, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL, 5 )
        self.SetSizer( gSizer2 )
        self.Layout()
        self.Centre( wx.BOTH )
        # Connect events: left click = X, right click = O.
        for idx, btn in enumerate(self.board_buttons, start=1):
            btn.Bind( wx.EVT_LEFT_DOWN, getattr(self, 'Cross%d' % idx) )
            btn.Bind( wx.EVT_RIGHT_DOWN, getattr(self, 'Knots%d' % idx) )
        self.m_button11.Bind( wx.EVT_BUTTON, self.reset )

    def __del__( self ):
        pass

    def _mark( self, button, row, col, value ):
        """Record a move (1 = X, 0 = O), relabel its button and re-check wins."""
        button.SetLabel("X" if value == 1 else "O")
        self.matrix[row][col] = value
        self.test()

    # Per-button event handlers (kept so derived classes can override them).
    def Cross1( self, event ):
        self._mark(self.m_button1, 0, 0, 1)

    def Knots1( self, event ):
        self._mark(self.m_button1, 0, 0, 0)

    def Cross2( self, event ):
        self._mark(self.m_button2, 0, 1, 1)

    def Knots2( self, event ):
        self._mark(self.m_button2, 0, 1, 0)

    def Cross3( self, event ):
        self._mark(self.m_button3, 0, 2, 1)

    def Knots3( self, event ):
        self._mark(self.m_button3, 0, 2, 0)

    def Cross4( self, event ):
        self._mark(self.m_button4, 1, 0, 1)

    def Knots4( self, event ):
        self._mark(self.m_button4, 1, 0, 0)

    def Cross5( self, event ):
        self._mark(self.m_button5, 1, 1, 1)

    def Knots5( self, event ):
        self._mark(self.m_button5, 1, 1, 0)

    def Cross6( self, event ):
        self._mark(self.m_button6, 1, 2, 1)

    def Knots6( self, event ):
        self._mark(self.m_button6, 1, 2, 0)

    def Cross7( self, event ):
        self._mark(self.m_button7, 2, 0, 1)

    def Knots7( self, event ):
        self._mark(self.m_button7, 2, 0, 0)

    def Cross8( self, event ):
        self._mark(self.m_button8, 2, 1, 1)

    def Knots8( self, event ):
        self._mark(self.m_button8, 2, 1, 0)

    def Cross9( self, event ):
        self._mark(self.m_button9, 2, 2, 1)

    def Knots9( self, event ):
        self._mark(self.m_button9, 2, 2, 0)

    def reset(self, event):
        """Clear the board, the button labels and the winner caption."""
        self.matrix = [[None, None, None], [None, None, None], [None, None, None]]
        for btn in self.board_buttons:
            btn.SetLabel("")
        self.m_staticText2.SetLabel("Winner: ")

    def test(self):
        """Check every row, column and both diagonals for three equal marks."""
        lines = []
        for i in range(3):
            lines.append([self.matrix[i][0], self.matrix[i][1], self.matrix[i][2]])  # row i
            lines.append([self.matrix[0][i], self.matrix[1][i], self.matrix[2][i]])  # column i
        lines.append([self.matrix[0][0], self.matrix[1][1], self.matrix[2][2]])      # main diagonal
        lines.append([self.matrix[2][0], self.matrix[1][1], self.matrix[0][2]])      # anti-diagonal
        for line in lines:
            if line[0] is not None and line[0] == line[1] == line[2]:
                self.m_staticText2.SetLabel("Winner: X" if line[0] == 1 else "Winner: O")

    def Cal( self, event ):
        event.Skip()
class MainApp(wx.App):
    """Application bootstrap: create and show the main frame."""

    def OnInit(self):
        frame = MyFrame1(None)
        frame.Show(True)
        return True
###########################################################################
## Class MyPanel2
###########################################################################
class MyPanel2 ( wx.Panel ):
    """Unused helper panel emitted by wxFormBuilder; kept for completeness."""
    def __init__( self, parent ):
        # Fixed-size plain panel; no child widgets are added.
        wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 500,300 ), style = wx.TAB_TRAVERSAL )
    def __del__( self ):
        pass
if __name__ == '__main__':
    # Start the wx event loop with the tic-tac-toe frame.
    app = MainApp()
    app.MainLoop()
|
from ESD import EnumSubDomain
import requests
import os
class subdomain(object):
    """Enumerate sub-domains with ESD and keep the ones answering HTTP."""

    def __init__(self):
        # Browser-like headers so probed hosts treat us as a normal client.
        self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
                        'Referer':'https://www.baidu.com',
                        'X-Auth':'404 notfound'}

    def save(self, domain):
        """Probe http://<domain>; append reachable final URLs to target_tmp.txt."""
        try:
            # verify=False: many probed hosts use self-signed certificates.
            r = requests.get('http://'+domain, timeout=3, headers=self.headers, verify=False)
            if r.status_code in (404, 200, 302):
                with open('target_tmp.txt', 'a') as ff:
                    ff.write(r.url+'\n')
        except Exception as e:
            # Best effort: unreachable hosts are reported and skipped.
            print(e)

    def filter(self):
        """De-duplicate target_tmp.txt (trailing slashes stripped) into target.txt."""
        unique = []
        with open('target_tmp.txt', 'r') as f:
            raw_lines = set(f.readlines())
        for line in raw_lines:
            cleaned = line.strip().rstrip('/')
            if cleaned not in unique:
                unique.append(cleaned)
        # Open the output once instead of once per URL.
        with open('target.txt', 'a') as f:
            for url in unique:
                f.write(url+'\n')

    def remove(self):
        """Truncate any leftover output files from a previous run."""
        for name in ('target_tmp.txt', 'target.txt'):
            if os.path.exists(name):
                # Opening in 'w' mode already truncates the file.
                open(name, 'w').close()

    def main(self):
        """Enumerate every domain listed in domain.txt and collect live targets."""
        self.remove()
        with open('domain.txt', 'r') as f:
            for sub in f.readlines():
                try:
                    domains = EnumSubDomain(sub.strip()).run()
                    for host in domains.keys():
                        # Skip wildcard / black-hole resolutions.
                        if domains[host][0] in ("0.0.0.1", "127.0.0.1"):
                            continue
                        self.save(host)
                except Exception as e:
                    print(e)
        self.filter()
# main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from outis_post.models import OutisPost
from outis_user.models import OutisUser
class OutisPostCollection(models.Model):
    """A user's bookmark of a post."""

    user_id = models.ForeignKey(OutisUser, on_delete=models.CASCADE)
    post_id = models.ForeignKey(OutisPost, on_delete=models.CASCADE)
    date = models.DateTimeField(auto_now=True)  # refreshed on every save

    class Meta:
        verbose_name = 'Outis_Post_Collection'
        verbose_name_plural = 'Outis_Post_Collection'

    def __str__(self):
        # BUG FIX: __str__ must return a str; returning the integer pk
        # raised "TypeError: __str__ returned non-string".
        return str(self.id)
class OutisUserCollection(models.Model):
    """A follow edge: main_user collects/follows sub_user."""

    main_user_id = models.ForeignKey(OutisUser, verbose_name='main_user_id',
                                     related_name='main_user_id', on_delete=models.CASCADE)
    # BUG FIX: on_delete was missing here (required since Django 2.0 and
    # inconsistent with every other FK in this app); CASCADE matches the
    # old implicit default behaviour.
    sub_user_id = models.ForeignKey(OutisUser, verbose_name='sub_user_id',
                                    related_name='sub_user_id', on_delete=models.CASCADE)
    date = models.DateTimeField(auto_now=True)  # refreshed on every save

    class Meta:
        verbose_name = 'Outis_User_Collection'
        verbose_name_plural = 'Outis_User_Collection'

    def __str__(self):
        # BUG FIX: __str__ must return a str, not the integer pk.
        return str(self.id)
|
import numpy as np
class CylinderModel():
    """RANSAC shape model: a cylinder estimated from two oriented points."""

    def __init__(self, distance_threshold, tuple_radius_minmax, tuple_theta_phi):  # TODO: change theta phi behaviour
        self.n = 2  # minimal sample size for this model
        self.name = 'cylinder'
        self.t = distance_threshold
        self.minradius = tuple_radius_minmax[0]
        self.maxradius = tuple_radius_minmax[1]
        # Preferred axis direction the fitted cylinder must (anti-)align with.
        self.dir = self.direction(tuple_theta_phi[0], tuple_theta_phi[1])

    def direction(self, theta, phi):
        # TODO: Copied this code - give credit
        '''Return the direction vector of a cylinder defined
        by the spherical coordinates theta and phi.
        '''
        return np.array([np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta), np.cos(theta)])

    def fit(self, S, N):
        """Fit a cylinder to two surface points S with normals N.

        Returns [point_on_axis, radius, axis, radial_unit, binormal] or
        None when the sample cannot form an acceptable cylinder.
        """
        # Cylinder axis is perpendicular to both surface normals.
        a = np.cross(N[0], N[1])
        norm_a = np.sqrt(a.dot(a))
        # BUG FIX: parallel (or anti-parallel) normals give a zero cross
        # product and no unique axis; reject the sample instead of
        # dividing by zero (the old TODO comment flagged exactly this).
        if norm_a < 1e-12:
            return None
        a /= norm_a
        # Accept only axes (anti-)aligned with the configured direction.
        if np.dot(a, self.dir) < -0.98 or np.dot(a, self.dir) > 0.98:  # angle threshold
            # Cylinder cross-section plane normal.
            b = np.cross(a, N[0])
            # TODO: ensure the second point allows an intersection (dot not ~0).
            dot = np.dot(b, N[1])
            w = S[1] - S[0]
            s = -np.dot(b, w) / dot
            # Point on the axis.
            p = w + s * N[1] + S[0]
            # Vector from the axis to the second surface point.
            rv = p - S[1]
            r = np.sqrt(rv.dot(rv))  # radius
            rv /= r                  # radial unit vector
            a3 = np.cross(a, rv)
            a3 /= np.linalg.norm(a3)
            if r > self.minradius and r < self.maxradius:
                return [p, r, a, rv, a3]
            else:
                return None
        else:
            return None

    def evaluate(self, model, R, min_sample):
        """Split points R into cylinder-surface inliers/outliers by radial distance."""
        # Basis of the cross-section plane (drops the axis dimension).
        b = np.stack((model[3], model[4]), axis=1)
        # Re-centre on the axis point and project into the cross-section plane.
        Rc = R - model[0]
        twod = np.dot(Rc, b)
        # Radial distances to the axis.
        distances = np.sqrt(twod[:, 0]**2 + twod[:, 1]**2)
        inlier_indices = np.where((distances <= model[1] + self.t) & (distances >= model[1] - self.t))[0]
        outlier_indices = np.where((distances > model[1] + self.t) | (distances < model[1] - self.t))[0]
        if len(inlier_indices) > min_sample:
            return [inlier_indices, outlier_indices]
        else:
            return None
"""TODO: implement this way
For a plane, {p1, p2, p3} constitutes a minimal set
when not taking into account the normals in the points. To
confirm the plausibility of the generated plane, the deviation
of the plane’s normal from n1, n2, n3 is determined and the
candidate plane is accepted only if all deviations are less than
the predefined angle α.
""""
class PlaneModel():
    """RANSAC shape model: a plane defined by one oriented sample point."""

    def __init__(self, distance_threshold):
        self.n = 1  # minimal sample size for this model
        self.name = 'plane'
        self.t = distance_threshold

    def fit(self, S, N):
        """The sample point S and its normalised normal N fully define the plane."""
        unit_normal = N / np.sqrt(N.ravel().dot(N.ravel()))
        return [S, unit_normal]

    def evaluate(self, model, R, min_sample):
        """Split points R into plane inliers/outliers by signed distance."""
        # Projecting onto the unit normal yields signed distances to the plane.
        offsets = np.dot(R - model[0], model[1].T)
        inside = np.where((offsets <= self.t) & (offsets >= -self.t))[0]
        outside = np.where((offsets > self.t) | (offsets < -self.t))[0]
        if len(inside) > min_sample:
            return [inside, outside]
        return None
|
# Program that asks for: name, age, phone, address, email and nationality,
# then prints a personalised message, e.g.:
# "Hola Luis, me dijiste que tienes 43 años de edad y que tu teléfono es el
# 3315648790, ..." (the runtime prompts/output intentionally stay in Spanish).
print ("Ingrese su nombre porfavor: ")
nombre=input()
print ("Ingrese su edad: ")
edad=input()
print("Ingrese su número de teléfono: ")
tel=input()
print("Ingrese su dirección: ")
direc=input()
print("Ingrese su e-mail: ")
mail=input()
print ("Ingrese su nacionalidad: ")
nacion=input()
print(f" Bienvenido {nombre} \n Tu edad es de {edad} años \n Tu número telefónico es: {tel} \n Tu dirección es: {direc} \n Tu e-mail es: {mail} \n Tu nacionalidad es: {nacion}")
|
# Read whitespace-stripped lines until the sentinel "end", then split the
# collected entries into four interleaved columns and print each column.
entries = []
line = input()
while line != "end":
    entries.append(line.replace(" ", ""))
    line = input()
col1 = entries[0::4]
col2 = entries[1::4]
col3 = entries[2::4]
col4 = entries[3::4]
print(col1)
print(col2)
print(col3)
print(col4)
"""
Listas
Tipos de elementos en una lista
Acceder y modificar lo elementos de una lista
"""
miLista = ["uno", "dos", "tres", "cuatro", "cinco"]
print(miLista[-1])
print(miLista[-2])
print(miLista[-3])
#miLista[-1] = 14
#miLista[-1] = "String"
print(miLista[-1])
#miLista[1]
print(miLista[3:])
# ["cuatro", "cinco"]
print(miLista[1:4])
# ["dos", "tres"]
print(miLista[::-1])
# ["cinco", "cuatro", "tres", "dos", "uno"]
miLista = ['uno',[2,3,4], 'dos', 'tres']
print(miLista)
print(miLista[1])
print(miLista[1][2])
print(miLista[1][::])
print(miLista[1][1::])
miLista[3]= False
print(miLista)
# Concatenar
letras = ['A','B','C','D']
numeros = [1,2,3,4,5]
print(numeros+letras)
# Replicar Listas
numeros *=3
#numeros = numeros * 3
print(numeros)
# Metodos de las listas append, extend, pop...
letras.append(True)
print(letras)
lista = [1,2,3]
otraLista = [4,5]
lista.extend(otraLista)
# lista + otraLista
print(lista)
lista.pop()
print(lista)
lista.pop(-1)
print(lista)
lista.pop(1)
print(lista)
desordenada = [4,6,1,8,3,9,4,4]
# Metodo Insert
desordenada.insert(10,13)
# Metodo Remove
desordenada.remove(1)
desordenada.sort()
print(desordenada)
desordenada.reverse()
print(desordenada)
# Operaciones de interprete
print(len(desordenada))
print(min(desordenada))
print(max(desordenada))
# Buscar e imprimir dentro de una lista
print('D' in letras)
# Metodo count
print(desordenada.count(4))
listaTest =[1,8,5,4,2,7]
# Metodo Index
print(listaTest.index(7))
# Listas multidimensionales
print( '####### Lista de contactos ########')
contactos = [
[
'Juan','juan@mail.com'
],
[
'Angel','angel@gmail.com'
],
[
'Daniel','daniel@gmail.com'
]
]
for contacto in contactos:
for elemento in contacto:
if contacto.index(elemento) == 0:
print(f'Nombre: {elemento}')
else:
print(f'Email: {elemento}')
print('\n')
print(contactos[1][1]) |
import unittest
import json
import iris.server as server
def to_json(data):
    """Serialise *data* into a JSON string."""
    serialised = json.dumps(data)
    return serialised
def response_json(response):
    """Decode a test-client response body (UTF-8 bytes) into a Python object."""
    body = response.data.decode("utf-8")
    return json.loads(body)
class IrisInputValidationTest(unittest.TestCase):
    """Input validation for the /iris/v1/predict endpoint."""

    request_url = '/iris/v1/predict'

    def setUp(self):
        self.app = server.app
        # Stub predictor so tests never depend on a trained model.
        self.app.predictor = (lambda m: 0)
        self.client = self.app.test_client()

    def test_correct_request_returns_success_response(self):
        body = to_json({'sample': [1, 2, 3, 4]})
        response = self.client.post(self.request_url, content_type="application/json", data=body)
        self.assertValidResponse(response)

    def test_incorrect_request_invalid_content_type(self):
        response = self.client.post(self.request_url, content_type="text/plain", data="Blah")
        self.assertBadRequest(response, server.INVALID_CONTENT_TYPE)

    def test_incorrect_request_no_sample(self):
        response = self.client.post(self.request_url, content_type="application/json", data=to_json({}))
        self.assertBadRequest(response, server.MISSING_SAMPLE)

    def test_incorrect_request_sample_wrong_type(self):
        # BUG FIX: this test was defined twice with identical bodies; the
        # second definition silently shadowed the first. One copy removed.
        data = to_json({'sample': 'rubbish'})
        response = self.client.post(self.request_url, content_type="application/json", data=data)
        self.assertBadRequest(response, server.BAD_SAMPLE)

    def test_incorrect_request_sample_wrong_sample_size(self):
        data = to_json({'sample': [0, 0, 0]})
        response = self.client.post(self.request_url, content_type="application/json", data=data)
        self.assertBadRequest(response, server.BAD_SAMPLE)
        data = to_json({'sample': [0, 0, 0, 0, 0]})
        response = self.client.post(self.request_url, content_type="application/json", data=data)
        self.assertBadRequest(response, server.BAD_SAMPLE)

    def test_incorrect_request_sample_wrong_sample_entry(self):
        data = to_json({'sample': [0, 0, 0, 'rubbish']})
        response = self.client.post(self.request_url, content_type="application/json", data=data)
        self.assertBadRequest(response, server.BAD_SAMPLE)

    def assertBadRequest(self, response, expected_message):
        """Assert a 400 JSON error response carrying *expected_message*."""
        self.assertEqual(400, response.status_code, "Response status code is BAD_REQUEST")
        self.assertEqual("application/json", response.content_type, "Response content type")
        data = json.loads(response.data.decode("utf-8"))
        self.assertTrue('message' in data, "Error response contains 'message' field")
        message = data['message']
        self.assertEqual(expected_message, message, "Expected error message")

    def assertValidResponse(self, response):
        """Assert a 200 JSON response that contains an integer 'label'."""
        self.assertEqual(200, response.status_code, "Response status code is OK")
        self.assertEqual("application/json", response.content_type, "Response content type")
        self.assertTrue(len(response.data) > 0, "Response contains data")
        data = json.loads(response.data.decode("utf-8"))
        self.assertTrue('label' in data, "Response data contains label field")
        label = data['label']
        self.assertTrue(type(label) == int, "Label is integer")
class IrisModelErrorTest(unittest.TestCase):
    """The server must answer 500 when the model fails to produce a label."""

    request_url = '/iris/v1/predict'

    def setUp(self):
        self.app = server.app
        self.client = self.app.test_client()
        # Stub predictor that simulates a model failure.
        self.app.predictor = (lambda s: None)

    def test_when_model_returns_none_server_responds_with_error(self):
        payload = to_json({'sample': [1, 2, 3, 4]})
        reply = self.client.post(self.request_url, content_type="application/json", data=payload)
        self.assertEqual(500, reply.status_code)
|
# job class
class TableJob(object):
    """Scheduled-job record hydrated from one SQL result row.

    Row layout: id, year, month, day, hour, minute, second,
    function path (``/``-separated), run mark.
    """

    def __init__(self, sql_list):
        self.sql_list = sql_list
        self.id = ''
        self.t_year = ''
        self.t_month = ''
        self.t_day = ''
        self.t_hour = ''
        self.t_minute = ''
        self.t_second = ''
        self.function_name = ''
        self.run_mark = ''

    def binding_value(self):
        """Copy the stored row fields onto their named attributes."""
        (self.id, self.t_year, self.t_month, self.t_day,
         self.t_hour, self.t_minute, self.t_second) = self.sql_list[:7]
        # Only the last path component is the function name.
        self.function_name = str(self.sql_list[7]).split('/')[-1]
        self.run_mark = self.sql_list[8]
|
# Simple Function
# Parts: 'def' 'function_name' '(parameters & arguments)'
def plus(x, y):
    """Print the sum of x and y (returns None)."""
    print(x + y)
# plus(4, 5)
Global = None
WIDTH = 300
HEIGHT = 300
def _init():
global Global
Global = {}
def set_value(key: str, value):
global Global
Global[key] = value
def get_value(key: str):
global Global
return Global[key]
|
import argparse
import random
def euro_lotto():
numbers = random.sample(xrange(1,50),5)
lucky = random.sample(xrange(1,11),2)
numbers.sort()
lucky.sort()
print "Chosen numbers are %s, %s, %s, %s and %s" % (
numbers[0], numbers[1], numbers[2], numbers[3], numbers[4])
print "Lucky stars are %s and %s" % (lucky[0], lucky[1])
def lotto():
number = random.sample(xrange(1,49),6)
bonus = number[5]
number.pop(5)
number.sort()
print "Chosen numbers are %s, %s, %s, %s and %s with bonus ball %s" % (
numbers[0], numbers[1], numbers[2], numbers[3], numbers[4], bonus)
def main():
    """Parse --choice from the command line and run the matching generator.

    Raises:
        Exception: when --choice is missing or not 1/2.
    BUG FIX: the error message claimed "1, 2 or 3" although only options
    1 and 2 exist; Python 2 print statements converted to print().
    """
    parser = argparse.ArgumentParser(description='Option selector.\n'
                                     '1. Euromillions generator\n2. Lotto generator')
    parser.add_argument('--choice', type=int, help='Option (1 or 2)')
    args = parser.parse_args()
    i = args.choice
    print(i)
    if i == 1:
        print("Running Euromillions generator...")
        euro_lotto()
    elif i == 2:
        print("Running Lotto generator...")
        lotto()
    else:
        raise Exception('Please set choice as 1 or 2')
# BUG FIX: the guard compared against "main" instead of "__main__",
# so main() was never executed when the script was run directly.
if __name__ == "__main__":
    main()
|
from django.urls import path, include
from rest_framework.authtoken import views as auth_views
from rest_framework.routers import DefaultRouter, SimpleRouter
from . import views
# DRF router exposing each viewset under an automatically generated URL.
router = DefaultRouter()
router.register('profile', views.UserProfileViewSet)
router.register('incidents', views.IncidentsViewSet)
router.register('detail', views.DetailViewSet)
# NOTE(review): the routes below are capitalised while the ones above are
# lower-case; URLs are case-sensitive, so confirm clients expect this mix.
router.register('Transports', views.TransportsViewSet)
router.register('TypeIncidents', views.TypeIncidentsViewSet)
router.register('Motivation', views.MotivationViewSet)
router.register('Periode', views.PeriodeViewSet)
router.register('Group', views.GroupViewSet)
# Mount every registered route at the app root.
urlpatterns = [
    path('', include(router.urls)),
]
# 06_is_palindrome
def is_palindrome(s):
    """Return True if string s reads the same forwards and backwards.

    Recursive: compares the outermost characters, then the inner slice.
    """
    if not s:
        return True
    if s[0] != s[-1]:
        return False
    return is_palindrome(s[1:-1])
print is_palindrome('abba')
|
# -*- coding:utf-8 -*-
from __future__ import division
import time
import numpy as np
import pycrfsuite as crf
from sklearn.cross_validation import KFold
from sklearn.grid_search import ParameterGrid
from metrics import scorer, f1_score
class GridSearch(object):
    """Grid search over pycrfsuite training parameters with K-fold CV.

    Each entry of ``param_searches`` is either a parameter grid, or a dict
    of named parameter grids to be searched in sequence.  Keys starting
    with ``__`` are directives (``__algorithm``, ``__best_parameter``),
    not trainer parameters.
    """

    def __init__(self, param_searches, param_base={}, cv=5,
                 scorer=scorer(f1_score, tags_discard={'O'}), model=None):
        # NOTE(review): param_base has a mutable default; it is only ever
        # copied/rebound here, so this is harmless, but None would be safer.
        self.param_searches = param_searches
        self.param_base = param_base
        self.cv = cv
        self.scorer = scorer
        self.model = model  # optional path to dump the best model to
        self.graphical_model = 'crf1d'
        self.best_score = .0
        self.best_algorithm = 'lbfgs'
        self.best_param = {}

    def search(self, X, y, verbose):
        """Run every configured grid; optionally train/dump the best model."""
        for param_search in self.param_searches:
            # A dict of dicts groups several named sub-grids together.
            if isinstance(list(param_search.values())[0], dict):
                for _, param_gs in param_search.items():
                    self.search_grid(X, y, param_gs, verbose)
            else:
                self.search_grid(X, y, param_search, verbose)
        if self.model:
            # FIX: pass verbose by keyword — the first positional argument of
            # crf.Trainer is not verbose (search_grid already used keyword).
            trainer = crf.Trainer(verbose=verbose)
            trainer.select(self.best_algorithm, self.graphical_model)
            trainer.set_params(self.best_param)
            for xseq, yseq in zip(X, y):
                trainer.append(xseq, yseq)
            # BUG FIX: `model` was an unbound name here (NameError);
            # use the path stored on the instance.
            trainer.train(self.model)

    def search_grid(self, X, y, param_grid, verbose):
        """Cross-validate one parameter grid, tracking the best setting."""
        if '__algorithm' in param_grid:
            algorithm = param_grid['__algorithm']
        else:
            algorithm = self.best_algorithm
        # Optionally continue the search from the best parameters so far.
        if param_grid.get('__best_parameter'):
            self.param_base = self.best_param.copy()
        param_grid = ParameterGrid({p[0]: p[1] for p in param_grid.items() if not p[0].startswith('__')})
        for param in param_grid:
            trainer = crf.Trainer(verbose=verbose)
            param_train = self.param_base.copy()
            param_train.update(param)
            trainer.select(algorithm, self.graphical_model)
            trainer.set_params(param_train)
            if isinstance(self.cv, int):
                # NOTE(review): this is the pre-0.18 sklearn KFold API.
                cv = KFold(n=len(X), n_folds=self.cv, shuffle=True, random_state=None)
            print('Parameter: (%s) %s' % (algorithm, param_train))
            cv_score = []
            for j, indices in enumerate(cv):
                X_train, y_train = X[indices[0]], y[indices[0]]
                X_test, y_test = X[indices[1]], y[indices[1]]
                for xseq, yseq in zip(X_train, y_train):
                    trainer.append(xseq, yseq)
                start = time.time()
                trainer.train('model')  # temporary model file on disk
                fit_elapsed_in_sec = time.time() - start
                trainer.clear()
                tagger = crf.Tagger()
                tagger.open('model')
                start = time.time()
                y_pred = [tagger.tag(xseq) for xseq in X_test]
                predict_elapsed_in_sec = time.time() - start
                tagger.close()
                score = self.scorer(y_pred, y_test)
                print(' cv(%i): score %.4f, train size %i, test size %i, train elapsed %.4f sec, test elapsed %.4f sec' %
                      (j, score, X_train.shape[0], X_test.shape[0], fit_elapsed_in_sec, predict_elapsed_in_sec))
                cv_score.append(score)
            score = np.mean(cv_score)
            if self.best_score < score:
                self.best_score = score
                self.best_param = param_train
                self.best_algorithm = algorithm
            del cv_score[:]
|
"""
Search harder for service dependencies. The APT, Yum, and files backends
have already found the services of note but the file and package resources
which need to trigger restarts have not been fully enumerated.
"""
from collections import defaultdict
import logging
import os.path
import re
import subprocess
# Pattern for matching pathnames in init scripts and such.
pattern = re.compile(r'(/[/0-9A-Za-z_.-]+)')
def services(b):
    """Attach file and source dependencies to every service on blueprint ``b``.

    Assumes ``b`` exposes: files, sources, add_service_file,
    add_service_source, walk_service_packages, walk_service_files and
    walk_services.  (Python 2 code: uses ``iterkeys``.)
    """
    logging.info('searching for service dependencies')

    # Command fragments for listing the files in a package.
    commands = {'apt': ['dpkg-query', '-L'],
                'yum': ['rpm', '-ql']}

    # Build a map of the directory that contains each file in the
    # blueprint to the pathname of that file.
    dirs = defaultdict(list)
    for pathname in b.files:
        dirname = os.path.dirname(pathname)
        if dirname not in ('/etc', '/etc/init', '/etc/init.d'):
            dirs[dirname].append(pathname)

    def service_file(manager, service, pathname):
        """
        Add dependencies for every pathname extracted from init scripts and
        other dependent files.
        """
        # NOTE(review): the handle is never closed; consider a `with` block.
        content = open(pathname).read()
        for match in pattern.finditer(content):
            if match.group(1) in b.files:
                b.add_service_file(manager, service, match.group(1))
        for dirname in b.sources.iterkeys():
            if -1 != content.find(dirname):
                b.add_service_source(manager, service, dirname)

    def service_package(manager, service, package_manager, package):
        """
        Add dependencies for every file in the blueprint that's also in
        this service's package or in a directory in this service's package.
        """
        try:
            p = subprocess.Popen(commands[package_manager] + [package],
                                 close_fds=True,
                                 stdout=subprocess.PIPE)
        except KeyError:
            # Unknown package manager: nothing to enumerate.
            return
        for line in p.stdout:
            pathname = line.rstrip()
            if pathname in b.files:
                b.add_service_file(manager, service, pathname)
            elif pathname in dirs:
                # The package ships a directory: add every blueprint file in it.
                b.add_service_file(manager, service, *dirs[pathname])

    def service(manager, service):
        """
        Add extra file dependencies found in packages.  Then add extra file
        dependencies found by searching file content for pathnames.
        """
        b.walk_service_packages(manager,
                                service,
                                service_package=service_package)
        # The init script itself is a dependency of the service it starts.
        if 'sysvinit' == manager:
            service_file(manager, service, '/etc/init.d/{0}'.format(service))
        elif 'upstart' == manager:
            service_file(manager,
                         service,
                         '/etc/init/{0}.conf'.format(service))
        b.walk_service_files(manager, service, service_file=service_file)

    b.walk_services(service=service)
|
from django.contrib import admin
from .models import *
# Expose all library models through the Django admin site.
admin.site.register(Book)
admin.site.register(Reviews)
admin.site.register(Borrower)
admin.site.register(Genre)
admin.site.register(Language)
admin.site.register(Status)
admin.site.register(Borrowing_duration)
admin.site.register(Late_return_charge)
'''
Project Euler Problem #30
Approach: get all possible combinations of digits and find answer
'''
import itertools
nums = range(10)

# change combination to numbers
def inNum(p, s):
    """True when the digit string ``s`` is exactly a rearrangement of
    the digits in the tuple ``p``."""
    if len(p) != len(s):
        return False
    remaining = list(p)
    for ch in s:
        digit = int(ch)
        if digit not in remaining:
            return False
        remaining.remove(digit)
    return True

# Collect every number (1..6 digits) that equals the sum of the fifth
# powers of its digits, then drop the trivial 0 and 1.
ans = set()
for width in range(1, 7):
    print("i:", width)
    for combo in itertools.combinations_with_replacement(nums, width):
        total = sum(d ** 5 for d in combo)
        if inNum(combo, str(total)):
            ans.add(total)
ans.remove(0)
ans.remove(1)
print('result: ', ans)
print('sum:', sum(ans))
|
class Solution:
    def minRemoveToMakeValid(self, s: str) -> str:
        """Remove the minimum number of parentheses so the result is valid.

        Tracks indices of unmatched '(' on a stack and of unmatched ')'
        in a drop set, then rebuilds the string without them.
        """
        open_stack = []   # indices of currently unmatched '('
        drop = set()      # indices that must be removed
        for i, ch in enumerate(s):
            if ch == '(':
                open_stack.append(i)
            elif ch == ')':
                if open_stack:
                    open_stack.pop()
                else:
                    drop.add(i)
        drop.update(open_stack)  # leftover '(' are unmatched too
        return ''.join(ch for i, ch in enumerate(s) if i not in drop)
from invoke import task, Collection
from . import build, deploy, provision
@task(aliases=['ipython'])
def shell(c):
    "Load a REPL with project state already set up."
    # NOTE(review): not implemented yet.
    pass

@task(aliases=['run_tests'], default=True)
def test(c):
    "Run the test suite with baked-in args."
    # NOTE(review): not implemented yet.
    pass

# Root task namespace: the local tasks plus the build/deploy/provision modules.
ns = Collection(shell, test, build, deploy, provision)
|
import os
from ..ReportGitDownload import *
# Report how much data is downloaded from the GHE instance for a specific repository
class ReportGitHubGitDownload(ReportGitDownload):
    """GitHub-specific git-download report writing per-repository TSV files."""

    def name(self):
        return "github-git-download"

    def metaName(self):
        return "{}/{}".format(self.repository, self.name())

    def _reportPath(self, filename):
        # All outputs live under <dataDirectory>/repository/<owner>/<repo>/.
        return os.path.join(self.dataDirectory, "repository",
                            self.repositoryOwner, self.repositoryName, filename)

    def fileName(self):
        return self._reportPath(self.name() + ".tsv")

    def detailedFileName(self):
        return self._reportPath(self.name() + "-detailed.tsv")
from PyQt5.QtWidgets import QTextEdit, QHBoxLayout
class OutputText(QTextEdit):
    """Read-only text widget displaying a pre-rendered output string."""

    # Qt object name, used for styling/lookup.
    name = 'output_frame'

    def __init__(self, parent, output: str):
        super().__init__(parent=parent)
        self.parent = parent
        self.setObjectName(OutputText.name)
        self.output = output
        self.insertPlainText(output)
        self.setReadOnly(True)
class OutputFrame(QHBoxLayout):
    """Horizontal layout wrapping a single OutputText widget."""

    def __init__(self, parent, output: str):
        super().__init__()
        output_text = OutputText(parent=parent, output=output)
        self.addWidget(output_text)
|
import requests
# NOTE(review): BASE is defined but unused below — remove it or use it to
# build the request URL.
BASE = "http://127.0.0.1:5000/"
# Fetch one destination record from the deployed API and dump the JSON body.
response = requests.get("http://monikaantwan.pythonanywhere.com/destinationList/1/1")
print(response.json())
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from Bikes import models, forms
def home(request):
    """ shows all messages from staff to user or from user's to staff """
    # NOTE(review): this local shadows the imported django.contrib.messages
    # module (unused in this view, but fragile).
    messages = []
    # NOTE(review): is_authenticated is a property (not callable) from
    # Django 1.10 on; this call style assumes an older Django — confirm.
    if request.user.is_authenticated():
        user = request.user
        if user.is_superuser:
            # Staff inbox: every message addressed to the site ("to").
            messages = models.Message.objects.filter(owner="to")
        else:
            # Users are linked to their messages through their Order record.
            order = models.Order.objects.get(name=user.username, email=user.email)
            messages = models.Message.objects.filter(user=order, owner="from")
        return render(request, 'home.html', {"your_messages": messages})
    else:
        return HttpResponseRedirect(reverse('bikes:type'))
# Logs in and out
def loginer(request):
    """Login's user"""
    form = forms.LoginForm()
    if request.method == 'POST':
        form = forms.LoginForm(request.POST)
        if form.is_valid():
            # NOTE(review): an `email` kwarg is passed to authenticate();
            # confirm the configured auth backend actually uses it.
            user = authenticate(
                username=form.cleaned_data['name'],
                email=form.cleaned_data['email'],
                password=form.cleaned_data['password']
            )
            if user is not None:
                if user.is_active:
                    login(request, user)
                    messages.add_message(request, messages.SUCCESS, "You are now logged in!")
                    return HttpResponseRedirect(reverse('bikes:type'))
                else:
                    messages.add_message(request, messages.ERROR, "This account has been disabled sorry!")
                    return HttpResponseRedirect(reverse('bikes:type'))
            else:
                messages.add_message(request, messages.ERROR, "Invalid Login!")
    # GET requests and failed logins fall through to the login form.
    return render(request, 'login.html', {'form': form})
def logout_view(request):
    """logs out user"""
    logout(request)
    # Flash message survives the redirect via the session.
    messages.add_message(request, messages.SUCCESS, "Logged out successfully!")
    return HttpResponseRedirect(reverse('bikes:type'))
# Users Views
def message(request):
    """user could create a message to send as message to admin"""
    user = request.user
    order = ""
    # NOTE(review): is_authenticated is called — assumes pre-1.10 Django.
    if request.user.is_authenticated():
        if user.is_superuser:
            # Staff don't send messages here; bounce them to the order list.
            return HttpResponseRedirect(reverse('bikes:all_orders'))
        order = models.Order.objects.get(name=user.username, email=user.email)
    else:
        # Anonymous visitors are attached to a shared placeholder order.
        order = models.Order.objects.get(name="Anonymous")
    # NOTE(review): local `message` shadows this function's own name.
    message = forms.MessageForm()
    if request.method == "POST":
        message = forms.MessageForm(request.POST)
        if message.is_valid():
            form = message.save(commit=False)
            form.user = order
            form.owner = "to"  # "to" marks messages addressed to staff
            form.save()
            messages.success(request, "Message sent! We will respond shortly.")
            return HttpResponseRedirect(reverse('home'))
    return render(request, 'admin_messages.html', {'message': message})
@login_required
def delete_message(request):
    """ deletes message """
    pk = request.POST.get('pk')
    user = request.user
    if not user.is_superuser:
        # NOTE(review): `order` is fetched but never used — it looks like an
        # intended ownership check before deletion; confirm and complete it.
        order = models.Order.objects.get(name=user.username, email=user.email)
    message = get_object_or_404(models.Message, pk=pk)
    message.delete(keep_parents=True)
    return HttpResponseRedirect(reverse('home'))
|
import matplotlib.pyplot as plt
import numpy as np
def reduce_array(array, n):
    """Return every n-th element of ``array`` (numpy fancy indexing)."""
    sample_idx = np.arange(0, len(array), n)
    return array[sample_idx]
def format_time(array):
    """Shift ``array`` to start at zero and convert seconds to minutes."""
    shifted = array - min(array)
    return shifted / 60
def save_plot(x, tensione, corrente, temperatura, data_path):
    """Render voltage/current/temperature vs. time on three y-axes and
    save the figure to ``data_path``.

    Assumes the four series are aligned 1-D arrays — TODO confirm.
    """
    plt.style.use('bmh')
    x = format_time(x)
    # NOTE(review): the voltage series is scaled by 500 — presumably a
    # sensor calibration factor; document its origin.
    tensione = tensione * 500
    fig, ax = plt.subplots()
    fig.subplots_adjust(right=0.75)
    twin1 = ax.twinx()
    twin2 = ax.twinx()
    # Offset the right spine of twin2. The ticks and label have already been
    # placed on the right by twinx above.
    # NOTE(review): `spines.right` attribute access needs matplotlib >= 3.4.
    twin2.spines.right.set_position(("axes", 1.2))
    p1, = ax.plot(x, tensione, "b-", label="tensione", linewidth=1)
    p2, = twin1.plot(x, corrente, "r-", label="corrente", linewidth=1)
    p3, = twin2.plot(x, temperatura, "g-", label="temperatura", linewidth=1)
    # Fixed axis ranges per quantity.
    ax.set_ylim(-3.0, 15.0)
    twin1.set_ylim(0, 30)
    twin2.set_ylim(0, 100)
    ax.set_xlabel("Time")
    ax.set_ylabel("tensione")
    twin1.set_ylabel("corrente")
    twin2.set_ylabel("temperatura")
    # Colour each axis label/ticks to match its line.
    ax.yaxis.label.set_color(p1.get_color())
    twin1.yaxis.label.set_color(p2.get_color())
    twin2.yaxis.label.set_color(p3.get_color())
    tkw = dict(size=4, width=1.5)
    ax.tick_params(axis='y', colors=p1.get_color(), **tkw)
    twin1.tick_params(axis='y', colors=p2.get_color(), **tkw)
    twin2.tick_params(axis='y', colors=p3.get_color(), **tkw)
    ax.tick_params(axis='x', **tkw)
    ax.legend(handles=[p1, p2, p3])
    plt.savefig(data_path, dpi=300)
|
"""
An asynchronous client for Google Cloud KMS
"""
import json
import os
from typing import Any
from typing import AnyStr
from typing import Dict
from typing import IO
from typing import Optional
from typing import Tuple
from typing import Union
from gcloud.aio.auth import AioSession # pylint: disable=no-name-in-module
from gcloud.aio.auth import BUILD_GCLOUD_REST # pylint: disable=no-name-in-module
from gcloud.aio.auth import Token # pylint: disable=no-name-in-module
# Selectively load libraries based on the package
if BUILD_GCLOUD_REST:
from requests import Session
else:
from aiohttp import ClientSession as Session # type: ignore[assignment]
# OAuth scope required for all Cloud KMS operations.
SCOPES = [
    'https://www.googleapis.com/auth/cloudkms',
]
def init_api_root(api_root: Optional[str]) -> Tuple[bool, str]:
    """Resolve the KMS API root.

    Returns (is_dev, root_url): an explicit root or a configured emulator
    counts as a dev endpoint; otherwise the production URL is used.
    """
    if api_root:
        return True, api_root
    emulator_host = os.environ.get('KMS_EMULATOR_HOST')
    if emulator_host:
        return True, f'http://{emulator_host}/v1'
    return False, 'https://cloudkms.googleapis.com/v1'
class KMS:
    """Async client bound to a single Google Cloud KMS crypto key."""

    _api_root: str     # full resource URL of the crypto key
    _api_is_dev: bool  # True when talking to an emulator/custom root

    def __init__(
            self, keyproject: str, keyring: str, keyname: str,
            service_file: Optional[Union[str, IO[AnyStr]]] = None,
            location: str = 'global', session: Optional[Session] = None,
            token: Optional[Token] = None, api_root: Optional[str] = None,
    ) -> None:
        self._api_is_dev, self._api_root = init_api_root(api_root)
        # Narrow the root down to this specific crypto key's resource path.
        self._api_root = (
            f'{self._api_root}/projects/{keyproject}/locations/{location}/'
            f'keyRings/{keyring}/cryptoKeys/{keyname}'
        )
        self.session = AioSession(session)
        self.token = token or Token(
            service_file=service_file,
            session=self.session.session,  # type: ignore[arg-type]
            scopes=SCOPES,
        )

    async def headers(self) -> Dict[str, str]:
        """Build request headers; emulators need no Authorization token."""
        if self._api_is_dev:
            return {'Content-Type': 'application/json'}
        token = await self.token.get()
        return {
            'Authorization': f'Bearer {token}',
            'Content-Type': 'application/json',
        }

    # https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/decrypt
    async def decrypt(
            self, ciphertext: str,
            session: Optional[Session] = None,
    ) -> str:
        """Decrypt ``ciphertext`` with this key.

        NOTE(review): the KMS REST API exchanges base64 strings; this method
        passes values through verbatim, so encoding is the caller's job.
        """
        url = f'{self._api_root}:decrypt'
        body = json.dumps({
            'ciphertext': ciphertext,
        }).encode('utf-8')
        s = AioSession(session) if session else self.session
        resp = await s.post(url, headers=await self.headers(), data=body)
        plaintext: str = (await resp.json())['plaintext']
        return plaintext

    # https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys/encrypt
    async def encrypt(
            self, plaintext: str,
            session: Optional[Session] = None,
    ) -> str:
        """Encrypt ``plaintext`` with this key (base64 in/out — see decrypt)."""
        url = f'{self._api_root}:encrypt'
        body = json.dumps({
            'plaintext': plaintext,
        }).encode('utf-8')
        s = AioSession(session) if session else self.session
        resp = await s.post(url, headers=await self.headers(), data=body)
        ciphertext: str = (await resp.json())['ciphertext']
        return ciphertext

    async def close(self) -> None:
        """Release the underlying HTTP session."""
        await self.session.close()

    async def __aenter__(self) -> 'KMS':
        return self

    async def __aexit__(self, *args: Any) -> None:
        await self.close()
|
# Object: if the scanned RFID card matches, speak "Permission Granted".
# sudo apt-get install espeak   ...to install espeak
import serial
from time import sleep
from os import system

ser = serial.Serial("/dev/serial0", 9600, timeout=1)

# Tag ID of the authorised card.
AUTHORIZED_TAG = "0200107EDCB0"

while True:
    val = ser.read(12)
    sleep(.1)
    # BUG FIX: on Python 3 ser.read() returns bytes, so comparing it to a
    # str was always False; decode before comparing (also drops the unused
    # `i` counter from the original).
    tag = val.decode("ascii", errors="ignore").strip() if isinstance(val, bytes) else val.strip()
    if len(tag) == 12:
        if tag == AUTHORIZED_TAG:
            system("espeak -v +f3 \"Permision, Granted\"")
        else:
            system("espeak -v +f3 \"Permision, Denied\"")
|
import pytest
from votesmart.methods.measure import *
def test_Measure():
    """Smoke test: Measure can be constructed with a dummy api instance."""
    # NOTE(review): no assertions — this only verifies the constructor runs.
    method = Measure(api_instance='test')
|
from django.db import migrations
# Master rows seeded into the Type table.
pizza_type_master_data = [
    {"type": "Regular"},
    {"type": "Square"}
]
# Master rows seeded into the Size table.
pizza_size_master_data = [
    {"size": "Small"},
    {"size": "Medium"},
    {"size": "Large"}
]
# Master rows seeded into the Topping table.
pizza_topping_master_data = [
    {"topping": "Onion"},
    {"topping": "Tomato"},
    {"topping": "Corn"},
    {"topping": "Capsicum"},
    {"topping": "Cheese"},
    {"topping": "Jalapeno"},
]
def add_pizza_type_master_data(apps, schema_editor):
    """Seed the Type table with the pizza-type master rows."""
    db_alias = schema_editor.connection.alias
    Type = apps.get_model("app", "Type")
    rows = [Type(type=entry.get("type")) for entry in pizza_type_master_data]
    Type.objects.using(db_alias).bulk_create(rows)
def add_pizza_size_master_data(apps, schema_editor):
    """Seed the Size table with the pizza-size master rows."""
    db_alias = schema_editor.connection.alias
    Size = apps.get_model("app", "Size")
    rows = [Size(size=entry.get("size")) for entry in pizza_size_master_data]
    Size.objects.using(db_alias).bulk_create(rows)
def add_pizza_topping_master_data(apps, schema_editor):
    """Seed the Topping table with the pizza-topping master rows."""
    db_alias = schema_editor.connection.alias
    Topping = apps.get_model("app", "Topping")
    rows = [Topping(topping=entry.get("topping")) for entry in pizza_topping_master_data]
    Topping.objects.using(db_alias).bulk_create(rows)
class Migration(migrations.Migration):
    """Data migration seeding the pizza Type/Size/Topping master tables."""

    dependencies = [
        ('app', '0001_initial'),
    ]

    # NOTE(review): no reverse functions are supplied, so this migration is
    # irreversible; pass migrations.RunPython.noop to allow rollback.
    operations = [
        migrations.RunPython(add_pizza_type_master_data),
        migrations.RunPython(add_pizza_size_master_data),
        migrations.RunPython(add_pizza_topping_master_data),
    ]
|
# Generated by Django 2.0 on 2018-01-10 06:39
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the User_Con_Company join model and its two foreign keys."""

    dependencies = [
        ('org', '0006_auto_20180110_0631'),
    ]

    # Foreign keys must be removed before the model itself can be deleted.
    operations = [
        migrations.RemoveField(
            model_name='user_con_company',
            name='Company',
        ),
        migrations.RemoveField(
            model_name='user_con_company',
            name='User',
        ),
        migrations.DeleteModel(
            name='User_Con_Company',
        ),
    ]
|
from src.database import SQLDatabase
from src.outlier_model import OutlierModel
import pandas as pd
from pathlib import Path
import os
import logging
import plotly.express as px
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output
# set and configure logger, stream handler, and file handler
logger = logging.getLogger()  # root logger
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s :: %(message)s')
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.INFO)
file_handler = logging.FileHandler('info.log')
file_handler.setFormatter(formatter)
# NOTE(review): the root logger level is INFO, so DEBUG records never reach
# this handler even though its own level is DEBUG.
file_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
# get absolute path of file so relative paths can be used later
cur_path = Path(__file__)
data_path = os.path.join(cur_path.parent, 'data/input/Outliers.csv')
# Entry point: load prices from SQLite, build the exploratory figures, train
# the isolation-forest outlier model and serve everything as a Dash app.
if __name__ == '__main__':
    # read in data from our data base
    db = SQLDatabase(data_path, 'OutlierDB.db', 'OutlierTable')
    db_conn = db.create_connection()
    price_data = pd.read_sql('SELECT * FROM OutlierTable', db_conn)
    # remove any null values from the dataset
    price_data = price_data[price_data['Price'].notna()]
    price_data = price_data[price_data['Date'].notna()]
    # ensure date column is a date
    price_data['Date'] = pd.to_datetime(price_data.Date)
    # sort of our data by date
    price_data = price_data.sort_values(by='Date')
    # calculate rolling average
    # NOTE(review): rolling(30) is 30 *rows*; it is a monthly window only if
    # the data is daily with no gaps — confirm against the input file.
    price_data['Monthly Rolling Average Price'] = price_data['Price'].rolling(30).mean()
    # get initial time series price
    price_fig = px.line(price_data, x='Date', y=["Price", "Monthly Rolling Average Price"])
    price_fig.update_layout(title_text='Price Data Over Time', title_x=0.5)
    price_fig.update_layout(legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
    price_fig.update_yaxes(title_text="Price", title_standoff=25)
    price_fig.update_xaxes(title_text="Date", title_standoff=25)
    # get histogram of price
    price_his = px.histogram(price_data, x='Price', nbins=20, color="Weekday")
    price_his.update_layout(title_text='Histogram of Prices by Day', title_x=0.5)
    price_his.update_xaxes(title_text="Price", title_standoff=25)
    # difference data
    price_data['Price Difference'] = price_data['Price'] - price_data['Price'].shift(1)
    # plot the difference data
    diff_fig = px.line(price_data, x='Date', y="Price Difference")
    diff_fig.update_layout(title_text='Differenced Price Data Over Time', title_x=0.5)
    diff_fig.update_layout(legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
    diff_fig.update_yaxes(title_text="Differenced Price", title_standoff=25)
    diff_fig.update_xaxes(title_text="Date", title_standoff=25)
    # train outlier model (contamination defaults to 5%)
    model_obj = OutlierModel(price_data, 0.05)
    model_data = model_obj.train_model()
    # plot outliers
    outliers_fig = px.scatter(model_data, x='Date', y="Price", color='Outlier')
    outliers_fig.update_layout(title_text='Price Data Over Time by Outliers', title_x=0.5)
    outliers_fig.update_layout(legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
    outliers_fig.update_yaxes(title_text="Price", title_standoff=25)
    outliers_fig.update_xaxes(title_text="Date", title_standoff=25)
    # initialise very basic Plotly Dash Application
    app = dash.Dash()
    app.layout = html.Div(children=[
        html.H1(children='Outliers Project', style={'textAlign': 'center'}),
        html.Div(children='This application allows users to view and download the outliers in a provided pricing dataset.', style={'textAlign': 'center', 'padding':'10px'}),
        html.Div(children='The application also allows users to dynamically set the main hyper-parameter of the model used to detect the outliers in the dataset.', style={'textAlign': 'center', 'padding': '10px'}),
        html.Div(children='The backend algorithm used to detect the outliers in the dataset is the unsupervised isolation forest algorithm.', style={'textAlign': 'center', 'padding':'10px'}),
        html.H2(children='Exploratory Visualisations', style={'textAlign': 'center'}),
        dcc.Graph(figure=price_fig, id='price-graph'),
        dcc.Graph(figure=price_his, id='price-hist'),
        html.Div(children='The provided outlier dataset is not stationary. This can be seen by the increasing rolling average in the "Price Data Over Time" figure. Based on the "Histogram of Prices by Day" figure, there does not appear to be fluctuations in prices based on the day of the week.',
                 style={'textAlign': 'center', 'padding': '5px'}),
        html.H2(children='Differencing', style={'textAlign': 'center'}),
        html.Div(children='Differencing is applied to the dataset to ensure that it is stationary before it is trained. The results of differencing can be seen below.', style={'textAlign': 'center', 'padding': '10px'}),
        dcc.Graph(figure=diff_fig, id='diff-graph'),
        html.H2(children='Outlier Visualisation and Results', style={'textAlign': 'center'}),
        html.Div(children='The slider below allows users to dynamically adjust the contamination parameter of the outlier model. Contamination is an estimate of the percentage of outliers in a dataset. The default value has been set to 5 percent.',
                 style={'textAlign': 'center', 'padding': '10px'}),
        html.Div( children='The higher the contamination parameter is set, the more outliers will be visible in the graph and dataset below.',
                 style={'textAlign': 'center', 'padding': '10px'}),
        # Contamination slider: 0-10% in 0.5% steps.
        html.Div([
            dcc.Slider(
                id='slider',
                min=0.0,
                max=10.0,
                step=0.5,
                value=5,
                marks={
                    0: {'label': '0%'},
                    0.5: {'label': '0.5%'},
                    1: {'label': '1.0%'},
                    1.5: {'label': '1.5%'},
                    2: {'label': '2.0%'},
                    2.5: {'label': '2.5%'},
                    3: {'label': '3.0%'},
                    3.5: {'label': '3.5%'},
                    4: {'label': '4.0%'},
                    4.5: {'label': '4.5%'},
                    5: {'label': '5.0%'},
                    5.5: {'label': '5.5%'},
                    6: {'label': '6.0%'},
                    6.5: {'label': '6.5%'},
                    7: {'label': '7.0%'},
                    7.5: {'label': '7.5%'},
                    8: {'label': '8.0%'},
                    8.5: {'label': '8.5%'},
                    9: {'label': '9.0%'},
                    9.5: {'label': '9.5%'},
                    10: {'label': '10.0%'}
                }
            ),
            html.Div(id='slider-output-container')
        ],
        style={'width': '75%', 'marginLeft': 'auto', 'marginRight': 'auto', 'padding':'10px'}
        ),
        dcc.Graph(figure=outliers_fig, id='outliers-graph'),
        html.H3(children='Results Table', style={'textAlign': 'center'}),
        html.Div(children='The full results of the outlier detection model can be viewed and exported from the table below.', style={'textAlign': 'center', 'padding': '10px'}),
        html.Div(children=[
            dash_table.DataTable(
                id='final-table',
                columns=[{"name": i, "id": i} for i in ['Date', 'Price', 'Outlier']],
                data=model_data.to_dict('records'),
                export_format='csv',
                style_table={
                    'maxHeight': '200px',
                    'overflowY': 'scroll',
                }
            )
        ],
        style={'width':'75%', 'marginLeft': 'auto', 'marginRight': 'auto'}
        )
    ])

    # call back function to update outlier plot and data based on slider
    @app.callback([Output('outliers-graph', 'figure'),
                   Output('final-table', 'data')],
                  Input('slider', 'value'))
    def retrain_model(value):
        """Retrain the outlier model with the slider's contamination value
        and refresh both the scatter plot and the results table."""
        # convert value to percentage
        contamination = value/100
        # re-train outlier model
        remodel_obj = OutlierModel(price_data, contamination)
        remodel_data = remodel_obj.train_model()
        reoutliers_fig = px.scatter(remodel_data, x='Date', y="Price", color='Outlier')
        reoutliers_fig.update_layout(title_text='Price Data Over Time by Outliers', title_x=0.5)
        reoutliers_fig.update_layout(legend=dict(yanchor="top", y=0.99, xanchor="left", x=0.01))
        reoutliers_fig.update_yaxes(title_text="Price", title_standoff=25)
        reoutliers_fig.update_xaxes(title_text="Date", title_standoff=25)
        return reoutliers_fig, remodel_data.to_dict('records')

    app.run_server(debug=True, use_reloader=False)
|
# -*- coding: utf-8 -*-
# Project Euler #2: sum of the even Fibonacci numbers not exceeding 4,000,000.
# FIXES: renamed `sum` (shadowed the builtin) to `total`; moved the limit
# check before accumulation so a final term past the limit can never be
# summed; converted the Python 2 print statement; dropped stray semicolons.
prev = [1, 2]
total = 2  # 2 is the first even term of this sequence
while True:
    now = prev[0] + prev[1]
    if now > 4000000:
        break
    prev[0] = prev[1]
    prev[1] = now
    if now % 2 == 0:
        total += now
print(total)
|
'''
Quick sort using list comprehension.
'''
def quick_sort(lst):
    """Return a sorted copy of ``lst`` using quicksort with comprehensions.

    BUG FIX: the original recursed on ``middle`` (all elements equal to the
    pivot); that list never shrinks, so any input with two or more copies of
    the pivot value hit infinite recursion (RecursionError). ``middle`` is
    already sorted and is now concatenated directly.
    """
    # Base case: empty or single-element lists are already sorted.
    if len(lst) <= 1:
        return lst
    # Pivot from the middle of the list.
    pivot = lst[len(lst) // 2]
    left = [x for x in lst if x < pivot]
    middle = [x for x in lst if x == pivot]
    right = [x for x in lst if x > pivot]
    return quick_sort(left) + middle + quick_sort(right)

print(quick_sort([5,4,3,8,7,1,2,6]))
|
# -*- python -*-
# Copyright (C) 2013, MagicLinux.
# Author: Yang Zhang <zy.netsec@gmail.com>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, 59 Temple
# Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys
import stat
import sys, glob
from SCons.Action import ActionFactory
from xml.dom.minidom import parse
import PkgMaker
def usage():
    """Return the command-line usage text with argv[0] interpolated."""
    return '''
USAGE:
%s <package dir>
package directory include packages used by create root file system.
''' % sys.argv[0]
# Abort early with the usage text when no package directory was supplied.
# (Python 2 print statement, consistent with the rest of this script.)
if len(sys.argv) < 2:
    print usage()
    sys.exit(1)
def line_split(lines):
    """Split ``lines`` into stripped, non-empty, non-comment lines."""
    return [stripped
            for stripped in (raw.strip() for raw in lines.split('\n'))
            if stripped and not stripped.startswith('#')]
def get_node_value(node, name):
    """Return the cleaned text lines of the last child element named
    ``name`` (empty list when no such child exists)."""
    value = ''
    for subnode in node.childNodes:
        if subnode.nodeType == subnode.ELEMENT_NODE \
           and subnode.tagName == name:
            # Concatenate all direct text nodes under the matching element;
            # a later sibling with the same tag overwrites an earlier one.
            value = ''.join([t.data for t in subnode.childNodes \
                             if t.nodeType == t.TEXT_NODE])
    return line_split(value)
class PkgConfigManager(PkgMaker.BaseMaker):
def __init__(self, pkgnlist, source_prefix):
self.source_prefix = source_prefix
self.source_list = source_list = []
self.build_cmds = cmds = []
source_list.extend([ '%s.xml' % a for a in pkgnlist ])
self.inst_list = inst_list = []
self.pre_list = pre_list = []
self.post_list = post_list = []
self.pkg_list = pkg_list = []
for pkgcf in source_list:
self.parseConfig(pkgcf)
def parseConfig(self, pkgcf):
cmds = self.build_cmds
try:
rootdoc = parse(pkgcf)
except:
print 'Parser mod %s.xml failed!' % mod
raise
# init
inst_list = self.inst_list
pre_list = self.pre_list
post_list = self.post_list
pkg_list = self.pkg_list
for pkgnode in rootdoc.getElementsByTagName('package'):
pkgfiles_orig = get_node_value(pkgnode, 'files')
if pkgfiles_orig:
pkgfiles = []
#pkgfiles = self.path_prefix('$source_list_pkg', pkgfiles_orig)
for pkgfile in pkgfiles_orig:
pkg_path = self.search_file(pkgfile, self.source_prefix)
if pkg_path:
pkgfiles.append(pkg_path)
else:
print "Can not find the package %s" % pkgfile
sys.exit(1)
pkg_list.extend(pkgfiles)
# extract
cmds.extend(self.get_extract_cmd(pkgfiles, '$build_prefix'))
inst_list.extend(get_node_value(pkgnode, 'install'))
pre_list.extend(get_node_value(pkgnode, 'pre_action'))
post_list.extend(get_node_value(pkgnode, 'post_action'))
# pre_action
cmds.extend(pre_list)
# install
for f in inst_list:
if f[0] == '+': # mkdir only
f = f[1:]
cmds.extend(['mkdir -p $ROOT/%s' % f])
elif f[0] == '@': # recursive
f = f[1:]
cmds.extend(['mkdir -p $ROOT/%s' % os.path.dirname(f),
'cp -a $BUILD/%s $ROOT/%s/' % (f, os.path.dirname(f))])
else:
cmds.extend(['mkdir -p $ROOT/%s' % os.path.dirname(f),
'cp -dp $BUILD/%s $ROOT/%s/' % (f, os.path.dirname(f))])
# post_action
cmds.extend(post_list)
def search_file(self, filename, pathes):
for p in pathes:
f = os.path.join(p, filename)
#f = self.env.subst(f)
files = glob.glob(f)
if files and os.access(files[0], os.R_OK):
return files[0]
if __name__ == '__main__':
    # Build the full package set from the source directories given on the
    # command line, then print each resolved package path stripped of its
    # matching source prefix (plus the path separator).
    pkgnlst = ['filesystem',
               'glibc',
               'syslib',
               'busybox',
               'bash',
               'python',
               'xorg',
               'gtk2',
               'parted',
               'rpm',
               'mkfs',
               'udev',
               'grub',
               'trace',
               'post_scripts',
               ]
    source_prefix = sys.argv[1:]
    pkgManager = PkgConfigManager(pkgnlst, source_prefix)
    for pkg in pkgManager.pkg_list:
        for prefix in source_prefix:
            if pkg.startswith(prefix):
                # +1 drops the '/' that follows the prefix
                print pkg[len(prefix)+1:]
                break
|
import sqlite3
import mysql.connector
# TODO: Write proper documentation
class DatabaseHandler:
    """Thin wrapper around a local SQLite store of e-mail messages.

    Creates (if needed) a single ``emails`` table in ``rass.db`` and offers
    insert/close helpers.
    """

    def __init__(self):
        self.conn = sqlite3.connect('rass.db')
        self.cur = self.conn.cursor()
        self.cur.execute("""
        CREATE TABLE IF NOT EXISTS emails(
            id INT PRIMARY KEY,
            label_ids TEXT,
            date TEXT,
            fro TEXT,
            recv TEXT,
            subject TEXT,
            body TEXT
        )
        """)
        self.conn.commit()
        # Checking that the table was created.
        # BUGFIX: cursor.execute() returns the cursor object itself, which is
        # always truthy, so the old ``if not exists`` check could never fire.
        # The count has to be fetched from the result row instead.
        self.cur.execute(
            "SELECT count(*) FROM sqlite_master WHERE type='table' AND name='emails'")
        if self.cur.fetchone()[0] == 0:
            raise RuntimeError("table 'emails' was not created")

    def close(self):
        """Commit any pending changes and close the connection."""
        self.conn.commit()
        self.conn.close()

    def insert(self, message):
        """Insert one e-mail dict and return the number of affected rows.

        WARNING: Security not tested, use at your own risk!
        """
        # NOTE(review): labels are joined without a separator, so the stored
        # value loses the boundaries between label ids — confirm intended.
        labs = ''.join(message['labelIds'])
        dat = message['date']
        fro = message['from']
        recv = message['to']
        subj = message['subject']
        bod = message['body']
        # Parameterized placeholders guard against SQL injection.
        self.cur.execute("""
            INSERT INTO emails (label_ids, date, fro, recv, subject, body)
            VALUES (?,?,?,?,?,?)
        """, (labs, dat, fro, recv, subj, bod))
        self.conn.commit()
        return self.cur.rowcount

    # TODO: Set up procedure for adding new emails to the machine
    def retrieveNext(self):
        """Placeholder for fetching the next unprocessed e-mail."""
        print()
def letters(digit):
    """Letter count of the English word for a single digit; 0 maps to 0."""
    counts = {0: 0, 1: 3, 2: 3, 3: 5, 4: 4, 5: 4, 6: 3, 7: 5, 8: 5, 9: 4}
    return counts.get(digit)
def tens_letters(digit):
    """Letter count of the tens word (twenty..ninety); 0 maps to 0."""
    counts = {0: 0, 2: 6, 3: 6, 4: 5, 5: 5, 6: 5, 7: 7, 8: 6, 9: 6}
    return counts.get(digit)
# Function is passed the ones digit
def teens_letters(digit):
    """Letter count for ten..nineteen, keyed by the ones digit."""
    counts = {0: 3, 1: 6, 2: 6, 3: 8, 4: 8, 5: 7, 6: 7, 7: 9, 8: 8, 9: 8}
    return counts.get(digit)
def to_int(numList):
    """Concatenate a list of digits into one integer, e.g. [1, 2] -> 12."""
    return int(''.join(str(d) for d in numList))
def num_letters(num):
    """Count the letters in the British-English spelling of *num*.

    Spaces and hyphens are not counted, and "and" is used after hundreds
    (e.g. 342 -> "three hundred and forty-two" -> 23 letters).  Intended
    for 1 <= num <= 9999 per the problem specification.

    Fixes over the previous version: the general four-digit branch had a
    syntax error (unclosed call); "hundred" was counted even when the
    hundreds digit was 0 (e.g. 1000); the four-digit teens branch omitted
    the 3 letters of "and"; and the out-of-range fallback discarded the
    result of its recursive call.
    """
    digits = [int(char) for char in str(num)]
    if len(digits) == 4:
        total = letters(digits[0]) + 8  # "thousand" has 8 letters
        if digits[1] != 0:
            total += letters(digits[1]) + 7  # "<digit> hundred"
        if digits[2] == 0 and digits[3] == 0:
            return total  # nothing after the hundreds -> no "and"
        total += 3  # "and"
        if digits[2] == 1:
            return total + teens_letters(digits[3])
        return total + tens_letters(digits[2]) + letters(digits[3])
    if len(digits) == 3:
        total = letters(digits[0]) + 7  # "<digit> hundred"
        if digits[1] == 0 and digits[2] == 0:
            return total
        total += 3  # "and"
        if digits[1] == 1:
            return total + teens_letters(digits[2])
        return total + tens_letters(digits[1]) + letters(digits[2])
    if len(digits) == 2:
        if digits[0] == 1:
            return teens_letters(digits[1])
        return tens_letters(digits[0]) + letters(digits[1])
    if len(digits) == 1:
        return letters(digits[0])
    # Out of range: ask again and return the recomputed result.
    new_num = input("Please Enter a number between 1 and 9999: ")
    return num_letters(new_num)
def testing():
    """Spot-check num_letters against hand-counted letter totals."""
    expected = {7: 5, 10: 3, 17: 9, 30: 6, 99: 10, 100: 10, 110: 16, 427: 25}
    for value, count in expected.items():
        assert num_letters(value) == count
|
# -*- coding: UTF-8 -*-
import html5lib, requests
import mysql.connector
import re
import time
from bs4 import BeautifulSoup
# database info
# NOTE(review): MySQL credentials are hard-coded (root with an empty
# password) — move these to configuration/environment before deploying.
username = 'root'
password = ''
host = 'localhost'
dbase = 'doctor_rating'
class crawlYelp:
    """Crawls Yelp business pages for doctor ratings and reviews.

    Work items come from the ``google_results2017`` MySQL table; results
    are stored in ``yelp_doctor`` and ``yelp_rating``.
    """
    def __init__(self):
        # One shared connection/cursor; main() re-opens them per task.
        self.dbconn = mysql.connector.connect(user=username, password=password,host=host, database=dbase)
        self.cursor = self.dbconn.cursor()
    def get_address(self):
        """Claim one un-crawled yelp.com URL, mark duplicates, and crawl it.

        crawlFlag values used here: 0 = pending, 1 = claimed, 4 = failed or
        closed business, 5 = duplicate row for the same doctor NPI.
        """
        select_sql = (
            "SELECT cId,doctorNPI,searchResultUrl,searchKeyword FROM doctor_rating.google_results2017 WHERE websiteName LIKE '%yelp.com' AND crawlFlag = 0 LIMIT 1"
        )
        self.cursor.execute(select_sql)
        for cursor_data in self.cursor.fetchall():
            cId = cursor_data[0]
            doctor_NPI = cursor_data[1]
            doctor_url = cursor_data[2]
            doctor_keyword = cursor_data[3]
            print('---------New Url From Google----------')
            print(cId, doctor_NPI, doctor_url, doctor_keyword)
            # Only business pages ('/biz/...') are crawlable.
            if doctor_url.split('/')[3] == 'biz':
                try:
                    print('---------Valid url----------')
                    # NOTE(review): the literal '%yelp.com' inside these
                    # parameterized queries may be consumed by the driver's
                    # pyformat substitution — it usually must be escaped as
                    # '%%yelp.com'; confirm against mysql.connector behavior.
                    insert_sql = (
                        "UPDATE `google_results2017` SET crawlFlag = 5 WHERE doctorNPI = %s and websiteName LIKE '%yelp.com' and cId != %s and crawlflag = 0")
                    insert_sql1 = (
                        "UPDATE `google_results2017` SET crawlFlag = 1 WHERE doctorNPI = %s and websiteName LIKE '%yelp.com' and cId = %s")
                    self.cursor.execute(insert_sql, (doctor_NPI, cId,))
                    self.cursor.execute(insert_sql1, (doctor_NPI, cId,))
                    self.dbconn.commit()
                    self.get_data(doctor_url, doctor_NPI, cId)
                except Exception as err:
                    # Any scraping/DB error marks the row as failed (flag 4).
                    print("---------!!!Crawl Fail!!!----------")
                    print(err)
                    insert_sql = ("UPDATE `google_results2017` SET crawlFlag = 4 WHERE cid = %s ")
                    self.cursor.execute(insert_sql, (cId,))
                    self.dbconn.commit()
                    time.sleep(2)
            else:
                print("---------Not valid url----------")
    def get_data(self,doctor_url, doctor_NPI, cId):
        """Scrape one doctor page plus all of its review pages.

        Stores the doctor row in ``yelp_doctor`` and each review in
        ``yelp_rating``; closed businesses are flagged with crawlFlag 4.
        """
        print("------------Doctor Information------------")
        wb_data = requests.get(doctor_url)
        soup = BeautifulSoup(wb_data.text,'html5lib')
        # NOTE(review): when the page has no '.alert-message' element this
        # [0] raises IndexError, which get_address() treats as a crawl
        # failure — confirm whether that fallback is intended.
        if soup.select('.alert-message')[0].get_text().strip().split(' ')[0] == 'Yelpers':
            print('Has closed')
            rating_number = ''
            insert_sql = ("UPDATE `google_results2017` SET crawlFlag = 4 WHERE cid = %s ")
            self.cursor.execute(insert_sql, (cId,))
            self.dbconn.commit()
        else:
            doctor_name = soup.select("div.biz-page-header-left > div > h1")[0].get_text().split(',')[0].strip()
            try:
                doctor_speciality = soup.select("div.biz-main-info.embossed-text-white > div.price-category > span > a")[0].get_text().split(',')[0].strip()
            except:
                doctor_speciality = ''
            try:
                rating_overall = soup.select("div.biz-main-info.embossed-text-white > div.rating-info.clearfix > div.biz-rating.biz-rating-very-large.clearfix > div")[0]['title'].split(' ')[0]
                rating_number = soup.select("div.biz-main-info.embossed-text-white > div.rating-info.clearfix > div.biz-rating.biz-rating-very-large.clearfix > span")[0].get_text().strip().split(' ')[0]
            except IndexError:
                rating_overall = ''
                rating_number = ''
            doctor_address_data = soup.select("div > strong > address")[0].get_text()
            # Collapse all whitespace runs to single spaces.
            doctor_address = ' '.join(doctor_address_data.split())
            try:
                doctor_street = doctor_address.split(',')[0].split(' ')[-1]
                doctor_state = doctor_address.split(',')[1].split(' ')[1]
                doctor_zipcode = doctor_address.split(',')[1].split(' ')[2]
            except:
                doctor_street = ''
                doctor_state = ''
                doctor_zipcode = ''
            try:
                print("Doctor Information:", doctor_name, doctor_speciality, rating_overall, rating_number, doctor_address, doctor_street, doctor_state, doctor_zipcode)
            except Exception:
                pass
            args = (doctor_NPI, doctor_name, doctor_speciality, rating_overall, rating_number, doctor_address, doctor_street, doctor_state, doctor_zipcode)
            add_doctor_result = ("INSERT IGNORE INTO yelp_doctor "
                                 "(doctor_NPI, doctor_name, doctor_speciality, rating_overall, rating_number, doctor_address, doctor_street, doctor_state, doctor_zipcode) "
                                 "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)")
            self.cursor.execute(add_doctor_result, args)
            self.dbconn.commit()
            if rating_number != '':
                insert_sql = (
                    "UPDATE `google_results2017` SET ratingFlag = 1 WHERE cId = %s and websiteName LIKE '%yelp.com'")
                self.cursor.execute(insert_sql, (cId,))
                self.dbconn.commit()
                # Yelp paginates reviews 20 per page via '?start=N'.
                page_number = int(rating_number) // 20 + 1
                print ("Review page:" + str(page_number))
                for i in range(0, page_number):
                    page_url = "?start=" + str(20 * i)
                    doctor_page_url = doctor_url + page_url
                    print (doctor_page_url)
                    wb_data = requests.get(doctor_page_url)
                    soup = BeautifulSoup(wb_data.text, 'html5lib')
                    for review_data in soup.select(".review--with-sidebar"):
                        try:
                            # print review_data
                            reviewer_id = review_data.select('#dropdown_user-name')[0]['href'].split('=')[1]
                            reviewer_name = review_data.select('#dropdown_user-name')[0].get_text().strip()
                            reviewer_address = review_data.select('.user-location')[0].get_text().strip()
                            # Location is usually "City, ST"; fall back when
                            # there is no leading segment.
                            try:
                                reviewer_city = review_data.select('.user-location')[0].get_text().strip().split(',')[1]
                                reviewer_state = review_data.select('.user-location')[0].get_text().strip().split(',')[2].strip()
                            except:
                                reviewer_city = review_data.select('.user-location')[0].get_text().strip().split(',')[0]
                                reviewer_state = review_data.select('.user-location')[0].get_text().strip().split(',')[1].strip()
                            rating_score = review_data.select('.i-stars')[0]['title'].split(' ')[0]
                            rating_date = review_data.select('.rating-qualifier')[0].get_text().strip().split(' ')[0].strip()
                            rating_content = review_data.select('p')[0].get_text().strip()
                            rating_useful = review_data.select('.count')[0].get_text().strip()
                            rating_funny = review_data.select('.count')[1].get_text().strip()
                            rating_cool = review_data.select('.count')[2].get_text().strip()
                            print ("------------Review------------")
                            try:
                                print (reviewer_id, reviewer_name, reviewer_address, reviewer_city, reviewer_state, rating_score, rating_date, rating_content, rating_useful, rating_cool, rating_funny)
                            except Exception:
                                pass
                            args = (doctor_NPI, reviewer_id, reviewer_name, reviewer_address, reviewer_city, reviewer_state, rating_score, rating_date, rating_content, rating_useful, rating_cool, rating_funny)
                            add_rating_result = ("INSERT IGNORE INTO yelp_rating "
                                                 "(doctor_NPI, reviewer_id, reviewer_name, reviewer_address, reviewer_city, reviewer_state, rating_score, rating_date, rating_content, rating_useful, rating_cool, rating_funny) "
                                                 "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
                            self.cursor.execute(add_rating_result, args)
                            self.dbconn.commit()
                        except Exception as err:
                            # Skip malformed review blocks silently.
                            pass
    def get_task_number(self):
        """Return the count of pending (crawlFlag = 0) yelp.com rows."""
        select_sql = (
            "SELECT count(*) FROM doctor_rating.google_results2017 WHERE crawlFlag = 0 AND websiteName LIKE '%yelp.com'"
        )
        self.cursor.execute(select_sql)
        for cursor_data in self.cursor.fetchall():
            task_number = int(cursor_data[0])
        print('The number of task this time: ', task_number)
        return task_number
    def main(self):
        """Process every pending task, reconnecting to MySQL for each."""
        task_number = self.get_task_number()
        for i in range(0, task_number):
            self.dbconn = mysql.connector.connect(user=username, password=password, host=host, database=dbase)
            self.cursor = self.dbconn.cursor()
            self.get_address()
        self.cursor.close()
        self.dbconn.close()
if __name__ == '__main__':
    # Entry point: crawl every pending yelp.com result row.
    crawler = crawlYelp()
    crawler.main()
|
# coding:utf8
import numpy as np
import codecs
import torch
import torch.utils.data as D
from torch.autograd import Variable
from collections import deque
import pandas as pd
word2id = {}       # maps character/word -> integer id
id2relation = {}   # maps relation id -> relation name
max_len = 50       # fixed per-sentence length of the character-id vector
word2id_file = './data/people-relation/word2id.txt'
id2relation_file = './data/people-relation/id2relation.txt'
model_path = "./model/model_epoch99.pkl"
data_path = "./data/people-relation/test.txt"
def init_dic(word2id_file, id2relation_file):
    """Populate the global word2id and id2relation maps from disk.

    Each file holds whitespace-separated (key, value) pairs, one per line.
    """
    print("--- 初始 word2id, relation2id 映射字典 ---")
    with codecs.open(word2id_file, 'r', 'utf-8') as input_data:
        for line in input_data.readlines():
            parts = line.split()
            word2id[parts[0]] = int(parts[1])
    with codecs.open(id2relation_file, 'r', 'utf-8') as input_data:
        for line in input_data.readlines():
            parts = line.split()
            id2relation[int(parts[0])] = parts[1]
def X_padding(words):
    """Convert a sentence to word ids, padded/truncated to max_len.

    Characters missing from word2id fall back to the id of "unknown";
    short sentences are right-padded with the id of "blank".

    :param words: the sentence (iterable of characters)
    :return: list of exactly max_len ids
    """
    ids = []
    for ch in words:
        ids.append(word2id[ch] if ch in word2id else word2id["unknown"])
    if len(ids) >= max_len:
        return ids[:max_len]
    return ids + [word2id["blank"]] * (max_len - len(ids))
def pos(position):
    """Clamp a character-to-entity distance into the range [0, 80].

    Distances below -40 map to 0, those above 40 map to 80, and the rest
    are shifted by +40.

    :param position: signed distance from a character to an entity
    :return: value in [0, 80]
    """
    if position < -40:
        return 0
    if position > 40:
        return 80
    return position + 40
def position_padding(words):
    """Clamp each distance via pos() and pad/truncate the list to max_len.

    Padding slots use 81 as an out-of-sentence marker.

    :param words: list of signed distances
    :return: list of exactly max_len values in [0, 81]
    """
    clamped = [pos(w) for w in words]
    if len(clamped) >= max_len:
        return clamped[:max_len]
    return clamped + [81] * (max_len - len(clamped))
def init_data(data_file, batch_size):
    """
    Build a DataLoader over the sentences whose relations are to be
    predicted.  Each input line has the format:

        entity1 entity2 shared-corpus-sentence

    :param data_file: path of the input file
    :param batch_size: batch size the model was trained with
    :return: (DataLoader yielding padded id/position tensors, list of
        "entity1 entity2" strings for the real — non-filler — lines)
    """
    data = []
    entity = []  # the "entity1 entity2" pair of every real input line
    with codecs.open(data_file, 'r', 'utf-8') as tfc:
        for lines in tfc:
            line = lines.split()
            data.append(line)
            entity.append(line[0] + " " + line[1])
    data_length = len(data)
    remainder = data_length % batch_size
    # Dummy rows so the final batch is full; their predictions are dropped
    # by the caller via the length of ``entity``.
    # NOTE(review): this appends batch_size - remainder + 1 fillers — one
    # more than strictly needed; harmless because the tail is truncated to
    # a whole number of batches below, but looks like an off-by-one.
    t = ["-", "-", "-00000000000000000000000000000000"]
    for i in range(batch_size-remainder+1):
        data.append(t)
    sen_data = deque()    # per-sentence list of characters
    positionE1 = deque()  # per-sentence distance of each char to entity 1
    positionE2 = deque()  # per-sentence distance of each char to entity 2
    for line in data:
        sentence = []
        # index of the first occurrence of each entity inside the sentence
        index1 = line[2].index(line[0])
        position1 = []
        index2 = line[2].index(line[1])
        position2 = []
        for i, word in enumerate(line[2]):
            sentence.append(word)
            position1.append(i - index1)
            position2.append(i - index2)
        sen_data.append(sentence)
        positionE1.append(position1)
        positionE2.append(position2)
    df_data = pd.DataFrame({'words': sen_data, 'positionE1': positionE1, 'positionE2': positionE2},
                           index=range(len(sen_data)))
    # Normalize every column to fixed-length vectors.
    df_data['words'] = df_data['words'].apply(X_padding)
    df_data['positionE1'] = df_data['positionE1'].apply(position_padding)
    df_data['positionE2'] = df_data['positionE2'].apply(position_padding)
    sen_data = np.asarray(list(df_data['words'].values))
    positionE1 = np.asarray(list(df_data['positionE1'].values))
    positionE2 = np.asarray(list(df_data['positionE2'].values))
    # Truncate to a whole number of batches.
    test = torch.LongTensor(sen_data[:len(sen_data) - len(sen_data) % batch_size])
    position1 = torch.LongTensor(positionE1[:len(test) - len(test) % batch_size])
    position2 = torch.LongTensor(positionE2[:len(test) - len(test) % batch_size])
    test_datasets = D.TensorDataset(test, position1, position2)
    test_dataloader = D.DataLoader(test_datasets, batch_size)
    return test_dataloader, entity
if __name__ == "__main__":
init_dic(word2id_file, id2relation_file)
# GPU是否可用
use_gpu = torch.cuda.is_available()
if use_gpu:
model = torch.load(model_path)
else:
model = torch.load(model_path, map_location="cpu")
model.use_gpu = False
test_dataloader, entity = init_data(data_path, batch_size=model.batch_size)
all_relation = []
for sentence, pos1, pos2 in test_dataloader:
sentence = Variable(sentence)
pos1 = Variable(pos1)
pos2 = Variable(pos2)
y = model(sentence, pos1, pos2)
if use_gpu:
y = np.argmax(y.data.cpu().numpy(), axis=1)
else:
y = np.argmax(y.data.numpy(), axis=1)
for i, re in enumerate(y):
all_relation.append(id2relation[re])
all_relation = all_relation[:len(entity)]
for i, en in enumerate(entity):
print(en, "-->", all_relation[i])
|
import scrapy
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
# from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.contrib.linkextractors.lxmlhtml import LxmlLinkExtractor
from kcrawler.items import KcrawlerItem
class bzhealthSpider(CrawlSpider):
    """Spider for bizbuysell.com health-care listings.

    Follows the 'Next Page' pagination link and emits one KcrawlerItem per
    listing anchor on every results page.
    """
    name = 'bzhealthSpider'
    allowed_domains = ['bizbuysell.com']
    start_urls = ['http://www.bizbuysell.com/health-care-companies-for-sale/']
    # rules = (
    #     # Extract links matching 'category.php' (but not matching 'subsection.php')
    #     # and follow links from them (since no callback means follow=True by default).
    #     Rule(LinkExtractor(allow=('category\.php', ), deny=('subsection\.php', ))),
    #     # Extract links matching 'item.php' and parse them with the spider's method parse_item
    #     Rule(LinkExtractor(allow=('item\.php', )), callback='parse_item'),
    # )
    # Follow pagination via the 'Next Page' anchor; every fetched results
    # page (including the first) is handled by parse_start_url.
    rules = (
        Rule(LxmlLinkExtractor(allow=('/health-care-companies-for-sale'),restrict_xpaths=('//div[@class="pagination"]/ul/li/a[@title="Next Page"]')),callback='parse_start_url',follow=True),
        # Rule(LxmlLinkExtractor(allow=('/health-care-companies-for-sale')),callback='parse_item'),
    )
    def parse_start_url(self, response):
        """Extract one item per listing anchor on the results page.

        Anchors are matched by ids containing 'List', excluding the
        'ListNumber' pagination ids.
        """
        sel = Selector(response)
        lists = sel.xpath('//a[contains(@id,"List") and not(contains(@id,"ListNumber"))]')
        items = []
        for li in lists:
            item = KcrawlerItem()
            item['source'] = u'bizbuysell'
            item['title'] = li.xpath('span[2]/b[@class="title"]/text()').extract()
            link = li.xpath('@href').extract()
            link[0] = link[0][0:link[0].index('/?d=')] #remove the tails of link
            item['link'] = link
            location = li.xpath('string(span[2]/p[@class="info"])').extract()
            location[0] = location[0].strip(' \t\n\r')
            # item['location'] = li.xpath('span[2]/p[@class="info"]/text()').extract()
            item['location'] = location
            # if(item['location']==[]):
            #     # location =
            #     # location[0] = location[0].strip(' \t\n\r')
            #     item['location'] = li.xpath('span[2]/p[@class="info"]/text()').extract()
            item['desc'] = li.xpath('span[2]/p[@class="desc"]/text()').extract()
            # item['url'] = li.xpath('a/@href').extract()
            # item['description'] = li.xpath('text()').re('-\s[^\n]*\\r')
            items.append(item)
            # Python 2 print statement (this module is Python 2 code).
            print item ['title']
        return items
# -*- python -*-
"""
MINT - Mimetic INTerpolation on the Sphere
MINT computes line/flux integrals of edge/face staggered vector fields. The line and flux integrals
are conserved in the sense that closed line integrals of a vector field deriving from a potential
or streamfunction are zero to near machine accuracy. MINT can also be applied to regrid vector
fields from source to destination grids.
"""
#
#__init__.py is automatically generated from __init__.py.in, DO NOT EDIT __init__.py
#
from ctypes import CDLL
from pathlib import Path
__version__ = "@VERSION@"
# open shared library, the name of the shared object is always libmint.so
# even on Windows
MINTLIB = CDLL(str(Path(__path__[0]).parent / Path('libmint.so')))
# Field layout identifiers: data attached cell-by-cell vs. per unique edge.
CELL_BY_CELL_DATA = 0
UNIQUE_EDGE_DATA = 1

# Quad-mesh topology constants.
NUM_EDGES_PER_QUAD = 4
NUM_VERTS_PER_QUAD = 4
NUM_VERTS_PER_EDGE = 2

# LFRic-style function-space identifiers (W1 = edge, W2 = face staggering).
FUNC_SPACE_W1 = 1
FUNC_SPACE_W2 = 2

# Public API of the package.
# BUGFIX: __all__ entries must be strings; the constants were previously
# listed as bare names (module objects/ints), which makes
# ``from mint import *`` raise TypeError.
__all__ = ('regrid_edges', 'grid', 'polyline_integral',
           'vector_interp', 'multi_array_iter', 'message_handler', 'utils',
           'iris_regrid', 'iris_utils', 'iris_flux',
           'MINTLIB', 'CELL_BY_CELL_DATA', 'UNIQUE_EDGE_DATA',
           'FUNC_SPACE_W1', 'FUNC_SPACE_W2',
           'NUM_EDGES_PER_QUAD', 'NUM_VERTS_PER_QUAD', 'NUM_VERTS_PER_EDGE')
from .message_handler import error_handler, warning_handler, printLogMessages, writeLogMessages
from .regrid_edges import RegridEdges
from .grid import Grid
from .polyline_integral import PolylineIntegral
from .vector_interp import VectorInterp
from .multi_array_iter import MultiArrayIter
from .extensive_field_adaptor import ExtensiveFieldAdaptor
from .nc_field_read import NcFieldRead
from .iris_utils import IrisToMintMeshAdaptor, createIrisCube
from .iris_regrid import IrisMintRegridder
from .iris_flux import IrisMintFlux
from .utils import getIntegralsInLonLat, getIntegralsInXYZ, saveVectorFieldVTK, saveMeshVTK, computeEdgeXYZ, computeLonLatFromXYZ
|
import requests
import json
import pickle
import time
from datetime import datetime
import logging
def save_obj(obj, name):
    """Pickle *obj* to obj/<name>.pkl using the highest protocol."""
    path = 'obj/' + name + '.pkl'
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored at obj/<name>.pkl."""
    with open('obj/' + name + '.pkl', 'rb') as handle:
        return pickle.load(handle)
def update_tokens(accounts, refresh):
    """Refresh the OAuth access token of each known agency account.

    Returns the fresh access tokens in the fixed account order below.
    """
    header = {'Host': 'target.my.com', 'Content-Type': 'application/x-www-form-urlencoded'}
    account_ids = ['dBp0GGsdn2khbLCk', 'Gcwd3JCrhpkj8DQn', 'cCV7EfBVutwh1yzn',
                   'iVTXNguDBEhyR2G0', 'STBDrKKowc1aVQ1M', 'n5zvIiexebHHazsF']
    tokens = []
    for account_id in account_ids:
        payload = {
            "client_id": str(account_id),
            "client_secret": str(accounts[account_id]),
            "refresh_token": str(refresh[account_id]),
            "grant_type": "refresh_token",
        }
        response = requests.post('https://target.my.com/api/v2/oauth2/token.json', data=payload, headers=header)
        print(response.json())
        logging.info(str(response.json()))
        tokens.append(response.json()['access_token'])
    return tokens
today_time = datetime.today()
logging.basicConfig(filename="logs/sample.log", level=logging.INFO)
accounts = {'dBp0GGsdn2khbLCk': '9Ne68rhbu4e2TYuJHGlbDAAPNlOB75LfA4bJomJTy73Pbl9gDAoskXjjG81l2YPqDrVI4db7bD5vDzVc9pVoilel3JryZQl34VQCJcQWwXtXixoSKOLCGVMyppIOFCHCgAxblsXzJQcMJU7qyPgoH07kyhd7JU36WMMog6jGjoYrSsbMuz0GdYtDrv6W3lUtRGUYbxHVGP7MyqvyLjzTDbG3g5Fj6kUTofHfkocofMSRdbcTXnA',
'Gcwd3JCrhpkj8DQn': '662kHNbGcUcUaRIXC5d0YnKylHfjb8oF9DSox7TEM3EciQ8CIMrmJrYWxZBYxriiPSpKsmMH99sc1PhZgOwmI6DWOvmWIzdd1Sqh0Yjj8nLyZqSGYmTLnG3qx1XbLBwbBq7uqzq6FVbfwbHWAfuwXRtIy9gfCiPAfX3wLcKvHwwwPHOxL992zq0mjbBK7Vs5PnQdT6FV60r9a5tZx4m3b1ehMx',
'cCV7EfBVutwh1yzn': 'bhYXxt9RE2tsuzsDoEfCIhPzRJ4j4NEycbqvYMMINXP498p6hRxgZCVjc8lcHKETXS00PCIqb7sZCdAp27sI6tBwBQXxjCIEo78PArnf6JIN1SGUrWbuIpFKpCftkPL8o3JnpUG0qa2nUC00TqHX5ncZ305oRh7z6Opspe7QVAoWRoOE8a2zw8xjZmw7zIPIQ9bB2IGDrwuI84DsQLeygzi0zY0p6a9zeAmXe6Q5slV7KO6tLtH',
'iVTXNguDBEhyR2G0': '1glZFtlmGyBlqecZ0XgVXvCq9u8sPKmV7N7j9Ry2mbRUiRNQTmwhP78wbYS8IqgIceplp6SmZWuYSFJLtSx24D2cSYMlCA9FICaKliz46ZsM1DoEwmgTGQOUV1W0bXzBPtrKtfbslHq30YavbqqxiLv907cGoxm7qSIT5J934h1sE2dmlLgXFlzuKaC4xEimHloXhBrOwO5atJede1HwI8',
'STBDrKKowc1aVQ1M': 'dH2305LdNFVT45oz56akxP5wavQLQBLMu25UQyOn59myfy48FT9kgNNUX5JHY0KbAEu4d5excWtab9zjZU9qsOLJuTOKpYYmRe58XcWnAT8fJF3iGrBt2n93rybe4MUQVfxhGSRRmuO0GRoaDKd76eH4EXvp7GoXwf9LcnWVGUTyxu7C38EuysrOjQXdFkGqeGpQdmXvmLF6CX3dGWatq4QJaxUipYCptOQxJe737fzCJg2xOI',
'n5zvIiexebHHazsF': 'NoETjJTbsmAyE752NUakv2bpExZii88W2U1wddWPywayBcj2sjDVMr4fzFyvY87HxWtt7tJMoUNPY6K6aI2U3PjjROJRisTbzQMbfLiogsuN70631Bb3VoBxsIbUIroICpPCAJBfJ1aGijYianGKfzVuwLDn1esiTFntX5Gm0dWPA6XjYenpCXfrMrLbONcU8I2MEDTg5mGzPsQgpdMekjA5gLtHSfxJokhoN8'}
refresh = {'dBp0GGsdn2khbLCk': '3leyQ3sJXj2cHzyXNegMnh6OMq1yJ9qWJn0MAPKLjlyTHmXq6xPhwKpVUFpEP58Y6LTf1HWdX4RRIb7ppCN0gbljadZUTSGBeWamd4B60Z8I2XVbQhYRMrYosAHIjlRnD6QAdVw2egHbpudLGLWpRCQkn9rZDZqcvUiHgdvQQcG6S3ORBFoNNBLIJp1o91oHpUYSx9kHCXfunp6BzCz7IvvTN0thU',
'Gcwd3JCrhpkj8DQn': 'cn5qkCDOUrxhTJc4HJ2d3INW7cJXV24RLQPBtyif8MdZV86Zb0q3CEJB8KZ7WawY3f7uuiLOz2uvimebDSlCObA2quRmavEcyk9poOGh5eGh0Ux0H7BgQthaUP77NIa2O7QcG54Q7TUD9SoXmOOUTfbplB3LxToSvvUACoqZbk0S8hxL9aQPbmWCn9WEGCaCcT6j2h33elfnpPnNbB',
'cCV7EfBVutwh1yzn': 'QgBD3s0ov4JF2nQalwmzcOAaej2gLEXWrbmZFByM6TkRTiYOFkeLJDFtuuobY7pVmvJZi7UTtSubLPyEIy8fUW0YgpN5ysEWzc3Qg7jPD9jw05bHYhyhrHIzVOgi58grlLFAYiw6p21cSYy8fjZuMOa8TmkwyYLqNQuGrrKs4sv44TixQ64RgqdG5KCUdYFPOR4F09ISmmA28Kg1vwUNr4Wlx1SpEGy9L4pSw6jPhhJoEIKgQiqLtdkYHyDA',
'iVTXNguDBEhyR2G0': 'F2lymTLBRZS8RB5mWPj9QIKI5t1nY59vZXOwrKDkLOeQUUArhX7Nq5jUvig8qnXFPN3FPVS8P79wR7elzxsSMTR0bRPBWeU30Iaj5s712ceJJrEfgtDCZDRMhrL2Jx74KIV0X0jKNahLbGmvv9gxDjE0VK5lOPQL8MggUyvWaUGlIl1DlRnrJV9x2mBmMt742rCKjnY0xWRl2TgWm4fWXGdkYnkOzOhxldT2OyY7S',
'STBDrKKowc1aVQ1M': 'CPhHyYJWN53nkXJDpsWAcWgncNY001Jzo2lcx0whC99TMCJHOudIToDa7H1XdjDOKRDarJlPeGrvL8sXs99n1pZwNGs0atWJzEr8wWK3eKBe1U9mVNvVKlchjdP5JQumg2LXGzZod1eC4YuB0349k3w3yHlxaHhCYQ33cCHDokPS425xqoDIDHe23sv073t6l19d3rLHR5QSgmgCnaN9b0EVpCo',
'n5zvIiexebHHazsF': 'yDsvvP2PmUmvqHGcqUyvfoH3C18GgU5lu1qXB7Fn65YoqV5dCdqnwwPQvXvDc5crvzTfLLaO9NKWI3hHmEwXHn5O6XboKFNsnlVvNkWZF6Y0t23DbpKYwRPDUL26dQldohZmavmgEGVvlBmBlAcyXYE0MZABhoQnVj2JdhQXjnyRLYUvsDr1Wmv3qeySd57C7IC4kjxJHEBezWA7SS76sFo6j9g5I7TH2FEy'}
# Refresh all six agency access tokens, then collect the client usernames
# attached to each account; clients[i] corresponds to the i-th account id in
# update_tokens' hard-coded order.
accounts_tokens = update_tokens(accounts, refresh)
clients = [[], [], [], [], [], []]
for token in range(len(accounts_tokens)):
    header = {'Host': 'target.my.com', 'Content-Type': 'application/json', 'Accept-Encoding': 'gzip,deflate,compress',
              'Authorization': 'Bearer '+str(accounts_tokens[token])}
    get_clients = requests.get('https://target.my.com/api/v1/clients.json', headers=header)
    # NOTE(review): get_clients.json() is re-parsed on every loop pass.
    for i in range(len(get_clients.json())):
        clients[token].append(get_clients.json()[i]['username'])
def update_client_tokens(ids_mail, mail_refresh):
    """Obtain a fresh access token for every client of every agency account.

    ids_mail maps account id -> list of client names; mail_refresh maps
    client name -> refresh token.  Tokens are returned in iteration order.
    """
    header = {'Host': 'target.my.com', 'Content-Type': 'application/x-www-form-urlencoded'}
    tokens = []
    for account_id, client_names in ids_mail.items():
        for client_name in client_names:
            payload = {
                "client_id": str(account_id),
                "client_secret": str(accounts[account_id]),
                "refresh_token": str(mail_refresh[client_name]),
                "grant_type": "refresh_token",
            }
            response = requests.post('https://target.my.com/api/v2/oauth2/token.json', data=payload,
                                     headers=header)
            print(response.json())
            logging.info(str(response.json()))
            tokens.append(response.json()['access_token'])
    return tokens
ids_mail = load_obj("ids_mail")
mail_refresh = load_obj("mail_refresh")
def create_client_tokens(client_name, new_client):
    """Issue an agency-client refresh token for each newly found client.

    client_name is the agency account id; new_client lists the client
    usernames that were not seen on the previous run.
    """
    header = {'Host': 'target.my.com', 'Content-Type': 'application/x-www-form-urlencoded'}
    refresh_tokens = []
    for client in new_client:
        payload = {
            "client_id": str(client_name),
            "client_secret": str(accounts[client_name]),
            "agency_client_name": str(client),
            "grant_type": "agency_client_credentials",
        }
        response = requests.post('https://target.my.com/api/v2/oauth2/token.json', data=payload, headers=header)
        print(response.json())
        logging.info(str(response.json()))
        refresh_tokens.append(response.json()['refresh_token'])
    return refresh_tokens
# The six per-account blocks that used to live here were identical apart
# from the account id and its index into ``clients``; they are folded into
# one loop.  The original processing order (index 5 down to 0) and the
# per-iteration save/reload of the pickle state are preserved.
account_ids = ['dBp0GGsdn2khbLCk', 'Gcwd3JCrhpkj8DQn', 'cCV7EfBVutwh1yzn',
               'iVTXNguDBEhyR2G0', 'STBDrKKowc1aVQ1M', 'n5zvIiexebHHazsF']
for idx in range(len(account_ids) - 1, -1, -1):
    account_id = account_ids[idx]
    print(clients[idx])
    if clients[idx] == ids_mail[account_id]:
        print("Client %s is OK" % account_id)
        logging.info("Client %s is OK" % account_id)
    else:
        print("Client %s is NOT OK" % account_id)
        logging.info("Client %s is NOT OK" % account_id)
    # Clients that appeared since the previous run need new refresh tokens.
    new_client = []
    for elem in clients[idx]:
        if elem not in ids_mail[account_id]:
            new_client.append(elem)
    ids_mail[account_id] = clients[idx]
    save_obj(ids_mail, 'ids_mail')
    refreshers = create_client_tokens(account_id, new_client)
    for i in range(len(new_client)):
        mail_refresh[new_client[i]] = refreshers[i]
    save_obj(mail_refresh, 'mail_refresh')
    # Reload so the next account sees the state just written to disk.
    ids_mail = load_obj("ids_mail")
    mail_refresh = load_obj("mail_refresh")
# Per-client credential maps (currently unused below).
clients_accounts = {}
clients_refresh = {}
# One access token per (account, client) pair, in ids_mail iteration order.
tokens = update_client_tokens(ids_mail,mail_refresh)
# NOTE(review): hard-coded proxy used for the Telegram API calls below.
proxies = {
    'http': '154.117.191.114:60028',
    'https': '154.117.191.114:60028',
}
# For each client token: any *active* campaign created within the last 15
# days that lacks an end date or budget limit gets blocked, renamed with a
# 'CHECK_' prefix, and reported via Telegram.
for token in tokens:
    header = {'Host': 'target.my.com', 'Content-Type': 'application/json', 'Accept-Encoding': 'gzip,deflate,compress',
              'Authorization': 'Bearer '+str(token)}
    response = requests.get('https://target.my.com/api/v1/campaigns.json', headers=header)
    campaigns = response.json()
    campaigns = sorted(campaigns, key=lambda x: x['status'])
    for i in range(len(campaigns)):
        print(len(campaigns))
        logging.info(len(campaigns))
        if str(campaigns[i]['status']) == 'active':
            date2_str = datetime.today().strftime("%d.%m.%Y")
            date2 = datetime.strptime(date2_str, '%d.%m.%Y')
            date1_str = ''
            # Rebuild the creation date 'YYYY-MM-DD...' as 'DD.MM.YYYY'.
            date_list = str(campaigns[i]['created']).split('-')
            date_list[2] = date_list[2][0:2]
            date_list[2] = date_list[2] + "."
            date_list[1] = date_list[1] + "."
            date_list.reverse()
            for word in date_list:
                date1_str += word
            date1 = datetime.strptime(str(date1_str), '%d.%m.%Y')
            if (date2-date1).days <= 15:
                # Throttle before each per-campaign detail request.
                print("sleeping")
                logging.info("sleeping")
                time.sleep(30)
                response1 = requests.get('https://target.my.com/api/v1/campaigns/'+str(campaigns[i]['id'])+'.json',
                                         headers=header)
                print(response1.json())
                campaign = response1.json()
                # NOTE(review): str(campaign['budget_limit']) == 0 compares a
                # string to an int and is always False — was '0' intended?
                if str(campaign['date_end']) == '' or str(campaign['budget_limit']) == '' or str(campaign['budget_limit']) == 0:
                    data = {'name': str('CHECK_')+str(campaign['name'].encode('utf-8')), 'status': 'blocked'}
                    edit_coms = requests.post('https://target.my.com/api/v1/campaigns/' + str(campaigns[i]['id'])
                                              + '.json', data=json.dumps(data), headers=header)
                    print(edit_coms.json())
                    # NOTE(review): the bot token and chat ids are hard-coded
                    # — move them to configuration.
                    telegram_chat = requests.post("https://api.telegram.org/bot759523145:AAEKxCONmNdMfsWBWAusKTvOsw_DB2Ok86M/sendMessage?chat_id=-1001478926327&text=" + str('CHECK_') + str(campaign['name'].encode('utf-8')), proxies=proxies)
                    telegram_me = requests.post("https://api.telegram.org/bot759523145:AAEKxCONmNdMfsWBWAusKTvOsw_DB2Ok86M/sendMessage?chat_id=195831781&text=" + str('CHECK_') + str(campaign['name'].encode('utf-8')), proxies=proxies)
                else:
                    print('campaign is OK')
                    logging.info('campaign is OK')
            else:
                print("more than 14")
                logging.info("more than 14")
        else:
            print(campaigns)
            logging.info(str(campaigns))
            print("Not active")
            logging.info("Not active")
    print('NEXT')
    logging.info('NEXT')
    # NOTE(review): this break exits after the FIRST token, so only the
    # first client's campaigns are ever inspected — confirm intentional.
    break
# Log wall-clock start/end times of this run.
print('started in' + str(today_time))
logging.info('started in' + str(today_time))
print('ended in (on next line)')
print(datetime.today())
logging.info('ended in ' + str(datetime.today()))
# 12:48
# 14:51
# Historical run durations, kept for reference:
'''
started in 15:00
ended in (on next line)
2019-01-10 16:55:32.323252
'''
'''
started in 12:50
ended in (on next line)
2019-01-11 14:29:46.280032
'''
class ComputeProcessorBase():
    """Base class defining the interface of compute-server processors."""

    #define interface of server action
    def ServerAction(self, tenat_id, server_id, action):
        # NOTE(review): 'tenat_id' looks like a typo for 'tenant_id', but
        # renaming would break keyword-argument callers — confirm first.
        #do nothing just define interface
        pass
|
def ans(s):
    """Sum, for every even digit s[i] in {2,4,6,8}, the count i + 1.

    (i + 1 is the number of substrings of s that end at position i, so this
    counts substrings ending in an even digit.)
    """
    cnt = 0
    # Bug fix: the original iterated `range(n)` using the global `n` read in
    # the __main__ block, so the function raised NameError when imported and
    # silently mis-counted whenever n != len(s).
    for i, ch in enumerate(s):
        if ch in ('2', '4', '6', '8'):
            cnt += i + 1
    return cnt


if __name__ == '__main__':
    n = int(input())  # declared length of s; kept to honor the input format
    s = input()
    print(ans(s))
class Stack:
    """LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        self.items = []

    def push(self, data):
        """Place *data* on top of the stack."""
        self.items.append(data)

    def pop(self):
        """Remove and return the top item (IndexError when empty).

        Fix: the original discarded the popped value; returning it matches
        the stdlib list.pop() convention and is backward compatible.
        """
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.items[-1]

    def size(self):
        """Return the number of stored items."""
        return len(self.items)
# Demo: push two values, discard the top one, then show the new top (prints 3).
new_stack = Stack()
for value in (3, 1):
    new_stack.push(value)
new_stack.pop()
print(new_stack.peek())
|
from training import Mode
from guppy import hpy
class SparsifyBase:
    """Base class for solver-driven neural-network sparsification strategies."""

    def __init__(
        self,
        model_train_obj,
        sparsification_weight=5,
        threshold=1e-3,
        relaxed_constraints=False,
        mean_threshold=False,
    ):
        self.model_train = model_train_obj
        self._logger = model_train_obj._logger
        self.sparsification_weight = sparsification_weight
        self.threshold = threshold
        # used to relax relu
        self.relaxed_constraints = relaxed_constraints
        self.mean_threshold = mean_threshold

    def sparsify_model(
        self,
        input_data_flatten,
        input_data_labels,
        mode=Mode.MASK,
        use_cached=True,
        start_pruning_from=None,
        save_neuron_importance=True,
    ):
        """Compute neuron importance with the solver and sparsify the model.

        Arguments:
            input_data_flatten {np.array} -- batch of input data for the solver
            input_data_labels {list} -- labels of the input batch
        Keyword Arguments:
            mode {enum} -- masking mode to apply (default: {Mode.MASK})
            use_cached {bool} -- reuse a cached solver result from a previous
                run when available (default: {True})
            start_pruning_from {int} -- index of the first layer represented
                in the MIP and pruned (default: {None})
            save_neuron_importance {bool} -- persist the computed neuron
                importance (default: {True})
        Returns:
            float -- percentage of parameters removed
        """
        raise NotImplementedError

    def _log_memory(self):
        """Log a guppy heap snapshot (memory-usage debugging aid)."""
        self._logger.info(hpy().heap())

    def get_sparsify_object(self):
        """Return the active sparsifier instance (self)."""
        return self
|
import logging as log
import cfnresponse
import boto3
import hashlib
import uuid
log.getLogger().setLevel(log.INFO)  # root logger: emit INFO and above

# Module-level client so warm Lambda invocations reuse the connection.
secretsmanager = boto3.client('secretsmanager')
def main(event, context):
    """Lambda handler for a CloudFormation custom resource: store a fresh
    random value into the Secrets Manager secret named in the resource
    properties, then report SUCCESS/FAILED back to CloudFormation."""
    # Deterministic physical id derived from stack + logical resource id, so
    # repeated invocations for the same resource report the same id.
    resource_fqn = event['StackId'] + event['LogicalResourceId']
    physical_id = hashlib.md5(resource_fqn.encode('utf-8')).hexdigest()
    log.info(physical_id)
    try:
        log.info('Input event: %s', event)
        secretsmanager.put_secret_value(
            SecretId=event['ResourceProperties']['Secret'],
            SecretString=uuid.uuid4().hex,
        )
        cfnresponse.send(event, context, cfnresponse.SUCCESS, {}, physical_id)
    except Exception as e:
        log.exception(e)
        # cfnresponse's error message is always "see CloudWatch"
        cfnresponse.send(event, context, cfnresponse.FAILED, {}, physical_id)
from mcpi.minecraft import Minecraft
import time as t

# Minecraft block ids 8 and 9 are flowing and stationary water.
WATER_BLOCKS = (8, 9)

mc = Minecraft.create()


def is_underwater(conn):
    """Return True when the block just above the player's feet is water."""
    pos = conn.player.getPos()
    return conn.getBlock(pos.x, pos.y + 1, pos.z) in WATER_BLOCKS


# Announce when the player goes underwater and again when they surface.
# (The duplicated position/block check in the original is now one helper.)
while True:
    if is_underwater(mc):
        mc.postToChat("You are underwater")
        # Poll rapidly until the player surfaces.
        while is_underwater(mc):
            t.sleep(0.001)
        mc.postToChat("you are no longer underwater")
import pil
import streamlit as st
import src.pages.about
import src.pages.dataAnalysis
import src.pages.dashboard
import src.pages.homePage
# Maps sidebar labels to page modules; each module must expose a main().
PAGE_PY = {
    "Home": src.pages.homePage,
    "About": src.pages.about,
    "Statistics About COVID": src.pages.dataAnalysis,
    "Dashboard": src.pages.dashboard
}
def main():
    """Render the sidebar navigation and dispatch to the chosen page."""
    st.sidebar.title("Menu")
    selection = st.sidebar.radio("Navigate", list(PAGE_PY.keys()))
    PAGE_PY[selection].main()
    st.sidebar.title("About")
    st.sidebar.info("""
This simple dashboard is maintained by Utkarsh Sharma. You can learn more about me at [https://github.com/SharmaLlama]. """)


if __name__ == "__main__":
    main()
|
import os
import fnmatch
import subprocess

# NOTE(review): Python 2 script — relies on dict.keys() list concatenation
# and d.iteritems(); it will not run under Python 3 without porting.
# NOTE(review): paths are interpolated unquoted into shell command strings
# (breaks on spaces; shell-injection risk with untrusted directory names).

dic1 = {}   # recording dir -> transcoded .mp3 path
dic2 = {}   # recording dir -> final3.mp4 path
d = {}      # recording dir -> [mp3, mp4] (whichever were found)

# Pass 1: transcode every published .ogg track to .mp3 with ffmpeg.
for dirname,dirs,files in os.walk('/var/bigbluebutton/published/presentation',topdown = False):
    list1 = []
    for name in files:
        var1 = os.path.join(dirname,name)
        if fnmatch.fnmatch(name,'*.ogg'):
            # NOTE(review): str.replace hits every 'ogg' in the whole path,
            # not only the extension.
            audio_output = var1.replace('ogg','mp3')
            stringz = 'ffmpeg -i '+var1+' -acodec libmp3lame -y '+audio_output
            subprocess.call(stringz,shell=True)
            teststring = dirname.rsplit('/',1)
            result = teststring[0]
            dic1[result] = audio_output

# Pass 2: locate each recording's final3.mp4 video.
for dirname,dirs,files in os.walk('/var/bigbluebutton/published/presentation',topdown = False):
    list1=[]
    for name in files:
        var1 = os.path.join(dirname,name)
        if fnmatch.fnmatch(var1,'*/final3.mp4'):
            teststring = dirname.rsplit('/',2)
            result = teststring[0]
            list1.append(name)
            dic2[result] = var1

# Merge the two maps: d[key] collects the audio and/or video found for key.
for key in set(dic1.keys()+dic2.keys()):
    try:
        d.setdefault(key,[]).append(dic1[key])
    except KeyError:
        pass
    try:
        d.setdefault(key,[]).append(dic2[key])
    except:
        pass

count = 0
# Pass 3: mux audio + video into result.mp4 when both are present.
for key,value in d.iteritems():
    os.chdir(key)
    if len(value) > 1:
        audio = value[0]
        video = value[1]
        string = 'sudo ffmpeg '+'-i'+' '+audio+' '+'-i'+' '+video+' '+'-vcodec copy -shortest -y ' +key+'/'+ 'result.mp4'
        subprocess.call(string,shell=True)
import argparse
from bitstring import BitArray
from PIL import Image

# Command-line interface: input BMP path and output COE path.
argparser = argparse.ArgumentParser(description="Converts 24-bit BMP image to 8-bit RRRGGGBB pixels in COE format")
argparser.add_argument("INPUT", help="Input file path")
argparser.add_argument("OUTPUT", help="Output file path")
args = argparser.parse_args()

# Create image object from BMP file
print(f"Opening \"{args.INPUT}\"...")
img = Image.open(args.INPUT)
print(f" Width: {img.size[0]}px\n Height: {img.size[1]}px")

# Convert every pixel to one RRRGGGBB byte, row-major order.
data = b''
for y in range(img.size[1]):
    for x in range(img.size[0]):
        pix = img.getpixel((x, y))
        # Decimate 8-bit channels down to 3/3/2 bits.
        r = int((pix[0] / 255) * 7)
        g = int((pix[1] / 255) * 7)
        b = int((pix[2] / 255) * 3)
        # Bug fix: the original appended bin(value) strings, whose width
        # varies with the value (bin(1) == '0b1' is a single bit), so the
        # packed channels were misaligned. Fixed-width fields are required
        # to always produce exactly one RRRGGGBB byte per pixel.
        bit = (BitArray(uint=r, length=3)
               + BitArray(uint=g, length=3)
               + BitArray(uint=b, length=2))
        data += bit.tobytes()

# Format bytes as COE (one hex byte per line).
fstr = "memory_initialization_radix=16;\nmemory_initialization_vector=\n"
for byte in data:  # renamed from `b` so the blue-channel variable isn't clobbered
    fstr += f"{byte:0{2}x},\n"
# Remove last comma and new line
fstr = fstr[:-2]

# Print ROM info
print("\nROM Info:")
print(" Width: 8 bits")
print(f" Depth: {len(data)} bytes")

# Write COE to disk (with-statement guarantees the handle is closed).
with open(args.OUTPUT, "wb") as fout:
    fout.write(fstr.encode("utf-8"))
print(f"\nCOE saved to \"{args.OUTPUT}\"")
print(f" Size: ~{int(len(fstr) / 1024)} KB")
|
import requests
from logger import log_error, log_warning, log_info
# Target Facebook page id for photo uploads.
page_id = 104173088514235
# SECURITY(review): hard-coded Graph API page access token committed to
# source control — it should be revoked and loaded from the environment or
# a secret store instead.
access_token = 'EAA6FiKzFv1MBAEJFttTi5hkBN6TbL3rNx9ATFg7Psh1YOCZA15bELbysfu0WZADA9oZAnLDpblvQZAS6FlZAxRWwY05UldU6knrbdZAE0FD54y1MTAiwwpZBmZBv58WgNIs1MLgXiWUqTzZByMCfIilNXs1SQ4jgSC6lZB7ZBCxxJamxP4W2Lh5oJK1'


def post_to_fb(img_url, description):
    # Upload the image at img_url to the page's photo feed with a caption,
    # then log the Graph API JSON response.
    img_payload = {
        'message': description,
        'url': img_url,
        'access_token': access_token
    }
    r = requests.post(
        f'https://graph.facebook.com/{page_id}/photos', data=img_payload)
    log_info("uploaded", f"to fb | {r.json()}")

class LeveldbDemo():
    # Empty placeholder class; apparently reserved for demo code.
    pass
class Leveldb():
    """Minimal persistent key/value store backed by a 'key=value' text file.

    Bug fixes versus the original:
      * open() used mode 'w+', which truncates the file, so previously
        stored pairs could never be read back; 'a+' creates the file when
        missing without destroying existing contents.
      * the load loop split the whole file once on '=' instead of parsing
        one 'key=value' pair per line.
      * flush() appended every pair again on each call, duplicating entries
        without bound; it now rewrites the file in place.
    Debug prints and Python-2-only constructs (has_key) were removed.
    """

    def __init__(self, filename="db"):
        # In-memory mirror of the on-disk contents.
        self.db = {}
        self.filename = filename

    def open(self):
        """Open the backing file and load any existing pairs into memory."""
        self.file = open(self.filename, 'a+')
        self.file.seek(0)
        for line in self.file:
            line = line.rstrip('\n')
            if not line:
                continue
            kv = line.split("=", 1)
            if len(kv) == 2:
                self.db[kv[0]] = kv[1]

    def Put(self, key, value):
        """Store *value* under *key* and persist immediately."""
        self.db[key] = value
        self.flush()

    def get(self, key):
        """Return the value stored under *key*, or None when absent."""
        return self.db.get(key)

    def delete(self, key):
        """Remove *key* if present and persist the change."""
        if key in self.db:
            del self.db[key]
            self.flush()

    def flush(self):
        """Rewrite the backing file from the in-memory dictionary."""
        self.file.seek(0)
        self.file.truncate()
        for key, value in self.db.items():
            self.file.write("{key}={value}\n".format(key=key, value=value))
        self.file.flush()

    def close(self):
        """Close the backing file handle."""
        self.file.close()
if __name__ == '__main__':
    # Manual smoke test (Python 2 print statements).
    db = Leveldb()
    db.open()
    db.Put("hello", "world")
    print db.db
    print db.get("hello")
    # db.delete("hello")
    # print db.get("hello")
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import shuffle
from sklearn.preprocessing import normalize
import matplotlib
import pylab
import random
def sigmoid(scores):
    """Elementwise logistic function 1 / (1 + e^-scores)."""
    return 1 / (1 + np.exp(-scores))


def logistic_predict(x, coefficients, label=False):
    """Logistic score of feature vector *x*.

    coefficients[0] is the intercept; coefficients[i + 1] weights x[i].
    Returns the probability, or with label=True the 0/1 class obtained by
    thresholding at 0.5.
    """
    score = coefficients[0]
    for value, weight in zip(x, coefficients[1:]):
        score += value * weight
    probability = sigmoid(score)
    if label:
        return 1 if probability >= 0.5 else 0
    return probability
def sgd(train_, alpha_, epochs_):
    """Stochastic gradient descent for logistic regression.

    Rows of *train_* carry the label in the last column. Weights start
    random; each epoch shuffles the data and applies per-sample updates.
    Returns the weight vector and the per-epoch mean squared error.
    """
    error_history = []
    w_ = [random.random() for _ in range(len(train_[0]))]
    for _ in range(epochs_):
        train_ = shuffle(train_)
        squared_errors = []
        for row in train_:
            error = row[-1] - logistic_predict(row[:-1], w_)
            squared_errors.append(error ** 2)
            w_[0] += alpha_ * error  # intercept update has no feature factor
            for j in range(len(row) - 1):
                w_[j + 1] += alpha_ * error * row[j]
        error_history.append(sum(squared_errors) / len(squared_errors))
    return w_, error_history
def least_squares(X, y):
    """Closed-form OLS fit with an intercept column prepended to X.

    Solves the normal equations (X'X)^-1 X' y and returns the coefficient
    column (intercept first) as a numpy matrix of shape (n_features+1, 1).
    """
    n_rows, n_cols = np.matrix(X).shape
    design = np.ones((n_rows, n_cols + 1))
    design[:, 1:] = X
    targets = np.matrix(y)
    gram_inverse = np.linalg.inv(design.transpose().dot(design))
    return gram_inverse.dot(design.transpose()).dot(targets.transpose())
def sgd_regularized(train_, alpha_, epochs_, lambda_):
    """SGD for logistic regression with L2 weight decay (strength lambda_).

    Rows of *train_* carry the label in the last column. Returns the weight
    vector and the per-epoch mean squared error.

    Bug fix: the intercept update was `alpha_ * error * row[0]`, wrongly
    scaling the bias step by the first feature's value; the bias gradient of
    the logistic loss has no feature factor (cf. sgd above), and the bias is
    conventionally excluded from the L2 penalty.
    """
    error_history = []
    w_ = [random.random() for _ in range(len(train_[0]))]
    for epoch in range(epochs_):
        train_ = shuffle(train_)
        epoch_error = []
        for row in train_:
            error = row[-1] - logistic_predict(row[:-1], w_)
            epoch_error.append(error**2)
            w_[0] += alpha_ * error  # bias: no feature factor, no decay
            for i in range(len(row) - 1):
                w_[i + 1] += alpha_ * ((error * row[i]) - lambda_*w_[i+1])
        error_history.append(sum(epoch_error)/len(epoch_error))
    return w_, error_history
def mse(real, predicted):
    """Mean squared error between two equal-length sequences."""
    total = sum((a - b) ** 2 for a, b in zip(real, predicted))
    return total / len(real)
def k_fold(data_, alpha_, epochs_, k):
    # k-fold cross-validation of the SGD logistic model: for each fold,
    # train on the remaining k-1 folds and measure the mean absolute label
    # error on the held-out fold. Returns the per-fold weight vectors and
    # the average fold error.
    fold_size = int(len(data_)/k)
    folds = [data_[i*fold_size: (i+1)*fold_size, :] for i in range(k)]
    ws_ = []
    error_ = []
    for i in range(len(folds)):
        test_ = folds[i]
        train_ = []
        # NOTE(review): this loop variable shadows the parameter `k`;
        # harmless because `k` is not used afterwards, but fragile.
        for k in np.array([x for j, x in enumerate(folds) if j != i]):
            train_.extend(k)
        w_, _ = sgd(train_, alpha_, epochs_)
        predicted_test = [logistic_predict(x, w_, label=True) for x in test_[:, :-1]]
        test_error = sum(abs(test_[:, -1] - predicted_test))/len(predicted_test)
        ws_.append(w_)
        error_.append(test_error)
    return ws_, sum(error_)/len(error_)
def plot_data(data_, save=False):
    """Scatter-plot a two-feature dataset colored by its binary label.

    When *save* is a filename, write the figure to results/<save> instead
    of showing it interactively.
    """
    features = data_[:, :-1]
    labels = data_[:, -1]
    palette = matplotlib.colors.ListedColormap(['#1b9e77', '#d95f02'])
    plt.scatter(features[:, 0], features[:, 1], c=labels, cmap=palette, s=30)
    if save:
        plt.savefig('results/' + save)
    else:
        plt.show()
    plt.close()
def plot_epochs_error(epochs_, epoch_errors, save=None):
    """Plot per-epoch training error; save under results/<save> when given.

    Bug fix: the original ignored the *save* argument's value and always
    wrote to 'results/epochs_mse_dataset1'; it now honors the requested
    filename, matching plot_data's behavior.
    """
    plt.plot(list(range(epochs_)), epoch_errors)
    if save:
        plt.savefig('results/' + save)
    else:
        plt.show()
    plt.close()
def question_1():
    # Dataset 1 experiment: normalize features, 70/30 train/test split,
    # train with SGD, report weights + test error, then run 5-fold CV.
    data_1 = np.loadtxt("data/ex2data1.txt", delimiter=",")
    # # plot dataset1:
    # plot_data(data_1)
    normalize(data_1[:, :-1], axis=0, copy=False)  # in-place (copy=False)
    train = data_1[:int(len(data_1) * 0.7), :]
    test = data_1[int(len(data_1) * 0.7):, :]
    alpha = 0.01
    epochs = 1000
    # # run logistic refression for dataset1:
    w, epoch_errors = sgd(train_=train, alpha_=alpha, epochs_=epochs)
    # show results for dataset1:
    print("w: " + str(w))
    plot_epochs_error(epochs, epoch_errors, save="epochs_mse_dataset1")
    # Misclassification count and rate on the held-out 30%.
    predicted_test = [logistic_predict(x, w, label=True) for x in test[:, :-1]]
    test_error = sum(abs(test[:, -1] - predicted_test))
    test_error_percent = test_error / len(predicted_test)
    print("test error: ", test_error, test_error_percent)
    print("\nKFOLD:")
    ws, error = k_fold(data_1, alpha_=alpha, epochs_=epochs, k=5)
    for i, w in enumerate(ws):
        print("w" + str(i) + ": ", w)
    print(error)
def question_2():
    """Regularized logistic regression on the map-feature dataset for
    several L2 strengths, printing the learned weights for each."""
    data_2 = np.loadtxt("data/dataset_mapfeature.txt", delimiter=",")
    # plot_data(data_2)
    normalize(data_2[:, :-1], axis=0, copy=False)
    alpha = 0.01
    epochs = 1000
    for lambda_ in [0, 0.01, 0.25]:
        # Bug fix: the original passed data_2[:, :-1], slicing off the label
        # column, so the trainer treated the last *feature* as the target.
        # Pass full rows (features + label), as question_1 does for sgd.
        W, _ = sgd_regularized(data_2, alpha, epochs, lambda_)
        print(W)
def main():
    # Run both assignment questions in order.
    question_1()
    question_2()
if __name__ == '__main__':
    main()
import sys
# Read n integers (one per line) and print the largest absolute difference
# obtainable between a most-frequent value and a second-most-frequent value.
n = int(input())
l = []
for line in range(n):
    l.append(int(sys.stdin.readline()))
l1 = list(set(l))                          # distinct values
c = len(l1)
lc = [l.count(l1[i]) for i in range(c)]    # frequency of each distinct value
mx1 = max(lc)                              # highest frequency
s = lc[:]
s.remove(mx1)                              # drop ONE occurrence of the top frequency
mx2 = max(s)                               # runner-up frequency (may equal mx1)
# NOTE(review): assumes at least two distinct values; with a single distinct
# value `s` is empty here and max(s) raises ValueError.
s1 = set()                                 # values occurring mx1 times
s2 = set()                                 # values occurring mx2 times
for i in range(c):
    if lc[i] == mx1: s1.add(l1[i])
    if lc[i] == mx2: s2.add(l1[i])
v1 = abs(max(s1)-min(s2))
v2 = abs(min(s1)-max(s2))
print(max(v1, v2))
# Copyright (c) 2012 United States Government as represented by
# the National Aeronautics and Space Administration. No copyright
# is claimed in the United States under Title 17, U.S.Code. All Other
# Rights Reserved.
#
# The software in this package has been released as open-source software
# under the NASA Open Source Agreement. See the accompanying file
# NASA_Open_Source_Agreement.pdf for a full description of the terms.
# Common definitions for Hypatheon application
# (used by client, indexer and all utilities)
import os, sys, time, glob, string, shutil, re
import subprocess
from operator import add, and_
from StringIO import StringIO
from traceback import print_exc
from app_config import *
# Application-wide exception vocabulary. Most are bare markers that let
# callers distinguish failure modes across the client, indexer and utilities.
class UserError(Exception): pass
class FalseStart(Exception): pass
class OffNominal(Exception): pass
class CommandFailure(Exception): pass
class CommandTimeout(Exception): pass
class CommandAbort(Exception): pass
class ConnectionFailure(Exception): pass
class DatabaseError(Exception): pass
class OperationalError(Exception): pass
class SearchTermError(Exception): pass
class CancelProcess(Exception): pass
class DeletedFileError(Exception): pass

class DenialOfServiceException(Exception):
    """Exception to alert of possible denial of service attack."""
    pass

class ArchiveError(Exception):
    """Exception raised if file cannot be unpacked, or is missing files."""
    pass

class ExtractionError(Exception):
    """Exception raised if PVS extraction fails."""
    pass

class InsertionError(Exception):
    """Exception raised if database insertion fails."""
    pass

class DeletionError(Exception):
    """Exception raised if database deletion fails."""
    pass
def reraise_with_trace(excep, msg):
    # Re-raise exception class `excep` with the current traceback text and
    # `msg` folded into the new exception message.
    # NOTE: Python 2 'raise type, value' syntax; invalid under Python 3.
    trace = StringIO()
    print_exc(20, trace)
    new_msg = '***** Propagating exception %s.\n\n%s\n\n%s\n' \
              % (excep, trace.getvalue(), msg)
    raise excep, new_msg
# ---------- #

# Globally accessible values are collected in these variables:
app_state = {}          # mutable global application state
startup_actions = []    # human-readable log of actions taken at startup

# ---------- Global constants ---------- #

indexing_log_name = 'indexing_log'
indexing_err_log = 'index_err_log'
db_ops_log_name = 'db_ops_log'
#db_ops_log_name = 'ops_log'

# HYPATHEON must be set in the environment; PVSPATH is optional.
hypatheon_dir = os.environ['HYPATHEON']
home_dir = os.environ.get('HOME')
pvs_path = os.environ.get('PVSPATH', '')
# Might be running without a PVS installation:
pvs_lib_dir = pvs_path and os.path.join(pvs_path, 'lib')
pvs_script = pvs_path and os.path.join(pvs_path, 'pvs')

# Standard installation subdirectories, unpacked into *_dir globals below.
hypatheon_subdirs = ('bin', 'config', 'data', 'index', 'lib', 'log',
                     'pvs', 'tmp',)
bin_dir, config_dir, data_dir, index_dir, lib_dir, log_dir, \
    pvs_conn_dir, tmp_dir = \
    [ os.path.join(hypatheon_dir, d) for d in hypatheon_subdirs ]

# Collection database files are named hyp-<name>.sdb or hyp-<name>-<version>.sdb.
db_glob_pattern = 'hyp-*.sdb'
default_coll_name = 'private'
default_db_path = os.path.join(data_dir, 'hyp-%s.sdb' % default_coll_name)
empty_db_path = os.path.join(data_dir, 'base', 'empty.sdb')
db_regexp_pattern = re.compile('hyp-([^-]+)(?:-(.*))?\.sdb\Z')

# Trivial callables used as defaults/placeholders.
identity_fn = lambda arg: arg
null_proc = lambda *args: None
null_string_fn = lambda *args: ''

progress_tag = '<<<progress>>>' # for progress reporting protocol over pipes
len_progress_tag = len(progress_tag)
any_file_type = '.*'
spawn_bin = os.path.join(bin_dir, 'spawn_with_timeout')
compression_types = (('.gz', 'gunzip'), ('.bz2', 'bunzip2'))

# PVS declaration and module types
main_item_types = ['library', 'module', 'declaration', 'proof', 'step']
declaration_types = [ 'formula', 'function', 'constant',
                      'judgement', 'type', 'tcc', ]
# might want others later
proof_types = [ 'f_proof', 'j_proof', 't_proof', ]
step_types = [ 'primitive_rule', 'defined_rule', 'strategy', ]
module_types = [ 'theory', 'datatype' ]
non_decl_types = ['library', 'module', 'proof', 'step'] + \
                 module_types + proof_types + step_types
def indented_items(items):
    """Prefix each item with a single space (menu-display indentation)."""
    return [' {}'.format(item) for item in items]
def cap_indented(items):
    """Capitalize each item and prefix it with a single indentation space."""
    return [' {}'.format(item.capitalize()) for item in items]
# Full and display-ready orderings of the item-type vocabulary; the indented
# and capitalized variants drive hierarchical menu/status displays.
all_item_types = ['library', 'module'] + module_types + \
                 ['declaration'] + declaration_types + \
                 ['proof'] + proof_types + \
                 ['step'] + step_types
indented_item_types = ['library', 'module'] + indented_items(module_types) + \
                      ['declaration'] + indented_items(declaration_types) + \
                      ['proof'] + indented_items(proof_types) + \
                      ['step'] + indented_items(step_types)
cap_item_types = ['Library', 'Module'] + cap_indented(module_types) + \
                 ['Declaration'] + cap_indented(declaration_types) + \
                 ['Proof'] + cap_indented(proof_types) + \
                 ['Step'] + cap_indented(step_types)

# Place declaration last in list below because it's the default query type.
type_hierarchy = [['library'],
                  ['module'] + module_types,
                  ['proof'] + proof_types,
                  ['step'] + step_types,
                  ['declaration'] + declaration_types,
                  ]

# Map each type name to its genus (family head) and its species (subtype
# name, '' for the genus itself).
type_genus, type_species = {}, {}
for genus in type_hierarchy:
    gname = genus[0]
    type_genus[gname] = gname
    type_species[gname] = ''
    for species in genus[1:] :
        type_genus[species] = gname
        type_species[species] = species

# Row labels for the collection-status display (leading spaces inside the
# strings encode hierarchy depth).
status_display_types = ('Collection name:', 'Collection version:',
                        'PVS version:',
                        'Libraries:',
                        'Modules:',
                        ' Theories:', ' Datatypes:',
                        'Declarations:',
                        ' Formulas:', ' Functions:', ' Constants:',
                        ' Judgements:', ' Types:', ' TCCs:',
                        'Proofs:',
                        ' F_proofs:', ' J_proofs:', ' T_proofs:',
                        'Steps:',
                        ' Primitive_rules:', ' Defined_rules:',
                        ' Strategies:',
                        'Paths indexed:', 'Database size (MB):',
                        )
# ---------- Configuration classes ---------- #

from ConfigParser import ConfigParser   # Python 2 module name

# Introduce a slightly enhanced ConfigParser that allows comments at
# ends of lines. Strip comments before applying 'get' methods.
class ConfigParserCommentless(ConfigParser):
    def __init__(self, *args):
        ConfigParser.__init__(self, *args)
    def get(self, *args, **kargs):
        # Everything after '#' on the value is treated as a comment.
        value = ConfigParser.get(self, *args, **kargs)
        return value.split('#')[0].strip()
    def getint(self, *args, **kargs):
        return int(self.get(*args, **kargs))
# ---------- PVS library lookup ---------- #

pvs_distrib_libs = ('prelude', 'finite_sets', 'bitvectors')
core_libs = pvs_lib_dir   # '' when no PVS installation is present
PVS_LIBRARY_PATH = os.environ.get('PVS_LIBRARY_PATH', '')
pvs_library_path = PVS_LIBRARY_PATH.split(':') if PVS_LIBRARY_PATH else []
# Need to expand ~ and ~user constructions in paths.
pvs_library_path = [ os.path.expanduser(p) for p in pvs_library_path ]
# Append the core PVS lib dir unless it is already on the path.
if core_libs and core_libs not in pvs_library_path:
    all_lib_paths = pvs_library_path + [core_libs]
else:
    all_lib_paths = pvs_library_path

# Each library collection is stored in its own SQLite database file.
# Find all the database files in the library directories.
# Names are of the form hyp-*.sdb or hyp-*-<version>.sdb.
# When multiple files having the same collection name are found,
# only the newest will be active and the other are marked superseded.
def find_collections():
    # Scan every library path (plus data_dir) for collection databases,
    # uncompressing any .gz/.bz2 copies first. Returns a list of
    # (collection name, db path, version) for the newest copy of each
    # collection; older copies are appended to the superseded_dbs global.
    compression_items = \
        [ (db_glob_pattern + c[0], c[1]) for c in compression_types ]
    coll_dict = {}
    for dir in all_lib_paths + [data_dir]:
        for patt, cmd in compression_items:
            # find all database files in compressed form
            paths = glob.glob(os.path.join(dir, patt))
            for p in paths:
                subprocess.call('%s "%s"' % (cmd, p), shell=True) # uncompress
                startup_actions.append("Uncompressed the database file '%s'" % p)
        collec_paths = glob.glob(os.path.join(dir, db_glob_pattern))
        for db_path in collec_paths:
            base = os.path.basename(db_path)
            date_time = os.path.getmtime(db_path)
            # following must have a match
            coll, version = db_regexp_pattern.match(base).groups()
            entry = (date_time, db_path, version or '')
            if coll in coll_dict:
                coll_dict[coll].append(entry)
            else:
                coll_dict[coll] = [entry]
    active = []
    for coll, coll_files in coll_dict.items():
        coll_files.sort()   # mtime order: last element is the newest copy
        newest = coll_files[-1]
        active.append( (coll, newest[1],
                        newest[2]) ) # collection name, path, version
        for dt, path, vers in coll_files[:-1]:
            superseded_dbs.append(
                (coll, dt, path, vers) ) # coll name, date_time, path, version
    return active
#    coll_dict[(coll, version or '')] = db_path
#    return [ (coll[0], path, coll[1]) for coll, path in coll_dict.items() ]
superseded_dbs = []   # (coll name, date_time, path, version) of older copies
collections = find_collections()   # active (name, path, version) triples
collections.sort()
# Look up full path for a library.
all_lib_dirs = {'prelude': 'prelude'} # prelude is a virtual directory
def find_library_dirs(*libs):
    # Populate all_lib_dirs with the first matching directory found for
    # each library name, searching the configured library paths in order.
    for lib in libs:
        if lib in all_lib_dirs: continue
        for p in all_lib_paths:
            dir_lib = glob.glob(os.path.join(p, lib))
            if dir_lib:
                all_lib_dirs[lib] = dir_lib[0]
                break
# Find all (potential) libraries on directories of all_lib_paths.
# Optionally suppress check for top.pvs membership.
def collect_library_names(every_dir=0):
    # Returns [(path, [sorted library names])]; a directory counts as a
    # library when it is a real (non-symlink) directory containing top.pvs,
    # unless every_dir is set.
    lib_names = []
    for d in all_lib_paths:
        names = [ os.path.basename(n) for n in glob.glob('%s/*' % d)
                  if os.path.isdir(n) and not os.path.islink(n) and
                     (every_dir or 'top.pvs' in os.listdir(n)) ]
        names.sort()
        lib_names.append( (d, names) )
    return lib_names
# Identify those theories that should be excluded from indexing.
def unindexed_files(lib_name, thy_names):
    # A .pvs file in the library directory is "unindexed" when it is
    # neither among thy_names (plus 'top') nor a generated _adt/_codt file.
    if lib_name == 'prelude': return []
    actuals = list(thy_names) + ['top']
    find_library_dirs(lib_name)
    lib_path = all_lib_dirs.get(lib_name, '')
    theories = [ os.path.splitext(os.path.basename(p))[0]
                 for p in glob.glob(os.path.join(lib_path, '*.pvs')) ]
    return [ thy for thy in theories
             if thy not in actuals and
                not re.search('.+_adt.*', thy) and
                not re.search('.+_codt.*', thy) ]
# For standalone mode, need to run PVS script to get version info.
def get_pvs_version():
    # Returns the PVS version string, caching it in app_state after the
    # first successful lookup; '?' whenever it cannot be determined.
    pvs_version = app_state.get('pvs_version')
    if pvs_version: return pvs_version
    if not pvs_script: return '?'
    try:
        version_cmd = '"%s" -version 2> /dev/null' % pvs_script
        version = subprocess.Popen(version_cmd, shell=True,
                                   stdout=subprocess.PIPE).stdout.read()
        start = version.index('PVS Version')
        try:
            # NOTE(review): a short split() result raises IndexError, not
            # ValueError, so it falls through to the outer bare except.
            pvs_version = version[start:].split()[2]
            app_state['pvs_version'] = pvs_version
        except ValueError:
            pvs_version = '?'
        return pvs_version
    except:
        return '?'
# ---------- Logging services ---------- #

repetition_state = {}    # {event_name : [prev_entry, count]}

# Emit a new entry to a log file with optional annotations for event durations.
# Another option keeps track of previous entries and omits a log entry if
# it repeats the previous one.
def emit_log_entry(log_file, entry_text, start=None, stop=None,
                   omit_rep_event=None):
    if omit_rep_event:
        rep_state = repetition_state.get(omit_rep_event, ['', 0])
        prev_entry, rep_count = rep_state
        if entry_text == prev_entry:
            # rep_state aliases the dict entry, so this increments in place.
            rep_state[1] += 1 # matches previous entry of this type
            return
    if start == None:
        # No timing supplied: stamp with the current time, no duration.
        start = time.time()
        num_text = '--'
    else:
        if stop == None: stop = time.time()
        duration = stop - start
        if duration >= 1.0:
            num_text = '-- %6.1f sec --' % duration
        else:
            num_text = '-- %6d usec --' % int(duration * 1000000)
    time_str = time.strftime('%b %d %H:%M:%S', time.localtime(start))
    if omit_rep_event:
        # Current entry different from previous one
        if rep_count > 0:
            debug_log(log_file, time_str, '--',
                      "*** Previous '%s' entry was repeated %d times. ***\n"
                      % (omit_rep_event, rep_count))
        repetition_state[omit_rep_event] = [entry_text, 0]
    debug_log(log_file, time_str, num_text, entry_text)
def debug_log(log_file, *values):
    # Write all values space-separated to log_file and flush immediately.
    # NOTE: Python 2 print-chevron syntax and the py2 builtin reduce.
    print >> log_file, \
          reduce(add, [ ' %s' % str(v) for v in values ], '')
    log_file.flush()

### Might not need append mode:
ops_log_file = open(os.path.join(log_dir, db_ops_log_name), 'a')
#ops_log_file = open(os.path.join(log_dir, 'ops_log'), 'a')
# ---------- Utility procedures ---------- #

# Per-process template for unique temp-file names: <tmp_dir>/<pid>_<counter>.
_temp_file_template = os.path.join(tmp_dir, '%s_%%s' % os.getpid())
_temp_index = 0

def new_temp_file_name():
    # Return a fresh temp-file path by bumping the per-process counter.
    global _temp_index
    _temp_index += 1
    return _temp_file_template % _temp_index
def version_tuple(version):
    """Convert a dotted version string such as '2.4.1' into (2, 4, 1)."""
    return tuple(int(part) for part in version.split('.'))
def create_empty_db():
    # Shell out to the build helper that creates a pristine empty database.
    subprocess.call('cd "%s"; build/create_empty_db data/base/empty.sdb'
                    % hypatheon_dir, shell=True)
# Distinguish cases 'generated/lib/thy' and 'lib/thy' from absolute paths.
def parse_theory_path(path):
    """Classify a theory path.

    Returns a (kind, dir-or-lib, theory) triple where kind is 'abs', 'rel',
    'generated', or '' (prelude with no generated prefix). A bare name is
    treated as a library name only.
    """
    directory, base = os.path.split(path)
    if not directory:
        return ('rel', base, '')  # library name only
    if os.path.isabs(directory):
        return ('abs', directory, base)
    # relative paths could be unexpanded; will become absolute later
    # alternatively, they could be for generated or prelude theories
    gen, lib = os.path.split(directory)
    if gen == 'generated' or lib == 'prelude':
        return (gen, lib, base)  # gen = '' if not generated
    return ('rel', directory, base)  # will become abs after expansion
# Run a shell command and return its output.
# Provide the number of output lines expected.
def os_command_output(cmd, nlines=1):
    # Reads exactly nlines lines of stdout; the process is not waited on,
    # and any further output is discarded when the pipe is closed.
    pipe = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            stdout=subprocess.PIPE).stdout
    result = ''.join( [ pipe.readline() for i in range(nlines) ] )
    pipe.close()
    return result
def quote_quote(text):
    """Double every single quote, for safe embedding in SQL string literals."""
    return text.replace("'", "''")
def unique_list(seq):
    """Return seq's items, first occurrence only, preserving order.

    Works for unhashable items too (linear membership scan, O(n^2)).
    """
    seen = []
    for element in seq:
        if element not in seen:
            seen.append(element)
    return seen
# Transpose a list of lists. Tuples are converted to lists.
# Assume uniform-length rows and columns.
def transpose_lists(lol):
    """Return the transpose of *lol*, each column as a fresh list."""
    width = len(lol[0])  # IndexError on empty input, as before
    return [[row[c] for row in lol] for c in range(width)]
def two_digit_min(value, divisor):
    """Scale *value* by *divisor* and format compactly: one decimal place
    below 10, otherwise a rounded integer string."""
    scaled = value / divisor
    if scaled < 10.0:
        return str(round(scaled, 1))
    return str(int(round(scaled)))


def approx_size(size, align=0):
    """Human-readable byte count ('500', '1.5K', '23M', ...); *align* pads
    plain numbers with a trailing space so columns line up."""
    if size < 1000:
        return '%d ' % size if align else '%d' % size
    if size < 999500:
        return '%sK' % two_digit_min(size, 1000.0)
    if size < 999500000:
        return '%sM' % two_digit_min(size, 1000000.0)
    return '%sG' % two_digit_min(size, 1000000000.0)


# Convert number of seconds into string approximation (e.g., "3.2 days").
def approx_duration(duration):
    """Render a second count using the largest sensible unit.

    Thresholds sit at 0.95 * seconds-per-unit so near-boundary values round
    into the larger unit (e.g. 23h -> '1.0 days').
    """
    if duration > 29979079:
        return '%s years' % two_digit_min(duration, 31556926.0)
    if duration > 82080:
        return '%s days' % two_digit_min(duration, 86400.0)
    if duration > 3420:
        return '%s hours' % two_digit_min(duration, 3600.0)
    return '%s minutes' % two_digit_min(duration, 60.0)
# Find all positions where capitalizations occur within a string and
# insert a blank before the capital letter. Any transition from a
# non-upper-case letter to an upper-case letter is considered a
# capitalization site. Example: 'getNextItem' ==> 'get Next Item'
def separate_capitalized(s):
    """Insert a space before each lower-to-upper case transition in *s*."""
    pieces = []
    # Pair each character with its predecessor; the sentinel 'A' prevents a
    # leading capital from receiving a space.
    for before, ch in zip('A' + s[:-1], s):
        if ch.isupper() and not before.isupper():
            pieces.append(' ')
        pieces.append(ch)
    return ''.join(pieces)
#def capital_split(s):
#    return [ ' '[ : int(prev.isupper() < next.isupper())] + next
#             for prev, next in zip('A' + s[:-1], s) ]

# Pattern for splitting a string into alphanumeric substrings.
# '_' excluded from alphanumeric char set.
alphanum_split_pattern = re.compile('[\W_]+', re.U)

# Matches a leading identifier-like token (stops at ':([{', space, newline).
ident_pattern = re.compile('[^:([{ \n]+')

# Find declaration name as leading identifier.
# Filter out VAR declarations.
decl_name_pattern = \
    re.compile(
        '\s*([\w?]+)(?:\(.+\))?(?:\[.+\])?(?:\{.+\})?\s*:(?!\s*var(?:\W+|\Z))',
        re.IGNORECASE)
#    re.compile('\s*([^:([{ \n]+)')
regexp_metachars = '*+?(|)-^$.!:{}[],='

def has_regexp_metachars(str):
    """Return 1 when *str* contains an unescaped regexp metacharacter."""
    previous = ' '
    for ch in str:
        if ch in regexp_metachars and previous != '\\':
            return 1
        previous = ch
    return 0

def regexp_escape(str):
    """Backslash-escape every regexp metacharacter occurring in *str*."""
    escaped = str
    # Each distinct metachar is replaced once; replacement text introduces
    # only a backslash plus that same char, so iteration order is irrelevant.
    for ch in set(str):
        if ch in regexp_metachars:
            escaped = escaped.replace(ch, r'\%s' % ch)
    return escaped
# Wrap string into text lines of max width, inserting newlines as needed.
# Optionally return a list of lines without newlines.
# Newlines within the string are overridden by wrapping.
def wrap_string(text, width=70, return_list=0):
    """Greedy word-wrap of *text* to *width* columns (re-flows existing
    newlines). Returns a newline-joined string, or the list of lines when
    return_list is true."""
    wrapped = []
    current, used = [], 0
    for word in text.split():
        if used + len(word) >= width:
            # current line is full: flush it (when non-empty) and restart
            if current:
                wrapped.append(' '.join(current))
            current, used = [word], len(word)
        else:
            current.append(word)
            used += len(word) + 1
    if current:
        wrapped.append(' '.join(current))
    return wrapped if return_list else '\n'.join(wrapped)
# Limit runs of blank lines to length one.
def purge_extra_blank_lines(text):
    """Collapse every run of blank lines in *text* down to a single one
    (a leading blank run is removed entirely); output ends with a blank
    line plus trailing newline, as before."""
    kept = []
    previous = ''
    for current in text.splitlines():
        if current == '' and previous == '':
            continue  # skip a blank line that follows another blank line
        kept.append(current)
        previous = current
    kept.append('\n')
    return '\n'.join(kept)
def read_text_file(path_name, as_lines=0, max_size=-1):
    # Best-effort text read: returns the contents as one string, or as a
    # list of lines when as_lines is set (max_size caps the amount read).
    # On any failure returns '' — note: '' even in as_lines mode, not [].
    try:
        file = open(path_name)
        if as_lines: contents = file.readlines(max_size)
        else: contents = file.read(max_size)
        file.close()
    except: # file permission or other error
        contents = '' ### add logging???
        try:
            file.close()
        except:
            pass
    return contents
# If supplied, error_fn accepts two string arguments, a title/category and an
# error message. If not supplied, error messages are directed to sys.stderr.
def write_text_file(content, path_name, as_lines=0, error_fn=None):
    # Best-effort text write (writelines when as_lines is set). Errors are
    # reported, never raised. NOTE: Python 2 'except E, v' and
    # print-chevron syntax.
    try:
        file = open(path_name, 'w')
        if as_lines: file.writelines(content)
        else: file.write(content)
        file.close()
    except Exception, exc_obj:
        msg = 'Exception: %s' % ' ; '.join(map(str, exc_obj.args))
        if error_fn: error_fn('Error on File Operation', msg)
        else: print >> sys.stderr, msg
        try: file.close()
        except: pass
def strip_neg(term):
    """Drop a single leading '-' or '!' negation marker from *term*.

    (Empty input raises IndexError, matching the original behavior.)
    """
    return term[1:] if term[0] in '-!' else term
def split_and_pad_terms(query_string):
    """Split *query_string* on ';', strip whitespace from each piece, and
    right-pad with '' so at least 4 entries come back."""
    stripped = [part.strip() for part in query_string.split(';')]
    padding = [''] * (4 - len(stripped))
    return stripped + padding
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash_database import DashDatabase
class SDBTab:
    """Base class for one named dashboard tab bound to a Dash app and a
    DashDatabase instance."""

    def __init__(self, name, app, db):
        self.name = name
        self.app = app
        self.db = db
        # Dash components rendered inside this tab.
        self.children = []

    def tab(self):
        """Build the dcc.Tab wrapper holding this tab's children."""
        return dcc.Tab(label=self.name, children=self.children)

    def update(self, session_id):
        """Refresh tab state for *session_id* (no-op in the base class)."""
        pass

    # Virtual method
    def create_callbacks(self, app: dash.Dash, db: DashDatabase):
        """Register Dash callbacks; subclasses override."""
        pass
|
import cv2
import matplotlib.pyplot as plt
def imreadgray(arquivo):
    """Load *arquivo* as a grayscale image and display it with matplotlib."""
    imagem = cv2.imread(arquivo, 0)  # flag 0 -> load as grayscale
    plt.imshow(imagem, cmap='gray')
    # Bug fix: the original did `plt.title = '...'`, which *replaced* the
    # pyplot title function with a string instead of setting the title.
    plt.title('Imagem em preto e branco')
    plt.show()


arquivo = input('Arquivo: ')
imreadgray('imagens/' + arquivo)
# Generated by Django 3.1.4 on 2021-02-11 16:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the optional 'carrier_code' CharField
    # (max 3 chars, nullable) to the pnr model. Avoid hand-editing except
    # when squashing migrations.
    dependencies = [
        ('pnr', '0020_auto_20210128_1621'),
    ]
    operations = [
        migrations.AddField(
            model_name='pnr',
            name='carrier_code',
            field=models.CharField(blank=True, max_length=3, null=True, verbose_name='carrier code'),
        ),
    ]
|
#!/usr/bin/env python3
# coding: utf-8
import os,sys
import tkinter as tk
import tkinter.font as font
# Minimal Tk "memo pad": one resizable Text widget with a custom font.
root = tk.Tk()
print(font.families())  # debug aid: list the fonts Tk can see
my_font = font.Font(root,family="渦筆",size="64",weight="normal") # fc-list, weight=normal|bold
edit = tk.Text(root, wrap=tk.WORD, font=my_font) # wrap=NONE|CHAR|WORD
edit.grid(column=0, row=0, sticky=(tk.N, tk.S, tk.E, tk.W))
edit.focus_set()
# Let the text widget stretch with the window.
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
root.title("メモ帳")
#root.iconbitmap(default="*.png")
# Window icon: icon.png located next to this script.
icon_path = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),'icon.png')
imgicon = tk.PhotoImage(file=icon_path ,master=root)
root.tk.call('wm', 'iconphoto', root._w, imgicon)
root.geometry("600x480")
root.mainloop()
|
""""""
# Standard library modules.
import os
import sys
import glob
import shutil
import zipfile
import tarfile
import fnmatch
import subprocess
from distutils import sysconfig
from distutils.ccompiler import new_compiler
import logging
logger = logging.getLogger(__name__)
# Third party modules.
import requests
import requests_cache
requests_cache.install_cache()
# Local modules.
# Globals and constants variables.
class EmbedPython:
    """Builds a self-contained Windows "embedded" Python distribution.

    Downloads the official embeddable zip matching the *running* interpreter
    version, bootstraps pip into it, installs the project's wheels and
    requirements, and compiles a small C launcher for each registered
    entry-point script.  Windows + Python 3 only (checked in :meth:`run`).
    """
    # URL templates for the CPython artifacts fetched during the build.
    PYTHON_SOURCE_BASEURL = 'https://www.python.org/ftp/python/{version}/Python-{version}.tgz'
    PYTHON_EMBED_BASEURL = 'https://www.python.org/ftp/python/{version}/python-{version}-embed-{arch}.zip'
    GET_PIP_URL = 'https://bootstrap.pypa.io/get-pip.py'
    PYTHON_MANIFEST_URL = 'https://raw.githubusercontent.com/python/cpython/master/PC/python.manifest'
    # C template for a GUI launcher (no console window).  Braces are doubled
    # so they survive str.format(), which fills in {module}/{method}.
    PYTHON_GUI_MAIN_CODE = """
#include <windows.h>
#include <stdio.h>
#include "Python.h"
int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance,
                    LPWSTR lpstrCmd, int nShow)
{{
    wchar_t *args[] = {{ L"-I", L"-c", L"import {module}; {module}.{method}()" }};
    return Py_Main(3, args);
}}
"""
    """
    According to the Python sys.argv documentation, "If the command was executed using the -c command line option to the interpreter, argv[0] is set to the string '-c'".
    This causes problem with argument parsers which expects the program name to be the first argument.
    The ``sys.argv`` are therefore modified to set first argument as the executable.
    """
    # C template for a console launcher; forwards command-line arguments to
    # the Python entry point after the -I -c prelude.
    PYTHON_CONSOLE_MAIN_CODE = """
#include <stdio.h>
#include <Python.h>
int main(int argc, char *argv[])
{{
    wchar_t** _argv = PyMem_Malloc(sizeof(wchar_t*)*(argc + 2));
    _argv[0] = L"-I";
    _argv[1] = L"-c";
    _argv[2] = L"import sys; sys.argv[0] = sys.executable; import {module}; {module}.{method}()";
    for (int i=1; i<argc; i++) {{
        wchar_t* arg = Py_DecodeLocale(argv[i], NULL);
        _argv[i + 2] = arg;
    }}
    int returncode = Py_Main(argc + 2, _argv);
    PyMem_Free(_argv);
    return returncode;
}}
"""
    def __init__(self, project_name, project_version, extra_wheel_dir=None):
        """
        Creates the class to create an embedded distribution.
        Use :meth:`add_wheel` to add wheel(s) associated to the project.
        Use :meth:`add_script` to specify which script to convert to an executable.
        Then call :meth:`run`.
        :arg project_name: project name
        :arg project_version: project version (e.g. ``0.1.2``)
        :arg extra_wheel_dir: directory containing wheels to use instead of
            downloading them from PyPI
        """
        self.project_name = project_name
        self.project_version = project_version
        self.extra_wheel_dir = extra_wheel_dir
        self.requirements = []      # PyPI-style requirement strings
        self.wheel_filepaths = []   # explicit wheel files to install
        self.scripts = []           # (module, method, executable_name, console)
    def _download_file(self, url, filepath):
        """
        Downloads file at *url* and saves it at *filepath*.
        https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py
        """
        r = requests.get(url, stream=True)
        if r.status_code != 200:
            raise IOError('Cannot download {}'.format(url))
        with open(filepath, 'wb') as f:
            # NOTE(review): r.raw streaming combined with requests_cache may
            # not replay cached bodies identically — confirm if caching of
            # these large downloads is intended.
            shutil.copyfileobj(r.raw, f)
        r.close()
    def _download_python_embedded(self, workdir):
        """Download the embeddable zip for the running interpreter and extract it into *workdir*."""
        filepath = os.path.join(workdir, 'python_embed.zip')
        try:
            version = '{0.major}.{0.minor}.{0.micro}'.format(sys.version_info)
            is_64bits = sys.maxsize > 2 ** 32  # detect interpreter bitness
            arch = 'amd64' if is_64bits else 'win32'
            url = self.PYTHON_EMBED_BASEURL.format(version=version, arch=arch)
            logger.info('downloading {0}'.format(url))
            self._download_file(url, filepath)
            logger.info('extracting zip in {0}'.format(workdir))
            with zipfile.ZipFile(filepath, 'r') as zf:
                zf.extractall(workdir)
        finally:
            # Always remove the temporary archive, even on failure.
            if os.path.exists(filepath):
                os.remove(filepath)
    def _prepare_python(self, workdir):
        """Unpack the stdlib zip into Lib/ and drop the ._pth files so pip-installed packages are importable."""
        logger.info('extracting python3X.zip')
        for filepath in glob.glob(os.path.join(workdir, 'python*.zip')):
            with zipfile.ZipFile(filepath, 'r') as zf:
                zf.extractall(os.path.join(workdir, 'Lib'))
            os.remove(filepath)
        # Removing the ._pth restores normal sys.path behaviour in the
        # embedded interpreter (the ._pth otherwise locks the path down).
        for filepath in glob.glob(os.path.join(workdir, '*._pth')):
            os.remove(filepath)
    def _fix_lib2to3(self, workdir):
        """Restore the lib2to3 .py sources (missing from the embeddable zip) from the CPython source tarball."""
        logger.info('fixing lib2to3')
        tarfilepath = os.path.join(workdir, 'python_source.tgz')
        try:
            version = '{0.major}.{0.minor}.{0.micro}'.format(sys.version_info)
            url = self.PYTHON_SOURCE_BASEURL.format(version=version)
            logger.info('downloading {0}'.format(url))
            self._download_file(url, tarfilepath)
            logger.info('extracting files in {0}'.format(workdir))
            with tarfile.open(tarfilepath) as tar:
                for member in tar.getmembers():
                    # Only the lib2to3 fixer/parser sources are needed.
                    if not fnmatch.fnmatch(member.name, 'Python-*/Lib/lib2to3/fixes/*.py') and \
                            not fnmatch.fnmatch(member.name, 'Python-*/Lib/lib2to3/pgen2/*.py'):
                        continue
                    logger.debug('extracting {0}'.format(member.name))
                    buf = tar.extractfile(member)
                    # Strip the leading 'Python-X.Y.Z/' component.
                    _, path = member.name.split('/', 1)
                    filepath = os.path.join(workdir, path)
                    with open(filepath, 'wb') as fp:
                        fp.write(buf.read())
                    buf.close()
        finally:
            if os.path.exists(tarfilepath):
                os.remove(tarfilepath)
    def _install_pip(self, python_executable):
        """Bootstrap pip into the embedded interpreter via get-pip.py."""
        filepath = os.path.join(os.path.dirname(python_executable), 'get-pip.py')
        try:
            logger.info('downloading {0}'.format(self.GET_PIP_URL))
            self._download_file(self.GET_PIP_URL, filepath)
            args = [python_executable, filepath]
            logger.debug('running {0}'.format(' '.join(args)))
            subprocess.run(args, check=True)
        finally:
            if os.path.exists(filepath):
                os.remove(filepath)
    def _install_wheels(self, python_executable):
        """Install the registered wheel files into the embedded interpreter."""
        if not self.wheel_filepaths:
            return
        args = [python_executable, '-m', 'pip', 'install', '-U']
        if self.extra_wheel_dir:
            args += ['--find-links', self.extra_wheel_dir]
        args.extend(self.wheel_filepaths)
        # NOTE(review): in addition to --find-links above, every wheel in
        # extra_wheel_dir is also installed explicitly — confirm that
        # installing *all* extra wheels (not only dependencies) is intended.
        if self.extra_wheel_dir:
            for filepath in glob.glob(os.path.join(self.extra_wheel_dir, '*.whl')):
                args.append(filepath)
        logger.debug('running {0}'.format(' '.join(args)))
        subprocess.run(args, check=True)
    def _install_requirements(self, python_executable):
        """Install the registered requirement strings (PyPI or extra_wheel_dir) into the embedded interpreter."""
        if not self.requirements:
            return
        args = [python_executable, '-m', 'pip', 'install', '-U']
        if self.extra_wheel_dir:
            args += ['--find-links', self.extra_wheel_dir]
        args.extend(self.requirements)
        logger.debug('running {0}'.format(' '.join(args)))
        subprocess.run(args, check=True)
    def _create_main(self, workdir, module, method, executable_name, console=True):
        """Generate, compile and link a C launcher for one entry point.

        Uses the *host* interpreter's headers/libs, so the launcher matches
        the embedded distribution only when both are the same version/arch.
        """
        # Create code
        logger.info('writing main executable code')
        c_filepath = os.path.join(workdir, executable_name + '.c')
        if console:
            content = self.PYTHON_CONSOLE_MAIN_CODE.format(module=module, method=method)
        else:
            content = self.PYTHON_GUI_MAIN_CODE.format(module=module, method=method)
        with open(c_filepath, 'w') as fp:
            fp.write(content)
        # Create manifest
        logger.info('downloading Python manifest')
        manifest_filepath = os.path.join(workdir, executable_name + '.exe.manifest')
        self._download_file(self.PYTHON_MANIFEST_URL, manifest_filepath)
        # Compile
        logger.info('compiling main executable code')
        objects = []
        try:
            compiler = new_compiler(verbose=True)
            compiler.initialize()
            py_include = sysconfig.get_python_inc()
            plat_py_include = sysconfig.get_python_inc(plat_specific=1)
            compiler.include_dirs.append(py_include)
            if plat_py_include != py_include:
                compiler.include_dirs.append(plat_py_include)
            compiler.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
            objects = compiler.compile([c_filepath])
            output_progname = os.path.join(workdir, executable_name)
            compiler.link_executable(objects, output_progname)
        finally:
            # Clean up intermediates; only the .exe stays in the distribution.
            if os.path.exists(c_filepath):
                os.remove(c_filepath)
            if os.path.exists(manifest_filepath):
                os.remove(manifest_filepath)
            for filepath in objects:
                os.remove(filepath)
    def _create_zip(self, workdir, dist_dir, fullname):
        """Zip the finished distribution as <dist_dir>/<fullname>.zip.

        NOTE(review): archive member names reuse the full on-disk path
        (``zf.write(path, path)``), so entries include the dist_dir prefix —
        confirm that layout is intended.
        """
        logger.info('creating zip')
        zipfilepath = os.path.join(dist_dir, fullname + ".zip")
        with zipfile.ZipFile(zipfilepath, "w") as zf:
            for dirpath, _dirnames, filenames in os.walk(workdir):
                for name in filenames:
                    path = os.path.normpath(os.path.join(dirpath, name))
                    if os.path.isfile(path):
                        zf.write(path, path)
    def add_wheel(self, filepath):
        """
        Adds a wheel to be installed.
        """
        self.wheel_filepaths.append(filepath)
    def add_requirement(self, requirement):
        """
        Adds a requirement to be installed (e.g. a PyPI package).
        """
        self.requirements.append(requirement)
    def add_script(self, module, method, executable_name, console=True):
        """
        Adds a console script to be converted to an exectuable.
        :arg module: module containing the method to start the script
            (e.g. ``package1.sample.gui``)
        :arg method: name of method to execute
            (e.g. ``main``)
        :arg executable_name: filename of the final executable
        :arg console: whether the script should run as a console script or
            a GUI script.
        """
        self.scripts.append([module, method, executable_name, console])
    def run(self, dist_dir, clean=True, zip_dist=False):
        """
        Creates an embedded distribution with the specified wheel(s) and script(s).
        :arg dist_dir: destination directory
        :arg clean: whether to remove all existing files in the destination directory
        :arg zip_dist: whether to create a zip of the distribution
        :return: path of the working directory containing the distribution
        """
        if sys.platform != 'win32':
            raise OSError('Only windows platform supported')
        if sys.version_info.major != 3:
            raise OSError('Only Python 3 supported')
        # Create working directory
        fullname = '{0}-{1}'.format(self.project_name, self.project_version)
        workdir = os.path.join(dist_dir, fullname)
        if os.path.exists(workdir) and clean:
            shutil.rmtree(workdir)
        os.makedirs(workdir, exist_ok=True)
        # Install python (skipped when a previous run already unpacked it)
        python_executable = os.path.join(workdir, 'python.exe')
        if not os.path.exists(python_executable):
            self._download_python_embedded(workdir)
            self._prepare_python(workdir)
            self._fix_lib2to3(workdir)
        # Install pip
        self._install_pip(python_executable)
        # Install wheels and requirements
        self._install_wheels(python_executable)
        self._install_requirements(python_executable)
        # Process entry points
        for module, method, executable_name, console in self.scripts:
            self._create_main(workdir, module, method, executable_name, console)
        # Create zip
        if zip_dist:
            self._create_zip(workdir, dist_dir, fullname)
        return workdir
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the obsolete ``redlewthb`` and ``fakazhjh`` fields."""

    # Must apply after this app's initial schema...
    dependencies = [('wyxbcga', '0001_initial')]
    # ...and before the yiupu app deletes its jpmwh model.
    run_before = [('yiupu', '0002_delete_jpmwh')]

    operations = [
        migrations.RemoveField(model_name='eezxvbbvmn', name='redlewthb'),
        migrations.RemoveField(model_name='ojshxdt', name='fakazhjh'),
    ]
|
#!/usr/bin/env python
# VHDL linter for OHWR coding style
import sys
import vhdllint.rulesexec
import libghdl.thin
import libghdl.iirs as iirs
# Import all rules
from vhdllint.filerules.check_line_length import CheckLineLength
from vhdllint.filerules.check_no_blank_line_at_eof import CheckNoBlankLineAtEOF
from vhdllint.filerules.check_missing_newline import CheckMissingNewline
from vhdllint.filerules.check_no_tab import CheckNoTAB
from vhdllint.filerules.check_no_trailing_spaces import CheckNoTrailingSpaces
from vhdllint.filerules.check_newline import CheckNewline
from vhdllint.filerules.check_header import CheckHeader
from vhdllint.filerules.check_charset import CheckCharSet
from vhdllint.lexrules.check_keyword_case import CheckKeywordCase
from vhdllint.lexrules.check_comments import CheckComments
from vhdllint.lexrules.check_spaces import CheckSpaces
from vhdllint.syntaxrules.check_attribute_decl import CheckAttributeDecl
from vhdllint.syntaxrules.check_attribute_name import CheckAttributeName
from vhdllint.syntaxrules.check_entity_simple import CheckEntitySimple
from vhdllint.syntaxrules.check_enum_char_lit import CheckEnumCharLit
from vhdllint.syntaxrules.check_guarded_signals import CheckGuardedSignals
from vhdllint.syntaxrules.check_disconnection import CheckDisconnection
from vhdllint.syntaxrules.check_simple_block import CheckSimpleBlock
from vhdllint.syntaxrules.check_group import CheckGroup
from vhdllint.syntaxrules.check_ports_mode import CheckPortsMode
from vhdllint.syntaxrules.check_config_spec import CheckConfigSpec
from vhdllint.syntaxrules.check_file_name import CheckFileName
from vhdllint.syntaxrules.check_one_unit import CheckOneUnit
from vhdllint.syntaxrules.check_generics import CheckGenerics
from vhdllint.syntaxrules.check_ports_name import CheckPortsName
from vhdllint.syntaxrules.check_basic_indent import CheckBasicIndent
from vhdllint.syntaxrules.check_name_decl import CheckNameDecl
from vhdllint.syntaxrules.check_ieee_packages import CheckIeeePackages
from vhdllint.syntaxrules.check_signals_name import CheckSignalsName
from vhdllint.syntaxrules.check_context_use import CheckContextUse
from vhdllint.syntaxrules.check_end_label import CheckEndLabel
from vhdllint.syntaxrules.check_parenthesis import CheckParenthesis
from vhdllint.syntaxrules.check_process_label import CheckProcessLabel
from vhdllint.syntaxrules.check_subprg_is_layout import CheckSubprgIsLayout
from vhdllint.syntaxrules.check_complex_stmt_layout import CheckComplexStmtLayout
from vhdllint.syntaxrules.check_instantiation import CheckInstantiation
from vhdllint.syntaxrules.check_entity_layout import CheckEntityLayout
from vhdllint.syntaxrules.check_context_clauses import CheckContextClauses
# ---------------------------------------------------------------------------
# Rule configuration for the OHWR VHDL coding-style linter.  The bracketed
# IDs in the comments are rule names from the style guide (v1.0);
# [M] = mandatory, [R] = recommended, "(I)"/"Inspection" = checked by a human
# reviewer, not by a rule here.
# ---------------------------------------------------------------------------
# [VHDLVersion] [M] VHDL standard version
# There is no specific rule, the analyzer will catch errors
libghdl.thin.set_option("--std=93c")
# Create rules
rules = vhdllint.rulesexec.RulesExec()
# List of rules (v1.0):
# File rules
# [FileName] [M] Name of VHDL file
rules.add(CheckFileName(extension='.vhd'))
# [FileContent] [R] Content of a VHDL file
rules.add(CheckOneUnit(name='FileContent', patterns=['EA', 'C', 'P', 'PB']))
# [FileHeader] [M] Header comment of a VHDL file
# TODO: template
rules.add(CheckHeader(name='FileHeader'))
# [LineLength] [M] Source line length
rules.add(CheckLineLength(132))
# [EndOfLine] [M] End of line
rules.add(CheckNewline(name='EndOfLine'))
# [Language] [M] Language for comments and identifiers (I)
# Inspection
# [CharSet] [M] Character set
rules.add(CheckCharSet())
# [NoTAB] [M] No tabulation
rules.add(CheckNoTAB(name='NoTAB'))
# [LastLine] [M] Last line in a file — two rules share one guide ID.
rules.add(CheckNoBlankLineAtEOF(name='LastLine'))
rules.add(CheckMissingNewline(name='LastLine'))
# [TrailingSpaces] [M] Trailing spaces
rules.add(CheckNoTrailingSpaces(name='TrailingSpaces'))
# Format rules
# [Comments] [M] Comment style
rules.add(CheckComments())
# [Indentation] [M] Indentation
rules.add(CheckBasicIndent(name='Indentation'))
# [WhiteSpaces] [M] Spaces
# NOTE(review): registered under the name 'Spaces' although the guide ID is
# [WhiteSpaces] — confirm the reported rule name is intended.
rules.add(CheckSpaces(name='Spaces'))
# [Context] [M] Context clauses
rules.add(CheckContextClauses())
# [UseClause] [M] Place of use clause
rules.add(CheckContextUse())
# [EntityLayout] [M] Layout of entity declaration
rules.add(CheckEntityLayout())
# [ComplexStmtLayout] [M] Layout of complex statements
rules.add(CheckComplexStmtLayout())
# [SubprgIsLayout] [M] Layout of is keyword in subprogram
rules.add(CheckSubprgIsLayout())
# [EndLabel] [M] Presence of the label after end
rules.add(CheckEndLabel())
# [Instantiation] [M] Layout of instantiation
rules.add(CheckInstantiation())
# [ProcessLabel] [M] Label of processes
rules.add(CheckProcessLabel())
# [Parenthesis] [M] Use of parenthesis in expressions
rules.add(CheckParenthesis())
# Identifiers
# [Keywords] [M] Keywords case
rules.add(CheckKeywordCase())
# [Identifiers] [M] Identifiers case (I)
# Inspection
# [Underscores] [M] Use of underscore in identifiers (I)
# Inspection
# [ReferenceName] [M] Reference
# [ArchNames] [M] Architectures name — every architecture must be named 'arch'.
rules.add(CheckNameDecl(kind=iirs.Iir_Kind.Architecture_Body,
                        predicate=(lambda n: n == 'arch'),
                        name='ArchNames'))
# [Constants] [M] Constants name — must be 'c_' followed by upper case.
# Note: this predicate is a list of message-or-None callables, whereas the
# other CheckNameDecl rules use a single boolean predicate.
rules.add(CheckNameDecl(
    kind=iirs.Iir_Kind.Constant_Declaration,
    predicate=[lambda s: ("constant name '{}' must start with 'c_'".format(s)
                          if not s.startswith('c_') else None),
               lambda s: (
                   "constant name '{}' must be in upper case after 'c_'".format(s)
                   if not s[2:].isupper() else None)],
    name='Constants'))
# [GenericsName] [M] Generics name
rules.add(CheckGenerics(name='GenericsName'))
# [PortsName] [M] Ports name
rules.add(CheckPortsName(name='PortsName'))
# [SignalsName] [M] Signals name
rules.add(CheckSignalsName(name='SignalsName'))
# [TypesName] [M] Types name — types and subtypes must start with 't_'.
rules.add(CheckNameDecl(kind=iirs.Iir_Kind.Type_Declaration,
                        predicate=(lambda n: len(n) >= 3 and n[:2] == 't_'),
                        name='TypesName'))
rules.add(CheckNameDecl(kind=iirs.Iir_Kind.Subtype_Declaration,
                        predicate=(lambda n: len(n) >= 3 and n[:2] == 't_'),
                        name='TypesName'))
# [PackagesName] [M] Packages name — packages must end with '_pkg'.
rules.add(CheckNameDecl(kind=iirs.Iir_Kind.Package_Declaration,
                        predicate=(lambda n: len(n) >= 4 and n[-4:] == '_pkg'),
                        name='PackagesName'))
# Language subset
# [VHDLVersion] [M] VHDL standard version
# Set by a switch
# [IEEEPkg] [M] Use of IEEE packages
rules.add(CheckIeeePackages(extra_pkg=['math_real',
                                       'std_logic_misc',
                                       'std_logic_textio'],
                            name='IEEEPkg'))
# [NoUserAttributes] [M] Attribute declarations not allowed
# Allow attributes for synthesis tools.
rules.add(CheckAttributeDecl
          (name="NoUserAttributes",
           allowed=['keep', 'shreg_extract', 'opt_mode', 'resource_sharing',
                    'altera_attribute']))
# [NoUserAttrName] [M] Attribute names
rules.add(CheckAttributeName())
# [EntityItems] [M] Entity declarative items
rules.add(CheckEntitySimple())
# [NoCharEnumLit] [M] Character as enumeration literal
rules.add(CheckEnumCharLit())
# [GuardedSignals] [M] Guarded signals
rules.add(CheckGuardedSignals())
# [Disconnection] [M] Disconnection Specification
rules.add(CheckDisconnection())
# [BlockStatement] [M] Block statements
rules.add(CheckSimpleBlock())
# [GroupDeclaration] [M] Group and group template
rules.add(CheckGroup())
# [PortMode] [M] Buffer and linkage mode
rules.add(CheckPortsMode())
# [ConfigSpec] [M] Configuration specification
rules.add(CheckConfigSpec())
# Synthesis rules — listed in the guide but not implemented here yet.
# [RemovedSynth] [M] Language features not allowed for synthesis
# [PortsType] [M] Type of top-level ports
# [GenericType] [M] Type of top-levels generics
# [WrapperUnit] [R] Wrapper of top-level units
# [RegisterTemplate] [R] Process for a register.
# [AsyncReset] [M] Asynchronous reset
# [RegisterReset] [M] Register reset
# [SignalAttribute] [M] Signal attributes
# [VectorDirection] [M] Direction of indexes
# [EmptyArray] [M] Minimal length of arrays
# [ClockResetPorts] [M] Clock and reset ports
# [ClocksUse] [M] Usage of clocks
# [FSMCoding] [R] FSM code style
# Run the configured rules over the files given on the command line.
vhdllint.rulesexec.execute_and_report(rules, sys.argv[1:])
|
from utime import sleep_us
"""
->See the datasheet
____|___________________________________|_____
| power_off -> 0x00 |
| power_on -> 0x01 |
| reset -> 0x07 |
| cont_h_res_mode -> 0x10 |
| cont_h_res_mode2 -> 0x11 |
| cont_l_res_mode -> 0x13 |
| onet_h_res_mode -> 0x20 |
| onet_h_res_mode2 -> 0x21 |
| onet_l_res_mode -> 0x23 |
____|___________________________________|_____
"""
# The two I2C slave addresses the BH1750 can respond on (selected by the
# ADDR pin level per the datasheet).
BH1750_I2C_ADDRESS_PRIM = 0x23
BH1750_I2C_ADDRESS_SEC = 0x5C

# Short mode code -> command opcode (C = continuous, O = one-time;
# HR / HR2 / LR = high / high-2 / low resolution — see the datasheet table).
MODE_DICT = {
    'CHR': 0x10,
    'CHR2': 0x11,
    'CLR': 0x13,
    'OHR': 0x20,
    'OHR2': 0x21,
    'OLR': 0x23,
}
class BH1750:
    """Minimal MicroPython driver for the BH1750 ambient-light sensor.

    The sensor is power-cycled and reset on construction.  Call
    :meth:`set_mode` once with a code from ``MODE_DICT``, then :meth:`lux`
    for readings.
    """
    def __init__(self, i2c=None, address=None):
        """Create the driver.

        :param i2c: machine.I2C-style bus object (required).
        :param address: 7-bit device address; when None the bus is scanned
            for a known BH1750 address.
        """
        if address is None:  # was `== None`; identity test is the idiom
            self._address = BH1750_I2C_ADDRESS_PRIM
            self.scanI2CAddress(i2c)
        else:
            self._address = address
        self._mode = None
        self._i2c = i2c
        # NOTE(review): the datasheet states the reset opcode is ignored in
        # power-down mode, so reset() between power_off() and power_on() may
        # be a no-op — confirm the intended sequence.
        self.power_off()
        self.reset()
        self.power_on()
    def scanI2CAddress(self, i2c):
        """Scan the I2C bus and adopt the first known BH1750 address found.

        Falls back silently to the primary address when devices are present
        but none matches.  Raises ValueError when the scan returns nothing.
        """
        print('Scan i2c bus...')
        devices = i2c.scan()
        if devices:
            for d in devices:
                print("Decimal address: ", d, " | Hex address: ", hex(d))
                if d in [BH1750_I2C_ADDRESS_PRIM, BH1750_I2C_ADDRESS_SEC]:
                    print("Connected decimal address: ", d)
                    self._address = d
                    return
        else:
            raise ValueError("I2C object is mandatory")
    def power_on(self):
        """Wake the sensor (opcode 0x01)."""
        self._i2c.writeto(self._address, bytes([0x01]))
    def power_off(self):
        """Enter power-down mode (opcode 0x00)."""
        self._i2c.writeto(self._address, bytes([0x00]))
    def reset(self):
        """Clear the data register (opcode 0x07)."""
        self._i2c.writeto(self._address, bytes([0x07]))
    def set_mode(self, mode_code=None):
        """Select a measurement mode by short code (see ``MODE_DICT``).

        With no argument, prints the list of valid codes.  Raises ValueError
        for an unknown code.  One-time modes (0x2X) are re-sent on every call
        because the device powers down after each one-time measurement.
        """
        if mode_code is None:
            print("Please write a mode code\n"
                  "->Mode codes :\n"
                  "\'CHR\' for Continuous High Resolution Mode\n"
                  "\'CHR2\' for Continuous High Resolution Mode 2\n"
                  "\'CLR\' for Continuous Low Resolution Mode\n"
                  "\'OHR\' for One Time High Resolution Mode\n"
                  "\'OHR2\' for One Time High Resolution Mode\n"
                  "\'OLR\' for One Time Low Resolution Mode\n")
        else:
            new_mode = MODE_DICT.get(mode_code)
            if new_mode is None:
                raise ValueError("\nPlease write an available mode code\n"
                                 "->Mode codes :\n"
                                 "\'CHR\' for Continuous High Resolution Mode\n"
                                 "\'CHR2\' for Continuous High Resolution Mode 2\n"
                                 "\'CLR\' for Continuous Low Resolution Mode\n"
                                 "\'OHR\' for One Time High Resolution Mode\n"
                                 "\'OHR2\' for One Time High Resolution Mode\n"
                                 "\'OLR\' for One Time Low Resolution Mode\n")
            if new_mode & 0x20:
                # One-time mode: always (re)issue the measurement command.
                self._mode = new_mode
                self._i2c.writeto(self._address, bytes([self._mode]))
            else:
                # Continuous mode: only write when the mode actually changes.
                if new_mode != self._mode:
                    self._mode = new_mode
                    self._i2c.writeto(self._address, bytes([self._mode]))
    def lux(self):
        """Return the current illuminance in lux.

        Waits the worst-case conversion time from the datasheet, reads the
        16-bit raw value and scales it (1.2 counts/lx; the HR2 modes count
        twice per lux).
        """
        if self._mode is None:
            raise ValueError("\nNo mode selected !\nPlease chose a mode with set_mode method !")
        # (an unreachable `return` after the raise was removed)
        # BUGFIX: the datasheet max measurement times are 24 ms (low res)
        # and 180 ms (high res); the original slept 24/180 *micro*seconds.
        sleep_us(24000 if self._mode in (0x13, 0x23) else 180000)
        data = self._i2c.readfrom(self._address, 2)
        factor = 2.0 if self._mode in (0x11, 0x21) else 1.0
        return (data[0] << 8 | data[1]) / (1.2 * factor)
# Read an integer and print its decimal digits in reverse order.
number = int(input())
remaining = number
reversed_value = 0
while remaining != 0:
    remaining, digit = divmod(remaining, 10)
    reversed_value = reversed_value * 10 + digit
print(reversed_value)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#import python packages
#install --> (sudo) apt-get install python-pip --> (sudo) pip install pillow python-ev3dev
#running --> run (sudo) python pythonfilename.py imagefilename.png (jpg will work along with others types) -->
# you will be given a dialogue --> just type "" and return/enter to continue
from PIL import Image, ImageFilter
import time
import os
import sys
from termcolor import colored
# Paper resolution: motor degrees moved per printed pixel step.
vert_move = 15
horiz_move = 15
# res = (horiz_deg / horiz_move)
# C-style boolean aliases used by the colour-detection flag below.
false = 0
true = 1
# Scratch counter (waitformotor historically rebound a local of this name).
xxx = 0
def waitformotor(motor):
    """Block until *motor* has stopped moving.

    Placeholder: the EV3 wait logic was stripped out, so this is currently a
    no-op.  The original body only assigned an unused local (``xxx = 0``),
    which has been removed.
    """
    pass
# Motor handles (brake mode) — empty-string placeholders until real ev3dev
# motor objects are wired in.  The paper feed would normally advance until
# the colour sensor reads > 50.
# paper.speed_regulation_enabled = u'on'
print("Init printer")
paper = ""
head = ""
pen1 = ""
pen2 = ""
def resetMotors():
    """Wait for every motor (paper feed, head, both pens) to come to rest."""
    for motor in (paper, head, pen1, pen2):
        waitformotor(motor)
#make a function to make a dot on the page
def makedot(pen,dir):
waitformotor(pen) #double check if motor is stopped before raising pen
#resize and flip image
#filename = sys.argv[1]
def printer(filename):
    """Classify *filename*'s pixels into per-colour bitmaps and plot them.

    Pass 1 walks the (horizontally flipped) image and marks, per pixel,
    which pen should draw it (0 = draw, 255 = skip) in the r/g/b/bl arrays
    while printing an ASCII preview.  Pass 2 replays the black/blue bitmap
    on the plotter; pass 3 (only when red/green pixels were seen) replays
    red/green with the two extra pens.

    BUGFIX: the bitmaps were built as ``[[200]*width]*height``, which makes
    every row an alias of the *same* list, so marking one pixel marked that
    column in every row.  They are now built with one independent row per
    scanline, and the ``bl_array[136][80]`` debug prints used to chase that
    bug have been removed.
    """
    w = 0
    h = 0
    l = 0
    img2 = Image.open(filename)  # open image
    img = img2.convert("RGBA")
    width, height = img.size  # get image size
    # define variables
    array = []
    w = width - 1  # start at the right edge so the printout is not mirrored
    print(width, " x ", height)
    r_array = []
    g_array = []
    b_array = []
    bl_array = []
    # different colors: (in rgba -- remove last number in set to convert to rgb)
    # red = (255,0,0,0) eg. in rgb -- (255,0,0)
    # green = (0,255,0,0)
    # blue = (0,0,255,0)
    # black = (0,0,0,0)
    # white = (255,255,255,0)
    # One independent row per scanline (see BUGFIX note in the docstring).
    r_array = [[200] * width for _ in range(height)]
    g_array = [[200] * width for _ in range(height)]
    b_array = [[200] * width for _ in range(height)]
    bl_array = [[200] * width for _ in range(height)]
    e4col = false  # set when any red/green pixel is seen (enables pass 3)
    # NOTE(review): the scan skips row height-1 and column 0 — confirm these
    # one-pixel margins are intentional.
    while h != height - 1:
        while w != 0:
            array.append(img.getpixel((w, h)))  # raw RGBA kept for reference
            r, g, b, a = img.getpixel((w, h))
            # Classify by dominant channel; near-black gets the black pen.
            if r > g and r > b:
                e4col = true
                r_array[h][width - w] = 0
                g_array[h][width - w] = 255
                b_array[h][width - w] = 255
                bl_array[h][width - w] = 255
                print("R", end="")
            elif g > r and g > b:
                e4col = true
                g_array[h][width - w] = 0
                r_array[h][width - w] = 255
                b_array[h][width - w] = 255
                bl_array[h][width - w] = 255
                print("G", end="")
            elif b > r and b > g:
                b_array[h][width - w] = 0
                g_array[h][width - w] = 255
                r_array[h][width - w] = 255
                bl_array[h][width - w] = 255
                print("B", end="")
            elif b < 50 and r < 50 and g < 50:
                b_array[h][width - w] = 255
                g_array[h][width - w] = 255
                r_array[h][width - w] = 255
                bl_array[h][width - w] = 0
                print("D", end="")
            else:
                # Light / unsaturated pixel: nothing to draw.
                b_array[h][width - w] = 255
                g_array[h][width - w] = 255
                r_array[h][width - w] = 255
                bl_array[h][width - w] = 255
                print(" ", end="")
            w = w - 1  # next pixel (right to left -> un-mirrored print)
        print(" " + str(h))
        w = width - 1  # reset width counter
        h = h + 1  # move to next row
    x = input('Is this picture ok? Press enter to continue...')  # confirm before printing
    initial = time.time()  # start of pass 2, used for the ETA estimate
    xd = width - 1
    yd = 0
    xda = 0  # pixels processed so far (progress counter)
    # Pass 2: black ("D") pen, plus blue ("B").
    while yd != height:
        while xd != 0:
            if bl_array[yd][width - xd] == 0:  # is pixel black?
                print("D", end="")
                waitformotor(head)
                # lower and raise pen
                # move pen left
            # NOTE(review): blue is sampled 21 columns behind the head and
            # without the width-flip used for black — looks like a pen-offset
            # hack; confirm against the hardware geometry.
            elif b_array[yd][max([0, xd - 21])] == 0:
                print("B", end="")
                waitformotor(head)
                # lower and raise pen
            else:
                print(" ", end="")
            # move pen left
            xd = xd - 1
            xda = xda + 1
        print("; PCT: "+str(int(100*xda/(width*height)))+"% ; Time Remaining: "+str(int((100-100*xda/(width*height))*(time.time()-initial)/(100*xda/(width*height)))))
        yd = yd + 1
        xd = width - 1
        # move paper forward
        # reset pen location
    waitformotor(paper)
    # reset paper location
    resetMotors()
    if e4col == true:
        x = input('Ready to print red/green? Press enter to continue...')  # confirm pass 3
        initial = time.time()
        xd = width - 1
        yd = 0
        xda = 0
        # Pass 3: red (pen1) and green (pen2).
        # NOTE(review): red is indexed with plain xd (no width-flip, unlike
        # bl_array in pass 2) — confirm which orientation is correct.
        while yd != height:
            while xd != 0:
                if r_array[yd][xd] == 0:
                    print("R", end="")
                    waitformotor(head)
                    # lower and raise pen
                    makedot(pen1, -1)
                    # move pen left
                elif g_array[yd][max([0, xd - 21])] == 0:
                    print("G", end="")
                    head.run_to_abs_pos(position_sp=(horiz_move*xd), speed_sp=400, ramp_down_sp=500)
                    waitformotor(head)
                    # lower and raise pen
                    makedot(pen2, 1)
                else:
                    print(" ", end="")
                # move pen left
                xd = xd - 1
                xda = xda + 1
            print("; PCT: "+str(int(100*xda/(width*height)))+"% ; Time Remaining: "+str(int(((100-100*xda/(width*height))*(time.time()-initial)/(100*xda/(width*height)))))+"s")
            yd = yd + 1
            xd = width - 1
            # move paper forward
            paper.run_to_abs_pos(position_sp=vert_move*(yd), speed_sp=250, ramp_down_sp=500)
            # reset pen location
            waitformotor(paper)
        # reset paper location
        resetMotors()
"""
img2 = Image.open("ev3screen.jpg")
raw = img2.tobytes()
image = Image.frombytes(img2.mode, img2.size, raw)
lcd = ev3.Screen()
lcd._img.paste(image, (0, 0))
lcd.update()
"""
|
import os
from RingPy import Ring
# Authenticate against the Ring API and download every recording in the
# account's event history into ./data.
ring = Ring.Ring()
user = input("username: ")
password = input("password: ")
ring.Authenticate(user, password)
ring.EstablishSession()
ring.GetDevices()
outputDir = "data"
if not os.path.exists(outputDir):
    os.mkdir(outputDir)
for event in ring.GetHistory():
    ring.GetRecording(event["id"], outputDir)
# Compare two files and add the diffs to each other
# Nothing will be removed, then program will only add tags
# Run using python compare.py file1 file2
# Imports
import sys
def checker(index, lis, string):
    """Return the first position >= *index* where *lis* equals *string*.

    Returns ``False`` when no such element exists.  (Note for callers:
    position 0 is falsy, so compare against ``False`` explicitly if a match
    at index 0 is possible.)
    """
    try:
        return lis.index(string, index)
    except ValueError:
        return False
print ""
print "###############"
print "# {}".format(sys.argv[0])
print "# Files: {}, {}".format(str(sys.argv[1]), str(sys.argv[2]))
print "###############"
print ""
linesX = []
linesY = []
with open(str(sys.argv[1])) as f:
linesX = f.readlines()
if not linesX:
linesX.append('')
with open(str(sys.argv[2])) as f:
linesY = f.readlines()
if not linesY:
linesY.append('')
x = 0
y = 0
if linesX == linesY:
print "The files are identical. Exiting program."
sys.exit(0)
while True:
if linesX[x] != linesY[y]:
print "Diff line {} in file {} and line {} in file {}".format(x, str(sys.argv[1]), y, str(sys.argv[2]))
ret = checker(x, linesY, linesX[x])
if ret:
for i in range(x, ret):
linesX.insert(i, linesY[i])
else:
linesY.insert(x, linesX[x])
x += 1
y += 1
if x >= len(linesX) and y >= len(linesY):
break
elif x >= len(linesX):
x -= 1
for i in range(y, len(linesY)):
linesX.insert(i, linesY[i])
break
elif y >= len(linesY):
y -= 1
for i in range(x, len(linesX)):
linesY.insert(i, linesX[i])
break
if linesX == linesY:
with open(str(sys.argv[1]), 'w') as f:
f.writelines(linesX)
with open(str(sys.argv[2]), 'w') as f:
f.writelines(linesY)
print "The files are now identical. Exiting program."
sys.exit(0)
|
"""a collection of context managers that modify file discovery on importings"""
import importnb
import tingle
__all__ = "Markdown", "RST", "YAML", "YML"
class LiterateMixin(importnb.Notebook):
    """importnb loader mixin that transpiles literate source files.

    Subclasses set ``format`` (the literate file suffix) and implement
    ``code(str) -> str`` to turn the raw document into Python source.
    """
    format = None  # literate-file suffix handled by the subclass (e.g. ".md")
    def get_data(self, path):
        # NOTE(review): tests self.path and ignores the *path* argument —
        # presumably they coincide for normal imports; confirm.
        if self.path.endswith(self.format):
            return self.code(self.decode())
        return super().get_data(path)
    # Source and data are the same transpiled text for literate files.
    get_source = get_data
class Markdown(LiterateMixin):
    """Import ``.md`` files as modules by converting Markdown to Python."""
    format = ".md"
    # Handles .py.md, .md and .md.ipynb files.
    extensions = F".py{format} {format} {format}.ipynb".split()
    def code(self, str):  # note: parameter shadows the builtin `str`
        # Markdown -> Python, then through IPython's input transformations.
        return tingle.util.ipy_transform(tingle.python.md2py(str))
    def exec_module(self, module):
        super().exec_module(module)
        # Rich display hook: render the original Markdown file under IPython.
        module._ipython_display_ = lambda: print(module.__file__) or __import__(
            "IPython").display.display(__import__("IPython").display.Markdown(filename=module.__file__))
class XO(LiterateMixin):
    """Import xonsh-flavoured Markdown (``.xsh.md``) via the xonsh execer."""
    format = ".md"
    extensions = F".xsh{format} .xsh{format}.ipynb".split()
    @property
    def execer(self):
        """Return (and cache) a xonsh Execer, reusing the active session's if any."""
        import xonsh.execer
        import builtins
        if hasattr(self, '_execer'):
            return self._execer
        # Prefer the execer of a running xonsh session when one exists.
        if (
            hasattr(builtins, "__xonsh__")
            and hasattr(builtins.__xonsh__, "execer")
            and builtins.__xonsh__.execer is not None
        ):
            self._execer = execer = builtins.__xonsh__.execer
        else:
            self._execer = xonsh.execer.Execer(unload=False)
        return self._execer
    def code(self, str):
        # Same Markdown -> Python pipeline as the plain Markdown loader.
        return tingle.util.ipy_transform(tingle.python.md2py(str))
    def parse(self, input):
        """Parse *input* with the xonsh execer so shell syntax is accepted."""
        ctx = {}  # dummy for modules
        return self.execer.parse(input, ctx, mode='exec',
                                 filename=self.path, transform=True)
    def exec_module(self, module):
        super().exec_module(module)
        # Rich display hook: render the original Markdown file under IPython.
        module._ipython_display_ = lambda: print(module.__file__) or __import__(
            "IPython").display.display(__import__("IPython").display.Markdown(filename=module.__file__))
class RST(LiterateMixin):
    """Import reStructuredText (``.rst``) files as Python modules."""
    format = 'rst'  # no leading dot here, unlike Markdown.format ('.md')
    extensions = F".py.{format} .{format} .{format}.ipynb".split()
    def code(self, str):
        # RST -> Python, then through IPython's input transformations.
        return tingle.util.ipy_transform(tingle.python.rst2py(str))
class LiterateDataMixin(LiterateMixin):
    """Strip the literate wrapper and return the embedded data document."""
    def code(self, code):
        # Extract the YAML payload from a Markdown or RST container;
        # other suffixes pass through unchanged.
        if self.path.endswith(".md"):
            code = tingle.yml.md2yml(code)
        if self.path.endswith(".rst"):
            code = tingle.yml.rst2yml(code)
        return code
class YAML(LiterateDataMixin):
    """Import YAML (optionally inside Markdown) as a module with a ``data`` attribute."""
    format = '.md'
    extensions = F".yml .yaml .yml.md .yaml.md".split()
    def code(self, str):
        # Embed the YAML text in generated source that safe_loads it at
        # import time.  NOTE(review): this breaks if the document itself
        # contains ''' — confirm that is acceptable for the intended inputs.
        code = F"""data = __import__('yaml').safe_load('''{super().code(str)}''')"""
        return code
    def exec_module(self, module):
        super().exec_module(module)
        # Rich display hook: show the parsed data as JSON under IPython.
        module._ipython_display_ = lambda: __import__(
            "IPython").display.display(__import__("IPython").display.JSON(module.data, root=module.__file__))
YML = YAML  # alias so both suffix spellings resolve to the same loader
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 28 21:10:56 2018
@author: Abhishek
"""
#import datetime
#print("'hello india it's" + str(datetime.datetime.now()))
|
class Car(object):
    """Simple record of a car listing; all five attributes are stored as given."""
    def __init__(self, price, speed, fuel, mileage, tax):
        self.price = price
        self.speed = speed
        self.fuel = fuel
        self.mileage = mileage
        self.tax = tax
    def display_all(self):
        """Return all five attributes as one space-separated string.

        (An unreachable ``return self`` that followed the return statement
        has been removed.)
        """
        return '{} {} {} {} {}'.format(self.price, self.speed, self.fuel, self.mileage, self.tax)
# Build a handful of sample listings and print each one.
cars = [
    Car('$20000', '35mph', 'full', '99mpg', '15%'),
    Car('$40000', '99mph', 'Kinda full', '7mpg', '15%'),
    Car('$2000', '30mph', 'empty', '190mpg', '12%'),
    Car('$10000', '40mph', 'full', '70mpg', '12%'),
    Car('$1000000', '50mph', 'full', '6mpg', '15%'),
]
for car in cars:
    print(car.display_all())
|
def sum_matrix(m):
    """Return the sum of every element in matrix *m* (a list of rows)."""
    return sum(sum(row) for row in m)
# Demo call (Python 2 print statement); expected output: 45.
print sum_matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
|
import pytest
import strawberry
from strawberry_django import ModelResolver
from .app.models import User, Group
@pytest.fixture(autouse=True)
def testdata(db):
    """Seed every test with three users (ids 1-3) and one group owned by user 3."""
    User.objects.bulk_create([
        User(name='a', age=10),
        User(name='b', age=20),
        User(name='c', age=20),
    ])
    Group.objects.create(name='x', admin_id=3)
Group.objects.create(name='x', admin_id=3)
@pytest.fixture
def schema(testdata):
    """Build a strawberry schema exposing query + mutation resolvers for User and Group."""
    class UserResolver(ModelResolver):
        model = User
    class GroupResolver(ModelResolver):
        model = Group
    # Combine both models' generated mutations into one root Mutation type.
    @strawberry.type
    class Mutation(UserResolver.mutation(), GroupResolver.mutation()):
        pass
    schema = strawberry.Schema(query=UserResolver.query(), mutation=Mutation)
    return schema
def test_mutation_create(schema):
    """createUser echoes the new row and persists it."""
    res = schema.execute_sync('mutation { user: createUser(data: {name: "x", age: 1}) { id name age } }')
    assert not res.errors
    created = res.data['user']
    assert created['name'] == 'x'
    assert created['age'] == 1
    stored = User.objects.get(id=created['id'])
    assert stored.name == 'x'
    assert stored.age == 1
def test_batch_create_mutation(schema):
    """createUsers persists several rows in a single mutation."""
    res = schema.execute_sync('''
    mutation {
        user: createUsers(data: [
            {name: "d", age: 1},
            {name: "e", age: 2}
        ]) {
            id
            name
            age
        }
    }''')
    assert not res.errors
    expected = [('d', 1), ('e', 2)]
    for row, (name, age) in zip(res.data['user'], expected):
        assert row['name'] == name
        assert row['age'] == age
        stored = User.objects.get(id=row['id'])
        assert stored.name == name
        assert stored.age == age
def test_mutation_update(schema):
    """updateUsers with an id filter renames exactly that user."""
    res = schema.execute_sync('mutation { users: updateUsers(data: {name: "y"}, filters: ["id=2"]) { name } }')
    assert not res.errors
    assert res.data['users'][0]['name'] == 'y'
    assert User.objects.get(id=2).name == 'y'
def test_mutation_delete(schema):
    """deleteUsers should remove exactly the filtered user and no others."""
    assert User.objects.count() == 3
    mutation = 'mutation { users: deleteUsers(filters: ["id=2"]) { id } }'
    result = schema.execute_sync(mutation)
    assert not result.errors
    assert result.data['users'][0]['id'] == '2'
    # Row 2 gone, the other two untouched.
    assert User.objects.filter(id=2).count() == 0
    assert User.objects.count() == 2
def test_mutation_create_with_relation(schema):
    """createGroup with adminId should link the existing user as admin."""
    result = schema.execute_sync('mutation { group: createGroup(data: {name: "x", adminId: 3}) { admin { id }} }')
    assert not result.errors
    created = result.data['group']
    assert created['admin']['id'] == '3'
def test_mutation_update_relation(schema):
    """updateGroups with adminId should repoint the group's admin relation."""
    result = schema.execute_sync('mutation { group: updateGroups(data: {adminId: 2}) { admin { id }} }')
    assert not result.errors
    updated = result.data['group'][0]
    assert updated['admin']['id'] == '2'
|
import random
import matplotlib.pyplot
import numpy
from Tkinter import *
import scipy.special
def zipf(a):
    """Draw one sample from a Zipf (zeta) distribution with exponent ``a``.

    Rejection sampler: propose x from the inverse-power tail, accept with
    probability proportional to t/b (Devroye-style algorithm).

    Args:
        a: distribution exponent; must be strictly greater than 1.

    Returns:
        A positive integer sample.

    Raises:
        ValueError: if ``a`` <= 1.
    """
    if a <= 1:
        raise ValueError("The parameter should be greater than 1")
    a = float(a)
    b = 2 ** (a - 1)
    while True:
        u = random.random()
        v = random.random()
        # BUGFIX: use float literals. This file imports Tkinter (Python 2),
        # where 1/x with integer x is integer division, so t collapsed to 1
        # for every x >= 1 and the rejection step never rejected anything.
        x = int(u ** (-1.0 / (a - 1)))
        t = (1 + 1.0 / x) ** (a - 1)
        if v * x * ((t - 1) / (b - 1)) <= t / b:
            return x
def exp(a):
    """Theoretical mean of the Zipf(a) distribution: zeta(a-1) / zeta(a).

    scipy.special.zetac returns zeta(x) - 1, hence the +1 corrections.
    """
    numerator = scipy.special.zetac(a - 1) + 1
    denominator = scipy.special.zetac(a) + 1
    return numerator / denominator
def prob(i, a):
    """P(X = i) for the Zipf(a) distribution: 1 / (zeta(a) * i**a)."""
    zeta_a = scipy.special.zetac(a) + 1
    return 1.0 / (zeta_a * i ** a)
class MainWin:
    """Tkinter window for experimenting with the Zipf sampler.

    Reads the exponent ``a`` from an entry field, draws 1000/10000/100000
    samples with zipf(), displays their empirical means next to the
    theoretical mean from exp(), and plots a log-scale histogram.
    """
    def __init__(self, master):
        frame = Frame(master)
        frame.pack()
        # "Inchide" (Romanian: "Close") quits the Tk main loop.
        self.button = Button(frame, text="Inchide", fg="red", command=frame.quit)
        self.button.grid(row=1, column=2)
        # "Calculeaza" (Romanian: "Compute") runs the sampling experiment.
        self.hi_there = Button(frame, text="Calculeaza", command=self.get_stats)
        self.hi_there.grid(row=0, column=2)
        # Input field for the Zipf exponent a.
        self.param_label = Label(frame, text="a: ")
        self.param_label.grid(row=0, column=0)
        self.param_entry = Entry(frame)
        self.param_entry.grid(row=0, column=1)
        # Read-only output fields: empirical means for three sample sizes,
        # then the theoretical mean ("Medie" = Romanian for "mean").
        self.avg1000_l = Label(frame, text="Medie (1000 de valori): ")
        self.avg1000_v = Entry(frame, state="readonly")
        self.avg1000_l.grid(row=1, column=0)
        self.avg1000_v.grid(row=1, column=1)
        self.avg10000_l = Label(frame, text="Medie (10000 de valori): ")
        self.avg10000_v = Entry(frame, state="readonly")
        self.avg10000_l.grid(row=2, column=0)
        self.avg10000_v.grid(row=2, column=1)
        self.avg100000_l = Label(frame, text="Medie (100000 de valori): ")
        self.avg100000_v = Entry(frame, state="readonly")
        self.avg100000_l.grid(row=3, column=0)
        self.avg100000_v.grid(row=3, column=1)
        self.avgexp_l = Label(frame, text="Medie teoretica: ")
        self.avgexp_v = Entry(frame, state="readonly")
        self.avgexp_l.grid(row=4, column=0)
        self.avgexp_v.grid(row=4, column=1)
    def get_stats(self):
        """Sample three batch sizes, fill the mean fields, show a histogram."""
        a = float(self.param_entry.get())
        l1000 = [zipf(a) for i in range(1000)]
        l10000 = [zipf(a) for i in range(10000)]
        l100000 = [zipf(a) for i in range(100000)]
        # Entries must be writable to update them, then locked again below.
        self.avg1000_v.config(state=NORMAL)
        self.avg10000_v.config(state=NORMAL)
        self.avg100000_v.config(state=NORMAL)
        self.avgexp_v.config(state=NORMAL)
        self.avg1000_v.delete(0, END)
        self.avg1000_v.insert(0, repr(numpy.mean(l1000, dtype=numpy.float64)))
        self.avg10000_v.delete(0, END)
        self.avg10000_v.insert(0, repr(numpy.mean(l10000, dtype=numpy.float64)))
        self.avg100000_v.delete(0, END)
        self.avg100000_v.insert(0, repr(numpy.mean(l100000, dtype=numpy.float64)))
        self.avgexp_v.delete(0, END)
        self.avgexp_v.insert(0, exp(a))
        self.avg1000_v.config(state="readonly")
        self.avg10000_v.config(state="readonly")
        self.avg100000_v.config(state="readonly")
        self.avgexp_v.config(state="readonly")
        self.show_hist(l100000, 1000, 6, a)
    def show_hist(self, l, n, k, a):
        """Plot a log-scale histogram of samples l with the theoretical pmf.

        n samples are drawn from l only to pick inner bin edges; k controls
        the number of bin edges (k+1 edges total).
        """
        ll = random.sample(l, n)
        # Bin edges: outermost from the full sample's min/max, inner edges
        # evenly spaced between the subsample's min/max.
        bins = [0.0 for i in range(k+1)]
        bins[1] = min(ll)
        bins[k-1] = max(ll)
        bins[0] = min(l)
        bins[k] = max(l)
        h = (bins[k-1] - bins[1])/(k-2)
        for i in range(2, k-1):
            bins[i] = bins[i-1] + h
        x = []
        y = []
        # Skip duplicate edges so the theoretical curve has distinct x values.
        for (i,b) in enumerate(bins[:-1]):
            if b != bins[i+1]:
                x.append(b)
                y.append(prob(b, a) * len(l)) # normalization so it looks good on the graph
        x.append(bins[k])
        y.append(prob(bins[k], a) * len(l))
        matplotlib.pyplot.clf()
        matplotlib.pyplot.hist(l, bins=bins, range=(bins[0], bins[k]), log=True)
        # NOTE(review): 'line' is not a documented drawstyle value in current
        # matplotlib ('default', 'steps', ...) — verify against the installed version.
        matplotlib.pyplot.plot(x, y, 'r--', drawstyle='line')
        matplotlib.pyplot.show()
def main():
    """Create the Tk root window, attach the UI, and run the event loop."""
    root_window = Tk()
    window = MainWin(root_window)
    root_window.mainloop()


if __name__ == '__main__':
    main()
|
__author__ = 'Swolfod'
# -*- coding: utf-8 -*-
from utilities.djangoUtils import *
from Lushu.models import *
from operator import attrgetter
from random import randint
from django.core.urlresolvers import reverse
from Lushu.consts import *
def showHome(request):
    """Render the home page with the first 50 major cities.

    BUGFIX: the function previously took no arguments but referenced
    ``request`` in its body, raising NameError on every call; Django passes
    the HttpRequest as the view's first argument.

    Args:
        request: the incoming Django HttpRequest.

    Returns:
        The rendered "home.html" response.
    """
    # Fetch by id (stable DB order), then sort in Python by English name.
    majorCities = [city for city in MajorCity.objects.order_by("id")[:50]]
    majorCities.sort(key = attrgetter('name_en'))
    return secureRender(request, "home.html", {"majorCities": majorCities, "invalid": {"destination": True}})
def closestSight(lng, lat, sights):
    """Return (sight, distance) for the sight nearest to (lng, lat).

    Returns (None, 9999999) when ``sights`` is empty.
    """
    best = None
    best_dist = 9999999
    for candidate in sights:
        candidate_dist = calcDistance(lng, lat, candidate["lng"], candidate["lat"])
        if candidate_dist < best_dist:
            best_dist = candidate_dist
            best = candidate
    return best, best_dist
def sightsInArea(latN, latS, lngE, lngW):
    """Collect travel-city, national-park and outlet sights inside a bounding box.

    Args:
        latN: northern latitude bound.
        latS: southern latitude bound.
        lngE: eastern longitude bound.
        lngW: western longitude bound.

    Returns:
        Tuple (citySights, parkSights, outletSights), each a list of marker
        dicts with id/title/pic/rating/lat/lng/minDuration/maxDuration keys.
    """
    # Keyed by city_id so a major city already present as a travel city
    # is not added twice.
    cities = {}
    travelCities = TravelCity.objects.filter(longitude__gte=lngW, longitude__lte=lngE, latitude__gte=latS, latitude__lte=latN).all()
    for travelCity in travelCities:
        sightId = "tc" + str(travelCity.id)
        cities[travelCity.city_id] = {
            "id": sightId,
            "title": travelCity.name,
            "pic": travelCity.imgUrl,
            "rating": randint(1, 10) / 2,  # placeholder rating
            "lat": travelCity.latitude,
            "lng": travelCity.longitude,
            "minDuration": randint(1, 2) / 2,
            "maxDuration": randint(2, 6) / 2
        }
    majorCities = MajorCity.objects.filter(id__lte=50, longitude__gte=lngW, longitude__lte=lngE, latitude__gte=latS, latitude__lte=latN).all()
    for majorCity in majorCities:
        if majorCity.city_id in cities:
            continue
        # BUGFIX: this branch previously read travelCity (the leftover loop
        # variable from the loop above) instead of majorCity, so major cities
        # were stored under the wrong key with the wrong id.
        sightId = "mc" + str(majorCity.id)
        cities[majorCity.city_id] = {
            "id": sightId,
            "title": majorCity.name_en,
            "pic": majorCity.imgUrl,
            "rating": randint(1, 10) / 2,
            "lat": majorCity.latitude,
            "lng": majorCity.longitude,
            "minDuration": randint(1, 2) / 2,
            "maxDuration": randint(2, 6) / 2
        }
    citySights = [sight for cityId, sight in cities.items()]
    nationalParks = NationalPark.objects.filter(longitude__gte=lngW, longitude__lte=lngE, latitude__gte=latS, latitude__lte=latN).all()
    parkSights = []
    for park in nationalParks:
        sightId = "pk" + str(park.id)
        parkSights.append({
            "id": sightId,
            "title": park.name_en,
            "pic": reverse("images.views.getImage", args=(park.photos.filter(toShow=True).first().photoUrl, THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT)),
            "rating": randint(1, 10) / 2,
            "lat": park.latitude,
            "lng": park.longitude,
            "minDuration": randint(1, 2) / 2,
            "maxDuration": randint(2, 6) / 2
        })
    outlets = Outlet.objects.filter(longitude__gte=lngW, longitude__lte=lngE, latitude__gte=latS, latitude__lte=latN).all()
    outletSights = []
    for outlet in outlets:
        sightId = "ol" + str(outlet.id)
        outletSights.append({
            "id": sightId,
            "title": outlet.name,
            # Static Google map centered on the outlet as its thumbnail.
            "pic": "http://maps.googleapis.com/maps/api/staticmap?center={0},{1}&zoom=8&size={2}x{3}&maptype=roadmap&markers=color:red%7C{0},{1}&sensor=false".format(outlet.latitude, outlet.longitude, THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT),
            "rating": randint(1, 10) / 2,
            "lat": outlet.latitude,
            "lng": outlet.longitude,
            "minDuration": 0.5,
            "maxDuration": 0.5
        })
    return citySights, parkSights, outletSights
import re
import urllib
import help_fns
from hoster import streamcloud
from hoster import ecostream
from hoster import filenuke
from hoster import movshare
from sites import movie
# Base URL of the scraped site.
urlHost = "http://www.movie4k.to/"
# Matches a search-result table row; captures the coverPreview id,
# film URL, film title, and (last group) the preview-image id.
regexSearchResult = '<TR id="(coverPreview\d{6,7})">\n\W*<TD width="550" id="tdmovies">\n\W*<a href="(.*)">(.*)<\/a>\n\W*<\/TD>\n\W*\n\W*<TD.*\n(\W*.*\n){0,20}\W*<\/TD>\W*\n\W*<TD.*>\n\W*.*<\/TD>\n\W*<TD.*<\/TD>\n\W*<TD.*src="http:\/\/img\.movie2k\.to\/img\/(.*)".*<\/TD>'
# Matches the hover-handler JS that embeds the preview image URL (group 1);
# prefixed with a picture id before compiling (see Movie4k.getPicture).
regexPicture = '"\).hover\(function\(e\){\n\W*\$\("body"\)\.append\("<p id=\'coverPreview\'><img src=\'(.*)\' alt=\'Image preview\' width=105 /></p>"\);'
# Matches a film-page hoster row: link URL, date (dd.mm.yyyy), hoster name.
regexFilmHosterList = '<tr id="tablemoviesindex2" >\\r\\n\\s*.*\\r\\n\\s*\\r\\n\\s*<a href="(.*)">(\\d{2}\\.\\d{2}\\.\\d{4}).*\\r\\n\\s*.*\\r\\n\\s*"16" /> (.*)</a>'
# Matches a series-page hoster link (URL, hoster name).
regexSerieHosterList = '<a href=\\\\?"(.{0,70}\.html)\\\\?" style=\\\\?"margin-left:18px;\\\\?"><img border=0 style=\\\\?"vertical-align:top;\\\\?" src=\\\\?".*\\\\?" alt=\\\\?".*\\\\?" title=\\\\?".*\\\\?" width=\\\\?"16\\\\?"> (.*)<\/a><\/td><\/tr>'
# Matches season ("Staffel") dropdown options: season number, selected flag.
regexStaffeln = '<OPTION value="(\d{1,2})"( selected)?>Staffel \d{1,2}<\/OPTION>'
# Matches episode dropdown options: URL, selected flag, episode number.
regexEpisoden = 'value="([^<>]*)"( selected)?>Episode (\d{1,2})'
# Matches the final "click link" anchor leading to the hoster video page.
regexVideoLink = '<a target="_blank" href="(.*)"><img border=0 src="http://img.movie2k.to/img/click_link.jpg" alt="(.*)" title="(.*)" width="742"></a>'
class Movie4k:
    """Scraper frontend for movie4k.to: search films and resolve hoster links."""
    # Maps hoster display names (as scraped) to the modules that can resolve
    # their video URLs; 'Streamclou' covers a truncated site spelling.
    knownHosts = {'Streamcloud': streamcloud,
                  'Streamclou': streamcloud,
                  'Ecostream': ecostream,
                  'Filenuke': filenuke,
                  'Movshare': movshare}

    def getName(self):
        """Return the human-readable site name."""
        return "Movie4k"

    def searchFilm(self, pSearchString):
        """POST a search query; return [{urlFilm, displayName, picture}, ...]."""
        url = urlHost + "movies.php?list=search"
        data = {"search": pSearchString}
        data = urllib.urlencode(data)
        link = help_fns.openUrlWithData(url, data)
        # Each result row lives in its own <TR id="coverPreview..."> block.
        rows = help_fns.extractText(link, '<TR id="coverPreview', '</TR>')
        res = []
        for r in rows:
            m = movie.Movie(r)
            res.append({"urlFilm": urlHost + m.url, "displayName": m.name, "picture": self.getPicture(m.pictureID, link)})
        return res

    def getHostsByFilm(self, pUrl):
        """Scrape a film page; return [{urlVideo, hoster, quality}, ...]."""
        link = help_fns.openUrl(pUrl)
        match = re.compile(' (.*)</a></td>.*<a href=\\\\"(.*)\\\\">Quality.*smileys/(\d)').findall(link)
        res = []
        for m in match:
            res.append({"urlVideo": m[1], "hoster": m[0], "quality": m[2]})
        return res

    def getLinkByHostLink(self, pUrl, pHoster):
        """Resolve a hoster page URL to a direct video URL, if supported.

        Returns a German error string when resolution fails or the hoster
        is unknown (behavior relied on by callers).
        """
        if pHoster in self.knownHosts:
            try:
                return self.knownHosts[pHoster].getVideoUrl(pUrl)
            # BUGFIX: was a bare ``except:`` which would also swallow
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            except Exception:
                return "Fehler bei " + pHoster
        else:
            return pHoster + " gibt es noch nicht"

    def getPicture(self, pictureID, link):
        """Extract the preview-image URL for pictureID from the page, or ''."""
        match = re.compile(pictureID + regexPicture).findall(link)
        if match:
            return match[0]
        else:
            return ""
|
# Read a sentence and print every character at an odd index, one per line.
sentence = input("Sentence kiriting: ")
for index, character in enumerate(sentence):
    if index % 2 == 1:
        print(character)
|
import copy
import itertools
import os
from collections import Counter
"""
The facts:
174k words ~ in words.txt
awk '{print length}' words.txt | sort -n | uniq -c
// words length
96 2
978 3
3919 4
8672 5
15290 6
23208 7
28558 8
25011 9
20404 10
15581 11
11382 12
7835 13
5134 14
3198 15
1938 16
1125 17
594 18
328 19
159 20
62 21
29 22
13 23
9 24
2 25
2 27
1 28
"""
class HangmanWordPassEngine:
    """
    Performs general bookkeeping of current possible hangman word set space.
    Provides techniques to prune word set space given
    a correctly or incorrectly guessed letter along with other parameters.
    Manages intermediate pass data
    Used exclusively by HangmanLetterStrategy.

    NOTE: Python 2 code (print statements, tuple parameters, iterator.next()).
    """
    #This dict contains the counts of all the characters in each of
    #the word length arranged word sets
    _letter_counters = {}
    _static_initalized = False
    #On-disk sorted copy of the dictionary file, shared by all instances.
    _sorted_dictfile = None
    #Two scratch files alternated as read/write targets between passes.
    _passfile_A = None
    _passfile_B = None
    #Fixed token embedded in scratch file names so they are hard to guess.
    _unchanging_randval = '234902358039284234832893842'
    def __init__(self, answer_length, settings, mystery_letter):
        #mystery_letter is the placeholder character used in the hangman
        #pattern for letters not yet revealed.
        self._settings = settings
        self._display = settings.display
        self._answer_length = answer_length
        self._mystery_letter = mystery_letter
        self._current_words_pipeline_readable = None
        self._current_pass = 1
        #self._regex_used = 0
        self.__initialize_passes()
    def __del__(self):
        #Drop references so generators and file handles can be reclaimed.
        self._answer_length = None
        self._display = None
        self._settings = None
        self._current_words_pipeline_readable = None
        self._current_write_passfile = None
        self._current_read_passfile = None
        self._previous_write_passfile = None
        self._pass_cycle = None
    @staticmethod
    #remove leftover files
    def cleanup():
        try:
            if HangmanWordPassEngine._sorted_dictfile != None:
                os.remove(HangmanWordPassEngine._sorted_dictfile.name)
            if HangmanWordPassEngine._passfile_A != None:
                os.remove(HangmanWordPassEngine._passfile_A.name)
            if HangmanWordPassEngine._passfile_B != None:
                os.remove(HangmanWordPassEngine._passfile_B.name)
            HangmanWordPassEngine._sorted_dictfile = None
            HangmanWordPassEngine._passfile_A = None
            HangmanWordPassEngine._passfile_B = None
            HangmanWordPassEngine._letter_counters.clear()
            HangmanWordPassEngine._letter_counters = None
        except OSError as e:
            print 'Operation failed: %s' % e
    @staticmethod
    #sort the dictionary file and write it out
    def initialize(settings):
        if HangmanWordPassEngine._static_initalized == False:
            settings.display.clock("Statically Initializing engine 0.1")
            HangmanWordPassEngine.__sort_and_write_dictfile_words(settings)
            settings.display.clock("Statically Initializing engine 0.2\n")
            HangmanWordPassEngine._static_initalized = True
    def setup(self, letter_strategy):
        """
        Setup the engine. Initialize word set and counter structures to accurately
        reflect current word set and tally data
        Args: self, strategy
        Returns: Nothing
        """
        self._display.chatty("Entering setup")
        counter = None
        # access class static _letter_counters dict for possible cached copy of counter
        counter_tuple = HangmanWordPassEngine._letter_counters.get(self._answer_length)
        if counter_tuple != None: pass_size, counter = counter_tuple
        if counter != None:
            #Cache hit: hand the strategy a private copy of the cached tally.
            letter_strategy.set_letter_counts(pass_size, copy.deepcopy(counter))
            # Set first pass of dictionary words
            # grab the words from the sorted dictionary file using the get_dictfile_stream
            pass_A = \
            (word for word in HangmanWordPassEngine.__get_grouped_words_stream(self._answer_length))
            self._current_words_pipeline_readable = pass_A
        else:
            # Second pass of dictionary words
            # tally all the dictionary words and store for later
            #file_pass_B = (word for word in self._settings.get_dictfile_words(self._answer_length))
            pass_B = \
            (word for word in HangmanWordPassEngine.__get_grouped_words_stream(self._answer_length))
            tally, pass_size, _ = self.__process_and_tally_filtered_stream(set(), pass_B)
            letter_strategy.set_letter_counts(pass_size, tally)
            counts_deepcpy = copy.deepcopy(tally)
            HangmanWordPassEngine._letter_counters[self._answer_length] = (pass_size, counts_deepcpy)
        self._display.chatty("Finished setup")
    #helper routine to setup engine pass files
    def __initialize_passes(self):
        try:
            #make the log name hard to guess
            id = HangmanWordPassEngine._unchanging_randval
            if HangmanWordPassEngine._passfile_A == None and \
                HangmanWordPassEngine._passfile_B == None:
                # setup appropriate file streams for pass files
                HangmanWordPassEngine._passfile_A = open("pass_" + id + "_A.log", 'w')
                HangmanWordPassEngine._passfile_B = open("pass_" + id + "_B.log", 'w')
            pass_sequence = [HangmanWordPassEngine._passfile_A, HangmanWordPassEngine._passfile_B]
            # setup cycle to alternate files for reading and writing
            self._pass_cycle = itertools.cycle(pass_sequence)
            self._current_write_passfile = None
        except IOError as e:
            print 'Operation failed: %s' % e
    def set_pass_params(self, pass_params_tuple_vector):
        ''''
        set_pass_params
        Input tuple vector should be of following format
        filter_params_tuple = (last_guess_correct, letter, hangman_pattern, hangman_tally, regex, exclusion)
        'input' vector in used to reduce the word space
        last_guess_correct - last guess state (either correct or incorrect)
        letter - the letter to reduce the word set space from
        hangman_pattern - current hangman letter pattern state sequence
        hangman_tally - counter of letters in hangman pattern w/o mystery letter
        regex - compile regex object created from hangman_pattern
        exclusion - exclusion set of letters already guessed
        '''
        self._current_pass_params = pass_params_tuple_vector
    def reduce(self):
        """
        Reduce the word set space, update the unique map of letter/counts
        given the new word set universe
        Returns tuple w/ updated state
        """
        last_guess_correct, guess, hangman_pattern, hangman_tally, regex, exclusion \
            = self._current_pass_params
        assert(last_guess_correct != None and guess != None and exclusion != None \
            and hangman_pattern != None and hangman_tally != None and regex != None)
        #Dispatch on whether the last guessed letter was in the answer.
        if last_guess_correct:
            updated_state_tuple = self.__filter_correct_guess()
        else:
            updated_state_tuple = self.__filter_wrong_guess()
        return updated_state_tuple
    def __possible_hangman_words(self):
        """
        Generator function to iterate through the current hangman word pass sequence
        """
        while True:
            try:
                words_iter = iter(self._current_words_pipeline_readable)
                #.next() is the Python 2 iterator protocol.
                word = words_iter.next()
                yield word
            except StopIteration:
                break;
    #(filtered_stream) below is Python 2 parenthesized-parameter syntax.
    def __process_and_tally_filtered_stream(self, exclusion, (filtered_stream)):
        """
        Store the filtered word pass generator stream and tally the words while its being written to file.
        """
        assert(filtered_stream != None)
        #write to the pass file
        updated_state_tuple = self.__write_and_tally_stream(exclusion, filtered_stream)
        # store the lazy stream of the file
        # grab the words from the recently output pass using the read_pass_stream function
        self._current_words_pipeline_readable = (word for word in self.__read_pass_stream())
        return updated_state_tuple
    def __filter_wrong_guess(self):
        """
        Reduce the word set space,
        Words containing 'letter' are candidates for removal
        Args:
            self
            letter - the incorrect letter to reduce the word set space from
        Returns:
            Nothing
        """
        _, wrong_letter, _, _, _, exclusion = self._current_pass_params
        #generator comprehension to generate all words that don't have the letter
        #store the filtered pass
        words_filtered_stream = (word for word in self.__possible_hangman_words() if word.find(wrong_letter) == -1)
        updated_state_tuple = self.__process_and_tally_filtered_stream(exclusion, words_filtered_stream)
        return updated_state_tuple
    def __filter_correct_guess(self):
        """
        Reduce the word set space, examining each candidate word from a stream
        Returns: Nothing
        """
        #ifilter(None, ...) drops the False results returned for non-matches.
        words_filtered_stream = itertools.ifilter(None, \
            itertools.imap(self.__filter_candidate_word_regex, self.__possible_hangman_words()))
        _, _, _, _, _, exclusion = self._current_pass_params
        updated_state_tuple = self.__process_and_tally_filtered_stream(exclusion, words_filtered_stream)
        return updated_state_tuple
    def __filter_candidate_word_regex(self, word):
        """
        Determine if there is a match using the compiled regex and the candidate word
        Much faster than the regular __filter_candidate_word
        """
        _, _, _, _, regex, _ = self._current_pass_params
        if regex.match(word) == None: return False
        return word
    def __filter_candidate_word(self, word):
        """
        Reduce the word set space:
        a) knowing words not containing 'letter' are candidates for removal
        b) words that don't have the same letter counts for 'letter' are candidates
        c) words whose positions for 'letter' don't match are candidates
        """
        _, correct_letter, hangman_pattern, hangman_tally, _, _ = self._current_pass_params
        assert(word != None and correct_letter != None and hangman_tally != None)
        #if the correct letter is not found in the word, it can't be a candidate
        if word.find(correct_letter) == -1: return False;
        #if the correct hangman pattern letter counts don't match those of the word, it can't be a candidate
        if self.__check_letter_counts_match(word, hangman_tally) == False: return False;
        #if the correct hangman pattern letter positions don't match those of the word, it can't be a candidate
        if self.__check_letter_positions_match(word, hangman_pattern) == False: return False;
        #print 'HangmanWordPassEngine - filter candidate word - matches!!!', word
        return word;
    def __check_letter_counts_match(self, word, hangman_tally):
        """
        Ensure the known letter counts, in the given word, match the letter counts in the tally
        Args:
            self
            word - given dictionary word
            hangman_tally - tally of the known letter counts to check against
        Returns:
            boolean - True or False
        """
        assert(word != None and hangman_tally != None)
        #work on a copy so the caller's tally is not consumed
        tally = copy.deepcopy(hangman_tally)
        processed = set()
        #iterate through the characters in the word
        for i in range(len(word)):
            letter = word[i]
            #Only care if the letter is in the counter
            #Or has already been processed as having been in the counter
            if tally[letter] != 0 or letter in processed:
                tally[letter] -= 1
                processed.add(letter)
        #all known counts must be exactly consumed for a match
        if sum(tally.values()) != 0:
            return False
        return True
    def __check_letter_positions_match(self, word, hangman_pattern):
        """
        Ensure the known letter positions in the hangman word match those in the given word
        Args:
            self
            word - given dictionary word
            hangman - hangman secret word
        Returns:
            boolean - True or False
        """
        assert(word != None and hangman_pattern != None)
        for i in range(len(hangman_pattern)):
            if hangman_pattern[i] != self._mystery_letter and hangman_pattern[i] != word[i]:
                return False
        return True
    @staticmethod
    def __sort_and_write_dictfile_words(settings):
        """
        Function to read and then sort lines from dictionary file
        Assuming the dictionary words are well formed words and unique
        """
        try:
            #make the log name hard to guess
            id = HangmanWordPassEngine._unchanging_randval
            fdr = open(settings.get_dictfile_name())
            HangmanWordPassEngine._sorted_dictfile = open("sorted_dictionary_" + id + ".txt",'w')
            lines = fdr.readlines()
            #sorted by length so words can later be grouped by word length
            lines.sort(key=len)
            map(HangmanWordPassEngine._sorted_dictfile.write, lines)
            fdr.close()
            HangmanWordPassEngine._sorted_dictfile.close()
        except IOError as e:
            print 'Operation failed: %s' % e
    @staticmethod
    def __get_sorted_dict_stream():
        """
        Generator function to read each word (line) from sorted dictionary file
        """
        #last_read_word = None
        try:
            #reopen the file if a previous pass exhausted (closed) it
            if HangmanWordPassEngine._sorted_dictfile.closed:
                HangmanWordPassEngine._sorted_dictfile =\
                open(HangmanWordPassEngine._sorted_dictfile.name)
            with HangmanWordPassEngine._sorted_dictfile as fd:
                for wordline in fd:
                    word = wordline.strip().lower()
                    #performance hit
                    #nice to have but not necessary given well-formed dictionary assumption
                    #since this is a sorted file we can easily skip duplicates just in case
                    #if last_read_word != None and last_read_word == word: continue
                    yield word
                    #last_read_word = word
        except IOError as e:
            print 'gd Operation failed: %s' % e
    @staticmethod
    #acquires the sorted dictionary stream and returns the word in relevant word group arranged by length
    def __get_grouped_words_stream(group_key):
        for key, igroup in \
            itertools.groupby(HangmanWordPassEngine.__get_sorted_dict_stream(), lambda x: len(x)):
            if group_key == key:
                for word in igroup:
                    yield word
    def __read_pass_stream(self):
        """
        Generator function to read each word (line) from pass file
        """
        #read from the file written by the most recent pass
        self._current_read_passfile = self._previous_write_passfile
        try:
            if self._current_read_passfile.closed:
                self._current_read_passfile = open(self._current_read_passfile.name, 'r')
            with self._current_read_passfile as fd:
                for wordline in fd:
                    word = wordline.strip()
                    #self._display.chatty("read_passfile {}, word: {}".format(self._current_read_passfile, word))
                    yield word
        except IOError as e:
            print 'rp Operation failed: %s' % e
    #(words_stream) below is Python 2 parenthesized-parameter syntax.
    def __write_and_tally_stream(self, exclusion, (words_stream)):
        """
        Function to write each word from a generator strean to a pass file
        Tallies the words stream while they are being written (saving an extra file read)
        By "tally" - specifically, tally the unique word letters
        Given a word, tally the various letters in the word by uniqueness.
        If there are two a's in a word record only 1 a. If an exclusion set is
        provided, ignore the letters found in the exclusion set e.g. already guessed letters.
        """
        assert(exclusion != None and words_stream != None)
        tally = Counter()
        word_num = -1
        last_word = None
        #alternate A/B scratch files between passes
        self._current_write_passfile = next(self._pass_cycle)
        try:
            if self._current_write_passfile.closed:
                self._current_write_passfile = open(self._current_write_passfile.name, 'w')
            #self._display.clock("Deciding next guess 1.22")
            #self._display.clock("write and tally passfile 1.23")
            with self._current_write_passfile as fd:
                for word_num, word in enumerate(iter(words_stream)):
                    #self._display.chatty("write_passfile {}, word: {}".format(self._current_write_passfile, word))
                    fd.write("{}\n".format(word))
                    #self._display.clock("write and tally passfile 1.24{}".format(word_num))
                    processed = set()
                    for i in range(len(word)):
                        letter = word[i]
                        if exclusion != None and letter in exclusion:
                            continue
                        if letter not in processed:
                            tally[letter] += 1
                            processed.add(letter)
                    #remember the word when the pass has exactly one entry
                    if word_num + 1 == 1:
                        last_word = word
            #self._display.clock("write and tally passfile 1.25")
        except IOError as e:
            print 'wtp Operation failed: %s' % e
        self._previous_write_passfile = self._current_write_passfile
        return (tally, word_num + 1, last_word)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.